feat: extract devops packages from core/go
Build system, release automation, SDK generation, Ansible executor, LinuxKit dev environments, container runtime, deployment, infra metrics, and developer toolkit. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
commit
392ad68047
146 changed files with 31602 additions and 0 deletions
1021
ansible/executor.go
Normal file
1021
ansible/executor.go
Normal file
File diff suppressed because it is too large
Load diff
1434
ansible/modules.go
Normal file
1434
ansible/modules.go
Normal file
File diff suppressed because it is too large
Load diff
438
ansible/parser.go
Normal file
438
ansible/parser.go
Normal file
|
|
@ -0,0 +1,438 @@
|
|||
package ansible
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/log"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Parser handles Ansible YAML parsing.
|
||||
type Parser struct {
|
||||
basePath string
|
||||
vars map[string]any
|
||||
}
|
||||
|
||||
// NewParser creates a new Ansible parser.
|
||||
func NewParser(basePath string) *Parser {
|
||||
return &Parser{
|
||||
basePath: basePath,
|
||||
vars: make(map[string]any),
|
||||
}
|
||||
}
|
||||
|
||||
// ParsePlaybook parses an Ansible playbook file.
|
||||
func (p *Parser) ParsePlaybook(path string) ([]Play, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read playbook: %w", err)
|
||||
}
|
||||
|
||||
var plays []Play
|
||||
if err := yaml.Unmarshal(data, &plays); err != nil {
|
||||
return nil, fmt.Errorf("parse playbook: %w", err)
|
||||
}
|
||||
|
||||
// Process each play
|
||||
for i := range plays {
|
||||
if err := p.processPlay(&plays[i]); err != nil {
|
||||
return nil, fmt.Errorf("process play %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
return plays, nil
|
||||
}
|
||||
|
||||
// ParseInventory parses an Ansible inventory file.
|
||||
func (p *Parser) ParseInventory(path string) (*Inventory, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read inventory: %w", err)
|
||||
}
|
||||
|
||||
var inv Inventory
|
||||
if err := yaml.Unmarshal(data, &inv); err != nil {
|
||||
return nil, fmt.Errorf("parse inventory: %w", err)
|
||||
}
|
||||
|
||||
return &inv, nil
|
||||
}
|
||||
|
||||
// ParseTasks parses a tasks file (used by include_tasks).
|
||||
func (p *Parser) ParseTasks(path string) ([]Task, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read tasks: %w", err)
|
||||
}
|
||||
|
||||
var tasks []Task
|
||||
if err := yaml.Unmarshal(data, &tasks); err != nil {
|
||||
return nil, fmt.Errorf("parse tasks: %w", err)
|
||||
}
|
||||
|
||||
for i := range tasks {
|
||||
if err := p.extractModule(&tasks[i]); err != nil {
|
||||
return nil, fmt.Errorf("task %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
return tasks, nil
|
||||
}
|
||||
|
||||
// ParseRole parses a role and returns its tasks.
|
||||
func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
|
||||
if tasksFrom == "" {
|
||||
tasksFrom = "main.yml"
|
||||
}
|
||||
|
||||
// Search paths for roles (in order of precedence)
|
||||
searchPaths := []string{
|
||||
// Relative to playbook
|
||||
filepath.Join(p.basePath, "roles", name, "tasks", tasksFrom),
|
||||
// Parent directory roles
|
||||
filepath.Join(filepath.Dir(p.basePath), "roles", name, "tasks", tasksFrom),
|
||||
// Sibling roles directory
|
||||
filepath.Join(p.basePath, "..", "roles", name, "tasks", tasksFrom),
|
||||
// playbooks/roles pattern
|
||||
filepath.Join(p.basePath, "playbooks", "roles", name, "tasks", tasksFrom),
|
||||
// Common DevOps structure
|
||||
filepath.Join(filepath.Dir(filepath.Dir(p.basePath)), "roles", name, "tasks", tasksFrom),
|
||||
}
|
||||
|
||||
var tasksPath string
|
||||
for _, sp := range searchPaths {
|
||||
// Clean the path to resolve .. segments
|
||||
sp = filepath.Clean(sp)
|
||||
if _, err := os.Stat(sp); err == nil {
|
||||
tasksPath = sp
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if tasksPath == "" {
|
||||
return nil, log.E("parser.ParseRole", fmt.Sprintf("role %s not found in search paths: %v", name, searchPaths), nil)
|
||||
}
|
||||
|
||||
// Load role defaults
|
||||
defaultsPath := filepath.Join(filepath.Dir(filepath.Dir(tasksPath)), "defaults", "main.yml")
|
||||
if data, err := os.ReadFile(defaultsPath); err == nil {
|
||||
var defaults map[string]any
|
||||
if yaml.Unmarshal(data, &defaults) == nil {
|
||||
for k, v := range defaults {
|
||||
if _, exists := p.vars[k]; !exists {
|
||||
p.vars[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Load role vars
|
||||
varsPath := filepath.Join(filepath.Dir(filepath.Dir(tasksPath)), "vars", "main.yml")
|
||||
if data, err := os.ReadFile(varsPath); err == nil {
|
||||
var roleVars map[string]any
|
||||
if yaml.Unmarshal(data, &roleVars) == nil {
|
||||
for k, v := range roleVars {
|
||||
p.vars[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return p.ParseTasks(tasksPath)
|
||||
}
|
||||
|
||||
// processPlay processes a play and extracts modules from tasks.
|
||||
func (p *Parser) processPlay(play *Play) error {
|
||||
// Merge play vars
|
||||
for k, v := range play.Vars {
|
||||
p.vars[k] = v
|
||||
}
|
||||
|
||||
for i := range play.PreTasks {
|
||||
if err := p.extractModule(&play.PreTasks[i]); err != nil {
|
||||
return fmt.Errorf("pre_task %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := range play.Tasks {
|
||||
if err := p.extractModule(&play.Tasks[i]); err != nil {
|
||||
return fmt.Errorf("task %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := range play.PostTasks {
|
||||
if err := p.extractModule(&play.PostTasks[i]); err != nil {
|
||||
return fmt.Errorf("post_task %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := range play.Handlers {
|
||||
if err := p.extractModule(&play.Handlers[i]); err != nil {
|
||||
return fmt.Errorf("handler %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractModule extracts the module name and args from a task.
|
||||
func (p *Parser) extractModule(task *Task) error {
|
||||
// First, unmarshal the raw YAML to get all keys
|
||||
// This is a workaround since we need to find the module key dynamically
|
||||
|
||||
// Handle block tasks
|
||||
for i := range task.Block {
|
||||
if err := p.extractModule(&task.Block[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for i := range task.Rescue {
|
||||
if err := p.extractModule(&task.Rescue[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for i := range task.Always {
|
||||
if err := p.extractModule(&task.Always[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements custom YAML unmarshaling for Task.
|
||||
func (t *Task) UnmarshalYAML(node *yaml.Node) error {
|
||||
// First decode known fields
|
||||
type rawTask Task
|
||||
var raw rawTask
|
||||
|
||||
// Create a map to capture all fields
|
||||
var m map[string]any
|
||||
if err := node.Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode into struct
|
||||
if err := node.Decode(&raw); err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Task(raw)
|
||||
t.raw = m
|
||||
|
||||
// Find the module key
|
||||
knownKeys := map[string]bool{
|
||||
"name": true, "register": true, "when": true, "loop": true,
|
||||
"loop_control": true, "vars": true, "environment": true,
|
||||
"changed_when": true, "failed_when": true, "ignore_errors": true,
|
||||
"no_log": true, "become": true, "become_user": true,
|
||||
"delegate_to": true, "run_once": true, "tags": true,
|
||||
"block": true, "rescue": true, "always": true, "notify": true,
|
||||
"retries": true, "delay": true, "until": true,
|
||||
"include_tasks": true, "import_tasks": true,
|
||||
"include_role": true, "import_role": true,
|
||||
"with_items": true, "with_dict": true, "with_file": true,
|
||||
}
|
||||
|
||||
for key, val := range m {
|
||||
if knownKeys[key] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if this is a module
|
||||
if isModule(key) {
|
||||
t.Module = key
|
||||
t.Args = make(map[string]any)
|
||||
|
||||
switch v := val.(type) {
|
||||
case string:
|
||||
// Free-form args (e.g., shell: echo hello)
|
||||
t.Args["_raw_params"] = v
|
||||
case map[string]any:
|
||||
t.Args = v
|
||||
case nil:
|
||||
// Module with no args
|
||||
default:
|
||||
t.Args["_raw_params"] = v
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Handle with_items as loop
|
||||
if items, ok := m["with_items"]; ok && t.Loop == nil {
|
||||
t.Loop = items
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isModule checks if a key is a known module.
|
||||
func isModule(key string) bool {
|
||||
for _, m := range KnownModules {
|
||||
if key == m {
|
||||
return true
|
||||
}
|
||||
// Also check without ansible.builtin. prefix
|
||||
if strings.HasPrefix(m, "ansible.builtin.") {
|
||||
if key == strings.TrimPrefix(m, "ansible.builtin.") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
// Accept any key with dots (likely a module)
|
||||
return strings.Contains(key, ".")
|
||||
}
|
||||
|
||||
// NormalizeModule normalizes a module name to its canonical form.
|
||||
func NormalizeModule(name string) string {
|
||||
// Add ansible.builtin. prefix if missing
|
||||
if !strings.Contains(name, ".") {
|
||||
return "ansible.builtin." + name
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// GetHosts returns hosts matching a pattern from inventory.
|
||||
func GetHosts(inv *Inventory, pattern string) []string {
|
||||
if pattern == "all" {
|
||||
return getAllHosts(inv.All)
|
||||
}
|
||||
if pattern == "localhost" {
|
||||
return []string{"localhost"}
|
||||
}
|
||||
|
||||
// Check if it's a group name
|
||||
hosts := getGroupHosts(inv.All, pattern)
|
||||
if len(hosts) > 0 {
|
||||
return hosts
|
||||
}
|
||||
|
||||
// Check if it's a specific host
|
||||
if hasHost(inv.All, pattern) {
|
||||
return []string{pattern}
|
||||
}
|
||||
|
||||
// Handle patterns with : (intersection/union)
|
||||
// For now, just return empty
|
||||
return nil
|
||||
}
|
||||
|
||||
func getAllHosts(group *InventoryGroup) []string {
|
||||
if group == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var hosts []string
|
||||
for name := range group.Hosts {
|
||||
hosts = append(hosts, name)
|
||||
}
|
||||
for _, child := range group.Children {
|
||||
hosts = append(hosts, getAllHosts(child)...)
|
||||
}
|
||||
return hosts
|
||||
}
|
||||
|
||||
func getGroupHosts(group *InventoryGroup, name string) []string {
|
||||
if group == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check children for the group name
|
||||
if child, ok := group.Children[name]; ok {
|
||||
return getAllHosts(child)
|
||||
}
|
||||
|
||||
// Recurse
|
||||
for _, child := range group.Children {
|
||||
if hosts := getGroupHosts(child, name); len(hosts) > 0 {
|
||||
return hosts
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func hasHost(group *InventoryGroup, name string) bool {
|
||||
if group == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if _, ok := group.Hosts[name]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, child := range group.Children {
|
||||
if hasHost(child, name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// GetHostVars returns variables for a specific host.
|
||||
func GetHostVars(inv *Inventory, hostname string) map[string]any {
|
||||
vars := make(map[string]any)
|
||||
|
||||
// Collect vars from all levels
|
||||
collectHostVars(inv.All, hostname, vars)
|
||||
|
||||
return vars
|
||||
}
|
||||
|
||||
func collectHostVars(group *InventoryGroup, hostname string, vars map[string]any) bool {
|
||||
if group == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if host is in this group
|
||||
found := false
|
||||
if host, ok := group.Hosts[hostname]; ok {
|
||||
found = true
|
||||
// Apply group vars first
|
||||
for k, v := range group.Vars {
|
||||
vars[k] = v
|
||||
}
|
||||
// Then host vars
|
||||
if host != nil {
|
||||
if host.AnsibleHost != "" {
|
||||
vars["ansible_host"] = host.AnsibleHost
|
||||
}
|
||||
if host.AnsiblePort != 0 {
|
||||
vars["ansible_port"] = host.AnsiblePort
|
||||
}
|
||||
if host.AnsibleUser != "" {
|
||||
vars["ansible_user"] = host.AnsibleUser
|
||||
}
|
||||
if host.AnsiblePassword != "" {
|
||||
vars["ansible_password"] = host.AnsiblePassword
|
||||
}
|
||||
if host.AnsibleSSHPrivateKeyFile != "" {
|
||||
vars["ansible_ssh_private_key_file"] = host.AnsibleSSHPrivateKeyFile
|
||||
}
|
||||
if host.AnsibleConnection != "" {
|
||||
vars["ansible_connection"] = host.AnsibleConnection
|
||||
}
|
||||
for k, v := range host.Vars {
|
||||
vars[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check children
|
||||
for _, child := range group.Children {
|
||||
if collectHostVars(child, hostname, vars) {
|
||||
// Apply this group's vars (parent vars)
|
||||
for k, v := range group.Vars {
|
||||
if _, exists := vars[k]; !exists {
|
||||
vars[k] = v
|
||||
}
|
||||
}
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
return found
|
||||
}
|
||||
451
ansible/ssh.go
Normal file
451
ansible/ssh.go
Normal file
|
|
@ -0,0 +1,451 @@
|
|||
package ansible
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/log"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/crypto/ssh/knownhosts"
|
||||
)
|
||||
|
||||
// SSHClient handles SSH connections to remote hosts.
|
||||
type SSHClient struct {
|
||||
host string
|
||||
port int
|
||||
user string
|
||||
password string
|
||||
keyFile string
|
||||
client *ssh.Client
|
||||
mu sync.Mutex
|
||||
become bool
|
||||
becomeUser string
|
||||
becomePass string
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// SSHConfig holds SSH connection configuration.
|
||||
type SSHConfig struct {
|
||||
Host string
|
||||
Port int
|
||||
User string
|
||||
Password string
|
||||
KeyFile string
|
||||
Become bool
|
||||
BecomeUser string
|
||||
BecomePass string
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// NewSSHClient creates a new SSH client.
|
||||
func NewSSHClient(cfg SSHConfig) (*SSHClient, error) {
|
||||
if cfg.Port == 0 {
|
||||
cfg.Port = 22
|
||||
}
|
||||
if cfg.User == "" {
|
||||
cfg.User = "root"
|
||||
}
|
||||
if cfg.Timeout == 0 {
|
||||
cfg.Timeout = 30 * time.Second
|
||||
}
|
||||
|
||||
client := &SSHClient{
|
||||
host: cfg.Host,
|
||||
port: cfg.Port,
|
||||
user: cfg.User,
|
||||
password: cfg.Password,
|
||||
keyFile: cfg.KeyFile,
|
||||
become: cfg.Become,
|
||||
becomeUser: cfg.BecomeUser,
|
||||
becomePass: cfg.BecomePass,
|
||||
timeout: cfg.Timeout,
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Connect establishes the SSH connection.
|
||||
func (c *SSHClient) Connect(ctx context.Context) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.client != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var authMethods []ssh.AuthMethod
|
||||
|
||||
// Try key-based auth first
|
||||
if c.keyFile != "" {
|
||||
keyPath := c.keyFile
|
||||
if strings.HasPrefix(keyPath, "~") {
|
||||
home, _ := os.UserHomeDir()
|
||||
keyPath = filepath.Join(home, keyPath[1:])
|
||||
}
|
||||
|
||||
if key, err := os.ReadFile(keyPath); err == nil {
|
||||
if signer, err := ssh.ParsePrivateKey(key); err == nil {
|
||||
authMethods = append(authMethods, ssh.PublicKeys(signer))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try default SSH keys
|
||||
if len(authMethods) == 0 {
|
||||
home, _ := os.UserHomeDir()
|
||||
defaultKeys := []string{
|
||||
filepath.Join(home, ".ssh", "id_ed25519"),
|
||||
filepath.Join(home, ".ssh", "id_rsa"),
|
||||
}
|
||||
for _, keyPath := range defaultKeys {
|
||||
if key, err := os.ReadFile(keyPath); err == nil {
|
||||
if signer, err := ssh.ParsePrivateKey(key); err == nil {
|
||||
authMethods = append(authMethods, ssh.PublicKeys(signer))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to password auth
|
||||
if c.password != "" {
|
||||
authMethods = append(authMethods, ssh.Password(c.password))
|
||||
authMethods = append(authMethods, ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
|
||||
answers := make([]string, len(questions))
|
||||
for i := range questions {
|
||||
answers[i] = c.password
|
||||
}
|
||||
return answers, nil
|
||||
}))
|
||||
}
|
||||
|
||||
if len(authMethods) == 0 {
|
||||
return log.E("ssh.Connect", "no authentication method available", nil)
|
||||
}
|
||||
|
||||
// Host key verification
|
||||
var hostKeyCallback ssh.HostKeyCallback
|
||||
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return log.E("ssh.Connect", "failed to get user home dir", err)
|
||||
}
|
||||
knownHostsPath := filepath.Join(home, ".ssh", "known_hosts")
|
||||
|
||||
// Ensure known_hosts file exists
|
||||
if _, err := os.Stat(knownHostsPath); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(filepath.Dir(knownHostsPath), 0700); err != nil {
|
||||
return log.E("ssh.Connect", "failed to create .ssh dir", err)
|
||||
}
|
||||
if err := os.WriteFile(knownHostsPath, nil, 0600); err != nil {
|
||||
return log.E("ssh.Connect", "failed to create known_hosts file", err)
|
||||
}
|
||||
}
|
||||
|
||||
cb, err := knownhosts.New(knownHostsPath)
|
||||
if err != nil {
|
||||
return log.E("ssh.Connect", "failed to load known_hosts", err)
|
||||
}
|
||||
hostKeyCallback = cb
|
||||
|
||||
config := &ssh.ClientConfig{
|
||||
User: c.user,
|
||||
Auth: authMethods,
|
||||
HostKeyCallback: hostKeyCallback,
|
||||
Timeout: c.timeout,
|
||||
}
|
||||
|
||||
addr := fmt.Sprintf("%s:%d", c.host, c.port)
|
||||
|
||||
// Connect with context timeout
|
||||
var d net.Dialer
|
||||
conn, err := d.DialContext(ctx, "tcp", addr)
|
||||
if err != nil {
|
||||
return log.E("ssh.Connect", fmt.Sprintf("dial %s", addr), err)
|
||||
}
|
||||
|
||||
sshConn, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
|
||||
if err != nil {
|
||||
// conn is closed by NewClientConn on error
|
||||
return log.E("ssh.Connect", fmt.Sprintf("ssh connect %s", addr), err)
|
||||
}
|
||||
|
||||
c.client = ssh.NewClient(sshConn, chans, reqs)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the SSH connection.
|
||||
func (c *SSHClient) Close() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.client != nil {
|
||||
err := c.client.Close()
|
||||
c.client = nil
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run executes a command on the remote host.
|
||||
func (c *SSHClient) Run(ctx context.Context, cmd string) (stdout, stderr string, exitCode int, err error) {
|
||||
if err := c.Connect(ctx); err != nil {
|
||||
return "", "", -1, err
|
||||
}
|
||||
|
||||
session, err := c.client.NewSession()
|
||||
if err != nil {
|
||||
return "", "", -1, log.E("ssh.Run", "new session", err)
|
||||
}
|
||||
defer func() { _ = session.Close() }()
|
||||
|
||||
var stdoutBuf, stderrBuf bytes.Buffer
|
||||
session.Stdout = &stdoutBuf
|
||||
session.Stderr = &stderrBuf
|
||||
|
||||
// Apply become if needed
|
||||
if c.become {
|
||||
becomeUser := c.becomeUser
|
||||
if becomeUser == "" {
|
||||
becomeUser = "root"
|
||||
}
|
||||
// Escape single quotes in the command
|
||||
escapedCmd := strings.ReplaceAll(cmd, "'", "'\\''")
|
||||
if c.becomePass != "" {
|
||||
// Use sudo with password via stdin (-S flag)
|
||||
// We launch a goroutine to write the password to stdin
|
||||
cmd = fmt.Sprintf("sudo -S -u %s bash -c '%s'", becomeUser, escapedCmd)
|
||||
stdin, err := session.StdinPipe()
|
||||
if err != nil {
|
||||
return "", "", -1, log.E("ssh.Run", "stdin pipe", err)
|
||||
}
|
||||
go func() {
|
||||
defer func() { _ = stdin.Close() }()
|
||||
_, _ = io.WriteString(stdin, c.becomePass+"\n")
|
||||
}()
|
||||
} else if c.password != "" {
|
||||
// Try using connection password for sudo
|
||||
cmd = fmt.Sprintf("sudo -S -u %s bash -c '%s'", becomeUser, escapedCmd)
|
||||
stdin, err := session.StdinPipe()
|
||||
if err != nil {
|
||||
return "", "", -1, log.E("ssh.Run", "stdin pipe", err)
|
||||
}
|
||||
go func() {
|
||||
defer func() { _ = stdin.Close() }()
|
||||
_, _ = io.WriteString(stdin, c.password+"\n")
|
||||
}()
|
||||
} else {
|
||||
// Try passwordless sudo
|
||||
cmd = fmt.Sprintf("sudo -n -u %s bash -c '%s'", becomeUser, escapedCmd)
|
||||
}
|
||||
}
|
||||
|
||||
// Run with context
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- session.Run(cmd)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
_ = session.Signal(ssh.SIGKILL)
|
||||
return "", "", -1, ctx.Err()
|
||||
case err := <-done:
|
||||
exitCode = 0
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*ssh.ExitError); ok {
|
||||
exitCode = exitErr.ExitStatus()
|
||||
} else {
|
||||
return stdoutBuf.String(), stderrBuf.String(), -1, err
|
||||
}
|
||||
}
|
||||
return stdoutBuf.String(), stderrBuf.String(), exitCode, nil
|
||||
}
|
||||
}
|
||||
|
||||
// RunScript runs a script on the remote host.
|
||||
func (c *SSHClient) RunScript(ctx context.Context, script string) (stdout, stderr string, exitCode int, err error) {
|
||||
// Escape the script for heredoc
|
||||
cmd := fmt.Sprintf("bash <<'ANSIBLE_SCRIPT_EOF'\n%s\nANSIBLE_SCRIPT_EOF", script)
|
||||
return c.Run(ctx, cmd)
|
||||
}
|
||||
|
||||
// Upload copies a file to the remote host.
|
||||
func (c *SSHClient) Upload(ctx context.Context, local io.Reader, remote string, mode os.FileMode) error {
|
||||
if err := c.Connect(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read content
|
||||
content, err := io.ReadAll(local)
|
||||
if err != nil {
|
||||
return log.E("ssh.Upload", "read content", err)
|
||||
}
|
||||
|
||||
// Create parent directory
|
||||
dir := filepath.Dir(remote)
|
||||
dirCmd := fmt.Sprintf("mkdir -p %q", dir)
|
||||
if c.become {
|
||||
dirCmd = fmt.Sprintf("sudo mkdir -p %q", dir)
|
||||
}
|
||||
if _, _, _, err := c.Run(ctx, dirCmd); err != nil {
|
||||
return log.E("ssh.Upload", "create parent dir", err)
|
||||
}
|
||||
|
||||
// Use cat to write the file (simpler than SCP)
|
||||
writeCmd := fmt.Sprintf("cat > %q && chmod %o %q", remote, mode, remote)
|
||||
|
||||
// If become is needed, we construct a command that reads password then content from stdin
|
||||
// But we need to be careful with handling stdin for sudo + cat.
|
||||
// We'll use a session with piped stdin.
|
||||
|
||||
session2, err := c.client.NewSession()
|
||||
if err != nil {
|
||||
return log.E("ssh.Upload", "new session for write", err)
|
||||
}
|
||||
defer func() { _ = session2.Close() }()
|
||||
|
||||
stdin, err := session2.StdinPipe()
|
||||
if err != nil {
|
||||
return log.E("ssh.Upload", "stdin pipe", err)
|
||||
}
|
||||
|
||||
var stderrBuf bytes.Buffer
|
||||
session2.Stderr = &stderrBuf
|
||||
|
||||
if c.become {
|
||||
becomeUser := c.becomeUser
|
||||
if becomeUser == "" {
|
||||
becomeUser = "root"
|
||||
}
|
||||
|
||||
pass := c.becomePass
|
||||
if pass == "" {
|
||||
pass = c.password
|
||||
}
|
||||
|
||||
if pass != "" {
|
||||
// Use sudo -S with password from stdin
|
||||
writeCmd = fmt.Sprintf("sudo -S -u %s bash -c 'cat > %q && chmod %o %q'",
|
||||
becomeUser, remote, mode, remote)
|
||||
} else {
|
||||
// Use passwordless sudo (sudo -n) to avoid consuming file content as password
|
||||
writeCmd = fmt.Sprintf("sudo -n -u %s bash -c 'cat > %q && chmod %o %q'",
|
||||
becomeUser, remote, mode, remote)
|
||||
}
|
||||
|
||||
if err := session2.Start(writeCmd); err != nil {
|
||||
return log.E("ssh.Upload", "start write", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() { _ = stdin.Close() }()
|
||||
if pass != "" {
|
||||
_, _ = io.WriteString(stdin, pass+"\n")
|
||||
}
|
||||
_, _ = stdin.Write(content)
|
||||
}()
|
||||
} else {
|
||||
// Normal write
|
||||
if err := session2.Start(writeCmd); err != nil {
|
||||
return log.E("ssh.Upload", "start write", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() { _ = stdin.Close() }()
|
||||
_, _ = stdin.Write(content)
|
||||
}()
|
||||
}
|
||||
|
||||
if err := session2.Wait(); err != nil {
|
||||
return log.E("ssh.Upload", fmt.Sprintf("write failed (stderr: %s)", stderrBuf.String()), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Download copies a file from the remote host.
|
||||
func (c *SSHClient) Download(ctx context.Context, remote string) ([]byte, error) {
|
||||
if err := c.Connect(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmd := fmt.Sprintf("cat %q", remote)
|
||||
|
||||
stdout, stderr, exitCode, err := c.Run(ctx, cmd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if exitCode != 0 {
|
||||
return nil, log.E("ssh.Download", fmt.Sprintf("cat failed: %s", stderr), nil)
|
||||
}
|
||||
|
||||
return []byte(stdout), nil
|
||||
}
|
||||
|
||||
// FileExists checks if a file exists on the remote host.
|
||||
func (c *SSHClient) FileExists(ctx context.Context, path string) (bool, error) {
|
||||
cmd := fmt.Sprintf("test -e %q && echo yes || echo no", path)
|
||||
stdout, _, exitCode, err := c.Run(ctx, cmd)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if exitCode != 0 {
|
||||
// test command failed but didn't error - file doesn't exist
|
||||
return false, nil
|
||||
}
|
||||
return strings.TrimSpace(stdout) == "yes", nil
|
||||
}
|
||||
|
||||
// Stat returns file info from the remote host.
|
||||
func (c *SSHClient) Stat(ctx context.Context, path string) (map[string]any, error) {
|
||||
// Simple approach - get basic file info
|
||||
cmd := fmt.Sprintf(`
|
||||
if [ -e %q ]; then
|
||||
if [ -d %q ]; then
|
||||
echo "exists=true isdir=true"
|
||||
else
|
||||
echo "exists=true isdir=false"
|
||||
fi
|
||||
else
|
||||
echo "exists=false"
|
||||
fi
|
||||
`, path, path)
|
||||
|
||||
stdout, _, _, err := c.Run(ctx, cmd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := make(map[string]any)
|
||||
parts := strings.Fields(strings.TrimSpace(stdout))
|
||||
for _, part := range parts {
|
||||
kv := strings.SplitN(part, "=", 2)
|
||||
if len(kv) == 2 {
|
||||
result[kv[0]] = kv[1] == "true"
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// SetBecome enables privilege escalation.
|
||||
func (c *SSHClient) SetBecome(become bool, user, password string) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.become = become
|
||||
if user != "" {
|
||||
c.becomeUser = user
|
||||
}
|
||||
if password != "" {
|
||||
c.becomePass = password
|
||||
}
|
||||
}
|
||||
36
ansible/ssh_test.go
Normal file
36
ansible/ssh_test.go
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
package ansible
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewSSHClient(t *testing.T) {
|
||||
cfg := SSHConfig{
|
||||
Host: "localhost",
|
||||
Port: 2222,
|
||||
User: "root",
|
||||
}
|
||||
|
||||
client, err := NewSSHClient(cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, client)
|
||||
assert.Equal(t, "localhost", client.host)
|
||||
assert.Equal(t, 2222, client.port)
|
||||
assert.Equal(t, "root", client.user)
|
||||
assert.Equal(t, 30*time.Second, client.timeout)
|
||||
}
|
||||
|
||||
func TestSSHConfig_Defaults(t *testing.T) {
|
||||
cfg := SSHConfig{
|
||||
Host: "localhost",
|
||||
}
|
||||
|
||||
client, err := NewSSHClient(cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 22, client.port)
|
||||
assert.Equal(t, "root", client.user)
|
||||
assert.Equal(t, 30*time.Second, client.timeout)
|
||||
}
|
||||
258
ansible/types.go
Normal file
258
ansible/types.go
Normal file
|
|
@ -0,0 +1,258 @@
|
|||
package ansible
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Playbook represents an Ansible playbook.
|
||||
type Playbook struct {
|
||||
Plays []Play `yaml:",inline"`
|
||||
}
|
||||
|
||||
// Play represents a single play in a playbook.
|
||||
type Play struct {
|
||||
Name string `yaml:"name"`
|
||||
Hosts string `yaml:"hosts"`
|
||||
Connection string `yaml:"connection,omitempty"`
|
||||
Become bool `yaml:"become,omitempty"`
|
||||
BecomeUser string `yaml:"become_user,omitempty"`
|
||||
GatherFacts *bool `yaml:"gather_facts,omitempty"`
|
||||
Vars map[string]any `yaml:"vars,omitempty"`
|
||||
PreTasks []Task `yaml:"pre_tasks,omitempty"`
|
||||
Tasks []Task `yaml:"tasks,omitempty"`
|
||||
PostTasks []Task `yaml:"post_tasks,omitempty"`
|
||||
Roles []RoleRef `yaml:"roles,omitempty"`
|
||||
Handlers []Task `yaml:"handlers,omitempty"`
|
||||
Tags []string `yaml:"tags,omitempty"`
|
||||
Environment map[string]string `yaml:"environment,omitempty"`
|
||||
Serial any `yaml:"serial,omitempty"` // int or string
|
||||
MaxFailPercent int `yaml:"max_fail_percentage,omitempty"`
|
||||
}
|
||||
|
||||
// RoleRef represents a role reference in a play.
|
||||
type RoleRef struct {
|
||||
Role string `yaml:"role,omitempty"`
|
||||
Name string `yaml:"name,omitempty"` // Alternative to role
|
||||
TasksFrom string `yaml:"tasks_from,omitempty"`
|
||||
Vars map[string]any `yaml:"vars,omitempty"`
|
||||
When any `yaml:"when,omitempty"`
|
||||
Tags []string `yaml:"tags,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalYAML handles both string and struct role refs.
|
||||
func (r *RoleRef) UnmarshalYAML(unmarshal func(any) error) error {
|
||||
// Try string first
|
||||
var s string
|
||||
if err := unmarshal(&s); err == nil {
|
||||
r.Role = s
|
||||
return nil
|
||||
}
|
||||
|
||||
// Try struct
|
||||
type rawRoleRef RoleRef
|
||||
var raw rawRoleRef
|
||||
if err := unmarshal(&raw); err != nil {
|
||||
return err
|
||||
}
|
||||
*r = RoleRef(raw)
|
||||
if r.Role == "" && r.Name != "" {
|
||||
r.Role = r.Name
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Task represents an Ansible task.
|
||||
type Task struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
Module string `yaml:"-"` // Derived from the module key
|
||||
Args map[string]any `yaml:"-"` // Module arguments
|
||||
Register string `yaml:"register,omitempty"`
|
||||
When any `yaml:"when,omitempty"` // string or []string
|
||||
Loop any `yaml:"loop,omitempty"` // string or []any
|
||||
LoopControl *LoopControl `yaml:"loop_control,omitempty"`
|
||||
Vars map[string]any `yaml:"vars,omitempty"`
|
||||
Environment map[string]string `yaml:"environment,omitempty"`
|
||||
ChangedWhen any `yaml:"changed_when,omitempty"`
|
||||
FailedWhen any `yaml:"failed_when,omitempty"`
|
||||
IgnoreErrors bool `yaml:"ignore_errors,omitempty"`
|
||||
NoLog bool `yaml:"no_log,omitempty"`
|
||||
Become *bool `yaml:"become,omitempty"`
|
||||
BecomeUser string `yaml:"become_user,omitempty"`
|
||||
Delegate string `yaml:"delegate_to,omitempty"`
|
||||
RunOnce bool `yaml:"run_once,omitempty"`
|
||||
Tags []string `yaml:"tags,omitempty"`
|
||||
Block []Task `yaml:"block,omitempty"`
|
||||
Rescue []Task `yaml:"rescue,omitempty"`
|
||||
Always []Task `yaml:"always,omitempty"`
|
||||
Notify any `yaml:"notify,omitempty"` // string or []string
|
||||
Retries int `yaml:"retries,omitempty"`
|
||||
Delay int `yaml:"delay,omitempty"`
|
||||
Until string `yaml:"until,omitempty"`
|
||||
|
||||
// Include/import directives
|
||||
IncludeTasks string `yaml:"include_tasks,omitempty"`
|
||||
ImportTasks string `yaml:"import_tasks,omitempty"`
|
||||
IncludeRole *struct {
|
||||
Name string `yaml:"name"`
|
||||
TasksFrom string `yaml:"tasks_from,omitempty"`
|
||||
Vars map[string]any `yaml:"vars,omitempty"`
|
||||
} `yaml:"include_role,omitempty"`
|
||||
ImportRole *struct {
|
||||
Name string `yaml:"name"`
|
||||
TasksFrom string `yaml:"tasks_from,omitempty"`
|
||||
Vars map[string]any `yaml:"vars,omitempty"`
|
||||
} `yaml:"import_role,omitempty"`
|
||||
|
||||
// Raw YAML for module extraction
|
||||
raw map[string]any
|
||||
}
|
||||
|
||||
// LoopControl controls loop behavior.
|
||||
type LoopControl struct {
|
||||
LoopVar string `yaml:"loop_var,omitempty"`
|
||||
IndexVar string `yaml:"index_var,omitempty"`
|
||||
Label string `yaml:"label,omitempty"`
|
||||
Pause int `yaml:"pause,omitempty"`
|
||||
Extended bool `yaml:"extended,omitempty"`
|
||||
}
|
||||
|
||||
// TaskResult holds the result of executing a task.
|
||||
type TaskResult struct {
|
||||
Changed bool `json:"changed"`
|
||||
Failed bool `json:"failed"`
|
||||
Skipped bool `json:"skipped"`
|
||||
Msg string `json:"msg,omitempty"`
|
||||
Stdout string `json:"stdout,omitempty"`
|
||||
Stderr string `json:"stderr,omitempty"`
|
||||
RC int `json:"rc,omitempty"`
|
||||
Results []TaskResult `json:"results,omitempty"` // For loops
|
||||
Data map[string]any `json:"data,omitempty"` // Module-specific data
|
||||
Duration time.Duration `json:"duration,omitempty"`
|
||||
}
|
||||
|
||||
// Inventory represents Ansible inventory.
|
||||
type Inventory struct {
|
||||
All *InventoryGroup `yaml:"all"`
|
||||
}
|
||||
|
||||
// InventoryGroup represents a group in inventory.
|
||||
type InventoryGroup struct {
|
||||
Hosts map[string]*Host `yaml:"hosts,omitempty"`
|
||||
Children map[string]*InventoryGroup `yaml:"children,omitempty"`
|
||||
Vars map[string]any `yaml:"vars,omitempty"`
|
||||
}
|
||||
|
||||
// Host represents a host in inventory.
|
||||
type Host struct {
|
||||
AnsibleHost string `yaml:"ansible_host,omitempty"`
|
||||
AnsiblePort int `yaml:"ansible_port,omitempty"`
|
||||
AnsibleUser string `yaml:"ansible_user,omitempty"`
|
||||
AnsiblePassword string `yaml:"ansible_password,omitempty"`
|
||||
AnsibleSSHPrivateKeyFile string `yaml:"ansible_ssh_private_key_file,omitempty"`
|
||||
AnsibleConnection string `yaml:"ansible_connection,omitempty"`
|
||||
AnsibleBecomePassword string `yaml:"ansible_become_password,omitempty"`
|
||||
|
||||
// Custom vars
|
||||
Vars map[string]any `yaml:",inline"`
|
||||
}
|
||||
|
||||
// Facts holds gathered facts about a host.
|
||||
type Facts struct {
|
||||
Hostname string `json:"ansible_hostname"`
|
||||
FQDN string `json:"ansible_fqdn"`
|
||||
OS string `json:"ansible_os_family"`
|
||||
Distribution string `json:"ansible_distribution"`
|
||||
Version string `json:"ansible_distribution_version"`
|
||||
Architecture string `json:"ansible_architecture"`
|
||||
Kernel string `json:"ansible_kernel"`
|
||||
Memory int64 `json:"ansible_memtotal_mb"`
|
||||
CPUs int `json:"ansible_processor_vcpus"`
|
||||
IPv4 string `json:"ansible_default_ipv4_address"`
|
||||
}
|
||||
|
||||
// Known Ansible modules
|
||||
var KnownModules = []string{
|
||||
// Builtin
|
||||
"ansible.builtin.shell",
|
||||
"ansible.builtin.command",
|
||||
"ansible.builtin.raw",
|
||||
"ansible.builtin.script",
|
||||
"ansible.builtin.copy",
|
||||
"ansible.builtin.template",
|
||||
"ansible.builtin.file",
|
||||
"ansible.builtin.lineinfile",
|
||||
"ansible.builtin.blockinfile",
|
||||
"ansible.builtin.stat",
|
||||
"ansible.builtin.slurp",
|
||||
"ansible.builtin.fetch",
|
||||
"ansible.builtin.get_url",
|
||||
"ansible.builtin.uri",
|
||||
"ansible.builtin.apt",
|
||||
"ansible.builtin.apt_key",
|
||||
"ansible.builtin.apt_repository",
|
||||
"ansible.builtin.yum",
|
||||
"ansible.builtin.dnf",
|
||||
"ansible.builtin.package",
|
||||
"ansible.builtin.pip",
|
||||
"ansible.builtin.service",
|
||||
"ansible.builtin.systemd",
|
||||
"ansible.builtin.user",
|
||||
"ansible.builtin.group",
|
||||
"ansible.builtin.cron",
|
||||
"ansible.builtin.git",
|
||||
"ansible.builtin.unarchive",
|
||||
"ansible.builtin.archive",
|
||||
"ansible.builtin.debug",
|
||||
"ansible.builtin.fail",
|
||||
"ansible.builtin.assert",
|
||||
"ansible.builtin.pause",
|
||||
"ansible.builtin.wait_for",
|
||||
"ansible.builtin.set_fact",
|
||||
"ansible.builtin.include_vars",
|
||||
"ansible.builtin.add_host",
|
||||
"ansible.builtin.group_by",
|
||||
"ansible.builtin.meta",
|
||||
"ansible.builtin.setup",
|
||||
|
||||
// Short forms (legacy)
|
||||
"shell",
|
||||
"command",
|
||||
"raw",
|
||||
"script",
|
||||
"copy",
|
||||
"template",
|
||||
"file",
|
||||
"lineinfile",
|
||||
"blockinfile",
|
||||
"stat",
|
||||
"slurp",
|
||||
"fetch",
|
||||
"get_url",
|
||||
"uri",
|
||||
"apt",
|
||||
"apt_key",
|
||||
"apt_repository",
|
||||
"yum",
|
||||
"dnf",
|
||||
"package",
|
||||
"pip",
|
||||
"service",
|
||||
"systemd",
|
||||
"user",
|
||||
"group",
|
||||
"cron",
|
||||
"git",
|
||||
"unarchive",
|
||||
"archive",
|
||||
"debug",
|
||||
"fail",
|
||||
"assert",
|
||||
"pause",
|
||||
"wait_for",
|
||||
"set_fact",
|
||||
"include_vars",
|
||||
"add_host",
|
||||
"group_by",
|
||||
"meta",
|
||||
"setup",
|
||||
}
|
||||
297
build/archive.go
Normal file
297
build/archive.go
Normal file
|
|
@ -0,0 +1,297 @@
|
|||
// Package build provides project type detection and cross-compilation for the Core build system.
|
||||
package build
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Snider/Borg/pkg/compress"
|
||||
io_interface "forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// ArchiveFormat specifies the compression format for archives.
|
||||
type ArchiveFormat string
|
||||
|
||||
const (
|
||||
// ArchiveFormatGzip uses tar.gz (gzip compression) - widely compatible.
|
||||
ArchiveFormatGzip ArchiveFormat = "gz"
|
||||
// ArchiveFormatXZ uses tar.xz (xz/LZMA2 compression) - better compression ratio.
|
||||
ArchiveFormatXZ ArchiveFormat = "xz"
|
||||
// ArchiveFormatZip uses zip - for Windows.
|
||||
ArchiveFormatZip ArchiveFormat = "zip"
|
||||
)
|
||||
|
||||
// Archive creates an archive for a single artifact using gzip compression.
|
||||
// Uses tar.gz for linux/darwin and zip for windows.
|
||||
// The archive is created alongside the binary (e.g., dist/myapp_linux_amd64.tar.gz).
|
||||
// Returns a new Artifact with Path pointing to the archive.
|
||||
func Archive(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
|
||||
return ArchiveWithFormat(fs, artifact, ArchiveFormatGzip)
|
||||
}
|
||||
|
||||
// ArchiveXZ creates an archive for a single artifact using xz compression.
|
||||
// Uses tar.xz for linux/darwin and zip for windows.
|
||||
// Returns a new Artifact with Path pointing to the archive.
|
||||
func ArchiveXZ(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
|
||||
return ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
|
||||
}
|
||||
|
||||
// ArchiveWithFormat creates an archive for a single artifact with the specified format.
|
||||
// Uses tar.gz or tar.xz for linux/darwin and zip for windows.
|
||||
// The archive is created alongside the binary (e.g., dist/myapp_linux_amd64.tar.xz).
|
||||
// Returns a new Artifact with Path pointing to the archive.
|
||||
func ArchiveWithFormat(fs io_interface.Medium, artifact Artifact, format ArchiveFormat) (Artifact, error) {
|
||||
if artifact.Path == "" {
|
||||
return Artifact{}, fmt.Errorf("build.Archive: artifact path is empty")
|
||||
}
|
||||
|
||||
// Verify the source file exists
|
||||
info, err := fs.Stat(artifact.Path)
|
||||
if err != nil {
|
||||
return Artifact{}, fmt.Errorf("build.Archive: source file not found: %w", err)
|
||||
}
|
||||
if info.IsDir() {
|
||||
return Artifact{}, fmt.Errorf("build.Archive: source path is a directory, expected file")
|
||||
}
|
||||
|
||||
// Determine archive type based on OS and format
|
||||
var archivePath string
|
||||
var archiveFunc func(fs io_interface.Medium, src, dst string) error
|
||||
|
||||
if artifact.OS == "windows" {
|
||||
archivePath = archiveFilename(artifact, ".zip")
|
||||
archiveFunc = createZipArchive
|
||||
} else {
|
||||
switch format {
|
||||
case ArchiveFormatXZ:
|
||||
archivePath = archiveFilename(artifact, ".tar.xz")
|
||||
archiveFunc = createTarXzArchive
|
||||
default:
|
||||
archivePath = archiveFilename(artifact, ".tar.gz")
|
||||
archiveFunc = createTarGzArchive
|
||||
}
|
||||
}
|
||||
|
||||
// Create the archive
|
||||
if err := archiveFunc(fs, artifact.Path, archivePath); err != nil {
|
||||
return Artifact{}, fmt.Errorf("build.Archive: failed to create archive: %w", err)
|
||||
}
|
||||
|
||||
return Artifact{
|
||||
Path: archivePath,
|
||||
OS: artifact.OS,
|
||||
Arch: artifact.Arch,
|
||||
Checksum: artifact.Checksum,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ArchiveAll archives all artifacts using gzip compression.
|
||||
// Returns a slice of new artifacts pointing to the archives.
|
||||
func ArchiveAll(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) {
|
||||
return ArchiveAllWithFormat(fs, artifacts, ArchiveFormatGzip)
|
||||
}
|
||||
|
||||
// ArchiveAllXZ archives all artifacts using xz compression.
|
||||
// Returns a slice of new artifacts pointing to the archives.
|
||||
func ArchiveAllXZ(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) {
|
||||
return ArchiveAllWithFormat(fs, artifacts, ArchiveFormatXZ)
|
||||
}
|
||||
|
||||
// ArchiveAllWithFormat archives all artifacts with the specified format.
|
||||
// Returns a slice of new artifacts pointing to the archives.
|
||||
func ArchiveAllWithFormat(fs io_interface.Medium, artifacts []Artifact, format ArchiveFormat) ([]Artifact, error) {
|
||||
if len(artifacts) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var archived []Artifact
|
||||
for _, artifact := range artifacts {
|
||||
arch, err := ArchiveWithFormat(fs, artifact, format)
|
||||
if err != nil {
|
||||
return archived, fmt.Errorf("build.ArchiveAll: failed to archive %s: %w", artifact.Path, err)
|
||||
}
|
||||
archived = append(archived, arch)
|
||||
}
|
||||
|
||||
return archived, nil
|
||||
}
|
||||
|
||||
// archiveFilename generates the archive filename based on the artifact and extension.
|
||||
// Format: dist/myapp_linux_amd64.tar.gz (binary name taken from artifact path).
|
||||
func archiveFilename(artifact Artifact, ext string) string {
|
||||
// Get the directory containing the binary (e.g., dist/linux_amd64)
|
||||
dir := filepath.Dir(artifact.Path)
|
||||
// Go up one level to the output directory (e.g., dist)
|
||||
outputDir := filepath.Dir(dir)
|
||||
|
||||
// Get the binary name without extension
|
||||
binaryName := filepath.Base(artifact.Path)
|
||||
binaryName = strings.TrimSuffix(binaryName, ".exe")
|
||||
|
||||
// Construct archive name: myapp_linux_amd64.tar.gz
|
||||
archiveName := fmt.Sprintf("%s_%s_%s%s", binaryName, artifact.OS, artifact.Arch, ext)
|
||||
|
||||
return filepath.Join(outputDir, archiveName)
|
||||
}
|
||||
|
||||
// createTarXzArchive creates a tar.xz archive containing a single file.
|
||||
// Uses Borg's compress package for xz compression.
|
||||
func createTarXzArchive(fs io_interface.Medium, src, dst string) error {
|
||||
// Open the source file
|
||||
srcFile, err := fs.Open(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
defer func() { _ = srcFile.Close() }()
|
||||
|
||||
srcInfo, err := srcFile.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat source file: %w", err)
|
||||
}
|
||||
|
||||
// Create tar archive in memory
|
||||
var tarBuf bytes.Buffer
|
||||
tarWriter := tar.NewWriter(&tarBuf)
|
||||
|
||||
// Create tar header
|
||||
header, err := tar.FileInfoHeader(srcInfo, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create tar header: %w", err)
|
||||
}
|
||||
header.Name = filepath.Base(src)
|
||||
|
||||
if err := tarWriter.WriteHeader(header); err != nil {
|
||||
return fmt.Errorf("failed to write tar header: %w", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(tarWriter, srcFile); err != nil {
|
||||
return fmt.Errorf("failed to write file content to tar: %w", err)
|
||||
}
|
||||
|
||||
if err := tarWriter.Close(); err != nil {
|
||||
return fmt.Errorf("failed to close tar writer: %w", err)
|
||||
}
|
||||
|
||||
// Compress with xz using Borg
|
||||
xzData, err := compress.Compress(tarBuf.Bytes(), "xz")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compress with xz: %w", err)
|
||||
}
|
||||
|
||||
// Write to destination file
|
||||
dstFile, err := fs.Create(dst)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create archive file: %w", err)
|
||||
}
|
||||
defer func() { _ = dstFile.Close() }()
|
||||
|
||||
if _, err := dstFile.Write(xzData); err != nil {
|
||||
return fmt.Errorf("failed to write archive file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createTarGzArchive creates a tar.gz archive containing a single file.
|
||||
func createTarGzArchive(fs io_interface.Medium, src, dst string) error {
|
||||
// Open the source file
|
||||
srcFile, err := fs.Open(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
defer func() { _ = srcFile.Close() }()
|
||||
|
||||
srcInfo, err := srcFile.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat source file: %w", err)
|
||||
}
|
||||
|
||||
// Create the destination file
|
||||
dstFile, err := fs.Create(dst)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create archive file: %w", err)
|
||||
}
|
||||
defer func() { _ = dstFile.Close() }()
|
||||
|
||||
// Create gzip writer
|
||||
gzWriter := gzip.NewWriter(dstFile)
|
||||
defer func() { _ = gzWriter.Close() }()
|
||||
|
||||
// Create tar writer
|
||||
tarWriter := tar.NewWriter(gzWriter)
|
||||
defer func() { _ = tarWriter.Close() }()
|
||||
|
||||
// Create tar header
|
||||
header, err := tar.FileInfoHeader(srcInfo, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create tar header: %w", err)
|
||||
}
|
||||
// Use just the filename, not the full path
|
||||
header.Name = filepath.Base(src)
|
||||
|
||||
// Write header
|
||||
if err := tarWriter.WriteHeader(header); err != nil {
|
||||
return fmt.Errorf("failed to write tar header: %w", err)
|
||||
}
|
||||
|
||||
// Write file content
|
||||
if _, err := io.Copy(tarWriter, srcFile); err != nil {
|
||||
return fmt.Errorf("failed to write file content to tar: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createZipArchive creates a zip archive containing a single file.
|
||||
func createZipArchive(fs io_interface.Medium, src, dst string) error {
|
||||
// Open the source file
|
||||
srcFile, err := fs.Open(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
defer func() { _ = srcFile.Close() }()
|
||||
|
||||
srcInfo, err := srcFile.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat source file: %w", err)
|
||||
}
|
||||
|
||||
// Create the destination file
|
||||
dstFile, err := fs.Create(dst)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create archive file: %w", err)
|
||||
}
|
||||
defer func() { _ = dstFile.Close() }()
|
||||
|
||||
// Create zip writer
|
||||
zipWriter := zip.NewWriter(dstFile)
|
||||
defer func() { _ = zipWriter.Close() }()
|
||||
|
||||
// Create zip header
|
||||
header, err := zip.FileInfoHeader(srcInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create zip header: %w", err)
|
||||
}
|
||||
// Use just the filename, not the full path
|
||||
header.Name = filepath.Base(src)
|
||||
header.Method = zip.Deflate
|
||||
|
||||
// Create file in archive
|
||||
writer, err := zipWriter.CreateHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create zip entry: %w", err)
|
||||
}
|
||||
|
||||
// Write file content
|
||||
if _, err := io.Copy(writer, srcFile); err != nil {
|
||||
return fmt.Errorf("failed to write file content to zip: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
397
build/archive_test.go
Normal file
397
build/archive_test.go
Normal file
|
|
@ -0,0 +1,397 @@
|
|||
package build
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/Snider/Borg/pkg/compress"
|
||||
io_interface "forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupArchiveTestFile creates a test binary file in a temp directory with the standard structure.
|
||||
// Returns the path to the binary and the output directory.
|
||||
func setupArchiveTestFile(t *testing.T, name, os_, arch string) (binaryPath string, outputDir string) {
|
||||
t.Helper()
|
||||
|
||||
outputDir = t.TempDir()
|
||||
|
||||
// Create platform directory: dist/os_arch
|
||||
platformDir := filepath.Join(outputDir, os_+"_"+arch)
|
||||
err := os.MkdirAll(platformDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create test binary
|
||||
binaryPath = filepath.Join(platformDir, name)
|
||||
content := []byte("#!/bin/bash\necho 'Hello, World!'\n")
|
||||
err = os.WriteFile(binaryPath, content, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
return binaryPath, outputDir
|
||||
}
|
||||
|
||||
func TestArchive_Good(t *testing.T) {
|
||||
fs := io_interface.Local
|
||||
t.Run("creates tar.gz for linux", func(t *testing.T) {
|
||||
binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "linux", "amd64")
|
||||
|
||||
artifact := Artifact{
|
||||
Path: binaryPath,
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
}
|
||||
|
||||
result, err := Archive(fs, artifact)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify archive was created
|
||||
expectedPath := filepath.Join(outputDir, "myapp_linux_amd64.tar.gz")
|
||||
assert.Equal(t, expectedPath, result.Path)
|
||||
assert.FileExists(t, result.Path)
|
||||
|
||||
// Verify OS and Arch are preserved
|
||||
assert.Equal(t, "linux", result.OS)
|
||||
assert.Equal(t, "amd64", result.Arch)
|
||||
|
||||
// Verify archive content
|
||||
verifyTarGzContent(t, result.Path, "myapp")
|
||||
})
|
||||
|
||||
t.Run("creates tar.gz for darwin", func(t *testing.T) {
|
||||
binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "darwin", "arm64")
|
||||
|
||||
artifact := Artifact{
|
||||
Path: binaryPath,
|
||||
OS: "darwin",
|
||||
Arch: "arm64",
|
||||
}
|
||||
|
||||
result, err := Archive(fs, artifact)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedPath := filepath.Join(outputDir, "myapp_darwin_arm64.tar.gz")
|
||||
assert.Equal(t, expectedPath, result.Path)
|
||||
assert.FileExists(t, result.Path)
|
||||
|
||||
verifyTarGzContent(t, result.Path, "myapp")
|
||||
})
|
||||
|
||||
t.Run("creates zip for windows", func(t *testing.T) {
|
||||
binaryPath, outputDir := setupArchiveTestFile(t, "myapp.exe", "windows", "amd64")
|
||||
|
||||
artifact := Artifact{
|
||||
Path: binaryPath,
|
||||
OS: "windows",
|
||||
Arch: "amd64",
|
||||
}
|
||||
|
||||
result, err := Archive(fs, artifact)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Windows archives should strip .exe from archive name
|
||||
expectedPath := filepath.Join(outputDir, "myapp_windows_amd64.zip")
|
||||
assert.Equal(t, expectedPath, result.Path)
|
||||
assert.FileExists(t, result.Path)
|
||||
|
||||
verifyZipContent(t, result.Path, "myapp.exe")
|
||||
})
|
||||
|
||||
t.Run("preserves checksum field", func(t *testing.T) {
|
||||
binaryPath, _ := setupArchiveTestFile(t, "myapp", "linux", "amd64")
|
||||
|
||||
artifact := Artifact{
|
||||
Path: binaryPath,
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
Checksum: "abc123",
|
||||
}
|
||||
|
||||
result, err := Archive(fs, artifact)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "abc123", result.Checksum)
|
||||
})
|
||||
|
||||
t.Run("creates tar.xz for linux with ArchiveXZ", func(t *testing.T) {
|
||||
binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "linux", "amd64")
|
||||
|
||||
artifact := Artifact{
|
||||
Path: binaryPath,
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
}
|
||||
|
||||
result, err := ArchiveXZ(fs, artifact)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedPath := filepath.Join(outputDir, "myapp_linux_amd64.tar.xz")
|
||||
assert.Equal(t, expectedPath, result.Path)
|
||||
assert.FileExists(t, result.Path)
|
||||
|
||||
verifyTarXzContent(t, result.Path, "myapp")
|
||||
})
|
||||
|
||||
t.Run("creates tar.xz for darwin with ArchiveWithFormat", func(t *testing.T) {
|
||||
binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "darwin", "arm64")
|
||||
|
||||
artifact := Artifact{
|
||||
Path: binaryPath,
|
||||
OS: "darwin",
|
||||
Arch: "arm64",
|
||||
}
|
||||
|
||||
result, err := ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedPath := filepath.Join(outputDir, "myapp_darwin_arm64.tar.xz")
|
||||
assert.Equal(t, expectedPath, result.Path)
|
||||
assert.FileExists(t, result.Path)
|
||||
|
||||
verifyTarXzContent(t, result.Path, "myapp")
|
||||
})
|
||||
|
||||
t.Run("windows still uses zip even with xz format", func(t *testing.T) {
|
||||
binaryPath, outputDir := setupArchiveTestFile(t, "myapp.exe", "windows", "amd64")
|
||||
|
||||
artifact := Artifact{
|
||||
Path: binaryPath,
|
||||
OS: "windows",
|
||||
Arch: "amd64",
|
||||
}
|
||||
|
||||
result, err := ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Windows should still get .zip regardless of format
|
||||
expectedPath := filepath.Join(outputDir, "myapp_windows_amd64.zip")
|
||||
assert.Equal(t, expectedPath, result.Path)
|
||||
assert.FileExists(t, result.Path)
|
||||
|
||||
verifyZipContent(t, result.Path, "myapp.exe")
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchive_Bad(t *testing.T) {
|
||||
fs := io_interface.Local
|
||||
t.Run("returns error for empty path", func(t *testing.T) {
|
||||
artifact := Artifact{
|
||||
Path: "",
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
}
|
||||
|
||||
result, err := Archive(fs, artifact)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "artifact path is empty")
|
||||
assert.Empty(t, result.Path)
|
||||
})
|
||||
|
||||
t.Run("returns error for non-existent file", func(t *testing.T) {
|
||||
artifact := Artifact{
|
||||
Path: "/nonexistent/path/binary",
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
}
|
||||
|
||||
result, err := Archive(fs, artifact)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "source file not found")
|
||||
assert.Empty(t, result.Path)
|
||||
})
|
||||
|
||||
t.Run("returns error for directory path", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
artifact := Artifact{
|
||||
Path: dir,
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
}
|
||||
|
||||
result, err := Archive(fs, artifact)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "source path is a directory")
|
||||
assert.Empty(t, result.Path)
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchiveAll_Good(t *testing.T) {
|
||||
fs := io_interface.Local
|
||||
t.Run("archives multiple artifacts", func(t *testing.T) {
|
||||
outputDir := t.TempDir()
|
||||
|
||||
// Create multiple binaries
|
||||
var artifacts []Artifact
|
||||
targets := []struct {
|
||||
os_ string
|
||||
arch string
|
||||
}{
|
||||
{"linux", "amd64"},
|
||||
{"linux", "arm64"},
|
||||
{"darwin", "arm64"},
|
||||
{"windows", "amd64"},
|
||||
}
|
||||
|
||||
for _, target := range targets {
|
||||
platformDir := filepath.Join(outputDir, target.os_+"_"+target.arch)
|
||||
err := os.MkdirAll(platformDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
name := "myapp"
|
||||
if target.os_ == "windows" {
|
||||
name = "myapp.exe"
|
||||
}
|
||||
|
||||
binaryPath := filepath.Join(platformDir, name)
|
||||
err = os.WriteFile(binaryPath, []byte("binary content"), 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
artifacts = append(artifacts, Artifact{
|
||||
Path: binaryPath,
|
||||
OS: target.os_,
|
||||
Arch: target.arch,
|
||||
})
|
||||
}
|
||||
|
||||
results, err := ArchiveAll(fs, artifacts)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 4)
|
||||
|
||||
// Verify all archives were created
|
||||
for i, result := range results {
|
||||
assert.FileExists(t, result.Path)
|
||||
assert.Equal(t, artifacts[i].OS, result.OS)
|
||||
assert.Equal(t, artifacts[i].Arch, result.Arch)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("returns nil for empty slice", func(t *testing.T) {
|
||||
results, err := ArchiveAll(fs, []Artifact{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, results)
|
||||
})
|
||||
|
||||
t.Run("returns nil for nil slice", func(t *testing.T) {
|
||||
results, err := ArchiveAll(fs, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, results)
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchiveAll_Bad(t *testing.T) {
|
||||
fs := io_interface.Local
|
||||
t.Run("returns partial results on error", func(t *testing.T) {
|
||||
binaryPath, _ := setupArchiveTestFile(t, "myapp", "linux", "amd64")
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: binaryPath, OS: "linux", Arch: "amd64"},
|
||||
{Path: "/nonexistent/binary", OS: "linux", Arch: "arm64"}, // This will fail
|
||||
}
|
||||
|
||||
results, err := ArchiveAll(fs, artifacts)
|
||||
assert.Error(t, err)
|
||||
// Should have the first successful result
|
||||
assert.Len(t, results, 1)
|
||||
assert.FileExists(t, results[0].Path)
|
||||
})
|
||||
}
|
||||
|
||||
func TestArchiveFilename_Good(t *testing.T) {
|
||||
t.Run("generates correct tar.gz filename", func(t *testing.T) {
|
||||
artifact := Artifact{
|
||||
Path: "/output/linux_amd64/myapp",
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
}
|
||||
|
||||
filename := archiveFilename(artifact, ".tar.gz")
|
||||
assert.Equal(t, "/output/myapp_linux_amd64.tar.gz", filename)
|
||||
})
|
||||
|
||||
t.Run("generates correct zip filename", func(t *testing.T) {
|
||||
artifact := Artifact{
|
||||
Path: "/output/windows_amd64/myapp.exe",
|
||||
OS: "windows",
|
||||
Arch: "amd64",
|
||||
}
|
||||
|
||||
filename := archiveFilename(artifact, ".zip")
|
||||
assert.Equal(t, "/output/myapp_windows_amd64.zip", filename)
|
||||
})
|
||||
|
||||
t.Run("handles nested output directories", func(t *testing.T) {
|
||||
artifact := Artifact{
|
||||
Path: "/project/dist/linux_arm64/cli",
|
||||
OS: "linux",
|
||||
Arch: "arm64",
|
||||
}
|
||||
|
||||
filename := archiveFilename(artifact, ".tar.gz")
|
||||
assert.Equal(t, "/project/dist/cli_linux_arm64.tar.gz", filename)
|
||||
})
|
||||
}
|
||||
|
||||
// verifyTarGzContent opens a tar.gz file and verifies it contains the expected file.
|
||||
func verifyTarGzContent(t *testing.T, archivePath, expectedName string) {
|
||||
t.Helper()
|
||||
|
||||
file, err := os.Open(archivePath)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = file.Close() }()
|
||||
|
||||
gzReader, err := gzip.NewReader(file)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = gzReader.Close() }()
|
||||
|
||||
tarReader := tar.NewReader(gzReader)
|
||||
|
||||
header, err := tarReader.Next()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedName, header.Name)
|
||||
|
||||
// Verify there's only one file
|
||||
_, err = tarReader.Next()
|
||||
assert.Equal(t, io.EOF, err)
|
||||
}
|
||||
|
||||
// verifyZipContent opens a zip file and verifies it contains the expected file.
|
||||
func verifyZipContent(t *testing.T, archivePath, expectedName string) {
|
||||
t.Helper()
|
||||
|
||||
reader, err := zip.OpenReader(archivePath)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = reader.Close() }()
|
||||
|
||||
require.Len(t, reader.File, 1)
|
||||
assert.Equal(t, expectedName, reader.File[0].Name)
|
||||
}
|
||||
|
||||
// verifyTarXzContent opens a tar.xz file and verifies it contains the expected file.
|
||||
func verifyTarXzContent(t *testing.T, archivePath, expectedName string) {
|
||||
t.Helper()
|
||||
|
||||
// Read the xz-compressed file
|
||||
xzData, err := os.ReadFile(archivePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Decompress the xz data with the compress package
|
||||
tarData, err := compress.Decompress(xzData)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Read tar archive
|
||||
tarReader := tar.NewReader(bytes.NewReader(tarData))
|
||||
|
||||
header, err := tarReader.Next()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedName, header.Name)
|
||||
|
||||
// Verify there's only one file
|
||||
_, err = tarReader.Next()
|
||||
assert.Equal(t, io.EOF, err)
|
||||
}
|
||||
90
build/build.go
Normal file
@@ -0,0 +1,90 @@
// Package build provides project type detection and cross-compilation for the Core build system.
// It supports Go, Wails, Node.js, and PHP projects with automatic detection based on
// marker files (go.mod, wails.json, package.json, composer.json).
package build

import (
	"context"

	"forge.lthn.ai/core/go/pkg/io"
)

// ProjectType represents a detected project type.
type ProjectType string

// Project type constants for build detection.
const (
	// ProjectTypeGo indicates a standard Go project with go.mod.
	ProjectTypeGo ProjectType = "go"
	// ProjectTypeWails indicates a Wails desktop application.
	ProjectTypeWails ProjectType = "wails"
	// ProjectTypeNode indicates a Node.js project with package.json.
	ProjectTypeNode ProjectType = "node"
	// ProjectTypePHP indicates a PHP/Laravel project with composer.json.
	ProjectTypePHP ProjectType = "php"
	// ProjectTypeCPP indicates a C++ project with CMakeLists.txt.
	ProjectTypeCPP ProjectType = "cpp"
	// ProjectTypeDocker indicates a Docker-based project with Dockerfile.
	ProjectTypeDocker ProjectType = "docker"
	// ProjectTypeLinuxKit indicates a LinuxKit VM configuration.
	ProjectTypeLinuxKit ProjectType = "linuxkit"
	// ProjectTypeTaskfile indicates a project using Taskfile automation.
	ProjectTypeTaskfile ProjectType = "taskfile"
)

// Target represents a build target platform.
type Target struct {
	OS   string
	Arch string
}

// String returns the target in GOOS/GOARCH format.
func (t Target) String() string {
	return t.OS + "/" + t.Arch
}

// Artifact represents a build output file.
type Artifact struct {
	Path     string
	OS       string
	Arch     string
	Checksum string
}

// Config holds build configuration.
type Config struct {
	// FS is the medium used for file operations.
	FS io.Medium
	// ProjectDir is the root directory of the project.
	ProjectDir string
	// OutputDir is where build artifacts are placed.
	OutputDir string
	// Name is the output binary name.
	Name string
	// Version is the build version string.
	Version string
	// LDFlags are additional linker flags.
	LDFlags []string

	// Docker-specific config
	Dockerfile string            // Path to Dockerfile (default: Dockerfile)
	Registry   string            // Container registry (default: ghcr.io)
	Image      string            // Image name (owner/repo format)
	Tags       []string          // Additional tags to apply
	BuildArgs  map[string]string // Docker build arguments
	Push       bool              // Whether to push after build

	// LinuxKit-specific config
	LinuxKitConfig string   // Path to LinuxKit YAML config
	Formats        []string // Output formats (iso, qcow2, raw, vmdk)
}

// Builder defines the interface for project-specific build implementations.
type Builder interface {
	// Name returns the builder's identifier.
	Name() string
	// Detect checks if this builder can handle the project in the given directory.
	Detect(fs io.Medium, dir string) (bool, error)
	// Build compiles the project for the specified targets.
	Build(ctx context.Context, cfg *Config, targets []Target) ([]Artifact, error)
}
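The Builder interface above is the extension point used by the buildcmd package later in this commit. As orientation only, here is a minimal sketch of a custom implementation; the echoBuilder type and its behaviour are illustrative assumptions and are not part of this change — only Builder, Target, Artifact, Config and io.Medium.IsDir are taken from the code above.

// Minimal sketch of a custom Builder (hypothetical, not in this commit).
package example

import (
	"context"
	"fmt"
	"path/filepath"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
)

type echoBuilder struct{}

func (b *echoBuilder) Name() string { return "echo" }

// Detect reports whether this builder applies to the directory; a real
// implementation would look for a project marker file via the io.Medium.
func (b *echoBuilder) Detect(fs io.Medium, dir string) (bool, error) {
	return fs.IsDir(dir), nil
}

// Build fabricates one Artifact per target without compiling anything,
// just to show how Config and Target feed into the result.
func (b *echoBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
	if cfg == nil {
		return nil, fmt.Errorf("echo: config is nil")
	}
	var out []build.Artifact
	for _, t := range targets {
		out = append(out, build.Artifact{
			Path: filepath.Join(cfg.OutputDir, cfg.Name+"_"+t.OS+"_"+t.Arch),
			OS:   t.OS,
			Arch: t.Arch,
		})
	}
	return out, nil
}

// Compile-time interface check, mirroring the pattern used by the real builders.
var _ build.Builder = (*echoBuilder)(nil)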
144
build/buildcmd/cmd_build.go
Normal file
@@ -0,0 +1,144 @@
// Package buildcmd provides project build commands with auto-detection.
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"embed"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"forge.lthn.ai/core/go/pkg/i18n"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cli.RegisterCommands(AddBuildCommands)
|
||||
}
|
||||
|
||||
// Style aliases from shared package
|
||||
var (
|
||||
buildHeaderStyle = cli.TitleStyle
|
||||
buildTargetStyle = cli.ValueStyle
|
||||
buildSuccessStyle = cli.SuccessStyle
|
||||
buildErrorStyle = cli.ErrorStyle
|
||||
buildDimStyle = cli.DimStyle
|
||||
)
|
||||
|
||||
//go:embed all:tmpl/gui
|
||||
var guiTemplate embed.FS
|
||||
|
||||
// Flags for the main build command
|
||||
var (
|
||||
buildType string
|
||||
ciMode bool
|
||||
targets string
|
||||
outputDir string
|
||||
doArchive bool
|
||||
doChecksum bool
|
||||
verbose bool
|
||||
|
||||
// Docker/LinuxKit specific flags
|
||||
configPath string
|
||||
format string
|
||||
push bool
|
||||
imageName string
|
||||
|
||||
// Signing flags
|
||||
noSign bool
|
||||
notarize bool
|
||||
|
||||
// from-path subcommand
|
||||
fromPath string
|
||||
|
||||
// pwa subcommand
|
||||
pwaURL string
|
||||
|
||||
// sdk subcommand
|
||||
sdkSpec string
|
||||
sdkLang string
|
||||
sdkVersion string
|
||||
sdkDryRun bool
|
||||
)
|
||||
|
||||
var buildCmd = &cobra.Command{
|
||||
Use: "build",
|
||||
Short: i18n.T("cmd.build.short"),
|
||||
Long: i18n.T("cmd.build.long"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runProjectBuild(cmd.Context(), buildType, ciMode, targets, outputDir, doArchive, doChecksum, configPath, format, push, imageName, noSign, notarize, verbose)
|
||||
},
|
||||
}
|
||||
|
||||
var fromPathCmd = &cobra.Command{
|
||||
Use: "from-path",
|
||||
Short: i18n.T("cmd.build.from_path.short"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if fromPath == "" {
|
||||
return errPathRequired
|
||||
}
|
||||
return runBuild(fromPath)
|
||||
},
|
||||
}
|
||||
|
||||
var pwaCmd = &cobra.Command{
|
||||
Use: "pwa",
|
||||
Short: i18n.T("cmd.build.pwa.short"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if pwaURL == "" {
|
||||
return errURLRequired
|
||||
}
|
||||
return runPwaBuild(pwaURL)
|
||||
},
|
||||
}
|
||||
|
||||
var sdkBuildCmd = &cobra.Command{
|
||||
Use: "sdk",
|
||||
Short: i18n.T("cmd.build.sdk.short"),
|
||||
Long: i18n.T("cmd.build.sdk.long"),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runBuildSDK(sdkSpec, sdkLang, sdkVersion, sdkDryRun)
|
||||
},
|
||||
}
|
||||
|
||||
func initBuildFlags() {
|
||||
// Main build command flags
|
||||
buildCmd.Flags().StringVar(&buildType, "type", "", i18n.T("cmd.build.flag.type"))
|
||||
buildCmd.Flags().BoolVar(&ciMode, "ci", false, i18n.T("cmd.build.flag.ci"))
|
||||
buildCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, i18n.T("common.flag.verbose"))
|
||||
buildCmd.Flags().StringVar(&targets, "targets", "", i18n.T("cmd.build.flag.targets"))
|
||||
buildCmd.Flags().StringVar(&outputDir, "output", "", i18n.T("cmd.build.flag.output"))
|
||||
buildCmd.Flags().BoolVar(&doArchive, "archive", true, i18n.T("cmd.build.flag.archive"))
|
||||
buildCmd.Flags().BoolVar(&doChecksum, "checksum", true, i18n.T("cmd.build.flag.checksum"))
|
||||
|
||||
// Docker/LinuxKit specific
|
||||
buildCmd.Flags().StringVar(&configPath, "config", "", i18n.T("cmd.build.flag.config"))
|
||||
buildCmd.Flags().StringVar(&format, "format", "", i18n.T("cmd.build.flag.format"))
|
||||
buildCmd.Flags().BoolVar(&push, "push", false, i18n.T("cmd.build.flag.push"))
|
||||
buildCmd.Flags().StringVar(&imageName, "image", "", i18n.T("cmd.build.flag.image"))
|
||||
|
||||
// Signing flags
|
||||
buildCmd.Flags().BoolVar(&noSign, "no-sign", false, i18n.T("cmd.build.flag.no_sign"))
|
||||
buildCmd.Flags().BoolVar(¬arize, "notarize", false, i18n.T("cmd.build.flag.notarize"))
|
||||
|
||||
// from-path subcommand flags
|
||||
fromPathCmd.Flags().StringVar(&fromPath, "path", "", i18n.T("cmd.build.from_path.flag.path"))
|
||||
|
||||
// pwa subcommand flags
|
||||
pwaCmd.Flags().StringVar(&pwaURL, "url", "", i18n.T("cmd.build.pwa.flag.url"))
|
||||
|
||||
// sdk subcommand flags
|
||||
sdkBuildCmd.Flags().StringVar(&sdkSpec, "spec", "", i18n.T("common.flag.spec"))
|
||||
sdkBuildCmd.Flags().StringVar(&sdkLang, "lang", "", i18n.T("cmd.build.sdk.flag.lang"))
|
||||
sdkBuildCmd.Flags().StringVar(&sdkVersion, "version", "", i18n.T("cmd.build.sdk.flag.version"))
|
||||
sdkBuildCmd.Flags().BoolVar(&sdkDryRun, "dry-run", false, i18n.T("cmd.build.sdk.flag.dry_run"))
|
||||
|
||||
// Add subcommands
|
||||
buildCmd.AddCommand(fromPathCmd)
|
||||
buildCmd.AddCommand(pwaCmd)
|
||||
buildCmd.AddCommand(sdkBuildCmd)
|
||||
}
|
||||
|
||||
// AddBuildCommands registers the 'build' command and all subcommands.
|
||||
func AddBuildCommands(root *cobra.Command) {
|
||||
initBuildFlags()
|
||||
AddReleaseCommand(buildCmd)
|
||||
root.AddCommand(buildCmd)
|
||||
}
|
||||
21
build/buildcmd/cmd_commands.go
Normal file
@@ -0,0 +1,21 @@
// Package buildcmd provides project build commands with auto-detection.
//
// Supports building:
// - Go projects (standard and cross-compilation)
// - Wails desktop applications
// - Docker images
// - LinuxKit VM images
// - Taskfile-based projects
//
// Configuration via .core/build.yaml or command-line flags.
//
// Subcommands:
// - build: Auto-detect and build the current project
// - build from-path: Build from a local static web app directory
// - build pwa: Build from a live PWA URL
// - build sdk: Generate API SDKs from OpenAPI spec
package buildcmd

// Note: The AddBuildCommands function is defined in cmd_build.go
// This file exists for documentation purposes and maintains the original
// package documentation from commands.go.
392
build/buildcmd/cmd_project.go
Normal file
@@ -0,0 +1,392 @@
// cmd_project.go implements the main project build logic.
|
||||
//
|
||||
// This handles auto-detection of project types (Go, Wails, Docker, LinuxKit, Taskfile)
|
||||
// and orchestrates the build process including signing, archiving, and checksums.
|
||||
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-devops/build/builders"
|
||||
"forge.lthn.ai/core/go-devops/build/signing"
|
||||
"forge.lthn.ai/core/go/pkg/i18n"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// runProjectBuild handles the main `core build` command with auto-detection.
|
||||
func runProjectBuild(ctx context.Context, buildType string, ciMode bool, targetsFlag string, outputDir string, doArchive bool, doChecksum bool, configPath string, format string, push bool, imageName string, noSign bool, notarize bool, verbose bool) error {
|
||||
// Use local filesystem as the default medium
|
||||
fs := io.Local
|
||||
|
||||
// Get current working directory as project root
|
||||
projectDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "get working directory"}), err)
|
||||
}
|
||||
|
||||
// Load configuration from .core/build.yaml (or defaults)
|
||||
buildCfg, err := build.LoadConfig(fs, projectDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "load config"}), err)
|
||||
}
|
||||
|
||||
// Detect project type if not specified
|
||||
var projectType build.ProjectType
|
||||
if buildType != "" {
|
||||
projectType = build.ProjectType(buildType)
|
||||
} else {
|
||||
projectType, err = build.PrimaryType(fs, projectDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "detect project type"}), err)
|
||||
}
|
||||
if projectType == "" {
|
||||
return fmt.Errorf("%s", i18n.T("cmd.build.error.no_project_type", map[string]interface{}{"Dir": projectDir}))
|
||||
}
|
||||
}
|
||||
|
||||
// Determine targets
|
||||
var buildTargets []build.Target
|
||||
if targetsFlag != "" {
|
||||
// Parse from command line
|
||||
buildTargets, err = parseTargets(targetsFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if len(buildCfg.Targets) > 0 {
|
||||
// Use config targets
|
||||
buildTargets = buildCfg.ToTargets()
|
||||
} else {
|
||||
// Fall back to current OS/arch
|
||||
buildTargets = []build.Target{
|
||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
||||
}
|
||||
}
|
||||
|
||||
// Determine output directory
|
||||
if outputDir == "" {
|
||||
outputDir = "dist"
|
||||
}
|
||||
if !filepath.IsAbs(outputDir) {
|
||||
outputDir = filepath.Join(projectDir, outputDir)
|
||||
}
|
||||
outputDir = filepath.Clean(outputDir)
|
||||
|
||||
// Ensure config path is absolute if provided
|
||||
if configPath != "" && !filepath.IsAbs(configPath) {
|
||||
configPath = filepath.Join(projectDir, configPath)
|
||||
}
|
||||
|
||||
// Determine binary name
|
||||
binaryName := buildCfg.Project.Binary
|
||||
if binaryName == "" {
|
||||
binaryName = buildCfg.Project.Name
|
||||
}
|
||||
if binaryName == "" {
|
||||
binaryName = filepath.Base(projectDir)
|
||||
}
|
||||
|
||||
// Print build info (verbose mode only)
|
||||
if verbose && !ciMode {
|
||||
fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.build")), i18n.T("cmd.build.building_project"))
|
||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.type"), buildTargetStyle.Render(string(projectType)))
|
||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.output"), buildTargetStyle.Render(outputDir))
|
||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.binary"), buildTargetStyle.Render(binaryName))
|
||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.targets"), buildTargetStyle.Render(formatTargets(buildTargets)))
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Get the appropriate builder
|
||||
builder, err := getBuilder(projectType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create build config for the builder
|
||||
cfg := &build.Config{
|
||||
FS: fs,
|
||||
ProjectDir: projectDir,
|
||||
OutputDir: outputDir,
|
||||
Name: binaryName,
|
||||
Version: buildCfg.Project.Name, // Could be enhanced with git describe
|
||||
LDFlags: buildCfg.Build.LDFlags,
|
||||
// Docker/LinuxKit specific
|
||||
Dockerfile: configPath, // Reuse for Dockerfile path
|
||||
LinuxKitConfig: configPath,
|
||||
Push: push,
|
||||
Image: imageName,
|
||||
}
|
||||
|
||||
// Parse formats for LinuxKit
|
||||
if format != "" {
|
||||
cfg.Formats = strings.Split(format, ",")
|
||||
}
|
||||
|
||||
// Execute build
|
||||
artifacts, err := builder.Build(ctx, cfg, buildTargets)
|
||||
if err != nil {
|
||||
if !ciMode {
|
||||
fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if verbose && !ciMode {
|
||||
fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("common.label.success")), i18n.T("cmd.build.built_artifacts", map[string]interface{}{"Count": len(artifacts)}))
|
||||
fmt.Println()
|
||||
for _, artifact := range artifacts {
|
||||
relPath, err := filepath.Rel(projectDir, artifact.Path)
|
||||
if err != nil {
|
||||
relPath = artifact.Path
|
||||
}
|
||||
fmt.Printf(" %s %s %s\n",
|
||||
buildSuccessStyle.Render("*"),
|
||||
buildTargetStyle.Render(relPath),
|
||||
buildDimStyle.Render(fmt.Sprintf("(%s/%s)", artifact.OS, artifact.Arch)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Sign macOS binaries if enabled
|
||||
signCfg := buildCfg.Sign
|
||||
if notarize {
|
||||
signCfg.MacOS.Notarize = true
|
||||
}
|
||||
if noSign {
|
||||
signCfg.Enabled = false
|
||||
}
|
||||
|
||||
if signCfg.Enabled && runtime.GOOS == "darwin" {
|
||||
if verbose && !ciMode {
|
||||
fmt.Println()
|
||||
fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.sign")), i18n.T("cmd.build.signing_binaries"))
|
||||
}
|
||||
|
||||
// Convert build.Artifact to signing.Artifact
|
||||
signingArtifacts := make([]signing.Artifact, len(artifacts))
|
||||
for i, a := range artifacts {
|
||||
signingArtifacts[i] = signing.Artifact{Path: a.Path, OS: a.OS, Arch: a.Arch}
|
||||
}
|
||||
|
||||
if err := signing.SignBinaries(ctx, fs, signCfg, signingArtifacts); err != nil {
|
||||
if !ciMode {
|
||||
fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.signing_failed"), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if signCfg.MacOS.Notarize {
|
||||
if err := signing.NotarizeBinaries(ctx, fs, signCfg, signingArtifacts); err != nil {
|
||||
if !ciMode {
|
||||
fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.notarization_failed"), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Archive artifacts if enabled
|
||||
var archivedArtifacts []build.Artifact
|
||||
if doArchive && len(artifacts) > 0 {
|
||||
if verbose && !ciMode {
|
||||
fmt.Println()
|
||||
fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.archive")), i18n.T("cmd.build.creating_archives"))
|
||||
}
|
||||
|
||||
archivedArtifacts, err = build.ArchiveAll(fs, artifacts)
|
||||
if err != nil {
|
||||
if !ciMode {
|
||||
fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.archive_failed"), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if verbose && !ciMode {
|
||||
for _, artifact := range archivedArtifacts {
|
||||
relPath, err := filepath.Rel(projectDir, artifact.Path)
|
||||
if err != nil {
|
||||
relPath = artifact.Path
|
||||
}
|
||||
fmt.Printf(" %s %s %s\n",
|
||||
buildSuccessStyle.Render("*"),
|
||||
buildTargetStyle.Render(relPath),
|
||||
buildDimStyle.Render(fmt.Sprintf("(%s/%s)", artifact.OS, artifact.Arch)),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute checksums if enabled
|
||||
var checksummedArtifacts []build.Artifact
|
||||
if doChecksum && len(archivedArtifacts) > 0 {
|
||||
checksummedArtifacts, err = computeAndWriteChecksums(ctx, projectDir, outputDir, archivedArtifacts, signCfg, ciMode, verbose)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if doChecksum && len(artifacts) > 0 && !doArchive {
|
||||
// Checksum raw binaries if archiving is disabled
|
||||
checksummedArtifacts, err = computeAndWriteChecksums(ctx, projectDir, outputDir, artifacts, signCfg, ciMode, verbose)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Output results
|
||||
if ciMode {
|
||||
// Determine which artifacts to output (prefer checksummed > archived > raw)
|
||||
var outputArtifacts []build.Artifact
|
||||
if len(checksummedArtifacts) > 0 {
|
||||
outputArtifacts = checksummedArtifacts
|
||||
} else if len(archivedArtifacts) > 0 {
|
||||
outputArtifacts = archivedArtifacts
|
||||
} else {
|
||||
outputArtifacts = artifacts
|
||||
}
|
||||
|
||||
// JSON output for CI
|
||||
output, err := json.MarshalIndent(outputArtifacts, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "marshal artifacts"}), err)
|
||||
}
|
||||
fmt.Println(string(output))
|
||||
} else if !verbose {
|
||||
// Minimal output: just success with artifact count
|
||||
fmt.Printf("%s %s %s\n",
|
||||
buildSuccessStyle.Render(i18n.T("common.label.success")),
|
||||
i18n.T("cmd.build.built_artifacts", map[string]interface{}{"Count": len(artifacts)}),
|
||||
buildDimStyle.Render(fmt.Sprintf("(%s)", outputDir)),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// computeAndWriteChecksums computes checksums for artifacts and writes CHECKSUMS.txt.
|
||||
func computeAndWriteChecksums(ctx context.Context, projectDir, outputDir string, artifacts []build.Artifact, signCfg signing.SignConfig, ciMode bool, verbose bool) ([]build.Artifact, error) {
|
||||
fs := io.Local
|
||||
if verbose && !ciMode {
|
||||
fmt.Println()
|
||||
fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.checksum")), i18n.T("cmd.build.computing_checksums"))
|
||||
}
|
||||
|
||||
checksummedArtifacts, err := build.ChecksumAll(fs, artifacts)
|
||||
if err != nil {
|
||||
if !ciMode {
|
||||
fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.checksum_failed"), err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Write CHECKSUMS.txt
|
||||
checksumPath := filepath.Join(outputDir, "CHECKSUMS.txt")
|
||||
if err := build.WriteChecksumFile(fs, checksummedArtifacts, checksumPath); err != nil {
|
||||
if !ciMode {
|
||||
fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("common.error.failed", map[string]any{"Action": "write CHECKSUMS.txt"}), err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Sign checksums with GPG
|
||||
if signCfg.Enabled {
|
||||
if err := signing.SignChecksums(ctx, fs, signCfg, checksumPath); err != nil {
|
||||
if !ciMode {
|
||||
fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.gpg_signing_failed"), err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if verbose && !ciMode {
|
||||
for _, artifact := range checksummedArtifacts {
|
||||
relPath, err := filepath.Rel(projectDir, artifact.Path)
|
||||
if err != nil {
|
||||
relPath = artifact.Path
|
||||
}
|
||||
fmt.Printf(" %s %s\n",
|
||||
buildSuccessStyle.Render("*"),
|
||||
buildTargetStyle.Render(relPath),
|
||||
)
|
||||
fmt.Printf(" %s\n", buildDimStyle.Render(artifact.Checksum))
|
||||
}
|
||||
|
||||
relChecksumPath, err := filepath.Rel(projectDir, checksumPath)
|
||||
if err != nil {
|
||||
relChecksumPath = checksumPath
|
||||
}
|
||||
fmt.Printf(" %s %s\n",
|
||||
buildSuccessStyle.Render("*"),
|
||||
buildTargetStyle.Render(relChecksumPath),
|
||||
)
|
||||
}
|
||||
|
||||
return checksummedArtifacts, nil
|
||||
}
|
||||
|
||||
// parseTargets parses a comma-separated list of OS/arch pairs.
|
||||
func parseTargets(targetsFlag string) ([]build.Target, error) {
|
||||
parts := strings.Split(targetsFlag, ",")
|
||||
var targets []build.Target
|
||||
|
||||
for _, part := range parts {
|
||||
part = strings.TrimSpace(part)
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
osArch := strings.Split(part, "/")
|
||||
if len(osArch) != 2 {
|
||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.invalid_target", map[string]interface{}{"Target": part}))
|
||||
}
|
||||
|
||||
targets = append(targets, build.Target{
|
||||
OS: strings.TrimSpace(osArch[0]),
|
||||
Arch: strings.TrimSpace(osArch[1]),
|
||||
})
|
||||
}
|
||||
|
||||
if len(targets) == 0 {
|
||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.no_targets"))
|
||||
}
|
||||
|
||||
return targets, nil
|
||||
}
|
||||
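To make the accepted --targets format concrete, here is a hedged in-package snippet (package buildcmd, not part of this commit) showing the round trip through parseTargets and formatTargets; the flag value is a made-up example.

// Hypothetical illustration of the --targets flag format.
func exampleParseTargets() error {
	targets, err := parseTargets("linux/amd64, darwin/arm64,windows/amd64")
	if err != nil {
		return err // an entry without exactly one "/" yields the invalid_target error
	}
	// targets[0] == build.Target{OS: "linux", Arch: "amd64"}, and so on for each entry.
	fmt.Println(formatTargets(targets)) // prints: linux/amd64, darwin/arm64, windows/amd64
	return nil
}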
|
||||
// formatTargets returns a human-readable string of targets.
|
||||
func formatTargets(targets []build.Target) string {
|
||||
var parts []string
|
||||
for _, t := range targets {
|
||||
parts = append(parts, t.String())
|
||||
}
|
||||
return strings.Join(parts, ", ")
|
||||
}
|
||||
|
||||
// getBuilder returns the appropriate builder for the project type.
|
||||
func getBuilder(projectType build.ProjectType) (build.Builder, error) {
|
||||
switch projectType {
|
||||
case build.ProjectTypeWails:
|
||||
return builders.NewWailsBuilder(), nil
|
||||
case build.ProjectTypeGo:
|
||||
return builders.NewGoBuilder(), nil
|
||||
case build.ProjectTypeDocker:
|
||||
return builders.NewDockerBuilder(), nil
|
||||
case build.ProjectTypeLinuxKit:
|
||||
return builders.NewLinuxKitBuilder(), nil
|
||||
case build.ProjectTypeTaskfile:
|
||||
return builders.NewTaskfileBuilder(), nil
|
||||
case build.ProjectTypeCPP:
|
||||
return builders.NewCPPBuilder(), nil
|
||||
case build.ProjectTypeNode:
|
||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.node_not_implemented"))
|
||||
case build.ProjectTypePHP:
|
||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.php_not_implemented"))
|
||||
default:
|
||||
return nil, fmt.Errorf("%s: %s", i18n.T("cmd.build.error.unsupported_type"), projectType)
|
||||
}
|
||||
}
|
||||
324
build/buildcmd/cmd_pwa.go
Normal file
@@ -0,0 +1,324 @@
// cmd_pwa.go implements PWA and legacy GUI build functionality.
|
||||
//
|
||||
// Supports building desktop applications from:
|
||||
// - Local static web application directories
|
||||
// - Live PWA URLs (downloads and packages)
|
||||
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/i18n"
|
||||
"github.com/leaanthony/debme"
|
||||
"github.com/leaanthony/gosod"
|
||||
"golang.org/x/net/html"
|
||||
)
|
||||
|
||||
// Error sentinels for build commands
|
||||
var (
|
||||
errPathRequired = errors.New("the --path flag is required")
|
||||
errURLRequired = errors.New("the --url flag is required")
|
||||
)
|
||||
|
||||
// runPwaBuild downloads a PWA from URL and builds it.
|
||||
func runPwaBuild(pwaURL string) error {
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.starting"), pwaURL)
|
||||
|
||||
tempDir, err := os.MkdirTemp("", "core-pwa-build-*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "create temporary directory"}), err)
|
||||
}
|
||||
// defer os.RemoveAll(tempDir) // Keep temp dir for debugging
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.downloading_to"), tempDir)
|
||||
|
||||
if err := downloadPWA(pwaURL, tempDir); err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "download PWA"}), err)
|
||||
}
|
||||
|
||||
return runBuild(tempDir)
|
||||
}
|
||||
|
||||
// downloadPWA fetches a PWA from a URL and saves assets locally.
|
||||
func downloadPWA(baseURL, destDir string) error {
|
||||
// Fetch the main HTML page
|
||||
resp, err := http.Get(baseURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s %s: %w", i18n.T("common.error.failed", map[string]any{"Action": "fetch URL"}), baseURL, err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "read response body"}), err)
|
||||
}
|
||||
|
||||
// Find the manifest URL from the HTML
|
||||
manifestURL, err := findManifestURL(string(body), baseURL)
|
||||
if err != nil {
|
||||
// If no manifest, it's not a PWA, but we can still try to package it as a simple site.
|
||||
fmt.Printf("%s %s\n", i18n.T("common.label.warning"), i18n.T("cmd.build.pwa.no_manifest"))
|
||||
if err := os.WriteFile(filepath.Join(destDir, "index.html"), body, 0644); err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "write index.html"}), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.found_manifest"), manifestURL)
|
||||
|
||||
// Fetch and parse the manifest
|
||||
manifest, err := fetchManifest(manifestURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "fetch or parse manifest"}), err)
|
||||
}
|
||||
|
||||
// Download all assets listed in the manifest
|
||||
assets := collectAssets(manifest, manifestURL)
|
||||
for _, assetURL := range assets {
|
||||
if err := downloadAsset(assetURL, destDir); err != nil {
|
||||
fmt.Printf("%s %s %s: %v\n", i18n.T("common.label.warning"), i18n.T("common.error.failed", map[string]any{"Action": "download asset"}), assetURL, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Also save the root index.html
|
||||
if err := os.WriteFile(filepath.Join(destDir, "index.html"), body, 0644); err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "write index.html"}), err)
|
||||
}
|
||||
|
||||
fmt.Println(i18n.T("cmd.build.pwa.download_complete"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// findManifestURL extracts the manifest URL from HTML content.
|
||||
func findManifestURL(htmlContent, baseURL string) (string, error) {
|
||||
doc, err := html.Parse(strings.NewReader(htmlContent))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var manifestPath string
|
||||
var f func(*html.Node)
|
||||
f = func(n *html.Node) {
|
||||
if n.Type == html.ElementNode && n.Data == "link" {
|
||||
var rel, href string
|
||||
for _, a := range n.Attr {
|
||||
if a.Key == "rel" {
|
||||
rel = a.Val
|
||||
}
|
||||
if a.Key == "href" {
|
||||
href = a.Val
|
||||
}
|
||||
}
|
||||
if rel == "manifest" && href != "" {
|
||||
manifestPath = href
|
||||
return
|
||||
}
|
||||
}
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
f(c)
|
||||
}
|
||||
}
|
||||
f(doc)
|
||||
|
||||
if manifestPath == "" {
|
||||
return "", fmt.Errorf("%s", i18n.T("cmd.build.pwa.error.no_manifest_tag"))
|
||||
}
|
||||
|
||||
base, err := url.Parse(baseURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
manifestURL, err := base.Parse(manifestPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return manifestURL.String(), nil
|
||||
}
|
||||
|
||||
// fetchManifest downloads and parses a PWA manifest.
|
||||
func fetchManifest(manifestURL string) (map[string]interface{}, error) {
|
||||
resp, err := http.Get(manifestURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
var manifest map[string]interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&manifest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// collectAssets extracts asset URLs from a PWA manifest.
|
||||
func collectAssets(manifest map[string]interface{}, manifestURL string) []string {
|
||||
var assets []string
|
||||
base, _ := url.Parse(manifestURL)
|
||||
|
||||
// Add start_url
|
||||
if startURL, ok := manifest["start_url"].(string); ok {
|
||||
if resolved, err := base.Parse(startURL); err == nil {
|
||||
assets = append(assets, resolved.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Add icons
|
||||
if icons, ok := manifest["icons"].([]interface{}); ok {
|
||||
for _, icon := range icons {
|
||||
if iconMap, ok := icon.(map[string]interface{}); ok {
|
||||
if src, ok := iconMap["src"].(string); ok {
|
||||
if resolved, err := base.Parse(src); err == nil {
|
||||
assets = append(assets, resolved.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return assets
|
||||
}
|
||||
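The asset collection above leans entirely on net/url reference resolution: each manifest entry is resolved against the manifest's own URL. A standalone sketch of that behaviour, with hypothetical URLs:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Resolve manifest entries against the manifest URL, as collectAssets does.
	base, _ := url.Parse("https://app.example.com/static/manifest.json") // hypothetical
	abs, _ := base.Parse("/icons/icon-192.png")                          // root-relative entry
	rel, _ := base.Parse("icon-512.png")                                 // relative entry
	fmt.Println(abs.String()) // https://app.example.com/icons/icon-192.png
	fmt.Println(rel.String()) // https://app.example.com/static/icon-512.png
}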
|
||||
// downloadAsset fetches a single asset and saves it locally.
|
||||
func downloadAsset(assetURL, destDir string) error {
|
||||
resp, err := http.Get(assetURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
u, err := url.Parse(assetURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := filepath.Join(destDir, filepath.FromSlash(u.Path))
|
||||
if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = out.Close() }()
|
||||
|
||||
_, err = io.Copy(out, resp.Body)
|
||||
return err
|
||||
}
|
||||
|
||||
// runBuild builds a desktop application from a local directory.
|
||||
func runBuild(fromPath string) error {
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.build.from_path.starting"), fromPath)
|
||||
|
||||
info, err := os.Stat(fromPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.invalid_path"), err)
|
||||
}
|
||||
if !info.IsDir() {
|
||||
return fmt.Errorf("%s", i18n.T("cmd.build.from_path.error.must_be_directory"))
|
||||
}
|
||||
|
||||
buildDir := ".core/build/app"
|
||||
htmlDir := filepath.Join(buildDir, "html")
|
||||
appName := filepath.Base(fromPath)
|
||||
if strings.HasPrefix(appName, "core-pwa-build-") {
|
||||
appName = "pwa-app"
|
||||
}
|
||||
outputExe := appName
|
||||
|
||||
if err := os.RemoveAll(buildDir); err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "clean build directory"}), err)
|
||||
}
|
||||
|
||||
// 1. Generate the project from the embedded template
|
||||
fmt.Println(i18n.T("cmd.build.from_path.generating_template"))
|
||||
templateFS, err := debme.FS(guiTemplate, "tmpl/gui")
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "anchor template filesystem"}), err)
|
||||
}
|
||||
sod := gosod.New(templateFS)
|
||||
if sod == nil {
|
||||
return fmt.Errorf("%s", i18n.T("common.error.failed", map[string]any{"Action": "create new sod instance"}))
|
||||
}
|
||||
|
||||
templateData := map[string]string{"AppName": appName}
|
||||
if err := sod.Extract(buildDir, templateData); err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "extract template"}), err)
|
||||
}
|
||||
|
||||
// 2. Copy the user's web app files
|
||||
fmt.Println(i18n.T("cmd.build.from_path.copying_files"))
|
||||
if err := copyDir(fromPath, htmlDir); err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "copy application files"}), err)
|
||||
}
|
||||
|
||||
// 3. Compile the application
|
||||
fmt.Println(i18n.T("cmd.build.from_path.compiling"))
|
||||
|
||||
// Run go mod tidy
|
||||
cmd := exec.Command("go", "mod", "tidy")
|
||||
cmd.Dir = buildDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.go_mod_tidy"), err)
|
||||
}
|
||||
|
||||
// Run go build
|
||||
cmd = exec.Command("go", "build", "-o", outputExe)
|
||||
cmd.Dir = buildDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.go_build"), err)
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s %s/%s\n", i18n.T("cmd.build.from_path.success"), buildDir, outputExe)
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyDir recursively copies a directory from src to dst.
|
||||
func copyDir(src, dst string) error {
|
||||
return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
relPath, err := filepath.Rel(src, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dstPath := filepath.Join(dst, relPath)
|
||||
|
||||
if info.IsDir() {
|
||||
return os.MkdirAll(dstPath, info.Mode())
|
||||
}
|
||||
|
||||
srcFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = srcFile.Close() }()
|
||||
|
||||
dstFile, err := os.Create(dstPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = dstFile.Close() }()
|
||||
|
||||
_, err = io.Copy(dstFile, srcFile)
|
||||
return err
|
||||
})
|
||||
}
|
||||
111
build/buildcmd/cmd_release.go
Normal file
@@ -0,0 +1,111 @@
// cmd_release.go implements the release command: build + archive + publish in one step.
|
||||
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"forge.lthn.ai/core/go/pkg/framework/core"
|
||||
"forge.lthn.ai/core/go/pkg/i18n"
|
||||
"forge.lthn.ai/core/go-devops/release"
|
||||
)
|
||||
|
||||
// Flag variables for release command
|
||||
var (
|
||||
releaseVersion string
|
||||
releaseDraft bool
|
||||
releasePrerelease bool
|
||||
releaseGoForLaunch bool
|
||||
)
|
||||
|
||||
var releaseCmd = &cli.Command{
|
||||
Use: "release",
|
||||
Short: i18n.T("cmd.build.release.short"),
|
||||
Long: i18n.T("cmd.build.release.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return runRelease(cmd.Context(), !releaseGoForLaunch, releaseVersion, releaseDraft, releasePrerelease)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
releaseCmd.Flags().BoolVar(&releaseGoForLaunch, "we-are-go-for-launch", false, i18n.T("cmd.build.release.flag.go_for_launch"))
|
||||
releaseCmd.Flags().StringVar(&releaseVersion, "version", "", i18n.T("cmd.build.release.flag.version"))
|
||||
releaseCmd.Flags().BoolVar(&releaseDraft, "draft", false, i18n.T("cmd.build.release.flag.draft"))
|
||||
releaseCmd.Flags().BoolVar(&releasePrerelease, "prerelease", false, i18n.T("cmd.build.release.flag.prerelease"))
|
||||
}
|
||||
|
||||
// AddReleaseCommand adds the release subcommand to the build command.
|
||||
func AddReleaseCommand(buildCmd *cli.Command) {
|
||||
buildCmd.AddCommand(releaseCmd)
|
||||
}
|
||||
|
||||
// runRelease executes the full release workflow: build + archive + checksum + publish.
|
||||
func runRelease(ctx context.Context, dryRun bool, version string, draft, prerelease bool) error {
|
||||
// Get current directory
|
||||
projectDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return core.E("release", "get working directory", err)
|
||||
}
|
||||
|
||||
// Check for release config
|
||||
if !release.ConfigExists(projectDir) {
|
||||
cli.Print("%s %s\n",
|
||||
buildErrorStyle.Render(i18n.Label("error")),
|
||||
i18n.T("cmd.build.release.error.no_config"),
|
||||
)
|
||||
cli.Print(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.release.hint.create_config")))
|
||||
return core.E("release", "config not found", nil)
|
||||
}
|
||||
|
||||
// Load configuration
|
||||
cfg, err := release.LoadConfig(projectDir)
|
||||
if err != nil {
|
||||
return core.E("release", "load config", err)
|
||||
}
|
||||
|
||||
// Apply CLI overrides
|
||||
if version != "" {
|
||||
cfg.SetVersion(version)
|
||||
}
|
||||
|
||||
// Apply draft/prerelease overrides to all publishers
|
||||
if draft || prerelease {
|
||||
for i := range cfg.Publishers {
|
||||
if draft {
|
||||
cfg.Publishers[i].Draft = true
|
||||
}
|
||||
if prerelease {
|
||||
cfg.Publishers[i].Prerelease = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Print header
|
||||
cli.Print("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.release.label.release")), i18n.T("cmd.build.release.building_and_publishing"))
|
||||
if dryRun {
|
||||
cli.Print(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.release.dry_run_hint")))
|
||||
}
|
||||
cli.Blank()
|
||||
|
||||
// Run full release (build + archive + checksum + publish)
|
||||
rel, err := release.Run(ctx, cfg, dryRun)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Print summary
|
||||
cli.Blank()
|
||||
cli.Print("%s %s\n", buildSuccessStyle.Render(i18n.T("i18n.done.pass")), i18n.T("cmd.build.release.completed"))
|
||||
cli.Print(" %s %s\n", i18n.Label("version"), buildTargetStyle.Render(rel.Version))
|
||||
cli.Print(" %s %d\n", i18n.T("cmd.build.release.label.artifacts"), len(rel.Artifacts))
|
||||
|
||||
if !dryRun {
|
||||
for _, pub := range cfg.Publishers {
|
||||
cli.Print(" %s %s\n", i18n.T("cmd.build.release.label.published"), buildTargetStyle.Render(pub.Type))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
82
build/buildcmd/cmd_sdk.go
Normal file
@@ -0,0 +1,82 @@
// cmd_sdk.go implements SDK generation from OpenAPI specifications.
|
||||
//
|
||||
// Generates typed API clients for TypeScript, Python, Go, and PHP
|
||||
// from OpenAPI/Swagger specifications.
|
||||
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/sdk"
|
||||
"forge.lthn.ai/core/go/pkg/i18n"
|
||||
)
|
||||
|
||||
// runBuildSDK handles the `core build sdk` command.
|
||||
func runBuildSDK(specPath, lang, version string, dryRun bool) error {
|
||||
ctx := context.Background()
|
||||
|
||||
projectDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "get working directory"}), err)
|
||||
}
|
||||
|
||||
// Load config
|
||||
config := sdk.DefaultConfig()
|
||||
if specPath != "" {
|
||||
config.Spec = specPath
|
||||
}
|
||||
|
||||
s := sdk.New(projectDir, config)
|
||||
if version != "" {
|
||||
s.SetVersion(version)
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.sdk.label")), i18n.T("cmd.build.sdk.generating"))
|
||||
if dryRun {
|
||||
fmt.Printf(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.sdk.dry_run_mode")))
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Detect spec
|
||||
detectedSpec, err := s.DetectSpec()
|
||||
if err != nil {
|
||||
fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
|
||||
return err
|
||||
}
|
||||
fmt.Printf(" %s %s\n", i18n.T("common.label.spec"), buildTargetStyle.Render(detectedSpec))
|
||||
|
||||
if dryRun {
|
||||
if lang != "" {
|
||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.language_label"), buildTargetStyle.Render(lang))
|
||||
} else {
|
||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.languages_label"), buildTargetStyle.Render(strings.Join(config.Languages, ", ")))
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("cmd.build.label.ok")), i18n.T("cmd.build.sdk.would_generate"))
|
||||
return nil
|
||||
}
|
||||
|
||||
if lang != "" {
|
||||
// Generate single language
|
||||
if err := s.GenerateLanguage(ctx, lang); err != nil {
|
||||
fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
|
||||
return err
|
||||
}
|
||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.generated_label"), buildTargetStyle.Render(lang))
|
||||
} else {
|
||||
// Generate all
|
||||
if err := s.Generate(ctx); err != nil {
|
||||
fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
|
||||
return err
|
||||
}
|
||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.generated_label"), buildTargetStyle.Render(strings.Join(config.Languages, ", ")))
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("common.label.success")), i18n.T("cmd.build.sdk.complete"))
|
||||
return nil
|
||||
}
|
||||
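runBuildSDK above is a thin CLI wrapper around the sdk package. Below is a hedged sketch of driving that package directly, using only the calls visible in this file; the spec path, project root and the "typescript" language identifier are assumptions for illustration.

package main

import (
	"context"
	"log"

	"forge.lthn.ai/core/go-devops/sdk"
)

func main() {
	cfg := sdk.DefaultConfig()
	cfg.Spec = "api/openapi.yaml" // assumed spec location

	s := sdk.New("/path/to/project", cfg) // assumed project root
	s.SetVersion("1.2.3")

	if _, err := s.DetectSpec(); err != nil {
		log.Fatal(err)
	}

	// Generate one language; s.Generate(ctx) would produce all configured languages.
	if err := s.GenerateLanguage(context.Background(), "typescript"); err != nil {
		log.Fatal(err)
	}
}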
7
build/buildcmd/tmpl/gui/go.mod.tmpl
Normal file
@@ -0,0 +1,7 @@
module {{.AppName}}

go 1.21

require (
	github.com/wailsapp/wails/v3 v3.0.0-alpha.8
)
0
build/buildcmd/tmpl/gui/html/.gitkeep
Normal file
1
build/buildcmd/tmpl/gui/html/.placeholder
Normal file
@@ -0,0 +1 @@
// This file ensures the 'html' directory is correctly embedded by the Go compiler.
25
build/buildcmd/tmpl/gui/main.go.tmpl
Normal file
@@ -0,0 +1,25 @@
package main

import (
	"embed"
	"log"

	"github.com/wailsapp/wails/v3/pkg/application"
)

//go:embed all:html
var assets embed.FS

func main() {
	app := application.New(application.Options{
		Name:        "{{.AppName}}",
		Description: "A web application enclaved by Core.",
		Assets: application.AssetOptions{
			FS: assets,
		},
	})

	if err := app.Run(); err != nil {
		log.Fatal(err)
	}
}
253
build/builders/cpp.go
Normal file
@@ -0,0 +1,253 @@
// Package builders provides build implementations for different project types.
|
||||
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// CPPBuilder implements the Builder interface for C++ projects using CMake + Conan.
|
||||
// It wraps the Makefile-based build system from the .core/build submodule.
|
||||
type CPPBuilder struct{}
|
||||
|
||||
// NewCPPBuilder creates a new CPPBuilder instance.
|
||||
func NewCPPBuilder() *CPPBuilder {
|
||||
return &CPPBuilder{}
|
||||
}
|
||||
|
||||
// Name returns the builder's identifier.
|
||||
func (b *CPPBuilder) Name() string {
|
||||
return "cpp"
|
||||
}
|
||||
|
||||
// Detect checks if this builder can handle the project in the given directory.
|
||||
func (b *CPPBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
||||
return build.IsCPPProject(fs, dir), nil
|
||||
}
|
||||
|
||||
// Build compiles the C++ project using Make targets.
|
||||
// The build flow is: make configure → make build → make package.
|
||||
// Cross-compilation is handled via Conan profiles specified in .core/build.yaml.
|
||||
func (b *CPPBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("builders.CPPBuilder.Build: config is nil")
|
||||
}
|
||||
|
||||
// Validate make is available
|
||||
if err := b.validateMake(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For C++ projects, the Makefile handles everything.
|
||||
// We don't iterate per-target like Go — the Makefile's configure + build
|
||||
// produces binaries for the host platform, and cross-compilation uses
|
||||
// named Conan profiles (e.g., make gcc-linux-armv8).
|
||||
if len(targets) == 0 {
|
||||
// Default to host platform
|
||||
targets = []build.Target{{OS: runtime.GOOS, Arch: runtime.GOARCH}}
|
||||
}
|
||||
|
||||
var artifacts []build.Artifact
|
||||
|
||||
for _, target := range targets {
|
||||
built, err := b.buildTarget(ctx, cfg, target)
|
||||
if err != nil {
|
||||
return artifacts, fmt.Errorf("builders.CPPBuilder.Build: %w", err)
|
||||
}
|
||||
artifacts = append(artifacts, built...)
|
||||
}
|
||||
|
||||
return artifacts, nil
|
||||
}
|
||||
|
||||
// buildTarget compiles for a single target platform.
|
||||
func (b *CPPBuilder) buildTarget(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) {
|
||||
// Determine if this is a cross-compile or host build
|
||||
isHostBuild := target.OS == runtime.GOOS && target.Arch == runtime.GOARCH
|
||||
|
||||
if isHostBuild {
|
||||
return b.buildHost(ctx, cfg, target)
|
||||
}
|
||||
|
||||
return b.buildCross(ctx, cfg, target)
|
||||
}
|
||||
|
||||
// buildHost runs the standard make configure → make build → make package flow.
|
||||
func (b *CPPBuilder) buildHost(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) {
|
||||
fmt.Printf("Building C++ project for %s/%s (host)\n", target.OS, target.Arch)
|
||||
|
||||
// Step 1: Configure (runs conan install + cmake configure)
|
||||
if err := b.runMake(ctx, cfg.ProjectDir, "configure"); err != nil {
|
||||
return nil, fmt.Errorf("configure failed: %w", err)
|
||||
}
|
||||
|
||||
// Step 2: Build
|
||||
if err := b.runMake(ctx, cfg.ProjectDir, "build"); err != nil {
|
||||
return nil, fmt.Errorf("build failed: %w", err)
|
||||
}
|
||||
|
||||
// Step 3: Package
|
||||
if err := b.runMake(ctx, cfg.ProjectDir, "package"); err != nil {
|
||||
return nil, fmt.Errorf("package failed: %w", err)
|
||||
}
|
||||
|
||||
// Discover artifacts from build/packages/
|
||||
return b.findArtifacts(cfg.FS, cfg.ProjectDir, target)
|
||||
}
|
||||
|
||||
// buildCross runs a cross-compilation using a Conan profile name.
|
||||
// The Makefile supports profile targets like: make gcc-linux-armv8
|
||||
func (b *CPPBuilder) buildCross(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) {
|
||||
// Map target to a Conan profile name
|
||||
profile := b.targetToProfile(target)
|
||||
if profile == "" {
|
||||
return nil, fmt.Errorf("no Conan profile mapped for target %s/%s", target.OS, target.Arch)
|
||||
}
|
||||
|
||||
fmt.Printf("Building C++ project for %s/%s (cross: %s)\n", target.OS, target.Arch, profile)
|
||||
|
||||
// The Makefile exposes each profile as a top-level target
|
||||
if err := b.runMake(ctx, cfg.ProjectDir, profile); err != nil {
|
||||
return nil, fmt.Errorf("cross-compile for %s failed: %w", profile, err)
|
||||
}
|
||||
|
||||
return b.findArtifacts(cfg.FS, cfg.ProjectDir, target)
|
||||
}
|
||||
|
||||
// runMake executes a make target in the project directory.
|
||||
func (b *CPPBuilder) runMake(ctx context.Context, projectDir string, target string) error {
|
||||
cmd := exec.CommandContext(ctx, "make", target)
|
||||
cmd.Dir = projectDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Env = os.Environ()
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("make %s: %w", target, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findArtifacts searches for built packages in build/packages/.
|
||||
func (b *CPPBuilder) findArtifacts(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) {
	packagesDir := filepath.Join(projectDir, "build", "packages")

	if !fs.IsDir(packagesDir) {
		// Fall back to searching build/release/src/ for raw binaries
		return b.findBinaries(fs, projectDir, target)
	}

	entries, err := fs.List(packagesDir)
	if err != nil {
		return nil, fmt.Errorf("failed to list packages directory: %w", err)
	}

	var artifacts []build.Artifact
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}

		name := entry.Name()
		// Skip checksum files and hidden files
		if strings.HasSuffix(name, ".sha256") || strings.HasPrefix(name, ".") {
			continue
		}

		artifacts = append(artifacts, build.Artifact{
			Path: filepath.Join(packagesDir, name),
			OS:   target.OS,
			Arch: target.Arch,
		})
	}

	return artifacts, nil
}

// findBinaries searches for compiled binaries in build/release/src/.
func (b *CPPBuilder) findBinaries(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) {
	binDir := filepath.Join(projectDir, "build", "release", "src")

	if !fs.IsDir(binDir) {
		return nil, fmt.Errorf("no build output found in %s", binDir)
	}

	entries, err := fs.List(binDir)
	if err != nil {
		return nil, fmt.Errorf("failed to list build directory: %w", err)
	}

	var artifacts []build.Artifact
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}

		name := entry.Name()
		// Skip non-executable files (libraries, cmake files, etc.)
		if strings.HasSuffix(name, ".a") || strings.HasSuffix(name, ".o") ||
			strings.HasSuffix(name, ".cmake") || strings.HasPrefix(name, ".") {
			continue
		}

		fullPath := filepath.Join(binDir, name)

		// On Unix, check if file is executable
		if target.OS != "windows" {
			info, err := os.Stat(fullPath)
			if err != nil {
				continue
			}
			if info.Mode()&0111 == 0 {
				continue
			}
		}

		artifacts = append(artifacts, build.Artifact{
			Path: fullPath,
			OS:   target.OS,
			Arch: target.Arch,
		})
	}

	return artifacts, nil
}

// targetToProfile maps a build target to a Conan cross-compilation profile name.
// Profile names match those in .core/build/cmake/profiles/.
func (b *CPPBuilder) targetToProfile(target build.Target) string {
	key := target.OS + "/" + target.Arch
	profiles := map[string]string{
		"linux/amd64":    "gcc-linux-x86_64",
		"linux/x86_64":   "gcc-linux-x86_64",
		"linux/arm64":    "gcc-linux-armv8",
		"linux/armv8":    "gcc-linux-armv8",
		"darwin/arm64":   "apple-clang-armv8",
		"darwin/armv8":   "apple-clang-armv8",
		"darwin/amd64":   "apple-clang-x86_64",
		"darwin/x86_64":  "apple-clang-x86_64",
		"windows/amd64":  "msvc-194-x86_64",
		"windows/x86_64": "msvc-194-x86_64",
	}

	return profiles[key]
}

// validateMake checks if make is available.
func (b *CPPBuilder) validateMake() error {
	if _, err := exec.LookPath("make"); err != nil {
		return fmt.Errorf("cpp: make not found. Install build-essential (Linux) or Xcode Command Line Tools (macOS)")
	}
	return nil
}

// Ensure CPPBuilder implements the Builder interface.
var _ build.Builder = (*CPPBuilder)(nil)
149
build/builders/cpp_test.go
Normal file
@@ -0,0 +1,149 @@
package builders

import (
	"os"
	"path/filepath"
	"testing"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCPPBuilder_Name_Good(t *testing.T) {
	builder := NewCPPBuilder()
	assert.Equal(t, "cpp", builder.Name())
}

func TestCPPBuilder_Detect_Good(t *testing.T) {
	fs := io.Local

	t.Run("detects C++ project with CMakeLists.txt", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "CMakeLists.txt"), []byte("cmake_minimum_required(VERSION 3.16)"), 0644)
		require.NoError(t, err)

		builder := NewCPPBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.True(t, detected)
	})

	t.Run("returns false for non-C++ project", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
		require.NoError(t, err)

		builder := NewCPPBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.False(t, detected)
	})

	t.Run("returns false for empty directory", func(t *testing.T) {
		dir := t.TempDir()

		builder := NewCPPBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.False(t, detected)
	})
}

func TestCPPBuilder_Build_Bad(t *testing.T) {
	t.Run("returns error for nil config", func(t *testing.T) {
		builder := NewCPPBuilder()
		artifacts, err := builder.Build(nil, nil, []build.Target{{OS: "linux", Arch: "amd64"}})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "config is nil")
	})
}

func TestCPPBuilder_TargetToProfile_Good(t *testing.T) {
	builder := NewCPPBuilder()

	tests := []struct {
		os, arch string
		expected string
	}{
		{"linux", "amd64", "gcc-linux-x86_64"},
		{"linux", "x86_64", "gcc-linux-x86_64"},
		{"linux", "arm64", "gcc-linux-armv8"},
		{"darwin", "arm64", "apple-clang-armv8"},
		{"darwin", "amd64", "apple-clang-x86_64"},
		{"windows", "amd64", "msvc-194-x86_64"},
	}

	for _, tt := range tests {
		t.Run(tt.os+"/"+tt.arch, func(t *testing.T) {
			profile := builder.targetToProfile(build.Target{OS: tt.os, Arch: tt.arch})
			assert.Equal(t, tt.expected, profile)
		})
	}
}

func TestCPPBuilder_TargetToProfile_Bad(t *testing.T) {
	builder := NewCPPBuilder()

	t.Run("returns empty for unknown target", func(t *testing.T) {
		profile := builder.targetToProfile(build.Target{OS: "plan9", Arch: "mips"})
		assert.Empty(t, profile)
	})
}

func TestCPPBuilder_FindArtifacts_Good(t *testing.T) {
	fs := io.Local

	t.Run("finds packages in build/packages", func(t *testing.T) {
		dir := t.TempDir()
		packagesDir := filepath.Join(dir, "build", "packages")
		require.NoError(t, os.MkdirAll(packagesDir, 0755))

		// Create mock package files
		require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz"), []byte("pkg"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz.sha256"), []byte("checksum"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.rpm"), []byte("rpm"), 0644))

		builder := NewCPPBuilder()
		target := build.Target{OS: "linux", Arch: "amd64"}
		artifacts, err := builder.findArtifacts(fs, dir, target)
		require.NoError(t, err)

		// Should find tar.xz and rpm but not sha256
		assert.Len(t, artifacts, 2)
		for _, a := range artifacts {
			assert.Equal(t, "linux", a.OS)
			assert.Equal(t, "amd64", a.Arch)
			assert.False(t, filepath.Ext(a.Path) == ".sha256")
		}
	})

	t.Run("falls back to binaries in build/release/src", func(t *testing.T) {
		dir := t.TempDir()
		binDir := filepath.Join(dir, "build", "release", "src")
		require.NoError(t, os.MkdirAll(binDir, 0755))

		// Create mock binary (executable)
		binPath := filepath.Join(binDir, "test-daemon")
		require.NoError(t, os.WriteFile(binPath, []byte("binary"), 0755))

		// Create a library (should be skipped)
		require.NoError(t, os.WriteFile(filepath.Join(binDir, "libcrypto.a"), []byte("lib"), 0644))

		builder := NewCPPBuilder()
		target := build.Target{OS: "linux", Arch: "amd64"}
		artifacts, err := builder.findArtifacts(fs, dir, target)
		require.NoError(t, err)

		// Should find the executable but not the library
		assert.Len(t, artifacts, 1)
		assert.Contains(t, artifacts[0].Path, "test-daemon")
	})
}

func TestCPPBuilder_Interface_Good(t *testing.T) {
	var _ build.Builder = (*CPPBuilder)(nil)
	var _ build.Builder = NewCPPBuilder()
}
215
build/builders/docker.go
Normal file
@@ -0,0 +1,215 @@
// Package builders provides build implementations for different project types.
package builders

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
)

// DockerBuilder builds Docker images.
type DockerBuilder struct{}

// NewDockerBuilder creates a new Docker builder.
func NewDockerBuilder() *DockerBuilder {
	return &DockerBuilder{}
}

// Name returns the builder's identifier.
func (b *DockerBuilder) Name() string {
	return "docker"
}

// Detect checks if a Dockerfile exists in the directory.
func (b *DockerBuilder) Detect(fs io.Medium, dir string) (bool, error) {
	dockerfilePath := filepath.Join(dir, "Dockerfile")
	if fs.IsFile(dockerfilePath) {
		return true, nil
	}
	return false, nil
}

// Build builds Docker images for the specified targets.
func (b *DockerBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
	// Validate docker CLI is available
	if err := b.validateDockerCli(); err != nil {
		return nil, err
	}

	// Ensure buildx is available
	if err := b.ensureBuildx(ctx); err != nil {
		return nil, err
	}

	// Determine Dockerfile path
	dockerfile := cfg.Dockerfile
	if dockerfile == "" {
		dockerfile = filepath.Join(cfg.ProjectDir, "Dockerfile")
	}

	// Validate Dockerfile exists
	if !cfg.FS.IsFile(dockerfile) {
		return nil, fmt.Errorf("docker.Build: Dockerfile not found: %s", dockerfile)
	}

	// Determine image name
	imageName := cfg.Image
	if imageName == "" {
		imageName = cfg.Name
	}
	if imageName == "" {
		imageName = filepath.Base(cfg.ProjectDir)
	}

	// Build platform string from targets
	var platforms []string
	for _, t := range targets {
		platforms = append(platforms, fmt.Sprintf("%s/%s", t.OS, t.Arch))
	}

	// If no targets specified, use current platform
	if len(platforms) == 0 {
		platforms = []string{"linux/amd64"}
	}

	// Determine registry
	registry := cfg.Registry
	if registry == "" {
		registry = "ghcr.io"
	}

	// Determine tags
	tags := cfg.Tags
	if len(tags) == 0 {
		tags = []string{"latest"}
		if cfg.Version != "" {
			tags = append(tags, cfg.Version)
		}
	}

	// Build full image references
	var imageRefs []string
	for _, tag := range tags {
		// Expand version template
		expandedTag := strings.ReplaceAll(tag, "{{.Version}}", cfg.Version)
		expandedTag = strings.ReplaceAll(expandedTag, "{{Version}}", cfg.Version)

		if registry != "" {
			imageRefs = append(imageRefs, fmt.Sprintf("%s/%s:%s", registry, imageName, expandedTag))
		} else {
			imageRefs = append(imageRefs, fmt.Sprintf("%s:%s", imageName, expandedTag))
		}
	}

	// Build the docker buildx command
	args := []string{"buildx", "build"}

	// Multi-platform support
	args = append(args, "--platform", strings.Join(platforms, ","))

	// Add all tags
	for _, ref := range imageRefs {
		args = append(args, "-t", ref)
	}

	// Dockerfile path
	args = append(args, "-f", dockerfile)

	// Build arguments
	for k, v := range cfg.BuildArgs {
		expandedValue := strings.ReplaceAll(v, "{{.Version}}", cfg.Version)
		expandedValue = strings.ReplaceAll(expandedValue, "{{Version}}", cfg.Version)
		args = append(args, "--build-arg", fmt.Sprintf("%s=%s", k, expandedValue))
	}

	// Always add VERSION build arg if version is set
	if cfg.Version != "" {
		args = append(args, "--build-arg", fmt.Sprintf("VERSION=%s", cfg.Version))
	}

	// Output to local docker images or push
	if cfg.Push {
		args = append(args, "--push")
	} else {
		// For multi-platform builds without push, we need to load or output somewhere
		if len(platforms) == 1 {
			args = append(args, "--load")
		} else {
			// Multi-platform builds can't use --load, output to tarball
			outputPath := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s.tar", imageName))
			args = append(args, "--output", fmt.Sprintf("type=oci,dest=%s", outputPath))
		}
	}

	// Build context (project directory)
	args = append(args, cfg.ProjectDir)

	// Create output directory
	if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil {
		return nil, fmt.Errorf("docker.Build: failed to create output directory: %w", err)
	}

	// Execute build
	cmd := exec.CommandContext(ctx, "docker", args...)
	cmd.Dir = cfg.ProjectDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	fmt.Printf("Building Docker image: %s\n", imageName)
	fmt.Printf(" Platforms: %s\n", strings.Join(platforms, ", "))
	fmt.Printf(" Tags: %s\n", strings.Join(imageRefs, ", "))

	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("docker.Build: buildx build failed: %w", err)
	}

	// Create artifacts for each platform
	var artifacts []build.Artifact
	for _, t := range targets {
		artifacts = append(artifacts, build.Artifact{
			Path: imageRefs[0], // Primary image reference
			OS:   t.OS,
			Arch: t.Arch,
		})
	}

	return artifacts, nil
}

// validateDockerCli checks if the docker CLI is available.
func (b *DockerBuilder) validateDockerCli() error {
	cmd := exec.Command("docker", "--version")
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("docker: docker CLI not found. Install it from https://docs.docker.com/get-docker/")
	}
	return nil
}

// ensureBuildx ensures docker buildx is available and has a builder.
func (b *DockerBuilder) ensureBuildx(ctx context.Context) error {
	// Check if buildx is available
	cmd := exec.CommandContext(ctx, "docker", "buildx", "version")
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("docker: buildx is not available. Install it from https://docs.docker.com/buildx/working-with-buildx/")
	}

	// Check if we have a builder, create one if not
	cmd = exec.CommandContext(ctx, "docker", "buildx", "inspect", "--bootstrap")
	if err := cmd.Run(); err != nil {
		// Try to create a builder
		cmd = exec.CommandContext(ctx, "docker", "buildx", "create", "--use", "--bootstrap")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("docker: failed to create buildx builder: %w", err)
		}
	}

	return nil
}
129
build/builders/go.go
Normal file
@@ -0,0 +1,129 @@
// Package builders provides build implementations for different project types.
package builders

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
)

// GoBuilder implements the Builder interface for Go projects.
type GoBuilder struct{}

// NewGoBuilder creates a new GoBuilder instance.
func NewGoBuilder() *GoBuilder {
	return &GoBuilder{}
}

// Name returns the builder's identifier.
func (b *GoBuilder) Name() string {
	return "go"
}

// Detect checks if this builder can handle the project in the given directory.
// Uses IsGoProject from the build package which checks for go.mod or wails.json.
func (b *GoBuilder) Detect(fs io.Medium, dir string) (bool, error) {
	return build.IsGoProject(fs, dir), nil
}

// Build compiles the Go project for the specified targets.
// It sets GOOS, GOARCH, and CGO_ENABLED environment variables,
// applies ldflags and trimpath, and runs go build.
func (b *GoBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
	if cfg == nil {
		return nil, fmt.Errorf("builders.GoBuilder.Build: config is nil")
	}

	if len(targets) == 0 {
		return nil, fmt.Errorf("builders.GoBuilder.Build: no targets specified")
	}

	// Ensure output directory exists
	if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil {
		return nil, fmt.Errorf("builders.GoBuilder.Build: failed to create output directory: %w", err)
	}

	var artifacts []build.Artifact

	for _, target := range targets {
		artifact, err := b.buildTarget(ctx, cfg, target)
		if err != nil {
			return artifacts, fmt.Errorf("builders.GoBuilder.Build: failed to build %s: %w", target.String(), err)
		}
		artifacts = append(artifacts, artifact)
	}

	return artifacts, nil
}

// buildTarget compiles for a single target platform.
func (b *GoBuilder) buildTarget(ctx context.Context, cfg *build.Config, target build.Target) (build.Artifact, error) {
	// Determine output binary name
	binaryName := cfg.Name
	if binaryName == "" {
		binaryName = filepath.Base(cfg.ProjectDir)
	}

	// Add .exe extension for Windows
	if target.OS == "windows" && !strings.HasSuffix(binaryName, ".exe") {
		binaryName += ".exe"
	}

	// Create platform-specific output path: output/os_arch/binary
	platformDir := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch))
	if err := cfg.FS.EnsureDir(platformDir); err != nil {
		return build.Artifact{}, fmt.Errorf("failed to create platform directory: %w", err)
	}

	outputPath := filepath.Join(platformDir, binaryName)

	// Build the go build arguments
	args := []string{"build"}

	// Add trimpath flag
	args = append(args, "-trimpath")

	// Add ldflags if specified
	if len(cfg.LDFlags) > 0 {
		ldflags := strings.Join(cfg.LDFlags, " ")
		args = append(args, "-ldflags", ldflags)
	}

	// Add output path
	args = append(args, "-o", outputPath)

	// Add the project directory as the build target (current directory)
	args = append(args, ".")

	// Create the command
	cmd := exec.CommandContext(ctx, "go", args...)
	cmd.Dir = cfg.ProjectDir

	// Set up environment
	env := os.Environ()
	env = append(env, fmt.Sprintf("GOOS=%s", target.OS))
	env = append(env, fmt.Sprintf("GOARCH=%s", target.Arch))
	env = append(env, "CGO_ENABLED=0") // CGO disabled by default for cross-compilation
	cmd.Env = env

	// Capture output for error messages
	output, err := cmd.CombinedOutput()
	if err != nil {
		return build.Artifact{}, fmt.Errorf("go build failed: %w\nOutput: %s", err, string(output))
	}

	return build.Artifact{
		Path: outputPath,
		OS:   target.OS,
		Arch: target.Arch,
	}, nil
}

// Ensure GoBuilder implements the Builder interface.
var _ build.Builder = (*GoBuilder)(nil)
398
build/builders/go_test.go
Normal file
@@ -0,0 +1,398 @@
package builders

import (
	"context"
	"os"
	"path/filepath"
	"runtime"
	"testing"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// setupGoTestProject creates a minimal Go project for testing.
func setupGoTestProject(t *testing.T) string {
	t.Helper()
	dir := t.TempDir()

	// Create a minimal go.mod
	goMod := `module testproject

go 1.21
`
	err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)
	require.NoError(t, err)

	// Create a minimal main.go
	mainGo := `package main

func main() {
	println("hello")
}
`
	err = os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644)
	require.NoError(t, err)

	return dir
}

func TestGoBuilder_Name_Good(t *testing.T) {
	builder := NewGoBuilder()
	assert.Equal(t, "go", builder.Name())
}

func TestGoBuilder_Detect_Good(t *testing.T) {
	fs := io.Local
	t.Run("detects Go project with go.mod", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
		require.NoError(t, err)

		builder := NewGoBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.True(t, detected)
	})

	t.Run("detects Wails project", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
		require.NoError(t, err)

		builder := NewGoBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.True(t, detected)
	})

	t.Run("returns false for non-Go project", func(t *testing.T) {
		dir := t.TempDir()
		// Create a Node.js project instead
		err := os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}"), 0644)
		require.NoError(t, err)

		builder := NewGoBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.False(t, detected)
	})

	t.Run("returns false for empty directory", func(t *testing.T) {
		dir := t.TempDir()

		builder := NewGoBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.False(t, detected)
	})
}

func TestGoBuilder_Build_Good(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	t.Run("builds for current platform", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "testbinary",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Verify artifact properties
		artifact := artifacts[0]
		assert.Equal(t, runtime.GOOS, artifact.OS)
		assert.Equal(t, runtime.GOARCH, artifact.Arch)

		// Verify binary was created
		assert.FileExists(t, artifact.Path)

		// Verify the path is in the expected location
		expectedName := "testbinary"
		if runtime.GOOS == "windows" {
			expectedName += ".exe"
		}
		assert.Contains(t, artifact.Path, expectedName)
	})

	t.Run("builds multiple targets", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "multitest",
		}
		targets := []build.Target{
			{OS: "linux", Arch: "amd64"},
			{OS: "linux", Arch: "arm64"},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 2)

		// Verify both artifacts were created
		for i, artifact := range artifacts {
			assert.Equal(t, targets[i].OS, artifact.OS)
			assert.Equal(t, targets[i].Arch, artifact.Arch)
			assert.FileExists(t, artifact.Path)
		}
	})

	t.Run("adds .exe extension for Windows", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "wintest",
		}
		targets := []build.Target{
			{OS: "windows", Arch: "amd64"},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Verify .exe extension
		assert.True(t, filepath.Ext(artifacts[0].Path) == ".exe")
		assert.FileExists(t, artifacts[0].Path)
	})

	t.Run("uses directory name when Name not specified", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "", // Empty name
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Binary should use the project directory base name
		baseName := filepath.Base(projectDir)
		if runtime.GOOS == "windows" {
			baseName += ".exe"
		}
		assert.Contains(t, artifacts[0].Path, baseName)
	})

	t.Run("applies ldflags", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "ldflagstest",
			LDFlags:    []string{"-s", "-w"}, // Strip debug info
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)
		assert.FileExists(t, artifacts[0].Path)
	})

	t.Run("creates output directory if missing", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := filepath.Join(t.TempDir(), "nested", "output")

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "nestedtest",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)
		assert.FileExists(t, artifacts[0].Path)
		assert.DirExists(t, outputDir)
	})
}

func TestGoBuilder_Build_Bad(t *testing.T) {
	t.Run("returns error for nil config", func(t *testing.T) {
		builder := NewGoBuilder()

		artifacts, err := builder.Build(context.Background(), nil, []build.Target{{OS: "linux", Arch: "amd64"}})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "config is nil")
	})

	t.Run("returns error for empty targets", func(t *testing.T) {
		projectDir := setupGoTestProject(t)

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  t.TempDir(),
			Name:       "test",
		}

		artifacts, err := builder.Build(context.Background(), cfg, []build.Target{})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "no targets specified")
	})

	t.Run("returns error for invalid project directory", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: "/nonexistent/path",
			OutputDir:  t.TempDir(),
			Name:       "test",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		assert.Error(t, err)
		assert.Empty(t, artifacts)
	})

	t.Run("returns error for invalid Go code", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		dir := t.TempDir()

		// Create go.mod
		err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test\n\ngo 1.21"), 0644)
		require.NoError(t, err)

		// Create invalid Go code
		err = os.WriteFile(filepath.Join(dir, "main.go"), []byte("this is not valid go code"), 0644)
		require.NoError(t, err)

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: dir,
			OutputDir:  t.TempDir(),
			Name:       "test",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "go build failed")
		assert.Empty(t, artifacts)
	})

	t.Run("returns partial artifacts on partial failure", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		// Create a project that will fail on one target
		// Using an invalid arch for linux
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "partialtest",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH}, // This should succeed
			{OS: "linux", Arch: "invalid_arch"},      // This should fail
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		// Should return error for the failed build
		assert.Error(t, err)
		// Should have the successful artifact
		assert.Len(t, artifacts, 1)
	})

	t.Run("respects context cancellation", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		projectDir := setupGoTestProject(t)

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  t.TempDir(),
			Name:       "canceltest",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		// Create an already cancelled context
		ctx, cancel := context.WithCancel(context.Background())
		cancel()

		artifacts, err := builder.Build(ctx, cfg, targets)
		assert.Error(t, err)
		assert.Empty(t, artifacts)
	})
}

func TestGoBuilder_Interface_Good(t *testing.T) {
	// Verify GoBuilder implements Builder interface
	var _ build.Builder = (*GoBuilder)(nil)
	var _ build.Builder = NewGoBuilder()
}
270
build/builders/linuxkit.go
Normal file
@@ -0,0 +1,270 @@
// Package builders provides build implementations for different project types.
package builders

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
)

// LinuxKitBuilder builds LinuxKit images.
type LinuxKitBuilder struct{}

// NewLinuxKitBuilder creates a new LinuxKit builder.
func NewLinuxKitBuilder() *LinuxKitBuilder {
	return &LinuxKitBuilder{}
}

// Name returns the builder's identifier.
func (b *LinuxKitBuilder) Name() string {
	return "linuxkit"
}

// Detect checks if a linuxkit.yml or .yml config exists in the directory.
func (b *LinuxKitBuilder) Detect(fs io.Medium, dir string) (bool, error) {
	// Check for linuxkit.yml
	if fs.IsFile(filepath.Join(dir, "linuxkit.yml")) {
		return true, nil
	}
	// Check for .core/linuxkit/
	lkDir := filepath.Join(dir, ".core", "linuxkit")
	if fs.IsDir(lkDir) {
		entries, err := fs.List(lkDir)
		if err == nil {
			for _, entry := range entries {
				if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".yml") {
					return true, nil
				}
			}
		}
	}
	return false, nil
}

// Build builds LinuxKit images for the specified targets.
func (b *LinuxKitBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
	// Validate linuxkit CLI is available
	if err := b.validateLinuxKitCli(); err != nil {
		return nil, err
	}

	// Determine config file path
	configPath := cfg.LinuxKitConfig
	if configPath == "" {
		// Auto-detect
		if cfg.FS.IsFile(filepath.Join(cfg.ProjectDir, "linuxkit.yml")) {
			configPath = filepath.Join(cfg.ProjectDir, "linuxkit.yml")
		} else {
			// Look in .core/linuxkit/
			lkDir := filepath.Join(cfg.ProjectDir, ".core", "linuxkit")
			if cfg.FS.IsDir(lkDir) {
				entries, err := cfg.FS.List(lkDir)
				if err == nil {
					for _, entry := range entries {
						if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".yml") {
							configPath = filepath.Join(lkDir, entry.Name())
							break
						}
					}
				}
			}
		}
	}

	if configPath == "" {
		return nil, fmt.Errorf("linuxkit.Build: no LinuxKit config file found. Specify with --config or create linuxkit.yml")
	}

	// Validate config file exists
	if !cfg.FS.IsFile(configPath) {
		return nil, fmt.Errorf("linuxkit.Build: config file not found: %s", configPath)
	}

	// Determine output formats
	formats := cfg.Formats
	if len(formats) == 0 {
		formats = []string{"qcow2-bios"} // Default to QEMU-compatible format
	}

	// Create output directory
	outputDir := cfg.OutputDir
	if outputDir == "" {
		outputDir = filepath.Join(cfg.ProjectDir, "dist")
	}
	if err := cfg.FS.EnsureDir(outputDir); err != nil {
		return nil, fmt.Errorf("linuxkit.Build: failed to create output directory: %w", err)
	}

	// Determine base name from config file or project name
	baseName := cfg.Name
	if baseName == "" {
		baseName = strings.TrimSuffix(filepath.Base(configPath), ".yml")
	}

	// If no targets, default to linux/amd64
	if len(targets) == 0 {
		targets = []build.Target{{OS: "linux", Arch: "amd64"}}
	}

	var artifacts []build.Artifact

	// Build for each target and format
	for _, target := range targets {
		// LinuxKit only supports Linux
		if target.OS != "linux" {
			fmt.Printf("Skipping %s/%s (LinuxKit only supports Linux)\n", target.OS, target.Arch)
			continue
		}

		for _, format := range formats {
			outputName := fmt.Sprintf("%s-%s", baseName, target.Arch)

			args := b.buildLinuxKitArgs(configPath, format, outputName, outputDir, target.Arch)

			cmd := exec.CommandContext(ctx, "linuxkit", args...)
			cmd.Dir = cfg.ProjectDir
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr

			fmt.Printf("Building LinuxKit image: %s (%s, %s)\n", outputName, format, target.Arch)

			if err := cmd.Run(); err != nil {
				return nil, fmt.Errorf("linuxkit.Build: build failed for %s/%s: %w", target.Arch, format, err)
			}

			// Determine the actual output file path
			artifactPath := b.getArtifactPath(outputDir, outputName, format)

			// Verify the artifact was created
			if !cfg.FS.Exists(artifactPath) {
				// Try alternate naming conventions
				artifactPath = b.findArtifact(cfg.FS, outputDir, outputName, format)
				if artifactPath == "" {
					return nil, fmt.Errorf("linuxkit.Build: artifact not found after build: expected %s", b.getArtifactPath(outputDir, outputName, format))
				}
			}

			artifacts = append(artifacts, build.Artifact{
				Path: artifactPath,
				OS:   target.OS,
				Arch: target.Arch,
			})
		}
	}

	return artifacts, nil
}

// buildLinuxKitArgs builds the arguments for linuxkit build command.
func (b *LinuxKitBuilder) buildLinuxKitArgs(configPath, format, outputName, outputDir, arch string) []string {
	args := []string{"build"}

	// Output format
	args = append(args, "--format", format)

	// Output name
	args = append(args, "--name", outputName)

	// Output directory
	args = append(args, "--dir", outputDir)

	// Architecture (if not amd64)
	if arch != "amd64" {
		args = append(args, "--arch", arch)
	}

	// Config file
	args = append(args, configPath)

	return args
}

// getArtifactPath returns the expected path of the built artifact.
func (b *LinuxKitBuilder) getArtifactPath(outputDir, outputName, format string) string {
	ext := b.getFormatExtension(format)
	return filepath.Join(outputDir, outputName+ext)
}

// findArtifact searches for the built artifact with various naming conventions.
func (b *LinuxKitBuilder) findArtifact(fs io.Medium, outputDir, outputName, format string) string {
	// LinuxKit can create files with different suffixes
	extensions := []string{
		b.getFormatExtension(format),
		"-bios" + b.getFormatExtension(format),
		"-efi" + b.getFormatExtension(format),
	}

	for _, ext := range extensions {
		path := filepath.Join(outputDir, outputName+ext)
		if fs.Exists(path) {
			return path
		}
	}

	// Try to find any file matching the output name
	entries, err := fs.List(outputDir)
	if err == nil {
		for _, entry := range entries {
			if strings.HasPrefix(entry.Name(), outputName) {
				match := filepath.Join(outputDir, entry.Name())
				// Return first match that looks like an image
				ext := filepath.Ext(match)
				if ext == ".iso" || ext == ".qcow2" || ext == ".raw" || ext == ".vmdk" || ext == ".vhd" {
					return match
				}
			}
		}
	}

	return ""
}

// getFormatExtension returns the file extension for a LinuxKit output format.
func (b *LinuxKitBuilder) getFormatExtension(format string) string {
	switch format {
	case "iso", "iso-bios", "iso-efi":
		return ".iso"
	case "raw", "raw-bios", "raw-efi":
		return ".raw"
	case "qcow2", "qcow2-bios", "qcow2-efi":
		return ".qcow2"
	case "vmdk":
		return ".vmdk"
	case "vhd":
		return ".vhd"
	case "gcp":
		return ".img.tar.gz"
	case "aws":
		return ".raw"
	default:
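		// Fallback for unlisted formats: derive the extension from the format
		// name itself, with a trailing "-bios" qualifier stripped.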
		return "." + strings.TrimSuffix(format, "-bios")
	}
}

// validateLinuxKitCli checks if the linuxkit CLI is available.
func (b *LinuxKitBuilder) validateLinuxKitCli() error {
	// Check PATH first
	if _, err := exec.LookPath("linuxkit"); err == nil {
		return nil
	}

	// Check common locations
	paths := []string{
		"/usr/local/bin/linuxkit",
		"/opt/homebrew/bin/linuxkit",
	}

	for _, p := range paths {
		if _, err := os.Stat(p); err == nil {
			return nil
		}
	}

	return fmt.Errorf("linuxkit: linuxkit CLI not found. Install with: brew install linuxkit (macOS) or see https://github.com/linuxkit/linuxkit")
}
275
build/builders/taskfile.go
Normal file
@@ -0,0 +1,275 @@
// Package builders provides build implementations for different project types.
package builders

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
)

// TaskfileBuilder builds projects using Taskfile (https://taskfile.dev/).
// This is a generic builder that can handle any project type that has a Taskfile.
type TaskfileBuilder struct{}

// NewTaskfileBuilder creates a new Taskfile builder.
func NewTaskfileBuilder() *TaskfileBuilder {
	return &TaskfileBuilder{}
}

// Name returns the builder's identifier.
func (b *TaskfileBuilder) Name() string {
	return "taskfile"
}

// Detect checks if a Taskfile exists in the directory.
func (b *TaskfileBuilder) Detect(fs io.Medium, dir string) (bool, error) {
	// Check for Taskfile.yml, Taskfile.yaml, or Taskfile
	taskfiles := []string{
		"Taskfile.yml",
		"Taskfile.yaml",
		"Taskfile",
		"taskfile.yml",
		"taskfile.yaml",
	}

	for _, tf := range taskfiles {
		if fs.IsFile(filepath.Join(dir, tf)) {
			return true, nil
		}
	}
	return false, nil
}

// Build runs the Taskfile build task for each target platform.
func (b *TaskfileBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
	// Validate task CLI is available
	if err := b.validateTaskCli(); err != nil {
		return nil, err
	}

	// Create output directory
	outputDir := cfg.OutputDir
	if outputDir == "" {
		outputDir = filepath.Join(cfg.ProjectDir, "dist")
	}
	if err := cfg.FS.EnsureDir(outputDir); err != nil {
		return nil, fmt.Errorf("taskfile.Build: failed to create output directory: %w", err)
	}

	var artifacts []build.Artifact

	// If no targets specified, just run the build task once
	if len(targets) == 0 {
		if err := b.runTask(ctx, cfg, "", ""); err != nil {
			return nil, err
		}

		// Try to find artifacts in output directory
		found := b.findArtifacts(cfg.FS, outputDir)
		artifacts = append(artifacts, found...)
	} else {
		// Run build task for each target
		for _, target := range targets {
			if err := b.runTask(ctx, cfg, target.OS, target.Arch); err != nil {
				return nil, err
			}

			// Try to find artifacts for this target
			found := b.findArtifactsForTarget(cfg.FS, outputDir, target)
			artifacts = append(artifacts, found...)
		}
	}

	return artifacts, nil
}

// runTask executes the Taskfile build task.
func (b *TaskfileBuilder) runTask(ctx context.Context, cfg *build.Config, goos, goarch string) error {
	// Build task command
	args := []string{"build"}
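
	// Note: variables are passed both as VAR=value CLI overrides (appended to
	// args below) and as environment variables further down, so a Taskfile can
	// consume them either way.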
	// Pass variables if targets are specified
	if goos != "" {
		args = append(args, fmt.Sprintf("GOOS=%s", goos))
	}
	if goarch != "" {
		args = append(args, fmt.Sprintf("GOARCH=%s", goarch))
	}
	if cfg.OutputDir != "" {
		args = append(args, fmt.Sprintf("OUTPUT_DIR=%s", cfg.OutputDir))
	}
	if cfg.Name != "" {
		args = append(args, fmt.Sprintf("NAME=%s", cfg.Name))
	}
	if cfg.Version != "" {
		args = append(args, fmt.Sprintf("VERSION=%s", cfg.Version))
	}

	cmd := exec.CommandContext(ctx, "task", args...)
	cmd.Dir = cfg.ProjectDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	// Set environment variables
	cmd.Env = os.Environ()
	if goos != "" {
		cmd.Env = append(cmd.Env, fmt.Sprintf("GOOS=%s", goos))
	}
	if goarch != "" {
		cmd.Env = append(cmd.Env, fmt.Sprintf("GOARCH=%s", goarch))
	}
	if cfg.OutputDir != "" {
		cmd.Env = append(cmd.Env, fmt.Sprintf("OUTPUT_DIR=%s", cfg.OutputDir))
	}
	if cfg.Name != "" {
		cmd.Env = append(cmd.Env, fmt.Sprintf("NAME=%s", cfg.Name))
	}
	if cfg.Version != "" {
		cmd.Env = append(cmd.Env, fmt.Sprintf("VERSION=%s", cfg.Version))
	}

	if goos != "" && goarch != "" {
		fmt.Printf("Running task build for %s/%s\n", goos, goarch)
	} else {
		fmt.Println("Running task build")
	}

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("taskfile.Build: task build failed: %w", err)
	}

	return nil
}

// findArtifacts searches for built artifacts in the output directory.
func (b *TaskfileBuilder) findArtifacts(fs io.Medium, outputDir string) []build.Artifact {
	var artifacts []build.Artifact

	entries, err := fs.List(outputDir)
	if err != nil {
		return artifacts
	}

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}

		// Skip common non-artifact files
		name := entry.Name()
		if strings.HasPrefix(name, ".") || name == "CHECKSUMS.txt" {
			continue
		}

		artifacts = append(artifacts, build.Artifact{
			Path: filepath.Join(outputDir, name),
			OS:   "",
			Arch: "",
		})
	}

	return artifacts
}

// findArtifactsForTarget searches for built artifacts for a specific target.
func (b *TaskfileBuilder) findArtifactsForTarget(fs io.Medium, outputDir string, target build.Target) []build.Artifact {
	var artifacts []build.Artifact

	// 1. Look for platform-specific subdirectory: output/os_arch/
	platformSubdir := filepath.Join(outputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch))
	if fs.IsDir(platformSubdir) {
		entries, _ := fs.List(platformSubdir)
		for _, entry := range entries {
			if entry.IsDir() {
				// Handle .app bundles on macOS
				if target.OS == "darwin" && strings.HasSuffix(entry.Name(), ".app") {
					artifacts = append(artifacts, build.Artifact{
						Path: filepath.Join(platformSubdir, entry.Name()),
						OS:   target.OS,
						Arch: target.Arch,
					})
				}
				continue
			}
			// Skip hidden files
			if strings.HasPrefix(entry.Name(), ".") {
				continue
			}
			artifacts = append(artifacts, build.Artifact{
				Path: filepath.Join(platformSubdir, entry.Name()),
				OS:   target.OS,
				Arch: target.Arch,
			})
		}
		if len(artifacts) > 0 {
			return artifacts
		}
	}

	// 2. Look for files matching the target pattern in the root output dir
	patterns := []string{
		fmt.Sprintf("*-%s-%s*", target.OS, target.Arch),
		fmt.Sprintf("*_%s_%s*", target.OS, target.Arch),
		fmt.Sprintf("*-%s*", target.Arch),
	}

	for _, pattern := range patterns {
		entries, _ := fs.List(outputDir)
		for _, entry := range entries {
			match := entry.Name()
			// Simple glob matching
			if b.matchPattern(match, pattern) {
				fullPath := filepath.Join(outputDir, match)
				if fs.IsDir(fullPath) {
					continue
				}

				artifacts = append(artifacts, build.Artifact{
					Path: fullPath,
					OS:   target.OS,
					Arch: target.Arch,
				})
			}
		}

		if len(artifacts) > 0 {
			break // Found matches, stop looking
		}
	}

	return artifacts
}

// matchPattern implements glob matching for Taskfile artifacts.
func (b *TaskfileBuilder) matchPattern(name, pattern string) bool {
	matched, _ := filepath.Match(pattern, name)
	return matched
}

// validateTaskCli checks if the task CLI is available.
func (b *TaskfileBuilder) validateTaskCli() error {
	// Check PATH first
	if _, err := exec.LookPath("task"); err == nil {
		return nil
	}

	// Check common locations
	paths := []string{
		"/usr/local/bin/task",
		"/opt/homebrew/bin/task",
	}

	for _, p := range paths {
		if _, err := os.Stat(p); err == nil {
			return nil
		}
	}

	return fmt.Errorf("taskfile: task CLI not found. Install with: brew install go-task (macOS), go install github.com/go-task/task/v3/cmd/task@latest, or see https://taskfile.dev/installation/")
}
247
build/builders/wails.go
Normal file
@@ -0,0 +1,247 @@
// Package builders provides build implementations for different project types.
package builders

import (
	"context"
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
)

// WailsBuilder implements the Builder interface for Wails v3 projects.
type WailsBuilder struct{}

// NewWailsBuilder creates a new WailsBuilder instance.
func NewWailsBuilder() *WailsBuilder {
	return &WailsBuilder{}
}

// Name returns the builder's identifier.
func (b *WailsBuilder) Name() string {
	return "wails"
}

// Detect checks if this builder can handle the project in the given directory.
// Uses IsWailsProject from the build package which checks for wails.json.
func (b *WailsBuilder) Detect(fs io.Medium, dir string) (bool, error) {
	return build.IsWailsProject(fs, dir), nil
}

// Build compiles the Wails project for the specified targets.
// It detects the Wails version and chooses the appropriate build strategy:
//   - Wails v3: delegates to the project's Taskfile (error if missing)
//   - Wails v2: uses the 'wails build' command
func (b *WailsBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
	if cfg == nil {
		return nil, fmt.Errorf("builders.WailsBuilder.Build: config is nil")
	}

	if len(targets) == 0 {
		return nil, fmt.Errorf("builders.WailsBuilder.Build: no targets specified")
	}

	// Detect Wails version
	isV3 := b.isWailsV3(cfg.FS, cfg.ProjectDir)

	if isV3 {
		// Wails v3 strategy: delegate to Taskfile
		taskBuilder := NewTaskfileBuilder()
		if detected, _ := taskBuilder.Detect(cfg.FS, cfg.ProjectDir); detected {
			return taskBuilder.Build(ctx, cfg, targets)
		}
		return nil, fmt.Errorf("wails v3 projects require a Taskfile for building")
	}

	// Wails v2 strategy: use 'wails build'
	// Ensure output directory exists
	if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil {
		return nil, fmt.Errorf("builders.WailsBuilder.Build: failed to create output directory: %w", err)
	}

	// Note: Wails v2 handles frontend installation/building automatically via wails.json config

	var artifacts []build.Artifact

	for _, target := range targets {
		artifact, err := b.buildV2Target(ctx, cfg, target)
		if err != nil {
			return artifacts, fmt.Errorf("builders.WailsBuilder.Build: failed to build %s: %w", target.String(), err)
		}
		artifacts = append(artifacts, artifact)
	}

	return artifacts, nil
}

// isWailsV3 checks if the project uses Wails v3 by inspecting go.mod.
func (b *WailsBuilder) isWailsV3(fs io.Medium, dir string) bool {
	goModPath := filepath.Join(dir, "go.mod")
	content, err := fs.Read(goModPath)
	if err != nil {
		return false
	}
	return strings.Contains(content, "github.com/wailsapp/wails/v3")
}

// buildV2Target compiles for a single target platform using wails (v2).
func (b *WailsBuilder) buildV2Target(ctx context.Context, cfg *build.Config, target build.Target) (build.Artifact, error) {
	// Determine output binary name
	binaryName := cfg.Name
	if binaryName == "" {
		binaryName = filepath.Base(cfg.ProjectDir)
	}

	// Build the wails build arguments
	args := []string{"build"}

	// Platform
	args = append(args, "-platform", fmt.Sprintf("%s/%s", target.OS, target.Arch))

	// Wails v2 is opinionated about its output directory (build/bin), so we let
	// it build there, then locate the artifact and copy it into cfg.OutputDir.

	// Create the command
	cmd := exec.CommandContext(ctx, "wails", args...)
	cmd.Dir = cfg.ProjectDir

	// Capture output for error messages
	output, err := cmd.CombinedOutput()
	if err != nil {
		return build.Artifact{}, fmt.Errorf("wails build failed: %w\nOutput: %s", err, string(output))
	}

	// Wails v2 typically outputs to build/bin;
	// we need to move/copy it to our desired output dir.

	// Construct the source path where Wails v2 puts the binary
	wailsOutputDir := filepath.Join(cfg.ProjectDir, "build", "bin")

	// Find the artifact in Wails output dir
	sourcePath, err := b.findArtifact(cfg.FS, wailsOutputDir, binaryName, target)
	if err != nil {
		return build.Artifact{}, fmt.Errorf("failed to find Wails v2 build artifact: %w", err)
	}

	// Create a platform-specific dir in our output
	platformDir := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch))
	if err := cfg.FS.EnsureDir(platformDir); err != nil {
		return build.Artifact{}, fmt.Errorf("failed to create output dir: %w", err)
	}

	destPath := filepath.Join(platformDir, filepath.Base(sourcePath))

	// Simple copy using the medium
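	// Note: this reads the whole artifact into memory and copies a single file;
	// macOS .app bundles (directories) returned by findArtifact are not handled
	// by this copy path.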
	content, err := cfg.FS.Read(sourcePath)
	if err != nil {
		return build.Artifact{}, err
	}
	if err := cfg.FS.Write(destPath, content); err != nil {
		return build.Artifact{}, err
	}

	return build.Artifact{
		Path: destPath,
		OS:   target.OS,
		Arch: target.Arch,
	}, nil
}

// findArtifact locates the built artifact based on the target platform.
func (b *WailsBuilder) findArtifact(fs io.Medium, platformDir, binaryName string, target build.Target) (string, error) {
	var candidates []string

	switch target.OS {
	case "windows":
		// Look for NSIS installer first, then plain exe
		candidates = []string{
			filepath.Join(platformDir, binaryName+"-installer.exe"),
			filepath.Join(platformDir, binaryName+".exe"),
			filepath.Join(platformDir, binaryName+"-amd64-installer.exe"),
		}
	case "darwin":
		// Look for .dmg, then .app bundle, then plain binary
		candidates = []string{
			filepath.Join(platformDir, binaryName+".dmg"),
			filepath.Join(platformDir, binaryName+".app"),
			filepath.Join(platformDir, binaryName),
		}
	default:
		// Linux and others: look for plain binary
		candidates = []string{
			filepath.Join(platformDir, binaryName),
		}
	}

	// Try each candidate
	for _, candidate := range candidates {
		if fs.Exists(candidate) {
			return candidate, nil
		}
	}

	// If no specific candidate found, try to find any executable or package in the directory
	entries, err := fs.List(platformDir)
	if err != nil {
		return "", fmt.Errorf("failed to read platform directory: %w", err)
	}

	for _, entry := range entries {
		name := entry.Name()
		// Skip common non-artifact files
		if strings.HasSuffix(name, ".go") || strings.HasSuffix(name, ".json") {
			continue
		}

		path := filepath.Join(platformDir, name)
		info, err := entry.Info()
		if err != nil {
			continue
		}

		// On Unix, check if it's executable; on Windows, check for .exe
		if target.OS == "windows" {
			if strings.HasSuffix(name, ".exe") {
				return path, nil
			}
		} else if info.Mode()&0111 != 0 || entry.IsDir() {
			// Executable file or directory (.app bundle)
			return path, nil
		}
	}

	return "", fmt.Errorf("no artifact found in %s", platformDir)
}

// detectPackageManager detects the frontend package manager based on lock files.
// Returns "bun", "pnpm", "yarn", or "npm" (default).
func detectPackageManager(fs io.Medium, dir string) string {
	// Check in priority order: bun, pnpm, yarn, npm
	lockFiles := []struct {
		file    string
		manager string
	}{
		{"bun.lockb", "bun"},
		{"pnpm-lock.yaml", "pnpm"},
		{"yarn.lock", "yarn"},
		{"package-lock.json", "npm"},
	}

	for _, lf := range lockFiles {
		if fs.IsFile(filepath.Join(dir, lf.file)) {
			return lf.manager
		}
	}

	// Default to npm if no lock file found
	return "npm"
}

// Ensure WailsBuilder implements the Builder interface.
var _ build.Builder = (*WailsBuilder)(nil)
416
build/builders/wails_test.go
Normal file
@@ -0,0 +1,416 @@
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupWailsTestProject creates a minimal Wails project structure for testing.
|
||||
func setupWailsTestProject(t *testing.T) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
|
||||
// Create wails.json
|
||||
wailsJSON := `{
|
||||
"name": "testapp",
|
||||
"outputfilename": "testapp"
|
||||
}`
|
||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte(wailsJSON), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a minimal go.mod
|
||||
goMod := `module testapp
|
||||
|
||||
go 1.21
|
||||
|
||||
require github.com/wailsapp/wails/v3 v3.0.0
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a minimal main.go
|
||||
mainGo := `package main
|
||||
|
||||
func main() {
|
||||
println("hello wails")
|
||||
}
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a minimal Taskfile.yml
|
||||
taskfile := `version: '3'
|
||||
tasks:
|
||||
build:
|
||||
cmds:
|
||||
- mkdir -p {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}
|
||||
- touch {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}/testapp
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(dir, "Taskfile.yml"), []byte(taskfile), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
// setupWailsV2TestProject creates a Wails v2 project structure.
|
||||
func setupWailsV2TestProject(t *testing.T) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
|
||||
// wails.json
|
||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// go.mod with v2
|
||||
goMod := `module testapp
|
||||
go 1.21
|
||||
require github.com/wailsapp/wails/v2 v2.8.0
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
func TestWailsBuilder_Build_Taskfile_Good(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
|
||||
// Check if task is available
|
||||
if _, err := exec.LookPath("task"); err != nil {
|
||||
t.Skip("task not installed, skipping test")
|
||||
}
|
||||
|
||||
t.Run("delegates to Taskfile if present", func(t *testing.T) {
|
||||
fs := io.Local
|
||||
projectDir := setupWailsTestProject(t)
|
||||
outputDir := t.TempDir()
|
||||
|
||||
// Create a Taskfile that just touches a file
|
||||
taskfile := `version: '3'
|
||||
tasks:
|
||||
build:
|
||||
cmds:
|
||||
- mkdir -p {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}
|
||||
- touch {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}/testapp
|
||||
`
|
||||
err := os.WriteFile(filepath.Join(projectDir, "Taskfile.yml"), []byte(taskfile), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
cfg := &build.Config{
|
||||
FS: fs,
|
||||
ProjectDir: projectDir,
|
||||
OutputDir: outputDir,
|
||||
Name: "testapp",
|
||||
}
|
||||
targets := []build.Target{
|
||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
||||
}
|
||||
|
||||
artifacts, err := builder.Build(context.Background(), cfg, targets)
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, artifacts)
|
||||
})
|
||||
}
|
||||
|
||||
func TestWailsBuilder_Name_Good(t *testing.T) {
|
||||
builder := NewWailsBuilder()
|
||||
assert.Equal(t, "wails", builder.Name())
|
||||
}
|
||||
|
||||
func TestWailsBuilder_Build_V2_Good(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
|
||||
if _, err := exec.LookPath("wails"); err != nil {
|
||||
t.Skip("wails not installed, skipping integration test")
|
||||
}
|
||||
|
||||
t.Run("builds v2 project", func(t *testing.T) {
|
||||
fs := io.Local
|
||||
projectDir := setupWailsV2TestProject(t)
|
||||
outputDir := t.TempDir()
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
cfg := &build.Config{
|
||||
FS: fs,
|
||||
ProjectDir: projectDir,
|
||||
OutputDir: outputDir,
|
||||
Name: "testapp",
|
||||
}
|
||||
targets := []build.Target{
|
||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
||||
}
|
||||
|
||||
// This will likely fail in a real run because we can't easily mock the full wails v2 build process
|
||||
// (which needs a valid project with main.go etc).
|
||||
// But it validates we are trying to run the command.
|
||||
// For now, we just verify it attempts the build - error is expected
|
||||
_, _ = builder.Build(context.Background(), cfg, targets)
|
||||
})
|
||||
}
|
||||
|
||||
func TestWailsBuilder_Detect_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
t.Run("detects Wails project with wails.json", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for Go-only project", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for Node.js project", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for empty directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDetectPackageManager_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
t.Run("detects bun from bun.lockb", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "bun.lockb"), []byte(""), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
result := detectPackageManager(fs, dir)
|
||||
assert.Equal(t, "bun", result)
|
||||
})
|
||||
|
||||
t.Run("detects pnpm from pnpm-lock.yaml", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "pnpm-lock.yaml"), []byte(""), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
result := detectPackageManager(fs, dir)
|
||||
assert.Equal(t, "pnpm", result)
|
||||
})
|
||||
|
||||
t.Run("detects yarn from yarn.lock", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
result := detectPackageManager(fs, dir)
|
||||
assert.Equal(t, "yarn", result)
|
||||
})
|
||||
|
||||
t.Run("detects npm from package-lock.json", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
result := detectPackageManager(fs, dir)
|
||||
assert.Equal(t, "npm", result)
|
||||
})
|
||||
|
||||
t.Run("defaults to npm when no lock file", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
result := detectPackageManager(fs, dir)
|
||||
assert.Equal(t, "npm", result)
|
||||
})
|
||||
|
||||
t.Run("prefers bun over other lock files", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Create multiple lock files
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "bun.lockb"), []byte(""), 0644))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644))
|
||||
|
||||
result := detectPackageManager(fs, dir)
|
||||
assert.Equal(t, "bun", result)
|
||||
})
|
||||
|
||||
t.Run("prefers pnpm over yarn and npm", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Create multiple lock files (no bun)
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "pnpm-lock.yaml"), []byte(""), 0644))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644))
|
||||
|
||||
result := detectPackageManager(fs, dir)
|
||||
assert.Equal(t, "pnpm", result)
|
||||
})
|
||||
|
||||
t.Run("prefers yarn over npm", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Create multiple lock files (no bun or pnpm)
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644))
|
||||
|
||||
result := detectPackageManager(fs, dir)
|
||||
assert.Equal(t, "yarn", result)
|
||||
})
|
||||
}
|
||||
|
||||
func TestWailsBuilder_Build_Bad(t *testing.T) {
|
||||
t.Run("returns error for nil config", func(t *testing.T) {
|
||||
builder := NewWailsBuilder()
|
||||
|
||||
artifacts, err := builder.Build(context.Background(), nil, []build.Target{{OS: "linux", Arch: "amd64"}})
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, artifacts)
|
||||
assert.Contains(t, err.Error(), "config is nil")
|
||||
})
|
||||
|
||||
t.Run("returns error for empty targets", func(t *testing.T) {
|
||||
projectDir := setupWailsTestProject(t)
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
cfg := &build.Config{
|
||||
FS: io.Local,
|
||||
ProjectDir: projectDir,
|
||||
OutputDir: t.TempDir(),
|
||||
Name: "test",
|
||||
}
|
||||
|
||||
artifacts, err := builder.Build(context.Background(), cfg, []build.Target{})
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, artifacts)
|
||||
assert.Contains(t, err.Error(), "no targets specified")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWailsBuilder_Build_Good(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
|
||||
// Check if wails3 is available in PATH
|
||||
if _, err := exec.LookPath("wails3"); err != nil {
|
||||
t.Skip("wails3 not installed, skipping integration test")
|
||||
}
|
||||
|
||||
t.Run("builds for current platform", func(t *testing.T) {
|
||||
projectDir := setupWailsTestProject(t)
|
||||
outputDir := t.TempDir()
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
cfg := &build.Config{
|
||||
FS: io.Local,
|
||||
ProjectDir: projectDir,
|
||||
OutputDir: outputDir,
|
||||
Name: "testapp",
|
||||
}
|
||||
targets := []build.Target{
|
||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
||||
}
|
||||
|
||||
artifacts, err := builder.Build(context.Background(), cfg, targets)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, artifacts, 1)
|
||||
|
||||
// Verify artifact properties
|
||||
artifact := artifacts[0]
|
||||
assert.Equal(t, runtime.GOOS, artifact.OS)
|
||||
assert.Equal(t, runtime.GOARCH, artifact.Arch)
|
||||
})
|
||||
}
|
||||
|
||||
func TestWailsBuilder_Interface_Good(t *testing.T) {
|
||||
// Verify WailsBuilder implements Builder interface
|
||||
var _ build.Builder = (*WailsBuilder)(nil)
|
||||
var _ build.Builder = NewWailsBuilder()
|
||||
}
|
||||
|
||||
func TestWailsBuilder_Ugly(t *testing.T) {
|
||||
t.Run("handles nonexistent frontend directory gracefully", func(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
|
||||
// Create a Wails project without a frontend directory
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
cfg := &build.Config{
|
||||
FS: io.Local,
|
||||
ProjectDir: dir,
|
||||
OutputDir: t.TempDir(),
|
||||
Name: "test",
|
||||
}
|
||||
targets := []build.Target{
|
||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
||||
}
|
||||
|
||||
// This will fail because wails3 isn't set up, but it shouldn't panic
|
||||
// due to missing frontend directory
|
||||
_, err = builder.Build(context.Background(), cfg, targets)
|
||||
// We expect an error (wails3 build will fail), but not a panic
|
||||
// The error should be about wails3 build, not about frontend
|
||||
if err != nil {
|
||||
assert.NotContains(t, err.Error(), "frontend dependencies")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("handles context cancellation", func(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
|
||||
projectDir := setupWailsTestProject(t)
|
||||
|
||||
builder := NewWailsBuilder()
|
||||
cfg := &build.Config{
|
||||
FS: io.Local,
|
||||
ProjectDir: projectDir,
|
||||
OutputDir: t.TempDir(),
|
||||
Name: "canceltest",
|
||||
}
|
||||
targets := []build.Target{
|
||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
||||
}
|
||||
|
||||
// Create an already cancelled context
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
|
||||
artifacts, err := builder.Build(ctx, cfg, targets)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, artifacts)
|
||||
})
|
||||
}
|
||||
97
build/checksum.go
Normal file
@ -0,0 +1,97 @@

// Package build provides project type detection and cross-compilation for the Core build system.
package build

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"path/filepath"
	"sort"
	"strings"

	io_interface "forge.lthn.ai/core/go/pkg/io"
)

// Checksum computes SHA256 for an artifact and returns the artifact with the Checksum field filled.
func Checksum(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
	if artifact.Path == "" {
		return Artifact{}, fmt.Errorf("build.Checksum: artifact path is empty")
	}

	// Open the file
	file, err := fs.Open(artifact.Path)
	if err != nil {
		return Artifact{}, fmt.Errorf("build.Checksum: failed to open file: %w", err)
	}
	defer func() { _ = file.Close() }()

	// Compute SHA256 hash
	hasher := sha256.New()
	if _, err := io.Copy(hasher, file); err != nil {
		return Artifact{}, fmt.Errorf("build.Checksum: failed to hash file: %w", err)
	}

	checksum := hex.EncodeToString(hasher.Sum(nil))

	return Artifact{
		Path:     artifact.Path,
		OS:       artifact.OS,
		Arch:     artifact.Arch,
		Checksum: checksum,
	}, nil
}

// ChecksumAll computes checksums for all artifacts.
// Returns a slice of artifacts with their Checksum fields filled.
func ChecksumAll(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) {
	if len(artifacts) == 0 {
		return nil, nil
	}

	var checksummed []Artifact
	for _, artifact := range artifacts {
		cs, err := Checksum(fs, artifact)
		if err != nil {
			return checksummed, fmt.Errorf("build.ChecksumAll: failed to checksum %s: %w", artifact.Path, err)
		}
		checksummed = append(checksummed, cs)
	}

	return checksummed, nil
}

// WriteChecksumFile writes a CHECKSUMS.txt file with the format:
//
//	sha256hash filename1
//	sha256hash filename2
//
// The artifacts should have their Checksum fields filled (call ChecksumAll first).
// Filenames are relative to the output directory (just the basename).
func WriteChecksumFile(fs io_interface.Medium, artifacts []Artifact, path string) error {
	if len(artifacts) == 0 {
		return nil
	}

	// Build the content
	var lines []string
	for _, artifact := range artifacts {
		if artifact.Checksum == "" {
			return fmt.Errorf("build.WriteChecksumFile: artifact %s has no checksum", artifact.Path)
		}
		filename := filepath.Base(artifact.Path)
		lines = append(lines, fmt.Sprintf("%s %s", artifact.Checksum, filename))
	}

	// Sort lines for consistent output
	sort.Strings(lines)

	content := strings.Join(lines, "\n") + "\n"

	// Write the file using the medium (which handles directory creation in Write)
	if err := fs.Write(path, content); err != nil {
		return fmt.Errorf("build.WriteChecksumFile: failed to write file: %w", err)
	}

	return nil
}
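A minimal usage sketch, not part of the diff: ChecksumAll and WriteChecksumFile compose into a single release step. The helper name and the CHECKSUMS.txt location are illustrative assumptions.

// Sketch: checksum a set of built artifacts and emit CHECKSUMS.txt in the output directory.
// writeReleaseChecksums is a hypothetical wrapper around the functions above.
func writeReleaseChecksums(fs io_interface.Medium, artifacts []Artifact, outputDir string) error {
	summed, err := ChecksumAll(fs, artifacts)
	if err != nil {
		return err
	}
	return WriteChecksumFile(fs, summed, filepath.Join(outputDir, "CHECKSUMS.txt"))
}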
282
build/checksum_test.go
Normal file
@ -0,0 +1,282 @@

package build

import (
	"os"
	"path/filepath"
	"strings"
	"testing"

	"forge.lthn.ai/core/go/pkg/io"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// setupChecksumTestFile creates a test file with known content.
func setupChecksumTestFile(t *testing.T, content string) string {
	t.Helper()

	dir := t.TempDir()
	path := filepath.Join(dir, "testfile")
	err := os.WriteFile(path, []byte(content), 0644)
	require.NoError(t, err)

	return path
}

func TestChecksum_Good(t *testing.T) {
	fs := io.Local
	t.Run("computes SHA256 checksum", func(t *testing.T) {
		// Known SHA256 of "Hello, World!\n"
		path := setupChecksumTestFile(t, "Hello, World!\n")
		expectedChecksum := "c98c24b677eff44860afea6f493bbaec5bb1c4cbb209c6fc2bbb47f66ff2ad31"

		artifact := Artifact{
			Path: path,
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Checksum(fs, artifact)
		require.NoError(t, err)
		assert.Equal(t, expectedChecksum, result.Checksum)
	})

	t.Run("preserves artifact fields", func(t *testing.T) {
		path := setupChecksumTestFile(t, "test content")

		artifact := Artifact{
			Path: path,
			OS:   "darwin",
			Arch: "arm64",
		}

		result, err := Checksum(fs, artifact)
		require.NoError(t, err)

		assert.Equal(t, path, result.Path)
		assert.Equal(t, "darwin", result.OS)
		assert.Equal(t, "arm64", result.Arch)
		assert.NotEmpty(t, result.Checksum)
	})

	t.Run("produces 64 character hex string", func(t *testing.T) {
		path := setupChecksumTestFile(t, "any content")

		artifact := Artifact{Path: path, OS: "linux", Arch: "amd64"}

		result, err := Checksum(fs, artifact)
		require.NoError(t, err)

		// SHA256 produces 32 bytes = 64 hex characters
		assert.Len(t, result.Checksum, 64)
	})

	t.Run("different content produces different checksums", func(t *testing.T) {
		path1 := setupChecksumTestFile(t, "content one")
		path2 := setupChecksumTestFile(t, "content two")

		result1, err := Checksum(fs, Artifact{Path: path1, OS: "linux", Arch: "amd64"})
		require.NoError(t, err)

		result2, err := Checksum(fs, Artifact{Path: path2, OS: "linux", Arch: "amd64"})
		require.NoError(t, err)

		assert.NotEqual(t, result1.Checksum, result2.Checksum)
	})

	t.Run("same content produces same checksum", func(t *testing.T) {
		content := "identical content"
		path1 := setupChecksumTestFile(t, content)
		path2 := setupChecksumTestFile(t, content)

		result1, err := Checksum(fs, Artifact{Path: path1, OS: "linux", Arch: "amd64"})
		require.NoError(t, err)

		result2, err := Checksum(fs, Artifact{Path: path2, OS: "linux", Arch: "amd64"})
		require.NoError(t, err)

		assert.Equal(t, result1.Checksum, result2.Checksum)
	})
}

func TestChecksum_Bad(t *testing.T) {
	fs := io.Local
	t.Run("returns error for empty path", func(t *testing.T) {
		artifact := Artifact{
			Path: "",
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Checksum(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "artifact path is empty")
		assert.Empty(t, result.Checksum)
	})

	t.Run("returns error for non-existent file", func(t *testing.T) {
		artifact := Artifact{
			Path: "/nonexistent/path/file",
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Checksum(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "failed to open file")
		assert.Empty(t, result.Checksum)
	})
}

func TestChecksumAll_Good(t *testing.T) {
	fs := io.Local
	t.Run("checksums multiple artifacts", func(t *testing.T) {
		paths := []string{
			setupChecksumTestFile(t, "content one"),
			setupChecksumTestFile(t, "content two"),
			setupChecksumTestFile(t, "content three"),
		}

		artifacts := []Artifact{
			{Path: paths[0], OS: "linux", Arch: "amd64"},
			{Path: paths[1], OS: "darwin", Arch: "arm64"},
			{Path: paths[2], OS: "windows", Arch: "amd64"},
		}

		results, err := ChecksumAll(fs, artifacts)
		require.NoError(t, err)
		require.Len(t, results, 3)

		for i, result := range results {
			assert.Equal(t, artifacts[i].Path, result.Path)
			assert.Equal(t, artifacts[i].OS, result.OS)
			assert.Equal(t, artifacts[i].Arch, result.Arch)
			assert.NotEmpty(t, result.Checksum)
		}
	})

	t.Run("returns nil for empty slice", func(t *testing.T) {
		results, err := ChecksumAll(fs, []Artifact{})
		assert.NoError(t, err)
		assert.Nil(t, results)
	})

	t.Run("returns nil for nil slice", func(t *testing.T) {
		results, err := ChecksumAll(fs, nil)
		assert.NoError(t, err)
		assert.Nil(t, results)
	})
}

func TestChecksumAll_Bad(t *testing.T) {
	fs := io.Local
	t.Run("returns partial results on error", func(t *testing.T) {
		path := setupChecksumTestFile(t, "valid content")

		artifacts := []Artifact{
			{Path: path, OS: "linux", Arch: "amd64"},
			{Path: "/nonexistent/file", OS: "linux", Arch: "arm64"}, // This will fail
		}

		results, err := ChecksumAll(fs, artifacts)
		assert.Error(t, err)
		// Should have the first successful result
		assert.Len(t, results, 1)
		assert.NotEmpty(t, results[0].Checksum)
	})
}

func TestWriteChecksumFile_Good(t *testing.T) {
	fs := io.Local
	t.Run("writes checksum file with correct format", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		artifacts := []Artifact{
			{Path: "/output/app_linux_amd64.tar.gz", Checksum: "abc123def456", OS: "linux", Arch: "amd64"},
			{Path: "/output/app_darwin_arm64.tar.gz", Checksum: "789xyz000111", OS: "darwin", Arch: "arm64"},
		}

		err := WriteChecksumFile(fs, artifacts, checksumPath)
		require.NoError(t, err)

		// Read and verify content
		content, err := os.ReadFile(checksumPath)
		require.NoError(t, err)

		lines := strings.Split(strings.TrimSpace(string(content)), "\n")
		require.Len(t, lines, 2)

		// Lines should be sorted alphabetically
		assert.Equal(t, "789xyz000111 app_darwin_arm64.tar.gz", lines[0])
		assert.Equal(t, "abc123def456 app_linux_amd64.tar.gz", lines[1])
	})

	t.Run("creates parent directories", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "nested", "deep", "CHECKSUMS.txt")

		artifacts := []Artifact{
			{Path: "/output/app.tar.gz", Checksum: "abc123", OS: "linux", Arch: "amd64"},
		}

		err := WriteChecksumFile(fs, artifacts, checksumPath)
		require.NoError(t, err)
		assert.FileExists(t, checksumPath)
	})

	t.Run("does nothing for empty artifacts", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		err := WriteChecksumFile(fs, []Artifact{}, checksumPath)
		require.NoError(t, err)

		// File should not exist
		_, err = os.Stat(checksumPath)
		assert.True(t, os.IsNotExist(err))
	})

	t.Run("does nothing for nil artifacts", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		err := WriteChecksumFile(fs, nil, checksumPath)
		require.NoError(t, err)
	})

	t.Run("uses only basename for filenames", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		artifacts := []Artifact{
			{Path: "/some/deep/nested/path/myapp_linux_amd64.tar.gz", Checksum: "checksum123", OS: "linux", Arch: "amd64"},
		}

		err := WriteChecksumFile(fs, artifacts, checksumPath)
		require.NoError(t, err)

		content, err := os.ReadFile(checksumPath)
		require.NoError(t, err)

		// Should only contain the basename
		assert.Contains(t, string(content), "myapp_linux_amd64.tar.gz")
		assert.NotContains(t, string(content), "/some/deep/nested/path/")
	})
}

func TestWriteChecksumFile_Bad(t *testing.T) {
	fs := io.Local
	t.Run("returns error for artifact without checksum", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		artifacts := []Artifact{
			{Path: "/output/app.tar.gz", Checksum: "", OS: "linux", Arch: "amd64"}, // No checksum
		}

		err := WriteChecksumFile(fs, artifacts, checksumPath)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "has no checksum")
	})
}
169
build/config.go
Normal file
@ -0,0 +1,169 @@

// Package build provides project type detection and cross-compilation for the Core build system.
// This file handles configuration loading from .core/build.yaml files.
package build

import (
	"fmt"
	"os"
	"path/filepath"

	"forge.lthn.ai/core/go-devops/build/signing"
	"forge.lthn.ai/core/go/pkg/io"
	"gopkg.in/yaml.v3"
)

// ConfigFileName is the name of the build configuration file.
const ConfigFileName = "build.yaml"

// ConfigDir is the directory where build configuration is stored.
const ConfigDir = ".core"

// BuildConfig holds the complete build configuration loaded from .core/build.yaml.
// It is distinct from Config, which holds runtime build parameters.
type BuildConfig struct {
	// Version is the config file format version.
	Version int `yaml:"version"`
	// Project contains project metadata.
	Project Project `yaml:"project"`
	// Build contains build settings.
	Build Build `yaml:"build"`
	// Targets defines the build targets.
	Targets []TargetConfig `yaml:"targets"`
	// Sign contains code signing configuration.
	Sign signing.SignConfig `yaml:"sign,omitempty"`
}

// Project holds project metadata.
type Project struct {
	// Name is the project name.
	Name string `yaml:"name"`
	// Description is a brief description of the project.
	Description string `yaml:"description"`
	// Main is the path to the main package (e.g., ./cmd/core).
	Main string `yaml:"main"`
	// Binary is the output binary name.
	Binary string `yaml:"binary"`
}

// Build holds build-time settings.
type Build struct {
	// CGO enables CGO for the build.
	CGO bool `yaml:"cgo"`
	// Flags are additional build flags (e.g., ["-trimpath"]).
	Flags []string `yaml:"flags"`
	// LDFlags are linker flags (e.g., ["-s", "-w"]).
	LDFlags []string `yaml:"ldflags"`
	// Env are additional environment variables.
	Env []string `yaml:"env"`
}

// TargetConfig defines a build target in the config file.
// It is separate from Target to allow for additional config-specific fields.
type TargetConfig struct {
	// OS is the target operating system (e.g., "linux", "darwin", "windows").
	OS string `yaml:"os"`
	// Arch is the target architecture (e.g., "amd64", "arm64").
	Arch string `yaml:"arch"`
}

// LoadConfig loads build configuration from the .core/build.yaml file in the given directory.
// If the config file does not exist, it returns DefaultConfig().
// It returns an error if the file exists but cannot be parsed.
func LoadConfig(fs io.Medium, dir string) (*BuildConfig, error) {
	configPath := filepath.Join(dir, ConfigDir, ConfigFileName)

	content, err := fs.Read(configPath)
	if err != nil {
		if os.IsNotExist(err) {
			return DefaultConfig(), nil
		}
		return nil, fmt.Errorf("build.LoadConfig: failed to read config file: %w", err)
	}

	var cfg BuildConfig
	data := []byte(content)
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		return nil, fmt.Errorf("build.LoadConfig: failed to parse config file: %w", err)
	}

	// Apply defaults for any missing fields
	applyDefaults(&cfg)

	return &cfg, nil
}

// DefaultConfig returns sensible defaults for Go projects.
func DefaultConfig() *BuildConfig {
	return &BuildConfig{
		Version: 1,
		Project: Project{
			Name:   "",
			Main:   ".",
			Binary: "",
		},
		Build: Build{
			CGO:     false,
			Flags:   []string{"-trimpath"},
			LDFlags: []string{"-s", "-w"},
			Env:     []string{},
		},
		Targets: []TargetConfig{
			{OS: "linux", Arch: "amd64"},
			{OS: "linux", Arch: "arm64"},
			{OS: "darwin", Arch: "arm64"},
			{OS: "windows", Arch: "amd64"},
		},
		Sign: signing.DefaultSignConfig(),
	}
}

// applyDefaults fills in default values for any empty fields in the config.
func applyDefaults(cfg *BuildConfig) {
	defaults := DefaultConfig()

	if cfg.Version == 0 {
		cfg.Version = defaults.Version
	}

	if cfg.Project.Main == "" {
		cfg.Project.Main = defaults.Project.Main
	}

	if cfg.Build.Flags == nil {
		cfg.Build.Flags = defaults.Build.Flags
	}

	if cfg.Build.LDFlags == nil {
		cfg.Build.LDFlags = defaults.Build.LDFlags
	}

	if cfg.Build.Env == nil {
		cfg.Build.Env = defaults.Build.Env
	}

	if len(cfg.Targets) == 0 {
		cfg.Targets = defaults.Targets
	}

	// Expand environment variables in sign config
	cfg.Sign.ExpandEnv()
}

// ConfigPath returns the path to the build config file for a given directory.
func ConfigPath(dir string) string {
	return filepath.Join(dir, ConfigDir, ConfigFileName)
}

// ConfigExists checks if a build config file exists in the given directory.
func ConfigExists(fs io.Medium, dir string) bool {
	return fileExists(fs, ConfigPath(dir))
}

// ToTargets converts the TargetConfig slice to a Target slice for use with builders.
func (cfg *BuildConfig) ToTargets() []Target {
	targets := make([]Target, len(cfg.Targets))
	for i, t := range cfg.Targets {
		targets[i] = Target(t)
	}
	return targets
}
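A minimal usage sketch, not part of the diff: LoadConfig falls back to defaults when .core/build.yaml is absent, so callers can load targets unconditionally. The helper name is an assumption for illustration.

// Sketch: load the project's build config (or defaults) and return its targets.
// loadTargets is a hypothetical wrapper around LoadConfig and ToTargets.
func loadTargets(fs io.Medium, projectDir string) ([]Target, error) {
	cfg, err := LoadConfig(fs, projectDir)
	if err != nil {
		return nil, err
	}
	return cfg.ToTargets(), nil
}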
324
build/config_test.go
Normal file
@ -0,0 +1,324 @@

package build

import (
	"os"
	"path/filepath"
	"testing"

	"forge.lthn.ai/core/go/pkg/io"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// setupConfigTestDir creates a temp directory with optional .core/build.yaml content.
func setupConfigTestDir(t *testing.T, configContent string) string {
	t.Helper()
	dir := t.TempDir()

	if configContent != "" {
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		configPath := filepath.Join(coreDir, ConfigFileName)
		err = os.WriteFile(configPath, []byte(configContent), 0644)
		require.NoError(t, err)
	}

	return dir
}

func TestLoadConfig_Good(t *testing.T) {
	fs := io.Local
	t.Run("loads valid config", func(t *testing.T) {
		content := `
version: 1
project:
  name: myapp
  description: A test application
  main: ./cmd/myapp
  binary: myapp
build:
  cgo: true
  flags:
    - -trimpath
    - -race
  ldflags:
    - -s
    - -w
  env:
    - FOO=bar
targets:
  - os: linux
    arch: amd64
  - os: darwin
    arch: arm64
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(fs, dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		assert.Equal(t, 1, cfg.Version)
		assert.Equal(t, "myapp", cfg.Project.Name)
		assert.Equal(t, "A test application", cfg.Project.Description)
		assert.Equal(t, "./cmd/myapp", cfg.Project.Main)
		assert.Equal(t, "myapp", cfg.Project.Binary)
		assert.True(t, cfg.Build.CGO)
		assert.Equal(t, []string{"-trimpath", "-race"}, cfg.Build.Flags)
		assert.Equal(t, []string{"-s", "-w"}, cfg.Build.LDFlags)
		assert.Equal(t, []string{"FOO=bar"}, cfg.Build.Env)
		assert.Len(t, cfg.Targets, 2)
		assert.Equal(t, "linux", cfg.Targets[0].OS)
		assert.Equal(t, "amd64", cfg.Targets[0].Arch)
		assert.Equal(t, "darwin", cfg.Targets[1].OS)
		assert.Equal(t, "arm64", cfg.Targets[1].Arch)
	})

	t.Run("returns defaults when config file missing", func(t *testing.T) {
		dir := t.TempDir()

		cfg, err := LoadConfig(fs, dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		defaults := DefaultConfig()
		assert.Equal(t, defaults.Version, cfg.Version)
		assert.Equal(t, defaults.Project.Main, cfg.Project.Main)
		assert.Equal(t, defaults.Build.CGO, cfg.Build.CGO)
		assert.Equal(t, defaults.Build.Flags, cfg.Build.Flags)
		assert.Equal(t, defaults.Build.LDFlags, cfg.Build.LDFlags)
		assert.Equal(t, defaults.Targets, cfg.Targets)
	})

	t.Run("applies defaults for missing fields", func(t *testing.T) {
		content := `
version: 2
project:
  name: partial
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(fs, dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		// Explicit values preserved
		assert.Equal(t, 2, cfg.Version)
		assert.Equal(t, "partial", cfg.Project.Name)

		// Defaults applied
		defaults := DefaultConfig()
		assert.Equal(t, defaults.Project.Main, cfg.Project.Main)
		assert.Equal(t, defaults.Build.Flags, cfg.Build.Flags)
		assert.Equal(t, defaults.Build.LDFlags, cfg.Build.LDFlags)
		assert.Equal(t, defaults.Targets, cfg.Targets)
	})

	t.Run("preserves empty arrays when explicitly set", func(t *testing.T) {
		content := `
version: 1
project:
  name: noflags
build:
  flags: []
  ldflags: []
targets:
  - os: linux
    arch: amd64
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(fs, dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		// Empty arrays are preserved (not replaced with defaults)
		assert.Empty(t, cfg.Build.Flags)
		assert.Empty(t, cfg.Build.LDFlags)
		// Targets explicitly set
		assert.Len(t, cfg.Targets, 1)
	})
}

func TestLoadConfig_Bad(t *testing.T) {
	fs := io.Local
	t.Run("returns error for invalid YAML", func(t *testing.T) {
		content := `
version: 1
project:
  name: [invalid yaml
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(fs, dir)
		assert.Error(t, err)
		assert.Nil(t, cfg)
		assert.Contains(t, err.Error(), "failed to parse config file")
	})

	t.Run("returns error for unreadable file", func(t *testing.T) {
		dir := t.TempDir()
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		// Create the config as a directory instead of a file
		configPath := filepath.Join(coreDir, ConfigFileName)
		err = os.Mkdir(configPath, 0755)
		require.NoError(t, err)

		cfg, err := LoadConfig(fs, dir)
		assert.Error(t, err)
		assert.Nil(t, cfg)
		assert.Contains(t, err.Error(), "failed to read config file")
	})
}

func TestDefaultConfig_Good(t *testing.T) {
	t.Run("returns sensible defaults", func(t *testing.T) {
		cfg := DefaultConfig()

		assert.Equal(t, 1, cfg.Version)
		assert.Equal(t, ".", cfg.Project.Main)
		assert.Empty(t, cfg.Project.Name)
		assert.Empty(t, cfg.Project.Binary)
		assert.False(t, cfg.Build.CGO)
		assert.Contains(t, cfg.Build.Flags, "-trimpath")
		assert.Contains(t, cfg.Build.LDFlags, "-s")
		assert.Contains(t, cfg.Build.LDFlags, "-w")
		assert.Empty(t, cfg.Build.Env)

		// Default targets cover common platforms
		assert.Len(t, cfg.Targets, 4)
		hasLinuxAmd64 := false
		hasDarwinArm64 := false
		hasWindowsAmd64 := false
		for _, target := range cfg.Targets {
			if target.OS == "linux" && target.Arch == "amd64" {
				hasLinuxAmd64 = true
			}
			if target.OS == "darwin" && target.Arch == "arm64" {
				hasDarwinArm64 = true
			}
			if target.OS == "windows" && target.Arch == "amd64" {
				hasWindowsAmd64 = true
			}
		}
		assert.True(t, hasLinuxAmd64)
		assert.True(t, hasDarwinArm64)
		assert.True(t, hasWindowsAmd64)
	})
}

func TestConfigPath_Good(t *testing.T) {
	t.Run("returns correct path", func(t *testing.T) {
		path := ConfigPath("/project/root")
		assert.Equal(t, "/project/root/.core/build.yaml", path)
	})
}

func TestConfigExists_Good(t *testing.T) {
	fs := io.Local
	t.Run("returns true when config exists", func(t *testing.T) {
		dir := setupConfigTestDir(t, "version: 1")
		assert.True(t, ConfigExists(fs, dir))
	})

	t.Run("returns false when config missing", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, ConfigExists(fs, dir))
	})

	t.Run("returns false when .core dir missing", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, ConfigExists(fs, dir))
	})
}

func TestLoadConfig_Good_SignConfig(t *testing.T) {
	tmpDir := t.TempDir()
	coreDir := filepath.Join(tmpDir, ".core")
	_ = os.MkdirAll(coreDir, 0755)

	configContent := `version: 1
sign:
  enabled: true
  gpg:
    key: "ABCD1234"
  macos:
    identity: "Developer ID Application: Test"
    notarize: true
`
	_ = os.WriteFile(filepath.Join(coreDir, "build.yaml"), []byte(configContent), 0644)

	cfg, err := LoadConfig(io.Local, tmpDir)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if !cfg.Sign.Enabled {
		t.Error("expected Sign.Enabled to be true")
	}
	if cfg.Sign.GPG.Key != "ABCD1234" {
		t.Errorf("expected GPG.Key 'ABCD1234', got %q", cfg.Sign.GPG.Key)
	}
	if cfg.Sign.MacOS.Identity != "Developer ID Application: Test" {
		t.Errorf("expected MacOS.Identity, got %q", cfg.Sign.MacOS.Identity)
	}
	if !cfg.Sign.MacOS.Notarize {
		t.Error("expected MacOS.Notarize to be true")
	}
}

func TestBuildConfig_ToTargets_Good(t *testing.T) {
	t.Run("converts TargetConfig to Target", func(t *testing.T) {
		cfg := &BuildConfig{
			Targets: []TargetConfig{
				{OS: "linux", Arch: "amd64"},
				{OS: "darwin", Arch: "arm64"},
				{OS: "windows", Arch: "386"},
			},
		}

		targets := cfg.ToTargets()
		require.Len(t, targets, 3)

		assert.Equal(t, Target{OS: "linux", Arch: "amd64"}, targets[0])
		assert.Equal(t, Target{OS: "darwin", Arch: "arm64"}, targets[1])
		assert.Equal(t, Target{OS: "windows", Arch: "386"}, targets[2])
	})

	t.Run("returns empty slice for no targets", func(t *testing.T) {
		cfg := &BuildConfig{
			Targets: []TargetConfig{},
		}

		targets := cfg.ToTargets()
		assert.Empty(t, targets)
	})
}

// TestLoadConfig_Testdata tests loading from the testdata fixture.
func TestLoadConfig_Testdata(t *testing.T) {
	fs := io.Local
	abs, err := filepath.Abs("testdata/config-project")
	require.NoError(t, err)

	t.Run("loads config-project fixture", func(t *testing.T) {
		cfg, err := LoadConfig(fs, abs)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		assert.Equal(t, 1, cfg.Version)
		assert.Equal(t, "example-cli", cfg.Project.Name)
		assert.Equal(t, "An example CLI application", cfg.Project.Description)
		assert.Equal(t, "./cmd/example", cfg.Project.Main)
		assert.Equal(t, "example", cfg.Project.Binary)
		assert.False(t, cfg.Build.CGO)
		assert.Equal(t, []string{"-trimpath"}, cfg.Build.Flags)
		assert.Equal(t, []string{"-s", "-w"}, cfg.Build.LDFlags)
		assert.Len(t, cfg.Targets, 3)
	})
}
94
build/discovery.go
Normal file
@ -0,0 +1,94 @@

package build

import (
	"path/filepath"
	"slices"

	"forge.lthn.ai/core/go/pkg/io"
)

// Marker files for project type detection.
const (
	markerGoMod       = "go.mod"
	markerWails       = "wails.json"
	markerNodePackage = "package.json"
	markerComposer    = "composer.json"
)

// projectMarker maps a marker file to its project type.
type projectMarker struct {
	file        string
	projectType ProjectType
}

// markers defines the detection order. More specific types come first.
// Wails projects have both wails.json and go.mod, so wails is checked first.
var markers = []projectMarker{
	{markerWails, ProjectTypeWails},
	{markerGoMod, ProjectTypeGo},
	{markerNodePackage, ProjectTypeNode},
	{markerComposer, ProjectTypePHP},
}

// Discover detects project types in the given directory by checking for marker files.
// It returns a slice of detected project types, ordered by priority (most specific first).
// For example, a Wails project returns [wails, go] since it has both wails.json and go.mod.
func Discover(fs io.Medium, dir string) ([]ProjectType, error) {
	var detected []ProjectType

	for _, m := range markers {
		path := filepath.Join(dir, m.file)
		if fileExists(fs, path) {
			// Avoid duplicates (shouldn't happen with current markers, but defensive)
			if !slices.Contains(detected, m.projectType) {
				detected = append(detected, m.projectType)
			}
		}
	}

	return detected, nil
}

// PrimaryType returns the most specific project type detected in the directory.
// It returns an empty string if no project type is detected.
func PrimaryType(fs io.Medium, dir string) (ProjectType, error) {
	types, err := Discover(fs, dir)
	if err != nil {
		return "", err
	}
	if len(types) == 0 {
		return "", nil
	}
	return types[0], nil
}

// IsGoProject checks if the directory contains a Go project (go.mod or wails.json).
func IsGoProject(fs io.Medium, dir string) bool {
	return fileExists(fs, filepath.Join(dir, markerGoMod)) ||
		fileExists(fs, filepath.Join(dir, markerWails))
}

// IsWailsProject checks if the directory contains a Wails project.
func IsWailsProject(fs io.Medium, dir string) bool {
	return fileExists(fs, filepath.Join(dir, markerWails))
}

// IsNodeProject checks if the directory contains a Node.js project.
func IsNodeProject(fs io.Medium, dir string) bool {
	return fileExists(fs, filepath.Join(dir, markerNodePackage))
}

// IsPHPProject checks if the directory contains a PHP project.
func IsPHPProject(fs io.Medium, dir string) bool {
	return fileExists(fs, filepath.Join(dir, markerComposer))
}

// IsCPPProject checks if the directory contains a C++ project (CMakeLists.txt).
func IsCPPProject(fs io.Medium, dir string) bool {
	return fileExists(fs, filepath.Join(dir, "CMakeLists.txt"))
}

// fileExists checks if a file exists and is not a directory.
func fileExists(fs io.Medium, path string) bool {
	return fs.IsFile(path)
}
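A minimal usage sketch, not part of the diff: PrimaryType can select a builder by the most specific marker found. The builder-name mapping below is an assumption for illustration only.

// Sketch: choose a builder name from the primary project type.
// builderNameFor is a hypothetical helper; only the wails/go arms are shown.
func builderNameFor(fs io.Medium, dir string) (string, error) {
	primary, err := PrimaryType(fs, dir)
	if err != nil {
		return "", err
	}
	switch primary {
	case ProjectTypeWails:
		return "wails", nil
	case ProjectTypeGo:
		return "go", nil
	default:
		return "", nil // no supported builder detected
	}
}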
228
build/discovery_test.go
Normal file
@ -0,0 +1,228 @@

package build

import (
	"os"
	"path/filepath"
	"testing"

	"forge.lthn.ai/core/go/pkg/io"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// setupTestDir creates a temporary directory with the specified marker files.
func setupTestDir(t *testing.T, markers ...string) string {
	t.Helper()
	dir := t.TempDir()
	for _, m := range markers {
		path := filepath.Join(dir, m)
		err := os.WriteFile(path, []byte("{}"), 0644)
		require.NoError(t, err)
	}
	return dir
}

func TestDiscover_Good(t *testing.T) {
	fs := io.Local
	t.Run("detects Go project", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, []ProjectType{ProjectTypeGo}, types)
	})

	t.Run("detects Wails project with priority over Go", func(t *testing.T) {
		dir := setupTestDir(t, "wails.json", "go.mod")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, []ProjectType{ProjectTypeWails, ProjectTypeGo}, types)
	})

	t.Run("detects Node.js project", func(t *testing.T) {
		dir := setupTestDir(t, "package.json")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, []ProjectType{ProjectTypeNode}, types)
	})

	t.Run("detects PHP project", func(t *testing.T) {
		dir := setupTestDir(t, "composer.json")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, []ProjectType{ProjectTypePHP}, types)
	})

	t.Run("detects multiple project types", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod", "package.json")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, []ProjectType{ProjectTypeGo, ProjectTypeNode}, types)
	})

	t.Run("empty directory returns empty slice", func(t *testing.T) {
		dir := t.TempDir()
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Empty(t, types)
	})
}

func TestDiscover_Bad(t *testing.T) {
	fs := io.Local
	t.Run("non-existent directory returns empty slice", func(t *testing.T) {
		types, err := Discover(fs, "/non/existent/path")
		assert.NoError(t, err) // os.Stat fails silently in fileExists
		assert.Empty(t, types)
	})

	t.Run("directory marker is ignored", func(t *testing.T) {
		dir := t.TempDir()
		// Create go.mod as a directory instead of a file
		err := os.Mkdir(filepath.Join(dir, "go.mod"), 0755)
		require.NoError(t, err)

		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Empty(t, types)
	})
}

func TestPrimaryType_Good(t *testing.T) {
	fs := io.Local
	t.Run("returns wails for wails project", func(t *testing.T) {
		dir := setupTestDir(t, "wails.json", "go.mod")
		primary, err := PrimaryType(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, ProjectTypeWails, primary)
	})

	t.Run("returns go for go-only project", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod")
		primary, err := PrimaryType(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, ProjectTypeGo, primary)
	})

	t.Run("returns empty string for empty directory", func(t *testing.T) {
		dir := t.TempDir()
		primary, err := PrimaryType(fs, dir)
		assert.NoError(t, err)
		assert.Empty(t, primary)
	})
}

func TestIsGoProject_Good(t *testing.T) {
	fs := io.Local
	t.Run("true with go.mod", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod")
		assert.True(t, IsGoProject(fs, dir))
	})

	t.Run("true with wails.json", func(t *testing.T) {
		dir := setupTestDir(t, "wails.json")
		assert.True(t, IsGoProject(fs, dir))
	})

	t.Run("false without markers", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, IsGoProject(fs, dir))
	})
}

func TestIsWailsProject_Good(t *testing.T) {
	fs := io.Local
	t.Run("true with wails.json", func(t *testing.T) {
		dir := setupTestDir(t, "wails.json")
		assert.True(t, IsWailsProject(fs, dir))
	})

	t.Run("false with only go.mod", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod")
		assert.False(t, IsWailsProject(fs, dir))
	})
}

func TestIsNodeProject_Good(t *testing.T) {
	fs := io.Local
	t.Run("true with package.json", func(t *testing.T) {
		dir := setupTestDir(t, "package.json")
		assert.True(t, IsNodeProject(fs, dir))
	})

	t.Run("false without package.json", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, IsNodeProject(fs, dir))
	})
}

func TestIsPHPProject_Good(t *testing.T) {
	fs := io.Local
	t.Run("true with composer.json", func(t *testing.T) {
		dir := setupTestDir(t, "composer.json")
		assert.True(t, IsPHPProject(fs, dir))
	})

	t.Run("false without composer.json", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, IsPHPProject(fs, dir))
	})
}

func TestTarget_Good(t *testing.T) {
	target := Target{OS: "linux", Arch: "amd64"}
	assert.Equal(t, "linux/amd64", target.String())
}

func TestFileExists_Good(t *testing.T) {
	fs := io.Local
	t.Run("returns true for existing file", func(t *testing.T) {
		dir := t.TempDir()
		path := filepath.Join(dir, "test.txt")
		err := os.WriteFile(path, []byte("content"), 0644)
		require.NoError(t, err)
		assert.True(t, fileExists(fs, path))
	})

	t.Run("returns false for directory", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, fileExists(fs, dir))
	})

	t.Run("returns false for non-existent path", func(t *testing.T) {
		assert.False(t, fileExists(fs, "/non/existent/file"))
	})
}

// TestDiscover_Testdata tests discovery using the testdata fixtures.
// These serve as integration tests with realistic project structures.
func TestDiscover_Testdata(t *testing.T) {
	fs := io.Local
	testdataDir, err := filepath.Abs("testdata")
	require.NoError(t, err)

	tests := []struct {
		name     string
		dir      string
		expected []ProjectType
	}{
		{"go-project", "go-project", []ProjectType{ProjectTypeGo}},
		{"wails-project", "wails-project", []ProjectType{ProjectTypeWails, ProjectTypeGo}},
		{"node-project", "node-project", []ProjectType{ProjectTypeNode}},
		{"php-project", "php-project", []ProjectType{ProjectTypePHP}},
		{"multi-project", "multi-project", []ProjectType{ProjectTypeGo, ProjectTypeNode}},
		{"empty-project", "empty-project", []ProjectType{}},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			dir := filepath.Join(testdataDir, tt.dir)
			types, err := Discover(fs, dir)
			assert.NoError(t, err)
			if len(tt.expected) == 0 {
				assert.Empty(t, types)
			} else {
				assert.Equal(t, tt.expected, types)
			}
		})
	}
}
103
build/signing/codesign.go
Normal file
103
build/signing/codesign.go
Normal file
|
|
@ -0,0 +1,103 @@
|
|||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// MacOSSigner signs binaries using macOS codesign.
|
||||
type MacOSSigner struct {
|
||||
config MacOSConfig
|
||||
}
|
||||
|
||||
// Compile-time interface check.
|
||||
var _ Signer = (*MacOSSigner)(nil)
|
||||
|
||||
// NewMacOSSigner creates a new macOS signer.
|
||||
func NewMacOSSigner(cfg MacOSConfig) *MacOSSigner {
|
||||
return &MacOSSigner{config: cfg}
|
||||
}
|
||||
|
||||
// Name returns "codesign".
|
||||
func (s *MacOSSigner) Name() string {
|
||||
return "codesign"
|
||||
}
|
||||
|
||||
// Available checks if running on macOS with codesign and identity configured.
|
||||
func (s *MacOSSigner) Available() bool {
|
||||
if runtime.GOOS != "darwin" {
|
||||
return false
|
||||
}
|
||||
if s.config.Identity == "" {
|
||||
return false
|
||||
}
|
||||
_, err := exec.LookPath("codesign")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Sign codesigns a binary with hardened runtime.
|
||||
func (s *MacOSSigner) Sign(ctx context.Context, fs io.Medium, binary string) error {
|
||||
if !s.Available() {
|
||||
return fmt.Errorf("codesign.Sign: codesign not available")
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "codesign",
|
||||
"--sign", s.config.Identity,
|
||||
"--timestamp",
|
||||
"--options", "runtime", // Hardened runtime for notarization
|
||||
"--force",
|
||||
binary,
|
||||
)
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("codesign.Sign: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Notarize submits binary to Apple for notarization and staples the ticket.
|
||||
// This blocks until Apple responds (typically 1-5 minutes).
|
||||
func (s *MacOSSigner) Notarize(ctx context.Context, fs io.Medium, binary string) error {
|
||||
if s.config.AppleID == "" || s.config.TeamID == "" || s.config.AppPassword == "" {
|
||||
return fmt.Errorf("codesign.Notarize: missing Apple credentials (apple_id, team_id, app_password)")
|
||||
}
|
||||
|
||||
// Create ZIP for submission
|
||||
zipPath := binary + ".zip"
|
||||
zipCmd := exec.CommandContext(ctx, "zip", "-j", zipPath, binary)
|
||||
if output, err := zipCmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("codesign.Notarize: failed to create zip: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
defer func() { _ = fs.Delete(zipPath) }()
|
||||
|
||||
// Submit to Apple and wait
|
||||
submitCmd := exec.CommandContext(ctx, "xcrun", "notarytool", "submit",
|
||||
zipPath,
|
||||
"--apple-id", s.config.AppleID,
|
||||
"--team-id", s.config.TeamID,
|
||||
"--password", s.config.AppPassword,
|
||||
"--wait",
|
||||
)
|
||||
if output, err := submitCmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("codesign.Notarize: notarization failed: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
// Staple the ticket
|
||||
stapleCmd := exec.CommandContext(ctx, "xcrun", "stapler", "staple", binary)
|
||||
if output, err := stapleCmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("codesign.Notarize: failed to staple: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShouldNotarize returns true if notarization is enabled.
|
||||
func (s *MacOSSigner) ShouldNotarize() bool {
|
||||
return s.config.Notarize
|
||||
}
|
||||
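A minimal usage sketch for the signer above, kept inside the signing package for illustration; the identity, credentials, and binary path are placeholders, and exampleSignAndNotarize is not part of the package API.

// exampleSignAndNotarize is an illustrative sketch, not part of the package.
func exampleSignAndNotarize(ctx context.Context, fs io.Medium) error {
	signer := NewMacOSSigner(MacOSConfig{
		Identity:    "Developer ID Application: Example Corp (TEAMID1234)", // placeholder
		Notarize:    true,
		AppleID:     "builds@example.com", // placeholder
		TeamID:      "TEAMID1234",         // placeholder
		AppPassword: "app-specific-password",
	})
	if !signer.Available() {
		return nil // not on macOS, codesign missing, or no identity configured
	}
	if err := signer.Sign(ctx, fs, "dist/example_darwin_arm64"); err != nil {
		return err
	}
	if signer.ShouldNotarize() {
		return signer.Notarize(ctx, fs, "dist/example_darwin_arm64")
	}
	return nil
}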
build/signing/codesign_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMacOSSigner_Good_Name(t *testing.T) {
|
||||
s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"})
|
||||
assert.Equal(t, "codesign", s.Name())
|
||||
}
|
||||
|
||||
func TestMacOSSigner_Good_Available(t *testing.T) {
|
||||
s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"})
|
||||
|
||||
if runtime.GOOS == "darwin" {
|
||||
// Just verify it doesn't panic
|
||||
_ = s.Available()
|
||||
} else {
|
||||
assert.False(t, s.Available())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMacOSSigner_Bad_NoIdentity(t *testing.T) {
|
||||
s := NewMacOSSigner(MacOSConfig{})
|
||||
assert.False(t, s.Available())
|
||||
}
|
||||
|
||||
func TestMacOSSigner_Sign_Bad(t *testing.T) {
|
||||
t.Run("fails when not available", func(t *testing.T) {
|
||||
if runtime.GOOS == "darwin" {
|
||||
t.Skip("skipping on macOS")
|
||||
}
|
||||
fs := io.Local
|
||||
s := NewMacOSSigner(MacOSConfig{Identity: "test"})
|
||||
err := s.Sign(context.Background(), fs, "test")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not available")
|
||||
})
|
||||
}
|
||||
|
||||
func TestMacOSSigner_Notarize_Bad(t *testing.T) {
|
||||
fs := io.Local
|
||||
t.Run("fails with missing credentials", func(t *testing.T) {
|
||||
s := NewMacOSSigner(MacOSConfig{})
|
||||
err := s.Notarize(context.Background(), fs, "test")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "missing Apple credentials")
|
||||
})
|
||||
}
|
||||
|
||||
func TestMacOSSigner_ShouldNotarize(t *testing.T) {
|
||||
s := NewMacOSSigner(MacOSConfig{Notarize: true})
|
||||
assert.True(t, s.ShouldNotarize())
|
||||
|
||||
s2 := NewMacOSSigner(MacOSConfig{Notarize: false})
|
||||
assert.False(t, s2.ShouldNotarize())
|
||||
}
|
||||
build/signing/gpg.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// GPGSigner signs files using GPG.
|
||||
type GPGSigner struct {
|
||||
KeyID string
|
||||
}
|
||||
|
||||
// Compile-time interface check.
|
||||
var _ Signer = (*GPGSigner)(nil)
|
||||
|
||||
// NewGPGSigner creates a new GPG signer.
|
||||
func NewGPGSigner(keyID string) *GPGSigner {
|
||||
return &GPGSigner{KeyID: keyID}
|
||||
}
|
||||
|
||||
// Name returns "gpg".
|
||||
func (s *GPGSigner) Name() string {
|
||||
return "gpg"
|
||||
}
|
||||
|
||||
// Available checks if gpg is installed and key is configured.
|
||||
func (s *GPGSigner) Available() bool {
|
||||
if s.KeyID == "" {
|
||||
return false
|
||||
}
|
||||
_, err := exec.LookPath("gpg")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Sign creates a detached ASCII-armored signature.
|
||||
// For file.txt, creates file.txt.asc
|
||||
func (s *GPGSigner) Sign(ctx context.Context, fs io.Medium, file string) error {
|
||||
if !s.Available() {
|
||||
return fmt.Errorf("gpg.Sign: gpg not available or key not configured")
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "gpg",
|
||||
"--detach-sign",
|
||||
"--armor",
|
||||
"--local-user", s.KeyID,
|
||||
"--output", file+".asc",
|
||||
file,
|
||||
)
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("gpg.Sign: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
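A short sketch of how the GPG signer is typically driven for a checksums file; the key ID and path are placeholders. The resulting CHECKSUMS.txt.asc can be verified by consumers with gpg --verify CHECKSUMS.txt.asc CHECKSUMS.txt.

// exampleSignChecksumFile is an illustrative sketch, not part of the package.
func exampleSignChecksumFile(ctx context.Context, fs io.Medium) error {
	signer := NewGPGSigner("0xABCDEF0123456789") // placeholder key ID
	if !signer.Available() {
		return nil // gpg not installed or no key configured; skip quietly
	}
	// Produces dist/CHECKSUMS.txt.asc next to the input file.
	return signer.Sign(ctx, fs, "dist/CHECKSUMS.txt")
}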
build/signing/gpg_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGPGSigner_Good_Name(t *testing.T) {
|
||||
s := NewGPGSigner("ABCD1234")
|
||||
assert.Equal(t, "gpg", s.Name())
|
||||
}
|
||||
|
||||
func TestGPGSigner_Good_Available(t *testing.T) {
|
||||
s := NewGPGSigner("ABCD1234")
|
||||
_ = s.Available()
|
||||
}
|
||||
|
||||
func TestGPGSigner_Bad_NoKey(t *testing.T) {
|
||||
s := NewGPGSigner("")
|
||||
assert.False(t, s.Available())
|
||||
}
|
||||
|
||||
func TestGPGSigner_Sign_Bad(t *testing.T) {
|
||||
fs := io.Local
|
||||
t.Run("fails when no key", func(t *testing.T) {
|
||||
s := NewGPGSigner("")
|
||||
err := s.Sign(context.Background(), fs, "test.txt")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not available or key not configured")
|
||||
})
|
||||
}
|
||||
build/signing/sign.go (new file, 96 lines)
@@ -0,0 +1,96 @@
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// Artifact represents a build output that can be signed.
|
||||
// This mirrors build.Artifact to avoid import cycles.
|
||||
type Artifact struct {
|
||||
Path string
|
||||
OS string
|
||||
Arch string
|
||||
}
|
||||
|
||||
// SignBinaries signs macOS binaries in the artifacts list.
|
||||
// Only signs darwin binaries when running on macOS with a configured identity.
|
||||
func SignBinaries(ctx context.Context, fs io.Medium, cfg SignConfig, artifacts []Artifact) error {
|
||||
if !cfg.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Only sign on macOS
|
||||
if runtime.GOOS != "darwin" {
|
||||
return nil
|
||||
}
|
||||
|
||||
signer := NewMacOSSigner(cfg.MacOS)
|
||||
if !signer.Available() {
|
||||
return nil // Silently skip if not configured
|
||||
}
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
if artifact.OS != "darwin" {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Printf(" Signing %s...\n", artifact.Path)
|
||||
if err := signer.Sign(ctx, fs, artifact.Path); err != nil {
|
||||
return fmt.Errorf("failed to sign %s: %w", artifact.Path, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotarizeBinaries notarizes macOS binaries if enabled.
|
||||
func NotarizeBinaries(ctx context.Context, fs io.Medium, cfg SignConfig, artifacts []Artifact) error {
|
||||
if !cfg.Enabled || !cfg.MacOS.Notarize {
|
||||
return nil
|
||||
}
|
||||
|
||||
if runtime.GOOS != "darwin" {
|
||||
return nil
|
||||
}
|
||||
|
||||
signer := NewMacOSSigner(cfg.MacOS)
|
||||
if !signer.Available() {
|
||||
return fmt.Errorf("notarization requested but codesign not available")
|
||||
}
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
if artifact.OS != "darwin" {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Printf(" Notarizing %s (this may take a few minutes)...\n", artifact.Path)
|
||||
if err := signer.Notarize(ctx, fs, artifact.Path); err != nil {
|
||||
return fmt.Errorf("failed to notarize %s: %w", artifact.Path, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignChecksums signs the checksums file with GPG.
|
||||
func SignChecksums(ctx context.Context, fs io.Medium, cfg SignConfig, checksumFile string) error {
|
||||
if !cfg.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
signer := NewGPGSigner(cfg.GPG.Key)
|
||||
if !signer.Available() {
|
||||
return nil // Silently skip if not configured
|
||||
}
|
||||
|
||||
fmt.Printf(" Signing %s with GPG...\n", checksumFile)
|
||||
if err := signer.Sign(ctx, fs, checksumFile); err != nil {
|
||||
return fmt.Errorf("failed to sign checksums: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
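A sketch of how a release step might chain the three helpers above; the artifact list and paths are illustrative, and exampleSignRelease is not part of the package.

func exampleSignRelease(ctx context.Context, fs io.Medium) error {
	cfg := DefaultSignConfig()
	cfg.ExpandEnv()

	artifacts := []Artifact{
		{Path: "dist/example_darwin_arm64", OS: "darwin", Arch: "arm64"},
		{Path: "dist/example_linux_amd64", OS: "linux", Arch: "amd64"},
	}

	if err := SignBinaries(ctx, fs, cfg, artifacts); err != nil {
		return err
	}
	if err := NotarizeBinaries(ctx, fs, cfg, artifacts); err != nil {
		return err
	}
	return SignChecksums(ctx, fs, cfg, "dist/CHECKSUMS.txt")
}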
build/signing/signer.go (new file, 83 lines)
@@ -0,0 +1,83 @@
// Package signing provides code signing for build artifacts.
|
||||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// Signer defines the interface for code signing implementations.
|
||||
type Signer interface {
|
||||
// Name returns the signer's identifier.
|
||||
Name() string
|
||||
// Available checks if this signer can be used.
|
||||
Available() bool
|
||||
// Sign signs the artifact at the given path.
|
||||
Sign(ctx context.Context, fs io.Medium, path string) error
|
||||
}
|
||||
|
||||
// SignConfig holds signing configuration from .core/build.yaml.
|
||||
type SignConfig struct {
|
||||
Enabled bool `yaml:"enabled"`
|
||||
GPG GPGConfig `yaml:"gpg,omitempty"`
|
||||
MacOS MacOSConfig `yaml:"macos,omitempty"`
|
||||
Windows WindowsConfig `yaml:"windows,omitempty"`
|
||||
}
|
||||
|
||||
// GPGConfig holds GPG signing configuration.
|
||||
type GPGConfig struct {
|
||||
Key string `yaml:"key"` // Key ID or fingerprint, supports $ENV
|
||||
}
|
||||
|
||||
// MacOSConfig holds macOS codesign configuration.
|
||||
type MacOSConfig struct {
|
||||
Identity string `yaml:"identity"` // Developer ID Application: ...
|
||||
Notarize bool `yaml:"notarize"` // Submit to Apple for notarization
|
||||
AppleID string `yaml:"apple_id"` // Apple account email
|
||||
TeamID string `yaml:"team_id"` // Team ID
|
||||
AppPassword string `yaml:"app_password"` // App-specific password
|
||||
}
|
||||
|
||||
// WindowsConfig holds Windows signtool configuration (placeholder).
|
||||
type WindowsConfig struct {
|
||||
Certificate string `yaml:"certificate"` // Path to .pfx
|
||||
Password string `yaml:"password"` // Certificate password
|
||||
}
|
||||
|
||||
// DefaultSignConfig returns sensible defaults.
|
||||
func DefaultSignConfig() SignConfig {
|
||||
return SignConfig{
|
||||
Enabled: true,
|
||||
GPG: GPGConfig{
|
||||
Key: os.Getenv("GPG_KEY_ID"),
|
||||
},
|
||||
MacOS: MacOSConfig{
|
||||
Identity: os.Getenv("CODESIGN_IDENTITY"),
|
||||
AppleID: os.Getenv("APPLE_ID"),
|
||||
TeamID: os.Getenv("APPLE_TEAM_ID"),
|
||||
AppPassword: os.Getenv("APPLE_APP_PASSWORD"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ExpandEnv expands environment variables in config values.
|
||||
func (c *SignConfig) ExpandEnv() {
|
||||
c.GPG.Key = expandEnv(c.GPG.Key)
|
||||
c.MacOS.Identity = expandEnv(c.MacOS.Identity)
|
||||
c.MacOS.AppleID = expandEnv(c.MacOS.AppleID)
|
||||
c.MacOS.TeamID = expandEnv(c.MacOS.TeamID)
|
||||
c.MacOS.AppPassword = expandEnv(c.MacOS.AppPassword)
|
||||
c.Windows.Certificate = expandEnv(c.Windows.Certificate)
|
||||
c.Windows.Password = expandEnv(c.Windows.Password)
|
||||
}
|
||||
|
||||
// expandEnv expands $VAR or ${VAR} in a string.
|
||||
func expandEnv(s string) string {
|
||||
if strings.HasPrefix(s, "$") {
|
||||
return os.ExpandEnv(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
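Based on the yaml tags above, the signing section of .core/build.yaml would look roughly like this; the top-level sign: key and the Windows values are assumptions, and the $VAR references rely on ExpandEnv.

sign:
  enabled: true
  gpg:
    key: $GPG_KEY_ID
  macos:
    identity: $CODESIGN_IDENTITY
    notarize: true
    apple_id: $APPLE_ID
    team_id: $APPLE_TEAM_ID
    app_password: $APPLE_APP_PASSWORD
  windows:
    certificate: certs/release.pfx
    password: $WINDOWS_CERT_PASSWORD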
build/signing/signing_test.go (new file, 162 lines)
@@ -0,0 +1,162 @@
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSignBinaries_Good_SkipsNonDarwin(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{
|
||||
Identity: "Developer ID Application: Test",
|
||||
},
|
||||
}
|
||||
|
||||
// Create fake artifact for linux
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "linux", Arch: "amd64"},
|
||||
}
|
||||
|
||||
// Should not error even though binary doesn't exist (skips non-darwin)
|
||||
err := SignBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignBinaries_Good_DisabledConfig(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: false,
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := SignBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignBinaries_Good_SkipsOnNonMacOS(t *testing.T) {
|
||||
if runtime.GOOS == "darwin" {
|
||||
t.Skip("Skipping on macOS - this tests non-macOS behavior")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{
|
||||
Identity: "Developer ID Application: Test",
|
||||
},
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := SignBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotarizeBinaries_Good_DisabledConfig(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: false,
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := NotarizeBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotarizeBinaries_Good_NotarizeDisabled(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{
|
||||
Notarize: false,
|
||||
},
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := NotarizeBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignChecksums_Good_SkipsNoKey(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
GPG: GPGConfig{
|
||||
Key: "", // No key configured
|
||||
},
|
||||
}
|
||||
|
||||
// Should silently skip when no key
|
||||
err := SignChecksums(ctx, fs, cfg, "/tmp/CHECKSUMS.txt")
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignChecksums_Good_Disabled(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: false,
|
||||
}
|
||||
|
||||
err := SignChecksums(ctx, fs, cfg, "/tmp/CHECKSUMS.txt")
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultSignConfig(t *testing.T) {
|
||||
cfg := DefaultSignConfig()
|
||||
assert.True(t, cfg.Enabled)
|
||||
}
|
||||
|
||||
func TestSignConfig_ExpandEnv(t *testing.T) {
|
||||
t.Setenv("TEST_KEY", "ABC")
|
||||
cfg := SignConfig{
|
||||
GPG: GPGConfig{Key: "$TEST_KEY"},
|
||||
}
|
||||
cfg.ExpandEnv()
|
||||
assert.Equal(t, "ABC", cfg.GPG.Key)
|
||||
}
|
||||
|
||||
func TestWindowsSigner_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
s := NewWindowsSigner(WindowsConfig{})
|
||||
assert.Equal(t, "signtool", s.Name())
|
||||
assert.False(t, s.Available())
|
||||
assert.NoError(t, s.Sign(context.Background(), fs, "test.exe"))
|
||||
}
|
||||
build/signing/signtool.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// WindowsSigner signs binaries using Windows signtool (placeholder).
|
||||
type WindowsSigner struct {
|
||||
config WindowsConfig
|
||||
}
|
||||
|
||||
// Compile-time interface check.
|
||||
var _ Signer = (*WindowsSigner)(nil)
|
||||
|
||||
// NewWindowsSigner creates a new Windows signer.
|
||||
func NewWindowsSigner(cfg WindowsConfig) *WindowsSigner {
|
||||
return &WindowsSigner{config: cfg}
|
||||
}
|
||||
|
||||
// Name returns "signtool".
|
||||
func (s *WindowsSigner) Name() string {
|
||||
return "signtool"
|
||||
}
|
||||
|
||||
// Available returns false (not yet implemented).
|
||||
func (s *WindowsSigner) Available() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Sign is a placeholder that does nothing.
|
||||
func (s *WindowsSigner) Sign(ctx context.Context, fs io.Medium, binary string) error {
|
||||
// TODO: Implement Windows signing
|
||||
return nil
|
||||
}
|
||||
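For reference, a future implementation would most likely shell out to the Windows SDK signtool roughly as sketched below; the timestamp URL is a placeholder and signSketch is not part of the package (it would also need the os/exec and fmt imports).

func (s *WindowsSigner) signSketch(ctx context.Context, binary string) error {
	cmd := exec.CommandContext(ctx, "signtool", "sign",
		"/f", s.config.Certificate, // path to the .pfx certificate
		"/p", s.config.Password,
		"/fd", "SHA256",
		"/tr", "http://timestamp.example.com", // RFC 3161 timestamp server (placeholder)
		"/td", "SHA256",
		binary,
	)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("signtool.Sign: %w\nOutput: %s", err, string(output))
	}
	return nil
}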
build/testdata/config-project/.core/build.yaml (new file, vendored, 25 lines)
@@ -0,0 +1,25 @@
# Example build configuration for Core build system
version: 1

project:
  name: example-cli
  description: An example CLI application
  main: ./cmd/example
  binary: example

build:
  cgo: false
  flags:
    - -trimpath
  ldflags:
    - -s
    - -w
  env: []

targets:
  - os: linux
    arch: amd64
  - os: darwin
    arch: arm64
  - os: windows
    arch: amd64

build/testdata/cpp-project/CMakeLists.txt (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
cmake_minimum_required(VERSION 3.16)
project(TestCPP)

build/testdata/empty-project/.gitkeep (new file, vendored, empty)

build/testdata/go-project/go.mod (new file, vendored, 3 lines)
@@ -0,0 +1,3 @@
module example.com/go-project

go 1.21

build/testdata/multi-project/go.mod (new file, vendored, 3 lines)
@@ -0,0 +1,3 @@
module example.com/multi-project

go 1.21

build/testdata/multi-project/package.json (new file, vendored, 4 lines)
@@ -0,0 +1,4 @@
{
  "name": "multi-project",
  "version": "1.0.0"
}

build/testdata/node-project/package.json (new file, vendored, 4 lines)
@@ -0,0 +1,4 @@
{
  "name": "node-project",
  "version": "1.0.0"
}

build/testdata/php-project/composer.json (new file, vendored, 4 lines)
@@ -0,0 +1,4 @@
{
  "name": "vendor/php-project",
  "type": "library"
}

build/testdata/wails-project/go.mod (new file, vendored, 3 lines)
@@ -0,0 +1,3 @@
module example.com/wails-project

go 1.21

build/testdata/wails-project/wails.json (new file, vendored, 4 lines)
@@ -0,0 +1,4 @@
{
  "name": "wails-project",
  "outputfilename": "wails-project"
}
container/container.go (new file, 106 lines)
@@ -0,0 +1,106 @@
// Package container provides a runtime for managing LinuxKit containers.
|
||||
// It supports running LinuxKit images (ISO, qcow2, vmdk, raw) using
|
||||
// available hypervisors (QEMU on Linux, Hyperkit on macOS).
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Container represents a running LinuxKit container/VM instance.
|
||||
type Container struct {
|
||||
// ID is a unique identifier for the container (8 character hex string).
|
||||
ID string `json:"id"`
|
||||
// Name is the optional human-readable name for the container.
|
||||
Name string `json:"name,omitempty"`
|
||||
// Image is the path to the LinuxKit image being run.
|
||||
Image string `json:"image"`
|
||||
// Status represents the current state of the container.
|
||||
Status Status `json:"status"`
|
||||
// PID is the process ID of the hypervisor running this container.
|
||||
PID int `json:"pid"`
|
||||
// StartedAt is when the container was started.
|
||||
StartedAt time.Time `json:"started_at"`
|
||||
// Ports maps host ports to container ports.
|
||||
Ports map[int]int `json:"ports,omitempty"`
|
||||
// Memory is the amount of memory allocated in MB.
|
||||
Memory int `json:"memory,omitempty"`
|
||||
// CPUs is the number of CPUs allocated.
|
||||
CPUs int `json:"cpus,omitempty"`
|
||||
}
|
||||
|
||||
// Status represents the state of a container.
|
||||
type Status string
|
||||
|
||||
const (
|
||||
// StatusRunning indicates the container is running.
|
||||
StatusRunning Status = "running"
|
||||
// StatusStopped indicates the container has stopped.
|
||||
StatusStopped Status = "stopped"
|
||||
// StatusError indicates the container encountered an error.
|
||||
StatusError Status = "error"
|
||||
)
|
||||
|
||||
// RunOptions configures how a container should be run.
|
||||
type RunOptions struct {
|
||||
// Name is an optional human-readable name for the container.
|
||||
Name string
|
||||
// Detach runs the container in the background.
|
||||
Detach bool
|
||||
// Memory is the amount of memory to allocate in MB (default: 1024).
|
||||
Memory int
|
||||
// CPUs is the number of CPUs to allocate (default: 1).
|
||||
CPUs int
|
||||
// Ports maps host ports to container ports.
|
||||
Ports map[int]int
|
||||
// Volumes maps host paths to container paths.
|
||||
Volumes map[string]string
|
||||
// SSHPort is the port to use for SSH access (default: 2222).
|
||||
SSHPort int
|
||||
// SSHKey is the path to the SSH private key for exec commands.
|
||||
SSHKey string
|
||||
}
|
||||
|
||||
// Manager defines the interface for container lifecycle management.
|
||||
type Manager interface {
|
||||
// Run starts a new container from the given image.
|
||||
Run(ctx context.Context, image string, opts RunOptions) (*Container, error)
|
||||
// Stop stops a running container by ID.
|
||||
Stop(ctx context.Context, id string) error
|
||||
// List returns all known containers.
|
||||
List(ctx context.Context) ([]*Container, error)
|
||||
// Logs returns a reader for the container's log output.
|
||||
// If follow is true, the reader will continue to stream new log entries.
|
||||
Logs(ctx context.Context, id string, follow bool) (io.ReadCloser, error)
|
||||
// Exec executes a command inside the container via SSH.
|
||||
Exec(ctx context.Context, id string, cmd []string) error
|
||||
}
|
||||
|
||||
// GenerateID creates a new unique container ID (8 hex characters).
|
||||
func GenerateID() (string, error) {
|
||||
bytes := make([]byte, 4)
|
||||
if _, err := rand.Read(bytes); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hex.EncodeToString(bytes), nil
|
||||
}
|
||||
|
||||
// ImageFormat represents the format of a LinuxKit image.
|
||||
type ImageFormat string
|
||||
|
||||
const (
|
||||
// FormatISO is an ISO image format.
|
||||
FormatISO ImageFormat = "iso"
|
||||
// FormatQCOW2 is a QEMU Copy-On-Write image format.
|
||||
FormatQCOW2 ImageFormat = "qcow2"
|
||||
// FormatVMDK is a VMware disk image format.
|
||||
FormatVMDK ImageFormat = "vmdk"
|
||||
// FormatRaw is a raw disk image format.
|
||||
FormatRaw ImageFormat = "raw"
|
||||
// FormatUnknown indicates an unknown image format.
|
||||
FormatUnknown ImageFormat = "unknown"
|
||||
)
|
||||
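A minimal sketch of driving the Manager interface; the image path and port mapping are illustrative and exampleRunDetached is not part of the package.

func exampleRunDetached(ctx context.Context, m Manager) (*Container, error) {
	return m.Run(ctx, "images/dev.qcow2", RunOptions{
		Name:    "dev",
		Detach:  true,
		Memory:  2048,
		CPUs:    2,
		Ports:   map[int]int{8080: 80}, // host 8080 -> guest 80
		SSHPort: 2222,
	})
}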
container/hypervisor.go (new file, 273 lines)
@@ -0,0 +1,273 @@
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Hypervisor defines the interface for VM hypervisors.
|
||||
type Hypervisor interface {
|
||||
// Name returns the name of the hypervisor.
|
||||
Name() string
|
||||
// Available checks if the hypervisor is available on the system.
|
||||
Available() bool
|
||||
// BuildCommand builds the command to run a VM with the given options.
|
||||
BuildCommand(ctx context.Context, image string, opts *HypervisorOptions) (*exec.Cmd, error)
|
||||
}
|
||||
|
||||
// HypervisorOptions contains options for running a VM.
|
||||
type HypervisorOptions struct {
|
||||
// Memory in MB.
|
||||
Memory int
|
||||
// CPUs count.
|
||||
CPUs int
|
||||
// LogFile path for output.
|
||||
LogFile string
|
||||
// SSHPort for SSH access.
|
||||
SSHPort int
|
||||
// Ports maps host ports to guest ports.
|
||||
Ports map[int]int
|
||||
// Volumes maps host paths to guest paths (9p shares).
|
||||
Volumes map[string]string
|
||||
// Detach runs in background (nographic mode).
|
||||
Detach bool
|
||||
}
|
||||
|
||||
// QemuHypervisor implements Hypervisor for QEMU.
|
||||
type QemuHypervisor struct {
|
||||
// Binary is the path to the qemu binary (defaults to qemu-system-x86_64).
|
||||
Binary string
|
||||
}
|
||||
|
||||
// NewQemuHypervisor creates a new QEMU hypervisor instance.
|
||||
func NewQemuHypervisor() *QemuHypervisor {
|
||||
return &QemuHypervisor{
|
||||
Binary: "qemu-system-x86_64",
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the hypervisor name.
|
||||
func (q *QemuHypervisor) Name() string {
|
||||
return "qemu"
|
||||
}
|
||||
|
||||
// Available checks if QEMU is installed and accessible.
|
||||
func (q *QemuHypervisor) Available() bool {
|
||||
_, err := exec.LookPath(q.Binary)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// BuildCommand creates the QEMU command for running a VM.
|
||||
func (q *QemuHypervisor) BuildCommand(ctx context.Context, image string, opts *HypervisorOptions) (*exec.Cmd, error) {
|
||||
format := DetectImageFormat(image)
|
||||
if format == FormatUnknown {
|
||||
return nil, fmt.Errorf("unknown image format: %s", image)
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-m", fmt.Sprintf("%d", opts.Memory),
|
||||
"-smp", fmt.Sprintf("%d", opts.CPUs),
|
||||
"-enable-kvm",
|
||||
}
|
||||
|
||||
// Add the image based on format
|
||||
switch format {
|
||||
case FormatISO:
|
||||
args = append(args, "-cdrom", image)
|
||||
args = append(args, "-boot", "d")
|
||||
case FormatQCOW2:
|
||||
args = append(args, "-drive", fmt.Sprintf("file=%s,format=qcow2", image))
|
||||
case FormatVMDK:
|
||||
args = append(args, "-drive", fmt.Sprintf("file=%s,format=vmdk", image))
|
||||
case FormatRaw:
|
||||
args = append(args, "-drive", fmt.Sprintf("file=%s,format=raw", image))
|
||||
}
|
||||
|
||||
// Always run in nographic mode for container-like behavior
|
||||
args = append(args, "-nographic")
|
||||
|
||||
// Add serial console for log output
|
||||
args = append(args, "-serial", "stdio")
|
||||
|
||||
// Network with port forwarding
|
||||
netdev := "user,id=net0"
|
||||
if opts.SSHPort > 0 {
|
||||
netdev += fmt.Sprintf(",hostfwd=tcp::%d-:22", opts.SSHPort)
|
||||
}
|
||||
for hostPort, guestPort := range opts.Ports {
|
||||
netdev += fmt.Sprintf(",hostfwd=tcp::%d-:%d", hostPort, guestPort)
|
||||
}
|
||||
args = append(args, "-netdev", netdev)
|
||||
args = append(args, "-device", "virtio-net-pci,netdev=net0")
|
||||
|
||||
// Add 9p shares for volumes
|
||||
shareID := 0
|
||||
for hostPath, guestPath := range opts.Volumes {
|
||||
tag := fmt.Sprintf("share%d", shareID)
|
||||
args = append(args,
|
||||
"-fsdev", fmt.Sprintf("local,id=%s,path=%s,security_model=none", tag, hostPath),
|
||||
"-device", fmt.Sprintf("virtio-9p-pci,fsdev=%s,mount_tag=%s", tag, filepath.Base(guestPath)),
|
||||
)
|
||||
shareID++
|
||||
}
|
||||
|
||||
// Check if KVM is available on Linux, remove -enable-kvm if not
|
||||
if runtime.GOOS != "linux" || !isKVMAvailable() {
|
||||
// Remove -enable-kvm from args
|
||||
newArgs := make([]string, 0, len(args))
|
||||
for _, arg := range args {
|
||||
if arg != "-enable-kvm" {
|
||||
newArgs = append(newArgs, arg)
|
||||
}
|
||||
}
|
||||
args = newArgs
|
||||
|
||||
// On macOS, use HVF acceleration if available
|
||||
if runtime.GOOS == "darwin" {
|
||||
args = append(args, "-accel", "hvf")
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, q.Binary, args...)
|
||||
return cmd, nil
|
||||
}
|
||||
|
||||
// isKVMAvailable checks if KVM is available on the system.
|
||||
func isKVMAvailable() bool {
|
||||
_, err := os.Stat("/dev/kvm")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// HyperkitHypervisor implements Hypervisor for macOS Hyperkit.
|
||||
type HyperkitHypervisor struct {
|
||||
// Binary is the path to the hyperkit binary.
|
||||
Binary string
|
||||
}
|
||||
|
||||
// NewHyperkitHypervisor creates a new Hyperkit hypervisor instance.
|
||||
func NewHyperkitHypervisor() *HyperkitHypervisor {
|
||||
return &HyperkitHypervisor{
|
||||
Binary: "hyperkit",
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the hypervisor name.
|
||||
func (h *HyperkitHypervisor) Name() string {
|
||||
return "hyperkit"
|
||||
}
|
||||
|
||||
// Available checks if Hyperkit is installed and accessible.
|
||||
func (h *HyperkitHypervisor) Available() bool {
|
||||
if runtime.GOOS != "darwin" {
|
||||
return false
|
||||
}
|
||||
_, err := exec.LookPath(h.Binary)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// BuildCommand creates the Hyperkit command for running a VM.
|
||||
func (h *HyperkitHypervisor) BuildCommand(ctx context.Context, image string, opts *HypervisorOptions) (*exec.Cmd, error) {
|
||||
format := DetectImageFormat(image)
|
||||
if format == FormatUnknown {
|
||||
return nil, fmt.Errorf("unknown image format: %s", image)
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-m", fmt.Sprintf("%dM", opts.Memory),
|
||||
"-c", fmt.Sprintf("%d", opts.CPUs),
|
||||
"-A", // ACPI
|
||||
"-u", // Unlimited console output
|
||||
"-s", "0:0,hostbridge",
|
||||
"-s", "31,lpc",
|
||||
"-l", "com1,stdio", // Serial console
|
||||
}
|
||||
|
||||
// Add PCI slot for disk (slot 2)
|
||||
switch format {
|
||||
case FormatISO:
|
||||
args = append(args, "-s", fmt.Sprintf("2:0,ahci-cd,%s", image))
|
||||
case FormatQCOW2, FormatVMDK, FormatRaw:
|
||||
args = append(args, "-s", fmt.Sprintf("2:0,virtio-blk,%s", image))
|
||||
}
|
||||
|
||||
// Network with port forwarding (slot 3)
|
||||
netArgs := "virtio-net"
|
||||
if opts.SSHPort > 0 || len(opts.Ports) > 0 {
|
||||
// Hyperkit uses slirp for user networking with port forwarding
|
||||
portForwards := make([]string, 0)
|
||||
if opts.SSHPort > 0 {
|
||||
portForwards = append(portForwards, fmt.Sprintf("tcp:%d:22", opts.SSHPort))
|
||||
}
|
||||
for hostPort, guestPort := range opts.Ports {
|
||||
portForwards = append(portForwards, fmt.Sprintf("tcp:%d:%d", hostPort, guestPort))
|
||||
}
|
||||
if len(portForwards) > 0 {
|
||||
netArgs += "," + strings.Join(portForwards, ",")
|
||||
}
|
||||
}
|
||||
args = append(args, "-s", "3:0,"+netArgs)
|
||||
|
||||
cmd := exec.CommandContext(ctx, h.Binary, args...)
|
||||
return cmd, nil
|
||||
}
|
||||
|
||||
// DetectImageFormat determines the image format from its file extension.
|
||||
func DetectImageFormat(path string) ImageFormat {
|
||||
ext := strings.ToLower(filepath.Ext(path))
|
||||
switch ext {
|
||||
case ".iso":
|
||||
return FormatISO
|
||||
case ".qcow2":
|
||||
return FormatQCOW2
|
||||
case ".vmdk":
|
||||
return FormatVMDK
|
||||
case ".raw", ".img":
|
||||
return FormatRaw
|
||||
default:
|
||||
return FormatUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// DetectHypervisor returns the best available hypervisor for the current platform.
|
||||
func DetectHypervisor() (Hypervisor, error) {
|
||||
// On macOS, prefer Hyperkit if available, fall back to QEMU
|
||||
if runtime.GOOS == "darwin" {
|
||||
hk := NewHyperkitHypervisor()
|
||||
if hk.Available() {
|
||||
return hk, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Try QEMU on all platforms
|
||||
qemu := NewQemuHypervisor()
|
||||
if qemu.Available() {
|
||||
return qemu, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("no hypervisor available: install qemu or hyperkit (macOS)")
|
||||
}
|
||||
|
||||
// GetHypervisor returns a specific hypervisor by name.
|
||||
func GetHypervisor(name string) (Hypervisor, error) {
|
||||
switch strings.ToLower(name) {
|
||||
case "qemu":
|
||||
h := NewQemuHypervisor()
|
||||
if !h.Available() {
|
||||
return nil, fmt.Errorf("qemu is not available")
|
||||
}
|
||||
return h, nil
|
||||
case "hyperkit":
|
||||
h := NewHyperkitHypervisor()
|
||||
if !h.Available() {
|
||||
return nil, fmt.Errorf("hyperkit is not available (requires macOS)")
|
||||
}
|
||||
return h, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown hypervisor: %s", name)
|
||||
}
|
||||
}
|
||||
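A sketch of the intended flow: detect whichever hypervisor is installed, then build the VM command for an image; the paths are illustrative and exampleBuildCommand is not part of the package.

func exampleBuildCommand(ctx context.Context) (*exec.Cmd, error) {
	hv, err := DetectHypervisor()
	if err != nil {
		return nil, err // neither hyperkit nor qemu is installed
	}
	return hv.BuildCommand(ctx, "images/linuxkit.iso", &HypervisorOptions{
		Memory:  1024,
		CPUs:    2,
		SSHPort: 2222,
		Ports:   map[int]int{8080: 80},
	})
}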
container/hypervisor_test.go (new file, 358 lines)
@@ -0,0 +1,358 @@
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestQemuHypervisor_Available_Good(t *testing.T) {
|
||||
q := NewQemuHypervisor()
|
||||
|
||||
// Check if qemu is available on this system
|
||||
available := q.Available()
|
||||
|
||||
// We just verify it returns a boolean without error
|
||||
// The actual availability depends on the system
|
||||
assert.IsType(t, true, available)
|
||||
}
|
||||
|
||||
func TestQemuHypervisor_Available_Bad_InvalidBinary(t *testing.T) {
|
||||
q := &QemuHypervisor{
|
||||
Binary: "nonexistent-qemu-binary-that-does-not-exist",
|
||||
}
|
||||
|
||||
available := q.Available()
|
||||
|
||||
assert.False(t, available)
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_Available_Good(t *testing.T) {
|
||||
h := NewHyperkitHypervisor()
|
||||
|
||||
available := h.Available()
|
||||
|
||||
// On non-darwin systems, should always be false
|
||||
if runtime.GOOS != "darwin" {
|
||||
assert.False(t, available)
|
||||
} else {
|
||||
// On darwin, just verify it returns a boolean
|
||||
assert.IsType(t, true, available)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_Available_Bad_NotDarwin(t *testing.T) {
|
||||
if runtime.GOOS == "darwin" {
|
||||
t.Skip("This test only runs on non-darwin systems")
|
||||
}
|
||||
|
||||
h := NewHyperkitHypervisor()
|
||||
|
||||
available := h.Available()
|
||||
|
||||
assert.False(t, available, "Hyperkit should not be available on non-darwin systems")
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_Available_Bad_InvalidBinary(t *testing.T) {
|
||||
h := &HyperkitHypervisor{
|
||||
Binary: "nonexistent-hyperkit-binary-that-does-not-exist",
|
||||
}
|
||||
|
||||
available := h.Available()
|
||||
|
||||
assert.False(t, available)
|
||||
}
|
||||
|
||||
func TestIsKVMAvailable_Good(t *testing.T) {
|
||||
// This test verifies the function runs without error
|
||||
// The actual result depends on the system
|
||||
result := isKVMAvailable()
|
||||
|
||||
// On non-linux systems, should be false
|
||||
if runtime.GOOS != "linux" {
|
||||
assert.False(t, result, "KVM should not be available on non-linux systems")
|
||||
} else {
|
||||
// On linux, just verify it returns a boolean
|
||||
assert.IsType(t, true, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectHypervisor_Good(t *testing.T) {
|
||||
// DetectHypervisor tries to find an available hypervisor
|
||||
hv, err := DetectHypervisor()
|
||||
|
||||
// This test may pass or fail depending on system configuration
|
||||
// If no hypervisor is available, it should return an error
|
||||
if err != nil {
|
||||
assert.Nil(t, hv)
|
||||
assert.Contains(t, err.Error(), "no hypervisor available")
|
||||
} else {
|
||||
assert.NotNil(t, hv)
|
||||
assert.NotEmpty(t, hv.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHypervisor_Good_Qemu(t *testing.T) {
|
||||
hv, err := GetHypervisor("qemu")
|
||||
|
||||
// Depends on whether qemu is installed
|
||||
if err != nil {
|
||||
assert.Contains(t, err.Error(), "not available")
|
||||
} else {
|
||||
assert.NotNil(t, hv)
|
||||
assert.Equal(t, "qemu", hv.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHypervisor_Good_QemuUppercase(t *testing.T) {
|
||||
hv, err := GetHypervisor("QEMU")
|
||||
|
||||
// Depends on whether qemu is installed
|
||||
if err != nil {
|
||||
assert.Contains(t, err.Error(), "not available")
|
||||
} else {
|
||||
assert.NotNil(t, hv)
|
||||
assert.Equal(t, "qemu", hv.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHypervisor_Good_Hyperkit(t *testing.T) {
|
||||
hv, err := GetHypervisor("hyperkit")
|
||||
|
||||
// On non-darwin systems, should always fail
|
||||
if runtime.GOOS != "darwin" {
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not available")
|
||||
} else {
|
||||
// On darwin, depends on whether hyperkit is installed
|
||||
if err != nil {
|
||||
assert.Contains(t, err.Error(), "not available")
|
||||
} else {
|
||||
assert.NotNil(t, hv)
|
||||
assert.Equal(t, "hyperkit", hv.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHypervisor_Bad_Unknown(t *testing.T) {
|
||||
_, err := GetHypervisor("unknown-hypervisor")
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown hypervisor")
|
||||
}
|
||||
|
||||
func TestQemuHypervisor_BuildCommand_Good_WithPortsAndVolumes(t *testing.T) {
|
||||
q := NewQemuHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{
|
||||
Memory: 2048,
|
||||
CPUs: 4,
|
||||
SSHPort: 2222,
|
||||
Ports: map[int]int{8080: 80, 443: 443},
|
||||
Volumes: map[string]string{
|
||||
"/host/data": "/container/data",
|
||||
"/host/logs": "/container/logs",
|
||||
},
|
||||
Detach: true,
|
||||
}
|
||||
|
||||
cmd, err := q.BuildCommand(ctx, "/path/to/image.iso", opts)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cmd)
|
||||
|
||||
// Verify command includes all expected args
|
||||
args := cmd.Args
|
||||
assert.Contains(t, args, "-m")
|
||||
assert.Contains(t, args, "2048")
|
||||
assert.Contains(t, args, "-smp")
|
||||
assert.Contains(t, args, "4")
|
||||
}
|
||||
|
||||
func TestQemuHypervisor_BuildCommand_Good_QCow2Format(t *testing.T) {
|
||||
q := NewQemuHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{Memory: 1024, CPUs: 1}
|
||||
|
||||
cmd, err := q.BuildCommand(ctx, "/path/to/image.qcow2", opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the drive format is qcow2
|
||||
found := false
|
||||
for _, arg := range cmd.Args {
|
||||
if arg == "file=/path/to/image.qcow2,format=qcow2" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Should have qcow2 drive argument")
|
||||
}
|
||||
|
||||
func TestQemuHypervisor_BuildCommand_Good_VMDKFormat(t *testing.T) {
|
||||
q := NewQemuHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{Memory: 1024, CPUs: 1}
|
||||
|
||||
cmd, err := q.BuildCommand(ctx, "/path/to/image.vmdk", opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the drive format is vmdk
|
||||
found := false
|
||||
for _, arg := range cmd.Args {
|
||||
if arg == "file=/path/to/image.vmdk,format=vmdk" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Should have vmdk drive argument")
|
||||
}
|
||||
|
||||
func TestQemuHypervisor_BuildCommand_Good_RawFormat(t *testing.T) {
|
||||
q := NewQemuHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{Memory: 1024, CPUs: 1}
|
||||
|
||||
cmd, err := q.BuildCommand(ctx, "/path/to/image.raw", opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the drive format is raw
|
||||
found := false
|
||||
for _, arg := range cmd.Args {
|
||||
if arg == "file=/path/to/image.raw,format=raw" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Should have raw drive argument")
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_BuildCommand_Good_WithPorts(t *testing.T) {
|
||||
h := NewHyperkitHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{
|
||||
Memory: 1024,
|
||||
CPUs: 2,
|
||||
SSHPort: 2222,
|
||||
Ports: map[int]int{8080: 80},
|
||||
}
|
||||
|
||||
cmd, err := h.BuildCommand(ctx, "/path/to/image.iso", opts)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cmd)
|
||||
|
||||
// Verify it creates a command with memory and CPU args
|
||||
args := cmd.Args
|
||||
assert.Contains(t, args, "-m")
|
||||
assert.Contains(t, args, "1024M")
|
||||
assert.Contains(t, args, "-c")
|
||||
assert.Contains(t, args, "2")
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_BuildCommand_Good_QCow2Format(t *testing.T) {
|
||||
h := NewHyperkitHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{Memory: 1024, CPUs: 1}
|
||||
|
||||
cmd, err := h.BuildCommand(ctx, "/path/to/image.qcow2", opts)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cmd)
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_BuildCommand_Good_RawFormat(t *testing.T) {
|
||||
h := NewHyperkitHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{Memory: 1024, CPUs: 1}
|
||||
|
||||
cmd, err := h.BuildCommand(ctx, "/path/to/image.raw", opts)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cmd)
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_BuildCommand_Good_NoPorts(t *testing.T) {
|
||||
h := NewHyperkitHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{
|
||||
Memory: 512,
|
||||
CPUs: 1,
|
||||
SSHPort: 0, // No SSH port
|
||||
Ports: nil,
|
||||
}
|
||||
|
||||
cmd, err := h.BuildCommand(ctx, "/path/to/image.iso", opts)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cmd)
|
||||
}
|
||||
|
||||
func TestQemuHypervisor_BuildCommand_Good_NoSSHPort(t *testing.T) {
|
||||
q := NewQemuHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{
|
||||
Memory: 512,
|
||||
CPUs: 1,
|
||||
SSHPort: 0, // No SSH port
|
||||
Ports: nil,
|
||||
}
|
||||
|
||||
cmd, err := q.BuildCommand(ctx, "/path/to/image.iso", opts)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cmd)
|
||||
}
|
||||
|
||||
func TestQemuHypervisor_BuildCommand_Bad_UnknownFormat(t *testing.T) {
|
||||
q := NewQemuHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{Memory: 1024, CPUs: 1}
|
||||
|
||||
_, err := q.BuildCommand(ctx, "/path/to/image.txt", opts)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown image format")
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_BuildCommand_Bad_UnknownFormat(t *testing.T) {
|
||||
h := NewHyperkitHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{Memory: 1024, CPUs: 1}
|
||||
|
||||
_, err := h.BuildCommand(ctx, "/path/to/image.unknown", opts)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown image format")
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_Name_Good(t *testing.T) {
|
||||
h := NewHyperkitHypervisor()
|
||||
assert.Equal(t, "hyperkit", h.Name())
|
||||
}
|
||||
|
||||
func TestHyperkitHypervisor_BuildCommand_Good_ISOFormat(t *testing.T) {
|
||||
h := NewHyperkitHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{
|
||||
Memory: 1024,
|
||||
CPUs: 2,
|
||||
SSHPort: 2222,
|
||||
}
|
||||
|
||||
cmd, err := h.BuildCommand(ctx, "/path/to/image.iso", opts)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cmd)
|
||||
|
||||
args := cmd.Args
|
||||
assert.Contains(t, args, "-m")
|
||||
assert.Contains(t, args, "1024M")
|
||||
assert.Contains(t, args, "-c")
|
||||
assert.Contains(t, args, "2")
|
||||
}
|
||||
container/linuxkit.go (new file, 462 lines)
@@ -0,0 +1,462 @@
package container
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
goio "io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// LinuxKitManager implements the Manager interface for LinuxKit VMs.
|
||||
type LinuxKitManager struct {
|
||||
state *State
|
||||
hypervisor Hypervisor
|
||||
medium io.Medium
|
||||
}
|
||||
|
||||
// NewLinuxKitManager creates a new LinuxKit manager with auto-detected hypervisor.
|
||||
func NewLinuxKitManager(m io.Medium) (*LinuxKitManager, error) {
|
||||
statePath, err := DefaultStatePath()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to determine state path: %w", err)
|
||||
}
|
||||
|
||||
	state, err := LoadState(m, statePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load state: %w", err)
|
||||
}
|
||||
|
||||
hypervisor, err := DetectHypervisor()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &LinuxKitManager{
|
||||
state: state,
|
||||
hypervisor: hypervisor,
|
||||
medium: m,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewLinuxKitManagerWithHypervisor creates a manager with a specific hypervisor.
|
||||
func NewLinuxKitManagerWithHypervisor(m io.Medium, state *State, hypervisor Hypervisor) *LinuxKitManager {
|
||||
return &LinuxKitManager{
|
||||
state: state,
|
||||
hypervisor: hypervisor,
|
||||
medium: m,
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts a new LinuxKit VM from the given image.
|
||||
func (m *LinuxKitManager) Run(ctx context.Context, image string, opts RunOptions) (*Container, error) {
|
||||
// Validate image exists
|
||||
if !m.medium.IsFile(image) {
|
||||
return nil, fmt.Errorf("image not found: %s", image)
|
||||
}
|
||||
|
||||
// Detect image format
|
||||
format := DetectImageFormat(image)
|
||||
if format == FormatUnknown {
|
||||
return nil, fmt.Errorf("unsupported image format: %s", image)
|
||||
}
|
||||
|
||||
// Generate container ID
|
||||
id, err := GenerateID()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate container ID: %w", err)
|
||||
}
|
||||
|
||||
// Apply defaults
|
||||
if opts.Memory <= 0 {
|
||||
opts.Memory = 1024
|
||||
}
|
||||
if opts.CPUs <= 0 {
|
||||
opts.CPUs = 1
|
||||
}
|
||||
if opts.SSHPort <= 0 {
|
||||
opts.SSHPort = 2222
|
||||
}
|
||||
|
||||
// Use name or generate from ID
|
||||
name := opts.Name
|
||||
if name == "" {
|
||||
name = id[:8]
|
||||
}
|
||||
|
||||
// Ensure logs directory exists
|
||||
if err := EnsureLogsDir(); err != nil {
|
||||
return nil, fmt.Errorf("failed to create logs directory: %w", err)
|
||||
}
|
||||
|
||||
// Get log file path
|
||||
logPath, err := LogPath(id)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to determine log path: %w", err)
|
||||
}
|
||||
|
||||
// Build hypervisor options
|
||||
hvOpts := &HypervisorOptions{
|
||||
Memory: opts.Memory,
|
||||
CPUs: opts.CPUs,
|
||||
LogFile: logPath,
|
||||
SSHPort: opts.SSHPort,
|
||||
Ports: opts.Ports,
|
||||
Volumes: opts.Volumes,
|
||||
Detach: opts.Detach,
|
||||
}
|
||||
|
||||
// Build the command
|
||||
cmd, err := m.hypervisor.BuildCommand(ctx, image, hvOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build hypervisor command: %w", err)
|
||||
}
|
||||
|
||||
// Create log file
|
||||
logFile, err := os.Create(logPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create log file: %w", err)
|
||||
}
|
||||
|
||||
// Create container record
|
||||
container := &Container{
|
||||
ID: id,
|
||||
Name: name,
|
||||
Image: image,
|
||||
Status: StatusRunning,
|
||||
StartedAt: time.Now(),
|
||||
Ports: opts.Ports,
|
||||
Memory: opts.Memory,
|
||||
CPUs: opts.CPUs,
|
||||
}
|
||||
|
||||
if opts.Detach {
|
||||
// Run in background
|
||||
cmd.Stdout = logFile
|
||||
cmd.Stderr = logFile
|
||||
|
||||
// Start the process
|
||||
if err := cmd.Start(); err != nil {
|
||||
_ = logFile.Close()
|
||||
return nil, fmt.Errorf("failed to start VM: %w", err)
|
||||
}
|
||||
|
||||
container.PID = cmd.Process.Pid
|
||||
|
||||
// Save state
|
||||
if err := m.state.Add(container); err != nil {
|
||||
// Try to kill the process we just started
|
||||
_ = cmd.Process.Kill()
|
||||
_ = logFile.Close()
|
||||
return nil, fmt.Errorf("failed to save state: %w", err)
|
||||
}
|
||||
|
||||
// Close log file handle (process has its own)
|
||||
_ = logFile.Close()
|
||||
|
||||
// Start a goroutine to wait for process exit and update state
|
||||
go m.waitForExit(container.ID, cmd)
|
||||
|
||||
return container, nil
|
||||
}
|
||||
|
||||
// Run in foreground
|
||||
// Tee output to both log file and stdout
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
_ = logFile.Close()
|
||||
return nil, fmt.Errorf("failed to get stdout pipe: %w", err)
|
||||
}
|
||||
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
_ = logFile.Close()
|
||||
return nil, fmt.Errorf("failed to get stderr pipe: %w", err)
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
_ = logFile.Close()
|
||||
return nil, fmt.Errorf("failed to start VM: %w", err)
|
||||
}
|
||||
|
||||
container.PID = cmd.Process.Pid
|
||||
|
||||
// Save state before waiting
|
||||
if err := m.state.Add(container); err != nil {
|
||||
_ = cmd.Process.Kill()
|
||||
_ = logFile.Close()
|
||||
return nil, fmt.Errorf("failed to save state: %w", err)
|
||||
}
|
||||
|
||||
// Copy output to both log and stdout
|
||||
go func() {
|
||||
mw := goio.MultiWriter(logFile, os.Stdout)
|
||||
_, _ = goio.Copy(mw, stdout)
|
||||
}()
|
||||
go func() {
|
||||
mw := goio.MultiWriter(logFile, os.Stderr)
|
||||
_, _ = goio.Copy(mw, stderr)
|
||||
}()
|
||||
|
||||
// Wait for the process to complete
|
||||
if err := cmd.Wait(); err != nil {
|
||||
container.Status = StatusError
|
||||
} else {
|
||||
container.Status = StatusStopped
|
||||
}
|
||||
|
||||
_ = logFile.Close()
|
||||
if err := m.state.Update(container); err != nil {
|
||||
return container, fmt.Errorf("update container state: %w", err)
|
||||
}
|
||||
|
||||
return container, nil
|
||||
}
|
||||
|
||||
// waitForExit monitors a detached process and updates state when it exits.
|
||||
func (m *LinuxKitManager) waitForExit(id string, cmd *exec.Cmd) {
|
||||
err := cmd.Wait()
|
||||
|
||||
container, ok := m.state.Get(id)
|
||||
if ok {
|
||||
if err != nil {
|
||||
container.Status = StatusError
|
||||
} else {
|
||||
container.Status = StatusStopped
|
||||
}
|
||||
_ = m.state.Update(container)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops a running container by sending SIGTERM.
|
||||
func (m *LinuxKitManager) Stop(ctx context.Context, id string) error {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
container, ok := m.state.Get(id)
|
||||
if !ok {
|
||||
return fmt.Errorf("container not found: %s", id)
|
||||
}
|
||||
|
||||
if container.Status != StatusRunning {
|
||||
return fmt.Errorf("container is not running: %s", id)
|
||||
}
|
||||
|
||||
// Find the process
|
||||
process, err := os.FindProcess(container.PID)
|
||||
if err != nil {
|
||||
// Process doesn't exist, update state
|
||||
container.Status = StatusStopped
|
||||
_ = m.state.Update(container)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send SIGTERM
|
||||
if err := process.Signal(syscall.SIGTERM); err != nil {
|
||||
// Process might already be gone
|
||||
container.Status = StatusStopped
|
||||
_ = m.state.Update(container)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Honour already-cancelled contexts before waiting
|
||||
if err := ctx.Err(); err != nil {
|
||||
_ = process.Signal(syscall.SIGKILL)
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for graceful shutdown with timeout
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
_, _ = process.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
// Process exited gracefully
|
||||
case <-time.After(10 * time.Second):
|
||||
// Force kill
|
||||
_ = process.Signal(syscall.SIGKILL)
|
||||
<-done
|
||||
case <-ctx.Done():
|
||||
// Context cancelled
|
||||
_ = process.Signal(syscall.SIGKILL)
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
container.Status = StatusStopped
|
||||
return m.state.Update(container)
|
||||
}
|
||||
|
||||
// List returns all known containers, verifying process state.
|
||||
func (m *LinuxKitManager) List(ctx context.Context) ([]*Container, error) {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
containers := m.state.All()
|
||||
|
||||
// Verify each running container's process is still alive
|
||||
for _, c := range containers {
|
||||
if c.Status == StatusRunning {
|
||||
if !isProcessRunning(c.PID) {
|
||||
c.Status = StatusStopped
|
||||
_ = m.state.Update(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return containers, nil
|
||||
}
|
||||
|
||||
// isProcessRunning checks if a process with the given PID is still running.
|
||||
func isProcessRunning(pid int) bool {
|
||||
process, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// On Unix, FindProcess always succeeds, so we need to send signal 0 to check
|
||||
err = process.Signal(syscall.Signal(0))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Logs returns a reader for the container's log output.
|
||||
func (m *LinuxKitManager) Logs(ctx context.Context, id string, follow bool) (goio.ReadCloser, error) {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, ok := m.state.Get(id)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("container not found: %s", id)
|
||||
}
|
||||
|
||||
logPath, err := LogPath(id)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to determine log path: %w", err)
|
||||
}
|
||||
|
||||
if !m.medium.IsFile(logPath) {
|
||||
return nil, fmt.Errorf("no logs available for container: %s", id)
|
||||
}
|
||||
|
||||
if !follow {
|
||||
// Simple case: just open and return the file
|
||||
return m.medium.Open(logPath)
|
||||
}
|
||||
|
||||
// Follow mode: create a reader that tails the file
|
||||
return newFollowReader(ctx, m.medium, logPath)
|
||||
}
|
||||
|
||||
// followReader implements goio.ReadCloser for following log files.
|
||||
type followReader struct {
|
||||
file goio.ReadCloser
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
reader *bufio.Reader
|
||||
medium io.Medium
|
||||
path string
|
||||
}
|
||||
|
||||
func newFollowReader(ctx context.Context, m io.Medium, path string) (*followReader, error) {
|
||||
file, err := m.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Note: We don't seek here because Medium.Open doesn't guarantee Seekability.
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
return &followReader{
|
||||
file: file,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
reader: bufio.NewReader(file),
|
||||
medium: m,
|
||||
path: path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *followReader) Read(p []byte) (int, error) {
|
||||
for {
|
||||
select {
|
||||
case <-f.ctx.Done():
|
||||
return 0, goio.EOF
|
||||
default:
|
||||
}
|
||||
|
||||
n, err := f.reader.Read(p)
|
||||
if n > 0 {
|
||||
return n, nil
|
||||
}
|
||||
if err != nil && err != goio.EOF {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// No data available, wait a bit and try again
|
||||
select {
|
||||
case <-f.ctx.Done():
|
||||
return 0, goio.EOF
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
// Reset reader to pick up new data
|
||||
f.reader.Reset(f.file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *followReader) Close() error {
|
||||
f.cancel()
|
||||
return f.file.Close()
|
||||
}
|
||||
|
||||
// Exec executes a command inside the container via SSH.
|
||||
func (m *LinuxKitManager) Exec(ctx context.Context, id string, cmd []string) error {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
container, ok := m.state.Get(id)
|
||||
if !ok {
|
||||
return fmt.Errorf("container not found: %s", id)
|
||||
}
|
||||
|
||||
if container.Status != StatusRunning {
|
||||
return fmt.Errorf("container is not running: %s", id)
|
||||
}
|
||||
|
||||
// Default SSH port
|
||||
sshPort := 2222
|
||||
|
||||
// Build SSH command
|
||||
sshArgs := []string{
|
||||
"-p", fmt.Sprintf("%d", sshPort),
|
||||
"-o", "StrictHostKeyChecking=yes",
|
||||
"-o", "UserKnownHostsFile=~/.core/known_hosts",
|
||||
"-o", "LogLevel=ERROR",
|
||||
"root@localhost",
|
||||
}
|
||||
sshArgs = append(sshArgs, cmd...)
|
||||
|
||||
sshCmd := exec.CommandContext(ctx, "ssh", sshArgs...)
|
||||
sshCmd.Stdin = os.Stdin
|
||||
sshCmd.Stdout = os.Stdout
|
||||
sshCmd.Stderr = os.Stderr
|
||||
|
||||
return sshCmd.Run()
|
||||
}
|
||||
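// Illustrative usage sketch for Exec (the "manager" variable and container ID
// below are placeholders): Exec inherits the calling process's
// stdin/stdout/stderr, so interactive commands work when run from a terminal.
//
//	if err := manager.Exec(ctx, "abc12345", []string{"uname", "-a"}); err != nil {
//		fmt.Printf("exec failed: %v\n", err)
//	}
//
// Note that the SSH port is currently fixed at the default 2222; a container
// started with a different RunOptions.SSHPort is not reflected here.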
|
||||
// State returns the manager's state (for testing).
|
||||
func (m *LinuxKitManager) State() *State {
|
||||
return m.state
|
||||
}
|
||||
|
||||
// Hypervisor returns the manager's hypervisor (for testing).
|
||||
func (m *LinuxKitManager) Hypervisor() Hypervisor {
|
||||
return m.hypervisor
|
||||
}
|
||||
container/linuxkit_test.go (new file, 786 lines)
@@ -0,0 +1,786 @@
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// MockHypervisor is a mock implementation for testing.
|
||||
type MockHypervisor struct {
|
||||
name string
|
||||
available bool
|
||||
buildErr error
|
||||
lastImage string
|
||||
lastOpts *HypervisorOptions
|
||||
commandToRun string
|
||||
}
|
||||
|
||||
func NewMockHypervisor() *MockHypervisor {
|
||||
return &MockHypervisor{
|
||||
name: "mock",
|
||||
available: true,
|
||||
commandToRun: "echo",
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockHypervisor) Name() string {
|
||||
return m.name
|
||||
}
|
||||
|
||||
func (m *MockHypervisor) Available() bool {
|
||||
return m.available
|
||||
}
|
||||
|
||||
func (m *MockHypervisor) BuildCommand(ctx context.Context, image string, opts *HypervisorOptions) (*exec.Cmd, error) {
|
||||
m.lastImage = image
|
||||
m.lastOpts = opts
|
||||
if m.buildErr != nil {
|
||||
return nil, m.buildErr
|
||||
}
|
||||
// Return a simple command that exits quickly
|
||||
return exec.CommandContext(ctx, m.commandToRun, "test"), nil
|
||||
}
|
||||
|
||||
// newTestManager creates a LinuxKitManager with mock hypervisor for testing.
|
||||
// Uses manual temp directory management to avoid race conditions with t.TempDir cleanup.
|
||||
func newTestManager(t *testing.T) (*LinuxKitManager, *MockHypervisor, string) {
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test-*")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Manual cleanup that handles race conditions with state file writes
|
||||
t.Cleanup(func() {
|
||||
// Give any pending file operations time to complete
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
_ = os.RemoveAll(tmpDir)
|
||||
})
|
||||
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
|
||||
state, err := LoadState(io.Local, statePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
mock := NewMockHypervisor()
|
||||
manager := NewLinuxKitManagerWithHypervisor(io.Local, state, mock)
|
||||
|
||||
return manager, mock, tmpDir
|
||||
}
|
||||
|
||||
func TestNewLinuxKitManagerWithHypervisor_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
state, _ := LoadState(io.Local, statePath)
|
||||
mock := NewMockHypervisor()
|
||||
|
||||
manager := NewLinuxKitManagerWithHypervisor(io.Local, state, mock)
|
||||
|
||||
assert.NotNil(t, manager)
|
||||
assert.Equal(t, state, manager.State())
|
||||
assert.Equal(t, mock, manager.Hypervisor())
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Good_Detached(t *testing.T) {
|
||||
manager, mock, tmpDir := newTestManager(t)
|
||||
|
||||
// Create a test image file
|
||||
imagePath := filepath.Join(tmpDir, "test.iso")
|
||||
err := os.WriteFile(imagePath, []byte("fake image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use a command that runs briefly then exits
|
||||
mock.commandToRun = "sleep"
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{
|
||||
Name: "test-vm",
|
||||
Detach: true,
|
||||
Memory: 512,
|
||||
CPUs: 2,
|
||||
}
|
||||
|
||||
container, err := manager.Run(ctx, imagePath, opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotEmpty(t, container.ID)
|
||||
assert.Equal(t, "test-vm", container.Name)
|
||||
assert.Equal(t, imagePath, container.Image)
|
||||
assert.Equal(t, StatusRunning, container.Status)
|
||||
assert.Greater(t, container.PID, 0)
|
||||
assert.Equal(t, 512, container.Memory)
|
||||
assert.Equal(t, 2, container.CPUs)
|
||||
|
||||
// Verify hypervisor was called with correct options
|
||||
assert.Equal(t, imagePath, mock.lastImage)
|
||||
assert.Equal(t, 512, mock.lastOpts.Memory)
|
||||
assert.Equal(t, 2, mock.lastOpts.CPUs)
|
||||
|
||||
// Clean up - stop the container
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Good_DefaultValues(t *testing.T) {
|
||||
manager, mock, tmpDir := newTestManager(t)
|
||||
|
||||
imagePath := filepath.Join(tmpDir, "test.qcow2")
|
||||
err := os.WriteFile(imagePath, []byte("fake image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{Detach: true}
|
||||
|
||||
container, err := manager.Run(ctx, imagePath, opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check defaults were applied
|
||||
assert.Equal(t, 1024, mock.lastOpts.Memory)
|
||||
assert.Equal(t, 1, mock.lastOpts.CPUs)
|
||||
assert.Equal(t, 2222, mock.lastOpts.SSHPort)
|
||||
|
||||
// Name should default to first 8 chars of ID
|
||||
assert.Equal(t, container.ID[:8], container.Name)
|
||||
|
||||
// Wait for the mock process to complete to avoid temp dir cleanup issues
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Bad_ImageNotFound(t *testing.T) {
|
||||
manager, _, _ := newTestManager(t)
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{Detach: true}
|
||||
|
||||
_, err := manager.Run(ctx, "/nonexistent/image.iso", opts)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "image not found")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Bad_UnsupportedFormat(t *testing.T) {
|
||||
manager, _, tmpDir := newTestManager(t)
|
||||
|
||||
imagePath := filepath.Join(tmpDir, "test.txt")
|
||||
err := os.WriteFile(imagePath, []byte("not an image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{Detach: true}
|
||||
|
||||
_, err = manager.Run(ctx, imagePath, opts)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unsupported image format")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Stop_Good(t *testing.T) {
|
||||
manager, _, _ := newTestManager(t)
|
||||
|
||||
// Add a fake running container with a non-existent PID
|
||||
// The Stop function should handle this gracefully
|
||||
container := &Container{
|
||||
ID: "abc12345",
|
||||
Status: StatusRunning,
|
||||
PID: 999999, // Non-existent PID
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
_ = manager.State().Add(container)
|
||||
|
||||
ctx := context.Background()
|
||||
err := manager.Stop(ctx, "abc12345")
|
||||
|
||||
// Stop should succeed (process doesn't exist, so container is marked stopped)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Verify the container status was updated
|
||||
c, ok := manager.State().Get("abc12345")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, StatusStopped, c.Status)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Stop_Bad_NotFound(t *testing.T) {
|
||||
manager, _, _ := newTestManager(t)
|
||||
|
||||
ctx := context.Background()
|
||||
err := manager.Stop(ctx, "nonexistent")
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "container not found")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Stop_Bad_NotRunning(t *testing.T) {
|
||||
_, _, tmpDir := newTestManager(t)
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
state, err := LoadState(io.Local, statePath)
|
||||
require.NoError(t, err)
|
||||
manager := NewLinuxKitManagerWithHypervisor(io.Local, state, NewMockHypervisor())
|
||||
|
||||
container := &Container{
|
||||
ID: "abc12345",
|
||||
Status: StatusStopped,
|
||||
}
|
||||
_ = state.Add(container)
|
||||
|
||||
ctx := context.Background()
|
||||
err = manager.Stop(ctx, "abc12345")
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not running")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_List_Good(t *testing.T) {
|
||||
_, _, tmpDir := newTestManager(t)
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
state, err := LoadState(io.Local, statePath)
|
||||
require.NoError(t, err)
|
||||
manager := NewLinuxKitManagerWithHypervisor(io.Local, state, NewMockHypervisor())
|
||||
|
||||
_ = state.Add(&Container{ID: "aaa11111", Status: StatusStopped})
|
||||
_ = state.Add(&Container{ID: "bbb22222", Status: StatusStopped})
|
||||
|
||||
ctx := context.Background()
|
||||
containers, err := manager.List(ctx)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, containers, 2)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_List_Good_VerifiesRunningStatus(t *testing.T) {
|
||||
_, _, tmpDir := newTestManager(t)
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
state, err := LoadState(io.Local, statePath)
|
||||
require.NoError(t, err)
|
||||
manager := NewLinuxKitManagerWithHypervisor(io.Local, state, NewMockHypervisor())
|
||||
|
||||
// Add a "running" container with a fake PID that doesn't exist
|
||||
_ = state.Add(&Container{
|
||||
ID: "abc12345",
|
||||
Status: StatusRunning,
|
||||
PID: 999999, // PID that almost certainly doesn't exist
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
containers, err := manager.List(ctx)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, containers, 1)
|
||||
// Status should have been updated to stopped since PID doesn't exist
|
||||
assert.Equal(t, StatusStopped, containers[0].Status)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Logs_Good(t *testing.T) {
|
||||
manager, _, tmpDir := newTestManager(t)
|
||||
|
||||
// Create a log file manually
|
||||
logsDir := filepath.Join(tmpDir, "logs")
|
||||
require.NoError(t, os.MkdirAll(logsDir, 0755))
|
||||
|
||||
container := &Container{ID: "abc12345"}
|
||||
_ = manager.State().Add(container)
|
||||
|
||||
// Override the default logs dir for testing by creating the log file
|
||||
// at the expected location
|
||||
logContent := "test log content\nline 2\n"
|
||||
logPath, err := LogPath("abc12345")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.MkdirAll(filepath.Dir(logPath), 0755))
|
||||
require.NoError(t, os.WriteFile(logPath, []byte(logContent), 0644))
|
||||
|
||||
ctx := context.Background()
|
||||
reader, err := manager.Logs(ctx, "abc12345", false)
|
||||
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = reader.Close() }()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := reader.Read(buf)
|
||||
assert.Equal(t, logContent, string(buf[:n]))
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Logs_Bad_NotFound(t *testing.T) {
|
||||
manager, _, _ := newTestManager(t)
|
||||
|
||||
ctx := context.Background()
|
||||
_, err := manager.Logs(ctx, "nonexistent", false)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "container not found")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Logs_Bad_NoLogFile(t *testing.T) {
|
||||
manager, _, _ := newTestManager(t)
|
||||
|
||||
// Use a unique ID that won't have a log file
|
||||
uniqueID, err := GenerateID()
|
||||
require.NoError(t, err)
|
||||
container := &Container{ID: uniqueID}
|
||||
_ = manager.State().Add(container)
|
||||
|
||||
ctx := context.Background()
|
||||
reader, err := manager.Logs(ctx, uniqueID, false)
|
||||
|
||||
// If logs existed somehow, clean up the reader
|
||||
if reader != nil {
|
||||
_ = reader.Close()
|
||||
}
|
||||
|
||||
assert.Error(t, err)
|
||||
if err != nil {
|
||||
assert.Contains(t, err.Error(), "no logs available")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Exec_Bad_NotFound(t *testing.T) {
|
||||
manager, _, _ := newTestManager(t)
|
||||
|
||||
ctx := context.Background()
|
||||
err := manager.Exec(ctx, "nonexistent", []string{"ls"})
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "container not found")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Exec_Bad_NotRunning(t *testing.T) {
|
||||
manager, _, _ := newTestManager(t)
|
||||
|
||||
container := &Container{ID: "abc12345", Status: StatusStopped}
|
||||
_ = manager.State().Add(container)
|
||||
|
||||
ctx := context.Background()
|
||||
err := manager.Exec(ctx, "abc12345", []string{"ls"})
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not running")
|
||||
}
|
||||
|
||||
func TestDetectImageFormat_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
path string
|
||||
format ImageFormat
|
||||
}{
|
||||
{"/path/to/image.iso", FormatISO},
|
||||
{"/path/to/image.ISO", FormatISO},
|
||||
{"/path/to/image.qcow2", FormatQCOW2},
|
||||
{"/path/to/image.QCOW2", FormatQCOW2},
|
||||
{"/path/to/image.vmdk", FormatVMDK},
|
||||
{"/path/to/image.raw", FormatRaw},
|
||||
{"/path/to/image.img", FormatRaw},
|
||||
{"image.iso", FormatISO},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
assert.Equal(t, tt.format, DetectImageFormat(tt.path))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectImageFormat_Bad_Unknown(t *testing.T) {
|
||||
tests := []string{
|
||||
"/path/to/image.txt",
|
||||
"/path/to/image",
|
||||
"noextension",
|
||||
"/path/to/image.docx",
|
||||
}
|
||||
|
||||
for _, path := range tests {
|
||||
t.Run(path, func(t *testing.T) {
|
||||
assert.Equal(t, FormatUnknown, DetectImageFormat(path))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQemuHypervisor_Name_Good(t *testing.T) {
|
||||
q := NewQemuHypervisor()
|
||||
assert.Equal(t, "qemu", q.Name())
|
||||
}
|
||||
|
||||
func TestQemuHypervisor_BuildCommand_Good(t *testing.T) {
|
||||
q := NewQemuHypervisor()
|
||||
|
||||
ctx := context.Background()
|
||||
opts := &HypervisorOptions{
|
||||
Memory: 2048,
|
||||
CPUs: 4,
|
||||
SSHPort: 2222,
|
||||
Ports: map[int]int{8080: 80},
|
||||
Detach: true,
|
||||
}
|
||||
|
||||
cmd, err := q.BuildCommand(ctx, "/path/to/image.iso", opts)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cmd)
|
||||
|
||||
// Check command path
|
||||
assert.Contains(t, cmd.Path, "qemu")
|
||||
|
||||
// Check that args contain expected values
|
||||
args := cmd.Args
|
||||
assert.Contains(t, args, "-m")
|
||||
assert.Contains(t, args, "2048")
|
||||
assert.Contains(t, args, "-smp")
|
||||
assert.Contains(t, args, "4")
|
||||
assert.Contains(t, args, "-nographic")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Logs_Good_Follow(t *testing.T) {
|
||||
manager, _, _ := newTestManager(t)
|
||||
|
||||
// Create a unique container ID
|
||||
uniqueID, err := GenerateID()
|
||||
require.NoError(t, err)
|
||||
container := &Container{ID: uniqueID}
|
||||
_ = manager.State().Add(container)
|
||||
|
||||
// Create a log file at the expected location
|
||||
logPath, err := LogPath(uniqueID)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.MkdirAll(filepath.Dir(logPath), 0755))
|
||||
|
||||
// Write initial content
|
||||
err = os.WriteFile(logPath, []byte("initial log content\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a cancellable context
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// Get the follow reader
|
||||
reader, err := manager.Logs(ctx, uniqueID, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Cancel the context to stop the follow
|
||||
cancel()
|
||||
|
||||
// Read should return EOF after context cancellation
|
||||
buf := make([]byte, 1024)
|
||||
_, readErr := reader.Read(buf)
|
||||
// After context cancel, Read should return EOF
|
||||
assert.Equal(t, "EOF", readErr.Error())
|
||||
|
||||
// Close the reader
|
||||
assert.NoError(t, reader.Close())
|
||||
}
|
||||
|
||||
func TestFollowReader_Read_Good_WithData(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
logPath := filepath.Join(tmpDir, "test.log")
|
||||
|
||||
// Create log file with content
|
||||
content := "test log line 1\ntest log line 2\n"
|
||||
err := os.WriteFile(logPath, []byte(content), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
reader, err := newFollowReader(ctx, io.Local, logPath)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = reader.Close() }()
|
||||
|
||||
// The followReader does not seek, so the initial content is readable as well;
// append more content to exercise the polling path
|
||||
f, err := os.OpenFile(logPath, os.O_APPEND|os.O_WRONLY, 0644)
|
||||
require.NoError(t, err)
|
||||
_, err = f.WriteString("new line\n")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.Close())
|
||||
|
||||
// Give the reader time to poll
|
||||
time.Sleep(150 * time.Millisecond)
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
n, err := reader.Read(buf)
|
||||
if err == nil {
|
||||
assert.Greater(t, n, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowReader_Read_Good_ContextCancel(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
logPath := filepath.Join(tmpDir, "test.log")
|
||||
|
||||
// Create log file
|
||||
err := os.WriteFile(logPath, []byte("initial content\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
reader, err := newFollowReader(ctx, io.Local, logPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Cancel the context
|
||||
cancel()
|
||||
|
||||
// Read should return EOF
|
||||
buf := make([]byte, 1024)
|
||||
_, readErr := reader.Read(buf)
|
||||
assert.Equal(t, "EOF", readErr.Error())
|
||||
|
||||
_ = reader.Close()
|
||||
}
|
||||
|
||||
func TestFollowReader_Close_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
logPath := filepath.Join(tmpDir, "test.log")
|
||||
|
||||
err := os.WriteFile(logPath, []byte("content\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
reader, err := newFollowReader(ctx, io.Local, logPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = reader.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Reading after close should fail or return EOF
|
||||
buf := make([]byte, 1024)
|
||||
_, readErr := reader.Read(buf)
|
||||
assert.Error(t, readErr)
|
||||
}
|
||||
|
||||
func TestNewFollowReader_Bad_FileNotFound(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, err := newFollowReader(ctx, io.Local, "/nonexistent/path/to/file.log")
|
||||
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Bad_BuildCommandError(t *testing.T) {
|
||||
manager, mock, tmpDir := newTestManager(t)
|
||||
|
||||
// Create a test image file
|
||||
imagePath := filepath.Join(tmpDir, "test.iso")
|
||||
err := os.WriteFile(imagePath, []byte("fake image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Configure mock to return an error
|
||||
mock.buildErr = assert.AnError
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{Detach: true}
|
||||
|
||||
_, err = manager.Run(ctx, imagePath, opts)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to build hypervisor command")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Good_Foreground(t *testing.T) {
|
||||
manager, mock, tmpDir := newTestManager(t)
|
||||
|
||||
// Create a test image file
|
||||
imagePath := filepath.Join(tmpDir, "test.iso")
|
||||
err := os.WriteFile(imagePath, []byte("fake image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use echo which exits quickly
|
||||
mock.commandToRun = "echo"
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{
|
||||
Name: "test-foreground",
|
||||
Detach: false, // Run in foreground
|
||||
Memory: 512,
|
||||
CPUs: 1,
|
||||
}
|
||||
|
||||
container, err := manager.Run(ctx, imagePath, opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotEmpty(t, container.ID)
|
||||
assert.Equal(t, "test-foreground", container.Name)
|
||||
// Foreground process should have completed
|
||||
assert.Equal(t, StatusStopped, container.Status)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Stop_Good_ContextCancelled(t *testing.T) {
|
||||
manager, mock, tmpDir := newTestManager(t)
|
||||
|
||||
// Create a test image file
|
||||
imagePath := filepath.Join(tmpDir, "test.iso")
|
||||
err := os.WriteFile(imagePath, []byte("fake image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use a command that takes a long time
|
||||
mock.commandToRun = "sleep"
|
||||
|
||||
// Start a container
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{
|
||||
Name: "test-cancel",
|
||||
Detach: true,
|
||||
}
|
||||
|
||||
container, err := manager.Run(ctx, imagePath, opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Ensure cleanup happens regardless of test outcome
|
||||
t.Cleanup(func() {
|
||||
_ = manager.Stop(context.Background(), container.ID)
|
||||
})
|
||||
|
||||
// Create a context that's already cancelled
|
||||
cancelCtx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
|
||||
// Stop with cancelled context
|
||||
err = manager.Stop(cancelCtx, container.ID)
|
||||
// Should return context error
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, context.Canceled, err)
|
||||
}
|
||||
|
||||
func TestIsProcessRunning_Good_ExistingProcess(t *testing.T) {
|
||||
// Use our own PID which definitely exists
|
||||
running := isProcessRunning(os.Getpid())
|
||||
assert.True(t, running)
|
||||
}
|
||||
|
||||
func TestIsProcessRunning_Bad_NonexistentProcess(t *testing.T) {
|
||||
// Use a PID that almost certainly doesn't exist
|
||||
running := isProcessRunning(999999)
|
||||
assert.False(t, running)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Good_WithPortsAndVolumes(t *testing.T) {
|
||||
manager, mock, tmpDir := newTestManager(t)
|
||||
|
||||
imagePath := filepath.Join(tmpDir, "test.iso")
|
||||
err := os.WriteFile(imagePath, []byte("fake image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{
|
||||
Name: "test-ports",
|
||||
Detach: true,
|
||||
Memory: 512,
|
||||
CPUs: 1,
|
||||
SSHPort: 2223,
|
||||
Ports: map[int]int{8080: 80, 443: 443},
|
||||
Volumes: map[string]string{"/host/data": "/container/data"},
|
||||
}
|
||||
|
||||
container, err := manager.Run(ctx, imagePath, opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotEmpty(t, container.ID)
|
||||
assert.Equal(t, map[int]int{8080: 80, 443: 443}, container.Ports)
|
||||
assert.Equal(t, 2223, mock.lastOpts.SSHPort)
|
||||
assert.Equal(t, map[string]string{"/host/data": "/container/data"}, mock.lastOpts.Volumes)
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
func TestFollowReader_Read_Bad_ReaderError(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
logPath := filepath.Join(tmpDir, "test.log")
|
||||
|
||||
// Create log file
|
||||
err := os.WriteFile(logPath, []byte("content\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
reader, err := newFollowReader(ctx, io.Local, logPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Close the underlying file to cause read errors
|
||||
_ = reader.file.Close()
|
||||
|
||||
// Read should return an error
|
||||
buf := make([]byte, 1024)
|
||||
_, readErr := reader.Read(buf)
|
||||
assert.Error(t, readErr)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Bad_StartError(t *testing.T) {
|
||||
manager, mock, tmpDir := newTestManager(t)
|
||||
|
||||
imagePath := filepath.Join(tmpDir, "test.iso")
|
||||
err := os.WriteFile(imagePath, []byte("fake image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use a command that doesn't exist to cause Start() to fail
|
||||
mock.commandToRun = "/nonexistent/command/that/does/not/exist"
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{
|
||||
Name: "test-start-error",
|
||||
Detach: true,
|
||||
}
|
||||
|
||||
_, err = manager.Run(ctx, imagePath, opts)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to start VM")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Bad_ForegroundStartError(t *testing.T) {
|
||||
manager, mock, tmpDir := newTestManager(t)
|
||||
|
||||
imagePath := filepath.Join(tmpDir, "test.iso")
|
||||
err := os.WriteFile(imagePath, []byte("fake image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use a command that doesn't exist to cause Start() to fail
|
||||
mock.commandToRun = "/nonexistent/command/that/does/not/exist"
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{
|
||||
Name: "test-foreground-error",
|
||||
Detach: false,
|
||||
}
|
||||
|
||||
_, err = manager.Run(ctx, imagePath, opts)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to start VM")
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Run_Good_ForegroundWithError(t *testing.T) {
|
||||
manager, mock, tmpDir := newTestManager(t)
|
||||
|
||||
imagePath := filepath.Join(tmpDir, "test.iso")
|
||||
err := os.WriteFile(imagePath, []byte("fake image"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use a command that exits with error
|
||||
mock.commandToRun = "false" // false command exits with code 1
|
||||
|
||||
ctx := context.Background()
|
||||
opts := RunOptions{
|
||||
Name: "test-foreground-exit-error",
|
||||
Detach: false,
|
||||
}
|
||||
|
||||
container, err := manager.Run(ctx, imagePath, opts)
|
||||
require.NoError(t, err) // Run itself should succeed
|
||||
|
||||
// Container should be in error state since process exited with error
|
||||
assert.Equal(t, StatusError, container.Status)
|
||||
}
|
||||
|
||||
func TestLinuxKitManager_Stop_Good_ProcessExitedWhileRunning(t *testing.T) {
|
||||
manager, _, _ := newTestManager(t)
|
||||
|
||||
// Add a "running" container with a process that has already exited
|
||||
// This simulates the race condition where process exits between status check
|
||||
// and signal send
|
||||
container := &Container{
|
||||
ID: "test1234",
|
||||
Status: StatusRunning,
|
||||
PID: 999999, // Non-existent PID
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
_ = manager.State().Add(container)
|
||||
|
||||
ctx := context.Background()
|
||||
err := manager.Stop(ctx, "test1234")
|
||||
|
||||
// Stop should succeed gracefully
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Container should be stopped
|
||||
c, ok := manager.State().Get("test1234")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, StatusStopped, c.Status)
|
||||
}
|
||||
container/state.go (new file, 172 lines)
@@ -0,0 +1,172 @@
package container
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// State manages persistent container state.
|
||||
type State struct {
	// Containers is a map of container ID to Container.
	Containers map[string]*Container `json:"containers"`

	mu       sync.RWMutex
	medium   io.Medium
	filePath string
}
|
||||
|
||||
// DefaultStateDir returns the default directory for state files (~/.core).
|
||||
func DefaultStateDir() (string, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(home, ".core"), nil
|
||||
}
|
||||
|
||||
// DefaultStatePath returns the default path for the state file.
|
||||
func DefaultStatePath() (string, error) {
|
||||
dir, err := DefaultStateDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(dir, "containers.json"), nil
|
||||
}
|
||||
|
||||
// DefaultLogsDir returns the default directory for container logs.
|
||||
func DefaultLogsDir() (string, error) {
|
||||
dir, err := DefaultStateDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(dir, "logs"), nil
|
||||
}
|
||||
|
||||
// NewState creates a new State instance backed by the given storage medium.
func NewState(m io.Medium, filePath string) *State {
	return &State{
		Containers: make(map[string]*Container),
		medium:     m,
		filePath:   filePath,
	}
}
|
||||
|
||||
// LoadState loads the state from the given file path using the provided medium.
// If the file doesn't exist, returns an empty state.
func LoadState(m io.Medium, filePath string) (*State, error) {
	state := NewState(m, filePath)

	dataStr, err := m.Read(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			return state, nil
		}
		return nil, err
	}

	if err := json.Unmarshal([]byte(dataStr), state); err != nil {
		return nil, err
	}

	return state, nil
}
|
||||
|
||||
// SaveState persists the state to the configured file path.
func (s *State) SaveState() error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// Ensure the directory exists
	dir := filepath.Dir(s.filePath)
	if err := s.medium.EnsureDir(dir); err != nil {
		return err
	}

	data, err := json.MarshalIndent(s, "", " ")
	if err != nil {
		return err
	}

	return s.medium.Write(s.filePath, string(data))
}
|
||||
|
||||
// Add adds a container to the state and persists it.
|
||||
func (s *State) Add(c *Container) error {
|
||||
s.mu.Lock()
|
||||
s.Containers[c.ID] = c
|
||||
s.mu.Unlock()
|
||||
|
||||
return s.SaveState()
|
||||
}
|
||||
|
||||
// Get retrieves a copy of a container by ID.
|
||||
// Returns a copy to prevent data races when the container is modified.
|
||||
func (s *State) Get(id string) (*Container, bool) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
c, ok := s.Containers[id]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
// Return a copy to prevent data races
|
||||
copy := *c
return &copy, true
|
||||
}
|
||||
|
||||
// Update updates a container in the state and persists it.
|
||||
func (s *State) Update(c *Container) error {
|
||||
s.mu.Lock()
|
||||
s.Containers[c.ID] = c
|
||||
s.mu.Unlock()
|
||||
|
||||
return s.SaveState()
|
||||
}
|
||||
|
||||
// Remove removes a container from the state and persists it.
|
||||
func (s *State) Remove(id string) error {
|
||||
s.mu.Lock()
|
||||
delete(s.Containers, id)
|
||||
s.mu.Unlock()
|
||||
|
||||
return s.SaveState()
|
||||
}
|
||||
|
||||
// All returns copies of all containers in the state.
|
||||
// Returns copies to prevent data races when containers are modified.
|
||||
func (s *State) All() []*Container {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
containers := make([]*Container, 0, len(s.Containers))
|
||||
for _, c := range s.Containers {
|
||||
copy := *c
containers = append(containers, &copy)
|
||||
}
|
||||
return containers
|
||||
}
|
||||
|
||||
// FilePath returns the path to the state file.
|
||||
func (s *State) FilePath() string {
|
||||
return s.filePath
|
||||
}
|
||||
|
||||
// LogPath returns the log file path for a given container ID.
|
||||
func LogPath(id string) (string, error) {
|
||||
logsDir, err := DefaultLogsDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(logsDir, id+".log"), nil
|
||||
}
|
||||
|
||||
// EnsureLogsDir ensures the logs directory exists on the given medium.
func EnsureLogsDir(m io.Medium) error {
	logsDir, err := DefaultLogsDir()
	if err != nil {
		return err
	}
	return m.EnsureDir(logsDir)
}
|
||||
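// trackContainer is an illustrative sketch (the helper name is a placeholder,
// not part of the package API) of the typical State lifecycle: load, add,
// update, and remove a container, persisting to disk after each mutation. It
// uses only identifiers already defined in this package and assumes the
// medium-based LoadState signature used by the tests.
func trackContainer(statePath string, c *Container) error {
	state, err := LoadState(io.Local, statePath)
	if err != nil {
		return err
	}

	// Add persists immediately via SaveState.
	if err := state.Add(c); err != nil {
		return err
	}

	// Get returns a copy; mutate it and write it back with Update.
	if stored, ok := state.Get(c.ID); ok {
		stored.Status = StatusStopped
		if err := state.Update(stored); err != nil {
			return err
		}
	}

	return state.Remove(c.ID)
}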
container/state_test.go (new file, 223 lines)
@@ -0,0 +1,223 @@
package container
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewState_Good(t *testing.T) {
|
||||
state := NewState(io.Local, "/tmp/test-state.json")
|
||||
|
||||
assert.NotNil(t, state)
|
||||
assert.NotNil(t, state.Containers)
|
||||
assert.Equal(t, "/tmp/test-state.json", state.FilePath())
|
||||
}
|
||||
|
||||
func TestLoadState_Good_NewFile(t *testing.T) {
|
||||
// Test loading from non-existent file
|
||||
tmpDir := t.TempDir()
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
|
||||
state, err := LoadState(io.Local, statePath)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, state)
|
||||
assert.Empty(t, state.Containers)
|
||||
}
|
||||
|
||||
func TestLoadState_Good_ExistingFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
|
||||
// Create a state file with data
|
||||
content := `{
|
||||
"containers": {
|
||||
"abc12345": {
|
||||
"id": "abc12345",
|
||||
"name": "test-container",
|
||||
"image": "/path/to/image.iso",
|
||||
"status": "running",
|
||||
"pid": 12345,
|
||||
"started_at": "2024-01-01T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}`
|
||||
err := os.WriteFile(statePath, []byte(content), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
state, err := LoadState(io.Local, statePath)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, state.Containers, 1)
|
||||
|
||||
c, ok := state.Get("abc12345")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "test-container", c.Name)
|
||||
assert.Equal(t, StatusRunning, c.Status)
|
||||
}
|
||||
|
||||
func TestLoadState_Bad_InvalidJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
|
||||
// Create invalid JSON
|
||||
err := os.WriteFile(statePath, []byte("invalid json{"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = LoadState(io.Local, statePath)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestState_Add_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
state := NewState(io.Local, statePath)
|
||||
|
||||
container := &Container{
|
||||
ID: "abc12345",
|
||||
Name: "test",
|
||||
Image: "/path/to/image.iso",
|
||||
Status: StatusRunning,
|
||||
PID: 12345,
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
|
||||
err := state.Add(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify it's in memory
|
||||
c, ok := state.Get("abc12345")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, container.Name, c.Name)
|
||||
|
||||
// Verify file was created
|
||||
_, err = os.Stat(statePath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestState_Update_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
state := NewState(io.Local, statePath)
|
||||
|
||||
container := &Container{
|
||||
ID: "abc12345",
|
||||
Status: StatusRunning,
|
||||
}
|
||||
_ = state.Add(container)
|
||||
|
||||
// Update status
|
||||
container.Status = StatusStopped
|
||||
err := state.Update(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify update
|
||||
c, ok := state.Get("abc12345")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, StatusStopped, c.Status)
|
||||
}
|
||||
|
||||
func TestState_Remove_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
state := NewState(io.Local, statePath)
|
||||
|
||||
container := &Container{
|
||||
ID: "abc12345",
|
||||
}
|
||||
_ = state.Add(container)
|
||||
|
||||
err := state.Remove("abc12345")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, ok := state.Get("abc12345")
|
||||
assert.False(t, ok)
|
||||
}
|
||||
|
||||
func TestState_Get_Bad_NotFound(t *testing.T) {
|
||||
state := NewState(io.Local, "/tmp/test-state.json")
|
||||
|
||||
_, ok := state.Get("nonexistent")
|
||||
assert.False(t, ok)
|
||||
}
|
||||
|
||||
func TestState_All_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
statePath := filepath.Join(tmpDir, "containers.json")
|
||||
state := NewState(io.Local, statePath)
|
||||
|
||||
_ = state.Add(&Container{ID: "aaa11111"})
|
||||
_ = state.Add(&Container{ID: "bbb22222"})
|
||||
_ = state.Add(&Container{ID: "ccc33333"})
|
||||
|
||||
all := state.All()
|
||||
assert.Len(t, all, 3)
|
||||
}
|
||||
|
||||
func TestState_SaveState_Good_CreatesDirectory(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
nestedPath := filepath.Join(tmpDir, "nested", "dir", "containers.json")
|
||||
state := NewState(io.Local, nestedPath)
|
||||
|
||||
_ = state.Add(&Container{ID: "abc12345"})
|
||||
|
||||
err := state.SaveState()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify directory was created
|
||||
_, err = os.Stat(filepath.Dir(nestedPath))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestDefaultStateDir_Good(t *testing.T) {
|
||||
dir, err := DefaultStateDir()
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, dir, ".core")
|
||||
}
|
||||
|
||||
func TestDefaultStatePath_Good(t *testing.T) {
|
||||
path, err := DefaultStatePath()
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, path, "containers.json")
|
||||
}
|
||||
|
||||
func TestDefaultLogsDir_Good(t *testing.T) {
|
||||
dir, err := DefaultLogsDir()
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, dir, "logs")
|
||||
}
|
||||
|
||||
func TestLogPath_Good(t *testing.T) {
|
||||
path, err := LogPath("abc12345")
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, path, "abc12345.log")
|
||||
}
|
||||
|
||||
func TestEnsureLogsDir_Good(t *testing.T) {
|
||||
// This test creates real directories - skip in CI if needed
|
||||
err := EnsureLogsDir(io.Local)
|
||||
assert.NoError(t, err)
|
||||
|
||||
logsDir, _ := DefaultLogsDir()
|
||||
_, err = os.Stat(logsDir)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGenerateID_Good(t *testing.T) {
|
||||
id1, err := GenerateID()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, id1, 8)
|
||||
|
||||
id2, err := GenerateID()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, id2, 8)
|
||||
|
||||
// IDs should be different
|
||||
assert.NotEqual(t, id1, id2)
|
||||
}
|
||||
container/templates.go (new file, 301 lines)
@@ -0,0 +1,301 @@
package container
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
//go:embed templates/*.yml
|
||||
var embeddedTemplates embed.FS
|
||||
|
||||
// Template represents a LinuxKit YAML template.
|
||||
type Template struct {
|
||||
// Name is the template identifier (e.g., "core-dev", "server-php").
|
||||
Name string
|
||||
// Description is a human-readable description of the template.
|
||||
Description string
|
||||
// Path is the file path to the template (relative or absolute).
|
||||
Path string
|
||||
}
|
||||
|
||||
// builtinTemplates defines the metadata for embedded templates.
|
||||
var builtinTemplates = []Template{
|
||||
{
|
||||
Name: "core-dev",
|
||||
Description: "Development environment with Go, Node.js, PHP, Docker-in-LinuxKit, and SSH access",
|
||||
Path: "templates/core-dev.yml",
|
||||
},
|
||||
{
|
||||
Name: "server-php",
|
||||
Description: "Production PHP server with FrankenPHP, Caddy reverse proxy, and health checks",
|
||||
Path: "templates/server-php.yml",
|
||||
},
|
||||
}
|
||||
|
||||
// ListTemplates returns all available LinuxKit templates.
|
||||
// It combines embedded templates with any templates found in the user's
|
||||
// .core/linuxkit directory.
|
||||
func ListTemplates() []Template {
|
||||
templates := make([]Template, len(builtinTemplates))
|
||||
copy(templates, builtinTemplates)
|
||||
|
||||
// Check for user templates in .core/linuxkit/
|
||||
userTemplatesDir := getUserTemplatesDir()
|
||||
if userTemplatesDir != "" {
|
||||
userTemplates := scanUserTemplates(userTemplatesDir)
|
||||
templates = append(templates, userTemplates...)
|
||||
}
|
||||
|
||||
return templates
|
||||
}
|
||||
|
||||
// GetTemplate returns the content of a template by name.
|
||||
// It first checks embedded templates, then user templates.
|
||||
func GetTemplate(name string) (string, error) {
|
||||
// Check embedded templates first
|
||||
for _, t := range builtinTemplates {
|
||||
if t.Name == name {
|
||||
content, err := embeddedTemplates.ReadFile(t.Path)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read embedded template %s: %w", name, err)
|
||||
}
|
||||
return string(content), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Check user templates
|
||||
userTemplatesDir := getUserTemplatesDir()
|
||||
if userTemplatesDir != "" {
|
||||
templatePath := filepath.Join(userTemplatesDir, name+".yml")
|
||||
if io.Local.IsFile(templatePath) {
|
||||
content, err := io.Local.Read(templatePath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read user template %s: %w", name, err)
|
||||
}
|
||||
return content, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("template not found: %s", name)
|
||||
}
|
||||
|
||||
// ApplyTemplate applies variable substitution to a template.
|
||||
// It supports two syntaxes:
|
||||
// - ${VAR} - required variable, returns error if not provided
|
||||
// - ${VAR:-default} - variable with default value
|
||||
func ApplyTemplate(name string, vars map[string]string) (string, error) {
|
||||
content, err := GetTemplate(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return ApplyVariables(content, vars)
|
||||
}
|
||||
|
||||
// ApplyVariables applies variable substitution to content string.
|
||||
// It supports two syntaxes:
|
||||
// - ${VAR} - required variable, returns error if not provided
|
||||
// - ${VAR:-default} - variable with default value
|
||||
func ApplyVariables(content string, vars map[string]string) (string, error) {
|
||||
// Pattern for ${VAR:-default} syntax
|
||||
defaultPattern := regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*):-([^}]*)\}`)
|
||||
|
||||
// Pattern for ${VAR} syntax (no default)
|
||||
requiredPattern := regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*)\}`)
|
||||
|
||||
// Track missing required variables
|
||||
var missingVars []string
|
||||
|
||||
// First pass: replace variables with defaults
|
||||
result := defaultPattern.ReplaceAllStringFunc(content, func(match string) string {
|
||||
submatch := defaultPattern.FindStringSubmatch(match)
|
||||
if len(submatch) != 3 {
|
||||
return match
|
||||
}
|
||||
varName := submatch[1]
|
||||
defaultVal := submatch[2]
|
||||
|
||||
if val, ok := vars[varName]; ok {
|
||||
return val
|
||||
}
|
||||
return defaultVal
|
||||
})
|
||||
|
||||
// Second pass: replace required variables and track missing ones
|
||||
result = requiredPattern.ReplaceAllStringFunc(result, func(match string) string {
|
||||
submatch := requiredPattern.FindStringSubmatch(match)
|
||||
if len(submatch) != 2 {
|
||||
return match
|
||||
}
|
||||
varName := submatch[1]
|
||||
|
||||
if val, ok := vars[varName]; ok {
|
||||
return val
|
||||
}
|
||||
missingVars = append(missingVars, varName)
|
||||
return match // Keep original if missing
|
||||
})
|
||||
|
||||
if len(missingVars) > 0 {
|
||||
return "", fmt.Errorf("missing required variables: %s", strings.Join(missingVars, ", "))
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ExtractVariables extracts all variable names from a template.
|
||||
// Returns two slices: required variables and optional variables (with defaults).
|
||||
func ExtractVariables(content string) (required []string, optional map[string]string) {
|
||||
optional = make(map[string]string)
|
||||
requiredSet := make(map[string]bool)
|
||||
|
||||
// Pattern for ${VAR:-default} syntax
|
||||
defaultPattern := regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*):-([^}]*)\}`)
|
||||
|
||||
// Pattern for ${VAR} syntax (no default)
|
||||
requiredPattern := regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*)\}`)
|
||||
|
||||
// Find optional variables with defaults
|
||||
matches := defaultPattern.FindAllStringSubmatch(content, -1)
|
||||
for _, match := range matches {
|
||||
if len(match) == 3 {
|
||||
optional[match[1]] = match[2]
|
||||
}
|
||||
}
|
||||
|
||||
// Find required variables
|
||||
matches = requiredPattern.FindAllStringSubmatch(content, -1)
|
||||
for _, match := range matches {
|
||||
if len(match) == 2 {
|
||||
varName := match[1]
|
||||
// Only add if not already in optional (with default)
|
||||
if _, hasDefault := optional[varName]; !hasDefault {
|
||||
requiredSet[varName] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert set to slice
|
||||
for v := range requiredSet {
|
||||
required = append(required, v)
|
||||
}
|
||||
|
||||
return required, optional
|
||||
}
|
||||
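// renderTemplate is an illustrative sketch (the helper name and variable
// values are placeholders, not part of the package API) showing how
// GetTemplate, ExtractVariables and ApplyVariables fit together: look up a
// template, report which variables it needs, then render it. ApplyTemplate is
// the one-step equivalent of the lookup plus substitution.
func renderTemplate(name string, vars map[string]string) (string, error) {
	content, err := GetTemplate(name)
	if err != nil {
		return "", err
	}

	// required lists ${VAR} names with no default; optional maps
	// ${VAR:-default} names to their default values.
	required, optional := ExtractVariables(content)
	fmt.Printf("template %s needs %v (optional defaults: %v)\n", name, required, optional)

	// ApplyVariables fails if any required variable is still missing,
	// e.g. SSH_KEY for the builtin "core-dev" template.
	return ApplyVariables(content, vars)
}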
|
||||
// getUserTemplatesDir returns the path to user templates directory.
|
||||
// Returns empty string if the directory doesn't exist.
|
||||
func getUserTemplatesDir() string {
|
||||
// Try workspace-relative .core/linuxkit first
|
||||
cwd, err := os.Getwd()
|
||||
if err == nil {
|
||||
wsDir := filepath.Join(cwd, ".core", "linuxkit")
|
||||
if io.Local.IsDir(wsDir) {
|
||||
return wsDir
|
||||
}
|
||||
}
|
||||
|
||||
// Try home directory
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
homeDir := filepath.Join(home, ".core", "linuxkit")
|
||||
if io.Local.IsDir(homeDir) {
|
||||
return homeDir
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// scanUserTemplates scans a directory for .yml template files.
|
||||
func scanUserTemplates(dir string) []Template {
|
||||
var templates []Template
|
||||
|
||||
entries, err := io.Local.List(dir)
|
||||
if err != nil {
|
||||
return templates
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
name := entry.Name()
|
||||
if !strings.HasSuffix(name, ".yml") && !strings.HasSuffix(name, ".yaml") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract template name from filename
|
||||
templateName := strings.TrimSuffix(strings.TrimSuffix(name, ".yml"), ".yaml")
|
||||
|
||||
// Skip if this is a builtin template name (embedded takes precedence)
|
||||
isBuiltin := false
|
||||
for _, bt := range builtinTemplates {
|
||||
if bt.Name == templateName {
|
||||
isBuiltin = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if isBuiltin {
|
||||
continue
|
||||
}
|
||||
|
||||
// Read file to extract description from comments
|
||||
description := extractTemplateDescription(filepath.Join(dir, name))
|
||||
if description == "" {
|
||||
description = "User-defined template"
|
||||
}
|
||||
|
||||
templates = append(templates, Template{
|
||||
Name: templateName,
|
||||
Description: description,
|
||||
Path: filepath.Join(dir, name),
|
||||
})
|
||||
}
|
||||
|
||||
return templates
|
||||
}
|
||||
|
||||
// extractTemplateDescription reads the first comment block from a YAML file
|
||||
// to use as a description.
|
||||
func extractTemplateDescription(path string) string {
|
||||
content, err := io.Local.Read(path)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
lines := strings.Split(content, "\n")
|
||||
var descLines []string
|
||||
|
||||
for _, line := range lines {
|
||||
trimmed := strings.TrimSpace(line)
|
||||
if strings.HasPrefix(trimmed, "#") {
|
||||
// Remove the # and trim
|
||||
comment := strings.TrimSpace(strings.TrimPrefix(trimmed, "#"))
|
||||
if comment != "" {
|
||||
descLines = append(descLines, comment)
|
||||
// Only take the first meaningful comment line as description
|
||||
if len(descLines) == 1 {
|
||||
return comment
|
||||
}
|
||||
}
|
||||
} else if trimmed != "" {
|
||||
// Hit non-comment content, stop
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(descLines) > 0 {
|
||||
return descLines[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
container/templates/core-dev.yml (new file, 121 lines)
@@ -0,0 +1,121 @@
# Core Development Environment Template
|
||||
# A full-featured development environment with multiple runtimes
|
||||
#
|
||||
# Variables:
|
||||
# ${SSH_KEY} - SSH public key for access (required)
|
||||
# ${MEMORY:-2048} - Memory in MB (default: 2048)
|
||||
# ${CPUS:-2} - Number of CPUs (default: 2)
|
||||
# ${HOSTNAME:-core-dev} - Hostname for the VM
|
||||
# ${DATA_SIZE:-10G} - Size of persistent /data volume
|
||||
|
||||
kernel:
|
||||
image: linuxkit/kernel:6.6.13
|
||||
cmdline: "console=tty0 console=ttyS0"
|
||||
|
||||
init:
|
||||
- linuxkit/init:v1.2.0
|
||||
- linuxkit/runc:v1.1.12
|
||||
- linuxkit/containerd:v1.7.13
|
||||
- linuxkit/ca-certificates:v1.0.0
|
||||
|
||||
onboot:
|
||||
- name: sysctl
|
||||
image: linuxkit/sysctl:v1.0.0
|
||||
- name: format
|
||||
image: linuxkit/format:v1.0.0
|
||||
- name: mount
|
||||
image: linuxkit/mount:v1.0.0
|
||||
command: ["/usr/bin/mountie", "/dev/sda1", "/data"]
|
||||
- name: dhcpcd
|
||||
image: linuxkit/dhcpcd:v1.0.0
|
||||
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
|
||||
|
||||
onshutdown:
|
||||
- name: shutdown
|
||||
image: busybox:latest
|
||||
command: ["/bin/echo", "Shutting down..."]
|
||||
|
||||
services:
|
||||
- name: getty
|
||||
image: linuxkit/getty:v1.0.0
|
||||
env:
|
||||
- INSECURE=true
|
||||
|
||||
- name: sshd
|
||||
image: linuxkit/sshd:v1.2.0
|
||||
binds:
|
||||
- /etc/ssh/authorized_keys:/root/.ssh/authorized_keys
|
||||
|
||||
- name: docker
|
||||
image: docker:24.0-dind
|
||||
capabilities:
|
||||
- all
|
||||
net: host
|
||||
pid: host
|
||||
binds:
|
||||
- /var/run:/var/run
|
||||
- /data/docker:/var/lib/docker
|
||||
rootfsPropagation: shared
|
||||
|
||||
- name: dev-tools
|
||||
image: alpine:3.19
|
||||
capabilities:
|
||||
- all
|
||||
net: host
|
||||
binds:
|
||||
- /data:/data
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
# Install development tools
|
||||
apk add --no-cache \
|
||||
git curl wget vim nano htop tmux \
|
||||
build-base gcc musl-dev linux-headers \
|
||||
openssh-client jq yq
|
||||
|
||||
# Install Go 1.22.0
|
||||
wget -q https://go.dev/dl/go1.22.0.linux-amd64.tar.gz
|
||||
tar -C /usr/local -xzf go1.22.0.linux-amd64.tar.gz
|
||||
rm go1.22.0.linux-amd64.tar.gz
|
||||
echo 'export PATH=/usr/local/go/bin:$PATH' >> /etc/profile
|
||||
|
||||
# Install Node.js
|
||||
apk add --no-cache nodejs npm
|
||||
|
||||
# Install PHP
|
||||
apk add --no-cache php82 php82-cli php82-curl php82-json php82-mbstring \
|
||||
php82-openssl php82-pdo php82-pdo_mysql php82-pdo_pgsql php82-phar \
|
||||
php82-session php82-tokenizer php82-xml php82-zip composer
|
||||
|
||||
# Keep container running
|
||||
tail -f /dev/null
|
||||
|
||||
files:
|
||||
- path: /etc/hostname
|
||||
contents: "${HOSTNAME:-core-dev}"
|
||||
- path: /etc/ssh/authorized_keys
|
||||
contents: "${SSH_KEY}"
|
||||
mode: "0600"
|
||||
- path: /etc/profile.d/dev.sh
|
||||
contents: |
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
export GOPATH=/data/go
|
||||
export PATH=$PATH:$GOPATH/bin
|
||||
cd /data
|
||||
mode: "0755"
|
||||
- path: /etc/motd
|
||||
contents: |
|
||||
================================================
|
||||
Core Development Environment
|
||||
|
||||
Runtimes: Go, Node.js, PHP
|
||||
Tools: git, curl, vim, docker
|
||||
|
||||
Data directory: /data (persistent)
|
||||
================================================
|
||||
|
||||
trust:
|
||||
org:
|
||||
- linuxkit
|
||||
- library
|
||||
container/templates/server-php.yml (new file, 142 lines)
@@ -0,0 +1,142 @@
# PHP/FrankenPHP Server Template
|
||||
# A minimal production-ready PHP server with FrankenPHP and Caddy
|
||||
#
|
||||
# Variables:
|
||||
# ${SSH_KEY} - SSH public key for management access (required)
|
||||
# ${MEMORY:-512} - Memory in MB (default: 512)
|
||||
# ${CPUS:-1} - Number of CPUs (default: 1)
|
||||
# ${HOSTNAME:-php-server} - Hostname for the VM
|
||||
# ${APP_NAME:-app} - Application name
|
||||
# ${DOMAIN:-localhost} - Domain for SSL certificates
|
||||
# ${PHP_MEMORY:-128M} - PHP memory limit
|
||||
|
||||
kernel:
|
||||
image: linuxkit/kernel:6.6.13
|
||||
cmdline: "console=tty0 console=ttyS0"
|
||||
|
||||
init:
|
||||
- linuxkit/init:v1.2.0
|
||||
- linuxkit/runc:v1.1.12
|
||||
- linuxkit/containerd:v1.7.13
|
||||
- linuxkit/ca-certificates:v1.0.0
|
||||
|
||||
onboot:
|
||||
- name: sysctl
|
||||
image: linuxkit/sysctl:v1.0.0
|
||||
- name: dhcpcd
|
||||
image: linuxkit/dhcpcd:v1.0.0
|
||||
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
|
||||
|
||||
services:
|
||||
- name: sshd
|
||||
image: linuxkit/sshd:v1.2.0
|
||||
binds:
|
||||
- /etc/ssh/authorized_keys:/root/.ssh/authorized_keys
|
||||
|
||||
- name: frankenphp
|
||||
image: dunglas/frankenphp:latest
|
||||
capabilities:
|
||||
- CAP_NET_BIND_SERVICE
|
||||
net: host
|
||||
binds:
|
||||
- /app:/app
|
||||
- /data:/data
|
||||
- /etc/caddy/Caddyfile:/etc/caddy/Caddyfile
|
||||
env:
|
||||
- SERVER_NAME=${DOMAIN:-localhost}
|
||||
- FRANKENPHP_CONFIG=/etc/caddy/Caddyfile
|
||||
command:
|
||||
- frankenphp
|
||||
- run
|
||||
- --config
|
||||
- /etc/caddy/Caddyfile
|
||||
|
||||
- name: healthcheck
|
||||
image: alpine:3.19
|
||||
net: host
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
apk add --no-cache curl
|
||||
while true; do
|
||||
sleep 30
|
||||
curl -sf http://localhost/health || echo "Health check failed"
|
||||
done
|
||||
|
||||
files:
|
||||
- path: /etc/hostname
|
||||
contents: "${HOSTNAME:-php-server}"
|
||||
- path: /etc/ssh/authorized_keys
|
||||
contents: "${SSH_KEY}"
|
||||
mode: "0600"
|
||||
- path: /etc/caddy/Caddyfile
|
||||
contents: |
|
||||
{
|
||||
frankenphp
|
||||
order php_server before file_server
|
||||
}
|
||||
|
||||
${DOMAIN:-localhost} {
|
||||
root * /app/public
|
||||
|
||||
# Health check endpoint
|
||||
handle /health {
|
||||
respond "OK" 200
|
||||
}
|
||||
|
||||
# PHP handling
|
||||
php_server
|
||||
|
||||
# Encode responses
|
||||
encode zstd gzip
|
||||
|
||||
# Security headers
|
||||
header {
|
||||
X-Content-Type-Options nosniff
|
||||
X-Frame-Options DENY
|
||||
X-XSS-Protection "1; mode=block"
|
||||
Referrer-Policy strict-origin-when-cross-origin
|
||||
}
|
||||
|
||||
# Logging
|
||||
log {
|
||||
output file /data/logs/access.log
|
||||
format json
|
||||
}
|
||||
}
|
||||
mode: "0644"
|
||||
- path: /app/public/index.php
|
||||
contents: |
|
||||
<?php
|
||||
echo "Welcome to ${APP_NAME:-app}";
|
||||
mode: "0644"
|
||||
- path: /app/public/health.php
|
||||
contents: |
|
||||
<?php
|
||||
header('Content-Type: application/json');
|
||||
echo json_encode([
|
||||
'status' => 'healthy',
|
||||
'app' => '${APP_NAME:-app}',
|
||||
'timestamp' => date('c'),
|
||||
'php_version' => PHP_VERSION,
|
||||
]);
|
||||
mode: "0644"
|
||||
- path: /etc/php/php.ini
|
||||
contents: |
|
||||
memory_limit = ${PHP_MEMORY:-128M}
|
||||
max_execution_time = 30
|
||||
upload_max_filesize = 64M
|
||||
post_max_size = 64M
|
||||
display_errors = Off
|
||||
log_errors = On
|
||||
error_log = /data/logs/php_errors.log
|
||||
mode: "0644"
|
||||
- path: /data/logs/.gitkeep
|
||||
contents: ""
|
||||
|
||||
trust:
|
||||
org:
|
||||
- linuxkit
|
||||
- library
|
||||
- dunglas
|
||||
container/templates_test.go (new file, 604 lines)
@@ -0,0 +1,604 @@
package container
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestListTemplates_Good(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
templates := tm.ListTemplates()
|
||||
|
||||
// Should have at least the builtin templates
|
||||
assert.GreaterOrEqual(t, len(templates), 2)
|
||||
|
||||
// Find the core-dev template
|
||||
var found bool
|
||||
for _, tmpl := range templates {
|
||||
if tmpl.Name == "core-dev" {
|
||||
found = true
|
||||
assert.NotEmpty(t, tmpl.Description)
|
||||
assert.NotEmpty(t, tmpl.Path)
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "core-dev template should exist")
|
||||
|
||||
// Find the server-php template
|
||||
found = false
|
||||
for _, tmpl := range templates {
|
||||
if tmpl.Name == "server-php" {
|
||||
found = true
|
||||
assert.NotEmpty(t, tmpl.Description)
|
||||
assert.NotEmpty(t, tmpl.Path)
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "server-php template should exist")
|
||||
}
|
||||
|
||||
func TestGetTemplate_Good_CoreDev(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
content, err := tm.GetTemplate("core-dev")
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, content)
|
||||
assert.Contains(t, content, "kernel:")
|
||||
assert.Contains(t, content, "linuxkit/kernel")
|
||||
assert.Contains(t, content, "${SSH_KEY}")
|
||||
assert.Contains(t, content, "services:")
|
||||
}
|
||||
|
||||
func TestGetTemplate_Good_ServerPhp(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
content, err := tm.GetTemplate("server-php")
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, content)
|
||||
assert.Contains(t, content, "kernel:")
|
||||
assert.Contains(t, content, "frankenphp")
|
||||
assert.Contains(t, content, "${SSH_KEY}")
|
||||
assert.Contains(t, content, "${DOMAIN:-localhost}")
|
||||
}
|
||||
|
||||
func TestGetTemplate_Bad_NotFound(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
_, err := tm.GetTemplate("nonexistent-template")
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "template not found")
|
||||
}
|
||||
|
||||
func TestApplyVariables_Good_SimpleSubstitution(t *testing.T) {
|
||||
content := "Hello ${NAME}, welcome to ${PLACE}!"
|
||||
vars := map[string]string{
|
||||
"NAME": "World",
|
||||
"PLACE": "Core",
|
||||
}
|
||||
|
||||
result, err := ApplyVariables(content, vars)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "Hello World, welcome to Core!", result)
|
||||
}
|
||||
|
||||
func TestApplyVariables_Good_WithDefaults(t *testing.T) {
|
||||
content := "Memory: ${MEMORY:-1024}MB, CPUs: ${CPUS:-2}"
|
||||
vars := map[string]string{
|
||||
"MEMORY": "2048",
|
||||
// CPUS not provided, should use default
|
||||
}
|
||||
|
||||
result, err := ApplyVariables(content, vars)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "Memory: 2048MB, CPUs: 2", result)
|
||||
}
|
||||
|
||||
func TestApplyVariables_Good_AllDefaults(t *testing.T) {
|
||||
content := "${HOST:-localhost}:${PORT:-8080}"
|
||||
vars := map[string]string{} // No vars provided
|
||||
|
||||
result, err := ApplyVariables(content, vars)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "localhost:8080", result)
|
||||
}
|
||||
|
||||
func TestApplyVariables_Good_MixedSyntax(t *testing.T) {
|
||||
content := `
|
||||
hostname: ${HOSTNAME:-myhost}
|
||||
ssh_key: ${SSH_KEY}
|
||||
memory: ${MEMORY:-512}
|
||||
`
|
||||
vars := map[string]string{
|
||||
"SSH_KEY": "ssh-rsa AAAA...",
|
||||
"HOSTNAME": "custom-host",
|
||||
}
|
||||
|
||||
result, err := ApplyVariables(content, vars)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "hostname: custom-host")
|
||||
assert.Contains(t, result, "ssh_key: ssh-rsa AAAA...")
|
||||
assert.Contains(t, result, "memory: 512")
|
||||
}
|
||||
|
||||
func TestApplyVariables_Good_EmptyDefault(t *testing.T) {
|
||||
content := "value: ${OPT:-}"
|
||||
vars := map[string]string{}
|
||||
|
||||
result, err := ApplyVariables(content, vars)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "value: ", result)
|
||||
}
|
||||
|
||||
func TestApplyVariables_Bad_MissingRequired(t *testing.T) {
|
||||
content := "SSH Key: ${SSH_KEY}"
|
||||
vars := map[string]string{} // Missing required SSH_KEY
|
||||
|
||||
_, err := ApplyVariables(content, vars)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "missing required variables")
|
||||
assert.Contains(t, err.Error(), "SSH_KEY")
|
||||
}
|
||||
|
||||
func TestApplyVariables_Bad_MultipleMissing(t *testing.T) {
|
||||
content := "${VAR1} and ${VAR2} and ${VAR3}"
|
||||
vars := map[string]string{
|
||||
"VAR2": "provided",
|
||||
}
|
||||
|
||||
_, err := ApplyVariables(content, vars)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "missing required variables")
|
||||
// Should mention both missing vars
|
||||
errStr := err.Error()
|
||||
assert.True(t, strings.Contains(errStr, "VAR1") || strings.Contains(errStr, "VAR3"))
|
||||
}
|
||||
|
||||
func TestApplyTemplate_Good(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
vars := map[string]string{
|
||||
"SSH_KEY": "ssh-rsa AAAA... user@host",
|
||||
}
|
||||
|
||||
result, err := tm.ApplyTemplate("core-dev", vars)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, result)
|
||||
assert.Contains(t, result, "ssh-rsa AAAA... user@host")
|
||||
// Default values should be applied
|
||||
assert.Contains(t, result, "core-dev") // HOSTNAME default
|
||||
}
|
||||
|
||||
func TestApplyTemplate_Bad_TemplateNotFound(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
vars := map[string]string{
|
||||
"SSH_KEY": "test",
|
||||
}
|
||||
|
||||
_, err := tm.ApplyTemplate("nonexistent", vars)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "template not found")
|
||||
}
|
||||
|
||||
func TestApplyTemplate_Bad_MissingVariable(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
// server-php requires SSH_KEY
|
||||
vars := map[string]string{} // Missing required SSH_KEY
|
||||
|
||||
_, err := tm.ApplyTemplate("server-php", vars)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "missing required variables")
|
||||
}
|
||||
|
||||
func TestExtractVariables_Good(t *testing.T) {
|
||||
content := `
|
||||
hostname: ${HOSTNAME:-myhost}
|
||||
ssh_key: ${SSH_KEY}
|
||||
memory: ${MEMORY:-1024}
|
||||
cpus: ${CPUS:-2}
|
||||
api_key: ${API_KEY}
|
||||
`
|
||||
required, optional := ExtractVariables(content)
|
||||
|
||||
// Required variables (no default)
|
||||
assert.Contains(t, required, "SSH_KEY")
|
||||
assert.Contains(t, required, "API_KEY")
|
||||
assert.Len(t, required, 2)
|
||||
|
||||
// Optional variables (with defaults)
|
||||
assert.Equal(t, "myhost", optional["HOSTNAME"])
|
||||
assert.Equal(t, "1024", optional["MEMORY"])
|
||||
assert.Equal(t, "2", optional["CPUS"])
|
||||
assert.Len(t, optional, 3)
|
||||
}
|
||||
|
||||
func TestExtractVariables_Good_NoVariables(t *testing.T) {
|
||||
content := "This has no variables at all"
|
||||
|
||||
required, optional := ExtractVariables(content)
|
||||
|
||||
assert.Empty(t, required)
|
||||
assert.Empty(t, optional)
|
||||
}
|
||||
|
||||
func TestExtractVariables_Good_OnlyDefaults(t *testing.T) {
|
||||
content := "${A:-default1} ${B:-default2}"
|
||||
|
||||
required, optional := ExtractVariables(content)
|
||||
|
||||
assert.Empty(t, required)
|
||||
assert.Len(t, optional, 2)
|
||||
assert.Equal(t, "default1", optional["A"])
|
||||
assert.Equal(t, "default2", optional["B"])
|
||||
}
|
||||
|
||||
func TestScanUserTemplates_Good(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
// Create a temporary directory with template files
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create a valid template file
|
||||
templateContent := `# My Custom Template
|
||||
# A custom template for testing
|
||||
kernel:
|
||||
image: linuxkit/kernel:6.6
|
||||
`
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "custom.yml"), []byte(templateContent), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a non-template file (should be ignored)
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "readme.txt"), []byte("Not a template"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
templates := tm.scanUserTemplates(tmpDir)
|
||||
|
||||
assert.Len(t, templates, 1)
|
||||
assert.Equal(t, "custom", templates[0].Name)
|
||||
assert.Equal(t, "My Custom Template", templates[0].Description)
|
||||
}
|
||||
|
||||
func TestScanUserTemplates_Good_MultipleTemplates(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create multiple template files
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "web.yml"), []byte("# Web Server\nkernel:"), 0644)
|
||||
require.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "db.yaml"), []byte("# Database Server\nkernel:"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
templates := tm.scanUserTemplates(tmpDir)
|
||||
|
||||
assert.Len(t, templates, 2)
|
||||
|
||||
// Check names are extracted correctly
|
||||
names := make(map[string]bool)
|
||||
for _, tmpl := range templates {
|
||||
names[tmpl.Name] = true
|
||||
}
|
||||
assert.True(t, names["web"])
|
||||
assert.True(t, names["db"])
|
||||
}
|
||||
|
||||
func TestScanUserTemplates_Good_EmptyDirectory(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
templates := tm.scanUserTemplates(tmpDir)
|
||||
|
||||
assert.Empty(t, templates)
|
||||
}
|
||||
|
||||
func TestScanUserTemplates_Bad_NonexistentDirectory(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
templates := tm.scanUserTemplates("/nonexistent/path/to/templates")
|
||||
|
||||
assert.Empty(t, templates)
|
||||
}
|
||||
|
||||
func TestExtractTemplateDescription_Good(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
path := filepath.Join(tmpDir, "test.yml")
|
||||
|
||||
content := `# My Template Description
|
||||
# More details here
|
||||
kernel:
|
||||
image: test
|
||||
`
|
||||
err := os.WriteFile(path, []byte(content), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
desc := tm.extractTemplateDescription(path)
|
||||
|
||||
assert.Equal(t, "My Template Description", desc)
|
||||
}
|
||||
|
||||
func TestExtractTemplateDescription_Good_NoComments(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
path := filepath.Join(tmpDir, "test.yml")
|
||||
|
||||
content := `kernel:
|
||||
image: test
|
||||
`
|
||||
err := os.WriteFile(path, []byte(content), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
desc := tm.extractTemplateDescription(path)
|
||||
|
||||
assert.Empty(t, desc)
|
||||
}
|
||||
|
||||
func TestExtractTemplateDescription_Bad_FileNotFound(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
desc := tm.extractTemplateDescription("/nonexistent/file.yml")
|
||||
|
||||
assert.Empty(t, desc)
|
||||
}
|
||||
|
||||
func TestVariablePatternEdgeCases_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
content string
|
||||
vars map[string]string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "underscore in name",
|
||||
content: "${MY_VAR:-default}",
|
||||
vars: map[string]string{"MY_VAR": "value"},
|
||||
expected: "value",
|
||||
},
|
||||
{
|
||||
name: "numbers in name",
|
||||
content: "${VAR123:-default}",
|
||||
vars: map[string]string{},
|
||||
expected: "default",
|
||||
},
|
||||
{
|
||||
name: "default with special chars",
|
||||
content: "${URL:-http://localhost:8080}",
|
||||
vars: map[string]string{},
|
||||
expected: "http://localhost:8080",
|
||||
},
|
||||
{
|
||||
name: "default with path",
|
||||
content: "${PATH:-/usr/local/bin}",
|
||||
vars: map[string]string{},
|
||||
expected: "/usr/local/bin",
|
||||
},
|
||||
{
|
||||
name: "adjacent variables",
|
||||
content: "${A:-a}${B:-b}${C:-c}",
|
||||
vars: map[string]string{"B": "X"},
|
||||
expected: "aXc",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := ApplyVariables(tt.content, tt.vars)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestListTemplates_Good_WithUserTemplates(t *testing.T) {
|
||||
// Create a workspace directory with user templates
|
||||
tmpDir := t.TempDir()
|
||||
coreDir := filepath.Join(tmpDir, ".core", "linuxkit")
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a user template
|
||||
templateContent := `# Custom user template
|
||||
kernel:
|
||||
image: linuxkit/kernel:6.6
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(coreDir, "user-custom.yml"), []byte(templateContent), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
tm := NewTemplateManager(io.Local).WithWorkingDir(tmpDir)
|
||||
templates := tm.ListTemplates()
|
||||
|
||||
// Should have at least the builtin templates plus the user template
|
||||
assert.GreaterOrEqual(t, len(templates), 3)
|
||||
|
||||
// Check that user template is included
|
||||
found := false
|
||||
for _, tmpl := range templates {
|
||||
if tmpl.Name == "user-custom" {
|
||||
found = true
|
||||
assert.Equal(t, "Custom user template", tmpl.Description)
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "user-custom template should exist")
|
||||
}
|
||||
|
||||
func TestGetTemplate_Good_UserTemplate(t *testing.T) {
|
||||
// Create a workspace directory with user templates
|
||||
tmpDir := t.TempDir()
|
||||
coreDir := filepath.Join(tmpDir, ".core", "linuxkit")
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a user template
|
||||
templateContent := `# My user template
|
||||
kernel:
|
||||
image: linuxkit/kernel:6.6
|
||||
services:
|
||||
- name: test
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(coreDir, "my-user-template.yml"), []byte(templateContent), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
tm := NewTemplateManager(io.Local).WithWorkingDir(tmpDir)
|
||||
content, err := tm.GetTemplate("my-user-template")
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, content, "kernel:")
|
||||
assert.Contains(t, content, "My user template")
|
||||
}
|
||||
|
||||
func TestGetTemplate_Good_UserTemplate_YamlExtension(t *testing.T) {
|
||||
// Create a workspace directory with user templates
|
||||
tmpDir := t.TempDir()
|
||||
coreDir := filepath.Join(tmpDir, ".core", "linuxkit")
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a user template with .yaml extension
|
||||
templateContent := `# My yaml template
|
||||
kernel:
|
||||
image: linuxkit/kernel:6.6
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(coreDir, "my-yaml-template.yaml"), []byte(templateContent), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
tm := NewTemplateManager(io.Local).WithWorkingDir(tmpDir)
|
||||
content, err := tm.GetTemplate("my-yaml-template")
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, content, "kernel:")
|
||||
assert.Contains(t, content, "My yaml template")
|
||||
}
|
||||
|
||||
func TestScanUserTemplates_Good_SkipsBuiltinNames(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create a template with a builtin name (should be skipped)
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "core-dev.yml"), []byte("# Duplicate\nkernel:"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a unique template
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "unique.yml"), []byte("# Unique\nkernel:"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
templates := tm.scanUserTemplates(tmpDir)
|
||||
|
||||
// Should only have the unique template, not the builtin name
|
||||
assert.Len(t, templates, 1)
|
||||
assert.Equal(t, "unique", templates[0].Name)
|
||||
}
|
||||
|
||||
func TestScanUserTemplates_Good_SkipsDirectories(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create a subdirectory (should be skipped)
|
||||
err := os.MkdirAll(filepath.Join(tmpDir, "subdir"), 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a valid template
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "valid.yml"), []byte("# Valid\nkernel:"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
templates := tm.scanUserTemplates(tmpDir)
|
||||
|
||||
assert.Len(t, templates, 1)
|
||||
assert.Equal(t, "valid", templates[0].Name)
|
||||
}
|
||||
|
||||
func TestScanUserTemplates_Good_YamlExtension(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create templates with both extensions
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "template1.yml"), []byte("# Template 1\nkernel:"), 0644)
|
||||
require.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "template2.yaml"), []byte("# Template 2\nkernel:"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
templates := tm.scanUserTemplates(tmpDir)
|
||||
|
||||
assert.Len(t, templates, 2)
|
||||
|
||||
names := make(map[string]bool)
|
||||
for _, tmpl := range templates {
|
||||
names[tmpl.Name] = true
|
||||
}
|
||||
assert.True(t, names["template1"])
|
||||
assert.True(t, names["template2"])
|
||||
}
|
||||
|
||||
func TestExtractTemplateDescription_Good_EmptyComment(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
path := filepath.Join(tmpDir, "test.yml")
|
||||
|
||||
// First comment is empty, second has content
|
||||
content := `#
|
||||
# Actual description here
|
||||
kernel:
|
||||
image: test
|
||||
`
|
||||
err := os.WriteFile(path, []byte(content), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
desc := tm.extractTemplateDescription(path)
|
||||
|
||||
assert.Equal(t, "Actual description here", desc)
|
||||
}
|
||||
|
||||
func TestExtractTemplateDescription_Good_MultipleEmptyComments(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
path := filepath.Join(tmpDir, "test.yml")
|
||||
|
||||
// Multiple empty comments before actual content
|
||||
content := `#
|
||||
#
|
||||
#
|
||||
# Real description
|
||||
kernel:
|
||||
image: test
|
||||
`
|
||||
err := os.WriteFile(path, []byte(content), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
desc := tm.extractTemplateDescription(path)
|
||||
|
||||
assert.Equal(t, "Real description", desc)
|
||||
}
|
||||
|
||||
func TestGetUserTemplatesDir_Good_NoDirectory(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local).WithWorkingDir("/tmp/nonexistent-wd").WithHomeDir("/tmp/nonexistent-home")
|
||||
dir := tm.getUserTemplatesDir()
|
||||
|
||||
assert.Empty(t, dir)
|
||||
}
|
||||
|
||||
func TestScanUserTemplates_Good_DefaultDescription(t *testing.T) {
|
||||
tm := NewTemplateManager(io.Local)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create a template without comments
|
||||
content := `kernel:
|
||||
image: test
|
||||
`
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "nocomment.yml"), []byte(content), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
templates := tm.scanUserTemplates(tmpDir)
|
||||
|
||||
assert.Len(t, templates, 1)
|
||||
assert.Equal(t, "User-defined template", templates[0].Description)
|
||||
}
|
||||
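These tests pin down the substitution contract — ${NAME} is required, ${NAME:-default} is optional, and unresolved required variables produce a "missing required variables" error — but the implementation itself is not part of this hunk. A minimal regex-based sketch that satisfies the behaviour above; the shipped ApplyVariables may well differ:

// Minimal sketch of the substitution contract the tests above describe.
// This is not the container package's implementation, only a consistent one.
package sketch

import (
	"fmt"
	"regexp"
	"sort"
	"strings"
)

var varPattern = regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*)(?::-([^}]*))?\}`)

// ApplyVariables substitutes ${NAME} and ${NAME:-default} placeholders,
// returning an error when a variable with no default is not provided.
func ApplyVariables(content string, vars map[string]string) (string, error) {
	var missing []string
	out := varPattern.ReplaceAllStringFunc(content, func(m string) string {
		sub := varPattern.FindStringSubmatch(m)
		name, def := sub[1], sub[2]
		hasDefault := strings.Contains(m, ":-")
		if v, ok := vars[name]; ok {
			return v
		}
		if hasDefault {
			return def
		}
		missing = append(missing, name)
		return m
	})
	if len(missing) > 0 {
		sort.Strings(missing)
		return "", fmt.Errorf("missing required variables: %s", strings.Join(missing, ", "))
	}
	return out, nil
}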
219
deploy/coolify/client.go
Normal file
@@ -0,0 +1,219 @@
package coolify

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"sync"

	"forge.lthn.ai/core/go-devops/deploy/python"
)

// Client wraps the Python CoolifyClient for Go usage.
type Client struct {
	baseURL   string
	apiToken  string
	timeout   int
	verifySSL bool

	mu sync.Mutex
}

// Config holds Coolify client configuration.
type Config struct {
	BaseURL   string
	APIToken  string
	Timeout   int
	VerifySSL bool
}

// DefaultConfig returns default configuration from environment.
func DefaultConfig() Config {
	return Config{
		BaseURL:   os.Getenv("COOLIFY_URL"),
		APIToken:  os.Getenv("COOLIFY_TOKEN"),
		Timeout:   30,
		VerifySSL: true,
	}
}

// NewClient creates a new Coolify client.
func NewClient(cfg Config) (*Client, error) {
	if cfg.BaseURL == "" {
		return nil, fmt.Errorf("COOLIFY_URL not set")
	}
	if cfg.APIToken == "" {
		return nil, fmt.Errorf("COOLIFY_TOKEN not set")
	}

	// Initialize Python runtime
	if err := python.Init(); err != nil {
		return nil, fmt.Errorf("failed to initialize Python: %w", err)
	}

	return &Client{
		baseURL:   cfg.BaseURL,
		apiToken:  cfg.APIToken,
		timeout:   cfg.Timeout,
		verifySSL: cfg.VerifySSL,
	}, nil
}

// Call invokes a Coolify API operation by operationId.
func (c *Client) Call(ctx context.Context, operationID string, params map[string]any) (map[string]any, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if params == nil {
		params = map[string]any{}
	}

	// Generate and run Python script
	script, err := python.CoolifyScript(c.baseURL, c.apiToken, operationID, params)
	if err != nil {
		return nil, fmt.Errorf("failed to generate script: %w", err)
	}
	output, err := python.RunScript(ctx, script)
	if err != nil {
		return nil, fmt.Errorf("API call %s failed: %w", operationID, err)
	}

	// Parse JSON result
	var result map[string]any
	if err := json.Unmarshal([]byte(output), &result); err != nil {
		// Try parsing as array
		var arrResult []any
		if err2 := json.Unmarshal([]byte(output), &arrResult); err2 == nil {
			return map[string]any{"result": arrResult}, nil
		}
		return nil, fmt.Errorf("failed to parse response: %w (output: %s)", err, output)
	}

	return result, nil
}

// ListServers returns all servers.
func (c *Client) ListServers(ctx context.Context) ([]map[string]any, error) {
	result, err := c.Call(ctx, "list-servers", nil)
	if err != nil {
		return nil, err
	}
	return extractArray(result)
}

// GetServer returns a server by UUID.
func (c *Client) GetServer(ctx context.Context, uuid string) (map[string]any, error) {
	return c.Call(ctx, "get-server-by-uuid", map[string]any{"uuid": uuid})
}

// ValidateServer validates a server by UUID.
func (c *Client) ValidateServer(ctx context.Context, uuid string) (map[string]any, error) {
	return c.Call(ctx, "validate-server-by-uuid", map[string]any{"uuid": uuid})
}

// ListProjects returns all projects.
func (c *Client) ListProjects(ctx context.Context) ([]map[string]any, error) {
	result, err := c.Call(ctx, "list-projects", nil)
	if err != nil {
		return nil, err
	}
	return extractArray(result)
}

// GetProject returns a project by UUID.
func (c *Client) GetProject(ctx context.Context, uuid string) (map[string]any, error) {
	return c.Call(ctx, "get-project-by-uuid", map[string]any{"uuid": uuid})
}

// CreateProject creates a new project.
func (c *Client) CreateProject(ctx context.Context, name, description string) (map[string]any, error) {
	return c.Call(ctx, "create-project", map[string]any{
		"name":        name,
		"description": description,
	})
}

// ListApplications returns all applications.
func (c *Client) ListApplications(ctx context.Context) ([]map[string]any, error) {
	result, err := c.Call(ctx, "list-applications", nil)
	if err != nil {
		return nil, err
	}
	return extractArray(result)
}

// GetApplication returns an application by UUID.
func (c *Client) GetApplication(ctx context.Context, uuid string) (map[string]any, error) {
	return c.Call(ctx, "get-application-by-uuid", map[string]any{"uuid": uuid})
}

// DeployApplication triggers deployment of an application.
func (c *Client) DeployApplication(ctx context.Context, uuid string) (map[string]any, error) {
	return c.Call(ctx, "deploy-by-tag-or-uuid", map[string]any{"uuid": uuid})
}

// ListDatabases returns all databases.
func (c *Client) ListDatabases(ctx context.Context) ([]map[string]any, error) {
	result, err := c.Call(ctx, "list-databases", nil)
	if err != nil {
		return nil, err
	}
	return extractArray(result)
}

// GetDatabase returns a database by UUID.
func (c *Client) GetDatabase(ctx context.Context, uuid string) (map[string]any, error) {
	return c.Call(ctx, "get-database-by-uuid", map[string]any{"uuid": uuid})
}

// ListServices returns all services.
func (c *Client) ListServices(ctx context.Context) ([]map[string]any, error) {
	result, err := c.Call(ctx, "list-services", nil)
	if err != nil {
		return nil, err
	}
	return extractArray(result)
}

// GetService returns a service by UUID.
func (c *Client) GetService(ctx context.Context, uuid string) (map[string]any, error) {
	return c.Call(ctx, "get-service-by-uuid", map[string]any{"uuid": uuid})
}

// ListEnvironments returns environments for a project.
func (c *Client) ListEnvironments(ctx context.Context, projectUUID string) ([]map[string]any, error) {
	result, err := c.Call(ctx, "get-environments", map[string]any{"project_uuid": projectUUID})
	if err != nil {
		return nil, err
	}
	return extractArray(result)
}

// GetTeam returns the current team.
func (c *Client) GetTeam(ctx context.Context) (map[string]any, error) {
	return c.Call(ctx, "get-current-team", nil)
}

// GetTeamMembers returns members of the current team.
func (c *Client) GetTeamMembers(ctx context.Context) ([]map[string]any, error) {
	result, err := c.Call(ctx, "get-current-team-members", nil)
	if err != nil {
		return nil, err
	}
	return extractArray(result)
}

// extractArray extracts an array from result["result"] or returns empty.
func extractArray(result map[string]any) ([]map[string]any, error) {
	if arr, ok := result["result"].([]any); ok {
		items := make([]map[string]any, 0, len(arr))
		for _, item := range arr {
			if m, ok := item.(map[string]any); ok {
				items = append(items, m)
			}
		}
		return items, nil
	}
	return nil, nil
}
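Typical wiring of the client, assuming the package import path forge.lthn.ai/core/go-devops/deploy/coolify (an assumption; only the module prefix is visible in this diff) and COOLIFY_URL / COOLIFY_TOKEN in the environment. Keys in the returned maps follow whatever the Coolify API actually sends back:

// Illustrative usage of the Coolify client; error handling is abbreviated.
package main

import (
	"context"
	"fmt"
	"log"

	"forge.lthn.ai/core/go-devops/deploy/coolify" // assumed import path
)

func main() {
	client, err := coolify.NewClient(coolify.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	servers, err := client.ListServers(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range servers {
		fmt.Println(s["name"], s["uuid"]) // field names depend on the Coolify API payload
	}

	// Trigger a deployment for a known application UUID (placeholder value).
	if _, err := client.DeployApplication(ctx, "app-uuid"); err != nil {
		log.Fatal(err)
	}
}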
147
deploy/python/python.go
Normal file
@@ -0,0 +1,147 @@
package python

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"sync"

	"forge.lthn.ai/core/go/pkg/framework/core"
	"github.com/kluctl/go-embed-python/python"
)

var (
	once    sync.Once
	ep      *python.EmbeddedPython
	initErr error
)

// Init initializes the embedded Python runtime.
func Init() error {
	once.Do(func() {
		ep, initErr = python.NewEmbeddedPython("core-deploy")
	})
	return initErr
}

// GetPython returns the embedded Python instance.
func GetPython() *python.EmbeddedPython {
	return ep
}

// RunScript runs a Python script with the given code and returns stdout.
func RunScript(ctx context.Context, code string, args ...string) (string, error) {
	if err := Init(); err != nil {
		return "", err
	}

	// Write code to temp file
	tmpFile, err := os.CreateTemp("", "core-*.py")
	if err != nil {
		return "", core.E("python", "create temp file", err)
	}
	defer func() { _ = os.Remove(tmpFile.Name()) }()

	if _, err := tmpFile.WriteString(code); err != nil {
		_ = tmpFile.Close()
		return "", core.E("python", "write script", err)
	}
	_ = tmpFile.Close()

	// Build args: script path + any additional args
	cmdArgs := append([]string{tmpFile.Name()}, args...)

	// Get the command
	cmd, err := ep.PythonCmd(cmdArgs...)
	if err != nil {
		return "", core.E("python", "create command", err)
	}

	// Run the interpreter and capture stdout (ctx is accepted for future use
	// but is not yet wired into command execution).
	output, err := cmd.Output()
	if err != nil {
		// Try to get stderr for better error message
		if exitErr, ok := err.(*exec.ExitError); ok {
			return "", core.E("python", "run script", fmt.Errorf("%w: %s", err, string(exitErr.Stderr)))
		}
		return "", core.E("python", "run script", err)
	}

	return string(output), nil
}

// RunModule runs a Python module (python -m module_name).
func RunModule(ctx context.Context, module string, args ...string) (string, error) {
	if err := Init(); err != nil {
		return "", err
	}

	cmdArgs := append([]string{"-m", module}, args...)
	cmd, err := ep.PythonCmd(cmdArgs...)
	if err != nil {
		return "", core.E("python", "create command", err)
	}

	output, err := cmd.Output()
	if err != nil {
		return "", core.E("python", fmt.Sprintf("run module %s", module), err)
	}

	return string(output), nil
}

// DevOpsPath returns the path to the DevOps repo.
func DevOpsPath() (string, error) {
	if path := os.Getenv("DEVOPS_PATH"); path != "" {
		return path, nil
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return "", core.E("python", "get user home", err)
	}
	return filepath.Join(home, "Code", "DevOps"), nil
}

// CoolifyModulePath returns the path to the Coolify module_utils.
func CoolifyModulePath() (string, error) {
	path, err := DevOpsPath()
	if err != nil {
		return "", err
	}
	return filepath.Join(path, "playbooks", "roles", "coolify", "module_utils"), nil
}

// CoolifyScript generates Python code to call the Coolify API.
func CoolifyScript(baseURL, apiToken, operation string, params map[string]any) (string, error) {
	paramsJSON, err := json.Marshal(params)
	if err != nil {
		return "", core.E("python", "marshal params", err)
	}

	modulePath, err := CoolifyModulePath()
	if err != nil {
		return "", err
	}

	return fmt.Sprintf(`
import sys
import json
sys.path.insert(0, %q)

from swagger.coolify_api import CoolifyClient

client = CoolifyClient(
    base_url=%q,
    api_token=%q,
    timeout=30,
    verify_ssl=True,
)

params = json.loads(%q)
result = client._call(%q, params, check_response=False)
print(json.dumps(result))
`, modulePath, baseURL, apiToken, string(paramsJSON), operation), nil
}
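RunScript writes the code to a temp file and executes it with the embedded interpreter, so a smoke test needs nothing beyond the package itself (the go-embed-python distribution is extracted on first use). A quick sketch:

// Quick check of the embedded runtime: run a one-liner and print its output.
package main

import (
	"context"
	"fmt"
	"log"

	"forge.lthn.ai/core/go-devops/deploy/python"
)

func main() {
	out, err := python.RunScript(context.Background(), `print("hello from embedded python")`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out)
}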
560
devkit/devkit.go
Normal file
@@ -0,0 +1,560 @@
// Package devkit provides a developer toolkit for common automation commands.
|
||||
// Designed by Gemini 3 Pro (Hypnos) + Claude Opus (Charon), signed LEK-1 | lthn.ai | EUPL-1.2
|
||||
package devkit
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// --- Code Quality ---
|
||||
|
||||
// Finding represents a single issue found by a linting tool.
|
||||
type Finding struct {
|
||||
File string
|
||||
Line int
|
||||
Message string
|
||||
Tool string
|
||||
}
|
||||
|
||||
// CoverageReport holds the test coverage percentage for a package.
|
||||
type CoverageReport struct {
|
||||
Package string
|
||||
Percentage float64
|
||||
}
|
||||
|
||||
// RaceCondition represents a data race detected by the Go race detector.
|
||||
type RaceCondition struct {
|
||||
File string
|
||||
Line int
|
||||
Desc string
|
||||
}
|
||||
|
||||
// TODO represents a tracked code comment like TODO, FIXME, or HACK.
|
||||
type TODO struct {
|
||||
File string
|
||||
Line int
|
||||
Type string
|
||||
Message string
|
||||
}
|
||||
|
||||
// --- Security ---
|
||||
|
||||
// Vulnerability represents a dependency vulnerability.
|
||||
type Vulnerability struct {
|
||||
ID string
|
||||
Package string
|
||||
Version string
|
||||
Description string
|
||||
}
|
||||
|
||||
// SecretLeak represents a potential secret found in the codebase.
|
||||
type SecretLeak struct {
|
||||
File string
|
||||
Line int
|
||||
RuleID string
|
||||
Match string
|
||||
}
|
||||
|
||||
// PermIssue represents a file permission issue.
|
||||
type PermIssue struct {
|
||||
File string
|
||||
Permission string
|
||||
Issue string
|
||||
}
|
||||
|
||||
// --- Git Operations ---
|
||||
|
||||
// DiffSummary provides a summary of changes.
|
||||
type DiffSummary struct {
|
||||
FilesChanged int
|
||||
Insertions int
|
||||
Deletions int
|
||||
}
|
||||
|
||||
// Commit represents a single git commit.
|
||||
type Commit struct {
|
||||
Hash string
|
||||
Author string
|
||||
Date time.Time
|
||||
Message string
|
||||
}
|
||||
|
||||
// --- Build & Dependencies ---
|
||||
|
||||
// BuildResult holds the outcome of a single build target.
|
||||
type BuildResult struct {
|
||||
Target string
|
||||
Path string
|
||||
Error error
|
||||
}
|
||||
|
||||
// Graph represents a dependency graph.
|
||||
type Graph struct {
|
||||
Nodes []string
|
||||
Edges map[string][]string
|
||||
}
|
||||
|
||||
// --- Metrics ---
|
||||
|
||||
// ComplexFunc represents a function with its cyclomatic complexity score.
|
||||
type ComplexFunc struct {
|
||||
Package string
|
||||
FuncName string
|
||||
File string
|
||||
Line int
|
||||
Score int
|
||||
}
|
||||
|
||||
// Toolkit wraps common dev automation commands into structured Go APIs.
|
||||
type Toolkit struct {
|
||||
Dir string // Working directory for commands
|
||||
}
|
||||
|
||||
// New creates a Toolkit rooted at the given directory.
|
||||
func New(dir string) *Toolkit {
|
||||
return &Toolkit{Dir: dir}
|
||||
}
|
||||
|
||||
// Run executes a command and captures stdout, stderr, and exit code.
|
||||
func (t *Toolkit) Run(name string, args ...string) (stdout, stderr string, exitCode int, err error) {
|
||||
cmd := exec.Command(name, args...)
|
||||
cmd.Dir = t.Dir
|
||||
var stdoutBuf, stderrBuf bytes.Buffer
|
||||
cmd.Stdout = &stdoutBuf
|
||||
cmd.Stderr = &stderrBuf
|
||||
|
||||
err = cmd.Run()
|
||||
stdout = stdoutBuf.String()
|
||||
stderr = stderrBuf.String()
|
||||
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
exitCode = exitErr.ExitCode()
|
||||
} else {
|
||||
exitCode = -1
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// FindTODOs greps for TODO/FIXME/HACK comments within a directory.
|
||||
func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) {
|
||||
pattern := `\b(TODO|FIXME|HACK)\b(\(.*\))?:`
|
||||
stdout, stderr, exitCode, err := t.Run("git", "grep", "--line-number", "-E", pattern, "--", dir)
|
||||
|
||||
if exitCode == 1 && stdout == "" {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil && exitCode != 1 {
|
||||
return nil, fmt.Errorf("git grep failed (exit %d): %s\n%s", exitCode, err, stderr)
|
||||
}
|
||||
|
||||
var todos []TODO
|
||||
re := regexp.MustCompile(pattern)
|
||||
|
||||
for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(line, ":", 3)
|
||||
if len(parts) < 3 {
|
||||
continue
|
||||
}
|
||||
		lineNum, _ := strconv.Atoi(parts[1])
		match := re.FindStringSubmatch(parts[2])
		if match == nil {
			// Pattern matched elsewhere on the line (e.g. in the path); skip it.
			continue
		}
		todoType := match[1]
		msg := strings.TrimSpace(re.Split(parts[2], 2)[1])
|
||||
|
||||
todos = append(todos, TODO{
|
||||
File: parts[0],
|
||||
Line: lineNum,
|
||||
Type: todoType,
|
||||
Message: msg,
|
||||
})
|
||||
}
|
||||
return todos, nil
|
||||
}
|
||||
|
||||
// AuditDeps runs govulncheck to find dependency vulnerabilities.
|
||||
func (t *Toolkit) AuditDeps() ([]Vulnerability, error) {
|
||||
stdout, stderr, exitCode, err := t.Run("govulncheck", "./...")
|
||||
if err != nil && exitCode != 0 && !strings.Contains(stdout, "Vulnerability") {
|
||||
return nil, fmt.Errorf("govulncheck failed (exit %d): %s\n%s", exitCode, err, stderr)
|
||||
}
|
||||
|
||||
var vulns []Vulnerability
|
||||
scanner := bufio.NewScanner(strings.NewReader(stdout))
|
||||
var cur Vulnerability
|
||||
inBlock := false
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.HasPrefix(line, "Vulnerability #") {
|
||||
if cur.ID != "" {
|
||||
vulns = append(vulns, cur)
|
||||
}
|
||||
fields := strings.Fields(line)
|
||||
cur = Vulnerability{}
|
||||
if len(fields) > 1 {
|
||||
cur.ID = fields[1]
|
||||
}
|
||||
inBlock = true
|
||||
} else if inBlock {
|
||||
switch {
|
||||
case strings.Contains(line, "Package:"):
|
||||
cur.Package = strings.TrimSpace(strings.SplitN(line, ":", 2)[1])
|
||||
case strings.Contains(line, "Found in version:"):
|
||||
cur.Version = strings.TrimSpace(strings.SplitN(line, ":", 2)[1])
|
||||
case line == "":
|
||||
if cur.ID != "" {
|
||||
vulns = append(vulns, cur)
|
||||
cur = Vulnerability{}
|
||||
}
|
||||
inBlock = false
|
||||
default:
|
||||
if !strings.HasPrefix(line, " ") && cur.Description == "" {
|
||||
cur.Description = strings.TrimSpace(line)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if cur.ID != "" {
|
||||
vulns = append(vulns, cur)
|
||||
}
|
||||
return vulns, nil
|
||||
}
|
||||
|
||||
// DiffStat returns a summary of uncommitted changes.
|
||||
func (t *Toolkit) DiffStat() (DiffSummary, error) {
|
||||
stdout, stderr, exitCode, err := t.Run("git", "diff", "--stat")
|
||||
if err != nil && exitCode != 0 {
|
||||
return DiffSummary{}, fmt.Errorf("git diff failed (exit %d): %s\n%s", exitCode, err, stderr)
|
||||
}
|
||||
|
||||
var s DiffSummary
|
||||
lines := strings.Split(strings.TrimSpace(stdout), "\n")
|
||||
if len(lines) == 0 || lines[0] == "" {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
last := lines[len(lines)-1]
|
||||
for _, part := range strings.Split(last, ",") {
|
||||
part = strings.TrimSpace(part)
|
||||
fields := strings.Fields(part)
|
||||
if len(fields) < 2 {
|
||||
continue
|
||||
}
|
||||
val, _ := strconv.Atoi(fields[0])
|
||||
switch {
|
||||
case strings.Contains(part, "file"):
|
||||
s.FilesChanged = val
|
||||
case strings.Contains(part, "insertion"):
|
||||
s.Insertions = val
|
||||
case strings.Contains(part, "deletion"):
|
||||
s.Deletions = val
|
||||
}
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// UncommittedFiles returns paths of files with uncommitted changes.
|
||||
func (t *Toolkit) UncommittedFiles() ([]string, error) {
|
||||
stdout, stderr, exitCode, err := t.Run("git", "status", "--porcelain")
|
||||
if err != nil && exitCode != 0 {
|
||||
return nil, fmt.Errorf("git status failed: %s\n%s", err, stderr)
|
||||
}
|
||||
var files []string
|
||||
for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") {
|
||||
if len(line) > 3 {
|
||||
files = append(files, strings.TrimSpace(line[3:]))
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// Lint runs go vet on the given package pattern.
|
||||
func (t *Toolkit) Lint(pkg string) ([]Finding, error) {
|
||||
_, stderr, exitCode, err := t.Run("go", "vet", pkg)
|
||||
if exitCode == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil && exitCode != 2 {
|
||||
return nil, fmt.Errorf("go vet failed: %w", err)
|
||||
}
|
||||
|
||||
var findings []Finding
|
||||
for _, line := range strings.Split(strings.TrimSpace(stderr), "\n") {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(line, ":", 4)
|
||||
if len(parts) < 4 {
|
||||
continue
|
||||
}
|
||||
lineNum, _ := strconv.Atoi(parts[1])
|
||||
findings = append(findings, Finding{
|
||||
File: parts[0],
|
||||
Line: lineNum,
|
||||
Message: strings.TrimSpace(parts[3]),
|
||||
Tool: "go vet",
|
||||
})
|
||||
}
|
||||
return findings, nil
|
||||
}
|
||||
|
||||
// ScanSecrets runs gitleaks to find potential secret leaks.
|
||||
func (t *Toolkit) ScanSecrets(dir string) ([]SecretLeak, error) {
|
||||
stdout, _, exitCode, err := t.Run("gitleaks", "detect", "--source", dir, "--report-format", "csv", "--no-git")
|
||||
if exitCode == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil && exitCode != 1 {
|
||||
return nil, fmt.Errorf("gitleaks failed: %w", err)
|
||||
}
|
||||
|
||||
var leaks []SecretLeak
|
||||
for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") {
|
||||
if line == "" || strings.HasPrefix(line, "RuleID") {
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(line, ",", 4)
|
||||
if len(parts) < 4 {
|
||||
continue
|
||||
}
|
||||
lineNum, _ := strconv.Atoi(parts[2])
|
||||
leaks = append(leaks, SecretLeak{
|
||||
RuleID: parts[0],
|
||||
File: parts[1],
|
||||
Line: lineNum,
|
||||
Match: parts[3],
|
||||
})
|
||||
}
|
||||
return leaks, nil
|
||||
}
|
||||
|
||||
// ModTidy runs go mod tidy.
|
||||
func (t *Toolkit) ModTidy() error {
|
||||
_, stderr, exitCode, err := t.Run("go", "mod", "tidy")
|
||||
if err != nil && exitCode != 0 {
|
||||
return fmt.Errorf("go mod tidy failed: %s", stderr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build compiles the given targets.
|
||||
func (t *Toolkit) Build(targets ...string) ([]BuildResult, error) {
|
||||
var results []BuildResult
|
||||
for _, target := range targets {
|
||||
_, stderr, _, err := t.Run("go", "build", "-o", "/dev/null", target)
|
||||
r := BuildResult{Target: target}
|
||||
if err != nil {
|
||||
r.Error = fmt.Errorf("%s", strings.TrimSpace(stderr))
|
||||
}
|
||||
results = append(results, r)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// TestCount returns the number of test functions in a package.
|
||||
func (t *Toolkit) TestCount(pkg string) (int, error) {
|
||||
stdout, stderr, exitCode, err := t.Run("go", "test", "-list", ".*", pkg)
|
||||
if err != nil && exitCode != 0 {
|
||||
return 0, fmt.Errorf("go test -list failed: %s\n%s", err, stderr)
|
||||
}
|
||||
count := 0
|
||||
for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") {
|
||||
if strings.HasPrefix(line, "Test") || strings.HasPrefix(line, "Benchmark") {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// Coverage runs go test -cover and parses per-package coverage percentages.
|
||||
func (t *Toolkit) Coverage(pkg string) ([]CoverageReport, error) {
|
||||
if pkg == "" {
|
||||
pkg = "./..."
|
||||
}
|
||||
stdout, stderr, exitCode, err := t.Run("go", "test", "-cover", pkg)
|
||||
if err != nil && exitCode != 0 && !strings.Contains(stdout, "coverage:") {
|
||||
return nil, fmt.Errorf("go test -cover failed (exit %d): %s\n%s", exitCode, err, stderr)
|
||||
}
|
||||
|
||||
var reports []CoverageReport
|
||||
re := regexp.MustCompile(`ok\s+(\S+)\s+.*coverage:\s+([\d.]+)%`)
|
||||
scanner := bufio.NewScanner(strings.NewReader(stdout))
|
||||
|
||||
for scanner.Scan() {
|
||||
matches := re.FindStringSubmatch(scanner.Text())
|
||||
if len(matches) == 3 {
|
||||
pct, _ := strconv.ParseFloat(matches[2], 64)
|
||||
reports = append(reports, CoverageReport{
|
||||
Package: matches[1],
|
||||
Percentage: pct,
|
||||
})
|
||||
}
|
||||
}
|
||||
return reports, nil
|
||||
}
|
||||
|
||||
// RaceDetect runs go test -race and parses data race warnings.
|
||||
func (t *Toolkit) RaceDetect(pkg string) ([]RaceCondition, error) {
|
||||
if pkg == "" {
|
||||
pkg = "./..."
|
||||
}
|
||||
_, stderr, _, err := t.Run("go", "test", "-race", pkg)
|
||||
if err != nil && !strings.Contains(stderr, "WARNING: DATA RACE") {
|
||||
return nil, fmt.Errorf("go test -race failed: %w", err)
|
||||
}
|
||||
|
||||
var races []RaceCondition
|
||||
lines := strings.Split(stderr, "\n")
|
||||
reFile := regexp.MustCompile(`\s+(.*\.go):(\d+)`)
|
||||
|
||||
for i, line := range lines {
|
||||
if strings.Contains(line, "WARNING: DATA RACE") {
|
||||
rc := RaceCondition{Desc: "Data race detected"}
|
||||
for j := i + 1; j < len(lines) && j < i+15; j++ {
|
||||
if match := reFile.FindStringSubmatch(lines[j]); len(match) == 3 {
|
||||
rc.File = strings.TrimSpace(match[1])
|
||||
rc.Line, _ = strconv.Atoi(match[2])
|
||||
break
|
||||
}
|
||||
}
|
||||
races = append(races, rc)
|
||||
}
|
||||
}
|
||||
return races, nil
|
||||
}
|
||||
|
||||
// Complexity runs gocyclo and returns functions exceeding the threshold.
|
||||
func (t *Toolkit) Complexity(threshold int) ([]ComplexFunc, error) {
|
||||
stdout, stderr, exitCode, err := t.Run("gocyclo", "-over", strconv.Itoa(threshold), ".")
|
||||
if err != nil && exitCode == -1 {
|
||||
return nil, fmt.Errorf("gocyclo not available: %s\n%s", err, stderr)
|
||||
}
|
||||
|
||||
var funcs []ComplexFunc
|
||||
scanner := bufio.NewScanner(strings.NewReader(stdout))
|
||||
|
||||
for scanner.Scan() {
|
||||
fields := strings.Fields(scanner.Text())
|
||||
if len(fields) < 4 {
|
||||
continue
|
||||
}
|
||||
score, _ := strconv.Atoi(fields[0])
|
||||
fileParts := strings.Split(fields[3], ":")
|
||||
line := 0
|
||||
if len(fileParts) > 1 {
|
||||
line, _ = strconv.Atoi(fileParts[1])
|
||||
}
|
||||
|
||||
funcs = append(funcs, ComplexFunc{
|
||||
Score: score,
|
||||
Package: fields[1],
|
||||
FuncName: fields[2],
|
||||
File: fileParts[0],
|
||||
Line: line,
|
||||
})
|
||||
}
|
||||
return funcs, nil
|
||||
}
|
||||
|
||||
// DepGraph runs go mod graph and builds a dependency graph.
|
||||
func (t *Toolkit) DepGraph(pkg string) (*Graph, error) {
|
||||
stdout, stderr, exitCode, err := t.Run("go", "mod", "graph")
|
||||
if err != nil && exitCode != 0 {
|
||||
return nil, fmt.Errorf("go mod graph failed (exit %d): %s\n%s", exitCode, err, stderr)
|
||||
}
|
||||
|
||||
graph := &Graph{Edges: make(map[string][]string)}
|
||||
nodes := make(map[string]struct{})
|
||||
scanner := bufio.NewScanner(strings.NewReader(stdout))
|
||||
|
||||
for scanner.Scan() {
|
||||
parts := strings.Fields(scanner.Text())
|
||||
if len(parts) >= 2 {
|
||||
src, dst := parts[0], parts[1]
|
||||
graph.Edges[src] = append(graph.Edges[src], dst)
|
||||
nodes[src] = struct{}{}
|
||||
nodes[dst] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for node := range nodes {
|
||||
graph.Nodes = append(graph.Nodes, node)
|
||||
}
|
||||
return graph, nil
|
||||
}
|
||||
|
||||
// GitLog returns the last n commits from git history.
|
||||
func (t *Toolkit) GitLog(n int) ([]Commit, error) {
|
||||
stdout, stderr, exitCode, err := t.Run("git", "log", fmt.Sprintf("-n%d", n), "--format=%H|%an|%aI|%s")
|
||||
if err != nil && exitCode != 0 {
|
||||
return nil, fmt.Errorf("git log failed (exit %d): %s\n%s", exitCode, err, stderr)
|
||||
}
|
||||
|
||||
var commits []Commit
|
||||
scanner := bufio.NewScanner(strings.NewReader(stdout))
|
||||
|
||||
for scanner.Scan() {
|
||||
parts := strings.SplitN(scanner.Text(), "|", 4)
|
||||
if len(parts) < 4 {
|
||||
continue
|
||||
}
|
||||
date, _ := time.Parse(time.RFC3339, parts[2])
|
||||
commits = append(commits, Commit{
|
||||
Hash: parts[0],
|
||||
Author: parts[1],
|
||||
Date: date,
|
||||
Message: parts[3],
|
||||
})
|
||||
}
|
||||
return commits, nil
|
||||
}
|
||||
|
||||
// CheckPerms walks a directory and flags files with overly permissive modes.
|
||||
func (t *Toolkit) CheckPerms(dir string) ([]PermIssue, error) {
|
||||
var issues []PermIssue
|
||||
err := filepath.Walk(filepath.Join(t.Dir, dir), func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
mode := info.Mode().Perm()
|
||||
if mode&0o002 != 0 {
|
||||
issues = append(issues, PermIssue{
|
||||
File: path,
|
||||
Permission: fmt.Sprintf("%04o", mode),
|
||||
Issue: "World-writable",
|
||||
})
|
||||
		} else if mode&0o020 != 0 {
			issues = append(issues, PermIssue{
				File:       path,
				Permission: fmt.Sprintf("%04o", mode),
				Issue:      "Group-writable",
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("walk failed: %w", err)
|
||||
}
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
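The Toolkit is a thin wrapper over external binaries — git and go for most calls, govulncheck, gocyclo and gitleaks for the rest — so those tools must be on PATH for their respective methods. A usage sketch, assuming the package is importable as forge.lthn.ai/core/go-devops/devkit (the import path is an assumption):

// Example wiring of the Toolkit against the current repository.
package main

import (
	"fmt"
	"log"

	"forge.lthn.ai/core/go-devops/devkit" // assumed import path
)

func main() {
	tk := devkit.New(".")

	todos, err := tk.FindTODOs(".")
	if err != nil {
		log.Fatal(err)
	}
	for _, td := range todos {
		fmt.Printf("%s:%d [%s] %s\n", td.File, td.Line, td.Type, td.Message)
	}

	reports, err := tk.Coverage("./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range reports {
		fmt.Printf("%-40s %.1f%%\n", r.Package, r.Percentage)
	}
}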
270
devkit/devkit_test.go
Normal file
@@ -0,0 +1,270 @@
// Designed by Gemini 3 Pro (Hypnos) + Claude Opus (Charon), signed LEK-1 | lthn.ai | EUPL-1.2
|
||||
package devkit
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// setupMockCmd creates a shell script in a temp dir that echoes predetermined
|
||||
// content, and prepends that dir to PATH so Run() picks it up.
|
||||
func setupMockCmd(t *testing.T, name, content string) {
|
||||
t.Helper()
|
||||
tmpDir := t.TempDir()
|
||||
scriptPath := filepath.Join(tmpDir, name)
|
||||
|
||||
script := fmt.Sprintf("#!/bin/sh\ncat <<'MOCK_EOF'\n%s\nMOCK_EOF\n", content)
|
||||
if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
|
||||
t.Fatalf("failed to write mock command %s: %v", name, err)
|
||||
}
|
||||
|
||||
oldPath := os.Getenv("PATH")
|
||||
t.Setenv("PATH", tmpDir+string(os.PathListSeparator)+oldPath)
|
||||
}
|
||||
|
||||
// setupMockCmdExit creates a mock that echoes to stdout/stderr and exits with a code.
|
||||
func setupMockCmdExit(t *testing.T, name, stdout, stderr string, exitCode int) {
|
||||
t.Helper()
|
||||
tmpDir := t.TempDir()
|
||||
scriptPath := filepath.Join(tmpDir, name)
|
||||
|
||||
script := fmt.Sprintf("#!/bin/sh\ncat <<'MOCK_EOF'\n%s\nMOCK_EOF\ncat <<'MOCK_ERR' >&2\n%s\nMOCK_ERR\nexit %d\n", stdout, stderr, exitCode)
|
||||
if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
|
||||
t.Fatalf("failed to write mock command %s: %v", name, err)
|
||||
}
|
||||
|
||||
oldPath := os.Getenv("PATH")
|
||||
t.Setenv("PATH", tmpDir+string(os.PathListSeparator)+oldPath)
|
||||
}
|
||||
|
||||
func TestCoverage_Good(t *testing.T) {
|
||||
output := `? example.com/skipped [no test files]
|
||||
ok example.com/pkg1 0.5s coverage: 85.0% of statements
|
||||
ok example.com/pkg2 0.2s coverage: 100.0% of statements`
|
||||
|
||||
setupMockCmd(t, "go", output)
|
||||
|
||||
tk := New(t.TempDir())
|
||||
reports, err := tk.Coverage("./...")
|
||||
if err != nil {
|
||||
t.Fatalf("Coverage failed: %v", err)
|
||||
}
|
||||
if len(reports) != 2 {
|
||||
t.Fatalf("expected 2 reports, got %d", len(reports))
|
||||
}
|
||||
if reports[0].Package != "example.com/pkg1" || reports[0].Percentage != 85.0 {
|
||||
t.Errorf("report 0: want pkg1@85%%, got %s@%.1f%%", reports[0].Package, reports[0].Percentage)
|
||||
}
|
||||
if reports[1].Package != "example.com/pkg2" || reports[1].Percentage != 100.0 {
|
||||
t.Errorf("report 1: want pkg2@100%%, got %s@%.1f%%", reports[1].Package, reports[1].Percentage)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCoverage_Bad(t *testing.T) {
|
||||
// No coverage lines in output
|
||||
setupMockCmd(t, "go", "FAIL\texample.com/broken [build failed]")
|
||||
|
||||
tk := New(t.TempDir())
|
||||
reports, err := tk.Coverage("./...")
|
||||
if err != nil {
|
||||
t.Fatalf("Coverage should not error on partial output: %v", err)
|
||||
}
|
||||
if len(reports) != 0 {
|
||||
t.Errorf("expected 0 reports from failed build, got %d", len(reports))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitLog_Good(t *testing.T) {
|
||||
now := time.Now().Truncate(time.Second)
|
||||
nowStr := now.Format(time.RFC3339)
|
||||
|
||||
output := fmt.Sprintf("abc123|Alice|%s|Fix the bug\ndef456|Bob|%s|Add feature", nowStr, nowStr)
|
||||
setupMockCmd(t, "git", output)
|
||||
|
||||
tk := New(t.TempDir())
|
||||
commits, err := tk.GitLog(2)
|
||||
if err != nil {
|
||||
t.Fatalf("GitLog failed: %v", err)
|
||||
}
|
||||
if len(commits) != 2 {
|
||||
t.Fatalf("expected 2 commits, got %d", len(commits))
|
||||
}
|
||||
if commits[0].Hash != "abc123" {
|
||||
t.Errorf("hash: want abc123, got %s", commits[0].Hash)
|
||||
}
|
||||
if commits[0].Author != "Alice" {
|
||||
t.Errorf("author: want Alice, got %s", commits[0].Author)
|
||||
}
|
||||
if commits[0].Message != "Fix the bug" {
|
||||
t.Errorf("message: want 'Fix the bug', got %q", commits[0].Message)
|
||||
}
|
||||
if !commits[0].Date.Equal(now) {
|
||||
t.Errorf("date: want %v, got %v", now, commits[0].Date)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitLog_Bad(t *testing.T) {
|
||||
// Malformed lines should be skipped
|
||||
setupMockCmd(t, "git", "incomplete|line\nabc|Bob|2025-01-01T00:00:00Z|Good commit")
|
||||
|
||||
tk := New(t.TempDir())
|
||||
commits, err := tk.GitLog(5)
|
||||
if err != nil {
|
||||
t.Fatalf("GitLog failed: %v", err)
|
||||
}
|
||||
if len(commits) != 1 {
|
||||
t.Errorf("expected 1 valid commit (skip malformed), got %d", len(commits))
|
||||
}
|
||||
}
|
||||
|
||||
func TestComplexity_Good(t *testing.T) {
|
||||
output := "15 main ComplexFunc file.go:10:1\n20 pkg VeryComplex other.go:50:1"
|
||||
setupMockCmd(t, "gocyclo", output)
|
||||
|
||||
tk := New(t.TempDir())
|
||||
funcs, err := tk.Complexity(10)
|
||||
if err != nil {
|
||||
t.Fatalf("Complexity failed: %v", err)
|
||||
}
|
||||
if len(funcs) != 2 {
|
||||
t.Fatalf("expected 2 funcs, got %d", len(funcs))
|
||||
}
|
||||
if funcs[0].Score != 15 || funcs[0].FuncName != "ComplexFunc" || funcs[0].File != "file.go" || funcs[0].Line != 10 {
|
||||
t.Errorf("func 0: unexpected %+v", funcs[0])
|
||||
}
|
||||
if funcs[1].Score != 20 || funcs[1].Package != "pkg" {
|
||||
t.Errorf("func 1: unexpected %+v", funcs[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestComplexity_Bad(t *testing.T) {
|
||||
// No functions above threshold = empty output
|
||||
setupMockCmd(t, "gocyclo", "")
|
||||
|
||||
tk := New(t.TempDir())
|
||||
funcs, err := tk.Complexity(50)
|
||||
if err != nil {
|
||||
t.Fatalf("Complexity should not error on empty output: %v", err)
|
||||
}
|
||||
if len(funcs) != 0 {
|
||||
t.Errorf("expected 0 funcs, got %d", len(funcs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDepGraph_Good(t *testing.T) {
|
||||
output := "modA@v1 modB@v2\nmodA@v1 modC@v3\nmodB@v2 modD@v1"
|
||||
setupMockCmd(t, "go", output)
|
||||
|
||||
tk := New(t.TempDir())
|
||||
graph, err := tk.DepGraph("./...")
|
||||
if err != nil {
|
||||
t.Fatalf("DepGraph failed: %v", err)
|
||||
}
|
||||
if len(graph.Nodes) != 4 {
|
||||
t.Errorf("expected 4 nodes, got %d: %v", len(graph.Nodes), graph.Nodes)
|
||||
}
|
||||
edgesA := graph.Edges["modA@v1"]
|
||||
if len(edgesA) != 2 {
|
||||
t.Errorf("expected 2 edges from modA@v1, got %d", len(edgesA))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRaceDetect_Good(t *testing.T) {
|
||||
// No races = clean run
|
||||
setupMockCmd(t, "go", "ok\texample.com/safe\t0.1s")
|
||||
|
||||
tk := New(t.TempDir())
|
||||
races, err := tk.RaceDetect("./...")
|
||||
if err != nil {
|
||||
t.Fatalf("RaceDetect failed on clean run: %v", err)
|
||||
}
|
||||
if len(races) != 0 {
|
||||
t.Errorf("expected 0 races, got %d", len(races))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRaceDetect_Bad(t *testing.T) {
|
||||
stderrOut := `WARNING: DATA RACE
|
||||
Read at 0x00c000123456 by goroutine 7:
|
||||
/home/user/project/main.go:42
|
||||
Previous write at 0x00c000123456 by goroutine 6:
|
||||
/home/user/project/main.go:38`
|
||||
|
||||
setupMockCmdExit(t, "go", "", stderrOut, 1)
|
||||
|
||||
tk := New(t.TempDir())
|
||||
races, err := tk.RaceDetect("./...")
|
||||
if err != nil {
|
||||
t.Fatalf("RaceDetect should parse races, not error: %v", err)
|
||||
}
|
||||
if len(races) != 1 {
|
||||
t.Fatalf("expected 1 race, got %d", len(races))
|
||||
}
|
||||
if races[0].File != "/home/user/project/main.go" || races[0].Line != 42 {
|
||||
t.Errorf("race: unexpected %+v", races[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestDiffStat_Good(t *testing.T) {
|
||||
output := ` file1.go | 10 +++++++---
|
||||
file2.go | 5 +++++
|
||||
2 files changed, 12 insertions(+), 3 deletions(-)`
|
||||
setupMockCmd(t, "git", output)
|
||||
|
||||
tk := New(t.TempDir())
|
||||
s, err := tk.DiffStat()
|
||||
if err != nil {
|
||||
t.Fatalf("DiffStat failed: %v", err)
|
||||
}
|
||||
if s.FilesChanged != 2 {
|
||||
t.Errorf("files: want 2, got %d", s.FilesChanged)
|
||||
}
|
||||
if s.Insertions != 12 {
|
||||
t.Errorf("insertions: want 12, got %d", s.Insertions)
|
||||
}
|
||||
if s.Deletions != 3 {
|
||||
t.Errorf("deletions: want 3, got %d", s.Deletions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckPerms_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Create a world-writable file
|
||||
badFile := filepath.Join(dir, "bad.txt")
|
||||
if err := os.WriteFile(badFile, []byte("test"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Chmod(badFile, 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Create a safe file
|
||||
goodFile := filepath.Join(dir, "good.txt")
|
||||
if err := os.WriteFile(goodFile, []byte("test"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tk := New("/")
|
||||
issues, err := tk.CheckPerms(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("CheckPerms failed: %v", err)
|
||||
}
|
||||
if len(issues) != 1 {
|
||||
t.Fatalf("expected 1 issue (world-writable), got %d", len(issues))
|
||||
}
|
||||
if issues[0].Issue != "World-writable" {
|
||||
t.Errorf("issue: want 'World-writable', got %q", issues[0].Issue)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
tk := New("/tmp")
|
||||
if tk.Dir != "/tmp" {
|
||||
t.Errorf("Dir: want /tmp, got %s", tk.Dir)
|
||||
}
|
||||
}
|
||||
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
143
devops/claude.go
Normal file
@@ -0,0 +1,143 @@
package devops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// ClaudeOptions configures the Claude sandbox session.
|
||||
type ClaudeOptions struct {
|
||||
NoAuth bool // Don't forward any auth
|
||||
Auth []string // Selective auth: "gh", "anthropic", "ssh", "git"
|
||||
Model string // Model to use: opus, sonnet
|
||||
}
|
||||
|
||||
// Claude starts a sandboxed Claude session in the dev environment.
|
||||
func (d *DevOps) Claude(ctx context.Context, projectDir string, opts ClaudeOptions) error {
|
||||
// Auto-boot if not running
|
||||
running, err := d.IsRunning(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !running {
|
||||
fmt.Println("Dev environment not running, booting...")
|
||||
if err := d.Boot(ctx, DefaultBootOptions()); err != nil {
|
||||
return fmt.Errorf("failed to boot: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Mount project
|
||||
if err := d.mountProject(ctx, projectDir); err != nil {
|
||||
return fmt.Errorf("failed to mount project: %w", err)
|
||||
}
|
||||
|
||||
// Prepare environment variables to forward
|
||||
envVars := []string{}
|
||||
|
||||
if !opts.NoAuth {
|
||||
authTypes := opts.Auth
|
||||
if len(authTypes) == 0 {
|
||||
authTypes = []string{"gh", "anthropic", "ssh", "git"}
|
||||
}
|
||||
|
||||
for _, auth := range authTypes {
|
||||
switch auth {
|
||||
case "anthropic":
|
||||
if key := os.Getenv("ANTHROPIC_API_KEY"); key != "" {
|
||||
envVars = append(envVars, "ANTHROPIC_API_KEY="+key)
|
||||
}
|
||||
case "git":
|
||||
// Forward git config
|
||||
name, _ := exec.Command("git", "config", "user.name").Output()
|
||||
email, _ := exec.Command("git", "config", "user.email").Output()
|
||||
if len(name) > 0 {
|
||||
envVars = append(envVars, "GIT_AUTHOR_NAME="+strings.TrimSpace(string(name)))
|
||||
envVars = append(envVars, "GIT_COMMITTER_NAME="+strings.TrimSpace(string(name)))
|
||||
}
|
||||
if len(email) > 0 {
|
||||
envVars = append(envVars, "GIT_AUTHOR_EMAIL="+strings.TrimSpace(string(email)))
|
||||
envVars = append(envVars, "GIT_COMMITTER_EMAIL="+strings.TrimSpace(string(email)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Build SSH command with agent forwarding
|
||||
args := []string{
|
||||
"-o", "StrictHostKeyChecking=yes",
|
||||
"-o", "UserKnownHostsFile=~/.core/known_hosts",
|
||||
"-o", "LogLevel=ERROR",
|
||||
"-A", // SSH agent forwarding
|
||||
"-p", fmt.Sprintf("%d", DefaultSSHPort),
|
||||
}
|
||||
|
||||
args = append(args, "root@localhost")
|
||||
|
||||
// Build command to run inside
|
||||
claudeCmd := "cd /app && claude"
|
||||
if opts.Model != "" {
|
||||
claudeCmd += " --model " + opts.Model
|
||||
}
|
||||
args = append(args, claudeCmd)
|
||||
|
||||
// Set environment for SSH
|
||||
cmd := exec.CommandContext(ctx, "ssh", args...)
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
// Pass the collected environment variables through to the SSH process.
// Appending inside a loop would rebuild cmd.Env from os.Environ() on each
// iteration and keep only the last entry, so append them all at once.
if len(envVars) > 0 {
cmd.Env = append(os.Environ(), envVars...)
}
|
||||
|
||||
fmt.Println("Starting Claude in sandboxed environment...")
|
||||
fmt.Println("Project mounted at /app")
|
||||
fmt.Println("Auth forwarded: SSH agent" + formatAuthList(opts))
|
||||
fmt.Println()
|
||||
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func formatAuthList(opts ClaudeOptions) string {
|
||||
if opts.NoAuth {
|
||||
return " (none)"
|
||||
}
|
||||
if len(opts.Auth) == 0 {
|
||||
return ", gh, anthropic, git"
|
||||
}
|
||||
return ", " + strings.Join(opts.Auth, ", ")
|
||||
}
|
||||
|
||||
// CopyGHAuth copies GitHub CLI auth to the VM.
|
||||
func (d *DevOps) CopyGHAuth(ctx context.Context) error {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ghConfigDir := filepath.Join(home, ".config", "gh")
|
||||
if !io.Local.IsDir(ghConfigDir) {
|
||||
return nil // No gh config to copy
|
||||
}
|
||||
|
||||
// Use scp to copy gh config
|
||||
cmd := exec.CommandContext(ctx, "scp",
|
||||
"-o", "StrictHostKeyChecking=yes",
|
||||
"-o", "UserKnownHostsFile=~/.core/known_hosts",
|
||||
"-o", "LogLevel=ERROR",
|
||||
"-P", fmt.Sprintf("%d", DefaultSSHPort),
|
||||
"-r", ghConfigDir,
|
||||
"root@localhost:/root/.config/",
|
||||
)
|
||||
return cmd.Run()
|
||||
}
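As a rough illustration of how the pieces above fit together, the sketch below drives a sandboxed session from a small main package. New, ClaudeOptions and Claude are the functions defined in this commit; the import path forge.lthn.ai/core/go-devops/devops is inferred from the module layout, and the chosen Auth and Model values are arbitrary.

package main

import (
	"context"
	"log"

	"forge.lthn.ai/core/go-devops/devops" // assumed import path for this package
	"forge.lthn.ai/core/go/pkg/io"
)

func main() {
	d, err := devops.New(io.Local)
	if err != nil {
		log.Fatal(err)
	}

	opts := devops.ClaudeOptions{
		Auth:  []string{"gh", "anthropic"}, // forward only these credentials
		Model: "sonnet",
	}

	// Boots the dev environment if needed, mounts the current directory at
	// /app and attaches an interactive Claude session over SSH.
	if err := d.Claude(context.Background(), ".", opts); err != nil {
		log.Fatal(err)
	}
}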
61
devops/claude_test.go
Normal file
@@ -0,0 +1,61 @@
package devops
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestClaudeOptions_Default(t *testing.T) {
|
||||
opts := ClaudeOptions{}
|
||||
assert.False(t, opts.NoAuth)
|
||||
assert.Nil(t, opts.Auth)
|
||||
assert.Empty(t, opts.Model)
|
||||
}
|
||||
|
||||
func TestClaudeOptions_Custom(t *testing.T) {
|
||||
opts := ClaudeOptions{
|
||||
NoAuth: true,
|
||||
Auth: []string{"gh", "anthropic"},
|
||||
Model: "opus",
|
||||
}
|
||||
assert.True(t, opts.NoAuth)
|
||||
assert.Equal(t, []string{"gh", "anthropic"}, opts.Auth)
|
||||
assert.Equal(t, "opus", opts.Model)
|
||||
}
|
||||
|
||||
func TestFormatAuthList_Good_NoAuth(t *testing.T) {
|
||||
opts := ClaudeOptions{NoAuth: true}
|
||||
result := formatAuthList(opts)
|
||||
assert.Equal(t, " (none)", result)
|
||||
}
|
||||
|
||||
func TestFormatAuthList_Good_Default(t *testing.T) {
|
||||
opts := ClaudeOptions{}
|
||||
result := formatAuthList(opts)
|
||||
assert.Equal(t, ", gh, anthropic, git", result)
|
||||
}
|
||||
|
||||
func TestFormatAuthList_Good_CustomAuth(t *testing.T) {
|
||||
opts := ClaudeOptions{
|
||||
Auth: []string{"gh"},
|
||||
}
|
||||
result := formatAuthList(opts)
|
||||
assert.Equal(t, ", gh", result)
|
||||
}
|
||||
|
||||
func TestFormatAuthList_Good_MultipleAuth(t *testing.T) {
|
||||
opts := ClaudeOptions{
|
||||
Auth: []string{"gh", "ssh", "git"},
|
||||
}
|
||||
result := formatAuthList(opts)
|
||||
assert.Equal(t, ", gh, ssh, git", result)
|
||||
}
|
||||
|
||||
func TestFormatAuthList_Good_EmptyAuth(t *testing.T) {
|
||||
opts := ClaudeOptions{
|
||||
Auth: []string{},
|
||||
}
|
||||
result := formatAuthList(opts)
|
||||
assert.Equal(t, ", gh, anthropic, git", result)
|
||||
}
|
||||
90
devops/config.go
Normal file
@@ -0,0 +1,90 @@
package devops
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/config"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// Config holds global devops configuration from ~/.core/config.yaml.
|
||||
type Config struct {
|
||||
Version int `yaml:"version" mapstructure:"version"`
|
||||
Images ImagesConfig `yaml:"images" mapstructure:"images"`
|
||||
}
|
||||
|
||||
// ImagesConfig holds image source configuration.
|
||||
type ImagesConfig struct {
|
||||
Source string `yaml:"source" mapstructure:"source"` // auto, github, registry, cdn
|
||||
GitHub GitHubConfig `yaml:"github,omitempty" mapstructure:"github,omitempty"`
|
||||
Registry RegistryConfig `yaml:"registry,omitempty" mapstructure:"registry,omitempty"`
|
||||
CDN CDNConfig `yaml:"cdn,omitempty" mapstructure:"cdn,omitempty"`
|
||||
}
|
||||
|
||||
// GitHubConfig holds GitHub Releases configuration.
|
||||
type GitHubConfig struct {
|
||||
Repo string `yaml:"repo" mapstructure:"repo"` // owner/repo format
|
||||
}
|
||||
|
||||
// RegistryConfig holds container registry configuration.
|
||||
type RegistryConfig struct {
|
||||
Image string `yaml:"image" mapstructure:"image"` // e.g., ghcr.io/host-uk/core-devops
|
||||
}
|
||||
|
||||
// CDNConfig holds CDN/S3 configuration.
|
||||
type CDNConfig struct {
|
||||
URL string `yaml:"url" mapstructure:"url"` // base URL for downloads
|
||||
}
|
||||
|
||||
// DefaultConfig returns sensible defaults.
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
Version: 1,
|
||||
Images: ImagesConfig{
|
||||
Source: "auto",
|
||||
GitHub: GitHubConfig{
|
||||
Repo: "host-uk/core-images",
|
||||
},
|
||||
Registry: RegistryConfig{
|
||||
Image: "ghcr.io/host-uk/core-devops",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ConfigPath returns the path to the config file.
|
||||
func ConfigPath() (string, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(home, ".core", "config.yaml"), nil
|
||||
}
|
||||
|
||||
// LoadConfig loads configuration from ~/.core/config.yaml using the provided medium.
|
||||
// Returns default config if file doesn't exist.
|
||||
func LoadConfig(m io.Medium) (*Config, error) {
|
||||
configPath, err := ConfigPath()
|
||||
if err != nil {
|
||||
return DefaultConfig(), nil
|
||||
}
|
||||
|
||||
cfg := DefaultConfig()
|
||||
|
||||
if !m.IsFile(configPath) {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// Use centralized config service
|
||||
c, err := config.New(config.WithMedium(m), config.WithPath(configPath))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := c.Get("", cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
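A minimal sketch of the loader's fall-back behaviour: a missing ~/.core/config.yaml yields DefaultConfig(), so only a malformed file produces an error. The import path for this package is assumed from the module layout.

package main

import (
	"fmt"
	"log"

	"forge.lthn.ai/core/go-devops/devops" // assumed import path
	"forge.lthn.ai/core/go/pkg/io"
)

func main() {
	cfg, err := devops.LoadConfig(io.Local)
	if err != nil {
		log.Fatalf("load devops config: %v", err) // only a malformed file errors
	}
	// With no config file present these print the defaults: "auto" and
	// "host-uk/core-images".
	fmt.Println("image source:", cfg.Images.Source)
	fmt.Println("github repo:", cfg.Images.GitHub.Repo)
}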
255
devops/config_test.go
Normal file
@@ -0,0 +1,255 @@
package devops
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDefaultConfig(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
assert.Equal(t, 1, cfg.Version)
|
||||
assert.Equal(t, "auto", cfg.Images.Source)
|
||||
assert.Equal(t, "host-uk/core-images", cfg.Images.GitHub.Repo)
|
||||
}
|
||||
|
||||
func TestConfigPath(t *testing.T) {
|
||||
path, err := ConfigPath()
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, path, ".core/config.yaml")
|
||||
}
|
||||
|
||||
func TestLoadConfig_Good(t *testing.T) {
|
||||
t.Run("returns default if not exists", func(t *testing.T) {
|
||||
// Mock HOME to a temp dir
|
||||
tempHome := t.TempDir()
|
||||
origHome := os.Getenv("HOME")
|
||||
t.Setenv("HOME", tempHome)
|
||||
defer func() { _ = os.Setenv("HOME", origHome) }()
|
||||
|
||||
cfg, err := LoadConfig(io.Local)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, DefaultConfig(), cfg)
|
||||
})
|
||||
|
||||
t.Run("loads existing config", func(t *testing.T) {
|
||||
tempHome := t.TempDir()
|
||||
t.Setenv("HOME", tempHome)
|
||||
|
||||
coreDir := filepath.Join(tempHome, ".core")
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
configData := `
|
||||
version: 2
|
||||
images:
|
||||
source: cdn
|
||||
cdn:
|
||||
url: https://cdn.example.com
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(coreDir, "config.yaml"), []byte(configData), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg, err := LoadConfig(io.Local)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, cfg.Version)
|
||||
assert.Equal(t, "cdn", cfg.Images.Source)
|
||||
assert.Equal(t, "https://cdn.example.com", cfg.Images.CDN.URL)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoadConfig_Bad(t *testing.T) {
|
||||
t.Run("invalid yaml", func(t *testing.T) {
|
||||
tempHome := t.TempDir()
|
||||
t.Setenv("HOME", tempHome)
|
||||
|
||||
coreDir := filepath.Join(tempHome, ".core")
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.WriteFile(filepath.Join(coreDir, "config.yaml"), []byte("invalid: yaml: :"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = LoadConfig(io.Local)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfig_Struct(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Version: 2,
|
||||
Images: ImagesConfig{
|
||||
Source: "github",
|
||||
GitHub: GitHubConfig{
|
||||
Repo: "owner/repo",
|
||||
},
|
||||
Registry: RegistryConfig{
|
||||
Image: "ghcr.io/owner/image",
|
||||
},
|
||||
CDN: CDNConfig{
|
||||
URL: "https://cdn.example.com",
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, 2, cfg.Version)
|
||||
assert.Equal(t, "github", cfg.Images.Source)
|
||||
assert.Equal(t, "owner/repo", cfg.Images.GitHub.Repo)
|
||||
assert.Equal(t, "ghcr.io/owner/image", cfg.Images.Registry.Image)
|
||||
assert.Equal(t, "https://cdn.example.com", cfg.Images.CDN.URL)
|
||||
}
|
||||
|
||||
func TestDefaultConfig_Complete(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
assert.Equal(t, 1, cfg.Version)
|
||||
assert.Equal(t, "auto", cfg.Images.Source)
|
||||
assert.Equal(t, "host-uk/core-images", cfg.Images.GitHub.Repo)
|
||||
assert.Equal(t, "ghcr.io/host-uk/core-devops", cfg.Images.Registry.Image)
|
||||
assert.Empty(t, cfg.Images.CDN.URL)
|
||||
}
|
||||
|
||||
func TestLoadConfig_Good_PartialConfig(t *testing.T) {
|
||||
tempHome := t.TempDir()
|
||||
t.Setenv("HOME", tempHome)
|
||||
|
||||
coreDir := filepath.Join(tempHome, ".core")
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Config only specifies source, should merge with defaults
|
||||
configData := `
|
||||
version: 1
|
||||
images:
|
||||
source: github
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(coreDir, "config.yaml"), []byte(configData), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg, err := LoadConfig(io.Local)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, cfg.Version)
|
||||
assert.Equal(t, "github", cfg.Images.Source)
|
||||
// Default values should be preserved
|
||||
assert.Equal(t, "host-uk/core-images", cfg.Images.GitHub.Repo)
|
||||
}
|
||||
|
||||
func TestLoadConfig_Good_AllSourceTypes(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
config string
|
||||
check func(*testing.T, *Config)
|
||||
}{
|
||||
{
|
||||
name: "github source",
|
||||
config: `
|
||||
version: 1
|
||||
images:
|
||||
source: github
|
||||
github:
|
||||
repo: custom/repo
|
||||
`,
|
||||
check: func(t *testing.T, cfg *Config) {
|
||||
assert.Equal(t, "github", cfg.Images.Source)
|
||||
assert.Equal(t, "custom/repo", cfg.Images.GitHub.Repo)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cdn source",
|
||||
config: `
|
||||
version: 1
|
||||
images:
|
||||
source: cdn
|
||||
cdn:
|
||||
url: https://custom-cdn.com
|
||||
`,
|
||||
check: func(t *testing.T, cfg *Config) {
|
||||
assert.Equal(t, "cdn", cfg.Images.Source)
|
||||
assert.Equal(t, "https://custom-cdn.com", cfg.Images.CDN.URL)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "registry source",
|
||||
config: `
|
||||
version: 1
|
||||
images:
|
||||
source: registry
|
||||
registry:
|
||||
image: docker.io/custom/image
|
||||
`,
|
||||
check: func(t *testing.T, cfg *Config) {
|
||||
assert.Equal(t, "registry", cfg.Images.Source)
|
||||
assert.Equal(t, "docker.io/custom/image", cfg.Images.Registry.Image)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tempHome := t.TempDir()
|
||||
t.Setenv("HOME", tempHome)
|
||||
|
||||
coreDir := filepath.Join(tempHome, ".core")
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.WriteFile(filepath.Join(coreDir, "config.yaml"), []byte(tt.config), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg, err := LoadConfig(io.Local)
|
||||
assert.NoError(t, err)
|
||||
tt.check(t, cfg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImagesConfig_Struct(t *testing.T) {
|
||||
ic := ImagesConfig{
|
||||
Source: "auto",
|
||||
GitHub: GitHubConfig{Repo: "test/repo"},
|
||||
}
|
||||
assert.Equal(t, "auto", ic.Source)
|
||||
assert.Equal(t, "test/repo", ic.GitHub.Repo)
|
||||
}
|
||||
|
||||
func TestGitHubConfig_Struct(t *testing.T) {
|
||||
gc := GitHubConfig{Repo: "owner/repo"}
|
||||
assert.Equal(t, "owner/repo", gc.Repo)
|
||||
}
|
||||
|
||||
func TestRegistryConfig_Struct(t *testing.T) {
|
||||
rc := RegistryConfig{Image: "ghcr.io/owner/image:latest"}
|
||||
assert.Equal(t, "ghcr.io/owner/image:latest", rc.Image)
|
||||
}
|
||||
|
||||
func TestCDNConfig_Struct(t *testing.T) {
|
||||
cc := CDNConfig{URL: "https://cdn.example.com/images"}
|
||||
assert.Equal(t, "https://cdn.example.com/images", cc.URL)
|
||||
}
|
||||
|
||||
func TestLoadConfig_Bad_UnreadableFile(t *testing.T) {
|
||||
// This test is platform-specific and may not work on all systems
|
||||
// Skip if we can't test file permissions properly
|
||||
if os.Getuid() == 0 {
|
||||
t.Skip("Skipping permission test when running as root")
|
||||
}
|
||||
|
||||
tempHome := t.TempDir()
|
||||
t.Setenv("HOME", tempHome)
|
||||
|
||||
coreDir := filepath.Join(tempHome, ".core")
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
configPath := filepath.Join(coreDir, "config.yaml")
|
||||
err = os.WriteFile(configPath, []byte("version: 1"), 0000)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = LoadConfig(io.Local)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Restore permissions so cleanup works
|
||||
_ = os.Chmod(configPath, 0644)
|
||||
}
|
||||
243
devops/devops.go
Normal file
@@ -0,0 +1,243 @@
// Package devops provides a portable development environment using LinuxKit images.
|
||||
package devops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/container"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultSSHPort is the default port for SSH connections to the dev environment.
|
||||
DefaultSSHPort = 2222
|
||||
)
|
||||
|
||||
// DevOps manages the portable development environment.
|
||||
type DevOps struct {
|
||||
medium io.Medium
|
||||
config *Config
|
||||
images *ImageManager
|
||||
container *container.LinuxKitManager
|
||||
}
|
||||
|
||||
// New creates a new DevOps instance using the provided medium.
|
||||
func New(m io.Medium) (*DevOps, error) {
|
||||
cfg, err := LoadConfig(m)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("devops.New: failed to load config: %w", err)
|
||||
}
|
||||
|
||||
images, err := NewImageManager(m, cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("devops.New: failed to create image manager: %w", err)
|
||||
}
|
||||
|
||||
mgr, err := container.NewLinuxKitManager(io.Local)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("devops.New: failed to create container manager: %w", err)
|
||||
}
|
||||
|
||||
return &DevOps{
|
||||
medium: m,
|
||||
config: cfg,
|
||||
images: images,
|
||||
container: mgr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ImageName returns the platform-specific image name.
|
||||
func ImageName() string {
|
||||
return fmt.Sprintf("core-devops-%s-%s.qcow2", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
|
||||
// ImagesDir returns the path to the images directory.
|
||||
func ImagesDir() (string, error) {
|
||||
if dir := os.Getenv("CORE_IMAGES_DIR"); dir != "" {
|
||||
return dir, nil
|
||||
}
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(home, ".core", "images"), nil
|
||||
}
|
||||
|
||||
// ImagePath returns the full path to the platform-specific image.
|
||||
func ImagePath() (string, error) {
|
||||
dir, err := ImagesDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(dir, ImageName()), nil
|
||||
}
|
||||
|
||||
// IsInstalled checks if the dev image is installed.
|
||||
func (d *DevOps) IsInstalled() bool {
|
||||
path, err := ImagePath()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return d.medium.IsFile(path)
|
||||
}
|
||||
|
||||
// Install downloads and installs the dev image.
|
||||
func (d *DevOps) Install(ctx context.Context, progress func(downloaded, total int64)) error {
|
||||
return d.images.Install(ctx, progress)
|
||||
}
|
||||
|
||||
// CheckUpdate checks if an update is available.
|
||||
func (d *DevOps) CheckUpdate(ctx context.Context) (current, latest string, hasUpdate bool, err error) {
|
||||
return d.images.CheckUpdate(ctx)
|
||||
}
|
||||
|
||||
// BootOptions configures how to boot the dev environment.
|
||||
type BootOptions struct {
|
||||
Memory int // MB, default 4096
|
||||
CPUs int // default 2
|
||||
Name string // container name
|
||||
Fresh bool // destroy existing and start fresh
|
||||
}
|
||||
|
||||
// DefaultBootOptions returns sensible defaults.
|
||||
func DefaultBootOptions() BootOptions {
|
||||
return BootOptions{
|
||||
Memory: 4096,
|
||||
CPUs: 2,
|
||||
Name: "core-dev",
|
||||
}
|
||||
}
|
||||
|
||||
// Boot starts the dev environment.
|
||||
func (d *DevOps) Boot(ctx context.Context, opts BootOptions) error {
|
||||
if !d.images.IsInstalled() {
|
||||
return fmt.Errorf("dev image not installed (run 'core dev install' first)")
|
||||
}
|
||||
|
||||
// Check if already running
|
||||
if !opts.Fresh {
|
||||
running, err := d.IsRunning(ctx)
|
||||
if err == nil && running {
|
||||
return fmt.Errorf("dev environment already running (use 'core dev stop' first or --fresh)")
|
||||
}
|
||||
}
|
||||
|
||||
// Stop existing if fresh
|
||||
if opts.Fresh {
|
||||
_ = d.Stop(ctx)
|
||||
}
|
||||
|
||||
imagePath, err := ImagePath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Build run options for LinuxKitManager
|
||||
runOpts := container.RunOptions{
|
||||
Name: opts.Name,
|
||||
Memory: opts.Memory,
|
||||
CPUs: opts.CPUs,
|
||||
SSHPort: DefaultSSHPort,
|
||||
Detach: true,
|
||||
}
|
||||
|
||||
_, err = d.container.Run(ctx, imagePath, runOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for SSH to be ready and scan host key
|
||||
// We try for up to 60 seconds as the VM takes a moment to boot
|
||||
var lastErr error
|
||||
for i := 0; i < 30; i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(2 * time.Second):
|
||||
if err := ensureHostKey(ctx, runOpts.SSHPort); err == nil {
|
||||
return nil
|
||||
} else {
|
||||
lastErr = err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to verify host key after boot: %w", lastErr)
|
||||
}
|
||||
|
||||
// Stop stops the dev environment.
|
||||
func (d *DevOps) Stop(ctx context.Context) error {
|
||||
c, err := d.findContainer(ctx, "core-dev")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c == nil {
|
||||
return fmt.Errorf("dev environment not found")
|
||||
}
|
||||
return d.container.Stop(ctx, c.ID)
|
||||
}
|
||||
|
||||
// IsRunning checks if the dev environment is running.
|
||||
func (d *DevOps) IsRunning(ctx context.Context) (bool, error) {
|
||||
c, err := d.findContainer(ctx, "core-dev")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return c != nil && c.Status == container.StatusRunning, nil
|
||||
}
|
||||
|
||||
// findContainer finds a container by name.
|
||||
func (d *DevOps) findContainer(ctx context.Context, name string) (*container.Container, error) {
|
||||
containers, err := d.container.List(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, c := range containers {
|
||||
if c.Name == name {
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// DevStatus returns information about the dev environment.
|
||||
type DevStatus struct {
|
||||
Installed bool
|
||||
Running bool
|
||||
ImageVersion string
|
||||
ContainerID string
|
||||
Memory int
|
||||
CPUs int
|
||||
SSHPort int
|
||||
Uptime time.Duration
|
||||
}
|
||||
|
||||
// Status returns the current dev environment status.
|
||||
func (d *DevOps) Status(ctx context.Context) (*DevStatus, error) {
|
||||
status := &DevStatus{
|
||||
Installed: d.images.IsInstalled(),
|
||||
SSHPort: DefaultSSHPort,
|
||||
}
|
||||
|
||||
if info, ok := d.images.manifest.Images[ImageName()]; ok {
|
||||
status.ImageVersion = info.Version
|
||||
}
|
||||
|
||||
c, _ := d.findContainer(ctx, "core-dev")
|
||||
if c != nil {
|
||||
status.Running = c.Status == container.StatusRunning
|
||||
status.ContainerID = c.ID
|
||||
status.Memory = c.Memory
|
||||
status.CPUs = c.CPUs
|
||||
if status.Running {
|
||||
status.Uptime = time.Since(c.StartedAt)
|
||||
}
|
||||
}
|
||||
|
||||
return status, nil
|
||||
}
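The sketch below strings the lifecycle together end to end: install the image if missing, boot with the defaults, then report status. It only calls the exported API defined in this file; the import path is assumed from the module layout, and the progress printout is illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"forge.lthn.ai/core/go-devops/devops" // assumed import path
	"forge.lthn.ai/core/go/pkg/io"
)

func main() {
	ctx := context.Background()

	d, err := devops.New(io.Local)
	if err != nil {
		log.Fatal(err)
	}

	// First run: download the platform-specific qcow2 image.
	if !d.IsInstalled() {
		err := d.Install(ctx, func(done, total int64) {
			fmt.Printf("\rdownloaded %d/%d bytes", done, total)
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println()
	}

	// Boot with the defaults: 4096 MB memory, 2 CPUs, container name "core-dev".
	if err := d.Boot(ctx, devops.DefaultBootOptions()); err != nil {
		log.Fatal(err)
	}

	st, err := d.Status(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("running=%v ssh=%d uptime=%s\n", st.Running, st.SSHPort, st.Uptime)
}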
833
devops/devops_test.go
Normal file
@@ -0,0 +1,833 @@
package devops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/container"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestImageName(t *testing.T) {
|
||||
name := ImageName()
|
||||
assert.Contains(t, name, "core-devops-")
|
||||
assert.Contains(t, name, runtime.GOOS)
|
||||
assert.Contains(t, name, runtime.GOARCH)
|
||||
assert.Equal(t, ".qcow2", filepath.Ext(name))
|
||||
}
|
||||
|
||||
func TestImagesDir(t *testing.T) {
|
||||
t.Run("default directory", func(t *testing.T) {
|
||||
// Unset env if it exists
|
||||
orig := os.Getenv("CORE_IMAGES_DIR")
|
||||
_ = os.Unsetenv("CORE_IMAGES_DIR")
|
||||
defer func() { _ = os.Setenv("CORE_IMAGES_DIR", orig) }()
|
||||
|
||||
dir, err := ImagesDir()
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, dir, ".core/images")
|
||||
})
|
||||
|
||||
t.Run("environment override", func(t *testing.T) {
|
||||
customDir := "/tmp/custom-images"
|
||||
t.Setenv("CORE_IMAGES_DIR", customDir)
|
||||
|
||||
dir, err := ImagesDir()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, customDir, dir)
|
||||
})
|
||||
}
|
||||
|
||||
func TestImagePath(t *testing.T) {
|
||||
customDir := "/tmp/images"
|
||||
t.Setenv("CORE_IMAGES_DIR", customDir)
|
||||
|
||||
path, err := ImagePath()
|
||||
assert.NoError(t, err)
|
||||
expected := filepath.Join(customDir, ImageName())
|
||||
assert.Equal(t, expected, path)
|
||||
}
|
||||
|
||||
func TestDefaultBootOptions(t *testing.T) {
|
||||
opts := DefaultBootOptions()
|
||||
assert.Equal(t, 4096, opts.Memory)
|
||||
assert.Equal(t, 2, opts.CPUs)
|
||||
assert.Equal(t, "core-dev", opts.Name)
|
||||
assert.False(t, opts.Fresh)
|
||||
}
|
||||
|
||||
func TestIsInstalled_Bad(t *testing.T) {
|
||||
t.Run("returns false for non-existent image", func(t *testing.T) {
|
||||
// Point to a temp directory that is empty
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
// Create devops instance manually to avoid loading real config/images
|
||||
d := &DevOps{medium: io.Local}
|
||||
assert.False(t, d.IsInstalled())
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsInstalled_Good(t *testing.T) {
|
||||
t.Run("returns true when image exists", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
// Create the image file
|
||||
imagePath := filepath.Join(tempDir, ImageName())
|
||||
err := os.WriteFile(imagePath, []byte("fake image data"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
d := &DevOps{medium: io.Local}
|
||||
assert.True(t, d.IsInstalled())
|
||||
})
|
||||
}
|
||||
|
||||
type mockHypervisor struct{}
|
||||
|
||||
func (m *mockHypervisor) Name() string { return "mock" }
|
||||
func (m *mockHypervisor) Available() bool { return true }
|
||||
func (m *mockHypervisor) BuildCommand(ctx context.Context, image string, opts *container.HypervisorOptions) (*exec.Cmd, error) {
|
||||
return exec.Command("true"), nil
|
||||
}
|
||||
|
||||
func TestDevOps_Status_Good(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup mock container manager
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
// Add a fake running container
|
||||
c := &container.Container{
|
||||
ID: "test-id",
|
||||
Name: "core-dev",
|
||||
Status: container.StatusRunning,
|
||||
PID: os.Getpid(), // Use our own PID so isProcessRunning returns true
|
||||
StartedAt: time.Now().Add(-time.Hour),
|
||||
Memory: 2048,
|
||||
CPUs: 4,
|
||||
}
|
||||
err = state.Add(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
status, err := d.Status(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, status)
|
||||
assert.True(t, status.Running)
|
||||
assert.Equal(t, "test-id", status.ContainerID)
|
||||
assert.Equal(t, 2048, status.Memory)
|
||||
assert.Equal(t, 4, status.CPUs)
|
||||
}
|
||||
|
||||
func TestDevOps_Status_Good_NotInstalled(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
status, err := d.Status(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, status)
|
||||
assert.False(t, status.Installed)
|
||||
assert.False(t, status.Running)
|
||||
assert.Equal(t, 2222, status.SSHPort)
|
||||
}
|
||||
|
||||
func TestDevOps_Status_Good_NoContainer(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
// Create fake image to mark as installed
|
||||
imagePath := filepath.Join(tempDir, ImageName())
|
||||
err := os.WriteFile(imagePath, []byte("fake"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
status, err := d.Status(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, status)
|
||||
assert.True(t, status.Installed)
|
||||
assert.False(t, status.Running)
|
||||
assert.Empty(t, status.ContainerID)
|
||||
}
|
||||
|
||||
func TestDevOps_IsRunning_Good(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
c := &container.Container{
|
||||
ID: "test-id",
|
||||
Name: "core-dev",
|
||||
Status: container.StatusRunning,
|
||||
PID: os.Getpid(),
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
err = state.Add(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
running, err := d.IsRunning(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, running)
|
||||
}
|
||||
|
||||
func TestDevOps_IsRunning_Bad_NotRunning(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
running, err := d.IsRunning(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, running)
|
||||
}
|
||||
|
||||
func TestDevOps_IsRunning_Bad_ContainerStopped(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
c := &container.Container{
|
||||
ID: "test-id",
|
||||
Name: "core-dev",
|
||||
Status: container.StatusStopped,
|
||||
PID: 12345,
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
err = state.Add(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
running, err := d.IsRunning(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, running)
|
||||
}
|
||||
|
||||
func TestDevOps_findContainer_Good(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
c := &container.Container{
|
||||
ID: "test-id",
|
||||
Name: "my-container",
|
||||
Status: container.StatusRunning,
|
||||
PID: os.Getpid(),
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
err = state.Add(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
found, err := d.findContainer(context.Background(), "my-container")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, found)
|
||||
assert.Equal(t, "test-id", found.ID)
|
||||
assert.Equal(t, "my-container", found.Name)
|
||||
}
|
||||
|
||||
func TestDevOps_findContainer_Bad_NotFound(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
found, err := d.findContainer(context.Background(), "nonexistent")
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, found)
|
||||
}
|
||||
|
||||
func TestDevOps_Stop_Bad_NotFound(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
err = d.Stop(context.Background())
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not found")
|
||||
}
|
||||
|
||||
func TestBootOptions_Custom(t *testing.T) {
|
||||
opts := BootOptions{
|
||||
Memory: 8192,
|
||||
CPUs: 4,
|
||||
Name: "custom-dev",
|
||||
Fresh: true,
|
||||
}
|
||||
assert.Equal(t, 8192, opts.Memory)
|
||||
assert.Equal(t, 4, opts.CPUs)
|
||||
assert.Equal(t, "custom-dev", opts.Name)
|
||||
assert.True(t, opts.Fresh)
|
||||
}
|
||||
|
||||
func TestDevStatus_Struct(t *testing.T) {
|
||||
status := DevStatus{
|
||||
Installed: true,
|
||||
Running: true,
|
||||
ImageVersion: "v1.2.3",
|
||||
ContainerID: "abc123",
|
||||
Memory: 4096,
|
||||
CPUs: 2,
|
||||
SSHPort: 2222,
|
||||
Uptime: time.Hour,
|
||||
}
|
||||
assert.True(t, status.Installed)
|
||||
assert.True(t, status.Running)
|
||||
assert.Equal(t, "v1.2.3", status.ImageVersion)
|
||||
assert.Equal(t, "abc123", status.ContainerID)
|
||||
assert.Equal(t, 4096, status.Memory)
|
||||
assert.Equal(t, 2, status.CPUs)
|
||||
assert.Equal(t, 2222, status.SSHPort)
|
||||
assert.Equal(t, time.Hour, status.Uptime)
|
||||
}
|
||||
|
||||
func TestDevOps_Boot_Bad_NotInstalled(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
err = d.Boot(context.Background(), DefaultBootOptions())
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not installed")
|
||||
}
|
||||
|
||||
func TestDevOps_Boot_Bad_AlreadyRunning(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
// Create fake image
|
||||
imagePath := filepath.Join(tempDir, ImageName())
|
||||
err := os.WriteFile(imagePath, []byte("fake"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
// Add a running container
|
||||
c := &container.Container{
|
||||
ID: "test-id",
|
||||
Name: "core-dev",
|
||||
Status: container.StatusRunning,
|
||||
PID: os.Getpid(),
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
err = state.Add(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = d.Boot(context.Background(), DefaultBootOptions())
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "already running")
|
||||
}
|
||||
|
||||
func TestDevOps_Status_Good_WithImageVersion(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
// Create fake image
|
||||
imagePath := filepath.Join(tempDir, ImageName())
|
||||
err := os.WriteFile(imagePath, []byte("fake"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Manually set manifest with version info
|
||||
mgr.manifest.Images[ImageName()] = ImageInfo{
|
||||
Version: "v1.2.3",
|
||||
Source: "test",
|
||||
}
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
config: cfg,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
status, err := d.Status(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, status.Installed)
|
||||
assert.Equal(t, "v1.2.3", status.ImageVersion)
|
||||
}
|
||||
|
||||
func TestDevOps_findContainer_Good_MultipleContainers(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
// Add multiple containers
|
||||
c1 := &container.Container{
|
||||
ID: "id-1",
|
||||
Name: "container-1",
|
||||
Status: container.StatusRunning,
|
||||
PID: os.Getpid(),
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
c2 := &container.Container{
|
||||
ID: "id-2",
|
||||
Name: "container-2",
|
||||
Status: container.StatusRunning,
|
||||
PID: os.Getpid(),
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
err = state.Add(c1)
|
||||
require.NoError(t, err)
|
||||
err = state.Add(c2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Find specific container
|
||||
found, err := d.findContainer(context.Background(), "container-2")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, found)
|
||||
assert.Equal(t, "id-2", found.ID)
|
||||
}
|
||||
|
||||
func TestDevOps_Status_Good_ContainerWithUptime(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
startTime := time.Now().Add(-2 * time.Hour)
|
||||
c := &container.Container{
|
||||
ID: "test-id",
|
||||
Name: "core-dev",
|
||||
Status: container.StatusRunning,
|
||||
PID: os.Getpid(),
|
||||
StartedAt: startTime,
|
||||
Memory: 4096,
|
||||
CPUs: 2,
|
||||
}
|
||||
err = state.Add(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
status, err := d.Status(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, status.Running)
|
||||
assert.GreaterOrEqual(t, status.Uptime.Hours(), float64(1))
|
||||
}
|
||||
|
||||
func TestDevOps_IsRunning_Bad_DifferentContainerName(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
// Add a container with different name
|
||||
c := &container.Container{
|
||||
ID: "test-id",
|
||||
Name: "other-container",
|
||||
Status: container.StatusRunning,
|
||||
PID: os.Getpid(),
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
err = state.Add(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
// IsRunning looks for "core-dev", not "other-container"
|
||||
running, err := d.IsRunning(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, running)
|
||||
}
|
||||
|
||||
func TestDevOps_Boot_Good_FreshFlag(t *testing.T) {
|
||||
t.Setenv("CORE_SKIP_SSH_SCAN", "true")
|
||||
tempDir, err := os.MkdirTemp("", "devops-test-*")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = os.RemoveAll(tempDir) })
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
// Create fake image
|
||||
imagePath := filepath.Join(tempDir, ImageName())
|
||||
err = os.WriteFile(imagePath, []byte("fake"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
// Add an existing container with non-existent PID (will be seen as stopped)
|
||||
c := &container.Container{
|
||||
ID: "old-id",
|
||||
Name: "core-dev",
|
||||
Status: container.StatusRunning,
|
||||
PID: 99999999, // Non-existent PID - List() will mark it as stopped
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
err = state.Add(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Boot with Fresh=true should try to stop the existing container
|
||||
// then run a new one. The mock hypervisor "succeeds" so this won't error
|
||||
opts := BootOptions{
|
||||
Memory: 4096,
|
||||
CPUs: 2,
|
||||
Name: "core-dev",
|
||||
Fresh: true,
|
||||
}
|
||||
err = d.Boot(context.Background(), opts)
|
||||
// The mock hypervisor's Run succeeds
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestDevOps_Stop_Bad_ContainerNotRunning(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
// Add a container that's already stopped
|
||||
c := &container.Container{
|
||||
ID: "test-id",
|
||||
Name: "core-dev",
|
||||
Status: container.StatusStopped,
|
||||
PID: 99999999,
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
err = state.Add(c)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Stop should fail because container is not running
|
||||
err = d.Stop(context.Background())
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not running")
|
||||
}
|
||||
|
||||
func TestDevOps_Boot_Good_FreshWithNoExisting(t *testing.T) {
|
||||
t.Setenv("CORE_SKIP_SSH_SCAN", "true")
|
||||
tempDir, err := os.MkdirTemp("", "devops-boot-fresh-*")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = os.RemoveAll(tempDir) })
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
// Create fake image
|
||||
imagePath := filepath.Join(tempDir, ImageName())
|
||||
err = os.WriteFile(imagePath, []byte("fake"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
// Boot with Fresh=true but no existing container
|
||||
opts := BootOptions{
|
||||
Memory: 4096,
|
||||
CPUs: 2,
|
||||
Name: "core-dev",
|
||||
Fresh: true,
|
||||
}
|
||||
err = d.Boot(context.Background(), opts)
|
||||
// The mock hypervisor succeeds
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestImageName_Format(t *testing.T) {
|
||||
name := ImageName()
|
||||
// Check format: core-devops-{os}-{arch}.qcow2
|
||||
assert.Contains(t, name, "core-devops-")
|
||||
assert.Contains(t, name, runtime.GOOS)
|
||||
assert.Contains(t, name, runtime.GOARCH)
|
||||
assert.True(t, filepath.Ext(name) == ".qcow2")
|
||||
}
|
||||
|
||||
func TestDevOps_Install_Delegates(t *testing.T) {
|
||||
// This test verifies the Install method delegates to ImageManager
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
}
|
||||
|
||||
// This will fail because no source is available, but it tests delegation
|
||||
err = d.Install(context.Background(), nil)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestDevOps_CheckUpdate_Delegates(t *testing.T) {
|
||||
// This test verifies the CheckUpdate method delegates to ImageManager
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
}
|
||||
|
||||
// This will fail because image not installed, but it tests delegation
|
||||
_, _, _, err = d.CheckUpdate(context.Background())
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestDevOps_Boot_Good_Success(t *testing.T) {
|
||||
t.Setenv("CORE_SKIP_SSH_SCAN", "true")
|
||||
tempDir, err := os.MkdirTemp("", "devops-boot-success-*")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = os.RemoveAll(tempDir) })
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
// Create fake image
|
||||
imagePath := filepath.Join(tempDir, ImageName())
|
||||
err = os.WriteFile(imagePath, []byte("fake"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
statePath := filepath.Join(tempDir, "containers.json")
|
||||
state := container.NewState(io.Local, statePath)
|
||||
h := &mockHypervisor{}
|
||||
cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
images: mgr,
|
||||
container: cm,
|
||||
}
|
||||
|
||||
// Boot without Fresh flag and no existing container
|
||||
opts := DefaultBootOptions()
|
||||
err = d.Boot(context.Background(), opts)
|
||||
assert.NoError(t, err) // Mock hypervisor succeeds
|
||||
}
|
||||
|
||||
func TestDevOps_Config(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tempDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
d := &DevOps{medium: io.Local,
|
||||
config: cfg,
|
||||
images: mgr,
|
||||
}
|
||||
|
||||
assert.NotNil(t, d.config)
|
||||
assert.Equal(t, "auto", d.config.Images.Source)
|
||||
}
|
||||
198
devops/images.go
Normal file
@@ -0,0 +1,198 @@
package devops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/devops/sources"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
// ImageManager handles image downloads and updates.
|
||||
type ImageManager struct {
|
||||
medium io.Medium
|
||||
config *Config
|
||||
manifest *Manifest
|
||||
sources []sources.ImageSource
|
||||
}
|
||||
|
||||
// Manifest tracks installed images.
|
||||
type Manifest struct {
|
||||
medium io.Medium
|
||||
Images map[string]ImageInfo `json:"images"`
|
||||
path string
|
||||
}
|
||||
|
||||
// ImageInfo holds metadata about an installed image.
|
||||
type ImageInfo struct {
|
||||
Version string `json:"version"`
|
||||
SHA256 string `json:"sha256,omitempty"`
|
||||
Downloaded time.Time `json:"downloaded"`
|
||||
Source string `json:"source"`
|
||||
}
|
||||
|
||||
// NewImageManager creates a new image manager.
|
||||
func NewImageManager(m io.Medium, cfg *Config) (*ImageManager, error) {
|
||||
imagesDir, err := ImagesDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Ensure images directory exists
|
||||
if err := m.EnsureDir(imagesDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
	// Load or create manifest
	manifestPath := filepath.Join(imagesDir, "manifest.json")
	manifest, err := loadManifest(m, manifestPath)
	if err != nil {
		return nil, err
	}

	// Build source list based on config
	imageName := ImageName()
	sourceCfg := sources.SourceConfig{
		GitHubRepo:    cfg.Images.GitHub.Repo,
		RegistryImage: cfg.Images.Registry.Image,
		CDNURL:        cfg.Images.CDN.URL,
		ImageName:     imageName,
	}

	var srcs []sources.ImageSource
	switch cfg.Images.Source {
	case "github":
		srcs = []sources.ImageSource{sources.NewGitHubSource(sourceCfg)}
	case "cdn":
		srcs = []sources.ImageSource{sources.NewCDNSource(sourceCfg)}
	default: // "auto"
		srcs = []sources.ImageSource{
			sources.NewGitHubSource(sourceCfg),
			sources.NewCDNSource(sourceCfg),
		}
	}

	return &ImageManager{
		medium:   m,
		config:   cfg,
		manifest: manifest,
		sources:  srcs,
	}, nil
}

// IsInstalled checks if the dev image is installed.
func (m *ImageManager) IsInstalled() bool {
	path, err := ImagePath()
	if err != nil {
		return false
	}
	return m.medium.IsFile(path)
}

// Install downloads and installs the dev image.
func (m *ImageManager) Install(ctx context.Context, progress func(downloaded, total int64)) error {
	imagesDir, err := ImagesDir()
	if err != nil {
		return err
	}

	// Find first available source
	var src sources.ImageSource
	for _, s := range m.sources {
		if s.Available() {
			src = s
			break
		}
	}
	if src == nil {
		return fmt.Errorf("no image source available")
	}

	// Get version
	version, err := src.LatestVersion(ctx)
	if err != nil {
		return fmt.Errorf("failed to get latest version: %w", err)
	}

	fmt.Printf("Downloading %s from %s...\n", ImageName(), src.Name())

	// Download
	if err := src.Download(ctx, m.medium, imagesDir, progress); err != nil {
		return err
	}

	// Update manifest
	m.manifest.Images[ImageName()] = ImageInfo{
		Version:    version,
		Downloaded: time.Now(),
		Source:     src.Name(),
	}

	return m.manifest.Save()
}

// CheckUpdate checks if an update is available.
func (m *ImageManager) CheckUpdate(ctx context.Context) (current, latest string, hasUpdate bool, err error) {
	info, ok := m.manifest.Images[ImageName()]
	if !ok {
		return "", "", false, fmt.Errorf("image not installed")
	}
	current = info.Version

	// Find first available source
	var src sources.ImageSource
	for _, s := range m.sources {
		if s.Available() {
			src = s
			break
		}
	}
	if src == nil {
		return current, "", false, fmt.Errorf("no image source available")
	}

	latest, err = src.LatestVersion(ctx)
	if err != nil {
		return current, "", false, err
	}

	hasUpdate = current != latest
	return current, latest, hasUpdate, nil
}

func loadManifest(m io.Medium, path string) (*Manifest, error) {
	manifest := &Manifest{
		medium: m,
		Images: make(map[string]ImageInfo),
		path:   path,
	}

	content, err := m.Read(path)
	if err != nil {
		if os.IsNotExist(err) {
			return manifest, nil
		}
		return nil, err
	}

	if err := json.Unmarshal([]byte(content), manifest); err != nil {
		return nil, err
	}
	manifest.medium = m
	manifest.path = path

	return manifest, nil
}

// Save writes the manifest to disk.
func (m *Manifest) Save() error {
	data, err := json.MarshalIndent(m, "", " ")
	if err != nil {
		return err
	}
	return m.medium.Write(m.path, string(data))
}
583
devops/images_test.go
Normal file
@@ -0,0 +1,583 @@
package devops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/devops/sources"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestImageManager_Good_IsInstalled(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Not installed yet
|
||||
assert.False(t, mgr.IsInstalled())
|
||||
|
||||
// Create fake image
|
||||
imagePath := filepath.Join(tmpDir, ImageName())
|
||||
err = os.WriteFile(imagePath, []byte("fake"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Now installed
|
||||
assert.True(t, mgr.IsInstalled())
|
||||
}
|
||||
|
||||
func TestNewImageManager_Good(t *testing.T) {
|
||||
t.Run("creates manager with cdn source", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
cfg.Images.Source = "cdn"
|
||||
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, mgr)
|
||||
assert.Len(t, mgr.sources, 1)
|
||||
assert.Equal(t, "cdn", mgr.sources[0].Name())
|
||||
})
|
||||
|
||||
t.Run("creates manager with github source", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
cfg.Images.Source = "github"
|
||||
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, mgr)
|
||||
assert.Len(t, mgr.sources, 1)
|
||||
assert.Equal(t, "github", mgr.sources[0].Name())
|
||||
})
|
||||
}
|
||||
|
||||
func TestManifest_Save(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := filepath.Join(tmpDir, "manifest.json")
|
||||
|
||||
m := &Manifest{
|
||||
medium: io.Local,
|
||||
Images: make(map[string]ImageInfo),
|
||||
path: path,
|
||||
}
|
||||
|
||||
m.Images["test.img"] = ImageInfo{
|
||||
Version: "1.0.0",
|
||||
Source: "test",
|
||||
}
|
||||
|
||||
err := m.Save()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Verify file exists and has content
|
||||
_, err = os.Stat(path)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Reload
|
||||
m2, err := loadManifest(io.Local, path)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "1.0.0", m2.Images["test.img"].Version)
|
||||
}
|
||||
|
||||
func TestLoadManifest_Bad(t *testing.T) {
|
||||
t.Run("invalid json", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := filepath.Join(tmpDir, "manifest.json")
|
||||
err := os.WriteFile(path, []byte("invalid json"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = loadManifest(io.Local, path)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCheckUpdate_Bad(t *testing.T) {
|
||||
t.Run("image not installed", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, _, err = mgr.CheckUpdate(context.Background())
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "image not installed")
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewImageManager_Good_AutoSource(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
cfg.Images.Source = "auto"
|
||||
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, mgr)
|
||||
assert.Len(t, mgr.sources, 2) // github and cdn
|
||||
}
|
||||
|
||||
func TestNewImageManager_Good_UnknownSourceFallsToAuto(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
cfg.Images.Source = "unknown"
|
||||
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, mgr)
|
||||
assert.Len(t, mgr.sources, 2) // falls to default (auto) which is github + cdn
|
||||
}
|
||||
|
||||
func TestLoadManifest_Good_Empty(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := filepath.Join(tmpDir, "nonexistent.json")
|
||||
|
||||
m, err := loadManifest(io.Local, path)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, m)
|
||||
assert.NotNil(t, m.Images)
|
||||
assert.Empty(t, m.Images)
|
||||
assert.Equal(t, path, m.path)
|
||||
}
|
||||
|
||||
func TestLoadManifest_Good_ExistingData(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := filepath.Join(tmpDir, "manifest.json")
|
||||
|
||||
data := `{"images":{"test.img":{"version":"2.0.0","source":"cdn"}}}`
|
||||
err := os.WriteFile(path, []byte(data), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
m, err := loadManifest(io.Local, path)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, m)
|
||||
assert.Equal(t, "2.0.0", m.Images["test.img"].Version)
|
||||
assert.Equal(t, "cdn", m.Images["test.img"].Source)
|
||||
}
|
||||
|
||||
func TestImageInfo_Struct(t *testing.T) {
|
||||
info := ImageInfo{
|
||||
Version: "1.0.0",
|
||||
SHA256: "abc123",
|
||||
Downloaded: time.Now(),
|
||||
Source: "github",
|
||||
}
|
||||
assert.Equal(t, "1.0.0", info.Version)
|
||||
assert.Equal(t, "abc123", info.SHA256)
|
||||
assert.False(t, info.Downloaded.IsZero())
|
||||
assert.Equal(t, "github", info.Source)
|
||||
}
|
||||
|
||||
func TestManifest_Save_Good_CreatesDirs(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
nestedPath := filepath.Join(tmpDir, "nested", "dir", "manifest.json")
|
||||
|
||||
m := &Manifest{
|
||||
medium: io.Local,
|
||||
Images: make(map[string]ImageInfo),
|
||||
path: nestedPath,
|
||||
}
|
||||
m.Images["test.img"] = ImageInfo{Version: "1.0.0"}
|
||||
|
||||
// Save creates parent directories automatically via io.Local.Write
|
||||
err := m.Save()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Verify file was created
|
||||
_, err = os.Stat(nestedPath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestManifest_Save_Good_Overwrite(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := filepath.Join(tmpDir, "manifest.json")
|
||||
|
||||
// First save
|
||||
m1 := &Manifest{
|
||||
medium: io.Local,
|
||||
Images: make(map[string]ImageInfo),
|
||||
path: path,
|
||||
}
|
||||
m1.Images["test.img"] = ImageInfo{Version: "1.0.0"}
|
||||
err := m1.Save()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Second save with different data
|
||||
m2 := &Manifest{
|
||||
medium: io.Local,
|
||||
Images: make(map[string]ImageInfo),
|
||||
path: path,
|
||||
}
|
||||
m2.Images["other.img"] = ImageInfo{Version: "2.0.0"}
|
||||
err = m2.Save()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify second data
|
||||
loaded, err := loadManifest(io.Local, path)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "2.0.0", loaded.Images["other.img"].Version)
|
||||
_, exists := loaded.Images["test.img"]
|
||||
assert.False(t, exists)
|
||||
}
|
||||
|
||||
func TestImageManager_Install_Bad_NoSourceAvailable(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
// Create manager with empty sources
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")},
|
||||
sources: nil, // no sources
|
||||
}
|
||||
|
||||
err := mgr.Install(context.Background(), nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no image source available")
|
||||
}
|
||||
|
||||
func TestNewImageManager_Good_CreatesDir(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
imagesDir := filepath.Join(tmpDir, "images")
|
||||
t.Setenv("CORE_IMAGES_DIR", imagesDir)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
mgr, err := NewImageManager(io.Local, cfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, mgr)
|
||||
|
||||
// Verify directory was created
|
||||
info, err := os.Stat(imagesDir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, info.IsDir())
|
||||
}
|
||||
|
||||
// mockImageSource is a test helper for simulating image sources
|
||||
type mockImageSource struct {
|
||||
name string
|
||||
available bool
|
||||
latestVersion string
|
||||
latestErr error
|
||||
downloadErr error
|
||||
}
|
||||
|
||||
func (m *mockImageSource) Name() string { return m.name }
|
||||
func (m *mockImageSource) Available() bool { return m.available }
|
||||
func (m *mockImageSource) LatestVersion(ctx context.Context) (string, error) {
|
||||
return m.latestVersion, m.latestErr
|
||||
}
|
||||
func (m *mockImageSource) Download(ctx context.Context, medium io.Medium, dest string, progress func(downloaded, total int64)) error {
|
||||
if m.downloadErr != nil {
|
||||
return m.downloadErr
|
||||
}
|
||||
// Create a fake image file
|
||||
imagePath := filepath.Join(dest, ImageName())
|
||||
return os.WriteFile(imagePath, []byte("mock image content"), 0644)
|
||||
}
|
||||
|
||||
func TestImageManager_Install_Good_WithMockSource(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
mock := &mockImageSource{
|
||||
name: "mock",
|
||||
available: true,
|
||||
latestVersion: "v1.0.0",
|
||||
}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")},
|
||||
sources: []sources.ImageSource{mock},
|
||||
}
|
||||
|
||||
err := mgr.Install(context.Background(), nil)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, mgr.IsInstalled())
|
||||
|
||||
// Verify manifest was updated
|
||||
info, ok := mgr.manifest.Images[ImageName()]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "v1.0.0", info.Version)
|
||||
assert.Equal(t, "mock", info.Source)
|
||||
}
|
||||
|
||||
func TestImageManager_Install_Bad_DownloadError(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
mock := &mockImageSource{
|
||||
name: "mock",
|
||||
available: true,
|
||||
latestVersion: "v1.0.0",
|
||||
downloadErr: assert.AnError,
|
||||
}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")},
|
||||
sources: []sources.ImageSource{mock},
|
||||
}
|
||||
|
||||
err := mgr.Install(context.Background(), nil)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestImageManager_Install_Bad_VersionError(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
mock := &mockImageSource{
|
||||
name: "mock",
|
||||
available: true,
|
||||
latestErr: assert.AnError,
|
||||
}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")},
|
||||
sources: []sources.ImageSource{mock},
|
||||
}
|
||||
|
||||
err := mgr.Install(context.Background(), nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to get latest version")
|
||||
}
|
||||
|
||||
func TestImageManager_Install_Good_SkipsUnavailableSource(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
unavailableMock := &mockImageSource{
|
||||
name: "unavailable",
|
||||
available: false,
|
||||
}
|
||||
availableMock := &mockImageSource{
|
||||
name: "available",
|
||||
available: true,
|
||||
latestVersion: "v2.0.0",
|
||||
}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")},
|
||||
sources: []sources.ImageSource{unavailableMock, availableMock},
|
||||
}
|
||||
|
||||
err := mgr.Install(context.Background(), nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Should have used the available source
|
||||
info := mgr.manifest.Images[ImageName()]
|
||||
assert.Equal(t, "available", info.Source)
|
||||
}
|
||||
|
||||
func TestImageManager_CheckUpdate_Good_WithMockSource(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
mock := &mockImageSource{
|
||||
name: "mock",
|
||||
available: true,
|
||||
latestVersion: "v2.0.0",
|
||||
}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{
|
||||
medium: io.Local,
|
||||
Images: map[string]ImageInfo{
|
||||
ImageName(): {Version: "v1.0.0", Source: "mock"},
|
||||
},
|
||||
path: filepath.Join(tmpDir, "manifest.json"),
|
||||
},
|
||||
sources: []sources.ImageSource{mock},
|
||||
}
|
||||
|
||||
current, latest, hasUpdate, err := mgr.CheckUpdate(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", current)
|
||||
assert.Equal(t, "v2.0.0", latest)
|
||||
assert.True(t, hasUpdate)
|
||||
}
|
||||
|
||||
func TestImageManager_CheckUpdate_Good_NoUpdate(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
mock := &mockImageSource{
|
||||
name: "mock",
|
||||
available: true,
|
||||
latestVersion: "v1.0.0",
|
||||
}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{
|
||||
medium: io.Local,
|
||||
Images: map[string]ImageInfo{
|
||||
ImageName(): {Version: "v1.0.0", Source: "mock"},
|
||||
},
|
||||
path: filepath.Join(tmpDir, "manifest.json"),
|
||||
},
|
||||
sources: []sources.ImageSource{mock},
|
||||
}
|
||||
|
||||
current, latest, hasUpdate, err := mgr.CheckUpdate(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", current)
|
||||
assert.Equal(t, "v1.0.0", latest)
|
||||
assert.False(t, hasUpdate)
|
||||
}
|
||||
|
||||
func TestImageManager_CheckUpdate_Bad_NoSource(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
unavailableMock := &mockImageSource{
|
||||
name: "mock",
|
||||
available: false,
|
||||
}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{
|
||||
medium: io.Local,
|
||||
Images: map[string]ImageInfo{
|
||||
ImageName(): {Version: "v1.0.0", Source: "mock"},
|
||||
},
|
||||
path: filepath.Join(tmpDir, "manifest.json"),
|
||||
},
|
||||
sources: []sources.ImageSource{unavailableMock},
|
||||
}
|
||||
|
||||
_, _, _, err := mgr.CheckUpdate(context.Background())
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no image source available")
|
||||
}
|
||||
|
||||
func TestImageManager_CheckUpdate_Bad_VersionError(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
mock := &mockImageSource{
|
||||
name: "mock",
|
||||
available: true,
|
||||
latestErr: assert.AnError,
|
||||
}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{
|
||||
medium: io.Local,
|
||||
Images: map[string]ImageInfo{
|
||||
ImageName(): {Version: "v1.0.0", Source: "mock"},
|
||||
},
|
||||
path: filepath.Join(tmpDir, "manifest.json"),
|
||||
},
|
||||
sources: []sources.ImageSource{mock},
|
||||
}
|
||||
|
||||
current, _, _, err := mgr.CheckUpdate(context.Background())
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "v1.0.0", current) // Current should still be returned
|
||||
}
|
||||
|
||||
func TestImageManager_Install_Bad_EmptySources(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")},
|
||||
sources: []sources.ImageSource{}, // Empty slice, not nil
|
||||
}
|
||||
|
||||
err := mgr.Install(context.Background(), nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no image source available")
|
||||
}
|
||||
|
||||
func TestImageManager_Install_Bad_AllUnavailable(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
mock1 := &mockImageSource{name: "mock1", available: false}
|
||||
mock2 := &mockImageSource{name: "mock2", available: false}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")},
|
||||
sources: []sources.ImageSource{mock1, mock2},
|
||||
}
|
||||
|
||||
err := mgr.Install(context.Background(), nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no image source available")
|
||||
}
|
||||
|
||||
func TestImageManager_CheckUpdate_Good_FirstSourceUnavailable(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
t.Setenv("CORE_IMAGES_DIR", tmpDir)
|
||||
|
||||
unavailable := &mockImageSource{name: "unavailable", available: false}
|
||||
available := &mockImageSource{name: "available", available: true, latestVersion: "v2.0.0"}
|
||||
|
||||
mgr := &ImageManager{
|
||||
medium: io.Local,
|
||||
config: DefaultConfig(),
|
||||
manifest: &Manifest{
|
||||
medium: io.Local,
|
||||
Images: map[string]ImageInfo{
|
||||
ImageName(): {Version: "v1.0.0", Source: "available"},
|
||||
},
|
||||
path: filepath.Join(tmpDir, "manifest.json"),
|
||||
},
|
||||
sources: []sources.ImageSource{unavailable, available},
|
||||
}
|
||||
|
||||
current, latest, hasUpdate, err := mgr.CheckUpdate(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", current)
|
||||
assert.Equal(t, "v2.0.0", latest)
|
||||
assert.True(t, hasUpdate)
|
||||
}
|
||||
|
||||
func TestManifest_Struct(t *testing.T) {
|
||||
m := &Manifest{
|
||||
Images: map[string]ImageInfo{
|
||||
"test.img": {Version: "1.0.0"},
|
||||
},
|
||||
path: "/path/to/manifest.json",
|
||||
}
|
||||
assert.Equal(t, "/path/to/manifest.json", m.path)
|
||||
assert.Len(t, m.Images, 1)
|
||||
assert.Equal(t, "1.0.0", m.Images["test.img"].Version)
|
||||
}
|
||||
109
devops/serve.go
Normal file
@@ -0,0 +1,109 @@
package devops

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"

	"forge.lthn.ai/core/go/pkg/io"
)

// ServeOptions configures the dev server.
type ServeOptions struct {
	Port int    // Port to serve on (default 8000)
	Path string // Subdirectory to serve (default: current dir)
}

// Serve mounts the project and starts a dev server.
func (d *DevOps) Serve(ctx context.Context, projectDir string, opts ServeOptions) error {
	running, err := d.IsRunning(ctx)
	if err != nil {
		return err
	}
	if !running {
		return fmt.Errorf("dev environment not running (run 'core dev boot' first)")
	}

	if opts.Port == 0 {
		opts.Port = 8000
	}

	servePath := projectDir
	if opts.Path != "" {
		servePath = filepath.Join(projectDir, opts.Path)
	}

	// Mount project directory via SSHFS
	if err := d.mountProject(ctx, servePath); err != nil {
		return fmt.Errorf("failed to mount project: %w", err)
	}

	// Detect and run serve command
	serveCmd := DetectServeCommand(d.medium, servePath)
	fmt.Printf("Starting server: %s\n", serveCmd)
	fmt.Printf("Listening on http://localhost:%d\n", opts.Port)

	// Run serve command via SSH
	return d.sshShell(ctx, []string{"cd", "/app", "&&", serveCmd})
}

// mountProject mounts a directory into the VM via SSHFS.
func (d *DevOps) mountProject(ctx context.Context, path string) error {
	absPath, err := filepath.Abs(path)
	if err != nil {
		return err
	}

	// Use reverse SSHFS mount
	// The VM connects back to host to mount the directory
	cmd := exec.CommandContext(ctx, "ssh",
		"-o", "StrictHostKeyChecking=yes",
		"-o", "UserKnownHostsFile=~/.core/known_hosts",
		"-o", "LogLevel=ERROR",
		"-R", "10000:localhost:22", // Reverse tunnel for SSHFS
		"-p", fmt.Sprintf("%d", DefaultSSHPort),
		"root@localhost",
		fmt.Sprintf("mkdir -p /app && sshfs -p 10000 %s@localhost:%s /app -o allow_other", os.Getenv("USER"), absPath),
	)
	return cmd.Run()
}

// DetectServeCommand auto-detects the serve command for a project.
func DetectServeCommand(m io.Medium, projectDir string) string {
	// Laravel/Octane
	if hasFile(m, projectDir, "artisan") {
		return "php artisan octane:start --host=0.0.0.0 --port=8000"
	}

	// Node.js with dev script
	if hasFile(m, projectDir, "package.json") {
		if hasPackageScript(m, projectDir, "dev") {
			return "npm run dev -- --host 0.0.0.0"
		}
		if hasPackageScript(m, projectDir, "start") {
			return "npm start"
		}
	}

	// PHP with composer
	if hasFile(m, projectDir, "composer.json") {
		return "frankenphp php-server -l :8000"
	}

	// Go
	if hasFile(m, projectDir, "go.mod") {
		if hasFile(m, projectDir, "main.go") {
			return "go run ."
		}
	}

	// Python Django
	if hasFile(m, projectDir, "manage.py") {
		return "python manage.py runserver 0.0.0.0:8000"
	}

	// Fallback: simple HTTP server
	return "python3 -m http.server 8000"
}
137
devops/serve_test.go
Normal file
@@ -0,0 +1,137 @@
package devops
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDetectServeCommand_Good_Laravel(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "artisan"), []byte("#!/usr/bin/env php"), 0644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cmd := DetectServeCommand(io.Local, tmpDir)
|
||||
assert.Equal(t, "php artisan octane:start --host=0.0.0.0 --port=8000", cmd)
|
||||
}
|
||||
|
||||
func TestDetectServeCommand_Good_NodeDev(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
packageJSON := `{"scripts":{"dev":"vite","start":"node index.js"}}`
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(packageJSON), 0644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cmd := DetectServeCommand(io.Local, tmpDir)
|
||||
assert.Equal(t, "npm run dev -- --host 0.0.0.0", cmd)
|
||||
}
|
||||
|
||||
func TestDetectServeCommand_Good_NodeStart(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
packageJSON := `{"scripts":{"start":"node server.js"}}`
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(packageJSON), 0644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cmd := DetectServeCommand(io.Local, tmpDir)
|
||||
assert.Equal(t, "npm start", cmd)
|
||||
}
|
||||
|
||||
func TestDetectServeCommand_Good_PHP(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"require":{}}`), 0644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cmd := DetectServeCommand(io.Local, tmpDir)
|
||||
assert.Equal(t, "frankenphp php-server -l :8000", cmd)
|
||||
}
|
||||
|
||||
func TestDetectServeCommand_Good_GoMain(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module example"), 0644)
|
||||
assert.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "main.go"), []byte("package main"), 0644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cmd := DetectServeCommand(io.Local, tmpDir)
|
||||
assert.Equal(t, "go run .", cmd)
|
||||
}
|
||||
|
||||
func TestDetectServeCommand_Good_GoWithoutMain(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module example"), 0644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// No main.go, so falls through to fallback
|
||||
cmd := DetectServeCommand(io.Local, tmpDir)
|
||||
assert.Equal(t, "python3 -m http.server 8000", cmd)
|
||||
}
|
||||
|
||||
func TestDetectServeCommand_Good_Django(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "manage.py"), []byte("#!/usr/bin/env python"), 0644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cmd := DetectServeCommand(io.Local, tmpDir)
|
||||
assert.Equal(t, "python manage.py runserver 0.0.0.0:8000", cmd)
|
||||
}
|
||||
|
||||
func TestDetectServeCommand_Good_Fallback(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
cmd := DetectServeCommand(io.Local, tmpDir)
|
||||
assert.Equal(t, "python3 -m http.server 8000", cmd)
|
||||
}
|
||||
|
||||
func TestDetectServeCommand_Good_Priority(t *testing.T) {
|
||||
// Laravel (artisan) should take priority over PHP (composer.json)
|
||||
tmpDir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "artisan"), []byte("#!/usr/bin/env php"), 0644)
|
||||
assert.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"require":{}}`), 0644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cmd := DetectServeCommand(io.Local, tmpDir)
|
||||
assert.Equal(t, "php artisan octane:start --host=0.0.0.0 --port=8000", cmd)
|
||||
}
|
||||
|
||||
func TestServeOptions_Default(t *testing.T) {
|
||||
opts := ServeOptions{}
|
||||
assert.Equal(t, 0, opts.Port)
|
||||
assert.Equal(t, "", opts.Path)
|
||||
}
|
||||
|
||||
func TestServeOptions_Custom(t *testing.T) {
|
||||
opts := ServeOptions{
|
||||
Port: 3000,
|
||||
Path: "public",
|
||||
}
|
||||
assert.Equal(t, 3000, opts.Port)
|
||||
assert.Equal(t, "public", opts.Path)
|
||||
}
|
||||
|
||||
func TestHasFile_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
testFile := filepath.Join(tmpDir, "test.txt")
|
||||
err := os.WriteFile(testFile, []byte("content"), 0644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, hasFile(io.Local, tmpDir, "test.txt"))
|
||||
}
|
||||
|
||||
func TestHasFile_Bad(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
assert.False(t, hasFile(io.Local, tmpDir, "nonexistent.txt"))
|
||||
}
|
||||
|
||||
func TestHasFile_Bad_Directory(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
subDir := filepath.Join(tmpDir, "subdir")
|
||||
err := os.Mkdir(subDir, 0755)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// hasFile correctly returns false for directories (only true for regular files)
|
||||
assert.False(t, hasFile(io.Local, tmpDir, "subdir"))
|
||||
}
|
||||
74
devops/shell.go
Normal file
@@ -0,0 +1,74 @@
package devops

import (
	"context"
	"fmt"
	"os"
	"os/exec"
)

// ShellOptions configures the shell connection.
type ShellOptions struct {
	Console bool     // Use serial console instead of SSH
	Command []string // Command to run (empty = interactive shell)
}

// Shell connects to the dev environment.
func (d *DevOps) Shell(ctx context.Context, opts ShellOptions) error {
	running, err := d.IsRunning(ctx)
	if err != nil {
		return err
	}
	if !running {
		return fmt.Errorf("dev environment not running (run 'core dev boot' first)")
	}

	if opts.Console {
		return d.serialConsole(ctx)
	}

	return d.sshShell(ctx, opts.Command)
}

// sshShell connects via SSH.
func (d *DevOps) sshShell(ctx context.Context, command []string) error {
	args := []string{
		"-o", "StrictHostKeyChecking=yes",
		"-o", "UserKnownHostsFile=~/.core/known_hosts",
		"-o", "LogLevel=ERROR",
		"-A", // Agent forwarding
		"-p", fmt.Sprintf("%d", DefaultSSHPort),
		"root@localhost",
	}

	if len(command) > 0 {
		args = append(args, command...)
	}

	cmd := exec.CommandContext(ctx, "ssh", args...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	return cmd.Run()
}

// serialConsole attaches to the QEMU serial console.
func (d *DevOps) serialConsole(ctx context.Context) error {
	// Find the container to get its console socket
	c, err := d.findContainer(ctx, "core-dev")
	if err != nil {
		return err
	}
	if c == nil {
		return fmt.Errorf("console not available: container not found")
	}

	// Use socat to connect to the console socket
	socketPath := fmt.Sprintf("/tmp/core-%s-console.sock", c.ID)
	cmd := exec.CommandContext(ctx, "socat", "-,raw,echo=0", "unix-connect:"+socketPath)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
47
devops/shell_test.go
Normal file
@@ -0,0 +1,47 @@
package devops
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestShellOptions_Default(t *testing.T) {
|
||||
opts := ShellOptions{}
|
||||
assert.False(t, opts.Console)
|
||||
assert.Nil(t, opts.Command)
|
||||
}
|
||||
|
||||
func TestShellOptions_Console(t *testing.T) {
|
||||
opts := ShellOptions{
|
||||
Console: true,
|
||||
}
|
||||
assert.True(t, opts.Console)
|
||||
assert.Nil(t, opts.Command)
|
||||
}
|
||||
|
||||
func TestShellOptions_Command(t *testing.T) {
|
||||
opts := ShellOptions{
|
||||
Command: []string{"ls", "-la"},
|
||||
}
|
||||
assert.False(t, opts.Console)
|
||||
assert.Equal(t, []string{"ls", "-la"}, opts.Command)
|
||||
}
|
||||
|
||||
func TestShellOptions_ConsoleWithCommand(t *testing.T) {
|
||||
opts := ShellOptions{
|
||||
Console: true,
|
||||
Command: []string{"echo", "hello"},
|
||||
}
|
||||
assert.True(t, opts.Console)
|
||||
assert.Equal(t, []string{"echo", "hello"}, opts.Command)
|
||||
}
|
||||
|
||||
func TestShellOptions_EmptyCommand(t *testing.T) {
|
||||
opts := ShellOptions{
|
||||
Command: []string{},
|
||||
}
|
||||
assert.False(t, opts.Console)
|
||||
assert.Empty(t, opts.Command)
|
||||
assert.Len(t, opts.Command, 0)
|
||||
}
|
||||
113
devops/sources/cdn.go
Normal file
@@ -0,0 +1,113 @@
package sources

import (
	"context"
	"fmt"
	goio "io"
	"net/http"
	"os"
	"path/filepath"

	"forge.lthn.ai/core/go/pkg/io"
)

// CDNSource downloads images from a CDN or S3 bucket.
type CDNSource struct {
	config SourceConfig
}

// Compile-time interface check.
var _ ImageSource = (*CDNSource)(nil)

// NewCDNSource creates a new CDN source.
func NewCDNSource(cfg SourceConfig) *CDNSource {
	return &CDNSource{config: cfg}
}

// Name returns "cdn".
func (s *CDNSource) Name() string {
	return "cdn"
}

// Available checks if CDN URL is configured.
func (s *CDNSource) Available() bool {
	return s.config.CDNURL != ""
}

// LatestVersion fetches version from manifest or returns "latest".
func (s *CDNSource) LatestVersion(ctx context.Context) (string, error) {
	// Try to fetch manifest.json for version info
	url := fmt.Sprintf("%s/manifest.json", s.config.CDNURL)
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return "latest", nil
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "latest", nil
	}
	// Close the body even on non-200 responses to avoid leaking the connection.
	defer func() { _ = resp.Body.Close() }()
	if resp.StatusCode != 200 {
		return "latest", nil
	}

	// For now, just return latest - could parse manifest for version
	return "latest", nil
}

// Download downloads the image from CDN.
func (s *CDNSource) Download(ctx context.Context, m io.Medium, dest string, progress func(downloaded, total int64)) error {
	url := fmt.Sprintf("%s/%s", s.config.CDNURL, s.config.ImageName)

	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return fmt.Errorf("cdn.Download: %w", err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("cdn.Download: %w", err)
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != 200 {
		return fmt.Errorf("cdn.Download: HTTP %d", resp.StatusCode)
	}

	// Ensure dest directory exists
	if err := m.EnsureDir(dest); err != nil {
		return fmt.Errorf("cdn.Download: %w", err)
	}

	// Create destination file
	destPath := filepath.Join(dest, s.config.ImageName)
	f, err := os.Create(destPath)
	if err != nil {
		return fmt.Errorf("cdn.Download: %w", err)
	}
	defer func() { _ = f.Close() }()

	// Copy with progress
	total := resp.ContentLength
	var downloaded int64

	buf := make([]byte, 32*1024)
	for {
		n, err := resp.Body.Read(buf)
		if n > 0 {
			if _, werr := f.Write(buf[:n]); werr != nil {
				return fmt.Errorf("cdn.Download: %w", werr)
			}
			downloaded += int64(n)
			if progress != nil {
				progress(downloaded, total)
			}
		}
		if err == goio.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("cdn.Download: %w", err)
		}
	}

	return nil
}
306
devops/sources/cdn_test.go
Normal file
@@ -0,0 +1,306 @@
package sources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCDNSource_Good_Available(t *testing.T) {
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: "https://images.example.com",
|
||||
ImageName: "core-devops-darwin-arm64.qcow2",
|
||||
})
|
||||
|
||||
assert.Equal(t, "cdn", src.Name())
|
||||
assert.True(t, src.Available())
|
||||
}
|
||||
|
||||
func TestCDNSource_Bad_NoURL(t *testing.T) {
|
||||
src := NewCDNSource(SourceConfig{
|
||||
ImageName: "core-devops-darwin-arm64.qcow2",
|
||||
})
|
||||
|
||||
assert.False(t, src.Available())
|
||||
}
|
||||
|
||||
func TestCDNSource_LatestVersion_Good(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/manifest.json" {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = fmt.Fprint(w, `{"version": "1.2.3"}`)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: server.URL,
|
||||
ImageName: "test.img",
|
||||
})
|
||||
|
||||
version, err := src.LatestVersion(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "latest", version) // Current impl always returns "latest"
|
||||
}
|
||||
|
||||
func TestCDNSource_Download_Good(t *testing.T) {
|
||||
content := "fake image data"
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/test.img" {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = fmt.Fprint(w, content)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
dest := t.TempDir()
|
||||
imageName := "test.img"
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: server.URL,
|
||||
ImageName: imageName,
|
||||
})
|
||||
|
||||
var progressCalled bool
|
||||
err := src.Download(context.Background(), io.Local, dest, func(downloaded, total int64) {
|
||||
progressCalled = true
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, progressCalled)
|
||||
|
||||
// Verify file content
|
||||
data, err := os.ReadFile(filepath.Join(dest, imageName))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, content, string(data))
|
||||
}
|
||||
|
||||
func TestCDNSource_Download_Bad(t *testing.T) {
|
||||
t.Run("HTTP error", func(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
dest := t.TempDir()
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: server.URL,
|
||||
ImageName: "test.img",
|
||||
})
|
||||
|
||||
err := src.Download(context.Background(), io.Local, dest, nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "HTTP 500")
|
||||
})
|
||||
|
||||
t.Run("Invalid URL", func(t *testing.T) {
|
||||
dest := t.TempDir()
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: "http://invalid-url-that-should-fail",
|
||||
ImageName: "test.img",
|
||||
})
|
||||
|
||||
err := src.Download(context.Background(), io.Local, dest, nil)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCDNSource_LatestVersion_Bad_NoManifest(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: server.URL,
|
||||
ImageName: "test.img",
|
||||
})
|
||||
|
||||
version, err := src.LatestVersion(context.Background())
|
||||
assert.NoError(t, err) // Should not error, just return "latest"
|
||||
assert.Equal(t, "latest", version)
|
||||
}
|
||||
|
||||
func TestCDNSource_LatestVersion_Bad_ServerError(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: server.URL,
|
||||
ImageName: "test.img",
|
||||
})
|
||||
|
||||
version, err := src.LatestVersion(context.Background())
|
||||
assert.NoError(t, err) // Falls back to "latest"
|
||||
assert.Equal(t, "latest", version)
|
||||
}
|
||||
|
||||
func TestCDNSource_Download_Good_NoProgress(t *testing.T) {
|
||||
content := "test content"
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(content)))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = fmt.Fprint(w, content)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
dest := t.TempDir()
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: server.URL,
|
||||
ImageName: "test.img",
|
||||
})
|
||||
|
||||
// nil progress callback should be handled gracefully
|
||||
err := src.Download(context.Background(), io.Local, dest, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
data, err := os.ReadFile(filepath.Join(dest, "test.img"))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, content, string(data))
|
||||
}
|
||||
|
||||
func TestCDNSource_Download_Good_LargeFile(t *testing.T) {
|
||||
// Create content larger than buffer size (32KB)
|
||||
content := make([]byte, 64*1024) // 64KB
|
||||
for i := range content {
|
||||
content[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(content)))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write(content)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
dest := t.TempDir()
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: server.URL,
|
||||
ImageName: "large.img",
|
||||
})
|
||||
|
||||
var progressCalls int
|
||||
var lastDownloaded int64
|
||||
err := src.Download(context.Background(), io.Local, dest, func(downloaded, total int64) {
|
||||
progressCalls++
|
||||
lastDownloaded = downloaded
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Greater(t, progressCalls, 1) // Should be called multiple times for large file
|
||||
assert.Equal(t, int64(len(content)), lastDownloaded)
|
||||
}
|
||||
|
||||
func TestCDNSource_Download_Bad_HTTPErrorCodes(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
statusCode int
|
||||
}{
|
||||
{"Bad Request", http.StatusBadRequest},
|
||||
{"Unauthorized", http.StatusUnauthorized},
|
||||
{"Forbidden", http.StatusForbidden},
|
||||
{"Not Found", http.StatusNotFound},
|
||||
{"Service Unavailable", http.StatusServiceUnavailable},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(tc.statusCode)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
dest := t.TempDir()
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: server.URL,
|
||||
ImageName: "test.img",
|
||||
})
|
||||
|
||||
err := src.Download(context.Background(), io.Local, dest, nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), fmt.Sprintf("HTTP %d", tc.statusCode))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCDNSource_InterfaceCompliance(t *testing.T) {
|
||||
// Verify CDNSource implements ImageSource
|
||||
var _ ImageSource = (*CDNSource)(nil)
|
||||
}
|
||||
|
||||
func TestCDNSource_Config(t *testing.T) {
|
||||
cfg := SourceConfig{
|
||||
CDNURL: "https://cdn.example.com",
|
||||
ImageName: "my-image.qcow2",
|
||||
}
|
||||
src := NewCDNSource(cfg)
|
||||
|
||||
assert.Equal(t, "https://cdn.example.com", src.config.CDNURL)
|
||||
assert.Equal(t, "my-image.qcow2", src.config.ImageName)
|
||||
}
|
||||
|
||||
func TestNewCDNSource_Good(t *testing.T) {
|
||||
cfg := SourceConfig{
|
||||
GitHubRepo: "host-uk/core-images",
|
||||
RegistryImage: "ghcr.io/host-uk/core-devops",
|
||||
CDNURL: "https://cdn.example.com",
|
||||
ImageName: "core-devops-darwin-arm64.qcow2",
|
||||
}
|
||||
|
||||
src := NewCDNSource(cfg)
|
||||
assert.NotNil(t, src)
|
||||
assert.Equal(t, "cdn", src.Name())
|
||||
assert.Equal(t, cfg.CDNURL, src.config.CDNURL)
|
||||
}
|
||||
|
||||
func TestCDNSource_Download_Good_CreatesDestDir(t *testing.T) {
|
||||
content := "test content"
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = fmt.Fprint(w, content)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
dest := filepath.Join(tmpDir, "nested", "dir")
|
||||
// dest doesn't exist yet
|
||||
|
||||
src := NewCDNSource(SourceConfig{
|
||||
CDNURL: server.URL,
|
||||
ImageName: "test.img",
|
||||
})
|
||||
|
||||
err := src.Download(context.Background(), io.Local, dest, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Verify nested dir was created
|
||||
info, err := os.Stat(dest)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, info.IsDir())
|
||||
}
|
||||
|
||||
func TestSourceConfig_Struct(t *testing.T) {
|
||||
cfg := SourceConfig{
|
||||
GitHubRepo: "owner/repo",
|
||||
RegistryImage: "ghcr.io/owner/image",
|
||||
CDNURL: "https://cdn.example.com",
|
||||
ImageName: "image.qcow2",
|
||||
}
|
||||
|
||||
assert.Equal(t, "owner/repo", cfg.GitHubRepo)
|
||||
assert.Equal(t, "ghcr.io/owner/image", cfg.RegistryImage)
|
||||
assert.Equal(t, "https://cdn.example.com", cfg.CDNURL)
|
||||
assert.Equal(t, "image.qcow2", cfg.ImageName)
|
||||
}
|
||||
72
devops/sources/github.go
Normal file
@@ -0,0 +1,72 @@
package sources

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"strings"

	"forge.lthn.ai/core/go/pkg/io"
)

// GitHubSource downloads images from GitHub Releases.
type GitHubSource struct {
	config SourceConfig
}

// Compile-time interface check.
var _ ImageSource = (*GitHubSource)(nil)

// NewGitHubSource creates a new GitHub source.
func NewGitHubSource(cfg SourceConfig) *GitHubSource {
	return &GitHubSource{config: cfg}
}

// Name returns "github".
func (s *GitHubSource) Name() string {
	return "github"
}

// Available checks if gh CLI is installed and authenticated.
func (s *GitHubSource) Available() bool {
	_, err := exec.LookPath("gh")
	if err != nil {
		return false
	}
	// Check if authenticated
	cmd := exec.Command("gh", "auth", "status")
	return cmd.Run() == nil
}

// LatestVersion returns the latest release tag.
func (s *GitHubSource) LatestVersion(ctx context.Context) (string, error) {
	cmd := exec.CommandContext(ctx, "gh", "release", "view",
		"-R", s.config.GitHubRepo,
		"--json", "tagName",
		"-q", ".tagName",
	)
	out, err := cmd.Output()
	if err != nil {
		return "", fmt.Errorf("github.LatestVersion: %w", err)
	}
	return strings.TrimSpace(string(out)), nil
}

// Download downloads the image from the latest release.
func (s *GitHubSource) Download(ctx context.Context, m io.Medium, dest string, progress func(downloaded, total int64)) error {
	// Get release assets to find our image
	cmd := exec.CommandContext(ctx, "gh", "release", "download",
		"-R", s.config.GitHubRepo,
		"-p", s.config.ImageName,
		"-D", dest,
		"--clobber",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("github.Download: %w", err)
	}
	return nil
}
68
devops/sources/github_test.go
Normal file
@@ -0,0 +1,68 @@
package sources
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGitHubSource_Good_Available(t *testing.T) {
|
||||
src := NewGitHubSource(SourceConfig{
|
||||
GitHubRepo: "host-uk/core-images",
|
||||
ImageName: "core-devops-darwin-arm64.qcow2",
|
||||
})
|
||||
|
||||
if src.Name() != "github" {
|
||||
t.Errorf("expected name 'github', got %q", src.Name())
|
||||
}
|
||||
|
||||
// Available depends on gh CLI being installed
|
||||
_ = src.Available()
|
||||
}
|
||||
|
||||
func TestGitHubSource_Name(t *testing.T) {
|
||||
src := NewGitHubSource(SourceConfig{})
|
||||
assert.Equal(t, "github", src.Name())
|
||||
}
|
||||
|
||||
func TestGitHubSource_Config(t *testing.T) {
|
||||
cfg := SourceConfig{
|
||||
GitHubRepo: "owner/repo",
|
||||
ImageName: "test-image.qcow2",
|
||||
}
|
||||
src := NewGitHubSource(cfg)
|
||||
|
||||
// Verify the config is stored
|
||||
assert.Equal(t, "owner/repo", src.config.GitHubRepo)
|
||||
assert.Equal(t, "test-image.qcow2", src.config.ImageName)
|
||||
}
|
||||
|
||||
func TestGitHubSource_Good_Multiple(t *testing.T) {
|
||||
// Test creating multiple sources with different configs
|
||||
src1 := NewGitHubSource(SourceConfig{GitHubRepo: "org1/repo1", ImageName: "img1.qcow2"})
|
||||
src2 := NewGitHubSource(SourceConfig{GitHubRepo: "org2/repo2", ImageName: "img2.qcow2"})
|
||||
|
||||
assert.Equal(t, "org1/repo1", src1.config.GitHubRepo)
|
||||
assert.Equal(t, "org2/repo2", src2.config.GitHubRepo)
|
||||
assert.Equal(t, "github", src1.Name())
|
||||
assert.Equal(t, "github", src2.Name())
|
||||
}
|
||||
|
||||
func TestNewGitHubSource_Good(t *testing.T) {
|
||||
cfg := SourceConfig{
|
||||
GitHubRepo: "host-uk/core-images",
|
||||
RegistryImage: "ghcr.io/host-uk/core-devops",
|
||||
CDNURL: "https://cdn.example.com",
|
||||
ImageName: "core-devops-darwin-arm64.qcow2",
|
||||
}
|
||||
|
||||
src := NewGitHubSource(cfg)
|
||||
assert.NotNil(t, src)
|
||||
assert.Equal(t, "github", src.Name())
|
||||
assert.Equal(t, cfg.GitHubRepo, src.config.GitHubRepo)
|
||||
}
|
||||
|
||||
func TestGitHubSource_InterfaceCompliance(t *testing.T) {
|
||||
// Verify GitHubSource implements ImageSource
|
||||
var _ ImageSource = (*GitHubSource)(nil)
|
||||
}
|
||||
33
devops/sources/source.go
Normal file
@@ -0,0 +1,33 @@
// Package sources provides image download sources for core-devops.
package sources

import (
	"context"

	"forge.lthn.ai/core/go/pkg/io"
)

// ImageSource defines the interface for downloading dev images.
type ImageSource interface {
	// Name returns the source identifier.
	Name() string
	// Available checks if this source can be used.
	Available() bool
	// LatestVersion returns the latest available version.
	LatestVersion(ctx context.Context) (string, error)
	// Download downloads the image to the destination path.
	// Reports progress via the callback if provided.
	Download(ctx context.Context, m io.Medium, dest string, progress func(downloaded, total int64)) error
}

// SourceConfig holds configuration for a source.
type SourceConfig struct {
	// GitHub configuration
	GitHubRepo string
	// Registry configuration
	RegistryImage string
	// CDN configuration
	CDNURL string
	// Image name (e.g., core-devops-darwin-arm64.qcow2)
	ImageName string
}
35
devops/sources/source_test.go
Normal file
@@ -0,0 +1,35 @@
package sources
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSourceConfig_Empty(t *testing.T) {
|
||||
cfg := SourceConfig{}
|
||||
assert.Empty(t, cfg.GitHubRepo)
|
||||
assert.Empty(t, cfg.RegistryImage)
|
||||
assert.Empty(t, cfg.CDNURL)
|
||||
assert.Empty(t, cfg.ImageName)
|
||||
}
|
||||
|
||||
func TestSourceConfig_Complete(t *testing.T) {
|
||||
cfg := SourceConfig{
|
||||
GitHubRepo: "owner/repo",
|
||||
RegistryImage: "ghcr.io/owner/image:v1",
|
||||
CDNURL: "https://cdn.example.com/images",
|
||||
ImageName: "my-image-darwin-arm64.qcow2",
|
||||
}
|
||||
|
||||
assert.Equal(t, "owner/repo", cfg.GitHubRepo)
|
||||
assert.Equal(t, "ghcr.io/owner/image:v1", cfg.RegistryImage)
|
||||
assert.Equal(t, "https://cdn.example.com/images", cfg.CDNURL)
|
||||
assert.Equal(t, "my-image-darwin-arm64.qcow2", cfg.ImageName)
|
||||
}
|
||||
|
||||
func TestImageSource_Interface(t *testing.T) {
|
||||
// Ensure both sources implement the interface
|
||||
var _ ImageSource = (*GitHubSource)(nil)
|
||||
var _ ImageSource = (*CDNSource)(nil)
|
||||
}
|
||||
68
devops/ssh_utils.go
Normal file
@@ -0,0 +1,68 @@
package devops

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// ensureHostKey ensures that the host key for the dev environment is in the known hosts file.
// This is used after boot to allow StrictHostKeyChecking=yes to work.
func ensureHostKey(ctx context.Context, port int) error {
	// Skip if requested (used in tests)
	if os.Getenv("CORE_SKIP_SSH_SCAN") == "true" {
		return nil
	}

	home, err := os.UserHomeDir()
	if err != nil {
		return fmt.Errorf("get home dir: %w", err)
	}

	knownHostsPath := filepath.Join(home, ".core", "known_hosts")

	// Ensure directory exists
	if err := os.MkdirAll(filepath.Dir(knownHostsPath), 0755); err != nil {
		return fmt.Errorf("create known_hosts dir: %w", err)
	}

	// Get host key using ssh-keyscan
	cmd := exec.CommandContext(ctx, "ssh-keyscan", "-p", fmt.Sprintf("%d", port), "localhost")
	out, err := cmd.Output()
	if err != nil {
		return fmt.Errorf("ssh-keyscan failed: %w", err)
	}

	if len(out) == 0 {
		return fmt.Errorf("ssh-keyscan returned no keys")
	}

	// Read existing known_hosts to avoid duplicates
	existing, _ := os.ReadFile(knownHostsPath)
	existingStr := string(existing)

	// Append new keys that aren't already there
	f, err := os.OpenFile(knownHostsPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return fmt.Errorf("open known_hosts: %w", err)
	}
	defer f.Close()

	lines := strings.Split(string(out), "\n")
	for _, line := range lines {
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		if !strings.Contains(existingStr, line) {
			if _, err := f.WriteString(line + "\n"); err != nil {
				return fmt.Errorf("write known_hosts: %w", err)
			}
		}
	}

	return nil
}
188
devops/test.go
Normal file
188
devops/test.go
Normal file
|
|
@ -0,0 +1,188 @@
|
|||
package devops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// TestConfig holds test configuration from .core/test.yaml.
|
||||
type TestConfig struct {
|
||||
Version int `yaml:"version"`
|
||||
Command string `yaml:"command,omitempty"`
|
||||
Commands []TestCommand `yaml:"commands,omitempty"`
|
||||
Env map[string]string `yaml:"env,omitempty"`
|
||||
}
|
||||
|
||||
// TestCommand is a named test command.
|
||||
type TestCommand struct {
|
||||
Name string `yaml:"name"`
|
||||
Run string `yaml:"run"`
|
||||
}
|
||||
|
||||
// TestOptions configures test execution.
|
||||
type TestOptions struct {
|
||||
Name string // Run specific named command from .core/test.yaml
|
||||
Command []string // Override command (from -- args)
|
||||
}
|
||||
|
||||
// Test runs tests in the dev environment.
|
||||
func (d *DevOps) Test(ctx context.Context, projectDir string, opts TestOptions) error {
|
||||
running, err := d.IsRunning(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !running {
|
||||
return fmt.Errorf("dev environment not running (run 'core dev boot' first)")
|
||||
}
|
||||
|
||||
var cmd string
|
||||
|
||||
// Priority: explicit command > named command > auto-detect
|
||||
if len(opts.Command) > 0 {
|
||||
cmd = strings.Join(opts.Command, " ")
|
||||
} else if opts.Name != "" {
|
||||
cfg, err := LoadTestConfig(d.medium, projectDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range cfg.Commands {
|
||||
if c.Name == opts.Name {
|
||||
cmd = c.Run
|
||||
break
|
||||
}
|
||||
}
|
||||
if cmd == "" {
|
||||
return fmt.Errorf("test command %q not found in .core/test.yaml", opts.Name)
|
||||
}
|
||||
} else {
|
||||
cmd = DetectTestCommand(d.medium, projectDir)
|
||||
if cmd == "" {
|
||||
return fmt.Errorf("could not detect test command (create .core/test.yaml)")
|
||||
}
|
||||
}
|
||||
|
||||
// Run via SSH - construct command as single string for shell execution
|
||||
return d.sshShell(ctx, []string{"cd", "/app", "&&", cmd})
|
||||
}
|
||||
|
||||
// DetectTestCommand auto-detects the test command for a project.
|
||||
func DetectTestCommand(m io.Medium, projectDir string) string {
|
||||
// 1. Check .core/test.yaml
|
||||
cfg, err := LoadTestConfig(m, projectDir)
|
||||
if err == nil && cfg.Command != "" {
|
||||
return cfg.Command
|
||||
}
|
||||
|
||||
// 2. Check composer.json for test script
|
||||
if hasFile(m, projectDir, "composer.json") {
|
||||
if hasComposerScript(m, projectDir, "test") {
|
||||
return "composer test"
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Check package.json for test script
|
||||
if hasFile(m, projectDir, "package.json") {
|
||||
if hasPackageScript(m, projectDir, "test") {
|
||||
return "npm test"
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Check go.mod
|
||||
if hasFile(m, projectDir, "go.mod") {
|
||||
return "go test ./..."
|
||||
}
|
||||
|
||||
// 5. Check pytest
|
||||
if hasFile(m, projectDir, "pytest.ini") || hasFile(m, projectDir, "pyproject.toml") {
|
||||
return "pytest"
|
||||
}
|
||||
|
||||
// 6. Check Taskfile
|
||||
if hasFile(m, projectDir, "Taskfile.yaml") || hasFile(m, projectDir, "Taskfile.yml") {
|
||||
return "task test"
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// LoadTestConfig loads .core/test.yaml.
|
||||
func LoadTestConfig(m io.Medium, projectDir string) (*TestConfig, error) {
|
||||
path := filepath.Join(projectDir, ".core", "test.yaml")
|
||||
absPath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
content, err := m.Read(absPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg TestConfig
|
||||
if err := yaml.Unmarshal([]byte(content), &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func hasFile(m io.Medium, dir, name string) bool {
|
||||
path := filepath.Join(dir, name)
|
||||
absPath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return m.IsFile(absPath)
|
||||
}
|
||||
|
||||
func hasPackageScript(m io.Medium, projectDir, script string) bool {
|
||||
path := filepath.Join(projectDir, "package.json")
|
||||
absPath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
content, err := m.Read(absPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var pkg struct {
|
||||
Scripts map[string]string `json:"scripts"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(content), &pkg); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
_, ok := pkg.Scripts[script]
|
||||
return ok
|
||||
}
|
||||
|
||||
func hasComposerScript(m io.Medium, projectDir, script string) bool {
|
||||
path := filepath.Join(projectDir, "composer.json")
|
||||
absPath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
content, err := m.Read(absPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var pkg struct {
|
||||
Scripts map[string]interface{} `json:"scripts"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(content), &pkg); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
_, ok := pkg.Scripts[script]
|
||||
return ok
|
||||
}
|
||||
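A minimal sketch of how the helpers above might be called from another package; the import path forge.lthn.ai/core/go-devops/devops is an assumption based on this module's go.mod, and io.Local is the local filesystem medium from forge.lthn.ai/core/go/pkg/io used by the tests below.

package main

import (
	"fmt"

	"forge.lthn.ai/core/go-devops/devops" // assumed import path (module root + devops/)
	"forge.lthn.ai/core/go/pkg/io"
)

func main() {
	// DetectTestCommand walks the priority list: .core/test.yaml, composer.json,
	// package.json, go.mod, pytest.ini/pyproject.toml, Taskfile.
	cmd := devops.DetectTestCommand(io.Local, ".")
	if cmd == "" {
		fmt.Println("no test command detected; create .core/test.yaml")
		return
	}
	fmt.Println("detected test command:", cmd)
}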
354
devops/test_test.go
Normal file
@@ -0,0 +1,354 @@
package devops
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
)
|
||||
|
||||
func TestDetectTestCommand_Good_ComposerJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"scripts":{"test":"pest"}}`), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "composer test" {
|
||||
t.Errorf("expected 'composer test', got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_PackageJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"scripts":{"test":"vitest"}}`), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "npm test" {
|
||||
t.Errorf("expected 'npm test', got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_GoMod(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module example"), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "go test ./..." {
|
||||
t.Errorf("expected 'go test ./...', got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_CoreTestYaml(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
coreDir := filepath.Join(tmpDir, ".core")
|
||||
_ = os.MkdirAll(coreDir, 0755)
|
||||
_ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte("command: custom-test"), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "custom-test" {
|
||||
t.Errorf("expected 'custom-test', got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_Pytest(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "pytest.ini"), []byte("[pytest]"), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "pytest" {
|
||||
t.Errorf("expected 'pytest', got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_Taskfile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "Taskfile.yaml"), []byte("version: '3'"), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "task test" {
|
||||
t.Errorf("expected 'task test', got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Bad_NoFiles(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "" {
|
||||
t.Errorf("expected empty string, got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_Priority(t *testing.T) {
|
||||
// .core/test.yaml should take priority over other detection methods
|
||||
tmpDir := t.TempDir()
|
||||
coreDir := filepath.Join(tmpDir, ".core")
|
||||
_ = os.MkdirAll(coreDir, 0755)
|
||||
_ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte("command: my-custom-test"), 0644)
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module example"), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "my-custom-test" {
|
||||
t.Errorf("expected 'my-custom-test' (from .core/test.yaml), got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadTestConfig_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
coreDir := filepath.Join(tmpDir, ".core")
|
||||
_ = os.MkdirAll(coreDir, 0755)
|
||||
|
||||
configYAML := `version: 1
|
||||
command: default-test
|
||||
commands:
|
||||
- name: unit
|
||||
run: go test ./...
|
||||
- name: integration
|
||||
run: go test -tags=integration ./...
|
||||
env:
|
||||
CI: "true"
|
||||
`
|
||||
_ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte(configYAML), 0644)
|
||||
|
||||
cfg, err := LoadTestConfig(io.Local, tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if cfg.Version != 1 {
|
||||
t.Errorf("expected version 1, got %d", cfg.Version)
|
||||
}
|
||||
if cfg.Command != "default-test" {
|
||||
t.Errorf("expected command 'default-test', got %q", cfg.Command)
|
||||
}
|
||||
if len(cfg.Commands) != 2 {
|
||||
t.Errorf("expected 2 commands, got %d", len(cfg.Commands))
|
||||
}
|
||||
if cfg.Commands[0].Name != "unit" {
|
||||
t.Errorf("expected first command name 'unit', got %q", cfg.Commands[0].Name)
|
||||
}
|
||||
if cfg.Env["CI"] != "true" {
|
||||
t.Errorf("expected env CI='true', got %q", cfg.Env["CI"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadTestConfig_Bad_NotFound(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
_, err := LoadTestConfig(io.Local, tmpDir)
|
||||
if err == nil {
|
||||
t.Error("expected error for missing config, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasPackageScript_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"scripts":{"test":"jest","build":"webpack"}}`), 0644)
|
||||
|
||||
if !hasPackageScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected to find 'test' script")
|
||||
}
|
||||
if !hasPackageScript(io.Local, tmpDir, "build") {
|
||||
t.Error("expected to find 'build' script")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasPackageScript_Bad_MissingScript(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"scripts":{"build":"webpack"}}`), 0644)
|
||||
|
||||
if hasPackageScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected not to find 'test' script")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasComposerScript_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"scripts":{"test":"pest","post-install-cmd":"@php artisan migrate"}}`), 0644)
|
||||
|
||||
if !hasComposerScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected to find 'test' script")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasComposerScript_Bad_MissingScript(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"scripts":{"build":"@php build.php"}}`), 0644)
|
||||
|
||||
if hasComposerScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected not to find 'test' script")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestConfig_Struct(t *testing.T) {
|
||||
cfg := &TestConfig{
|
||||
Version: 2,
|
||||
Command: "my-test",
|
||||
Commands: []TestCommand{{Name: "unit", Run: "go test ./..."}},
|
||||
Env: map[string]string{"CI": "true"},
|
||||
}
|
||||
if cfg.Version != 2 {
|
||||
t.Errorf("expected version 2, got %d", cfg.Version)
|
||||
}
|
||||
if cfg.Command != "my-test" {
|
||||
t.Errorf("expected command 'my-test', got %q", cfg.Command)
|
||||
}
|
||||
if len(cfg.Commands) != 1 {
|
||||
t.Errorf("expected 1 command, got %d", len(cfg.Commands))
|
||||
}
|
||||
if cfg.Env["CI"] != "true" {
|
||||
t.Errorf("expected CI=true, got %q", cfg.Env["CI"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestCommand_Struct(t *testing.T) {
|
||||
cmd := TestCommand{
|
||||
Name: "integration",
|
||||
Run: "go test -tags=integration ./...",
|
||||
}
|
||||
if cmd.Name != "integration" {
|
||||
t.Errorf("expected name 'integration', got %q", cmd.Name)
|
||||
}
|
||||
if cmd.Run != "go test -tags=integration ./..." {
|
||||
t.Errorf("expected run command, got %q", cmd.Run)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestOptions_Struct(t *testing.T) {
|
||||
opts := TestOptions{
|
||||
Name: "unit",
|
||||
Command: []string{"go", "test", "-v"},
|
||||
}
|
||||
if opts.Name != "unit" {
|
||||
t.Errorf("expected name 'unit', got %q", opts.Name)
|
||||
}
|
||||
if len(opts.Command) != 3 {
|
||||
t.Errorf("expected 3 command parts, got %d", len(opts.Command))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_TaskfileYml(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "Taskfile.yml"), []byte("version: '3'"), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "task test" {
|
||||
t.Errorf("expected 'task test', got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_Pyproject(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "pyproject.toml"), []byte("[tool.pytest]"), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
if cmd != "pytest" {
|
||||
t.Errorf("expected 'pytest', got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasPackageScript_Bad_NoFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
if hasPackageScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected false for missing package.json")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasPackageScript_Bad_InvalidJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`invalid json`), 0644)
|
||||
|
||||
if hasPackageScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected false for invalid JSON")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasPackageScript_Bad_NoScripts(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"name":"test"}`), 0644)
|
||||
|
||||
if hasPackageScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected false for missing scripts section")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasComposerScript_Bad_NoFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
if hasComposerScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected false for missing composer.json")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasComposerScript_Bad_InvalidJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`invalid json`), 0644)
|
||||
|
||||
if hasComposerScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected false for invalid JSON")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasComposerScript_Bad_NoScripts(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"name":"test/pkg"}`), 0644)
|
||||
|
||||
if hasComposerScript(io.Local, tmpDir, "test") {
|
||||
t.Error("expected false for missing scripts section")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadTestConfig_Bad_InvalidYAML(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
coreDir := filepath.Join(tmpDir, ".core")
|
||||
_ = os.MkdirAll(coreDir, 0755)
|
||||
_ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte("invalid: yaml: :"), 0644)
|
||||
|
||||
_, err := LoadTestConfig(io.Local, tmpDir)
|
||||
if err == nil {
|
||||
t.Error("expected error for invalid YAML")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadTestConfig_Good_MinimalConfig(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
coreDir := filepath.Join(tmpDir, ".core")
|
||||
_ = os.MkdirAll(coreDir, 0755)
|
||||
_ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte("version: 1"), 0644)
|
||||
|
||||
cfg, err := LoadTestConfig(io.Local, tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if cfg.Version != 1 {
|
||||
t.Errorf("expected version 1, got %d", cfg.Version)
|
||||
}
|
||||
if cfg.Command != "" {
|
||||
t.Errorf("expected empty command, got %q", cfg.Command)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_ComposerWithoutScript(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
// composer.json without test script should not return composer test
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"name":"test/pkg"}`), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
// Falls through to empty (no match)
|
||||
if cmd != "" {
|
||||
t.Errorf("expected empty string, got %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectTestCommand_Good_PackageJSONWithoutScript(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
// package.json without test or dev script
|
||||
_ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"name":"test"}`), 0644)
|
||||
|
||||
cmd := DetectTestCommand(io.Local, tmpDir)
|
||||
// Falls through to empty
|
||||
if cmd != "" {
|
||||
t.Errorf("expected empty string, got %q", cmd)
|
||||
}
|
||||
}
|
||||
62
go.mod
Normal file
@@ -0,0 +1,62 @@
module forge.lthn.ai/core/go-devops
|
||||
|
||||
go 1.25.5
|
||||
|
||||
require (
|
||||
forge.lthn.ai/core/go v0.0.0
|
||||
github.com/Snider/Borg v0.2.0
|
||||
github.com/getkin/kin-openapi v0.133.0
|
||||
github.com/kluctl/go-embed-python v0.0.0-3.13.1-20241219-1
|
||||
github.com/leaanthony/debme v1.2.1
|
||||
github.com/leaanthony/gosod v1.0.4
|
||||
github.com/oasdiff/oasdiff v1.11.10
|
||||
github.com/spf13/cobra v1.10.2
|
||||
github.com/stretchr/testify v1.11.1
|
||||
golang.org/x/crypto v0.48.0
|
||||
golang.org/x/net v0.50.0
|
||||
golang.org/x/text v0.34.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.123.0 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
github.com/TwiN/go-color v1.4.1 // indirect
|
||||
github.com/cloudflare/circl v1.6.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.22.4 // indirect
|
||||
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
|
||||
github.com/gofrs/flock v0.12.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/mailru/easyjson v0.9.1 // indirect
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
|
||||
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
|
||||
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/perimeterx/marshmallow v1.1.5 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/spf13/viper v1.21.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tidwall/gjson v1.18.0 // indirect
|
||||
github.com/tidwall/match v1.2.0 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/tidwall/sjson v1.2.5 // indirect
|
||||
github.com/ulikunitz/xz v0.5.15 // indirect
|
||||
github.com/wI2L/jsondiff v0.7.0 // indirect
|
||||
github.com/woodsbury/decimal128 v1.4.0 // indirect
|
||||
github.com/yargevad/filepathx v1.0.0 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/sync v0.19.0 // indirect
|
||||
golang.org/x/sys v0.41.0 // indirect
|
||||
golang.org/x/term v0.40.0 // indirect
|
||||
)
|
||||
|
||||
replace forge.lthn.ai/core/go => ../go
|
||||
140
go.sum
Normal file
@@ -0,0 +1,140 @@
cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE=
|
||||
cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU=
|
||||
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
|
||||
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
|
||||
github.com/Snider/Borg v0.2.0 h1:iCyDhY4WTXi39+FexRwXbn2YpZ2U9FUXVXDZk9xRCXQ=
|
||||
github.com/Snider/Borg v0.2.0/go.mod h1:TqlKnfRo9okioHbgrZPfWjQsztBV0Nfskz4Om1/vdMY=
|
||||
github.com/TwiN/go-color v1.4.1 h1:mqG0P/KBgHKVqmtL5ye7K0/Gr4l6hTksPgTgMk3mUzc=
|
||||
github.com/TwiN/go-color v1.4.1/go.mod h1:WcPf/jtiW95WBIsEeY1Lc/b8aaWoiqQpu5cf8WFxu+s=
|
||||
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
|
||||
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ=
|
||||
github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE=
|
||||
github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
|
||||
github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
|
||||
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
|
||||
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
|
||||
github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
|
||||
github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
|
||||
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
|
||||
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
|
||||
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/kluctl/go-embed-python v0.0.0-3.13.1-20241219-1 h1:x1cSEj4Ug5mpuZgUHLvUmlc5r//KHFn6iYiRSrRcVy4=
|
||||
github.com/kluctl/go-embed-python v0.0.0-3.13.1-20241219-1/go.mod h1:3ebNU9QBrNpUO+Hj6bHaGpkh5pymDHQ+wwVPHTE4mCE=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leaanthony/debme v1.2.1 h1:9Tgwf+kjcrbMQ4WnPcEIUcQuIZYqdWftzZkBr+i/oOc=
|
||||
github.com/leaanthony/debme v1.2.1/go.mod h1:3V+sCm5tYAgQymvSOfYQ5Xx2JCr+OXiD9Jkw3otUjiA=
|
||||
github.com/leaanthony/gosod v1.0.4 h1:YLAbVyd591MRffDgxUOU1NwLhT9T1/YiwjKZpkNFeaI=
|
||||
github.com/leaanthony/gosod v1.0.4/go.mod h1:GKuIL0zzPj3O1SdWQOdgURSuhkF+Urizzxh26t9f1cw=
|
||||
github.com/leaanthony/slicer v1.5.0/go.mod h1:FwrApmf8gOrpzEWM2J/9Lh79tyq8KTX5AzRtwV7m4AY=
|
||||
github.com/leaanthony/slicer v1.6.0 h1:1RFP5uiPJvT93TAHi+ipd3NACobkW53yUiBqZheE/Js=
|
||||
github.com/leaanthony/slicer v1.6.0/go.mod h1:o/Iz29g7LN0GqH3aMjWAe90381nyZlDNquK+mtH2Fj8=
|
||||
github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8=
|
||||
github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
|
||||
github.com/matryer/is v1.4.1 h1:55ehd8zaGABKLXQUe2awZ99BD/PTc2ls+KV/dXphgEQ=
|
||||
github.com/matryer/is v1.4.1/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/oasdiff/oasdiff v1.11.10 h1:4I9VrktUoHmwydkJqVOC7Bd6BXKu9dc4UUP3PIu1VjM=
|
||||
github.com/oasdiff/oasdiff v1.11.10/go.mod h1:GXARzmqBKN8lZHsTQD35ZM41ePbu6JdAZza4sRMeEKg=
|
||||
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY=
|
||||
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw=
|
||||
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c=
|
||||
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
|
||||
github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
|
||||
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
||||
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
||||
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/match v1.2.0 h1:0pt8FlkOwjN2fPt4bIl4BoNxb98gGHN2ObFEDkrfZnM=
|
||||
github.com/tidwall/match v1.2.0/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
||||
github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY=
|
||||
github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
|
||||
github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
|
||||
github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/wI2L/jsondiff v0.7.0 h1:1lH1G37GhBPqCfp/lrs91rf/2j3DktX6qYAKZkLuCQQ=
|
||||
github.com/wI2L/jsondiff v0.7.0/go.mod h1:KAEIojdQq66oJiHhDyQez2x+sRit0vIzC9KeK0yizxM=
|
||||
github.com/woodsbury/decimal128 v1.4.0 h1:xJATj7lLu4f2oObouMt2tgGiElE5gO6mSWUjQsBgUlc=
|
||||
github.com/woodsbury/decimal128 v1.4.0/go.mod h1:BP46FUrVjVhdTbKT+XuQh2xfQaGki9LMIRJSFuh6THU=
|
||||
github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
|
||||
github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
||||
golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
|
||||
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
||||
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
272
infra/cloudns.go
Normal file
@@ -0,0 +1,272 @@
package infra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const cloudnsBaseURL = "https://api.cloudns.net"
|
||||
|
||||
// CloudNSClient is an HTTP client for the CloudNS DNS API.
|
||||
type CloudNSClient struct {
|
||||
authID string
|
||||
password string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// NewCloudNSClient creates a new CloudNS API client.
|
||||
// Uses sub-auth-user (auth-id) authentication.
|
||||
func NewCloudNSClient(authID, password string) *CloudNSClient {
|
||||
return &CloudNSClient{
|
||||
authID: authID,
|
||||
password: password,
|
||||
client: &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CloudNSZone represents a DNS zone.
|
||||
type CloudNSZone struct {
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
Zone string `json:"zone"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// CloudNSRecord represents a DNS record.
|
||||
type CloudNSRecord struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Host string `json:"host"`
|
||||
Record string `json:"record"`
|
||||
TTL string `json:"ttl"`
|
||||
Priority string `json:"priority,omitempty"`
|
||||
Status int `json:"status"`
|
||||
}
|
||||
|
||||
// ListZones returns all DNS zones.
|
||||
func (c *CloudNSClient) ListZones(ctx context.Context) ([]CloudNSZone, error) {
|
||||
params := c.authParams()
|
||||
params.Set("page", "1")
|
||||
params.Set("rows-per-page", "100")
|
||||
params.Set("search", "")
|
||||
|
||||
data, err := c.get(ctx, "/dns/list-zones.json", params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var zones []CloudNSZone
|
||||
if err := json.Unmarshal(data, &zones); err != nil {
|
||||
// CloudNS returns an empty object {} instead of an array when there are no results; treat any non-array payload as an empty zone list
|
||||
return nil, nil
|
||||
}
|
||||
return zones, nil
|
||||
}
|
||||
|
||||
// ListRecords returns all DNS records for a zone.
|
||||
func (c *CloudNSClient) ListRecords(ctx context.Context, domain string) (map[string]CloudNSRecord, error) {
|
||||
params := c.authParams()
|
||||
params.Set("domain-name", domain)
|
||||
|
||||
data, err := c.get(ctx, "/dns/records.json", params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var records map[string]CloudNSRecord
|
||||
if err := json.Unmarshal(data, &records); err != nil {
|
||||
return nil, fmt.Errorf("parse records: %w", err)
|
||||
}
|
||||
return records, nil
|
||||
}
|
||||
|
||||
// CreateRecord creates a DNS record. Returns the record ID.
|
||||
func (c *CloudNSClient) CreateRecord(ctx context.Context, domain, host, recordType, value string, ttl int) (string, error) {
|
||||
params := c.authParams()
|
||||
params.Set("domain-name", domain)
|
||||
params.Set("host", host)
|
||||
params.Set("record-type", recordType)
|
||||
params.Set("record", value)
|
||||
params.Set("ttl", strconv.Itoa(ttl))
|
||||
|
||||
data, err := c.post(ctx, "/dns/add-record.json", params)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Status string `json:"status"`
|
||||
StatusDescription string `json:"statusDescription"`
|
||||
Data struct {
|
||||
ID int `json:"id"`
|
||||
} `json:"data"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &result); err != nil {
|
||||
return "", fmt.Errorf("parse response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != "Success" {
|
||||
return "", fmt.Errorf("cloudns: %s", result.StatusDescription)
|
||||
}
|
||||
|
||||
return strconv.Itoa(result.Data.ID), nil
|
||||
}
|
||||
|
||||
// UpdateRecord updates an existing DNS record.
|
||||
func (c *CloudNSClient) UpdateRecord(ctx context.Context, domain, recordID, host, recordType, value string, ttl int) error {
|
||||
params := c.authParams()
|
||||
params.Set("domain-name", domain)
|
||||
params.Set("record-id", recordID)
|
||||
params.Set("host", host)
|
||||
params.Set("record-type", recordType)
|
||||
params.Set("record", value)
|
||||
params.Set("ttl", strconv.Itoa(ttl))
|
||||
|
||||
data, err := c.post(ctx, "/dns/mod-record.json", params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Status string `json:"status"`
|
||||
StatusDescription string `json:"statusDescription"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &result); err != nil {
|
||||
return fmt.Errorf("parse response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != "Success" {
|
||||
return fmt.Errorf("cloudns: %s", result.StatusDescription)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteRecord deletes a DNS record by ID.
|
||||
func (c *CloudNSClient) DeleteRecord(ctx context.Context, domain, recordID string) error {
|
||||
params := c.authParams()
|
||||
params.Set("domain-name", domain)
|
||||
params.Set("record-id", recordID)
|
||||
|
||||
data, err := c.post(ctx, "/dns/delete-record.json", params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Status string `json:"status"`
|
||||
StatusDescription string `json:"statusDescription"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &result); err != nil {
|
||||
return fmt.Errorf("parse response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != "Success" {
|
||||
return fmt.Errorf("cloudns: %s", result.StatusDescription)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureRecord creates or updates a DNS record to match the desired state.
|
||||
// Returns true if a change was made.
|
||||
func (c *CloudNSClient) EnsureRecord(ctx context.Context, domain, host, recordType, value string, ttl int) (bool, error) {
|
||||
records, err := c.ListRecords(ctx, domain)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("list records: %w", err)
|
||||
}
|
||||
|
||||
// Check if record already exists
|
||||
for id, r := range records {
|
||||
if r.Host == host && r.Type == recordType {
|
||||
if r.Record == value {
|
||||
return false, nil // Already correct
|
||||
}
|
||||
// Update existing record
|
||||
if err := c.UpdateRecord(ctx, domain, id, host, recordType, value, ttl); err != nil {
|
||||
return false, fmt.Errorf("update record: %w", err)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Create new record
|
||||
if _, err := c.CreateRecord(ctx, domain, host, recordType, value, ttl); err != nil {
|
||||
return false, fmt.Errorf("create record: %w", err)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SetACMEChallenge creates a DNS-01 ACME challenge TXT record.
|
||||
func (c *CloudNSClient) SetACMEChallenge(ctx context.Context, domain, value string) (string, error) {
|
||||
return c.CreateRecord(ctx, domain, "_acme-challenge", "TXT", value, 60)
|
||||
}
|
||||
|
||||
// ClearACMEChallenge removes the DNS-01 ACME challenge TXT record.
|
||||
func (c *CloudNSClient) ClearACMEChallenge(ctx context.Context, domain string) error {
|
||||
records, err := c.ListRecords(ctx, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for id, r := range records {
|
||||
if r.Host == "_acme-challenge" && r.Type == "TXT" {
|
||||
if err := c.DeleteRecord(ctx, domain, id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CloudNSClient) authParams() url.Values {
|
||||
params := url.Values{}
|
||||
params.Set("auth-id", c.authID)
|
||||
params.Set("auth-password", c.password)
|
||||
return params
|
||||
}
|
||||
|
||||
func (c *CloudNSClient) get(ctx context.Context, path string, params url.Values) ([]byte, error) {
|
||||
u := cloudnsBaseURL + path + "?" + params.Encode()
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.doRaw(req)
|
||||
}
|
||||
|
||||
func (c *CloudNSClient) post(ctx context.Context, path string, params url.Values) ([]byte, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, cloudnsBaseURL+path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.URL.RawQuery = params.Encode()
|
||||
return c.doRaw(req)
|
||||
}
|
||||
|
||||
func (c *CloudNSClient) doRaw(req *http.Request) ([]byte, error) {
|
||||
resp, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cloudns API: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read response: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return nil, fmt.Errorf("cloudns API %d: %s", resp.StatusCode, string(data))
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
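A short sketch of the idempotent record flow above; the import path forge.lthn.ai/core/go-devops/infra is an assumption based on go.mod, and the domain, host, value, and environment variable names are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"forge.lthn.ai/core/go-devops/infra" // assumed import path
)

func main() {
	// Credentials via environment variables (placeholder names).
	c := infra.NewCloudNSClient(os.Getenv("CLOUDNS_AUTH_ID"), os.Getenv("CLOUDNS_PASSWORD"))

	// EnsureRecord creates the record if missing, updates it when the value
	// differs, and returns whether a change was made.
	changed, err := c.EnsureRecord(context.Background(), "example.com", "www", "A", "203.0.113.10", 3600)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("record changed:", changed)
}

Because EnsureRecord only writes when the desired state differs, repeated deploy runs stay idempotent.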
300
infra/config.go
Normal file
@@ -0,0 +1,300 @@
// Package infra provides infrastructure configuration and API clients
|
||||
// for managing the Host UK production environment.
|
||||
package infra
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Config is the top-level infrastructure configuration parsed from infra.yaml.
|
||||
type Config struct {
|
||||
Hosts map[string]*Host `yaml:"hosts"`
|
||||
LoadBalancer LoadBalancer `yaml:"load_balancer"`
|
||||
Network Network `yaml:"network"`
|
||||
DNS DNS `yaml:"dns"`
|
||||
SSL SSL `yaml:"ssl"`
|
||||
Database Database `yaml:"database"`
|
||||
Cache Cache `yaml:"cache"`
|
||||
Containers map[string]*Container `yaml:"containers"`
|
||||
S3 S3Config `yaml:"s3"`
|
||||
CDN CDN `yaml:"cdn"`
|
||||
CICD CICD `yaml:"cicd"`
|
||||
Monitoring Monitoring `yaml:"monitoring"`
|
||||
Backups Backups `yaml:"backups"`
|
||||
}
|
||||
|
||||
// Host represents a server in the infrastructure.
|
||||
type Host struct {
|
||||
FQDN string `yaml:"fqdn"`
|
||||
IP string `yaml:"ip"`
|
||||
PrivateIP string `yaml:"private_ip,omitempty"`
|
||||
Type string `yaml:"type"` // hcloud, hrobot
|
||||
Role string `yaml:"role"` // bastion, app, builder
|
||||
SSH SSHConf `yaml:"ssh"`
|
||||
Services []string `yaml:"services"`
|
||||
}
|
||||
|
||||
// SSHConf holds SSH connection details for a host.
|
||||
type SSHConf struct {
|
||||
User string `yaml:"user"`
|
||||
Key string `yaml:"key"`
|
||||
Port int `yaml:"port"`
|
||||
}
|
||||
|
||||
// LoadBalancer represents a Hetzner managed load balancer.
|
||||
type LoadBalancer struct {
|
||||
Name string `yaml:"name"`
|
||||
FQDN string `yaml:"fqdn"`
|
||||
Provider string `yaml:"provider"`
|
||||
Type string `yaml:"type"`
|
||||
Location string `yaml:"location"`
|
||||
Algorithm string `yaml:"algorithm"`
|
||||
Backends []Backend `yaml:"backends"`
|
||||
Health HealthCheck `yaml:"health_check"`
|
||||
Listeners []Listener `yaml:"listeners"`
|
||||
SSL LBCert `yaml:"ssl"`
|
||||
}
|
||||
|
||||
// Backend is a load balancer backend target.
|
||||
type Backend struct {
|
||||
Host string `yaml:"host"`
|
||||
Port int `yaml:"port"`
|
||||
}
|
||||
|
||||
// HealthCheck configures load balancer health checking.
|
||||
type HealthCheck struct {
|
||||
Protocol string `yaml:"protocol"`
|
||||
Path string `yaml:"path"`
|
||||
Interval int `yaml:"interval"`
|
||||
}
|
||||
|
||||
// Listener maps a frontend port to a backend port.
|
||||
type Listener struct {
|
||||
Frontend int `yaml:"frontend"`
|
||||
Backend int `yaml:"backend"`
|
||||
Protocol string `yaml:"protocol"`
|
||||
ProxyProtocol bool `yaml:"proxy_protocol"`
|
||||
}
|
||||
|
||||
// LBCert holds the SSL certificate configuration for the load balancer.
|
||||
type LBCert struct {
|
||||
Certificate string `yaml:"certificate"`
|
||||
SAN []string `yaml:"san"`
|
||||
}
|
||||
|
||||
// Network describes the private network.
|
||||
type Network struct {
|
||||
CIDR string `yaml:"cidr"`
|
||||
Name string `yaml:"name"`
|
||||
}
|
||||
|
||||
// DNS holds DNS provider configuration and zone records.
|
||||
type DNS struct {
|
||||
Provider string `yaml:"provider"`
|
||||
Nameservers []string `yaml:"nameservers"`
|
||||
Zones map[string]*Zone `yaml:"zones"`
|
||||
}
|
||||
|
||||
// Zone is a DNS zone with its records.
|
||||
type Zone struct {
|
||||
Records []DNSRecord `yaml:"records"`
|
||||
}
|
||||
|
||||
// DNSRecord is a single DNS record.
|
||||
type DNSRecord struct {
|
||||
Name string `yaml:"name"`
|
||||
Type string `yaml:"type"`
|
||||
Value string `yaml:"value"`
|
||||
TTL int `yaml:"ttl"`
|
||||
}
|
||||
|
||||
// SSL holds SSL certificate configuration.
|
||||
type SSL struct {
|
||||
Wildcard WildcardCert `yaml:"wildcard"`
|
||||
}
|
||||
|
||||
// WildcardCert describes a wildcard SSL certificate.
|
||||
type WildcardCert struct {
|
||||
Domains []string `yaml:"domains"`
|
||||
Method string `yaml:"method"`
|
||||
DNSProvider string `yaml:"dns_provider"`
|
||||
Termination string `yaml:"termination"`
|
||||
}
|
||||
|
||||
// Database describes the database cluster.
|
||||
type Database struct {
|
||||
Engine string `yaml:"engine"`
|
||||
Version string `yaml:"version"`
|
||||
Cluster string `yaml:"cluster"`
|
||||
Nodes []DBNode `yaml:"nodes"`
|
||||
SSTMethod string `yaml:"sst_method"`
|
||||
Backup BackupConfig `yaml:"backup"`
|
||||
}
|
||||
|
||||
// DBNode is a database cluster node.
|
||||
type DBNode struct {
|
||||
Host string `yaml:"host"`
|
||||
Port int `yaml:"port"`
|
||||
}
|
||||
|
||||
// BackupConfig describes automated backup settings.
|
||||
type BackupConfig struct {
|
||||
Schedule string `yaml:"schedule"`
|
||||
Destination string `yaml:"destination"`
|
||||
Bucket string `yaml:"bucket"`
|
||||
Prefix string `yaml:"prefix"`
|
||||
}
|
||||
|
||||
// Cache describes the cache/session cluster.
|
||||
type Cache struct {
|
||||
Engine string `yaml:"engine"`
|
||||
Version string `yaml:"version"`
|
||||
Sentinel bool `yaml:"sentinel"`
|
||||
Nodes []CacheNode `yaml:"nodes"`
|
||||
}
|
||||
|
||||
// CacheNode is a cache cluster node.
|
||||
type CacheNode struct {
|
||||
Host string `yaml:"host"`
|
||||
Port int `yaml:"port"`
|
||||
}
|
||||
|
||||
// Container describes a container deployment.
|
||||
type Container struct {
|
||||
Image string `yaml:"image"`
|
||||
Port int `yaml:"port,omitempty"`
|
||||
Runtime string `yaml:"runtime,omitempty"`
|
||||
Command string `yaml:"command,omitempty"`
|
||||
Replicas int `yaml:"replicas,omitempty"`
|
||||
DependsOn []string `yaml:"depends_on,omitempty"`
|
||||
}
|
||||
|
||||
// S3Config describes object storage.
|
||||
type S3Config struct {
|
||||
Endpoint string `yaml:"endpoint"`
|
||||
Buckets map[string]*S3Bucket `yaml:"buckets"`
|
||||
}
|
||||
|
||||
// S3Bucket is an S3 bucket configuration.
|
||||
type S3Bucket struct {
|
||||
Purpose string `yaml:"purpose"`
|
||||
Paths []string `yaml:"paths"`
|
||||
}
|
||||
|
||||
// CDN describes CDN configuration.
|
||||
type CDN struct {
|
||||
Provider string `yaml:"provider"`
|
||||
Origin string `yaml:"origin"`
|
||||
Zones []string `yaml:"zones"`
|
||||
}
|
||||
|
||||
// CICD describes CI/CD configuration.
|
||||
type CICD struct {
|
||||
Provider string `yaml:"provider"`
|
||||
URL string `yaml:"url"`
|
||||
Runner string `yaml:"runner"`
|
||||
Registry string `yaml:"registry"`
|
||||
DeployHook string `yaml:"deploy_hook"`
|
||||
}
|
||||
|
||||
// Monitoring describes monitoring configuration.
|
||||
type Monitoring struct {
|
||||
HealthEndpoints []HealthEndpoint `yaml:"health_endpoints"`
|
||||
Alerts map[string]int `yaml:"alerts"`
|
||||
}
|
||||
|
||||
// HealthEndpoint is a URL to monitor.
|
||||
type HealthEndpoint struct {
|
||||
URL string `yaml:"url"`
|
||||
Interval int `yaml:"interval"`
|
||||
}
|
||||
|
||||
// Backups describes backup schedules.
|
||||
type Backups struct {
|
||||
Daily []BackupJob `yaml:"daily"`
|
||||
Weekly []BackupJob `yaml:"weekly"`
|
||||
}
|
||||
|
||||
// BackupJob is a scheduled backup task.
|
||||
type BackupJob struct {
|
||||
Name string `yaml:"name"`
|
||||
Type string `yaml:"type"`
|
||||
Destination string `yaml:"destination,omitempty"`
|
||||
Hosts []string `yaml:"hosts,omitempty"`
|
||||
}
|
||||
|
||||
// Load reads and parses an infra.yaml file.
|
||||
func Load(path string) (*Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read infra config: %w", err)
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("parse infra config: %w", err)
|
||||
}
|
||||
|
||||
// Expand SSH key paths
|
||||
for _, h := range cfg.Hosts {
|
||||
if h.SSH.Key != "" {
|
||||
h.SSH.Key = expandPath(h.SSH.Key)
|
||||
}
|
||||
if h.SSH.Port == 0 {
|
||||
h.SSH.Port = 22
|
||||
}
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
// Discover searches for infra.yaml in the given directory and parent directories.
|
||||
func Discover(startDir string) (*Config, string, error) {
|
||||
dir := startDir
|
||||
for {
|
||||
path := filepath.Join(dir, "infra.yaml")
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
cfg, err := Load(path)
|
||||
return cfg, path, err
|
||||
}
|
||||
|
||||
parent := filepath.Dir(dir)
|
||||
if parent == dir {
|
||||
break
|
||||
}
|
||||
dir = parent
|
||||
}
|
||||
return nil, "", fmt.Errorf("infra.yaml not found (searched from %s)", startDir)
|
||||
}
|
||||
|
||||
// HostsByRole returns all hosts matching the given role.
|
||||
func (c *Config) HostsByRole(role string) map[string]*Host {
|
||||
result := make(map[string]*Host)
|
||||
for name, h := range c.Hosts {
|
||||
if h.Role == role {
|
||||
result[name] = h
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// AppServers returns hosts with role "app".
|
||||
func (c *Config) AppServers() map[string]*Host {
|
||||
return c.HostsByRole("app")
|
||||
}
|
||||
|
||||
// expandPath expands ~ to home directory.
|
||||
func expandPath(path string) string {
|
||||
if len(path) > 0 && path[0] == '~' {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return path
|
||||
}
|
||||
return filepath.Join(home, path[1:])
|
||||
}
|
||||
return path
|
||||
}
|
||||
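A minimal sketch of discovering and querying the infra config; the import path forge.lthn.ai/core/go-devops/infra is assumed as above.

package main

import (
	"fmt"
	"log"
	"os"

	"forge.lthn.ai/core/go-devops/infra" // assumed import path
)

func main() {
	cwd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	// Discover walks up parent directories until it finds infra.yaml.
	cfg, path, err := infra.Discover(cwd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded", path)

	// AppServers is shorthand for HostsByRole("app").
	for name, h := range cfg.AppServers() {
		fmt.Printf("%s: %s (%s), ssh port %d\n", name, h.FQDN, h.IP, h.SSH.Port)
	}
}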
100
infra/config_test.go
Normal file
@@ -0,0 +1,100 @@
package infra
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLoad_Good(t *testing.T) {
|
||||
// Find infra.yaml relative to test
|
||||
// Walk up from test dir to find it
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cfg, path, err := Discover(dir)
|
||||
if err != nil {
|
||||
t.Skipf("infra.yaml not found from %s: %v", dir, err)
|
||||
}
|
||||
|
||||
t.Logf("Loaded %s", path)
|
||||
|
||||
if len(cfg.Hosts) == 0 {
|
||||
t.Error("expected at least one host")
|
||||
}
|
||||
|
||||
// Check required hosts exist
|
||||
for _, name := range []string{"noc", "de", "de2", "build"} {
|
||||
if _, ok := cfg.Hosts[name]; !ok {
|
||||
t.Errorf("expected host %q in config", name)
|
||||
}
|
||||
}
|
||||
|
||||
// Check de host details
|
||||
de := cfg.Hosts["de"]
|
||||
if de.IP != "116.202.82.115" {
|
||||
t.Errorf("de IP = %q, want 116.202.82.115", de.IP)
|
||||
}
|
||||
if de.Role != "app" {
|
||||
t.Errorf("de role = %q, want app", de.Role)
|
||||
}
|
||||
|
||||
// Check LB config
|
||||
if cfg.LoadBalancer.Name != "hermes" {
|
||||
t.Errorf("LB name = %q, want hermes", cfg.LoadBalancer.Name)
|
||||
}
|
||||
if cfg.LoadBalancer.Type != "lb11" {
|
||||
t.Errorf("LB type = %q, want lb11", cfg.LoadBalancer.Type)
|
||||
}
|
||||
if len(cfg.LoadBalancer.Backends) != 2 {
|
||||
t.Errorf("LB backends = %d, want 2", len(cfg.LoadBalancer.Backends))
|
||||
}
|
||||
|
||||
// Check app servers helper
|
||||
apps := cfg.AppServers()
|
||||
if len(apps) != 2 {
|
||||
t.Errorf("AppServers() = %d, want 2", len(apps))
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoad_Bad(t *testing.T) {
|
||||
_, err := Load("/nonexistent/infra.yaml")
|
||||
if err == nil {
|
||||
t.Error("expected error for nonexistent file")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoad_Ugly(t *testing.T) {
|
||||
// Invalid YAML
|
||||
tmp := filepath.Join(t.TempDir(), "infra.yaml")
|
||||
if err := os.WriteFile(tmp, []byte("{{invalid yaml"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err := Load(tmp)
|
||||
if err == nil {
|
||||
t.Error("expected error for invalid YAML")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpandPath(t *testing.T) {
|
||||
home, _ := os.UserHomeDir()
|
||||
|
||||
tests := []struct {
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{"~/.ssh/id_rsa", filepath.Join(home, ".ssh/id_rsa")},
|
||||
{"/absolute/path", "/absolute/path"},
|
||||
{"relative/path", "relative/path"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
got := expandPath(tt.input)
|
||||
if got != tt.want {
|
||||
t.Errorf("expandPath(%q) = %q, want %q", tt.input, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
381
infra/hetzner.go
Normal file
@@ -0,0 +1,381 @@
package infra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
hcloudBaseURL = "https://api.hetzner.cloud/v1"
|
||||
hrobotBaseURL = "https://robot-ws.your-server.de"
|
||||
)
|
||||
|
||||
// HCloudClient is an HTTP client for the Hetzner Cloud API.
|
||||
type HCloudClient struct {
|
||||
token string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// NewHCloudClient creates a new Hetzner Cloud API client.
|
||||
func NewHCloudClient(token string) *HCloudClient {
|
||||
return &HCloudClient{
|
||||
token: token,
|
||||
client: &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// HCloudServer represents a Hetzner Cloud server.
|
||||
type HCloudServer struct {
|
||||
ID int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"`
|
||||
PublicNet HCloudPublicNet `json:"public_net"`
|
||||
PrivateNet []HCloudPrivateNet `json:"private_net"`
|
||||
ServerType HCloudServerType `json:"server_type"`
|
||||
Datacenter HCloudDatacenter `json:"datacenter"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
}
|
||||
|
||||
// HCloudPublicNet holds public network info.
|
||||
type HCloudPublicNet struct {
|
||||
IPv4 HCloudIPv4 `json:"ipv4"`
|
||||
}
|
||||
|
||||
// HCloudIPv4 holds an IPv4 address.
|
||||
type HCloudIPv4 struct {
|
||||
IP string `json:"ip"`
|
||||
}
|
||||
|
||||
// HCloudPrivateNet holds private network info.
|
||||
type HCloudPrivateNet struct {
|
||||
IP string `json:"ip"`
|
||||
Network int `json:"network"`
|
||||
}
|
||||
|
||||
// HCloudServerType holds server type info.
|
||||
type HCloudServerType struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
Cores int `json:"cores"`
|
||||
Memory float64 `json:"memory"`
|
||||
Disk int `json:"disk"`
|
||||
}
|
||||
|
||||
// HCloudDatacenter holds datacenter info.
|
||||
type HCloudDatacenter struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
// HCloudLoadBalancer represents a Hetzner Cloud load balancer.
|
||||
type HCloudLoadBalancer struct {
|
||||
ID int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
PublicNet HCloudLBPublicNet `json:"public_net"`
|
||||
Algorithm HCloudLBAlgorithm `json:"algorithm"`
|
||||
Services []HCloudLBService `json:"services"`
|
||||
Targets []HCloudLBTarget `json:"targets"`
|
||||
Location HCloudDatacenter `json:"location"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
}
|
||||
|
||||
// HCloudLBPublicNet holds LB public network info.
|
||||
type HCloudLBPublicNet struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
IPv4 HCloudIPv4 `json:"ipv4"`
|
||||
}
|
||||
|
||||
// HCloudLBAlgorithm holds the LB algorithm.
|
||||
type HCloudLBAlgorithm struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// HCloudLBService describes an LB listener.
|
||||
type HCloudLBService struct {
|
||||
Protocol string `json:"protocol"`
|
||||
ListenPort int `json:"listen_port"`
|
||||
DestinationPort int `json:"destination_port"`
|
||||
Proxyprotocol bool `json:"proxyprotocol"`
|
||||
HTTP *HCloudLBHTTP `json:"http,omitempty"`
|
||||
HealthCheck *HCloudLBHealthCheck `json:"health_check,omitempty"`
|
||||
}
|
||||
|
||||
// HCloudLBHTTP holds HTTP-specific LB options.
|
||||
type HCloudLBHTTP struct {
|
||||
RedirectHTTP bool `json:"redirect_http"`
|
||||
}
|
||||
|
||||
// HCloudLBHealthCheck holds LB health check config.
|
||||
type HCloudLBHealthCheck struct {
|
||||
Protocol string `json:"protocol"`
|
||||
Port int `json:"port"`
|
||||
Interval int `json:"interval"`
|
||||
Timeout int `json:"timeout"`
|
||||
Retries int `json:"retries"`
|
||||
HTTP *HCloudLBHCHTTP `json:"http,omitempty"`
|
||||
}
|
||||
|
||||
// HCloudLBHCHTTP holds HTTP health check options.
|
||||
type HCloudLBHCHTTP struct {
|
||||
Path string `json:"path"`
|
||||
StatusCode string `json:"status_codes"`
|
||||
}
|
||||
|
||||
// HCloudLBTarget is a load balancer backend target.
|
||||
type HCloudLBTarget struct {
|
||||
Type string `json:"type"`
|
||||
IP *HCloudLBTargetIP `json:"ip,omitempty"`
|
||||
Server *HCloudLBTargetServer `json:"server,omitempty"`
|
||||
HealthStatus []HCloudLBHealthStatus `json:"health_status"`
|
||||
}
|
||||
|
||||
// HCloudLBTargetIP is an IP-based LB target.
|
||||
type HCloudLBTargetIP struct {
|
||||
IP string `json:"ip"`
|
||||
}
|
||||
|
||||
// HCloudLBTargetServer is a server-based LB target.
|
||||
type HCloudLBTargetServer struct {
|
||||
ID int `json:"id"`
|
||||
}
|
||||
|
||||
// HCloudLBHealthStatus holds target health info.
|
||||
type HCloudLBHealthStatus struct {
|
||||
ListenPort int `json:"listen_port"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// HCloudLBCreateRequest holds load balancer creation params.
|
||||
type HCloudLBCreateRequest struct {
|
||||
Name string `json:"name"`
|
||||
LoadBalancerType string `json:"load_balancer_type"`
|
||||
Location string `json:"location"`
|
||||
Algorithm HCloudLBAlgorithm `json:"algorithm"`
|
||||
Services []HCloudLBService `json:"services"`
|
||||
Targets []HCloudLBCreateTarget `json:"targets"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
}
|
||||
|
||||
// HCloudLBCreateTarget is a target for LB creation.
|
||||
type HCloudLBCreateTarget struct {
|
||||
Type string `json:"type"`
|
||||
IP *HCloudLBTargetIP `json:"ip,omitempty"`
|
||||
}
|
||||
|
||||
// ListServers returns all Hetzner Cloud servers.
|
||||
func (c *HCloudClient) ListServers(ctx context.Context) ([]HCloudServer, error) {
|
||||
var result struct {
|
||||
Servers []HCloudServer `json:"servers"`
|
||||
}
|
||||
if err := c.get(ctx, "/servers", &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result.Servers, nil
|
||||
}
|
||||
|
||||
// ListLoadBalancers returns all load balancers.
|
||||
func (c *HCloudClient) ListLoadBalancers(ctx context.Context) ([]HCloudLoadBalancer, error) {
|
||||
var result struct {
|
||||
LoadBalancers []HCloudLoadBalancer `json:"load_balancers"`
|
||||
}
|
||||
if err := c.get(ctx, "/load_balancers", &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result.LoadBalancers, nil
|
||||
}
|
||||
|
||||
// GetLoadBalancer returns a load balancer by ID.
|
||||
func (c *HCloudClient) GetLoadBalancer(ctx context.Context, id int) (*HCloudLoadBalancer, error) {
|
||||
var result struct {
|
||||
LoadBalancer HCloudLoadBalancer `json:"load_balancer"`
|
||||
}
|
||||
if err := c.get(ctx, fmt.Sprintf("/load_balancers/%d", id), &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result.LoadBalancer, nil
|
||||
}
|
||||
|
||||
// CreateLoadBalancer creates a new load balancer.
|
||||
func (c *HCloudClient) CreateLoadBalancer(ctx context.Context, req HCloudLBCreateRequest) (*HCloudLoadBalancer, error) {
|
||||
body, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshal request: %w", err)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
LoadBalancer HCloudLoadBalancer `json:"load_balancer"`
|
||||
}
|
||||
if err := c.post(ctx, "/load_balancers", body, &result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result.LoadBalancer, nil
|
||||
}
|
||||
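// Illustrative sketch (not part of the original commit): building an
// HCloudLBCreateRequest and passing it to CreateLoadBalancer. The load
// balancer type, location, and target IP below are placeholder values.
//
//	req := HCloudLBCreateRequest{
//		Name:             "edge-lb",
//		LoadBalancerType: "lb11",
//		Location:         "fsn1",
//		Algorithm:        HCloudLBAlgorithm{Type: "round_robin"},
//		Services: []HCloudLBService{{
//			Protocol:        "tcp",
//			ListenPort:      443,
//			DestinationPort: 443,
//		}},
//		Targets: []HCloudLBCreateTarget{{
//			Type: "ip",
//			IP:   &HCloudLBTargetIP{IP: "10.0.0.2"},
//		}},
//	}
//	lb, err := client.CreateLoadBalancer(ctx, req)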
|
||||
// DeleteLoadBalancer deletes a load balancer by ID.
|
||||
func (c *HCloudClient) DeleteLoadBalancer(ctx context.Context, id int) error {
|
||||
return c.delete(ctx, fmt.Sprintf("/load_balancers/%d", id))
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a server snapshot.
func (c *HCloudClient) CreateSnapshot(ctx context.Context, serverID int, description string) error {
	// Marshalling a map[string]string cannot fail, so the error is safe to discard.
	body, _ := json.Marshal(map[string]string{
		"description": description,
		"type":        "snapshot",
	})
	return c.post(ctx, fmt.Sprintf("/servers/%d/actions/create_image", serverID), body, nil)
}
|
||||
|
||||
func (c *HCloudClient) get(ctx context.Context, path string, result any) error {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, hcloudBaseURL+path, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.do(req, result)
|
||||
}
|
||||
|
||||
func (c *HCloudClient) post(ctx context.Context, path string, body []byte, result any) error {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, hcloudBaseURL+path, strings.NewReader(string(body)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
return c.do(req, result)
|
||||
}
|
||||
|
||||
func (c *HCloudClient) delete(ctx context.Context, path string) error {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, hcloudBaseURL+path, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.do(req, nil)
|
||||
}
|
||||
|
||||
func (c *HCloudClient) do(req *http.Request, result any) error {
	req.Header.Set("Authorization", "Bearer "+c.token)

	resp, err := c.client.Do(req)
	if err != nil {
		return fmt.Errorf("hcloud API: %w", err)
	}
	defer func() { _ = resp.Body.Close() }()

	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("read response: %w", err)
	}

	if resp.StatusCode >= 400 {
		var apiErr struct {
			Error struct {
				Code    string `json:"code"`
				Message string `json:"message"`
			} `json:"error"`
		}
		if json.Unmarshal(data, &apiErr) == nil && apiErr.Error.Message != "" {
			return fmt.Errorf("hcloud API %d: %s: %s", resp.StatusCode, apiErr.Error.Code, apiErr.Error.Message)
		}
		return fmt.Errorf("hcloud API %d: %s", resp.StatusCode, string(data))
	}

	if result != nil {
		if err := json.Unmarshal(data, result); err != nil {
			return fmt.Errorf("decode response: %w", err)
		}
	}
	return nil
}
|
||||
|
||||
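// Illustrative sketch (not part of the original commit): a typical Cloud API
// round trip with this client. The token source and snapshot naming are
// placeholders.
//
//	client := NewHCloudClient(os.Getenv("HCLOUD_TOKEN"))
//	servers, err := client.ListServers(ctx)
//	if err != nil {
//		return err
//	}
//	for _, s := range servers {
//		if err := client.CreateSnapshot(ctx, s.ID, "nightly-"+s.Name); err != nil {
//			return err
//		}
//	}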
// --- Hetzner Robot API ---
|
||||
|
||||
// HRobotClient is an HTTP client for the Hetzner Robot API.
|
||||
type HRobotClient struct {
|
||||
user string
|
||||
password string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// NewHRobotClient creates a new Hetzner Robot API client.
|
||||
func NewHRobotClient(user, password string) *HRobotClient {
|
||||
return &HRobotClient{
|
||||
user: user,
|
||||
password: password,
|
||||
client: &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// HRobotServer represents a Hetzner Robot dedicated server.
|
||||
type HRobotServer struct {
|
||||
ServerIP string `json:"server_ip"`
|
||||
ServerName string `json:"server_name"`
|
||||
Product string `json:"product"`
|
||||
Datacenter string `json:"dc"`
|
||||
Status string `json:"status"`
|
||||
Cancelled bool `json:"cancelled"`
|
||||
PaidUntil string `json:"paid_until"`
|
||||
}
|
||||
|
||||
// ListServers returns all Robot dedicated servers.
|
||||
func (c *HRobotClient) ListServers(ctx context.Context) ([]HRobotServer, error) {
|
||||
var raw []struct {
|
||||
Server HRobotServer `json:"server"`
|
||||
}
|
||||
if err := c.get(ctx, "/server", &raw); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
servers := make([]HRobotServer, len(raw))
|
||||
for i, s := range raw {
|
||||
servers[i] = s.Server
|
||||
}
|
||||
return servers, nil
|
||||
}
|
||||
|
||||
// GetServer returns a Robot server by IP.
|
||||
func (c *HRobotClient) GetServer(ctx context.Context, ip string) (*HRobotServer, error) {
|
||||
var raw struct {
|
||||
Server HRobotServer `json:"server"`
|
||||
}
|
||||
if err := c.get(ctx, "/server/"+ip, &raw); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &raw.Server, nil
|
||||
}
|
||||
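// Illustrative sketch (not part of the original commit): listing dedicated
// servers with the Robot client. The web service credentials shown here are
// placeholders.
//
//	robot := NewHRobotClient(robotUser, robotPassword)
//	servers, err := robot.ListServers(ctx)
//	if err != nil {
//		return err
//	}
//	for _, s := range servers {
//		fmt.Printf("%s (%s) in %s: %s\n", s.ServerName, s.ServerIP, s.Datacenter, s.Status)
//	}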
|
||||
func (c *HRobotClient) get(ctx context.Context, path string, result any) error {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, hrobotBaseURL+path, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.SetBasicAuth(c.user, c.password)
|
||||
|
||||
resp, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("hrobot API: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read response: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return fmt.Errorf("hrobot API %d: %s", resp.StatusCode, string(data))
|
||||
}
|
||||
|
||||
if result != nil {
|
||||
if err := json.Unmarshal(data, result); err != nil {
|
||||
return fmt.Errorf("decode response: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
321
release/changelog.go
Normal file
@ -0,0 +1,321 @@
// Package release provides release automation with changelog generation and publishing.
|
||||
package release
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
// ConventionalCommit represents a parsed conventional commit.
|
||||
type ConventionalCommit struct {
|
||||
Type string // feat, fix, etc.
|
||||
Scope string // optional scope in parentheses
|
||||
Description string // commit description
|
||||
Hash string // short commit hash
|
||||
Breaking bool // has breaking change indicator
|
||||
}
|
||||
|
||||
// commitTypeLabels maps commit types to human-readable labels for the changelog.
|
||||
var commitTypeLabels = map[string]string{
|
||||
"feat": "Features",
|
||||
"fix": "Bug Fixes",
|
||||
"perf": "Performance Improvements",
|
||||
"refactor": "Code Refactoring",
|
||||
"docs": "Documentation",
|
||||
"style": "Styles",
|
||||
"test": "Tests",
|
||||
"build": "Build System",
|
||||
"ci": "Continuous Integration",
|
||||
"chore": "Chores",
|
||||
"revert": "Reverts",
|
||||
}
|
||||
|
||||
// commitTypeOrder defines the order of sections in the changelog.
|
||||
var commitTypeOrder = []string{
|
||||
"feat",
|
||||
"fix",
|
||||
"perf",
|
||||
"refactor",
|
||||
"docs",
|
||||
"style",
|
||||
"test",
|
||||
"build",
|
||||
"ci",
|
||||
"chore",
|
||||
"revert",
|
||||
}
|
||||
|
||||
// conventionalCommitRegex matches conventional commit format.
|
||||
// Examples: "feat: add feature", "fix(scope): fix bug", "feat!: breaking change"
|
||||
var conventionalCommitRegex = regexp.MustCompile(`^(\w+)(?:\(([^)]+)\))?(!)?:\s*(.+)$`)
|
||||
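// For reference (illustrative, not part of the original commit), the capture
// groups this regex yields for a few subjects:
//
//	"feat: add feature"      -> type "feat", scope "",     breaking false, description "add feature"
//	"fix(auth): handle nil"  -> type "fix",  scope "auth", breaking false, description "handle nil"
//	"feat(api)!: drop v1"    -> type "feat", scope "api",  breaking true,  description "drop v1"
//	"Update README"          -> no match (parseConventionalCommit returns nil)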
|
||||
// Generate generates a markdown changelog from git commits between two refs.
|
||||
// If fromRef is empty, it uses the previous tag or initial commit.
|
||||
// If toRef is empty, it uses HEAD.
|
||||
func Generate(dir, fromRef, toRef string) (string, error) {
|
||||
if toRef == "" {
|
||||
toRef = "HEAD"
|
||||
}
|
||||
|
||||
// If fromRef is empty, try to find previous tag
|
||||
if fromRef == "" {
|
||||
prevTag, err := getPreviousTag(dir, toRef)
|
||||
if err != nil {
|
||||
// No previous tag, use initial commit
|
||||
fromRef = ""
|
||||
} else {
|
||||
fromRef = prevTag
|
||||
}
|
||||
}
|
||||
|
||||
// Get commits between refs
|
||||
commits, err := getCommits(dir, fromRef, toRef)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("changelog.Generate: failed to get commits: %w", err)
|
||||
}
|
||||
|
||||
// Parse conventional commits
|
||||
var parsedCommits []ConventionalCommit
|
||||
for _, commit := range commits {
|
||||
parsed := parseConventionalCommit(commit)
|
||||
if parsed != nil {
|
||||
parsedCommits = append(parsedCommits, *parsed)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate markdown
|
||||
return formatChangelog(parsedCommits, toRef), nil
|
||||
}
|
||||
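// Usage sketch (illustrative, not part of the original commit): generating the
// changelog for the range between the previous tag and a new tag. From an
// importing package the call reads:
//
//	md, err := release.Generate(repoDir, "", "v1.2.0")
//	if err != nil {
//		return err
//	}
//	fmt.Println(md) // "## v1.2.0" followed by grouped sections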
|
||||
// GenerateWithConfig generates a changelog with filtering based on config.
|
||||
func GenerateWithConfig(dir, fromRef, toRef string, cfg *ChangelogConfig) (string, error) {
|
||||
if toRef == "" {
|
||||
toRef = "HEAD"
|
||||
}
|
||||
|
||||
// If fromRef is empty, try to find previous tag
|
||||
if fromRef == "" {
|
||||
prevTag, err := getPreviousTag(dir, toRef)
|
||||
if err != nil {
|
||||
fromRef = ""
|
||||
} else {
|
||||
fromRef = prevTag
|
||||
}
|
||||
}
|
||||
|
||||
// Get commits between refs
|
||||
commits, err := getCommits(dir, fromRef, toRef)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("changelog.GenerateWithConfig: failed to get commits: %w", err)
|
||||
}
|
||||
|
||||
// Build include/exclude sets
|
||||
includeSet := make(map[string]bool)
|
||||
excludeSet := make(map[string]bool)
|
||||
for _, t := range cfg.Include {
|
||||
includeSet[t] = true
|
||||
}
|
||||
for _, t := range cfg.Exclude {
|
||||
excludeSet[t] = true
|
||||
}
|
||||
|
||||
// Parse and filter conventional commits
|
||||
var parsedCommits []ConventionalCommit
|
||||
for _, commit := range commits {
|
||||
parsed := parseConventionalCommit(commit)
|
||||
if parsed == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Apply filters
|
||||
if len(includeSet) > 0 && !includeSet[parsed.Type] {
|
||||
continue
|
||||
}
|
||||
if excludeSet[parsed.Type] {
|
||||
continue
|
||||
}
|
||||
|
||||
parsedCommits = append(parsedCommits, *parsed)
|
||||
}
|
||||
|
||||
return formatChangelog(parsedCommits, toRef), nil
|
||||
}
|
||||
|
||||
// getPreviousTag returns the tag before the given ref.
|
||||
func getPreviousTag(dir, ref string) (string, error) {
|
||||
cmd := exec.Command("git", "describe", "--tags", "--abbrev=0", ref+"^")
|
||||
cmd.Dir = dir
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(output)), nil
|
||||
}
|
||||
|
||||
// getCommits returns a slice of commit strings between two refs.
|
||||
// Format: "hash subject"
|
||||
func getCommits(dir, fromRef, toRef string) ([]string, error) {
|
||||
var args []string
|
||||
if fromRef == "" {
|
||||
// All commits up to toRef
|
||||
args = []string{"log", "--oneline", "--no-merges", toRef}
|
||||
} else {
|
||||
// Commits between refs
|
||||
args = []string{"log", "--oneline", "--no-merges", fromRef + ".." + toRef}
|
||||
}
|
||||
|
||||
cmd := exec.Command("git", args...)
|
||||
cmd.Dir = dir
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var commits []string
|
||||
scanner := bufio.NewScanner(bytes.NewReader(output))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if line != "" {
|
||||
commits = append(commits, line)
|
||||
}
|
||||
}
|
||||
|
||||
return commits, scanner.Err()
|
||||
}
|
||||
|
||||
// parseConventionalCommit parses a git log --oneline output into a ConventionalCommit.
|
||||
// Returns nil if the commit doesn't follow conventional commit format.
|
||||
func parseConventionalCommit(commitLine string) *ConventionalCommit {
|
||||
// Split hash and subject
|
||||
parts := strings.SplitN(commitLine, " ", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil
|
||||
}
|
||||
|
||||
hash := parts[0]
|
||||
subject := parts[1]
|
||||
|
||||
// Match conventional commit format
|
||||
matches := conventionalCommitRegex.FindStringSubmatch(subject)
|
||||
if matches == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &ConventionalCommit{
|
||||
Type: strings.ToLower(matches[1]),
|
||||
Scope: matches[2],
|
||||
Breaking: matches[3] == "!",
|
||||
Description: matches[4],
|
||||
Hash: hash,
|
||||
}
|
||||
}
|
||||
|
||||
// formatChangelog formats parsed commits into markdown.
|
||||
func formatChangelog(commits []ConventionalCommit, version string) string {
|
||||
if len(commits) == 0 {
|
||||
return fmt.Sprintf("## %s\n\nNo notable changes.", version)
|
||||
}
|
||||
|
||||
// Group commits by type
|
||||
grouped := make(map[string][]ConventionalCommit)
|
||||
var breaking []ConventionalCommit
|
||||
|
||||
for _, commit := range commits {
|
||||
if commit.Breaking {
|
||||
breaking = append(breaking, commit)
|
||||
}
|
||||
grouped[commit.Type] = append(grouped[commit.Type], commit)
|
||||
}
|
||||
|
||||
var buf strings.Builder
|
||||
buf.WriteString(fmt.Sprintf("## %s\n\n", version))
|
||||
|
||||
// Breaking changes first
|
||||
if len(breaking) > 0 {
|
||||
buf.WriteString("### BREAKING CHANGES\n\n")
|
||||
for _, commit := range breaking {
|
||||
buf.WriteString(formatCommitLine(commit))
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
|
||||
// Other sections in order
|
||||
for _, commitType := range commitTypeOrder {
|
||||
commits, ok := grouped[commitType]
|
||||
if !ok || len(commits) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
label, ok := commitTypeLabels[commitType]
|
||||
if !ok {
|
||||
label = cases.Title(language.English).String(commitType)
|
||||
}
|
||||
|
||||
buf.WriteString(fmt.Sprintf("### %s\n\n", label))
|
||||
for _, commit := range commits {
|
||||
buf.WriteString(formatCommitLine(commit))
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
|
||||
// Any remaining types not in the order list
|
||||
var remainingTypes []string
|
||||
for commitType := range grouped {
|
||||
found := false
|
||||
for _, t := range commitTypeOrder {
|
||||
if t == commitType {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
remainingTypes = append(remainingTypes, commitType)
|
||||
}
|
||||
}
|
||||
sort.Strings(remainingTypes)
|
||||
|
||||
for _, commitType := range remainingTypes {
|
||||
commits := grouped[commitType]
|
||||
label := cases.Title(language.English).String(commitType)
|
||||
buf.WriteString(fmt.Sprintf("### %s\n\n", label))
|
||||
for _, commit := range commits {
|
||||
buf.WriteString(formatCommitLine(commit))
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(buf.String(), "\n")
|
||||
}
|
||||
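// For two commits (feat "add X" and fix(api) "handle Y"), the output looks
// roughly like the following (illustrative, not produced by the original commit):
//
//	## v1.0.0
//
//	### Features
//
//	- add X (abc1234)
//
//	### Bug Fixes
//
//	- **api**: handle Y (def5678)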
|
||||
// formatCommitLine formats a single commit as a changelog line.
|
||||
func formatCommitLine(commit ConventionalCommit) string {
|
||||
var buf strings.Builder
|
||||
buf.WriteString("- ")
|
||||
|
||||
if commit.Scope != "" {
|
||||
buf.WriteString(fmt.Sprintf("**%s**: ", commit.Scope))
|
||||
}
|
||||
|
||||
buf.WriteString(commit.Description)
|
||||
buf.WriteString(fmt.Sprintf(" (%s)\n", commit.Hash))
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// ParseCommitType extracts the type from a conventional commit subject.
|
||||
// Returns empty string if not a conventional commit.
|
||||
func ParseCommitType(subject string) string {
|
||||
matches := conventionalCommitRegex.FindStringSubmatch(subject)
|
||||
if matches == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.ToLower(matches[1])
|
||||
}
|
||||
695
release/changelog_test.go
Normal file
@ -0,0 +1,695 @@
package release
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseConventionalCommit_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected *ConventionalCommit
|
||||
}{
|
||||
{
|
||||
name: "feat without scope",
|
||||
input: "abc1234 feat: add new feature",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "feat",
|
||||
Scope: "",
|
||||
Description: "add new feature",
|
||||
Hash: "abc1234",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fix with scope",
|
||||
input: "def5678 fix(auth): resolve login issue",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "fix",
|
||||
Scope: "auth",
|
||||
Description: "resolve login issue",
|
||||
Hash: "def5678",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "breaking change with exclamation",
|
||||
input: "ghi9012 feat!: breaking API change",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "feat",
|
||||
Scope: "",
|
||||
Description: "breaking API change",
|
||||
Hash: "ghi9012",
|
||||
Breaking: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "breaking change with scope",
|
||||
input: "jkl3456 fix(api)!: remove deprecated endpoint",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "fix",
|
||||
Scope: "api",
|
||||
Description: "remove deprecated endpoint",
|
||||
Hash: "jkl3456",
|
||||
Breaking: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "perf type",
|
||||
input: "mno7890 perf: optimize database queries",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "perf",
|
||||
Scope: "",
|
||||
Description: "optimize database queries",
|
||||
Hash: "mno7890",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "chore type",
|
||||
input: "pqr1234 chore: update dependencies",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "chore",
|
||||
Scope: "",
|
||||
Description: "update dependencies",
|
||||
Hash: "pqr1234",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "uppercase type normalizes to lowercase",
|
||||
input: "stu5678 FEAT: uppercase type",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "feat",
|
||||
Scope: "",
|
||||
Description: "uppercase type",
|
||||
Hash: "stu5678",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := parseConventionalCommit(tc.input)
|
||||
assert.NotNil(t, result)
|
||||
assert.Equal(t, tc.expected.Type, result.Type)
|
||||
assert.Equal(t, tc.expected.Scope, result.Scope)
|
||||
assert.Equal(t, tc.expected.Description, result.Description)
|
||||
assert.Equal(t, tc.expected.Hash, result.Hash)
|
||||
assert.Equal(t, tc.expected.Breaking, result.Breaking)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseConventionalCommit_Bad(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
}{
|
||||
{
|
||||
name: "non-conventional commit",
|
||||
input: "abc1234 Update README",
|
||||
},
|
||||
{
|
||||
name: "missing colon",
|
||||
input: "def5678 feat add feature",
|
||||
},
|
||||
{
|
||||
name: "empty subject",
|
||||
input: "ghi9012",
|
||||
},
|
||||
{
|
||||
name: "just hash",
|
||||
input: "abc1234",
|
||||
},
|
||||
{
|
||||
name: "merge commit",
|
||||
input: "abc1234 Merge pull request #123",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := parseConventionalCommit(tc.input)
|
||||
assert.Nil(t, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatChangelog_Good(t *testing.T) {
|
||||
t.Run("formats commits by type", func(t *testing.T) {
|
||||
commits := []ConventionalCommit{
|
||||
{Type: "feat", Description: "add feature A", Hash: "abc1234"},
|
||||
{Type: "fix", Description: "fix bug B", Hash: "def5678"},
|
||||
{Type: "feat", Description: "add feature C", Hash: "ghi9012"},
|
||||
}
|
||||
|
||||
result := formatChangelog(commits, "v1.0.0")
|
||||
|
||||
assert.Contains(t, result, "## v1.0.0")
|
||||
assert.Contains(t, result, "### Features")
|
||||
assert.Contains(t, result, "### Bug Fixes")
|
||||
assert.Contains(t, result, "- add feature A (abc1234)")
|
||||
assert.Contains(t, result, "- fix bug B (def5678)")
|
||||
assert.Contains(t, result, "- add feature C (ghi9012)")
|
||||
})
|
||||
|
||||
t.Run("includes scope in output", func(t *testing.T) {
|
||||
commits := []ConventionalCommit{
|
||||
{Type: "feat", Scope: "api", Description: "add endpoint", Hash: "abc1234"},
|
||||
}
|
||||
|
||||
result := formatChangelog(commits, "v1.0.0")
|
||||
|
||||
assert.Contains(t, result, "**api**: add endpoint")
|
||||
})
|
||||
|
||||
t.Run("breaking changes first", func(t *testing.T) {
|
||||
commits := []ConventionalCommit{
|
||||
{Type: "feat", Description: "normal feature", Hash: "abc1234"},
|
||||
{Type: "feat", Description: "breaking feature", Hash: "def5678", Breaking: true},
|
||||
}
|
||||
|
||||
result := formatChangelog(commits, "v1.0.0")
|
||||
|
||||
assert.Contains(t, result, "### BREAKING CHANGES")
|
||||
// Breaking changes section should appear before Features
|
||||
breakingPos := indexOf(result, "BREAKING CHANGES")
|
||||
featuresPos := indexOf(result, "Features")
|
||||
assert.Less(t, breakingPos, featuresPos)
|
||||
})
|
||||
|
||||
t.Run("empty commits returns minimal changelog", func(t *testing.T) {
|
||||
result := formatChangelog([]ConventionalCommit{}, "v1.0.0")
|
||||
|
||||
assert.Contains(t, result, "## v1.0.0")
|
||||
assert.Contains(t, result, "No notable changes")
|
||||
})
|
||||
}
|
||||
|
||||
func TestParseCommitType_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{"feat: add feature", "feat"},
|
||||
{"fix(scope): fix bug", "fix"},
|
||||
{"perf!: breaking perf", "perf"},
|
||||
{"chore: update deps", "chore"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.input, func(t *testing.T) {
|
||||
result := ParseCommitType(tc.input)
|
||||
assert.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseCommitType_Bad(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
}{
|
||||
{"not a conventional commit"},
|
||||
{"Update README"},
|
||||
{"Merge branch 'main'"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.input, func(t *testing.T) {
|
||||
result := ParseCommitType(tc.input)
|
||||
assert.Empty(t, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateWithConfig_ConfigValues(t *testing.T) {
|
||||
t.Run("config filters are parsed correctly", func(t *testing.T) {
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat", "fix"},
|
||||
Exclude: []string{"chore", "docs"},
|
||||
}
|
||||
|
||||
// Verify the config values
|
||||
assert.Contains(t, cfg.Include, "feat")
|
||||
assert.Contains(t, cfg.Include, "fix")
|
||||
assert.Contains(t, cfg.Exclude, "chore")
|
||||
assert.Contains(t, cfg.Exclude, "docs")
|
||||
})
|
||||
}
|
||||
|
||||
// indexOf returns the position of a substring in a string, or -1 if not found.
|
||||
func indexOf(s, substr string) int {
|
||||
for i := 0; i+len(substr) <= len(s); i++ {
|
||||
if s[i:i+len(substr)] == substr {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// setupChangelogGitRepo creates a temporary directory with an initialized git repository.
|
||||
func setupChangelogGitRepo(t *testing.T) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
// Configure git user for commits
|
||||
cmd = exec.Command("git", "config", "user.email", "test@example.com")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
cmd = exec.Command("git", "config", "user.name", "Test User")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
// createChangelogCommit creates a commit in the given directory.
|
||||
func createChangelogCommit(t *testing.T, dir, message string) {
|
||||
t.Helper()
|
||||
|
||||
// Create or modify a file
|
||||
filePath := filepath.Join(dir, "changelog_test.txt")
|
||||
content, _ := os.ReadFile(filePath)
|
||||
content = append(content, []byte(message+"\n")...)
|
||||
require.NoError(t, os.WriteFile(filePath, content, 0644))
|
||||
|
||||
// Stage and commit
|
||||
cmd := exec.Command("git", "add", ".")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
cmd = exec.Command("git", "commit", "-m", message)
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
}
|
||||
|
||||
// createChangelogTag creates a tag in the given directory.
|
||||
func createChangelogTag(t *testing.T, dir, tag string) {
|
||||
t.Helper()
|
||||
cmd := exec.Command("git", "tag", tag)
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
}
|
||||
|
||||
func TestGenerate_Good(t *testing.T) {
|
||||
t.Run("generates changelog from commits", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: add new feature")
|
||||
createChangelogCommit(t, dir, "fix: resolve bug")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "## HEAD")
|
||||
assert.Contains(t, changelog, "### Features")
|
||||
assert.Contains(t, changelog, "add new feature")
|
||||
assert.Contains(t, changelog, "### Bug Fixes")
|
||||
assert.Contains(t, changelog, "resolve bug")
|
||||
})
|
||||
|
||||
t.Run("generates changelog between tags", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: initial feature")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
createChangelogCommit(t, dir, "fix: bug fix")
|
||||
createChangelogTag(t, dir, "v1.1.0")
|
||||
|
||||
changelog, err := Generate(dir, "v1.0.0", "v1.1.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "## v1.1.0")
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.Contains(t, changelog, "bug fix")
|
||||
// Should NOT contain the initial feature
|
||||
assert.NotContains(t, changelog, "initial feature")
|
||||
})
|
||||
|
||||
t.Run("handles empty changelog when no conventional commits", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "Update README")
|
||||
createChangelogCommit(t, dir, "Merge branch main")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "No notable changes")
|
||||
})
|
||||
|
||||
t.Run("uses previous tag when fromRef is empty", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: old feature")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.NotContains(t, changelog, "old feature")
|
||||
})
|
||||
|
||||
t.Run("includes breaking changes", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat!: breaking API change")
|
||||
createChangelogCommit(t, dir, "feat: normal feature")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "### BREAKING CHANGES")
|
||||
assert.Contains(t, changelog, "breaking API change")
|
||||
})
|
||||
|
||||
t.Run("includes scope in output", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat(api): add endpoint")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "**api**:")
|
||||
})
|
||||
}
|
||||
|
||||
func TestGenerate_Bad(t *testing.T) {
|
||||
t.Run("returns error for non-git directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
_, err := Generate(dir, "", "HEAD")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGenerateWithConfig_Good(t *testing.T) {
|
||||
t.Run("filters commits by include list", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
createChangelogCommit(t, dir, "fix: bug fix")
|
||||
createChangelogCommit(t, dir, "chore: update deps")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.NotContains(t, changelog, "bug fix")
|
||||
assert.NotContains(t, changelog, "update deps")
|
||||
})
|
||||
|
||||
t.Run("filters commits by exclude list", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
createChangelogCommit(t, dir, "fix: bug fix")
|
||||
createChangelogCommit(t, dir, "chore: update deps")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Exclude: []string{"chore"},
|
||||
}
|
||||
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.Contains(t, changelog, "bug fix")
|
||||
assert.NotContains(t, changelog, "update deps")
|
||||
})
|
||||
|
||||
t.Run("combines include and exclude filters", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
createChangelogCommit(t, dir, "fix: bug fix")
|
||||
createChangelogCommit(t, dir, "perf: performance")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat", "fix", "perf"},
|
||||
Exclude: []string{"perf"},
|
||||
}
|
||||
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.Contains(t, changelog, "bug fix")
|
||||
assert.NotContains(t, changelog, "performance")
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetCommits_Good(t *testing.T) {
|
||||
t.Run("returns all commits when fromRef is empty", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: first")
|
||||
createChangelogCommit(t, dir, "feat: second")
|
||||
createChangelogCommit(t, dir, "feat: third")
|
||||
|
||||
commits, err := getCommits(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, commits, 3)
|
||||
})
|
||||
|
||||
t.Run("returns commits between refs", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: first")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: second")
|
||||
createChangelogCommit(t, dir, "feat: third")
|
||||
|
||||
commits, err := getCommits(dir, "v1.0.0", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, commits, 2)
|
||||
})
|
||||
|
||||
t.Run("excludes merge commits", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: regular commit")
|
||||
// Merge commits are excluded by --no-merges flag
|
||||
// We can verify by checking the count matches expected
|
||||
|
||||
commits, err := getCommits(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, commits, 1)
|
||||
assert.Contains(t, commits[0], "regular commit")
|
||||
})
|
||||
|
||||
t.Run("returns empty slice for no commits in range", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: only commit")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
|
||||
commits, err := getCommits(dir, "v1.0.0", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Empty(t, commits)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetCommits_Bad(t *testing.T) {
|
||||
t.Run("returns error for invalid ref", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: commit")
|
||||
|
||||
_, err := getCommits(dir, "nonexistent-tag", "HEAD")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("returns error for non-git directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
_, err := getCommits(dir, "", "HEAD")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetPreviousTag_Good(t *testing.T) {
|
||||
t.Run("returns previous tag", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: first")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: second")
|
||||
createChangelogTag(t, dir, "v1.1.0")
|
||||
|
||||
tag, err := getPreviousTag(dir, "v1.1.0")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", tag)
|
||||
})
|
||||
|
||||
t.Run("returns tag before HEAD", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: first")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: second")
|
||||
|
||||
tag, err := getPreviousTag(dir, "HEAD")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", tag)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetPreviousTag_Bad(t *testing.T) {
|
||||
t.Run("returns error when no previous tag exists", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: first")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
|
||||
// v1.0.0^ has no tag before it
|
||||
_, err := getPreviousTag(dir, "v1.0.0")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("returns error for invalid ref", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: commit")
|
||||
|
||||
_, err := getPreviousTag(dir, "nonexistent")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFormatCommitLine_Good(t *testing.T) {
|
||||
t.Run("formats commit without scope", func(t *testing.T) {
|
||||
commit := ConventionalCommit{
|
||||
Type: "feat",
|
||||
Description: "add feature",
|
||||
Hash: "abc1234",
|
||||
}
|
||||
|
||||
result := formatCommitLine(commit)
|
||||
assert.Equal(t, "- add feature (abc1234)\n", result)
|
||||
})
|
||||
|
||||
t.Run("formats commit with scope", func(t *testing.T) {
|
||||
commit := ConventionalCommit{
|
||||
Type: "fix",
|
||||
Scope: "api",
|
||||
Description: "fix bug",
|
||||
Hash: "def5678",
|
||||
}
|
||||
|
||||
result := formatCommitLine(commit)
|
||||
assert.Equal(t, "- **api**: fix bug (def5678)\n", result)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFormatChangelog_Ugly(t *testing.T) {
|
||||
t.Run("handles custom commit type not in order", func(t *testing.T) {
|
||||
commits := []ConventionalCommit{
|
||||
{Type: "custom", Description: "custom type", Hash: "abc1234"},
|
||||
}
|
||||
|
||||
result := formatChangelog(commits, "v1.0.0")
|
||||
|
||||
assert.Contains(t, result, "### Custom")
|
||||
assert.Contains(t, result, "custom type")
|
||||
})
|
||||
|
||||
t.Run("handles multiple custom commit types", func(t *testing.T) {
|
||||
commits := []ConventionalCommit{
|
||||
{Type: "alpha", Description: "alpha feature", Hash: "abc1234"},
|
||||
{Type: "beta", Description: "beta feature", Hash: "def5678"},
|
||||
}
|
||||
|
||||
result := formatChangelog(commits, "v1.0.0")
|
||||
|
||||
// Should be sorted alphabetically for custom types
|
||||
assert.Contains(t, result, "### Alpha")
|
||||
assert.Contains(t, result, "### Beta")
|
||||
})
|
||||
}
|
||||
|
||||
func TestGenerateWithConfig_Bad(t *testing.T) {
|
||||
t.Run("returns error for non-git directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
_, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGenerateWithConfig_EdgeCases(t *testing.T) {
|
||||
t.Run("uses HEAD when toRef is empty", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
// Pass empty toRef
|
||||
changelog, err := GenerateWithConfig(dir, "", "", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "## HEAD")
|
||||
})
|
||||
|
||||
t.Run("handles previous tag lookup failure gracefully", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: first")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
// No tags exist, should still work
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "first")
|
||||
})
|
||||
|
||||
t.Run("uses explicit fromRef when provided", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: old feature")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
// Use explicit fromRef
|
||||
changelog, err := GenerateWithConfig(dir, "v1.0.0", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.NotContains(t, changelog, "old feature")
|
||||
})
|
||||
|
||||
t.Run("skips non-conventional commits", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: conventional commit")
|
||||
createChangelogCommit(t, dir, "Update README")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "conventional commit")
|
||||
assert.NotContains(t, changelog, "Update README")
|
||||
})
|
||||
}
|
||||
316
release/config.go
Normal file
@ -0,0 +1,316 @@
// Package release provides release automation with changelog generation and publishing.
|
||||
package release
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// ConfigFileName is the name of the release configuration file.
|
||||
const ConfigFileName = "release.yaml"
|
||||
|
||||
// ConfigDir is the directory where release configuration is stored.
|
||||
const ConfigDir = ".core"
|
||||
|
||||
// Config holds the complete release configuration loaded from .core/release.yaml.
|
||||
type Config struct {
|
||||
// Version is the config file format version.
|
||||
Version int `yaml:"version"`
|
||||
// Project contains project metadata.
|
||||
Project ProjectConfig `yaml:"project"`
|
||||
// Build contains build settings for the release.
|
||||
Build BuildConfig `yaml:"build"`
|
||||
// Publishers defines where to publish the release.
|
||||
Publishers []PublisherConfig `yaml:"publishers"`
|
||||
// Changelog configures changelog generation.
|
||||
Changelog ChangelogConfig `yaml:"changelog"`
|
||||
// SDK configures SDK generation.
|
||||
SDK *SDKConfig `yaml:"sdk,omitempty"`
|
||||
|
||||
// Internal fields (not serialized)
|
||||
projectDir string // Set by LoadConfig
|
||||
version string // Set by CLI flag
|
||||
}
|
||||
|
||||
// ProjectConfig holds project metadata for releases.
|
||||
type ProjectConfig struct {
|
||||
// Name is the project name.
|
||||
Name string `yaml:"name"`
|
||||
// Repository is the GitHub repository in owner/repo format.
|
||||
Repository string `yaml:"repository"`
|
||||
}
|
||||
|
||||
// BuildConfig holds build settings for releases.
|
||||
type BuildConfig struct {
|
||||
// Targets defines the build targets.
|
||||
Targets []TargetConfig `yaml:"targets"`
|
||||
}
|
||||
|
||||
// TargetConfig defines a build target.
|
||||
type TargetConfig struct {
|
||||
// OS is the target operating system (e.g., "linux", "darwin", "windows").
|
||||
OS string `yaml:"os"`
|
||||
// Arch is the target architecture (e.g., "amd64", "arm64").
|
||||
Arch string `yaml:"arch"`
|
||||
}
|
||||
|
||||
// PublisherConfig holds configuration for a publisher.
|
||||
type PublisherConfig struct {
|
||||
// Type is the publisher type (e.g., "github", "linuxkit", "docker").
|
||||
Type string `yaml:"type"`
|
||||
// Prerelease marks the release as a prerelease.
|
||||
Prerelease bool `yaml:"prerelease"`
|
||||
// Draft creates the release as a draft.
|
||||
Draft bool `yaml:"draft"`
|
||||
|
||||
// LinuxKit-specific configuration
|
||||
// Config is the path to the LinuxKit YAML configuration file.
|
||||
Config string `yaml:"config,omitempty"`
|
||||
// Formats are the output formats to build (iso, raw, qcow2, vmdk).
|
||||
Formats []string `yaml:"formats,omitempty"`
|
||||
// Platforms are the target platforms (linux/amd64, linux/arm64).
|
||||
Platforms []string `yaml:"platforms,omitempty"`
|
||||
|
||||
// Docker-specific configuration
|
||||
// Registry is the container registry (default: ghcr.io).
|
||||
Registry string `yaml:"registry,omitempty"`
|
||||
// Image is the image name in owner/repo format.
|
||||
Image string `yaml:"image,omitempty"`
|
||||
// Dockerfile is the path to the Dockerfile (default: Dockerfile).
|
||||
Dockerfile string `yaml:"dockerfile,omitempty"`
|
||||
// Tags are the image tags to apply.
|
||||
Tags []string `yaml:"tags,omitempty"`
|
||||
// BuildArgs are additional Docker build arguments.
|
||||
BuildArgs map[string]string `yaml:"build_args,omitempty"`
|
||||
|
||||
// npm-specific configuration
|
||||
// Package is the npm package name (e.g., "@host-uk/core").
|
||||
Package string `yaml:"package,omitempty"`
|
||||
// Access is the npm access level: "public" or "restricted".
|
||||
Access string `yaml:"access,omitempty"`
|
||||
|
||||
// Homebrew-specific configuration
|
||||
// Tap is the Homebrew tap repository (e.g., "host-uk/homebrew-tap").
|
||||
Tap string `yaml:"tap,omitempty"`
|
||||
// Formula is the formula name (defaults to project name).
|
||||
Formula string `yaml:"formula,omitempty"`
|
||||
|
||||
// Scoop-specific configuration
|
||||
// Bucket is the Scoop bucket repository (e.g., "host-uk/scoop-bucket").
|
||||
Bucket string `yaml:"bucket,omitempty"`
|
||||
|
||||
// AUR-specific configuration
|
||||
// Maintainer is the AUR package maintainer (e.g., "Name <email>").
|
||||
Maintainer string `yaml:"maintainer,omitempty"`
|
||||
|
||||
// Chocolatey-specific configuration
|
||||
// Push determines whether to push to Chocolatey (false = generate only).
|
||||
Push bool `yaml:"push,omitempty"`
|
||||
|
||||
// Official repo configuration (for Homebrew, Scoop)
|
||||
// When enabled, generates files for PR to official repos.
|
||||
Official *OfficialConfig `yaml:"official,omitempty"`
|
||||
}
|
||||
|
||||
// OfficialConfig holds configuration for generating files for official repo PRs.
|
||||
type OfficialConfig struct {
|
||||
// Enabled determines whether to generate files for official repos.
|
||||
Enabled bool `yaml:"enabled"`
|
||||
// Output is the directory to write generated files.
|
||||
Output string `yaml:"output,omitempty"`
|
||||
}
|
||||
|
||||
// SDKConfig holds SDK generation configuration.
|
||||
type SDKConfig struct {
|
||||
// Spec is the path to the OpenAPI spec file.
|
||||
Spec string `yaml:"spec,omitempty"`
|
||||
// Languages to generate.
|
||||
Languages []string `yaml:"languages,omitempty"`
|
||||
// Output directory (default: sdk/).
|
||||
Output string `yaml:"output,omitempty"`
|
||||
// Package naming.
|
||||
Package SDKPackageConfig `yaml:"package,omitempty"`
|
||||
// Diff configuration.
|
||||
Diff SDKDiffConfig `yaml:"diff,omitempty"`
|
||||
// Publish configuration.
|
||||
Publish SDKPublishConfig `yaml:"publish,omitempty"`
|
||||
}
|
||||
|
||||
// SDKPackageConfig holds package naming configuration.
|
||||
type SDKPackageConfig struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
Version string `yaml:"version,omitempty"`
|
||||
}
|
||||
|
||||
// SDKDiffConfig holds diff configuration.
|
||||
type SDKDiffConfig struct {
|
||||
Enabled bool `yaml:"enabled,omitempty"`
|
||||
FailOnBreaking bool `yaml:"fail_on_breaking,omitempty"`
|
||||
}
|
||||
|
||||
// SDKPublishConfig holds monorepo publish configuration.
|
||||
type SDKPublishConfig struct {
|
||||
Repo string `yaml:"repo,omitempty"`
|
||||
Path string `yaml:"path,omitempty"`
|
||||
}
|
||||
|
||||
// ChangelogConfig holds changelog generation settings.
|
||||
type ChangelogConfig struct {
|
||||
// Include specifies commit types to include in the changelog.
|
||||
Include []string `yaml:"include"`
|
||||
// Exclude specifies commit types to exclude from the changelog.
|
||||
Exclude []string `yaml:"exclude"`
|
||||
}
|
||||
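// A minimal .core/release.yaml matching these types might look like the
// following (illustrative sketch, not shipped with the original commit):
//
//	version: 1
//	project:
//	  name: mytool
//	  repository: owner/mytool
//	build:
//	  targets:
//	    - os: linux
//	      arch: amd64
//	publishers:
//	  - type: github
//	changelog:
//	  include: [feat, fix]
//	  exclude: [chore]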
|
||||
// LoadConfig loads release configuration from the .core/release.yaml file in the given directory.
|
||||
// If the config file does not exist, it returns DefaultConfig().
|
||||
// Returns an error if the file exists but cannot be parsed.
|
||||
func LoadConfig(dir string) (*Config, error) {
|
||||
configPath := filepath.Join(dir, ConfigDir, ConfigFileName)
|
||||
|
||||
// Convert to absolute path for io.Local
|
||||
absPath, err := filepath.Abs(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("release.LoadConfig: failed to resolve path: %w", err)
|
||||
}
|
||||
|
||||
content, err := io.Local.Read(absPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
cfg := DefaultConfig()
|
||||
cfg.projectDir = dir
|
||||
return cfg, nil
|
||||
}
|
||||
return nil, fmt.Errorf("release.LoadConfig: failed to read config file: %w", err)
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
if err := yaml.Unmarshal([]byte(content), &cfg); err != nil {
|
||||
return nil, fmt.Errorf("release.LoadConfig: failed to parse config file: %w", err)
|
||||
}
|
||||
|
||||
// Apply defaults for any missing fields
|
||||
applyDefaults(&cfg)
|
||||
cfg.projectDir = dir
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
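// Usage sketch (illustrative, not part of the original commit): an importing
// package loading the config and falling back to defaults when the file is
// absent. The version string is a placeholder for a CLI flag.
//
//	cfg, err := release.LoadConfig(projectDir)
//	if err != nil {
//		return fmt.Errorf("load release config: %w", err)
//	}
//	cfg.SetVersion("v1.2.0") // optional override
//	for _, t := range cfg.Build.Targets {
//		fmt.Printf("building %s/%s\n", t.OS, t.Arch)
//	}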
|
||||
// DefaultConfig returns sensible defaults for release configuration.
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
Version: 1,
|
||||
Project: ProjectConfig{
|
||||
Name: "",
|
||||
Repository: "",
|
||||
},
|
||||
Build: BuildConfig{
|
||||
Targets: []TargetConfig{
|
||||
{OS: "linux", Arch: "amd64"},
|
||||
{OS: "linux", Arch: "arm64"},
|
||||
{OS: "darwin", Arch: "arm64"},
|
||||
{OS: "windows", Arch: "amd64"},
|
||||
},
|
||||
},
|
||||
Publishers: []PublisherConfig{
|
||||
{
|
||||
Type: "github",
|
||||
Prerelease: false,
|
||||
Draft: false,
|
||||
},
|
||||
},
|
||||
Changelog: ChangelogConfig{
|
||||
Include: []string{"feat", "fix", "perf", "refactor"},
|
||||
Exclude: []string{"chore", "docs", "style", "test", "ci"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// applyDefaults fills in default values for any empty fields in the config.
|
||||
func applyDefaults(cfg *Config) {
|
||||
defaults := DefaultConfig()
|
||||
|
||||
if cfg.Version == 0 {
|
||||
cfg.Version = defaults.Version
|
||||
}
|
||||
|
||||
if len(cfg.Build.Targets) == 0 {
|
||||
cfg.Build.Targets = defaults.Build.Targets
|
||||
}
|
||||
|
||||
if len(cfg.Publishers) == 0 {
|
||||
cfg.Publishers = defaults.Publishers
|
||||
}
|
||||
|
||||
if len(cfg.Changelog.Include) == 0 && len(cfg.Changelog.Exclude) == 0 {
|
||||
cfg.Changelog.Include = defaults.Changelog.Include
|
||||
cfg.Changelog.Exclude = defaults.Changelog.Exclude
|
||||
}
|
||||
}
|
||||
|
||||
// SetProjectDir sets the project directory on the config.
|
||||
func (c *Config) SetProjectDir(dir string) {
|
||||
c.projectDir = dir
|
||||
}
|
||||
|
||||
// SetVersion sets the version override on the config.
|
||||
func (c *Config) SetVersion(version string) {
|
||||
c.version = version
|
||||
}
|
||||
|
||||
// ConfigPath returns the path to the release config file for a given directory.
|
||||
func ConfigPath(dir string) string {
|
||||
return filepath.Join(dir, ConfigDir, ConfigFileName)
|
||||
}
|
||||
|
||||
// ConfigExists checks if a release config file exists in the given directory.
|
||||
func ConfigExists(dir string) bool {
|
||||
configPath := ConfigPath(dir)
|
||||
absPath, err := filepath.Abs(configPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return io.Local.IsFile(absPath)
|
||||
}
|
||||
|
||||
// GetRepository returns the repository from the config.
|
||||
func (c *Config) GetRepository() string {
|
||||
return c.Project.Repository
|
||||
}
|
||||
|
||||
// GetProjectName returns the project name from the config.
|
||||
func (c *Config) GetProjectName() string {
|
||||
return c.Project.Name
|
||||
}
|
||||
|
||||
// WriteConfig writes the config to the .core/release.yaml file.
|
||||
func WriteConfig(cfg *Config, dir string) error {
|
||||
configPath := ConfigPath(dir)
|
||||
|
||||
// Convert to absolute path for io.Local
|
||||
absPath, err := filepath.Abs(configPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("release.WriteConfig: failed to resolve path: %w", err)
|
||||
}
|
||||
|
||||
// Ensure directory exists
|
||||
configDir := filepath.Dir(absPath)
|
||||
if err := io.Local.EnsureDir(configDir); err != nil {
|
||||
return fmt.Errorf("release.WriteConfig: failed to create directory: %w", err)
|
||||
}
|
||||
|
||||
data, err := yaml.Marshal(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("release.WriteConfig: failed to marshal config: %w", err)
|
||||
}
|
||||
|
||||
if err := io.Local.Write(absPath, string(data)); err != nil {
|
||||
return fmt.Errorf("release.WriteConfig: failed to write config file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
363
release/config_test.go
Normal file
@ -0,0 +1,363 @@
package release
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupConfigTestDir creates a temp directory with optional .core/release.yaml content.
|
||||
func setupConfigTestDir(t *testing.T, configContent string) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
|
||||
if configContent != "" {
|
||||
coreDir := filepath.Join(dir, ConfigDir)
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
configPath := filepath.Join(coreDir, ConfigFileName)
|
||||
err = os.WriteFile(configPath, []byte(configContent), 0644)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
func TestLoadConfig_Good(t *testing.T) {
|
||||
t.Run("loads valid config", func(t *testing.T) {
|
||||
content := `
|
||||
version: 1
|
||||
project:
|
||||
name: myapp
|
||||
repository: owner/repo
|
||||
build:
|
||||
targets:
|
||||
- os: linux
|
||||
arch: amd64
|
||||
- os: darwin
|
||||
arch: arm64
|
||||
publishers:
|
||||
- type: github
|
||||
prerelease: true
|
||||
draft: false
|
||||
changelog:
|
||||
include:
|
||||
- feat
|
||||
- fix
|
||||
exclude:
|
||||
- chore
|
||||
`
|
||||
dir := setupConfigTestDir(t, content)
|
||||
|
||||
cfg, err := LoadConfig(dir)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cfg)
|
||||
|
||||
assert.Equal(t, 1, cfg.Version)
|
||||
assert.Equal(t, "myapp", cfg.Project.Name)
|
||||
assert.Equal(t, "owner/repo", cfg.Project.Repository)
|
||||
assert.Len(t, cfg.Build.Targets, 2)
|
||||
assert.Equal(t, "linux", cfg.Build.Targets[0].OS)
|
||||
assert.Equal(t, "amd64", cfg.Build.Targets[0].Arch)
|
||||
assert.Equal(t, "darwin", cfg.Build.Targets[1].OS)
|
||||
assert.Equal(t, "arm64", cfg.Build.Targets[1].Arch)
|
||||
assert.Len(t, cfg.Publishers, 1)
|
||||
assert.Equal(t, "github", cfg.Publishers[0].Type)
|
||||
assert.True(t, cfg.Publishers[0].Prerelease)
|
||||
assert.False(t, cfg.Publishers[0].Draft)
|
||||
assert.Equal(t, []string{"feat", "fix"}, cfg.Changelog.Include)
|
||||
assert.Equal(t, []string{"chore"}, cfg.Changelog.Exclude)
|
||||
})
|
||||
|
||||
t.Run("returns defaults when config file missing", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
cfg, err := LoadConfig(dir)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cfg)
|
||||
|
||||
defaults := DefaultConfig()
|
||||
assert.Equal(t, defaults.Version, cfg.Version)
|
||||
assert.Equal(t, defaults.Build.Targets, cfg.Build.Targets)
|
||||
assert.Equal(t, defaults.Publishers, cfg.Publishers)
|
||||
assert.Equal(t, defaults.Changelog.Include, cfg.Changelog.Include)
|
||||
assert.Equal(t, defaults.Changelog.Exclude, cfg.Changelog.Exclude)
|
||||
})
|
||||
|
||||
t.Run("applies defaults for missing fields", func(t *testing.T) {
|
||||
content := `
|
||||
version: 2
|
||||
project:
|
||||
name: partial
|
||||
`
|
||||
dir := setupConfigTestDir(t, content)
|
||||
|
||||
cfg, err := LoadConfig(dir)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, cfg)
|
||||
|
||||
// Explicit values preserved
|
||||
assert.Equal(t, 2, cfg.Version)
|
||||
assert.Equal(t, "partial", cfg.Project.Name)
|
||||
|
||||
// Defaults applied
|
||||
defaults := DefaultConfig()
|
||||
assert.Equal(t, defaults.Build.Targets, cfg.Build.Targets)
|
||||
assert.Equal(t, defaults.Publishers, cfg.Publishers)
|
||||
})
|
||||
|
||||
t.Run("sets project directory on load", func(t *testing.T) {
|
||||
dir := setupConfigTestDir(t, "version: 1")
|
||||
|
||||
cfg, err := LoadConfig(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, dir, cfg.projectDir)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoadConfig_Bad(t *testing.T) {
|
||||
t.Run("returns error for invalid YAML", func(t *testing.T) {
|
||||
content := `
|
||||
version: 1
|
||||
project:
|
||||
name: [invalid yaml
|
||||
`
|
||||
dir := setupConfigTestDir(t, content)
|
||||
|
||||
cfg, err := LoadConfig(dir)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, cfg)
|
||||
assert.Contains(t, err.Error(), "failed to parse config file")
|
||||
})
|
||||
|
||||
t.Run("returns error for unreadable file", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
coreDir := filepath.Join(dir, ConfigDir)
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create config as a directory instead of file
|
||||
configPath := filepath.Join(coreDir, ConfigFileName)
|
||||
err = os.Mkdir(configPath, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg, err := LoadConfig(dir)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, cfg)
|
||||
assert.Contains(t, err.Error(), "failed to read config file")
|
||||
})
|
||||
}
|
||||
|
||||
func TestDefaultConfig_Good(t *testing.T) {
|
||||
t.Run("returns sensible defaults", func(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
|
||||
assert.Equal(t, 1, cfg.Version)
|
||||
assert.Empty(t, cfg.Project.Name)
|
||||
assert.Empty(t, cfg.Project.Repository)
|
||||
|
||||
// Default targets
|
||||
assert.Len(t, cfg.Build.Targets, 4)
|
||||
hasLinuxAmd64 := false
|
||||
hasDarwinArm64 := false
|
||||
hasWindowsAmd64 := false
|
||||
for _, target := range cfg.Build.Targets {
|
||||
if target.OS == "linux" && target.Arch == "amd64" {
|
||||
hasLinuxAmd64 = true
|
||||
}
|
||||
if target.OS == "darwin" && target.Arch == "arm64" {
|
||||
hasDarwinArm64 = true
|
||||
}
|
||||
if target.OS == "windows" && target.Arch == "amd64" {
|
||||
hasWindowsAmd64 = true
|
||||
}
|
||||
}
|
||||
assert.True(t, hasLinuxAmd64)
|
||||
assert.True(t, hasDarwinArm64)
|
||||
assert.True(t, hasWindowsAmd64)
|
||||
|
||||
// Default publisher
|
||||
assert.Len(t, cfg.Publishers, 1)
|
||||
assert.Equal(t, "github", cfg.Publishers[0].Type)
|
||||
assert.False(t, cfg.Publishers[0].Prerelease)
|
||||
assert.False(t, cfg.Publishers[0].Draft)
|
||||
|
||||
// Default changelog settings
|
||||
assert.Contains(t, cfg.Changelog.Include, "feat")
|
||||
assert.Contains(t, cfg.Changelog.Include, "fix")
|
||||
assert.Contains(t, cfg.Changelog.Exclude, "chore")
|
||||
assert.Contains(t, cfg.Changelog.Exclude, "docs")
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigPath_Good(t *testing.T) {
|
||||
t.Run("returns correct path", func(t *testing.T) {
|
||||
path := ConfigPath("/project/root")
|
||||
assert.Equal(t, "/project/root/.core/release.yaml", path)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigExists_Good(t *testing.T) {
|
||||
t.Run("returns true when config exists", func(t *testing.T) {
|
||||
dir := setupConfigTestDir(t, "version: 1")
|
||||
assert.True(t, ConfigExists(dir))
|
||||
})
|
||||
|
||||
t.Run("returns false when config missing", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
assert.False(t, ConfigExists(dir))
|
||||
})
|
||||
|
||||
t.Run("returns false when .core dir missing", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
assert.False(t, ConfigExists(dir))
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteConfig_Good(t *testing.T) {
|
||||
t.Run("writes config to file", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
cfg := DefaultConfig()
|
||||
cfg.Project.Name = "testapp"
|
||||
cfg.Project.Repository = "owner/testapp"
|
||||
|
||||
err := WriteConfig(cfg, dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify file exists
|
||||
assert.True(t, ConfigExists(dir))
|
||||
|
||||
// Reload and verify
|
||||
loaded, err := LoadConfig(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "testapp", loaded.Project.Name)
|
||||
assert.Equal(t, "owner/testapp", loaded.Project.Repository)
|
||||
})
|
||||
|
||||
t.Run("creates .core directory if missing", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
cfg := DefaultConfig()
|
||||
err := WriteConfig(cfg, dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check directory was created
|
||||
coreDir := filepath.Join(dir, ConfigDir)
|
||||
info, err := os.Stat(coreDir)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, info.IsDir())
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfig_GetRepository_Good(t *testing.T) {
|
||||
t.Run("returns repository", func(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Project: ProjectConfig{
|
||||
Repository: "owner/repo",
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "owner/repo", cfg.GetRepository())
|
||||
})
|
||||
|
||||
t.Run("returns empty string when not set", func(t *testing.T) {
|
||||
cfg := &Config{}
|
||||
assert.Empty(t, cfg.GetRepository())
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfig_GetProjectName_Good(t *testing.T) {
|
||||
t.Run("returns project name", func(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Project: ProjectConfig{
|
||||
Name: "myapp",
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "myapp", cfg.GetProjectName())
|
||||
})
|
||||
|
||||
t.Run("returns empty string when not set", func(t *testing.T) {
|
||||
cfg := &Config{}
|
||||
assert.Empty(t, cfg.GetProjectName())
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfig_SetVersion_Good(t *testing.T) {
|
||||
t.Run("sets version override", func(t *testing.T) {
|
||||
cfg := &Config{}
|
||||
cfg.SetVersion("v1.2.3")
|
||||
assert.Equal(t, "v1.2.3", cfg.version)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfig_SetProjectDir_Good(t *testing.T) {
|
||||
t.Run("sets project directory", func(t *testing.T) {
|
||||
cfg := &Config{}
|
||||
cfg.SetProjectDir("/path/to/project")
|
||||
assert.Equal(t, "/path/to/project", cfg.projectDir)
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteConfig_Bad(t *testing.T) {
|
||||
t.Run("returns error for unwritable directory", func(t *testing.T) {
|
||||
if os.Geteuid() == 0 {
|
||||
t.Skip("root can write to any directory")
|
||||
}
|
||||
dir := t.TempDir()
|
||||
|
||||
// Create .core directory and make it unwritable
|
||||
coreDir := filepath.Join(dir, ConfigDir)
|
||||
err := os.MkdirAll(coreDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make directory read-only
|
||||
err = os.Chmod(coreDir, 0555)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.Chmod(coreDir, 0755) }()
|
||||
|
||||
cfg := DefaultConfig()
|
||||
err = WriteConfig(cfg, dir)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to write config file")
|
||||
})
|
||||
|
||||
t.Run("returns error when directory creation fails", func(t *testing.T) {
|
||||
if os.Geteuid() == 0 {
|
||||
t.Skip("root can create directories anywhere")
|
||||
}
|
||||
// Use a path that doesn't exist and can't be created
|
||||
cfg := DefaultConfig()
|
||||
err := WriteConfig(cfg, "/nonexistent/path/that/cannot/be/created")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestApplyDefaults_Good(t *testing.T) {
|
||||
t.Run("applies version default when zero", func(t *testing.T) {
|
||||
cfg := &Config{Version: 0}
|
||||
applyDefaults(cfg)
|
||||
assert.Equal(t, 1, cfg.Version)
|
||||
})
|
||||
|
||||
t.Run("preserves existing version", func(t *testing.T) {
|
||||
cfg := &Config{Version: 2}
|
||||
applyDefaults(cfg)
|
||||
assert.Equal(t, 2, cfg.Version)
|
||||
})
|
||||
|
||||
t.Run("applies changelog defaults only when both empty", func(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Changelog: ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
},
|
||||
}
|
||||
applyDefaults(cfg)
|
||||
// Should not apply defaults because Include is set
|
||||
assert.Equal(t, []string{"feat"}, cfg.Changelog.Include)
|
||||
assert.Empty(t, cfg.Changelog.Exclude)
|
||||
})
|
||||
}
|
||||
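For reference, the config workflow these tests pin down can be driven end-to-end from application code. A minimal sketch, assuming the package is importable as forge.lthn.ai/core/go-devops/release (the import path is inferred from the commit layout and not shown in this diff), with error handling reduced to log.Fatal:

package main

import (
    "fmt"
    "log"

    "forge.lthn.ai/core/go-devops/release" // assumed import path
)

func main() {
    dir := "." // project root containing (or about to contain) .core/release.yaml

    // Seed a default config on first run, mirroring TestWriteConfig_Good above.
    if !release.ConfigExists(dir) {
        cfg := release.DefaultConfig()
        cfg.Project.Name = "myapp"
        cfg.Project.Repository = "owner/myapp"
        if err := release.WriteConfig(cfg, dir); err != nil {
            log.Fatal(err)
        }
    }

    // LoadConfig applies defaults for any fields the file leaves out.
    cfg, err := release.LoadConfig(dir)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("releasing %s from %s\n", cfg.GetProjectName(), cfg.GetRepository())
}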
313
release/publishers/aur.go
Normal file

@ -0,0 +1,313 @@
// Package publishers provides release publishing implementations.
package publishers

import (
    "bytes"
    "context"
    "embed"
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "text/template"

    "forge.lthn.ai/core/go-devops/build"
    "forge.lthn.ai/core/go/pkg/io"
)

//go:embed templates/aur/*.tmpl
var aurTemplates embed.FS

// AURConfig holds AUR-specific configuration.
type AURConfig struct {
    // Package is the AUR package name.
    Package string
    // Maintainer is the package maintainer (e.g., "Name <email>").
    Maintainer string
    // Official config for generating files for official repo PRs.
    Official *OfficialConfig
}

// AURPublisher publishes releases to AUR.
type AURPublisher struct{}

// NewAURPublisher creates a new AUR publisher.
func NewAURPublisher() *AURPublisher {
    return &AURPublisher{}
}

// Name returns the publisher's identifier.
func (p *AURPublisher) Name() string {
    return "aur"
}

// Publish publishes the release to AUR.
func (p *AURPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
    cfg := p.parseConfig(pubCfg, relCfg)

    if cfg.Maintainer == "" {
        return fmt.Errorf("aur.Publish: maintainer is required (set publish.aur.maintainer in config)")
    }

    repo := ""
    if relCfg != nil {
        repo = relCfg.GetRepository()
    }
    if repo == "" {
        detectedRepo, err := detectRepository(release.ProjectDir)
        if err != nil {
            return fmt.Errorf("aur.Publish: could not determine repository: %w", err)
        }
        repo = detectedRepo
    }

    projectName := ""
    if relCfg != nil {
        projectName = relCfg.GetProjectName()
    }
    if projectName == "" {
        parts := strings.Split(repo, "/")
        projectName = parts[len(parts)-1]
    }

    packageName := cfg.Package
    if packageName == "" {
        packageName = projectName
    }

    version := strings.TrimPrefix(release.Version, "v")
    checksums := buildChecksumMap(release.Artifacts)

    data := aurTemplateData{
        PackageName: packageName,
        Description: fmt.Sprintf("%s CLI", projectName),
        Repository:  repo,
        Version:     version,
        License:     "MIT",
        BinaryName:  projectName,
        Maintainer:  cfg.Maintainer,
        Checksums:   checksums,
    }

    if dryRun {
        return p.dryRunPublish(release.FS, data, cfg)
    }

    return p.executePublish(ctx, release.ProjectDir, data, cfg, release)
}

type aurTemplateData struct {
    PackageName string
    Description string
    Repository  string
    Version     string
    License     string
    BinaryName  string
    Maintainer  string
    Checksums   ChecksumMap
}

func (p *AURPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) AURConfig {
    cfg := AURConfig{}

    if ext, ok := pubCfg.Extended.(map[string]any); ok {
        if pkg, ok := ext["package"].(string); ok && pkg != "" {
            cfg.Package = pkg
        }
        if maintainer, ok := ext["maintainer"].(string); ok && maintainer != "" {
            cfg.Maintainer = maintainer
        }
        if official, ok := ext["official"].(map[string]any); ok {
            cfg.Official = &OfficialConfig{}
            if enabled, ok := official["enabled"].(bool); ok {
                cfg.Official.Enabled = enabled
            }
            if output, ok := official["output"].(string); ok {
                cfg.Official.Output = output
            }
        }
    }

    return cfg
}

func (p *AURPublisher) dryRunPublish(m io.Medium, data aurTemplateData, cfg AURConfig) error {
    fmt.Println()
    fmt.Println("=== DRY RUN: AUR Publish ===")
    fmt.Println()
    fmt.Printf("Package: %s-bin\n", data.PackageName)
    fmt.Printf("Version: %s\n", data.Version)
    fmt.Printf("Maintainer: %s\n", data.Maintainer)
    fmt.Printf("Repository: %s\n", data.Repository)
    fmt.Println()

    pkgbuild, err := p.renderTemplate(m, "templates/aur/PKGBUILD.tmpl", data)
    if err != nil {
        return fmt.Errorf("aur.dryRunPublish: %w", err)
    }
    fmt.Println("Generated PKGBUILD:")
    fmt.Println("---")
    fmt.Println(pkgbuild)
    fmt.Println("---")
    fmt.Println()

    srcinfo, err := p.renderTemplate(m, "templates/aur/.SRCINFO.tmpl", data)
    if err != nil {
        return fmt.Errorf("aur.dryRunPublish: %w", err)
    }
    fmt.Println("Generated .SRCINFO:")
    fmt.Println("---")
    fmt.Println(srcinfo)
    fmt.Println("---")
    fmt.Println()

    fmt.Printf("Would push to AUR: ssh://aur@aur.archlinux.org/%s-bin.git\n", data.PackageName)
    fmt.Println()
    fmt.Println("=== END DRY RUN ===")

    return nil
}

func (p *AURPublisher) executePublish(ctx context.Context, projectDir string, data aurTemplateData, cfg AURConfig, release *Release) error {
    pkgbuild, err := p.renderTemplate(release.FS, "templates/aur/PKGBUILD.tmpl", data)
    if err != nil {
        return fmt.Errorf("aur.Publish: failed to render PKGBUILD: %w", err)
    }

    srcinfo, err := p.renderTemplate(release.FS, "templates/aur/.SRCINFO.tmpl", data)
    if err != nil {
        return fmt.Errorf("aur.Publish: failed to render .SRCINFO: %w", err)
    }

    // If official config is enabled, write to output directory
    if cfg.Official != nil && cfg.Official.Enabled {
        output := cfg.Official.Output
        if output == "" {
            output = filepath.Join(projectDir, "dist", "aur")
        } else if !filepath.IsAbs(output) {
            output = filepath.Join(projectDir, output)
        }

        if err := release.FS.EnsureDir(output); err != nil {
            return fmt.Errorf("aur.Publish: failed to create output directory: %w", err)
        }

        pkgbuildPath := filepath.Join(output, "PKGBUILD")
        if err := release.FS.Write(pkgbuildPath, pkgbuild); err != nil {
            return fmt.Errorf("aur.Publish: failed to write PKGBUILD: %w", err)
        }

        srcinfoPath := filepath.Join(output, ".SRCINFO")
        if err := release.FS.Write(srcinfoPath, srcinfo); err != nil {
            return fmt.Errorf("aur.Publish: failed to write .SRCINFO: %w", err)
        }
        fmt.Printf("Wrote AUR files: %s\n", output)
    }

    // Push to AUR if not in official-only mode
    if cfg.Official == nil || !cfg.Official.Enabled {
        if err := p.pushToAUR(ctx, data, pkgbuild, srcinfo); err != nil {
            return err
        }
    }

    return nil
}

func (p *AURPublisher) pushToAUR(ctx context.Context, data aurTemplateData, pkgbuild, srcinfo string) error {
    aurURL := fmt.Sprintf("ssh://aur@aur.archlinux.org/%s-bin.git", data.PackageName)

    tmpDir, err := os.MkdirTemp("", "aur-package-*")
    if err != nil {
        return fmt.Errorf("aur.Publish: failed to create temp directory: %w", err)
    }
    defer func() { _ = os.RemoveAll(tmpDir) }()

    // Clone existing AUR repo (or initialize new one)
    fmt.Printf("Cloning AUR package %s-bin...\n", data.PackageName)
    cmd := exec.CommandContext(ctx, "git", "clone", aurURL, tmpDir)
    if err := cmd.Run(); err != nil {
        // If clone fails, init a new repo
        cmd = exec.CommandContext(ctx, "git", "init", tmpDir)
        if err := cmd.Run(); err != nil {
            return fmt.Errorf("aur.Publish: failed to initialize repo: %w", err)
        }
        cmd = exec.CommandContext(ctx, "git", "-C", tmpDir, "remote", "add", "origin", aurURL)
        if err := cmd.Run(); err != nil {
            return fmt.Errorf("aur.Publish: failed to add remote: %w", err)
        }
    }

    // Write files
    if err := os.WriteFile(filepath.Join(tmpDir, "PKGBUILD"), []byte(pkgbuild), 0644); err != nil {
        return fmt.Errorf("aur.Publish: failed to write PKGBUILD: %w", err)
    }
    if err := os.WriteFile(filepath.Join(tmpDir, ".SRCINFO"), []byte(srcinfo), 0644); err != nil {
        return fmt.Errorf("aur.Publish: failed to write .SRCINFO: %w", err)
    }

    commitMsg := fmt.Sprintf("Update to %s", data.Version)

    cmd = exec.CommandContext(ctx, "git", "add", ".")
    cmd.Dir = tmpDir
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("aur.Publish: git add failed: %w", err)
    }

    cmd = exec.CommandContext(ctx, "git", "commit", "-m", commitMsg)
    cmd.Dir = tmpDir
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("aur.Publish: git commit failed: %w", err)
    }

    cmd = exec.CommandContext(ctx, "git", "push", "origin", "master")
    cmd.Dir = tmpDir
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("aur.Publish: git push failed: %w", err)
    }

    fmt.Printf("Published to AUR: https://aur.archlinux.org/packages/%s-bin\n", data.PackageName)
    return nil
}

func (p *AURPublisher) renderTemplate(m io.Medium, name string, data aurTemplateData) (string, error) {
    var content []byte
    var err error

    // Try custom template from medium
    customPath := filepath.Join(".core", name)
    if m != nil && m.IsFile(customPath) {
        customContent, err := m.Read(customPath)
        if err == nil {
            content = []byte(customContent)
        }
    }

    // Fallback to embedded template
    if content == nil {
        content, err = aurTemplates.ReadFile(name)
        if err != nil {
            return "", fmt.Errorf("failed to read template %s: %w", name, err)
        }
    }

    tmpl, err := template.New(filepath.Base(name)).Parse(string(content))
    if err != nil {
        return "", fmt.Errorf("failed to parse template %s: %w", name, err)
    }

    var buf bytes.Buffer
    if err := tmpl.Execute(&buf, data); err != nil {
        return "", fmt.Errorf("failed to execute template %s: %w", name, err)
    }

    return buf.String(), nil
}

// Ensure build package is used
var _ = build.Artifact{}
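The publisher above can also be driven directly. A hedged sketch of a dry run, assuming the package is importable as forge.lthn.ai/core/go-devops/release/publishers (inferred from the diff layout, not confirmed by it); passing a nil ReleaseConfig makes Publish fall back to detectRepository, so this assumes it runs inside a checkout with a recognizable remote:

package main

import (
    "context"
    "log"

    "forge.lthn.ai/core/go-devops/release/publishers" // assumed import path
    "forge.lthn.ai/core/go/pkg/io"
)

func main() {
    pub := publishers.NewAURPublisher()

    // Release fields mirror the ones used in the tests below; a real release
    // would also carry Artifacts so buildChecksumMap can fill in checksums.
    rel := &publishers.Release{
        Version:    "v1.0.0",
        ProjectDir: ".",
        FS:         io.Local,
    }

    pubCfg := publishers.PublisherConfig{
        Type: "aur",
        Extended: map[string]any{
            "maintainer": "Jane Doe <jane@example.com>", // required by Publish
            "package":    "myapp",                       // optional; defaults to the project name
            "official": map[string]any{
                "enabled": true,       // write PKGBUILD/.SRCINFO locally instead of pushing
                "output":  "dist/aur", // resolved relative to ProjectDir
            },
        },
    }

    // dryRun=true renders and prints PKGBUILD/.SRCINFO without touching AUR.
    if err := pub.Publish(context.Background(), rel, pubCfg, nil, true); err != nil {
        log.Fatal(err)
    }
}

Note that renderTemplate first looks for .core/templates/aur/PKGBUILD.tmpl and .core/templates/aur/.SRCINFO.tmpl in the project medium, so a project can override the embedded templates by dropping files at those paths.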
226
release/publishers/aur_test.go
Normal file

@ -0,0 +1,226 @@
package publishers

import (
    "bytes"
    "context"
    "os"
    "testing"

    "forge.lthn.ai/core/go/pkg/io"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestAURPublisher_Name_Good(t *testing.T) {
    t.Run("returns aur", func(t *testing.T) {
        p := NewAURPublisher()
        assert.Equal(t, "aur", p.Name())
    })
}

func TestAURPublisher_ParseConfig_Good(t *testing.T) {
    p := NewAURPublisher()

    t.Run("uses defaults when no extended config", func(t *testing.T) {
        pubCfg := PublisherConfig{Type: "aur"}
        relCfg := &mockReleaseConfig{repository: "owner/repo"}
        cfg := p.parseConfig(pubCfg, relCfg)

        assert.Empty(t, cfg.Package)
        assert.Empty(t, cfg.Maintainer)
        assert.Nil(t, cfg.Official)
    })

    t.Run("parses package and maintainer from extended config", func(t *testing.T) {
        pubCfg := PublisherConfig{
            Type: "aur",
            Extended: map[string]any{
                "package":    "mypackage",
                "maintainer": "John Doe <john@example.com>",
            },
        }
        relCfg := &mockReleaseConfig{repository: "owner/repo"}
        cfg := p.parseConfig(pubCfg, relCfg)

        assert.Equal(t, "mypackage", cfg.Package)
        assert.Equal(t, "John Doe <john@example.com>", cfg.Maintainer)
    })

    t.Run("parses official config", func(t *testing.T) {
        pubCfg := PublisherConfig{
            Type: "aur",
            Extended: map[string]any{
                "official": map[string]any{
                    "enabled": true,
                    "output":  "dist/aur-files",
                },
            },
        }
        relCfg := &mockReleaseConfig{repository: "owner/repo"}
        cfg := p.parseConfig(pubCfg, relCfg)

        require.NotNil(t, cfg.Official)
        assert.True(t, cfg.Official.Enabled)
        assert.Equal(t, "dist/aur-files", cfg.Official.Output)
    })

    t.Run("handles missing official fields", func(t *testing.T) {
        pubCfg := PublisherConfig{
            Type: "aur",
            Extended: map[string]any{
                "official": map[string]any{},
            },
        }
        relCfg := &mockReleaseConfig{repository: "owner/repo"}
        cfg := p.parseConfig(pubCfg, relCfg)

        require.NotNil(t, cfg.Official)
        assert.False(t, cfg.Official.Enabled)
        assert.Empty(t, cfg.Official.Output)
    })
}

func TestAURPublisher_RenderTemplate_Good(t *testing.T) {
    p := NewAURPublisher()

    t.Run("renders PKGBUILD template with data", func(t *testing.T) {
        data := aurTemplateData{
            PackageName: "myapp",
            Description: "My awesome CLI",
            Repository:  "owner/myapp",
            Version:     "1.2.3",
            License:     "MIT",
            BinaryName:  "myapp",
            Maintainer:  "John Doe <john@example.com>",
            Checksums: ChecksumMap{
                LinuxAmd64: "abc123",
                LinuxArm64: "def456",
            },
        }

        result, err := p.renderTemplate(io.Local, "templates/aur/PKGBUILD.tmpl", data)
        require.NoError(t, err)

        assert.Contains(t, result, "# Maintainer: John Doe <john@example.com>")
        assert.Contains(t, result, "pkgname=myapp-bin")
        assert.Contains(t, result, "pkgver=1.2.3")
        assert.Contains(t, result, `pkgdesc="My awesome CLI"`)
        assert.Contains(t, result, "url=\"https://github.com/owner/myapp\"")
        assert.Contains(t, result, "license=('MIT')")
        assert.Contains(t, result, "sha256sums_x86_64=('abc123')")
        assert.Contains(t, result, "sha256sums_aarch64=('def456')")
    })

    t.Run("renders .SRCINFO template with data", func(t *testing.T) {
        data := aurTemplateData{
            PackageName: "myapp",
            Description: "My CLI",
            Repository:  "owner/myapp",
            Version:     "1.0.0",
            License:     "MIT",
            BinaryName:  "myapp",
            Maintainer:  "Test <test@test.com>",
            Checksums: ChecksumMap{
                LinuxAmd64: "checksum1",
                LinuxArm64: "checksum2",
            },
        }

        result, err := p.renderTemplate(io.Local, "templates/aur/.SRCINFO.tmpl", data)
        require.NoError(t, err)

        assert.Contains(t, result, "pkgbase = myapp-bin")
        assert.Contains(t, result, "pkgdesc = My CLI")
        assert.Contains(t, result, "pkgver = 1.0.0")
        assert.Contains(t, result, "arch = x86_64")
        assert.Contains(t, result, "arch = aarch64")
        assert.Contains(t, result, "sha256sums_x86_64 = checksum1")
        assert.Contains(t, result, "sha256sums_aarch64 = checksum2")
        assert.Contains(t, result, "pkgname = myapp-bin")
    })
}

func TestAURPublisher_RenderTemplate_Bad(t *testing.T) {
    p := NewAURPublisher()

    t.Run("returns error for non-existent template", func(t *testing.T) {
        data := aurTemplateData{}
        _, err := p.renderTemplate(io.Local, "templates/aur/nonexistent.tmpl", data)
        assert.Error(t, err)
        assert.Contains(t, err.Error(), "failed to read template")
    })
}

func TestAURPublisher_DryRunPublish_Good(t *testing.T) {
    p := NewAURPublisher()

    t.Run("outputs expected dry run information", func(t *testing.T) {
        oldStdout := os.Stdout
        r, w, _ := os.Pipe()
        os.Stdout = w

        data := aurTemplateData{
            PackageName: "myapp",
            Version:     "1.0.0",
            Maintainer:  "John Doe <john@example.com>",
            Repository:  "owner/repo",
            BinaryName:  "myapp",
            Checksums:   ChecksumMap{},
        }
        cfg := AURConfig{
            Maintainer: "John Doe <john@example.com>",
        }

        err := p.dryRunPublish(io.Local, data, cfg)

        _ = w.Close()
        var buf bytes.Buffer
        _, _ = buf.ReadFrom(r)
        os.Stdout = oldStdout

        require.NoError(t, err)
        output := buf.String()

        assert.Contains(t, output, "DRY RUN: AUR Publish")
        assert.Contains(t, output, "Package: myapp-bin")
        assert.Contains(t, output, "Version: 1.0.0")
        assert.Contains(t, output, "Maintainer: John Doe <john@example.com>")
        assert.Contains(t, output, "Repository: owner/repo")
        assert.Contains(t, output, "Generated PKGBUILD:")
        assert.Contains(t, output, "Generated .SRCINFO:")
        assert.Contains(t, output, "Would push to AUR: ssh://aur@aur.archlinux.org/myapp-bin.git")
        assert.Contains(t, output, "END DRY RUN")
    })
}

func TestAURPublisher_Publish_Bad(t *testing.T) {
    p := NewAURPublisher()

    t.Run("fails when maintainer not configured", func(t *testing.T) {
        release := &Release{
            Version:    "v1.0.0",
            ProjectDir: "/project",
            FS:         io.Local,
        }
        pubCfg := PublisherConfig{Type: "aur"}
        relCfg := &mockReleaseConfig{repository: "owner/repo"}

        err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
        assert.Error(t, err)
        assert.Contains(t, err.Error(), "maintainer is required")
    })
}

func TestAURConfig_Defaults_Good(t *testing.T) {
    t.Run("has sensible defaults", func(t *testing.T) {
        p := NewAURPublisher()
        pubCfg := PublisherConfig{Type: "aur"}
        relCfg := &mockReleaseConfig{repository: "owner/repo"}

        cfg := p.parseConfig(pubCfg, relCfg)

        assert.Empty(t, cfg.Package)
        assert.Empty(t, cfg.Maintainer)
        assert.Nil(t, cfg.Official)
    })
}
Some files were not shown because too many files have changed in this diff.