commit 392ad6804711ff1241002ac62fb496ede8ec6c1e Author: Claude Date: Mon Feb 16 15:21:39 2026 +0000 feat: extract devops packages from core/go Build system, release automation, SDK generation, Ansible executor, LinuxKit dev environments, container runtime, deployment, infra metrics, and developer toolkit. Co-Authored-By: Claude Opus 4.6 diff --git a/ansible/executor.go b/ansible/executor.go new file mode 100644 index 0000000..c13591e --- /dev/null +++ b/ansible/executor.go @@ -0,0 +1,1021 @@ +package ansible + +import ( + "context" + "fmt" + "os" + "regexp" + "strings" + "sync" + "text/template" + "time" + + "forge.lthn.ai/core/go/pkg/log" +) + +// Executor runs Ansible playbooks. +type Executor struct { + parser *Parser + inventory *Inventory + vars map[string]any + facts map[string]*Facts + results map[string]map[string]*TaskResult // host -> register_name -> result + handlers map[string][]Task + notified map[string]bool + clients map[string]*SSHClient + mu sync.RWMutex + + // Callbacks + OnPlayStart func(play *Play) + OnTaskStart func(host string, task *Task) + OnTaskEnd func(host string, task *Task, result *TaskResult) + OnPlayEnd func(play *Play) + + // Options + Limit string + Tags []string + SkipTags []string + CheckMode bool + Diff bool + Verbose int +} + +// NewExecutor creates a new playbook executor. +func NewExecutor(basePath string) *Executor { + return &Executor{ + parser: NewParser(basePath), + vars: make(map[string]any), + facts: make(map[string]*Facts), + results: make(map[string]map[string]*TaskResult), + handlers: make(map[string][]Task), + notified: make(map[string]bool), + clients: make(map[string]*SSHClient), + } +} + +// SetInventory loads inventory from a file. +func (e *Executor) SetInventory(path string) error { + inv, err := e.parser.ParseInventory(path) + if err != nil { + return err + } + e.inventory = inv + return nil +} + +// SetInventoryDirect sets inventory directly. 
func (e *Executor) SetInventoryDirect(inv *Inventory) {
	e.inventory = inv
}

// SetVar sets a variable. Safe for concurrent use (guarded by e.mu).
func (e *Executor) SetVar(key string, value any) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.vars[key] = value
}

// Run executes a playbook: it parses the file and runs each play in
// order, stopping at the first play that returns an error.
func (e *Executor) Run(ctx context.Context, playbookPath string) error {
	plays, err := e.parser.ParsePlaybook(playbookPath)
	if err != nil {
		return fmt.Errorf("parse playbook: %w", err)
	}

	for i := range plays {
		if err := e.runPlay(ctx, &plays[i]); err != nil {
			return fmt.Errorf("play %d (%s): %w", i, plays[i].Name, err)
		}
	}

	return nil
}

// runPlay executes a single play: resolves target hosts, merges play
// vars, optionally gathers facts, then runs pre_tasks, roles, tasks,
// post_tasks, and finally any notified handlers.
func (e *Executor) runPlay(ctx context.Context, play *Play) error {
	if e.OnPlayStart != nil {
		e.OnPlayStart(play)
	}
	defer func() {
		if e.OnPlayEnd != nil {
			e.OnPlayEnd(play)
		}
	}()

	// Get target hosts
	hosts := e.getHosts(play.Hosts)
	if len(hosts) == 0 {
		return nil // No hosts matched
	}

	// Merge play vars.
	// NOTE(review): merged into e.vars without restore, so play vars
	// leak into subsequent plays of the same playbook — confirm intended.
	for k, v := range play.Vars {
		e.vars[k] = v
	}

	// Gather facts if needed (defaults to true when gather_facts is unset).
	gatherFacts := play.GatherFacts == nil || *play.GatherFacts
	if gatherFacts {
		for _, host := range hosts {
			if err := e.gatherFacts(ctx, host, play); err != nil {
				// Non-fatal: facts simply stay unavailable for this host.
				if e.Verbose > 0 {
					log.Warn("gather facts failed", "host", host, "err", err)
				}
			}
		}
	}

	// Execute pre_tasks
	for _, task := range play.PreTasks {
		if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
			return err
		}
	}

	// Execute roles
	for _, roleRef := range play.Roles {
		if err := e.runRole(ctx, hosts, &roleRef, play); err != nil {
			return err
		}
	}

	// Execute tasks
	for _, task := range play.Tasks {
		if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
			return err
		}
	}

	// Execute post_tasks
	for _, task := range play.PostTasks {
		if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
			return err
		}
	}

	// Run notified handlers.
	// NOTE(review): notified flags are never cleared after firing, so a
	// handler name notified here also fires in any later play that
	// declares a handler with the same name — confirm intended.
	for _, handler := range play.Handlers {
		if e.notified[handler.Name] {
			if err := e.runTaskOnHosts(ctx, hosts, &handler, play); err != nil {
				return err
			}
		}
	}

	return nil
}

// runRole executes a role on hosts. Role vars are layered over the
// executor vars for the duration of the role and restored afterwards.
func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef, play *Play) error {
	// Check when condition (evaluated once, not per host).
	if roleRef.When != nil {
		if !e.evaluateWhen(roleRef.When, "", nil) {
			return nil
		}
	}

	// Parse role tasks
	tasks, err := e.parser.ParseRole(roleRef.Role, roleRef.TasksFrom)
	if err != nil {
		return log.E("executor.runRole", fmt.Sprintf("parse role %s", roleRef.Role), err)
	}

	// Snapshot current vars so the role's overrides can be undone.
	oldVars := make(map[string]any)
	for k, v := range e.vars {
		oldVars[k] = v
	}
	for k, v := range roleRef.Vars {
		e.vars[k] = v
	}

	// Execute tasks
	for _, task := range tasks {
		if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
			// Restore vars
			e.vars = oldVars
			return err
		}
	}

	// Restore vars. NOTE(review): this also discards any variable
	// changes (e.g. set_fact) made while the role ran — confirm intended.
	e.vars = oldVars
	return nil
}

// runTaskOnHosts runs a task on all hosts, dispatching to block,
// include/import handling, or plain per-host execution.
func (e *Executor) runTaskOnHosts(ctx context.Context, hosts []string, task *Task, play *Play) error {
	// Check tags
	if !e.matchesTags(task.Tags) {
		return nil
	}

	// Handle block tasks
	if len(task.Block) > 0 {
		return e.runBlock(ctx, hosts, task, play)
	}

	// Handle include/import
	if task.IncludeTasks != "" || task.ImportTasks != "" {
		return e.runIncludeTasks(ctx, hosts, task, play)
	}
	if task.IncludeRole != nil || task.ImportRole != nil {
		return e.runIncludeRole(ctx, hosts, task, play)
	}

	// Hosts run sequentially; ignore_errors moves on to the next host.
	for _, host := range hosts {
		if err := e.runTaskOnHost(ctx, host, task, play); err != nil {
			if !task.IgnoreErrors {
				return err
			}
		}
	}

	return nil
}

// runTaskOnHost runs a task on a single host.
// runTaskOnHost runs a task on a single host: evaluates the when
// condition, obtains an SSH client, executes the module (or delegates
// to runLoop), registers the result, and fires notify/callbacks.
//
// NOTE(review): e.results is written here without holding e.mu while
// getRegisteredVar reads it under RLock — safe only as long as hosts
// are executed sequentially; confirm before parallelizing.
func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, play *Play) error {
	start := time.Now()

	if e.OnTaskStart != nil {
		e.OnTaskStart(host, task)
	}

	// Initialize host results
	if e.results[host] == nil {
		e.results[host] = make(map[string]*TaskResult)
	}

	// Check when condition; a skip still registers and reports a result.
	if task.When != nil {
		if !e.evaluateWhen(task.When, host, task) {
			result := &TaskResult{Skipped: true, Msg: "Skipped due to when condition"}
			if task.Register != "" {
				e.results[host][task.Register] = result
			}
			if e.OnTaskEnd != nil {
				e.OnTaskEnd(host, task, result)
			}
			return nil
		}
	}

	// Get SSH client
	client, err := e.getClient(host, play)
	if err != nil {
		return fmt.Errorf("get client for %s: %w", host, err)
	}

	// Handle loops (runLoop registers its own combined result).
	if task.Loop != nil {
		return e.runLoop(ctx, host, client, task, play)
	}

	// Execute the task; module errors become a failed result rather
	// than an immediate return so Duration/Register/callbacks still run.
	result, err := e.executeModule(ctx, host, client, task, play)
	if err != nil {
		result = &TaskResult{Failed: true, Msg: err.Error()}
	}
	result.Duration = time.Since(start)

	// Store result
	if task.Register != "" {
		e.results[host][task.Register] = result
	}

	// Handle notify: only changed results trigger handlers.
	if result.Changed && task.Notify != nil {
		e.handleNotify(task.Notify)
	}

	if e.OnTaskEnd != nil {
		e.OnTaskEnd(host, task, result)
	}

	if result.Failed && !task.IgnoreErrors {
		return fmt.Errorf("task failed: %s", result.Msg)
	}

	return nil
}

// runLoop handles task loops.
// runLoop executes a task once per loop item, exposing each item via
// the loop variable (default "item", overridable via loop_control) and
// optionally an index variable. Pre-existing values of those variables
// are saved and restored so loops do not clobber caller state.
func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient, task *Task, play *Play) error {
	items := e.resolveLoop(task.Loop, host)

	loopVar := "item"
	if task.LoopControl != nil && task.LoopControl.LoopVar != "" {
		loopVar = task.LoopControl.LoopVar
	}

	// Save loop state to restore after loop
	savedVars := make(map[string]any)
	if v, ok := e.vars[loopVar]; ok {
		savedVars[loopVar] = v
	}
	indexVar := ""
	if task.LoopControl != nil && task.LoopControl.IndexVar != "" {
		indexVar = task.LoopControl.IndexVar
		if v, ok := e.vars[indexVar]; ok {
			savedVars[indexVar] = v
		}
	}

	var results []TaskResult
	for i, item := range items {
		// Set loop variables
		e.vars[loopVar] = item
		if indexVar != "" {
			e.vars[indexVar] = i
		}

		result, err := e.executeModule(ctx, host, client, task, play)
		if err != nil {
			result = &TaskResult{Failed: true, Msg: err.Error()}
		}
		results = append(results, *result)

		// Stop on first failure unless the task ignores errors.
		if result.Failed && !task.IgnoreErrors {
			break
		}
	}

	// Restore loop variables (delete if they did not exist before).
	if v, ok := savedVars[loopVar]; ok {
		e.vars[loopVar] = v
	} else {
		delete(e.vars, loopVar)
	}
	if indexVar != "" {
		if v, ok := savedVars[indexVar]; ok {
			e.vars[indexVar] = v
		} else {
			delete(e.vars, indexVar)
		}
	}

	// Store combined result: Changed/Failed if any iteration was,
	// with per-iteration results preserved under .Results.
	if task.Register != "" {
		combined := &TaskResult{
			Results: results,
			Changed: false,
		}
		for _, r := range results {
			if r.Changed {
				combined.Changed = true
			}
			if r.Failed {
				combined.Failed = true
			}
		}
		e.results[host][task.Register] = combined
	}

	return nil
}

// runBlock handles block/rescue/always.
+func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, play *Play) error { + var blockErr error + + // Try block + for _, t := range task.Block { + if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil { + blockErr = err + break + } + } + + // Run rescue if block failed + if blockErr != nil && len(task.Rescue) > 0 { + for _, t := range task.Rescue { + if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil { + // Rescue also failed + break + } + } + } + + // Always run always block + for _, t := range task.Always { + if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil { + if blockErr == nil { + blockErr = err + } + } + } + + if blockErr != nil && len(task.Rescue) == 0 { + return blockErr + } + + return nil +} + +// runIncludeTasks handles include_tasks/import_tasks. +func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Task, play *Play) error { + path := task.IncludeTasks + if path == "" { + path = task.ImportTasks + } + + // Resolve path relative to playbook + path = e.templateString(path, "", nil) + + tasks, err := e.parser.ParseTasks(path) + if err != nil { + return fmt.Errorf("include_tasks %s: %w", path, err) + } + + for _, t := range tasks { + if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil { + return err + } + } + + return nil +} + +// runIncludeRole handles include_role/import_role. 
+func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Task, play *Play) error { + var roleName, tasksFrom string + var roleVars map[string]any + + if task.IncludeRole != nil { + roleName = task.IncludeRole.Name + tasksFrom = task.IncludeRole.TasksFrom + roleVars = task.IncludeRole.Vars + } else { + roleName = task.ImportRole.Name + tasksFrom = task.ImportRole.TasksFrom + roleVars = task.ImportRole.Vars + } + + roleRef := &RoleRef{ + Role: roleName, + TasksFrom: tasksFrom, + Vars: roleVars, + } + + return e.runRole(ctx, hosts, roleRef, play) +} + +// getHosts returns hosts matching the pattern. +func (e *Executor) getHosts(pattern string) []string { + if e.inventory == nil { + if pattern == "localhost" { + return []string{"localhost"} + } + return nil + } + + hosts := GetHosts(e.inventory, pattern) + + // Apply limit - filter to hosts that are also in the limit group + if e.Limit != "" { + limitHosts := GetHosts(e.inventory, e.Limit) + limitSet := make(map[string]bool) + for _, h := range limitHosts { + limitSet[h] = true + } + + var filtered []string + for _, h := range hosts { + if limitSet[h] || h == e.Limit || strings.Contains(h, e.Limit) { + filtered = append(filtered, h) + } + } + hosts = filtered + } + + return hosts +} + +// getClient returns or creates an SSH client for a host. 
// getClient returns a cached SSH client for host, or builds one from
// inventory host vars (ansible_host/port/user/password/key) merged
// with executor vars (host vars take precedence). Play-level become
// settings are applied, falling back to the SSH password for sudo
// when no become password is configured.
func (e *Executor) getClient(host string, play *Play) (*SSHClient, error) {
	e.mu.Lock()
	defer e.mu.Unlock()

	if client, ok := e.clients[host]; ok {
		return client, nil
	}

	// Get host vars
	vars := make(map[string]any)
	if e.inventory != nil {
		vars = GetHostVars(e.inventory, host)
	}

	// Merge with play vars (host vars win on conflict).
	for k, v := range e.vars {
		if _, exists := vars[k]; !exists {
			vars[k] = v
		}
	}

	// Build SSH config with conventional defaults.
	cfg := SSHConfig{
		Host: host,
		Port: 22,
		User: "root",
	}

	if h, ok := vars["ansible_host"].(string); ok {
		cfg.Host = h
	}
	// NOTE(review): asserts int only — an ansible_port stored as a
	// string or float (e.g. from JSON) is silently ignored; confirm
	// inventory parsing always yields int here.
	if p, ok := vars["ansible_port"].(int); ok {
		cfg.Port = p
	}
	if u, ok := vars["ansible_user"].(string); ok {
		cfg.User = u
	}
	if p, ok := vars["ansible_password"].(string); ok {
		cfg.Password = p
	}
	if k, ok := vars["ansible_ssh_private_key_file"].(string); ok {
		cfg.KeyFile = k
	}

	// Apply play become settings
	if play.Become {
		cfg.Become = true
		cfg.BecomeUser = play.BecomeUser
		if bp, ok := vars["ansible_become_password"].(string); ok {
			cfg.BecomePass = bp
		} else if cfg.Password != "" {
			// Use SSH password for sudo if no become password specified
			cfg.BecomePass = cfg.Password
		}
	}

	client, err := NewSSHClient(cfg)
	if err != nil {
		return nil, err
	}

	e.clients[host] = client
	return client, nil
}

// gatherFacts collects facts from a host.
+func (e *Executor) gatherFacts(ctx context.Context, host string, play *Play) error { + if play.Connection == "local" || host == "localhost" { + // Local facts + e.facts[host] = &Facts{ + Hostname: "localhost", + } + return nil + } + + client, err := e.getClient(host, play) + if err != nil { + return err + } + + // Gather basic facts + facts := &Facts{} + + // Hostname + stdout, _, _, err := client.Run(ctx, "hostname -f 2>/dev/null || hostname") + if err == nil { + facts.FQDN = strings.TrimSpace(stdout) + } + + stdout, _, _, err = client.Run(ctx, "hostname -s 2>/dev/null || hostname") + if err == nil { + facts.Hostname = strings.TrimSpace(stdout) + } + + // OS info + stdout, _, _, _ = client.Run(ctx, "cat /etc/os-release 2>/dev/null | grep -E '^(ID|VERSION_ID)=' | head -2") + for _, line := range strings.Split(stdout, "\n") { + if strings.HasPrefix(line, "ID=") { + facts.Distribution = strings.Trim(strings.TrimPrefix(line, "ID="), "\"") + } + if strings.HasPrefix(line, "VERSION_ID=") { + facts.Version = strings.Trim(strings.TrimPrefix(line, "VERSION_ID="), "\"") + } + } + + // Architecture + stdout, _, _, _ = client.Run(ctx, "uname -m") + facts.Architecture = strings.TrimSpace(stdout) + + // Kernel + stdout, _, _, _ = client.Run(ctx, "uname -r") + facts.Kernel = strings.TrimSpace(stdout) + + e.mu.Lock() + e.facts[host] = facts + e.mu.Unlock() + + return nil +} + +// evaluateWhen evaluates a when condition. 
// evaluateWhen evaluates a when condition (string or list of strings).
// All conditions must hold (logical AND). Each condition is templated
// before evaluation.
func (e *Executor) evaluateWhen(when any, host string, task *Task) bool {
	conditions := normalizeConditions(when)

	for _, cond := range conditions {
		cond = e.templateString(cond, host, task)
		if !e.evalCondition(cond, host) {
			return false
		}
	}

	return true
}

// normalizeConditions coerces a when value (string, []string, or
// []any of strings) into a flat []string; non-string items and other
// types yield nil.
func normalizeConditions(when any) []string {
	switch v := when.(type) {
	case string:
		return []string{v}
	case []any:
		var conds []string
		for _, c := range v {
			if s, ok := c.(string); ok {
				conds = append(conds, s)
			}
		}
		return conds
	case []string:
		return v
	}
	return nil
}

// evalCondition evaluates a single condition string heuristically.
// Supported shapes: "not <cond>", boolean literals, "<var> is
// defined/failed/changed/skipped/success", "<expr> | default(...)",
// bare registered-result names, and bare variable names (truthiness).
// Unknown conditions default to TRUE — deliberately permissive so
// unsupported Jinja expressions do not skip tasks.
func (e *Executor) evalCondition(cond string, host string) bool {
	cond = strings.TrimSpace(cond)

	// Handle negation
	if strings.HasPrefix(cond, "not ") {
		return !e.evalCondition(strings.TrimPrefix(cond, "not "), host)
	}

	// Handle boolean literals
	if cond == "true" || cond == "True" {
		return true
	}
	if cond == "false" || cond == "False" {
		return false
	}

	// Handle registered variable checks
	// e.g., "result is success", "result.rc == 0"
	if strings.Contains(cond, " is ") {
		parts := strings.SplitN(cond, " is ", 2)
		varName := strings.TrimSpace(parts[0])
		check := strings.TrimSpace(parts[1])

		result := e.getRegisteredVar(host, varName)
		if result == nil {
			// Unregistered name: only "not defined"/"undefined" hold.
			return check == "not defined" || check == "undefined"
		}

		switch check {
		case "defined":
			return true
		case "not defined", "undefined":
			return false
		case "success", "succeeded":
			return !result.Failed
		case "failed":
			return result.Failed
		case "changed":
			return result.Changed
		case "skipped":
			return result.Skipped
		}
	}

	// Handle simple var checks with a default filter: the default
	// guarantees a value, so the condition is treated as satisfied.
	if strings.Contains(cond, " | default(") {
		// Extract var name and check if defined
		re := regexp.MustCompile(`(\w+)\s*\|\s*default\([^)]*\)`)
		if match := re.FindStringSubmatch(cond); len(match) > 1 {
			// Has default, so condition is satisfied
			return true
		}
	}

	// Check if it's a registered result used as a bare truthy value.
	if result := e.getRegisteredVar(host, cond); result != nil {
		return !result.Failed && !result.Skipped
	}

	// Check vars: bool as-is, strings/ints by conventional truthiness.
	if val, ok := e.vars[cond]; ok {
		switch v := val.(type) {
		case bool:
			return v
		case string:
			return v != "" && v != "false" && v != "False"
		case int:
			return v != 0
		}
	}

	// Default to true for unknown conditions (be permissive)
	return true
}

// getRegisteredVar gets a registered task result by name. A dotted
// name ("result.stdout") resolves the root segment only; attribute
// access is handled by the callers.
func (e *Executor) getRegisteredVar(host string, name string) *TaskResult {
	e.mu.RLock()
	defer e.mu.RUnlock()

	// Handle dotted access (e.g., "result.stdout")
	parts := strings.SplitN(name, ".", 2)
	varName := parts[0]

	if hostResults, ok := e.results[host]; ok {
		if result, ok := hostResults[varName]; ok {
			return result
		}
	}

	return nil
}

// templateString applies Jinja2-like templating: every "{{ expr }}"
// occurrence is replaced with its resolved value (or left verbatim if
// unresolvable — see resolveExpr).
func (e *Executor) templateString(s string, host string, task *Task) string {
	// Handle {{ var }} syntax
	re := regexp.MustCompile(`\{\{\s*([^}]+)\s*\}\}`)

	return re.ReplaceAllStringFunc(s, func(match string) string {
		expr := strings.TrimSpace(match[2 : len(match)-2])
		return e.resolveExpr(expr, host, task)
	})
}

// resolveExpr resolves a template expression.
// resolveExpr resolves a single template expression. Resolution order:
// filters (split on " | "), lookup() calls, registered-result attribute
// access, executor vars, task vars, inventory host vars, then facts.
// An unresolvable expression is returned as the literal placeholder
// "{{ expr }}" so downstream consumers can detect it.
func (e *Executor) resolveExpr(expr string, host string, task *Task) string {
	// Handle filters: resolve the left side, then apply the filter chain.
	if strings.Contains(expr, " | ") {
		parts := strings.SplitN(expr, " | ", 2)
		value := e.resolveExpr(parts[0], host, task)
		return e.applyFilter(value, parts[1])
	}

	// Handle lookups
	if strings.HasPrefix(expr, "lookup(") {
		return e.handleLookup(expr)
	}

	// Handle registered vars with attribute access (result.stdout etc.).
	if strings.Contains(expr, ".") {
		parts := strings.SplitN(expr, ".", 2)
		if result := e.getRegisteredVar(host, parts[0]); result != nil {
			switch parts[1] {
			case "stdout":
				return result.Stdout
			case "stderr":
				return result.Stderr
			case "rc":
				return fmt.Sprintf("%d", result.RC)
			case "changed":
				return fmt.Sprintf("%t", result.Changed)
			case "failed":
				return fmt.Sprintf("%t", result.Failed)
			}
		}
	}

	// Check vars
	if val, ok := e.vars[expr]; ok {
		return fmt.Sprintf("%v", val)
	}

	// Check task vars
	if task != nil {
		if val, ok := task.Vars[expr]; ok {
			return fmt.Sprintf("%v", val)
		}
	}

	// Check host vars
	if e.inventory != nil {
		hostVars := GetHostVars(e.inventory, host)
		if val, ok := hostVars[expr]; ok {
			return fmt.Sprintf("%v", val)
		}
	}

	// Check facts (only the well-known ansible_* names are mapped).
	if facts, ok := e.facts[host]; ok {
		switch expr {
		case "ansible_hostname":
			return facts.Hostname
		case "ansible_fqdn":
			return facts.FQDN
		case "ansible_distribution":
			return facts.Distribution
		case "ansible_distribution_version":
			return facts.Version
		case "ansible_architecture":
			return facts.Architecture
		case "ansible_kernel":
			return facts.Kernel
		}
	}

	return "{{ " + expr + " }}" // Return as-is if unresolved
}

// applyFilter applies a Jinja2 filter.
+func (e *Executor) applyFilter(value, filter string) string { + filter = strings.TrimSpace(filter) + + // Handle default filter + if strings.HasPrefix(filter, "default(") { + if value == "" || value == "{{ "+filter+" }}" { + // Extract default value + re := regexp.MustCompile(`default\(([^)]*)\)`) + if match := re.FindStringSubmatch(filter); len(match) > 1 { + return strings.Trim(match[1], "'\"") + } + } + return value + } + + // Handle bool filter + if filter == "bool" { + lower := strings.ToLower(value) + if lower == "true" || lower == "yes" || lower == "1" { + return "true" + } + return "false" + } + + // Handle trim + if filter == "trim" { + return strings.TrimSpace(value) + } + + // Handle b64decode + if filter == "b64decode" { + // Would need base64 decode + return value + } + + return value +} + +// handleLookup handles lookup() expressions. +func (e *Executor) handleLookup(expr string) string { + // Parse lookup('type', 'arg') + re := regexp.MustCompile(`lookup\s*\(\s*['"](\w+)['"]\s*,\s*['"]([^'"]+)['"]\s*`) + match := re.FindStringSubmatch(expr) + if len(match) < 3 { + return "" + } + + lookupType := match[1] + arg := match[2] + + switch lookupType { + case "env": + return os.Getenv(arg) + case "file": + if data, err := os.ReadFile(arg); err == nil { + return string(data) + } + } + + return "" +} + +// resolveLoop resolves loop items. +func (e *Executor) resolveLoop(loop any, host string) []any { + switch v := loop.(type) { + case []any: + return v + case []string: + items := make([]any, len(v)) + for i, s := range v { + items[i] = s + } + return items + case string: + // Template the string and see if it's a var reference + resolved := e.templateString(v, host, nil) + if val, ok := e.vars[resolved]; ok { + if items, ok := val.([]any); ok { + return items + } + } + } + return nil +} + +// matchesTags checks if task tags match execution tags. 
+func (e *Executor) matchesTags(taskTags []string) bool { + // If no tags specified, run all + if len(e.Tags) == 0 && len(e.SkipTags) == 0 { + return true + } + + // Check skip tags + for _, skip := range e.SkipTags { + for _, tt := range taskTags { + if skip == tt { + return false + } + } + } + + // Check include tags + if len(e.Tags) > 0 { + for _, tag := range e.Tags { + for _, tt := range taskTags { + if tag == tt || tag == "all" { + return true + } + } + } + return false + } + + return true +} + +// handleNotify marks handlers as notified. +func (e *Executor) handleNotify(notify any) { + switch v := notify.(type) { + case string: + e.notified[v] = true + case []any: + for _, n := range v { + if s, ok := n.(string); ok { + e.notified[s] = true + } + } + case []string: + for _, s := range v { + e.notified[s] = true + } + } +} + +// Close closes all SSH connections. +func (e *Executor) Close() { + e.mu.Lock() + defer e.mu.Unlock() + + for _, client := range e.clients { + _ = client.Close() + } + e.clients = make(map[string]*SSHClient) +} + +// TemplateFile processes a template file. 
// TemplateFile renders a template file for a host. It attempts a crude
// Jinja2-to-Go-template conversion ("{{"→"{{ .", "{%"/"%}"→Go action
// delimiters) and executes it with a context built from executor vars,
// inventory host vars, and gathered facts. If the converted template
// fails to parse or execute (common, since Jinja filters and statements
// do not survive the conversion), it falls back to the simpler
// templateString substitution — so this function never returns a
// template error, only read errors.
func (e *Executor) TemplateFile(src, host string, task *Task) (string, error) {
	content, err := os.ReadFile(src)
	if err != nil {
		return "", err
	}

	// Convert Jinja2 to Go template syntax (basic conversion).
	// NOTE(review): "{{ var | filter }}" becomes "{{ .var | filter }}",
	// which is not valid Go template syntax — such files always take
	// the fallback path below; confirm that is the intended behavior.
	tmplContent := string(content)
	tmplContent = strings.ReplaceAll(tmplContent, "{{", "{{ .")
	tmplContent = strings.ReplaceAll(tmplContent, "{%", "{{")
	tmplContent = strings.ReplaceAll(tmplContent, "%}", "}}")

	tmpl, err := template.New("template").Parse(tmplContent)
	if err != nil {
		// Fall back to simple replacement
		return e.templateString(string(content), host, task), nil
	}

	// Build context map
	context := make(map[string]any)
	for k, v := range e.vars {
		context[k] = v
	}
	// Add host vars
	if e.inventory != nil {
		hostVars := GetHostVars(e.inventory, host)
		for k, v := range hostVars {
			context[k] = v
		}
	}
	// Add facts
	if facts, ok := e.facts[host]; ok {
		context["ansible_hostname"] = facts.Hostname
		context["ansible_fqdn"] = facts.FQDN
		context["ansible_distribution"] = facts.Distribution
		context["ansible_distribution_version"] = facts.Version
		context["ansible_architecture"] = facts.Architecture
		context["ansible_kernel"] = facts.Kernel
	}

	var buf strings.Builder
	if err := tmpl.Execute(&buf, context); err != nil {
		// Execution failure also falls back to simple substitution.
		return e.templateString(string(content), host, task), nil
	}

	return buf.String(), nil
}
diff --git a/ansible/modules.go b/ansible/modules.go
new file mode 100644
index 0000000..6819cf8
--- /dev/null
+++ b/ansible/modules.go
@@ -0,0 +1,1434 @@
package ansible

import (
	"context"
	"encoding/base64"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// executeModule dispatches to the appropriate module handler.
+func (e *Executor) executeModule(ctx context.Context, host string, client *SSHClient, task *Task, play *Play) (*TaskResult, error) { + module := NormalizeModule(task.Module) + + // Apply task-level become + if task.Become != nil && *task.Become { + // Save old state to restore + oldBecome := client.become + oldUser := client.becomeUser + oldPass := client.becomePass + + client.SetBecome(true, task.BecomeUser, "") + + defer client.SetBecome(oldBecome, oldUser, oldPass) + } + + // Template the args + args := e.templateArgs(task.Args, host, task) + + switch module { + // Command execution + case "ansible.builtin.shell": + return e.moduleShell(ctx, client, args) + case "ansible.builtin.command": + return e.moduleCommand(ctx, client, args) + case "ansible.builtin.raw": + return e.moduleRaw(ctx, client, args) + case "ansible.builtin.script": + return e.moduleScript(ctx, client, args) + + // File operations + case "ansible.builtin.copy": + return e.moduleCopy(ctx, client, args, host, task) + case "ansible.builtin.template": + return e.moduleTemplate(ctx, client, args, host, task) + case "ansible.builtin.file": + return e.moduleFile(ctx, client, args) + case "ansible.builtin.lineinfile": + return e.moduleLineinfile(ctx, client, args) + case "ansible.builtin.stat": + return e.moduleStat(ctx, client, args) + case "ansible.builtin.slurp": + return e.moduleSlurp(ctx, client, args) + case "ansible.builtin.fetch": + return e.moduleFetch(ctx, client, args) + case "ansible.builtin.get_url": + return e.moduleGetURL(ctx, client, args) + + // Package management + case "ansible.builtin.apt": + return e.moduleApt(ctx, client, args) + case "ansible.builtin.apt_key": + return e.moduleAptKey(ctx, client, args) + case "ansible.builtin.apt_repository": + return e.moduleAptRepository(ctx, client, args) + case "ansible.builtin.package": + return e.modulePackage(ctx, client, args) + case "ansible.builtin.pip": + return e.modulePip(ctx, client, args) + + // Service management + case 
"ansible.builtin.service": + return e.moduleService(ctx, client, args) + case "ansible.builtin.systemd": + return e.moduleSystemd(ctx, client, args) + + // User/Group + case "ansible.builtin.user": + return e.moduleUser(ctx, client, args) + case "ansible.builtin.group": + return e.moduleGroup(ctx, client, args) + + // HTTP + case "ansible.builtin.uri": + return e.moduleURI(ctx, client, args) + + // Misc + case "ansible.builtin.debug": + return e.moduleDebug(args) + case "ansible.builtin.fail": + return e.moduleFail(args) + case "ansible.builtin.assert": + return e.moduleAssert(args, host) + case "ansible.builtin.set_fact": + return e.moduleSetFact(args) + case "ansible.builtin.pause": + return e.modulePause(ctx, args) + case "ansible.builtin.wait_for": + return e.moduleWaitFor(ctx, client, args) + case "ansible.builtin.git": + return e.moduleGit(ctx, client, args) + case "ansible.builtin.unarchive": + return e.moduleUnarchive(ctx, client, args) + + // Additional modules + case "ansible.builtin.hostname": + return e.moduleHostname(ctx, client, args) + case "ansible.builtin.sysctl": + return e.moduleSysctl(ctx, client, args) + case "ansible.builtin.cron": + return e.moduleCron(ctx, client, args) + case "ansible.builtin.blockinfile": + return e.moduleBlockinfile(ctx, client, args) + case "ansible.builtin.include_vars": + return e.moduleIncludeVars(args) + case "ansible.builtin.meta": + return e.moduleMeta(args) + case "ansible.builtin.setup": + return e.moduleSetup(ctx, client) + case "ansible.builtin.reboot": + return e.moduleReboot(ctx, client, args) + + // Community modules (basic support) + case "community.general.ufw": + return e.moduleUFW(ctx, client, args) + case "ansible.posix.authorized_key": + return e.moduleAuthorizedKey(ctx, client, args) + case "community.docker.docker_compose": + return e.moduleDockerCompose(ctx, client, args) + + default: + // For unknown modules, try to execute as shell if it looks like a command + if strings.Contains(task.Module, " ") 
|| task.Module == "" { + return e.moduleShell(ctx, client, args) + } + return nil, fmt.Errorf("unsupported module: %s", module) + } +} + +// templateArgs templates all string values in args. +func (e *Executor) templateArgs(args map[string]any, host string, task *Task) map[string]any { + // Set inventory_hostname for templating + e.vars["inventory_hostname"] = host + + result := make(map[string]any) + for k, v := range args { + switch val := v.(type) { + case string: + result[k] = e.templateString(val, host, task) + case map[string]any: + // Recurse for nested maps + result[k] = e.templateArgs(val, host, task) + case []any: + // Template strings in arrays + templated := make([]any, len(val)) + for i, item := range val { + if s, ok := item.(string); ok { + templated[i] = e.templateString(s, host, task) + } else { + templated[i] = item + } + } + result[k] = templated + default: + result[k] = v + } + } + return result +} + +// --- Command Modules --- + +func (e *Executor) moduleShell(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + cmd := getStringArg(args, "_raw_params", "") + if cmd == "" { + cmd = getStringArg(args, "cmd", "") + } + if cmd == "" { + return nil, fmt.Errorf("shell: no command specified") + } + + // Handle chdir + if chdir := getStringArg(args, "chdir", ""); chdir != "" { + cmd = fmt.Sprintf("cd %q && %s", chdir, cmd) + } + + stdout, stderr, rc, err := client.RunScript(ctx, cmd) + if err != nil { + return &TaskResult{Failed: true, Msg: err.Error(), Stdout: stdout, Stderr: stderr, RC: rc}, nil + } + + return &TaskResult{ + Changed: true, + Stdout: stdout, + Stderr: stderr, + RC: rc, + Failed: rc != 0, + }, nil +} + +func (e *Executor) moduleCommand(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + cmd := getStringArg(args, "_raw_params", "") + if cmd == "" { + cmd = getStringArg(args, "cmd", "") + } + if cmd == "" { + return nil, fmt.Errorf("command: no command specified") + } + + 
// Handle chdir + if chdir := getStringArg(args, "chdir", ""); chdir != "" { + cmd = fmt.Sprintf("cd %q && %s", chdir, cmd) + } + + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil { + return &TaskResult{Failed: true, Msg: err.Error()}, nil + } + + return &TaskResult{ + Changed: true, + Stdout: stdout, + Stderr: stderr, + RC: rc, + Failed: rc != 0, + }, nil +} + +func (e *Executor) moduleRaw(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + cmd := getStringArg(args, "_raw_params", "") + if cmd == "" { + return nil, fmt.Errorf("raw: no command specified") + } + + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil { + return &TaskResult{Failed: true, Msg: err.Error()}, nil + } + + return &TaskResult{ + Changed: true, + Stdout: stdout, + Stderr: stderr, + RC: rc, + }, nil +} + +func (e *Executor) moduleScript(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + script := getStringArg(args, "_raw_params", "") + if script == "" { + return nil, fmt.Errorf("script: no script specified") + } + + // Read local script + content, err := os.ReadFile(script) + if err != nil { + return nil, fmt.Errorf("read script: %w", err) + } + + stdout, stderr, rc, err := client.RunScript(ctx, string(content)) + if err != nil { + return &TaskResult{Failed: true, Msg: err.Error()}, nil + } + + return &TaskResult{ + Changed: true, + Stdout: stdout, + Stderr: stderr, + RC: rc, + Failed: rc != 0, + }, nil +} + +// --- File Modules --- + +func (e *Executor) moduleCopy(ctx context.Context, client *SSHClient, args map[string]any, host string, task *Task) (*TaskResult, error) { + dest := getStringArg(args, "dest", "") + if dest == "" { + return nil, fmt.Errorf("copy: dest required") + } + + var content []byte + var err error + + if src := getStringArg(args, "src", ""); src != "" { + content, err = os.ReadFile(src) + if err != nil { + return nil, fmt.Errorf("read src: %w", err) + } + } else if c := 
getStringArg(args, "content", ""); c != "" { + content = []byte(c) + } else { + return nil, fmt.Errorf("copy: src or content required") + } + + mode := os.FileMode(0644) + if m := getStringArg(args, "mode", ""); m != "" { + if parsed, err := strconv.ParseInt(m, 8, 32); err == nil { + mode = os.FileMode(parsed) + } + } + + err = client.Upload(ctx, strings.NewReader(string(content)), dest, mode) + if err != nil { + return nil, err + } + + // Handle owner/group (best-effort, errors ignored) + if owner := getStringArg(args, "owner", ""); owner != "" { + _, _, _, _ = client.Run(ctx, fmt.Sprintf("chown %s %q", owner, dest)) + } + if group := getStringArg(args, "group", ""); group != "" { + _, _, _, _ = client.Run(ctx, fmt.Sprintf("chgrp %s %q", group, dest)) + } + + return &TaskResult{Changed: true, Msg: fmt.Sprintf("copied to %s", dest)}, nil +} + +func (e *Executor) moduleTemplate(ctx context.Context, client *SSHClient, args map[string]any, host string, task *Task) (*TaskResult, error) { + src := getStringArg(args, "src", "") + dest := getStringArg(args, "dest", "") + if src == "" || dest == "" { + return nil, fmt.Errorf("template: src and dest required") + } + + // Process template + content, err := e.TemplateFile(src, host, task) + if err != nil { + return nil, fmt.Errorf("template: %w", err) + } + + mode := os.FileMode(0644) + if m := getStringArg(args, "mode", ""); m != "" { + if parsed, err := strconv.ParseInt(m, 8, 32); err == nil { + mode = os.FileMode(parsed) + } + } + + err = client.Upload(ctx, strings.NewReader(content), dest, mode) + if err != nil { + return nil, err + } + + return &TaskResult{Changed: true, Msg: fmt.Sprintf("templated to %s", dest)}, nil +} + +func (e *Executor) moduleFile(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + path := getStringArg(args, "path", "") + if path == "" { + path = getStringArg(args, "dest", "") + } + if path == "" { + return nil, fmt.Errorf("file: path required") + } + + state := 
getStringArg(args, "state", "file") + + switch state { + case "directory": + mode := getStringArg(args, "mode", "0755") + cmd := fmt.Sprintf("mkdir -p %q && chmod %s %q", path, mode, path) + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + + case "absent": + cmd := fmt.Sprintf("rm -rf %q", path) + _, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, RC: rc}, nil + } + + case "touch": + cmd := fmt.Sprintf("touch %q", path) + _, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, RC: rc}, nil + } + + case "link": + src := getStringArg(args, "src", "") + if src == "" { + return nil, fmt.Errorf("file: src required for link state") + } + cmd := fmt.Sprintf("ln -sf %q %q", src, path) + _, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, RC: rc}, nil + } + + case "file": + // Ensure file exists and set permissions + if mode := getStringArg(args, "mode", ""); mode != "" { + _, _, _, _ = client.Run(ctx, fmt.Sprintf("chmod %s %q", mode, path)) + } + } + + // Handle owner/group (best-effort, errors ignored) + if owner := getStringArg(args, "owner", ""); owner != "" { + _, _, _, _ = client.Run(ctx, fmt.Sprintf("chown %s %q", owner, path)) + } + if group := getStringArg(args, "group", ""); group != "" { + _, _, _, _ = client.Run(ctx, fmt.Sprintf("chgrp %s %q", group, path)) + } + if recurse := getBoolArg(args, "recurse", false); recurse { + if owner := getStringArg(args, "owner", ""); owner != "" { + _, _, _, _ = client.Run(ctx, fmt.Sprintf("chown -R %s %q", owner, path)) + } + } + + return &TaskResult{Changed: true}, nil +} + +func (e *Executor) moduleLineinfile(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + path := 
getStringArg(args, "path", "")
if path == "" {
	path = getStringArg(args, "dest", "")
}
if path == "" {
	return nil, fmt.Errorf("lineinfile: path required")
}

line := getStringArg(args, "line", "")
// Named "pattern" rather than "regexp" so it does not shadow the imported
// regexp package.
pattern := getStringArg(args, "regexp", "")
state := getStringArg(args, "state", "present")

if state == "absent" {
	if pattern != "" {
		// NOTE(review): the pattern is spliced into sed unescaped; patterns
		// containing "/" or quotes will break the command — verify inputs.
		cmd := fmt.Sprintf("sed -i '/%s/d' %q", pattern, path)
		_, stderr, rc, _ := client.Run(ctx, cmd)
		if rc != 0 {
			return &TaskResult{Failed: true, Msg: stderr, RC: rc}, nil
		}
	}
} else {
	// state == present
	if pattern != "" {
		// Replace the line matching the pattern in place.
		escapedLine := strings.ReplaceAll(line, "/", "\\/")
		cmd := fmt.Sprintf("sed -i 's/%s/%s/' %q", pattern, escapedLine, path)
		_, _, rc, _ := client.Run(ctx, cmd)
		if rc != 0 {
			// Line not found, append
			cmd = fmt.Sprintf("echo %q >> %q", line, path)
			_, _, _, _ = client.Run(ctx, cmd)
		}
	} else if line != "" {
		// Append the exact line unless it is already present verbatim.
		cmd := fmt.Sprintf("grep -qxF %q %q || echo %q >> %q", line, path, line, path)
		_, _, _, _ = client.Run(ctx, cmd)
	}
}

return &TaskResult{Changed: true}, nil
}

// moduleStat returns remote file metadata under the "stat" key, mirroring
// Ansible's stat module. Never reports a change.
func (e *Executor) moduleStat(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	path := getStringArg(args, "path", "")
	if path == "" {
		return nil, fmt.Errorf("stat: path required")
	}

	stat, err := client.Stat(ctx, path)
	if err != nil {
		return nil, err
	}

	return &TaskResult{
		Changed: false,
		Data:    map[string]any{"stat": stat},
	}, nil
}

// moduleSlurp downloads a remote file and returns its contents
// base64-encoded, matching Ansible's slurp module result shape.
func (e *Executor) moduleSlurp(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	path := getStringArg(args, "path", "")
	if path == "" {
		path = getStringArg(args, "src", "")
	}
	if path == "" {
		return nil, fmt.Errorf("slurp: path required")
	}

	content, err := client.Download(ctx, path)
	if err != nil {
		return nil, err
	}

	encoded := base64.StdEncoding.EncodeToString(content)

	return 
&TaskResult{ + Changed: false, + Data: map[string]any{"content": encoded, "encoding": "base64"}, + }, nil +} + +func (e *Executor) moduleFetch(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + src := getStringArg(args, "src", "") + dest := getStringArg(args, "dest", "") + if src == "" || dest == "" { + return nil, fmt.Errorf("fetch: src and dest required") + } + + content, err := client.Download(ctx, src) + if err != nil { + return nil, err + } + + // Create dest directory + if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil { + return nil, err + } + + if err := os.WriteFile(dest, content, 0644); err != nil { + return nil, err + } + + return &TaskResult{Changed: true, Msg: fmt.Sprintf("fetched %s to %s", src, dest)}, nil +} + +func (e *Executor) moduleGetURL(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + url := getStringArg(args, "url", "") + dest := getStringArg(args, "dest", "") + if url == "" || dest == "" { + return nil, fmt.Errorf("get_url: url and dest required") + } + + // Use curl or wget + cmd := fmt.Sprintf("curl -fsSL -o %q %q || wget -q -O %q %q", dest, url, dest, url) + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + + // Set mode if specified (best-effort) + if mode := getStringArg(args, "mode", ""); mode != "" { + _, _, _, _ = client.Run(ctx, fmt.Sprintf("chmod %s %q", mode, dest)) + } + + return &TaskResult{Changed: true}, nil +} + +// --- Package Modules --- + +func (e *Executor) moduleApt(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + name := getStringArg(args, "name", "") + state := getStringArg(args, "state", "present") + updateCache := getBoolArg(args, "update_cache", false) + + var cmd string + + if updateCache { + _, _, _, _ = client.Run(ctx, "apt-get update -qq") + } + + switch state { + case "present", 
"installed": + if name != "" { + cmd = fmt.Sprintf("DEBIAN_FRONTEND=noninteractive apt-get install -y -qq %s", name) + } + case "absent", "removed": + cmd = fmt.Sprintf("DEBIAN_FRONTEND=noninteractive apt-get remove -y -qq %s", name) + case "latest": + cmd = fmt.Sprintf("DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --only-upgrade %s", name) + } + + if cmd == "" { + return &TaskResult{Changed: false}, nil + } + + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + + return &TaskResult{Changed: true}, nil +} + +func (e *Executor) moduleAptKey(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + url := getStringArg(args, "url", "") + keyring := getStringArg(args, "keyring", "") + state := getStringArg(args, "state", "present") + + if state == "absent" { + if keyring != "" { + _, _, _, _ = client.Run(ctx, fmt.Sprintf("rm -f %q", keyring)) + } + return &TaskResult{Changed: true}, nil + } + + if url == "" { + return nil, fmt.Errorf("apt_key: url required") + } + + var cmd string + if keyring != "" { + cmd = fmt.Sprintf("curl -fsSL %q | gpg --dearmor -o %q", url, keyring) + } else { + cmd = fmt.Sprintf("curl -fsSL %q | apt-key add -", url) + } + + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + + return &TaskResult{Changed: true}, nil +} + +func (e *Executor) moduleAptRepository(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + repo := getStringArg(args, "repo", "") + filename := getStringArg(args, "filename", "") + state := getStringArg(args, "state", "present") + + if repo == "" { + return nil, fmt.Errorf("apt_repository: repo required") + } + + if filename == "" { + // Generate filename from repo + filename = strings.ReplaceAll(repo, " ", "-") + filename = 
strings.ReplaceAll(filename, "/", "-") + filename = strings.ReplaceAll(filename, ":", "") + } + + path := fmt.Sprintf("/etc/apt/sources.list.d/%s.list", filename) + + if state == "absent" { + _, _, _, _ = client.Run(ctx, fmt.Sprintf("rm -f %q", path)) + return &TaskResult{Changed: true}, nil + } + + cmd := fmt.Sprintf("echo %q > %q", repo, path) + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + + // Update apt cache (best-effort) + if getBoolArg(args, "update_cache", true) { + _, _, _, _ = client.Run(ctx, "apt-get update -qq") + } + + return &TaskResult{Changed: true}, nil +} + +func (e *Executor) modulePackage(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + // Detect package manager and delegate + stdout, _, _, _ := client.Run(ctx, "which apt-get yum dnf 2>/dev/null | head -1") + stdout = strings.TrimSpace(stdout) + + if strings.Contains(stdout, "apt") { + return e.moduleApt(ctx, client, args) + } + + // Default to apt + return e.moduleApt(ctx, client, args) +} + +func (e *Executor) modulePip(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + name := getStringArg(args, "name", "") + state := getStringArg(args, "state", "present") + executable := getStringArg(args, "executable", "pip3") + + var cmd string + switch state { + case "present", "installed": + cmd = fmt.Sprintf("%s install %s", executable, name) + case "absent", "removed": + cmd = fmt.Sprintf("%s uninstall -y %s", executable, name) + case "latest": + cmd = fmt.Sprintf("%s install --upgrade %s", executable, name) + } + + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + + return &TaskResult{Changed: true}, nil +} + +// --- Service Modules --- + +func (e *Executor) moduleService(ctx context.Context, client 
*SSHClient, args map[string]any) (*TaskResult, error) { + name := getStringArg(args, "name", "") + state := getStringArg(args, "state", "") + enabled := args["enabled"] + + if name == "" { + return nil, fmt.Errorf("service: name required") + } + + var cmds []string + + if state != "" { + switch state { + case "started": + cmds = append(cmds, fmt.Sprintf("systemctl start %s", name)) + case "stopped": + cmds = append(cmds, fmt.Sprintf("systemctl stop %s", name)) + case "restarted": + cmds = append(cmds, fmt.Sprintf("systemctl restart %s", name)) + case "reloaded": + cmds = append(cmds, fmt.Sprintf("systemctl reload %s", name)) + } + } + + if enabled != nil { + if getBoolArg(args, "enabled", false) { + cmds = append(cmds, fmt.Sprintf("systemctl enable %s", name)) + } else { + cmds = append(cmds, fmt.Sprintf("systemctl disable %s", name)) + } + } + + for _, cmd := range cmds { + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + } + + return &TaskResult{Changed: len(cmds) > 0}, nil +} + +func (e *Executor) moduleSystemd(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + // systemd is similar to service + if getBoolArg(args, "daemon_reload", false) { + _, _, _, _ = client.Run(ctx, "systemctl daemon-reload") + } + + return e.moduleService(ctx, client, args) +} + +// --- User/Group Modules --- + +func (e *Executor) moduleUser(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + name := getStringArg(args, "name", "") + state := getStringArg(args, "state", "present") + + if name == "" { + return nil, fmt.Errorf("user: name required") + } + + if state == "absent" { + cmd := fmt.Sprintf("userdel -r %s 2>/dev/null || true", name) + _, _, _, _ = client.Run(ctx, cmd) + return &TaskResult{Changed: true}, nil + } + + // Build useradd/usermod command + var opts []string + + if uid := getStringArg(args, 
"uid", ""); uid != "" { + opts = append(opts, "-u", uid) + } + if group := getStringArg(args, "group", ""); group != "" { + opts = append(opts, "-g", group) + } + if groups := getStringArg(args, "groups", ""); groups != "" { + opts = append(opts, "-G", groups) + } + if home := getStringArg(args, "home", ""); home != "" { + opts = append(opts, "-d", home) + } + if shell := getStringArg(args, "shell", ""); shell != "" { + opts = append(opts, "-s", shell) + } + if getBoolArg(args, "system", false) { + opts = append(opts, "-r") + } + if getBoolArg(args, "create_home", true) { + opts = append(opts, "-m") + } + + // Try usermod first, then useradd + optsStr := strings.Join(opts, " ") + var cmd string + if optsStr == "" { + cmd = fmt.Sprintf("id %s >/dev/null 2>&1 || useradd %s", name, name) + } else { + cmd = fmt.Sprintf("id %s >/dev/null 2>&1 && usermod %s %s || useradd %s %s", + name, optsStr, name, optsStr, name) + } + + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + + return &TaskResult{Changed: true}, nil +} + +func (e *Executor) moduleGroup(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + name := getStringArg(args, "name", "") + state := getStringArg(args, "state", "present") + + if name == "" { + return nil, fmt.Errorf("group: name required") + } + + if state == "absent" { + cmd := fmt.Sprintf("groupdel %s 2>/dev/null || true", name) + _, _, _, _ = client.Run(ctx, cmd) + return &TaskResult{Changed: true}, nil + } + + var opts []string + if gid := getStringArg(args, "gid", ""); gid != "" { + opts = append(opts, "-g", gid) + } + if getBoolArg(args, "system", false) { + opts = append(opts, "-r") + } + + cmd := fmt.Sprintf("getent group %s >/dev/null 2>&1 || groupadd %s %s", + name, strings.Join(opts, " "), name) + + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: 
true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + + return &TaskResult{Changed: true}, nil +} + +// --- HTTP Module --- + +func (e *Executor) moduleURI(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + url := getStringArg(args, "url", "") + method := getStringArg(args, "method", "GET") + + if url == "" { + return nil, fmt.Errorf("uri: url required") + } + + var curlOpts []string + curlOpts = append(curlOpts, "-s", "-S") + curlOpts = append(curlOpts, "-X", method) + + // Headers + if headers, ok := args["headers"].(map[string]any); ok { + for k, v := range headers { + curlOpts = append(curlOpts, "-H", fmt.Sprintf("%s: %v", k, v)) + } + } + + // Body + if body := getStringArg(args, "body", ""); body != "" { + curlOpts = append(curlOpts, "-d", body) + } + + // Status code + curlOpts = append(curlOpts, "-w", "\\n%{http_code}") + + cmd := fmt.Sprintf("curl %s %q", strings.Join(curlOpts, " "), url) + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil { + return &TaskResult{Failed: true, Msg: err.Error()}, nil + } + + // Parse status code from last line + lines := strings.Split(strings.TrimSpace(stdout), "\n") + statusCode := 0 + if len(lines) > 0 { + statusCode, _ = strconv.Atoi(lines[len(lines)-1]) + } + + // Check expected status + expectedStatus := 200 + if s, ok := args["status_code"].(int); ok { + expectedStatus = s + } + + failed := rc != 0 || statusCode != expectedStatus + + return &TaskResult{ + Changed: false, + Failed: failed, + Stdout: stdout, + Stderr: stderr, + RC: statusCode, + Data: map[string]any{"status": statusCode}, + }, nil +} + +// --- Misc Modules --- + +func (e *Executor) moduleDebug(args map[string]any) (*TaskResult, error) { + msg := getStringArg(args, "msg", "") + if v, ok := args["var"]; ok { + msg = fmt.Sprintf("%v = %v", v, e.vars[fmt.Sprintf("%v", v)]) + } + + return &TaskResult{ + Changed: false, + Msg: msg, + }, nil +} + +func (e *Executor) moduleFail(args map[string]any) (*TaskResult, 
error) {
	msg := getStringArg(args, "msg", "Failed as requested")
	return &TaskResult{
		Failed: true,
		Msg:    msg,
	}, nil
}

// moduleAssert evaluates each condition in "that" and fails on the first
// condition that does not hold.
func (e *Executor) moduleAssert(args map[string]any, host string) (*TaskResult, error) {
	that, ok := args["that"]
	if !ok {
		return nil, fmt.Errorf("assert: 'that' required")
	}

	conditions := normalizeConditions(that)
	for _, cond := range conditions {
		if !e.evalCondition(cond, host) {
			msg := getStringArg(args, "fail_msg", fmt.Sprintf("Assertion failed: %s", cond))
			return &TaskResult{Failed: true, Msg: msg}, nil
		}
	}

	return &TaskResult{Changed: false, Msg: "All assertions passed"}, nil
}

// moduleSetFact stores every argument (except the "cacheable" flag) as an
// executor variable.
// NOTE(review): writes e.vars without holding e.mu, unlike SetVar — verify
// callers serialize module execution.
func (e *Executor) moduleSetFact(args map[string]any) (*TaskResult, error) {
	for k, v := range args {
		if k != "cacheable" {
			e.vars[k] = v
		}
	}
	return &TaskResult{Changed: true}, nil
}

// modulePause sleeps for the requested number of seconds (int or numeric
// string), honoring context cancellation.
func (e *Executor) modulePause(ctx context.Context, args map[string]any) (*TaskResult, error) {
	seconds := 0
	if s, ok := args["seconds"].(int); ok {
		seconds = s
	}
	if s, ok := args["seconds"].(string); ok {
		seconds, _ = strconv.Atoi(s)
	}

	if seconds > 0 {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-ctxSleep(ctx, seconds):
		}
	}

	return &TaskResult{Changed: false}, nil
}

// ctxSleep returns a channel that is closed after the given number of
// seconds, or earlier if ctx is cancelled.
//
// Bug fix: the previous implementation busy-spun without ever sleeping and
// closed its channel almost immediately, so `pause` never actually paused.
func ctxSleep(ctx context.Context, seconds int) <-chan struct{} {
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		select {
		case <-ctx.Done():
		case <-sleepChan(seconds):
		}
	}()
	return ch
}

// sleepChan returns a channel that is closed after the given number of
// seconds have elapsed.
func sleepChan(seconds int) <-chan struct{} {
	ch := make(chan struct{})
	go func() {
		time.Sleep(time.Duration(seconds) * time.Second)
		close(ch)
	}()
	return ch
}

// moduleWaitFor waits until a TCP port on the target becomes reachable
// (via nc inside a `timeout` wrapper on the remote host).
func (e *Executor) moduleWaitFor(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	port := 0
	if p, ok := args["port"].(int); ok {
		port = p
	}
	host := getStringArg(args, "host", "127.0.0.1")
	state := getStringArg(args, "state", 
"started") + timeout := 300 + if t, ok := args["timeout"].(int); ok { + timeout = t + } + + if port > 0 && state == "started" { + cmd := fmt.Sprintf("timeout %d bash -c 'until nc -z %s %d; do sleep 1; done'", + timeout, host, port) + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + } + + return &TaskResult{Changed: false}, nil +} + +func (e *Executor) moduleGit(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + repo := getStringArg(args, "repo", "") + dest := getStringArg(args, "dest", "") + version := getStringArg(args, "version", "HEAD") + + if repo == "" || dest == "" { + return nil, fmt.Errorf("git: repo and dest required") + } + + // Check if dest exists + exists, _ := client.FileExists(ctx, dest+"/.git") + + var cmd string + if exists { + // Fetch and checkout (force to ensure clean state) + cmd = fmt.Sprintf("cd %q && git fetch --all && git checkout --force %q", dest, version) + } else { + cmd = fmt.Sprintf("git clone %q %q && cd %q && git checkout %q", + repo, dest, dest, version) + } + + stdout, stderr, rc, err := client.Run(ctx, cmd) + if err != nil || rc != 0 { + return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil + } + + return &TaskResult{Changed: true}, nil +} + +func (e *Executor) moduleUnarchive(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { + src := getStringArg(args, "src", "") + dest := getStringArg(args, "dest", "") + remote := getBoolArg(args, "remote_src", false) + + if src == "" || dest == "" { + return nil, fmt.Errorf("unarchive: src and dest required") + } + + // Create dest directory (best-effort) + _, _, _, _ = client.Run(ctx, fmt.Sprintf("mkdir -p %q", dest)) + + var cmd string + if !remote { + // Upload local file first + content, err := os.ReadFile(src) + if err != nil { + return nil, fmt.Errorf("read src: %w", err) + } + tmpPath := 
"/tmp/ansible_unarchive_" + filepath.Base(src)
		err = client.Upload(ctx, strings.NewReader(string(content)), tmpPath, 0644)
		if err != nil {
			return nil, err
		}
		src = tmpPath
		// Best-effort cleanup of the temporary upload.
		defer func() { _, _, _, _ = client.Run(ctx, fmt.Sprintf("rm -f %q", tmpPath)) }()
	}

	// Detect archive type from the filename and build the extract command.
	if strings.HasSuffix(src, ".tar.gz") || strings.HasSuffix(src, ".tgz") {
		cmd = fmt.Sprintf("tar -xzf %q -C %q", src, dest)
	} else if strings.HasSuffix(src, ".tar.xz") {
		cmd = fmt.Sprintf("tar -xJf %q -C %q", src, dest)
	} else if strings.HasSuffix(src, ".tar.bz2") {
		cmd = fmt.Sprintf("tar -xjf %q -C %q", src, dest)
	} else if strings.HasSuffix(src, ".tar") {
		cmd = fmt.Sprintf("tar -xf %q -C %q", src, dest)
	} else if strings.HasSuffix(src, ".zip") {
		cmd = fmt.Sprintf("unzip -o %q -d %q", src, dest)
	} else {
		cmd = fmt.Sprintf("tar -xf %q -C %q", src, dest) // Unknown extension: assume tar.
	}

	stdout, stderr, rc, err := client.Run(ctx, cmd)
	if err != nil || rc != 0 {
		return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
	}

	return &TaskResult{Changed: true}, nil
}

// --- Helpers ---

// getStringArg returns the string form of args[key], or def when the key is
// absent. Non-string values are rendered with fmt.Sprintf("%v", ...).
func getStringArg(args map[string]any, key, def string) string {
	if v, ok := args[key]; ok {
		if s, ok := v.(string); ok {
			return s
		}
		return fmt.Sprintf("%v", v)
	}
	return def
}

// getBoolArg returns args[key] interpreted as a boolean. Strings "true",
// "yes" and "1" (case-insensitive) count as true. Absent or unrecognized
// values yield def.
func getBoolArg(args map[string]any, key string, def bool) bool {
	if v, ok := args[key]; ok {
		switch b := v.(type) {
		case bool:
			return b
		case string:
			lower := strings.ToLower(b)
			return lower == "true" || lower == "yes" || lower == "1"
		}
	}
	return def
}

// --- Additional Modules ---

// moduleHostname implements the "hostname" module: sets the system hostname
// via hostnamectl (falling back to plain hostname) and patches the 127.0.1.1
// alias in /etc/hosts (Debian convention, best-effort).
func (e *Executor) moduleHostname(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	name := getStringArg(args, "name", "")
	if name == "" {
		return nil, fmt.Errorf("hostname: name required")
	}

	cmd := fmt.Sprintf("hostnamectl set-hostname %q || hostname %q", name, name)
	stdout, stderr, rc, err := client.Run(ctx, cmd)
	if err != nil || rc != 0 {
		return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
	}

	// Update /etc/hosts if needed (best-effort).
	_, _, _, _ = client.Run(ctx, fmt.Sprintf("sed -i 's/127.0.1.1.*/127.0.1.1\t%s/' /etc/hosts", name))

	return &TaskResult{Changed: true}, nil
}

// moduleSysctl implements a minimal "sysctl" module: sets a kernel parameter
// at runtime and (optionally) persists it to /etc/sysctl.conf.
func (e *Executor) moduleSysctl(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	name := getStringArg(args, "name", "")
	value := getStringArg(args, "value", "")
	state := getStringArg(args, "state", "present")

	if name == "" {
		return nil, fmt.Errorf("sysctl: name required")
	}

	if state == "absent" {
		// Remove matching lines from sysctl.conf. NOTE(review): dots in the
		// key are regex metacharacters for sed; harmless for typical sysctl
		// keys but could over-match — confirm if exactness matters.
		cmd := fmt.Sprintf("sed -i '/%s/d' /etc/sysctl.conf", name)
		_, _, _, _ = client.Run(ctx, cmd)
		return &TaskResult{Changed: true}, nil
	}

	// Apply the value at runtime.
	cmd := fmt.Sprintf("sysctl -w %s=%s", name, value)
	stdout, stderr, rc, err := client.Run(ctx, cmd)
	if err != nil || rc != 0 {
		return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
	}

	// Persist: replace an existing entry or append a new one (best-effort).
	if getBoolArg(args, "sysctl_set", true) {
		cmd = fmt.Sprintf("grep -q '^%s' /etc/sysctl.conf && sed -i 's/^%s.*/%s=%s/' /etc/sysctl.conf || echo '%s=%s' >> /etc/sysctl.conf",
			name, name, name, value, name, value)
		_, _, _, _ = client.Run(ctx, cmd)
	}

	return &TaskResult{Changed: true}, nil
}

// moduleCron implements a minimal "cron" module. Entries are tagged with a
// trailing "# <name>" comment so they can be removed or replaced by name.
func (e *Executor) moduleCron(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	name := getStringArg(args, "name", "")
	job := getStringArg(args, "job", "")
	state := getStringArg(args, "state", "present")
	user := getStringArg(args, "user", "root")

	minute := getStringArg(args, "minute", "*")
	hour := getStringArg(args, "hour", "*")
	day := getStringArg(args, "day", "*")
	month := getStringArg(args, "month", "*")
	weekday := getStringArg(args, "weekday", "*")

	if state == "absent" {
		if name != "" {
			// Remove by name (comment marker). Only filter on the job text
			// when one was supplied: the old code always piped through
			// "grep -v '<job>'", and with an empty job "grep -v ''" matches
			// every line — wiping the entire crontab.
			filter := fmt.Sprintf("grep -v '# %s'", name)
			if job != "" {
				filter += fmt.Sprintf(" | grep -v '%s'", job)
			}
			cmd := fmt.Sprintf("crontab -u %s -l 2>/dev/null | %s | crontab -u %s -",
				user, filter, user)
			_, _, _, _ = client.Run(ctx, cmd)
		}
		return &TaskResult{Changed: true}, nil
	}

	// Build the cron entry with its name marker.
	schedule := fmt.Sprintf("%s %s %s %s %s", minute, hour, day, month, weekday)
	entry := fmt.Sprintf("%s %s # %s", schedule, job, name)

	// Replace any previous entry with the same name, then append the new one.
	cmd := fmt.Sprintf("(crontab -u %s -l 2>/dev/null | grep -v '# %s' ; echo %q) | crontab -u %s -",
		user, name, entry, user)
	stdout, stderr, rc, err := client.Run(ctx, cmd)
	if err != nil || rc != 0 {
		return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
	}

	return &TaskResult{Changed: true}, nil
}

// moduleBlockinfile implements a minimal "blockinfile" module: manages a
// marker-delimited block of text inside a file.
func (e *Executor) moduleBlockinfile(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	path := getStringArg(args, "path", "")
	if path == "" {
		path = getStringArg(args, "dest", "")
	}
	if path == "" {
		return nil, fmt.Errorf("blockinfile: path required")
	}

	block := getStringArg(args, "block", "")
	marker := getStringArg(args, "marker", "# {mark} ANSIBLE MANAGED BLOCK")
	state := getStringArg(args, "state", "present")
	create := getBoolArg(args, "create", false)

	beginMarker := strings.Replace(marker, "{mark}", "BEGIN", 1)
	endMarker := strings.Replace(marker, "{mark}", "END", 1)

	if state == "absent" {
		// Delete everything between (and including) the markers.
		cmd := fmt.Sprintf("sed -i '/%s/,/%s/d' %q",
			strings.ReplaceAll(beginMarker, "/", "\\/"),
			strings.ReplaceAll(endMarker, "/", "\\/"),
			path)
		_, _, _, _ = client.Run(ctx, cmd)
		return &TaskResult{Changed: true}, nil
	}

	// Create file if needed (best-effort).
	if create {
		_, _, _, _ = client.Run(ctx, fmt.Sprintf("touch %q", path))
	}

	// Remove any existing block, then append the new one via a quoted heredoc
	// (no shell expansion inside the block content).
	escapedBlock := strings.ReplaceAll(block, "'", "'\\''")
	cmd := fmt.Sprintf(`
sed -i '/%s/,/%s/d' %q 2>/dev/null || true
cat >> %q << 'BLOCK_EOF'
%s
%s
%s
BLOCK_EOF
`, strings.ReplaceAll(beginMarker, "/", "\\/"),
		strings.ReplaceAll(endMarker, "/", "\\/"),
		path, path, beginMarker, escapedBlock, endMarker)

	stdout, stderr, rc, err := client.RunScript(ctx, cmd)
	if err != nil || rc != 0 {
		return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
	}

	return &TaskResult{Changed: true}, nil
}

// moduleIncludeVars is a stub for "include_vars": it acknowledges the request
// without loading the file (loading/merging is not yet implemented).
func (e *Executor) moduleIncludeVars(args map[string]any) (*TaskResult, error) {
	file := getStringArg(args, "file", "")
	if file == "" {
		file = getStringArg(args, "_raw_params", "")
	}

	if file != "" {
		// Would need to read and parse the vars file.
		// For now, just acknowledge.
		return &TaskResult{Changed: false, Msg: "include_vars: " + file}, nil
	}

	return &TaskResult{Changed: false}, nil
}

// moduleMeta is a stub for "meta": play-flow actions are treated as no-ops.
func (e *Executor) moduleMeta(args map[string]any) (*TaskResult, error) {
	return &TaskResult{Changed: false}, nil
}

// moduleSetup is a stub for "setup": fact gathering is handled elsewhere.
func (e *Executor) moduleSetup(ctx context.Context, client *SSHClient) (*TaskResult, error) {
	return &TaskResult{Changed: false, Msg: "facts gathered"}, nil
}

// moduleReboot implements "reboot": schedules an immediate (or delayed)
// restart in the background so the SSH session can return first.
func (e *Executor) moduleReboot(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	preRebootDelay := 0
	if d, ok := args["pre_reboot_delay"].(int); ok {
		preRebootDelay = d
	}

	msg := getStringArg(args, "msg", "Reboot initiated by Ansible")

	if preRebootDelay > 0 {
		cmd := fmt.Sprintf("sleep %d && shutdown -r now '%s' &", preRebootDelay, msg)
		_, _, _, _ = client.Run(ctx, cmd)
	} else {
		_, _, _, _ = client.Run(ctx, fmt.Sprintf("shutdown -r now '%s' &", msg))
	}

	return &TaskResult{Changed: true, Msg: "Reboot initiated"}, nil
}

// moduleUFW implements a minimal "ufw" module: firewall state changes and
// simple port/proto rules.
func (e *Executor) moduleUFW(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	rule := getStringArg(args, "rule", "")
	port := getStringArg(args, "port", "")
	proto := getStringArg(args, "proto", "tcp")
	state := getStringArg(args, "state", "")

	var cmd string

	// Handle state (enable/disable/reload/reset).
	if state != "" {
		switch state {
		case "enabled":
			cmd = "ufw --force enable"
		case "disabled":
			cmd = "ufw disable"
		case "reloaded":
			cmd = "ufw reload"
		case "reset":
			cmd = "ufw --force reset"
		}
		if cmd != "" {
			stdout, stderr, rc, err := client.Run(ctx, cmd)
			if err != nil || rc != 0 {
				return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
			}
			return &TaskResult{Changed: true}, nil
		}
	}

	// Handle port rule. The old code fell through with an empty cmd for an
	// unknown rule keyword, executing an empty command and reporting success.
	if rule != "" && port != "" {
		switch rule {
		case "allow", "deny", "reject", "limit":
			cmd = fmt.Sprintf("ufw %s %s/%s", rule, port, proto)
		default:
			return nil, fmt.Errorf("ufw: unsupported rule %q", rule)
		}

		stdout, stderr, rc, err := client.Run(ctx, cmd)
		if err != nil || rc != 0 {
			return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
		}
	}

	return &TaskResult{Changed: true}, nil
}

// moduleAuthorizedKey implements "authorized_key": adds or removes an SSH
// public key from a user's ~/.ssh/authorized_keys.
func (e *Executor) moduleAuthorizedKey(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	user := getStringArg(args, "user", "")
	key := getStringArg(args, "key", "")
	state := getStringArg(args, "state", "present")

	if user == "" || key == "" {
		return nil, fmt.Errorf("authorized_key: user and key required")
	}

	// Resolve the user's home directory; fall back to the conventional path.
	stdout, _, _, err := client.Run(ctx, fmt.Sprintf("getent passwd %s | cut -d: -f6", user))
	if err != nil {
		return nil, fmt.Errorf("get home dir: %w", err)
	}
	home := strings.TrimSpace(stdout)
	if home == "" {
		home = "/root"
		if user != "root" {
			home = "/home/" + user
		}
	}

	authKeysPath := filepath.Join(home, ".ssh", "authorized_keys")

	// Keys are matched by their first 40 characters (enough to identify the
	// key type + the start of the key material). The old code sliced [:40]
	// unconditionally, panicking on shorter strings.
	keyPrefix := key
	if len(keyPrefix) > 40 {
		keyPrefix = keyPrefix[:40]
	}

	if state == "absent" {
		escapedPrefix := strings.ReplaceAll(keyPrefix, "/", "\\/")
		cmd := fmt.Sprintf("sed -i '/%s/d' %q 2>/dev/null || true", escapedPrefix, authKeysPath)
		_, _, _, _ = client.Run(ctx, cmd)
		return &TaskResult{Changed: true}, nil
	}

	// Ensure .ssh directory exists (best-effort).
	_, _, _, _ = client.Run(ctx, fmt.Sprintf("mkdir -p %q && chmod 700 %q && chown %s:%s %q",
		filepath.Dir(authKeysPath), filepath.Dir(authKeysPath), user, user, filepath.Dir(authKeysPath)))

	// Add key if not present.
	cmd := fmt.Sprintf("grep -qF %q %q 2>/dev/null || echo %q >> %q",
		keyPrefix, authKeysPath, key, authKeysPath)
	stdout, stderr, rc, err := client.Run(ctx, cmd)
	if err != nil || rc != 0 {
		return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
	}

	// Fix permissions (best-effort).
	_, _, _, _ = client.Run(ctx, fmt.Sprintf("chmod 600 %q && chown %s:%s %q",
		authKeysPath, user, user, authKeysPath))

	return &TaskResult{Changed: true}, nil
}

// moduleDockerCompose implements a minimal "docker_compose" module using the
// `docker compose` CLI plugin in the given project directory.
func (e *Executor) moduleDockerCompose(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	projectSrc := getStringArg(args, "project_src", "")
	state := getStringArg(args, "state", "present")

	if projectSrc == "" {
		return nil, fmt.Errorf("docker_compose: project_src required")
	}

	var cmd string
	switch state {
	case "present":
		cmd = fmt.Sprintf("cd %q && docker compose up -d", projectSrc)
	case "absent":
		cmd = fmt.Sprintf("cd %q && docker compose down", projectSrc)
	case "restarted":
		cmd = fmt.Sprintf("cd %q && docker compose restart", projectSrc)
	default:
		cmd = fmt.Sprintf("cd %q && docker compose up -d", projectSrc)
	}

	stdout, stderr, rc, err := client.Run(ctx, cmd)
	if err != nil || rc != 0 {
		return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
	}

	// Heuristic for changed: compose prints "Up to date" when nothing moved.
	changed := !strings.Contains(stdout, "Up to date") && !strings.Contains(stderr, "Up to date")

	return &TaskResult{Changed: changed, Stdout: stdout}, nil
}
diff --git a/ansible/parser.go
b/ansible/parser.go new file mode 100644 index 0000000..b050c6e --- /dev/null +++ b/ansible/parser.go @@ -0,0 +1,438 @@ +package ansible + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go/pkg/log" + "gopkg.in/yaml.v3" +) + +// Parser handles Ansible YAML parsing. +type Parser struct { + basePath string + vars map[string]any +} + +// NewParser creates a new Ansible parser. +func NewParser(basePath string) *Parser { + return &Parser{ + basePath: basePath, + vars: make(map[string]any), + } +} + +// ParsePlaybook parses an Ansible playbook file. +func (p *Parser) ParsePlaybook(path string) ([]Play, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("read playbook: %w", err) + } + + var plays []Play + if err := yaml.Unmarshal(data, &plays); err != nil { + return nil, fmt.Errorf("parse playbook: %w", err) + } + + // Process each play + for i := range plays { + if err := p.processPlay(&plays[i]); err != nil { + return nil, fmt.Errorf("process play %d: %w", i, err) + } + } + + return plays, nil +} + +// ParseInventory parses an Ansible inventory file. +func (p *Parser) ParseInventory(path string) (*Inventory, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("read inventory: %w", err) + } + + var inv Inventory + if err := yaml.Unmarshal(data, &inv); err != nil { + return nil, fmt.Errorf("parse inventory: %w", err) + } + + return &inv, nil +} + +// ParseTasks parses a tasks file (used by include_tasks). 
+func (p *Parser) ParseTasks(path string) ([]Task, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("read tasks: %w", err) + } + + var tasks []Task + if err := yaml.Unmarshal(data, &tasks); err != nil { + return nil, fmt.Errorf("parse tasks: %w", err) + } + + for i := range tasks { + if err := p.extractModule(&tasks[i]); err != nil { + return nil, fmt.Errorf("task %d: %w", i, err) + } + } + + return tasks, nil +} + +// ParseRole parses a role and returns its tasks. +func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) { + if tasksFrom == "" { + tasksFrom = "main.yml" + } + + // Search paths for roles (in order of precedence) + searchPaths := []string{ + // Relative to playbook + filepath.Join(p.basePath, "roles", name, "tasks", tasksFrom), + // Parent directory roles + filepath.Join(filepath.Dir(p.basePath), "roles", name, "tasks", tasksFrom), + // Sibling roles directory + filepath.Join(p.basePath, "..", "roles", name, "tasks", tasksFrom), + // playbooks/roles pattern + filepath.Join(p.basePath, "playbooks", "roles", name, "tasks", tasksFrom), + // Common DevOps structure + filepath.Join(filepath.Dir(filepath.Dir(p.basePath)), "roles", name, "tasks", tasksFrom), + } + + var tasksPath string + for _, sp := range searchPaths { + // Clean the path to resolve .. 
segments + sp = filepath.Clean(sp) + if _, err := os.Stat(sp); err == nil { + tasksPath = sp + break + } + } + + if tasksPath == "" { + return nil, log.E("parser.ParseRole", fmt.Sprintf("role %s not found in search paths: %v", name, searchPaths), nil) + } + + // Load role defaults + defaultsPath := filepath.Join(filepath.Dir(filepath.Dir(tasksPath)), "defaults", "main.yml") + if data, err := os.ReadFile(defaultsPath); err == nil { + var defaults map[string]any + if yaml.Unmarshal(data, &defaults) == nil { + for k, v := range defaults { + if _, exists := p.vars[k]; !exists { + p.vars[k] = v + } + } + } + } + + // Load role vars + varsPath := filepath.Join(filepath.Dir(filepath.Dir(tasksPath)), "vars", "main.yml") + if data, err := os.ReadFile(varsPath); err == nil { + var roleVars map[string]any + if yaml.Unmarshal(data, &roleVars) == nil { + for k, v := range roleVars { + p.vars[k] = v + } + } + } + + return p.ParseTasks(tasksPath) +} + +// processPlay processes a play and extracts modules from tasks. +func (p *Parser) processPlay(play *Play) error { + // Merge play vars + for k, v := range play.Vars { + p.vars[k] = v + } + + for i := range play.PreTasks { + if err := p.extractModule(&play.PreTasks[i]); err != nil { + return fmt.Errorf("pre_task %d: %w", i, err) + } + } + + for i := range play.Tasks { + if err := p.extractModule(&play.Tasks[i]); err != nil { + return fmt.Errorf("task %d: %w", i, err) + } + } + + for i := range play.PostTasks { + if err := p.extractModule(&play.PostTasks[i]); err != nil { + return fmt.Errorf("post_task %d: %w", i, err) + } + } + + for i := range play.Handlers { + if err := p.extractModule(&play.Handlers[i]); err != nil { + return fmt.Errorf("handler %d: %w", i, err) + } + } + + return nil +} + +// extractModule extracts the module name and args from a task. 
+func (p *Parser) extractModule(task *Task) error { + // First, unmarshal the raw YAML to get all keys + // This is a workaround since we need to find the module key dynamically + + // Handle block tasks + for i := range task.Block { + if err := p.extractModule(&task.Block[i]); err != nil { + return err + } + } + for i := range task.Rescue { + if err := p.extractModule(&task.Rescue[i]); err != nil { + return err + } + } + for i := range task.Always { + if err := p.extractModule(&task.Always[i]); err != nil { + return err + } + } + + return nil +} + +// UnmarshalYAML implements custom YAML unmarshaling for Task. +func (t *Task) UnmarshalYAML(node *yaml.Node) error { + // First decode known fields + type rawTask Task + var raw rawTask + + // Create a map to capture all fields + var m map[string]any + if err := node.Decode(&m); err != nil { + return err + } + + // Decode into struct + if err := node.Decode(&raw); err != nil { + return err + } + *t = Task(raw) + t.raw = m + + // Find the module key + knownKeys := map[string]bool{ + "name": true, "register": true, "when": true, "loop": true, + "loop_control": true, "vars": true, "environment": true, + "changed_when": true, "failed_when": true, "ignore_errors": true, + "no_log": true, "become": true, "become_user": true, + "delegate_to": true, "run_once": true, "tags": true, + "block": true, "rescue": true, "always": true, "notify": true, + "retries": true, "delay": true, "until": true, + "include_tasks": true, "import_tasks": true, + "include_role": true, "import_role": true, + "with_items": true, "with_dict": true, "with_file": true, + } + + for key, val := range m { + if knownKeys[key] { + continue + } + + // Check if this is a module + if isModule(key) { + t.Module = key + t.Args = make(map[string]any) + + switch v := val.(type) { + case string: + // Free-form args (e.g., shell: echo hello) + t.Args["_raw_params"] = v + case map[string]any: + t.Args = v + case nil: + // Module with no args + default: + 
t.Args["_raw_params"] = v + } + break + } + } + + // Handle with_items as loop + if items, ok := m["with_items"]; ok && t.Loop == nil { + t.Loop = items + } + + return nil +} + +// isModule checks if a key is a known module. +func isModule(key string) bool { + for _, m := range KnownModules { + if key == m { + return true + } + // Also check without ansible.builtin. prefix + if strings.HasPrefix(m, "ansible.builtin.") { + if key == strings.TrimPrefix(m, "ansible.builtin.") { + return true + } + } + } + // Accept any key with dots (likely a module) + return strings.Contains(key, ".") +} + +// NormalizeModule normalizes a module name to its canonical form. +func NormalizeModule(name string) string { + // Add ansible.builtin. prefix if missing + if !strings.Contains(name, ".") { + return "ansible.builtin." + name + } + return name +} + +// GetHosts returns hosts matching a pattern from inventory. +func GetHosts(inv *Inventory, pattern string) []string { + if pattern == "all" { + return getAllHosts(inv.All) + } + if pattern == "localhost" { + return []string{"localhost"} + } + + // Check if it's a group name + hosts := getGroupHosts(inv.All, pattern) + if len(hosts) > 0 { + return hosts + } + + // Check if it's a specific host + if hasHost(inv.All, pattern) { + return []string{pattern} + } + + // Handle patterns with : (intersection/union) + // For now, just return empty + return nil +} + +func getAllHosts(group *InventoryGroup) []string { + if group == nil { + return nil + } + + var hosts []string + for name := range group.Hosts { + hosts = append(hosts, name) + } + for _, child := range group.Children { + hosts = append(hosts, getAllHosts(child)...) 
+ } + return hosts +} + +func getGroupHosts(group *InventoryGroup, name string) []string { + if group == nil { + return nil + } + + // Check children for the group name + if child, ok := group.Children[name]; ok { + return getAllHosts(child) + } + + // Recurse + for _, child := range group.Children { + if hosts := getGroupHosts(child, name); len(hosts) > 0 { + return hosts + } + } + + return nil +} + +func hasHost(group *InventoryGroup, name string) bool { + if group == nil { + return false + } + + if _, ok := group.Hosts[name]; ok { + return true + } + + for _, child := range group.Children { + if hasHost(child, name) { + return true + } + } + + return false +} + +// GetHostVars returns variables for a specific host. +func GetHostVars(inv *Inventory, hostname string) map[string]any { + vars := make(map[string]any) + + // Collect vars from all levels + collectHostVars(inv.All, hostname, vars) + + return vars +} + +func collectHostVars(group *InventoryGroup, hostname string, vars map[string]any) bool { + if group == nil { + return false + } + + // Check if host is in this group + found := false + if host, ok := group.Hosts[hostname]; ok { + found = true + // Apply group vars first + for k, v := range group.Vars { + vars[k] = v + } + // Then host vars + if host != nil { + if host.AnsibleHost != "" { + vars["ansible_host"] = host.AnsibleHost + } + if host.AnsiblePort != 0 { + vars["ansible_port"] = host.AnsiblePort + } + if host.AnsibleUser != "" { + vars["ansible_user"] = host.AnsibleUser + } + if host.AnsiblePassword != "" { + vars["ansible_password"] = host.AnsiblePassword + } + if host.AnsibleSSHPrivateKeyFile != "" { + vars["ansible_ssh_private_key_file"] = host.AnsibleSSHPrivateKeyFile + } + if host.AnsibleConnection != "" { + vars["ansible_connection"] = host.AnsibleConnection + } + for k, v := range host.Vars { + vars[k] = v + } + } + } + + // Check children + for _, child := range group.Children { + if collectHostVars(child, hostname, vars) { + // Apply this 
group's vars (parent vars) + for k, v := range group.Vars { + if _, exists := vars[k]; !exists { + vars[k] = v + } + } + found = true + } + } + + return found +} diff --git a/ansible/ssh.go b/ansible/ssh.go new file mode 100644 index 0000000..d9423ac --- /dev/null +++ b/ansible/ssh.go @@ -0,0 +1,451 @@ +package ansible + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "forge.lthn.ai/core/go/pkg/log" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/knownhosts" +) + +// SSHClient handles SSH connections to remote hosts. +type SSHClient struct { + host string + port int + user string + password string + keyFile string + client *ssh.Client + mu sync.Mutex + become bool + becomeUser string + becomePass string + timeout time.Duration +} + +// SSHConfig holds SSH connection configuration. +type SSHConfig struct { + Host string + Port int + User string + Password string + KeyFile string + Become bool + BecomeUser string + BecomePass string + Timeout time.Duration +} + +// NewSSHClient creates a new SSH client. +func NewSSHClient(cfg SSHConfig) (*SSHClient, error) { + if cfg.Port == 0 { + cfg.Port = 22 + } + if cfg.User == "" { + cfg.User = "root" + } + if cfg.Timeout == 0 { + cfg.Timeout = 30 * time.Second + } + + client := &SSHClient{ + host: cfg.Host, + port: cfg.Port, + user: cfg.User, + password: cfg.Password, + keyFile: cfg.KeyFile, + become: cfg.Become, + becomeUser: cfg.BecomeUser, + becomePass: cfg.BecomePass, + timeout: cfg.Timeout, + } + + return client, nil +} + +// Connect establishes the SSH connection. 
+func (c *SSHClient) Connect(ctx context.Context) error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.client != nil { + return nil + } + + var authMethods []ssh.AuthMethod + + // Try key-based auth first + if c.keyFile != "" { + keyPath := c.keyFile + if strings.HasPrefix(keyPath, "~") { + home, _ := os.UserHomeDir() + keyPath = filepath.Join(home, keyPath[1:]) + } + + if key, err := os.ReadFile(keyPath); err == nil { + if signer, err := ssh.ParsePrivateKey(key); err == nil { + authMethods = append(authMethods, ssh.PublicKeys(signer)) + } + } + } + + // Try default SSH keys + if len(authMethods) == 0 { + home, _ := os.UserHomeDir() + defaultKeys := []string{ + filepath.Join(home, ".ssh", "id_ed25519"), + filepath.Join(home, ".ssh", "id_rsa"), + } + for _, keyPath := range defaultKeys { + if key, err := os.ReadFile(keyPath); err == nil { + if signer, err := ssh.ParsePrivateKey(key); err == nil { + authMethods = append(authMethods, ssh.PublicKeys(signer)) + break + } + } + } + } + + // Fall back to password auth + if c.password != "" { + authMethods = append(authMethods, ssh.Password(c.password)) + authMethods = append(authMethods, ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) { + answers := make([]string, len(questions)) + for i := range questions { + answers[i] = c.password + } + return answers, nil + })) + } + + if len(authMethods) == 0 { + return log.E("ssh.Connect", "no authentication method available", nil) + } + + // Host key verification + var hostKeyCallback ssh.HostKeyCallback + + home, err := os.UserHomeDir() + if err != nil { + return log.E("ssh.Connect", "failed to get user home dir", err) + } + knownHostsPath := filepath.Join(home, ".ssh", "known_hosts") + + // Ensure known_hosts file exists + if _, err := os.Stat(knownHostsPath); os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(knownHostsPath), 0700); err != nil { + return log.E("ssh.Connect", "failed to create .ssh dir", err) + } + 
if err := os.WriteFile(knownHostsPath, nil, 0600); err != nil { + return log.E("ssh.Connect", "failed to create known_hosts file", err) + } + } + + cb, err := knownhosts.New(knownHostsPath) + if err != nil { + return log.E("ssh.Connect", "failed to load known_hosts", err) + } + hostKeyCallback = cb + + config := &ssh.ClientConfig{ + User: c.user, + Auth: authMethods, + HostKeyCallback: hostKeyCallback, + Timeout: c.timeout, + } + + addr := fmt.Sprintf("%s:%d", c.host, c.port) + + // Connect with context timeout + var d net.Dialer + conn, err := d.DialContext(ctx, "tcp", addr) + if err != nil { + return log.E("ssh.Connect", fmt.Sprintf("dial %s", addr), err) + } + + sshConn, chans, reqs, err := ssh.NewClientConn(conn, addr, config) + if err != nil { + // conn is closed by NewClientConn on error + return log.E("ssh.Connect", fmt.Sprintf("ssh connect %s", addr), err) + } + + c.client = ssh.NewClient(sshConn, chans, reqs) + return nil +} + +// Close closes the SSH connection. +func (c *SSHClient) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.client != nil { + err := c.client.Close() + c.client = nil + return err + } + return nil +} + +// Run executes a command on the remote host. 
+func (c *SSHClient) Run(ctx context.Context, cmd string) (stdout, stderr string, exitCode int, err error) { + if err := c.Connect(ctx); err != nil { + return "", "", -1, err + } + + session, err := c.client.NewSession() + if err != nil { + return "", "", -1, log.E("ssh.Run", "new session", err) + } + defer func() { _ = session.Close() }() + + var stdoutBuf, stderrBuf bytes.Buffer + session.Stdout = &stdoutBuf + session.Stderr = &stderrBuf + + // Apply become if needed + if c.become { + becomeUser := c.becomeUser + if becomeUser == "" { + becomeUser = "root" + } + // Escape single quotes in the command + escapedCmd := strings.ReplaceAll(cmd, "'", "'\\''") + if c.becomePass != "" { + // Use sudo with password via stdin (-S flag) + // We launch a goroutine to write the password to stdin + cmd = fmt.Sprintf("sudo -S -u %s bash -c '%s'", becomeUser, escapedCmd) + stdin, err := session.StdinPipe() + if err != nil { + return "", "", -1, log.E("ssh.Run", "stdin pipe", err) + } + go func() { + defer func() { _ = stdin.Close() }() + _, _ = io.WriteString(stdin, c.becomePass+"\n") + }() + } else if c.password != "" { + // Try using connection password for sudo + cmd = fmt.Sprintf("sudo -S -u %s bash -c '%s'", becomeUser, escapedCmd) + stdin, err := session.StdinPipe() + if err != nil { + return "", "", -1, log.E("ssh.Run", "stdin pipe", err) + } + go func() { + defer func() { _ = stdin.Close() }() + _, _ = io.WriteString(stdin, c.password+"\n") + }() + } else { + // Try passwordless sudo + cmd = fmt.Sprintf("sudo -n -u %s bash -c '%s'", becomeUser, escapedCmd) + } + } + + // Run with context + done := make(chan error, 1) + go func() { + done <- session.Run(cmd) + }() + + select { + case <-ctx.Done(): + _ = session.Signal(ssh.SIGKILL) + return "", "", -1, ctx.Err() + case err := <-done: + exitCode = 0 + if err != nil { + if exitErr, ok := err.(*ssh.ExitError); ok { + exitCode = exitErr.ExitStatus() + } else { + return stdoutBuf.String(), stderrBuf.String(), -1, err + } + } + 
return stdoutBuf.String(), stderrBuf.String(), exitCode, nil + } +} + +// RunScript runs a script on the remote host. +func (c *SSHClient) RunScript(ctx context.Context, script string) (stdout, stderr string, exitCode int, err error) { + // Escape the script for heredoc + cmd := fmt.Sprintf("bash <<'ANSIBLE_SCRIPT_EOF'\n%s\nANSIBLE_SCRIPT_EOF", script) + return c.Run(ctx, cmd) +} + +// Upload copies a file to the remote host. +func (c *SSHClient) Upload(ctx context.Context, local io.Reader, remote string, mode os.FileMode) error { + if err := c.Connect(ctx); err != nil { + return err + } + + // Read content + content, err := io.ReadAll(local) + if err != nil { + return log.E("ssh.Upload", "read content", err) + } + + // Create parent directory + dir := filepath.Dir(remote) + dirCmd := fmt.Sprintf("mkdir -p %q", dir) + if c.become { + dirCmd = fmt.Sprintf("sudo mkdir -p %q", dir) + } + if _, _, _, err := c.Run(ctx, dirCmd); err != nil { + return log.E("ssh.Upload", "create parent dir", err) + } + + // Use cat to write the file (simpler than SCP) + writeCmd := fmt.Sprintf("cat > %q && chmod %o %q", remote, mode, remote) + + // If become is needed, we construct a command that reads password then content from stdin + // But we need to be careful with handling stdin for sudo + cat. + // We'll use a session with piped stdin. 
+ + session2, err := c.client.NewSession() + if err != nil { + return log.E("ssh.Upload", "new session for write", err) + } + defer func() { _ = session2.Close() }() + + stdin, err := session2.StdinPipe() + if err != nil { + return log.E("ssh.Upload", "stdin pipe", err) + } + + var stderrBuf bytes.Buffer + session2.Stderr = &stderrBuf + + if c.become { + becomeUser := c.becomeUser + if becomeUser == "" { + becomeUser = "root" + } + + pass := c.becomePass + if pass == "" { + pass = c.password + } + + if pass != "" { + // Use sudo -S with password from stdin + writeCmd = fmt.Sprintf("sudo -S -u %s bash -c 'cat > %q && chmod %o %q'", + becomeUser, remote, mode, remote) + } else { + // Use passwordless sudo (sudo -n) to avoid consuming file content as password + writeCmd = fmt.Sprintf("sudo -n -u %s bash -c 'cat > %q && chmod %o %q'", + becomeUser, remote, mode, remote) + } + + if err := session2.Start(writeCmd); err != nil { + return log.E("ssh.Upload", "start write", err) + } + + go func() { + defer func() { _ = stdin.Close() }() + if pass != "" { + _, _ = io.WriteString(stdin, pass+"\n") + } + _, _ = stdin.Write(content) + }() + } else { + // Normal write + if err := session2.Start(writeCmd); err != nil { + return log.E("ssh.Upload", "start write", err) + } + + go func() { + defer func() { _ = stdin.Close() }() + _, _ = stdin.Write(content) + }() + } + + if err := session2.Wait(); err != nil { + return log.E("ssh.Upload", fmt.Sprintf("write failed (stderr: %s)", stderrBuf.String()), err) + } + + return nil +} + +// Download copies a file from the remote host. 
+func (c *SSHClient) Download(ctx context.Context, remote string) ([]byte, error) { + if err := c.Connect(ctx); err != nil { + return nil, err + } + + cmd := fmt.Sprintf("cat %q", remote) + + stdout, stderr, exitCode, err := c.Run(ctx, cmd) + if err != nil { + return nil, err + } + if exitCode != 0 { + return nil, log.E("ssh.Download", fmt.Sprintf("cat failed: %s", stderr), nil) + } + + return []byte(stdout), nil +} + +// FileExists checks if a file exists on the remote host. +func (c *SSHClient) FileExists(ctx context.Context, path string) (bool, error) { + cmd := fmt.Sprintf("test -e %q && echo yes || echo no", path) + stdout, _, exitCode, err := c.Run(ctx, cmd) + if err != nil { + return false, err + } + if exitCode != 0 { + // test command failed but didn't error - file doesn't exist + return false, nil + } + return strings.TrimSpace(stdout) == "yes", nil +} + +// Stat returns file info from the remote host. +func (c *SSHClient) Stat(ctx context.Context, path string) (map[string]any, error) { + // Simple approach - get basic file info + cmd := fmt.Sprintf(` +if [ -e %q ]; then + if [ -d %q ]; then + echo "exists=true isdir=true" + else + echo "exists=true isdir=false" + fi +else + echo "exists=false" +fi +`, path, path) + + stdout, _, _, err := c.Run(ctx, cmd) + if err != nil { + return nil, err + } + + result := make(map[string]any) + parts := strings.Fields(strings.TrimSpace(stdout)) + for _, part := range parts { + kv := strings.SplitN(part, "=", 2) + if len(kv) == 2 { + result[kv[0]] = kv[1] == "true" + } + } + + return result, nil +} + +// SetBecome enables privilege escalation. 
+func (c *SSHClient) SetBecome(become bool, user, password string) { + c.mu.Lock() + defer c.mu.Unlock() + c.become = become + if user != "" { + c.becomeUser = user + } + if password != "" { + c.becomePass = password + } +} diff --git a/ansible/ssh_test.go b/ansible/ssh_test.go new file mode 100644 index 0000000..17179b0 --- /dev/null +++ b/ansible/ssh_test.go @@ -0,0 +1,36 @@ +package ansible + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestNewSSHClient(t *testing.T) { + cfg := SSHConfig{ + Host: "localhost", + Port: 2222, + User: "root", + } + + client, err := NewSSHClient(cfg) + assert.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, "localhost", client.host) + assert.Equal(t, 2222, client.port) + assert.Equal(t, "root", client.user) + assert.Equal(t, 30*time.Second, client.timeout) +} + +func TestSSHConfig_Defaults(t *testing.T) { + cfg := SSHConfig{ + Host: "localhost", + } + + client, err := NewSSHClient(cfg) + assert.NoError(t, err) + assert.Equal(t, 22, client.port) + assert.Equal(t, "root", client.user) + assert.Equal(t, 30*time.Second, client.timeout) +} diff --git a/ansible/types.go b/ansible/types.go new file mode 100644 index 0000000..5a6939f --- /dev/null +++ b/ansible/types.go @@ -0,0 +1,258 @@ +package ansible + +import ( + "time" +) + +// Playbook represents an Ansible playbook. +type Playbook struct { + Plays []Play `yaml:",inline"` +} + +// Play represents a single play in a playbook. 
+type Play struct { + Name string `yaml:"name"` + Hosts string `yaml:"hosts"` + Connection string `yaml:"connection,omitempty"` + Become bool `yaml:"become,omitempty"` + BecomeUser string `yaml:"become_user,omitempty"` + GatherFacts *bool `yaml:"gather_facts,omitempty"` + Vars map[string]any `yaml:"vars,omitempty"` + PreTasks []Task `yaml:"pre_tasks,omitempty"` + Tasks []Task `yaml:"tasks,omitempty"` + PostTasks []Task `yaml:"post_tasks,omitempty"` + Roles []RoleRef `yaml:"roles,omitempty"` + Handlers []Task `yaml:"handlers,omitempty"` + Tags []string `yaml:"tags,omitempty"` + Environment map[string]string `yaml:"environment,omitempty"` + Serial any `yaml:"serial,omitempty"` // int or string + MaxFailPercent int `yaml:"max_fail_percentage,omitempty"` +} + +// RoleRef represents a role reference in a play. +type RoleRef struct { + Role string `yaml:"role,omitempty"` + Name string `yaml:"name,omitempty"` // Alternative to role + TasksFrom string `yaml:"tasks_from,omitempty"` + Vars map[string]any `yaml:"vars,omitempty"` + When any `yaml:"when,omitempty"` + Tags []string `yaml:"tags,omitempty"` +} + +// UnmarshalYAML handles both string and struct role refs. +func (r *RoleRef) UnmarshalYAML(unmarshal func(any) error) error { + // Try string first + var s string + if err := unmarshal(&s); err == nil { + r.Role = s + return nil + } + + // Try struct + type rawRoleRef RoleRef + var raw rawRoleRef + if err := unmarshal(&raw); err != nil { + return err + } + *r = RoleRef(raw) + if r.Role == "" && r.Name != "" { + r.Role = r.Name + } + return nil +} + +// Task represents an Ansible task. 
type Task struct {
	Name         string            `yaml:"name,omitempty"`
	Module       string            `yaml:"-"` // Derived from the module key
	Args         map[string]any    `yaml:"-"` // Module arguments
	Register     string            `yaml:"register,omitempty"` // variable name to store the task result under
	When         any               `yaml:"when,omitempty"` // string or []string
	Loop         any               `yaml:"loop,omitempty"` // string or []any
	LoopControl  *LoopControl      `yaml:"loop_control,omitempty"`
	Vars         map[string]any    `yaml:"vars,omitempty"`
	Environment  map[string]string `yaml:"environment,omitempty"`
	ChangedWhen  any               `yaml:"changed_when,omitempty"`
	FailedWhen   any               `yaml:"failed_when,omitempty"`
	IgnoreErrors bool              `yaml:"ignore_errors,omitempty"`
	NoLog        bool              `yaml:"no_log,omitempty"`
	Become       *bool             `yaml:"become,omitempty"` // nil presumably inherits the play-level setting -- TODO confirm in executor
	BecomeUser   string            `yaml:"become_user,omitempty"`
	Delegate     string            `yaml:"delegate_to,omitempty"`
	RunOnce      bool              `yaml:"run_once,omitempty"`
	Tags         []string          `yaml:"tags,omitempty"`
	Block        []Task            `yaml:"block,omitempty"`
	Rescue       []Task            `yaml:"rescue,omitempty"`
	Always       []Task            `yaml:"always,omitempty"`
	Notify       any               `yaml:"notify,omitempty"` // string or []string
	Retries      int               `yaml:"retries,omitempty"`
	Delay        int               `yaml:"delay,omitempty"`
	Until        string            `yaml:"until,omitempty"`

	// Include/import directives
	IncludeTasks string `yaml:"include_tasks,omitempty"`
	ImportTasks  string `yaml:"import_tasks,omitempty"`
	IncludeRole  *struct {
		Name      string         `yaml:"name"`
		TasksFrom string         `yaml:"tasks_from,omitempty"`
		Vars      map[string]any `yaml:"vars,omitempty"`
	} `yaml:"include_role,omitempty"`
	ImportRole *struct {
		Name      string         `yaml:"name"`
		TasksFrom string         `yaml:"tasks_from,omitempty"`
		Vars      map[string]any `yaml:"vars,omitempty"`
	} `yaml:"import_role,omitempty"`

	// Raw YAML for module extraction
	raw map[string]any
}

// LoopControl controls loop behavior.
type LoopControl struct {
	LoopVar  string `yaml:"loop_var,omitempty"`
	IndexVar string `yaml:"index_var,omitempty"`
	Label    string `yaml:"label,omitempty"`
	Pause    int    `yaml:"pause,omitempty"`
	Extended bool   `yaml:"extended,omitempty"`
}

// TaskResult holds the result of executing a task.
type TaskResult struct {
	Changed  bool           `json:"changed"`
	Failed   bool           `json:"failed"`
	Skipped  bool           `json:"skipped"`
	Msg      string         `json:"msg,omitempty"`
	Stdout   string         `json:"stdout,omitempty"`
	Stderr   string         `json:"stderr,omitempty"`
	RC       int            `json:"rc,omitempty"`
	Results  []TaskResult   `json:"results,omitempty"` // For loops
	Data     map[string]any `json:"data,omitempty"`     // Module-specific data
	Duration time.Duration  `json:"duration,omitempty"`
}

// Inventory represents Ansible inventory.
type Inventory struct {
	// All is the root group; every host and child group hangs off it.
	All *InventoryGroup `yaml:"all"`
}

// InventoryGroup represents a group in inventory.
type InventoryGroup struct {
	Hosts    map[string]*Host           `yaml:"hosts,omitempty"`
	Children map[string]*InventoryGroup `yaml:"children,omitempty"`
	Vars     map[string]any             `yaml:"vars,omitempty"`
}

// Host represents a host in inventory.
type Host struct {
	AnsibleHost              string `yaml:"ansible_host,omitempty"`
	AnsiblePort              int    `yaml:"ansible_port,omitempty"`
	AnsibleUser              string `yaml:"ansible_user,omitempty"`
	AnsiblePassword          string `yaml:"ansible_password,omitempty"`
	AnsibleSSHPrivateKeyFile string `yaml:"ansible_ssh_private_key_file,omitempty"`
	AnsibleConnection        string `yaml:"ansible_connection,omitempty"`
	AnsibleBecomePassword    string `yaml:"ansible_become_password,omitempty"`

	// Custom vars
	Vars map[string]any `yaml:",inline"`
}

// Facts holds gathered facts about a host.
type Facts struct {
	Hostname     string `json:"ansible_hostname"`
	FQDN         string `json:"ansible_fqdn"`
	OS           string `json:"ansible_os_family"`
	Distribution string `json:"ansible_distribution"`
	Version      string `json:"ansible_distribution_version"`
	Architecture string `json:"ansible_architecture"`
	Kernel       string `json:"ansible_kernel"`
	Memory       int64  `json:"ansible_memtotal_mb"`
	CPUs         int    `json:"ansible_processor_vcpus"`
	IPv4         string `json:"ansible_default_ipv4_address"`
}

// Known Ansible modules
// KnownModules lists the module names this package recognizes, both in
// fully-qualified ansible.builtin form and the legacy short form.
var KnownModules = []string{
	// Builtin
	"ansible.builtin.shell",
	"ansible.builtin.command",
	"ansible.builtin.raw",
	"ansible.builtin.script",
	"ansible.builtin.copy",
	"ansible.builtin.template",
	"ansible.builtin.file",
	"ansible.builtin.lineinfile",
	"ansible.builtin.blockinfile",
	"ansible.builtin.stat",
	"ansible.builtin.slurp",
	"ansible.builtin.fetch",
	"ansible.builtin.get_url",
	"ansible.builtin.uri",
	"ansible.builtin.apt",
	"ansible.builtin.apt_key",
	"ansible.builtin.apt_repository",
	"ansible.builtin.yum",
	"ansible.builtin.dnf",
	"ansible.builtin.package",
	"ansible.builtin.pip",
	"ansible.builtin.service",
	"ansible.builtin.systemd",
	"ansible.builtin.user",
	"ansible.builtin.group",
	"ansible.builtin.cron",
	"ansible.builtin.git",
	"ansible.builtin.unarchive",
	"ansible.builtin.archive",
	"ansible.builtin.debug",
	"ansible.builtin.fail",
	"ansible.builtin.assert",
	"ansible.builtin.pause",
	"ansible.builtin.wait_for",
	"ansible.builtin.set_fact",
	"ansible.builtin.include_vars",
	"ansible.builtin.add_host",
	"ansible.builtin.group_by",
	"ansible.builtin.meta",
	"ansible.builtin.setup",

	// Short forms (legacy)
	"shell",
	"command",
	"raw",
	"script",
	"copy",
	"template",
	"file",
	"lineinfile",
	"blockinfile",
	"stat",
	"slurp",
	"fetch",
	"get_url",
	"uri",
	"apt",
	"apt_key",
	"apt_repository",
	"yum",
	"dnf",
	"package",
	"pip",
	"service",
	"systemd",
	"user",
	"group",
	"cron",
	"git",
	"unarchive",
	"archive",
	"debug",
	"fail",
	"assert",
	"pause",
	"wait_for",
	"set_fact",
	"include_vars",
	"add_host",
	"group_by",
	"meta",
	"setup",
}
diff --git a/build/archive.go b/build/archive.go
new file mode 100644
index 0000000..aa54021
--- /dev/null
+++ b/build/archive.go
@@ -0,0 +1,297 @@
// Package build provides project type detection and cross-compilation for the Core build system.
package build

import (
	"archive/tar"
	"archive/zip"
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"path/filepath"
	"strings"

	"github.com/Snider/Borg/pkg/compress"
	io_interface "forge.lthn.ai/core/go/pkg/io"
)

// ArchiveFormat specifies the compression format for archives.
type ArchiveFormat string

const (
	// ArchiveFormatGzip uses tar.gz (gzip compression) - widely compatible.
	ArchiveFormatGzip ArchiveFormat = "gz"
	// ArchiveFormatXZ uses tar.xz (xz/LZMA2 compression) - better compression ratio.
	ArchiveFormatXZ ArchiveFormat = "xz"
	// ArchiveFormatZip uses zip - for Windows.
	ArchiveFormatZip ArchiveFormat = "zip"
)

// Archive creates an archive for a single artifact using gzip compression.
// Uses tar.gz for linux/darwin and zip for windows.
// The archive is created alongside the binary (e.g., dist/myapp_linux_amd64.tar.gz).
// Returns a new Artifact with Path pointing to the archive.
func Archive(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
	return ArchiveWithFormat(fs, artifact, ArchiveFormatGzip)
}

// ArchiveXZ creates an archive for a single artifact using xz compression.
// Uses tar.xz for linux/darwin and zip for windows.
// Returns a new Artifact with Path pointing to the archive.
func ArchiveXZ(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
	return ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
}

// ArchiveWithFormat creates an archive for a single artifact with the specified format.
+// Uses tar.gz or tar.xz for linux/darwin and zip for windows. +// The archive is created alongside the binary (e.g., dist/myapp_linux_amd64.tar.xz). +// Returns a new Artifact with Path pointing to the archive. +func ArchiveWithFormat(fs io_interface.Medium, artifact Artifact, format ArchiveFormat) (Artifact, error) { + if artifact.Path == "" { + return Artifact{}, fmt.Errorf("build.Archive: artifact path is empty") + } + + // Verify the source file exists + info, err := fs.Stat(artifact.Path) + if err != nil { + return Artifact{}, fmt.Errorf("build.Archive: source file not found: %w", err) + } + if info.IsDir() { + return Artifact{}, fmt.Errorf("build.Archive: source path is a directory, expected file") + } + + // Determine archive type based on OS and format + var archivePath string + var archiveFunc func(fs io_interface.Medium, src, dst string) error + + if artifact.OS == "windows" { + archivePath = archiveFilename(artifact, ".zip") + archiveFunc = createZipArchive + } else { + switch format { + case ArchiveFormatXZ: + archivePath = archiveFilename(artifact, ".tar.xz") + archiveFunc = createTarXzArchive + default: + archivePath = archiveFilename(artifact, ".tar.gz") + archiveFunc = createTarGzArchive + } + } + + // Create the archive + if err := archiveFunc(fs, artifact.Path, archivePath); err != nil { + return Artifact{}, fmt.Errorf("build.Archive: failed to create archive: %w", err) + } + + return Artifact{ + Path: archivePath, + OS: artifact.OS, + Arch: artifact.Arch, + Checksum: artifact.Checksum, + }, nil +} + +// ArchiveAll archives all artifacts using gzip compression. +// Returns a slice of new artifacts pointing to the archives. +func ArchiveAll(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) { + return ArchiveAllWithFormat(fs, artifacts, ArchiveFormatGzip) +} + +// ArchiveAllXZ archives all artifacts using xz compression. +// Returns a slice of new artifacts pointing to the archives. 
+func ArchiveAllXZ(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) { + return ArchiveAllWithFormat(fs, artifacts, ArchiveFormatXZ) +} + +// ArchiveAllWithFormat archives all artifacts with the specified format. +// Returns a slice of new artifacts pointing to the archives. +func ArchiveAllWithFormat(fs io_interface.Medium, artifacts []Artifact, format ArchiveFormat) ([]Artifact, error) { + if len(artifacts) == 0 { + return nil, nil + } + + var archived []Artifact + for _, artifact := range artifacts { + arch, err := ArchiveWithFormat(fs, artifact, format) + if err != nil { + return archived, fmt.Errorf("build.ArchiveAll: failed to archive %s: %w", artifact.Path, err) + } + archived = append(archived, arch) + } + + return archived, nil +} + +// archiveFilename generates the archive filename based on the artifact and extension. +// Format: dist/myapp_linux_amd64.tar.gz (binary name taken from artifact path). +func archiveFilename(artifact Artifact, ext string) string { + // Get the directory containing the binary (e.g., dist/linux_amd64) + dir := filepath.Dir(artifact.Path) + // Go up one level to the output directory (e.g., dist) + outputDir := filepath.Dir(dir) + + // Get the binary name without extension + binaryName := filepath.Base(artifact.Path) + binaryName = strings.TrimSuffix(binaryName, ".exe") + + // Construct archive name: myapp_linux_amd64.tar.gz + archiveName := fmt.Sprintf("%s_%s_%s%s", binaryName, artifact.OS, artifact.Arch, ext) + + return filepath.Join(outputDir, archiveName) +} + +// createTarXzArchive creates a tar.xz archive containing a single file. +// Uses Borg's compress package for xz compression. 
+func createTarXzArchive(fs io_interface.Medium, src, dst string) error { + // Open the source file + srcFile, err := fs.Open(src) + if err != nil { + return fmt.Errorf("failed to open source file: %w", err) + } + defer func() { _ = srcFile.Close() }() + + srcInfo, err := srcFile.Stat() + if err != nil { + return fmt.Errorf("failed to stat source file: %w", err) + } + + // Create tar archive in memory + var tarBuf bytes.Buffer + tarWriter := tar.NewWriter(&tarBuf) + + // Create tar header + header, err := tar.FileInfoHeader(srcInfo, "") + if err != nil { + return fmt.Errorf("failed to create tar header: %w", err) + } + header.Name = filepath.Base(src) + + if err := tarWriter.WriteHeader(header); err != nil { + return fmt.Errorf("failed to write tar header: %w", err) + } + + if _, err := io.Copy(tarWriter, srcFile); err != nil { + return fmt.Errorf("failed to write file content to tar: %w", err) + } + + if err := tarWriter.Close(); err != nil { + return fmt.Errorf("failed to close tar writer: %w", err) + } + + // Compress with xz using Borg + xzData, err := compress.Compress(tarBuf.Bytes(), "xz") + if err != nil { + return fmt.Errorf("failed to compress with xz: %w", err) + } + + // Write to destination file + dstFile, err := fs.Create(dst) + if err != nil { + return fmt.Errorf("failed to create archive file: %w", err) + } + defer func() { _ = dstFile.Close() }() + + if _, err := dstFile.Write(xzData); err != nil { + return fmt.Errorf("failed to write archive file: %w", err) + } + + return nil +} + +// createTarGzArchive creates a tar.gz archive containing a single file. 
+func createTarGzArchive(fs io_interface.Medium, src, dst string) error { + // Open the source file + srcFile, err := fs.Open(src) + if err != nil { + return fmt.Errorf("failed to open source file: %w", err) + } + defer func() { _ = srcFile.Close() }() + + srcInfo, err := srcFile.Stat() + if err != nil { + return fmt.Errorf("failed to stat source file: %w", err) + } + + // Create the destination file + dstFile, err := fs.Create(dst) + if err != nil { + return fmt.Errorf("failed to create archive file: %w", err) + } + defer func() { _ = dstFile.Close() }() + + // Create gzip writer + gzWriter := gzip.NewWriter(dstFile) + defer func() { _ = gzWriter.Close() }() + + // Create tar writer + tarWriter := tar.NewWriter(gzWriter) + defer func() { _ = tarWriter.Close() }() + + // Create tar header + header, err := tar.FileInfoHeader(srcInfo, "") + if err != nil { + return fmt.Errorf("failed to create tar header: %w", err) + } + // Use just the filename, not the full path + header.Name = filepath.Base(src) + + // Write header + if err := tarWriter.WriteHeader(header); err != nil { + return fmt.Errorf("failed to write tar header: %w", err) + } + + // Write file content + if _, err := io.Copy(tarWriter, srcFile); err != nil { + return fmt.Errorf("failed to write file content to tar: %w", err) + } + + return nil +} + +// createZipArchive creates a zip archive containing a single file. 
+func createZipArchive(fs io_interface.Medium, src, dst string) error { + // Open the source file + srcFile, err := fs.Open(src) + if err != nil { + return fmt.Errorf("failed to open source file: %w", err) + } + defer func() { _ = srcFile.Close() }() + + srcInfo, err := srcFile.Stat() + if err != nil { + return fmt.Errorf("failed to stat source file: %w", err) + } + + // Create the destination file + dstFile, err := fs.Create(dst) + if err != nil { + return fmt.Errorf("failed to create archive file: %w", err) + } + defer func() { _ = dstFile.Close() }() + + // Create zip writer + zipWriter := zip.NewWriter(dstFile) + defer func() { _ = zipWriter.Close() }() + + // Create zip header + header, err := zip.FileInfoHeader(srcInfo) + if err != nil { + return fmt.Errorf("failed to create zip header: %w", err) + } + // Use just the filename, not the full path + header.Name = filepath.Base(src) + header.Method = zip.Deflate + + // Create file in archive + writer, err := zipWriter.CreateHeader(header) + if err != nil { + return fmt.Errorf("failed to create zip entry: %w", err) + } + + // Write file content + if _, err := io.Copy(writer, srcFile); err != nil { + return fmt.Errorf("failed to write file content to zip: %w", err) + } + + return nil +} diff --git a/build/archive_test.go b/build/archive_test.go new file mode 100644 index 0000000..9edb520 --- /dev/null +++ b/build/archive_test.go @@ -0,0 +1,397 @@ +package build + +import ( + "archive/tar" + "archive/zip" + "bytes" + "compress/gzip" + "io" + "os" + "path/filepath" + "testing" + + "github.com/Snider/Borg/pkg/compress" + io_interface "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// setupArchiveTestFile creates a test binary file in a temp directory with the standard structure. +// Returns the path to the binary and the output directory. 
+func setupArchiveTestFile(t *testing.T, name, os_, arch string) (binaryPath string, outputDir string) { + t.Helper() + + outputDir = t.TempDir() + + // Create platform directory: dist/os_arch + platformDir := filepath.Join(outputDir, os_+"_"+arch) + err := os.MkdirAll(platformDir, 0755) + require.NoError(t, err) + + // Create test binary + binaryPath = filepath.Join(platformDir, name) + content := []byte("#!/bin/bash\necho 'Hello, World!'\n") + err = os.WriteFile(binaryPath, content, 0755) + require.NoError(t, err) + + return binaryPath, outputDir +} + +func TestArchive_Good(t *testing.T) { + fs := io_interface.Local + t.Run("creates tar.gz for linux", func(t *testing.T) { + binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "linux", "amd64") + + artifact := Artifact{ + Path: binaryPath, + OS: "linux", + Arch: "amd64", + } + + result, err := Archive(fs, artifact) + require.NoError(t, err) + + // Verify archive was created + expectedPath := filepath.Join(outputDir, "myapp_linux_amd64.tar.gz") + assert.Equal(t, expectedPath, result.Path) + assert.FileExists(t, result.Path) + + // Verify OS and Arch are preserved + assert.Equal(t, "linux", result.OS) + assert.Equal(t, "amd64", result.Arch) + + // Verify archive content + verifyTarGzContent(t, result.Path, "myapp") + }) + + t.Run("creates tar.gz for darwin", func(t *testing.T) { + binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "darwin", "arm64") + + artifact := Artifact{ + Path: binaryPath, + OS: "darwin", + Arch: "arm64", + } + + result, err := Archive(fs, artifact) + require.NoError(t, err) + + expectedPath := filepath.Join(outputDir, "myapp_darwin_arm64.tar.gz") + assert.Equal(t, expectedPath, result.Path) + assert.FileExists(t, result.Path) + + verifyTarGzContent(t, result.Path, "myapp") + }) + + t.Run("creates zip for windows", func(t *testing.T) { + binaryPath, outputDir := setupArchiveTestFile(t, "myapp.exe", "windows", "amd64") + + artifact := Artifact{ + Path: binaryPath, + OS: "windows", + 
Arch: "amd64", + } + + result, err := Archive(fs, artifact) + require.NoError(t, err) + + // Windows archives should strip .exe from archive name + expectedPath := filepath.Join(outputDir, "myapp_windows_amd64.zip") + assert.Equal(t, expectedPath, result.Path) + assert.FileExists(t, result.Path) + + verifyZipContent(t, result.Path, "myapp.exe") + }) + + t.Run("preserves checksum field", func(t *testing.T) { + binaryPath, _ := setupArchiveTestFile(t, "myapp", "linux", "amd64") + + artifact := Artifact{ + Path: binaryPath, + OS: "linux", + Arch: "amd64", + Checksum: "abc123", + } + + result, err := Archive(fs, artifact) + require.NoError(t, err) + assert.Equal(t, "abc123", result.Checksum) + }) + + t.Run("creates tar.xz for linux with ArchiveXZ", func(t *testing.T) { + binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "linux", "amd64") + + artifact := Artifact{ + Path: binaryPath, + OS: "linux", + Arch: "amd64", + } + + result, err := ArchiveXZ(fs, artifact) + require.NoError(t, err) + + expectedPath := filepath.Join(outputDir, "myapp_linux_amd64.tar.xz") + assert.Equal(t, expectedPath, result.Path) + assert.FileExists(t, result.Path) + + verifyTarXzContent(t, result.Path, "myapp") + }) + + t.Run("creates tar.xz for darwin with ArchiveWithFormat", func(t *testing.T) { + binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "darwin", "arm64") + + artifact := Artifact{ + Path: binaryPath, + OS: "darwin", + Arch: "arm64", + } + + result, err := ArchiveWithFormat(fs, artifact, ArchiveFormatXZ) + require.NoError(t, err) + + expectedPath := filepath.Join(outputDir, "myapp_darwin_arm64.tar.xz") + assert.Equal(t, expectedPath, result.Path) + assert.FileExists(t, result.Path) + + verifyTarXzContent(t, result.Path, "myapp") + }) + + t.Run("windows still uses zip even with xz format", func(t *testing.T) { + binaryPath, outputDir := setupArchiveTestFile(t, "myapp.exe", "windows", "amd64") + + artifact := Artifact{ + Path: binaryPath, + OS: "windows", + Arch: "amd64", 
+ } + + result, err := ArchiveWithFormat(fs, artifact, ArchiveFormatXZ) + require.NoError(t, err) + + // Windows should still get .zip regardless of format + expectedPath := filepath.Join(outputDir, "myapp_windows_amd64.zip") + assert.Equal(t, expectedPath, result.Path) + assert.FileExists(t, result.Path) + + verifyZipContent(t, result.Path, "myapp.exe") + }) +} + +func TestArchive_Bad(t *testing.T) { + fs := io_interface.Local + t.Run("returns error for empty path", func(t *testing.T) { + artifact := Artifact{ + Path: "", + OS: "linux", + Arch: "amd64", + } + + result, err := Archive(fs, artifact) + assert.Error(t, err) + assert.Contains(t, err.Error(), "artifact path is empty") + assert.Empty(t, result.Path) + }) + + t.Run("returns error for non-existent file", func(t *testing.T) { + artifact := Artifact{ + Path: "/nonexistent/path/binary", + OS: "linux", + Arch: "amd64", + } + + result, err := Archive(fs, artifact) + assert.Error(t, err) + assert.Contains(t, err.Error(), "source file not found") + assert.Empty(t, result.Path) + }) + + t.Run("returns error for directory path", func(t *testing.T) { + dir := t.TempDir() + + artifact := Artifact{ + Path: dir, + OS: "linux", + Arch: "amd64", + } + + result, err := Archive(fs, artifact) + assert.Error(t, err) + assert.Contains(t, err.Error(), "source path is a directory") + assert.Empty(t, result.Path) + }) +} + +func TestArchiveAll_Good(t *testing.T) { + fs := io_interface.Local + t.Run("archives multiple artifacts", func(t *testing.T) { + outputDir := t.TempDir() + + // Create multiple binaries + var artifacts []Artifact + targets := []struct { + os_ string + arch string + }{ + {"linux", "amd64"}, + {"linux", "arm64"}, + {"darwin", "arm64"}, + {"windows", "amd64"}, + } + + for _, target := range targets { + platformDir := filepath.Join(outputDir, target.os_+"_"+target.arch) + err := os.MkdirAll(platformDir, 0755) + require.NoError(t, err) + + name := "myapp" + if target.os_ == "windows" { + name = "myapp.exe" + } + 
+ binaryPath := filepath.Join(platformDir, name) + err = os.WriteFile(binaryPath, []byte("binary content"), 0755) + require.NoError(t, err) + + artifacts = append(artifacts, Artifact{ + Path: binaryPath, + OS: target.os_, + Arch: target.arch, + }) + } + + results, err := ArchiveAll(fs, artifacts) + require.NoError(t, err) + require.Len(t, results, 4) + + // Verify all archives were created + for i, result := range results { + assert.FileExists(t, result.Path) + assert.Equal(t, artifacts[i].OS, result.OS) + assert.Equal(t, artifacts[i].Arch, result.Arch) + } + }) + + t.Run("returns nil for empty slice", func(t *testing.T) { + results, err := ArchiveAll(fs, []Artifact{}) + assert.NoError(t, err) + assert.Nil(t, results) + }) + + t.Run("returns nil for nil slice", func(t *testing.T) { + results, err := ArchiveAll(fs, nil) + assert.NoError(t, err) + assert.Nil(t, results) + }) +} + +func TestArchiveAll_Bad(t *testing.T) { + fs := io_interface.Local + t.Run("returns partial results on error", func(t *testing.T) { + binaryPath, _ := setupArchiveTestFile(t, "myapp", "linux", "amd64") + + artifacts := []Artifact{ + {Path: binaryPath, OS: "linux", Arch: "amd64"}, + {Path: "/nonexistent/binary", OS: "linux", Arch: "arm64"}, // This will fail + } + + results, err := ArchiveAll(fs, artifacts) + assert.Error(t, err) + // Should have the first successful result + assert.Len(t, results, 1) + assert.FileExists(t, results[0].Path) + }) +} + +func TestArchiveFilename_Good(t *testing.T) { + t.Run("generates correct tar.gz filename", func(t *testing.T) { + artifact := Artifact{ + Path: "/output/linux_amd64/myapp", + OS: "linux", + Arch: "amd64", + } + + filename := archiveFilename(artifact, ".tar.gz") + assert.Equal(t, "/output/myapp_linux_amd64.tar.gz", filename) + }) + + t.Run("generates correct zip filename", func(t *testing.T) { + artifact := Artifact{ + Path: "/output/windows_amd64/myapp.exe", + OS: "windows", + Arch: "amd64", + } + + filename := archiveFilename(artifact, ".zip") 
+ assert.Equal(t, "/output/myapp_windows_amd64.zip", filename) + }) + + t.Run("handles nested output directories", func(t *testing.T) { + artifact := Artifact{ + Path: "/project/dist/linux_arm64/cli", + OS: "linux", + Arch: "arm64", + } + + filename := archiveFilename(artifact, ".tar.gz") + assert.Equal(t, "/project/dist/cli_linux_arm64.tar.gz", filename) + }) +} + +// verifyTarGzContent opens a tar.gz file and verifies it contains the expected file. +func verifyTarGzContent(t *testing.T, archivePath, expectedName string) { + t.Helper() + + file, err := os.Open(archivePath) + require.NoError(t, err) + defer func() { _ = file.Close() }() + + gzReader, err := gzip.NewReader(file) + require.NoError(t, err) + defer func() { _ = gzReader.Close() }() + + tarReader := tar.NewReader(gzReader) + + header, err := tarReader.Next() + require.NoError(t, err) + assert.Equal(t, expectedName, header.Name) + + // Verify there's only one file + _, err = tarReader.Next() + assert.Equal(t, io.EOF, err) +} + +// verifyZipContent opens a zip file and verifies it contains the expected file. +func verifyZipContent(t *testing.T, archivePath, expectedName string) { + t.Helper() + + reader, err := zip.OpenReader(archivePath) + require.NoError(t, err) + defer func() { _ = reader.Close() }() + + require.Len(t, reader.File, 1) + assert.Equal(t, expectedName, reader.File[0].Name) +} + +// verifyTarXzContent opens a tar.xz file and verifies it contains the expected file. 
func verifyTarXzContent(t *testing.T, archivePath, expectedName string) {
	t.Helper()

	// Read the xz-compressed file
	xzData, err := os.ReadFile(archivePath)
	require.NoError(t, err)

	// Decompress with Borg
	tarData, err := compress.Decompress(xzData)
	require.NoError(t, err)

	// Read tar archive
	tarReader := tar.NewReader(bytes.NewReader(tarData))

	header, err := tarReader.Next()
	require.NoError(t, err)
	assert.Equal(t, expectedName, header.Name)

	// Verify there's only one file
	_, err = tarReader.Next()
	assert.Equal(t, io.EOF, err)
}
diff --git a/build/build.go b/build/build.go
new file mode 100644
index 0000000..34fb6f5
--- /dev/null
+++ b/build/build.go
@@ -0,0 +1,90 @@
// Package build provides project type detection and cross-compilation for the Core build system.
// It supports Go, Wails, Node.js, and PHP projects with automatic detection based on
// marker files (go.mod, wails.json, package.json, composer.json).
package build

import (
	"context"

	"forge.lthn.ai/core/go/pkg/io"
)

// ProjectType represents a detected project type.
type ProjectType string

// Project type constants for build detection.
const (
	// ProjectTypeGo indicates a standard Go project with go.mod.
	ProjectTypeGo ProjectType = "go"
	// ProjectTypeWails indicates a Wails desktop application.
	ProjectTypeWails ProjectType = "wails"
	// ProjectTypeNode indicates a Node.js project with package.json.
	ProjectTypeNode ProjectType = "node"
	// ProjectTypePHP indicates a PHP/Laravel project with composer.json.
	ProjectTypePHP ProjectType = "php"
	// ProjectTypeCPP indicates a C++ project with CMakeLists.txt.
	ProjectTypeCPP ProjectType = "cpp"
	// ProjectTypeDocker indicates a Docker-based project with Dockerfile.
	ProjectTypeDocker ProjectType = "docker"
	// ProjectTypeLinuxKit indicates a LinuxKit VM configuration.
	ProjectTypeLinuxKit ProjectType = "linuxkit"
	// ProjectTypeTaskfile indicates a project using Taskfile automation.
	ProjectTypeTaskfile ProjectType = "taskfile"
)

// Target represents a build target platform.
type Target struct {
	OS   string // GOOS-style value, e.g. "linux", "darwin", "windows"
	Arch string // GOARCH-style value, e.g. "amd64", "arm64"
}

// String returns the target in GOOS/GOARCH format.
func (t Target) String() string {
	return t.OS + "/" + t.Arch
}

// Artifact represents a build output file.
type Artifact struct {
	Path     string // filesystem path to the binary or archive
	OS       string
	Arch     string
	Checksum string // carried through archiving unchanged; may be empty
}

// Config holds build configuration.
type Config struct {
	// FS is the medium used for file operations.
	FS io.Medium
	// ProjectDir is the root directory of the project.
	ProjectDir string
	// OutputDir is where build artifacts are placed.
	OutputDir string
	// Name is the output binary name.
	Name string
	// Version is the build version string.
	Version string
	// LDFlags are additional linker flags.
	LDFlags []string

	// Docker-specific config
	Dockerfile string            // Path to Dockerfile (default: Dockerfile)
	Registry   string            // Container registry (default: ghcr.io)
	Image      string            // Image name (owner/repo format)
	Tags       []string          // Additional tags to apply
	BuildArgs  map[string]string // Docker build arguments
	Push       bool              // Whether to push after build

	// LinuxKit-specific config
	LinuxKitConfig string   // Path to LinuxKit YAML config
	Formats        []string // Output formats (iso, qcow2, raw, vmdk)
}

// Builder defines the interface for project-specific build implementations.
type Builder interface {
	// Name returns the builder's identifier.
	Name() string
	// Detect checks if this builder can handle the project in the given directory.
	Detect(fs io.Medium, dir string) (bool, error)
	// Build compiles the project for the specified targets.
+ Build(ctx context.Context, cfg *Config, targets []Target) ([]Artifact, error) +} diff --git a/build/buildcmd/cmd_build.go b/build/buildcmd/cmd_build.go new file mode 100644 index 0000000..8f73a0a --- /dev/null +++ b/build/buildcmd/cmd_build.go @@ -0,0 +1,144 @@ +// Package buildcmd provides project build commands with auto-detection. +package buildcmd + +import ( + "embed" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "github.com/spf13/cobra" +) + +func init() { + cli.RegisterCommands(AddBuildCommands) +} + +// Style aliases from shared package +var ( + buildHeaderStyle = cli.TitleStyle + buildTargetStyle = cli.ValueStyle + buildSuccessStyle = cli.SuccessStyle + buildErrorStyle = cli.ErrorStyle + buildDimStyle = cli.DimStyle +) + +//go:embed all:tmpl/gui +var guiTemplate embed.FS + +// Flags for the main build command +var ( + buildType string + ciMode bool + targets string + outputDir string + doArchive bool + doChecksum bool + verbose bool + + // Docker/LinuxKit specific flags + configPath string + format string + push bool + imageName string + + // Signing flags + noSign bool + notarize bool + + // from-path subcommand + fromPath string + + // pwa subcommand + pwaURL string + + // sdk subcommand + sdkSpec string + sdkLang string + sdkVersion string + sdkDryRun bool +) + +var buildCmd = &cobra.Command{ + Use: "build", + Short: i18n.T("cmd.build.short"), + Long: i18n.T("cmd.build.long"), + RunE: func(cmd *cobra.Command, args []string) error { + return runProjectBuild(cmd.Context(), buildType, ciMode, targets, outputDir, doArchive, doChecksum, configPath, format, push, imageName, noSign, notarize, verbose) + }, +} + +var fromPathCmd = &cobra.Command{ + Use: "from-path", + Short: i18n.T("cmd.build.from_path.short"), + RunE: func(cmd *cobra.Command, args []string) error { + if fromPath == "" { + return errPathRequired + } + return runBuild(fromPath) + }, +} + +var pwaCmd = &cobra.Command{ + Use: "pwa", + Short: 
i18n.T("cmd.build.pwa.short"), + RunE: func(cmd *cobra.Command, args []string) error { + if pwaURL == "" { + return errURLRequired + } + return runPwaBuild(pwaURL) + }, +} + +var sdkBuildCmd = &cobra.Command{ + Use: "sdk", + Short: i18n.T("cmd.build.sdk.short"), + Long: i18n.T("cmd.build.sdk.long"), + RunE: func(cmd *cobra.Command, args []string) error { + return runBuildSDK(sdkSpec, sdkLang, sdkVersion, sdkDryRun) + }, +} + +func initBuildFlags() { + // Main build command flags + buildCmd.Flags().StringVar(&buildType, "type", "", i18n.T("cmd.build.flag.type")) + buildCmd.Flags().BoolVar(&ciMode, "ci", false, i18n.T("cmd.build.flag.ci")) + buildCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, i18n.T("common.flag.verbose")) + buildCmd.Flags().StringVar(&targets, "targets", "", i18n.T("cmd.build.flag.targets")) + buildCmd.Flags().StringVar(&outputDir, "output", "", i18n.T("cmd.build.flag.output")) + buildCmd.Flags().BoolVar(&doArchive, "archive", true, i18n.T("cmd.build.flag.archive")) + buildCmd.Flags().BoolVar(&doChecksum, "checksum", true, i18n.T("cmd.build.flag.checksum")) + + // Docker/LinuxKit specific + buildCmd.Flags().StringVar(&configPath, "config", "", i18n.T("cmd.build.flag.config")) + buildCmd.Flags().StringVar(&format, "format", "", i18n.T("cmd.build.flag.format")) + buildCmd.Flags().BoolVar(&push, "push", false, i18n.T("cmd.build.flag.push")) + buildCmd.Flags().StringVar(&imageName, "image", "", i18n.T("cmd.build.flag.image")) + + // Signing flags + buildCmd.Flags().BoolVar(&noSign, "no-sign", false, i18n.T("cmd.build.flag.no_sign")) + buildCmd.Flags().BoolVar(¬arize, "notarize", false, i18n.T("cmd.build.flag.notarize")) + + // from-path subcommand flags + fromPathCmd.Flags().StringVar(&fromPath, "path", "", i18n.T("cmd.build.from_path.flag.path")) + + // pwa subcommand flags + pwaCmd.Flags().StringVar(&pwaURL, "url", "", i18n.T("cmd.build.pwa.flag.url")) + + // sdk subcommand flags + sdkBuildCmd.Flags().StringVar(&sdkSpec, "spec", "", 
i18n.T("common.flag.spec")) + sdkBuildCmd.Flags().StringVar(&sdkLang, "lang", "", i18n.T("cmd.build.sdk.flag.lang")) + sdkBuildCmd.Flags().StringVar(&sdkVersion, "version", "", i18n.T("cmd.build.sdk.flag.version")) + sdkBuildCmd.Flags().BoolVar(&sdkDryRun, "dry-run", false, i18n.T("cmd.build.sdk.flag.dry_run")) + + // Add subcommands + buildCmd.AddCommand(fromPathCmd) + buildCmd.AddCommand(pwaCmd) + buildCmd.AddCommand(sdkBuildCmd) +} + +// AddBuildCommands registers the 'build' command and all subcommands. +func AddBuildCommands(root *cobra.Command) { + initBuildFlags() + AddReleaseCommand(buildCmd) + root.AddCommand(buildCmd) +} diff --git a/build/buildcmd/cmd_commands.go b/build/buildcmd/cmd_commands.go new file mode 100644 index 0000000..310d558 --- /dev/null +++ b/build/buildcmd/cmd_commands.go @@ -0,0 +1,21 @@ +// Package buildcmd provides project build commands with auto-detection. +// +// Supports building: +// - Go projects (standard and cross-compilation) +// - Wails desktop applications +// - Docker images +// - LinuxKit VM images +// - Taskfile-based projects +// +// Configuration via .core/build.yaml or command-line flags. +// +// Subcommands: +// - build: Auto-detect and build the current project +// - build from-path: Build from a local static web app directory +// - build pwa: Build from a live PWA URL +// - build sdk: Generate API SDKs from OpenAPI spec +package buildcmd + +// Note: The AddBuildCommands function is defined in cmd_build.go +// This file exists for documentation purposes and maintains the original +// package documentation from commands.go. diff --git a/build/buildcmd/cmd_project.go b/build/buildcmd/cmd_project.go new file mode 100644 index 0000000..386f1e7 --- /dev/null +++ b/build/buildcmd/cmd_project.go @@ -0,0 +1,392 @@ +// cmd_project.go implements the main project build logic. 
+// +// This handles auto-detection of project types (Go, Wails, Docker, LinuxKit, Taskfile) +// and orchestrates the build process including signing, archiving, and checksums. + +package buildcmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go-devops/build/builders" + "forge.lthn.ai/core/go-devops/build/signing" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" +) + +// runProjectBuild handles the main `core build` command with auto-detection. +func runProjectBuild(ctx context.Context, buildType string, ciMode bool, targetsFlag string, outputDir string, doArchive bool, doChecksum bool, configPath string, format string, push bool, imageName string, noSign bool, notarize bool, verbose bool) error { + // Use local filesystem as the default medium + fs := io.Local + + // Get current working directory as project root + projectDir, err := os.Getwd() + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "get working directory"}), err) + } + + // Load configuration from .core/build.yaml (or defaults) + buildCfg, err := build.LoadConfig(fs, projectDir) + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "load config"}), err) + } + + // Detect project type if not specified + var projectType build.ProjectType + if buildType != "" { + projectType = build.ProjectType(buildType) + } else { + projectType, err = build.PrimaryType(fs, projectDir) + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "detect project type"}), err) + } + if projectType == "" { + return fmt.Errorf("%s", i18n.T("cmd.build.error.no_project_type", map[string]interface{}{"Dir": projectDir})) + } + } + + // Determine targets + var buildTargets []build.Target + if targetsFlag != "" { + // Parse from command line + buildTargets, 
err = parseTargets(targetsFlag) + if err != nil { + return err + } + } else if len(buildCfg.Targets) > 0 { + // Use config targets + buildTargets = buildCfg.ToTargets() + } else { + // Fall back to current OS/arch + buildTargets = []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + } + + // Determine output directory + if outputDir == "" { + outputDir = "dist" + } + if !filepath.IsAbs(outputDir) { + outputDir = filepath.Join(projectDir, outputDir) + } + outputDir = filepath.Clean(outputDir) + + // Ensure config path is absolute if provided + if configPath != "" && !filepath.IsAbs(configPath) { + configPath = filepath.Join(projectDir, configPath) + } + + // Determine binary name + binaryName := buildCfg.Project.Binary + if binaryName == "" { + binaryName = buildCfg.Project.Name + } + if binaryName == "" { + binaryName = filepath.Base(projectDir) + } + + // Print build info (verbose mode only) + if verbose && !ciMode { + fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.build")), i18n.T("cmd.build.building_project")) + fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.type"), buildTargetStyle.Render(string(projectType))) + fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.output"), buildTargetStyle.Render(outputDir)) + fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.binary"), buildTargetStyle.Render(binaryName)) + fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.targets"), buildTargetStyle.Render(formatTargets(buildTargets))) + fmt.Println() + } + + // Get the appropriate builder + builder, err := getBuilder(projectType) + if err != nil { + return err + } + + // Create build config for the builder + cfg := &build.Config{ + FS: fs, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: binaryName, + Version: buildCfg.Project.Name, // Could be enhanced with git describe + LDFlags: buildCfg.Build.LDFlags, + // Docker/LinuxKit specific + Dockerfile: configPath, // Reuse for Dockerfile path + LinuxKitConfig: configPath, + Push: push, + Image: 
imageName, + } + + // Parse formats for LinuxKit + if format != "" { + cfg.Formats = strings.Split(format, ",") + } + + // Execute build + artifacts, err := builder.Build(ctx, cfg, buildTargets) + if err != nil { + if !ciMode { + fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err) + } + return err + } + + if verbose && !ciMode { + fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("common.label.success")), i18n.T("cmd.build.built_artifacts", map[string]interface{}{"Count": len(artifacts)})) + fmt.Println() + for _, artifact := range artifacts { + relPath, err := filepath.Rel(projectDir, artifact.Path) + if err != nil { + relPath = artifact.Path + } + fmt.Printf(" %s %s %s\n", + buildSuccessStyle.Render("*"), + buildTargetStyle.Render(relPath), + buildDimStyle.Render(fmt.Sprintf("(%s/%s)", artifact.OS, artifact.Arch)), + ) + } + } + + // Sign macOS binaries if enabled + signCfg := buildCfg.Sign + if notarize { + signCfg.MacOS.Notarize = true + } + if noSign { + signCfg.Enabled = false + } + + if signCfg.Enabled && runtime.GOOS == "darwin" { + if verbose && !ciMode { + fmt.Println() + fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.sign")), i18n.T("cmd.build.signing_binaries")) + } + + // Convert build.Artifact to signing.Artifact + signingArtifacts := make([]signing.Artifact, len(artifacts)) + for i, a := range artifacts { + signingArtifacts[i] = signing.Artifact{Path: a.Path, OS: a.OS, Arch: a.Arch} + } + + if err := signing.SignBinaries(ctx, fs, signCfg, signingArtifacts); err != nil { + if !ciMode { + fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.signing_failed"), err) + } + return err + } + + if signCfg.MacOS.Notarize { + if err := signing.NotarizeBinaries(ctx, fs, signCfg, signingArtifacts); err != nil { + if !ciMode { + fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.notarization_failed"), err) 
+ } + return err + } + } + } + + // Archive artifacts if enabled + var archivedArtifacts []build.Artifact + if doArchive && len(artifacts) > 0 { + if verbose && !ciMode { + fmt.Println() + fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.archive")), i18n.T("cmd.build.creating_archives")) + } + + archivedArtifacts, err = build.ArchiveAll(fs, artifacts) + if err != nil { + if !ciMode { + fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.archive_failed"), err) + } + return err + } + + if verbose && !ciMode { + for _, artifact := range archivedArtifacts { + relPath, err := filepath.Rel(projectDir, artifact.Path) + if err != nil { + relPath = artifact.Path + } + fmt.Printf(" %s %s %s\n", + buildSuccessStyle.Render("*"), + buildTargetStyle.Render(relPath), + buildDimStyle.Render(fmt.Sprintf("(%s/%s)", artifact.OS, artifact.Arch)), + ) + } + } + } + + // Compute checksums if enabled + var checksummedArtifacts []build.Artifact + if doChecksum && len(archivedArtifacts) > 0 { + checksummedArtifacts, err = computeAndWriteChecksums(ctx, projectDir, outputDir, archivedArtifacts, signCfg, ciMode, verbose) + if err != nil { + return err + } + } else if doChecksum && len(artifacts) > 0 && !doArchive { + // Checksum raw binaries if archiving is disabled + checksummedArtifacts, err = computeAndWriteChecksums(ctx, projectDir, outputDir, artifacts, signCfg, ciMode, verbose) + if err != nil { + return err + } + } + + // Output results + if ciMode { + // Determine which artifacts to output (prefer checksummed > archived > raw) + var outputArtifacts []build.Artifact + if len(checksummedArtifacts) > 0 { + outputArtifacts = checksummedArtifacts + } else if len(archivedArtifacts) > 0 { + outputArtifacts = archivedArtifacts + } else { + outputArtifacts = artifacts + } + + // JSON output for CI + output, err := json.MarshalIndent(outputArtifacts, "", " ") + if err != nil { + return fmt.Errorf("%s: %w", 
i18n.T("common.error.failed", map[string]any{"Action": "marshal artifacts"}), err) + } + fmt.Println(string(output)) + } else if !verbose { + // Minimal output: just success with artifact count + fmt.Printf("%s %s %s\n", + buildSuccessStyle.Render(i18n.T("common.label.success")), + i18n.T("cmd.build.built_artifacts", map[string]interface{}{"Count": len(artifacts)}), + buildDimStyle.Render(fmt.Sprintf("(%s)", outputDir)), + ) + } + + return nil +} + +// computeAndWriteChecksums computes checksums for artifacts and writes CHECKSUMS.txt. +func computeAndWriteChecksums(ctx context.Context, projectDir, outputDir string, artifacts []build.Artifact, signCfg signing.SignConfig, ciMode bool, verbose bool) ([]build.Artifact, error) { + fs := io.Local + if verbose && !ciMode { + fmt.Println() + fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.checksum")), i18n.T("cmd.build.computing_checksums")) + } + + checksummedArtifacts, err := build.ChecksumAll(fs, artifacts) + if err != nil { + if !ciMode { + fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.checksum_failed"), err) + } + return nil, err + } + + // Write CHECKSUMS.txt + checksumPath := filepath.Join(outputDir, "CHECKSUMS.txt") + if err := build.WriteChecksumFile(fs, checksummedArtifacts, checksumPath); err != nil { + if !ciMode { + fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("common.error.failed", map[string]any{"Action": "write CHECKSUMS.txt"}), err) + } + return nil, err + } + + // Sign checksums with GPG + if signCfg.Enabled { + if err := signing.SignChecksums(ctx, fs, signCfg, checksumPath); err != nil { + if !ciMode { + fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.gpg_signing_failed"), err) + } + return nil, err + } + } + + if verbose && !ciMode { + for _, artifact := range checksummedArtifacts { + relPath, err := filepath.Rel(projectDir, 
artifact.Path) + if err != nil { + relPath = artifact.Path + } + fmt.Printf(" %s %s\n", + buildSuccessStyle.Render("*"), + buildTargetStyle.Render(relPath), + ) + fmt.Printf(" %s\n", buildDimStyle.Render(artifact.Checksum)) + } + + relChecksumPath, err := filepath.Rel(projectDir, checksumPath) + if err != nil { + relChecksumPath = checksumPath + } + fmt.Printf(" %s %s\n", + buildSuccessStyle.Render("*"), + buildTargetStyle.Render(relChecksumPath), + ) + } + + return checksummedArtifacts, nil +} + +// parseTargets parses a comma-separated list of OS/arch pairs. +func parseTargets(targetsFlag string) ([]build.Target, error) { + parts := strings.Split(targetsFlag, ",") + var targets []build.Target + + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "" { + continue + } + + osArch := strings.Split(part, "/") + if len(osArch) != 2 { + return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.invalid_target", map[string]interface{}{"Target": part})) + } + + targets = append(targets, build.Target{ + OS: strings.TrimSpace(osArch[0]), + Arch: strings.TrimSpace(osArch[1]), + }) + } + + if len(targets) == 0 { + return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.no_targets")) + } + + return targets, nil +} + +// formatTargets returns a human-readable string of targets. +func formatTargets(targets []build.Target) string { + var parts []string + for _, t := range targets { + parts = append(parts, t.String()) + } + return strings.Join(parts, ", ") +} + +// getBuilder returns the appropriate builder for the project type. 
+func getBuilder(projectType build.ProjectType) (build.Builder, error) { + switch projectType { + case build.ProjectTypeWails: + return builders.NewWailsBuilder(), nil + case build.ProjectTypeGo: + return builders.NewGoBuilder(), nil + case build.ProjectTypeDocker: + return builders.NewDockerBuilder(), nil + case build.ProjectTypeLinuxKit: + return builders.NewLinuxKitBuilder(), nil + case build.ProjectTypeTaskfile: + return builders.NewTaskfileBuilder(), nil + case build.ProjectTypeCPP: + return builders.NewCPPBuilder(), nil + case build.ProjectTypeNode: + return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.node_not_implemented")) + case build.ProjectTypePHP: + return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.php_not_implemented")) + default: + return nil, fmt.Errorf("%s: %s", i18n.T("cmd.build.error.unsupported_type"), projectType) + } +} diff --git a/build/buildcmd/cmd_pwa.go b/build/buildcmd/cmd_pwa.go new file mode 100644 index 0000000..1fdc0b6 --- /dev/null +++ b/build/buildcmd/cmd_pwa.go @@ -0,0 +1,324 @@ +// cmd_pwa.go implements PWA and legacy GUI build functionality. +// +// Supports building desktop applications from: +// - Local static web application directories +// - Live PWA URLs (downloads and packages) + +package buildcmd + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go/pkg/i18n" + "github.com/leaanthony/debme" + "github.com/leaanthony/gosod" + "golang.org/x/net/html" +) + +// Error sentinels for build commands +var ( + errPathRequired = errors.New("the --path flag is required") + errURLRequired = errors.New("the --url flag is required") +) + +// runPwaBuild downloads a PWA from URL and builds it. 
+func runPwaBuild(pwaURL string) error { + fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.starting"), pwaURL) + + tempDir, err := os.MkdirTemp("", "core-pwa-build-*") + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "create temporary directory"}), err) + } + // defer os.RemoveAll(tempDir) // Keep temp dir for debugging + fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.downloading_to"), tempDir) + + if err := downloadPWA(pwaURL, tempDir); err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "download PWA"}), err) + } + + return runBuild(tempDir) +} + +// downloadPWA fetches a PWA from a URL and saves assets locally. +func downloadPWA(baseURL, destDir string) error { + // Fetch the main HTML page + resp, err := http.Get(baseURL) + if err != nil { + return fmt.Errorf("%s %s: %w", i18n.T("common.error.failed", map[string]any{"Action": "fetch URL"}), baseURL, err) + } + defer func() { _ = resp.Body.Close() }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "read response body"}), err) + } + + // Find the manifest URL from the HTML + manifestURL, err := findManifestURL(string(body), baseURL) + if err != nil { + // If no manifest, it's not a PWA, but we can still try to package it as a simple site. 
+ fmt.Printf("%s %s\n", i18n.T("common.label.warning"), i18n.T("cmd.build.pwa.no_manifest")) + if err := os.WriteFile(filepath.Join(destDir, "index.html"), body, 0644); err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "write index.html"}), err) + } + return nil + } + + fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.found_manifest"), manifestURL) + + // Fetch and parse the manifest + manifest, err := fetchManifest(manifestURL) + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "fetch or parse manifest"}), err) + } + + // Download all assets listed in the manifest + assets := collectAssets(manifest, manifestURL) + for _, assetURL := range assets { + if err := downloadAsset(assetURL, destDir); err != nil { + fmt.Printf("%s %s %s: %v\n", i18n.T("common.label.warning"), i18n.T("common.error.failed", map[string]any{"Action": "download asset"}), assetURL, err) + } + } + + // Also save the root index.html + if err := os.WriteFile(filepath.Join(destDir, "index.html"), body, 0644); err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "write index.html"}), err) + } + + fmt.Println(i18n.T("cmd.build.pwa.download_complete")) + return nil +} + +// findManifestURL extracts the manifest URL from HTML content. 
+func findManifestURL(htmlContent, baseURL string) (string, error) { + doc, err := html.Parse(strings.NewReader(htmlContent)) + if err != nil { + return "", err + } + + var manifestPath string + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "link" { + var rel, href string + for _, a := range n.Attr { + if a.Key == "rel" { + rel = a.Val + } + if a.Key == "href" { + href = a.Val + } + } + if rel == "manifest" && href != "" { + manifestPath = href + return + } + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + + if manifestPath == "" { + return "", fmt.Errorf("%s", i18n.T("cmd.build.pwa.error.no_manifest_tag")) + } + + base, err := url.Parse(baseURL) + if err != nil { + return "", err + } + + manifestURL, err := base.Parse(manifestPath) + if err != nil { + return "", err + } + + return manifestURL.String(), nil +} + +// fetchManifest downloads and parses a PWA manifest. +func fetchManifest(manifestURL string) (map[string]interface{}, error) { + resp, err := http.Get(manifestURL) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + + var manifest map[string]interface{} + if err := json.NewDecoder(resp.Body).Decode(&manifest); err != nil { + return nil, err + } + return manifest, nil +} + +// collectAssets extracts asset URLs from a PWA manifest. 
// collectAssets extracts downloadable asset URLs (start_url and icon sources)
// from a PWA manifest, resolving each against the manifest's own URL.
// Unresolvable entries are skipped; a nil slice is returned when the manifest
// URL itself cannot be parsed.
func collectAssets(manifest map[string]interface{}, manifestURL string) []string {
	base, err := url.Parse(manifestURL)
	if err != nil {
		// The original ignored this error, leaving base nil and panicking on
		// base.Parse below.
		return nil
	}

	var assets []string

	// Add start_url.
	if startURL, ok := manifest["start_url"].(string); ok {
		if resolved, err := base.Parse(startURL); err == nil {
			assets = append(assets, resolved.String())
		}
	}

	// Add icons.
	if icons, ok := manifest["icons"].([]interface{}); ok {
		for _, icon := range icons {
			iconMap, ok := icon.(map[string]interface{})
			if !ok {
				continue
			}
			if src, ok := iconMap["src"].(string); ok {
				if resolved, err := base.Parse(src); err == nil {
					assets = append(assets, resolved.String())
				}
			}
		}
	}

	return assets
}

// downloadAsset fetches a single asset and saves it under destDir, mirroring
// the URL path. Error responses (status >= 400) are reported instead of being
// written to disk, and paths that would escape destDir are rejected.
func downloadAsset(assetURL, destDir string) error {
	u, err := url.Parse(assetURL)
	if err != nil {
		return err
	}

	// Reject URL paths (e.g. containing "..") that traverse outside destDir.
	path := filepath.Join(destDir, filepath.FromSlash(u.Path))
	if rel, err := filepath.Rel(destDir, path); err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
		return fmt.Errorf("asset path %q escapes destination directory", u.Path)
	}

	resp, err := http.Get(assetURL)
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode >= 400 {
		return fmt.Errorf("download %s: unexpected status %s", assetURL, resp.Status)
	}

	if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
		return err
	}

	out, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() { _ = out.Close() }()

	_, err = io.Copy(out, resp.Body)
	return err
}
+func runBuild(fromPath string) error { + fmt.Printf("%s %s\n", i18n.T("cmd.build.from_path.starting"), fromPath) + + info, err := os.Stat(fromPath) + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.invalid_path"), err) + } + if !info.IsDir() { + return fmt.Errorf("%s", i18n.T("cmd.build.from_path.error.must_be_directory")) + } + + buildDir := ".core/build/app" + htmlDir := filepath.Join(buildDir, "html") + appName := filepath.Base(fromPath) + if strings.HasPrefix(appName, "core-pwa-build-") { + appName = "pwa-app" + } + outputExe := appName + + if err := os.RemoveAll(buildDir); err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "clean build directory"}), err) + } + + // 1. Generate the project from the embedded template + fmt.Println(i18n.T("cmd.build.from_path.generating_template")) + templateFS, err := debme.FS(guiTemplate, "tmpl/gui") + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "anchor template filesystem"}), err) + } + sod := gosod.New(templateFS) + if sod == nil { + return fmt.Errorf("%s", i18n.T("common.error.failed", map[string]any{"Action": "create new sod instance"})) + } + + templateData := map[string]string{"AppName": appName} + if err := sod.Extract(buildDir, templateData); err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "extract template"}), err) + } + + // 2. Copy the user's web app files + fmt.Println(i18n.T("cmd.build.from_path.copying_files")) + if err := copyDir(fromPath, htmlDir); err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "copy application files"}), err) + } + + // 3. 
Compile the application + fmt.Println(i18n.T("cmd.build.from_path.compiling")) + + // Run go mod tidy + cmd := exec.Command("go", "mod", "tidy") + cmd.Dir = buildDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.go_mod_tidy"), err) + } + + // Run go build + cmd = exec.Command("go", "build", "-o", outputExe) + cmd.Dir = buildDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.go_build"), err) + } + + fmt.Printf("\n%s %s/%s\n", i18n.T("cmd.build.from_path.success"), buildDir, outputExe) + return nil +} + +// copyDir recursively copies a directory from src to dst. +func copyDir(src, dst string) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(src, path) + if err != nil { + return err + } + + dstPath := filepath.Join(dst, relPath) + + if info.IsDir() { + return os.MkdirAll(dstPath, info.Mode()) + } + + srcFile, err := os.Open(path) + if err != nil { + return err + } + defer func() { _ = srcFile.Close() }() + + dstFile, err := os.Create(dstPath) + if err != nil { + return err + } + defer func() { _ = dstFile.Close() }() + + _, err = io.Copy(dstFile, srcFile) + return err + }) +} diff --git a/build/buildcmd/cmd_release.go b/build/buildcmd/cmd_release.go new file mode 100644 index 0000000..cd1883c --- /dev/null +++ b/build/buildcmd/cmd_release.go @@ -0,0 +1,111 @@ +// cmd_release.go implements the release command: build + archive + publish in one step. 
+ +package buildcmd + +import ( + "context" + "os" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/framework/core" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go-devops/release" +) + +// Flag variables for release command +var ( + releaseVersion string + releaseDraft bool + releasePrerelease bool + releaseGoForLaunch bool +) + +var releaseCmd = &cli.Command{ + Use: "release", + Short: i18n.T("cmd.build.release.short"), + Long: i18n.T("cmd.build.release.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runRelease(cmd.Context(), !releaseGoForLaunch, releaseVersion, releaseDraft, releasePrerelease) + }, +} + +func init() { + releaseCmd.Flags().BoolVar(&releaseGoForLaunch, "we-are-go-for-launch", false, i18n.T("cmd.build.release.flag.go_for_launch")) + releaseCmd.Flags().StringVar(&releaseVersion, "version", "", i18n.T("cmd.build.release.flag.version")) + releaseCmd.Flags().BoolVar(&releaseDraft, "draft", false, i18n.T("cmd.build.release.flag.draft")) + releaseCmd.Flags().BoolVar(&releasePrerelease, "prerelease", false, i18n.T("cmd.build.release.flag.prerelease")) +} + +// AddReleaseCommand adds the release subcommand to the build command. +func AddReleaseCommand(buildCmd *cli.Command) { + buildCmd.AddCommand(releaseCmd) +} + +// runRelease executes the full release workflow: build + archive + checksum + publish. 
+func runRelease(ctx context.Context, dryRun bool, version string, draft, prerelease bool) error { + // Get current directory + projectDir, err := os.Getwd() + if err != nil { + return core.E("release", "get working directory", err) + } + + // Check for release config + if !release.ConfigExists(projectDir) { + cli.Print("%s %s\n", + buildErrorStyle.Render(i18n.Label("error")), + i18n.T("cmd.build.release.error.no_config"), + ) + cli.Print(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.release.hint.create_config"))) + return core.E("release", "config not found", nil) + } + + // Load configuration + cfg, err := release.LoadConfig(projectDir) + if err != nil { + return core.E("release", "load config", err) + } + + // Apply CLI overrides + if version != "" { + cfg.SetVersion(version) + } + + // Apply draft/prerelease overrides to all publishers + if draft || prerelease { + for i := range cfg.Publishers { + if draft { + cfg.Publishers[i].Draft = true + } + if prerelease { + cfg.Publishers[i].Prerelease = true + } + } + } + + // Print header + cli.Print("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.release.label.release")), i18n.T("cmd.build.release.building_and_publishing")) + if dryRun { + cli.Print(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.release.dry_run_hint"))) + } + cli.Blank() + + // Run full release (build + archive + checksum + publish) + rel, err := release.Run(ctx, cfg, dryRun) + if err != nil { + return err + } + + // Print summary + cli.Blank() + cli.Print("%s %s\n", buildSuccessStyle.Render(i18n.T("i18n.done.pass")), i18n.T("cmd.build.release.completed")) + cli.Print(" %s %s\n", i18n.Label("version"), buildTargetStyle.Render(rel.Version)) + cli.Print(" %s %d\n", i18n.T("cmd.build.release.label.artifacts"), len(rel.Artifacts)) + + if !dryRun { + for _, pub := range cfg.Publishers { + cli.Print(" %s %s\n", i18n.T("cmd.build.release.label.published"), buildTargetStyle.Render(pub.Type)) + } + } + + return nil +} diff --git 
a/build/buildcmd/cmd_sdk.go b/build/buildcmd/cmd_sdk.go new file mode 100644 index 0000000..d7fd7ca --- /dev/null +++ b/build/buildcmd/cmd_sdk.go @@ -0,0 +1,82 @@ +// cmd_sdk.go implements SDK generation from OpenAPI specifications. +// +// Generates typed API clients for TypeScript, Python, Go, and PHP +// from OpenAPI/Swagger specifications. + +package buildcmd + +import ( + "context" + "fmt" + "os" + "strings" + + "forge.lthn.ai/core/go-devops/sdk" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// runBuildSDK handles the `core build sdk` command. +func runBuildSDK(specPath, lang, version string, dryRun bool) error { + ctx := context.Background() + + projectDir, err := os.Getwd() + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "get working directory"}), err) + } + + // Load config + config := sdk.DefaultConfig() + if specPath != "" { + config.Spec = specPath + } + + s := sdk.New(projectDir, config) + if version != "" { + s.SetVersion(version) + } + + fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.sdk.label")), i18n.T("cmd.build.sdk.generating")) + if dryRun { + fmt.Printf(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.sdk.dry_run_mode"))) + } + fmt.Println() + + // Detect spec + detectedSpec, err := s.DetectSpec() + if err != nil { + fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err) + return err + } + fmt.Printf(" %s %s\n", i18n.T("common.label.spec"), buildTargetStyle.Render(detectedSpec)) + + if dryRun { + if lang != "" { + fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.language_label"), buildTargetStyle.Render(lang)) + } else { + fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.languages_label"), buildTargetStyle.Render(strings.Join(config.Languages, ", "))) + } + fmt.Println() + fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("cmd.build.label.ok")), i18n.T("cmd.build.sdk.would_generate")) + return nil + } + + if lang != "" { + // Generate single language + if err 
:= s.GenerateLanguage(ctx, lang); err != nil { + fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err) + return err + } + fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.generated_label"), buildTargetStyle.Render(lang)) + } else { + // Generate all + if err := s.Generate(ctx); err != nil { + fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err) + return err + } + fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.generated_label"), buildTargetStyle.Render(strings.Join(config.Languages, ", "))) + } + + fmt.Println() + fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("common.label.success")), i18n.T("cmd.build.sdk.complete")) + return nil +} diff --git a/build/buildcmd/tmpl/gui/go.mod.tmpl b/build/buildcmd/tmpl/gui/go.mod.tmpl new file mode 100644 index 0000000..1a30708 --- /dev/null +++ b/build/buildcmd/tmpl/gui/go.mod.tmpl @@ -0,0 +1,7 @@ +module {{.AppName}} + +go 1.21 + +require ( + github.com/wailsapp/wails/v3 v3.0.0-alpha.8 +) diff --git a/build/buildcmd/tmpl/gui/html/.gitkeep b/build/buildcmd/tmpl/gui/html/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/build/buildcmd/tmpl/gui/html/.placeholder b/build/buildcmd/tmpl/gui/html/.placeholder new file mode 100644 index 0000000..1044078 --- /dev/null +++ b/build/buildcmd/tmpl/gui/html/.placeholder @@ -0,0 +1 @@ +// This file ensures the 'html' directory is correctly embedded by the Go compiler. 
diff --git a/build/buildcmd/tmpl/gui/main.go.tmpl b/build/buildcmd/tmpl/gui/main.go.tmpl new file mode 100644 index 0000000..2b71fed --- /dev/null +++ b/build/buildcmd/tmpl/gui/main.go.tmpl @@ -0,0 +1,25 @@ +package main + +import ( + "embed" + "log" + + "github.com/wailsapp/wails/v3/pkg/application" +) + +//go:embed all:html +var assets embed.FS + +func main() { + app := application.New(application.Options{ + Name: "{{.AppName}}", + Description: "A web application enclaved by Core.", + Assets: application.AssetOptions{ + FS: assets, + }, + }) + + if err := app.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/build/builders/cpp.go b/build/builders/cpp.go new file mode 100644 index 0000000..0ce3b34 --- /dev/null +++ b/build/builders/cpp.go @@ -0,0 +1,253 @@ +// Package builders provides build implementations for different project types. +package builders + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" +) + +// CPPBuilder implements the Builder interface for C++ projects using CMake + Conan. +// It wraps the Makefile-based build system from the .core/build submodule. +type CPPBuilder struct{} + +// NewCPPBuilder creates a new CPPBuilder instance. +func NewCPPBuilder() *CPPBuilder { + return &CPPBuilder{} +} + +// Name returns the builder's identifier. +func (b *CPPBuilder) Name() string { + return "cpp" +} + +// Detect checks if this builder can handle the project in the given directory. +func (b *CPPBuilder) Detect(fs io.Medium, dir string) (bool, error) { + return build.IsCPPProject(fs, dir), nil +} + +// Build compiles the C++ project using Make targets. +// The build flow is: make configure → make build → make package. +// Cross-compilation is handled via Conan profiles specified in .core/build.yaml. 
+func (b *CPPBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) { + if cfg == nil { + return nil, fmt.Errorf("builders.CPPBuilder.Build: config is nil") + } + + // Validate make is available + if err := b.validateMake(); err != nil { + return nil, err + } + + // For C++ projects, the Makefile handles everything. + // We don't iterate per-target like Go — the Makefile's configure + build + // produces binaries for the host platform, and cross-compilation uses + // named Conan profiles (e.g., make gcc-linux-armv8). + if len(targets) == 0 { + // Default to host platform + targets = []build.Target{{OS: runtime.GOOS, Arch: runtime.GOARCH}} + } + + var artifacts []build.Artifact + + for _, target := range targets { + built, err := b.buildTarget(ctx, cfg, target) + if err != nil { + return artifacts, fmt.Errorf("builders.CPPBuilder.Build: %w", err) + } + artifacts = append(artifacts, built...) + } + + return artifacts, nil +} + +// buildTarget compiles for a single target platform. +func (b *CPPBuilder) buildTarget(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) { + // Determine if this is a cross-compile or host build + isHostBuild := target.OS == runtime.GOOS && target.Arch == runtime.GOARCH + + if isHostBuild { + return b.buildHost(ctx, cfg, target) + } + + return b.buildCross(ctx, cfg, target) +} + +// buildHost runs the standard make configure → make build → make package flow. 
+func (b *CPPBuilder) buildHost(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) { + fmt.Printf("Building C++ project for %s/%s (host)\n", target.OS, target.Arch) + + // Step 1: Configure (runs conan install + cmake configure) + if err := b.runMake(ctx, cfg.ProjectDir, "configure"); err != nil { + return nil, fmt.Errorf("configure failed: %w", err) + } + + // Step 2: Build + if err := b.runMake(ctx, cfg.ProjectDir, "build"); err != nil { + return nil, fmt.Errorf("build failed: %w", err) + } + + // Step 3: Package + if err := b.runMake(ctx, cfg.ProjectDir, "package"); err != nil { + return nil, fmt.Errorf("package failed: %w", err) + } + + // Discover artifacts from build/packages/ + return b.findArtifacts(cfg.FS, cfg.ProjectDir, target) +} + +// buildCross runs a cross-compilation using a Conan profile name. +// The Makefile supports profile targets like: make gcc-linux-armv8 +func (b *CPPBuilder) buildCross(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) { + // Map target to a Conan profile name + profile := b.targetToProfile(target) + if profile == "" { + return nil, fmt.Errorf("no Conan profile mapped for target %s/%s", target.OS, target.Arch) + } + + fmt.Printf("Building C++ project for %s/%s (cross: %s)\n", target.OS, target.Arch, profile) + + // The Makefile exposes each profile as a top-level target + if err := b.runMake(ctx, cfg.ProjectDir, profile); err != nil { + return nil, fmt.Errorf("cross-compile for %s failed: %w", profile, err) + } + + return b.findArtifacts(cfg.FS, cfg.ProjectDir, target) +} + +// runMake executes a make target in the project directory. 
+func (b *CPPBuilder) runMake(ctx context.Context, projectDir string, target string) error { + cmd := exec.CommandContext(ctx, "make", target) + cmd.Dir = projectDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = os.Environ() + + if err := cmd.Run(); err != nil { + return fmt.Errorf("make %s: %w", target, err) + } + return nil +} + +// findArtifacts searches for built packages in build/packages/. +func (b *CPPBuilder) findArtifacts(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) { + packagesDir := filepath.Join(projectDir, "build", "packages") + + if !fs.IsDir(packagesDir) { + // Fall back to searching build/release/src/ for raw binaries + return b.findBinaries(fs, projectDir, target) + } + + entries, err := fs.List(packagesDir) + if err != nil { + return nil, fmt.Errorf("failed to list packages directory: %w", err) + } + + var artifacts []build.Artifact + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + // Skip checksum files and hidden files + if strings.HasSuffix(name, ".sha256") || strings.HasPrefix(name, ".") { + continue + } + + artifacts = append(artifacts, build.Artifact{ + Path: filepath.Join(packagesDir, name), + OS: target.OS, + Arch: target.Arch, + }) + } + + return artifacts, nil +} + +// findBinaries searches for compiled binaries in build/release/src/. +func (b *CPPBuilder) findBinaries(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) { + binDir := filepath.Join(projectDir, "build", "release", "src") + + if !fs.IsDir(binDir) { + return nil, fmt.Errorf("no build output found in %s", binDir) + } + + entries, err := fs.List(binDir) + if err != nil { + return nil, fmt.Errorf("failed to list build directory: %w", err) + } + + var artifacts []build.Artifact + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + // Skip non-executable files (libraries, cmake files, etc.) 
+ if strings.HasSuffix(name, ".a") || strings.HasSuffix(name, ".o") || + strings.HasSuffix(name, ".cmake") || strings.HasPrefix(name, ".") { + continue + } + + fullPath := filepath.Join(binDir, name) + + // On Unix, check if file is executable + if target.OS != "windows" { + info, err := os.Stat(fullPath) + if err != nil { + continue + } + if info.Mode()&0111 == 0 { + continue + } + } + + artifacts = append(artifacts, build.Artifact{ + Path: fullPath, + OS: target.OS, + Arch: target.Arch, + }) + } + + return artifacts, nil +} + +// targetToProfile maps a build target to a Conan cross-compilation profile name. +// Profile names match those in .core/build/cmake/profiles/. +func (b *CPPBuilder) targetToProfile(target build.Target) string { + key := target.OS + "/" + target.Arch + profiles := map[string]string{ + "linux/amd64": "gcc-linux-x86_64", + "linux/x86_64": "gcc-linux-x86_64", + "linux/arm64": "gcc-linux-armv8", + "linux/armv8": "gcc-linux-armv8", + "darwin/arm64": "apple-clang-armv8", + "darwin/armv8": "apple-clang-armv8", + "darwin/amd64": "apple-clang-x86_64", + "darwin/x86_64": "apple-clang-x86_64", + "windows/amd64": "msvc-194-x86_64", + "windows/x86_64": "msvc-194-x86_64", + } + + return profiles[key] +} + +// validateMake checks if make is available. +func (b *CPPBuilder) validateMake() error { + if _, err := exec.LookPath("make"); err != nil { + return fmt.Errorf("cpp: make not found. Install build-essential (Linux) or Xcode Command Line Tools (macOS)") + } + return nil +} + +// Ensure CPPBuilder implements the Builder interface. 
+var _ build.Builder = (*CPPBuilder)(nil) diff --git a/build/builders/cpp_test.go b/build/builders/cpp_test.go new file mode 100644 index 0000000..afd9458 --- /dev/null +++ b/build/builders/cpp_test.go @@ -0,0 +1,149 @@ +package builders + +import ( + "os" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCPPBuilder_Name_Good(t *testing.T) { + builder := NewCPPBuilder() + assert.Equal(t, "cpp", builder.Name()) +} + +func TestCPPBuilder_Detect_Good(t *testing.T) { + fs := io.Local + + t.Run("detects C++ project with CMakeLists.txt", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "CMakeLists.txt"), []byte("cmake_minimum_required(VERSION 3.16)"), 0644) + require.NoError(t, err) + + builder := NewCPPBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.True(t, detected) + }) + + t.Run("returns false for non-C++ project", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644) + require.NoError(t, err) + + builder := NewCPPBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.False(t, detected) + }) + + t.Run("returns false for empty directory", func(t *testing.T) { + dir := t.TempDir() + + builder := NewCPPBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.False(t, detected) + }) +} + +func TestCPPBuilder_Build_Bad(t *testing.T) { + t.Run("returns error for nil config", func(t *testing.T) { + builder := NewCPPBuilder() + artifacts, err := builder.Build(nil, nil, []build.Target{{OS: "linux", Arch: "amd64"}}) + assert.Error(t, err) + assert.Nil(t, artifacts) + assert.Contains(t, err.Error(), "config is nil") + }) +} + +func TestCPPBuilder_TargetToProfile_Good(t *testing.T) { + builder := NewCPPBuilder() + + tests := []struct 
{ + os, arch string + expected string + }{ + {"linux", "amd64", "gcc-linux-x86_64"}, + {"linux", "x86_64", "gcc-linux-x86_64"}, + {"linux", "arm64", "gcc-linux-armv8"}, + {"darwin", "arm64", "apple-clang-armv8"}, + {"darwin", "amd64", "apple-clang-x86_64"}, + {"windows", "amd64", "msvc-194-x86_64"}, + } + + for _, tt := range tests { + t.Run(tt.os+"/"+tt.arch, func(t *testing.T) { + profile := builder.targetToProfile(build.Target{OS: tt.os, Arch: tt.arch}) + assert.Equal(t, tt.expected, profile) + }) + } +} + +func TestCPPBuilder_TargetToProfile_Bad(t *testing.T) { + builder := NewCPPBuilder() + + t.Run("returns empty for unknown target", func(t *testing.T) { + profile := builder.targetToProfile(build.Target{OS: "plan9", Arch: "mips"}) + assert.Empty(t, profile) + }) +} + +func TestCPPBuilder_FindArtifacts_Good(t *testing.T) { + fs := io.Local + + t.Run("finds packages in build/packages", func(t *testing.T) { + dir := t.TempDir() + packagesDir := filepath.Join(dir, "build", "packages") + require.NoError(t, os.MkdirAll(packagesDir, 0755)) + + // Create mock package files + require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz"), []byte("pkg"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz.sha256"), []byte("checksum"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.rpm"), []byte("rpm"), 0644)) + + builder := NewCPPBuilder() + target := build.Target{OS: "linux", Arch: "amd64"} + artifacts, err := builder.findArtifacts(fs, dir, target) + require.NoError(t, err) + + // Should find tar.xz and rpm but not sha256 + assert.Len(t, artifacts, 2) + for _, a := range artifacts { + assert.Equal(t, "linux", a.OS) + assert.Equal(t, "amd64", a.Arch) + assert.False(t, filepath.Ext(a.Path) == ".sha256") + } + }) + + t.Run("falls back to binaries in build/release/src", func(t *testing.T) { + dir := t.TempDir() + binDir := filepath.Join(dir, "build", 
"release", "src") + require.NoError(t, os.MkdirAll(binDir, 0755)) + + // Create mock binary (executable) + binPath := filepath.Join(binDir, "test-daemon") + require.NoError(t, os.WriteFile(binPath, []byte("binary"), 0755)) + + // Create a library (should be skipped) + require.NoError(t, os.WriteFile(filepath.Join(binDir, "libcrypto.a"), []byte("lib"), 0644)) + + builder := NewCPPBuilder() + target := build.Target{OS: "linux", Arch: "amd64"} + artifacts, err := builder.findArtifacts(fs, dir, target) + require.NoError(t, err) + + // Should find the executable but not the library + assert.Len(t, artifacts, 1) + assert.Contains(t, artifacts[0].Path, "test-daemon") + }) +} + +func TestCPPBuilder_Interface_Good(t *testing.T) { + var _ build.Builder = (*CPPBuilder)(nil) + var _ build.Builder = NewCPPBuilder() +} diff --git a/build/builders/docker.go b/build/builders/docker.go new file mode 100644 index 0000000..560c49c --- /dev/null +++ b/build/builders/docker.go @@ -0,0 +1,215 @@ +// Package builders provides build implementations for different project types. +package builders + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" +) + +// DockerBuilder builds Docker images. +type DockerBuilder struct{} + +// NewDockerBuilder creates a new Docker builder. +func NewDockerBuilder() *DockerBuilder { + return &DockerBuilder{} +} + +// Name returns the builder's identifier. +func (b *DockerBuilder) Name() string { + return "docker" +} + +// Detect checks if a Dockerfile exists in the directory. +func (b *DockerBuilder) Detect(fs io.Medium, dir string) (bool, error) { + dockerfilePath := filepath.Join(dir, "Dockerfile") + if fs.IsFile(dockerfilePath) { + return true, nil + } + return false, nil +} + +// Build builds Docker images for the specified targets. 
+func (b *DockerBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) { + // Validate docker CLI is available + if err := b.validateDockerCli(); err != nil { + return nil, err + } + + // Ensure buildx is available + if err := b.ensureBuildx(ctx); err != nil { + return nil, err + } + + // Determine Dockerfile path + dockerfile := cfg.Dockerfile + if dockerfile == "" { + dockerfile = filepath.Join(cfg.ProjectDir, "Dockerfile") + } + + // Validate Dockerfile exists + if !cfg.FS.IsFile(dockerfile) { + return nil, fmt.Errorf("docker.Build: Dockerfile not found: %s", dockerfile) + } + + // Determine image name + imageName := cfg.Image + if imageName == "" { + imageName = cfg.Name + } + if imageName == "" { + imageName = filepath.Base(cfg.ProjectDir) + } + + // Build platform string from targets + var platforms []string + for _, t := range targets { + platforms = append(platforms, fmt.Sprintf("%s/%s", t.OS, t.Arch)) + } + + // If no targets specified, use current platform + if len(platforms) == 0 { + platforms = []string{"linux/amd64"} + } + + // Determine registry + registry := cfg.Registry + if registry == "" { + registry = "ghcr.io" + } + + // Determine tags + tags := cfg.Tags + if len(tags) == 0 { + tags = []string{"latest"} + if cfg.Version != "" { + tags = append(tags, cfg.Version) + } + } + + // Build full image references + var imageRefs []string + for _, tag := range tags { + // Expand version template + expandedTag := strings.ReplaceAll(tag, "{{.Version}}", cfg.Version) + expandedTag = strings.ReplaceAll(expandedTag, "{{Version}}", cfg.Version) + + if registry != "" { + imageRefs = append(imageRefs, fmt.Sprintf("%s/%s:%s", registry, imageName, expandedTag)) + } else { + imageRefs = append(imageRefs, fmt.Sprintf("%s:%s", imageName, expandedTag)) + } + } + + // Build the docker buildx command + args := []string{"buildx", "build"} + + // Multi-platform support + args = append(args, "--platform", 
strings.Join(platforms, ",")) + + // Add all tags + for _, ref := range imageRefs { + args = append(args, "-t", ref) + } + + // Dockerfile path + args = append(args, "-f", dockerfile) + + // Build arguments + for k, v := range cfg.BuildArgs { + expandedValue := strings.ReplaceAll(v, "{{.Version}}", cfg.Version) + expandedValue = strings.ReplaceAll(expandedValue, "{{Version}}", cfg.Version) + args = append(args, "--build-arg", fmt.Sprintf("%s=%s", k, expandedValue)) + } + + // Always add VERSION build arg if version is set + if cfg.Version != "" { + args = append(args, "--build-arg", fmt.Sprintf("VERSION=%s", cfg.Version)) + } + + // Output to local docker images or push + if cfg.Push { + args = append(args, "--push") + } else { + // For multi-platform builds without push, we need to load or output somewhere + if len(platforms) == 1 { + args = append(args, "--load") + } else { + // Multi-platform builds can't use --load, output to tarball + outputPath := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s.tar", imageName)) + args = append(args, "--output", fmt.Sprintf("type=oci,dest=%s", outputPath)) + } + } + + // Build context (project directory) + args = append(args, cfg.ProjectDir) + + // Create output directory + if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil { + return nil, fmt.Errorf("docker.Build: failed to create output directory: %w", err) + } + + // Execute build + cmd := exec.CommandContext(ctx, "docker", args...) 
+ cmd.Dir = cfg.ProjectDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + fmt.Printf("Building Docker image: %s\n", imageName) + fmt.Printf(" Platforms: %s\n", strings.Join(platforms, ", ")) + fmt.Printf(" Tags: %s\n", strings.Join(imageRefs, ", ")) + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("docker.Build: buildx build failed: %w", err) + } + + // Create artifacts for each platform + var artifacts []build.Artifact + for _, t := range targets { + artifacts = append(artifacts, build.Artifact{ + Path: imageRefs[0], // Primary image reference + OS: t.OS, + Arch: t.Arch, + }) + } + + return artifacts, nil +} + +// validateDockerCli checks if the docker CLI is available. +func (b *DockerBuilder) validateDockerCli() error { + cmd := exec.Command("docker", "--version") + if err := cmd.Run(); err != nil { + return fmt.Errorf("docker: docker CLI not found. Install it from https://docs.docker.com/get-docker/") + } + return nil +} + +// ensureBuildx ensures docker buildx is available and has a builder. +func (b *DockerBuilder) ensureBuildx(ctx context.Context) error { + // Check if buildx is available + cmd := exec.CommandContext(ctx, "docker", "buildx", "version") + if err := cmd.Run(); err != nil { + return fmt.Errorf("docker: buildx is not available. 
Install it from https://docs.docker.com/buildx/working-with-buildx/") + } + + // Check if we have a builder, create one if not + cmd = exec.CommandContext(ctx, "docker", "buildx", "inspect", "--bootstrap") + if err := cmd.Run(); err != nil { + // Try to create a builder + cmd = exec.CommandContext(ctx, "docker", "buildx", "create", "--use", "--bootstrap") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("docker: failed to create buildx builder: %w", err) + } + } + + return nil +} diff --git a/build/builders/go.go b/build/builders/go.go new file mode 100644 index 0000000..379572e --- /dev/null +++ b/build/builders/go.go @@ -0,0 +1,129 @@ +// Package builders provides build implementations for different project types. +package builders + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" +) + +// GoBuilder implements the Builder interface for Go projects. +type GoBuilder struct{} + +// NewGoBuilder creates a new GoBuilder instance. +func NewGoBuilder() *GoBuilder { + return &GoBuilder{} +} + +// Name returns the builder's identifier. +func (b *GoBuilder) Name() string { + return "go" +} + +// Detect checks if this builder can handle the project in the given directory. +// Uses IsGoProject from the build package which checks for go.mod or wails.json. +func (b *GoBuilder) Detect(fs io.Medium, dir string) (bool, error) { + return build.IsGoProject(fs, dir), nil +} + +// Build compiles the Go project for the specified targets. +// It sets GOOS, GOARCH, and CGO_ENABLED environment variables, +// applies ldflags and trimpath, and runs go build. 
+func (b *GoBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) { + if cfg == nil { + return nil, fmt.Errorf("builders.GoBuilder.Build: config is nil") + } + + if len(targets) == 0 { + return nil, fmt.Errorf("builders.GoBuilder.Build: no targets specified") + } + + // Ensure output directory exists + if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil { + return nil, fmt.Errorf("builders.GoBuilder.Build: failed to create output directory: %w", err) + } + + var artifacts []build.Artifact + + for _, target := range targets { + artifact, err := b.buildTarget(ctx, cfg, target) + if err != nil { + return artifacts, fmt.Errorf("builders.GoBuilder.Build: failed to build %s: %w", target.String(), err) + } + artifacts = append(artifacts, artifact) + } + + return artifacts, nil +} + +// buildTarget compiles for a single target platform. +func (b *GoBuilder) buildTarget(ctx context.Context, cfg *build.Config, target build.Target) (build.Artifact, error) { + // Determine output binary name + binaryName := cfg.Name + if binaryName == "" { + binaryName = filepath.Base(cfg.ProjectDir) + } + + // Add .exe extension for Windows + if target.OS == "windows" && !strings.HasSuffix(binaryName, ".exe") { + binaryName += ".exe" + } + + // Create platform-specific output path: output/os_arch/binary + platformDir := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch)) + if err := cfg.FS.EnsureDir(platformDir); err != nil { + return build.Artifact{}, fmt.Errorf("failed to create platform directory: %w", err) + } + + outputPath := filepath.Join(platformDir, binaryName) + + // Build the go build arguments + args := []string{"build"} + + // Add trimpath flag + args = append(args, "-trimpath") + + // Add ldflags if specified + if len(cfg.LDFlags) > 0 { + ldflags := strings.Join(cfg.LDFlags, " ") + args = append(args, "-ldflags", ldflags) + } + + // Add output path + args = append(args, "-o", outputPath) + + // Add the 
project directory as the build target (current directory) + args = append(args, ".") + + // Create the command + cmd := exec.CommandContext(ctx, "go", args...) + cmd.Dir = cfg.ProjectDir + + // Set up environment + env := os.Environ() + env = append(env, fmt.Sprintf("GOOS=%s", target.OS)) + env = append(env, fmt.Sprintf("GOARCH=%s", target.Arch)) + env = append(env, "CGO_ENABLED=0") // CGO disabled by default for cross-compilation + cmd.Env = env + + // Capture output for error messages + output, err := cmd.CombinedOutput() + if err != nil { + return build.Artifact{}, fmt.Errorf("go build failed: %w\nOutput: %s", err, string(output)) + } + + return build.Artifact{ + Path: outputPath, + OS: target.OS, + Arch: target.Arch, + }, nil +} + +// Ensure GoBuilder implements the Builder interface. +var _ build.Builder = (*GoBuilder)(nil) diff --git a/build/builders/go_test.go b/build/builders/go_test.go new file mode 100644 index 0000000..495f9d8 --- /dev/null +++ b/build/builders/go_test.go @@ -0,0 +1,398 @@ +package builders + +import ( + "context" + "os" + "path/filepath" + "runtime" + "testing" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// setupGoTestProject creates a minimal Go project for testing. 
+func setupGoTestProject(t *testing.T) string { + t.Helper() + dir := t.TempDir() + + // Create a minimal go.mod + goMod := `module testproject + +go 1.21 +` + err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644) + require.NoError(t, err) + + // Create a minimal main.go + mainGo := `package main + +func main() { + println("hello") +} +` + err = os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644) + require.NoError(t, err) + + return dir +} + +func TestGoBuilder_Name_Good(t *testing.T) { + builder := NewGoBuilder() + assert.Equal(t, "go", builder.Name()) +} + +func TestGoBuilder_Detect_Good(t *testing.T) { + fs := io.Local + t.Run("detects Go project with go.mod", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644) + require.NoError(t, err) + + builder := NewGoBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.True(t, detected) + }) + + t.Run("detects Wails project", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644) + require.NoError(t, err) + + builder := NewGoBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.True(t, detected) + }) + + t.Run("returns false for non-Go project", func(t *testing.T) { + dir := t.TempDir() + // Create a Node.js project instead + err := os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}"), 0644) + require.NoError(t, err) + + builder := NewGoBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.False(t, detected) + }) + + t.Run("returns false for empty directory", func(t *testing.T) { + dir := t.TempDir() + + builder := NewGoBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.False(t, detected) + }) +} + +func TestGoBuilder_Build_Good(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") 
+ } + + t.Run("builds for current platform", func(t *testing.T) { + projectDir := setupGoTestProject(t) + outputDir := t.TempDir() + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "testbinary", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + require.NoError(t, err) + require.Len(t, artifacts, 1) + + // Verify artifact properties + artifact := artifacts[0] + assert.Equal(t, runtime.GOOS, artifact.OS) + assert.Equal(t, runtime.GOARCH, artifact.Arch) + + // Verify binary was created + assert.FileExists(t, artifact.Path) + + // Verify the path is in the expected location + expectedName := "testbinary" + if runtime.GOOS == "windows" { + expectedName += ".exe" + } + assert.Contains(t, artifact.Path, expectedName) + }) + + t.Run("builds multiple targets", func(t *testing.T) { + projectDir := setupGoTestProject(t) + outputDir := t.TempDir() + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "multitest", + } + targets := []build.Target{ + {OS: "linux", Arch: "amd64"}, + {OS: "linux", Arch: "arm64"}, + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + require.NoError(t, err) + require.Len(t, artifacts, 2) + + // Verify both artifacts were created + for i, artifact := range artifacts { + assert.Equal(t, targets[i].OS, artifact.OS) + assert.Equal(t, targets[i].Arch, artifact.Arch) + assert.FileExists(t, artifact.Path) + } + }) + + t.Run("adds .exe extension for Windows", func(t *testing.T) { + projectDir := setupGoTestProject(t) + outputDir := t.TempDir() + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "wintest", + } + targets := []build.Target{ + {OS: "windows", Arch: "amd64"}, + } + + artifacts, err := 
builder.Build(context.Background(), cfg, targets) + require.NoError(t, err) + require.Len(t, artifacts, 1) + + // Verify .exe extension + assert.True(t, filepath.Ext(artifacts[0].Path) == ".exe") + assert.FileExists(t, artifacts[0].Path) + }) + + t.Run("uses directory name when Name not specified", func(t *testing.T) { + projectDir := setupGoTestProject(t) + outputDir := t.TempDir() + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "", // Empty name + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + require.NoError(t, err) + require.Len(t, artifacts, 1) + + // Binary should use the project directory base name + baseName := filepath.Base(projectDir) + if runtime.GOOS == "windows" { + baseName += ".exe" + } + assert.Contains(t, artifacts[0].Path, baseName) + }) + + t.Run("applies ldflags", func(t *testing.T) { + projectDir := setupGoTestProject(t) + outputDir := t.TempDir() + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "ldflagstest", + LDFlags: []string{"-s", "-w"}, // Strip debug info + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + require.NoError(t, err) + require.Len(t, artifacts, 1) + assert.FileExists(t, artifacts[0].Path) + }) + + t.Run("creates output directory if missing", func(t *testing.T) { + projectDir := setupGoTestProject(t) + outputDir := filepath.Join(t.TempDir(), "nested", "output") + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "nestedtest", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + 
require.NoError(t, err) + require.Len(t, artifacts, 1) + assert.FileExists(t, artifacts[0].Path) + assert.DirExists(t, outputDir) + }) +} + +func TestGoBuilder_Build_Bad(t *testing.T) { + t.Run("returns error for nil config", func(t *testing.T) { + builder := NewGoBuilder() + + artifacts, err := builder.Build(context.Background(), nil, []build.Target{{OS: "linux", Arch: "amd64"}}) + assert.Error(t, err) + assert.Nil(t, artifacts) + assert.Contains(t, err.Error(), "config is nil") + }) + + t.Run("returns error for empty targets", func(t *testing.T) { + projectDir := setupGoTestProject(t) + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: t.TempDir(), + Name: "test", + } + + artifacts, err := builder.Build(context.Background(), cfg, []build.Target{}) + assert.Error(t, err) + assert.Nil(t, artifacts) + assert.Contains(t, err.Error(), "no targets specified") + }) + + t.Run("returns error for invalid project directory", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: "/nonexistent/path", + OutputDir: t.TempDir(), + Name: "test", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + assert.Error(t, err) + assert.Empty(t, artifacts) + }) + + t.Run("returns error for invalid Go code", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + dir := t.TempDir() + + // Create go.mod + err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test\n\ngo 1.21"), 0644) + require.NoError(t, err) + + // Create invalid Go code + err = os.WriteFile(filepath.Join(dir, "main.go"), []byte("this is not valid go code"), 0644) + require.NoError(t, err) + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: dir, + 
OutputDir: t.TempDir(), + Name: "test", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + assert.Error(t, err) + assert.Contains(t, err.Error(), "go build failed") + assert.Empty(t, artifacts) + }) + + t.Run("returns partial artifacts on partial failure", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Create a project that will fail on one target + // Using an invalid arch for linux + projectDir := setupGoTestProject(t) + outputDir := t.TempDir() + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "partialtest", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, // This should succeed + {OS: "linux", Arch: "invalid_arch"}, // This should fail + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + // Should return error for the failed build + assert.Error(t, err) + // Should have the successful artifact + assert.Len(t, artifacts, 1) + }) + + t.Run("respects context cancellation", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + projectDir := setupGoTestProject(t) + + builder := NewGoBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: t.TempDir(), + Name: "canceltest", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + // Create an already cancelled context + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + artifacts, err := builder.Build(ctx, cfg, targets) + assert.Error(t, err) + assert.Empty(t, artifacts) + }) +} + +func TestGoBuilder_Interface_Good(t *testing.T) { + // Verify GoBuilder implements Builder interface + var _ build.Builder = (*GoBuilder)(nil) + var _ build.Builder = NewGoBuilder() +} diff --git 
a/build/builders/linuxkit.go b/build/builders/linuxkit.go new file mode 100644 index 0000000..93055f6 --- /dev/null +++ b/build/builders/linuxkit.go @@ -0,0 +1,270 @@ +// Package builders provides build implementations for different project types. +package builders + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" +) + +// LinuxKitBuilder builds LinuxKit images. +type LinuxKitBuilder struct{} + +// NewLinuxKitBuilder creates a new LinuxKit builder. +func NewLinuxKitBuilder() *LinuxKitBuilder { + return &LinuxKitBuilder{} +} + +// Name returns the builder's identifier. +func (b *LinuxKitBuilder) Name() string { + return "linuxkit" +} + +// Detect checks if a linuxkit.yml or .yml config exists in the directory. +func (b *LinuxKitBuilder) Detect(fs io.Medium, dir string) (bool, error) { + // Check for linuxkit.yml + if fs.IsFile(filepath.Join(dir, "linuxkit.yml")) { + return true, nil + } + // Check for .core/linuxkit/ + lkDir := filepath.Join(dir, ".core", "linuxkit") + if fs.IsDir(lkDir) { + entries, err := fs.List(lkDir) + if err == nil { + for _, entry := range entries { + if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".yml") { + return true, nil + } + } + } + } + return false, nil +} + +// Build builds LinuxKit images for the specified targets. 
+func (b *LinuxKitBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) { + // Validate linuxkit CLI is available + if err := b.validateLinuxKitCli(); err != nil { + return nil, err + } + + // Determine config file path + configPath := cfg.LinuxKitConfig + if configPath == "" { + // Auto-detect + if cfg.FS.IsFile(filepath.Join(cfg.ProjectDir, "linuxkit.yml")) { + configPath = filepath.Join(cfg.ProjectDir, "linuxkit.yml") + } else { + // Look in .core/linuxkit/ + lkDir := filepath.Join(cfg.ProjectDir, ".core", "linuxkit") + if cfg.FS.IsDir(lkDir) { + entries, err := cfg.FS.List(lkDir) + if err == nil { + for _, entry := range entries { + if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".yml") { + configPath = filepath.Join(lkDir, entry.Name()) + break + } + } + } + } + } + } + + if configPath == "" { + return nil, fmt.Errorf("linuxkit.Build: no LinuxKit config file found. Specify with --config or create linuxkit.yml") + } + + // Validate config file exists + if !cfg.FS.IsFile(configPath) { + return nil, fmt.Errorf("linuxkit.Build: config file not found: %s", configPath) + } + + // Determine output formats + formats := cfg.Formats + if len(formats) == 0 { + formats = []string{"qcow2-bios"} // Default to QEMU-compatible format + } + + // Create output directory + outputDir := cfg.OutputDir + if outputDir == "" { + outputDir = filepath.Join(cfg.ProjectDir, "dist") + } + if err := cfg.FS.EnsureDir(outputDir); err != nil { + return nil, fmt.Errorf("linuxkit.Build: failed to create output directory: %w", err) + } + + // Determine base name from config file or project name + baseName := cfg.Name + if baseName == "" { + baseName = strings.TrimSuffix(filepath.Base(configPath), ".yml") + } + + // If no targets, default to linux/amd64 + if len(targets) == 0 { + targets = []build.Target{{OS: "linux", Arch: "amd64"}} + } + + var artifacts []build.Artifact + + // Build for each target and format + for _, target := range 
targets { + // LinuxKit only supports Linux + if target.OS != "linux" { + fmt.Printf("Skipping %s/%s (LinuxKit only supports Linux)\n", target.OS, target.Arch) + continue + } + + for _, format := range formats { + outputName := fmt.Sprintf("%s-%s", baseName, target.Arch) + + args := b.buildLinuxKitArgs(configPath, format, outputName, outputDir, target.Arch) + + cmd := exec.CommandContext(ctx, "linuxkit", args...) + cmd.Dir = cfg.ProjectDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + fmt.Printf("Building LinuxKit image: %s (%s, %s)\n", outputName, format, target.Arch) + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("linuxkit.Build: build failed for %s/%s: %w", target.Arch, format, err) + } + + // Determine the actual output file path + artifactPath := b.getArtifactPath(outputDir, outputName, format) + + // Verify the artifact was created + if !cfg.FS.Exists(artifactPath) { + // Try alternate naming conventions + artifactPath = b.findArtifact(cfg.FS, outputDir, outputName, format) + if artifactPath == "" { + return nil, fmt.Errorf("linuxkit.Build: artifact not found after build: expected %s", b.getArtifactPath(outputDir, outputName, format)) + } + } + + artifacts = append(artifacts, build.Artifact{ + Path: artifactPath, + OS: target.OS, + Arch: target.Arch, + }) + } + } + + return artifacts, nil +} + +// buildLinuxKitArgs builds the arguments for linuxkit build command. +func (b *LinuxKitBuilder) buildLinuxKitArgs(configPath, format, outputName, outputDir, arch string) []string { + args := []string{"build"} + + // Output format + args = append(args, "--format", format) + + // Output name + args = append(args, "--name", outputName) + + // Output directory + args = append(args, "--dir", outputDir) + + // Architecture (if not amd64) + if arch != "amd64" { + args = append(args, "--arch", arch) + } + + // Config file + args = append(args, configPath) + + return args +} + +// getArtifactPath returns the expected path of the built artifact. 
+func (b *LinuxKitBuilder) getArtifactPath(outputDir, outputName, format string) string { + ext := b.getFormatExtension(format) + return filepath.Join(outputDir, outputName+ext) +} + +// findArtifact searches for the built artifact with various naming conventions. +func (b *LinuxKitBuilder) findArtifact(fs io.Medium, outputDir, outputName, format string) string { + // LinuxKit can create files with different suffixes + extensions := []string{ + b.getFormatExtension(format), + "-bios" + b.getFormatExtension(format), + "-efi" + b.getFormatExtension(format), + } + + for _, ext := range extensions { + path := filepath.Join(outputDir, outputName+ext) + if fs.Exists(path) { + return path + } + } + + // Try to find any file matching the output name + entries, err := fs.List(outputDir) + if err == nil { + for _, entry := range entries { + if strings.HasPrefix(entry.Name(), outputName) { + match := filepath.Join(outputDir, entry.Name()) + // Return first match that looks like an image + ext := filepath.Ext(match) + if ext == ".iso" || ext == ".qcow2" || ext == ".raw" || ext == ".vmdk" || ext == ".vhd" { + return match + } + } + } + } + + return "" +} + +// getFormatExtension returns the file extension for a LinuxKit output format. +func (b *LinuxKitBuilder) getFormatExtension(format string) string { + switch format { + case "iso", "iso-bios", "iso-efi": + return ".iso" + case "raw", "raw-bios", "raw-efi": + return ".raw" + case "qcow2", "qcow2-bios", "qcow2-efi": + return ".qcow2" + case "vmdk": + return ".vmdk" + case "vhd": + return ".vhd" + case "gcp": + return ".img.tar.gz" + case "aws": + return ".raw" + default: + return "." + strings.TrimSuffix(format, "-bios") + } +} + +// validateLinuxKitCli checks if the linuxkit CLI is available. 
+func (b *LinuxKitBuilder) validateLinuxKitCli() error { + // Check PATH first + if _, err := exec.LookPath("linuxkit"); err == nil { + return nil + } + + // Check common locations + paths := []string{ + "/usr/local/bin/linuxkit", + "/opt/homebrew/bin/linuxkit", + } + + for _, p := range paths { + if _, err := os.Stat(p); err == nil { + return nil + } + } + + return fmt.Errorf("linuxkit: linuxkit CLI not found. Install with: brew install linuxkit (macOS) or see https://github.com/linuxkit/linuxkit") +} diff --git a/build/builders/taskfile.go b/build/builders/taskfile.go new file mode 100644 index 0000000..5661ee9 --- /dev/null +++ b/build/builders/taskfile.go @@ -0,0 +1,275 @@ +// Package builders provides build implementations for different project types. +package builders + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" +) + +// TaskfileBuilder builds projects using Taskfile (https://taskfile.dev/). +// This is a generic builder that can handle any project type that has a Taskfile. +type TaskfileBuilder struct{} + +// NewTaskfileBuilder creates a new Taskfile builder. +func NewTaskfileBuilder() *TaskfileBuilder { + return &TaskfileBuilder{} +} + +// Name returns the builder's identifier. +func (b *TaskfileBuilder) Name() string { + return "taskfile" +} + +// Detect checks if a Taskfile exists in the directory. +func (b *TaskfileBuilder) Detect(fs io.Medium, dir string) (bool, error) { + // Check for Taskfile.yml, Taskfile.yaml, or Taskfile + taskfiles := []string{ + "Taskfile.yml", + "Taskfile.yaml", + "Taskfile", + "taskfile.yml", + "taskfile.yaml", + } + + for _, tf := range taskfiles { + if fs.IsFile(filepath.Join(dir, tf)) { + return true, nil + } + } + return false, nil +} + +// Build runs the Taskfile build task for each target platform. 
+func (b *TaskfileBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) { + // Validate task CLI is available + if err := b.validateTaskCli(); err != nil { + return nil, err + } + + // Create output directory + outputDir := cfg.OutputDir + if outputDir == "" { + outputDir = filepath.Join(cfg.ProjectDir, "dist") + } + if err := cfg.FS.EnsureDir(outputDir); err != nil { + return nil, fmt.Errorf("taskfile.Build: failed to create output directory: %w", err) + } + + var artifacts []build.Artifact + + // If no targets specified, just run the build task once + if len(targets) == 0 { + if err := b.runTask(ctx, cfg, "", ""); err != nil { + return nil, err + } + + // Try to find artifacts in output directory + found := b.findArtifacts(cfg.FS, outputDir) + artifacts = append(artifacts, found...) + } else { + // Run build task for each target + for _, target := range targets { + if err := b.runTask(ctx, cfg, target.OS, target.Arch); err != nil { + return nil, err + } + + // Try to find artifacts for this target + found := b.findArtifactsForTarget(cfg.FS, outputDir, target) + artifacts = append(artifacts, found...) + } + } + + return artifacts, nil +} + +// runTask executes the Taskfile build task. +func (b *TaskfileBuilder) runTask(ctx context.Context, cfg *build.Config, goos, goarch string) error { + // Build task command + args := []string{"build"} + + // Pass variables if targets are specified + if goos != "" { + args = append(args, fmt.Sprintf("GOOS=%s", goos)) + } + if goarch != "" { + args = append(args, fmt.Sprintf("GOARCH=%s", goarch)) + } + if cfg.OutputDir != "" { + args = append(args, fmt.Sprintf("OUTPUT_DIR=%s", cfg.OutputDir)) + } + if cfg.Name != "" { + args = append(args, fmt.Sprintf("NAME=%s", cfg.Name)) + } + if cfg.Version != "" { + args = append(args, fmt.Sprintf("VERSION=%s", cfg.Version)) + } + + cmd := exec.CommandContext(ctx, "task", args...) 
+ cmd.Dir = cfg.ProjectDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + // Set environment variables + cmd.Env = os.Environ() + if goos != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("GOOS=%s", goos)) + } + if goarch != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("GOARCH=%s", goarch)) + } + if cfg.OutputDir != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("OUTPUT_DIR=%s", cfg.OutputDir)) + } + if cfg.Name != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("NAME=%s", cfg.Name)) + } + if cfg.Version != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("VERSION=%s", cfg.Version)) + } + + if goos != "" && goarch != "" { + fmt.Printf("Running task build for %s/%s\n", goos, goarch) + } else { + fmt.Println("Running task build") + } + + if err := cmd.Run(); err != nil { + return fmt.Errorf("taskfile.Build: task build failed: %w", err) + } + + return nil +} + +// findArtifacts searches for built artifacts in the output directory. +func (b *TaskfileBuilder) findArtifacts(fs io.Medium, outputDir string) []build.Artifact { + var artifacts []build.Artifact + + entries, err := fs.List(outputDir) + if err != nil { + return artifacts + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + // Skip common non-artifact files + name := entry.Name() + if strings.HasPrefix(name, ".") || name == "CHECKSUMS.txt" { + continue + } + + artifacts = append(artifacts, build.Artifact{ + Path: filepath.Join(outputDir, name), + OS: "", + Arch: "", + }) + } + + return artifacts +} + +// findArtifactsForTarget searches for built artifacts for a specific target. +func (b *TaskfileBuilder) findArtifactsForTarget(fs io.Medium, outputDir string, target build.Target) []build.Artifact { + var artifacts []build.Artifact + + // 1. 
Look for platform-specific subdirectory: output/os_arch/ + platformSubdir := filepath.Join(outputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch)) + if fs.IsDir(platformSubdir) { + entries, _ := fs.List(platformSubdir) + for _, entry := range entries { + if entry.IsDir() { + // Handle .app bundles on macOS + if target.OS == "darwin" && strings.HasSuffix(entry.Name(), ".app") { + artifacts = append(artifacts, build.Artifact{ + Path: filepath.Join(platformSubdir, entry.Name()), + OS: target.OS, + Arch: target.Arch, + }) + } + continue + } + // Skip hidden files + if strings.HasPrefix(entry.Name(), ".") { + continue + } + artifacts = append(artifacts, build.Artifact{ + Path: filepath.Join(platformSubdir, entry.Name()), + OS: target.OS, + Arch: target.Arch, + }) + } + if len(artifacts) > 0 { + return artifacts + } + } + + // 2. Look for files matching the target pattern in the root output dir + patterns := []string{ + fmt.Sprintf("*-%s-%s*", target.OS, target.Arch), + fmt.Sprintf("*_%s_%s*", target.OS, target.Arch), + fmt.Sprintf("*-%s*", target.Arch), + } + + for _, pattern := range patterns { + entries, _ := fs.List(outputDir) + for _, entry := range entries { + match := entry.Name() + // Simple glob matching + if b.matchPattern(match, pattern) { + fullPath := filepath.Join(outputDir, match) + if fs.IsDir(fullPath) { + continue + } + + artifacts = append(artifacts, build.Artifact{ + Path: fullPath, + OS: target.OS, + Arch: target.Arch, + }) + } + } + + if len(artifacts) > 0 { + break // Found matches, stop looking + } + } + + return artifacts +} + +// matchPattern implements glob matching for Taskfile artifacts. +func (b *TaskfileBuilder) matchPattern(name, pattern string) bool { + matched, _ := filepath.Match(pattern, name) + return matched +} + +// validateTaskCli checks if the task CLI is available. 
+func (b *TaskfileBuilder) validateTaskCli() error { + // Check PATH first + if _, err := exec.LookPath("task"); err == nil { + return nil + } + + // Check common locations + paths := []string{ + "/usr/local/bin/task", + "/opt/homebrew/bin/task", + } + + for _, p := range paths { + if _, err := os.Stat(p); err == nil { + return nil + } + } + + return fmt.Errorf("taskfile: task CLI not found. Install with: brew install go-task (macOS), go install github.com/go-task/task/v3/cmd/task@latest, or see https://taskfile.dev/installation/") +} diff --git a/build/builders/wails.go b/build/builders/wails.go new file mode 100644 index 0000000..649703f --- /dev/null +++ b/build/builders/wails.go @@ -0,0 +1,247 @@ +// Package builders provides build implementations for different project types. +package builders + +import ( + "context" + "fmt" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" +) + +// WailsBuilder implements the Builder interface for Wails v3 projects. +type WailsBuilder struct{} + +// NewWailsBuilder creates a new WailsBuilder instance. +func NewWailsBuilder() *WailsBuilder { + return &WailsBuilder{} +} + +// Name returns the builder's identifier. +func (b *WailsBuilder) Name() string { + return "wails" +} + +// Detect checks if this builder can handle the project in the given directory. +// Uses IsWailsProject from the build package which checks for wails.json. +func (b *WailsBuilder) Detect(fs io.Medium, dir string) (bool, error) { + return build.IsWailsProject(fs, dir), nil +} + +// Build compiles the Wails project for the specified targets. 
+// It detects the Wails version and chooses the appropriate build strategy: +// - Wails v3: Delegates to Taskfile (error if missing) +// - Wails v2: Uses 'wails build' command +func (b *WailsBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) { + if cfg == nil { + return nil, fmt.Errorf("builders.WailsBuilder.Build: config is nil") + } + + if len(targets) == 0 { + return nil, fmt.Errorf("builders.WailsBuilder.Build: no targets specified") + } + + // Detect Wails version + isV3 := b.isWailsV3(cfg.FS, cfg.ProjectDir) + + if isV3 { + // Wails v3 strategy: Delegate to Taskfile + taskBuilder := NewTaskfileBuilder() + if detected, _ := taskBuilder.Detect(cfg.FS, cfg.ProjectDir); detected { + return taskBuilder.Build(ctx, cfg, targets) + } + return nil, fmt.Errorf("wails v3 projects require a Taskfile for building") + } + + // Wails v2 strategy: Use 'wails build' + // Ensure output directory exists + if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil { + return nil, fmt.Errorf("builders.WailsBuilder.Build: failed to create output directory: %w", err) + } + + // Note: Wails v2 handles frontend installation/building automatically via wails.json config + + var artifacts []build.Artifact + + for _, target := range targets { + artifact, err := b.buildV2Target(ctx, cfg, target) + if err != nil { + return artifacts, fmt.Errorf("builders.WailsBuilder.Build: failed to build %s: %w", target.String(), err) + } + artifacts = append(artifacts, artifact) + } + + return artifacts, nil +} + +// isWailsV3 checks if the project uses Wails v3 by inspecting go.mod. +func (b *WailsBuilder) isWailsV3(fs io.Medium, dir string) bool { + goModPath := filepath.Join(dir, "go.mod") + content, err := fs.Read(goModPath) + if err != nil { + return false + } + return strings.Contains(content, "github.com/wailsapp/wails/v3") +} + +// buildV2Target compiles for a single target platform using wails (v2). 
+func (b *WailsBuilder) buildV2Target(ctx context.Context, cfg *build.Config, target build.Target) (build.Artifact, error) { + // Determine output binary name + binaryName := cfg.Name + if binaryName == "" { + binaryName = filepath.Base(cfg.ProjectDir) + } + + // Build the wails build arguments + args := []string{"build"} + + // Platform + args = append(args, "-platform", fmt.Sprintf("%s/%s", target.OS, target.Arch)) + + // Output (Wails v2 uses -o for the binary name, relative to build/bin usually, but we want to control it) + // Actually, Wails v2 is opinionated about output dir (build/bin). + // We might need to copy artifacts after build if we want them in cfg.OutputDir. + // For now, let's try to let Wails do its thing and find the artifact. + + // Create the command + cmd := exec.CommandContext(ctx, "wails", args...) + cmd.Dir = cfg.ProjectDir + + // Capture output for error messages + output, err := cmd.CombinedOutput() + if err != nil { + return build.Artifact{}, fmt.Errorf("wails build failed: %w\nOutput: %s", err, string(output)) + } + + // Wails v2 typically outputs to build/bin + // We need to move/copy it to our desired output dir + + // Construct the source path where Wails v2 puts the binary + wailsOutputDir := filepath.Join(cfg.ProjectDir, "build", "bin") + + // Find the artifact in Wails output dir + sourcePath, err := b.findArtifact(cfg.FS, wailsOutputDir, binaryName, target) + if err != nil { + return build.Artifact{}, fmt.Errorf("failed to find Wails v2 build artifact: %w", err) + } + + // Move/Copy to our output dir + // Create platform specific dir in our output + platformDir := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch)) + if err := cfg.FS.EnsureDir(platformDir); err != nil { + return build.Artifact{}, fmt.Errorf("failed to create output dir: %w", err) + } + + destPath := filepath.Join(platformDir, filepath.Base(sourcePath)) + + // Simple copy using the medium + content, err := cfg.FS.Read(sourcePath) + if err 
!= nil { + return build.Artifact{}, err + } + if err := cfg.FS.Write(destPath, content); err != nil { + return build.Artifact{}, err + } + + return build.Artifact{ + Path: destPath, + OS: target.OS, + Arch: target.Arch, + }, nil +} + +// findArtifact locates the built artifact based on the target platform. +func (b *WailsBuilder) findArtifact(fs io.Medium, platformDir, binaryName string, target build.Target) (string, error) { + var candidates []string + + switch target.OS { + case "windows": + // Look for NSIS installer first, then plain exe + candidates = []string{ + filepath.Join(platformDir, binaryName+"-installer.exe"), + filepath.Join(platformDir, binaryName+".exe"), + filepath.Join(platformDir, binaryName+"-amd64-installer.exe"), + } + case "darwin": + // Look for .dmg, then .app bundle, then plain binary + candidates = []string{ + filepath.Join(platformDir, binaryName+".dmg"), + filepath.Join(platformDir, binaryName+".app"), + filepath.Join(platformDir, binaryName), + } + default: + // Linux and others: look for plain binary + candidates = []string{ + filepath.Join(platformDir, binaryName), + } + } + + // Try each candidate + for _, candidate := range candidates { + if fs.Exists(candidate) { + return candidate, nil + } + } + + // If no specific candidate found, try to find any executable or package in the directory + entries, err := fs.List(platformDir) + if err != nil { + return "", fmt.Errorf("failed to read platform directory: %w", err) + } + + for _, entry := range entries { + name := entry.Name() + // Skip common non-artifact files + if strings.HasSuffix(name, ".go") || strings.HasSuffix(name, ".json") { + continue + } + + path := filepath.Join(platformDir, name) + info, err := entry.Info() + if err != nil { + continue + } + + // On Unix, check if it's executable; on Windows, check for .exe + if target.OS == "windows" { + if strings.HasSuffix(name, ".exe") { + return path, nil + } + } else if info.Mode()&0111 != 0 || entry.IsDir() { + // Executable file 
or directory (.app bundle) + return path, nil + } + } + + return "", fmt.Errorf("no artifact found in %s", platformDir) +} + +// detectPackageManager detects the frontend package manager based on lock files. +// Returns "bun", "pnpm", "yarn", or "npm" (default). +func detectPackageManager(fs io.Medium, dir string) string { + // Check in priority order: bun, pnpm, yarn, npm + lockFiles := []struct { + file string + manager string + }{ + {"bun.lockb", "bun"}, + {"pnpm-lock.yaml", "pnpm"}, + {"yarn.lock", "yarn"}, + {"package-lock.json", "npm"}, + } + + for _, lf := range lockFiles { + if fs.IsFile(filepath.Join(dir, lf.file)) { + return lf.manager + } + } + + // Default to npm if no lock file found + return "npm" +} + +// Ensure WailsBuilder implements the Builder interface. +var _ build.Builder = (*WailsBuilder)(nil) diff --git a/build/builders/wails_test.go b/build/builders/wails_test.go new file mode 100644 index 0000000..2427408 --- /dev/null +++ b/build/builders/wails_test.go @@ -0,0 +1,416 @@ +package builders + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// setupWailsTestProject creates a minimal Wails project structure for testing. 
+func setupWailsTestProject(t *testing.T) string { + t.Helper() + dir := t.TempDir() + + // Create wails.json + wailsJSON := `{ + "name": "testapp", + "outputfilename": "testapp" +}` + err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte(wailsJSON), 0644) + require.NoError(t, err) + + // Create a minimal go.mod + goMod := `module testapp + +go 1.21 + +require github.com/wailsapp/wails/v3 v3.0.0 +` + err = os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644) + require.NoError(t, err) + + // Create a minimal main.go + mainGo := `package main + +func main() { + println("hello wails") +} +` + err = os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644) + require.NoError(t, err) + + // Create a minimal Taskfile.yml + taskfile := `version: '3' +tasks: + build: + cmds: + - mkdir -p {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}} + - touch {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}/testapp +` + err = os.WriteFile(filepath.Join(dir, "Taskfile.yml"), []byte(taskfile), 0644) + require.NoError(t, err) + + return dir +} + +// setupWailsV2TestProject creates a Wails v2 project structure. 
+func setupWailsV2TestProject(t *testing.T) string { + t.Helper() + dir := t.TempDir() + + // wails.json + err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644) + require.NoError(t, err) + + // go.mod with v2 + goMod := `module testapp +go 1.21 +require github.com/wailsapp/wails/v2 v2.8.0 +` + err = os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644) + require.NoError(t, err) + + return dir +} + +func TestWailsBuilder_Build_Taskfile_Good(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Check if task is available + if _, err := exec.LookPath("task"); err != nil { + t.Skip("task not installed, skipping test") + } + + t.Run("delegates to Taskfile if present", func(t *testing.T) { + fs := io.Local + projectDir := setupWailsTestProject(t) + outputDir := t.TempDir() + + // Create a Taskfile that just touches a file + taskfile := `version: '3' +tasks: + build: + cmds: + - mkdir -p {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}} + - touch {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}/testapp +` + err := os.WriteFile(filepath.Join(projectDir, "Taskfile.yml"), []byte(taskfile), 0644) + require.NoError(t, err) + + builder := NewWailsBuilder() + cfg := &build.Config{ + FS: fs, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "testapp", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + require.NoError(t, err) + assert.NotEmpty(t, artifacts) + }) +} + +func TestWailsBuilder_Name_Good(t *testing.T) { + builder := NewWailsBuilder() + assert.Equal(t, "wails", builder.Name()) +} + +func TestWailsBuilder_Build_V2_Good(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + if _, err := exec.LookPath("wails"); err != nil { + t.Skip("wails not installed, skipping integration test") + } + + t.Run("builds v2 project", func(t *testing.T) { + fs := io.Local + projectDir 
:= setupWailsV2TestProject(t) + outputDir := t.TempDir() + + builder := NewWailsBuilder() + cfg := &build.Config{ + FS: fs, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "testapp", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + // This will likely fail in a real run because we can't easily mock the full wails v2 build process + // (which needs a valid project with main.go etc). + // But it validates we are trying to run the command. + // For now, we just verify it attempts the build - error is expected + _, _ = builder.Build(context.Background(), cfg, targets) + }) +} + +func TestWailsBuilder_Detect_Good(t *testing.T) { + fs := io.Local + t.Run("detects Wails project with wails.json", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644) + require.NoError(t, err) + + builder := NewWailsBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.True(t, detected) + }) + + t.Run("returns false for Go-only project", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644) + require.NoError(t, err) + + builder := NewWailsBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.False(t, detected) + }) + + t.Run("returns false for Node.js project", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}"), 0644) + require.NoError(t, err) + + builder := NewWailsBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.False(t, detected) + }) + + t.Run("returns false for empty directory", func(t *testing.T) { + dir := t.TempDir() + + builder := NewWailsBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.False(t, detected) + }) +} + +func TestDetectPackageManager_Good(t *testing.T) { + fs := io.Local + t.Run("detects bun 
from bun.lockb", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "bun.lockb"), []byte(""), 0644) + require.NoError(t, err) + + result := detectPackageManager(fs, dir) + assert.Equal(t, "bun", result) + }) + + t.Run("detects pnpm from pnpm-lock.yaml", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "pnpm-lock.yaml"), []byte(""), 0644) + require.NoError(t, err) + + result := detectPackageManager(fs, dir) + assert.Equal(t, "pnpm", result) + }) + + t.Run("detects yarn from yarn.lock", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644) + require.NoError(t, err) + + result := detectPackageManager(fs, dir) + assert.Equal(t, "yarn", result) + }) + + t.Run("detects npm from package-lock.json", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644) + require.NoError(t, err) + + result := detectPackageManager(fs, dir) + assert.Equal(t, "npm", result) + }) + + t.Run("defaults to npm when no lock file", func(t *testing.T) { + dir := t.TempDir() + + result := detectPackageManager(fs, dir) + assert.Equal(t, "npm", result) + }) + + t.Run("prefers bun over other lock files", func(t *testing.T) { + dir := t.TempDir() + // Create multiple lock files + require.NoError(t, os.WriteFile(filepath.Join(dir, "bun.lockb"), []byte(""), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644)) + + result := detectPackageManager(fs, dir) + assert.Equal(t, "bun", result) + }) + + t.Run("prefers pnpm over yarn and npm", func(t *testing.T) { + dir := t.TempDir() + // Create multiple lock files (no bun) + require.NoError(t, os.WriteFile(filepath.Join(dir, "pnpm-lock.yaml"), []byte(""), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 
0644)) + require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644)) + + result := detectPackageManager(fs, dir) + assert.Equal(t, "pnpm", result) + }) + + t.Run("prefers yarn over npm", func(t *testing.T) { + dir := t.TempDir() + // Create multiple lock files (no bun or pnpm) + require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644)) + + result := detectPackageManager(fs, dir) + assert.Equal(t, "yarn", result) + }) +} + +func TestWailsBuilder_Build_Bad(t *testing.T) { + t.Run("returns error for nil config", func(t *testing.T) { + builder := NewWailsBuilder() + + artifacts, err := builder.Build(context.Background(), nil, []build.Target{{OS: "linux", Arch: "amd64"}}) + assert.Error(t, err) + assert.Nil(t, artifacts) + assert.Contains(t, err.Error(), "config is nil") + }) + + t.Run("returns error for empty targets", func(t *testing.T) { + projectDir := setupWailsTestProject(t) + + builder := NewWailsBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: t.TempDir(), + Name: "test", + } + + artifacts, err := builder.Build(context.Background(), cfg, []build.Target{}) + assert.Error(t, err) + assert.Nil(t, artifacts) + assert.Contains(t, err.Error(), "no targets specified") + }) +} + +func TestWailsBuilder_Build_Good(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Check if wails3 is available in PATH + if _, err := exec.LookPath("wails3"); err != nil { + t.Skip("wails3 not installed, skipping integration test") + } + + t.Run("builds for current platform", func(t *testing.T) { + projectDir := setupWailsTestProject(t) + outputDir := t.TempDir() + + builder := NewWailsBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: "testapp", + } + targets := []build.Target{ + {OS: 
runtime.GOOS, Arch: runtime.GOARCH}, + } + + artifacts, err := builder.Build(context.Background(), cfg, targets) + require.NoError(t, err) + require.Len(t, artifacts, 1) + + // Verify artifact properties + artifact := artifacts[0] + assert.Equal(t, runtime.GOOS, artifact.OS) + assert.Equal(t, runtime.GOARCH, artifact.Arch) + }) +} + +func TestWailsBuilder_Interface_Good(t *testing.T) { + // Verify WailsBuilder implements Builder interface + var _ build.Builder = (*WailsBuilder)(nil) + var _ build.Builder = NewWailsBuilder() +} + +func TestWailsBuilder_Ugly(t *testing.T) { + t.Run("handles nonexistent frontend directory gracefully", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Create a Wails project without a frontend directory + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644) + require.NoError(t, err) + + builder := NewWailsBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: dir, + OutputDir: t.TempDir(), + Name: "test", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + // This will fail because wails3 isn't set up, but it shouldn't panic + // due to missing frontend directory + _, err = builder.Build(context.Background(), cfg, targets) + // We expect an error (wails3 build will fail), but not a panic + // The error should be about wails3 build, not about frontend + if err != nil { + assert.NotContains(t, err.Error(), "frontend dependencies") + } + }) + + t.Run("handles context cancellation", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + projectDir := setupWailsTestProject(t) + + builder := NewWailsBuilder() + cfg := &build.Config{ + FS: io.Local, + ProjectDir: projectDir, + OutputDir: t.TempDir(), + Name: "canceltest", + } + targets := []build.Target{ + {OS: runtime.GOOS, Arch: runtime.GOARCH}, + } + + // Create an already cancelled context + ctx, cancel 
:= context.WithCancel(context.Background()) + cancel() + + artifacts, err := builder.Build(ctx, cfg, targets) + assert.Error(t, err) + assert.Empty(t, artifacts) + }) +} diff --git a/build/checksum.go b/build/checksum.go new file mode 100644 index 0000000..7738dbe --- /dev/null +++ b/build/checksum.go @@ -0,0 +1,97 @@ +// Package build provides project type detection and cross-compilation for the Core build system. +package build + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "path/filepath" + + io_interface "forge.lthn.ai/core/go/pkg/io" + "sort" + "strings" +) + +// Checksum computes SHA256 for an artifact and returns the artifact with the Checksum field filled. +func Checksum(fs io_interface.Medium, artifact Artifact) (Artifact, error) { + if artifact.Path == "" { + return Artifact{}, fmt.Errorf("build.Checksum: artifact path is empty") + } + + // Open the file + file, err := fs.Open(artifact.Path) + if err != nil { + return Artifact{}, fmt.Errorf("build.Checksum: failed to open file: %w", err) + } + defer func() { _ = file.Close() }() + + // Compute SHA256 hash + hasher := sha256.New() + if _, err := io.Copy(hasher, file); err != nil { + return Artifact{}, fmt.Errorf("build.Checksum: failed to hash file: %w", err) + } + + checksum := hex.EncodeToString(hasher.Sum(nil)) + + return Artifact{ + Path: artifact.Path, + OS: artifact.OS, + Arch: artifact.Arch, + Checksum: checksum, + }, nil +} + +// ChecksumAll computes checksums for all artifacts. +// Returns a slice of artifacts with their Checksum fields filled. 
+func ChecksumAll(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) { + if len(artifacts) == 0 { + return nil, nil + } + + var checksummed []Artifact + for _, artifact := range artifacts { + cs, err := Checksum(fs, artifact) + if err != nil { + return checksummed, fmt.Errorf("build.ChecksumAll: failed to checksum %s: %w", artifact.Path, err) + } + checksummed = append(checksummed, cs) + } + + return checksummed, nil +} + +// WriteChecksumFile writes a CHECKSUMS.txt file with the format: +// +// sha256hash filename1 +// sha256hash filename2 +// +// The artifacts should have their Checksum fields filled (call ChecksumAll first). +// Filenames are relative to the output directory (just the basename). +func WriteChecksumFile(fs io_interface.Medium, artifacts []Artifact, path string) error { + if len(artifacts) == 0 { + return nil + } + + // Build the content + var lines []string + for _, artifact := range artifacts { + if artifact.Checksum == "" { + return fmt.Errorf("build.WriteChecksumFile: artifact %s has no checksum", artifact.Path) + } + filename := filepath.Base(artifact.Path) + lines = append(lines, fmt.Sprintf("%s %s", artifact.Checksum, filename)) + } + + // Sort lines for consistent output + sort.Strings(lines) + + content := strings.Join(lines, "\n") + "\n" + + // Write the file using the medium (which handles directory creation in Write) + if err := fs.Write(path, content); err != nil { + return fmt.Errorf("build.WriteChecksumFile: failed to write file: %w", err) + } + + return nil +} diff --git a/build/checksum_test.go b/build/checksum_test.go new file mode 100644 index 0000000..fc0580f --- /dev/null +++ b/build/checksum_test.go @@ -0,0 +1,282 @@ +package build + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// setupChecksumTestFile creates a test file with known content. 
+func setupChecksumTestFile(t *testing.T, content string) string { + t.Helper() + + dir := t.TempDir() + path := filepath.Join(dir, "testfile") + err := os.WriteFile(path, []byte(content), 0644) + require.NoError(t, err) + + return path +} + +func TestChecksum_Good(t *testing.T) { + fs := io.Local + t.Run("computes SHA256 checksum", func(t *testing.T) { + // Known SHA256 of "Hello, World!\n" + path := setupChecksumTestFile(t, "Hello, World!\n") + expectedChecksum := "c98c24b677eff44860afea6f493bbaec5bb1c4cbb209c6fc2bbb47f66ff2ad31" + + artifact := Artifact{ + Path: path, + OS: "linux", + Arch: "amd64", + } + + result, err := Checksum(fs, artifact) + require.NoError(t, err) + assert.Equal(t, expectedChecksum, result.Checksum) + }) + + t.Run("preserves artifact fields", func(t *testing.T) { + path := setupChecksumTestFile(t, "test content") + + artifact := Artifact{ + Path: path, + OS: "darwin", + Arch: "arm64", + } + + result, err := Checksum(fs, artifact) + require.NoError(t, err) + + assert.Equal(t, path, result.Path) + assert.Equal(t, "darwin", result.OS) + assert.Equal(t, "arm64", result.Arch) + assert.NotEmpty(t, result.Checksum) + }) + + t.Run("produces 64 character hex string", func(t *testing.T) { + path := setupChecksumTestFile(t, "any content") + + artifact := Artifact{Path: path, OS: "linux", Arch: "amd64"} + + result, err := Checksum(fs, artifact) + require.NoError(t, err) + + // SHA256 produces 32 bytes = 64 hex characters + assert.Len(t, result.Checksum, 64) + }) + + t.Run("different content produces different checksums", func(t *testing.T) { + path1 := setupChecksumTestFile(t, "content one") + path2 := setupChecksumTestFile(t, "content two") + + result1, err := Checksum(fs, Artifact{Path: path1, OS: "linux", Arch: "amd64"}) + require.NoError(t, err) + + result2, err := Checksum(fs, Artifact{Path: path2, OS: "linux", Arch: "amd64"}) + require.NoError(t, err) + + assert.NotEqual(t, result1.Checksum, result2.Checksum) + }) + + t.Run("same content 
produces same checksum", func(t *testing.T) { + content := "identical content" + path1 := setupChecksumTestFile(t, content) + path2 := setupChecksumTestFile(t, content) + + result1, err := Checksum(fs, Artifact{Path: path1, OS: "linux", Arch: "amd64"}) + require.NoError(t, err) + + result2, err := Checksum(fs, Artifact{Path: path2, OS: "linux", Arch: "amd64"}) + require.NoError(t, err) + + assert.Equal(t, result1.Checksum, result2.Checksum) + }) +} + +func TestChecksum_Bad(t *testing.T) { + fs := io.Local + t.Run("returns error for empty path", func(t *testing.T) { + artifact := Artifact{ + Path: "", + OS: "linux", + Arch: "amd64", + } + + result, err := Checksum(fs, artifact) + assert.Error(t, err) + assert.Contains(t, err.Error(), "artifact path is empty") + assert.Empty(t, result.Checksum) + }) + + t.Run("returns error for non-existent file", func(t *testing.T) { + artifact := Artifact{ + Path: "/nonexistent/path/file", + OS: "linux", + Arch: "amd64", + } + + result, err := Checksum(fs, artifact) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to open file") + assert.Empty(t, result.Checksum) + }) +} + +func TestChecksumAll_Good(t *testing.T) { + fs := io.Local + t.Run("checksums multiple artifacts", func(t *testing.T) { + paths := []string{ + setupChecksumTestFile(t, "content one"), + setupChecksumTestFile(t, "content two"), + setupChecksumTestFile(t, "content three"), + } + + artifacts := []Artifact{ + {Path: paths[0], OS: "linux", Arch: "amd64"}, + {Path: paths[1], OS: "darwin", Arch: "arm64"}, + {Path: paths[2], OS: "windows", Arch: "amd64"}, + } + + results, err := ChecksumAll(fs, artifacts) + require.NoError(t, err) + require.Len(t, results, 3) + + for i, result := range results { + assert.Equal(t, artifacts[i].Path, result.Path) + assert.Equal(t, artifacts[i].OS, result.OS) + assert.Equal(t, artifacts[i].Arch, result.Arch) + assert.NotEmpty(t, result.Checksum) + } + }) + + t.Run("returns nil for empty slice", func(t *testing.T) { + 
results, err := ChecksumAll(fs, []Artifact{}) + assert.NoError(t, err) + assert.Nil(t, results) + }) + + t.Run("returns nil for nil slice", func(t *testing.T) { + results, err := ChecksumAll(fs, nil) + assert.NoError(t, err) + assert.Nil(t, results) + }) +} + +func TestChecksumAll_Bad(t *testing.T) { + fs := io.Local + t.Run("returns partial results on error", func(t *testing.T) { + path := setupChecksumTestFile(t, "valid content") + + artifacts := []Artifact{ + {Path: path, OS: "linux", Arch: "amd64"}, + {Path: "/nonexistent/file", OS: "linux", Arch: "arm64"}, // This will fail + } + + results, err := ChecksumAll(fs, artifacts) + assert.Error(t, err) + // Should have the first successful result + assert.Len(t, results, 1) + assert.NotEmpty(t, results[0].Checksum) + }) +} + +func TestWriteChecksumFile_Good(t *testing.T) { + fs := io.Local + t.Run("writes checksum file with correct format", func(t *testing.T) { + dir := t.TempDir() + checksumPath := filepath.Join(dir, "CHECKSUMS.txt") + + artifacts := []Artifact{ + {Path: "/output/app_linux_amd64.tar.gz", Checksum: "abc123def456", OS: "linux", Arch: "amd64"}, + {Path: "/output/app_darwin_arm64.tar.gz", Checksum: "789xyz000111", OS: "darwin", Arch: "arm64"}, + } + + err := WriteChecksumFile(fs, artifacts, checksumPath) + require.NoError(t, err) + + // Read and verify content + content, err := os.ReadFile(checksumPath) + require.NoError(t, err) + + lines := strings.Split(strings.TrimSpace(string(content)), "\n") + require.Len(t, lines, 2) + + // Lines should be sorted alphabetically + assert.Equal(t, "789xyz000111 app_darwin_arm64.tar.gz", lines[0]) + assert.Equal(t, "abc123def456 app_linux_amd64.tar.gz", lines[1]) + }) + + t.Run("creates parent directories", func(t *testing.T) { + dir := t.TempDir() + checksumPath := filepath.Join(dir, "nested", "deep", "CHECKSUMS.txt") + + artifacts := []Artifact{ + {Path: "/output/app.tar.gz", Checksum: "abc123", OS: "linux", Arch: "amd64"}, + } + + err := WriteChecksumFile(fs, 
artifacts, checksumPath) + require.NoError(t, err) + assert.FileExists(t, checksumPath) + }) + + t.Run("does nothing for empty artifacts", func(t *testing.T) { + dir := t.TempDir() + checksumPath := filepath.Join(dir, "CHECKSUMS.txt") + + err := WriteChecksumFile(fs, []Artifact{}, checksumPath) + require.NoError(t, err) + + // File should not exist + _, err = os.Stat(checksumPath) + assert.True(t, os.IsNotExist(err)) + }) + + t.Run("does nothing for nil artifacts", func(t *testing.T) { + dir := t.TempDir() + checksumPath := filepath.Join(dir, "CHECKSUMS.txt") + + err := WriteChecksumFile(fs, nil, checksumPath) + require.NoError(t, err) + }) + + t.Run("uses only basename for filenames", func(t *testing.T) { + dir := t.TempDir() + checksumPath := filepath.Join(dir, "CHECKSUMS.txt") + + artifacts := []Artifact{ + {Path: "/some/deep/nested/path/myapp_linux_amd64.tar.gz", Checksum: "checksum123", OS: "linux", Arch: "amd64"}, + } + + err := WriteChecksumFile(fs, artifacts, checksumPath) + require.NoError(t, err) + + content, err := os.ReadFile(checksumPath) + require.NoError(t, err) + + // Should only contain the basename + assert.Contains(t, string(content), "myapp_linux_amd64.tar.gz") + assert.NotContains(t, string(content), "/some/deep/nested/path/") + }) +} + +func TestWriteChecksumFile_Bad(t *testing.T) { + fs := io.Local + t.Run("returns error for artifact without checksum", func(t *testing.T) { + dir := t.TempDir() + checksumPath := filepath.Join(dir, "CHECKSUMS.txt") + + artifacts := []Artifact{ + {Path: "/output/app.tar.gz", Checksum: "", OS: "linux", Arch: "amd64"}, // No checksum + } + + err := WriteChecksumFile(fs, artifacts, checksumPath) + assert.Error(t, err) + assert.Contains(t, err.Error(), "has no checksum") + }) +} diff --git a/build/config.go b/build/config.go new file mode 100644 index 0000000..3dd5ab0 --- /dev/null +++ b/build/config.go @@ -0,0 +1,169 @@ +// Package build provides project type detection and cross-compilation for the Core build 
system. +// This file handles configuration loading from .core/build.yaml files. +package build + +import ( + "fmt" + "os" + "path/filepath" + + "forge.lthn.ai/core/go-devops/build/signing" + "forge.lthn.ai/core/go/pkg/io" + "gopkg.in/yaml.v3" +) + +// ConfigFileName is the name of the build configuration file. +const ConfigFileName = "build.yaml" + +// ConfigDir is the directory where build configuration is stored. +const ConfigDir = ".core" + +// BuildConfig holds the complete build configuration loaded from .core/build.yaml. +// This is distinct from Config which holds runtime build parameters. +type BuildConfig struct { + // Version is the config file format version. + Version int `yaml:"version"` + // Project contains project metadata. + Project Project `yaml:"project"` + // Build contains build settings. + Build Build `yaml:"build"` + // Targets defines the build targets. + Targets []TargetConfig `yaml:"targets"` + // Sign contains code signing configuration. + Sign signing.SignConfig `yaml:"sign,omitempty"` +} + +// Project holds project metadata. +type Project struct { + // Name is the project name. + Name string `yaml:"name"` + // Description is a brief description of the project. + Description string `yaml:"description"` + // Main is the path to the main package (e.g., ./cmd/core). + Main string `yaml:"main"` + // Binary is the output binary name. + Binary string `yaml:"binary"` +} + +// Build holds build-time settings. +type Build struct { + // CGO enables CGO for the build. + CGO bool `yaml:"cgo"` + // Flags are additional build flags (e.g., ["-trimpath"]). + Flags []string `yaml:"flags"` + // LDFlags are linker flags (e.g., ["-s", "-w"]). + LDFlags []string `yaml:"ldflags"` + // Env are additional environment variables. + Env []string `yaml:"env"` +} + +// TargetConfig defines a build target in the config file. +// This is separate from Target to allow for additional config-specific fields. 
+type TargetConfig struct { + // OS is the target operating system (e.g., "linux", "darwin", "windows"). + OS string `yaml:"os"` + // Arch is the target architecture (e.g., "amd64", "arm64"). + Arch string `yaml:"arch"` +} + +// LoadConfig loads build configuration from the .core/build.yaml file in the given directory. +// If the config file does not exist, it returns DefaultConfig(). +// Returns an error if the file exists but cannot be parsed. +func LoadConfig(fs io.Medium, dir string) (*BuildConfig, error) { + configPath := filepath.Join(dir, ConfigDir, ConfigFileName) + + content, err := fs.Read(configPath) + if err != nil { + if os.IsNotExist(err) { + return DefaultConfig(), nil + } + return nil, fmt.Errorf("build.LoadConfig: failed to read config file: %w", err) + } + + var cfg BuildConfig + data := []byte(content) + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("build.LoadConfig: failed to parse config file: %w", err) + } + + // Apply defaults for any missing fields + applyDefaults(&cfg) + + return &cfg, nil +} + +// DefaultConfig returns sensible defaults for Go projects. +func DefaultConfig() *BuildConfig { + return &BuildConfig{ + Version: 1, + Project: Project{ + Name: "", + Main: ".", + Binary: "", + }, + Build: Build{ + CGO: false, + Flags: []string{"-trimpath"}, + LDFlags: []string{"-s", "-w"}, + Env: []string{}, + }, + Targets: []TargetConfig{ + {OS: "linux", Arch: "amd64"}, + {OS: "linux", Arch: "arm64"}, + {OS: "darwin", Arch: "arm64"}, + {OS: "windows", Arch: "amd64"}, + }, + Sign: signing.DefaultSignConfig(), + } +} + +// applyDefaults fills in default values for any empty fields in the config. 
+func applyDefaults(cfg *BuildConfig) { + defaults := DefaultConfig() + + if cfg.Version == 0 { + cfg.Version = defaults.Version + } + + if cfg.Project.Main == "" { + cfg.Project.Main = defaults.Project.Main + } + + if cfg.Build.Flags == nil { + cfg.Build.Flags = defaults.Build.Flags + } + + if cfg.Build.LDFlags == nil { + cfg.Build.LDFlags = defaults.Build.LDFlags + } + + if cfg.Build.Env == nil { + cfg.Build.Env = defaults.Build.Env + } + + if len(cfg.Targets) == 0 { + cfg.Targets = defaults.Targets + } + + // Expand environment variables in sign config + cfg.Sign.ExpandEnv() +} + +// ConfigPath returns the path to the build config file for a given directory. +func ConfigPath(dir string) string { + return filepath.Join(dir, ConfigDir, ConfigFileName) +} + +// ConfigExists checks if a build config file exists in the given directory. +func ConfigExists(fs io.Medium, dir string) bool { + return fileExists(fs, ConfigPath(dir)) +} + +// ToTargets converts TargetConfig slice to Target slice for use with builders. +func (cfg *BuildConfig) ToTargets() []Target { + targets := make([]Target, len(cfg.Targets)) + for i, t := range cfg.Targets { + targets[i] = Target(t) + } + return targets +} diff --git a/build/config_test.go b/build/config_test.go new file mode 100644 index 0000000..0d56236 --- /dev/null +++ b/build/config_test.go @@ -0,0 +1,324 @@ +package build + +import ( + "os" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// setupConfigTestDir creates a temp directory with optional .core/build.yaml content. 
+func setupConfigTestDir(t *testing.T, configContent string) string { + t.Helper() + dir := t.TempDir() + + if configContent != "" { + coreDir := filepath.Join(dir, ConfigDir) + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + configPath := filepath.Join(coreDir, ConfigFileName) + err = os.WriteFile(configPath, []byte(configContent), 0644) + require.NoError(t, err) + } + + return dir +} + +func TestLoadConfig_Good(t *testing.T) { + fs := io.Local + t.Run("loads valid config", func(t *testing.T) { + content := ` +version: 1 +project: + name: myapp + description: A test application + main: ./cmd/myapp + binary: myapp +build: + cgo: true + flags: + - -trimpath + - -race + ldflags: + - -s + - -w + env: + - FOO=bar +targets: + - os: linux + arch: amd64 + - os: darwin + arch: arm64 +` + dir := setupConfigTestDir(t, content) + + cfg, err := LoadConfig(fs, dir) + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, 1, cfg.Version) + assert.Equal(t, "myapp", cfg.Project.Name) + assert.Equal(t, "A test application", cfg.Project.Description) + assert.Equal(t, "./cmd/myapp", cfg.Project.Main) + assert.Equal(t, "myapp", cfg.Project.Binary) + assert.True(t, cfg.Build.CGO) + assert.Equal(t, []string{"-trimpath", "-race"}, cfg.Build.Flags) + assert.Equal(t, []string{"-s", "-w"}, cfg.Build.LDFlags) + assert.Equal(t, []string{"FOO=bar"}, cfg.Build.Env) + assert.Len(t, cfg.Targets, 2) + assert.Equal(t, "linux", cfg.Targets[0].OS) + assert.Equal(t, "amd64", cfg.Targets[0].Arch) + assert.Equal(t, "darwin", cfg.Targets[1].OS) + assert.Equal(t, "arm64", cfg.Targets[1].Arch) + }) + + t.Run("returns defaults when config file missing", func(t *testing.T) { + dir := t.TempDir() + + cfg, err := LoadConfig(fs, dir) + require.NoError(t, err) + require.NotNil(t, cfg) + + defaults := DefaultConfig() + assert.Equal(t, defaults.Version, cfg.Version) + assert.Equal(t, defaults.Project.Main, cfg.Project.Main) + assert.Equal(t, defaults.Build.CGO, cfg.Build.CGO) + 
assert.Equal(t, defaults.Build.Flags, cfg.Build.Flags) + assert.Equal(t, defaults.Build.LDFlags, cfg.Build.LDFlags) + assert.Equal(t, defaults.Targets, cfg.Targets) + }) + + t.Run("applies defaults for missing fields", func(t *testing.T) { + content := ` +version: 2 +project: + name: partial +` + dir := setupConfigTestDir(t, content) + + cfg, err := LoadConfig(fs, dir) + require.NoError(t, err) + require.NotNil(t, cfg) + + // Explicit values preserved + assert.Equal(t, 2, cfg.Version) + assert.Equal(t, "partial", cfg.Project.Name) + + // Defaults applied + defaults := DefaultConfig() + assert.Equal(t, defaults.Project.Main, cfg.Project.Main) + assert.Equal(t, defaults.Build.Flags, cfg.Build.Flags) + assert.Equal(t, defaults.Build.LDFlags, cfg.Build.LDFlags) + assert.Equal(t, defaults.Targets, cfg.Targets) + }) + + t.Run("preserves empty arrays when explicitly set", func(t *testing.T) { + content := ` +version: 1 +project: + name: noflags +build: + flags: [] + ldflags: [] +targets: + - os: linux + arch: amd64 +` + dir := setupConfigTestDir(t, content) + + cfg, err := LoadConfig(fs, dir) + require.NoError(t, err) + require.NotNil(t, cfg) + + // Empty arrays are preserved (not replaced with defaults) + assert.Empty(t, cfg.Build.Flags) + assert.Empty(t, cfg.Build.LDFlags) + // Targets explicitly set + assert.Len(t, cfg.Targets, 1) + }) +} + +func TestLoadConfig_Bad(t *testing.T) { + fs := io.Local + t.Run("returns error for invalid YAML", func(t *testing.T) { + content := ` +version: 1 +project: + name: [invalid yaml +` + dir := setupConfigTestDir(t, content) + + cfg, err := LoadConfig(fs, dir) + assert.Error(t, err) + assert.Nil(t, cfg) + assert.Contains(t, err.Error(), "failed to parse config file") + }) + + t.Run("returns error for unreadable file", func(t *testing.T) { + dir := t.TempDir() + coreDir := filepath.Join(dir, ConfigDir) + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + // Create config as a directory instead of file + configPath := 
filepath.Join(coreDir, ConfigFileName) + err = os.Mkdir(configPath, 0755) + require.NoError(t, err) + + cfg, err := LoadConfig(fs, dir) + assert.Error(t, err) + assert.Nil(t, cfg) + assert.Contains(t, err.Error(), "failed to read config file") + }) +} + +func TestDefaultConfig_Good(t *testing.T) { + t.Run("returns sensible defaults", func(t *testing.T) { + cfg := DefaultConfig() + + assert.Equal(t, 1, cfg.Version) + assert.Equal(t, ".", cfg.Project.Main) + assert.Empty(t, cfg.Project.Name) + assert.Empty(t, cfg.Project.Binary) + assert.False(t, cfg.Build.CGO) + assert.Contains(t, cfg.Build.Flags, "-trimpath") + assert.Contains(t, cfg.Build.LDFlags, "-s") + assert.Contains(t, cfg.Build.LDFlags, "-w") + assert.Empty(t, cfg.Build.Env) + + // Default targets cover common platforms + assert.Len(t, cfg.Targets, 4) + hasLinuxAmd64 := false + hasDarwinArm64 := false + hasWindowsAmd64 := false + for _, t := range cfg.Targets { + if t.OS == "linux" && t.Arch == "amd64" { + hasLinuxAmd64 = true + } + if t.OS == "darwin" && t.Arch == "arm64" { + hasDarwinArm64 = true + } + if t.OS == "windows" && t.Arch == "amd64" { + hasWindowsAmd64 = true + } + } + assert.True(t, hasLinuxAmd64) + assert.True(t, hasDarwinArm64) + assert.True(t, hasWindowsAmd64) + }) +} + +func TestConfigPath_Good(t *testing.T) { + t.Run("returns correct path", func(t *testing.T) { + path := ConfigPath("/project/root") + assert.Equal(t, "/project/root/.core/build.yaml", path) + }) +} + +func TestConfigExists_Good(t *testing.T) { + fs := io.Local + t.Run("returns true when config exists", func(t *testing.T) { + dir := setupConfigTestDir(t, "version: 1") + assert.True(t, ConfigExists(fs, dir)) + }) + + t.Run("returns false when config missing", func(t *testing.T) { + dir := t.TempDir() + assert.False(t, ConfigExists(fs, dir)) + }) + + t.Run("returns false when .core dir missing", func(t *testing.T) { + dir := t.TempDir() + assert.False(t, ConfigExists(fs, dir)) + }) +} + +func TestLoadConfig_Good_SignConfig(t 
*testing.T) { + tmpDir := t.TempDir() + coreDir := filepath.Join(tmpDir, ".core") + _ = os.MkdirAll(coreDir, 0755) + + configContent := `version: 1 +sign: + enabled: true + gpg: + key: "ABCD1234" + macos: + identity: "Developer ID Application: Test" + notarize: true +` + _ = os.WriteFile(filepath.Join(coreDir, "build.yaml"), []byte(configContent), 0644) + + cfg, err := LoadConfig(io.Local, tmpDir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !cfg.Sign.Enabled { + t.Error("expected Sign.Enabled to be true") + } + if cfg.Sign.GPG.Key != "ABCD1234" { + t.Errorf("expected GPG.Key 'ABCD1234', got %q", cfg.Sign.GPG.Key) + } + if cfg.Sign.MacOS.Identity != "Developer ID Application: Test" { + t.Errorf("expected MacOS.Identity, got %q", cfg.Sign.MacOS.Identity) + } + if !cfg.Sign.MacOS.Notarize { + t.Error("expected MacOS.Notarize to be true") + } +} + +func TestBuildConfig_ToTargets_Good(t *testing.T) { + t.Run("converts TargetConfig to Target", func(t *testing.T) { + cfg := &BuildConfig{ + Targets: []TargetConfig{ + {OS: "linux", Arch: "amd64"}, + {OS: "darwin", Arch: "arm64"}, + {OS: "windows", Arch: "386"}, + }, + } + + targets := cfg.ToTargets() + require.Len(t, targets, 3) + + assert.Equal(t, Target{OS: "linux", Arch: "amd64"}, targets[0]) + assert.Equal(t, Target{OS: "darwin", Arch: "arm64"}, targets[1]) + assert.Equal(t, Target{OS: "windows", Arch: "386"}, targets[2]) + }) + + t.Run("returns empty slice for no targets", func(t *testing.T) { + cfg := &BuildConfig{ + Targets: []TargetConfig{}, + } + + targets := cfg.ToTargets() + assert.Empty(t, targets) + }) +} + +// TestLoadConfig_Testdata tests loading from the testdata fixture. 
+func TestLoadConfig_Testdata(t *testing.T) { + fs := io.Local + abs, err := filepath.Abs("testdata/config-project") + require.NoError(t, err) + + t.Run("loads config-project fixture", func(t *testing.T) { + cfg, err := LoadConfig(fs, abs) + require.NoError(t, err) + require.NotNil(t, cfg) + + assert.Equal(t, 1, cfg.Version) + assert.Equal(t, "example-cli", cfg.Project.Name) + assert.Equal(t, "An example CLI application", cfg.Project.Description) + assert.Equal(t, "./cmd/example", cfg.Project.Main) + assert.Equal(t, "example", cfg.Project.Binary) + assert.False(t, cfg.Build.CGO) + assert.Equal(t, []string{"-trimpath"}, cfg.Build.Flags) + assert.Equal(t, []string{"-s", "-w"}, cfg.Build.LDFlags) + assert.Len(t, cfg.Targets, 3) + }) +} diff --git a/build/discovery.go b/build/discovery.go new file mode 100644 index 0000000..70d12ff --- /dev/null +++ b/build/discovery.go @@ -0,0 +1,94 @@ +package build + +import ( + "path/filepath" + "slices" + + "forge.lthn.ai/core/go/pkg/io" +) + +// Marker files for project type detection. +const ( + markerGoMod = "go.mod" + markerWails = "wails.json" + markerNodePackage = "package.json" + markerComposer = "composer.json" +) + +// projectMarker maps a marker file to its project type. +type projectMarker struct { + file string + projectType ProjectType +} + +// markers defines the detection order. More specific types come first. +// Wails projects have both wails.json and go.mod, so wails is checked first. +var markers = []projectMarker{ + {markerWails, ProjectTypeWails}, + {markerGoMod, ProjectTypeGo}, + {markerNodePackage, ProjectTypeNode}, + {markerComposer, ProjectTypePHP}, +} + +// Discover detects project types in the given directory by checking for marker files. +// Returns a slice of detected project types, ordered by priority (most specific first). +// For example, a Wails project returns [wails, go] since it has both wails.json and go.mod. 
+func Discover(fs io.Medium, dir string) ([]ProjectType, error) { + var detected []ProjectType + + for _, m := range markers { + path := filepath.Join(dir, m.file) + if fileExists(fs, path) { + // Avoid duplicates (shouldn't happen with current markers, but defensive) + if !slices.Contains(detected, m.projectType) { + detected = append(detected, m.projectType) + } + } + } + + return detected, nil +} + +// PrimaryType returns the most specific project type detected in the directory. +// Returns empty string if no project type is detected. +func PrimaryType(fs io.Medium, dir string) (ProjectType, error) { + types, err := Discover(fs, dir) + if err != nil { + return "", err + } + if len(types) == 0 { + return "", nil + } + return types[0], nil +} + +// IsGoProject checks if the directory contains a Go project (go.mod or wails.json). +func IsGoProject(fs io.Medium, dir string) bool { + return fileExists(fs, filepath.Join(dir, markerGoMod)) || + fileExists(fs, filepath.Join(dir, markerWails)) +} + +// IsWailsProject checks if the directory contains a Wails project. +func IsWailsProject(fs io.Medium, dir string) bool { + return fileExists(fs, filepath.Join(dir, markerWails)) +} + +// IsNodeProject checks if the directory contains a Node.js project. +func IsNodeProject(fs io.Medium, dir string) bool { + return fileExists(fs, filepath.Join(dir, markerNodePackage)) +} + +// IsPHPProject checks if the directory contains a PHP project. +func IsPHPProject(fs io.Medium, dir string) bool { + return fileExists(fs, filepath.Join(dir, markerComposer)) +} + +// IsCPPProject checks if the directory contains a C++ project (CMakeLists.txt). +func IsCPPProject(fs io.Medium, dir string) bool { + return fileExists(fs, filepath.Join(dir, "CMakeLists.txt")) +} + +// fileExists checks if a file exists and is not a directory. 
+func fileExists(fs io.Medium, path string) bool { + return fs.IsFile(path) +} diff --git a/build/discovery_test.go b/build/discovery_test.go new file mode 100644 index 0000000..f9d1ada --- /dev/null +++ b/build/discovery_test.go @@ -0,0 +1,228 @@ +package build + +import ( + "os" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// setupTestDir creates a temporary directory with the specified marker files. +func setupTestDir(t *testing.T, markers ...string) string { + t.Helper() + dir := t.TempDir() + for _, m := range markers { + path := filepath.Join(dir, m) + err := os.WriteFile(path, []byte("{}"), 0644) + require.NoError(t, err) + } + return dir +} + +func TestDiscover_Good(t *testing.T) { + fs := io.Local + t.Run("detects Go project", func(t *testing.T) { + dir := setupTestDir(t, "go.mod") + types, err := Discover(fs, dir) + assert.NoError(t, err) + assert.Equal(t, []ProjectType{ProjectTypeGo}, types) + }) + + t.Run("detects Wails project with priority over Go", func(t *testing.T) { + dir := setupTestDir(t, "wails.json", "go.mod") + types, err := Discover(fs, dir) + assert.NoError(t, err) + assert.Equal(t, []ProjectType{ProjectTypeWails, ProjectTypeGo}, types) + }) + + t.Run("detects Node.js project", func(t *testing.T) { + dir := setupTestDir(t, "package.json") + types, err := Discover(fs, dir) + assert.NoError(t, err) + assert.Equal(t, []ProjectType{ProjectTypeNode}, types) + }) + + t.Run("detects PHP project", func(t *testing.T) { + dir := setupTestDir(t, "composer.json") + types, err := Discover(fs, dir) + assert.NoError(t, err) + assert.Equal(t, []ProjectType{ProjectTypePHP}, types) + }) + + t.Run("detects multiple project types", func(t *testing.T) { + dir := setupTestDir(t, "go.mod", "package.json") + types, err := Discover(fs, dir) + assert.NoError(t, err) + assert.Equal(t, []ProjectType{ProjectTypeGo, ProjectTypeNode}, types) + }) + + t.Run("empty 
directory returns empty slice", func(t *testing.T) { + dir := t.TempDir() + types, err := Discover(fs, dir) + assert.NoError(t, err) + assert.Empty(t, types) + }) +} + +func TestDiscover_Bad(t *testing.T) { + fs := io.Local + t.Run("non-existent directory returns empty slice", func(t *testing.T) { + types, err := Discover(fs, "/non/existent/path") + assert.NoError(t, err) // os.Stat fails silently in fileExists + assert.Empty(t, types) + }) + + t.Run("directory marker is ignored", func(t *testing.T) { + dir := t.TempDir() + // Create go.mod as a directory instead of a file + err := os.Mkdir(filepath.Join(dir, "go.mod"), 0755) + require.NoError(t, err) + + types, err := Discover(fs, dir) + assert.NoError(t, err) + assert.Empty(t, types) + }) +} + +func TestPrimaryType_Good(t *testing.T) { + fs := io.Local + t.Run("returns wails for wails project", func(t *testing.T) { + dir := setupTestDir(t, "wails.json", "go.mod") + primary, err := PrimaryType(fs, dir) + assert.NoError(t, err) + assert.Equal(t, ProjectTypeWails, primary) + }) + + t.Run("returns go for go-only project", func(t *testing.T) { + dir := setupTestDir(t, "go.mod") + primary, err := PrimaryType(fs, dir) + assert.NoError(t, err) + assert.Equal(t, ProjectTypeGo, primary) + }) + + t.Run("returns empty string for empty directory", func(t *testing.T) { + dir := t.TempDir() + primary, err := PrimaryType(fs, dir) + assert.NoError(t, err) + assert.Empty(t, primary) + }) +} + +func TestIsGoProject_Good(t *testing.T) { + fs := io.Local + t.Run("true with go.mod", func(t *testing.T) { + dir := setupTestDir(t, "go.mod") + assert.True(t, IsGoProject(fs, dir)) + }) + + t.Run("true with wails.json", func(t *testing.T) { + dir := setupTestDir(t, "wails.json") + assert.True(t, IsGoProject(fs, dir)) + }) + + t.Run("false without markers", func(t *testing.T) { + dir := t.TempDir() + assert.False(t, IsGoProject(fs, dir)) + }) +} + +func TestIsWailsProject_Good(t *testing.T) { + fs := io.Local + t.Run("true with wails.json", 
func(t *testing.T) { + dir := setupTestDir(t, "wails.json") + assert.True(t, IsWailsProject(fs, dir)) + }) + + t.Run("false with only go.mod", func(t *testing.T) { + dir := setupTestDir(t, "go.mod") + assert.False(t, IsWailsProject(fs, dir)) + }) +} + +func TestIsNodeProject_Good(t *testing.T) { + fs := io.Local + t.Run("true with package.json", func(t *testing.T) { + dir := setupTestDir(t, "package.json") + assert.True(t, IsNodeProject(fs, dir)) + }) + + t.Run("false without package.json", func(t *testing.T) { + dir := t.TempDir() + assert.False(t, IsNodeProject(fs, dir)) + }) +} + +func TestIsPHPProject_Good(t *testing.T) { + fs := io.Local + t.Run("true with composer.json", func(t *testing.T) { + dir := setupTestDir(t, "composer.json") + assert.True(t, IsPHPProject(fs, dir)) + }) + + t.Run("false without composer.json", func(t *testing.T) { + dir := t.TempDir() + assert.False(t, IsPHPProject(fs, dir)) + }) +} + +func TestTarget_Good(t *testing.T) { + target := Target{OS: "linux", Arch: "amd64"} + assert.Equal(t, "linux/amd64", target.String()) +} + +func TestFileExists_Good(t *testing.T) { + fs := io.Local + t.Run("returns true for existing file", func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.txt") + err := os.WriteFile(path, []byte("content"), 0644) + require.NoError(t, err) + assert.True(t, fileExists(fs, path)) + }) + + t.Run("returns false for directory", func(t *testing.T) { + dir := t.TempDir() + assert.False(t, fileExists(fs, dir)) + }) + + t.Run("returns false for non-existent path", func(t *testing.T) { + assert.False(t, fileExists(fs, "/non/existent/file")) + }) +} + +// TestDiscover_Testdata tests discovery using the testdata fixtures. +// These serve as integration tests with realistic project structures. 
+func TestDiscover_Testdata(t *testing.T) { + fs := io.Local + testdataDir, err := filepath.Abs("testdata") + require.NoError(t, err) + + tests := []struct { + name string + dir string + expected []ProjectType + }{ + {"go-project", "go-project", []ProjectType{ProjectTypeGo}}, + {"wails-project", "wails-project", []ProjectType{ProjectTypeWails, ProjectTypeGo}}, + {"node-project", "node-project", []ProjectType{ProjectTypeNode}}, + {"php-project", "php-project", []ProjectType{ProjectTypePHP}}, + {"multi-project", "multi-project", []ProjectType{ProjectTypeGo, ProjectTypeNode}}, + {"empty-project", "empty-project", []ProjectType{}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := filepath.Join(testdataDir, tt.dir) + types, err := Discover(fs, dir) + assert.NoError(t, err) + if len(tt.expected) == 0 { + assert.Empty(t, types) + } else { + assert.Equal(t, tt.expected, types) + } + }) + } +} diff --git a/build/signing/codesign.go b/build/signing/codesign.go new file mode 100644 index 0000000..014a026 --- /dev/null +++ b/build/signing/codesign.go @@ -0,0 +1,103 @@ +package signing + +import ( + "context" + "fmt" + "os/exec" + "runtime" + + "forge.lthn.ai/core/go/pkg/io" +) + +// MacOSSigner signs binaries using macOS codesign. +type MacOSSigner struct { + config MacOSConfig +} + +// Compile-time interface check. +var _ Signer = (*MacOSSigner)(nil) + +// NewMacOSSigner creates a new macOS signer. +func NewMacOSSigner(cfg MacOSConfig) *MacOSSigner { + return &MacOSSigner{config: cfg} +} + +// Name returns "codesign". +func (s *MacOSSigner) Name() string { + return "codesign" +} + +// Available checks if running on macOS with codesign and identity configured. +func (s *MacOSSigner) Available() bool { + if runtime.GOOS != "darwin" { + return false + } + if s.config.Identity == "" { + return false + } + _, err := exec.LookPath("codesign") + return err == nil +} + +// Sign codesigns a binary with hardened runtime. 
+func (s *MacOSSigner) Sign(ctx context.Context, fs io.Medium, binary string) error { + if !s.Available() { + return fmt.Errorf("codesign.Sign: codesign not available") + } + + cmd := exec.CommandContext(ctx, "codesign", + "--sign", s.config.Identity, + "--timestamp", + "--options", "runtime", // Hardened runtime for notarization + "--force", + binary, + ) + + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("codesign.Sign: %w\nOutput: %s", err, string(output)) + } + + return nil +} + +// Notarize submits binary to Apple for notarization and staples the ticket. +// This blocks until Apple responds (typically 1-5 minutes). +func (s *MacOSSigner) Notarize(ctx context.Context, fs io.Medium, binary string) error { + if s.config.AppleID == "" || s.config.TeamID == "" || s.config.AppPassword == "" { + return fmt.Errorf("codesign.Notarize: missing Apple credentials (apple_id, team_id, app_password)") + } + + // Create ZIP for submission + zipPath := binary + ".zip" + zipCmd := exec.CommandContext(ctx, "zip", "-j", zipPath, binary) + if output, err := zipCmd.CombinedOutput(); err != nil { + return fmt.Errorf("codesign.Notarize: failed to create zip: %w\nOutput: %s", err, string(output)) + } + defer func() { _ = fs.Delete(zipPath) }() + + // Submit to Apple and wait + submitCmd := exec.CommandContext(ctx, "xcrun", "notarytool", "submit", + zipPath, + "--apple-id", s.config.AppleID, + "--team-id", s.config.TeamID, + "--password", s.config.AppPassword, + "--wait", + ) + if output, err := submitCmd.CombinedOutput(); err != nil { + return fmt.Errorf("codesign.Notarize: notarization failed: %w\nOutput: %s", err, string(output)) + } + + // Staple the ticket + stapleCmd := exec.CommandContext(ctx, "xcrun", "stapler", "staple", binary) + if output, err := stapleCmd.CombinedOutput(); err != nil { + return fmt.Errorf("codesign.Notarize: failed to staple: %w\nOutput: %s", err, string(output)) + } + + return nil +} + +// ShouldNotarize returns true if 
notarization is enabled. +func (s *MacOSSigner) ShouldNotarize() bool { + return s.config.Notarize +} diff --git a/build/signing/codesign_test.go b/build/signing/codesign_test.go new file mode 100644 index 0000000..bb7e7d2 --- /dev/null +++ b/build/signing/codesign_test.go @@ -0,0 +1,62 @@ +package signing + +import ( + "context" + "runtime" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" +) + +func TestMacOSSigner_Good_Name(t *testing.T) { + s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"}) + assert.Equal(t, "codesign", s.Name()) +} + +func TestMacOSSigner_Good_Available(t *testing.T) { + s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"}) + + if runtime.GOOS == "darwin" { + // Just verify it doesn't panic + _ = s.Available() + } else { + assert.False(t, s.Available()) + } +} + +func TestMacOSSigner_Bad_NoIdentity(t *testing.T) { + s := NewMacOSSigner(MacOSConfig{}) + assert.False(t, s.Available()) +} + +func TestMacOSSigner_Sign_Bad(t *testing.T) { + t.Run("fails when not available", func(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("skipping on macOS") + } + fs := io.Local + s := NewMacOSSigner(MacOSConfig{Identity: "test"}) + err := s.Sign(context.Background(), fs, "test") + assert.Error(t, err) + assert.Contains(t, err.Error(), "not available") + }) +} + +func TestMacOSSigner_Notarize_Bad(t *testing.T) { + fs := io.Local + t.Run("fails with missing credentials", func(t *testing.T) { + s := NewMacOSSigner(MacOSConfig{}) + err := s.Notarize(context.Background(), fs, "test") + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing Apple credentials") + }) +} + +func TestMacOSSigner_ShouldNotarize(t *testing.T) { + s := NewMacOSSigner(MacOSConfig{Notarize: true}) + assert.True(t, s.ShouldNotarize()) + + s2 := NewMacOSSigner(MacOSConfig{Notarize: false}) + assert.False(t, s2.ShouldNotarize()) +} diff --git a/build/signing/gpg.go b/build/signing/gpg.go new 
file mode 100644 index 0000000..6183510 --- /dev/null +++ b/build/signing/gpg.go @@ -0,0 +1,59 @@ +package signing + +import ( + "context" + "fmt" + "os/exec" + + "forge.lthn.ai/core/go/pkg/io" +) + +// GPGSigner signs files using GPG. +type GPGSigner struct { + KeyID string +} + +// Compile-time interface check. +var _ Signer = (*GPGSigner)(nil) + +// NewGPGSigner creates a new GPG signer. +func NewGPGSigner(keyID string) *GPGSigner { + return &GPGSigner{KeyID: keyID} +} + +// Name returns "gpg". +func (s *GPGSigner) Name() string { + return "gpg" +} + +// Available checks if gpg is installed and key is configured. +func (s *GPGSigner) Available() bool { + if s.KeyID == "" { + return false + } + _, err := exec.LookPath("gpg") + return err == nil +} + +// Sign creates a detached ASCII-armored signature. +// For file.txt, creates file.txt.asc +func (s *GPGSigner) Sign(ctx context.Context, fs io.Medium, file string) error { + if !s.Available() { + return fmt.Errorf("gpg.Sign: gpg not available or key not configured") + } + + cmd := exec.CommandContext(ctx, "gpg", + "--detach-sign", + "--armor", + "--local-user", s.KeyID, + "--output", file+".asc", + file, + ) + + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("gpg.Sign: %w\nOutput: %s", err, string(output)) + } + + return nil +} diff --git a/build/signing/gpg_test.go b/build/signing/gpg_test.go new file mode 100644 index 0000000..7710c75 --- /dev/null +++ b/build/signing/gpg_test.go @@ -0,0 +1,34 @@ +package signing + +import ( + "context" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" +) + +func TestGPGSigner_Good_Name(t *testing.T) { + s := NewGPGSigner("ABCD1234") + assert.Equal(t, "gpg", s.Name()) +} + +func TestGPGSigner_Good_Available(t *testing.T) { + s := NewGPGSigner("ABCD1234") + _ = s.Available() +} + +func TestGPGSigner_Bad_NoKey(t *testing.T) { + s := NewGPGSigner("") + assert.False(t, s.Available()) +} + +func TestGPGSigner_Sign_Bad(t 
*testing.T) { + fs := io.Local + t.Run("fails when no key", func(t *testing.T) { + s := NewGPGSigner("") + err := s.Sign(context.Background(), fs, "test.txt") + assert.Error(t, err) + assert.Contains(t, err.Error(), "not available or key not configured") + }) +} diff --git a/build/signing/sign.go b/build/signing/sign.go new file mode 100644 index 0000000..33bd907 --- /dev/null +++ b/build/signing/sign.go @@ -0,0 +1,96 @@ +package signing + +import ( + "context" + "fmt" + "runtime" + + "forge.lthn.ai/core/go/pkg/io" +) + +// Artifact represents a build output that can be signed. +// This mirrors build.Artifact to avoid import cycles. +type Artifact struct { + Path string + OS string + Arch string +} + +// SignBinaries signs macOS binaries in the artifacts list. +// Only signs darwin binaries when running on macOS with a configured identity. +func SignBinaries(ctx context.Context, fs io.Medium, cfg SignConfig, artifacts []Artifact) error { + if !cfg.Enabled { + return nil + } + + // Only sign on macOS + if runtime.GOOS != "darwin" { + return nil + } + + signer := NewMacOSSigner(cfg.MacOS) + if !signer.Available() { + return nil // Silently skip if not configured + } + + for _, artifact := range artifacts { + if artifact.OS != "darwin" { + continue + } + + fmt.Printf(" Signing %s...\n", artifact.Path) + if err := signer.Sign(ctx, fs, artifact.Path); err != nil { + return fmt.Errorf("failed to sign %s: %w", artifact.Path, err) + } + } + + return nil +} + +// NotarizeBinaries notarizes macOS binaries if enabled. 
+func NotarizeBinaries(ctx context.Context, fs io.Medium, cfg SignConfig, artifacts []Artifact) error { + if !cfg.Enabled || !cfg.MacOS.Notarize { + return nil + } + + if runtime.GOOS != "darwin" { + return nil + } + + signer := NewMacOSSigner(cfg.MacOS) + if !signer.Available() { + return fmt.Errorf("notarization requested but codesign not available") + } + + for _, artifact := range artifacts { + if artifact.OS != "darwin" { + continue + } + + fmt.Printf(" Notarizing %s (this may take a few minutes)...\n", artifact.Path) + if err := signer.Notarize(ctx, fs, artifact.Path); err != nil { + return fmt.Errorf("failed to notarize %s: %w", artifact.Path, err) + } + } + + return nil +} + +// SignChecksums signs the checksums file with GPG. +func SignChecksums(ctx context.Context, fs io.Medium, cfg SignConfig, checksumFile string) error { + if !cfg.Enabled { + return nil + } + + signer := NewGPGSigner(cfg.GPG.Key) + if !signer.Available() { + return nil // Silently skip if not configured + } + + fmt.Printf(" Signing %s with GPG...\n", checksumFile) + if err := signer.Sign(ctx, fs, checksumFile); err != nil { + return fmt.Errorf("failed to sign checksums: %w", err) + } + + return nil +} diff --git a/build/signing/signer.go b/build/signing/signer.go new file mode 100644 index 0000000..27b8946 --- /dev/null +++ b/build/signing/signer.go @@ -0,0 +1,83 @@ +// Package signing provides code signing for build artifacts. +package signing + +import ( + "context" + "os" + "strings" + + "forge.lthn.ai/core/go/pkg/io" +) + +// Signer defines the interface for code signing implementations. +type Signer interface { + // Name returns the signer's identifier. + Name() string + // Available checks if this signer can be used. + Available() bool + // Sign signs the artifact at the given path. + Sign(ctx context.Context, fs io.Medium, path string) error +} + +// SignConfig holds signing configuration from .core/build.yaml. 
+type SignConfig struct { + Enabled bool `yaml:"enabled"` + GPG GPGConfig `yaml:"gpg,omitempty"` + MacOS MacOSConfig `yaml:"macos,omitempty"` + Windows WindowsConfig `yaml:"windows,omitempty"` +} + +// GPGConfig holds GPG signing configuration. +type GPGConfig struct { + Key string `yaml:"key"` // Key ID or fingerprint, supports $ENV +} + +// MacOSConfig holds macOS codesign configuration. +type MacOSConfig struct { + Identity string `yaml:"identity"` // Developer ID Application: ... + Notarize bool `yaml:"notarize"` // Submit to Apple for notarization + AppleID string `yaml:"apple_id"` // Apple account email + TeamID string `yaml:"team_id"` // Team ID + AppPassword string `yaml:"app_password"` // App-specific password +} + +// WindowsConfig holds Windows signtool configuration (placeholder). +type WindowsConfig struct { + Certificate string `yaml:"certificate"` // Path to .pfx + Password string `yaml:"password"` // Certificate password +} + +// DefaultSignConfig returns sensible defaults. +func DefaultSignConfig() SignConfig { + return SignConfig{ + Enabled: true, + GPG: GPGConfig{ + Key: os.Getenv("GPG_KEY_ID"), + }, + MacOS: MacOSConfig{ + Identity: os.Getenv("CODESIGN_IDENTITY"), + AppleID: os.Getenv("APPLE_ID"), + TeamID: os.Getenv("APPLE_TEAM_ID"), + AppPassword: os.Getenv("APPLE_APP_PASSWORD"), + }, + } +} + +// ExpandEnv expands environment variables in config values. +func (c *SignConfig) ExpandEnv() { + c.GPG.Key = expandEnv(c.GPG.Key) + c.MacOS.Identity = expandEnv(c.MacOS.Identity) + c.MacOS.AppleID = expandEnv(c.MacOS.AppleID) + c.MacOS.TeamID = expandEnv(c.MacOS.TeamID) + c.MacOS.AppPassword = expandEnv(c.MacOS.AppPassword) + c.Windows.Certificate = expandEnv(c.Windows.Certificate) + c.Windows.Password = expandEnv(c.Windows.Password) +} + +// expandEnv expands $VAR or ${VAR} in a string. 
// expandEnv expands $VAR or ${VAR} in a string. Only values that start
// with "$" are expanded; anything else is returned untouched, so literal
// secrets containing "$" in the middle are never mangled.
func expandEnv(s string) string {
	if !strings.HasPrefix(s, "$") {
		return s
	}
	return os.ExpandEnv(s)
}
Enabled: false, + } + + artifacts := []Artifact{ + {Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"}, + } + + err := NotarizeBinaries(ctx, fs, cfg, artifacts) + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestNotarizeBinaries_Good_NotarizeDisabled(t *testing.T) { + ctx := context.Background() + fs := io.Local + cfg := SignConfig{ + Enabled: true, + MacOS: MacOSConfig{ + Notarize: false, + }, + } + + artifacts := []Artifact{ + {Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"}, + } + + err := NotarizeBinaries(ctx, fs, cfg, artifacts) + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestSignChecksums_Good_SkipsNoKey(t *testing.T) { + ctx := context.Background() + fs := io.Local + cfg := SignConfig{ + Enabled: true, + GPG: GPGConfig{ + Key: "", // No key configured + }, + } + + // Should silently skip when no key + err := SignChecksums(ctx, fs, cfg, "/tmp/CHECKSUMS.txt") + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestSignChecksums_Good_Disabled(t *testing.T) { + ctx := context.Background() + fs := io.Local + cfg := SignConfig{ + Enabled: false, + } + + err := SignChecksums(ctx, fs, cfg, "/tmp/CHECKSUMS.txt") + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestDefaultSignConfig(t *testing.T) { + cfg := DefaultSignConfig() + assert.True(t, cfg.Enabled) +} + +func TestSignConfig_ExpandEnv(t *testing.T) { + t.Setenv("TEST_KEY", "ABC") + cfg := SignConfig{ + GPG: GPGConfig{Key: "$TEST_KEY"}, + } + cfg.ExpandEnv() + assert.Equal(t, "ABC", cfg.GPG.Key) +} + +func TestWindowsSigner_Good(t *testing.T) { + fs := io.Local + s := NewWindowsSigner(WindowsConfig{}) + assert.Equal(t, "signtool", s.Name()) + assert.False(t, s.Available()) + assert.NoError(t, s.Sign(context.Background(), fs, "test.exe")) +} diff --git a/build/signing/signtool.go b/build/signing/signtool.go new file mode 100644 index 0000000..a4fba5c --- /dev/null +++ b/build/signing/signtool.go @@ -0,0 
+1,36 @@ +package signing + +import ( + "context" + + "forge.lthn.ai/core/go/pkg/io" +) + +// WindowsSigner signs binaries using Windows signtool (placeholder). +type WindowsSigner struct { + config WindowsConfig +} + +// Compile-time interface check. +var _ Signer = (*WindowsSigner)(nil) + +// NewWindowsSigner creates a new Windows signer. +func NewWindowsSigner(cfg WindowsConfig) *WindowsSigner { + return &WindowsSigner{config: cfg} +} + +// Name returns "signtool". +func (s *WindowsSigner) Name() string { + return "signtool" +} + +// Available returns false (not yet implemented). +func (s *WindowsSigner) Available() bool { + return false +} + +// Sign is a placeholder that does nothing. +func (s *WindowsSigner) Sign(ctx context.Context, fs io.Medium, binary string) error { + // TODO: Implement Windows signing + return nil +} diff --git a/build/testdata/config-project/.core/build.yaml b/build/testdata/config-project/.core/build.yaml new file mode 100644 index 0000000..ff3a997 --- /dev/null +++ b/build/testdata/config-project/.core/build.yaml @@ -0,0 +1,25 @@ +# Example build configuration for Core build system +version: 1 + +project: + name: example-cli + description: An example CLI application + main: ./cmd/example + binary: example + +build: + cgo: false + flags: + - -trimpath + ldflags: + - -s + - -w + env: [] + +targets: + - os: linux + arch: amd64 + - os: darwin + arch: arm64 + - os: windows + arch: amd64 diff --git a/build/testdata/cpp-project/CMakeLists.txt b/build/testdata/cpp-project/CMakeLists.txt new file mode 100644 index 0000000..f6ba2c7 --- /dev/null +++ b/build/testdata/cpp-project/CMakeLists.txt @@ -0,0 +1,2 @@ +cmake_minimum_required(VERSION 3.16) +project(TestCPP) diff --git a/build/testdata/empty-project/.gitkeep b/build/testdata/empty-project/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/build/testdata/go-project/go.mod b/build/testdata/go-project/go.mod new file mode 100644 index 0000000..deedf38 --- /dev/null +++ 
b/build/testdata/go-project/go.mod @@ -0,0 +1,3 @@ +module example.com/go-project + +go 1.21 diff --git a/build/testdata/multi-project/go.mod b/build/testdata/multi-project/go.mod new file mode 100644 index 0000000..f45e24d --- /dev/null +++ b/build/testdata/multi-project/go.mod @@ -0,0 +1,3 @@ +module example.com/multi-project + +go 1.21 diff --git a/build/testdata/multi-project/package.json b/build/testdata/multi-project/package.json new file mode 100644 index 0000000..18c5954 --- /dev/null +++ b/build/testdata/multi-project/package.json @@ -0,0 +1,4 @@ +{ + "name": "multi-project", + "version": "1.0.0" +} diff --git a/build/testdata/node-project/package.json b/build/testdata/node-project/package.json new file mode 100644 index 0000000..6d873ce --- /dev/null +++ b/build/testdata/node-project/package.json @@ -0,0 +1,4 @@ +{ + "name": "node-project", + "version": "1.0.0" +} diff --git a/build/testdata/php-project/composer.json b/build/testdata/php-project/composer.json new file mode 100644 index 0000000..962108e --- /dev/null +++ b/build/testdata/php-project/composer.json @@ -0,0 +1,4 @@ +{ + "name": "vendor/php-project", + "type": "library" +} diff --git a/build/testdata/wails-project/go.mod b/build/testdata/wails-project/go.mod new file mode 100644 index 0000000..e4daed1 --- /dev/null +++ b/build/testdata/wails-project/go.mod @@ -0,0 +1,3 @@ +module example.com/wails-project + +go 1.21 diff --git a/build/testdata/wails-project/wails.json b/build/testdata/wails-project/wails.json new file mode 100644 index 0000000..aaa778f --- /dev/null +++ b/build/testdata/wails-project/wails.json @@ -0,0 +1,4 @@ +{ + "name": "wails-project", + "outputfilename": "wails-project" +} diff --git a/container/container.go b/container/container.go new file mode 100644 index 0000000..d7161c3 --- /dev/null +++ b/container/container.go @@ -0,0 +1,106 @@ +// Package container provides a runtime for managing LinuxKit containers. 
// Container represents a running LinuxKit container/VM instance.
type Container struct {
	// ID is a unique identifier for the container (8 character hex string).
	ID string `json:"id"`
	// Name is the optional human-readable name for the container.
	Name string `json:"name,omitempty"`
	// Image is the path to the LinuxKit image being run.
	Image string `json:"image"`
	// Status represents the current state of the container.
	Status Status `json:"status"`
	// PID is the process ID of the hypervisor running this container.
	PID int `json:"pid"`
	// StartedAt is when the container was started.
	StartedAt time.Time `json:"started_at"`
	// Ports maps host ports to container ports.
	Ports map[int]int `json:"ports,omitempty"`
	// Memory is the amount of memory allocated in MB.
	Memory int `json:"memory,omitempty"`
	// CPUs is the number of CPUs allocated.
	CPUs int `json:"cpus,omitempty"`
}

// Status represents the state of a container.
type Status string

const (
	// StatusRunning indicates the container is running.
	StatusRunning Status = "running"
	// StatusStopped indicates the container has stopped.
	StatusStopped Status = "stopped"
	// StatusError indicates the container encountered an error.
	StatusError Status = "error"
)

// RunOptions configures how a container should be run.
type RunOptions struct {
	// Name is an optional human-readable name for the container.
	Name string
	// Detach runs the container in the background.
	Detach bool
	// Memory is the amount of memory to allocate in MB (default: 1024).
	Memory int
	// CPUs is the number of CPUs to allocate (default: 1).
	CPUs int
	// Ports maps host ports to container ports.
	Ports map[int]int
	// Volumes maps host paths to container paths.
	Volumes map[string]string
	// SSHPort is the port to use for SSH access (default: 2222).
	SSHPort int
	// SSHKey is the path to the SSH private key for exec commands.
	SSHKey string
}

// Manager defines the interface for container lifecycle management.
type Manager interface {
	// Run starts a new container from the given image.
	Run(ctx context.Context, image string, opts RunOptions) (*Container, error)
	// Stop stops a running container by ID.
	Stop(ctx context.Context, id string) error
	// List returns all known containers.
	List(ctx context.Context) ([]*Container, error)
	// Logs returns a reader for the container's log output.
	// If follow is true, the reader will continue to stream new log entries.
	Logs(ctx context.Context, id string, follow bool) (io.ReadCloser, error)
	// Exec executes a command inside the container via SSH.
	Exec(ctx context.Context, id string, cmd []string) error
}

// GenerateID creates a new unique container ID (8 hex characters) from
// 4 bytes of cryptographic randomness.
func GenerateID() (string, error) {
	var raw [4]byte
	if _, err := rand.Read(raw[:]); err != nil {
		return "", err
	}
	return hex.EncodeToString(raw[:]), nil
}

// ImageFormat represents the format of a LinuxKit image.
type ImageFormat string

const (
	// FormatISO is an ISO image format.
	FormatISO ImageFormat = "iso"
	// FormatQCOW2 is a QEMU Copy-On-Write image format.
	FormatQCOW2 ImageFormat = "qcow2"
	// FormatVMDK is a VMware disk image format.
	FormatVMDK ImageFormat = "vmdk"
	// FormatRaw is a raw disk image format.
	FormatRaw ImageFormat = "raw"
	// FormatUnknown indicates an unknown image format.
	FormatUnknown ImageFormat = "unknown"
)

// Hypervisor defines the interface for VM hypervisors.
type Hypervisor interface {
	// Name returns the name of the hypervisor.
	Name() string
	// Available checks if the hypervisor is available on the system.
	Available() bool
	// BuildCommand builds the command to run a VM with the given options.
	BuildCommand(ctx context.Context, image string, opts *HypervisorOptions) (*exec.Cmd, error)
}

// HypervisorOptions contains options for running a VM.
type HypervisorOptions struct {
	// Memory in MB.
	Memory int
	// CPUs count.
	CPUs int
	// LogFile path for output.
	LogFile string
	// SSHPort for SSH access.
	SSHPort int
	// Ports maps host ports to guest ports.
	Ports map[int]int
	// Volumes maps host paths to guest paths (9p shares).
	Volumes map[string]string
	// Detach runs in background (nographic mode).
	Detach bool
}

// QemuHypervisor implements Hypervisor for QEMU.
type QemuHypervisor struct {
	// Binary is the path to the qemu binary (defaults to qemu-system-x86_64).
	Binary string
}

// NewQemuHypervisor creates a QEMU hypervisor with the default binary name.
func NewQemuHypervisor() *QemuHypervisor {
	return &QemuHypervisor{Binary: "qemu-system-x86_64"}
}

// Name returns the hypervisor name.
func (q *QemuHypervisor) Name() string {
	return "qemu"
}

// Available reports whether the QEMU binary is on PATH.
func (q *QemuHypervisor) Available() bool {
	if _, err := exec.LookPath(q.Binary); err != nil {
		return false
	}
	return true
}
+func (q *QemuHypervisor) BuildCommand(ctx context.Context, image string, opts *HypervisorOptions) (*exec.Cmd, error) { + format := DetectImageFormat(image) + if format == FormatUnknown { + return nil, fmt.Errorf("unknown image format: %s", image) + } + + args := []string{ + "-m", fmt.Sprintf("%d", opts.Memory), + "-smp", fmt.Sprintf("%d", opts.CPUs), + "-enable-kvm", + } + + // Add the image based on format + switch format { + case FormatISO: + args = append(args, "-cdrom", image) + args = append(args, "-boot", "d") + case FormatQCOW2: + args = append(args, "-drive", fmt.Sprintf("file=%s,format=qcow2", image)) + case FormatVMDK: + args = append(args, "-drive", fmt.Sprintf("file=%s,format=vmdk", image)) + case FormatRaw: + args = append(args, "-drive", fmt.Sprintf("file=%s,format=raw", image)) + } + + // Always run in nographic mode for container-like behavior + args = append(args, "-nographic") + + // Add serial console for log output + args = append(args, "-serial", "stdio") + + // Network with port forwarding + netdev := "user,id=net0" + if opts.SSHPort > 0 { + netdev += fmt.Sprintf(",hostfwd=tcp::%d-:22", opts.SSHPort) + } + for hostPort, guestPort := range opts.Ports { + netdev += fmt.Sprintf(",hostfwd=tcp::%d-:%d", hostPort, guestPort) + } + args = append(args, "-netdev", netdev) + args = append(args, "-device", "virtio-net-pci,netdev=net0") + + // Add 9p shares for volumes + shareID := 0 + for hostPath, guestPath := range opts.Volumes { + tag := fmt.Sprintf("share%d", shareID) + args = append(args, + "-fsdev", fmt.Sprintf("local,id=%s,path=%s,security_model=none", tag, hostPath), + "-device", fmt.Sprintf("virtio-9p-pci,fsdev=%s,mount_tag=%s", tag, filepath.Base(guestPath)), + ) + shareID++ + } + + // Check if KVM is available on Linux, remove -enable-kvm if not + if runtime.GOOS != "linux" || !isKVMAvailable() { + // Remove -enable-kvm from args + newArgs := make([]string, 0, len(args)) + for _, arg := range args { + if arg != "-enable-kvm" { + newArgs = 
append(newArgs, arg) + } + } + args = newArgs + + // On macOS, use HVF acceleration if available + if runtime.GOOS == "darwin" { + args = append(args, "-accel", "hvf") + } + } + + cmd := exec.CommandContext(ctx, q.Binary, args...) + return cmd, nil +} + +// isKVMAvailable checks if KVM is available on the system. +func isKVMAvailable() bool { + _, err := os.Stat("/dev/kvm") + return err == nil +} + +// HyperkitHypervisor implements Hypervisor for macOS Hyperkit. +type HyperkitHypervisor struct { + // Binary is the path to the hyperkit binary. + Binary string +} + +// NewHyperkitHypervisor creates a new Hyperkit hypervisor instance. +func NewHyperkitHypervisor() *HyperkitHypervisor { + return &HyperkitHypervisor{ + Binary: "hyperkit", + } +} + +// Name returns the hypervisor name. +func (h *HyperkitHypervisor) Name() string { + return "hyperkit" +} + +// Available checks if Hyperkit is installed and accessible. +func (h *HyperkitHypervisor) Available() bool { + if runtime.GOOS != "darwin" { + return false + } + _, err := exec.LookPath(h.Binary) + return err == nil +} + +// BuildCommand creates the Hyperkit command for running a VM. 
+func (h *HyperkitHypervisor) BuildCommand(ctx context.Context, image string, opts *HypervisorOptions) (*exec.Cmd, error) { + format := DetectImageFormat(image) + if format == FormatUnknown { + return nil, fmt.Errorf("unknown image format: %s", image) + } + + args := []string{ + "-m", fmt.Sprintf("%dM", opts.Memory), + "-c", fmt.Sprintf("%d", opts.CPUs), + "-A", // ACPI + "-u", // Unlimited console output + "-s", "0:0,hostbridge", + "-s", "31,lpc", + "-l", "com1,stdio", // Serial console + } + + // Add PCI slot for disk (slot 2) + switch format { + case FormatISO: + args = append(args, "-s", fmt.Sprintf("2:0,ahci-cd,%s", image)) + case FormatQCOW2, FormatVMDK, FormatRaw: + args = append(args, "-s", fmt.Sprintf("2:0,virtio-blk,%s", image)) + } + + // Network with port forwarding (slot 3) + netArgs := "virtio-net" + if opts.SSHPort > 0 || len(opts.Ports) > 0 { + // Hyperkit uses slirp for user networking with port forwarding + portForwards := make([]string, 0) + if opts.SSHPort > 0 { + portForwards = append(portForwards, fmt.Sprintf("tcp:%d:22", opts.SSHPort)) + } + for hostPort, guestPort := range opts.Ports { + portForwards = append(portForwards, fmt.Sprintf("tcp:%d:%d", hostPort, guestPort)) + } + if len(portForwards) > 0 { + netArgs += "," + strings.Join(portForwards, ",") + } + } + args = append(args, "-s", "3:0,"+netArgs) + + cmd := exec.CommandContext(ctx, h.Binary, args...) + return cmd, nil +} + +// DetectImageFormat determines the image format from its file extension. +func DetectImageFormat(path string) ImageFormat { + ext := strings.ToLower(filepath.Ext(path)) + switch ext { + case ".iso": + return FormatISO + case ".qcow2": + return FormatQCOW2 + case ".vmdk": + return FormatVMDK + case ".raw", ".img": + return FormatRaw + default: + return FormatUnknown + } +} + +// DetectHypervisor returns the best available hypervisor for the current platform. 
+func DetectHypervisor() (Hypervisor, error) { + // On macOS, prefer Hyperkit if available, fall back to QEMU + if runtime.GOOS == "darwin" { + hk := NewHyperkitHypervisor() + if hk.Available() { + return hk, nil + } + } + + // Try QEMU on all platforms + qemu := NewQemuHypervisor() + if qemu.Available() { + return qemu, nil + } + + return nil, fmt.Errorf("no hypervisor available: install qemu or hyperkit (macOS)") +} + +// GetHypervisor returns a specific hypervisor by name. +func GetHypervisor(name string) (Hypervisor, error) { + switch strings.ToLower(name) { + case "qemu": + h := NewQemuHypervisor() + if !h.Available() { + return nil, fmt.Errorf("qemu is not available") + } + return h, nil + case "hyperkit": + h := NewHyperkitHypervisor() + if !h.Available() { + return nil, fmt.Errorf("hyperkit is not available (requires macOS)") + } + return h, nil + default: + return nil, fmt.Errorf("unknown hypervisor: %s", name) + } +} diff --git a/container/hypervisor_test.go b/container/hypervisor_test.go new file mode 100644 index 0000000..e5c9964 --- /dev/null +++ b/container/hypervisor_test.go @@ -0,0 +1,358 @@ +package container + +import ( + "context" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestQemuHypervisor_Available_Good(t *testing.T) { + q := NewQemuHypervisor() + + // Check if qemu is available on this system + available := q.Available() + + // We just verify it returns a boolean without error + // The actual availability depends on the system + assert.IsType(t, true, available) +} + +func TestQemuHypervisor_Available_Bad_InvalidBinary(t *testing.T) { + q := &QemuHypervisor{ + Binary: "nonexistent-qemu-binary-that-does-not-exist", + } + + available := q.Available() + + assert.False(t, available) +} + +func TestHyperkitHypervisor_Available_Good(t *testing.T) { + h := NewHyperkitHypervisor() + + available := h.Available() + + // On non-darwin systems, should always be false + if 
runtime.GOOS != "darwin" { + assert.False(t, available) + } else { + // On darwin, just verify it returns a boolean + assert.IsType(t, true, available) + } +} + +func TestHyperkitHypervisor_Available_Bad_NotDarwin(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("This test only runs on non-darwin systems") + } + + h := NewHyperkitHypervisor() + + available := h.Available() + + assert.False(t, available, "Hyperkit should not be available on non-darwin systems") +} + +func TestHyperkitHypervisor_Available_Bad_InvalidBinary(t *testing.T) { + h := &HyperkitHypervisor{ + Binary: "nonexistent-hyperkit-binary-that-does-not-exist", + } + + available := h.Available() + + assert.False(t, available) +} + +func TestIsKVMAvailable_Good(t *testing.T) { + // This test verifies the function runs without error + // The actual result depends on the system + result := isKVMAvailable() + + // On non-linux systems, should be false + if runtime.GOOS != "linux" { + assert.False(t, result, "KVM should not be available on non-linux systems") + } else { + // On linux, just verify it returns a boolean + assert.IsType(t, true, result) + } +} + +func TestDetectHypervisor_Good(t *testing.T) { + // DetectHypervisor tries to find an available hypervisor + hv, err := DetectHypervisor() + + // This test may pass or fail depending on system configuration + // If no hypervisor is available, it should return an error + if err != nil { + assert.Nil(t, hv) + assert.Contains(t, err.Error(), "no hypervisor available") + } else { + assert.NotNil(t, hv) + assert.NotEmpty(t, hv.Name()) + } +} + +func TestGetHypervisor_Good_Qemu(t *testing.T) { + hv, err := GetHypervisor("qemu") + + // Depends on whether qemu is installed + if err != nil { + assert.Contains(t, err.Error(), "not available") + } else { + assert.NotNil(t, hv) + assert.Equal(t, "qemu", hv.Name()) + } +} + +func TestGetHypervisor_Good_QemuUppercase(t *testing.T) { + hv, err := GetHypervisor("QEMU") + + // Depends on whether qemu is 
installed + if err != nil { + assert.Contains(t, err.Error(), "not available") + } else { + assert.NotNil(t, hv) + assert.Equal(t, "qemu", hv.Name()) + } +} + +func TestGetHypervisor_Good_Hyperkit(t *testing.T) { + hv, err := GetHypervisor("hyperkit") + + // On non-darwin systems, should always fail + if runtime.GOOS != "darwin" { + assert.Error(t, err) + assert.Contains(t, err.Error(), "not available") + } else { + // On darwin, depends on whether hyperkit is installed + if err != nil { + assert.Contains(t, err.Error(), "not available") + } else { + assert.NotNil(t, hv) + assert.Equal(t, "hyperkit", hv.Name()) + } + } +} + +func TestGetHypervisor_Bad_Unknown(t *testing.T) { + _, err := GetHypervisor("unknown-hypervisor") + + assert.Error(t, err) + assert.Contains(t, err.Error(), "unknown hypervisor") +} + +func TestQemuHypervisor_BuildCommand_Good_WithPortsAndVolumes(t *testing.T) { + q := NewQemuHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{ + Memory: 2048, + CPUs: 4, + SSHPort: 2222, + Ports: map[int]int{8080: 80, 443: 443}, + Volumes: map[string]string{ + "/host/data": "/container/data", + "/host/logs": "/container/logs", + }, + Detach: true, + } + + cmd, err := q.BuildCommand(ctx, "/path/to/image.iso", opts) + require.NoError(t, err) + assert.NotNil(t, cmd) + + // Verify command includes all expected args + args := cmd.Args + assert.Contains(t, args, "-m") + assert.Contains(t, args, "2048") + assert.Contains(t, args, "-smp") + assert.Contains(t, args, "4") +} + +func TestQemuHypervisor_BuildCommand_Good_QCow2Format(t *testing.T) { + q := NewQemuHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{Memory: 1024, CPUs: 1} + + cmd, err := q.BuildCommand(ctx, "/path/to/image.qcow2", opts) + require.NoError(t, err) + + // Check that the drive format is qcow2 + found := false + for _, arg := range cmd.Args { + if arg == "file=/path/to/image.qcow2,format=qcow2" { + found = true + break + } + } + assert.True(t, found, 
"Should have qcow2 drive argument") +} + +func TestQemuHypervisor_BuildCommand_Good_VMDKFormat(t *testing.T) { + q := NewQemuHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{Memory: 1024, CPUs: 1} + + cmd, err := q.BuildCommand(ctx, "/path/to/image.vmdk", opts) + require.NoError(t, err) + + // Check that the drive format is vmdk + found := false + for _, arg := range cmd.Args { + if arg == "file=/path/to/image.vmdk,format=vmdk" { + found = true + break + } + } + assert.True(t, found, "Should have vmdk drive argument") +} + +func TestQemuHypervisor_BuildCommand_Good_RawFormat(t *testing.T) { + q := NewQemuHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{Memory: 1024, CPUs: 1} + + cmd, err := q.BuildCommand(ctx, "/path/to/image.raw", opts) + require.NoError(t, err) + + // Check that the drive format is raw + found := false + for _, arg := range cmd.Args { + if arg == "file=/path/to/image.raw,format=raw" { + found = true + break + } + } + assert.True(t, found, "Should have raw drive argument") +} + +func TestHyperkitHypervisor_BuildCommand_Good_WithPorts(t *testing.T) { + h := NewHyperkitHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{ + Memory: 1024, + CPUs: 2, + SSHPort: 2222, + Ports: map[int]int{8080: 80}, + } + + cmd, err := h.BuildCommand(ctx, "/path/to/image.iso", opts) + require.NoError(t, err) + assert.NotNil(t, cmd) + + // Verify it creates a command with memory and CPU args + args := cmd.Args + assert.Contains(t, args, "-m") + assert.Contains(t, args, "1024M") + assert.Contains(t, args, "-c") + assert.Contains(t, args, "2") +} + +func TestHyperkitHypervisor_BuildCommand_Good_QCow2Format(t *testing.T) { + h := NewHyperkitHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{Memory: 1024, CPUs: 1} + + cmd, err := h.BuildCommand(ctx, "/path/to/image.qcow2", opts) + require.NoError(t, err) + assert.NotNil(t, cmd) +} + +func 
TestHyperkitHypervisor_BuildCommand_Good_RawFormat(t *testing.T) { + h := NewHyperkitHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{Memory: 1024, CPUs: 1} + + cmd, err := h.BuildCommand(ctx, "/path/to/image.raw", opts) + require.NoError(t, err) + assert.NotNil(t, cmd) +} + +func TestHyperkitHypervisor_BuildCommand_Good_NoPorts(t *testing.T) { + h := NewHyperkitHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{ + Memory: 512, + CPUs: 1, + SSHPort: 0, // No SSH port + Ports: nil, + } + + cmd, err := h.BuildCommand(ctx, "/path/to/image.iso", opts) + require.NoError(t, err) + assert.NotNil(t, cmd) +} + +func TestQemuHypervisor_BuildCommand_Good_NoSSHPort(t *testing.T) { + q := NewQemuHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{ + Memory: 512, + CPUs: 1, + SSHPort: 0, // No SSH port + Ports: nil, + } + + cmd, err := q.BuildCommand(ctx, "/path/to/image.iso", opts) + require.NoError(t, err) + assert.NotNil(t, cmd) +} + +func TestQemuHypervisor_BuildCommand_Bad_UnknownFormat(t *testing.T) { + q := NewQemuHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{Memory: 1024, CPUs: 1} + + _, err := q.BuildCommand(ctx, "/path/to/image.txt", opts) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unknown image format") +} + +func TestHyperkitHypervisor_BuildCommand_Bad_UnknownFormat(t *testing.T) { + h := NewHyperkitHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{Memory: 1024, CPUs: 1} + + _, err := h.BuildCommand(ctx, "/path/to/image.unknown", opts) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unknown image format") +} + +func TestHyperkitHypervisor_Name_Good(t *testing.T) { + h := NewHyperkitHypervisor() + assert.Equal(t, "hyperkit", h.Name()) +} + +func TestHyperkitHypervisor_BuildCommand_Good_ISOFormat(t *testing.T) { + h := NewHyperkitHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{ + Memory: 1024, + 
CPUs: 2, + SSHPort: 2222, + } + + cmd, err := h.BuildCommand(ctx, "/path/to/image.iso", opts) + require.NoError(t, err) + assert.NotNil(t, cmd) + + args := cmd.Args + assert.Contains(t, args, "-m") + assert.Contains(t, args, "1024M") + assert.Contains(t, args, "-c") + assert.Contains(t, args, "2") +} diff --git a/container/linuxkit.go b/container/linuxkit.go new file mode 100644 index 0000000..5145804 --- /dev/null +++ b/container/linuxkit.go @@ -0,0 +1,462 @@ +package container + +import ( + "bufio" + "context" + "fmt" + goio "io" + "os" + "os/exec" + "syscall" + "time" + + "forge.lthn.ai/core/go/pkg/io" +) + +// LinuxKitManager implements the Manager interface for LinuxKit VMs. +type LinuxKitManager struct { + state *State + hypervisor Hypervisor + medium io.Medium +} + +// NewLinuxKitManager creates a new LinuxKit manager with auto-detected hypervisor. +func NewLinuxKitManager(m io.Medium) (*LinuxKitManager, error) { + statePath, err := DefaultStatePath() + if err != nil { + return nil, fmt.Errorf("failed to determine state path: %w", err) + } + + state, err := LoadState(statePath) + if err != nil { + return nil, fmt.Errorf("failed to load state: %w", err) + } + + hypervisor, err := DetectHypervisor() + if err != nil { + return nil, err + } + + return &LinuxKitManager{ + state: state, + hypervisor: hypervisor, + medium: m, + }, nil +} + +// NewLinuxKitManagerWithHypervisor creates a manager with a specific hypervisor. +func NewLinuxKitManagerWithHypervisor(m io.Medium, state *State, hypervisor Hypervisor) *LinuxKitManager { + return &LinuxKitManager{ + state: state, + hypervisor: hypervisor, + medium: m, + } +} + +// Run starts a new LinuxKit VM from the given image. 
+func (m *LinuxKitManager) Run(ctx context.Context, image string, opts RunOptions) (*Container, error) { + // Validate image exists + if !m.medium.IsFile(image) { + return nil, fmt.Errorf("image not found: %s", image) + } + + // Detect image format + format := DetectImageFormat(image) + if format == FormatUnknown { + return nil, fmt.Errorf("unsupported image format: %s", image) + } + + // Generate container ID + id, err := GenerateID() + if err != nil { + return nil, fmt.Errorf("failed to generate container ID: %w", err) + } + + // Apply defaults + if opts.Memory <= 0 { + opts.Memory = 1024 + } + if opts.CPUs <= 0 { + opts.CPUs = 1 + } + if opts.SSHPort <= 0 { + opts.SSHPort = 2222 + } + + // Use name or generate from ID + name := opts.Name + if name == "" { + name = id[:8] + } + + // Ensure logs directory exists + if err := EnsureLogsDir(); err != nil { + return nil, fmt.Errorf("failed to create logs directory: %w", err) + } + + // Get log file path + logPath, err := LogPath(id) + if err != nil { + return nil, fmt.Errorf("failed to determine log path: %w", err) + } + + // Build hypervisor options + hvOpts := &HypervisorOptions{ + Memory: opts.Memory, + CPUs: opts.CPUs, + LogFile: logPath, + SSHPort: opts.SSHPort, + Ports: opts.Ports, + Volumes: opts.Volumes, + Detach: opts.Detach, + } + + // Build the command + cmd, err := m.hypervisor.BuildCommand(ctx, image, hvOpts) + if err != nil { + return nil, fmt.Errorf("failed to build hypervisor command: %w", err) + } + + // Create log file + logFile, err := os.Create(logPath) + if err != nil { + return nil, fmt.Errorf("failed to create log file: %w", err) + } + + // Create container record + container := &Container{ + ID: id, + Name: name, + Image: image, + Status: StatusRunning, + StartedAt: time.Now(), + Ports: opts.Ports, + Memory: opts.Memory, + CPUs: opts.CPUs, + } + + if opts.Detach { + // Run in background + cmd.Stdout = logFile + cmd.Stderr = logFile + + // Start the process + if err := cmd.Start(); err != nil { 
+ _ = logFile.Close() + return nil, fmt.Errorf("failed to start VM: %w", err) + } + + container.PID = cmd.Process.Pid + + // Save state + if err := m.state.Add(container); err != nil { + // Try to kill the process we just started + _ = cmd.Process.Kill() + _ = logFile.Close() + return nil, fmt.Errorf("failed to save state: %w", err) + } + + // Close log file handle (process has its own) + _ = logFile.Close() + + // Start a goroutine to wait for process exit and update state + go m.waitForExit(container.ID, cmd) + + return container, nil + } + + // Run in foreground + // Tee output to both log file and stdout + stdout, err := cmd.StdoutPipe() + if err != nil { + _ = logFile.Close() + return nil, fmt.Errorf("failed to get stdout pipe: %w", err) + } + + stderr, err := cmd.StderrPipe() + if err != nil { + _ = logFile.Close() + return nil, fmt.Errorf("failed to get stderr pipe: %w", err) + } + + if err := cmd.Start(); err != nil { + _ = logFile.Close() + return nil, fmt.Errorf("failed to start VM: %w", err) + } + + container.PID = cmd.Process.Pid + + // Save state before waiting + if err := m.state.Add(container); err != nil { + _ = cmd.Process.Kill() + _ = logFile.Close() + return nil, fmt.Errorf("failed to save state: %w", err) + } + + // Copy output to both log and stdout + go func() { + mw := goio.MultiWriter(logFile, os.Stdout) + _, _ = goio.Copy(mw, stdout) + }() + go func() { + mw := goio.MultiWriter(logFile, os.Stderr) + _, _ = goio.Copy(mw, stderr) + }() + + // Wait for the process to complete + if err := cmd.Wait(); err != nil { + container.Status = StatusError + } else { + container.Status = StatusStopped + } + + _ = logFile.Close() + if err := m.state.Update(container); err != nil { + return container, fmt.Errorf("update container state: %w", err) + } + + return container, nil +} + +// waitForExit monitors a detached process and updates state when it exits. 
func (m *LinuxKitManager) waitForExit(id string, cmd *exec.Cmd) {
	// Block until the detached hypervisor process exits.
	err := cmd.Wait()

	// The container may have been removed from state in the meantime; only
	// update it if it is still tracked.
	container, ok := m.state.Get(id)
	if ok {
		if err != nil {
			container.Status = StatusError
		} else {
			container.Status = StatusStopped
		}
		// Best-effort: the process is gone either way, so a failed state
		// write is not actionable here.
		_ = m.state.Update(container)
	}
}

// Stop stops a running container by sending SIGTERM.
// It waits up to 10 seconds for a graceful shutdown, then escalates to
// SIGKILL. A container whose process has already vanished is simply marked
// stopped rather than treated as an error.
func (m *LinuxKitManager) Stop(ctx context.Context, id string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	container, ok := m.state.Get(id)
	if !ok {
		return fmt.Errorf("container not found: %s", id)
	}

	if container.Status != StatusRunning {
		return fmt.Errorf("container is not running: %s", id)
	}

	// Find the process
	// NOTE(review): on Unix, os.FindProcess never returns an error even for
	// dead PIDs, so this branch is effectively Windows-only — confirm.
	process, err := os.FindProcess(container.PID)
	if err != nil {
		// Process doesn't exist, update state
		container.Status = StatusStopped
		_ = m.state.Update(container)
		return nil
	}

	// Send SIGTERM
	if err := process.Signal(syscall.SIGTERM); err != nil {
		// Process might already be gone
		container.Status = StatusStopped
		_ = m.state.Update(container)
		return nil
	}

	// Honour already-cancelled contexts before waiting
	if err := ctx.Err(); err != nil {
		_ = process.Signal(syscall.SIGKILL)
		return err
	}

	// Wait for graceful shutdown with timeout
	// NOTE(review): Process.Wait only reaps direct children of this process;
	// for a PID re-attached from saved state it returns an error immediately,
	// which still closes done — verify that is the intended fast path.
	done := make(chan struct{})
	go func() {
		_, _ = process.Wait()
		close(done)
	}()

	select {
	case <-done:
		// Process exited gracefully
	case <-time.After(10 * time.Second):
		// Force kill
		_ = process.Signal(syscall.SIGKILL)
		<-done
	case <-ctx.Done():
		// Context cancelled
		_ = process.Signal(syscall.SIGKILL)
		return ctx.Err()
	}

	container.Status = StatusStopped
	return m.state.Update(container)
}

// List returns all known containers, verifying process state.
+func (m *LinuxKitManager) List(ctx context.Context) ([]*Container, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + containers := m.state.All() + + // Verify each running container's process is still alive + for _, c := range containers { + if c.Status == StatusRunning { + if !isProcessRunning(c.PID) { + c.Status = StatusStopped + _ = m.state.Update(c) + } + } + } + + return containers, nil +} + +// isProcessRunning checks if a process with the given PID is still running. +func isProcessRunning(pid int) bool { + process, err := os.FindProcess(pid) + if err != nil { + return false + } + + // On Unix, FindProcess always succeeds, so we need to send signal 0 to check + err = process.Signal(syscall.Signal(0)) + return err == nil +} + +// Logs returns a reader for the container's log output. +func (m *LinuxKitManager) Logs(ctx context.Context, id string, follow bool) (goio.ReadCloser, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + _, ok := m.state.Get(id) + if !ok { + return nil, fmt.Errorf("container not found: %s", id) + } + + logPath, err := LogPath(id) + if err != nil { + return nil, fmt.Errorf("failed to determine log path: %w", err) + } + + if !m.medium.IsFile(logPath) { + return nil, fmt.Errorf("no logs available for container: %s", id) + } + + if !follow { + // Simple case: just open and return the file + return m.medium.Open(logPath) + } + + // Follow mode: create a reader that tails the file + return newFollowReader(ctx, m.medium, logPath) +} + +// followReader implements goio.ReadCloser for following log files. +type followReader struct { + file goio.ReadCloser + ctx context.Context + cancel context.CancelFunc + reader *bufio.Reader + medium io.Medium + path string +} + +func newFollowReader(ctx context.Context, m io.Medium, path string) (*followReader, error) { + file, err := m.Open(path) + if err != nil { + return nil, err + } + + // Note: We don't seek here because Medium.Open doesn't guarantee Seekability. 

	ctx, cancel := context.WithCancel(ctx)

	return &followReader{
		file:   file,
		ctx:    ctx,
		cancel: cancel,
		reader: bufio.NewReader(file),
		medium: m,
		path:   path,
	}, nil
}

// Read satisfies io.Reader by tailing the log file: when no data is
// currently available it polls every 100ms until new bytes arrive or the
// context is cancelled. Cancellation is surfaced as EOF so callers' copy
// loops terminate cleanly instead of seeing an error.
func (f *followReader) Read(p []byte) (int, error) {
	for {
		select {
		case <-f.ctx.Done():
			return 0, goio.EOF
		default:
		}

		n, err := f.reader.Read(p)
		if n > 0 {
			return n, nil
		}
		if err != nil && err != goio.EOF {
			return 0, err
		}

		// No data available, wait a bit and try again
		select {
		case <-f.ctx.Done():
			return 0, goio.EOF
		case <-time.After(100 * time.Millisecond):
			// Reset reader to pick up new data
			// (the buffer is empty at this point, so nothing is discarded)
			f.reader.Reset(f.file)
		}
	}
}

// Close cancels the follow loop and closes the underlying log file.
func (f *followReader) Close() error {
	f.cancel()
	return f.file.Close()
}

// Exec executes a command inside the container via SSH.
// NOTE(review): the SSH port is hard-coded to the 2222 default and
// RunOptions.SSHKey is never consulted, so containers started with a custom
// SSHPort cannot be reached — TODO wire the container's actual port and key
// through. StrictHostKeyChecking=yes also assumes the host key is already
// present in ~/.core/known_hosts; confirm how that file gets seeded.
func (m *LinuxKitManager) Exec(ctx context.Context, id string, cmd []string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	container, ok := m.state.Get(id)
	if !ok {
		return fmt.Errorf("container not found: %s", id)
	}

	if container.Status != StatusRunning {
		return fmt.Errorf("container is not running: %s", id)
	}

	// Default SSH port
	sshPort := 2222

	// Build SSH command
	sshArgs := []string{
		"-p", fmt.Sprintf("%d", sshPort),
		"-o", "StrictHostKeyChecking=yes",
		"-o", "UserKnownHostsFile=~/.core/known_hosts",
		"-o", "LogLevel=ERROR",
		"root@localhost",
	}
	sshArgs = append(sshArgs, cmd...)

	// Run interactively: wire the caller's stdio straight through to ssh.
	sshCmd := exec.CommandContext(ctx, "ssh", sshArgs...)
	sshCmd.Stdin = os.Stdin
	sshCmd.Stdout = os.Stdout
	sshCmd.Stderr = os.Stderr

	return sshCmd.Run()
}

// State returns the manager's state (for testing).
func (m *LinuxKitManager) State() *State {
	return m.state
}

// Hypervisor returns the manager's hypervisor (for testing).
+func (m *LinuxKitManager) Hypervisor() Hypervisor { + return m.hypervisor +} diff --git a/container/linuxkit_test.go b/container/linuxkit_test.go new file mode 100644 index 0000000..7d02e37 --- /dev/null +++ b/container/linuxkit_test.go @@ -0,0 +1,786 @@ +package container + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// MockHypervisor is a mock implementation for testing. +type MockHypervisor struct { + name string + available bool + buildErr error + lastImage string + lastOpts *HypervisorOptions + commandToRun string +} + +func NewMockHypervisor() *MockHypervisor { + return &MockHypervisor{ + name: "mock", + available: true, + commandToRun: "echo", + } +} + +func (m *MockHypervisor) Name() string { + return m.name +} + +func (m *MockHypervisor) Available() bool { + return m.available +} + +func (m *MockHypervisor) BuildCommand(ctx context.Context, image string, opts *HypervisorOptions) (*exec.Cmd, error) { + m.lastImage = image + m.lastOpts = opts + if m.buildErr != nil { + return nil, m.buildErr + } + // Return a simple command that exits quickly + return exec.CommandContext(ctx, m.commandToRun, "test"), nil +} + +// newTestManager creates a LinuxKitManager with mock hypervisor for testing. +// Uses manual temp directory management to avoid race conditions with t.TempDir cleanup. 
+func newTestManager(t *testing.T) (*LinuxKitManager, *MockHypervisor, string) { + tmpDir, err := os.MkdirTemp("", "linuxkit-test-*") + require.NoError(t, err) + + // Manual cleanup that handles race conditions with state file writes + t.Cleanup(func() { + // Give any pending file operations time to complete + time.Sleep(10 * time.Millisecond) + _ = os.RemoveAll(tmpDir) + }) + + statePath := filepath.Join(tmpDir, "containers.json") + + state, err := LoadState(io.Local, statePath) + require.NoError(t, err) + + mock := NewMockHypervisor() + manager := NewLinuxKitManagerWithHypervisor(io.Local, state, mock) + + return manager, mock, tmpDir +} + +func TestNewLinuxKitManagerWithHypervisor_Good(t *testing.T) { + tmpDir := t.TempDir() + statePath := filepath.Join(tmpDir, "containers.json") + state, _ := LoadState(io.Local, statePath) + mock := NewMockHypervisor() + + manager := NewLinuxKitManagerWithHypervisor(io.Local, state, mock) + + assert.NotNil(t, manager) + assert.Equal(t, state, manager.State()) + assert.Equal(t, mock, manager.Hypervisor()) +} + +func TestLinuxKitManager_Run_Good_Detached(t *testing.T) { + manager, mock, tmpDir := newTestManager(t) + + // Create a test image file + imagePath := filepath.Join(tmpDir, "test.iso") + err := os.WriteFile(imagePath, []byte("fake image"), 0644) + require.NoError(t, err) + + // Use a command that runs briefly then exits + mock.commandToRun = "sleep" + + ctx := context.Background() + opts := RunOptions{ + Name: "test-vm", + Detach: true, + Memory: 512, + CPUs: 2, + } + + container, err := manager.Run(ctx, imagePath, opts) + require.NoError(t, err) + + assert.NotEmpty(t, container.ID) + assert.Equal(t, "test-vm", container.Name) + assert.Equal(t, imagePath, container.Image) + assert.Equal(t, StatusRunning, container.Status) + assert.Greater(t, container.PID, 0) + assert.Equal(t, 512, container.Memory) + assert.Equal(t, 2, container.CPUs) + + // Verify hypervisor was called with correct options + assert.Equal(t, imagePath, 
mock.lastImage) + assert.Equal(t, 512, mock.lastOpts.Memory) + assert.Equal(t, 2, mock.lastOpts.CPUs) + + // Clean up - stop the container + time.Sleep(100 * time.Millisecond) +} + +func TestLinuxKitManager_Run_Good_DefaultValues(t *testing.T) { + manager, mock, tmpDir := newTestManager(t) + + imagePath := filepath.Join(tmpDir, "test.qcow2") + err := os.WriteFile(imagePath, []byte("fake image"), 0644) + require.NoError(t, err) + + ctx := context.Background() + opts := RunOptions{Detach: true} + + container, err := manager.Run(ctx, imagePath, opts) + require.NoError(t, err) + + // Check defaults were applied + assert.Equal(t, 1024, mock.lastOpts.Memory) + assert.Equal(t, 1, mock.lastOpts.CPUs) + assert.Equal(t, 2222, mock.lastOpts.SSHPort) + + // Name should default to first 8 chars of ID + assert.Equal(t, container.ID[:8], container.Name) + + // Wait for the mock process to complete to avoid temp dir cleanup issues + time.Sleep(50 * time.Millisecond) +} + +func TestLinuxKitManager_Run_Bad_ImageNotFound(t *testing.T) { + manager, _, _ := newTestManager(t) + + ctx := context.Background() + opts := RunOptions{Detach: true} + + _, err := manager.Run(ctx, "/nonexistent/image.iso", opts) + assert.Error(t, err) + assert.Contains(t, err.Error(), "image not found") +} + +func TestLinuxKitManager_Run_Bad_UnsupportedFormat(t *testing.T) { + manager, _, tmpDir := newTestManager(t) + + imagePath := filepath.Join(tmpDir, "test.txt") + err := os.WriteFile(imagePath, []byte("not an image"), 0644) + require.NoError(t, err) + + ctx := context.Background() + opts := RunOptions{Detach: true} + + _, err = manager.Run(ctx, imagePath, opts) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported image format") +} + +func TestLinuxKitManager_Stop_Good(t *testing.T) { + manager, _, _ := newTestManager(t) + + // Add a fake running container with a non-existent PID + // The Stop function should handle this gracefully + container := &Container{ + ID: "abc12345", + Status: 
StatusRunning, + PID: 999999, // Non-existent PID + StartedAt: time.Now(), + } + _ = manager.State().Add(container) + + ctx := context.Background() + err := manager.Stop(ctx, "abc12345") + + // Stop should succeed (process doesn't exist, so container is marked stopped) + assert.NoError(t, err) + + // Verify the container status was updated + c, ok := manager.State().Get("abc12345") + assert.True(t, ok) + assert.Equal(t, StatusStopped, c.Status) +} + +func TestLinuxKitManager_Stop_Bad_NotFound(t *testing.T) { + manager, _, _ := newTestManager(t) + + ctx := context.Background() + err := manager.Stop(ctx, "nonexistent") + + assert.Error(t, err) + assert.Contains(t, err.Error(), "container not found") +} + +func TestLinuxKitManager_Stop_Bad_NotRunning(t *testing.T) { + _, _, tmpDir := newTestManager(t) + statePath := filepath.Join(tmpDir, "containers.json") + state, err := LoadState(io.Local, statePath) + require.NoError(t, err) + manager := NewLinuxKitManagerWithHypervisor(io.Local, state, NewMockHypervisor()) + + container := &Container{ + ID: "abc12345", + Status: StatusStopped, + } + _ = state.Add(container) + + ctx := context.Background() + err = manager.Stop(ctx, "abc12345") + + assert.Error(t, err) + assert.Contains(t, err.Error(), "not running") +} + +func TestLinuxKitManager_List_Good(t *testing.T) { + _, _, tmpDir := newTestManager(t) + statePath := filepath.Join(tmpDir, "containers.json") + state, err := LoadState(io.Local, statePath) + require.NoError(t, err) + manager := NewLinuxKitManagerWithHypervisor(io.Local, state, NewMockHypervisor()) + + _ = state.Add(&Container{ID: "aaa11111", Status: StatusStopped}) + _ = state.Add(&Container{ID: "bbb22222", Status: StatusStopped}) + + ctx := context.Background() + containers, err := manager.List(ctx) + + require.NoError(t, err) + assert.Len(t, containers, 2) +} + +func TestLinuxKitManager_List_Good_VerifiesRunningStatus(t *testing.T) { + _, _, tmpDir := newTestManager(t) + statePath := filepath.Join(tmpDir, 
"containers.json") + state, err := LoadState(io.Local, statePath) + require.NoError(t, err) + manager := NewLinuxKitManagerWithHypervisor(io.Local, state, NewMockHypervisor()) + + // Add a "running" container with a fake PID that doesn't exist + _ = state.Add(&Container{ + ID: "abc12345", + Status: StatusRunning, + PID: 999999, // PID that almost certainly doesn't exist + }) + + ctx := context.Background() + containers, err := manager.List(ctx) + + require.NoError(t, err) + assert.Len(t, containers, 1) + // Status should have been updated to stopped since PID doesn't exist + assert.Equal(t, StatusStopped, containers[0].Status) +} + +func TestLinuxKitManager_Logs_Good(t *testing.T) { + manager, _, tmpDir := newTestManager(t) + + // Create a log file manually + logsDir := filepath.Join(tmpDir, "logs") + require.NoError(t, os.MkdirAll(logsDir, 0755)) + + container := &Container{ID: "abc12345"} + _ = manager.State().Add(container) + + // Override the default logs dir for testing by creating the log file + // at the expected location + logContent := "test log content\nline 2\n" + logPath, err := LogPath("abc12345") + require.NoError(t, err) + require.NoError(t, os.MkdirAll(filepath.Dir(logPath), 0755)) + require.NoError(t, os.WriteFile(logPath, []byte(logContent), 0644)) + + ctx := context.Background() + reader, err := manager.Logs(ctx, "abc12345", false) + + require.NoError(t, err) + defer func() { _ = reader.Close() }() + + buf := make([]byte, 1024) + n, _ := reader.Read(buf) + assert.Equal(t, logContent, string(buf[:n])) +} + +func TestLinuxKitManager_Logs_Bad_NotFound(t *testing.T) { + manager, _, _ := newTestManager(t) + + ctx := context.Background() + _, err := manager.Logs(ctx, "nonexistent", false) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "container not found") +} + +func TestLinuxKitManager_Logs_Bad_NoLogFile(t *testing.T) { + manager, _, _ := newTestManager(t) + + // Use a unique ID that won't have a log file + uniqueID, err := GenerateID() + 
require.NoError(t, err) + container := &Container{ID: uniqueID} + _ = manager.State().Add(container) + + ctx := context.Background() + reader, err := manager.Logs(ctx, uniqueID, false) + + // If logs existed somehow, clean up the reader + if reader != nil { + _ = reader.Close() + } + + assert.Error(t, err) + if err != nil { + assert.Contains(t, err.Error(), "no logs available") + } +} + +func TestLinuxKitManager_Exec_Bad_NotFound(t *testing.T) { + manager, _, _ := newTestManager(t) + + ctx := context.Background() + err := manager.Exec(ctx, "nonexistent", []string{"ls"}) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "container not found") +} + +func TestLinuxKitManager_Exec_Bad_NotRunning(t *testing.T) { + manager, _, _ := newTestManager(t) + + container := &Container{ID: "abc12345", Status: StatusStopped} + _ = manager.State().Add(container) + + ctx := context.Background() + err := manager.Exec(ctx, "abc12345", []string{"ls"}) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "not running") +} + +func TestDetectImageFormat_Good(t *testing.T) { + tests := []struct { + path string + format ImageFormat + }{ + {"/path/to/image.iso", FormatISO}, + {"/path/to/image.ISO", FormatISO}, + {"/path/to/image.qcow2", FormatQCOW2}, + {"/path/to/image.QCOW2", FormatQCOW2}, + {"/path/to/image.vmdk", FormatVMDK}, + {"/path/to/image.raw", FormatRaw}, + {"/path/to/image.img", FormatRaw}, + {"image.iso", FormatISO}, + } + + for _, tt := range tests { + t.Run(tt.path, func(t *testing.T) { + assert.Equal(t, tt.format, DetectImageFormat(tt.path)) + }) + } +} + +func TestDetectImageFormat_Bad_Unknown(t *testing.T) { + tests := []string{ + "/path/to/image.txt", + "/path/to/image", + "noextension", + "/path/to/image.docx", + } + + for _, path := range tests { + t.Run(path, func(t *testing.T) { + assert.Equal(t, FormatUnknown, DetectImageFormat(path)) + }) + } +} + +func TestQemuHypervisor_Name_Good(t *testing.T) { + q := NewQemuHypervisor() + assert.Equal(t, "qemu", 
q.Name()) +} + +func TestQemuHypervisor_BuildCommand_Good(t *testing.T) { + q := NewQemuHypervisor() + + ctx := context.Background() + opts := &HypervisorOptions{ + Memory: 2048, + CPUs: 4, + SSHPort: 2222, + Ports: map[int]int{8080: 80}, + Detach: true, + } + + cmd, err := q.BuildCommand(ctx, "/path/to/image.iso", opts) + require.NoError(t, err) + assert.NotNil(t, cmd) + + // Check command path + assert.Contains(t, cmd.Path, "qemu") + + // Check that args contain expected values + args := cmd.Args + assert.Contains(t, args, "-m") + assert.Contains(t, args, "2048") + assert.Contains(t, args, "-smp") + assert.Contains(t, args, "4") + assert.Contains(t, args, "-nographic") +} + +func TestLinuxKitManager_Logs_Good_Follow(t *testing.T) { + manager, _, _ := newTestManager(t) + + // Create a unique container ID + uniqueID, err := GenerateID() + require.NoError(t, err) + container := &Container{ID: uniqueID} + _ = manager.State().Add(container) + + // Create a log file at the expected location + logPath, err := LogPath(uniqueID) + require.NoError(t, err) + require.NoError(t, os.MkdirAll(filepath.Dir(logPath), 0755)) + + // Write initial content + err = os.WriteFile(logPath, []byte("initial log content\n"), 0644) + require.NoError(t, err) + + // Create a cancellable context + ctx, cancel := context.WithCancel(context.Background()) + + // Get the follow reader + reader, err := manager.Logs(ctx, uniqueID, true) + require.NoError(t, err) + + // Cancel the context to stop the follow + cancel() + + // Read should return EOF after context cancellation + buf := make([]byte, 1024) + _, readErr := reader.Read(buf) + // After context cancel, Read should return EOF + assert.Equal(t, "EOF", readErr.Error()) + + // Close the reader + assert.NoError(t, reader.Close()) +} + +func TestFollowReader_Read_Good_WithData(t *testing.T) { + tmpDir := t.TempDir() + logPath := filepath.Join(tmpDir, "test.log") + + // Create log file with content + content := "test log line 1\ntest log line 2\n" + 
err := os.WriteFile(logPath, []byte(content), 0644) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + reader, err := newFollowReader(ctx, io.Local, logPath) + require.NoError(t, err) + defer func() { _ = reader.Close() }() + + // The followReader seeks to end, so we need to append more content + f, err := os.OpenFile(logPath, os.O_APPEND|os.O_WRONLY, 0644) + require.NoError(t, err) + _, err = f.WriteString("new line\n") + require.NoError(t, err) + require.NoError(t, f.Close()) + + // Give the reader time to poll + time.Sleep(150 * time.Millisecond) + + buf := make([]byte, 1024) + n, err := reader.Read(buf) + if err == nil { + assert.Greater(t, n, 0) + } +} + +func TestFollowReader_Read_Good_ContextCancel(t *testing.T) { + tmpDir := t.TempDir() + logPath := filepath.Join(tmpDir, "test.log") + + // Create log file + err := os.WriteFile(logPath, []byte("initial content\n"), 0644) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + + reader, err := newFollowReader(ctx, io.Local, logPath) + require.NoError(t, err) + + // Cancel the context + cancel() + + // Read should return EOF + buf := make([]byte, 1024) + _, readErr := reader.Read(buf) + assert.Equal(t, "EOF", readErr.Error()) + + _ = reader.Close() +} + +func TestFollowReader_Close_Good(t *testing.T) { + tmpDir := t.TempDir() + logPath := filepath.Join(tmpDir, "test.log") + + err := os.WriteFile(logPath, []byte("content\n"), 0644) + require.NoError(t, err) + + ctx := context.Background() + reader, err := newFollowReader(ctx, io.Local, logPath) + require.NoError(t, err) + + err = reader.Close() + assert.NoError(t, err) + + // Reading after close should fail or return EOF + buf := make([]byte, 1024) + _, readErr := reader.Read(buf) + assert.Error(t, readErr) +} + +func TestNewFollowReader_Bad_FileNotFound(t *testing.T) { + ctx := context.Background() + _, err := newFollowReader(ctx, io.Local, 
"/nonexistent/path/to/file.log") + + assert.Error(t, err) +} + +func TestLinuxKitManager_Run_Bad_BuildCommandError(t *testing.T) { + manager, mock, tmpDir := newTestManager(t) + + // Create a test image file + imagePath := filepath.Join(tmpDir, "test.iso") + err := os.WriteFile(imagePath, []byte("fake image"), 0644) + require.NoError(t, err) + + // Configure mock to return an error + mock.buildErr = assert.AnError + + ctx := context.Background() + opts := RunOptions{Detach: true} + + _, err = manager.Run(ctx, imagePath, opts) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to build hypervisor command") +} + +func TestLinuxKitManager_Run_Good_Foreground(t *testing.T) { + manager, mock, tmpDir := newTestManager(t) + + // Create a test image file + imagePath := filepath.Join(tmpDir, "test.iso") + err := os.WriteFile(imagePath, []byte("fake image"), 0644) + require.NoError(t, err) + + // Use echo which exits quickly + mock.commandToRun = "echo" + + ctx := context.Background() + opts := RunOptions{ + Name: "test-foreground", + Detach: false, // Run in foreground + Memory: 512, + CPUs: 1, + } + + container, err := manager.Run(ctx, imagePath, opts) + require.NoError(t, err) + + assert.NotEmpty(t, container.ID) + assert.Equal(t, "test-foreground", container.Name) + // Foreground process should have completed + assert.Equal(t, StatusStopped, container.Status) +} + +func TestLinuxKitManager_Stop_Good_ContextCancelled(t *testing.T) { + manager, mock, tmpDir := newTestManager(t) + + // Create a test image file + imagePath := filepath.Join(tmpDir, "test.iso") + err := os.WriteFile(imagePath, []byte("fake image"), 0644) + require.NoError(t, err) + + // Use a command that takes a long time + mock.commandToRun = "sleep" + + // Start a container + ctx := context.Background() + opts := RunOptions{ + Name: "test-cancel", + Detach: true, + } + + container, err := manager.Run(ctx, imagePath, opts) + require.NoError(t, err) + + // Ensure cleanup happens regardless of 
test outcome + t.Cleanup(func() { + _ = manager.Stop(context.Background(), container.ID) + }) + + // Create a context that's already cancelled + cancelCtx, cancel := context.WithCancel(context.Background()) + cancel() + + // Stop with cancelled context + err = manager.Stop(cancelCtx, container.ID) + // Should return context error + assert.Error(t, err) + assert.Equal(t, context.Canceled, err) +} + +func TestIsProcessRunning_Good_ExistingProcess(t *testing.T) { + // Use our own PID which definitely exists + running := isProcessRunning(os.Getpid()) + assert.True(t, running) +} + +func TestIsProcessRunning_Bad_NonexistentProcess(t *testing.T) { + // Use a PID that almost certainly doesn't exist + running := isProcessRunning(999999) + assert.False(t, running) +} + +func TestLinuxKitManager_Run_Good_WithPortsAndVolumes(t *testing.T) { + manager, mock, tmpDir := newTestManager(t) + + imagePath := filepath.Join(tmpDir, "test.iso") + err := os.WriteFile(imagePath, []byte("fake image"), 0644) + require.NoError(t, err) + + ctx := context.Background() + opts := RunOptions{ + Name: "test-ports", + Detach: true, + Memory: 512, + CPUs: 1, + SSHPort: 2223, + Ports: map[int]int{8080: 80, 443: 443}, + Volumes: map[string]string{"/host/data": "/container/data"}, + } + + container, err := manager.Run(ctx, imagePath, opts) + require.NoError(t, err) + + assert.NotEmpty(t, container.ID) + assert.Equal(t, map[int]int{8080: 80, 443: 443}, container.Ports) + assert.Equal(t, 2223, mock.lastOpts.SSHPort) + assert.Equal(t, map[string]string{"/host/data": "/container/data"}, mock.lastOpts.Volumes) + + time.Sleep(50 * time.Millisecond) +} + +func TestFollowReader_Read_Bad_ReaderError(t *testing.T) { + tmpDir := t.TempDir() + logPath := filepath.Join(tmpDir, "test.log") + + // Create log file + err := os.WriteFile(logPath, []byte("content\n"), 0644) + require.NoError(t, err) + + ctx := context.Background() + reader, err := newFollowReader(ctx, io.Local, logPath) + require.NoError(t, err) + + // 
Close the underlying file to cause read errors + _ = reader.file.Close() + + // Read should return an error + buf := make([]byte, 1024) + _, readErr := reader.Read(buf) + assert.Error(t, readErr) +} + +func TestLinuxKitManager_Run_Bad_StartError(t *testing.T) { + manager, mock, tmpDir := newTestManager(t) + + imagePath := filepath.Join(tmpDir, "test.iso") + err := os.WriteFile(imagePath, []byte("fake image"), 0644) + require.NoError(t, err) + + // Use a command that doesn't exist to cause Start() to fail + mock.commandToRun = "/nonexistent/command/that/does/not/exist" + + ctx := context.Background() + opts := RunOptions{ + Name: "test-start-error", + Detach: true, + } + + _, err = manager.Run(ctx, imagePath, opts) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to start VM") +} + +func TestLinuxKitManager_Run_Bad_ForegroundStartError(t *testing.T) { + manager, mock, tmpDir := newTestManager(t) + + imagePath := filepath.Join(tmpDir, "test.iso") + err := os.WriteFile(imagePath, []byte("fake image"), 0644) + require.NoError(t, err) + + // Use a command that doesn't exist to cause Start() to fail + mock.commandToRun = "/nonexistent/command/that/does/not/exist" + + ctx := context.Background() + opts := RunOptions{ + Name: "test-foreground-error", + Detach: false, + } + + _, err = manager.Run(ctx, imagePath, opts) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to start VM") +} + +func TestLinuxKitManager_Run_Good_ForegroundWithError(t *testing.T) { + manager, mock, tmpDir := newTestManager(t) + + imagePath := filepath.Join(tmpDir, "test.iso") + err := os.WriteFile(imagePath, []byte("fake image"), 0644) + require.NoError(t, err) + + // Use a command that exits with error + mock.commandToRun = "false" // false command exits with code 1 + + ctx := context.Background() + opts := RunOptions{ + Name: "test-foreground-exit-error", + Detach: false, + } + + container, err := manager.Run(ctx, imagePath, opts) + require.NoError(t, err) // Run 
itself should succeed + + // Container should be in error state since process exited with error + assert.Equal(t, StatusError, container.Status) +} + +func TestLinuxKitManager_Stop_Good_ProcessExitedWhileRunning(t *testing.T) { + manager, _, _ := newTestManager(t) + + // Add a "running" container with a process that has already exited + // This simulates the race condition where process exits between status check + // and signal send + container := &Container{ + ID: "test1234", + Status: StatusRunning, + PID: 999999, // Non-existent PID + StartedAt: time.Now(), + } + _ = manager.State().Add(container) + + ctx := context.Background() + err := manager.Stop(ctx, "test1234") + + // Stop should succeed gracefully + assert.NoError(t, err) + + // Container should be stopped + c, ok := manager.State().Get("test1234") + assert.True(t, ok) + assert.Equal(t, StatusStopped, c.Status) +} diff --git a/container/state.go b/container/state.go new file mode 100644 index 0000000..ef5f3c1 --- /dev/null +++ b/container/state.go @@ -0,0 +1,172 @@ +package container + +import ( + "encoding/json" + "os" + "path/filepath" + "sync" + + "forge.lthn.ai/core/go/pkg/io" +) + +// State manages persistent container state. +type State struct { + // Containers is a map of container ID to Container. + Containers map[string]*Container `json:"containers"` + + mu sync.RWMutex + filePath string +} + +// DefaultStateDir returns the default directory for state files (~/.core). +func DefaultStateDir() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(home, ".core"), nil +} + +// DefaultStatePath returns the default path for the state file. +func DefaultStatePath() (string, error) { + dir, err := DefaultStateDir() + if err != nil { + return "", err + } + return filepath.Join(dir, "containers.json"), nil +} + +// DefaultLogsDir returns the default directory for container logs. 
+func DefaultLogsDir() (string, error) { + dir, err := DefaultStateDir() + if err != nil { + return "", err + } + return filepath.Join(dir, "logs"), nil +} + +// NewState creates a new State instance. +func NewState(filePath string) *State { + return &State{ + Containers: make(map[string]*Container), + filePath: filePath, + } +} + +// LoadState loads the state from the given file path. +// If the file doesn't exist, returns an empty state. +func LoadState(filePath string) (*State, error) { + state := NewState(filePath) + + dataStr, err := io.Local.Read(filePath) + if err != nil { + if os.IsNotExist(err) { + return state, nil + } + return nil, err + } + + if err := json.Unmarshal([]byte(dataStr), state); err != nil { + return nil, err + } + + return state, nil +} + +// SaveState persists the state to the configured file path. +func (s *State) SaveState() error { + s.mu.RLock() + defer s.mu.RUnlock() + + // Ensure the directory exists + dir := filepath.Dir(s.filePath) + if err := io.Local.EnsureDir(dir); err != nil { + return err + } + + data, err := json.MarshalIndent(s, "", " ") + if err != nil { + return err + } + + return io.Local.Write(s.filePath, string(data)) +} + +// Add adds a container to the state and persists it. +func (s *State) Add(c *Container) error { + s.mu.Lock() + s.Containers[c.ID] = c + s.mu.Unlock() + + return s.SaveState() +} + +// Get retrieves a copy of a container by ID. +// Returns a copy to prevent data races when the container is modified. +func (s *State) Get(id string) (*Container, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + + c, ok := s.Containers[id] + if !ok { + return nil, false + } + // Return a copy to prevent data races + copy := *c + return ©, true +} + +// Update updates a container in the state and persists it. +func (s *State) Update(c *Container) error { + s.mu.Lock() + s.Containers[c.ID] = c + s.mu.Unlock() + + return s.SaveState() +} + +// Remove removes a container from the state and persists it. 
+func (s *State) Remove(id string) error { + s.mu.Lock() + delete(s.Containers, id) + s.mu.Unlock() + + return s.SaveState() +} + +// All returns copies of all containers in the state. +// Returns copies to prevent data races when containers are modified. +func (s *State) All() []*Container { + s.mu.RLock() + defer s.mu.RUnlock() + + containers := make([]*Container, 0, len(s.Containers)) + for _, c := range s.Containers { + copy := *c + containers = append(containers, ©) + } + return containers +} + +// FilePath returns the path to the state file. +func (s *State) FilePath() string { + return s.filePath +} + +// LogPath returns the log file path for a given container ID. +func LogPath(id string) (string, error) { + logsDir, err := DefaultLogsDir() + if err != nil { + return "", err + } + return filepath.Join(logsDir, id+".log"), nil +} + +// EnsureLogsDir ensures the logs directory exists. +func EnsureLogsDir() error { + logsDir, err := DefaultLogsDir() + if err != nil { + return err + } + return io.Local.EnsureDir(logsDir) +} diff --git a/container/state_test.go b/container/state_test.go new file mode 100644 index 0000000..5d23dfc --- /dev/null +++ b/container/state_test.go @@ -0,0 +1,223 @@ +package container + +import ( + "os" + "path/filepath" + "testing" + "time" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewState_Good(t *testing.T) { + state := NewState(io.Local, "/tmp/test-state.json") + + assert.NotNil(t, state) + assert.NotNil(t, state.Containers) + assert.Equal(t, "/tmp/test-state.json", state.FilePath()) +} + +func TestLoadState_Good_NewFile(t *testing.T) { + // Test loading from non-existent file + tmpDir := t.TempDir() + statePath := filepath.Join(tmpDir, "containers.json") + + state, err := LoadState(io.Local, statePath) + + require.NoError(t, err) + assert.NotNil(t, state) + assert.Empty(t, state.Containers) +} + +func TestLoadState_Good_ExistingFile(t *testing.T) { 
+ tmpDir := t.TempDir() + statePath := filepath.Join(tmpDir, "containers.json") + + // Create a state file with data + content := `{ + "containers": { + "abc12345": { + "id": "abc12345", + "name": "test-container", + "image": "/path/to/image.iso", + "status": "running", + "pid": 12345, + "started_at": "2024-01-01T00:00:00Z" + } + } + }` + err := os.WriteFile(statePath, []byte(content), 0644) + require.NoError(t, err) + + state, err := LoadState(io.Local, statePath) + + require.NoError(t, err) + assert.Len(t, state.Containers, 1) + + c, ok := state.Get("abc12345") + assert.True(t, ok) + assert.Equal(t, "test-container", c.Name) + assert.Equal(t, StatusRunning, c.Status) +} + +func TestLoadState_Bad_InvalidJSON(t *testing.T) { + tmpDir := t.TempDir() + statePath := filepath.Join(tmpDir, "containers.json") + + // Create invalid JSON + err := os.WriteFile(statePath, []byte("invalid json{"), 0644) + require.NoError(t, err) + + _, err = LoadState(io.Local, statePath) + assert.Error(t, err) +} + +func TestState_Add_Good(t *testing.T) { + tmpDir := t.TempDir() + statePath := filepath.Join(tmpDir, "containers.json") + state := NewState(io.Local, statePath) + + container := &Container{ + ID: "abc12345", + Name: "test", + Image: "/path/to/image.iso", + Status: StatusRunning, + PID: 12345, + StartedAt: time.Now(), + } + + err := state.Add(container) + require.NoError(t, err) + + // Verify it's in memory + c, ok := state.Get("abc12345") + assert.True(t, ok) + assert.Equal(t, container.Name, c.Name) + + // Verify file was created + _, err = os.Stat(statePath) + assert.NoError(t, err) +} + +func TestState_Update_Good(t *testing.T) { + tmpDir := t.TempDir() + statePath := filepath.Join(tmpDir, "containers.json") + state := NewState(io.Local, statePath) + + container := &Container{ + ID: "abc12345", + Status: StatusRunning, + } + _ = state.Add(container) + + // Update status + container.Status = StatusStopped + err := state.Update(container) + require.NoError(t, err) + + // Verify 
update + c, ok := state.Get("abc12345") + assert.True(t, ok) + assert.Equal(t, StatusStopped, c.Status) +} + +func TestState_Remove_Good(t *testing.T) { + tmpDir := t.TempDir() + statePath := filepath.Join(tmpDir, "containers.json") + state := NewState(io.Local, statePath) + + container := &Container{ + ID: "abc12345", + } + _ = state.Add(container) + + err := state.Remove("abc12345") + require.NoError(t, err) + + _, ok := state.Get("abc12345") + assert.False(t, ok) +} + +func TestState_Get_Bad_NotFound(t *testing.T) { + state := NewState(io.Local, "/tmp/test-state.json") + + _, ok := state.Get("nonexistent") + assert.False(t, ok) +} + +func TestState_All_Good(t *testing.T) { + tmpDir := t.TempDir() + statePath := filepath.Join(tmpDir, "containers.json") + state := NewState(io.Local, statePath) + + _ = state.Add(&Container{ID: "aaa11111"}) + _ = state.Add(&Container{ID: "bbb22222"}) + _ = state.Add(&Container{ID: "ccc33333"}) + + all := state.All() + assert.Len(t, all, 3) +} + +func TestState_SaveState_Good_CreatesDirectory(t *testing.T) { + tmpDir := t.TempDir() + nestedPath := filepath.Join(tmpDir, "nested", "dir", "containers.json") + state := NewState(io.Local, nestedPath) + + _ = state.Add(&Container{ID: "abc12345"}) + + err := state.SaveState() + require.NoError(t, err) + + // Verify directory was created + _, err = os.Stat(filepath.Dir(nestedPath)) + assert.NoError(t, err) +} + +func TestDefaultStateDir_Good(t *testing.T) { + dir, err := DefaultStateDir() + require.NoError(t, err) + assert.Contains(t, dir, ".core") +} + +func TestDefaultStatePath_Good(t *testing.T) { + path, err := DefaultStatePath() + require.NoError(t, err) + assert.Contains(t, path, "containers.json") +} + +func TestDefaultLogsDir_Good(t *testing.T) { + dir, err := DefaultLogsDir() + require.NoError(t, err) + assert.Contains(t, dir, "logs") +} + +func TestLogPath_Good(t *testing.T) { + path, err := LogPath("abc12345") + require.NoError(t, err) + assert.Contains(t, path, "abc12345.log") +} 
+ +func TestEnsureLogsDir_Good(t *testing.T) { + // This test creates real directories - skip in CI if needed + err := EnsureLogsDir(io.Local) + assert.NoError(t, err) + + logsDir, _ := DefaultLogsDir() + _, err = os.Stat(logsDir) + assert.NoError(t, err) +} + +func TestGenerateID_Good(t *testing.T) { + id1, err := GenerateID() + require.NoError(t, err) + assert.Len(t, id1, 8) + + id2, err := GenerateID() + require.NoError(t, err) + assert.Len(t, id2, 8) + + // IDs should be different + assert.NotEqual(t, id1, id2) +} diff --git a/container/templates.go b/container/templates.go new file mode 100644 index 0000000..7c16c37 --- /dev/null +++ b/container/templates.go @@ -0,0 +1,301 @@ +package container + +import ( + "embed" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "forge.lthn.ai/core/go/pkg/io" +) + +//go:embed templates/*.yml +var embeddedTemplates embed.FS + +// Template represents a LinuxKit YAML template. +type Template struct { + // Name is the template identifier (e.g., "core-dev", "server-php"). + Name string + // Description is a human-readable description of the template. + Description string + // Path is the file path to the template (relative or absolute). + Path string +} + +// builtinTemplates defines the metadata for embedded templates. +var builtinTemplates = []Template{ + { + Name: "core-dev", + Description: "Development environment with Go, Node.js, PHP, Docker-in-LinuxKit, and SSH access", + Path: "templates/core-dev.yml", + }, + { + Name: "server-php", + Description: "Production PHP server with FrankenPHP, Caddy reverse proxy, and health checks", + Path: "templates/server-php.yml", + }, +} + +// ListTemplates returns all available LinuxKit templates. +// It combines embedded templates with any templates found in the user's +// .core/linuxkit directory. 
+func ListTemplates() []Template { + templates := make([]Template, len(builtinTemplates)) + copy(templates, builtinTemplates) + + // Check for user templates in .core/linuxkit/ + userTemplatesDir := getUserTemplatesDir() + if userTemplatesDir != "" { + userTemplates := scanUserTemplates(userTemplatesDir) + templates = append(templates, userTemplates...) + } + + return templates +} + +// GetTemplate returns the content of a template by name. +// It first checks embedded templates, then user templates. +func GetTemplate(name string) (string, error) { + // Check embedded templates first + for _, t := range builtinTemplates { + if t.Name == name { + content, err := embeddedTemplates.ReadFile(t.Path) + if err != nil { + return "", fmt.Errorf("failed to read embedded template %s: %w", name, err) + } + return string(content), nil + } + } + + // Check user templates + userTemplatesDir := getUserTemplatesDir() + if userTemplatesDir != "" { + templatePath := filepath.Join(userTemplatesDir, name+".yml") + if io.Local.IsFile(templatePath) { + content, err := io.Local.Read(templatePath) + if err != nil { + return "", fmt.Errorf("failed to read user template %s: %w", name, err) + } + return content, nil + } + } + + return "", fmt.Errorf("template not found: %s", name) +} + +// ApplyTemplate applies variable substitution to a template. +// It supports two syntaxes: +// - ${VAR} - required variable, returns error if not provided +// - ${VAR:-default} - variable with default value +func ApplyTemplate(name string, vars map[string]string) (string, error) { + content, err := GetTemplate(name) + if err != nil { + return "", err + } + + return ApplyVariables(content, vars) +} + +// ApplyVariables applies variable substitution to content string. 
+// It supports two syntaxes: +// - ${VAR} - required variable, returns error if not provided +// - ${VAR:-default} - variable with default value +func ApplyVariables(content string, vars map[string]string) (string, error) { + // Pattern for ${VAR:-default} syntax + defaultPattern := regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*):-([^}]*)\}`) + + // Pattern for ${VAR} syntax (no default) + requiredPattern := regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*)\}`) + + // Track missing required variables + var missingVars []string + + // First pass: replace variables with defaults + result := defaultPattern.ReplaceAllStringFunc(content, func(match string) string { + submatch := defaultPattern.FindStringSubmatch(match) + if len(submatch) != 3 { + return match + } + varName := submatch[1] + defaultVal := submatch[2] + + if val, ok := vars[varName]; ok { + return val + } + return defaultVal + }) + + // Second pass: replace required variables and track missing ones + result = requiredPattern.ReplaceAllStringFunc(result, func(match string) string { + submatch := requiredPattern.FindStringSubmatch(match) + if len(submatch) != 2 { + return match + } + varName := submatch[1] + + if val, ok := vars[varName]; ok { + return val + } + missingVars = append(missingVars, varName) + return match // Keep original if missing + }) + + if len(missingVars) > 0 { + return "", fmt.Errorf("missing required variables: %s", strings.Join(missingVars, ", ")) + } + + return result, nil +} + +// ExtractVariables extracts all variable names from a template. +// Returns two slices: required variables and optional variables (with defaults). 
+func ExtractVariables(content string) (required []string, optional map[string]string) { + optional = make(map[string]string) + requiredSet := make(map[string]bool) + + // Pattern for ${VAR:-default} syntax + defaultPattern := regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*):-([^}]*)\}`) + + // Pattern for ${VAR} syntax (no default) + requiredPattern := regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*)\}`) + + // Find optional variables with defaults + matches := defaultPattern.FindAllStringSubmatch(content, -1) + for _, match := range matches { + if len(match) == 3 { + optional[match[1]] = match[2] + } + } + + // Find required variables + matches = requiredPattern.FindAllStringSubmatch(content, -1) + for _, match := range matches { + if len(match) == 2 { + varName := match[1] + // Only add if not already in optional (with default) + if _, hasDefault := optional[varName]; !hasDefault { + requiredSet[varName] = true + } + } + } + + // Convert set to slice + for v := range requiredSet { + required = append(required, v) + } + + return required, optional +} + +// getUserTemplatesDir returns the path to user templates directory. +// Returns empty string if the directory doesn't exist. +func getUserTemplatesDir() string { + // Try workspace-relative .core/linuxkit first + cwd, err := os.Getwd() + if err == nil { + wsDir := filepath.Join(cwd, ".core", "linuxkit") + if io.Local.IsDir(wsDir) { + return wsDir + } + } + + // Try home directory + home, err := os.UserHomeDir() + if err != nil { + return "" + } + + homeDir := filepath.Join(home, ".core", "linuxkit") + if io.Local.IsDir(homeDir) { + return homeDir + } + + return "" +} + +// scanUserTemplates scans a directory for .yml template files. 
+func scanUserTemplates(dir string) []Template { + var templates []Template + + entries, err := io.Local.List(dir) + if err != nil { + return templates + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + if !strings.HasSuffix(name, ".yml") && !strings.HasSuffix(name, ".yaml") { + continue + } + + // Extract template name from filename + templateName := strings.TrimSuffix(strings.TrimSuffix(name, ".yml"), ".yaml") + + // Skip if this is a builtin template name (embedded takes precedence) + isBuiltin := false + for _, bt := range builtinTemplates { + if bt.Name == templateName { + isBuiltin = true + break + } + } + if isBuiltin { + continue + } + + // Read file to extract description from comments + description := extractTemplateDescription(filepath.Join(dir, name)) + if description == "" { + description = "User-defined template" + } + + templates = append(templates, Template{ + Name: templateName, + Description: description, + Path: filepath.Join(dir, name), + }) + } + + return templates +} + +// extractTemplateDescription reads the first comment block from a YAML file +// to use as a description. 
+func extractTemplateDescription(path string) string { + content, err := io.Local.Read(path) + if err != nil { + return "" + } + + lines := strings.Split(content, "\n") + var descLines []string + + for _, line := range lines { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(trimmed, "#") { + // Remove the # and trim + comment := strings.TrimSpace(strings.TrimPrefix(trimmed, "#")) + if comment != "" { + descLines = append(descLines, comment) + // Only take the first meaningful comment line as description + if len(descLines) == 1 { + return comment + } + } + } else if trimmed != "" { + // Hit non-comment content, stop + break + } + } + + if len(descLines) > 0 { + return descLines[0] + } + return "" +} diff --git a/container/templates/core-dev.yml b/container/templates/core-dev.yml new file mode 100644 index 0000000..712e43e --- /dev/null +++ b/container/templates/core-dev.yml @@ -0,0 +1,121 @@ +# Core Development Environment Template +# A full-featured development environment with multiple runtimes +# +# Variables: +# ${SSH_KEY} - SSH public key for access (required) +# ${MEMORY:-2048} - Memory in MB (default: 2048) +# ${CPUS:-2} - Number of CPUs (default: 2) +# ${HOSTNAME:-core-dev} - Hostname for the VM +# ${DATA_SIZE:-10G} - Size of persistent /data volume + +kernel: + image: linuxkit/kernel:6.6.13 + cmdline: "console=tty0 console=ttyS0" + +init: + - linuxkit/init:v1.2.0 + - linuxkit/runc:v1.1.12 + - linuxkit/containerd:v1.7.13 + - linuxkit/ca-certificates:v1.0.0 + +onboot: + - name: sysctl + image: linuxkit/sysctl:v1.0.0 + - name: format + image: linuxkit/format:v1.0.0 + - name: mount + image: linuxkit/mount:v1.0.0 + command: ["/usr/bin/mountie", "/dev/sda1", "/data"] + - name: dhcpcd + image: linuxkit/dhcpcd:v1.0.0 + command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"] + +onshutdown: + - name: shutdown + image: busybox:latest + command: ["/bin/echo", "Shutting down..."] + +services: + - name: getty + image: linuxkit/getty:v1.0.0 + 
env: + - INSECURE=true + + - name: sshd + image: linuxkit/sshd:v1.2.0 + binds: + - /etc/ssh/authorized_keys:/root/.ssh/authorized_keys + + - name: docker + image: docker:24.0-dind + capabilities: + - all + net: host + pid: host + binds: + - /var/run:/var/run + - /data/docker:/var/lib/docker + rootfsPropagation: shared + + - name: dev-tools + image: alpine:3.19 + capabilities: + - all + net: host + binds: + - /data:/data + command: + - /bin/sh + - -c + - | + # Install development tools + apk add --no-cache \ + git curl wget vim nano htop tmux \ + build-base gcc musl-dev linux-headers \ + openssh-client jq yq + + # Install Go 1.22.0 + wget -q https://go.dev/dl/go1.22.0.linux-amd64.tar.gz + tar -C /usr/local -xzf go1.22.0.linux-amd64.tar.gz + rm go1.22.0.linux-amd64.tar.gz + echo 'export PATH=/usr/local/go/bin:$PATH' >> /etc/profile + + # Install Node.js + apk add --no-cache nodejs npm + + # Install PHP + apk add --no-cache php82 php82-cli php82-curl php82-json php82-mbstring \ + php82-openssl php82-pdo php82-pdo_mysql php82-pdo_pgsql php82-phar \ + php82-session php82-tokenizer php82-xml php82-zip composer + + # Keep container running + tail -f /dev/null + +files: + - path: /etc/hostname + contents: "${HOSTNAME:-core-dev}" + - path: /etc/ssh/authorized_keys + contents: "${SSH_KEY}" + mode: "0600" + - path: /etc/profile.d/dev.sh + contents: | + export PATH=$PATH:/usr/local/go/bin + export GOPATH=/data/go + export PATH=$PATH:$GOPATH/bin + cd /data + mode: "0755" + - path: /etc/motd + contents: | + ================================================ + Core Development Environment + + Runtimes: Go, Node.js, PHP + Tools: git, curl, vim, docker + + Data directory: /data (persistent) + ================================================ + +trust: + org: + - linuxkit + - library diff --git a/container/templates/server-php.yml b/container/templates/server-php.yml new file mode 100644 index 0000000..9db9f74 --- /dev/null +++ b/container/templates/server-php.yml @@ -0,0 +1,142 @@ +# 
PHP/FrankenPHP Server Template +# A minimal production-ready PHP server with FrankenPHP and Caddy +# +# Variables: +# ${SSH_KEY} - SSH public key for management access (required) +# ${MEMORY:-512} - Memory in MB (default: 512) +# ${CPUS:-1} - Number of CPUs (default: 1) +# ${HOSTNAME:-php-server} - Hostname for the VM +# ${APP_NAME:-app} - Application name +# ${DOMAIN:-localhost} - Domain for SSL certificates +# ${PHP_MEMORY:-128M} - PHP memory limit + +kernel: + image: linuxkit/kernel:6.6.13 + cmdline: "console=tty0 console=ttyS0" + +init: + - linuxkit/init:v1.2.0 + - linuxkit/runc:v1.1.12 + - linuxkit/containerd:v1.7.13 + - linuxkit/ca-certificates:v1.0.0 + +onboot: + - name: sysctl + image: linuxkit/sysctl:v1.0.0 + - name: dhcpcd + image: linuxkit/dhcpcd:v1.0.0 + command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"] + +services: + - name: sshd + image: linuxkit/sshd:v1.2.0 + binds: + - /etc/ssh/authorized_keys:/root/.ssh/authorized_keys + + - name: frankenphp + image: dunglas/frankenphp:latest + capabilities: + - CAP_NET_BIND_SERVICE + net: host + binds: + - /app:/app + - /data:/data + - /etc/caddy/Caddyfile:/etc/caddy/Caddyfile + env: + - SERVER_NAME=${DOMAIN:-localhost} + - FRANKENPHP_CONFIG=/etc/caddy/Caddyfile + command: + - frankenphp + - run + - --config + - /etc/caddy/Caddyfile + + - name: healthcheck + image: alpine:3.19 + net: host + command: + - /bin/sh + - -c + - | + apk add --no-cache curl + while true; do + sleep 30 + curl -sf http://localhost/health || echo "Health check failed" + done + +files: + - path: /etc/hostname + contents: "${HOSTNAME:-php-server}" + - path: /etc/ssh/authorized_keys + contents: "${SSH_KEY}" + mode: "0600" + - path: /etc/caddy/Caddyfile + contents: | + { + frankenphp + order php_server before file_server + } + + ${DOMAIN:-localhost} { + root * /app/public + + # Health check endpoint + handle /health { + respond "OK" 200 + } + + # PHP handling + php_server + + # Encode responses + encode zstd gzip + + # 
Security headers + header { + X-Content-Type-Options nosniff + X-Frame-Options DENY + X-XSS-Protection "1; mode=block" + Referrer-Policy strict-origin-when-cross-origin + } + + # Logging + log { + output file /data/logs/access.log + format json + } + } + mode: "0644" + - path: /app/public/index.php + contents: | + 'healthy', + 'app' => '${APP_NAME:-app}', + 'timestamp' => date('c'), + 'php_version' => PHP_VERSION, + ]); + mode: "0644" + - path: /etc/php/php.ini + contents: | + memory_limit = ${PHP_MEMORY:-128M} + max_execution_time = 30 + upload_max_filesize = 64M + post_max_size = 64M + display_errors = Off + log_errors = On + error_log = /data/logs/php_errors.log + mode: "0644" + - path: /data/logs/.gitkeep + contents: "" + +trust: + org: + - linuxkit + - library + - dunglas diff --git a/container/templates_test.go b/container/templates_test.go new file mode 100644 index 0000000..5e94659 --- /dev/null +++ b/container/templates_test.go @@ -0,0 +1,604 @@ +package container + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestListTemplates_Good(t *testing.T) { + tm := NewTemplateManager(io.Local) + templates := tm.ListTemplates() + + // Should have at least the builtin templates + assert.GreaterOrEqual(t, len(templates), 2) + + // Find the core-dev template + var found bool + for _, tmpl := range templates { + if tmpl.Name == "core-dev" { + found = true + assert.NotEmpty(t, tmpl.Description) + assert.NotEmpty(t, tmpl.Path) + break + } + } + assert.True(t, found, "core-dev template should exist") + + // Find the server-php template + found = false + for _, tmpl := range templates { + if tmpl.Name == "server-php" { + found = true + assert.NotEmpty(t, tmpl.Description) + assert.NotEmpty(t, tmpl.Path) + break + } + } + assert.True(t, found, "server-php template should exist") +} + +func TestGetTemplate_Good_CoreDev(t *testing.T) { + tm 
:= NewTemplateManager(io.Local) + content, err := tm.GetTemplate("core-dev") + + require.NoError(t, err) + assert.NotEmpty(t, content) + assert.Contains(t, content, "kernel:") + assert.Contains(t, content, "linuxkit/kernel") + assert.Contains(t, content, "${SSH_KEY}") + assert.Contains(t, content, "services:") +} + +func TestGetTemplate_Good_ServerPhp(t *testing.T) { + tm := NewTemplateManager(io.Local) + content, err := tm.GetTemplate("server-php") + + require.NoError(t, err) + assert.NotEmpty(t, content) + assert.Contains(t, content, "kernel:") + assert.Contains(t, content, "frankenphp") + assert.Contains(t, content, "${SSH_KEY}") + assert.Contains(t, content, "${DOMAIN:-localhost}") +} + +func TestGetTemplate_Bad_NotFound(t *testing.T) { + tm := NewTemplateManager(io.Local) + _, err := tm.GetTemplate("nonexistent-template") + + assert.Error(t, err) + assert.Contains(t, err.Error(), "template not found") +} + +func TestApplyVariables_Good_SimpleSubstitution(t *testing.T) { + content := "Hello ${NAME}, welcome to ${PLACE}!" 
+ vars := map[string]string{ + "NAME": "World", + "PLACE": "Core", + } + + result, err := ApplyVariables(content, vars) + + require.NoError(t, err) + assert.Equal(t, "Hello World, welcome to Core!", result) +} + +func TestApplyVariables_Good_WithDefaults(t *testing.T) { + content := "Memory: ${MEMORY:-1024}MB, CPUs: ${CPUS:-2}" + vars := map[string]string{ + "MEMORY": "2048", + // CPUS not provided, should use default + } + + result, err := ApplyVariables(content, vars) + + require.NoError(t, err) + assert.Equal(t, "Memory: 2048MB, CPUs: 2", result) +} + +func TestApplyVariables_Good_AllDefaults(t *testing.T) { + content := "${HOST:-localhost}:${PORT:-8080}" + vars := map[string]string{} // No vars provided + + result, err := ApplyVariables(content, vars) + + require.NoError(t, err) + assert.Equal(t, "localhost:8080", result) +} + +func TestApplyVariables_Good_MixedSyntax(t *testing.T) { + content := ` +hostname: ${HOSTNAME:-myhost} +ssh_key: ${SSH_KEY} +memory: ${MEMORY:-512} +` + vars := map[string]string{ + "SSH_KEY": "ssh-rsa AAAA...", + "HOSTNAME": "custom-host", + } + + result, err := ApplyVariables(content, vars) + + require.NoError(t, err) + assert.Contains(t, result, "hostname: custom-host") + assert.Contains(t, result, "ssh_key: ssh-rsa AAAA...") + assert.Contains(t, result, "memory: 512") +} + +func TestApplyVariables_Good_EmptyDefault(t *testing.T) { + content := "value: ${OPT:-}" + vars := map[string]string{} + + result, err := ApplyVariables(content, vars) + + require.NoError(t, err) + assert.Equal(t, "value: ", result) +} + +func TestApplyVariables_Bad_MissingRequired(t *testing.T) { + content := "SSH Key: ${SSH_KEY}" + vars := map[string]string{} // Missing required SSH_KEY + + _, err := ApplyVariables(content, vars) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing required variables") + assert.Contains(t, err.Error(), "SSH_KEY") +} + +func TestApplyVariables_Bad_MultipleMissing(t *testing.T) { + content := "${VAR1} and ${VAR2} 
and ${VAR3}" + vars := map[string]string{ + "VAR2": "provided", + } + + _, err := ApplyVariables(content, vars) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing required variables") + // Should mention both missing vars + errStr := err.Error() + assert.True(t, strings.Contains(errStr, "VAR1") || strings.Contains(errStr, "VAR3")) +} + +func TestApplyTemplate_Good(t *testing.T) { + tm := NewTemplateManager(io.Local) + vars := map[string]string{ + "SSH_KEY": "ssh-rsa AAAA... user@host", + } + + result, err := tm.ApplyTemplate("core-dev", vars) + + require.NoError(t, err) + assert.NotEmpty(t, result) + assert.Contains(t, result, "ssh-rsa AAAA... user@host") + // Default values should be applied + assert.Contains(t, result, "core-dev") // HOSTNAME default +} + +func TestApplyTemplate_Bad_TemplateNotFound(t *testing.T) { + tm := NewTemplateManager(io.Local) + vars := map[string]string{ + "SSH_KEY": "test", + } + + _, err := tm.ApplyTemplate("nonexistent", vars) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "template not found") +} + +func TestApplyTemplate_Bad_MissingVariable(t *testing.T) { + tm := NewTemplateManager(io.Local) + // server-php requires SSH_KEY + vars := map[string]string{} // Missing required SSH_KEY + + _, err := tm.ApplyTemplate("server-php", vars) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing required variables") +} + +func TestExtractVariables_Good(t *testing.T) { + content := ` +hostname: ${HOSTNAME:-myhost} +ssh_key: ${SSH_KEY} +memory: ${MEMORY:-1024} +cpus: ${CPUS:-2} +api_key: ${API_KEY} +` + required, optional := ExtractVariables(content) + + // Required variables (no default) + assert.Contains(t, required, "SSH_KEY") + assert.Contains(t, required, "API_KEY") + assert.Len(t, required, 2) + + // Optional variables (with defaults) + assert.Equal(t, "myhost", optional["HOSTNAME"]) + assert.Equal(t, "1024", optional["MEMORY"]) + assert.Equal(t, "2", optional["CPUS"]) + assert.Len(t, optional, 3) 
+} + +func TestExtractVariables_Good_NoVariables(t *testing.T) { + content := "This has no variables at all" + + required, optional := ExtractVariables(content) + + assert.Empty(t, required) + assert.Empty(t, optional) +} + +func TestExtractVariables_Good_OnlyDefaults(t *testing.T) { + content := "${A:-default1} ${B:-default2}" + + required, optional := ExtractVariables(content) + + assert.Empty(t, required) + assert.Len(t, optional, 2) + assert.Equal(t, "default1", optional["A"]) + assert.Equal(t, "default2", optional["B"]) +} + +func TestScanUserTemplates_Good(t *testing.T) { + tm := NewTemplateManager(io.Local) + // Create a temporary directory with template files + tmpDir := t.TempDir() + + // Create a valid template file + templateContent := `# My Custom Template +# A custom template for testing +kernel: + image: linuxkit/kernel:6.6 +` + err := os.WriteFile(filepath.Join(tmpDir, "custom.yml"), []byte(templateContent), 0644) + require.NoError(t, err) + + // Create a non-template file (should be ignored) + err = os.WriteFile(filepath.Join(tmpDir, "readme.txt"), []byte("Not a template"), 0644) + require.NoError(t, err) + + templates := tm.scanUserTemplates(tmpDir) + + assert.Len(t, templates, 1) + assert.Equal(t, "custom", templates[0].Name) + assert.Equal(t, "My Custom Template", templates[0].Description) +} + +func TestScanUserTemplates_Good_MultipleTemplates(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + + // Create multiple template files + err := os.WriteFile(filepath.Join(tmpDir, "web.yml"), []byte("# Web Server\nkernel:"), 0644) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(tmpDir, "db.yaml"), []byte("# Database Server\nkernel:"), 0644) + require.NoError(t, err) + + templates := tm.scanUserTemplates(tmpDir) + + assert.Len(t, templates, 2) + + // Check names are extracted correctly + names := make(map[string]bool) + for _, tmpl := range templates { + names[tmpl.Name] = true + } + assert.True(t, names["web"]) + 
assert.True(t, names["db"]) +} + +func TestScanUserTemplates_Good_EmptyDirectory(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + + templates := tm.scanUserTemplates(tmpDir) + + assert.Empty(t, templates) +} + +func TestScanUserTemplates_Bad_NonexistentDirectory(t *testing.T) { + tm := NewTemplateManager(io.Local) + templates := tm.scanUserTemplates("/nonexistent/path/to/templates") + + assert.Empty(t, templates) +} + +func TestExtractTemplateDescription_Good(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "test.yml") + + content := `# My Template Description +# More details here +kernel: + image: test +` + err := os.WriteFile(path, []byte(content), 0644) + require.NoError(t, err) + + desc := tm.extractTemplateDescription(path) + + assert.Equal(t, "My Template Description", desc) +} + +func TestExtractTemplateDescription_Good_NoComments(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "test.yml") + + content := `kernel: + image: test +` + err := os.WriteFile(path, []byte(content), 0644) + require.NoError(t, err) + + desc := tm.extractTemplateDescription(path) + + assert.Empty(t, desc) +} + +func TestExtractTemplateDescription_Bad_FileNotFound(t *testing.T) { + tm := NewTemplateManager(io.Local) + desc := tm.extractTemplateDescription("/nonexistent/file.yml") + + assert.Empty(t, desc) +} + +func TestVariablePatternEdgeCases_Good(t *testing.T) { + tests := []struct { + name string + content string + vars map[string]string + expected string + }{ + { + name: "underscore in name", + content: "${MY_VAR:-default}", + vars: map[string]string{"MY_VAR": "value"}, + expected: "value", + }, + { + name: "numbers in name", + content: "${VAR123:-default}", + vars: map[string]string{}, + expected: "default", + }, + { + name: "default with special chars", + content: "${URL:-http://localhost:8080}", + vars: map[string]string{}, + 
expected: "http://localhost:8080", + }, + { + name: "default with path", + content: "${PATH:-/usr/local/bin}", + vars: map[string]string{}, + expected: "/usr/local/bin", + }, + { + name: "adjacent variables", + content: "${A:-a}${B:-b}${C:-c}", + vars: map[string]string{"B": "X"}, + expected: "aXc", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := ApplyVariables(tt.content, tt.vars) + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestListTemplates_Good_WithUserTemplates(t *testing.T) { + // Create a workspace directory with user templates + tmpDir := t.TempDir() + coreDir := filepath.Join(tmpDir, ".core", "linuxkit") + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + // Create a user template + templateContent := `# Custom user template +kernel: + image: linuxkit/kernel:6.6 +` + err = os.WriteFile(filepath.Join(coreDir, "user-custom.yml"), []byte(templateContent), 0644) + require.NoError(t, err) + + tm := NewTemplateManager(io.Local).WithWorkingDir(tmpDir) + templates := tm.ListTemplates() + + // Should have at least the builtin templates plus the user template + assert.GreaterOrEqual(t, len(templates), 3) + + // Check that user template is included + found := false + for _, tmpl := range templates { + if tmpl.Name == "user-custom" { + found = true + assert.Equal(t, "Custom user template", tmpl.Description) + break + } + } + assert.True(t, found, "user-custom template should exist") +} + +func TestGetTemplate_Good_UserTemplate(t *testing.T) { + // Create a workspace directory with user templates + tmpDir := t.TempDir() + coreDir := filepath.Join(tmpDir, ".core", "linuxkit") + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + // Create a user template + templateContent := `# My user template +kernel: + image: linuxkit/kernel:6.6 +services: + - name: test +` + err = os.WriteFile(filepath.Join(coreDir, "my-user-template.yml"), []byte(templateContent), 0644) 
+ require.NoError(t, err) + + tm := NewTemplateManager(io.Local).WithWorkingDir(tmpDir) + content, err := tm.GetTemplate("my-user-template") + + require.NoError(t, err) + assert.Contains(t, content, "kernel:") + assert.Contains(t, content, "My user template") +} + +func TestGetTemplate_Good_UserTemplate_YamlExtension(t *testing.T) { + // Create a workspace directory with user templates + tmpDir := t.TempDir() + coreDir := filepath.Join(tmpDir, ".core", "linuxkit") + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + // Create a user template with .yaml extension + templateContent := `# My yaml template +kernel: + image: linuxkit/kernel:6.6 +` + err = os.WriteFile(filepath.Join(coreDir, "my-yaml-template.yaml"), []byte(templateContent), 0644) + require.NoError(t, err) + + tm := NewTemplateManager(io.Local).WithWorkingDir(tmpDir) + content, err := tm.GetTemplate("my-yaml-template") + + require.NoError(t, err) + assert.Contains(t, content, "kernel:") + assert.Contains(t, content, "My yaml template") +} + +func TestScanUserTemplates_Good_SkipsBuiltinNames(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + + // Create a template with a builtin name (should be skipped) + err := os.WriteFile(filepath.Join(tmpDir, "core-dev.yml"), []byte("# Duplicate\nkernel:"), 0644) + require.NoError(t, err) + + // Create a unique template + err = os.WriteFile(filepath.Join(tmpDir, "unique.yml"), []byte("# Unique\nkernel:"), 0644) + require.NoError(t, err) + + templates := tm.scanUserTemplates(tmpDir) + + // Should only have the unique template, not the builtin name + assert.Len(t, templates, 1) + assert.Equal(t, "unique", templates[0].Name) +} + +func TestScanUserTemplates_Good_SkipsDirectories(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + + // Create a subdirectory (should be skipped) + err := os.MkdirAll(filepath.Join(tmpDir, "subdir"), 0755) + require.NoError(t, err) + + // Create a valid template + err = 
os.WriteFile(filepath.Join(tmpDir, "valid.yml"), []byte("# Valid\nkernel:"), 0644) + require.NoError(t, err) + + templates := tm.scanUserTemplates(tmpDir) + + assert.Len(t, templates, 1) + assert.Equal(t, "valid", templates[0].Name) +} + +func TestScanUserTemplates_Good_YamlExtension(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + + // Create templates with both extensions + err := os.WriteFile(filepath.Join(tmpDir, "template1.yml"), []byte("# Template 1\nkernel:"), 0644) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(tmpDir, "template2.yaml"), []byte("# Template 2\nkernel:"), 0644) + require.NoError(t, err) + + templates := tm.scanUserTemplates(tmpDir) + + assert.Len(t, templates, 2) + + names := make(map[string]bool) + for _, tmpl := range templates { + names[tmpl.Name] = true + } + assert.True(t, names["template1"]) + assert.True(t, names["template2"]) +} + +func TestExtractTemplateDescription_Good_EmptyComment(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "test.yml") + + // First comment is empty, second has content + content := `# +# Actual description here +kernel: + image: test +` + err := os.WriteFile(path, []byte(content), 0644) + require.NoError(t, err) + + desc := tm.extractTemplateDescription(path) + + assert.Equal(t, "Actual description here", desc) +} + +func TestExtractTemplateDescription_Good_MultipleEmptyComments(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "test.yml") + + // Multiple empty comments before actual content + content := `# +# +# +# Real description +kernel: + image: test +` + err := os.WriteFile(path, []byte(content), 0644) + require.NoError(t, err) + + desc := tm.extractTemplateDescription(path) + + assert.Equal(t, "Real description", desc) +} + +func TestGetUserTemplatesDir_Good_NoDirectory(t *testing.T) { + tm := 
NewTemplateManager(io.Local).WithWorkingDir("/tmp/nonexistent-wd").WithHomeDir("/tmp/nonexistent-home") + dir := tm.getUserTemplatesDir() + + assert.Empty(t, dir) +} + +func TestScanUserTemplates_Good_DefaultDescription(t *testing.T) { + tm := NewTemplateManager(io.Local) + tmpDir := t.TempDir() + + // Create a template without comments + content := `kernel: + image: test +` + err := os.WriteFile(filepath.Join(tmpDir, "nocomment.yml"), []byte(content), 0644) + require.NoError(t, err) + + templates := tm.scanUserTemplates(tmpDir) + + assert.Len(t, templates, 1) + assert.Equal(t, "User-defined template", templates[0].Description) +} diff --git a/deploy/coolify/client.go b/deploy/coolify/client.go new file mode 100644 index 0000000..c1b849a --- /dev/null +++ b/deploy/coolify/client.go @@ -0,0 +1,219 @@ +package coolify + +import ( + "context" + "encoding/json" + "fmt" + "os" + "sync" + + "forge.lthn.ai/core/go-devops/deploy/python" +) + +// Client wraps the Python CoolifyClient for Go usage. +type Client struct { + baseURL string + apiToken string + timeout int + verifySSL bool + + mu sync.Mutex +} + +// Config holds Coolify client configuration. +type Config struct { + BaseURL string + APIToken string + Timeout int + VerifySSL bool +} + +// DefaultConfig returns default configuration from environment. +func DefaultConfig() Config { + return Config{ + BaseURL: os.Getenv("COOLIFY_URL"), + APIToken: os.Getenv("COOLIFY_TOKEN"), + Timeout: 30, + VerifySSL: true, + } +} + +// NewClient creates a new Coolify client. 
+func NewClient(cfg Config) (*Client, error) { + if cfg.BaseURL == "" { + return nil, fmt.Errorf("COOLIFY_URL not set") + } + if cfg.APIToken == "" { + return nil, fmt.Errorf("COOLIFY_TOKEN not set") + } + + // Initialize Python runtime + if err := python.Init(); err != nil { + return nil, fmt.Errorf("failed to initialize Python: %w", err) + } + + return &Client{ + baseURL: cfg.BaseURL, + apiToken: cfg.APIToken, + timeout: cfg.Timeout, + verifySSL: cfg.VerifySSL, + }, nil +} + +// Call invokes a Coolify API operation by operationId. +func (c *Client) Call(ctx context.Context, operationID string, params map[string]any) (map[string]any, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if params == nil { + params = map[string]any{} + } + + // Generate and run Python script + script, err := python.CoolifyScript(c.baseURL, c.apiToken, operationID, params) + if err != nil { + return nil, fmt.Errorf("failed to generate script: %w", err) + } + output, err := python.RunScript(ctx, script) + if err != nil { + return nil, fmt.Errorf("API call %s failed: %w", operationID, err) + } + + // Parse JSON result + var result map[string]any + if err := json.Unmarshal([]byte(output), &result); err != nil { + // Try parsing as array + var arrResult []any + if err2 := json.Unmarshal([]byte(output), &arrResult); err2 == nil { + return map[string]any{"result": arrResult}, nil + } + return nil, fmt.Errorf("failed to parse response: %w (output: %s)", err, output) + } + + return result, nil +} + +// ListServers returns all servers. +func (c *Client) ListServers(ctx context.Context) ([]map[string]any, error) { + result, err := c.Call(ctx, "list-servers", nil) + if err != nil { + return nil, err + } + return extractArray(result) +} + +// GetServer returns a server by UUID. +func (c *Client) GetServer(ctx context.Context, uuid string) (map[string]any, error) { + return c.Call(ctx, "get-server-by-uuid", map[string]any{"uuid": uuid}) +} + +// ValidateServer validates a server by UUID. 
+func (c *Client) ValidateServer(ctx context.Context, uuid string) (map[string]any, error) { + return c.Call(ctx, "validate-server-by-uuid", map[string]any{"uuid": uuid}) +} + +// ListProjects returns all projects. +func (c *Client) ListProjects(ctx context.Context) ([]map[string]any, error) { + result, err := c.Call(ctx, "list-projects", nil) + if err != nil { + return nil, err + } + return extractArray(result) +} + +// GetProject returns a project by UUID. +func (c *Client) GetProject(ctx context.Context, uuid string) (map[string]any, error) { + return c.Call(ctx, "get-project-by-uuid", map[string]any{"uuid": uuid}) +} + +// CreateProject creates a new project. +func (c *Client) CreateProject(ctx context.Context, name, description string) (map[string]any, error) { + return c.Call(ctx, "create-project", map[string]any{ + "name": name, + "description": description, + }) +} + +// ListApplications returns all applications. +func (c *Client) ListApplications(ctx context.Context) ([]map[string]any, error) { + result, err := c.Call(ctx, "list-applications", nil) + if err != nil { + return nil, err + } + return extractArray(result) +} + +// GetApplication returns an application by UUID. +func (c *Client) GetApplication(ctx context.Context, uuid string) (map[string]any, error) { + return c.Call(ctx, "get-application-by-uuid", map[string]any{"uuid": uuid}) +} + +// DeployApplication triggers deployment of an application. +func (c *Client) DeployApplication(ctx context.Context, uuid string) (map[string]any, error) { + return c.Call(ctx, "deploy-by-tag-or-uuid", map[string]any{"uuid": uuid}) +} + +// ListDatabases returns all databases. +func (c *Client) ListDatabases(ctx context.Context) ([]map[string]any, error) { + result, err := c.Call(ctx, "list-databases", nil) + if err != nil { + return nil, err + } + return extractArray(result) +} + +// GetDatabase returns a database by UUID. 
+func (c *Client) GetDatabase(ctx context.Context, uuid string) (map[string]any, error) { + return c.Call(ctx, "get-database-by-uuid", map[string]any{"uuid": uuid}) +} + +// ListServices returns all services. +func (c *Client) ListServices(ctx context.Context) ([]map[string]any, error) { + result, err := c.Call(ctx, "list-services", nil) + if err != nil { + return nil, err + } + return extractArray(result) +} + +// GetService returns a service by UUID. +func (c *Client) GetService(ctx context.Context, uuid string) (map[string]any, error) { + return c.Call(ctx, "get-service-by-uuid", map[string]any{"uuid": uuid}) +} + +// ListEnvironments returns environments for a project. +func (c *Client) ListEnvironments(ctx context.Context, projectUUID string) ([]map[string]any, error) { + result, err := c.Call(ctx, "get-environments", map[string]any{"project_uuid": projectUUID}) + if err != nil { + return nil, err + } + return extractArray(result) +} + +// GetTeam returns the current team. +func (c *Client) GetTeam(ctx context.Context) (map[string]any, error) { + return c.Call(ctx, "get-current-team", nil) +} + +// GetTeamMembers returns members of the current team. +func (c *Client) GetTeamMembers(ctx context.Context) ([]map[string]any, error) { + result, err := c.Call(ctx, "get-current-team-members", nil) + if err != nil { + return nil, err + } + return extractArray(result) +} + +// extractArray extracts an array from result["result"] or returns empty. 
+func extractArray(result map[string]any) ([]map[string]any, error) { + if arr, ok := result["result"].([]any); ok { + items := make([]map[string]any, 0, len(arr)) + for _, item := range arr { + if m, ok := item.(map[string]any); ok { + items = append(items, m) + } + } + return items, nil + } + return nil, nil +} diff --git a/deploy/python/python.go b/deploy/python/python.go new file mode 100644 index 0000000..0a0692c --- /dev/null +++ b/deploy/python/python.go @@ -0,0 +1,147 @@ +package python + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "sync" + + "forge.lthn.ai/core/go/pkg/framework/core" + "github.com/kluctl/go-embed-python/python" +) + +var ( + once sync.Once + ep *python.EmbeddedPython + initErr error +) + +// Init initializes the embedded Python runtime. +func Init() error { + once.Do(func() { + ep, initErr = python.NewEmbeddedPython("core-deploy") + }) + return initErr +} + +// GetPython returns the embedded Python instance. +func GetPython() *python.EmbeddedPython { + return ep +} + +// RunScript runs a Python script with the given code and returns stdout. +func RunScript(ctx context.Context, code string, args ...string) (string, error) { + if err := Init(); err != nil { + return "", err + } + + // Write code to temp file + tmpFile, err := os.CreateTemp("", "core-*.py") + if err != nil { + return "", core.E("python", "create temp file", err) + } + defer func() { _ = os.Remove(tmpFile.Name()) }() + + if _, err := tmpFile.WriteString(code); err != nil { + _ = tmpFile.Close() + return "", core.E("python", "write script", err) + } + _ = tmpFile.Close() + + // Build args: script path + any additional args + cmdArgs := append([]string{tmpFile.Name()}, args...) + + // Get the command + cmd, err := ep.PythonCmd(cmdArgs...) 
+ if err != nil { + return "", core.E("python", "create command", err) + } + + // Run with context + output, err := cmd.Output() + if err != nil { + // Try to get stderr for better error message + if exitErr, ok := err.(*exec.ExitError); ok { + return "", core.E("python", "run script", fmt.Errorf("%w: %s", err, string(exitErr.Stderr))) + } + return "", core.E("python", "run script", err) + } + + return string(output), nil +} + +// RunModule runs a Python module (python -m module_name). +func RunModule(ctx context.Context, module string, args ...string) (string, error) { + if err := Init(); err != nil { + return "", err + } + + cmdArgs := append([]string{"-m", module}, args...) + cmd, err := ep.PythonCmd(cmdArgs...) + if err != nil { + return "", core.E("python", "create command", err) + } + + output, err := cmd.Output() + if err != nil { + return "", core.E("python", fmt.Sprintf("run module %s", module), err) + } + + return string(output), nil +} + +// DevOpsPath returns the path to the DevOps repo. +func DevOpsPath() (string, error) { + if path := os.Getenv("DEVOPS_PATH"); path != "" { + return path, nil + } + home, err := os.UserHomeDir() + if err != nil { + return "", core.E("python", "get user home", err) + } + return filepath.Join(home, "Code", "DevOps"), nil +} + +// CoolifyModulePath returns the path to the Coolify module_utils. +func CoolifyModulePath() (string, error) { + path, err := DevOpsPath() + if err != nil { + return "", err + } + return filepath.Join(path, "playbooks", "roles", "coolify", "module_utils"), nil +} + +// CoolifyScript generates Python code to call the Coolify API. 
+func CoolifyScript(baseURL, apiToken, operation string, params map[string]any) (string, error) { + paramsJSON, err := json.Marshal(params) + if err != nil { + return "", core.E("python", "marshal params", err) + } + + modulePath, err := CoolifyModulePath() + if err != nil { + return "", err + } + + return fmt.Sprintf(` +import sys +import json +sys.path.insert(0, %q) + +from swagger.coolify_api import CoolifyClient + +client = CoolifyClient( + base_url=%q, + api_token=%q, + timeout=30, + verify_ssl=True, +) + +params = json.loads(%q) +result = client._call(%q, params, check_response=False) +print(json.dumps(result)) +`, modulePath, baseURL, apiToken, string(paramsJSON), operation), nil +} diff --git a/devkit/devkit.go b/devkit/devkit.go new file mode 100644 index 0000000..a7dec8d --- /dev/null +++ b/devkit/devkit.go @@ -0,0 +1,560 @@ +// Package devkit provides a developer toolkit for common automation commands. +// Designed by Gemini 3 Pro (Hypnos) + Claude Opus (Charon), signed LEK-1 | lthn.ai | EUPL-1.2 +package devkit + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" +) + +// --- Code Quality --- + +// Finding represents a single issue found by a linting tool. +type Finding struct { + File string + Line int + Message string + Tool string +} + +// CoverageReport holds the test coverage percentage for a package. +type CoverageReport struct { + Package string + Percentage float64 +} + +// RaceCondition represents a data race detected by the Go race detector. +type RaceCondition struct { + File string + Line int + Desc string +} + +// TODO represents a tracked code comment like TODO, FIXME, or HACK. +type TODO struct { + File string + Line int + Type string + Message string +} + +// --- Security --- + +// Vulnerability represents a dependency vulnerability. 
// Vulnerability represents a single dependency vulnerability reported by
// govulncheck.
type Vulnerability struct {
	ID          string
	Package     string
	Version     string
	Description string
}

// SecretLeak represents a potential secret found in the codebase by gitleaks.
type SecretLeak struct {
	File   string
	Line   int
	RuleID string
	Match  string
}

// PermIssue represents a file whose permission bits look too permissive.
type PermIssue struct {
	File       string
	Permission string
	Issue      string
}

// --- Git Operations ---

// DiffSummary summarises the size of a set of changes.
type DiffSummary struct {
	FilesChanged int
	Insertions   int
	Deletions    int
}

// Commit represents a single entry from git history.
type Commit struct {
	Hash    string
	Author  string
	Date    time.Time
	Message string
}

// --- Build & Dependencies ---

// BuildResult holds the outcome of compiling a single build target.
type BuildResult struct {
	Target string
	Path   string
	Error  error
}

// Graph represents a module dependency graph as adjacency lists.
type Graph struct {
	Nodes []string
	Edges map[string][]string
}

// --- Metrics ---

// ComplexFunc records a function together with its cyclomatic complexity.
type ComplexFunc struct {
	Package  string
	FuncName string
	File     string
	Line     int
	Score    int
}

// Toolkit wraps common dev automation commands into structured Go APIs.
// All commands run with Dir as their working directory.
type Toolkit struct {
	Dir string // Working directory for commands
}

// New creates a Toolkit rooted at the given directory.
func New(dir string) *Toolkit {
	tk := Toolkit{Dir: dir}
	return &tk
}
+ cmd.Dir = t.Dir + var stdoutBuf, stderrBuf bytes.Buffer + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + + err = cmd.Run() + stdout = stdoutBuf.String() + stderr = stderrBuf.String() + + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } else { + exitCode = -1 + } + } + return +} + +// FindTODOs greps for TODO/FIXME/HACK comments within a directory. +func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) { + pattern := `\b(TODO|FIXME|HACK)\b(\(.*\))?:` + stdout, stderr, exitCode, err := t.Run("git", "grep", "--line-number", "-E", pattern, "--", dir) + + if exitCode == 1 && stdout == "" { + return nil, nil + } + if err != nil && exitCode != 1 { + return nil, fmt.Errorf("git grep failed (exit %d): %s\n%s", exitCode, err, stderr) + } + + var todos []TODO + re := regexp.MustCompile(pattern) + + for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") { + if line == "" { + continue + } + parts := strings.SplitN(line, ":", 3) + if len(parts) < 3 { + continue + } + lineNum, _ := strconv.Atoi(parts[1]) + match := re.FindStringSubmatch(parts[2]) + todoType := "" + if len(match) > 1 { + todoType = match[1] + } + msg := strings.TrimSpace(re.Split(parts[2], 2)[1]) + + todos = append(todos, TODO{ + File: parts[0], + Line: lineNum, + Type: todoType, + Message: msg, + }) + } + return todos, nil +} + +// AuditDeps runs govulncheck to find dependency vulnerabilities. 
+func (t *Toolkit) AuditDeps() ([]Vulnerability, error) { + stdout, stderr, exitCode, err := t.Run("govulncheck", "./...") + if err != nil && exitCode != 0 && !strings.Contains(stdout, "Vulnerability") { + return nil, fmt.Errorf("govulncheck failed (exit %d): %s\n%s", exitCode, err, stderr) + } + + var vulns []Vulnerability + scanner := bufio.NewScanner(strings.NewReader(stdout)) + var cur Vulnerability + inBlock := false + + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "Vulnerability #") { + if cur.ID != "" { + vulns = append(vulns, cur) + } + fields := strings.Fields(line) + cur = Vulnerability{} + if len(fields) > 1 { + cur.ID = fields[1] + } + inBlock = true + } else if inBlock { + switch { + case strings.Contains(line, "Package:"): + cur.Package = strings.TrimSpace(strings.SplitN(line, ":", 2)[1]) + case strings.Contains(line, "Found in version:"): + cur.Version = strings.TrimSpace(strings.SplitN(line, ":", 2)[1]) + case line == "": + if cur.ID != "" { + vulns = append(vulns, cur) + cur = Vulnerability{} + } + inBlock = false + default: + if !strings.HasPrefix(line, " ") && cur.Description == "" { + cur.Description = strings.TrimSpace(line) + } + } + } + } + if cur.ID != "" { + vulns = append(vulns, cur) + } + return vulns, nil +} + +// DiffStat returns a summary of uncommitted changes. 
+func (t *Toolkit) DiffStat() (DiffSummary, error) { + stdout, stderr, exitCode, err := t.Run("git", "diff", "--stat") + if err != nil && exitCode != 0 { + return DiffSummary{}, fmt.Errorf("git diff failed (exit %d): %s\n%s", exitCode, err, stderr) + } + + var s DiffSummary + lines := strings.Split(strings.TrimSpace(stdout), "\n") + if len(lines) == 0 || lines[0] == "" { + return s, nil + } + + last := lines[len(lines)-1] + for _, part := range strings.Split(last, ",") { + part = strings.TrimSpace(part) + fields := strings.Fields(part) + if len(fields) < 2 { + continue + } + val, _ := strconv.Atoi(fields[0]) + switch { + case strings.Contains(part, "file"): + s.FilesChanged = val + case strings.Contains(part, "insertion"): + s.Insertions = val + case strings.Contains(part, "deletion"): + s.Deletions = val + } + } + return s, nil +} + +// UncommittedFiles returns paths of files with uncommitted changes. +func (t *Toolkit) UncommittedFiles() ([]string, error) { + stdout, stderr, exitCode, err := t.Run("git", "status", "--porcelain") + if err != nil && exitCode != 0 { + return nil, fmt.Errorf("git status failed: %s\n%s", err, stderr) + } + var files []string + for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") { + if len(line) > 3 { + files = append(files, strings.TrimSpace(line[3:])) + } + } + return files, nil +} + +// Lint runs go vet on the given package pattern. 
+func (t *Toolkit) Lint(pkg string) ([]Finding, error) { + _, stderr, exitCode, err := t.Run("go", "vet", pkg) + if exitCode == 0 { + return nil, nil + } + if err != nil && exitCode != 2 { + return nil, fmt.Errorf("go vet failed: %w", err) + } + + var findings []Finding + for _, line := range strings.Split(strings.TrimSpace(stderr), "\n") { + if line == "" { + continue + } + parts := strings.SplitN(line, ":", 4) + if len(parts) < 4 { + continue + } + lineNum, _ := strconv.Atoi(parts[1]) + findings = append(findings, Finding{ + File: parts[0], + Line: lineNum, + Message: strings.TrimSpace(parts[3]), + Tool: "go vet", + }) + } + return findings, nil +} + +// ScanSecrets runs gitleaks to find potential secret leaks. +func (t *Toolkit) ScanSecrets(dir string) ([]SecretLeak, error) { + stdout, _, exitCode, err := t.Run("gitleaks", "detect", "--source", dir, "--report-format", "csv", "--no-git") + if exitCode == 0 { + return nil, nil + } + if err != nil && exitCode != 1 { + return nil, fmt.Errorf("gitleaks failed: %w", err) + } + + var leaks []SecretLeak + for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") { + if line == "" || strings.HasPrefix(line, "RuleID") { + continue + } + parts := strings.SplitN(line, ",", 4) + if len(parts) < 4 { + continue + } + lineNum, _ := strconv.Atoi(parts[2]) + leaks = append(leaks, SecretLeak{ + RuleID: parts[0], + File: parts[1], + Line: lineNum, + Match: parts[3], + }) + } + return leaks, nil +} + +// ModTidy runs go mod tidy. +func (t *Toolkit) ModTidy() error { + _, stderr, exitCode, err := t.Run("go", "mod", "tidy") + if err != nil && exitCode != 0 { + return fmt.Errorf("go mod tidy failed: %s", stderr) + } + return nil +} + +// Build compiles the given targets. 
+func (t *Toolkit) Build(targets ...string) ([]BuildResult, error) { + var results []BuildResult + for _, target := range targets { + _, stderr, _, err := t.Run("go", "build", "-o", "/dev/null", target) + r := BuildResult{Target: target} + if err != nil { + r.Error = fmt.Errorf("%s", strings.TrimSpace(stderr)) + } + results = append(results, r) + } + return results, nil +} + +// TestCount returns the number of test functions in a package. +func (t *Toolkit) TestCount(pkg string) (int, error) { + stdout, stderr, exitCode, err := t.Run("go", "test", "-list", ".*", pkg) + if err != nil && exitCode != 0 { + return 0, fmt.Errorf("go test -list failed: %s\n%s", err, stderr) + } + count := 0 + for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") { + if strings.HasPrefix(line, "Test") || strings.HasPrefix(line, "Benchmark") { + count++ + } + } + return count, nil +} + +// Coverage runs go test -cover and parses per-package coverage percentages. +func (t *Toolkit) Coverage(pkg string) ([]CoverageReport, error) { + if pkg == "" { + pkg = "./..." + } + stdout, stderr, exitCode, err := t.Run("go", "test", "-cover", pkg) + if err != nil && exitCode != 0 && !strings.Contains(stdout, "coverage:") { + return nil, fmt.Errorf("go test -cover failed (exit %d): %s\n%s", exitCode, err, stderr) + } + + var reports []CoverageReport + re := regexp.MustCompile(`ok\s+(\S+)\s+.*coverage:\s+([\d.]+)%`) + scanner := bufio.NewScanner(strings.NewReader(stdout)) + + for scanner.Scan() { + matches := re.FindStringSubmatch(scanner.Text()) + if len(matches) == 3 { + pct, _ := strconv.ParseFloat(matches[2], 64) + reports = append(reports, CoverageReport{ + Package: matches[1], + Percentage: pct, + }) + } + } + return reports, nil +} + +// RaceDetect runs go test -race and parses data race warnings. +func (t *Toolkit) RaceDetect(pkg string) ([]RaceCondition, error) { + if pkg == "" { + pkg = "./..." 
+ } + _, stderr, _, err := t.Run("go", "test", "-race", pkg) + if err != nil && !strings.Contains(stderr, "WARNING: DATA RACE") { + return nil, fmt.Errorf("go test -race failed: %w", err) + } + + var races []RaceCondition + lines := strings.Split(stderr, "\n") + reFile := regexp.MustCompile(`\s+(.*\.go):(\d+)`) + + for i, line := range lines { + if strings.Contains(line, "WARNING: DATA RACE") { + rc := RaceCondition{Desc: "Data race detected"} + for j := i + 1; j < len(lines) && j < i+15; j++ { + if match := reFile.FindStringSubmatch(lines[j]); len(match) == 3 { + rc.File = strings.TrimSpace(match[1]) + rc.Line, _ = strconv.Atoi(match[2]) + break + } + } + races = append(races, rc) + } + } + return races, nil +} + +// Complexity runs gocyclo and returns functions exceeding the threshold. +func (t *Toolkit) Complexity(threshold int) ([]ComplexFunc, error) { + stdout, stderr, exitCode, err := t.Run("gocyclo", "-over", strconv.Itoa(threshold), ".") + if err != nil && exitCode == -1 { + return nil, fmt.Errorf("gocyclo not available: %s\n%s", err, stderr) + } + + var funcs []ComplexFunc + scanner := bufio.NewScanner(strings.NewReader(stdout)) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) < 4 { + continue + } + score, _ := strconv.Atoi(fields[0]) + fileParts := strings.Split(fields[3], ":") + line := 0 + if len(fileParts) > 1 { + line, _ = strconv.Atoi(fileParts[1]) + } + + funcs = append(funcs, ComplexFunc{ + Score: score, + Package: fields[1], + FuncName: fields[2], + File: fileParts[0], + Line: line, + }) + } + return funcs, nil +} + +// DepGraph runs go mod graph and builds a dependency graph. 
+func (t *Toolkit) DepGraph(pkg string) (*Graph, error) { + stdout, stderr, exitCode, err := t.Run("go", "mod", "graph") + if err != nil && exitCode != 0 { + return nil, fmt.Errorf("go mod graph failed (exit %d): %s\n%s", exitCode, err, stderr) + } + + graph := &Graph{Edges: make(map[string][]string)} + nodes := make(map[string]struct{}) + scanner := bufio.NewScanner(strings.NewReader(stdout)) + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) >= 2 { + src, dst := parts[0], parts[1] + graph.Edges[src] = append(graph.Edges[src], dst) + nodes[src] = struct{}{} + nodes[dst] = struct{}{} + } + } + + for node := range nodes { + graph.Nodes = append(graph.Nodes, node) + } + return graph, nil +} + +// GitLog returns the last n commits from git history. +func (t *Toolkit) GitLog(n int) ([]Commit, error) { + stdout, stderr, exitCode, err := t.Run("git", "log", fmt.Sprintf("-n%d", n), "--format=%H|%an|%aI|%s") + if err != nil && exitCode != 0 { + return nil, fmt.Errorf("git log failed (exit %d): %s\n%s", exitCode, err, stderr) + } + + var commits []Commit + scanner := bufio.NewScanner(strings.NewReader(stdout)) + + for scanner.Scan() { + parts := strings.SplitN(scanner.Text(), "|", 4) + if len(parts) < 4 { + continue + } + date, _ := time.Parse(time.RFC3339, parts[2]) + commits = append(commits, Commit{ + Hash: parts[0], + Author: parts[1], + Date: date, + Message: parts[3], + }) + } + return commits, nil +} + +// CheckPerms walks a directory and flags files with overly permissive modes. 
+func (t *Toolkit) CheckPerms(dir string) ([]PermIssue, error) { + var issues []PermIssue + err := filepath.Walk(filepath.Join(t.Dir, dir), func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } + if info.IsDir() { + return nil + } + mode := info.Mode().Perm() + if mode&0o002 != 0 { + issues = append(issues, PermIssue{ + File: path, + Permission: fmt.Sprintf("%04o", mode), + Issue: "World-writable", + }) + } else if mode&0o020 != 0 && mode&0o002 != 0 { + issues = append(issues, PermIssue{ + File: path, + Permission: fmt.Sprintf("%04o", mode), + Issue: "Group and world-writable", + }) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("walk failed: %w", err) + } + return issues, nil +} + +// LEK-1 | lthn.ai | EUPL-1.2 diff --git a/devkit/devkit_test.go b/devkit/devkit_test.go new file mode 100644 index 0000000..ffcdecd --- /dev/null +++ b/devkit/devkit_test.go @@ -0,0 +1,270 @@ +// Designed by Gemini 3 Pro (Hypnos) + Claude Opus (Charon), signed LEK-1 | lthn.ai | EUPL-1.2 +package devkit + +import ( + "fmt" + "os" + "path/filepath" + "testing" + "time" +) + +// setupMockCmd creates a shell script in a temp dir that echoes predetermined +// content, and prepends that dir to PATH so Run() picks it up. +func setupMockCmd(t *testing.T, name, content string) { + t.Helper() + tmpDir := t.TempDir() + scriptPath := filepath.Join(tmpDir, name) + + script := fmt.Sprintf("#!/bin/sh\ncat <<'MOCK_EOF'\n%s\nMOCK_EOF\n", content) + if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil { + t.Fatalf("failed to write mock command %s: %v", name, err) + } + + oldPath := os.Getenv("PATH") + t.Setenv("PATH", tmpDir+string(os.PathListSeparator)+oldPath) +} + +// setupMockCmdExit creates a mock that echoes to stdout/stderr and exits with a code. 
+func setupMockCmdExit(t *testing.T, name, stdout, stderr string, exitCode int) { + t.Helper() + tmpDir := t.TempDir() + scriptPath := filepath.Join(tmpDir, name) + + script := fmt.Sprintf("#!/bin/sh\ncat <<'MOCK_EOF'\n%s\nMOCK_EOF\ncat <<'MOCK_ERR' >&2\n%s\nMOCK_ERR\nexit %d\n", stdout, stderr, exitCode) + if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil { + t.Fatalf("failed to write mock command %s: %v", name, err) + } + + oldPath := os.Getenv("PATH") + t.Setenv("PATH", tmpDir+string(os.PathListSeparator)+oldPath) +} + +func TestCoverage_Good(t *testing.T) { + output := `? example.com/skipped [no test files] +ok example.com/pkg1 0.5s coverage: 85.0% of statements +ok example.com/pkg2 0.2s coverage: 100.0% of statements` + + setupMockCmd(t, "go", output) + + tk := New(t.TempDir()) + reports, err := tk.Coverage("./...") + if err != nil { + t.Fatalf("Coverage failed: %v", err) + } + if len(reports) != 2 { + t.Fatalf("expected 2 reports, got %d", len(reports)) + } + if reports[0].Package != "example.com/pkg1" || reports[0].Percentage != 85.0 { + t.Errorf("report 0: want pkg1@85%%, got %s@%.1f%%", reports[0].Package, reports[0].Percentage) + } + if reports[1].Package != "example.com/pkg2" || reports[1].Percentage != 100.0 { + t.Errorf("report 1: want pkg2@100%%, got %s@%.1f%%", reports[1].Package, reports[1].Percentage) + } +} + +func TestCoverage_Bad(t *testing.T) { + // No coverage lines in output + setupMockCmd(t, "go", "FAIL\texample.com/broken [build failed]") + + tk := New(t.TempDir()) + reports, err := tk.Coverage("./...") + if err != nil { + t.Fatalf("Coverage should not error on partial output: %v", err) + } + if len(reports) != 0 { + t.Errorf("expected 0 reports from failed build, got %d", len(reports)) + } +} + +func TestGitLog_Good(t *testing.T) { + now := time.Now().Truncate(time.Second) + nowStr := now.Format(time.RFC3339) + + output := fmt.Sprintf("abc123|Alice|%s|Fix the bug\ndef456|Bob|%s|Add feature", nowStr, nowStr) + 
setupMockCmd(t, "git", output) + + tk := New(t.TempDir()) + commits, err := tk.GitLog(2) + if err != nil { + t.Fatalf("GitLog failed: %v", err) + } + if len(commits) != 2 { + t.Fatalf("expected 2 commits, got %d", len(commits)) + } + if commits[0].Hash != "abc123" { + t.Errorf("hash: want abc123, got %s", commits[0].Hash) + } + if commits[0].Author != "Alice" { + t.Errorf("author: want Alice, got %s", commits[0].Author) + } + if commits[0].Message != "Fix the bug" { + t.Errorf("message: want 'Fix the bug', got %q", commits[0].Message) + } + if !commits[0].Date.Equal(now) { + t.Errorf("date: want %v, got %v", now, commits[0].Date) + } +} + +func TestGitLog_Bad(t *testing.T) { + // Malformed lines should be skipped + setupMockCmd(t, "git", "incomplete|line\nabc|Bob|2025-01-01T00:00:00Z|Good commit") + + tk := New(t.TempDir()) + commits, err := tk.GitLog(5) + if err != nil { + t.Fatalf("GitLog failed: %v", err) + } + if len(commits) != 1 { + t.Errorf("expected 1 valid commit (skip malformed), got %d", len(commits)) + } +} + +func TestComplexity_Good(t *testing.T) { + output := "15 main ComplexFunc file.go:10:1\n20 pkg VeryComplex other.go:50:1" + setupMockCmd(t, "gocyclo", output) + + tk := New(t.TempDir()) + funcs, err := tk.Complexity(10) + if err != nil { + t.Fatalf("Complexity failed: %v", err) + } + if len(funcs) != 2 { + t.Fatalf("expected 2 funcs, got %d", len(funcs)) + } + if funcs[0].Score != 15 || funcs[0].FuncName != "ComplexFunc" || funcs[0].File != "file.go" || funcs[0].Line != 10 { + t.Errorf("func 0: unexpected %+v", funcs[0]) + } + if funcs[1].Score != 20 || funcs[1].Package != "pkg" { + t.Errorf("func 1: unexpected %+v", funcs[1]) + } +} + +func TestComplexity_Bad(t *testing.T) { + // No functions above threshold = empty output + setupMockCmd(t, "gocyclo", "") + + tk := New(t.TempDir()) + funcs, err := tk.Complexity(50) + if err != nil { + t.Fatalf("Complexity should not error on empty output: %v", err) + } + if len(funcs) != 0 { + t.Errorf("expected 
0 funcs, got %d", len(funcs)) + } +} + +func TestDepGraph_Good(t *testing.T) { + output := "modA@v1 modB@v2\nmodA@v1 modC@v3\nmodB@v2 modD@v1" + setupMockCmd(t, "go", output) + + tk := New(t.TempDir()) + graph, err := tk.DepGraph("./...") + if err != nil { + t.Fatalf("DepGraph failed: %v", err) + } + if len(graph.Nodes) != 4 { + t.Errorf("expected 4 nodes, got %d: %v", len(graph.Nodes), graph.Nodes) + } + edgesA := graph.Edges["modA@v1"] + if len(edgesA) != 2 { + t.Errorf("expected 2 edges from modA@v1, got %d", len(edgesA)) + } +} + +func TestRaceDetect_Good(t *testing.T) { + // No races = clean run + setupMockCmd(t, "go", "ok\texample.com/safe\t0.1s") + + tk := New(t.TempDir()) + races, err := tk.RaceDetect("./...") + if err != nil { + t.Fatalf("RaceDetect failed on clean run: %v", err) + } + if len(races) != 0 { + t.Errorf("expected 0 races, got %d", len(races)) + } +} + +func TestRaceDetect_Bad(t *testing.T) { + stderrOut := `WARNING: DATA RACE +Read at 0x00c000123456 by goroutine 7: + /home/user/project/main.go:42 +Previous write at 0x00c000123456 by goroutine 6: + /home/user/project/main.go:38` + + setupMockCmdExit(t, "go", "", stderrOut, 1) + + tk := New(t.TempDir()) + races, err := tk.RaceDetect("./...") + if err != nil { + t.Fatalf("RaceDetect should parse races, not error: %v", err) + } + if len(races) != 1 { + t.Fatalf("expected 1 race, got %d", len(races)) + } + if races[0].File != "/home/user/project/main.go" || races[0].Line != 42 { + t.Errorf("race: unexpected %+v", races[0]) + } +} + +func TestDiffStat_Good(t *testing.T) { + output := ` file1.go | 10 +++++++--- + file2.go | 5 +++++ + 2 files changed, 12 insertions(+), 3 deletions(-)` + setupMockCmd(t, "git", output) + + tk := New(t.TempDir()) + s, err := tk.DiffStat() + if err != nil { + t.Fatalf("DiffStat failed: %v", err) + } + if s.FilesChanged != 2 { + t.Errorf("files: want 2, got %d", s.FilesChanged) + } + if s.Insertions != 12 { + t.Errorf("insertions: want 12, got %d", s.Insertions) + } + if 
s.Deletions != 3 { + t.Errorf("deletions: want 3, got %d", s.Deletions) + } +} + +func TestCheckPerms_Good(t *testing.T) { + dir := t.TempDir() + + // Create a world-writable file + badFile := filepath.Join(dir, "bad.txt") + if err := os.WriteFile(badFile, []byte("test"), 0644); err != nil { + t.Fatal(err) + } + if err := os.Chmod(badFile, 0666); err != nil { + t.Fatal(err) + } + // Create a safe file + goodFile := filepath.Join(dir, "good.txt") + if err := os.WriteFile(goodFile, []byte("test"), 0644); err != nil { + t.Fatal(err) + } + + tk := New("/") + issues, err := tk.CheckPerms(dir) + if err != nil { + t.Fatalf("CheckPerms failed: %v", err) + } + if len(issues) != 1 { + t.Fatalf("expected 1 issue (world-writable), got %d", len(issues)) + } + if issues[0].Issue != "World-writable" { + t.Errorf("issue: want 'World-writable', got %q", issues[0].Issue) + } +} + +func TestNew(t *testing.T) { + tk := New("/tmp") + if tk.Dir != "/tmp" { + t.Errorf("Dir: want /tmp, got %s", tk.Dir) + } +} + +// LEK-1 | lthn.ai | EUPL-1.2 diff --git a/devops/claude.go b/devops/claude.go new file mode 100644 index 0000000..b5af149 --- /dev/null +++ b/devops/claude.go @@ -0,0 +1,143 @@ +package devops + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go/pkg/io" +) + +// ClaudeOptions configures the Claude sandbox session. +type ClaudeOptions struct { + NoAuth bool // Don't forward any auth + Auth []string // Selective auth: "gh", "anthropic", "ssh", "git" + Model string // Model to use: opus, sonnet +} + +// Claude starts a sandboxed Claude session in the dev environment. 
+func (d *DevOps) Claude(ctx context.Context, projectDir string, opts ClaudeOptions) error { + // Auto-boot if not running + running, err := d.IsRunning(ctx) + if err != nil { + return err + } + if !running { + fmt.Println("Dev environment not running, booting...") + if err := d.Boot(ctx, DefaultBootOptions()); err != nil { + return fmt.Errorf("failed to boot: %w", err) + } + } + + // Mount project + if err := d.mountProject(ctx, projectDir); err != nil { + return fmt.Errorf("failed to mount project: %w", err) + } + + // Prepare environment variables to forward + envVars := []string{} + + if !opts.NoAuth { + authTypes := opts.Auth + if len(authTypes) == 0 { + authTypes = []string{"gh", "anthropic", "ssh", "git"} + } + + for _, auth := range authTypes { + switch auth { + case "anthropic": + if key := os.Getenv("ANTHROPIC_API_KEY"); key != "" { + envVars = append(envVars, "ANTHROPIC_API_KEY="+key) + } + case "git": + // Forward git config + name, _ := exec.Command("git", "config", "user.name").Output() + email, _ := exec.Command("git", "config", "user.email").Output() + if len(name) > 0 { + envVars = append(envVars, "GIT_AUTHOR_NAME="+strings.TrimSpace(string(name))) + envVars = append(envVars, "GIT_COMMITTER_NAME="+strings.TrimSpace(string(name))) + } + if len(email) > 0 { + envVars = append(envVars, "GIT_AUTHOR_EMAIL="+strings.TrimSpace(string(email))) + envVars = append(envVars, "GIT_COMMITTER_EMAIL="+strings.TrimSpace(string(email))) + } + } + } + } + + // Build SSH command with agent forwarding + args := []string{ + "-o", "StrictHostKeyChecking=yes", + "-o", "UserKnownHostsFile=~/.core/known_hosts", + "-o", "LogLevel=ERROR", + "-A", // SSH agent forwarding + "-p", fmt.Sprintf("%d", DefaultSSHPort), + } + + args = append(args, "root@localhost") + + // Build command to run inside + claudeCmd := "cd /app && claude" + if opts.Model != "" { + claudeCmd += " --model " + opts.Model + } + args = append(args, claudeCmd) + + // Set environment for SSH + cmd := 
exec.CommandContext(ctx, "ssh", args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + // Pass environment variables through SSH + for _, env := range envVars { + parts := strings.SplitN(env, "=", 2) + if len(parts) == 2 { + cmd.Env = append(os.Environ(), env) + } + } + + fmt.Println("Starting Claude in sandboxed environment...") + fmt.Println("Project mounted at /app") + fmt.Println("Auth forwarded: SSH agent" + formatAuthList(opts)) + fmt.Println() + + return cmd.Run() +} + +func formatAuthList(opts ClaudeOptions) string { + if opts.NoAuth { + return " (none)" + } + if len(opts.Auth) == 0 { + return ", gh, anthropic, git" + } + return ", " + strings.Join(opts.Auth, ", ") +} + +// CopyGHAuth copies GitHub CLI auth to the VM. +func (d *DevOps) CopyGHAuth(ctx context.Context) error { + home, err := os.UserHomeDir() + if err != nil { + return err + } + + ghConfigDir := filepath.Join(home, ".config", "gh") + if !io.Local.IsDir(ghConfigDir) { + return nil // No gh config to copy + } + + // Use scp to copy gh config + cmd := exec.CommandContext(ctx, "scp", + "-o", "StrictHostKeyChecking=yes", + "-o", "UserKnownHostsFile=~/.core/known_hosts", + "-o", "LogLevel=ERROR", + "-P", fmt.Sprintf("%d", DefaultSSHPort), + "-r", ghConfigDir, + "root@localhost:/root/.config/", + ) + return cmd.Run() +} diff --git a/devops/claude_test.go b/devops/claude_test.go new file mode 100644 index 0000000..6c96b9b --- /dev/null +++ b/devops/claude_test.go @@ -0,0 +1,61 @@ +package devops + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestClaudeOptions_Default(t *testing.T) { + opts := ClaudeOptions{} + assert.False(t, opts.NoAuth) + assert.Nil(t, opts.Auth) + assert.Empty(t, opts.Model) +} + +func TestClaudeOptions_Custom(t *testing.T) { + opts := ClaudeOptions{ + NoAuth: true, + Auth: []string{"gh", "anthropic"}, + Model: "opus", + } + assert.True(t, opts.NoAuth) + assert.Equal(t, []string{"gh", "anthropic"}, opts.Auth) + 
assert.Equal(t, "opus", opts.Model) +} + +func TestFormatAuthList_Good_NoAuth(t *testing.T) { + opts := ClaudeOptions{NoAuth: true} + result := formatAuthList(opts) + assert.Equal(t, " (none)", result) +} + +func TestFormatAuthList_Good_Default(t *testing.T) { + opts := ClaudeOptions{} + result := formatAuthList(opts) + assert.Equal(t, ", gh, anthropic, git", result) +} + +func TestFormatAuthList_Good_CustomAuth(t *testing.T) { + opts := ClaudeOptions{ + Auth: []string{"gh"}, + } + result := formatAuthList(opts) + assert.Equal(t, ", gh", result) +} + +func TestFormatAuthList_Good_MultipleAuth(t *testing.T) { + opts := ClaudeOptions{ + Auth: []string{"gh", "ssh", "git"}, + } + result := formatAuthList(opts) + assert.Equal(t, ", gh, ssh, git", result) +} + +func TestFormatAuthList_Good_EmptyAuth(t *testing.T) { + opts := ClaudeOptions{ + Auth: []string{}, + } + result := formatAuthList(opts) + assert.Equal(t, ", gh, anthropic, git", result) +} diff --git a/devops/config.go b/devops/config.go new file mode 100644 index 0000000..bd00255 --- /dev/null +++ b/devops/config.go @@ -0,0 +1,90 @@ +package devops + +import ( + "os" + "path/filepath" + + "forge.lthn.ai/core/go/pkg/config" + "forge.lthn.ai/core/go/pkg/io" +) + +// Config holds global devops configuration from ~/.core/config.yaml. +type Config struct { + Version int `yaml:"version" mapstructure:"version"` + Images ImagesConfig `yaml:"images" mapstructure:"images"` +} + +// ImagesConfig holds image source configuration. +type ImagesConfig struct { + Source string `yaml:"source" mapstructure:"source"` // auto, github, registry, cdn + GitHub GitHubConfig `yaml:"github,omitempty" mapstructure:"github,omitempty"` + Registry RegistryConfig `yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + CDN CDNConfig `yaml:"cdn,omitempty" mapstructure:"cdn,omitempty"` +} + +// GitHubConfig holds GitHub Releases configuration. 
+type GitHubConfig struct { + Repo string `yaml:"repo" mapstructure:"repo"` // owner/repo format +} + +// RegistryConfig holds container registry configuration. +type RegistryConfig struct { + Image string `yaml:"image" mapstructure:"image"` // e.g., ghcr.io/host-uk/core-devops +} + +// CDNConfig holds CDN/S3 configuration. +type CDNConfig struct { + URL string `yaml:"url" mapstructure:"url"` // base URL for downloads +} + +// DefaultConfig returns sensible defaults. +func DefaultConfig() *Config { + return &Config{ + Version: 1, + Images: ImagesConfig{ + Source: "auto", + GitHub: GitHubConfig{ + Repo: "host-uk/core-images", + }, + Registry: RegistryConfig{ + Image: "ghcr.io/host-uk/core-devops", + }, + }, + } +} + +// ConfigPath returns the path to the config file. +func ConfigPath() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(home, ".core", "config.yaml"), nil +} + +// LoadConfig loads configuration from ~/.core/config.yaml using the provided medium. +// Returns default config if file doesn't exist. 
+func LoadConfig(m io.Medium) (*Config, error) { + configPath, err := ConfigPath() + if err != nil { + return DefaultConfig(), nil + } + + cfg := DefaultConfig() + + if !m.IsFile(configPath) { + return cfg, nil + } + + // Use centralized config service + c, err := config.New(config.WithMedium(m), config.WithPath(configPath)) + if err != nil { + return nil, err + } + + if err := c.Get("", cfg); err != nil { + return nil, err + } + + return cfg, nil +} diff --git a/devops/config_test.go b/devops/config_test.go new file mode 100644 index 0000000..07a5b6d --- /dev/null +++ b/devops/config_test.go @@ -0,0 +1,255 @@ +package devops + +import ( + "os" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDefaultConfig(t *testing.T) { + cfg := DefaultConfig() + assert.Equal(t, 1, cfg.Version) + assert.Equal(t, "auto", cfg.Images.Source) + assert.Equal(t, "host-uk/core-images", cfg.Images.GitHub.Repo) +} + +func TestConfigPath(t *testing.T) { + path, err := ConfigPath() + assert.NoError(t, err) + assert.Contains(t, path, ".core/config.yaml") +} + +func TestLoadConfig_Good(t *testing.T) { + t.Run("returns default if not exists", func(t *testing.T) { + // Mock HOME to a temp dir + tempHome := t.TempDir() + origHome := os.Getenv("HOME") + t.Setenv("HOME", tempHome) + defer func() { _ = os.Setenv("HOME", origHome) }() + + cfg, err := LoadConfig(io.Local) + assert.NoError(t, err) + assert.Equal(t, DefaultConfig(), cfg) + }) + + t.Run("loads existing config", func(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + coreDir := filepath.Join(tempHome, ".core") + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + configData := ` +version: 2 +images: + source: cdn + cdn: + url: https://cdn.example.com +` + err = os.WriteFile(filepath.Join(coreDir, "config.yaml"), []byte(configData), 0644) + require.NoError(t, err) + + cfg, err := 
LoadConfig(io.Local) + assert.NoError(t, err) + assert.Equal(t, 2, cfg.Version) + assert.Equal(t, "cdn", cfg.Images.Source) + assert.Equal(t, "https://cdn.example.com", cfg.Images.CDN.URL) + }) +} + +func TestLoadConfig_Bad(t *testing.T) { + t.Run("invalid yaml", func(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + coreDir := filepath.Join(tempHome, ".core") + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + err = os.WriteFile(filepath.Join(coreDir, "config.yaml"), []byte("invalid: yaml: :"), 0644) + require.NoError(t, err) + + _, err = LoadConfig(io.Local) + assert.Error(t, err) + }) +} + +func TestConfig_Struct(t *testing.T) { + cfg := &Config{ + Version: 2, + Images: ImagesConfig{ + Source: "github", + GitHub: GitHubConfig{ + Repo: "owner/repo", + }, + Registry: RegistryConfig{ + Image: "ghcr.io/owner/image", + }, + CDN: CDNConfig{ + URL: "https://cdn.example.com", + }, + }, + } + assert.Equal(t, 2, cfg.Version) + assert.Equal(t, "github", cfg.Images.Source) + assert.Equal(t, "owner/repo", cfg.Images.GitHub.Repo) + assert.Equal(t, "ghcr.io/owner/image", cfg.Images.Registry.Image) + assert.Equal(t, "https://cdn.example.com", cfg.Images.CDN.URL) +} + +func TestDefaultConfig_Complete(t *testing.T) { + cfg := DefaultConfig() + assert.Equal(t, 1, cfg.Version) + assert.Equal(t, "auto", cfg.Images.Source) + assert.Equal(t, "host-uk/core-images", cfg.Images.GitHub.Repo) + assert.Equal(t, "ghcr.io/host-uk/core-devops", cfg.Images.Registry.Image) + assert.Empty(t, cfg.Images.CDN.URL) +} + +func TestLoadConfig_Good_PartialConfig(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + coreDir := filepath.Join(tempHome, ".core") + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + // Config only specifies source, should merge with defaults + configData := ` +version: 1 +images: + source: github +` + err = os.WriteFile(filepath.Join(coreDir, "config.yaml"), []byte(configData), 0644) + require.NoError(t, 
err) + + cfg, err := LoadConfig(io.Local) + assert.NoError(t, err) + assert.Equal(t, 1, cfg.Version) + assert.Equal(t, "github", cfg.Images.Source) + // Default values should be preserved + assert.Equal(t, "host-uk/core-images", cfg.Images.GitHub.Repo) +} + +func TestLoadConfig_Good_AllSourceTypes(t *testing.T) { + tests := []struct { + name string + config string + check func(*testing.T, *Config) + }{ + { + name: "github source", + config: ` +version: 1 +images: + source: github + github: + repo: custom/repo +`, + check: func(t *testing.T, cfg *Config) { + assert.Equal(t, "github", cfg.Images.Source) + assert.Equal(t, "custom/repo", cfg.Images.GitHub.Repo) + }, + }, + { + name: "cdn source", + config: ` +version: 1 +images: + source: cdn + cdn: + url: https://custom-cdn.com +`, + check: func(t *testing.T, cfg *Config) { + assert.Equal(t, "cdn", cfg.Images.Source) + assert.Equal(t, "https://custom-cdn.com", cfg.Images.CDN.URL) + }, + }, + { + name: "registry source", + config: ` +version: 1 +images: + source: registry + registry: + image: docker.io/custom/image +`, + check: func(t *testing.T, cfg *Config) { + assert.Equal(t, "registry", cfg.Images.Source) + assert.Equal(t, "docker.io/custom/image", cfg.Images.Registry.Image) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + coreDir := filepath.Join(tempHome, ".core") + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + err = os.WriteFile(filepath.Join(coreDir, "config.yaml"), []byte(tt.config), 0644) + require.NoError(t, err) + + cfg, err := LoadConfig(io.Local) + assert.NoError(t, err) + tt.check(t, cfg) + }) + } +} + +func TestImagesConfig_Struct(t *testing.T) { + ic := ImagesConfig{ + Source: "auto", + GitHub: GitHubConfig{Repo: "test/repo"}, + } + assert.Equal(t, "auto", ic.Source) + assert.Equal(t, "test/repo", ic.GitHub.Repo) +} + +func TestGitHubConfig_Struct(t *testing.T) { + gc := GitHubConfig{Repo: 
"owner/repo"} + assert.Equal(t, "owner/repo", gc.Repo) +} + +func TestRegistryConfig_Struct(t *testing.T) { + rc := RegistryConfig{Image: "ghcr.io/owner/image:latest"} + assert.Equal(t, "ghcr.io/owner/image:latest", rc.Image) +} + +func TestCDNConfig_Struct(t *testing.T) { + cc := CDNConfig{URL: "https://cdn.example.com/images"} + assert.Equal(t, "https://cdn.example.com/images", cc.URL) +} + +func TestLoadConfig_Bad_UnreadableFile(t *testing.T) { + // This test is platform-specific and may not work on all systems + // Skip if we can't test file permissions properly + if os.Getuid() == 0 { + t.Skip("Skipping permission test when running as root") + } + + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + coreDir := filepath.Join(tempHome, ".core") + err := os.MkdirAll(coreDir, 0755) + require.NoError(t, err) + + configPath := filepath.Join(coreDir, "config.yaml") + err = os.WriteFile(configPath, []byte("version: 1"), 0000) + require.NoError(t, err) + + _, err = LoadConfig(io.Local) + assert.Error(t, err) + + // Restore permissions so cleanup works + _ = os.Chmod(configPath, 0644) +} diff --git a/devops/devops.go b/devops/devops.go new file mode 100644 index 0000000..5ca2456 --- /dev/null +++ b/devops/devops.go @@ -0,0 +1,243 @@ +// Package devops provides a portable development environment using LinuxKit images. +package devops + +import ( + "context" + "fmt" + "os" + "path/filepath" + "runtime" + "time" + + "forge.lthn.ai/core/go-devops/container" + "forge.lthn.ai/core/go/pkg/io" +) + +const ( + // DefaultSSHPort is the default port for SSH connections to the dev environment. + DefaultSSHPort = 2222 +) + +// DevOps manages the portable development environment. +type DevOps struct { + medium io.Medium + config *Config + images *ImageManager + container *container.LinuxKitManager +} + +// New creates a new DevOps instance using the provided medium. 
+func New(m io.Medium) (*DevOps, error) { + cfg, err := LoadConfig(m) + if err != nil { + return nil, fmt.Errorf("devops.New: failed to load config: %w", err) + } + + images, err := NewImageManager(m, cfg) + if err != nil { + return nil, fmt.Errorf("devops.New: failed to create image manager: %w", err) + } + + mgr, err := container.NewLinuxKitManager(io.Local) + if err != nil { + return nil, fmt.Errorf("devops.New: failed to create container manager: %w", err) + } + + return &DevOps{ + medium: m, + config: cfg, + images: images, + container: mgr, + }, nil +} + +// ImageName returns the platform-specific image name. +func ImageName() string { + return fmt.Sprintf("core-devops-%s-%s.qcow2", runtime.GOOS, runtime.GOARCH) +} + +// ImagesDir returns the path to the images directory. +func ImagesDir() (string, error) { + if dir := os.Getenv("CORE_IMAGES_DIR"); dir != "" { + return dir, nil + } + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(home, ".core", "images"), nil +} + +// ImagePath returns the full path to the platform-specific image. +func ImagePath() (string, error) { + dir, err := ImagesDir() + if err != nil { + return "", err + } + return filepath.Join(dir, ImageName()), nil +} + +// IsInstalled checks if the dev image is installed. +func (d *DevOps) IsInstalled() bool { + path, err := ImagePath() + if err != nil { + return false + } + return d.medium.IsFile(path) +} + +// Install downloads and installs the dev image. +func (d *DevOps) Install(ctx context.Context, progress func(downloaded, total int64)) error { + return d.images.Install(ctx, progress) +} + +// CheckUpdate checks if an update is available. +func (d *DevOps) CheckUpdate(ctx context.Context) (current, latest string, hasUpdate bool, err error) { + return d.images.CheckUpdate(ctx) +} + +// BootOptions configures how to boot the dev environment. 
+type BootOptions struct { + Memory int // MB, default 4096 + CPUs int // default 2 + Name string // container name + Fresh bool // destroy existing and start fresh +} + +// DefaultBootOptions returns sensible defaults. +func DefaultBootOptions() BootOptions { + return BootOptions{ + Memory: 4096, + CPUs: 2, + Name: "core-dev", + } +} + +// Boot starts the dev environment. +func (d *DevOps) Boot(ctx context.Context, opts BootOptions) error { + if !d.images.IsInstalled() { + return fmt.Errorf("dev image not installed (run 'core dev install' first)") + } + + // Check if already running + if !opts.Fresh { + running, err := d.IsRunning(ctx) + if err == nil && running { + return fmt.Errorf("dev environment already running (use 'core dev stop' first or --fresh)") + } + } + + // Stop existing if fresh + if opts.Fresh { + _ = d.Stop(ctx) + } + + imagePath, err := ImagePath() + if err != nil { + return err + } + + // Build run options for LinuxKitManager + runOpts := container.RunOptions{ + Name: opts.Name, + Memory: opts.Memory, + CPUs: opts.CPUs, + SSHPort: DefaultSSHPort, + Detach: true, + } + + _, err = d.container.Run(ctx, imagePath, runOpts) + if err != nil { + return err + } + + // Wait for SSH to be ready and scan host key + // We try for up to 60 seconds as the VM takes a moment to boot + var lastErr error + for i := 0; i < 30; i++ { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(2 * time.Second): + if err := ensureHostKey(ctx, runOpts.SSHPort); err == nil { + return nil + } else { + lastErr = err + } + } + } + + return fmt.Errorf("failed to verify host key after boot: %w", lastErr) +} + +// Stop stops the dev environment. +func (d *DevOps) Stop(ctx context.Context) error { + c, err := d.findContainer(ctx, "core-dev") + if err != nil { + return err + } + if c == nil { + return fmt.Errorf("dev environment not found") + } + return d.container.Stop(ctx, c.ID) +} + +// IsRunning checks if the dev environment is running. 
+func (d *DevOps) IsRunning(ctx context.Context) (bool, error) { + c, err := d.findContainer(ctx, "core-dev") + if err != nil { + return false, err + } + return c != nil && c.Status == container.StatusRunning, nil +} + +// findContainer finds a container by name. +func (d *DevOps) findContainer(ctx context.Context, name string) (*container.Container, error) { + containers, err := d.container.List(ctx) + if err != nil { + return nil, err + } + for _, c := range containers { + if c.Name == name { + return c, nil + } + } + return nil, nil +} + +// DevStatus returns information about the dev environment. +type DevStatus struct { + Installed bool + Running bool + ImageVersion string + ContainerID string + Memory int + CPUs int + SSHPort int + Uptime time.Duration +} + +// Status returns the current dev environment status. +func (d *DevOps) Status(ctx context.Context) (*DevStatus, error) { + status := &DevStatus{ + Installed: d.images.IsInstalled(), + SSHPort: DefaultSSHPort, + } + + if info, ok := d.images.manifest.Images[ImageName()]; ok { + status.ImageVersion = info.Version + } + + c, _ := d.findContainer(ctx, "core-dev") + if c != nil { + status.Running = c.Status == container.StatusRunning + status.ContainerID = c.ID + status.Memory = c.Memory + status.CPUs = c.CPUs + if status.Running { + status.Uptime = time.Since(c.StartedAt) + } + } + + return status, nil +} diff --git a/devops/devops_test.go b/devops/devops_test.go new file mode 100644 index 0000000..dab5126 --- /dev/null +++ b/devops/devops_test.go @@ -0,0 +1,833 @@ +package devops + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + "time" + + "forge.lthn.ai/core/go-devops/container" + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestImageName(t *testing.T) { + name := ImageName() + assert.Contains(t, name, "core-devops-") + assert.Contains(t, name, runtime.GOOS) + assert.Contains(t, name, 
runtime.GOARCH) + assert.True(t, (name[len(name)-6:] == ".qcow2")) +} + +func TestImagesDir(t *testing.T) { + t.Run("default directory", func(t *testing.T) { + // Unset env if it exists + orig := os.Getenv("CORE_IMAGES_DIR") + _ = os.Unsetenv("CORE_IMAGES_DIR") + defer func() { _ = os.Setenv("CORE_IMAGES_DIR", orig) }() + + dir, err := ImagesDir() + assert.NoError(t, err) + assert.Contains(t, dir, ".core/images") + }) + + t.Run("environment override", func(t *testing.T) { + customDir := "/tmp/custom-images" + t.Setenv("CORE_IMAGES_DIR", customDir) + + dir, err := ImagesDir() + assert.NoError(t, err) + assert.Equal(t, customDir, dir) + }) +} + +func TestImagePath(t *testing.T) { + customDir := "/tmp/images" + t.Setenv("CORE_IMAGES_DIR", customDir) + + path, err := ImagePath() + assert.NoError(t, err) + expected := filepath.Join(customDir, ImageName()) + assert.Equal(t, expected, path) +} + +func TestDefaultBootOptions(t *testing.T) { + opts := DefaultBootOptions() + assert.Equal(t, 4096, opts.Memory) + assert.Equal(t, 2, opts.CPUs) + assert.Equal(t, "core-dev", opts.Name) + assert.False(t, opts.Fresh) +} + +func TestIsInstalled_Bad(t *testing.T) { + t.Run("returns false for non-existent image", func(t *testing.T) { + // Point to a temp directory that is empty + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + // Create devops instance manually to avoid loading real config/images + d := &DevOps{medium: io.Local} + assert.False(t, d.IsInstalled()) + }) +} + +func TestIsInstalled_Good(t *testing.T) { + t.Run("returns true when image exists", func(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + // Create the image file + imagePath := filepath.Join(tempDir, ImageName()) + err := os.WriteFile(imagePath, []byte("fake image data"), 0644) + require.NoError(t, err) + + d := &DevOps{medium: io.Local} + assert.True(t, d.IsInstalled()) + }) +} + +type mockHypervisor struct{} + +func (m *mockHypervisor) Name() string { return 
"mock" } +func (m *mockHypervisor) Available() bool { return true } +func (m *mockHypervisor) BuildCommand(ctx context.Context, image string, opts *container.HypervisorOptions) (*exec.Cmd, error) { + return exec.Command("true"), nil +} + +func TestDevOps_Status_Good(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + // Setup mock container manager + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + // Add a fake running container + c := &container.Container{ + ID: "test-id", + Name: "core-dev", + Status: container.StatusRunning, + PID: os.Getpid(), // Use our own PID so isProcessRunning returns true + StartedAt: time.Now().Add(-time.Hour), + Memory: 2048, + CPUs: 4, + } + err = state.Add(c) + require.NoError(t, err) + + status, err := d.Status(context.Background()) + assert.NoError(t, err) + assert.NotNil(t, status) + assert.True(t, status.Running) + assert.Equal(t, "test-id", status.ContainerID) + assert.Equal(t, 2048, status.Memory) + assert.Equal(t, 4, status.CPUs) +} + +func TestDevOps_Status_Good_NotInstalled(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + status, err := d.Status(context.Background()) + assert.NoError(t, err) + assert.NotNil(t, status) + assert.False(t, status.Installed) + assert.False(t, 
status.Running) + assert.Equal(t, 2222, status.SSHPort) +} + +func TestDevOps_Status_Good_NoContainer(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + // Create fake image to mark as installed + imagePath := filepath.Join(tempDir, ImageName()) + err := os.WriteFile(imagePath, []byte("fake"), 0644) + require.NoError(t, err) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + status, err := d.Status(context.Background()) + assert.NoError(t, err) + assert.NotNil(t, status) + assert.True(t, status.Installed) + assert.False(t, status.Running) + assert.Empty(t, status.ContainerID) +} + +func TestDevOps_IsRunning_Good(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + c := &container.Container{ + ID: "test-id", + Name: "core-dev", + Status: container.StatusRunning, + PID: os.Getpid(), + StartedAt: time.Now(), + } + err = state.Add(c) + require.NoError(t, err) + + running, err := d.IsRunning(context.Background()) + assert.NoError(t, err) + assert.True(t, running) +} + +func TestDevOps_IsRunning_Bad_NotRunning(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := 
filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + running, err := d.IsRunning(context.Background()) + assert.NoError(t, err) + assert.False(t, running) +} + +func TestDevOps_IsRunning_Bad_ContainerStopped(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + c := &container.Container{ + ID: "test-id", + Name: "core-dev", + Status: container.StatusStopped, + PID: 12345, + StartedAt: time.Now(), + } + err = state.Add(c) + require.NoError(t, err) + + running, err := d.IsRunning(context.Background()) + assert.NoError(t, err) + assert.False(t, running) +} + +func TestDevOps_findContainer_Good(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + c := &container.Container{ + ID: "test-id", + Name: "my-container", + Status: container.StatusRunning, + PID: os.Getpid(), + StartedAt: time.Now(), + } + err = state.Add(c) + require.NoError(t, err) + + found, err := d.findContainer(context.Background(), "my-container") + assert.NoError(t, err) + assert.NotNil(t, found) + 
assert.Equal(t, "test-id", found.ID) + assert.Equal(t, "my-container", found.Name) +} + +func TestDevOps_findContainer_Bad_NotFound(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + found, err := d.findContainer(context.Background(), "nonexistent") + assert.NoError(t, err) + assert.Nil(t, found) +} + +func TestDevOps_Stop_Bad_NotFound(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + err = d.Stop(context.Background()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not found") +} + +func TestBootOptions_Custom(t *testing.T) { + opts := BootOptions{ + Memory: 8192, + CPUs: 4, + Name: "custom-dev", + Fresh: true, + } + assert.Equal(t, 8192, opts.Memory) + assert.Equal(t, 4, opts.CPUs) + assert.Equal(t, "custom-dev", opts.Name) + assert.True(t, opts.Fresh) +} + +func TestDevStatus_Struct(t *testing.T) { + status := DevStatus{ + Installed: true, + Running: true, + ImageVersion: "v1.2.3", + ContainerID: "abc123", + Memory: 4096, + CPUs: 2, + SSHPort: 2222, + Uptime: time.Hour, + } + assert.True(t, status.Installed) + assert.True(t, status.Running) + assert.Equal(t, "v1.2.3", status.ImageVersion) + assert.Equal(t, "abc123", status.ContainerID) + 
assert.Equal(t, 4096, status.Memory) + assert.Equal(t, 2, status.CPUs) + assert.Equal(t, 2222, status.SSHPort) + assert.Equal(t, time.Hour, status.Uptime) +} + +func TestDevOps_Boot_Bad_NotInstalled(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + err = d.Boot(context.Background(), DefaultBootOptions()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not installed") +} + +func TestDevOps_Boot_Bad_AlreadyRunning(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + // Create fake image + imagePath := filepath.Join(tempDir, ImageName()) + err := os.WriteFile(imagePath, []byte("fake"), 0644) + require.NoError(t, err) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + // Add a running container + c := &container.Container{ + ID: "test-id", + Name: "core-dev", + Status: container.StatusRunning, + PID: os.Getpid(), + StartedAt: time.Now(), + } + err = state.Add(c) + require.NoError(t, err) + + err = d.Boot(context.Background(), DefaultBootOptions()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "already running") +} + +func TestDevOps_Status_Good_WithImageVersion(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + // Create fake image + imagePath := 
filepath.Join(tempDir, ImageName()) + err := os.WriteFile(imagePath, []byte("fake"), 0644) + require.NoError(t, err) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + // Manually set manifest with version info + mgr.manifest.Images[ImageName()] = ImageInfo{ + Version: "v1.2.3", + Source: "test", + } + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + config: cfg, + images: mgr, + container: cm, + } + + status, err := d.Status(context.Background()) + assert.NoError(t, err) + assert.True(t, status.Installed) + assert.Equal(t, "v1.2.3", status.ImageVersion) +} + +func TestDevOps_findContainer_Good_MultipleContainers(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + // Add multiple containers + c1 := &container.Container{ + ID: "id-1", + Name: "container-1", + Status: container.StatusRunning, + PID: os.Getpid(), + StartedAt: time.Now(), + } + c2 := &container.Container{ + ID: "id-2", + Name: "container-2", + Status: container.StatusRunning, + PID: os.Getpid(), + StartedAt: time.Now(), + } + err = state.Add(c1) + require.NoError(t, err) + err = state.Add(c2) + require.NoError(t, err) + + // Find specific container + found, err := d.findContainer(context.Background(), "container-2") + assert.NoError(t, err) + assert.NotNil(t, found) + assert.Equal(t, "id-2", found.ID) +} + +func TestDevOps_Status_Good_ContainerWithUptime(t 
*testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + startTime := time.Now().Add(-2 * time.Hour) + c := &container.Container{ + ID: "test-id", + Name: "core-dev", + Status: container.StatusRunning, + PID: os.Getpid(), + StartedAt: startTime, + Memory: 4096, + CPUs: 2, + } + err = state.Add(c) + require.NoError(t, err) + + status, err := d.Status(context.Background()) + assert.NoError(t, err) + assert.True(t, status.Running) + assert.GreaterOrEqual(t, status.Uptime.Hours(), float64(1)) +} + +func TestDevOps_IsRunning_Bad_DifferentContainerName(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + // Add a container with different name + c := &container.Container{ + ID: "test-id", + Name: "other-container", + Status: container.StatusRunning, + PID: os.Getpid(), + StartedAt: time.Now(), + } + err = state.Add(c) + require.NoError(t, err) + + // IsRunning looks for "core-dev", not "other-container" + running, err := d.IsRunning(context.Background()) + assert.NoError(t, err) + assert.False(t, running) +} + +func TestDevOps_Boot_Good_FreshFlag(t *testing.T) { + t.Setenv("CORE_SKIP_SSH_SCAN", "true") + tempDir, err := os.MkdirTemp("", "devops-test-*") + require.NoError(t, err) + 
t.Cleanup(func() { _ = os.RemoveAll(tempDir) }) + t.Setenv("CORE_IMAGES_DIR", tempDir) + + // Create fake image + imagePath := filepath.Join(tempDir, ImageName()) + err = os.WriteFile(imagePath, []byte("fake"), 0644) + require.NoError(t, err) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + // Add an existing container with non-existent PID (will be seen as stopped) + c := &container.Container{ + ID: "old-id", + Name: "core-dev", + Status: container.StatusRunning, + PID: 99999999, // Non-existent PID - List() will mark it as stopped + StartedAt: time.Now(), + } + err = state.Add(c) + require.NoError(t, err) + + // Boot with Fresh=true should try to stop the existing container + // then run a new one. 
The mock hypervisor "succeeds" so this won't error + opts := BootOptions{ + Memory: 4096, + CPUs: 2, + Name: "core-dev", + Fresh: true, + } + err = d.Boot(context.Background(), opts) + // The mock hypervisor's Run succeeds + assert.NoError(t, err) +} + +func TestDevOps_Stop_Bad_ContainerNotRunning(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + // Add a container that's already stopped + c := &container.Container{ + ID: "test-id", + Name: "core-dev", + Status: container.StatusStopped, + PID: 99999999, + StartedAt: time.Now(), + } + err = state.Add(c) + require.NoError(t, err) + + // Stop should fail because container is not running + err = d.Stop(context.Background()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not running") +} + +func TestDevOps_Boot_Good_FreshWithNoExisting(t *testing.T) { + t.Setenv("CORE_SKIP_SSH_SCAN", "true") + tempDir, err := os.MkdirTemp("", "devops-boot-fresh-*") + require.NoError(t, err) + t.Cleanup(func() { _ = os.RemoveAll(tempDir) }) + t.Setenv("CORE_IMAGES_DIR", tempDir) + + // Create fake image + imagePath := filepath.Join(tempDir, ImageName()) + err = os.WriteFile(imagePath, []byte("fake"), 0644) + require.NoError(t, err) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + // Boot with 
Fresh=true but no existing container + opts := BootOptions{ + Memory: 4096, + CPUs: 2, + Name: "core-dev", + Fresh: true, + } + err = d.Boot(context.Background(), opts) + // The mock hypervisor succeeds + assert.NoError(t, err) +} + +func TestImageName_Format(t *testing.T) { + name := ImageName() + // Check format: core-devops-{os}-{arch}.qcow2 + assert.Contains(t, name, "core-devops-") + assert.Contains(t, name, runtime.GOOS) + assert.Contains(t, name, runtime.GOARCH) + assert.True(t, filepath.Ext(name) == ".qcow2") +} + +func TestDevOps_Install_Delegates(t *testing.T) { + // This test verifies the Install method delegates to ImageManager + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + d := &DevOps{medium: io.Local, + images: mgr, + } + + // This will fail because no source is available, but it tests delegation + err = d.Install(context.Background(), nil) + assert.Error(t, err) +} + +func TestDevOps_CheckUpdate_Delegates(t *testing.T) { + // This test verifies the CheckUpdate method delegates to ImageManager + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + d := &DevOps{medium: io.Local, + images: mgr, + } + + // This will fail because image not installed, but it tests delegation + _, _, _, err = d.CheckUpdate(context.Background()) + assert.Error(t, err) +} + +func TestDevOps_Boot_Good_Success(t *testing.T) { + t.Setenv("CORE_SKIP_SSH_SCAN", "true") + tempDir, err := os.MkdirTemp("", "devops-boot-success-*") + require.NoError(t, err) + t.Cleanup(func() { _ = os.RemoveAll(tempDir) }) + t.Setenv("CORE_IMAGES_DIR", tempDir) + + // Create fake image + imagePath := filepath.Join(tempDir, ImageName()) + err = os.WriteFile(imagePath, []byte("fake"), 0644) + require.NoError(t, err) + + cfg := DefaultConfig() + mgr, err := 
NewImageManager(io.Local, cfg) + require.NoError(t, err) + + statePath := filepath.Join(tempDir, "containers.json") + state := container.NewState(io.Local, statePath) + h := &mockHypervisor{} + cm := container.NewLinuxKitManagerWithHypervisor(io.Local, state, h) + + d := &DevOps{medium: io.Local, + images: mgr, + container: cm, + } + + // Boot without Fresh flag and no existing container + opts := DefaultBootOptions() + err = d.Boot(context.Background(), opts) + assert.NoError(t, err) // Mock hypervisor succeeds +} + +func TestDevOps_Config(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tempDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + d := &DevOps{medium: io.Local, + config: cfg, + images: mgr, + } + + assert.NotNil(t, d.config) + assert.Equal(t, "auto", d.config.Images.Source) +} diff --git a/devops/images.go b/devops/images.go new file mode 100644 index 0000000..8a6b46d --- /dev/null +++ b/devops/images.go @@ -0,0 +1,198 @@ +package devops + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "time" + + "forge.lthn.ai/core/go-devops/devops/sources" + "forge.lthn.ai/core/go/pkg/io" +) + +// ImageManager handles image downloads and updates. +type ImageManager struct { + medium io.Medium + config *Config + manifest *Manifest + sources []sources.ImageSource +} + +// Manifest tracks installed images. +type Manifest struct { + medium io.Medium + Images map[string]ImageInfo `json:"images"` + path string +} + +// ImageInfo holds metadata about an installed image. +type ImageInfo struct { + Version string `json:"version"` + SHA256 string `json:"sha256,omitempty"` + Downloaded time.Time `json:"downloaded"` + Source string `json:"source"` +} + +// NewImageManager creates a new image manager. 
+func NewImageManager(m io.Medium, cfg *Config) (*ImageManager, error) { + imagesDir, err := ImagesDir() + if err != nil { + return nil, err + } + + // Ensure images directory exists + if err := m.EnsureDir(imagesDir); err != nil { + return nil, err + } + + // Load or create manifest + manifestPath := filepath.Join(imagesDir, "manifest.json") + manifest, err := loadManifest(m, manifestPath) + if err != nil { + return nil, err + } + + // Build source list based on config + imageName := ImageName() + sourceCfg := sources.SourceConfig{ + GitHubRepo: cfg.Images.GitHub.Repo, + RegistryImage: cfg.Images.Registry.Image, + CDNURL: cfg.Images.CDN.URL, + ImageName: imageName, + } + + var srcs []sources.ImageSource + switch cfg.Images.Source { + case "github": + srcs = []sources.ImageSource{sources.NewGitHubSource(sourceCfg)} + case "cdn": + srcs = []sources.ImageSource{sources.NewCDNSource(sourceCfg)} + default: // "auto" + srcs = []sources.ImageSource{ + sources.NewGitHubSource(sourceCfg), + sources.NewCDNSource(sourceCfg), + } + } + + return &ImageManager{ + medium: m, + config: cfg, + manifest: manifest, + sources: srcs, + }, nil +} + +// IsInstalled checks if the dev image is installed. +func (m *ImageManager) IsInstalled() bool { + path, err := ImagePath() + if err != nil { + return false + } + return m.medium.IsFile(path) +} + +// Install downloads and installs the dev image. 
+func (m *ImageManager) Install(ctx context.Context, progress func(downloaded, total int64)) error { + imagesDir, err := ImagesDir() + if err != nil { + return err + } + + // Find first available source + var src sources.ImageSource + for _, s := range m.sources { + if s.Available() { + src = s + break + } + } + if src == nil { + return fmt.Errorf("no image source available") + } + + // Get version + version, err := src.LatestVersion(ctx) + if err != nil { + return fmt.Errorf("failed to get latest version: %w", err) + } + + fmt.Printf("Downloading %s from %s...\n", ImageName(), src.Name()) + + // Download + if err := src.Download(ctx, m.medium, imagesDir, progress); err != nil { + return err + } + + // Update manifest + m.manifest.Images[ImageName()] = ImageInfo{ + Version: version, + Downloaded: time.Now(), + Source: src.Name(), + } + + return m.manifest.Save() +} + +// CheckUpdate checks if an update is available. +func (m *ImageManager) CheckUpdate(ctx context.Context) (current, latest string, hasUpdate bool, err error) { + info, ok := m.manifest.Images[ImageName()] + if !ok { + return "", "", false, fmt.Errorf("image not installed") + } + current = info.Version + + // Find first available source + var src sources.ImageSource + for _, s := range m.sources { + if s.Available() { + src = s + break + } + } + if src == nil { + return current, "", false, fmt.Errorf("no image source available") + } + + latest, err = src.LatestVersion(ctx) + if err != nil { + return current, "", false, err + } + + hasUpdate = current != latest + return current, latest, hasUpdate, nil +} + +func loadManifest(m io.Medium, path string) (*Manifest, error) { + manifest := &Manifest{ + medium: m, + Images: make(map[string]ImageInfo), + path: path, + } + + content, err := m.Read(path) + if err != nil { + if os.IsNotExist(err) { + return manifest, nil + } + return nil, err + } + + if err := json.Unmarshal([]byte(content), manifest); err != nil { + return nil, err + } + manifest.medium = m + 
manifest.path = path + + return manifest, nil +} + +// Save writes the manifest to disk. +func (m *Manifest) Save() error { + data, err := json.MarshalIndent(m, "", " ") + if err != nil { + return err + } + return m.medium.Write(m.path, string(data)) +} diff --git a/devops/images_test.go b/devops/images_test.go new file mode 100644 index 0000000..b034d64 --- /dev/null +++ b/devops/images_test.go @@ -0,0 +1,583 @@ +package devops + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" + + "forge.lthn.ai/core/go-devops/devops/sources" + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestImageManager_Good_IsInstalled(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + // Not installed yet + assert.False(t, mgr.IsInstalled()) + + // Create fake image + imagePath := filepath.Join(tmpDir, ImageName()) + err = os.WriteFile(imagePath, []byte("fake"), 0644) + require.NoError(t, err) + + // Now installed + assert.True(t, mgr.IsInstalled()) +} + +func TestNewImageManager_Good(t *testing.T) { + t.Run("creates manager with cdn source", func(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + cfg := DefaultConfig() + cfg.Images.Source = "cdn" + + mgr, err := NewImageManager(io.Local, cfg) + assert.NoError(t, err) + assert.NotNil(t, mgr) + assert.Len(t, mgr.sources, 1) + assert.Equal(t, "cdn", mgr.sources[0].Name()) + }) + + t.Run("creates manager with github source", func(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + cfg := DefaultConfig() + cfg.Images.Source = "github" + + mgr, err := NewImageManager(io.Local, cfg) + assert.NoError(t, err) + assert.NotNil(t, mgr) + assert.Len(t, mgr.sources, 1) + assert.Equal(t, "github", mgr.sources[0].Name()) + }) +} + +func TestManifest_Save(t *testing.T) { + 
tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "manifest.json") + + m := &Manifest{ + medium: io.Local, + Images: make(map[string]ImageInfo), + path: path, + } + + m.Images["test.img"] = ImageInfo{ + Version: "1.0.0", + Source: "test", + } + + err := m.Save() + assert.NoError(t, err) + + // Verify file exists and has content + _, err = os.Stat(path) + assert.NoError(t, err) + + // Reload + m2, err := loadManifest(io.Local, path) + assert.NoError(t, err) + assert.Equal(t, "1.0.0", m2.Images["test.img"].Version) +} + +func TestLoadManifest_Bad(t *testing.T) { + t.Run("invalid json", func(t *testing.T) { + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "manifest.json") + err := os.WriteFile(path, []byte("invalid json"), 0644) + require.NoError(t, err) + + _, err = loadManifest(io.Local, path) + assert.Error(t, err) + }) +} + +func TestCheckUpdate_Bad(t *testing.T) { + t.Run("image not installed", func(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + require.NoError(t, err) + + _, _, _, err = mgr.CheckUpdate(context.Background()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "image not installed") + }) +} + +func TestNewImageManager_Good_AutoSource(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + cfg := DefaultConfig() + cfg.Images.Source = "auto" + + mgr, err := NewImageManager(io.Local, cfg) + assert.NoError(t, err) + assert.NotNil(t, mgr) + assert.Len(t, mgr.sources, 2) // github and cdn +} + +func TestNewImageManager_Good_UnknownSourceFallsToAuto(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + cfg := DefaultConfig() + cfg.Images.Source = "unknown" + + mgr, err := NewImageManager(io.Local, cfg) + assert.NoError(t, err) + assert.NotNil(t, mgr) + assert.Len(t, mgr.sources, 2) // falls to default (auto) which is github + cdn +} + +func TestLoadManifest_Good_Empty(t *testing.T) 
{ + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "nonexistent.json") + + m, err := loadManifest(io.Local, path) + assert.NoError(t, err) + assert.NotNil(t, m) + assert.NotNil(t, m.Images) + assert.Empty(t, m.Images) + assert.Equal(t, path, m.path) +} + +func TestLoadManifest_Good_ExistingData(t *testing.T) { + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "manifest.json") + + data := `{"images":{"test.img":{"version":"2.0.0","source":"cdn"}}}` + err := os.WriteFile(path, []byte(data), 0644) + require.NoError(t, err) + + m, err := loadManifest(io.Local, path) + assert.NoError(t, err) + assert.NotNil(t, m) + assert.Equal(t, "2.0.0", m.Images["test.img"].Version) + assert.Equal(t, "cdn", m.Images["test.img"].Source) +} + +func TestImageInfo_Struct(t *testing.T) { + info := ImageInfo{ + Version: "1.0.0", + SHA256: "abc123", + Downloaded: time.Now(), + Source: "github", + } + assert.Equal(t, "1.0.0", info.Version) + assert.Equal(t, "abc123", info.SHA256) + assert.False(t, info.Downloaded.IsZero()) + assert.Equal(t, "github", info.Source) +} + +func TestManifest_Save_Good_CreatesDirs(t *testing.T) { + tmpDir := t.TempDir() + nestedPath := filepath.Join(tmpDir, "nested", "dir", "manifest.json") + + m := &Manifest{ + medium: io.Local, + Images: make(map[string]ImageInfo), + path: nestedPath, + } + m.Images["test.img"] = ImageInfo{Version: "1.0.0"} + + // Save creates parent directories automatically via io.Local.Write + err := m.Save() + assert.NoError(t, err) + + // Verify file was created + _, err = os.Stat(nestedPath) + assert.NoError(t, err) +} + +func TestManifest_Save_Good_Overwrite(t *testing.T) { + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "manifest.json") + + // First save + m1 := &Manifest{ + medium: io.Local, + Images: make(map[string]ImageInfo), + path: path, + } + m1.Images["test.img"] = ImageInfo{Version: "1.0.0"} + err := m1.Save() + require.NoError(t, err) + + // Second save with different data + m2 := &Manifest{ + medium: 
io.Local, + Images: make(map[string]ImageInfo), + path: path, + } + m2.Images["other.img"] = ImageInfo{Version: "2.0.0"} + err = m2.Save() + require.NoError(t, err) + + // Verify second data + loaded, err := loadManifest(io.Local, path) + assert.NoError(t, err) + assert.Equal(t, "2.0.0", loaded.Images["other.img"].Version) + _, exists := loaded.Images["test.img"] + assert.False(t, exists) +} + +func TestImageManager_Install_Bad_NoSourceAvailable(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + // Create manager with empty sources + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")}, + sources: nil, // no sources + } + + err := mgr.Install(context.Background(), nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no image source available") +} + +func TestNewImageManager_Good_CreatesDir(t *testing.T) { + tmpDir := t.TempDir() + imagesDir := filepath.Join(tmpDir, "images") + t.Setenv("CORE_IMAGES_DIR", imagesDir) + + cfg := DefaultConfig() + mgr, err := NewImageManager(io.Local, cfg) + assert.NoError(t, err) + assert.NotNil(t, mgr) + + // Verify directory was created + info, err := os.Stat(imagesDir) + assert.NoError(t, err) + assert.True(t, info.IsDir()) +} + +// mockImageSource is a test helper for simulating image sources +type mockImageSource struct { + name string + available bool + latestVersion string + latestErr error + downloadErr error +} + +func (m *mockImageSource) Name() string { return m.name } +func (m *mockImageSource) Available() bool { return m.available } +func (m *mockImageSource) LatestVersion(ctx context.Context) (string, error) { + return m.latestVersion, m.latestErr +} +func (m *mockImageSource) Download(ctx context.Context, medium io.Medium, dest string, progress func(downloaded, total int64)) error { + if m.downloadErr != nil { + return m.downloadErr + } + // 
Create a fake image file + imagePath := filepath.Join(dest, ImageName()) + return os.WriteFile(imagePath, []byte("mock image content"), 0644) +} + +func TestImageManager_Install_Good_WithMockSource(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + mock := &mockImageSource{ + name: "mock", + available: true, + latestVersion: "v1.0.0", + } + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")}, + sources: []sources.ImageSource{mock}, + } + + err := mgr.Install(context.Background(), nil) + assert.NoError(t, err) + assert.True(t, mgr.IsInstalled()) + + // Verify manifest was updated + info, ok := mgr.manifest.Images[ImageName()] + assert.True(t, ok) + assert.Equal(t, "v1.0.0", info.Version) + assert.Equal(t, "mock", info.Source) +} + +func TestImageManager_Install_Bad_DownloadError(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + mock := &mockImageSource{ + name: "mock", + available: true, + latestVersion: "v1.0.0", + downloadErr: assert.AnError, + } + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")}, + sources: []sources.ImageSource{mock}, + } + + err := mgr.Install(context.Background(), nil) + assert.Error(t, err) +} + +func TestImageManager_Install_Bad_VersionError(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + mock := &mockImageSource{ + name: "mock", + available: true, + latestErr: assert.AnError, + } + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")}, + sources: []sources.ImageSource{mock}, + } + + err := mgr.Install(context.Background(), nil) + 
assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to get latest version") +} + +func TestImageManager_Install_Good_SkipsUnavailableSource(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + unavailableMock := &mockImageSource{ + name: "unavailable", + available: false, + } + availableMock := &mockImageSource{ + name: "available", + available: true, + latestVersion: "v2.0.0", + } + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")}, + sources: []sources.ImageSource{unavailableMock, availableMock}, + } + + err := mgr.Install(context.Background(), nil) + assert.NoError(t, err) + + // Should have used the available source + info := mgr.manifest.Images[ImageName()] + assert.Equal(t, "available", info.Source) +} + +func TestImageManager_CheckUpdate_Good_WithMockSource(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + mock := &mockImageSource{ + name: "mock", + available: true, + latestVersion: "v2.0.0", + } + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{ + medium: io.Local, + Images: map[string]ImageInfo{ + ImageName(): {Version: "v1.0.0", Source: "mock"}, + }, + path: filepath.Join(tmpDir, "manifest.json"), + }, + sources: []sources.ImageSource{mock}, + } + + current, latest, hasUpdate, err := mgr.CheckUpdate(context.Background()) + assert.NoError(t, err) + assert.Equal(t, "v1.0.0", current) + assert.Equal(t, "v2.0.0", latest) + assert.True(t, hasUpdate) +} + +func TestImageManager_CheckUpdate_Good_NoUpdate(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + mock := &mockImageSource{ + name: "mock", + available: true, + latestVersion: "v1.0.0", + } + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{ + medium: io.Local, + Images: 
map[string]ImageInfo{ + ImageName(): {Version: "v1.0.0", Source: "mock"}, + }, + path: filepath.Join(tmpDir, "manifest.json"), + }, + sources: []sources.ImageSource{mock}, + } + + current, latest, hasUpdate, err := mgr.CheckUpdate(context.Background()) + assert.NoError(t, err) + assert.Equal(t, "v1.0.0", current) + assert.Equal(t, "v1.0.0", latest) + assert.False(t, hasUpdate) +} + +func TestImageManager_CheckUpdate_Bad_NoSource(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + unavailableMock := &mockImageSource{ + name: "mock", + available: false, + } + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{ + medium: io.Local, + Images: map[string]ImageInfo{ + ImageName(): {Version: "v1.0.0", Source: "mock"}, + }, + path: filepath.Join(tmpDir, "manifest.json"), + }, + sources: []sources.ImageSource{unavailableMock}, + } + + _, _, _, err := mgr.CheckUpdate(context.Background()) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no image source available") +} + +func TestImageManager_CheckUpdate_Bad_VersionError(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + mock := &mockImageSource{ + name: "mock", + available: true, + latestErr: assert.AnError, + } + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{ + medium: io.Local, + Images: map[string]ImageInfo{ + ImageName(): {Version: "v1.0.0", Source: "mock"}, + }, + path: filepath.Join(tmpDir, "manifest.json"), + }, + sources: []sources.ImageSource{mock}, + } + + current, _, _, err := mgr.CheckUpdate(context.Background()) + assert.Error(t, err) + assert.Equal(t, "v1.0.0", current) // Current should still be returned +} + +func TestImageManager_Install_Bad_EmptySources(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{medium: io.Local, Images: 
make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")}, + sources: []sources.ImageSource{}, // Empty slice, not nil + } + + err := mgr.Install(context.Background(), nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no image source available") +} + +func TestImageManager_Install_Bad_AllUnavailable(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + mock1 := &mockImageSource{name: "mock1", available: false} + mock2 := &mockImageSource{name: "mock2", available: false} + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{medium: io.Local, Images: make(map[string]ImageInfo), path: filepath.Join(tmpDir, "manifest.json")}, + sources: []sources.ImageSource{mock1, mock2}, + } + + err := mgr.Install(context.Background(), nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no image source available") +} + +func TestImageManager_CheckUpdate_Good_FirstSourceUnavailable(t *testing.T) { + tmpDir := t.TempDir() + t.Setenv("CORE_IMAGES_DIR", tmpDir) + + unavailable := &mockImageSource{name: "unavailable", available: false} + available := &mockImageSource{name: "available", available: true, latestVersion: "v2.0.0"} + + mgr := &ImageManager{ + medium: io.Local, + config: DefaultConfig(), + manifest: &Manifest{ + medium: io.Local, + Images: map[string]ImageInfo{ + ImageName(): {Version: "v1.0.0", Source: "available"}, + }, + path: filepath.Join(tmpDir, "manifest.json"), + }, + sources: []sources.ImageSource{unavailable, available}, + } + + current, latest, hasUpdate, err := mgr.CheckUpdate(context.Background()) + assert.NoError(t, err) + assert.Equal(t, "v1.0.0", current) + assert.Equal(t, "v2.0.0", latest) + assert.True(t, hasUpdate) +} + +func TestManifest_Struct(t *testing.T) { + m := &Manifest{ + Images: map[string]ImageInfo{ + "test.img": {Version: "1.0.0"}, + }, + path: "/path/to/manifest.json", + } + assert.Equal(t, "/path/to/manifest.json", m.path) + assert.Len(t, 
m.Images, 1) + assert.Equal(t, "1.0.0", m.Images["test.img"].Version) +} diff --git a/devops/serve.go b/devops/serve.go new file mode 100644 index 0000000..6eeb1fa --- /dev/null +++ b/devops/serve.go @@ -0,0 +1,109 @@ +package devops + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + + "forge.lthn.ai/core/go/pkg/io" +) + +// ServeOptions configures the dev server. +type ServeOptions struct { + Port int // Port to serve on (default 8000) + Path string // Subdirectory to serve (default: current dir) +} + +// Serve mounts the project and starts a dev server. +func (d *DevOps) Serve(ctx context.Context, projectDir string, opts ServeOptions) error { + running, err := d.IsRunning(ctx) + if err != nil { + return err + } + if !running { + return fmt.Errorf("dev environment not running (run 'core dev boot' first)") + } + + if opts.Port == 0 { + opts.Port = 8000 + } + + servePath := projectDir + if opts.Path != "" { + servePath = filepath.Join(projectDir, opts.Path) + } + + // Mount project directory via SSHFS + if err := d.mountProject(ctx, servePath); err != nil { + return fmt.Errorf("failed to mount project: %w", err) + } + + // Detect and run serve command + serveCmd := DetectServeCommand(d.medium, servePath) + fmt.Printf("Starting server: %s\n", serveCmd) + fmt.Printf("Listening on http://localhost:%d\n", opts.Port) + + // Run serve command via SSH + return d.sshShell(ctx, []string{"cd", "/app", "&&", serveCmd}) +} + +// mountProject mounts a directory into the VM via SSHFS. 
+func (d *DevOps) mountProject(ctx context.Context, path string) error { + absPath, err := filepath.Abs(path) + if err != nil { + return err + } + + // Use reverse SSHFS mount + // The VM connects back to host to mount the directory + cmd := exec.CommandContext(ctx, "ssh", + "-o", "StrictHostKeyChecking=yes", + "-o", "UserKnownHostsFile=~/.core/known_hosts", + "-o", "LogLevel=ERROR", + "-R", "10000:localhost:22", // Reverse tunnel for SSHFS + "-p", fmt.Sprintf("%d", DefaultSSHPort), + "root@localhost", + fmt.Sprintf("mkdir -p /app && sshfs -p 10000 %s@localhost:%s /app -o allow_other", os.Getenv("USER"), absPath), + ) + return cmd.Run() +} + +// DetectServeCommand auto-detects the serve command for a project. +func DetectServeCommand(m io.Medium, projectDir string) string { + // Laravel/Octane + if hasFile(m, projectDir, "artisan") { + return "php artisan octane:start --host=0.0.0.0 --port=8000" + } + + // Node.js with dev script + if hasFile(m, projectDir, "package.json") { + if hasPackageScript(m, projectDir, "dev") { + return "npm run dev -- --host 0.0.0.0" + } + if hasPackageScript(m, projectDir, "start") { + return "npm start" + } + } + + // PHP with composer + if hasFile(m, projectDir, "composer.json") { + return "frankenphp php-server -l :8000" + } + + // Go + if hasFile(m, projectDir, "go.mod") { + if hasFile(m, projectDir, "main.go") { + return "go run ." 
+ } + } + + // Python Django + if hasFile(m, projectDir, "manage.py") { + return "python manage.py runserver 0.0.0.0:8000" + } + + // Fallback: simple HTTP server + return "python3 -m http.server 8000" +} diff --git a/devops/serve_test.go b/devops/serve_test.go new file mode 100644 index 0000000..2160ce9 --- /dev/null +++ b/devops/serve_test.go @@ -0,0 +1,137 @@ +package devops + +import ( + "os" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" +) + +func TestDetectServeCommand_Good_Laravel(t *testing.T) { + tmpDir := t.TempDir() + err := os.WriteFile(filepath.Join(tmpDir, "artisan"), []byte("#!/usr/bin/env php"), 0644) + assert.NoError(t, err) + + cmd := DetectServeCommand(io.Local, tmpDir) + assert.Equal(t, "php artisan octane:start --host=0.0.0.0 --port=8000", cmd) +} + +func TestDetectServeCommand_Good_NodeDev(t *testing.T) { + tmpDir := t.TempDir() + packageJSON := `{"scripts":{"dev":"vite","start":"node index.js"}}` + err := os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(packageJSON), 0644) + assert.NoError(t, err) + + cmd := DetectServeCommand(io.Local, tmpDir) + assert.Equal(t, "npm run dev -- --host 0.0.0.0", cmd) +} + +func TestDetectServeCommand_Good_NodeStart(t *testing.T) { + tmpDir := t.TempDir() + packageJSON := `{"scripts":{"start":"node server.js"}}` + err := os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(packageJSON), 0644) + assert.NoError(t, err) + + cmd := DetectServeCommand(io.Local, tmpDir) + assert.Equal(t, "npm start", cmd) +} + +func TestDetectServeCommand_Good_PHP(t *testing.T) { + tmpDir := t.TempDir() + err := os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"require":{}}`), 0644) + assert.NoError(t, err) + + cmd := DetectServeCommand(io.Local, tmpDir) + assert.Equal(t, "frankenphp php-server -l :8000", cmd) +} + +func TestDetectServeCommand_Good_GoMain(t *testing.T) { + tmpDir := t.TempDir() + err := os.WriteFile(filepath.Join(tmpDir, "go.mod"), 
[]byte("module example"), 0644) + assert.NoError(t, err) + err = os.WriteFile(filepath.Join(tmpDir, "main.go"), []byte("package main"), 0644) + assert.NoError(t, err) + + cmd := DetectServeCommand(io.Local, tmpDir) + assert.Equal(t, "go run .", cmd) +} + +func TestDetectServeCommand_Good_GoWithoutMain(t *testing.T) { + tmpDir := t.TempDir() + err := os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module example"), 0644) + assert.NoError(t, err) + + // No main.go, so falls through to fallback + cmd := DetectServeCommand(io.Local, tmpDir) + assert.Equal(t, "python3 -m http.server 8000", cmd) +} + +func TestDetectServeCommand_Good_Django(t *testing.T) { + tmpDir := t.TempDir() + err := os.WriteFile(filepath.Join(tmpDir, "manage.py"), []byte("#!/usr/bin/env python"), 0644) + assert.NoError(t, err) + + cmd := DetectServeCommand(io.Local, tmpDir) + assert.Equal(t, "python manage.py runserver 0.0.0.0:8000", cmd) +} + +func TestDetectServeCommand_Good_Fallback(t *testing.T) { + tmpDir := t.TempDir() + + cmd := DetectServeCommand(io.Local, tmpDir) + assert.Equal(t, "python3 -m http.server 8000", cmd) +} + +func TestDetectServeCommand_Good_Priority(t *testing.T) { + // Laravel (artisan) should take priority over PHP (composer.json) + tmpDir := t.TempDir() + err := os.WriteFile(filepath.Join(tmpDir, "artisan"), []byte("#!/usr/bin/env php"), 0644) + assert.NoError(t, err) + err = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"require":{}}`), 0644) + assert.NoError(t, err) + + cmd := DetectServeCommand(io.Local, tmpDir) + assert.Equal(t, "php artisan octane:start --host=0.0.0.0 --port=8000", cmd) +} + +func TestServeOptions_Default(t *testing.T) { + opts := ServeOptions{} + assert.Equal(t, 0, opts.Port) + assert.Equal(t, "", opts.Path) +} + +func TestServeOptions_Custom(t *testing.T) { + opts := ServeOptions{ + Port: 3000, + Path: "public", + } + assert.Equal(t, 3000, opts.Port) + assert.Equal(t, "public", opts.Path) +} + +func TestHasFile_Good(t 
*testing.T) { + tmpDir := t.TempDir() + testFile := filepath.Join(tmpDir, "test.txt") + err := os.WriteFile(testFile, []byte("content"), 0644) + assert.NoError(t, err) + + assert.True(t, hasFile(io.Local, tmpDir, "test.txt")) +} + +func TestHasFile_Bad(t *testing.T) { + tmpDir := t.TempDir() + + assert.False(t, hasFile(io.Local, tmpDir, "nonexistent.txt")) +} + +func TestHasFile_Bad_Directory(t *testing.T) { + tmpDir := t.TempDir() + subDir := filepath.Join(tmpDir, "subdir") + err := os.Mkdir(subDir, 0755) + assert.NoError(t, err) + + // hasFile correctly returns false for directories (only true for regular files) + assert.False(t, hasFile(io.Local, tmpDir, "subdir")) +} diff --git a/devops/shell.go b/devops/shell.go new file mode 100644 index 0000000..fe94d1b --- /dev/null +++ b/devops/shell.go @@ -0,0 +1,74 @@ +package devops + +import ( + "context" + "fmt" + "os" + "os/exec" +) + +// ShellOptions configures the shell connection. +type ShellOptions struct { + Console bool // Use serial console instead of SSH + Command []string // Command to run (empty = interactive shell) +} + +// Shell connects to the dev environment. +func (d *DevOps) Shell(ctx context.Context, opts ShellOptions) error { + running, err := d.IsRunning(ctx) + if err != nil { + return err + } + if !running { + return fmt.Errorf("dev environment not running (run 'core dev boot' first)") + } + + if opts.Console { + return d.serialConsole(ctx) + } + + return d.sshShell(ctx, opts.Command) +} + +// sshShell connects via SSH. +func (d *DevOps) sshShell(ctx context.Context, command []string) error { + args := []string{ + "-o", "StrictHostKeyChecking=yes", + "-o", "UserKnownHostsFile=~/.core/known_hosts", + "-o", "LogLevel=ERROR", + "-A", // Agent forwarding + "-p", fmt.Sprintf("%d", DefaultSSHPort), + "root@localhost", + } + + if len(command) > 0 { + args = append(args, command...) + } + + cmd := exec.CommandContext(ctx, "ssh", args...) 
+ cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + return cmd.Run() +} + +// serialConsole attaches to the QEMU serial console. +func (d *DevOps) serialConsole(ctx context.Context) error { + // Find the container to get its console socket + c, err := d.findContainer(ctx, "core-dev") + if err != nil { + return err + } + if c == nil { + return fmt.Errorf("console not available: container not found") + } + + // Use socat to connect to the console socket + socketPath := fmt.Sprintf("/tmp/core-%s-console.sock", c.ID) + cmd := exec.CommandContext(ctx, "socat", "-,raw,echo=0", "unix-connect:"+socketPath) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/devops/shell_test.go b/devops/shell_test.go new file mode 100644 index 0000000..e065a78 --- /dev/null +++ b/devops/shell_test.go @@ -0,0 +1,47 @@ +package devops + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestShellOptions_Default(t *testing.T) { + opts := ShellOptions{} + assert.False(t, opts.Console) + assert.Nil(t, opts.Command) +} + +func TestShellOptions_Console(t *testing.T) { + opts := ShellOptions{ + Console: true, + } + assert.True(t, opts.Console) + assert.Nil(t, opts.Command) +} + +func TestShellOptions_Command(t *testing.T) { + opts := ShellOptions{ + Command: []string{"ls", "-la"}, + } + assert.False(t, opts.Console) + assert.Equal(t, []string{"ls", "-la"}, opts.Command) +} + +func TestShellOptions_ConsoleWithCommand(t *testing.T) { + opts := ShellOptions{ + Console: true, + Command: []string{"echo", "hello"}, + } + assert.True(t, opts.Console) + assert.Equal(t, []string{"echo", "hello"}, opts.Command) +} + +func TestShellOptions_EmptyCommand(t *testing.T) { + opts := ShellOptions{ + Command: []string{}, + } + assert.False(t, opts.Console) + assert.Empty(t, opts.Command) + assert.Len(t, opts.Command, 0) +} diff --git a/devops/sources/cdn.go b/devops/sources/cdn.go new file mode 100644 index 
0000000..068c49b --- /dev/null +++ b/devops/sources/cdn.go @@ -0,0 +1,113 @@ +package sources + +import ( + "context" + "fmt" + goio "io" + "net/http" + "os" + "path/filepath" + + "forge.lthn.ai/core/go/pkg/io" +) + +// CDNSource downloads images from a CDN or S3 bucket. +type CDNSource struct { + config SourceConfig +} + +// Compile-time interface check. +var _ ImageSource = (*CDNSource)(nil) + +// NewCDNSource creates a new CDN source. +func NewCDNSource(cfg SourceConfig) *CDNSource { + return &CDNSource{config: cfg} +} + +// Name returns "cdn". +func (s *CDNSource) Name() string { + return "cdn" +} + +// Available checks if CDN URL is configured. +func (s *CDNSource) Available() bool { + return s.config.CDNURL != "" +} + +// LatestVersion fetches version from manifest or returns "latest". +func (s *CDNSource) LatestVersion(ctx context.Context) (string, error) { + // Try to fetch manifest.json for version info + url := fmt.Sprintf("%s/manifest.json", s.config.CDNURL) + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return "latest", nil + } + + resp, err := http.DefaultClient.Do(req) + if err != nil || resp.StatusCode != 200 { + return "latest", nil + } + defer func() { _ = resp.Body.Close() }() + + // For now, just return latest - could parse manifest for version + return "latest", nil +} + +// Download downloads the image from CDN. 
+func (s *CDNSource) Download(ctx context.Context, m io.Medium, dest string, progress func(downloaded, total int64)) error { + url := fmt.Sprintf("%s/%s", s.config.CDNURL, s.config.ImageName) + + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return fmt.Errorf("cdn.Download: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("cdn.Download: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != 200 { + return fmt.Errorf("cdn.Download: HTTP %d", resp.StatusCode) + } + + // Ensure dest directory exists + if err := m.EnsureDir(dest); err != nil { + return fmt.Errorf("cdn.Download: %w", err) + } + + // Create destination file + destPath := filepath.Join(dest, s.config.ImageName) + f, err := os.Create(destPath) + if err != nil { + return fmt.Errorf("cdn.Download: %w", err) + } + defer func() { _ = f.Close() }() + + // Copy with progress + total := resp.ContentLength + var downloaded int64 + + buf := make([]byte, 32*1024) + for { + n, err := resp.Body.Read(buf) + if n > 0 { + if _, werr := f.Write(buf[:n]); werr != nil { + return fmt.Errorf("cdn.Download: %w", werr) + } + downloaded += int64(n) + if progress != nil { + progress(downloaded, total) + } + } + if err == goio.EOF { + break + } + if err != nil { + return fmt.Errorf("cdn.Download: %w", err) + } + } + + return nil +} diff --git a/devops/sources/cdn_test.go b/devops/sources/cdn_test.go new file mode 100644 index 0000000..e3db155 --- /dev/null +++ b/devops/sources/cdn_test.go @@ -0,0 +1,306 @@ +package sources + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" +) + +func TestCDNSource_Good_Available(t *testing.T) { + src := NewCDNSource(SourceConfig{ + CDNURL: "https://images.example.com", + ImageName: "core-devops-darwin-arm64.qcow2", + }) + + assert.Equal(t, "cdn", src.Name()) + 
assert.True(t, src.Available()) +} + +func TestCDNSource_Bad_NoURL(t *testing.T) { + src := NewCDNSource(SourceConfig{ + ImageName: "core-devops-darwin-arm64.qcow2", + }) + + assert.False(t, src.Available()) +} + +func TestCDNSource_LatestVersion_Good(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/manifest.json" { + w.WriteHeader(http.StatusOK) + _, _ = fmt.Fprint(w, `{"version": "1.2.3"}`) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + src := NewCDNSource(SourceConfig{ + CDNURL: server.URL, + ImageName: "test.img", + }) + + version, err := src.LatestVersion(context.Background()) + assert.NoError(t, err) + assert.Equal(t, "latest", version) // Current impl always returns "latest" +} + +func TestCDNSource_Download_Good(t *testing.T) { + content := "fake image data" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/test.img" { + w.WriteHeader(http.StatusOK) + _, _ = fmt.Fprint(w, content) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + dest := t.TempDir() + imageName := "test.img" + src := NewCDNSource(SourceConfig{ + CDNURL: server.URL, + ImageName: imageName, + }) + + var progressCalled bool + err := src.Download(context.Background(), io.Local, dest, func(downloaded, total int64) { + progressCalled = true + }) + + assert.NoError(t, err) + assert.True(t, progressCalled) + + // Verify file content + data, err := os.ReadFile(filepath.Join(dest, imageName)) + assert.NoError(t, err) + assert.Equal(t, content, string(data)) +} + +func TestCDNSource_Download_Bad(t *testing.T) { + t.Run("HTTP error", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer server.Close() + + dest := t.TempDir() + src := NewCDNSource(SourceConfig{ + 
CDNURL: server.URL, + ImageName: "test.img", + }) + + err := src.Download(context.Background(), io.Local, dest, nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "HTTP 500") + }) + + t.Run("Invalid URL", func(t *testing.T) { + dest := t.TempDir() + src := NewCDNSource(SourceConfig{ + CDNURL: "http://invalid-url-that-should-fail", + ImageName: "test.img", + }) + + err := src.Download(context.Background(), io.Local, dest, nil) + assert.Error(t, err) + }) +} + +func TestCDNSource_LatestVersion_Bad_NoManifest(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer server.Close() + + src := NewCDNSource(SourceConfig{ + CDNURL: server.URL, + ImageName: "test.img", + }) + + version, err := src.LatestVersion(context.Background()) + assert.NoError(t, err) // Should not error, just return "latest" + assert.Equal(t, "latest", version) +} + +func TestCDNSource_LatestVersion_Bad_ServerError(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer server.Close() + + src := NewCDNSource(SourceConfig{ + CDNURL: server.URL, + ImageName: "test.img", + }) + + version, err := src.LatestVersion(context.Background()) + assert.NoError(t, err) // Falls back to "latest" + assert.Equal(t, "latest", version) +} + +func TestCDNSource_Download_Good_NoProgress(t *testing.T) { + content := "test content" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(content))) + w.WriteHeader(http.StatusOK) + _, _ = fmt.Fprint(w, content) + })) + defer server.Close() + + dest := t.TempDir() + src := NewCDNSource(SourceConfig{ + CDNURL: server.URL, + ImageName: "test.img", + }) + + // nil progress callback should be handled gracefully + err := src.Download(context.Background(), 
io.Local, dest, nil) + assert.NoError(t, err) + + data, err := os.ReadFile(filepath.Join(dest, "test.img")) + assert.NoError(t, err) + assert.Equal(t, content, string(data)) +} + +func TestCDNSource_Download_Good_LargeFile(t *testing.T) { + // Create content larger than buffer size (32KB) + content := make([]byte, 64*1024) // 64KB + for i := range content { + content[i] = byte(i % 256) + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(content))) + w.WriteHeader(http.StatusOK) + _, _ = w.Write(content) + })) + defer server.Close() + + dest := t.TempDir() + src := NewCDNSource(SourceConfig{ + CDNURL: server.URL, + ImageName: "large.img", + }) + + var progressCalls int + var lastDownloaded int64 + err := src.Download(context.Background(), io.Local, dest, func(downloaded, total int64) { + progressCalls++ + lastDownloaded = downloaded + }) + + assert.NoError(t, err) + assert.Greater(t, progressCalls, 1) // Should be called multiple times for large file + assert.Equal(t, int64(len(content)), lastDownloaded) +} + +func TestCDNSource_Download_Bad_HTTPErrorCodes(t *testing.T) { + testCases := []struct { + name string + statusCode int + }{ + {"Bad Request", http.StatusBadRequest}, + {"Unauthorized", http.StatusUnauthorized}, + {"Forbidden", http.StatusForbidden}, + {"Not Found", http.StatusNotFound}, + {"Service Unavailable", http.StatusServiceUnavailable}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tc.statusCode) + })) + defer server.Close() + + dest := t.TempDir() + src := NewCDNSource(SourceConfig{ + CDNURL: server.URL, + ImageName: "test.img", + }) + + err := src.Download(context.Background(), io.Local, dest, nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("HTTP %d", tc.statusCode)) + }) + } +} + 
+func TestCDNSource_InterfaceCompliance(t *testing.T) { + // Verify CDNSource implements ImageSource + var _ ImageSource = (*CDNSource)(nil) +} + +func TestCDNSource_Config(t *testing.T) { + cfg := SourceConfig{ + CDNURL: "https://cdn.example.com", + ImageName: "my-image.qcow2", + } + src := NewCDNSource(cfg) + + assert.Equal(t, "https://cdn.example.com", src.config.CDNURL) + assert.Equal(t, "my-image.qcow2", src.config.ImageName) +} + +func TestNewCDNSource_Good(t *testing.T) { + cfg := SourceConfig{ + GitHubRepo: "host-uk/core-images", + RegistryImage: "ghcr.io/host-uk/core-devops", + CDNURL: "https://cdn.example.com", + ImageName: "core-devops-darwin-arm64.qcow2", + } + + src := NewCDNSource(cfg) + assert.NotNil(t, src) + assert.Equal(t, "cdn", src.Name()) + assert.Equal(t, cfg.CDNURL, src.config.CDNURL) +} + +func TestCDNSource_Download_Good_CreatesDestDir(t *testing.T) { + content := "test content" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = fmt.Fprint(w, content) + })) + defer server.Close() + + tmpDir := t.TempDir() + dest := filepath.Join(tmpDir, "nested", "dir") + // dest doesn't exist yet + + src := NewCDNSource(SourceConfig{ + CDNURL: server.URL, + ImageName: "test.img", + }) + + err := src.Download(context.Background(), io.Local, dest, nil) + assert.NoError(t, err) + + // Verify nested dir was created + info, err := os.Stat(dest) + assert.NoError(t, err) + assert.True(t, info.IsDir()) +} + +func TestSourceConfig_Struct(t *testing.T) { + cfg := SourceConfig{ + GitHubRepo: "owner/repo", + RegistryImage: "ghcr.io/owner/image", + CDNURL: "https://cdn.example.com", + ImageName: "image.qcow2", + } + + assert.Equal(t, "owner/repo", cfg.GitHubRepo) + assert.Equal(t, "ghcr.io/owner/image", cfg.RegistryImage) + assert.Equal(t, "https://cdn.example.com", cfg.CDNURL) + assert.Equal(t, "image.qcow2", cfg.ImageName) +} diff --git a/devops/sources/github.go 
b/devops/sources/github.go new file mode 100644 index 0000000..6624bbd --- /dev/null +++ b/devops/sources/github.go @@ -0,0 +1,72 @@ +package sources + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + + "forge.lthn.ai/core/go/pkg/io" +) + +// GitHubSource downloads images from GitHub Releases. +type GitHubSource struct { + config SourceConfig +} + +// Compile-time interface check. +var _ ImageSource = (*GitHubSource)(nil) + +// NewGitHubSource creates a new GitHub source. +func NewGitHubSource(cfg SourceConfig) *GitHubSource { + return &GitHubSource{config: cfg} +} + +// Name returns "github". +func (s *GitHubSource) Name() string { + return "github" +} + +// Available checks if gh CLI is installed and authenticated. +func (s *GitHubSource) Available() bool { + _, err := exec.LookPath("gh") + if err != nil { + return false + } + // Check if authenticated + cmd := exec.Command("gh", "auth", "status") + return cmd.Run() == nil +} + +// LatestVersion returns the latest release tag. +func (s *GitHubSource) LatestVersion(ctx context.Context) (string, error) { + cmd := exec.CommandContext(ctx, "gh", "release", "view", + "-R", s.config.GitHubRepo, + "--json", "tagName", + "-q", ".tagName", + ) + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("github.LatestVersion: %w", err) + } + return strings.TrimSpace(string(out)), nil +} + +// Download downloads the image from the latest release. 
+func (s *GitHubSource) Download(ctx context.Context, m io.Medium, dest string, progress func(downloaded, total int64)) error { + // Get release assets to find our image + cmd := exec.CommandContext(ctx, "gh", "release", "download", + "-R", s.config.GitHubRepo, + "-p", s.config.ImageName, + "-D", dest, + "--clobber", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("github.Download: %w", err) + } + return nil +} diff --git a/devops/sources/github_test.go b/devops/sources/github_test.go new file mode 100644 index 0000000..7281129 --- /dev/null +++ b/devops/sources/github_test.go @@ -0,0 +1,68 @@ +package sources + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGitHubSource_Good_Available(t *testing.T) { + src := NewGitHubSource(SourceConfig{ + GitHubRepo: "host-uk/core-images", + ImageName: "core-devops-darwin-arm64.qcow2", + }) + + if src.Name() != "github" { + t.Errorf("expected name 'github', got %q", src.Name()) + } + + // Available depends on gh CLI being installed + _ = src.Available() +} + +func TestGitHubSource_Name(t *testing.T) { + src := NewGitHubSource(SourceConfig{}) + assert.Equal(t, "github", src.Name()) +} + +func TestGitHubSource_Config(t *testing.T) { + cfg := SourceConfig{ + GitHubRepo: "owner/repo", + ImageName: "test-image.qcow2", + } + src := NewGitHubSource(cfg) + + // Verify the config is stored + assert.Equal(t, "owner/repo", src.config.GitHubRepo) + assert.Equal(t, "test-image.qcow2", src.config.ImageName) +} + +func TestGitHubSource_Good_Multiple(t *testing.T) { + // Test creating multiple sources with different configs + src1 := NewGitHubSource(SourceConfig{GitHubRepo: "org1/repo1", ImageName: "img1.qcow2"}) + src2 := NewGitHubSource(SourceConfig{GitHubRepo: "org2/repo2", ImageName: "img2.qcow2"}) + + assert.Equal(t, "org1/repo1", src1.config.GitHubRepo) + assert.Equal(t, "org2/repo2", src2.config.GitHubRepo) + assert.Equal(t, "github", 
src1.Name()) + assert.Equal(t, "github", src2.Name()) +} + +func TestNewGitHubSource_Good(t *testing.T) { + cfg := SourceConfig{ + GitHubRepo: "host-uk/core-images", + RegistryImage: "ghcr.io/host-uk/core-devops", + CDNURL: "https://cdn.example.com", + ImageName: "core-devops-darwin-arm64.qcow2", + } + + src := NewGitHubSource(cfg) + assert.NotNil(t, src) + assert.Equal(t, "github", src.Name()) + assert.Equal(t, cfg.GitHubRepo, src.config.GitHubRepo) +} + +func TestGitHubSource_InterfaceCompliance(t *testing.T) { + // Verify GitHubSource implements ImageSource + var _ ImageSource = (*GitHubSource)(nil) +} diff --git a/devops/sources/source.go b/devops/sources/source.go new file mode 100644 index 0000000..85c9492 --- /dev/null +++ b/devops/sources/source.go @@ -0,0 +1,33 @@ +// Package sources provides image download sources for core-devops. +package sources + +import ( + "context" + + "forge.lthn.ai/core/go/pkg/io" +) + +// ImageSource defines the interface for downloading dev images. +type ImageSource interface { + // Name returns the source identifier. + Name() string + // Available checks if this source can be used. + Available() bool + // LatestVersion returns the latest available version. + LatestVersion(ctx context.Context) (string, error) + // Download downloads the image to the destination path. + // Reports progress via the callback if provided. + Download(ctx context.Context, m io.Medium, dest string, progress func(downloaded, total int64)) error +} + +// SourceConfig holds configuration for a source. 
+type SourceConfig struct { + // GitHub configuration + GitHubRepo string + // Registry configuration + RegistryImage string + // CDN configuration + CDNURL string + // Image name (e.g., core-devops-darwin-arm64.qcow2) + ImageName string +} diff --git a/devops/sources/source_test.go b/devops/sources/source_test.go new file mode 100644 index 0000000..a63f09b --- /dev/null +++ b/devops/sources/source_test.go @@ -0,0 +1,35 @@ +package sources + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSourceConfig_Empty(t *testing.T) { + cfg := SourceConfig{} + assert.Empty(t, cfg.GitHubRepo) + assert.Empty(t, cfg.RegistryImage) + assert.Empty(t, cfg.CDNURL) + assert.Empty(t, cfg.ImageName) +} + +func TestSourceConfig_Complete(t *testing.T) { + cfg := SourceConfig{ + GitHubRepo: "owner/repo", + RegistryImage: "ghcr.io/owner/image:v1", + CDNURL: "https://cdn.example.com/images", + ImageName: "my-image-darwin-arm64.qcow2", + } + + assert.Equal(t, "owner/repo", cfg.GitHubRepo) + assert.Equal(t, "ghcr.io/owner/image:v1", cfg.RegistryImage) + assert.Equal(t, "https://cdn.example.com/images", cfg.CDNURL) + assert.Equal(t, "my-image-darwin-arm64.qcow2", cfg.ImageName) +} + +func TestImageSource_Interface(t *testing.T) { + // Ensure both sources implement the interface + var _ ImageSource = (*GitHubSource)(nil) + var _ ImageSource = (*CDNSource)(nil) +} diff --git a/devops/ssh_utils.go b/devops/ssh_utils.go new file mode 100644 index 0000000..d05902b --- /dev/null +++ b/devops/ssh_utils.go @@ -0,0 +1,68 @@ +package devops + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" +) + +// ensureHostKey ensures that the host key for the dev environment is in the known hosts file. +// This is used after boot to allow StrictHostKeyChecking=yes to work. 
+func ensureHostKey(ctx context.Context, port int) error { + // Skip if requested (used in tests) + if os.Getenv("CORE_SKIP_SSH_SCAN") == "true" { + return nil + } + + home, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("get home dir: %w", err) + } + + knownHostsPath := filepath.Join(home, ".core", "known_hosts") + + // Ensure directory exists + if err := os.MkdirAll(filepath.Dir(knownHostsPath), 0755); err != nil { + return fmt.Errorf("create known_hosts dir: %w", err) + } + + // Get host key using ssh-keyscan + cmd := exec.CommandContext(ctx, "ssh-keyscan", "-p", fmt.Sprintf("%d", port), "localhost") + out, err := cmd.Output() + if err != nil { + return fmt.Errorf("ssh-keyscan failed: %w", err) + } + + if len(out) == 0 { + return fmt.Errorf("ssh-keyscan returned no keys") + } + + // Read existing known_hosts to avoid duplicates + existing, _ := os.ReadFile(knownHostsPath) + existingStr := string(existing) + + // Append new keys that aren't already there + f, err := os.OpenFile(knownHostsPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return fmt.Errorf("open known_hosts: %w", err) + } + defer f.Close() + + lines := strings.Split(string(out), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + if !strings.Contains(existingStr, line) { + if _, err := f.WriteString(line + "\n"); err != nil { + return fmt.Errorf("write known_hosts: %w", err) + } + } + } + + return nil +} diff --git a/devops/test.go b/devops/test.go new file mode 100644 index 0000000..430579c --- /dev/null +++ b/devops/test.go @@ -0,0 +1,188 @@ +package devops + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go/pkg/io" + "gopkg.in/yaml.v3" +) + +// TestConfig holds test configuration from .core/test.yaml. 
+type TestConfig struct { + Version int `yaml:"version"` + Command string `yaml:"command,omitempty"` + Commands []TestCommand `yaml:"commands,omitempty"` + Env map[string]string `yaml:"env,omitempty"` +} + +// TestCommand is a named test command. +type TestCommand struct { + Name string `yaml:"name"` + Run string `yaml:"run"` +} + +// TestOptions configures test execution. +type TestOptions struct { + Name string // Run specific named command from .core/test.yaml + Command []string // Override command (from -- args) +} + +// Test runs tests in the dev environment. +func (d *DevOps) Test(ctx context.Context, projectDir string, opts TestOptions) error { + running, err := d.IsRunning(ctx) + if err != nil { + return err + } + if !running { + return fmt.Errorf("dev environment not running (run 'core dev boot' first)") + } + + var cmd string + + // Priority: explicit command > named command > auto-detect + if len(opts.Command) > 0 { + cmd = strings.Join(opts.Command, " ") + } else if opts.Name != "" { + cfg, err := LoadTestConfig(d.medium, projectDir) + if err != nil { + return err + } + for _, c := range cfg.Commands { + if c.Name == opts.Name { + cmd = c.Run + break + } + } + if cmd == "" { + return fmt.Errorf("test command %q not found in .core/test.yaml", opts.Name) + } + } else { + cmd = DetectTestCommand(d.medium, projectDir) + if cmd == "" { + return fmt.Errorf("could not detect test command (create .core/test.yaml)") + } + } + + // Run via SSH - construct command as single string for shell execution + return d.sshShell(ctx, []string{"cd", "/app", "&&", cmd}) +} + +// DetectTestCommand auto-detects the test command for a project. +func DetectTestCommand(m io.Medium, projectDir string) string { + // 1. Check .core/test.yaml + cfg, err := LoadTestConfig(m, projectDir) + if err == nil && cfg.Command != "" { + return cfg.Command + } + + // 2. 
Check composer.json for test script + if hasFile(m, projectDir, "composer.json") { + if hasComposerScript(m, projectDir, "test") { + return "composer test" + } + } + + // 3. Check package.json for test script + if hasFile(m, projectDir, "package.json") { + if hasPackageScript(m, projectDir, "test") { + return "npm test" + } + } + + // 4. Check go.mod + if hasFile(m, projectDir, "go.mod") { + return "go test ./..." + } + + // 5. Check pytest + if hasFile(m, projectDir, "pytest.ini") || hasFile(m, projectDir, "pyproject.toml") { + return "pytest" + } + + // 6. Check Taskfile + if hasFile(m, projectDir, "Taskfile.yaml") || hasFile(m, projectDir, "Taskfile.yml") { + return "task test" + } + + return "" +} + +// LoadTestConfig loads .core/test.yaml. +func LoadTestConfig(m io.Medium, projectDir string) (*TestConfig, error) { + path := filepath.Join(projectDir, ".core", "test.yaml") + absPath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + + content, err := m.Read(absPath) + if err != nil { + return nil, err + } + + var cfg TestConfig + if err := yaml.Unmarshal([]byte(content), &cfg); err != nil { + return nil, err + } + + return &cfg, nil +} + +func hasFile(m io.Medium, dir, name string) bool { + path := filepath.Join(dir, name) + absPath, err := filepath.Abs(path) + if err != nil { + return false + } + return m.IsFile(absPath) +} + +func hasPackageScript(m io.Medium, projectDir, script string) bool { + path := filepath.Join(projectDir, "package.json") + absPath, err := filepath.Abs(path) + if err != nil { + return false + } + + content, err := m.Read(absPath) + if err != nil { + return false + } + + var pkg struct { + Scripts map[string]string `json:"scripts"` + } + if err := json.Unmarshal([]byte(content), &pkg); err != nil { + return false + } + + _, ok := pkg.Scripts[script] + return ok +} + +func hasComposerScript(m io.Medium, projectDir, script string) bool { + path := filepath.Join(projectDir, "composer.json") + absPath, err := 
filepath.Abs(path) + if err != nil { + return false + } + + content, err := m.Read(absPath) + if err != nil { + return false + } + + var pkg struct { + Scripts map[string]interface{} `json:"scripts"` + } + if err := json.Unmarshal([]byte(content), &pkg); err != nil { + return false + } + + _, ok := pkg.Scripts[script] + return ok +} diff --git a/devops/test_test.go b/devops/test_test.go new file mode 100644 index 0000000..bcea686 --- /dev/null +++ b/devops/test_test.go @@ -0,0 +1,354 @@ +package devops + +import ( + "os" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go/pkg/io" +) + +func TestDetectTestCommand_Good_ComposerJSON(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"scripts":{"test":"pest"}}`), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "composer test" { + t.Errorf("expected 'composer test', got %q", cmd) + } +} + +func TestDetectTestCommand_Good_PackageJSON(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"scripts":{"test":"vitest"}}`), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "npm test" { + t.Errorf("expected 'npm test', got %q", cmd) + } +} + +func TestDetectTestCommand_Good_GoMod(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module example"), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "go test ./..." 
{ + t.Errorf("expected 'go test ./...', got %q", cmd) + } +} + +func TestDetectTestCommand_Good_CoreTestYaml(t *testing.T) { + tmpDir := t.TempDir() + coreDir := filepath.Join(tmpDir, ".core") + _ = os.MkdirAll(coreDir, 0755) + _ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte("command: custom-test"), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "custom-test" { + t.Errorf("expected 'custom-test', got %q", cmd) + } +} + +func TestDetectTestCommand_Good_Pytest(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "pytest.ini"), []byte("[pytest]"), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "pytest" { + t.Errorf("expected 'pytest', got %q", cmd) + } +} + +func TestDetectTestCommand_Good_Taskfile(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "Taskfile.yaml"), []byte("version: '3'"), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "task test" { + t.Errorf("expected 'task test', got %q", cmd) + } +} + +func TestDetectTestCommand_Bad_NoFiles(t *testing.T) { + tmpDir := t.TempDir() + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "" { + t.Errorf("expected empty string, got %q", cmd) + } +} + +func TestDetectTestCommand_Good_Priority(t *testing.T) { + // .core/test.yaml should take priority over other detection methods + tmpDir := t.TempDir() + coreDir := filepath.Join(tmpDir, ".core") + _ = os.MkdirAll(coreDir, 0755) + _ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte("command: my-custom-test"), 0644) + _ = os.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module example"), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "my-custom-test" { + t.Errorf("expected 'my-custom-test' (from .core/test.yaml), got %q", cmd) + } +} + +func TestLoadTestConfig_Good(t *testing.T) { + tmpDir := t.TempDir() + coreDir := filepath.Join(tmpDir, ".core") + _ = os.MkdirAll(coreDir, 0755) + + configYAML := `version: 1 
+command: default-test +commands: + - name: unit + run: go test ./... + - name: integration + run: go test -tags=integration ./... +env: + CI: "true" +` + _ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte(configYAML), 0644) + + cfg, err := LoadTestConfig(io.Local, tmpDir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if cfg.Version != 1 { + t.Errorf("expected version 1, got %d", cfg.Version) + } + if cfg.Command != "default-test" { + t.Errorf("expected command 'default-test', got %q", cfg.Command) + } + if len(cfg.Commands) != 2 { + t.Errorf("expected 2 commands, got %d", len(cfg.Commands)) + } + if cfg.Commands[0].Name != "unit" { + t.Errorf("expected first command name 'unit', got %q", cfg.Commands[0].Name) + } + if cfg.Env["CI"] != "true" { + t.Errorf("expected env CI='true', got %q", cfg.Env["CI"]) + } +} + +func TestLoadTestConfig_Bad_NotFound(t *testing.T) { + tmpDir := t.TempDir() + + _, err := LoadTestConfig(io.Local, tmpDir) + if err == nil { + t.Error("expected error for missing config, got nil") + } +} + +func TestHasPackageScript_Good(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"scripts":{"test":"jest","build":"webpack"}}`), 0644) + + if !hasPackageScript(io.Local, tmpDir, "test") { + t.Error("expected to find 'test' script") + } + if !hasPackageScript(io.Local, tmpDir, "build") { + t.Error("expected to find 'build' script") + } +} + +func TestHasPackageScript_Bad_MissingScript(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"scripts":{"build":"webpack"}}`), 0644) + + if hasPackageScript(io.Local, tmpDir, "test") { + t.Error("expected not to find 'test' script") + } +} + +func TestHasComposerScript_Good(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"scripts":{"test":"pest","post-install-cmd":"@php artisan migrate"}}`), 0644) + + if 
!hasComposerScript(io.Local, tmpDir, "test") { + t.Error("expected to find 'test' script") + } +} + +func TestHasComposerScript_Bad_MissingScript(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"scripts":{"build":"@php build.php"}}`), 0644) + + if hasComposerScript(io.Local, tmpDir, "test") { + t.Error("expected not to find 'test' script") + } +} + +func TestTestConfig_Struct(t *testing.T) { + cfg := &TestConfig{ + Version: 2, + Command: "my-test", + Commands: []TestCommand{{Name: "unit", Run: "go test ./..."}}, + Env: map[string]string{"CI": "true"}, + } + if cfg.Version != 2 { + t.Errorf("expected version 2, got %d", cfg.Version) + } + if cfg.Command != "my-test" { + t.Errorf("expected command 'my-test', got %q", cfg.Command) + } + if len(cfg.Commands) != 1 { + t.Errorf("expected 1 command, got %d", len(cfg.Commands)) + } + if cfg.Env["CI"] != "true" { + t.Errorf("expected CI=true, got %q", cfg.Env["CI"]) + } +} + +func TestTestCommand_Struct(t *testing.T) { + cmd := TestCommand{ + Name: "integration", + Run: "go test -tags=integration ./...", + } + if cmd.Name != "integration" { + t.Errorf("expected name 'integration', got %q", cmd.Name) + } + if cmd.Run != "go test -tags=integration ./..." 
{ + t.Errorf("expected run command, got %q", cmd.Run) + } +} + +func TestTestOptions_Struct(t *testing.T) { + opts := TestOptions{ + Name: "unit", + Command: []string{"go", "test", "-v"}, + } + if opts.Name != "unit" { + t.Errorf("expected name 'unit', got %q", opts.Name) + } + if len(opts.Command) != 3 { + t.Errorf("expected 3 command parts, got %d", len(opts.Command)) + } +} + +func TestDetectTestCommand_Good_TaskfileYml(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "Taskfile.yml"), []byte("version: '3'"), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "task test" { + t.Errorf("expected 'task test', got %q", cmd) + } +} + +func TestDetectTestCommand_Good_Pyproject(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "pyproject.toml"), []byte("[tool.pytest]"), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + if cmd != "pytest" { + t.Errorf("expected 'pytest', got %q", cmd) + } +} + +func TestHasPackageScript_Bad_NoFile(t *testing.T) { + tmpDir := t.TempDir() + + if hasPackageScript(io.Local, tmpDir, "test") { + t.Error("expected false for missing package.json") + } +} + +func TestHasPackageScript_Bad_InvalidJSON(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`invalid json`), 0644) + + if hasPackageScript(io.Local, tmpDir, "test") { + t.Error("expected false for invalid JSON") + } +} + +func TestHasPackageScript_Bad_NoScripts(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"name":"test"}`), 0644) + + if hasPackageScript(io.Local, tmpDir, "test") { + t.Error("expected false for missing scripts section") + } +} + +func TestHasComposerScript_Bad_NoFile(t *testing.T) { + tmpDir := t.TempDir() + + if hasComposerScript(io.Local, tmpDir, "test") { + t.Error("expected false for missing composer.json") + } +} + +func TestHasComposerScript_Bad_InvalidJSON(t *testing.T) { + 
tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`invalid json`), 0644) + + if hasComposerScript(io.Local, tmpDir, "test") { + t.Error("expected false for invalid JSON") + } +} + +func TestHasComposerScript_Bad_NoScripts(t *testing.T) { + tmpDir := t.TempDir() + _ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"name":"test/pkg"}`), 0644) + + if hasComposerScript(io.Local, tmpDir, "test") { + t.Error("expected false for missing scripts section") + } +} + +func TestLoadTestConfig_Bad_InvalidYAML(t *testing.T) { + tmpDir := t.TempDir() + coreDir := filepath.Join(tmpDir, ".core") + _ = os.MkdirAll(coreDir, 0755) + _ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte("invalid: yaml: :"), 0644) + + _, err := LoadTestConfig(io.Local, tmpDir) + if err == nil { + t.Error("expected error for invalid YAML") + } +} + +func TestLoadTestConfig_Good_MinimalConfig(t *testing.T) { + tmpDir := t.TempDir() + coreDir := filepath.Join(tmpDir, ".core") + _ = os.MkdirAll(coreDir, 0755) + _ = os.WriteFile(filepath.Join(coreDir, "test.yaml"), []byte("version: 1"), 0644) + + cfg, err := LoadTestConfig(io.Local, tmpDir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Version != 1 { + t.Errorf("expected version 1, got %d", cfg.Version) + } + if cfg.Command != "" { + t.Errorf("expected empty command, got %q", cfg.Command) + } +} + +func TestDetectTestCommand_Good_ComposerWithoutScript(t *testing.T) { + tmpDir := t.TempDir() + // composer.json without test script should not return composer test + _ = os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{"name":"test/pkg"}`), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + // Falls through to empty (no match) + if cmd != "" { + t.Errorf("expected empty string, got %q", cmd) + } +} + +func TestDetectTestCommand_Good_PackageJSONWithoutScript(t *testing.T) { + tmpDir := t.TempDir() + // package.json without test or dev script + _ = 
os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(`{"name":"test"}`), 0644) + + cmd := DetectTestCommand(io.Local, tmpDir) + // Falls through to empty + if cmd != "" { + t.Errorf("expected empty string, got %q", cmd) + } +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..4a2bac1 --- /dev/null +++ b/go.mod @@ -0,0 +1,62 @@ +module forge.lthn.ai/core/go-devops + +go 1.25.5 + +require ( + forge.lthn.ai/core/go v0.0.0 + github.com/Snider/Borg v0.2.0 + github.com/getkin/kin-openapi v0.133.0 + github.com/kluctl/go-embed-python v0.0.0-3.13.1-20241219-1 + github.com/leaanthony/debme v1.2.1 + github.com/leaanthony/gosod v1.0.4 + github.com/oasdiff/oasdiff v1.11.10 + github.com/spf13/cobra v1.10.2 + github.com/stretchr/testify v1.11.1 + golang.org/x/crypto v0.48.0 + golang.org/x/net v0.50.0 + golang.org/x/text v0.34.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + cloud.google.com/go v0.123.0 // indirect + github.com/ProtonMail/go-crypto v1.3.0 // indirect + github.com/TwiN/go-color v1.4.1 // indirect + github.com/cloudflare/circl v1.6.3 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-openapi/jsonpointer v0.22.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-viper/mapstructure/v2 v2.5.0 // indirect + github.com/gofrs/flock v0.12.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/mailru/easyjson v0.9.1 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect + github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + 
github.com/sagikazarmark/locafero v0.12.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.21.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.2.0 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect + github.com/wI2L/jsondiff v0.7.0 // indirect + github.com/woodsbury/decimal128 v1.4.0 // indirect + github.com/yargevad/filepathx v1.0.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.41.0 // indirect + golang.org/x/term v0.40.0 // indirect +) + +replace forge.lthn.ai/core/go => ../go diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..c318ccf --- /dev/null +++ b/go.sum @@ -0,0 +1,140 @@ +cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= +cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= +github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= +github.com/Snider/Borg v0.2.0 h1:iCyDhY4WTXi39+FexRwXbn2YpZ2U9FUXVXDZk9xRCXQ= +github.com/Snider/Borg v0.2.0/go.mod h1:TqlKnfRo9okioHbgrZPfWjQsztBV0Nfskz4Om1/vdMY= +github.com/TwiN/go-color v1.4.1 h1:mqG0P/KBgHKVqmtL5ye7K0/Gr4l6hTksPgTgMk3mUzc= +github.com/TwiN/go-color v1.4.1/go.mod h1:WcPf/jtiW95WBIsEeY1Lc/b8aaWoiqQpu5cf8WFxu+s= +github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= 
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ= +github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE= +github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= +github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod 
h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/kluctl/go-embed-python v0.0.0-3.13.1-20241219-1 h1:x1cSEj4Ug5mpuZgUHLvUmlc5r//KHFn6iYiRSrRcVy4= +github.com/kluctl/go-embed-python v0.0.0-3.13.1-20241219-1/go.mod h1:3ebNU9QBrNpUO+Hj6bHaGpkh5pymDHQ+wwVPHTE4mCE= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leaanthony/debme v1.2.1 h1:9Tgwf+kjcrbMQ4WnPcEIUcQuIZYqdWftzZkBr+i/oOc= +github.com/leaanthony/debme v1.2.1/go.mod h1:3V+sCm5tYAgQymvSOfYQ5Xx2JCr+OXiD9Jkw3otUjiA= +github.com/leaanthony/gosod v1.0.4 h1:YLAbVyd591MRffDgxUOU1NwLhT9T1/YiwjKZpkNFeaI= +github.com/leaanthony/gosod v1.0.4/go.mod h1:GKuIL0zzPj3O1SdWQOdgURSuhkF+Urizzxh26t9f1cw= +github.com/leaanthony/slicer v1.5.0/go.mod h1:FwrApmf8gOrpzEWM2J/9Lh79tyq8KTX5AzRtwV7m4AY= +github.com/leaanthony/slicer v1.6.0 h1:1RFP5uiPJvT93TAHi+ipd3NACobkW53yUiBqZheE/Js= +github.com/leaanthony/slicer v1.6.0/go.mod h1:o/Iz29g7LN0GqH3aMjWAe90381nyZlDNquK+mtH2Fj8= +github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8= 
+github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/matryer/is v1.4.1 h1:55ehd8zaGABKLXQUe2awZ99BD/PTc2ls+KV/dXphgEQ= +github.com/matryer/is v1.4.1/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/oasdiff/oasdiff v1.11.10 h1:4I9VrktUoHmwydkJqVOC7Bd6BXKu9dc4UUP3PIu1VjM= +github.com/oasdiff/oasdiff v1.11.10/go.mod h1:GXARzmqBKN8lZHsTQD35ZM41ePbu6JdAZza4sRMeEKg= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= +github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= +github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= 
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= +github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod 
h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/match v1.2.0 h1:0pt8FlkOwjN2fPt4bIl4BoNxb98gGHN2ObFEDkrfZnM= +github.com/tidwall/match v1.2.0/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY= +github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/wI2L/jsondiff v0.7.0 h1:1lH1G37GhBPqCfp/lrs91rf/2j3DktX6qYAKZkLuCQQ= +github.com/wI2L/jsondiff v0.7.0/go.mod h1:KAEIojdQq66oJiHhDyQez2x+sRit0vIzC9KeK0yizxM= +github.com/woodsbury/decimal128 v1.4.0 h1:xJATj7lLu4f2oObouMt2tgGiElE5gO6mSWUjQsBgUlc= +github.com/woodsbury/decimal128 v1.4.0/go.mod h1:BP46FUrVjVhdTbKT+XuQh2xfQaGki9LMIRJSFuh6THU= +github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= +github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod 
h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= +golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/infra/cloudns.go b/infra/cloudns.go new file mode 100644 index 0000000..dd419fe --- /dev/null +++ b/infra/cloudns.go @@ -0,0 +1,272 @@ +package infra + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" +) + +const cloudnsBaseURL = "https://api.cloudns.net" 
+ +// CloudNSClient is an HTTP client for the CloudNS DNS API. +type CloudNSClient struct { + authID string + password string + client *http.Client +} + +// NewCloudNSClient creates a new CloudNS API client. +// Uses sub-auth-user (auth-id) authentication. +func NewCloudNSClient(authID, password string) *CloudNSClient { + return &CloudNSClient{ + authID: authID, + password: password, + client: &http.Client{ + Timeout: 30 * time.Second, + }, + } +} + +// CloudNSZone represents a DNS zone. +type CloudNSZone struct { + Name string `json:"name"` + Type string `json:"type"` + Zone string `json:"zone"` + Status string `json:"status"` +} + +// CloudNSRecord represents a DNS record. +type CloudNSRecord struct { + ID string `json:"id"` + Type string `json:"type"` + Host string `json:"host"` + Record string `json:"record"` + TTL string `json:"ttl"` + Priority string `json:"priority,omitempty"` + Status int `json:"status"` +} + +// ListZones returns all DNS zones. +func (c *CloudNSClient) ListZones(ctx context.Context) ([]CloudNSZone, error) { + params := c.authParams() + params.Set("page", "1") + params.Set("rows-per-page", "100") + params.Set("search", "") + + data, err := c.get(ctx, "/dns/list-zones.json", params) + if err != nil { + return nil, err + } + + var zones []CloudNSZone + if err := json.Unmarshal(data, &zones); err != nil { + // CloudNS returns an empty object {} for no results instead of [] + return nil, nil + } + return zones, nil +} + +// ListRecords returns all DNS records for a zone. 
+func (c *CloudNSClient) ListRecords(ctx context.Context, domain string) (map[string]CloudNSRecord, error) { + params := c.authParams() + params.Set("domain-name", domain) + + data, err := c.get(ctx, "/dns/records.json", params) + if err != nil { + return nil, err + } + + var records map[string]CloudNSRecord + if err := json.Unmarshal(data, &records); err != nil { + return nil, fmt.Errorf("parse records: %w", err) + } + return records, nil +} + +// CreateRecord creates a DNS record. Returns the record ID. +func (c *CloudNSClient) CreateRecord(ctx context.Context, domain, host, recordType, value string, ttl int) (string, error) { + params := c.authParams() + params.Set("domain-name", domain) + params.Set("host", host) + params.Set("record-type", recordType) + params.Set("record", value) + params.Set("ttl", strconv.Itoa(ttl)) + + data, err := c.post(ctx, "/dns/add-record.json", params) + if err != nil { + return "", err + } + + var result struct { + Status string `json:"status"` + StatusDescription string `json:"statusDescription"` + Data struct { + ID int `json:"id"` + } `json:"data"` + } + if err := json.Unmarshal(data, &result); err != nil { + return "", fmt.Errorf("parse response: %w", err) + } + + if result.Status != "Success" { + return "", fmt.Errorf("cloudns: %s", result.StatusDescription) + } + + return strconv.Itoa(result.Data.ID), nil +} + +// UpdateRecord updates an existing DNS record. 
+func (c *CloudNSClient) UpdateRecord(ctx context.Context, domain, recordID, host, recordType, value string, ttl int) error { + params := c.authParams() + params.Set("domain-name", domain) + params.Set("record-id", recordID) + params.Set("host", host) + params.Set("record-type", recordType) + params.Set("record", value) + params.Set("ttl", strconv.Itoa(ttl)) + + data, err := c.post(ctx, "/dns/mod-record.json", params) + if err != nil { + return err + } + + var result struct { + Status string `json:"status"` + StatusDescription string `json:"statusDescription"` + } + if err := json.Unmarshal(data, &result); err != nil { + return fmt.Errorf("parse response: %w", err) + } + + if result.Status != "Success" { + return fmt.Errorf("cloudns: %s", result.StatusDescription) + } + + return nil +} + +// DeleteRecord deletes a DNS record by ID. +func (c *CloudNSClient) DeleteRecord(ctx context.Context, domain, recordID string) error { + params := c.authParams() + params.Set("domain-name", domain) + params.Set("record-id", recordID) + + data, err := c.post(ctx, "/dns/delete-record.json", params) + if err != nil { + return err + } + + var result struct { + Status string `json:"status"` + StatusDescription string `json:"statusDescription"` + } + if err := json.Unmarshal(data, &result); err != nil { + return fmt.Errorf("parse response: %w", err) + } + + if result.Status != "Success" { + return fmt.Errorf("cloudns: %s", result.StatusDescription) + } + + return nil +} + +// EnsureRecord creates or updates a DNS record to match the desired state. +// Returns true if a change was made. 
+func (c *CloudNSClient) EnsureRecord(ctx context.Context, domain, host, recordType, value string, ttl int) (bool, error) { + records, err := c.ListRecords(ctx, domain) + if err != nil { + return false, fmt.Errorf("list records: %w", err) + } + + // Check if record already exists + for id, r := range records { + if r.Host == host && r.Type == recordType { + if r.Record == value { + return false, nil // Already correct + } + // Update existing record + if err := c.UpdateRecord(ctx, domain, id, host, recordType, value, ttl); err != nil { + return false, fmt.Errorf("update record: %w", err) + } + return true, nil + } + } + + // Create new record + if _, err := c.CreateRecord(ctx, domain, host, recordType, value, ttl); err != nil { + return false, fmt.Errorf("create record: %w", err) + } + return true, nil +} + +// SetACMEChallenge creates a DNS-01 ACME challenge TXT record. +func (c *CloudNSClient) SetACMEChallenge(ctx context.Context, domain, value string) (string, error) { + return c.CreateRecord(ctx, domain, "_acme-challenge", "TXT", value, 60) +} + +// ClearACMEChallenge removes the DNS-01 ACME challenge TXT record. +func (c *CloudNSClient) ClearACMEChallenge(ctx context.Context, domain string) error { + records, err := c.ListRecords(ctx, domain) + if err != nil { + return err + } + + for id, r := range records { + if r.Host == "_acme-challenge" && r.Type == "TXT" { + if err := c.DeleteRecord(ctx, domain, id); err != nil { + return err + } + } + } + return nil +} + +func (c *CloudNSClient) authParams() url.Values { + params := url.Values{} + params.Set("auth-id", c.authID) + params.Set("auth-password", c.password) + return params +} + +func (c *CloudNSClient) get(ctx context.Context, path string, params url.Values) ([]byte, error) { + u := cloudnsBaseURL + path + "?" 
+ params.Encode() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + return nil, err + } + return c.doRaw(req) +} + +func (c *CloudNSClient) post(ctx context.Context, path string, params url.Values) ([]byte, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, cloudnsBaseURL+path, nil) + if err != nil { + return nil, err + } + req.URL.RawQuery = params.Encode() + return c.doRaw(req) +} + +func (c *CloudNSClient) doRaw(req *http.Request) ([]byte, error) { + resp, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("cloudns API: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read response: %w", err) + } + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("cloudns API %d: %s", resp.StatusCode, string(data)) + } + + return data, nil +} diff --git a/infra/config.go b/infra/config.go new file mode 100644 index 0000000..ec78108 --- /dev/null +++ b/infra/config.go @@ -0,0 +1,300 @@ +// Package infra provides infrastructure configuration and API clients +// for managing the Host UK production environment. +package infra + +import ( + "fmt" + "os" + "path/filepath" + + "gopkg.in/yaml.v3" +) + +// Config is the top-level infrastructure configuration parsed from infra.yaml. +type Config struct { + Hosts map[string]*Host `yaml:"hosts"` + LoadBalancer LoadBalancer `yaml:"load_balancer"` + Network Network `yaml:"network"` + DNS DNS `yaml:"dns"` + SSL SSL `yaml:"ssl"` + Database Database `yaml:"database"` + Cache Cache `yaml:"cache"` + Containers map[string]*Container `yaml:"containers"` + S3 S3Config `yaml:"s3"` + CDN CDN `yaml:"cdn"` + CICD CICD `yaml:"cicd"` + Monitoring Monitoring `yaml:"monitoring"` + Backups Backups `yaml:"backups"` +} + +// Host represents a server in the infrastructure. 
type Host struct {
	FQDN      string   `yaml:"fqdn"`
	IP        string   `yaml:"ip"`
	PrivateIP string   `yaml:"private_ip,omitempty"`
	Type      string   `yaml:"type"` // hcloud, hrobot
	Role      string   `yaml:"role"` // bastion, app, builder
	SSH       SSHConf  `yaml:"ssh"`
	Services  []string `yaml:"services"`
}

// SSHConf holds SSH connection details for a host.
type SSHConf struct {
	User string `yaml:"user"`
	Key  string `yaml:"key"`  // private-key path; a leading "~" is expanded by Load
	Port int    `yaml:"port"` // 0 (unset) is normalized to 22 by Load
}

// LoadBalancer represents a Hetzner managed load balancer.
type LoadBalancer struct {
	Name      string      `yaml:"name"`
	FQDN      string      `yaml:"fqdn"`
	Provider  string      `yaml:"provider"`
	Type      string      `yaml:"type"`
	Location  string      `yaml:"location"`
	Algorithm string      `yaml:"algorithm"`
	Backends  []Backend   `yaml:"backends"`
	Health    HealthCheck `yaml:"health_check"`
	Listeners []Listener  `yaml:"listeners"`
	SSL       LBCert      `yaml:"ssl"`
}

// Backend is a load balancer backend target.
type Backend struct {
	Host string `yaml:"host"`
	Port int    `yaml:"port"`
}

// HealthCheck configures load balancer health checking.
type HealthCheck struct {
	Protocol string `yaml:"protocol"`
	Path     string `yaml:"path"`
	Interval int    `yaml:"interval"` // probe interval; presumably seconds — confirm against provider docs
}

// Listener maps a frontend port to a backend port.
type Listener struct {
	Frontend      int    `yaml:"frontend"`
	Backend       int    `yaml:"backend"`
	Protocol      string `yaml:"protocol"`
	ProxyProtocol bool   `yaml:"proxy_protocol"` // pass client address via PROXY protocol
}

// LBCert holds the SSL certificate configuration for the load balancer.
type LBCert struct {
	Certificate string   `yaml:"certificate"`
	SAN         []string `yaml:"san"` // subject alternative names the certificate covers
}

// Network describes the private network.
type Network struct {
	CIDR string `yaml:"cidr"`
	Name string `yaml:"name"`
}

// DNS holds DNS provider configuration and zone records.
type DNS struct {
	Provider    string           `yaml:"provider"`
	Nameservers []string         `yaml:"nameservers"`
	Zones       map[string]*Zone `yaml:"zones"` // keyed by zone (domain) name
}

// Zone is a DNS zone with its records.
type Zone struct {
	Records []DNSRecord `yaml:"records"`
}

// DNSRecord is a single DNS record.
type DNSRecord struct {
	Name  string `yaml:"name"`
	Type  string `yaml:"type"`
	Value string `yaml:"value"`
	TTL   int    `yaml:"ttl"` // time to live; presumably seconds — confirm with DNS provider
}

// SSL holds SSL certificate configuration.
type SSL struct {
	Wildcard WildcardCert `yaml:"wildcard"`
}

// WildcardCert describes a wildcard SSL certificate.
type WildcardCert struct {
	Domains     []string `yaml:"domains"`
	Method      string   `yaml:"method"`
	DNSProvider string   `yaml:"dns_provider"`
	Termination string   `yaml:"termination"`
}

// Database describes the database cluster.
type Database struct {
	Engine    string       `yaml:"engine"`
	Version   string       `yaml:"version"`
	Cluster   string       `yaml:"cluster"`
	Nodes     []DBNode     `yaml:"nodes"`
	SSTMethod string       `yaml:"sst_method"` // state snapshot transfer method (Galera-style clusters) — confirm
	Backup    BackupConfig `yaml:"backup"`
}

// DBNode is a database cluster node.
type DBNode struct {
	Host string `yaml:"host"`
	Port int    `yaml:"port"`
}

// BackupConfig describes automated backup settings.
type BackupConfig struct {
	Schedule    string `yaml:"schedule"` // schedule expression; format (cron?) not enforced here — confirm with consumer
	Destination string `yaml:"destination"`
	Bucket      string `yaml:"bucket"`
	Prefix      string `yaml:"prefix"`
}

// Cache describes the cache/session cluster.
type Cache struct {
	Engine   string      `yaml:"engine"`
	Version  string      `yaml:"version"`
	Sentinel bool        `yaml:"sentinel"` // whether a sentinel/failover arrangement is in use
	Nodes    []CacheNode `yaml:"nodes"`
}

// CacheNode is a cache cluster node.
type CacheNode struct {
	Host string `yaml:"host"`
	Port int    `yaml:"port"`
}

// Container describes a container deployment.
type Container struct {
	Image     string   `yaml:"image"`
	Port      int      `yaml:"port,omitempty"`
	Runtime   string   `yaml:"runtime,omitempty"`
	Command   string   `yaml:"command,omitempty"`
	Replicas  int      `yaml:"replicas,omitempty"`
	DependsOn []string `yaml:"depends_on,omitempty"` // names of containers that must start first
}

// S3Config describes object storage.
type S3Config struct {
	Endpoint string               `yaml:"endpoint"`
	Buckets  map[string]*S3Bucket `yaml:"buckets"` // keyed by bucket name
}

// S3Bucket is an S3 bucket configuration.
type S3Bucket struct {
	Purpose string   `yaml:"purpose"`
	Paths   []string `yaml:"paths"`
}

// CDN describes CDN configuration.
type CDN struct {
	Provider string   `yaml:"provider"`
	Origin   string   `yaml:"origin"`
	Zones    []string `yaml:"zones"`
}

// CICD describes CI/CD configuration.
type CICD struct {
	Provider   string `yaml:"provider"`
	URL        string `yaml:"url"`
	Runner     string `yaml:"runner"`
	Registry   string `yaml:"registry"`
	DeployHook string `yaml:"deploy_hook"`
}

// Monitoring describes monitoring configuration.
type Monitoring struct {
	HealthEndpoints []HealthEndpoint `yaml:"health_endpoints"`
	Alerts          map[string]int   `yaml:"alerts"` // alert name -> threshold; semantics defined by the consumer — confirm
}

// HealthEndpoint is a URL to monitor.
type HealthEndpoint struct {
	URL      string `yaml:"url"`
	Interval int    `yaml:"interval"` // polling interval; presumably seconds — confirm with monitor
}

// Backups describes backup schedules.
type Backups struct {
	Daily  []BackupJob `yaml:"daily"`
	Weekly []BackupJob `yaml:"weekly"`
}

// BackupJob is a scheduled backup task.
type BackupJob struct {
	Name        string   `yaml:"name"`
	Type        string   `yaml:"type"`
	Destination string   `yaml:"destination,omitempty"`
	Hosts       []string `yaml:"hosts,omitempty"`
}

// Load reads and parses an infra.yaml file.
//
// After parsing, it normalizes each host's SSH settings in place: a key path
// with a leading "~" is expanded to the user's home directory, and an unset
// port (0) defaults to 22.
func Load(path string) (*Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("read infra config: %w", err)
	}

	var cfg Config
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		return nil, fmt.Errorf("parse infra config: %w", err)
	}

	// Expand SSH key paths and apply the SSH port default.
	for _, h := range cfg.Hosts {
		if h.SSH.Key != "" {
			h.SSH.Key = expandPath(h.SSH.Key)
		}
		if h.SSH.Port == 0 {
			h.SSH.Port = 22
		}
	}

	return &cfg, nil
}

// Discover searches for infra.yaml in the given directory and parent directories.
+func Discover(startDir string) (*Config, string, error) { + dir := startDir + for { + path := filepath.Join(dir, "infra.yaml") + if _, err := os.Stat(path); err == nil { + cfg, err := Load(path) + return cfg, path, err + } + + parent := filepath.Dir(dir) + if parent == dir { + break + } + dir = parent + } + return nil, "", fmt.Errorf("infra.yaml not found (searched from %s)", startDir) +} + +// HostsByRole returns all hosts matching the given role. +func (c *Config) HostsByRole(role string) map[string]*Host { + result := make(map[string]*Host) + for name, h := range c.Hosts { + if h.Role == role { + result[name] = h + } + } + return result +} + +// AppServers returns hosts with role "app". +func (c *Config) AppServers() map[string]*Host { + return c.HostsByRole("app") +} + +// expandPath expands ~ to home directory. +func expandPath(path string) string { + if len(path) > 0 && path[0] == '~' { + home, err := os.UserHomeDir() + if err != nil { + return path + } + return filepath.Join(home, path[1:]) + } + return path +} diff --git a/infra/config_test.go b/infra/config_test.go new file mode 100644 index 0000000..1ec8b59 --- /dev/null +++ b/infra/config_test.go @@ -0,0 +1,100 @@ +package infra + +import ( + "os" + "path/filepath" + "testing" +) + +func TestLoad_Good(t *testing.T) { + // Find infra.yaml relative to test + // Walk up from test dir to find it + dir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + cfg, path, err := Discover(dir) + if err != nil { + t.Skipf("infra.yaml not found from %s: %v", dir, err) + } + + t.Logf("Loaded %s", path) + + if len(cfg.Hosts) == 0 { + t.Error("expected at least one host") + } + + // Check required hosts exist + for _, name := range []string{"noc", "de", "de2", "build"} { + if _, ok := cfg.Hosts[name]; !ok { + t.Errorf("expected host %q in config", name) + } + } + + // Check de host details + de := cfg.Hosts["de"] + if de.IP != "116.202.82.115" { + t.Errorf("de IP = %q, want 116.202.82.115", de.IP) + } + if de.Role 
!= "app" { + t.Errorf("de role = %q, want app", de.Role) + } + + // Check LB config + if cfg.LoadBalancer.Name != "hermes" { + t.Errorf("LB name = %q, want hermes", cfg.LoadBalancer.Name) + } + if cfg.LoadBalancer.Type != "lb11" { + t.Errorf("LB type = %q, want lb11", cfg.LoadBalancer.Type) + } + if len(cfg.LoadBalancer.Backends) != 2 { + t.Errorf("LB backends = %d, want 2", len(cfg.LoadBalancer.Backends)) + } + + // Check app servers helper + apps := cfg.AppServers() + if len(apps) != 2 { + t.Errorf("AppServers() = %d, want 2", len(apps)) + } +} + +func TestLoad_Bad(t *testing.T) { + _, err := Load("/nonexistent/infra.yaml") + if err == nil { + t.Error("expected error for nonexistent file") + } +} + +func TestLoad_Ugly(t *testing.T) { + // Invalid YAML + tmp := filepath.Join(t.TempDir(), "infra.yaml") + if err := os.WriteFile(tmp, []byte("{{invalid yaml"), 0644); err != nil { + t.Fatal(err) + } + + _, err := Load(tmp) + if err == nil { + t.Error("expected error for invalid YAML") + } +} + +func TestExpandPath(t *testing.T) { + home, _ := os.UserHomeDir() + + tests := []struct { + input string + want string + }{ + {"~/.ssh/id_rsa", filepath.Join(home, ".ssh/id_rsa")}, + {"/absolute/path", "/absolute/path"}, + {"relative/path", "relative/path"}, + } + + for _, tt := range tests { + got := expandPath(tt.input) + if got != tt.want { + t.Errorf("expandPath(%q) = %q, want %q", tt.input, got, tt.want) + } + } +} diff --git a/infra/hetzner.go b/infra/hetzner.go new file mode 100644 index 0000000..93ab819 --- /dev/null +++ b/infra/hetzner.go @@ -0,0 +1,381 @@ +package infra + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +const ( + hcloudBaseURL = "https://api.hetzner.cloud/v1" + hrobotBaseURL = "https://robot-ws.your-server.de" +) + +// HCloudClient is an HTTP client for the Hetzner Cloud API. +type HCloudClient struct { + token string + client *http.Client +} + +// NewHCloudClient creates a new Hetzner Cloud API client. 
func NewHCloudClient(token string) *HCloudClient {
	return &HCloudClient{
		token: token,
		// 30s timeout bounds every request made through this client.
		client: &http.Client{
			Timeout: 30 * time.Second,
		},
	}
}

// HCloudServer represents a Hetzner Cloud server.
type HCloudServer struct {
	ID         int                `json:"id"`
	Name       string             `json:"name"`
	Status     string             `json:"status"`
	PublicNet  HCloudPublicNet    `json:"public_net"`
	PrivateNet []HCloudPrivateNet `json:"private_net"`
	ServerType HCloudServerType   `json:"server_type"`
	Datacenter HCloudDatacenter   `json:"datacenter"`
	Labels     map[string]string  `json:"labels"`
}

// HCloudPublicNet holds public network info.
type HCloudPublicNet struct {
	IPv4 HCloudIPv4 `json:"ipv4"`
}

// HCloudIPv4 holds an IPv4 address.
type HCloudIPv4 struct {
	IP string `json:"ip"`
}

// HCloudPrivateNet holds private network info.
type HCloudPrivateNet struct {
	IP      string `json:"ip"`
	Network int    `json:"network"`
}

// HCloudServerType holds server type info.
type HCloudServerType struct {
	Name        string  `json:"name"`
	Description string  `json:"description"`
	Cores       int     `json:"cores"`
	Memory      float64 `json:"memory"`
	Disk        int     `json:"disk"`
}

// HCloudDatacenter holds datacenter info.
type HCloudDatacenter struct {
	Name        string `json:"name"`
	Description string `json:"description"`
}

// HCloudLoadBalancer represents a Hetzner Cloud load balancer.
type HCloudLoadBalancer struct {
	ID        int               `json:"id"`
	Name      string            `json:"name"`
	PublicNet HCloudLBPublicNet `json:"public_net"`
	Algorithm HCloudLBAlgorithm `json:"algorithm"`
	Services  []HCloudLBService `json:"services"`
	Targets   []HCloudLBTarget  `json:"targets"`
	Location  HCloudDatacenter  `json:"location"`
	Labels    map[string]string `json:"labels"`
}

// HCloudLBPublicNet holds LB public network info.
type HCloudLBPublicNet struct {
	Enabled bool       `json:"enabled"`
	IPv4    HCloudIPv4 `json:"ipv4"`
}

// HCloudLBAlgorithm holds the LB algorithm.
type HCloudLBAlgorithm struct {
	Type string `json:"type"`
}

// HCloudLBService describes an LB listener.
type HCloudLBService struct {
	Protocol        string               `json:"protocol"`
	ListenPort      int                  `json:"listen_port"`
	DestinationPort int                  `json:"destination_port"`
	Proxyprotocol   bool                 `json:"proxyprotocol"`
	HTTP            *HCloudLBHTTP        `json:"http,omitempty"`
	HealthCheck     *HCloudLBHealthCheck `json:"health_check,omitempty"`
}

// HCloudLBHTTP holds HTTP-specific LB options.
type HCloudLBHTTP struct {
	RedirectHTTP bool `json:"redirect_http"`
}

// HCloudLBHealthCheck holds LB health check config.
type HCloudLBHealthCheck struct {
	Protocol string          `json:"protocol"`
	Port     int             `json:"port"`
	Interval int             `json:"interval"`
	Timeout  int             `json:"timeout"`
	Retries  int             `json:"retries"`
	HTTP     *HCloudLBHCHTTP `json:"http,omitempty"`
}

// HCloudLBHCHTTP holds HTTP health check options.
type HCloudLBHCHTTP struct {
	Path       string `json:"path"`
	StatusCode string `json:"status_codes"`
}

// HCloudLBTarget is a load balancer backend target.
// Exactly one of IP or Server is expected to be set, depending on Type.
type HCloudLBTarget struct {
	Type         string                 `json:"type"`
	IP           *HCloudLBTargetIP      `json:"ip,omitempty"`
	Server       *HCloudLBTargetServer  `json:"server,omitempty"`
	HealthStatus []HCloudLBHealthStatus `json:"health_status"`
}

// HCloudLBTargetIP is an IP-based LB target.
type HCloudLBTargetIP struct {
	IP string `json:"ip"`
}

// HCloudLBTargetServer is a server-based LB target.
type HCloudLBTargetServer struct {
	ID int `json:"id"`
}

// HCloudLBHealthStatus holds target health info.
type HCloudLBHealthStatus struct {
	ListenPort int    `json:"listen_port"`
	Status     string `json:"status"`
}

// HCloudLBCreateRequest holds load balancer creation params.
+type HCloudLBCreateRequest struct { + Name string `json:"name"` + LoadBalancerType string `json:"load_balancer_type"` + Location string `json:"location"` + Algorithm HCloudLBAlgorithm `json:"algorithm"` + Services []HCloudLBService `json:"services"` + Targets []HCloudLBCreateTarget `json:"targets"` + Labels map[string]string `json:"labels"` +} + +// HCloudLBCreateTarget is a target for LB creation. +type HCloudLBCreateTarget struct { + Type string `json:"type"` + IP *HCloudLBTargetIP `json:"ip,omitempty"` +} + +// ListServers returns all Hetzner Cloud servers. +func (c *HCloudClient) ListServers(ctx context.Context) ([]HCloudServer, error) { + var result struct { + Servers []HCloudServer `json:"servers"` + } + if err := c.get(ctx, "/servers", &result); err != nil { + return nil, err + } + return result.Servers, nil +} + +// ListLoadBalancers returns all load balancers. +func (c *HCloudClient) ListLoadBalancers(ctx context.Context) ([]HCloudLoadBalancer, error) { + var result struct { + LoadBalancers []HCloudLoadBalancer `json:"load_balancers"` + } + if err := c.get(ctx, "/load_balancers", &result); err != nil { + return nil, err + } + return result.LoadBalancers, nil +} + +// GetLoadBalancer returns a load balancer by ID. +func (c *HCloudClient) GetLoadBalancer(ctx context.Context, id int) (*HCloudLoadBalancer, error) { + var result struct { + LoadBalancer HCloudLoadBalancer `json:"load_balancer"` + } + if err := c.get(ctx, fmt.Sprintf("/load_balancers/%d", id), &result); err != nil { + return nil, err + } + return &result.LoadBalancer, nil +} + +// CreateLoadBalancer creates a new load balancer. 
+func (c *HCloudClient) CreateLoadBalancer(ctx context.Context, req HCloudLBCreateRequest) (*HCloudLoadBalancer, error) { + body, err := json.Marshal(req) + if err != nil { + return nil, fmt.Errorf("marshal request: %w", err) + } + + var result struct { + LoadBalancer HCloudLoadBalancer `json:"load_balancer"` + } + if err := c.post(ctx, "/load_balancers", body, &result); err != nil { + return nil, err + } + return &result.LoadBalancer, nil +} + +// DeleteLoadBalancer deletes a load balancer by ID. +func (c *HCloudClient) DeleteLoadBalancer(ctx context.Context, id int) error { + return c.delete(ctx, fmt.Sprintf("/load_balancers/%d", id)) +} + +// CreateSnapshot creates a server snapshot. +func (c *HCloudClient) CreateSnapshot(ctx context.Context, serverID int, description string) error { + body, _ := json.Marshal(map[string]string{ + "description": description, + "type": "snapshot", + }) + return c.post(ctx, fmt.Sprintf("/servers/%d/actions/create_image", serverID), body, nil) +} + +func (c *HCloudClient) get(ctx context.Context, path string, result any) error { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, hcloudBaseURL+path, nil) + if err != nil { + return err + } + return c.do(req, result) +} + +func (c *HCloudClient) post(ctx context.Context, path string, body []byte, result any) error { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, hcloudBaseURL+path, strings.NewReader(string(body))) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + return c.do(req, result) +} + +func (c *HCloudClient) delete(ctx context.Context, path string) error { + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, hcloudBaseURL+path, nil) + if err != nil { + return err + } + return c.do(req, nil) +} + +func (c *HCloudClient) do(req *http.Request, result any) error { + req.Header.Set("Authorization", "Bearer "+c.token) + + resp, err := c.client.Do(req) + if err != nil { + return fmt.Errorf("hcloud API: %w", 
err) + } + defer func() { _ = resp.Body.Close() }() + + data, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("read response: %w", err) + } + + if resp.StatusCode >= 400 { + var apiErr struct { + Error struct { + Code string `json:"code"` + Message string `json:"message"` + } `json:"error"` + } + if json.Unmarshal(data, &apiErr) == nil && apiErr.Error.Message != "" { + return fmt.Errorf("hcloud API %d: %s — %s", resp.StatusCode, apiErr.Error.Code, apiErr.Error.Message) + } + return fmt.Errorf("hcloud API %d: %s", resp.StatusCode, string(data)) + } + + if result != nil { + if err := json.Unmarshal(data, result); err != nil { + return fmt.Errorf("decode response: %w", err) + } + } + return nil +} + +// --- Hetzner Robot API --- + +// HRobotClient is an HTTP client for the Hetzner Robot API. +type HRobotClient struct { + user string + password string + client *http.Client +} + +// NewHRobotClient creates a new Hetzner Robot API client. +func NewHRobotClient(user, password string) *HRobotClient { + return &HRobotClient{ + user: user, + password: password, + client: &http.Client{ + Timeout: 30 * time.Second, + }, + } +} + +// HRobotServer represents a Hetzner Robot dedicated server. +type HRobotServer struct { + ServerIP string `json:"server_ip"` + ServerName string `json:"server_name"` + Product string `json:"product"` + Datacenter string `json:"dc"` + Status string `json:"status"` + Cancelled bool `json:"cancelled"` + PaidUntil string `json:"paid_until"` +} + +// ListServers returns all Robot dedicated servers. +func (c *HRobotClient) ListServers(ctx context.Context) ([]HRobotServer, error) { + var raw []struct { + Server HRobotServer `json:"server"` + } + if err := c.get(ctx, "/server", &raw); err != nil { + return nil, err + } + + servers := make([]HRobotServer, len(raw)) + for i, s := range raw { + servers[i] = s.Server + } + return servers, nil +} + +// GetServer returns a Robot server by IP. 
+func (c *HRobotClient) GetServer(ctx context.Context, ip string) (*HRobotServer, error) { + var raw struct { + Server HRobotServer `json:"server"` + } + if err := c.get(ctx, "/server/"+ip, &raw); err != nil { + return nil, err + } + return &raw.Server, nil +} + +func (c *HRobotClient) get(ctx context.Context, path string, result any) error { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, hrobotBaseURL+path, nil) + if err != nil { + return err + } + req.SetBasicAuth(c.user, c.password) + + resp, err := c.client.Do(req) + if err != nil { + return fmt.Errorf("hrobot API: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + data, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("read response: %w", err) + } + + if resp.StatusCode >= 400 { + return fmt.Errorf("hrobot API %d: %s", resp.StatusCode, string(data)) + } + + if result != nil { + if err := json.Unmarshal(data, result); err != nil { + return fmt.Errorf("decode response: %w", err) + } + } + return nil +} diff --git a/release/changelog.go b/release/changelog.go new file mode 100644 index 0000000..c25fc52 --- /dev/null +++ b/release/changelog.go @@ -0,0 +1,321 @@ +// Package release provides release automation with changelog generation and publishing. +package release + +import ( + "bufio" + "bytes" + "fmt" + "os/exec" + "regexp" + "sort" + "strings" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +// ConventionalCommit represents a parsed conventional commit. +type ConventionalCommit struct { + Type string // feat, fix, etc. + Scope string // optional scope in parentheses + Description string // commit description + Hash string // short commit hash + Breaking bool // has breaking change indicator +} + +// commitTypeLabels maps commit types to human-readable labels for the changelog. 
+var commitTypeLabels = map[string]string{ + "feat": "Features", + "fix": "Bug Fixes", + "perf": "Performance Improvements", + "refactor": "Code Refactoring", + "docs": "Documentation", + "style": "Styles", + "test": "Tests", + "build": "Build System", + "ci": "Continuous Integration", + "chore": "Chores", + "revert": "Reverts", +} + +// commitTypeOrder defines the order of sections in the changelog. +var commitTypeOrder = []string{ + "feat", + "fix", + "perf", + "refactor", + "docs", + "style", + "test", + "build", + "ci", + "chore", + "revert", +} + +// conventionalCommitRegex matches conventional commit format. +// Examples: "feat: add feature", "fix(scope): fix bug", "feat!: breaking change" +var conventionalCommitRegex = regexp.MustCompile(`^(\w+)(?:\(([^)]+)\))?(!)?:\s*(.+)$`) + +// Generate generates a markdown changelog from git commits between two refs. +// If fromRef is empty, it uses the previous tag or initial commit. +// If toRef is empty, it uses HEAD. +func Generate(dir, fromRef, toRef string) (string, error) { + if toRef == "" { + toRef = "HEAD" + } + + // If fromRef is empty, try to find previous tag + if fromRef == "" { + prevTag, err := getPreviousTag(dir, toRef) + if err != nil { + // No previous tag, use initial commit + fromRef = "" + } else { + fromRef = prevTag + } + } + + // Get commits between refs + commits, err := getCommits(dir, fromRef, toRef) + if err != nil { + return "", fmt.Errorf("changelog.Generate: failed to get commits: %w", err) + } + + // Parse conventional commits + var parsedCommits []ConventionalCommit + for _, commit := range commits { + parsed := parseConventionalCommit(commit) + if parsed != nil { + parsedCommits = append(parsedCommits, *parsed) + } + } + + // Generate markdown + return formatChangelog(parsedCommits, toRef), nil +} + +// GenerateWithConfig generates a changelog with filtering based on config. 
+func GenerateWithConfig(dir, fromRef, toRef string, cfg *ChangelogConfig) (string, error) { + if toRef == "" { + toRef = "HEAD" + } + + // If fromRef is empty, try to find previous tag + if fromRef == "" { + prevTag, err := getPreviousTag(dir, toRef) + if err != nil { + fromRef = "" + } else { + fromRef = prevTag + } + } + + // Get commits between refs + commits, err := getCommits(dir, fromRef, toRef) + if err != nil { + return "", fmt.Errorf("changelog.GenerateWithConfig: failed to get commits: %w", err) + } + + // Build include/exclude sets + includeSet := make(map[string]bool) + excludeSet := make(map[string]bool) + for _, t := range cfg.Include { + includeSet[t] = true + } + for _, t := range cfg.Exclude { + excludeSet[t] = true + } + + // Parse and filter conventional commits + var parsedCommits []ConventionalCommit + for _, commit := range commits { + parsed := parseConventionalCommit(commit) + if parsed == nil { + continue + } + + // Apply filters + if len(includeSet) > 0 && !includeSet[parsed.Type] { + continue + } + if excludeSet[parsed.Type] { + continue + } + + parsedCommits = append(parsedCommits, *parsed) + } + + return formatChangelog(parsedCommits, toRef), nil +} + +// getPreviousTag returns the tag before the given ref. +func getPreviousTag(dir, ref string) (string, error) { + cmd := exec.Command("git", "describe", "--tags", "--abbrev=0", ref+"^") + cmd.Dir = dir + output, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(output)), nil +} + +// getCommits returns a slice of commit strings between two refs. +// Format: "hash subject" +func getCommits(dir, fromRef, toRef string) ([]string, error) { + var args []string + if fromRef == "" { + // All commits up to toRef + args = []string{"log", "--oneline", "--no-merges", toRef} + } else { + // Commits between refs + args = []string{"log", "--oneline", "--no-merges", fromRef + ".." + toRef} + } + + cmd := exec.Command("git", args...) 
+ cmd.Dir = dir + output, err := cmd.Output() + if err != nil { + return nil, err + } + + var commits []string + scanner := bufio.NewScanner(bytes.NewReader(output)) + for scanner.Scan() { + line := scanner.Text() + if line != "" { + commits = append(commits, line) + } + } + + return commits, scanner.Err() +} + +// parseConventionalCommit parses a git log --oneline output into a ConventionalCommit. +// Returns nil if the commit doesn't follow conventional commit format. +func parseConventionalCommit(commitLine string) *ConventionalCommit { + // Split hash and subject + parts := strings.SplitN(commitLine, " ", 2) + if len(parts) != 2 { + return nil + } + + hash := parts[0] + subject := parts[1] + + // Match conventional commit format + matches := conventionalCommitRegex.FindStringSubmatch(subject) + if matches == nil { + return nil + } + + return &ConventionalCommit{ + Type: strings.ToLower(matches[1]), + Scope: matches[2], + Breaking: matches[3] == "!", + Description: matches[4], + Hash: hash, + } +} + +// formatChangelog formats parsed commits into markdown. 
+func formatChangelog(commits []ConventionalCommit, version string) string { + if len(commits) == 0 { + return fmt.Sprintf("## %s\n\nNo notable changes.", version) + } + + // Group commits by type + grouped := make(map[string][]ConventionalCommit) + var breaking []ConventionalCommit + + for _, commit := range commits { + if commit.Breaking { + breaking = append(breaking, commit) + } + grouped[commit.Type] = append(grouped[commit.Type], commit) + } + + var buf strings.Builder + buf.WriteString(fmt.Sprintf("## %s\n\n", version)) + + // Breaking changes first + if len(breaking) > 0 { + buf.WriteString("### BREAKING CHANGES\n\n") + for _, commit := range breaking { + buf.WriteString(formatCommitLine(commit)) + } + buf.WriteString("\n") + } + + // Other sections in order + for _, commitType := range commitTypeOrder { + commits, ok := grouped[commitType] + if !ok || len(commits) == 0 { + continue + } + + label, ok := commitTypeLabels[commitType] + if !ok { + label = cases.Title(language.English).String(commitType) + } + + buf.WriteString(fmt.Sprintf("### %s\n\n", label)) + for _, commit := range commits { + buf.WriteString(formatCommitLine(commit)) + } + buf.WriteString("\n") + } + + // Any remaining types not in the order list + var remainingTypes []string + for commitType := range grouped { + found := false + for _, t := range commitTypeOrder { + if t == commitType { + found = true + break + } + } + if !found { + remainingTypes = append(remainingTypes, commitType) + } + } + sort.Strings(remainingTypes) + + for _, commitType := range remainingTypes { + commits := grouped[commitType] + label := cases.Title(language.English).String(commitType) + buf.WriteString(fmt.Sprintf("### %s\n\n", label)) + for _, commit := range commits { + buf.WriteString(formatCommitLine(commit)) + } + buf.WriteString("\n") + } + + return strings.TrimSuffix(buf.String(), "\n") +} + +// formatCommitLine formats a single commit as a changelog line. 
+func formatCommitLine(commit ConventionalCommit) string { + var buf strings.Builder + buf.WriteString("- ") + + if commit.Scope != "" { + buf.WriteString(fmt.Sprintf("**%s**: ", commit.Scope)) + } + + buf.WriteString(commit.Description) + buf.WriteString(fmt.Sprintf(" (%s)\n", commit.Hash)) + + return buf.String() +} + +// ParseCommitType extracts the type from a conventional commit subject. +// Returns empty string if not a conventional commit. +func ParseCommitType(subject string) string { + matches := conventionalCommitRegex.FindStringSubmatch(subject) + if matches == nil { + return "" + } + return strings.ToLower(matches[1]) +} diff --git a/release/changelog_test.go b/release/changelog_test.go new file mode 100644 index 0000000..ac7d4de --- /dev/null +++ b/release/changelog_test.go @@ -0,0 +1,695 @@ +package release + +import ( + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseConventionalCommit_Good(t *testing.T) { + tests := []struct { + name string + input string + expected *ConventionalCommit + }{ + { + name: "feat without scope", + input: "abc1234 feat: add new feature", + expected: &ConventionalCommit{ + Type: "feat", + Scope: "", + Description: "add new feature", + Hash: "abc1234", + Breaking: false, + }, + }, + { + name: "fix with scope", + input: "def5678 fix(auth): resolve login issue", + expected: &ConventionalCommit{ + Type: "fix", + Scope: "auth", + Description: "resolve login issue", + Hash: "def5678", + Breaking: false, + }, + }, + { + name: "breaking change with exclamation", + input: "ghi9012 feat!: breaking API change", + expected: &ConventionalCommit{ + Type: "feat", + Scope: "", + Description: "breaking API change", + Hash: "ghi9012", + Breaking: true, + }, + }, + { + name: "breaking change with scope", + input: "jkl3456 fix(api)!: remove deprecated endpoint", + expected: &ConventionalCommit{ + Type: "fix", + Scope: "api", + Description: 
"remove deprecated endpoint",
				Hash:     "jkl3456",
				Breaking: true,
			},
		},
		{
			name:  "perf type",
			input: "mno7890 perf: optimize database queries",
			expected: &ConventionalCommit{
				Type:        "perf",
				Scope:       "",
				Description: "optimize database queries",
				Hash:        "mno7890",
				Breaking:    false,
			},
		},
		{
			name:  "chore type",
			input: "pqr1234 chore: update dependencies",
			expected: &ConventionalCommit{
				Type:        "chore",
				Scope:       "",
				Description: "update dependencies",
				Hash:        "pqr1234",
				Breaking:    false,
			},
		},
		{
			name:  "uppercase type normalizes to lowercase",
			input: "stu5678 FEAT: uppercase type",
			expected: &ConventionalCommit{
				Type:        "feat",
				Scope:       "",
				Description: "uppercase type",
				Hash:        "stu5678",
				Breaking:    false,
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			result := parseConventionalCommit(tc.input)
			assert.NotNil(t, result)
			assert.Equal(t, tc.expected.Type, result.Type)
			assert.Equal(t, tc.expected.Scope, result.Scope)
			assert.Equal(t, tc.expected.Description, result.Description)
			assert.Equal(t, tc.expected.Hash, result.Hash)
			assert.Equal(t, tc.expected.Breaking, result.Breaking)
		})
	}
}

// TestParseConventionalCommit_Bad verifies non-conforming subjects return nil.
func TestParseConventionalCommit_Bad(t *testing.T) {
	tests := []struct {
		name  string
		input string
	}{
		{
			name:  "non-conventional commit",
			input: "abc1234 Update README",
		},
		{
			name:  "missing colon",
			input: "def5678 feat add feature",
		},
		{
			name:  "empty subject",
			input: "ghi9012",
		},
		{
			name:  "just hash",
			input: "abc1234",
		},
		{
			name:  "merge commit",
			input: "abc1234 Merge pull request #123",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			result := parseConventionalCommit(tc.input)
			assert.Nil(t, result)
		})
	}
}

func TestFormatChangelog_Good(t *testing.T) {
	t.Run("formats commits by type", func(t *testing.T) {
		commits := []ConventionalCommit{
			{Type: "feat", Description: "add feature A", Hash: "abc1234"},
			{Type: "fix", Description: "fix bug B", Hash: "def5678"},
			{Type: "feat", Description: "add feature C", Hash: "ghi9012"},
		}

		result := formatChangelog(commits, "v1.0.0")

		assert.Contains(t, result, "## v1.0.0")
		assert.Contains(t, result, "### Features")
		assert.Contains(t, result, "### Bug Fixes")
		assert.Contains(t, result, "- add feature A (abc1234)")
		assert.Contains(t, result, "- fix bug B (def5678)")
		assert.Contains(t, result, "- add feature C (ghi9012)")
	})

	t.Run("includes scope in output", func(t *testing.T) {
		commits := []ConventionalCommit{
			{Type: "feat", Scope: "api", Description: "add endpoint", Hash: "abc1234"},
		}

		result := formatChangelog(commits, "v1.0.0")

		assert.Contains(t, result, "**api**: add endpoint")
	})

	t.Run("breaking changes first", func(t *testing.T) {
		commits := []ConventionalCommit{
			{Type: "feat", Description: "normal feature", Hash: "abc1234"},
			{Type: "feat", Description: "breaking feature", Hash: "def5678", Breaking: true},
		}

		result := formatChangelog(commits, "v1.0.0")

		assert.Contains(t, result, "### BREAKING CHANGES")
		// Breaking changes section should appear before Features
		breakingPos := indexOf(result, "BREAKING CHANGES")
		featuresPos := indexOf(result, "Features")
		assert.Less(t, breakingPos, featuresPos)
	})

	t.Run("empty commits returns minimal changelog", func(t *testing.T) {
		result := formatChangelog([]ConventionalCommit{}, "v1.0.0")

		assert.Contains(t, result, "## v1.0.0")
		assert.Contains(t, result, "No notable changes")
	})
}

func TestParseCommitType_Good(t *testing.T) {
	tests := []struct {
		input    string
		expected string
	}{
		{"feat: add feature", "feat"},
		{"fix(scope): fix bug", "fix"},
		{"perf!: breaking perf", "perf"},
		{"chore: update deps", "chore"},
	}

	for _, tc := range tests {
		t.Run(tc.input, func(t *testing.T) {
			result := ParseCommitType(tc.input)
			assert.Equal(t, tc.expected, result)
		})
	}
}

func TestParseCommitType_Bad(t *testing.T) {
	tests := []struct {
		input string
	}{
		{"not a conventional commit"},
		{"Update README"},
		{"Merge branch 'main'"},
	}

	for _, tc := range tests {
		t.Run(tc.input, func(t *testing.T) {
			result := ParseCommitType(tc.input)
			assert.Empty(t, result)
		})
	}
}

func TestGenerateWithConfig_ConfigValues(t *testing.T) {
	t.Run("config filters are parsed correctly", func(t *testing.T) {
		cfg := &ChangelogConfig{
			Include: []string{"feat", "fix"},
			Exclude: []string{"chore", "docs"},
		}

		// Verify the config values
		assert.Contains(t, cfg.Include, "feat")
		assert.Contains(t, cfg.Include, "fix")
		assert.Contains(t, cfg.Exclude, "chore")
		assert.Contains(t, cfg.Exclude, "docs")
	})
}

// indexOf returns the position of a substring in a string, or -1 if not found.
// NOTE(review): this duplicates strings.Index; swapping it would need a
// "strings" import in this file's import block — confirm before changing.
func indexOf(s, substr string) int {
	for i := 0; i+len(substr) <= len(s); i++ {
		if s[i:i+len(substr)] == substr {
			return i
		}
	}
	return -1
}

// setupChangelogGitRepo creates a temporary directory with an initialized git repository.
func setupChangelogGitRepo(t *testing.T) string {
	t.Helper()
	dir := t.TempDir()

	// Initialize git repo
	cmd := exec.Command("git", "init")
	cmd.Dir = dir
	require.NoError(t, cmd.Run())

	// Configure git user for commits
	cmd = exec.Command("git", "config", "user.email", "test@example.com")
	cmd.Dir = dir
	require.NoError(t, cmd.Run())

	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = dir
	require.NoError(t, cmd.Run())

	return dir
}

// createChangelogCommit creates a commit in the given directory.
// The file is appended to (not overwritten) so every call yields a new diff.
func createChangelogCommit(t *testing.T, dir, message string) {
	t.Helper()

	// Create or modify a file
	filePath := filepath.Join(dir, "changelog_test.txt")
	content, _ := os.ReadFile(filePath)
	content = append(content, []byte(message+"\n")...)
	require.NoError(t, os.WriteFile(filePath, content, 0644))

	// Stage and commit
	cmd := exec.Command("git", "add", ".")
	cmd.Dir = dir
	require.NoError(t, cmd.Run())

	cmd = exec.Command("git", "commit", "-m", message)
	cmd.Dir = dir
	require.NoError(t, cmd.Run())
}

// createChangelogTag creates a tag in the given directory.
func createChangelogTag(t *testing.T, dir, tag string) {
	t.Helper()
	cmd := exec.Command("git", "tag", tag)
	cmd.Dir = dir
	require.NoError(t, cmd.Run())
}

func TestGenerate_Good(t *testing.T) {
	t.Run("generates changelog from commits", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: add new feature")
		createChangelogCommit(t, dir, "fix: resolve bug")

		changelog, err := Generate(dir, "", "HEAD")
		require.NoError(t, err)

		assert.Contains(t, changelog, "## HEAD")
		assert.Contains(t, changelog, "### Features")
		assert.Contains(t, changelog, "add new feature")
		assert.Contains(t, changelog, "### Bug Fixes")
		assert.Contains(t, changelog, "resolve bug")
	})

	t.Run("generates changelog between tags", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: initial feature")
		createChangelogTag(t, dir, "v1.0.0")
		createChangelogCommit(t, dir, "feat: new feature")
		createChangelogCommit(t, dir, "fix: bug fix")
		createChangelogTag(t, dir, "v1.1.0")

		changelog, err := Generate(dir, "v1.0.0", "v1.1.0")
		require.NoError(t, err)

		assert.Contains(t, changelog, "## v1.1.0")
		assert.Contains(t, changelog, "new feature")
		assert.Contains(t, changelog, "bug fix")
		// Should NOT contain the initial feature
		assert.NotContains(t, changelog, "initial feature")
	})

	t.Run("handles empty changelog when no conventional commits", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "Update README")
		createChangelogCommit(t, dir, "Merge branch main")

		changelog, err := Generate(dir, "", "HEAD")
		require.NoError(t, err)

		assert.Contains(t, changelog, "No notable changes")
	})

	t.Run("uses previous tag when fromRef is empty", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: old feature")
		createChangelogTag(t, dir, "v1.0.0")
		createChangelogCommit(t, dir, "feat: new feature")

		changelog, err := Generate(dir, "", "HEAD")
		require.NoError(t, err)

		assert.Contains(t, changelog, "new feature")
		assert.NotContains(t, changelog, "old feature")
	})

	t.Run("includes breaking changes", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat!: breaking API change")
		createChangelogCommit(t, dir, "feat: normal feature")

		changelog, err := Generate(dir, "", "HEAD")
		require.NoError(t, err)

		assert.Contains(t, changelog, "### BREAKING CHANGES")
		assert.Contains(t, changelog, "breaking API change")
	})

	t.Run("includes scope in output", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat(api): add endpoint")

		changelog, err := Generate(dir, "", "HEAD")
		require.NoError(t, err)

		assert.Contains(t, changelog, "**api**:")
	})
}

func TestGenerate_Bad(t *testing.T) {
	t.Run("returns error for non-git directory", func(t *testing.T) {
		dir := t.TempDir()

		_, err := Generate(dir, "", "HEAD")
		assert.Error(t, err)
	})
}

func TestGenerateWithConfig_Good(t *testing.T) {
	t.Run("filters commits by include list", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: new feature")
		createChangelogCommit(t, dir, "fix: bug fix")
		createChangelogCommit(t, dir, "chore: update deps")

		cfg := &ChangelogConfig{
			Include: []string{"feat"},
		}

		changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
		require.NoError(t, err)

		assert.Contains(t, changelog, "new feature")
		assert.NotContains(t, changelog, "bug fix")
		assert.NotContains(t, changelog, "update deps")
	})

	t.Run("filters commits by exclude list", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: new feature")
		createChangelogCommit(t, dir, "fix: bug fix")
		createChangelogCommit(t, dir, "chore: update deps")

		cfg := &ChangelogConfig{
			Exclude: []string{"chore"},
		}

		changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
		require.NoError(t, err)

		assert.Contains(t, changelog, "new feature")
		assert.Contains(t, changelog, "bug fix")
		assert.NotContains(t, changelog, "update deps")
	})

	t.Run("combines include and exclude filters", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: new feature")
		createChangelogCommit(t, dir, "fix: bug fix")
		createChangelogCommit(t, dir, "perf: performance")

		cfg := &ChangelogConfig{
			Include: []string{"feat", "fix", "perf"},
			Exclude: []string{"perf"},
		}

		changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
		require.NoError(t, err)

		assert.Contains(t, changelog, "new feature")
		assert.Contains(t, changelog, "bug fix")
		assert.NotContains(t, changelog, "performance")
	})
}

func TestGetCommits_Good(t *testing.T) {
	t.Run("returns all commits when fromRef is empty", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: first")
		createChangelogCommit(t, dir, "feat: second")
		createChangelogCommit(t, dir, "feat: third")

		commits, err := getCommits(dir, "", "HEAD")
		require.NoError(t, err)

		assert.Len(t, commits, 3)
	})

	t.Run("returns commits between refs", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: first")
		createChangelogTag(t, dir, "v1.0.0")
		createChangelogCommit(t, dir, "feat: second")
		createChangelogCommit(t, dir, "feat: third")

		commits, err := getCommits(dir, "v1.0.0", "HEAD")
		require.NoError(t, err)

		assert.Len(t, commits, 2)
	})

	t.Run("excludes merge
commits", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: regular commit") + // Merge commits are excluded by --no-merges flag + // We can verify by checking the count matches expected + + commits, err := getCommits(dir, "", "HEAD") + require.NoError(t, err) + + assert.Len(t, commits, 1) + assert.Contains(t, commits[0], "regular commit") + }) + + t.Run("returns empty slice for no commits in range", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: only commit") + createChangelogTag(t, dir, "v1.0.0") + + commits, err := getCommits(dir, "v1.0.0", "HEAD") + require.NoError(t, err) + + assert.Empty(t, commits) + }) +} + +func TestGetCommits_Bad(t *testing.T) { + t.Run("returns error for invalid ref", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: commit") + + _, err := getCommits(dir, "nonexistent-tag", "HEAD") + assert.Error(t, err) + }) + + t.Run("returns error for non-git directory", func(t *testing.T) { + dir := t.TempDir() + + _, err := getCommits(dir, "", "HEAD") + assert.Error(t, err) + }) +} + +func TestGetPreviousTag_Good(t *testing.T) { + t.Run("returns previous tag", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: first") + createChangelogTag(t, dir, "v1.0.0") + createChangelogCommit(t, dir, "feat: second") + createChangelogTag(t, dir, "v1.1.0") + + tag, err := getPreviousTag(dir, "v1.1.0") + require.NoError(t, err) + assert.Equal(t, "v1.0.0", tag) + }) + + t.Run("returns tag before HEAD", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: first") + createChangelogTag(t, dir, "v1.0.0") + createChangelogCommit(t, dir, "feat: second") + + tag, err := getPreviousTag(dir, "HEAD") + require.NoError(t, err) + assert.Equal(t, "v1.0.0", tag) + }) +} + +func TestGetPreviousTag_Bad(t *testing.T) { + t.Run("returns error when no previous tag 
exists", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: first") + createChangelogTag(t, dir, "v1.0.0") + + // v1.0.0^ has no tag before it + _, err := getPreviousTag(dir, "v1.0.0") + assert.Error(t, err) + }) + + t.Run("returns error for invalid ref", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: commit") + + _, err := getPreviousTag(dir, "nonexistent") + assert.Error(t, err) + }) +} + +func TestFormatCommitLine_Good(t *testing.T) { + t.Run("formats commit without scope", func(t *testing.T) { + commit := ConventionalCommit{ + Type: "feat", + Description: "add feature", + Hash: "abc1234", + } + + result := formatCommitLine(commit) + assert.Equal(t, "- add feature (abc1234)\n", result) + }) + + t.Run("formats commit with scope", func(t *testing.T) { + commit := ConventionalCommit{ + Type: "fix", + Scope: "api", + Description: "fix bug", + Hash: "def5678", + } + + result := formatCommitLine(commit) + assert.Equal(t, "- **api**: fix bug (def5678)\n", result) + }) +} + +func TestFormatChangelog_Ugly(t *testing.T) { + t.Run("handles custom commit type not in order", func(t *testing.T) { + commits := []ConventionalCommit{ + {Type: "custom", Description: "custom type", Hash: "abc1234"}, + } + + result := formatChangelog(commits, "v1.0.0") + + assert.Contains(t, result, "### Custom") + assert.Contains(t, result, "custom type") + }) + + t.Run("handles multiple custom commit types", func(t *testing.T) { + commits := []ConventionalCommit{ + {Type: "alpha", Description: "alpha feature", Hash: "abc1234"}, + {Type: "beta", Description: "beta feature", Hash: "def5678"}, + } + + result := formatChangelog(commits, "v1.0.0") + + // Should be sorted alphabetically for custom types + assert.Contains(t, result, "### Alpha") + assert.Contains(t, result, "### Beta") + }) +} + +func TestGenerateWithConfig_Bad(t *testing.T) { + t.Run("returns error for non-git directory", func(t *testing.T) { + 
dir := t.TempDir() + cfg := &ChangelogConfig{ + Include: []string{"feat"}, + } + + _, err := GenerateWithConfig(dir, "", "HEAD", cfg) + assert.Error(t, err) + }) +} + +func TestGenerateWithConfig_EdgeCases(t *testing.T) { + t.Run("uses HEAD when toRef is empty", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: new feature") + + cfg := &ChangelogConfig{ + Include: []string{"feat"}, + } + + // Pass empty toRef + changelog, err := GenerateWithConfig(dir, "", "", cfg) + require.NoError(t, err) + + assert.Contains(t, changelog, "## HEAD") + }) + + t.Run("handles previous tag lookup failure gracefully", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: first") + + cfg := &ChangelogConfig{ + Include: []string{"feat"}, + } + + // No tags exist, should still work + changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg) + require.NoError(t, err) + + assert.Contains(t, changelog, "first") + }) + + t.Run("uses explicit fromRef when provided", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: old feature") + createChangelogTag(t, dir, "v1.0.0") + createChangelogCommit(t, dir, "feat: new feature") + + cfg := &ChangelogConfig{ + Include: []string{"feat"}, + } + + // Use explicit fromRef + changelog, err := GenerateWithConfig(dir, "v1.0.0", "HEAD", cfg) + require.NoError(t, err) + + assert.Contains(t, changelog, "new feature") + assert.NotContains(t, changelog, "old feature") + }) + + t.Run("skips non-conventional commits", func(t *testing.T) { + dir := setupChangelogGitRepo(t) + createChangelogCommit(t, dir, "feat: conventional commit") + createChangelogCommit(t, dir, "Update README") + + cfg := &ChangelogConfig{ + Include: []string{"feat"}, + } + + changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg) + require.NoError(t, err) + + assert.Contains(t, changelog, "conventional commit") + assert.NotContains(t, changelog, "Update README") + }) 
}
diff --git a/release/config.go b/release/config.go
new file mode 100644
index 0000000..18e81c8
--- /dev/null
+++ b/release/config.go
// Package release provides release automation with changelog generation and publishing.
package release

import (
	"fmt"
	"os"
	"path/filepath"

	"forge.lthn.ai/core/go/pkg/io"
	"gopkg.in/yaml.v3"
)

// ConfigFileName is the name of the release configuration file.
const ConfigFileName = "release.yaml"

// ConfigDir is the directory where release configuration is stored.
const ConfigDir = ".core"

// Config holds the complete release configuration loaded from .core/release.yaml.
// Exported fields are (de)serialized via their yaml tags; the unexported fields
// at the bottom carry runtime state and are never written to disk.
type Config struct {
	// Version is the config file format version.
	Version int `yaml:"version"`
	// Project contains project metadata.
	Project ProjectConfig `yaml:"project"`
	// Build contains build settings for the release.
	Build BuildConfig `yaml:"build"`
	// Publishers defines where to publish the release.
	Publishers []PublisherConfig `yaml:"publishers"`
	// Changelog configures changelog generation.
	Changelog ChangelogConfig `yaml:"changelog"`
	// SDK configures SDK generation.
	SDK *SDKConfig `yaml:"sdk,omitempty"`

	// Internal fields (not serialized)
	projectDir string // Set by LoadConfig
	version    string // Set by CLI flag
}

// ProjectConfig holds project metadata for releases.
type ProjectConfig struct {
	// Name is the project name.
	Name string `yaml:"name"`
	// Repository is the GitHub repository in owner/repo format.
	Repository string `yaml:"repository"`
}

// BuildConfig holds build settings for releases.
type BuildConfig struct {
	// Targets defines the build targets.
	Targets []TargetConfig `yaml:"targets"`
}

// TargetConfig defines a build target.
type TargetConfig struct {
	// OS is the target operating system (e.g., "linux", "darwin", "windows").
	OS string `yaml:"os"`
	// Arch is the target architecture (e.g., "amd64", "arm64").
	Arch string `yaml:"arch"`
}

// PublisherConfig holds configuration for a publisher.
// It is a union of the settings for every supported publisher type,
// discriminated by Type; fields that do not apply to a given type are
// left at their zero value and omitted from YAML via omitempty.
type PublisherConfig struct {
	// Type is the publisher type (e.g., "github", "linuxkit", "docker").
	Type string `yaml:"type"`
	// Prerelease marks the release as a prerelease.
	Prerelease bool `yaml:"prerelease"`
	// Draft creates the release as a draft.
	Draft bool `yaml:"draft"`

	// LinuxKit-specific configuration
	// Config is the path to the LinuxKit YAML configuration file.
	Config string `yaml:"config,omitempty"`
	// Formats are the output formats to build (iso, raw, qcow2, vmdk).
	Formats []string `yaml:"formats,omitempty"`
	// Platforms are the target platforms (linux/amd64, linux/arm64).
	Platforms []string `yaml:"platforms,omitempty"`

	// Docker-specific configuration
	// Registry is the container registry (default: ghcr.io).
	Registry string `yaml:"registry,omitempty"`
	// Image is the image name in owner/repo format.
	Image string `yaml:"image,omitempty"`
	// Dockerfile is the path to the Dockerfile (default: Dockerfile).
	Dockerfile string `yaml:"dockerfile,omitempty"`
	// Tags are the image tags to apply.
	Tags []string `yaml:"tags,omitempty"`
	// BuildArgs are additional Docker build arguments.
	BuildArgs map[string]string `yaml:"build_args,omitempty"`

	// npm-specific configuration
	// Package is the npm package name (e.g., "@host-uk/core").
	Package string `yaml:"package,omitempty"`
	// Access is the npm access level: "public" or "restricted".
	Access string `yaml:"access,omitempty"`

	// Homebrew-specific configuration
	// Tap is the Homebrew tap repository (e.g., "host-uk/homebrew-tap").
	Tap string `yaml:"tap,omitempty"`
	// Formula is the formula name (defaults to project name).
	Formula string `yaml:"formula,omitempty"`

	// Scoop-specific configuration
	// Bucket is the Scoop bucket repository (e.g., "host-uk/scoop-bucket").
	Bucket string `yaml:"bucket,omitempty"`

	// AUR-specific configuration
	// Maintainer is the AUR package maintainer (e.g., "Name ").
	Maintainer string `yaml:"maintainer,omitempty"`

	// Chocolatey-specific configuration
	// Push determines whether to push to Chocolatey (false = generate only).
	Push bool `yaml:"push,omitempty"`

	// Official repo configuration (for Homebrew, Scoop)
	// When enabled, generates files for PR to official repos.
	Official *OfficialConfig `yaml:"official,omitempty"`
}

// OfficialConfig holds configuration for generating files for official repo PRs.
type OfficialConfig struct {
	// Enabled determines whether to generate files for official repos.
	Enabled bool `yaml:"enabled"`
	// Output is the directory to write generated files.
	Output string `yaml:"output,omitempty"`
}

// SDKConfig holds SDK generation configuration.
type SDKConfig struct {
	// Spec is the path to the OpenAPI spec file.
	Spec string `yaml:"spec,omitempty"`
	// Languages to generate.
	Languages []string `yaml:"languages,omitempty"`
	// Output directory (default: sdk/).
	Output string `yaml:"output,omitempty"`
	// Package naming.
	Package SDKPackageConfig `yaml:"package,omitempty"`
	// Diff configuration.
	Diff SDKDiffConfig `yaml:"diff,omitempty"`
	// Publish configuration.
	Publish SDKPublishConfig `yaml:"publish,omitempty"`
}

// SDKPackageConfig holds package naming configuration.
type SDKPackageConfig struct {
	// Name is the SDK package name.
	Name string `yaml:"name,omitempty"`
	// Version is the SDK package version.
	Version string `yaml:"version,omitempty"`
}

// SDKDiffConfig holds diff configuration.
type SDKDiffConfig struct {
	// Enabled turns SDK spec diffing on.
	Enabled bool `yaml:"enabled,omitempty"`
	// FailOnBreaking fails the run when a breaking change is detected.
	FailOnBreaking bool `yaml:"fail_on_breaking,omitempty"`
}

// SDKPublishConfig holds monorepo publish configuration.
type SDKPublishConfig struct {
	// Repo is the target repository to publish into.
	Repo string `yaml:"repo,omitempty"`
	// Path is the path within that repository.
	Path string `yaml:"path,omitempty"`
}

// ChangelogConfig holds changelog generation settings.
+type ChangelogConfig struct { + // Include specifies commit types to include in the changelog. + Include []string `yaml:"include"` + // Exclude specifies commit types to exclude from the changelog. + Exclude []string `yaml:"exclude"` +} + +// LoadConfig loads release configuration from the .core/release.yaml file in the given directory. +// If the config file does not exist, it returns DefaultConfig(). +// Returns an error if the file exists but cannot be parsed. +func LoadConfig(dir string) (*Config, error) { + configPath := filepath.Join(dir, ConfigDir, ConfigFileName) + + // Convert to absolute path for io.Local + absPath, err := filepath.Abs(configPath) + if err != nil { + return nil, fmt.Errorf("release.LoadConfig: failed to resolve path: %w", err) + } + + content, err := io.Local.Read(absPath) + if err != nil { + if os.IsNotExist(err) { + cfg := DefaultConfig() + cfg.projectDir = dir + return cfg, nil + } + return nil, fmt.Errorf("release.LoadConfig: failed to read config file: %w", err) + } + + var cfg Config + if err := yaml.Unmarshal([]byte(content), &cfg); err != nil { + return nil, fmt.Errorf("release.LoadConfig: failed to parse config file: %w", err) + } + + // Apply defaults for any missing fields + applyDefaults(&cfg) + cfg.projectDir = dir + + return &cfg, nil +} + +// DefaultConfig returns sensible defaults for release configuration. 
+func DefaultConfig() *Config { + return &Config{ + Version: 1, + Project: ProjectConfig{ + Name: "", + Repository: "", + }, + Build: BuildConfig{ + Targets: []TargetConfig{ + {OS: "linux", Arch: "amd64"}, + {OS: "linux", Arch: "arm64"}, + {OS: "darwin", Arch: "arm64"}, + {OS: "windows", Arch: "amd64"}, + }, + }, + Publishers: []PublisherConfig{ + { + Type: "github", + Prerelease: false, + Draft: false, + }, + }, + Changelog: ChangelogConfig{ + Include: []string{"feat", "fix", "perf", "refactor"}, + Exclude: []string{"chore", "docs", "style", "test", "ci"}, + }, + } +} + +// applyDefaults fills in default values for any empty fields in the config. +func applyDefaults(cfg *Config) { + defaults := DefaultConfig() + + if cfg.Version == 0 { + cfg.Version = defaults.Version + } + + if len(cfg.Build.Targets) == 0 { + cfg.Build.Targets = defaults.Build.Targets + } + + if len(cfg.Publishers) == 0 { + cfg.Publishers = defaults.Publishers + } + + if len(cfg.Changelog.Include) == 0 && len(cfg.Changelog.Exclude) == 0 { + cfg.Changelog.Include = defaults.Changelog.Include + cfg.Changelog.Exclude = defaults.Changelog.Exclude + } +} + +// SetProjectDir sets the project directory on the config. +func (c *Config) SetProjectDir(dir string) { + c.projectDir = dir +} + +// SetVersion sets the version override on the config. +func (c *Config) SetVersion(version string) { + c.version = version +} + +// ConfigPath returns the path to the release config file for a given directory. +func ConfigPath(dir string) string { + return filepath.Join(dir, ConfigDir, ConfigFileName) +} + +// ConfigExists checks if a release config file exists in the given directory. +func ConfigExists(dir string) bool { + configPath := ConfigPath(dir) + absPath, err := filepath.Abs(configPath) + if err != nil { + return false + } + return io.Local.IsFile(absPath) +} + +// GetRepository returns the repository from the config. 
+func (c *Config) GetRepository() string { + return c.Project.Repository +} + +// GetProjectName returns the project name from the config. +func (c *Config) GetProjectName() string { + return c.Project.Name +} + +// WriteConfig writes the config to the .core/release.yaml file. +func WriteConfig(cfg *Config, dir string) error { + configPath := ConfigPath(dir) + + // Convert to absolute path for io.Local + absPath, err := filepath.Abs(configPath) + if err != nil { + return fmt.Errorf("release.WriteConfig: failed to resolve path: %w", err) + } + + // Ensure directory exists + configDir := filepath.Dir(absPath) + if err := io.Local.EnsureDir(configDir); err != nil { + return fmt.Errorf("release.WriteConfig: failed to create directory: %w", err) + } + + data, err := yaml.Marshal(cfg) + if err != nil { + return fmt.Errorf("release.WriteConfig: failed to marshal config: %w", err) + } + + if err := io.Local.Write(absPath, string(data)); err != nil { + return fmt.Errorf("release.WriteConfig: failed to write config file: %w", err) + } + + return nil +} diff --git a/release/config_test.go b/release/config_test.go new file mode 100644 index 0000000..44f65c0 --- /dev/null +++ b/release/config_test.go @@ -0,0 +1,363 @@ +package release + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// setupConfigTestDir creates a temp directory with optional .core/release.yaml content. 
func setupConfigTestDir(t *testing.T, configContent string) string {
	t.Helper()
	dir := t.TempDir()

	// An empty configContent means "no config file": callers use this to
	// exercise the missing-file fallback paths.
	if configContent != "" {
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		configPath := filepath.Join(coreDir, ConfigFileName)
		err = os.WriteFile(configPath, []byte(configContent), 0644)
		require.NoError(t, err)
	}

	return dir
}

func TestLoadConfig_Good(t *testing.T) {
	t.Run("loads valid config", func(t *testing.T) {
		content := `
version: 1
project:
  name: myapp
  repository: owner/repo
build:
  targets:
    - os: linux
      arch: amd64
    - os: darwin
      arch: arm64
publishers:
  - type: github
    prerelease: true
    draft: false
changelog:
  include:
    - feat
    - fix
  exclude:
    - chore
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		assert.Equal(t, 1, cfg.Version)
		assert.Equal(t, "myapp", cfg.Project.Name)
		assert.Equal(t, "owner/repo", cfg.Project.Repository)
		assert.Len(t, cfg.Build.Targets, 2)
		assert.Equal(t, "linux", cfg.Build.Targets[0].OS)
		assert.Equal(t, "amd64", cfg.Build.Targets[0].Arch)
		assert.Equal(t, "darwin", cfg.Build.Targets[1].OS)
		assert.Equal(t, "arm64", cfg.Build.Targets[1].Arch)
		assert.Len(t, cfg.Publishers, 1)
		assert.Equal(t, "github", cfg.Publishers[0].Type)
		assert.True(t, cfg.Publishers[0].Prerelease)
		assert.False(t, cfg.Publishers[0].Draft)
		assert.Equal(t, []string{"feat", "fix"}, cfg.Changelog.Include)
		assert.Equal(t, []string{"chore"}, cfg.Changelog.Exclude)
	})

	t.Run("returns defaults when config file missing", func(t *testing.T) {
		dir := t.TempDir()

		cfg, err := LoadConfig(dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		defaults := DefaultConfig()
		assert.Equal(t, defaults.Version, cfg.Version)
		assert.Equal(t, defaults.Build.Targets, cfg.Build.Targets)
		assert.Equal(t, defaults.Publishers, cfg.Publishers)
		assert.Equal(t, defaults.Changelog.Include, cfg.Changelog.Include)
		assert.Equal(t, defaults.Changelog.Exclude, cfg.Changelog.Exclude)
	})

	t.Run("applies defaults for missing fields", func(t *testing.T) {
		content := `
version: 2
project:
  name: partial
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		// Explicit values preserved
		assert.Equal(t, 2, cfg.Version)
		assert.Equal(t, "partial", cfg.Project.Name)

		// Defaults applied
		defaults := DefaultConfig()
		assert.Equal(t, defaults.Build.Targets, cfg.Build.Targets)
		assert.Equal(t, defaults.Publishers, cfg.Publishers)
	})

	t.Run("sets project directory on load", func(t *testing.T) {
		dir := setupConfigTestDir(t, "version: 1")

		cfg, err := LoadConfig(dir)
		require.NoError(t, err)
		assert.Equal(t, dir, cfg.projectDir)
	})
}

func TestLoadConfig_Bad(t *testing.T) {
	t.Run("returns error for invalid YAML", func(t *testing.T) {
		// Unclosed flow sequence makes the document unparseable.
		content := `
version: 1
project:
  name: [invalid yaml
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(dir)
		assert.Error(t, err)
		assert.Nil(t, cfg)
		assert.Contains(t, err.Error(), "failed to parse config file")
	})

	t.Run("returns error for unreadable file", func(t *testing.T) {
		dir := t.TempDir()
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		// Create config as a directory instead of file
		configPath := filepath.Join(coreDir, ConfigFileName)
		err = os.Mkdir(configPath, 0755)
		require.NoError(t, err)

		cfg, err := LoadConfig(dir)
		assert.Error(t, err)
		assert.Nil(t, cfg)
		assert.Contains(t, err.Error(), "failed to read config file")
	})
}

func TestDefaultConfig_Good(t *testing.T) {
	t.Run("returns sensible defaults", func(t *testing.T) {
		cfg := DefaultConfig()

		assert.Equal(t, 1, cfg.Version)
		assert.Empty(t, cfg.Project.Name)
		assert.Empty(t, cfg.Project.Repository)

		// Default targets
		assert.Len(t, cfg.Build.Targets, 4)
		hasLinuxAmd64 := false
		hasDarwinArm64 := false
		hasWindowsAmd64 := false
		for _, target := range cfg.Build.Targets {
			if target.OS == "linux" && target.Arch == "amd64" {
				hasLinuxAmd64 = true
			}
			if target.OS == "darwin" && target.Arch == "arm64" {
				hasDarwinArm64 = true
			}
			if target.OS == "windows" && target.Arch == "amd64" {
				hasWindowsAmd64 = true
			}
		}
		assert.True(t, hasLinuxAmd64)
		assert.True(t, hasDarwinArm64)
		assert.True(t, hasWindowsAmd64)

		// Default publisher
		assert.Len(t, cfg.Publishers, 1)
		assert.Equal(t, "github", cfg.Publishers[0].Type)
		assert.False(t, cfg.Publishers[0].Prerelease)
		assert.False(t, cfg.Publishers[0].Draft)

		// Default changelog settings
		assert.Contains(t, cfg.Changelog.Include, "feat")
		assert.Contains(t, cfg.Changelog.Include, "fix")
		assert.Contains(t, cfg.Changelog.Exclude, "chore")
		assert.Contains(t, cfg.Changelog.Exclude, "docs")
	})
}

func TestConfigPath_Good(t *testing.T) {
	t.Run("returns correct path", func(t *testing.T) {
		path := ConfigPath("/project/root")
		assert.Equal(t, "/project/root/.core/release.yaml", path)
	})
}

func TestConfigExists_Good(t *testing.T) {
	t.Run("returns true when config exists", func(t *testing.T) {
		dir := setupConfigTestDir(t, "version: 1")
		assert.True(t, ConfigExists(dir))
	})

	t.Run("returns false when config missing", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, ConfigExists(dir))
	})

	t.Run("returns false when .core dir missing", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, ConfigExists(dir))
	})
}

func TestWriteConfig_Good(t *testing.T) {
	t.Run("writes config to file", func(t *testing.T) {
		dir := t.TempDir()

		cfg := DefaultConfig()
		cfg.Project.Name = "testapp"
		cfg.Project.Repository = "owner/testapp"

		err := WriteConfig(cfg, dir)
		require.NoError(t, err)

		// Verify file exists
		assert.True(t, ConfigExists(dir))

		// Reload and verify
		loaded, err := LoadConfig(dir)
		require.NoError(t, err)
		assert.Equal(t, "testapp", loaded.Project.Name)
		assert.Equal(t, "owner/testapp", loaded.Project.Repository)
	})

	t.Run("creates .core directory if missing", func(t *testing.T) {
		dir := t.TempDir()

		cfg := DefaultConfig()
		err := WriteConfig(cfg, dir)
		require.NoError(t, err)

		// Check directory was created
		coreDir := filepath.Join(dir, ConfigDir)
		info, err := os.Stat(coreDir)
		require.NoError(t, err)
		assert.True(t, info.IsDir())
	})
}

func TestConfig_GetRepository_Good(t *testing.T) {
	t.Run("returns repository", func(t *testing.T) {
		cfg := &Config{
			Project: ProjectConfig{
				Repository: "owner/repo",
			},
		}
		assert.Equal(t, "owner/repo", cfg.GetRepository())
	})

	t.Run("returns empty string when not set", func(t *testing.T) {
		cfg := &Config{}
		assert.Empty(t, cfg.GetRepository())
	})
}

func TestConfig_GetProjectName_Good(t *testing.T) {
	t.Run("returns project name", func(t *testing.T) {
		cfg := &Config{
			Project: ProjectConfig{
				Name: "myapp",
			},
		}
		assert.Equal(t, "myapp", cfg.GetProjectName())
	})

	t.Run("returns empty string when not set", func(t *testing.T) {
		cfg := &Config{}
		assert.Empty(t, cfg.GetProjectName())
	})
}

func TestConfig_SetVersion_Good(t *testing.T) {
	t.Run("sets version override", func(t *testing.T) {
		cfg := &Config{}
		cfg.SetVersion("v1.2.3")
		assert.Equal(t, "v1.2.3", cfg.version)
	})
}

func TestConfig_SetProjectDir_Good(t *testing.T) {
	t.Run("sets project directory", func(t *testing.T) {
		cfg := &Config{}
		cfg.SetProjectDir("/path/to/project")
		assert.Equal(t, "/path/to/project", cfg.projectDir)
	})
}

func TestWriteConfig_Bad(t *testing.T) {
	t.Run("returns error for unwritable directory", func(t *testing.T) {
		if os.Geteuid() == 0 {
			t.Skip("root can write to any directory")
		}
		dir := t.TempDir()

		// Create .core directory and make it unwritable
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		// Make directory read-only
		// 0555 removes the write bit so the file create inside it fails.
		err = os.Chmod(coreDir, 0555)
		require.NoError(t, err)
		defer func() { _ = os.Chmod(coreDir, 0755) }()

		cfg := DefaultConfig()
		err = WriteConfig(cfg, dir)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "failed to write config file")
	})

	t.Run("returns error when directory creation fails", func(t *testing.T) {
		if os.Geteuid() == 0 {
			t.Skip("root can create directories anywhere")
		}
		// Use a path that doesn't exist and can't be created
		cfg := DefaultConfig()
		err := WriteConfig(cfg, "/nonexistent/path/that/cannot/be/created")
		assert.Error(t, err)
	})
}

func TestApplyDefaults_Good(t *testing.T) {
	t.Run("applies version default when zero", func(t *testing.T) {
		cfg := &Config{Version: 0}
		applyDefaults(cfg)
		assert.Equal(t, 1, cfg.Version)
	})

	t.Run("preserves existing version", func(t *testing.T) {
		cfg := &Config{Version: 2}
		applyDefaults(cfg)
		assert.Equal(t, 2, cfg.Version)
	})

	t.Run("applies changelog defaults only when both empty", func(t *testing.T) {
		cfg := &Config{
			Changelog: ChangelogConfig{
				Include: []string{"feat"},
			},
		}
		applyDefaults(cfg)
		// Should not apply defaults because Include is set
		assert.Equal(t, []string{"feat"}, cfg.Changelog.Include)
		assert.Empty(t, cfg.Changelog.Exclude)
	})
}
diff --git a/release/publishers/aur.go b/release/publishers/aur.go
new file mode 100644
index 0000000..3234380
--- /dev/null
+++ b/release/publishers/aur.go
// Package publishers provides release publishing implementations.
package publishers

import (
	"bytes"
	"context"
	"embed"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"text/template"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
)

//go:embed templates/aur/*.tmpl
var aurTemplates embed.FS

// AURConfig holds AUR-specific configuration.
+type AURConfig struct { + // Package is the AUR package name. + Package string + // Maintainer is the package maintainer (e.g., "Name "). + Maintainer string + // Official config for generating files for official repo PRs. + Official *OfficialConfig +} + +// AURPublisher publishes releases to AUR. +type AURPublisher struct{} + +// NewAURPublisher creates a new AUR publisher. +func NewAURPublisher() *AURPublisher { + return &AURPublisher{} +} + +// Name returns the publisher's identifier. +func (p *AURPublisher) Name() string { + return "aur" +} + +// Publish publishes the release to AUR. +func (p *AURPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error { + cfg := p.parseConfig(pubCfg, relCfg) + + if cfg.Maintainer == "" { + return fmt.Errorf("aur.Publish: maintainer is required (set publish.aur.maintainer in config)") + } + + repo := "" + if relCfg != nil { + repo = relCfg.GetRepository() + } + if repo == "" { + detectedRepo, err := detectRepository(release.ProjectDir) + if err != nil { + return fmt.Errorf("aur.Publish: could not determine repository: %w", err) + } + repo = detectedRepo + } + + projectName := "" + if relCfg != nil { + projectName = relCfg.GetProjectName() + } + if projectName == "" { + parts := strings.Split(repo, "/") + projectName = parts[len(parts)-1] + } + + packageName := cfg.Package + if packageName == "" { + packageName = projectName + } + + version := strings.TrimPrefix(release.Version, "v") + checksums := buildChecksumMap(release.Artifacts) + + data := aurTemplateData{ + PackageName: packageName, + Description: fmt.Sprintf("%s CLI", projectName), + Repository: repo, + Version: version, + License: "MIT", + BinaryName: projectName, + Maintainer: cfg.Maintainer, + Checksums: checksums, + } + + if dryRun { + return p.dryRunPublish(release.FS, data, cfg) + } + + return p.executePublish(ctx, release.ProjectDir, data, cfg, release) +} + +type aurTemplateData struct { + 
PackageName string + Description string + Repository string + Version string + License string + BinaryName string + Maintainer string + Checksums ChecksumMap +} + +func (p *AURPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) AURConfig { + cfg := AURConfig{} + + if ext, ok := pubCfg.Extended.(map[string]any); ok { + if pkg, ok := ext["package"].(string); ok && pkg != "" { + cfg.Package = pkg + } + if maintainer, ok := ext["maintainer"].(string); ok && maintainer != "" { + cfg.Maintainer = maintainer + } + if official, ok := ext["official"].(map[string]any); ok { + cfg.Official = &OfficialConfig{} + if enabled, ok := official["enabled"].(bool); ok { + cfg.Official.Enabled = enabled + } + if output, ok := official["output"].(string); ok { + cfg.Official.Output = output + } + } + } + + return cfg +} + +func (p *AURPublisher) dryRunPublish(m io.Medium, data aurTemplateData, cfg AURConfig) error { + fmt.Println() + fmt.Println("=== DRY RUN: AUR Publish ===") + fmt.Println() + fmt.Printf("Package: %s-bin\n", data.PackageName) + fmt.Printf("Version: %s\n", data.Version) + fmt.Printf("Maintainer: %s\n", data.Maintainer) + fmt.Printf("Repository: %s\n", data.Repository) + fmt.Println() + + pkgbuild, err := p.renderTemplate(m, "templates/aur/PKGBUILD.tmpl", data) + if err != nil { + return fmt.Errorf("aur.dryRunPublish: %w", err) + } + fmt.Println("Generated PKGBUILD:") + fmt.Println("---") + fmt.Println(pkgbuild) + fmt.Println("---") + fmt.Println() + + srcinfo, err := p.renderTemplate(m, "templates/aur/.SRCINFO.tmpl", data) + if err != nil { + return fmt.Errorf("aur.dryRunPublish: %w", err) + } + fmt.Println("Generated .SRCINFO:") + fmt.Println("---") + fmt.Println(srcinfo) + fmt.Println("---") + fmt.Println() + + fmt.Printf("Would push to AUR: ssh://aur@aur.archlinux.org/%s-bin.git\n", data.PackageName) + fmt.Println() + fmt.Println("=== END DRY RUN ===") + + return nil +} + +func (p *AURPublisher) executePublish(ctx context.Context, projectDir string, 
data aurTemplateData, cfg AURConfig, release *Release) error { + pkgbuild, err := p.renderTemplate(release.FS, "templates/aur/PKGBUILD.tmpl", data) + if err != nil { + return fmt.Errorf("aur.Publish: failed to render PKGBUILD: %w", err) + } + + srcinfo, err := p.renderTemplate(release.FS, "templates/aur/.SRCINFO.tmpl", data) + if err != nil { + return fmt.Errorf("aur.Publish: failed to render .SRCINFO: %w", err) + } + + // If official config is enabled, write to output directory + if cfg.Official != nil && cfg.Official.Enabled { + output := cfg.Official.Output + if output == "" { + output = filepath.Join(projectDir, "dist", "aur") + } else if !filepath.IsAbs(output) { + output = filepath.Join(projectDir, output) + } + + if err := release.FS.EnsureDir(output); err != nil { + return fmt.Errorf("aur.Publish: failed to create output directory: %w", err) + } + + pkgbuildPath := filepath.Join(output, "PKGBUILD") + if err := release.FS.Write(pkgbuildPath, pkgbuild); err != nil { + return fmt.Errorf("aur.Publish: failed to write PKGBUILD: %w", err) + } + + srcinfoPath := filepath.Join(output, ".SRCINFO") + if err := release.FS.Write(srcinfoPath, srcinfo); err != nil { + return fmt.Errorf("aur.Publish: failed to write .SRCINFO: %w", err) + } + fmt.Printf("Wrote AUR files: %s\n", output) + } + + // Push to AUR if not in official-only mode + if cfg.Official == nil || !cfg.Official.Enabled { + if err := p.pushToAUR(ctx, data, pkgbuild, srcinfo); err != nil { + return err + } + } + + return nil +} + +func (p *AURPublisher) pushToAUR(ctx context.Context, data aurTemplateData, pkgbuild, srcinfo string) error { + aurURL := fmt.Sprintf("ssh://aur@aur.archlinux.org/%s-bin.git", data.PackageName) + + tmpDir, err := os.MkdirTemp("", "aur-package-*") + if err != nil { + return fmt.Errorf("aur.Publish: failed to create temp directory: %w", err) + } + defer func() { _ = os.RemoveAll(tmpDir) }() + + // Clone existing AUR repo (or initialize new one) + fmt.Printf("Cloning AUR package 
%s-bin...\n", data.PackageName) + cmd := exec.CommandContext(ctx, "git", "clone", aurURL, tmpDir) + if err := cmd.Run(); err != nil { + // If clone fails, init a new repo + cmd = exec.CommandContext(ctx, "git", "init", tmpDir) + if err := cmd.Run(); err != nil { + return fmt.Errorf("aur.Publish: failed to initialize repo: %w", err) + } + cmd = exec.CommandContext(ctx, "git", "-C", tmpDir, "remote", "add", "origin", aurURL) + if err := cmd.Run(); err != nil { + return fmt.Errorf("aur.Publish: failed to add remote: %w", err) + } + } + + // Write files + if err := os.WriteFile(filepath.Join(tmpDir, "PKGBUILD"), []byte(pkgbuild), 0644); err != nil { + return fmt.Errorf("aur.Publish: failed to write PKGBUILD: %w", err) + } + if err := os.WriteFile(filepath.Join(tmpDir, ".SRCINFO"), []byte(srcinfo), 0644); err != nil { + return fmt.Errorf("aur.Publish: failed to write .SRCINFO: %w", err) + } + + commitMsg := fmt.Sprintf("Update to %s", data.Version) + + cmd = exec.CommandContext(ctx, "git", "add", ".") + cmd.Dir = tmpDir + if err := cmd.Run(); err != nil { + return fmt.Errorf("aur.Publish: git add failed: %w", err) + } + + cmd = exec.CommandContext(ctx, "git", "commit", "-m", commitMsg) + cmd.Dir = tmpDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("aur.Publish: git commit failed: %w", err) + } + + cmd = exec.CommandContext(ctx, "git", "push", "origin", "master") + cmd.Dir = tmpDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("aur.Publish: git push failed: %w", err) + } + + fmt.Printf("Published to AUR: https://aur.archlinux.org/packages/%s-bin\n", data.PackageName) + return nil +} + +func (p *AURPublisher) renderTemplate(m io.Medium, name string, data aurTemplateData) (string, error) { + var content []byte + var err error + + // Try custom template from medium + customPath := filepath.Join(".core", name) + if m != nil && m.IsFile(customPath) { + 
customContent, err := m.Read(customPath) + if err == nil { + content = []byte(customContent) + } + } + + // Fallback to embedded template + if content == nil { + content, err = aurTemplates.ReadFile(name) + if err != nil { + return "", fmt.Errorf("failed to read template %s: %w", name, err) + } + } + + tmpl, err := template.New(filepath.Base(name)).Parse(string(content)) + if err != nil { + return "", fmt.Errorf("failed to parse template %s: %w", name, err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return "", fmt.Errorf("failed to execute template %s: %w", name, err) + } + + return buf.String(), nil +} + +// Ensure build package is used +var _ = build.Artifact{} diff --git a/release/publishers/aur_test.go b/release/publishers/aur_test.go new file mode 100644 index 0000000..8355ba6 --- /dev/null +++ b/release/publishers/aur_test.go @@ -0,0 +1,226 @@ +package publishers + +import ( + "bytes" + "context" + "os" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAURPublisher_Name_Good(t *testing.T) { + t.Run("returns aur", func(t *testing.T) { + p := NewAURPublisher() + assert.Equal(t, "aur", p.Name()) + }) +} + +func TestAURPublisher_ParseConfig_Good(t *testing.T) { + p := NewAURPublisher() + + t.Run("uses defaults when no extended config", func(t *testing.T) { + pubCfg := PublisherConfig{Type: "aur"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Package) + assert.Empty(t, cfg.Maintainer) + assert.Nil(t, cfg.Official) + }) + + t.Run("parses package and maintainer from extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "aur", + Extended: map[string]any{ + "package": "mypackage", + "maintainer": "John Doe ", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Equal(t, "mypackage", 
cfg.Package) + assert.Equal(t, "John Doe ", cfg.Maintainer) + }) + + t.Run("parses official config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "aur", + Extended: map[string]any{ + "official": map[string]any{ + "enabled": true, + "output": "dist/aur-files", + }, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + require.NotNil(t, cfg.Official) + assert.True(t, cfg.Official.Enabled) + assert.Equal(t, "dist/aur-files", cfg.Official.Output) + }) + + t.Run("handles missing official fields", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "aur", + Extended: map[string]any{ + "official": map[string]any{}, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + require.NotNil(t, cfg.Official) + assert.False(t, cfg.Official.Enabled) + assert.Empty(t, cfg.Official.Output) + }) +} + +func TestAURPublisher_RenderTemplate_Good(t *testing.T) { + p := NewAURPublisher() + + t.Run("renders PKGBUILD template with data", func(t *testing.T) { + data := aurTemplateData{ + PackageName: "myapp", + Description: "My awesome CLI", + Repository: "owner/myapp", + Version: "1.2.3", + License: "MIT", + BinaryName: "myapp", + Maintainer: "John Doe ", + Checksums: ChecksumMap{ + LinuxAmd64: "abc123", + LinuxArm64: "def456", + }, + } + + result, err := p.renderTemplate(io.Local, "templates/aur/PKGBUILD.tmpl", data) + require.NoError(t, err) + + assert.Contains(t, result, "# Maintainer: John Doe ") + assert.Contains(t, result, "pkgname=myapp-bin") + assert.Contains(t, result, "pkgver=1.2.3") + assert.Contains(t, result, `pkgdesc="My awesome CLI"`) + assert.Contains(t, result, "url=\"https://github.com/owner/myapp\"") + assert.Contains(t, result, "license=('MIT')") + assert.Contains(t, result, "sha256sums_x86_64=('abc123')") + assert.Contains(t, result, "sha256sums_aarch64=('def456')") + }) + + t.Run("renders .SRCINFO template with data", func(t *testing.T) { + 
data := aurTemplateData{ + PackageName: "myapp", + Description: "My CLI", + Repository: "owner/myapp", + Version: "1.0.0", + License: "MIT", + BinaryName: "myapp", + Maintainer: "Test ", + Checksums: ChecksumMap{ + LinuxAmd64: "checksum1", + LinuxArm64: "checksum2", + }, + } + + result, err := p.renderTemplate(io.Local, "templates/aur/.SRCINFO.tmpl", data) + require.NoError(t, err) + + assert.Contains(t, result, "pkgbase = myapp-bin") + assert.Contains(t, result, "pkgdesc = My CLI") + assert.Contains(t, result, "pkgver = 1.0.0") + assert.Contains(t, result, "arch = x86_64") + assert.Contains(t, result, "arch = aarch64") + assert.Contains(t, result, "sha256sums_x86_64 = checksum1") + assert.Contains(t, result, "sha256sums_aarch64 = checksum2") + assert.Contains(t, result, "pkgname = myapp-bin") + }) +} + +func TestAURPublisher_RenderTemplate_Bad(t *testing.T) { + p := NewAURPublisher() + + t.Run("returns error for non-existent template", func(t *testing.T) { + data := aurTemplateData{} + _, err := p.renderTemplate(io.Local, "templates/aur/nonexistent.tmpl", data) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to read template") + }) +} + +func TestAURPublisher_DryRunPublish_Good(t *testing.T) { + p := NewAURPublisher() + + t.Run("outputs expected dry run information", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + data := aurTemplateData{ + PackageName: "myapp", + Version: "1.0.0", + Maintainer: "John Doe ", + Repository: "owner/repo", + BinaryName: "myapp", + Checksums: ChecksumMap{}, + } + cfg := AURConfig{ + Maintainer: "John Doe ", + } + + err := p.dryRunPublish(io.Local, data, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "DRY RUN: AUR Publish") + assert.Contains(t, output, "Package: myapp-bin") + assert.Contains(t, output, "Version: 1.0.0") + assert.Contains(t, output, 
"Maintainer: John Doe ") + assert.Contains(t, output, "Repository: owner/repo") + assert.Contains(t, output, "Generated PKGBUILD:") + assert.Contains(t, output, "Generated .SRCINFO:") + assert.Contains(t, output, "Would push to AUR: ssh://aur@aur.archlinux.org/myapp-bin.git") + assert.Contains(t, output, "END DRY RUN") + }) +} + +func TestAURPublisher_Publish_Bad(t *testing.T) { + p := NewAURPublisher() + + t.Run("fails when maintainer not configured", func(t *testing.T) { + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/project", + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "aur"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err := p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "maintainer is required") + }) +} + +func TestAURConfig_Defaults_Good(t *testing.T) { + t.Run("has sensible defaults", func(t *testing.T) { + p := NewAURPublisher() + pubCfg := PublisherConfig{Type: "aur"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Package) + assert.Empty(t, cfg.Maintainer) + assert.Nil(t, cfg.Official) + }) +} diff --git a/release/publishers/chocolatey.go b/release/publishers/chocolatey.go new file mode 100644 index 0000000..9fb196a --- /dev/null +++ b/release/publishers/chocolatey.go @@ -0,0 +1,294 @@ +// Package publishers provides release publishing implementations. +package publishers + +import ( + "bytes" + "context" + "embed" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "text/template" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" +) + +//go:embed templates/chocolatey/*.tmpl templates/chocolatey/tools/*.tmpl +var chocolateyTemplates embed.FS + +// ChocolateyConfig holds Chocolatey-specific configuration. +type ChocolateyConfig struct { + // Package is the Chocolatey package name. 
+ Package string + // Push determines whether to push to Chocolatey (false = generate only). + Push bool + // Official config for generating files for official repo PRs. + Official *OfficialConfig +} + +// ChocolateyPublisher publishes releases to Chocolatey. +type ChocolateyPublisher struct{} + +// NewChocolateyPublisher creates a new Chocolatey publisher. +func NewChocolateyPublisher() *ChocolateyPublisher { + return &ChocolateyPublisher{} +} + +// Name returns the publisher's identifier. +func (p *ChocolateyPublisher) Name() string { + return "chocolatey" +} + +// Publish publishes the release to Chocolatey. +func (p *ChocolateyPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error { + cfg := p.parseConfig(pubCfg, relCfg) + + repo := "" + if relCfg != nil { + repo = relCfg.GetRepository() + } + if repo == "" { + detectedRepo, err := detectRepository(release.ProjectDir) + if err != nil { + return fmt.Errorf("chocolatey.Publish: could not determine repository: %w", err) + } + repo = detectedRepo + } + + projectName := "" + if relCfg != nil { + projectName = relCfg.GetProjectName() + } + if projectName == "" { + parts := strings.Split(repo, "/") + projectName = parts[len(parts)-1] + } + + packageName := cfg.Package + if packageName == "" { + packageName = projectName + } + + version := strings.TrimPrefix(release.Version, "v") + checksums := buildChecksumMap(release.Artifacts) + + // Extract authors from repository + authors := strings.Split(repo, "/")[0] + + data := chocolateyTemplateData{ + PackageName: packageName, + Title: fmt.Sprintf("%s CLI", i18n.Title(projectName)), + Description: fmt.Sprintf("%s CLI", projectName), + Repository: repo, + Version: version, + License: "MIT", + BinaryName: projectName, + Authors: authors, + Tags: fmt.Sprintf("cli %s", projectName), + Checksums: checksums, + } + + if dryRun { + return p.dryRunPublish(release.FS, data, cfg) + } + + return p.executePublish(ctx, 
release.ProjectDir, data, cfg, release) +} + +type chocolateyTemplateData struct { + PackageName string + Title string + Description string + Repository string + Version string + License string + BinaryName string + Authors string + Tags string + Checksums ChecksumMap +} + +func (p *ChocolateyPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) ChocolateyConfig { + cfg := ChocolateyConfig{ + Push: false, // Default to generate only + } + + if ext, ok := pubCfg.Extended.(map[string]any); ok { + if pkg, ok := ext["package"].(string); ok && pkg != "" { + cfg.Package = pkg + } + if push, ok := ext["push"].(bool); ok { + cfg.Push = push + } + if official, ok := ext["official"].(map[string]any); ok { + cfg.Official = &OfficialConfig{} + if enabled, ok := official["enabled"].(bool); ok { + cfg.Official.Enabled = enabled + } + if output, ok := official["output"].(string); ok { + cfg.Official.Output = output + } + } + } + + return cfg +} + +func (p *ChocolateyPublisher) dryRunPublish(m io.Medium, data chocolateyTemplateData, cfg ChocolateyConfig) error { + fmt.Println() + fmt.Println("=== DRY RUN: Chocolatey Publish ===") + fmt.Println() + fmt.Printf("Package: %s\n", data.PackageName) + fmt.Printf("Version: %s\n", data.Version) + fmt.Printf("Push: %t\n", cfg.Push) + fmt.Printf("Repository: %s\n", data.Repository) + fmt.Println() + + nuspec, err := p.renderTemplate(m, "templates/chocolatey/package.nuspec.tmpl", data) + if err != nil { + return fmt.Errorf("chocolatey.dryRunPublish: %w", err) + } + fmt.Println("Generated package.nuspec:") + fmt.Println("---") + fmt.Println(nuspec) + fmt.Println("---") + fmt.Println() + + install, err := p.renderTemplate(m, "templates/chocolatey/tools/chocolateyinstall.ps1.tmpl", data) + if err != nil { + return fmt.Errorf("chocolatey.dryRunPublish: %w", err) + } + fmt.Println("Generated chocolateyinstall.ps1:") + fmt.Println("---") + fmt.Println(install) + fmt.Println("---") + fmt.Println() + + if cfg.Push { + 
fmt.Println("Would push to Chocolatey community repo") + } else { + fmt.Println("Would generate package files only (push=false)") + } + fmt.Println() + fmt.Println("=== END DRY RUN ===") + + return nil +} + +func (p *ChocolateyPublisher) executePublish(ctx context.Context, projectDir string, data chocolateyTemplateData, cfg ChocolateyConfig, release *Release) error { + nuspec, err := p.renderTemplate(release.FS, "templates/chocolatey/package.nuspec.tmpl", data) + if err != nil { + return fmt.Errorf("chocolatey.Publish: failed to render nuspec: %w", err) + } + + install, err := p.renderTemplate(release.FS, "templates/chocolatey/tools/chocolateyinstall.ps1.tmpl", data) + if err != nil { + return fmt.Errorf("chocolatey.Publish: failed to render install script: %w", err) + } + + // Create package directory + output := filepath.Join(projectDir, "dist", "chocolatey") + if cfg.Official != nil && cfg.Official.Enabled && cfg.Official.Output != "" { + output = cfg.Official.Output + if !filepath.IsAbs(output) { + output = filepath.Join(projectDir, output) + } + } + + toolsDir := filepath.Join(output, "tools") + if err := release.FS.EnsureDir(toolsDir); err != nil { + return fmt.Errorf("chocolatey.Publish: failed to create output directory: %w", err) + } + + // Write files + nuspecPath := filepath.Join(output, fmt.Sprintf("%s.nuspec", data.PackageName)) + if err := release.FS.Write(nuspecPath, nuspec); err != nil { + return fmt.Errorf("chocolatey.Publish: failed to write nuspec: %w", err) + } + + installPath := filepath.Join(toolsDir, "chocolateyinstall.ps1") + if err := release.FS.Write(installPath, install); err != nil { + return fmt.Errorf("chocolatey.Publish: failed to write install script: %w", err) + } + + fmt.Printf("Wrote Chocolatey package files: %s\n", output) + + // Push to Chocolatey if configured + if cfg.Push { + if err := p.pushToChocolatey(ctx, output, data); err != nil { + return err + } + } + + return nil +} + +func (p *ChocolateyPublisher) 
pushToChocolatey(ctx context.Context, packageDir string, data chocolateyTemplateData) error { + // Check for CHOCOLATEY_API_KEY + apiKey := os.Getenv("CHOCOLATEY_API_KEY") + if apiKey == "" { + return fmt.Errorf("chocolatey.Publish: CHOCOLATEY_API_KEY environment variable is required for push") + } + + // Pack the package + nupkgPath := filepath.Join(packageDir, fmt.Sprintf("%s.%s.nupkg", data.PackageName, data.Version)) + + cmd := exec.CommandContext(ctx, "choco", "pack", filepath.Join(packageDir, fmt.Sprintf("%s.nuspec", data.PackageName)), "-OutputDirectory", packageDir) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("chocolatey.Publish: choco pack failed: %w", err) + } + + // Push the package + cmd = exec.CommandContext(ctx, "choco", "push", nupkgPath, "--source", "https://push.chocolatey.org/", "--api-key", apiKey) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("chocolatey.Publish: choco push failed: %w", err) + } + + fmt.Printf("Published to Chocolatey: https://community.chocolatey.org/packages/%s\n", data.PackageName) + return nil +} + +func (p *ChocolateyPublisher) renderTemplate(m io.Medium, name string, data chocolateyTemplateData) (string, error) { + var content []byte + var err error + + // Try custom template from medium + customPath := filepath.Join(".core", name) + if m != nil && m.IsFile(customPath) { + customContent, err := m.Read(customPath) + if err == nil { + content = []byte(customContent) + } + } + + // Fallback to embedded template + if content == nil { + content, err = chocolateyTemplates.ReadFile(name) + if err != nil { + return "", fmt.Errorf("failed to read template %s: %w", name, err) + } + } + + tmpl, err := template.New(filepath.Base(name)).Parse(string(content)) + if err != nil { + return "", fmt.Errorf("failed to parse template %s: %w", name, err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err 
!= nil { + return "", fmt.Errorf("failed to execute template %s: %w", name, err) + } + + return buf.String(), nil +} + +// Ensure build package is used +var _ = build.Artifact{} diff --git a/release/publishers/chocolatey_test.go b/release/publishers/chocolatey_test.go new file mode 100644 index 0000000..144907d --- /dev/null +++ b/release/publishers/chocolatey_test.go @@ -0,0 +1,323 @@ +package publishers + +import ( + "bytes" + "context" + "os" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestChocolateyPublisher_Name_Good(t *testing.T) { + t.Run("returns chocolatey", func(t *testing.T) { + p := NewChocolateyPublisher() + assert.Equal(t, "chocolatey", p.Name()) + }) +} + +func TestChocolateyPublisher_ParseConfig_Good(t *testing.T) { + p := NewChocolateyPublisher() + + t.Run("uses defaults when no extended config", func(t *testing.T) { + pubCfg := PublisherConfig{Type: "chocolatey"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Package) + assert.False(t, cfg.Push) + assert.Nil(t, cfg.Official) + }) + + t.Run("parses package and push from extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "chocolatey", + Extended: map[string]any{ + "package": "mypackage", + "push": true, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Equal(t, "mypackage", cfg.Package) + assert.True(t, cfg.Push) + }) + + t.Run("parses official config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "chocolatey", + Extended: map[string]any{ + "official": map[string]any{ + "enabled": true, + "output": "dist/choco", + }, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + require.NotNil(t, cfg.Official) + assert.True(t, cfg.Official.Enabled) + assert.Equal(t, "dist/choco", 
cfg.Official.Output) + }) + + t.Run("handles missing official fields", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "chocolatey", + Extended: map[string]any{ + "official": map[string]any{}, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + require.NotNil(t, cfg.Official) + assert.False(t, cfg.Official.Enabled) + assert.Empty(t, cfg.Official.Output) + }) + + t.Run("handles nil extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "chocolatey", + Extended: nil, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Package) + assert.False(t, cfg.Push) + assert.Nil(t, cfg.Official) + }) + + t.Run("defaults push to false when not specified", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "chocolatey", + Extended: map[string]any{ + "package": "mypackage", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.False(t, cfg.Push) + }) +} + +func TestChocolateyPublisher_RenderTemplate_Good(t *testing.T) { + p := NewChocolateyPublisher() + + t.Run("renders nuspec template with data", func(t *testing.T) { + data := chocolateyTemplateData{ + PackageName: "myapp", + Title: "MyApp CLI", + Description: "My awesome CLI", + Repository: "owner/myapp", + Version: "1.2.3", + License: "MIT", + BinaryName: "myapp", + Authors: "owner", + Tags: "cli myapp", + Checksums: ChecksumMap{}, + } + + result, err := p.renderTemplate(io.Local, "templates/chocolatey/package.nuspec.tmpl", data) + require.NoError(t, err) + + assert.Contains(t, result, `myapp`) + assert.Contains(t, result, `1.2.3`) + assert.Contains(t, result, `MyApp CLI`) + assert.Contains(t, result, `owner`) + assert.Contains(t, result, `My awesome CLI`) + assert.Contains(t, result, `cli myapp`) + assert.Contains(t, result, "projectUrl>https://github.com/owner/myapp") + assert.Contains(t, result, 
"releaseNotes>https://github.com/owner/myapp/releases/tag/v1.2.3") + }) + + t.Run("renders install script template with data", func(t *testing.T) { + data := chocolateyTemplateData{ + PackageName: "myapp", + Repository: "owner/myapp", + Version: "1.2.3", + BinaryName: "myapp", + Checksums: ChecksumMap{ + WindowsAmd64: "abc123def456", + }, + } + + result, err := p.renderTemplate(io.Local, "templates/chocolatey/tools/chocolateyinstall.ps1.tmpl", data) + require.NoError(t, err) + + assert.Contains(t, result, "$ErrorActionPreference = 'Stop'") + assert.Contains(t, result, "https://github.com/owner/myapp/releases/download/v1.2.3/myapp-windows-amd64.zip") + assert.Contains(t, result, "packageName = 'myapp'") + assert.Contains(t, result, "checksum64 = 'abc123def456'") + assert.Contains(t, result, "checksumType64 = 'sha256'") + assert.Contains(t, result, "Install-ChocolateyZipPackage") + }) +} + +func TestChocolateyPublisher_RenderTemplate_Bad(t *testing.T) { + p := NewChocolateyPublisher() + + t.Run("returns error for non-existent template", func(t *testing.T) { + data := chocolateyTemplateData{} + _, err := p.renderTemplate(io.Local, "templates/chocolatey/nonexistent.tmpl", data) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to read template") + }) +} + +func TestChocolateyPublisher_DryRunPublish_Good(t *testing.T) { + p := NewChocolateyPublisher() + + t.Run("outputs expected dry run information", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + data := chocolateyTemplateData{ + PackageName: "myapp", + Version: "1.0.0", + Repository: "owner/repo", + BinaryName: "myapp", + Authors: "owner", + Tags: "cli myapp", + Checksums: ChecksumMap{}, + } + cfg := ChocolateyConfig{ + Push: false, + } + + err := p.dryRunPublish(io.Local, data, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "DRY 
RUN: Chocolatey Publish") + assert.Contains(t, output, "Package: myapp") + assert.Contains(t, output, "Version: 1.0.0") + assert.Contains(t, output, "Push: false") + assert.Contains(t, output, "Repository: owner/repo") + assert.Contains(t, output, "Generated package.nuspec:") + assert.Contains(t, output, "Generated chocolateyinstall.ps1:") + assert.Contains(t, output, "Would generate package files only (push=false)") + assert.Contains(t, output, "END DRY RUN") + }) + + t.Run("shows push message when push is enabled", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + data := chocolateyTemplateData{ + PackageName: "myapp", + Version: "1.0.0", + BinaryName: "myapp", + Authors: "owner", + Tags: "cli", + Checksums: ChecksumMap{}, + } + cfg := ChocolateyConfig{ + Push: true, + } + + err := p.dryRunPublish(io.Local, data, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "Push: true") + assert.Contains(t, output, "Would push to Chocolatey community repo") + }) +} + +func TestChocolateyPublisher_ExecutePublish_Bad(t *testing.T) { + p := NewChocolateyPublisher() + + t.Run("fails when CHOCOLATEY_API_KEY not set for push", func(t *testing.T) { + // Ensure CHOCOLATEY_API_KEY is not set + oldKey := os.Getenv("CHOCOLATEY_API_KEY") + _ = os.Unsetenv("CHOCOLATEY_API_KEY") + defer func() { + if oldKey != "" { + _ = os.Setenv("CHOCOLATEY_API_KEY", oldKey) + } + }() + + // Create a temp directory for the test + tmpDir, err := os.MkdirTemp("", "choco-test-*") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + data := chocolateyTemplateData{ + PackageName: "testpkg", + Version: "1.0.0", + BinaryName: "testpkg", + Repository: "owner/repo", + Authors: "owner", + Tags: "cli", + Checksums: ChecksumMap{}, + } + + err = p.pushToChocolatey(context.TODO(), tmpDir, data) + assert.Error(t, err) + 
assert.Contains(t, err.Error(), "CHOCOLATEY_API_KEY environment variable is required") + }) +} + +func TestChocolateyConfig_Defaults_Good(t *testing.T) { + t.Run("has sensible defaults", func(t *testing.T) { + p := NewChocolateyPublisher() + pubCfg := PublisherConfig{Type: "chocolatey"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Package) + assert.False(t, cfg.Push) + assert.Nil(t, cfg.Official) + }) +} + +func TestChocolateyTemplateData_Good(t *testing.T) { + t.Run("struct has all expected fields", func(t *testing.T) { + data := chocolateyTemplateData{ + PackageName: "myapp", + Title: "MyApp CLI", + Description: "description", + Repository: "org/repo", + Version: "1.0.0", + License: "MIT", + BinaryName: "myapp", + Authors: "org", + Tags: "cli tool", + Checksums: ChecksumMap{ + WindowsAmd64: "hash1", + }, + } + + assert.Equal(t, "myapp", data.PackageName) + assert.Equal(t, "MyApp CLI", data.Title) + assert.Equal(t, "description", data.Description) + assert.Equal(t, "org/repo", data.Repository) + assert.Equal(t, "1.0.0", data.Version) + assert.Equal(t, "MIT", data.License) + assert.Equal(t, "myapp", data.BinaryName) + assert.Equal(t, "org", data.Authors) + assert.Equal(t, "cli tool", data.Tags) + assert.Equal(t, "hash1", data.Checksums.WindowsAmd64) + }) +} diff --git a/release/publishers/docker.go b/release/publishers/docker.go new file mode 100644 index 0000000..981d442 --- /dev/null +++ b/release/publishers/docker.go @@ -0,0 +1,278 @@ +// Package publishers provides release publishing implementations. +package publishers + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" +) + +// DockerConfig holds configuration for the Docker publisher. +type DockerConfig struct { + // Registry is the container registry (default: ghcr.io). + Registry string `yaml:"registry"` + // Image is the image name in owner/repo format. 
+ Image string `yaml:"image"` + // Dockerfile is the path to the Dockerfile (default: Dockerfile). + Dockerfile string `yaml:"dockerfile"` + // Platforms are the target platforms (linux/amd64, linux/arm64). + Platforms []string `yaml:"platforms"` + // Tags are additional tags to apply (supports {{.Version}} template). + Tags []string `yaml:"tags"` + // BuildArgs are additional build arguments. + BuildArgs map[string]string `yaml:"build_args"` +} + +// DockerPublisher builds and publishes Docker images. +type DockerPublisher struct{} + +// NewDockerPublisher creates a new Docker publisher. +func NewDockerPublisher() *DockerPublisher { + return &DockerPublisher{} +} + +// Name returns the publisher's identifier. +func (p *DockerPublisher) Name() string { + return "docker" +} + +// Publish builds and pushes Docker images. +func (p *DockerPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error { + // Validate docker CLI is available + if err := validateDockerCli(); err != nil { + return err + } + + // Parse Docker-specific config from publisher config + dockerCfg := p.parseConfig(pubCfg, relCfg, release.ProjectDir) + + // Validate Dockerfile exists + if !release.FS.Exists(dockerCfg.Dockerfile) { + return fmt.Errorf("docker.Publish: Dockerfile not found: %s", dockerCfg.Dockerfile) + } + + if dryRun { + return p.dryRunPublish(release, dockerCfg) + } + + return p.executePublish(ctx, release, dockerCfg) +} + +// parseConfig extracts Docker-specific configuration. 
+func (p *DockerPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig, projectDir string) DockerConfig { + cfg := DockerConfig{ + Registry: "ghcr.io", + Image: "", + Dockerfile: filepath.Join(projectDir, "Dockerfile"), + Platforms: []string{"linux/amd64", "linux/arm64"}, + Tags: []string{"latest", "{{.Version}}"}, + BuildArgs: make(map[string]string), + } + + // Try to get image from repository config + if relCfg != nil && relCfg.GetRepository() != "" { + cfg.Image = relCfg.GetRepository() + } + + // Override from extended config if present + if ext, ok := pubCfg.Extended.(map[string]any); ok { + if registry, ok := ext["registry"].(string); ok && registry != "" { + cfg.Registry = registry + } + if image, ok := ext["image"].(string); ok && image != "" { + cfg.Image = image + } + if dockerfile, ok := ext["dockerfile"].(string); ok && dockerfile != "" { + if filepath.IsAbs(dockerfile) { + cfg.Dockerfile = dockerfile + } else { + cfg.Dockerfile = filepath.Join(projectDir, dockerfile) + } + } + if platforms, ok := ext["platforms"].([]any); ok && len(platforms) > 0 { + cfg.Platforms = make([]string, 0, len(platforms)) + for _, plat := range platforms { + if s, ok := plat.(string); ok { + cfg.Platforms = append(cfg.Platforms, s) + } + } + } + if tags, ok := ext["tags"].([]any); ok && len(tags) > 0 { + cfg.Tags = make([]string, 0, len(tags)) + for _, tag := range tags { + if s, ok := tag.(string); ok { + cfg.Tags = append(cfg.Tags, s) + } + } + } + if buildArgs, ok := ext["build_args"].(map[string]any); ok { + for k, v := range buildArgs { + if s, ok := v.(string); ok { + cfg.BuildArgs[k] = s + } + } + } + } + + return cfg +} + +// dryRunPublish shows what would be done without actually building. 
func (p *DockerPublisher) dryRunPublish(release *Release, cfg DockerConfig) error {
	// Header plus the fully-resolved configuration, so the user can verify
	// the effective settings before a real run.
	fmt.Println()
	fmt.Println("=== DRY RUN: Docker Build & Push ===")
	fmt.Println()
	fmt.Printf("Version: %s\n", release.Version)
	fmt.Printf("Registry: %s\n", cfg.Registry)
	fmt.Printf("Image: %s\n", cfg.Image)
	fmt.Printf("Dockerfile: %s\n", cfg.Dockerfile)
	fmt.Printf("Platforms: %s\n", strings.Join(cfg.Platforms, ", "))
	fmt.Println()

	// Resolve tags ({{.Version}} placeholders expanded) and show each one
	// fully qualified with the registry, exactly as it would be pushed.
	tags := p.resolveTags(cfg.Tags, release.Version)
	fmt.Println("Tags to be applied:")
	for _, tag := range tags {
		fullTag := p.buildFullTag(cfg.Registry, cfg.Image, tag)
		fmt.Printf(" - %s\n", fullTag)
	}
	fmt.Println()

	// Show the exact `docker buildx build` invocation executePublish would run.
	fmt.Println("Would execute command:")
	args := p.buildBuildxArgs(cfg, tags, release.Version)
	fmt.Printf(" docker %s\n", strings.Join(args, " "))

	// Build args are shown un-expanded here (raw configured values).
	if len(cfg.BuildArgs) > 0 {
		fmt.Println()
		fmt.Println("Build arguments:")
		for k, v := range cfg.BuildArgs {
			fmt.Printf(" %s=%s\n", k, v)
		}
	}

	fmt.Println()
	fmt.Println("=== END DRY RUN ===")

	return nil
}

// executePublish builds and pushes Docker images.
//
// It runs a single `docker buildx build --push` in the project directory,
// streaming docker's output to the current process's stdout/stderr.
func (p *DockerPublisher) executePublish(ctx context.Context, release *Release, cfg DockerConfig) error {
	// Ensure buildx is available and builder is set up
	if err := p.ensureBuildx(ctx); err != nil {
		return err
	}

	// Resolve tags
	tags := p.resolveTags(cfg.Tags, release.Version)

	// Build the docker buildx command
	args := p.buildBuildxArgs(cfg, tags, release.Version)

	cmd := exec.CommandContext(ctx, "docker", args...)
	cmd.Dir = release.ProjectDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	fmt.Printf("Building and pushing Docker image: %s\n", cfg.Image)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("docker.Publish: buildx build failed: %w", err)
	}

	return nil
}

// resolveTags expands template variables in tags.
+func (p *DockerPublisher) resolveTags(tags []string, version string) []string { + resolved := make([]string, 0, len(tags)) + for _, tag := range tags { + // Replace {{.Version}} with actual version + resolvedTag := strings.ReplaceAll(tag, "{{.Version}}", version) + // Also support simpler {{Version}} syntax + resolvedTag = strings.ReplaceAll(resolvedTag, "{{Version}}", version) + resolved = append(resolved, resolvedTag) + } + return resolved +} + +// buildFullTag builds the full image tag including registry. +func (p *DockerPublisher) buildFullTag(registry, image, tag string) string { + if registry != "" { + return fmt.Sprintf("%s/%s:%s", registry, image, tag) + } + return fmt.Sprintf("%s:%s", image, tag) +} + +// buildBuildxArgs builds the arguments for docker buildx build command. +func (p *DockerPublisher) buildBuildxArgs(cfg DockerConfig, tags []string, version string) []string { + args := []string{"buildx", "build"} + + // Multi-platform support + if len(cfg.Platforms) > 0 { + args = append(args, "--platform", strings.Join(cfg.Platforms, ",")) + } + + // Add all tags + for _, tag := range tags { + fullTag := p.buildFullTag(cfg.Registry, cfg.Image, tag) + args = append(args, "-t", fullTag) + } + + // Dockerfile path + dockerfilePath := cfg.Dockerfile + args = append(args, "-f", dockerfilePath) + + // Build arguments + for k, v := range cfg.BuildArgs { + // Expand version in build args + expandedValue := strings.ReplaceAll(v, "{{.Version}}", version) + expandedValue = strings.ReplaceAll(expandedValue, "{{Version}}", version) + args = append(args, "--build-arg", fmt.Sprintf("%s=%s", k, expandedValue)) + } + + // Always add VERSION build arg + args = append(args, "--build-arg", fmt.Sprintf("VERSION=%s", version)) + + // Push the image + args = append(args, "--push") + + // Build context (current directory) + args = append(args, ".") + + return args +} + +// ensureBuildx ensures docker buildx is available and has a builder. 
+func (p *DockerPublisher) ensureBuildx(ctx context.Context) error { + // Check if buildx is available + cmd := exec.CommandContext(ctx, "docker", "buildx", "version") + if err := cmd.Run(); err != nil { + return fmt.Errorf("docker: buildx is not available. Install it from https://docs.docker.com/buildx/working-with-buildx/") + } + + // Check if we have a builder, create one if not + cmd = exec.CommandContext(ctx, "docker", "buildx", "inspect", "--bootstrap") + if err := cmd.Run(); err != nil { + // Try to create a builder + cmd = exec.CommandContext(ctx, "docker", "buildx", "create", "--use", "--bootstrap") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("docker: failed to create buildx builder: %w", err) + } + } + + return nil +} + +// validateDockerCli checks if the docker CLI is available. +func validateDockerCli() error { + cmd := exec.Command("docker", "--version") + if err := cmd.Run(); err != nil { + return fmt.Errorf("docker: docker CLI not found. 
Install it from https://docs.docker.com/get-docker/") + } + return nil +} diff --git a/release/publishers/docker_test.go b/release/publishers/docker_test.go new file mode 100644 index 0000000..576794c --- /dev/null +++ b/release/publishers/docker_test.go @@ -0,0 +1,810 @@ +package publishers + +import ( + "bytes" + "context" + "os" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDockerPublisher_Name_Good(t *testing.T) { + t.Run("returns docker", func(t *testing.T) { + p := NewDockerPublisher() + assert.Equal(t, "docker", p.Name()) + }) +} + +func TestDockerPublisher_ParseConfig_Good(t *testing.T) { + p := NewDockerPublisher() + + t.Run("uses defaults when no extended config", func(t *testing.T) { + pubCfg := PublisherConfig{Type: "docker"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg, "/project") + + assert.Equal(t, "ghcr.io", cfg.Registry) + assert.Equal(t, "owner/repo", cfg.Image) + assert.Equal(t, "/project/Dockerfile", cfg.Dockerfile) + assert.Equal(t, []string{"linux/amd64", "linux/arm64"}, cfg.Platforms) + assert.Equal(t, []string{"latest", "{{.Version}}"}, cfg.Tags) + }) + + t.Run("parses extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + "registry": "docker.io", + "image": "myorg/myimage", + "dockerfile": "docker/Dockerfile.prod", + "platforms": []any{"linux/amd64"}, + "tags": []any{"latest", "stable", "{{.Version}}"}, + "build_args": map[string]any{ + "GO_VERSION": "1.21", + }, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg, "/project") + + assert.Equal(t, "docker.io", cfg.Registry) + assert.Equal(t, "myorg/myimage", cfg.Image) + assert.Equal(t, "/project/docker/Dockerfile.prod", cfg.Dockerfile) + assert.Equal(t, []string{"linux/amd64"}, cfg.Platforms) + assert.Equal(t, 
[]string{"latest", "stable", "{{.Version}}"}, cfg.Tags) + assert.Equal(t, "1.21", cfg.BuildArgs["GO_VERSION"]) + }) + + t.Run("handles absolute dockerfile path", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + "dockerfile": "/absolute/path/Dockerfile", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg, "/project") + + assert.Equal(t, "/absolute/path/Dockerfile", cfg.Dockerfile) + }) +} + +func TestDockerPublisher_ResolveTags_Good(t *testing.T) { + p := NewDockerPublisher() + + t.Run("resolves version template", func(t *testing.T) { + tags := p.resolveTags([]string{"latest", "{{.Version}}", "stable"}, "v1.2.3") + + assert.Equal(t, []string{"latest", "v1.2.3", "stable"}, tags) + }) + + t.Run("handles simple version syntax", func(t *testing.T) { + tags := p.resolveTags([]string{"{{Version}}"}, "v1.0.0") + + assert.Equal(t, []string{"v1.0.0"}, tags) + }) + + t.Run("handles no templates", func(t *testing.T) { + tags := p.resolveTags([]string{"latest", "stable"}, "v1.2.3") + + assert.Equal(t, []string{"latest", "stable"}, tags) + }) +} + +func TestDockerPublisher_BuildFullTag_Good(t *testing.T) { + p := NewDockerPublisher() + + tests := []struct { + name string + registry string + image string + tag string + expected string + }{ + { + name: "with registry", + registry: "ghcr.io", + image: "owner/repo", + tag: "v1.0.0", + expected: "ghcr.io/owner/repo:v1.0.0", + }, + { + name: "without registry", + registry: "", + image: "myimage", + tag: "latest", + expected: "myimage:latest", + }, + { + name: "docker hub", + registry: "docker.io", + image: "library/nginx", + tag: "alpine", + expected: "docker.io/library/nginx:alpine", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tag := p.buildFullTag(tc.registry, tc.image, tc.tag) + assert.Equal(t, tc.expected, tag) + }) + } +} + +func TestDockerPublisher_BuildBuildxArgs_Good(t *testing.T) { + p := 
NewDockerPublisher() + + t.Run("builds basic args", func(t *testing.T) { + cfg := DockerConfig{ + Registry: "ghcr.io", + Image: "owner/repo", + Dockerfile: "/project/Dockerfile", + Platforms: []string{"linux/amd64", "linux/arm64"}, + BuildArgs: make(map[string]string), + } + tags := []string{"latest", "v1.0.0"} + + args := p.buildBuildxArgs(cfg, tags, "v1.0.0") + + assert.Contains(t, args, "buildx") + assert.Contains(t, args, "build") + assert.Contains(t, args, "--platform") + assert.Contains(t, args, "linux/amd64,linux/arm64") + assert.Contains(t, args, "-t") + assert.Contains(t, args, "ghcr.io/owner/repo:latest") + assert.Contains(t, args, "ghcr.io/owner/repo:v1.0.0") + assert.Contains(t, args, "-f") + assert.Contains(t, args, "/project/Dockerfile") + assert.Contains(t, args, "--push") + assert.Contains(t, args, ".") + }) + + t.Run("includes build args", func(t *testing.T) { + cfg := DockerConfig{ + Registry: "ghcr.io", + Image: "owner/repo", + Dockerfile: "/project/Dockerfile", + Platforms: []string{"linux/amd64"}, + BuildArgs: map[string]string{ + "GO_VERSION": "1.21", + "APP_NAME": "myapp", + }, + } + tags := []string{"latest"} + + args := p.buildBuildxArgs(cfg, tags, "v1.0.0") + + assert.Contains(t, args, "--build-arg") + // Check that build args are present (order may vary) + foundGoVersion := false + foundAppName := false + foundVersion := false + for i, arg := range args { + if arg == "--build-arg" && i+1 < len(args) { + if args[i+1] == "GO_VERSION=1.21" { + foundGoVersion = true + } + if args[i+1] == "APP_NAME=myapp" { + foundAppName = true + } + if args[i+1] == "VERSION=v1.0.0" { + foundVersion = true + } + } + } + assert.True(t, foundGoVersion, "GO_VERSION build arg not found") + assert.True(t, foundAppName, "APP_NAME build arg not found") + assert.True(t, foundVersion, "VERSION build arg not found") + }) + + t.Run("expands version in build args", func(t *testing.T) { + cfg := DockerConfig{ + Registry: "ghcr.io", + Image: "owner/repo", + Dockerfile: 
"/project/Dockerfile", + Platforms: []string{"linux/amd64"}, + BuildArgs: map[string]string{ + "APP_VERSION": "{{.Version}}", + }, + } + tags := []string{"latest"} + + args := p.buildBuildxArgs(cfg, tags, "v2.0.0") + + foundExpandedVersion := false + for i, arg := range args { + if arg == "--build-arg" && i+1 < len(args) { + if args[i+1] == "APP_VERSION=v2.0.0" { + foundExpandedVersion = true + } + } + } + assert.True(t, foundExpandedVersion, "APP_VERSION should be expanded to v2.0.0") + }) +} + +func TestDockerPublisher_Publish_Bad(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + p := NewDockerPublisher() + + t.Run("fails when dockerfile not found", func(t *testing.T) { + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/nonexistent", + FS: io.Local, + } + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + "dockerfile": "/nonexistent/Dockerfile", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err := p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Dockerfile not found") + }) +} + +func TestDockerConfig_Defaults_Good(t *testing.T) { + t.Run("has sensible defaults", func(t *testing.T) { + p := NewDockerPublisher() + pubCfg := PublisherConfig{Type: "docker"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + cfg := p.parseConfig(pubCfg, relCfg, "/project") + + // Verify defaults + assert.Equal(t, "ghcr.io", cfg.Registry) + assert.Equal(t, "owner/repo", cfg.Image) + assert.Len(t, cfg.Platforms, 2) + assert.Contains(t, cfg.Platforms, "linux/amd64") + assert.Contains(t, cfg.Platforms, "linux/arm64") + assert.Contains(t, cfg.Tags, "latest") + }) +} + +func TestDockerPublisher_DryRunPublish_Good(t *testing.T) { + p := NewDockerPublisher() + + t.Run("outputs expected dry run information", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := 
&Release{ + Version: "v1.0.0", + ProjectDir: "/project", + FS: io.Local, + } + cfg := DockerConfig{ + Registry: "ghcr.io", + Image: "owner/repo", + Dockerfile: "/project/Dockerfile", + Platforms: []string{"linux/amd64", "linux/arm64"}, + Tags: []string{"latest", "{{.Version}}"}, + BuildArgs: make(map[string]string), + } + + err := p.dryRunPublish(release, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "DRY RUN: Docker Build & Push") + assert.Contains(t, output, "Version: v1.0.0") + assert.Contains(t, output, "Registry: ghcr.io") + assert.Contains(t, output, "Image: owner/repo") + assert.Contains(t, output, "Dockerfile: /project/Dockerfile") + assert.Contains(t, output, "Platforms: linux/amd64, linux/arm64") + assert.Contains(t, output, "Tags to be applied:") + assert.Contains(t, output, "ghcr.io/owner/repo:latest") + assert.Contains(t, output, "ghcr.io/owner/repo:v1.0.0") + assert.Contains(t, output, "Would execute command:") + assert.Contains(t, output, "docker buildx build") + assert.Contains(t, output, "END DRY RUN") + }) + + t.Run("shows build args when present", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/project", + FS: io.Local, + } + cfg := DockerConfig{ + Registry: "docker.io", + Image: "myorg/myapp", + Dockerfile: "/project/Dockerfile", + Platforms: []string{"linux/amd64"}, + Tags: []string{"latest"}, + BuildArgs: map[string]string{ + "GO_VERSION": "1.21", + "APP_NAME": "myapp", + }, + } + + err := p.dryRunPublish(release, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "Build arguments:") + assert.Contains(t, output, "GO_VERSION=1.21") + assert.Contains(t, output, "APP_NAME=myapp") + }) + + 
t.Run("handles single platform", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v2.0.0", + ProjectDir: "/project", + FS: io.Local, + } + cfg := DockerConfig{ + Registry: "ghcr.io", + Image: "owner/repo", + Dockerfile: "/project/Dockerfile.prod", + Platforms: []string{"linux/amd64"}, + Tags: []string{"stable"}, + BuildArgs: make(map[string]string), + } + + err := p.dryRunPublish(release, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "Platforms: linux/amd64") + assert.Contains(t, output, "ghcr.io/owner/repo:stable") + }) +} + +func TestDockerPublisher_ParseConfig_EdgeCases_Good(t *testing.T) { + p := NewDockerPublisher() + + t.Run("handles nil release config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + "image": "custom/image", + }, + } + + cfg := p.parseConfig(pubCfg, nil, "/project") + + assert.Equal(t, "custom/image", cfg.Image) + assert.Equal(t, "ghcr.io", cfg.Registry) + }) + + t.Run("handles empty repository in release config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + "image": "fallback/image", + }, + } + relCfg := &mockReleaseConfig{repository: ""} + + cfg := p.parseConfig(pubCfg, relCfg, "/project") + + assert.Equal(t, "fallback/image", cfg.Image) + }) + + t.Run("extended config overrides repository image", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + "image": "override/image", + }, + } + relCfg := &mockReleaseConfig{repository: "original/repo"} + + cfg := p.parseConfig(pubCfg, relCfg, "/project") + + assert.Equal(t, "override/image", cfg.Image) + }) + + t.Run("handles mixed build args types", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + 
"build_args": map[string]any{ + "STRING_ARG": "value", + "INT_ARG": 123, // Non-string value should be skipped + }, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + cfg := p.parseConfig(pubCfg, relCfg, "/project") + + assert.Equal(t, "value", cfg.BuildArgs["STRING_ARG"]) + _, exists := cfg.BuildArgs["INT_ARG"] + assert.False(t, exists, "non-string build arg should not be included") + }) +} + +func TestDockerPublisher_ResolveTags_EdgeCases_Good(t *testing.T) { + p := NewDockerPublisher() + + t.Run("handles empty tags", func(t *testing.T) { + tags := p.resolveTags([]string{}, "v1.0.0") + assert.Empty(t, tags) + }) + + t.Run("handles multiple version placeholders", func(t *testing.T) { + tags := p.resolveTags([]string{"{{.Version}}", "prefix-{{.Version}}", "{{.Version}}-suffix"}, "v1.2.3") + assert.Equal(t, []string{"v1.2.3", "prefix-v1.2.3", "v1.2.3-suffix"}, tags) + }) + + t.Run("handles mixed template formats", func(t *testing.T) { + tags := p.resolveTags([]string{"{{.Version}}", "{{Version}}", "latest"}, "v3.0.0") + assert.Equal(t, []string{"v3.0.0", "v3.0.0", "latest"}, tags) + }) +} + +func TestDockerPublisher_BuildBuildxArgs_EdgeCases_Good(t *testing.T) { + p := NewDockerPublisher() + + t.Run("handles empty platforms", func(t *testing.T) { + cfg := DockerConfig{ + Registry: "ghcr.io", + Image: "owner/repo", + Dockerfile: "/project/Dockerfile", + Platforms: []string{}, + BuildArgs: make(map[string]string), + } + + args := p.buildBuildxArgs(cfg, []string{"latest"}, "v1.0.0") + + assert.Contains(t, args, "buildx") + assert.Contains(t, args, "build") + // Should not have --platform if empty + foundPlatform := false + for i, arg := range args { + if arg == "--platform" { + foundPlatform = true + // Check the next arg exists (it shouldn't be empty) + if i+1 < len(args) && args[i+1] == "" { + t.Error("platform argument should not be empty string") + } + } + } + assert.False(t, foundPlatform, "should not include --platform when platforms is empty") 
+ }) + + t.Run("handles version expansion in build args", func(t *testing.T) { + cfg := DockerConfig{ + Registry: "ghcr.io", + Image: "owner/repo", + Dockerfile: "/Dockerfile", + Platforms: []string{"linux/amd64"}, + BuildArgs: map[string]string{ + "VERSION": "{{.Version}}", + "SIMPLE_VER": "{{Version}}", + "STATIC_VALUE": "static", + }, + } + + args := p.buildBuildxArgs(cfg, []string{"latest"}, "v2.5.0") + + foundVersionArg := false + foundSimpleArg := false + foundStaticArg := false + foundAutoVersion := false + + for i, arg := range args { + if arg == "--build-arg" && i+1 < len(args) { + switch args[i+1] { + case "VERSION=v2.5.0": + foundVersionArg = true + case "SIMPLE_VER=v2.5.0": + foundSimpleArg = true + case "STATIC_VALUE=static": + foundStaticArg = true + } + // Auto-added VERSION build arg + if args[i+1] == "VERSION=v2.5.0" { + foundAutoVersion = true + } + } + } + + // Note: VERSION is both in BuildArgs and auto-added, so we just check it exists + assert.True(t, foundVersionArg || foundAutoVersion, "VERSION build arg not found") + assert.True(t, foundSimpleArg, "SIMPLE_VER build arg not expanded") + assert.True(t, foundStaticArg, "STATIC_VALUE build arg not found") + }) + + t.Run("handles empty registry", func(t *testing.T) { + cfg := DockerConfig{ + Registry: "", + Image: "localimage", + Dockerfile: "/Dockerfile", + Platforms: []string{"linux/amd64"}, + BuildArgs: make(map[string]string), + } + + args := p.buildBuildxArgs(cfg, []string{"latest"}, "v1.0.0") + + assert.Contains(t, args, "-t") + assert.Contains(t, args, "localimage:latest") + }) +} + +func TestDockerPublisher_Publish_DryRun_Good(t *testing.T) { + // Skip if docker CLI is not available - dry run still validates docker is installed + if err := validateDockerCli(); err != nil { + t.Skip("skipping test: docker CLI not available") + } + + p := NewDockerPublisher() + + t.Run("dry run succeeds with valid Dockerfile", func(t *testing.T) { + // Create temp directory with Dockerfile + tmpDir, err := 
os.MkdirTemp("", "docker-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + dockerfilePath := filepath.Join(tmpDir, "Dockerfile") + err = os.WriteFile(dockerfilePath, []byte("FROM alpine:latest\n"), 0644) + require.NoError(t, err) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "docker"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "DRY RUN: Docker Build & Push") + }) + + t.Run("dry run uses custom dockerfile path", func(t *testing.T) { + // Create temp directory with custom Dockerfile + tmpDir, err := os.MkdirTemp("", "docker-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + customDir := filepath.Join(tmpDir, "docker") + err = os.MkdirAll(customDir, 0755) + require.NoError(t, err) + + dockerfilePath := filepath.Join(customDir, "Dockerfile.prod") + err = os.WriteFile(dockerfilePath, []byte("FROM alpine:latest\n"), 0644) + require.NoError(t, err) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + "dockerfile": "docker/Dockerfile.prod", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "Dockerfile.prod") + }) +} + +func TestDockerPublisher_Publish_Validation_Bad(t *testing.T) { + p := 
NewDockerPublisher() + + t.Run("fails when Dockerfile not found with docker installed", func(t *testing.T) { + if err := validateDockerCli(); err != nil { + t.Skip("skipping test: docker CLI not available") + } + + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/nonexistent/path", + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "docker"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err := p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Dockerfile not found") + }) + + t.Run("fails when docker CLI not available", func(t *testing.T) { + if err := validateDockerCli(); err == nil { + t.Skip("skipping test: docker CLI is available") + } + + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/tmp", + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "docker"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err := p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "docker CLI not found") + }) +} + +func TestValidateDockerCli_Good(t *testing.T) { + t.Run("returns nil when docker is installed", func(t *testing.T) { + err := validateDockerCli() + if err != nil { + // Docker is not installed, which is fine for this test + assert.Contains(t, err.Error(), "docker CLI not found") + } + // If err is nil, docker is installed - that's OK + }) +} + +func TestDockerPublisher_Publish_WithCLI_Good(t *testing.T) { + // These tests run only when docker CLI is available + if err := validateDockerCli(); err != nil { + t.Skip("skipping test: docker CLI not available") + } + + p := NewDockerPublisher() + + t.Run("dry run succeeds with all config options", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "docker-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + dockerfilePath := filepath.Join(tmpDir, "Dockerfile") + err = os.WriteFile(dockerfilePath, []byte("FROM 
alpine:latest\n"), 0644) + require.NoError(t, err) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + "registry": "docker.io", + "image": "myorg/myapp", + "platforms": []any{"linux/amd64", "linux/arm64"}, + "tags": []any{"latest", "{{.Version}}", "stable"}, + "build_args": map[string]any{"GO_VERSION": "1.21"}, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "DRY RUN: Docker Build & Push") + assert.Contains(t, output, "docker.io") + assert.Contains(t, output, "myorg/myapp") + }) + + t.Run("dry run with nil relCfg uses extended image", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "docker-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + dockerfilePath := filepath.Join(tmpDir, "Dockerfile") + err = os.WriteFile(dockerfilePath, []byte("FROM alpine:latest\n"), 0644) + require.NoError(t, err) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{ + Type: "docker", + Extended: map[string]any{ + "image": "standalone/image", + }, + } + + err = p.Publish(context.TODO(), release, pubCfg, nil, true) // nil relCfg + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "standalone/image") + }) + + t.Run("fails with non-existent Dockerfile in non-dry-run", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "docker-test") + require.NoError(t, err) + defer func() { _ 
= os.RemoveAll(tmpDir) }() + + // Don't create a Dockerfile + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "docker"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Dockerfile not found") + }) +} diff --git a/release/publishers/github.go b/release/publishers/github.go new file mode 100644 index 0000000..b1eaf70 --- /dev/null +++ b/release/publishers/github.go @@ -0,0 +1,233 @@ +// Package publishers provides release publishing implementations. +package publishers + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" +) + +// GitHubPublisher publishes releases to GitHub using the gh CLI. +type GitHubPublisher struct{} + +// NewGitHubPublisher creates a new GitHub publisher. +func NewGitHubPublisher() *GitHubPublisher { + return &GitHubPublisher{} +} + +// Name returns the publisher's identifier. +func (p *GitHubPublisher) Name() string { + return "github" +} + +// Publish publishes the release to GitHub. +// Uses the gh CLI for creating releases and uploading assets. 
func (p *GitHubPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
	// Determine repository: explicit config wins, then the origin git remote.
	repo := ""
	if relCfg != nil {
		repo = relCfg.GetRepository()
	}
	if repo == "" {
		// Try to detect from git remote
		detectedRepo, err := detectRepository(release.ProjectDir)
		if err != nil {
			return fmt.Errorf("github.Publish: could not determine repository: %w", err)
		}
		repo = detectedRepo
	}

	// Dry run deliberately skips the gh CLI/auth check so it can be used on
	// machines without gh installed.
	if dryRun {
		return p.dryRunPublish(release, pubCfg, repo)
	}

	// Validate gh CLI is available and authenticated for actual publish
	if err := validateGhCli(); err != nil {
		return err
	}

	return p.executePublish(ctx, release, pubCfg, repo)
}

// dryRunPublish shows what would be done without actually publishing.
//
// It prints the target repository, release flags, the exact `gh release
// create` invocation, the artifact list, and the changelog body — without
// invoking gh.
func (p *GitHubPublisher) dryRunPublish(release *Release, pubCfg PublisherConfig, repo string) error {
	fmt.Println()
	fmt.Println("=== DRY RUN: GitHub Release ===")
	fmt.Println()
	fmt.Printf("Repository: %s\n", repo)
	fmt.Printf("Version: %s\n", release.Version)
	fmt.Printf("Draft: %t\n", pubCfg.Draft)
	fmt.Printf("Prerelease: %t\n", pubCfg.Prerelease)
	fmt.Println()

	fmt.Println("Would create release with command:")
	args := p.buildCreateArgs(release, pubCfg, repo)
	fmt.Printf(" gh %s\n", strings.Join(args, " "))
	fmt.Println()

	if len(release.Artifacts) > 0 {
		fmt.Println("Would upload artifacts:")
		for _, artifact := range release.Artifacts {
			fmt.Printf(" - %s\n", filepath.Base(artifact.Path))
		}
	}

	fmt.Println()
	fmt.Println("Changelog:")
	fmt.Println("---")
	fmt.Println(release.Changelog)
	fmt.Println("---")
	fmt.Println()
	fmt.Println("=== END DRY RUN ===")

	return nil
}

// executePublish actually creates the release and uploads artifacts.
+func (p *GitHubPublisher) executePublish(ctx context.Context, release *Release, pubCfg PublisherConfig, repo string) error { + // Build the release create command + args := p.buildCreateArgs(release, pubCfg, repo) + + // Add artifact paths to the command + for _, artifact := range release.Artifacts { + args = append(args, artifact.Path) + } + + // Execute gh release create + cmd := exec.CommandContext(ctx, "gh", args...) + cmd.Dir = release.ProjectDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("github.Publish: gh release create failed: %w", err) + } + + return nil +} + +// buildCreateArgs builds the arguments for gh release create. +func (p *GitHubPublisher) buildCreateArgs(release *Release, pubCfg PublisherConfig, repo string) []string { + args := []string{"release", "create", release.Version} + + // Add repository flag + if repo != "" { + args = append(args, "--repo", repo) + } + + // Add title + args = append(args, "--title", release.Version) + + // Add notes (changelog) + if release.Changelog != "" { + args = append(args, "--notes", release.Changelog) + } else { + args = append(args, "--generate-notes") + } + + // Add draft flag + if pubCfg.Draft { + args = append(args, "--draft") + } + + // Add prerelease flag + if pubCfg.Prerelease { + args = append(args, "--prerelease") + } + + return args +} + +// validateGhCli checks if the gh CLI is available and authenticated. +func validateGhCli() error { + // Check if gh is installed + cmd := exec.Command("gh", "--version") + if err := cmd.Run(); err != nil { + return fmt.Errorf("github: gh CLI not found. Install it from https://cli.github.com") + } + + // Check if authenticated + cmd = exec.Command("gh", "auth", "status") + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("github: not authenticated with gh CLI. 
Run 'gh auth login' first") + } + + if !strings.Contains(string(output), "Logged in") { + return fmt.Errorf("github: not authenticated with gh CLI. Run 'gh auth login' first") + } + + return nil +} + +// detectRepository detects the GitHub repository from git remote. +func detectRepository(dir string) (string, error) { + cmd := exec.Command("git", "remote", "get-url", "origin") + cmd.Dir = dir + output, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("failed to get git remote: %w", err) + } + + url := strings.TrimSpace(string(output)) + return parseGitHubRepo(url) +} + +// parseGitHubRepo extracts owner/repo from a GitHub URL. +// Supports: +// - git@github.com:owner/repo.git +// - https://github.com/owner/repo.git +// - https://github.com/owner/repo +func parseGitHubRepo(url string) (string, error) { + // SSH format + if strings.HasPrefix(url, "git@github.com:") { + repo := strings.TrimPrefix(url, "git@github.com:") + repo = strings.TrimSuffix(repo, ".git") + return repo, nil + } + + // HTTPS format + if strings.HasPrefix(url, "https://github.com/") { + repo := strings.TrimPrefix(url, "https://github.com/") + repo = strings.TrimSuffix(repo, ".git") + return repo, nil + } + + return "", fmt.Errorf("not a GitHub URL: %s", url) +} + +// UploadArtifact uploads a single artifact to an existing release. +// This can be used to add artifacts to a release after creation. +func UploadArtifact(ctx context.Context, repo, version, artifactPath string) error { + cmd := exec.CommandContext(ctx, "gh", "release", "upload", version, artifactPath, "--repo", repo) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("github.UploadArtifact: failed to upload %s: %w", artifactPath, err) + } + + return nil +} + +// DeleteRelease deletes a release by tag name. 
+func DeleteRelease(ctx context.Context, repo, version string) error {
+	// --yes skips gh's interactive confirmation prompt.
+	cmd := exec.CommandContext(ctx, "gh", "release", "delete", version, "--repo", repo, "--yes")
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("github.DeleteRelease: failed to delete %s: %w", version, err)
+	}
+
+	return nil
+}
+
+// ReleaseExists checks if a release exists for the given version.
+func ReleaseExists(ctx context.Context, repo, version string) bool {
+	// `gh release view` exits non-zero when the tag has no release.
+	cmd := exec.CommandContext(ctx, "gh", "release", "view", version, "--repo", repo)
+	return cmd.Run() == nil
+}
diff --git a/release/publishers/github_test.go b/release/publishers/github_test.go
new file mode 100644
index 0000000..ccbfe67
--- /dev/null
+++ b/release/publishers/github_test.go
@@ -0,0 +1,560 @@
+package publishers
+
+import (
+	"bytes"
+	"context"
+	"os"
+	"os/exec"
+	"strings"
+	"testing"
+
+	"forge.lthn.ai/core/go-devops/build"
+	"forge.lthn.ai/core/go/pkg/io"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestParseGitHubRepo_Good verifies owner/repo extraction for all
+// supported GitHub remote URL forms.
+func TestParseGitHubRepo_Good(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    string
+		expected string
+	}{
+		{
+			name:     "SSH URL",
+			input:    "git@github.com:owner/repo.git",
+			expected: "owner/repo",
+		},
+		{
+			name:     "HTTPS URL with .git",
+			input:    "https://github.com/owner/repo.git",
+			expected: "owner/repo",
+		},
+		{
+			name:     "HTTPS URL without .git",
+			input:    "https://github.com/owner/repo",
+			expected: "owner/repo",
+		},
+		{
+			name:     "SSH URL without .git",
+			input:    "git@github.com:owner/repo",
+			expected: "owner/repo",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			result, err := parseGitHubRepo(tc.input)
+			assert.NoError(t, err)
+			assert.Equal(t, tc.expected, result)
+		})
+	}
+}
+
+// TestParseGitHubRepo_Bad verifies that non-GitHub remotes are rejected.
+func TestParseGitHubRepo_Bad(t *testing.T) {
+	tests := []struct {
+		name  string
+		input string
+	}{
+		{
+			name:  "GitLab URL",
+			input: "https://gitlab.com/owner/repo.git",
+		},
+		{
+			name:  "Bitbucket URL",
+			input: "git@bitbucket.org:owner/repo.git",
+		},
+		{
+			name:  "Random URL",
+			input: "https://example.com/something",
+		},
+		{
+			name:  "Not a URL",
+			input: "owner/repo",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			_, err := parseGitHubRepo(tc.input)
+			assert.Error(t, err)
+		})
+	}
+}
+
+// TestGitHubPublisher_Name_Good pins the publisher identifier.
+func TestGitHubPublisher_Name_Good(t *testing.T) {
+	t.Run("returns github", func(t *testing.T) {
+		p := NewGitHubPublisher()
+		assert.Equal(t, "github", p.Name())
+	})
+}
+
+// TestNewRelease_Good checks the Release constructor field mapping.
+func TestNewRelease_Good(t *testing.T) {
+	t.Run("creates release struct", func(t *testing.T) {
+		r := NewRelease("v1.0.0", nil, "changelog", "/project", io.Local)
+		assert.Equal(t, "v1.0.0", r.Version)
+		assert.Equal(t, "changelog", r.Changelog)
+		assert.Equal(t, "/project", r.ProjectDir)
+		assert.Nil(t, r.Artifacts)
+	})
+}
+
+// TestNewPublisherConfig_Good checks the PublisherConfig constructor.
+func TestNewPublisherConfig_Good(t *testing.T) {
+	t.Run("creates config struct", func(t *testing.T) {
+		cfg := NewPublisherConfig("github", true, false, nil)
+		assert.Equal(t, "github", cfg.Type)
+		assert.True(t, cfg.Prerelease)
+		assert.False(t, cfg.Draft)
+		assert.Nil(t, cfg.Extended)
+	})
+
+	t.Run("creates config with extended", func(t *testing.T) {
+		ext := map[string]any{"key": "value"}
+		cfg := NewPublisherConfig("docker", false, false, ext)
+		assert.Equal(t, "docker", cfg.Type)
+		assert.Equal(t, ext, cfg.Extended)
+	})
+}
+
+// TestBuildCreateArgs_Good verifies the gh argument list for each flag
+// combination used by the GitHub publisher.
+func TestBuildCreateArgs_Good(t *testing.T) {
+	p := NewGitHubPublisher()
+
+	t.Run("basic args", func(t *testing.T) {
+		release := &Release{
+			Version:   "v1.0.0",
+			Changelog: "## v1.0.0\n\nChanges",
+			FS:        io.Local,
+		}
+		cfg := PublisherConfig{
+			Type: "github",
+		}
+
+		args := p.buildCreateArgs(release, cfg, "owner/repo")
+
+		assert.Contains(t, args, "release")
+		assert.Contains(t, args, "create")
+		assert.Contains(t, args, "v1.0.0")
+		assert.Contains(t, args, "--repo")
+		assert.Contains(t, args, "owner/repo")
+		assert.Contains(t, args, "--title")
+		assert.Contains(t, args, "--notes")
+	})
+
+	t.Run("with draft flag", func(t *testing.T) {
+		release := &Release{
+			Version: "v1.0.0",
+			FS:      io.Local,
+		}
+		cfg := PublisherConfig{
+			Type:  "github",
+			Draft: true,
+		}
+
+		args := p.buildCreateArgs(release, cfg, "owner/repo")
+
+		assert.Contains(t, args, "--draft")
+	})
+
+	t.Run("with prerelease flag", func(t *testing.T) {
+		release := &Release{
+			Version: "v1.0.0",
+			FS:      io.Local,
+		}
+		cfg := PublisherConfig{
+			Type:       "github",
+			Prerelease: true,
+		}
+
+		args := p.buildCreateArgs(release, cfg, "owner/repo")
+
+		assert.Contains(t, args, "--prerelease")
+	})
+
+	t.Run("generates notes when no changelog", func(t *testing.T) {
+		release := &Release{
+			Version:   "v1.0.0",
+			Changelog: "",
+			FS:        io.Local,
+		}
+		cfg := PublisherConfig{
+			Type: "github",
+		}
+
+		args := p.buildCreateArgs(release, cfg, "owner/repo")
+
+		assert.Contains(t, args, "--generate-notes")
+	})
+
+	t.Run("with draft and prerelease flags", func(t *testing.T) {
+		release := &Release{
+			Version: "v1.0.0-alpha",
+			FS:      io.Local,
+		}
+		cfg := PublisherConfig{
+			Type:       "github",
+			Draft:      true,
+			Prerelease: true,
+		}
+
+		args := p.buildCreateArgs(release, cfg, "owner/repo")
+
+		assert.Contains(t, args, "--draft")
+		assert.Contains(t, args, "--prerelease")
+	})
+
+	t.Run("without repo includes version", func(t *testing.T) {
+		release := &Release{
+			Version:   "v2.0.0",
+			Changelog: "Some changes",
+			FS:        io.Local,
+		}
+		cfg := PublisherConfig{
+			Type: "github",
+		}
+
+		args := p.buildCreateArgs(release, cfg, "")
+
+		assert.Contains(t, args, "release")
+		assert.Contains(t, args, "create")
+		assert.Contains(t, args, "v2.0.0")
+		assert.NotContains(t, args, "--repo")
+	})
+}
+
+// TestGitHubPublisher_DryRunPublish_Good captures stdout and asserts the
+// exact dry-run report format.
+func TestGitHubPublisher_DryRunPublish_Good(t *testing.T) {
+	p := NewGitHubPublisher()
+
+	t.Run("outputs expected dry run information", func(t *testing.T) {
+		oldStdout := os.Stdout
+		r, w, _ := os.Pipe()
+		os.Stdout = w
+
+		release := &Release{
+			Version:    "v1.0.0",
+			Changelog:  "## Changes\n\n- Feature A\n- Bug fix B",
+			ProjectDir: "/project",
+			FS:         io.Local,
+		}
+		cfg := PublisherConfig{
+			Type:       "github",
+			Draft:      false,
+			Prerelease: false,
+		}
+
+		err := p.dryRunPublish(release, cfg, "owner/repo")
+
+		_ = w.Close()
+		var buf bytes.Buffer
+		_, _ = buf.ReadFrom(r)
+		os.Stdout = oldStdout
+
+		require.NoError(t, err)
+		output := buf.String()
+
+		assert.Contains(t, output, "DRY RUN: GitHub Release")
+		assert.Contains(t, output, "Repository: owner/repo")
+		assert.Contains(t, output, "Version: v1.0.0")
+		assert.Contains(t, output, "Draft: false")
+		assert.Contains(t, output, "Prerelease: false")
+		assert.Contains(t, output, "Would create release with command:")
+		assert.Contains(t, output, "gh release create")
+		assert.Contains(t, output, "Changelog:")
+		assert.Contains(t, output, "## Changes")
+		assert.Contains(t, output, "END DRY RUN")
+	})
+
+	t.Run("shows artifacts when present", func(t *testing.T) {
+		oldStdout := os.Stdout
+		r, w, _ := os.Pipe()
+		os.Stdout = w
+
+		release := &Release{
+			Version:    "v1.0.0",
+			Changelog:  "Changes",
+			ProjectDir: "/project",
+			FS:         io.Local,
+			Artifacts: []build.Artifact{
+				{Path: "/dist/myapp-darwin-amd64.tar.gz"},
+				{Path: "/dist/myapp-linux-amd64.tar.gz"},
+			},
+		}
+		cfg := PublisherConfig{Type: "github"}
+
+		err := p.dryRunPublish(release, cfg, "owner/repo")
+
+		_ = w.Close()
+		var buf bytes.Buffer
+		_, _ = buf.ReadFrom(r)
+		os.Stdout = oldStdout
+
+		require.NoError(t, err)
+		output := buf.String()
+
+		assert.Contains(t, output, "Would upload artifacts:")
+		assert.Contains(t, output, "myapp-darwin-amd64.tar.gz")
+		assert.Contains(t, output, "myapp-linux-amd64.tar.gz")
+	})
+
+	t.Run("shows draft and prerelease flags", func(t *testing.T) {
+		oldStdout := os.Stdout
+		r, w, _ := os.Pipe()
+		os.Stdout = w
+
+		release := &Release{
+			Version:    "v1.0.0-beta",
+			Changelog:  "Beta release",
+			ProjectDir: "/project",
+			FS:         io.Local,
+		}
+		cfg := PublisherConfig{
+			Type:       "github",
+			Draft:      true,
+			Prerelease: true,
+		}
+
+		err := p.dryRunPublish(release, cfg, "owner/repo")
+
+		_ = w.Close()
+		var buf bytes.Buffer
+		_, _ = buf.ReadFrom(r)
+		os.Stdout = oldStdout
+
+		require.NoError(t, err)
+		output := buf.String()
+
+		assert.Contains(t, output, "Draft: true")
+		assert.Contains(t, output, "Prerelease: true")
+		assert.Contains(t, output, "--draft")
+		assert.Contains(t, output, "--prerelease")
+	})
+}
+
+// TestGitHubPublisher_Publish_Good exercises the dry-run path of Publish,
+// which must not require the gh CLI.
+func TestGitHubPublisher_Publish_Good(t *testing.T) {
+	p := NewGitHubPublisher()
+
+	t.Run("dry run uses repository from config", func(t *testing.T) {
+		oldStdout := os.Stdout
+		r, w, _ := os.Pipe()
+		os.Stdout = w
+
+		release := &Release{
+			Version:    "v1.0.0",
+			Changelog:  "Changes",
+			ProjectDir: "/tmp",
+			FS:         io.Local,
+		}
+		pubCfg := PublisherConfig{Type: "github"}
+		relCfg := &mockReleaseConfig{repository: "custom/repo"}
+
+		// Dry run should succeed without needing gh CLI
+		err := p.Publish(context.TODO(), release, pubCfg, relCfg, true)
+
+		_ = w.Close()
+		var buf bytes.Buffer
+		_, _ = buf.ReadFrom(r)
+		os.Stdout = oldStdout
+
+		require.NoError(t, err)
+		output := buf.String()
+		assert.Contains(t, output, "Repository: custom/repo")
+	})
+}
+
+// TestGitHubPublisher_Publish_Bad covers failure paths; environment
+// dependent, so it is skipped under -short.
+func TestGitHubPublisher_Publish_Bad(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping integration test in short mode")
+	}
+
+	p := NewGitHubPublisher()
+
+	t.Run("fails when gh CLI not available and not dry run", func(t *testing.T) {
+		// This test will fail if gh is installed but not authenticated
+		// or succeed if gh is not installed
+		release := &Release{
+			Version:    "v1.0.0",
+			Changelog:  "Changes",
+			ProjectDir: "/nonexistent",
+			FS:         io.Local,
+		}
+		pubCfg := PublisherConfig{Type: "github"}
+		relCfg := &mockReleaseConfig{repository: "owner/repo"}
+
+		err := p.Publish(context.Background(), release, pubCfg, relCfg, false)
+
+		// Should fail due to either gh not found or not authenticated
+		assert.Error(t, err)
+	})
+
+	t.Run("fails when repository cannot be detected", func(t *testing.T) {
+		// Create a temp directory that is NOT a git repo
+		tmpDir, err := os.MkdirTemp("", "github-test")
+		require.NoError(t, err)
+		defer func() { _ = os.RemoveAll(tmpDir) }()
+
+		release := &Release{
+			Version:    "v1.0.0",
+			Changelog:  "Changes",
+			ProjectDir: tmpDir,
+			FS:         io.Local,
+		}
+		pubCfg := PublisherConfig{Type: "github"}
+		relCfg := &mockReleaseConfig{repository: ""} // Empty repository
+
+		err = p.Publish(context.Background(), release, pubCfg, relCfg, true)
+
+		// Should fail because detectRepository will fail on non-git dir
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "could not determine repository")
+	})
+}
+
+// TestDetectRepository_Good creates throwaway git repos and checks remote
+// URL detection for SSH and HTTPS origins.
+func TestDetectRepository_Good(t *testing.T) {
+	t.Run("detects repository from git remote", func(t *testing.T) {
+		// Create a temp git repo
+		tmpDir, err := os.MkdirTemp("", "git-test")
+		require.NoError(t, err)
+		defer func() { _ = os.RemoveAll(tmpDir) }()
+
+		// Initialize git repo and set remote
+		cmd := exec.Command("git", "init")
+		cmd.Dir = tmpDir
+		require.NoError(t, cmd.Run())
+
+		cmd = exec.Command("git", "remote", "add", "origin", "git@github.com:test-owner/test-repo.git")
+		cmd.Dir = tmpDir
+		require.NoError(t, cmd.Run())
+
+		repo, err := detectRepository(tmpDir)
+		require.NoError(t, err)
+		assert.Equal(t, "test-owner/test-repo", repo)
+	})
+
+	t.Run("detects repository from HTTPS remote", func(t *testing.T) {
+		tmpDir, err := os.MkdirTemp("", "git-test")
+		require.NoError(t, err)
+		defer func() { _ = os.RemoveAll(tmpDir) }()
+
+		cmd := exec.Command("git", "init")
+		cmd.Dir = tmpDir
+		require.NoError(t, cmd.Run())
+
+		cmd = exec.Command("git", "remote", "add", "origin", "https://github.com/another-owner/another-repo.git")
+		cmd.Dir = tmpDir
+		require.NoError(t, cmd.Run())
+
+		repo, err := detectRepository(tmpDir)
+		require.NoError(t, err)
+		assert.Equal(t, "another-owner/another-repo", repo)
+	})
+}
+
+// TestDetectRepository_Bad covers missing repos, missing directories and
+// non-GitHub remotes.
+func TestDetectRepository_Bad(t *testing.T) {
+	t.Run("fails when not a git repository", func(t *testing.T) {
+		tmpDir, err := os.MkdirTemp("", "no-git-test")
+		require.NoError(t, err)
+		defer func() { _ = os.RemoveAll(tmpDir) }()
+
+		_, err = detectRepository(tmpDir)
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "failed to get git remote")
+	})
+
+	t.Run("fails when directory does not exist", func(t *testing.T) {
+		_, err := detectRepository("/nonexistent/directory/that/does/not/exist")
+		assert.Error(t, err)
+	})
+
+	t.Run("fails when remote is not GitHub", func(t *testing.T) {
+		tmpDir, err := os.MkdirTemp("", "git-test")
+		require.NoError(t, err)
+		defer func() { _ = os.RemoveAll(tmpDir) }()
+
+		cmd := exec.Command("git", "init")
+		cmd.Dir = tmpDir
+		require.NoError(t, cmd.Run())
+
+		cmd = exec.Command("git", "remote", "add", "origin", "git@gitlab.com:owner/repo.git")
+		cmd.Dir = tmpDir
+		require.NoError(t, cmd.Run())
+
+		_, err = detectRepository(tmpDir)
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "not a GitHub URL")
+	})
+}
+
+// TestValidateGhCli_Bad sanity-checks validateGhCli's error surface
+// without being able to mock the gh binary.
+func TestValidateGhCli_Bad(t *testing.T) {
+	// This test verifies the error messages from validateGhCli
+	// We can't easily mock exec.Command, but we can at least
+	// verify the function exists and returns expected error types
+	t.Run("returns error when gh not installed", func(t *testing.T) {
+		// We can't force gh to not be installed, but we can verify
+		// the function signature works correctly
+		err := validateGhCli()
+		if err != nil {
+			// Either gh is not installed or not authenticated
+			assert.True(t,
+				strings.Contains(err.Error(), "gh CLI not found") ||
+					strings.Contains(err.Error(), "not authenticated"),
+				"unexpected error: %s", err.Error())
+		}
+		// If err is nil, gh is installed and authenticated - that's OK too
+	})
+}
+
+// TestGitHubPublisher_ExecutePublish_Good runs only with an authenticated
+// gh CLI and exercises the real command path against a bogus release.
+func TestGitHubPublisher_ExecutePublish_Good(t *testing.T) {
+	// These tests run only when gh CLI is available and authenticated
+	if err := validateGhCli(); err != nil {
+		t.Skip("skipping test: gh CLI not available or not authenticated")
+	}
+
+	p := NewGitHubPublisher()
+
+	t.Run("executePublish builds command with artifacts", func(t *testing.T) {
+		// We test the command building by checking that it fails appropriately
+		// with a non-existent release (rather than testing actual release creation)
+		release := &Release{
+			Version:    "v999.999.999-test-nonexistent",
+			Changelog:  "Test changelog",
+			ProjectDir: "/tmp",
+			FS:         io.Local,
+			Artifacts: []build.Artifact{
+				{Path: "/tmp/nonexistent-artifact.tar.gz"},
+			},
+		}
+		cfg := PublisherConfig{
+			Type:       "github",
+			Draft:      true,
+			Prerelease: true,
+		}
+
+		// This will fail because the artifact doesn't exist, but it proves
+		// the code path runs
+		err := p.executePublish(context.Background(), release, cfg, "test-owner/test-repo-nonexistent")
+		assert.Error(t, err) // Expected to fail
+	})
+}
+
+// TestReleaseExists_Good runs only with an authenticated gh CLI.
+func TestReleaseExists_Good(t *testing.T) {
+	// These tests run only when gh CLI is available
+	if err := validateGhCli(); err != nil {
+		t.Skip("skipping test: gh CLI not available or not authenticated")
+	}
+
+	t.Run("returns false for non-existent release", func(t *testing.T) {
+		ctx := context.Background()
+		// Use a non-existent repo and version
+		exists := ReleaseExists(ctx, "nonexistent-owner-12345/nonexistent-repo-67890", "v999.999.999")
+		assert.False(t, exists)
+	})
+
+	t.Run("checks release existence", func(t *testing.T) {
+		ctx := context.Background()
+		// Test against a known public repository with releases
+		// This tests the true path if the release exists
+		exists := ReleaseExists(ctx, "cli/cli", "v2.0.0")
+		// We don't assert the result since it depends on network access
+		// and the release may or may not exist
+		_ = exists // Just verify function runs without panic
+	})
+}
diff --git a/release/publishers/homebrew.go b/release/publishers/homebrew.go
new file mode 100644
index 0000000..5614a3b
--- /dev/null
+++ b/release/publishers/homebrew.go
@@ -0,0 +1,371 @@
+// Package publishers provides release publishing implementations.
+package publishers + +import ( + "bytes" + "context" + "embed" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "text/template" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" +) + +//go:embed templates/homebrew/*.tmpl +var homebrewTemplates embed.FS + +// HomebrewConfig holds Homebrew-specific configuration. +type HomebrewConfig struct { + // Tap is the Homebrew tap repository (e.g., "host-uk/homebrew-tap"). + Tap string + // Formula is the formula name (defaults to project name). + Formula string + // Official config for generating files for official repo PRs. + Official *OfficialConfig +} + +// OfficialConfig holds configuration for generating files for official repo PRs. +type OfficialConfig struct { + // Enabled determines whether to generate files for official repos. + Enabled bool + // Output is the directory to write generated files. + Output string +} + +// HomebrewPublisher publishes releases to Homebrew. +type HomebrewPublisher struct{} + +// NewHomebrewPublisher creates a new Homebrew publisher. +func NewHomebrewPublisher() *HomebrewPublisher { + return &HomebrewPublisher{} +} + +// Name returns the publisher's identifier. +func (p *HomebrewPublisher) Name() string { + return "homebrew" +} + +// Publish publishes the release to Homebrew. 
+func (p *HomebrewPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error { + // Parse config + cfg := p.parseConfig(pubCfg, relCfg) + + // Validate configuration + if cfg.Tap == "" && (cfg.Official == nil || !cfg.Official.Enabled) { + return fmt.Errorf("homebrew.Publish: tap is required (set publish.homebrew.tap in config)") + } + + // Get repository and project info + repo := "" + if relCfg != nil { + repo = relCfg.GetRepository() + } + if repo == "" { + detectedRepo, err := detectRepository(release.ProjectDir) + if err != nil { + return fmt.Errorf("homebrew.Publish: could not determine repository: %w", err) + } + repo = detectedRepo + } + + projectName := "" + if relCfg != nil { + projectName = relCfg.GetProjectName() + } + if projectName == "" { + parts := strings.Split(repo, "/") + projectName = parts[len(parts)-1] + } + + formulaName := cfg.Formula + if formulaName == "" { + formulaName = projectName + } + + // Strip leading 'v' from version + version := strings.TrimPrefix(release.Version, "v") + + // Build checksums map from artifacts + checksums := buildChecksumMap(release.Artifacts) + + // Template data + data := homebrewTemplateData{ + FormulaClass: toFormulaClass(formulaName), + Description: fmt.Sprintf("%s CLI", projectName), + Repository: repo, + Version: version, + License: "MIT", + BinaryName: projectName, + Checksums: checksums, + } + + if dryRun { + return p.dryRunPublish(release.FS, data, cfg) + } + + return p.executePublish(ctx, release.ProjectDir, data, cfg, release) +} + +// homebrewTemplateData holds data for Homebrew templates. +type homebrewTemplateData struct { + FormulaClass string + Description string + Repository string + Version string + License string + BinaryName string + Checksums ChecksumMap +} + +// ChecksumMap holds checksums for different platform/arch combinations. 
+type ChecksumMap struct { + DarwinAmd64 string + DarwinArm64 string + LinuxAmd64 string + LinuxArm64 string + WindowsAmd64 string + WindowsArm64 string +} + +// parseConfig extracts Homebrew-specific configuration. +func (p *HomebrewPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) HomebrewConfig { + cfg := HomebrewConfig{ + Tap: "", + Formula: "", + } + + if ext, ok := pubCfg.Extended.(map[string]any); ok { + if tap, ok := ext["tap"].(string); ok && tap != "" { + cfg.Tap = tap + } + if formula, ok := ext["formula"].(string); ok && formula != "" { + cfg.Formula = formula + } + if official, ok := ext["official"].(map[string]any); ok { + cfg.Official = &OfficialConfig{} + if enabled, ok := official["enabled"].(bool); ok { + cfg.Official.Enabled = enabled + } + if output, ok := official["output"].(string); ok { + cfg.Official.Output = output + } + } + } + + return cfg +} + +// dryRunPublish shows what would be done. +func (p *HomebrewPublisher) dryRunPublish(m io.Medium, data homebrewTemplateData, cfg HomebrewConfig) error { + fmt.Println() + fmt.Println("=== DRY RUN: Homebrew Publish ===") + fmt.Println() + fmt.Printf("Formula: %s\n", data.FormulaClass) + fmt.Printf("Version: %s\n", data.Version) + fmt.Printf("Tap: %s\n", cfg.Tap) + fmt.Printf("Repository: %s\n", data.Repository) + fmt.Println() + + // Generate and show formula + formula, err := p.renderTemplate(m, "templates/homebrew/formula.rb.tmpl", data) + if err != nil { + return fmt.Errorf("homebrew.dryRunPublish: %w", err) + } + fmt.Println("Generated formula.rb:") + fmt.Println("---") + fmt.Println(formula) + fmt.Println("---") + fmt.Println() + + if cfg.Tap != "" { + fmt.Printf("Would commit to tap: %s\n", cfg.Tap) + } + if cfg.Official != nil && cfg.Official.Enabled { + output := cfg.Official.Output + if output == "" { + output = "dist/homebrew" + } + fmt.Printf("Would write files for official PR to: %s\n", output) + } + fmt.Println() + fmt.Println("=== END DRY RUN ===") + + return nil 
+} + +// executePublish creates the formula and commits to tap. +func (p *HomebrewPublisher) executePublish(ctx context.Context, projectDir string, data homebrewTemplateData, cfg HomebrewConfig, release *Release) error { + // Generate formula + formula, err := p.renderTemplate(release.FS, "templates/homebrew/formula.rb.tmpl", data) + if err != nil { + return fmt.Errorf("homebrew.Publish: failed to render formula: %w", err) + } + + // If official config is enabled, write to output directory + if cfg.Official != nil && cfg.Official.Enabled { + output := cfg.Official.Output + if output == "" { + output = filepath.Join(projectDir, "dist", "homebrew") + } else if !filepath.IsAbs(output) { + output = filepath.Join(projectDir, output) + } + + if err := release.FS.EnsureDir(output); err != nil { + return fmt.Errorf("homebrew.Publish: failed to create output directory: %w", err) + } + + formulaPath := filepath.Join(output, fmt.Sprintf("%s.rb", strings.ToLower(data.FormulaClass))) + if err := release.FS.Write(formulaPath, formula); err != nil { + return fmt.Errorf("homebrew.Publish: failed to write formula: %w", err) + } + fmt.Printf("Wrote Homebrew formula for official PR: %s\n", formulaPath) + } + + // If tap is configured, commit to it + if cfg.Tap != "" { + if err := p.commitToTap(ctx, cfg.Tap, data, formula); err != nil { + return err + } + } + + return nil +} + +// commitToTap commits the formula to the tap repository. 
+func (p *HomebrewPublisher) commitToTap(ctx context.Context, tap string, data homebrewTemplateData, formula string) error { + // Clone tap repo to temp directory + tmpDir, err := os.MkdirTemp("", "homebrew-tap-*") + if err != nil { + return fmt.Errorf("homebrew.Publish: failed to create temp directory: %w", err) + } + defer func() { _ = os.RemoveAll(tmpDir) }() + + // Clone the tap + fmt.Printf("Cloning tap %s...\n", tap) + cmd := exec.CommandContext(ctx, "gh", "repo", "clone", tap, tmpDir, "--", "--depth=1") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("homebrew.Publish: failed to clone tap: %w", err) + } + + // Ensure Formula directory exists + formulaDir := filepath.Join(tmpDir, "Formula") + if err := os.MkdirAll(formulaDir, 0755); err != nil { + return fmt.Errorf("homebrew.Publish: failed to create Formula directory: %w", err) + } + + // Write formula + formulaPath := filepath.Join(formulaDir, fmt.Sprintf("%s.rb", strings.ToLower(data.FormulaClass))) + if err := os.WriteFile(formulaPath, []byte(formula), 0644); err != nil { + return fmt.Errorf("homebrew.Publish: failed to write formula: %w", err) + } + + // Git add, commit, push + commitMsg := fmt.Sprintf("Update %s to %s", data.FormulaClass, data.Version) + + cmd = exec.CommandContext(ctx, "git", "add", ".") + cmd.Dir = tmpDir + if err := cmd.Run(); err != nil { + return fmt.Errorf("homebrew.Publish: git add failed: %w", err) + } + + cmd = exec.CommandContext(ctx, "git", "commit", "-m", commitMsg) + cmd.Dir = tmpDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("homebrew.Publish: git commit failed: %w", err) + } + + cmd = exec.CommandContext(ctx, "git", "push") + cmd.Dir = tmpDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("homebrew.Publish: git push failed: %w", err) + } + + fmt.Printf("Updated Homebrew tap: %s\n", tap) + return 
nil +} + +// renderTemplate renders an embedded template with the given data. +func (p *HomebrewPublisher) renderTemplate(m io.Medium, name string, data homebrewTemplateData) (string, error) { + var content []byte + var err error + + // Try custom template from medium + customPath := filepath.Join(".core", name) + if m != nil && m.IsFile(customPath) { + customContent, err := m.Read(customPath) + if err == nil { + content = []byte(customContent) + } + } + + // Fallback to embedded template + if content == nil { + content, err = homebrewTemplates.ReadFile(name) + if err != nil { + return "", fmt.Errorf("failed to read template %s: %w", name, err) + } + } + + tmpl, err := template.New(filepath.Base(name)).Parse(string(content)) + if err != nil { + return "", fmt.Errorf("failed to parse template %s: %w", name, err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return "", fmt.Errorf("failed to execute template %s: %w", name, err) + } + + return buf.String(), nil +} + +// toFormulaClass converts a package name to a Ruby class name. +func toFormulaClass(name string) string { + // Convert kebab-case to PascalCase + parts := strings.Split(name, "-") + for i, part := range parts { + if len(part) > 0 { + parts[i] = strings.ToUpper(part[:1]) + part[1:] + } + } + return strings.Join(parts, "") +} + +// buildChecksumMap extracts checksums from artifacts into a structured map. 
+func buildChecksumMap(artifacts []build.Artifact) ChecksumMap {
+	checksums := ChecksumMap{}
+
+	for _, a := range artifacts {
+		// Parse artifact name to determine platform; matching is by
+		// "<os>-<arch>" substring of the file name, so artifacts for
+		// unrecognised platforms are ignored.
+		name := filepath.Base(a.Path)
+		checksum := a.Checksum
+
+		switch {
+		case strings.Contains(name, "darwin-amd64"):
+			checksums.DarwinAmd64 = checksum
+		case strings.Contains(name, "darwin-arm64"):
+			checksums.DarwinArm64 = checksum
+		case strings.Contains(name, "linux-amd64"):
+			checksums.LinuxAmd64 = checksum
+		case strings.Contains(name, "linux-arm64"):
+			checksums.LinuxArm64 = checksum
+		case strings.Contains(name, "windows-amd64"):
+			checksums.WindowsAmd64 = checksum
+		case strings.Contains(name, "windows-arm64"):
+			checksums.WindowsArm64 = checksum
+		}
+	}
+
+	return checksums
+}
diff --git a/release/publishers/homebrew_test.go b/release/publishers/homebrew_test.go
new file mode 100644
index 0000000..ae50e75
--- /dev/null
+++ b/release/publishers/homebrew_test.go
@@ -0,0 +1,347 @@
+package publishers
+
+import (
+	"bytes"
+	"context"
+	"os"
+	"testing"
+
+	"forge.lthn.ai/core/go-devops/build"
+	"forge.lthn.ai/core/go/pkg/io"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestHomebrewPublisher_Name_Good pins the publisher identifier.
+func TestHomebrewPublisher_Name_Good(t *testing.T) {
+	t.Run("returns homebrew", func(t *testing.T) {
+		p := NewHomebrewPublisher()
+		assert.Equal(t, "homebrew", p.Name())
+	})
+}
+
+// TestHomebrewPublisher_ParseConfig_Good covers tap/formula/official
+// extraction from the Extended config map.
+func TestHomebrewPublisher_ParseConfig_Good(t *testing.T) {
+	p := NewHomebrewPublisher()
+
+	t.Run("uses defaults when no extended config", func(t *testing.T) {
+		pubCfg := PublisherConfig{Type: "homebrew"}
+		relCfg := &mockReleaseConfig{repository: "owner/repo"}
+		cfg := p.parseConfig(pubCfg, relCfg)
+
+		assert.Empty(t, cfg.Tap)
+		assert.Empty(t, cfg.Formula)
+		assert.Nil(t, cfg.Official)
+	})
+
+	t.Run("parses tap and formula from extended config", func(t *testing.T) {
+		pubCfg := PublisherConfig{
+			Type: "homebrew",
+			Extended: map[string]any{
+				"tap":     "host-uk/homebrew-tap",
+				"formula": "myformula",
+			},
+		}
+		relCfg := &mockReleaseConfig{repository: "owner/repo"}
+		cfg := p.parseConfig(pubCfg, relCfg)
+
+		assert.Equal(t, "host-uk/homebrew-tap", cfg.Tap)
+		assert.Equal(t, "myformula", cfg.Formula)
+	})
+
+	t.Run("parses official config", func(t *testing.T) {
+		pubCfg := PublisherConfig{
+			Type: "homebrew",
+			Extended: map[string]any{
+				"official": map[string]any{
+					"enabled": true,
+					"output":  "dist/brew",
+				},
+			},
+		}
+		relCfg := &mockReleaseConfig{repository: "owner/repo"}
+		cfg := p.parseConfig(pubCfg, relCfg)
+
+		require.NotNil(t, cfg.Official)
+		assert.True(t, cfg.Official.Enabled)
+		assert.Equal(t, "dist/brew", cfg.Official.Output)
+	})
+
+	t.Run("handles missing official fields", func(t *testing.T) {
+		pubCfg := PublisherConfig{
+			Type: "homebrew",
+			Extended: map[string]any{
+				"official": map[string]any{},
+			},
+		}
+		relCfg := &mockReleaseConfig{repository: "owner/repo"}
+		cfg := p.parseConfig(pubCfg, relCfg)
+
+		require.NotNil(t, cfg.Official)
+		assert.False(t, cfg.Official.Enabled)
+		assert.Empty(t, cfg.Official.Output)
+	})
+}
+
+// TestHomebrewPublisher_ToFormulaClass_Good covers kebab-case to
+// PascalCase conversion.
+func TestHomebrewPublisher_ToFormulaClass_Good(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    string
+		expected string
+	}{
+		{
+			name:     "simple name",
+			input:    "core",
+			expected: "Core",
+		},
+		{
+			name:     "kebab case",
+			input:    "my-cli-tool",
+			expected: "MyCliTool",
+		},
+		{
+			name:     "already capitalised",
+			input:    "CLI",
+			expected: "CLI",
+		},
+		{
+			name:     "single letter",
+			input:    "x",
+			expected: "X",
+		},
+		{
+			name:     "multiple dashes",
+			input:    "my-super-cool-app",
+			expected: "MySuperCoolApp",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			result := toFormulaClass(tc.input)
+			assert.Equal(t, tc.expected, result)
+		})
+	}
+}
+
+// TestHomebrewPublisher_BuildChecksumMap_Good covers full, empty and
+// partial platform coverage of the artifact list.
+func TestHomebrewPublisher_BuildChecksumMap_Good(t *testing.T) {
+	t.Run("maps artifacts to checksums by platform", func(t *testing.T) {
+		artifacts := []build.Artifact{
+			{Path: "/dist/myapp-darwin-amd64.tar.gz", OS: "darwin", Arch: "amd64", Checksum: "abc123"},
+			{Path: "/dist/myapp-darwin-arm64.tar.gz", OS: "darwin", Arch: "arm64", Checksum: "def456"},
+			{Path: "/dist/myapp-linux-amd64.tar.gz", OS: "linux", Arch: "amd64", Checksum: "ghi789"},
+			{Path: "/dist/myapp-linux-arm64.tar.gz", OS: "linux", Arch: "arm64", Checksum: "jkl012"},
+			{Path: "/dist/myapp-windows-amd64.zip", OS: "windows", Arch: "amd64", Checksum: "mno345"},
+			{Path: "/dist/myapp-windows-arm64.zip", OS: "windows", Arch: "arm64", Checksum: "pqr678"},
+		}
+
+		checksums := buildChecksumMap(artifacts)
+
+		assert.Equal(t, "abc123", checksums.DarwinAmd64)
+		assert.Equal(t, "def456", checksums.DarwinArm64)
+		assert.Equal(t, "ghi789", checksums.LinuxAmd64)
+		assert.Equal(t, "jkl012", checksums.LinuxArm64)
+		assert.Equal(t, "mno345", checksums.WindowsAmd64)
+		assert.Equal(t, "pqr678", checksums.WindowsArm64)
+	})
+
+	t.Run("handles empty artifacts", func(t *testing.T) {
+		checksums := buildChecksumMap([]build.Artifact{})
+
+		assert.Empty(t, checksums.DarwinAmd64)
+		assert.Empty(t, checksums.DarwinArm64)
+		assert.Empty(t, checksums.LinuxAmd64)
+		assert.Empty(t, checksums.LinuxArm64)
+	})
+
+	t.Run("handles partial platform coverage", func(t *testing.T) {
+		artifacts := []build.Artifact{
+			{Path: "/dist/myapp-darwin-arm64.tar.gz", Checksum: "def456"},
+			{Path: "/dist/myapp-linux-amd64.tar.gz", Checksum: "ghi789"},
+		}
+
+		checksums := buildChecksumMap(artifacts)
+
+		assert.Empty(t, checksums.DarwinAmd64)
+		assert.Equal(t, "def456", checksums.DarwinArm64)
+		assert.Equal(t, "ghi789", checksums.LinuxAmd64)
+		assert.Empty(t, checksums.LinuxArm64)
+	})
+}
+
+// TestHomebrewPublisher_RenderTemplate_Good renders the embedded formula
+// template and checks the generated Ruby content.
+func TestHomebrewPublisher_RenderTemplate_Good(t *testing.T) {
+	p := NewHomebrewPublisher()
+
+	t.Run("renders formula template with data", func(t *testing.T) {
+		data := homebrewTemplateData{
+			FormulaClass: "MyApp",
+			Description:  "My awesome CLI",
+			Repository:   "owner/myapp",
+			Version:      "1.2.3",
+			License:      "MIT",
+			BinaryName:   "myapp",
+			Checksums: ChecksumMap{
+				DarwinAmd64: "abc123",
+				DarwinArm64: "def456",
+				LinuxAmd64:  "ghi789",
+				LinuxArm64:  "jkl012",
+			},
+		}
+
+		result, err := p.renderTemplate(io.Local, "templates/homebrew/formula.rb.tmpl", data)
+		require.NoError(t, err)
+
+		assert.Contains(t, result, "class MyApp < Formula")
+		assert.Contains(t, result, `desc "My awesome CLI"`)
+		assert.Contains(t, result, `version "1.2.3"`)
+		assert.Contains(t, result, `license "MIT"`)
+		assert.Contains(t, result, "owner/myapp")
+		assert.Contains(t, result, "abc123")
+		assert.Contains(t, result, "def456")
+		assert.Contains(t, result, "ghi789")
+		assert.Contains(t, result, "jkl012")
+		assert.Contains(t, result, `bin.install "myapp"`)
+	})
+}
+
+// TestHomebrewPublisher_RenderTemplate_Bad checks the missing-template path.
+func TestHomebrewPublisher_RenderTemplate_Bad(t *testing.T) {
+	p := NewHomebrewPublisher()
+
+	t.Run("returns error for non-existent template", func(t *testing.T) {
+		data := homebrewTemplateData{}
+		_, err := p.renderTemplate(io.Local, "templates/homebrew/nonexistent.tmpl", data)
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "failed to read template")
+	})
+}
+
+// TestHomebrewPublisher_DryRunPublish_Good captures stdout and asserts
+// the dry-run report for tap and official-output configurations.
+func TestHomebrewPublisher_DryRunPublish_Good(t *testing.T) {
+	p := NewHomebrewPublisher()
+
+	t.Run("outputs expected dry run information", func(t *testing.T) {
+		// Capture stdout
+		oldStdout := os.Stdout
+		r, w, _ := os.Pipe()
+		os.Stdout = w
+
+		data := homebrewTemplateData{
+			FormulaClass: "MyApp",
+			Description:  "My CLI",
+			Repository:   "owner/repo",
+			Version:      "1.0.0",
+			License:      "MIT",
+			BinaryName:   "myapp",
+			Checksums:    ChecksumMap{},
+		}
+		cfg := HomebrewConfig{
+			Tap: "owner/homebrew-tap",
+		}
+
+		err := p.dryRunPublish(io.Local, data, cfg)
+
+		_ = w.Close()
+		var buf bytes.Buffer
+		_, _ = buf.ReadFrom(r)
+		os.Stdout = oldStdout
+
+		require.NoError(t, err)
+		output := buf.String()
+
+		assert.Contains(t, output, "DRY RUN: Homebrew Publish")
+		assert.Contains(t, output, "Formula: MyApp")
+		assert.Contains(t, output, "Version: 1.0.0")
+		assert.Contains(t, output, "Tap: owner/homebrew-tap")
+		assert.Contains(t, output, "Repository: owner/repo")
+		assert.Contains(t, output, "Would commit to tap: owner/homebrew-tap")
+		assert.Contains(t, output, "END DRY RUN")
+	})
+
+	t.Run("shows official output path when enabled", func(t *testing.T) {
+		oldStdout := os.Stdout
+		r, w, _ := os.Pipe()
+		os.Stdout = w
+
+		data := homebrewTemplateData{
+			FormulaClass: "MyApp",
+			Version:      "1.0.0",
+			BinaryName:   "myapp",
+			Checksums:    ChecksumMap{},
+		}
+		cfg := HomebrewConfig{
+			Official: &OfficialConfig{
+				Enabled: true,
+				Output:  "custom/path",
+			},
+		}
+
+		err := p.dryRunPublish(io.Local, data, cfg)
+
+		_ = w.Close()
+		var buf bytes.Buffer
+		_, _ = buf.ReadFrom(r)
+		os.Stdout = oldStdout
+
+		require.NoError(t, err)
+		output := buf.String()
+		assert.Contains(t, output, "Would write files for official PR to: custom/path")
+	})
+
+	t.Run("uses default official output path when not specified", func(t *testing.T) {
+		oldStdout := os.Stdout
+		r, w, _ := os.Pipe()
+		os.Stdout = w
+
+		data := homebrewTemplateData{
+			FormulaClass: "MyApp",
+			Version:      "1.0.0",
+			BinaryName:   "myapp",
+			Checksums:    ChecksumMap{},
+		}
+		cfg := HomebrewConfig{
+			Official: &OfficialConfig{
+				Enabled: true,
+			},
+		}
+
+		err := p.dryRunPublish(io.Local, data, cfg)
+
+		_ = w.Close()
+		var buf bytes.Buffer
+		_, _ = buf.ReadFrom(r)
+		os.Stdout = oldStdout
+
+		require.NoError(t, err)
+		output := buf.String()
+		assert.Contains(t, output, "Would write files for official PR to: dist/homebrew")
+	})
+}
+
+// TestHomebrewPublisher_Publish_Bad checks the missing-destination error.
+func TestHomebrewPublisher_Publish_Bad(t *testing.T) {
+	p := NewHomebrewPublisher()
+
+	t.Run("fails when tap not configured and not official mode", func(t *testing.T) {
+		release := &Release{
+			Version:    "v1.0.0",
+			ProjectDir: "/project",
+			FS:         io.Local,
+		}
+		pubCfg := PublisherConfig{Type: "homebrew"}
+		relCfg := &mockReleaseConfig{repository: "owner/repo"}
+
+		err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "tap is required")
+	})
+}
+
+// TestHomebrewConfig_Defaults_Good verifies zero-value defaults.
+func TestHomebrewConfig_Defaults_Good(t *testing.T) {
+	t.Run("has sensible defaults", func(t *testing.T) {
+		p := NewHomebrewPublisher()
+		pubCfg := PublisherConfig{Type: "homebrew"}
+		relCfg := &mockReleaseConfig{repository: "owner/repo"}
+
+		cfg := p.parseConfig(pubCfg, relCfg)
+
+		assert.Empty(t, cfg.Tap)
+		assert.Empty(t, cfg.Formula)
+		assert.Nil(t, cfg.Official)
+	})
+}
diff --git a/release/publishers/linuxkit.go b/release/publishers/linuxkit.go
new file mode 100644
index 0000000..4905575
--- /dev/null
+++ b/release/publishers/linuxkit.go
@@ -0,0 +1,300 @@
+// Package publishers provides release publishing implementations.
+package publishers
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+)
+
+// LinuxKitConfig holds configuration for the LinuxKit publisher.
+type LinuxKitConfig struct {
+	// Config is the path to the LinuxKit YAML configuration file.
+	Config string `yaml:"config"`
+	// Formats are the output formats to build.
+	// Supported: iso, iso-bios, iso-efi, raw, raw-bios, raw-efi,
+	// qcow2, qcow2-bios, qcow2-efi, vmdk, vhd, gcp, aws,
+	// docker (tarball for `docker load`), tar, kernel+initrd
+	Formats []string `yaml:"formats"`
+	// Platforms are the target platforms (linux/amd64, linux/arm64).
+	Platforms []string `yaml:"platforms"`
+}
+
+// LinuxKitPublisher builds and publishes LinuxKit images.
+// It is stateless; all inputs arrive via Publish.
+type LinuxKitPublisher struct{}
+
+// NewLinuxKitPublisher creates a new LinuxKit publisher.
+func NewLinuxKitPublisher() *LinuxKitPublisher {
+	return &LinuxKitPublisher{}
+}
+
+// Name returns the publisher's identifier.
+func (p *LinuxKitPublisher) Name() string {
+	return "linuxkit"
+}
+
+// Publish builds LinuxKit images and uploads them to the GitHub release.
+func (p *LinuxKitPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error { + // Validate linuxkit CLI is available + if err := validateLinuxKitCli(); err != nil { + return err + } + + // Parse LinuxKit-specific config from publisher config + lkCfg := p.parseConfig(pubCfg, release.ProjectDir) + + // Validate config file exists + if !release.FS.Exists(lkCfg.Config) { + return fmt.Errorf("linuxkit.Publish: config file not found: %s", lkCfg.Config) + } + + // Determine repository for artifact upload + repo := "" + if relCfg != nil { + repo = relCfg.GetRepository() + } + if repo == "" { + detectedRepo, err := detectRepository(release.ProjectDir) + if err != nil { + return fmt.Errorf("linuxkit.Publish: could not determine repository: %w", err) + } + repo = detectedRepo + } + + if dryRun { + return p.dryRunPublish(release, lkCfg, repo) + } + + return p.executePublish(ctx, release, lkCfg, repo) +} + +// parseConfig extracts LinuxKit-specific configuration. 
+func (p *LinuxKitPublisher) parseConfig(pubCfg PublisherConfig, projectDir string) LinuxKitConfig { + cfg := LinuxKitConfig{ + Config: filepath.Join(projectDir, ".core", "linuxkit", "server.yml"), + Formats: []string{"iso"}, + Platforms: []string{"linux/amd64"}, + } + + // Override from extended config if present + if ext, ok := pubCfg.Extended.(map[string]any); ok { + if configPath, ok := ext["config"].(string); ok && configPath != "" { + if filepath.IsAbs(configPath) { + cfg.Config = configPath + } else { + cfg.Config = filepath.Join(projectDir, configPath) + } + } + if formats, ok := ext["formats"].([]any); ok && len(formats) > 0 { + cfg.Formats = make([]string, 0, len(formats)) + for _, f := range formats { + if s, ok := f.(string); ok { + cfg.Formats = append(cfg.Formats, s) + } + } + } + if platforms, ok := ext["platforms"].([]any); ok && len(platforms) > 0 { + cfg.Platforms = make([]string, 0, len(platforms)) + for _, p := range platforms { + if s, ok := p.(string); ok { + cfg.Platforms = append(cfg.Platforms, s) + } + } + } + } + + return cfg +} + +// dryRunPublish shows what would be done without actually building. 
+func (p *LinuxKitPublisher) dryRunPublish(release *Release, cfg LinuxKitConfig, repo string) error { + fmt.Println() + fmt.Println("=== DRY RUN: LinuxKit Build & Publish ===") + fmt.Println() + fmt.Printf("Repository: %s\n", repo) + fmt.Printf("Version: %s\n", release.Version) + fmt.Printf("Config: %s\n", cfg.Config) + fmt.Printf("Formats: %s\n", strings.Join(cfg.Formats, ", ")) + fmt.Printf("Platforms: %s\n", strings.Join(cfg.Platforms, ", ")) + fmt.Println() + + outputDir := filepath.Join(release.ProjectDir, "dist", "linuxkit") + baseName := p.buildBaseName(release.Version) + + fmt.Println("Would execute commands:") + for _, platform := range cfg.Platforms { + parts := strings.Split(platform, "/") + arch := "amd64" + if len(parts) == 2 { + arch = parts[1] + } + + for _, format := range cfg.Formats { + outputName := fmt.Sprintf("%s-%s", baseName, arch) + args := p.buildLinuxKitArgs(cfg.Config, format, outputName, outputDir, arch) + fmt.Printf(" linuxkit %s\n", strings.Join(args, " ")) + } + } + fmt.Println() + + fmt.Println("Would upload artifacts to release:") + for _, platform := range cfg.Platforms { + parts := strings.Split(platform, "/") + arch := "amd64" + if len(parts) == 2 { + arch = parts[1] + } + + for _, format := range cfg.Formats { + outputName := fmt.Sprintf("%s-%s", baseName, arch) + artifactPath := p.getArtifactPath(outputDir, outputName, format) + fmt.Printf(" - %s\n", filepath.Base(artifactPath)) + if format == "docker" { + fmt.Printf(" Usage: docker load < %s\n", filepath.Base(artifactPath)) + } + } + } + + fmt.Println() + fmt.Println("=== END DRY RUN ===") + + return nil +} + +// executePublish builds LinuxKit images and uploads them. 
+func (p *LinuxKitPublisher) executePublish(ctx context.Context, release *Release, cfg LinuxKitConfig, repo string) error { + outputDir := filepath.Join(release.ProjectDir, "dist", "linuxkit") + + // Create output directory + if err := release.FS.EnsureDir(outputDir); err != nil { + return fmt.Errorf("linuxkit.Publish: failed to create output directory: %w", err) + } + + baseName := p.buildBaseName(release.Version) + var artifacts []string + + // Build for each platform and format + for _, platform := range cfg.Platforms { + parts := strings.Split(platform, "/") + arch := "amd64" + if len(parts) == 2 { + arch = parts[1] + } + + for _, format := range cfg.Formats { + outputName := fmt.Sprintf("%s-%s", baseName, arch) + + // Build the image + args := p.buildLinuxKitArgs(cfg.Config, format, outputName, outputDir, arch) + cmd := exec.CommandContext(ctx, "linuxkit", args...) + cmd.Dir = release.ProjectDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + fmt.Printf("Building LinuxKit image: %s (%s)\n", outputName, format) + if err := cmd.Run(); err != nil { + return fmt.Errorf("linuxkit.Publish: build failed for %s/%s: %w", platform, format, err) + } + + // Track artifact for upload + artifactPath := p.getArtifactPath(outputDir, outputName, format) + artifacts = append(artifacts, artifactPath) + } + } + + // Upload artifacts to GitHub release + for _, artifactPath := range artifacts { + if !release.FS.Exists(artifactPath) { + return fmt.Errorf("linuxkit.Publish: artifact not found after build: %s", artifactPath) + } + + if err := UploadArtifact(ctx, repo, release.Version, artifactPath); err != nil { + return fmt.Errorf("linuxkit.Publish: failed to upload %s: %w", filepath.Base(artifactPath), err) + } + + // Print helpful usage info for docker format + if strings.HasSuffix(artifactPath, ".docker.tar") { + fmt.Printf(" Load with: docker load < %s\n", filepath.Base(artifactPath)) + } + } + + return nil +} + +// buildBaseName creates the base name for output files. 
+func (p *LinuxKitPublisher) buildBaseName(version string) string { + // Strip leading 'v' if present for cleaner filenames + name := strings.TrimPrefix(version, "v") + return fmt.Sprintf("linuxkit-%s", name) +} + +// buildLinuxKitArgs builds the arguments for linuxkit build command. +func (p *LinuxKitPublisher) buildLinuxKitArgs(configPath, format, outputName, outputDir, arch string) []string { + args := []string{"build"} + + // Output format + args = append(args, "--format", format) + + // Output name + args = append(args, "--name", outputName) + + // Output directory + args = append(args, "--dir", outputDir) + + // Architecture (if not amd64) + if arch != "amd64" { + args = append(args, "--arch", arch) + } + + // Config file + args = append(args, configPath) + + return args +} + +// getArtifactPath returns the expected path of the built artifact. +func (p *LinuxKitPublisher) getArtifactPath(outputDir, outputName, format string) string { + ext := p.getFormatExtension(format) + return filepath.Join(outputDir, outputName+ext) +} + +// getFormatExtension returns the file extension for a LinuxKit output format. +func (p *LinuxKitPublisher) getFormatExtension(format string) string { + switch format { + case "iso", "iso-bios", "iso-efi": + return ".iso" + case "raw", "raw-bios", "raw-efi": + return ".raw" + case "qcow2", "qcow2-bios", "qcow2-efi": + return ".qcow2" + case "vmdk": + return ".vmdk" + case "vhd": + return ".vhd" + case "gcp": + return ".img.tar.gz" + case "aws": + return ".raw" + case "docker": + // Docker format outputs a tarball that can be loaded with `docker load` + return ".docker.tar" + case "tar": + return ".tar" + case "kernel+initrd": + return "-initrd.img" + default: + return "." + format + } +} + +// validateLinuxKitCli checks if the linuxkit CLI is available. +func validateLinuxKitCli() error { + cmd := exec.Command("linuxkit", "version") + if err := cmd.Run(); err != nil { + return fmt.Errorf("linuxkit: linuxkit CLI not found. 
Install it from https://github.com/linuxkit/linuxkit") + } + return nil +} diff --git a/release/publishers/linuxkit_test.go b/release/publishers/linuxkit_test.go new file mode 100644 index 0000000..85a82a9 --- /dev/null +++ b/release/publishers/linuxkit_test.go @@ -0,0 +1,938 @@ +package publishers + +import ( + "bytes" + "context" + "os" + "os/exec" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLinuxKitPublisher_Name_Good(t *testing.T) { + t.Run("returns linuxkit", func(t *testing.T) { + p := NewLinuxKitPublisher() + assert.Equal(t, "linuxkit", p.Name()) + }) +} + +func TestLinuxKitPublisher_ParseConfig_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + t.Run("uses defaults when no extended config", func(t *testing.T) { + pubCfg := PublisherConfig{Type: "linuxkit"} + cfg := p.parseConfig(pubCfg, "/project") + + assert.Equal(t, "/project/.core/linuxkit/server.yml", cfg.Config) + assert.Equal(t, []string{"iso"}, cfg.Formats) + assert.Equal(t, []string{"linux/amd64"}, cfg.Platforms) + }) + + t.Run("parses extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: map[string]any{ + "config": ".core/linuxkit/custom.yml", + "formats": []any{"iso", "qcow2", "vmdk"}, + "platforms": []any{"linux/amd64", "linux/arm64"}, + }, + } + cfg := p.parseConfig(pubCfg, "/project") + + assert.Equal(t, "/project/.core/linuxkit/custom.yml", cfg.Config) + assert.Equal(t, []string{"iso", "qcow2", "vmdk"}, cfg.Formats) + assert.Equal(t, []string{"linux/amd64", "linux/arm64"}, cfg.Platforms) + }) + + t.Run("handles absolute config path", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: map[string]any{ + "config": "/absolute/path/to/config.yml", + }, + } + cfg := p.parseConfig(pubCfg, "/project") + + assert.Equal(t, "/absolute/path/to/config.yml", cfg.Config) + }) +} + +func 
TestLinuxKitPublisher_BuildLinuxKitArgs_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + t.Run("builds basic args for amd64", func(t *testing.T) { + args := p.buildLinuxKitArgs("/config/server.yml", "iso", "linuxkit-1.0.0-amd64", "/output", "amd64") + + assert.Contains(t, args, "build") + assert.Contains(t, args, "--format") + assert.Contains(t, args, "iso") + assert.Contains(t, args, "--name") + assert.Contains(t, args, "linuxkit-1.0.0-amd64") + assert.Contains(t, args, "--dir") + assert.Contains(t, args, "/output") + assert.Contains(t, args, "/config/server.yml") + // Should not contain --arch for amd64 (default) + assert.NotContains(t, args, "--arch") + }) + + t.Run("builds args with arch for arm64", func(t *testing.T) { + args := p.buildLinuxKitArgs("/config/server.yml", "qcow2", "linuxkit-1.0.0-arm64", "/output", "arm64") + + assert.Contains(t, args, "--arch") + assert.Contains(t, args, "arm64") + assert.Contains(t, args, "qcow2") + }) +} + +func TestLinuxKitPublisher_BuildBaseName_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + t.Run("strips v prefix", func(t *testing.T) { + name := p.buildBaseName("v1.2.3") + assert.Equal(t, "linuxkit-1.2.3", name) + }) + + t.Run("handles version without v prefix", func(t *testing.T) { + name := p.buildBaseName("1.2.3") + assert.Equal(t, "linuxkit-1.2.3", name) + }) +} + +func TestLinuxKitPublisher_GetArtifactPath_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + tests := []struct { + name string + outputDir string + outputName string + format string + expected string + }{ + { + name: "ISO format", + outputDir: "/dist/linuxkit", + outputName: "linuxkit-1.0.0-amd64", + format: "iso", + expected: "/dist/linuxkit/linuxkit-1.0.0-amd64.iso", + }, + { + name: "raw format", + outputDir: "/dist/linuxkit", + outputName: "linuxkit-1.0.0-amd64", + format: "raw", + expected: "/dist/linuxkit/linuxkit-1.0.0-amd64.raw", + }, + { + name: "qcow2 format", + outputDir: "/dist/linuxkit", + outputName: "linuxkit-1.0.0-arm64", + 
format: "qcow2", + expected: "/dist/linuxkit/linuxkit-1.0.0-arm64.qcow2", + }, + { + name: "vmdk format", + outputDir: "/dist/linuxkit", + outputName: "linuxkit-1.0.0-amd64", + format: "vmdk", + expected: "/dist/linuxkit/linuxkit-1.0.0-amd64.vmdk", + }, + { + name: "gcp format", + outputDir: "/dist/linuxkit", + outputName: "linuxkit-1.0.0-amd64", + format: "gcp", + expected: "/dist/linuxkit/linuxkit-1.0.0-amd64.img.tar.gz", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + path := p.getArtifactPath(tc.outputDir, tc.outputName, tc.format) + assert.Equal(t, tc.expected, path) + }) + } +} + +func TestLinuxKitPublisher_GetFormatExtension_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + tests := []struct { + format string + expected string + }{ + {"iso", ".iso"}, + {"raw", ".raw"}, + {"qcow2", ".qcow2"}, + {"vmdk", ".vmdk"}, + {"vhd", ".vhd"}, + {"gcp", ".img.tar.gz"}, + {"aws", ".raw"}, + {"unknown", ".unknown"}, + } + + for _, tc := range tests { + t.Run(tc.format, func(t *testing.T) { + ext := p.getFormatExtension(tc.format) + assert.Equal(t, tc.expected, ext) + }) + } +} + +func TestLinuxKitPublisher_Publish_Bad(t *testing.T) { + p := NewLinuxKitPublisher() + + t.Run("fails when config file not found with linuxkit installed", func(t *testing.T) { + if err := validateLinuxKitCli(); err != nil { + t.Skip("skipping test: linuxkit CLI not available") + } + + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/nonexistent", + FS: io.Local, + } + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: map[string]any{ + "config": "/nonexistent/config.yml", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err := p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "config file not found") + }) + + t.Run("fails when linuxkit CLI not available", func(t *testing.T) { + if err := validateLinuxKitCli(); err == nil { + t.Skip("skipping test: linuxkit CLI 
is available") + } + + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/tmp", + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "linuxkit"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err := p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "linuxkit CLI not found") + }) + + t.Run("fails when repository cannot be detected and not provided", func(t *testing.T) { + if err := validateLinuxKitCli(); err != nil { + t.Skip("skipping test: linuxkit CLI not available") + } + + // Create temp directory that is NOT a git repo + tmpDir, err := os.MkdirTemp("", "linuxkit-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + // Create a config file + configPath := filepath.Join(tmpDir, "config.yml") + err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644) + require.NoError(t, err) + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: map[string]any{ + "config": "config.yml", + }, + } + relCfg := &mockReleaseConfig{repository: ""} // Empty repository + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + assert.Error(t, err) + assert.Contains(t, err.Error(), "could not determine repository") + }) +} + +func TestValidateLinuxKitCli_Good(t *testing.T) { + t.Run("returns expected error when linuxkit not installed", func(t *testing.T) { + err := validateLinuxKitCli() + if err != nil { + // LinuxKit is not installed + assert.Contains(t, err.Error(), "linuxkit CLI not found") + } + // If err is nil, linuxkit is installed - that's OK + }) +} + +func TestLinuxKitPublisher_Publish_WithCLI_Good(t *testing.T) { + // These tests run only when linuxkit CLI is available + if err := validateLinuxKitCli(); err != nil { + t.Skip("skipping test: linuxkit CLI not available") + } + + p := NewLinuxKitPublisher() + + t.Run("succeeds with dry run and valid 
config", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "linuxkit-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + // Create config directory and file + configDir := filepath.Join(tmpDir, ".core", "linuxkit") + err = os.MkdirAll(configDir, 0755) + require.NoError(t, err) + + configPath := filepath.Join(configDir, "server.yml") + err = os.WriteFile(configPath, []byte("kernel:\n image: linuxkit/kernel:5.10\n"), 0644) + require.NoError(t, err) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "linuxkit"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "DRY RUN: LinuxKit Build & Publish") + }) + + t.Run("fails with missing config file", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "linuxkit-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "linuxkit"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "config file not found") + }) + + t.Run("uses relCfg repository", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "linuxkit-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + configDir := filepath.Join(tmpDir, ".core", "linuxkit") + err = os.MkdirAll(configDir, 0755) + require.NoError(t, err) + + configPath := filepath.Join(configDir, "server.yml") + err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644) + 
require.NoError(t, err) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "linuxkit"} + relCfg := &mockReleaseConfig{repository: "custom-owner/custom-repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "custom-owner/custom-repo") + }) + + t.Run("detects repository when not provided", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "linuxkit-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + // Create config file + configDir := filepath.Join(tmpDir, ".core", "linuxkit") + err = os.MkdirAll(configDir, 0755) + require.NoError(t, err) + + configPath := filepath.Join(configDir, "server.yml") + err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644) + require.NoError(t, err) + + // Initialize git repo + cmd := exec.Command("git", "init") + cmd.Dir = tmpDir + require.NoError(t, cmd.Run()) + + cmd = exec.Command("git", "remote", "add", "origin", "git@github.com:detected-owner/detected-repo.git") + cmd.Dir = tmpDir + require.NoError(t, cmd.Run()) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "linuxkit"} + relCfg := &mockReleaseConfig{repository: ""} // Empty to trigger detection + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "detected-owner/detected-repo") + }) +} + +func TestLinuxKitPublisher_Publish_NilRelCfg_Good(t *testing.T) { + if err := validateLinuxKitCli(); 
err != nil { + t.Skip("skipping test: linuxkit CLI not available") + } + + p := NewLinuxKitPublisher() + + t.Run("handles nil relCfg by detecting repo", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "linuxkit-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + // Create config file + configDir := filepath.Join(tmpDir, ".core", "linuxkit") + err = os.MkdirAll(configDir, 0755) + require.NoError(t, err) + + configPath := filepath.Join(configDir, "server.yml") + err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644) + require.NoError(t, err) + + // Initialize git repo + cmd := exec.Command("git", "init") + cmd.Dir = tmpDir + require.NoError(t, cmd.Run()) + + cmd = exec.Command("git", "remote", "add", "origin", "git@github.com:nil-owner/nil-repo.git") + cmd.Dir = tmpDir + require.NoError(t, cmd.Run()) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + } + pubCfg := PublisherConfig{Type: "linuxkit"} + + err = p.Publish(context.TODO(), release, pubCfg, nil, true) // nil relCfg + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "nil-owner/nil-repo") + }) +} + +// mockReleaseConfig implements ReleaseConfig for testing. 
+type mockReleaseConfig struct { + repository string + projectName string +} + +func (m *mockReleaseConfig) GetRepository() string { + return m.repository +} + +func (m *mockReleaseConfig) GetProjectName() string { + return m.projectName +} + +func TestLinuxKitPublisher_DryRunPublish_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + t.Run("outputs expected dry run information", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/project", + FS: io.Local, + } + cfg := LinuxKitConfig{ + Config: "/project/.core/linuxkit/server.yml", + Formats: []string{"iso", "qcow2"}, + Platforms: []string{"linux/amd64", "linux/arm64"}, + } + + err := p.dryRunPublish(release, cfg, "owner/repo") + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "DRY RUN: LinuxKit Build & Publish") + assert.Contains(t, output, "Repository: owner/repo") + assert.Contains(t, output, "Version: v1.0.0") + assert.Contains(t, output, "Config: /project/.core/linuxkit/server.yml") + assert.Contains(t, output, "Formats: iso, qcow2") + assert.Contains(t, output, "Platforms: linux/amd64, linux/arm64") + assert.Contains(t, output, "Would execute commands:") + assert.Contains(t, output, "linuxkit build") + assert.Contains(t, output, "Would upload artifacts to release:") + assert.Contains(t, output, "linuxkit-1.0.0-amd64.iso") + assert.Contains(t, output, "linuxkit-1.0.0-amd64.qcow2") + assert.Contains(t, output, "linuxkit-1.0.0-arm64.iso") + assert.Contains(t, output, "linuxkit-1.0.0-arm64.qcow2") + assert.Contains(t, output, "END DRY RUN") + }) + + t.Run("shows docker format usage hint", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/project", + FS: io.Local, + } + cfg := LinuxKitConfig{ + 
Config: "/config.yml", + Formats: []string{"docker"}, + Platforms: []string{"linux/amd64"}, + } + + err := p.dryRunPublish(release, cfg, "owner/repo") + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "linuxkit-1.0.0-amd64.docker.tar") + assert.Contains(t, output, "Usage: docker load <") + }) + + t.Run("handles single platform and format", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v2.0.0", + ProjectDir: "/project", + FS: io.Local, + } + cfg := LinuxKitConfig{ + Config: "/config.yml", + Formats: []string{"iso"}, + Platforms: []string{"linux/amd64"}, + } + + err := p.dryRunPublish(release, cfg, "owner/repo") + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "linuxkit-2.0.0-amd64.iso") + assert.NotContains(t, output, "arm64") + }) +} + +func TestLinuxKitPublisher_GetFormatExtension_AllFormats_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + tests := []struct { + format string + expected string + }{ + {"iso", ".iso"}, + {"iso-bios", ".iso"}, + {"iso-efi", ".iso"}, + {"raw", ".raw"}, + {"raw-bios", ".raw"}, + {"raw-efi", ".raw"}, + {"qcow2", ".qcow2"}, + {"qcow2-bios", ".qcow2"}, + {"qcow2-efi", ".qcow2"}, + {"vmdk", ".vmdk"}, + {"vhd", ".vhd"}, + {"gcp", ".img.tar.gz"}, + {"aws", ".raw"}, + {"docker", ".docker.tar"}, + {"tar", ".tar"}, + {"kernel+initrd", "-initrd.img"}, + {"custom--format", ".custom--format"}, + } + + for _, tc := range tests { + t.Run(tc.format, func(t *testing.T) { + ext := p.getFormatExtension(tc.format) + assert.Equal(t, tc.expected, ext) + }) + } +} + +func TestLinuxKitPublisher_BuildLinuxKitArgs_AllArchitectures_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + t.Run("amd64 does not include arch flag", func(t 
*testing.T) { + args := p.buildLinuxKitArgs("/config.yml", "iso", "output--name", "/output", "amd64") + + assert.Contains(t, args, "build") + assert.Contains(t, args, "--format") + assert.Contains(t, args, "iso") + assert.Contains(t, args, "--name") + assert.Contains(t, args, "output--name") + assert.Contains(t, args, "--dir") + assert.Contains(t, args, "/output") + assert.Contains(t, args, "/config.yml") + assert.NotContains(t, args, "--arch") + }) + + t.Run("arm64 includes arch flag", func(t *testing.T) { + args := p.buildLinuxKitArgs("/config.yml", "qcow2", "output--name", "/output", "arm64") + + assert.Contains(t, args, "--arch") + assert.Contains(t, args, "arm64") + }) + + t.Run("other architectures include arch flag", func(t *testing.T) { + args := p.buildLinuxKitArgs("/config.yml", "raw", "output--name", "/output", "riscv64") + + assert.Contains(t, args, "--arch") + assert.Contains(t, args, "riscv64") + }) +} + +func TestLinuxKitPublisher_ParseConfig_EdgeCases_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + t.Run("handles nil extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: nil, + } + + cfg := p.parseConfig(pubCfg, "/project") + + assert.Equal(t, "/project/.core/linuxkit/server.yml", cfg.Config) + assert.Equal(t, []string{"iso"}, cfg.Formats) + assert.Equal(t, []string{"linux/amd64"}, cfg.Platforms) + }) + + t.Run("handles empty extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: map[string]any{}, + } + + cfg := p.parseConfig(pubCfg, "/project") + + assert.Equal(t, "/project/.core/linuxkit/server.yml", cfg.Config) + assert.Equal(t, []string{"iso"}, cfg.Formats) + assert.Equal(t, []string{"linux/amd64"}, cfg.Platforms) + }) + + t.Run("handles mixed format types in extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: map[string]any{ + "formats": []any{"iso", 123, "qcow2"}, // includes non-string + }, + } + + 
cfg := p.parseConfig(pubCfg, "/project") + + assert.Equal(t, []string{"iso", "qcow2"}, cfg.Formats) + }) + + t.Run("handles mixed platform types in extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: map[string]any{ + "platforms": []any{"linux/amd64", nil, "linux/arm64"}, + }, + } + + cfg := p.parseConfig(pubCfg, "/project") + + assert.Equal(t, []string{"linux/amd64", "linux/arm64"}, cfg.Platforms) + }) +} + +func TestLinuxKitPublisher_BuildBaseName_EdgeCases_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + tests := []struct { + name string + version string + expected string + }{ + {"strips v prefix", "v1.2.3", "linuxkit-1.2.3"}, + {"no v prefix", "1.2.3", "linuxkit-1.2.3"}, + {"prerelease version", "v1.0.0-alpha.1", "linuxkit-1.0.0-alpha.1"}, + {"build metadata", "v1.0.0+build.123", "linuxkit-1.0.0+build.123"}, + {"only v", "v", "linuxkit-"}, + {"empty string", "", "linuxkit-"}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + name := p.buildBaseName(tc.version) + assert.Equal(t, tc.expected, name) + }) + } +} + +func TestLinuxKitPublisher_GetArtifactPath_AllFormats_Good(t *testing.T) { + p := NewLinuxKitPublisher() + + tests := []struct { + name string + outputDir string + outputName string + format string + expected string + }{ + { + name: "ISO format", + outputDir: "/dist", + outputName: "linuxkit-1.0.0-amd64", + format: "iso", + expected: "/dist/linuxkit-1.0.0-amd64.iso", + }, + { + name: "ISO-BIOS format", + outputDir: "/dist", + outputName: "linuxkit-1.0.0-amd64", + format: "iso-bios", + expected: "/dist/linuxkit-1.0.0-amd64.iso", + }, + { + name: "docker format", + outputDir: "/output", + outputName: "linuxkit-2.0.0-arm64", + format: "docker", + expected: "/output/linuxkit-2.0.0-arm64.docker.tar", + }, + { + name: "tar format", + outputDir: "/output", + outputName: "linuxkit-1.0.0", + format: "tar", + expected: "/output/linuxkit-1.0.0.tar", + }, + { + name: "kernel+initrd 
format", + outputDir: "/output", + outputName: "linuxkit-1.0.0", + format: "kernel+initrd", + expected: "/output/linuxkit-1.0.0-initrd.img", + }, + { + name: "GCP format", + outputDir: "/output", + outputName: "linuxkit-1.0.0", + format: "gcp", + expected: "/output/linuxkit-1.0.0.img.tar.gz", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + path := p.getArtifactPath(tc.outputDir, tc.outputName, tc.format) + assert.Equal(t, tc.expected, path) + }) + } +} + +func TestLinuxKitPublisher_Publish_DryRun_Good(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Skip if linuxkit CLI is not available + if err := validateLinuxKitCli(); err != nil { + t.Skip("skipping test: linuxkit CLI not available") + } + + p := NewLinuxKitPublisher() + + t.Run("dry run succeeds with valid config file", func(t *testing.T) { + // Create temp directory with config file + tmpDir, err := os.MkdirTemp("", "linuxkit-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + configDir := filepath.Join(tmpDir, ".core", "linuxkit") + err = os.MkdirAll(configDir, 0755) + require.NoError(t, err) + + configPath := filepath.Join(configDir, "server.yml") + err = os.WriteFile(configPath, []byte("kernel:\n image: linuxkit/kernel:5.10\n"), 0644) + require.NoError(t, err) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "linuxkit"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "DRY RUN: LinuxKit Build & Publish") + }) + + t.Run("dry run uses custom config path", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", 
"linuxkit-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + customConfigPath := filepath.Join(tmpDir, "custom-config.yml") + err = os.WriteFile(customConfigPath, []byte("kernel:\n image: custom\n"), 0644) + require.NoError(t, err) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v1.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: map[string]any{ + "config": customConfigPath, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "custom-config.yml") + }) + + t.Run("dry run with multiple formats and platforms", func(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "linuxkit-test") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmpDir) }() + + configPath := filepath.Join(tmpDir, "config.yml") + err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644) + require.NoError(t, err) + + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + release := &Release{ + Version: "v2.0.0", + ProjectDir: tmpDir, + FS: io.Local, + } + pubCfg := PublisherConfig{ + Type: "linuxkit", + Extended: map[string]any{ + "config": "config.yml", + "formats": []any{"iso", "qcow2", "vmdk"}, + "platforms": []any{"linux/amd64", "linux/arm64"}, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err = p.Publish(context.TODO(), release, pubCfg, relCfg, true) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + // Check all format/platform combinations are listed + assert.Contains(t, output, "linuxkit-2.0.0-amd64.iso") + assert.Contains(t, output, 
"linuxkit-2.0.0-amd64.qcow2") + assert.Contains(t, output, "linuxkit-2.0.0-amd64.vmdk") + assert.Contains(t, output, "linuxkit-2.0.0-arm64.iso") + assert.Contains(t, output, "linuxkit-2.0.0-arm64.qcow2") + assert.Contains(t, output, "linuxkit-2.0.0-arm64.vmdk") + }) +} diff --git a/release/publishers/npm.go b/release/publishers/npm.go new file mode 100644 index 0000000..51b49f8 --- /dev/null +++ b/release/publishers/npm.go @@ -0,0 +1,265 @@ +// Package publishers provides release publishing implementations. +package publishers + +import ( + "bytes" + "context" + "embed" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "text/template" + + "forge.lthn.ai/core/go/pkg/io" +) + +//go:embed templates/npm/*.tmpl +var npmTemplates embed.FS + +// NpmConfig holds npm-specific configuration. +type NpmConfig struct { + // Package is the npm package name (e.g., "@host-uk/core"). + Package string + // Access is the npm access level: "public" or "restricted". + Access string +} + +// NpmPublisher publishes releases to npm using the binary wrapper pattern. +type NpmPublisher struct{} + +// NewNpmPublisher creates a new npm publisher. +func NewNpmPublisher() *NpmPublisher { + return &NpmPublisher{} +} + +// Name returns the publisher's identifier. +func (p *NpmPublisher) Name() string { + return "npm" +} + +// Publish publishes the release to npm. +// It generates a binary wrapper package that downloads the correct platform binary on postinstall. 
+func (p *NpmPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error { + // Parse npm config + npmCfg := p.parseConfig(pubCfg, relCfg) + + // Validate configuration + if npmCfg.Package == "" { + return fmt.Errorf("npm.Publish: package name is required (set publish.npm.package in config)") + } + + // Get repository + repo := "" + if relCfg != nil { + repo = relCfg.GetRepository() + } + if repo == "" { + detectedRepo, err := detectRepository(release.ProjectDir) + if err != nil { + return fmt.Errorf("npm.Publish: could not determine repository: %w", err) + } + repo = detectedRepo + } + + // Get project name (binary name) + projectName := "" + if relCfg != nil { + projectName = relCfg.GetProjectName() + } + if projectName == "" { + // Try to infer from package name + parts := strings.Split(npmCfg.Package, "/") + projectName = parts[len(parts)-1] + } + + // Strip leading 'v' from version for npm + version := strings.TrimPrefix(release.Version, "v") + + // Template data + data := npmTemplateData{ + Package: npmCfg.Package, + Version: version, + Description: fmt.Sprintf("%s CLI", projectName), + License: "MIT", + Repository: repo, + BinaryName: projectName, + ProjectName: projectName, + Access: npmCfg.Access, + } + + if dryRun { + return p.dryRunPublish(release.FS, data, &npmCfg) + } + + return p.executePublish(ctx, release.FS, data, &npmCfg) +} + +// parseConfig extracts npm-specific configuration from the publisher config. 
+func (p *NpmPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) NpmConfig { + cfg := NpmConfig{ + Package: "", + Access: "public", + } + + // Override from extended config if present + if ext, ok := pubCfg.Extended.(map[string]any); ok { + if pkg, ok := ext["package"].(string); ok && pkg != "" { + cfg.Package = pkg + } + if access, ok := ext["access"].(string); ok && access != "" { + cfg.Access = access + } + } + + return cfg +} + +// npmTemplateData holds data for npm templates. +type npmTemplateData struct { + Package string + Version string + Description string + License string + Repository string + BinaryName string + ProjectName string + Access string +} + +// dryRunPublish shows what would be done without actually publishing. +func (p *NpmPublisher) dryRunPublish(m io.Medium, data npmTemplateData, cfg *NpmConfig) error { + fmt.Println() + fmt.Println("=== DRY RUN: npm Publish ===") + fmt.Println() + fmt.Printf("Package: %s\n", data.Package) + fmt.Printf("Version: %s\n", data.Version) + fmt.Printf("Access: %s\n", data.Access) + fmt.Printf("Repository: %s\n", data.Repository) + fmt.Printf("Binary: %s\n", data.BinaryName) + fmt.Println() + + // Generate and show package.json + pkgJSON, err := p.renderTemplate(m, "templates/npm/package.json.tmpl", data) + if err != nil { + return fmt.Errorf("npm.dryRunPublish: %w", err) + } + fmt.Println("Generated package.json:") + fmt.Println("---") + fmt.Println(pkgJSON) + fmt.Println("---") + fmt.Println() + + fmt.Println("Would run: npm publish --access", data.Access) + fmt.Println() + fmt.Println("=== END DRY RUN ===") + + return nil +} + +// executePublish actually creates and publishes the npm package. 
+func (p *NpmPublisher) executePublish(ctx context.Context, m io.Medium, data npmTemplateData, cfg *NpmConfig) error { + // Check for NPM_TOKEN + if os.Getenv("NPM_TOKEN") == "" { + return fmt.Errorf("npm.Publish: NPM_TOKEN environment variable is required") + } + + // Create temp directory for package + tmpDir, err := os.MkdirTemp("", "npm-publish-*") + if err != nil { + return fmt.Errorf("npm.Publish: failed to create temp directory: %w", err) + } + defer func() { _ = os.RemoveAll(tmpDir) }() + + // Create bin directory + binDir := filepath.Join(tmpDir, "bin") + if err := os.MkdirAll(binDir, 0755); err != nil { + return fmt.Errorf("npm.Publish: failed to create bin directory: %w", err) + } + + // Generate package.json + pkgJSON, err := p.renderTemplate(m, "templates/npm/package.json.tmpl", data) + if err != nil { + return fmt.Errorf("npm.Publish: failed to render package.json: %w", err) + } + if err := os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(pkgJSON), 0644); err != nil { + return fmt.Errorf("npm.Publish: failed to write package.json: %w", err) + } + + // Generate install.js + installJS, err := p.renderTemplate(m, "templates/npm/install.js.tmpl", data) + if err != nil { + return fmt.Errorf("npm.Publish: failed to render install.js: %w", err) + } + if err := os.WriteFile(filepath.Join(tmpDir, "install.js"), []byte(installJS), 0644); err != nil { + return fmt.Errorf("npm.Publish: failed to write install.js: %w", err) + } + + // Generate run.js + runJS, err := p.renderTemplate(m, "templates/npm/run.js.tmpl", data) + if err != nil { + return fmt.Errorf("npm.Publish: failed to render run.js: %w", err) + } + if err := os.WriteFile(filepath.Join(binDir, "run.js"), []byte(runJS), 0755); err != nil { + return fmt.Errorf("npm.Publish: failed to write run.js: %w", err) + } + + // Create .npmrc with token + npmrc := "//registry.npmjs.org/:_authToken=${NPM_TOKEN}\n" + if err := os.WriteFile(filepath.Join(tmpDir, ".npmrc"), []byte(npmrc), 0600); err != nil { 
+ return fmt.Errorf("npm.Publish: failed to write .npmrc: %w", err) + } + + // Run npm publish + cmd := exec.CommandContext(ctx, "npm", "publish", "--access", data.Access) + cmd.Dir = tmpDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = append(os.Environ(), "NPM_TOKEN="+os.Getenv("NPM_TOKEN")) + + fmt.Printf("Publishing %s@%s to npm...\n", data.Package, data.Version) + if err := cmd.Run(); err != nil { + return fmt.Errorf("npm.Publish: npm publish failed: %w", err) + } + + fmt.Printf("Published %s@%s to npm\n", data.Package, data.Version) + fmt.Printf(" https://www.npmjs.com/package/%s\n", data.Package) + + return nil +} + +// renderTemplate renders an embedded template with the given data. +func (p *NpmPublisher) renderTemplate(m io.Medium, name string, data npmTemplateData) (string, error) { + var content []byte + var err error + + // Try custom template from medium + customPath := filepath.Join(".core", name) + if m != nil && m.IsFile(customPath) { + customContent, err := m.Read(customPath) + if err == nil { + content = []byte(customContent) + } + } + + // Fallback to embedded template + if content == nil { + content, err = npmTemplates.ReadFile(name) + if err != nil { + return "", fmt.Errorf("failed to read template %s: %w", name, err) + } + } + + tmpl, err := template.New(filepath.Base(name)).Parse(string(content)) + if err != nil { + return "", fmt.Errorf("failed to parse template %s: %w", name, err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return "", fmt.Errorf("failed to execute template %s: %w", name, err) + } + + return buf.String(), nil +} diff --git a/release/publishers/npm_test.go b/release/publishers/npm_test.go new file mode 100644 index 0000000..8144402 --- /dev/null +++ b/release/publishers/npm_test.go @@ -0,0 +1,303 @@ +package publishers + +import ( + "bytes" + "context" + "os" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +func TestNpmPublisher_Name_Good(t *testing.T) { + t.Run("returns npm", func(t *testing.T) { + p := NewNpmPublisher() + assert.Equal(t, "npm", p.Name()) + }) +} + +func TestNpmPublisher_ParseConfig_Good(t *testing.T) { + p := NewNpmPublisher() + + t.Run("uses defaults when no extended config", func(t *testing.T) { + pubCfg := PublisherConfig{Type: "npm"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Package) + assert.Equal(t, "public", cfg.Access) + }) + + t.Run("parses package and access from extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "npm", + Extended: map[string]any{ + "package": "@myorg/mypackage", + "access": "restricted", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Equal(t, "@myorg/mypackage", cfg.Package) + assert.Equal(t, "restricted", cfg.Access) + }) + + t.Run("keeps default access when not specified", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "npm", + Extended: map[string]any{ + "package": "@myorg/mypackage", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Equal(t, "@myorg/mypackage", cfg.Package) + assert.Equal(t, "public", cfg.Access) + }) + + t.Run("handles nil extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "npm", + Extended: nil, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Package) + assert.Equal(t, "public", cfg.Access) + }) + + t.Run("handles empty strings in config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "npm", + Extended: map[string]any{ + "package": "", + "access": "", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Package) + 
assert.Equal(t, "public", cfg.Access) + }) +} + +func TestNpmPublisher_RenderTemplate_Good(t *testing.T) { + p := NewNpmPublisher() + + t.Run("renders package.json template with data", func(t *testing.T) { + data := npmTemplateData{ + Package: "@myorg/mycli", + Version: "1.2.3", + Description: "My awesome CLI", + License: "MIT", + Repository: "owner/myapp", + BinaryName: "myapp", + ProjectName: "myapp", + Access: "public", + } + + result, err := p.renderTemplate(io.Local, "templates/npm/package.json.tmpl", data) + require.NoError(t, err) + + assert.Contains(t, result, `"name": "@myorg/mycli"`) + assert.Contains(t, result, `"version": "1.2.3"`) + assert.Contains(t, result, `"description": "My awesome CLI"`) + assert.Contains(t, result, `"license": "MIT"`) + assert.Contains(t, result, "owner/myapp") + assert.Contains(t, result, `"myapp": "./bin/run.js"`) + assert.Contains(t, result, `"access": "public"`) + }) + + t.Run("renders restricted access correctly", func(t *testing.T) { + data := npmTemplateData{ + Package: "@private/cli", + Version: "1.0.0", + Description: "Private CLI", + License: "MIT", + Repository: "org/repo", + BinaryName: "cli", + ProjectName: "cli", + Access: "restricted", + } + + result, err := p.renderTemplate(io.Local, "templates/npm/package.json.tmpl", data) + require.NoError(t, err) + + assert.Contains(t, result, `"access": "restricted"`) + }) +} + +func TestNpmPublisher_RenderTemplate_Bad(t *testing.T) { + p := NewNpmPublisher() + + t.Run("returns error for non-existent template", func(t *testing.T) { + data := npmTemplateData{} + _, err := p.renderTemplate(io.Local, "templates/npm/nonexistent.tmpl", data) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to read template") + }) +} + +func TestNpmPublisher_DryRunPublish_Good(t *testing.T) { + p := NewNpmPublisher() + + t.Run("outputs expected dry run information", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + data := npmTemplateData{ + 
Package: "@myorg/mycli", + Version: "1.0.0", + Access: "public", + Repository: "owner/repo", + BinaryName: "mycli", + Description: "My CLI", + } + cfg := &NpmConfig{ + Package: "@myorg/mycli", + Access: "public", + } + + err := p.dryRunPublish(io.Local, data, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "DRY RUN: npm Publish") + assert.Contains(t, output, "Package: @myorg/mycli") + assert.Contains(t, output, "Version: 1.0.0") + assert.Contains(t, output, "Access: public") + assert.Contains(t, output, "Repository: owner/repo") + assert.Contains(t, output, "Binary: mycli") + assert.Contains(t, output, "Generated package.json:") + assert.Contains(t, output, "Would run: npm publish --access public") + assert.Contains(t, output, "END DRY RUN") + }) + + t.Run("shows restricted access correctly", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + data := npmTemplateData{ + Package: "@private/cli", + Version: "2.0.0", + Access: "restricted", + Repository: "org/repo", + BinaryName: "cli", + } + cfg := &NpmConfig{ + Package: "@private/cli", + Access: "restricted", + } + + err := p.dryRunPublish(io.Local, data, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "Access: restricted") + assert.Contains(t, output, "Would run: npm publish --access restricted") + }) +} + +func TestNpmPublisher_Publish_Bad(t *testing.T) { + p := NewNpmPublisher() + + t.Run("fails when package name not configured", func(t *testing.T) { + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/project", + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "npm"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err := p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, 
err) + assert.Contains(t, err.Error(), "package name is required") + }) + + t.Run("fails when NPM_TOKEN not set in non-dry-run", func(t *testing.T) { + // Ensure NPM_TOKEN is not set + oldToken := os.Getenv("NPM_TOKEN") + _ = os.Unsetenv("NPM_TOKEN") + defer func() { + if oldToken != "" { + _ = os.Setenv("NPM_TOKEN", oldToken) + } + }() + + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/project", + FS: io.Local, + } + pubCfg := PublisherConfig{ + Type: "npm", + Extended: map[string]any{ + "package": "@test/package", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err := p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "NPM_TOKEN environment variable is required") + }) +} + +func TestNpmConfig_Defaults_Good(t *testing.T) { + t.Run("has sensible defaults", func(t *testing.T) { + p := NewNpmPublisher() + pubCfg := PublisherConfig{Type: "npm"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Package) + assert.Equal(t, "public", cfg.Access) + }) +} + +func TestNpmTemplateData_Good(t *testing.T) { + t.Run("struct has all expected fields", func(t *testing.T) { + data := npmTemplateData{ + Package: "@myorg/package", + Version: "1.0.0", + Description: "description", + License: "MIT", + Repository: "org/repo", + BinaryName: "cli", + ProjectName: "cli", + Access: "public", + } + + assert.Equal(t, "@myorg/package", data.Package) + assert.Equal(t, "1.0.0", data.Version) + assert.Equal(t, "description", data.Description) + assert.Equal(t, "MIT", data.License) + assert.Equal(t, "org/repo", data.Repository) + assert.Equal(t, "cli", data.BinaryName) + assert.Equal(t, "cli", data.ProjectName) + assert.Equal(t, "public", data.Access) + }) +} diff --git a/release/publishers/publisher.go b/release/publishers/publisher.go new file mode 100644 index 0000000..0142683 --- /dev/null +++ b/release/publishers/publisher.go 
@@ -0,0 +1,72 @@
// Package publishers provides release publishing implementations.
package publishers

import (
	"context"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
)

// Release represents a release to be published.
type Release struct {
	// Version is the semantic version string (e.g., "v1.2.3").
	// Publishers strip the leading "v" where the target ecosystem
	// expects a bare version.
	Version string
	// Artifacts are the built release artifacts.
	Artifacts []build.Artifact
	// Changelog is the generated markdown changelog.
	Changelog string
	// ProjectDir is the root directory of the project.
	ProjectDir string
	// FS is the medium for file operations.
	FS io.Medium
}

// PublisherConfig holds configuration for a publisher.
type PublisherConfig struct {
	// Type is the publisher type (e.g., "github", "linuxkit", "docker").
	Type string
	// Prerelease marks the release as a prerelease.
	Prerelease bool
	// Draft creates the release as a draft.
	Draft bool
	// Extended holds publisher-specific configuration. Publishers
	// type-assert this to map[string]any when reading their settings.
	Extended any
}

// ReleaseConfig holds release configuration needed by publishers.
// Either getter may return "" — publishers fall back to git-based
// detection (repository) or name derivation (project name).
type ReleaseConfig interface {
	GetRepository() string
	GetProjectName() string
}

// Publisher defines the interface for release publishers.
type Publisher interface {
	// Name returns the publisher's identifier.
	Name() string
	// Publish publishes the release to the target.
	// If dryRun is true, it prints what would be done without executing.
	Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error
}

// NewRelease creates a Release from the release package's Release type.
// This is a helper to convert between packages.
func NewRelease(version string, artifacts []build.Artifact, changelog, projectDir string, fs io.Medium) *Release {
	return &Release{
		Version:    version,
		Artifacts:  artifacts,
		Changelog:  changelog,
		ProjectDir: projectDir,
		FS:         fs,
	}
}

// NewPublisherConfig creates a PublisherConfig.
func NewPublisherConfig(pubType string, prerelease, draft bool, extended any) PublisherConfig {
	return PublisherConfig{
		Type:       pubType,
		Prerelease: prerelease,
		Draft:      draft,
		Extended:   extended,
	}
}
diff --git a/release/publishers/scoop.go b/release/publishers/scoop.go
new file mode 100644
index 0000000..ce1a46e
--- /dev/null
+++ b/release/publishers/scoop.go
@@ -0,0 +1,284 @@
// Package publishers provides release publishing implementations.
package publishers

import (
	"bytes"
	"context"
	"embed"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"text/template"

	"forge.lthn.ai/core/go-devops/build"
	"forge.lthn.ai/core/go/pkg/io"
)

// scoopTemplates embeds the Scoop manifest template(s).
//go:embed templates/scoop/*.tmpl
var scoopTemplates embed.FS

// ScoopConfig holds Scoop-specific configuration.
type ScoopConfig struct {
	// Bucket is the Scoop bucket repository (e.g., "host-uk/scoop-bucket").
	Bucket string
	// Official config for generating files for official repo PRs.
	// NOTE(review): OfficialConfig is declared elsewhere in this package.
	Official *OfficialConfig
}

// ScoopPublisher publishes releases to Scoop.
type ScoopPublisher struct{}

// NewScoopPublisher creates a new Scoop publisher.
func NewScoopPublisher() *ScoopPublisher {
	return &ScoopPublisher{}
}

// Name returns the publisher's identifier.
func (p *ScoopPublisher) Name() string {
	return "scoop"
}

// Publish publishes the release to Scoop.
+func (p *ScoopPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error { + cfg := p.parseConfig(pubCfg, relCfg) + + if cfg.Bucket == "" && (cfg.Official == nil || !cfg.Official.Enabled) { + return fmt.Errorf("scoop.Publish: bucket is required (set publish.scoop.bucket in config)") + } + + repo := "" + if relCfg != nil { + repo = relCfg.GetRepository() + } + if repo == "" { + detectedRepo, err := detectRepository(release.ProjectDir) + if err != nil { + return fmt.Errorf("scoop.Publish: could not determine repository: %w", err) + } + repo = detectedRepo + } + + projectName := "" + if relCfg != nil { + projectName = relCfg.GetProjectName() + } + if projectName == "" { + parts := strings.Split(repo, "/") + projectName = parts[len(parts)-1] + } + + version := strings.TrimPrefix(release.Version, "v") + checksums := buildChecksumMap(release.Artifacts) + + data := scoopTemplateData{ + PackageName: projectName, + Description: fmt.Sprintf("%s CLI", projectName), + Repository: repo, + Version: version, + License: "MIT", + BinaryName: projectName, + Checksums: checksums, + } + + if dryRun { + return p.dryRunPublish(release.FS, data, cfg) + } + + return p.executePublish(ctx, release.ProjectDir, data, cfg, release) +} + +type scoopTemplateData struct { + PackageName string + Description string + Repository string + Version string + License string + BinaryName string + Checksums ChecksumMap +} + +func (p *ScoopPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) ScoopConfig { + cfg := ScoopConfig{} + + if ext, ok := pubCfg.Extended.(map[string]any); ok { + if bucket, ok := ext["bucket"].(string); ok && bucket != "" { + cfg.Bucket = bucket + } + if official, ok := ext["official"].(map[string]any); ok { + cfg.Official = &OfficialConfig{} + if enabled, ok := official["enabled"].(bool); ok { + cfg.Official.Enabled = enabled + } + if output, ok := official["output"].(string); ok { + cfg.Official.Output 
= output + } + } + } + + return cfg +} + +func (p *ScoopPublisher) dryRunPublish(m io.Medium, data scoopTemplateData, cfg ScoopConfig) error { + fmt.Println() + fmt.Println("=== DRY RUN: Scoop Publish ===") + fmt.Println() + fmt.Printf("Package: %s\n", data.PackageName) + fmt.Printf("Version: %s\n", data.Version) + fmt.Printf("Bucket: %s\n", cfg.Bucket) + fmt.Printf("Repository: %s\n", data.Repository) + fmt.Println() + + manifest, err := p.renderTemplate(m, "templates/scoop/manifest.json.tmpl", data) + if err != nil { + return fmt.Errorf("scoop.dryRunPublish: %w", err) + } + fmt.Println("Generated manifest.json:") + fmt.Println("---") + fmt.Println(manifest) + fmt.Println("---") + fmt.Println() + + if cfg.Bucket != "" { + fmt.Printf("Would commit to bucket: %s\n", cfg.Bucket) + } + if cfg.Official != nil && cfg.Official.Enabled { + output := cfg.Official.Output + if output == "" { + output = "dist/scoop" + } + fmt.Printf("Would write files for official PR to: %s\n", output) + } + fmt.Println() + fmt.Println("=== END DRY RUN ===") + + return nil +} + +func (p *ScoopPublisher) executePublish(ctx context.Context, projectDir string, data scoopTemplateData, cfg ScoopConfig, release *Release) error { + manifest, err := p.renderTemplate(release.FS, "templates/scoop/manifest.json.tmpl", data) + if err != nil { + return fmt.Errorf("scoop.Publish: failed to render manifest: %w", err) + } + + // If official config is enabled, write to output directory + if cfg.Official != nil && cfg.Official.Enabled { + output := cfg.Official.Output + if output == "" { + output = filepath.Join(projectDir, "dist", "scoop") + } else if !filepath.IsAbs(output) { + output = filepath.Join(projectDir, output) + } + + if err := release.FS.EnsureDir(output); err != nil { + return fmt.Errorf("scoop.Publish: failed to create output directory: %w", err) + } + + manifestPath := filepath.Join(output, fmt.Sprintf("%s.json", data.PackageName)) + if err := release.FS.Write(manifestPath, manifest); err != 
nil { + return fmt.Errorf("scoop.Publish: failed to write manifest: %w", err) + } + fmt.Printf("Wrote Scoop manifest for official PR: %s\n", manifestPath) + } + + // If bucket is configured, commit to it + if cfg.Bucket != "" { + if err := p.commitToBucket(ctx, cfg.Bucket, data, manifest); err != nil { + return err + } + } + + return nil +} + +func (p *ScoopPublisher) commitToBucket(ctx context.Context, bucket string, data scoopTemplateData, manifest string) error { + tmpDir, err := os.MkdirTemp("", "scoop-bucket-*") + if err != nil { + return fmt.Errorf("scoop.Publish: failed to create temp directory: %w", err) + } + defer func() { _ = os.RemoveAll(tmpDir) }() + + fmt.Printf("Cloning bucket %s...\n", bucket) + cmd := exec.CommandContext(ctx, "gh", "repo", "clone", bucket, tmpDir, "--", "--depth=1") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("scoop.Publish: failed to clone bucket: %w", err) + } + + // Ensure bucket directory exists + bucketDir := filepath.Join(tmpDir, "bucket") + if _, err := os.Stat(bucketDir); os.IsNotExist(err) { + bucketDir = tmpDir // Some repos put manifests in root + } + + manifestPath := filepath.Join(bucketDir, fmt.Sprintf("%s.json", data.PackageName)) + if err := os.WriteFile(manifestPath, []byte(manifest), 0644); err != nil { + return fmt.Errorf("scoop.Publish: failed to write manifest: %w", err) + } + + commitMsg := fmt.Sprintf("Update %s to %s", data.PackageName, data.Version) + + cmd = exec.CommandContext(ctx, "git", "add", ".") + cmd.Dir = tmpDir + if err := cmd.Run(); err != nil { + return fmt.Errorf("scoop.Publish: git add failed: %w", err) + } + + cmd = exec.CommandContext(ctx, "git", "commit", "-m", commitMsg) + cmd.Dir = tmpDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("scoop.Publish: git commit failed: %w", err) + } + + cmd = exec.CommandContext(ctx, "git", "push") + cmd.Dir = tmpDir + cmd.Stdout = 
os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("scoop.Publish: git push failed: %w", err) + } + + fmt.Printf("Updated Scoop bucket: %s\n", bucket) + return nil +} + +func (p *ScoopPublisher) renderTemplate(m io.Medium, name string, data scoopTemplateData) (string, error) { + var content []byte + var err error + + // Try custom template from medium + customPath := filepath.Join(".core", name) + if m != nil && m.IsFile(customPath) { + customContent, err := m.Read(customPath) + if err == nil { + content = []byte(customContent) + } + } + + // Fallback to embedded template + if content == nil { + content, err = scoopTemplates.ReadFile(name) + if err != nil { + return "", fmt.Errorf("failed to read template %s: %w", name, err) + } + } + + tmpl, err := template.New(filepath.Base(name)).Parse(string(content)) + if err != nil { + return "", fmt.Errorf("failed to parse template %s: %w", name, err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return "", fmt.Errorf("failed to execute template %s: %w", name, err) + } + + return buf.String(), nil +} + +// Ensure build package is used +var _ = build.Artifact{} diff --git a/release/publishers/scoop_test.go b/release/publishers/scoop_test.go new file mode 100644 index 0000000..4afb2ad --- /dev/null +++ b/release/publishers/scoop_test.go @@ -0,0 +1,311 @@ +package publishers + +import ( + "bytes" + "context" + "os" + "testing" + + "forge.lthn.ai/core/go/pkg/io" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestScoopPublisher_Name_Good(t *testing.T) { + t.Run("returns scoop", func(t *testing.T) { + p := NewScoopPublisher() + assert.Equal(t, "scoop", p.Name()) + }) +} + +func TestScoopPublisher_ParseConfig_Good(t *testing.T) { + p := NewScoopPublisher() + + t.Run("uses defaults when no extended config", func(t *testing.T) { + pubCfg := PublisherConfig{Type: "scoop"} + relCfg := &mockReleaseConfig{repository: 
"owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Bucket) + assert.Nil(t, cfg.Official) + }) + + t.Run("parses bucket from extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "scoop", + Extended: map[string]any{ + "bucket": "host-uk/scoop-bucket", + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Equal(t, "host-uk/scoop-bucket", cfg.Bucket) + }) + + t.Run("parses official config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "scoop", + Extended: map[string]any{ + "official": map[string]any{ + "enabled": true, + "output": "dist/scoop-manifest", + }, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + require.NotNil(t, cfg.Official) + assert.True(t, cfg.Official.Enabled) + assert.Equal(t, "dist/scoop-manifest", cfg.Official.Output) + }) + + t.Run("handles missing official fields", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "scoop", + Extended: map[string]any{ + "official": map[string]any{}, + }, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + require.NotNil(t, cfg.Official) + assert.False(t, cfg.Official.Enabled) + assert.Empty(t, cfg.Official.Output) + }) + + t.Run("handles nil extended config", func(t *testing.T) { + pubCfg := PublisherConfig{ + Type: "scoop", + Extended: nil, + } + relCfg := &mockReleaseConfig{repository: "owner/repo"} + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Bucket) + assert.Nil(t, cfg.Official) + }) +} + +func TestScoopPublisher_RenderTemplate_Good(t *testing.T) { + p := NewScoopPublisher() + + t.Run("renders manifest template with data", func(t *testing.T) { + data := scoopTemplateData{ + PackageName: "myapp", + Description: "My awesome CLI", + Repository: "owner/myapp", + Version: "1.2.3", + License: "MIT", + BinaryName: "myapp", + Checksums: ChecksumMap{ + 
WindowsAmd64: "abc123", + WindowsArm64: "def456", + }, + } + + result, err := p.renderTemplate(io.Local, "templates/scoop/manifest.json.tmpl", data) + require.NoError(t, err) + + assert.Contains(t, result, `"version": "1.2.3"`) + assert.Contains(t, result, `"description": "My awesome CLI"`) + assert.Contains(t, result, `"homepage": "https://github.com/owner/myapp"`) + assert.Contains(t, result, `"license": "MIT"`) + assert.Contains(t, result, `"64bit"`) + assert.Contains(t, result, `"arm64"`) + assert.Contains(t, result, "myapp-windows-amd64.zip") + assert.Contains(t, result, "myapp-windows-arm64.zip") + assert.Contains(t, result, `"hash": "abc123"`) + assert.Contains(t, result, `"hash": "def456"`) + assert.Contains(t, result, `"bin": "myapp.exe"`) + }) + + t.Run("includes autoupdate configuration", func(t *testing.T) { + data := scoopTemplateData{ + PackageName: "tool", + Description: "A tool", + Repository: "org/tool", + Version: "2.0.0", + License: "Apache-2.0", + BinaryName: "tool", + Checksums: ChecksumMap{}, + } + + result, err := p.renderTemplate(io.Local, "templates/scoop/manifest.json.tmpl", data) + require.NoError(t, err) + + assert.Contains(t, result, `"checkver"`) + assert.Contains(t, result, `"github": "https://github.com/org/tool"`) + assert.Contains(t, result, `"autoupdate"`) + }) +} + +func TestScoopPublisher_RenderTemplate_Bad(t *testing.T) { + p := NewScoopPublisher() + + t.Run("returns error for non-existent template", func(t *testing.T) { + data := scoopTemplateData{} + _, err := p.renderTemplate(io.Local, "templates/scoop/nonexistent.tmpl", data) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to read template") + }) +} + +func TestScoopPublisher_DryRunPublish_Good(t *testing.T) { + p := NewScoopPublisher() + + t.Run("outputs expected dry run information", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + data := scoopTemplateData{ + PackageName: "myapp", + Version: "1.0.0", + Repository: 
"owner/repo", + BinaryName: "myapp", + Checksums: ChecksumMap{}, + } + cfg := ScoopConfig{ + Bucket: "owner/scoop-bucket", + } + + err := p.dryRunPublish(io.Local, data, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + + assert.Contains(t, output, "DRY RUN: Scoop Publish") + assert.Contains(t, output, "Package: myapp") + assert.Contains(t, output, "Version: 1.0.0") + assert.Contains(t, output, "Bucket: owner/scoop-bucket") + assert.Contains(t, output, "Repository: owner/repo") + assert.Contains(t, output, "Generated manifest.json:") + assert.Contains(t, output, "Would commit to bucket: owner/scoop-bucket") + assert.Contains(t, output, "END DRY RUN") + }) + + t.Run("shows official output path when enabled", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + data := scoopTemplateData{ + PackageName: "myapp", + Version: "1.0.0", + BinaryName: "myapp", + Checksums: ChecksumMap{}, + } + cfg := ScoopConfig{ + Official: &OfficialConfig{ + Enabled: true, + Output: "custom/scoop/path", + }, + } + + err := p.dryRunPublish(io.Local, data, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "Would write files for official PR to: custom/scoop/path") + }) + + t.Run("uses default official output path when not specified", func(t *testing.T) { + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + data := scoopTemplateData{ + PackageName: "myapp", + Version: "1.0.0", + BinaryName: "myapp", + Checksums: ChecksumMap{}, + } + cfg := ScoopConfig{ + Official: &OfficialConfig{ + Enabled: true, + }, + } + + err := p.dryRunPublish(io.Local, data, cfg) + + _ = w.Close() + var buf bytes.Buffer + _, _ = buf.ReadFrom(r) + os.Stdout = oldStdout + + require.NoError(t, err) + output := buf.String() + assert.Contains(t, 
output, "Would write files for official PR to: dist/scoop") + }) +} + +func TestScoopPublisher_Publish_Bad(t *testing.T) { + p := NewScoopPublisher() + + t.Run("fails when bucket not configured and not official mode", func(t *testing.T) { + release := &Release{ + Version: "v1.0.0", + ProjectDir: "/project", + FS: io.Local, + } + pubCfg := PublisherConfig{Type: "scoop"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + err := p.Publish(context.TODO(), release, pubCfg, relCfg, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "bucket is required") + }) +} + +func TestScoopConfig_Defaults_Good(t *testing.T) { + t.Run("has sensible defaults", func(t *testing.T) { + p := NewScoopPublisher() + pubCfg := PublisherConfig{Type: "scoop"} + relCfg := &mockReleaseConfig{repository: "owner/repo"} + + cfg := p.parseConfig(pubCfg, relCfg) + + assert.Empty(t, cfg.Bucket) + assert.Nil(t, cfg.Official) + }) +} + +func TestScoopTemplateData_Good(t *testing.T) { + t.Run("struct has all expected fields", func(t *testing.T) { + data := scoopTemplateData{ + PackageName: "myapp", + Description: "description", + Repository: "org/repo", + Version: "1.0.0", + License: "MIT", + BinaryName: "myapp", + Checksums: ChecksumMap{ + WindowsAmd64: "hash1", + WindowsArm64: "hash2", + }, + } + + assert.Equal(t, "myapp", data.PackageName) + assert.Equal(t, "description", data.Description) + assert.Equal(t, "org/repo", data.Repository) + assert.Equal(t, "1.0.0", data.Version) + assert.Equal(t, "MIT", data.License) + assert.Equal(t, "myapp", data.BinaryName) + assert.Equal(t, "hash1", data.Checksums.WindowsAmd64) + assert.Equal(t, "hash2", data.Checksums.WindowsArm64) + }) +} diff --git a/release/publishers/templates/aur/.SRCINFO.tmpl b/release/publishers/templates/aur/.SRCINFO.tmpl new file mode 100644 index 0000000..af3ad66 --- /dev/null +++ b/release/publishers/templates/aur/.SRCINFO.tmpl @@ -0,0 +1,16 @@ +pkgbase = {{.PackageName}}-bin + pkgdesc = {{.Description}} + pkgver = 
{{.Version}} + pkgrel = 1 + url = https://github.com/{{.Repository}} + arch = x86_64 + arch = aarch64 + license = {{.License}} + provides = {{.PackageName}} + conflicts = {{.PackageName}} + source_x86_64 = {{.PackageName}}-bin-{{.Version}}-x86_64.tar.gz::https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-linux-amd64.tar.gz + sha256sums_x86_64 = {{.Checksums.LinuxAmd64}} + source_aarch64 = {{.PackageName}}-bin-{{.Version}}-aarch64.tar.gz::https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-linux-arm64.tar.gz + sha256sums_aarch64 = {{.Checksums.LinuxArm64}} + +pkgname = {{.PackageName}}-bin diff --git a/release/publishers/templates/aur/PKGBUILD.tmpl b/release/publishers/templates/aur/PKGBUILD.tmpl new file mode 100644 index 0000000..61096bf --- /dev/null +++ b/release/publishers/templates/aur/PKGBUILD.tmpl @@ -0,0 +1,20 @@ +# Maintainer: {{.Maintainer}} +pkgname={{.PackageName}}-bin +pkgver={{.Version}} +pkgrel=1 +pkgdesc="{{.Description}}" +arch=('x86_64' 'aarch64') +url="https://github.com/{{.Repository}}" +license=('{{.License}}') +provides=('{{.PackageName}}') +conflicts=('{{.PackageName}}') + +source_x86_64=("${pkgname}-${pkgver}-x86_64.tar.gz::https://github.com/{{.Repository}}/releases/download/v${pkgver}/{{.BinaryName}}-linux-amd64.tar.gz") +source_aarch64=("${pkgname}-${pkgver}-aarch64.tar.gz::https://github.com/{{.Repository}}/releases/download/v${pkgver}/{{.BinaryName}}-linux-arm64.tar.gz") + +sha256sums_x86_64=('{{.Checksums.LinuxAmd64}}') +sha256sums_aarch64=('{{.Checksums.LinuxArm64}}') + +package() { + install -Dm755 {{.BinaryName}} "${pkgdir}/usr/bin/{{.BinaryName}}" +} diff --git a/release/publishers/templates/chocolatey/package.nuspec.tmpl b/release/publishers/templates/chocolatey/package.nuspec.tmpl new file mode 100644 index 0000000..c96ca7d --- /dev/null +++ b/release/publishers/templates/chocolatey/package.nuspec.tmpl @@ -0,0 +1,18 @@ + + + + {{.PackageName}} + {{.Version}} + 
{{.Title}} + {{.Authors}} + https://github.com/{{.Repository}} + https://github.com/{{.Repository}}/blob/main/LICENSE + false + {{.Description}} + {{.Tags}} + https://github.com/{{.Repository}}/releases/tag/v{{.Version}} + + + + + diff --git a/release/publishers/templates/chocolatey/tools/chocolateyinstall.ps1.tmpl b/release/publishers/templates/chocolatey/tools/chocolateyinstall.ps1.tmpl new file mode 100644 index 0000000..a915be8 --- /dev/null +++ b/release/publishers/templates/chocolatey/tools/chocolateyinstall.ps1.tmpl @@ -0,0 +1,13 @@ +$ErrorActionPreference = 'Stop' +$toolsDir = "$(Split-Path -parent $MyInvocation.MyCommand.Definition)" +$url64 = 'https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-windows-amd64.zip' + +$packageArgs = @{ + packageName = '{{.PackageName}}' + unzipLocation = $toolsDir + url64bit = $url64 + checksum64 = '{{.Checksums.WindowsAmd64}}' + checksumType64 = 'sha256' +} + +Install-ChocolateyZipPackage @packageArgs diff --git a/release/publishers/templates/homebrew/formula.rb.tmpl b/release/publishers/templates/homebrew/formula.rb.tmpl new file mode 100644 index 0000000..aa03fcb --- /dev/null +++ b/release/publishers/templates/homebrew/formula.rb.tmpl @@ -0,0 +1,37 @@ +# typed: false +# frozen_string_literal: true + +class {{.FormulaClass}} < Formula + desc "{{.Description}}" + homepage "https://github.com/{{.Repository}}" + version "{{.Version}}" + license "{{.License}}" + + on_macos do + if Hardware::CPU.arm? + url "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-darwin-arm64.tar.gz" + sha256 "{{.Checksums.DarwinArm64}}" + else + url "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-darwin-amd64.tar.gz" + sha256 "{{.Checksums.DarwinAmd64}}" + end + end + + on_linux do + if Hardware::CPU.arm? 
+ url "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-linux-arm64.tar.gz" + sha256 "{{.Checksums.LinuxArm64}}" + else + url "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-linux-amd64.tar.gz" + sha256 "{{.Checksums.LinuxAmd64}}" + end + end + + def install + bin.install "{{.BinaryName}}" + end + + test do + system "#{bin}/{{.BinaryName}}", "--version" + end +end diff --git a/release/publishers/templates/npm/install.js.tmpl b/release/publishers/templates/npm/install.js.tmpl new file mode 100644 index 0000000..bf924f6 --- /dev/null +++ b/release/publishers/templates/npm/install.js.tmpl @@ -0,0 +1,176 @@ +#!/usr/bin/env node +/** + * Binary installer for {{.Package}} + * Downloads the correct binary for the current platform from GitHub releases. + */ + +const fs = require('fs'); +const path = require('path'); +const https = require('https'); +const { spawnSync } = require('child_process'); +const crypto = require('crypto'); + +const PACKAGE_VERSION = '{{.Version}}'; +const GITHUB_REPO = '{{.Repository}}'; +const BINARY_NAME = '{{.BinaryName}}'; + +// Platform/arch mapping +const PLATFORM_MAP = { + darwin: 'darwin', + linux: 'linux', + win32: 'windows', +}; + +const ARCH_MAP = { + x64: 'amd64', + arm64: 'arm64', +}; + +function getPlatformInfo() { + const platform = PLATFORM_MAP[process.platform]; + const arch = ARCH_MAP[process.arch]; + + if (!platform || !arch) { + console.error(`Unsupported platform: ${process.platform}/${process.arch}`); + process.exit(1); + } + + return { platform, arch }; +} + +function getDownloadUrl(platform, arch) { + const ext = platform === 'windows' ? 
'.zip' : '.tar.gz'; + const name = `${BINARY_NAME}-${platform}-${arch}${ext}`; + return `https://github.com/${GITHUB_REPO}/releases/download/v${PACKAGE_VERSION}/${name}`; +} + +function getChecksumsUrl() { + return `https://github.com/${GITHUB_REPO}/releases/download/v${PACKAGE_VERSION}/checksums.txt`; +} + +function download(url) { + return new Promise((resolve, reject) => { + const request = (url) => { + https.get(url, (res) => { + if (res.statusCode >= 300 && res.statusCode < 400 && res.headers.location) { + // Follow redirect + request(res.headers.location); + return; + } + + if (res.statusCode !== 200) { + reject(new Error(`Failed to download ${url}: HTTP ${res.statusCode}`)); + return; + } + + const chunks = []; + res.on('data', (chunk) => chunks.push(chunk)); + res.on('end', () => resolve(Buffer.concat(chunks))); + res.on('error', reject); + }).on('error', reject); + }; + request(url); + }); +} + +async function fetchChecksums() { + try { + const data = await download(getChecksumsUrl()); + const checksums = {}; + data.toString().split('\n').forEach((line) => { + const parts = line.trim().split(/\s+/); + if (parts.length === 2) { + checksums[parts[1]] = parts[0]; + } + }); + return checksums; + } catch (err) { + console.warn('Warning: Could not fetch checksums, skipping verification'); + return null; + } +} + +function verifyChecksum(data, expectedHash) { + const actualHash = crypto.createHash('sha256').update(data).digest('hex'); + return actualHash === expectedHash; +} + +function extract(data, destDir, platform) { + const tempFile = path.join(destDir, platform === 'windows' ? 
'temp.zip' : 'temp.tar.gz'); + fs.writeFileSync(tempFile, data); + + try { + if (platform === 'windows') { + // Use PowerShell to extract zip + const result = spawnSync('powershell', [ + '-command', + `Expand-Archive -Path '${tempFile}' -DestinationPath '${destDir}' -Force` + ], { stdio: 'ignore' }); + if (result.status !== 0) { + throw new Error('Failed to extract zip'); + } + } else { + const result = spawnSync('tar', ['-xzf', tempFile, '-C', destDir], { stdio: 'ignore' }); + if (result.status !== 0) { + throw new Error('Failed to extract tar.gz'); + } + } + } finally { + fs.unlinkSync(tempFile); + } +} + +async function main() { + const { platform, arch } = getPlatformInfo(); + const binDir = path.join(__dirname, 'bin'); + const binaryPath = path.join(binDir, platform === 'windows' ? `${BINARY_NAME}.exe` : BINARY_NAME); + + // Skip if binary already exists + if (fs.existsSync(binaryPath)) { + console.log(`${BINARY_NAME} binary already installed`); + return; + } + + console.log(`Installing ${BINARY_NAME} v${PACKAGE_VERSION} for ${platform}/${arch}...`); + + // Ensure bin directory exists + if (!fs.existsSync(binDir)) { + fs.mkdirSync(binDir, { recursive: true }); + } + + // Fetch checksums + const checksums = await fetchChecksums(); + + // Download binary + const url = getDownloadUrl(platform, arch); + console.log(`Downloading from ${url}`); + + const data = await download(url); + + // Verify checksum if available + if (checksums) { + const ext = platform === 'windows' ? 
'.zip' : '.tar.gz'; + const filename = `${BINARY_NAME}-${platform}-${arch}${ext}`; + const expectedHash = checksums[filename]; + if (expectedHash && !verifyChecksum(data, expectedHash)) { + console.error('Checksum verification failed!'); + process.exit(1); + } + console.log('Checksum verified'); + } + + // Extract + extract(data, binDir, platform); + + // Make executable on Unix + if (platform !== 'windows') { + fs.chmodSync(binaryPath, 0o755); + } + + console.log(`${BINARY_NAME} installed successfully`); +} + +main().catch((err) => { + console.error(`Installation failed: ${err.message}`); + process.exit(1); +}); diff --git a/release/publishers/templates/npm/package.json.tmpl b/release/publishers/templates/npm/package.json.tmpl new file mode 100644 index 0000000..a7d0962 --- /dev/null +++ b/release/publishers/templates/npm/package.json.tmpl @@ -0,0 +1,34 @@ +{ + "name": "{{.Package}}", + "version": "{{.Version}}", + "description": "{{.Description}}", + "license": "{{.License}}", + "repository": { + "type": "git", + "url": "https://github.com/{{.Repository}}.git" + }, + "homepage": "https://github.com/{{.Repository}}", + "bugs": { + "url": "https://github.com/{{.Repository}}/issues" + }, + "bin": { + "{{.BinaryName}}": "./bin/run.js" + }, + "scripts": { + "postinstall": "node ./install.js" + }, + "files": [ + "bin/", + "install.js" + ], + "engines": { + "node": ">=14.0.0" + }, + "keywords": [ + "cli", + "{{.ProjectName}}" + ], + "publishConfig": { + "access": "{{.Access}}" + } +} diff --git a/release/publishers/templates/npm/run.js.tmpl b/release/publishers/templates/npm/run.js.tmpl new file mode 100644 index 0000000..8a04a68 --- /dev/null +++ b/release/publishers/templates/npm/run.js.tmpl @@ -0,0 +1,48 @@ +#!/usr/bin/env node +/** + * Binary wrapper for {{.Package}} + * Executes the platform-specific binary. 
+ */ + +const { spawn } = require('child_process'); +const path = require('path'); +const fs = require('fs'); + +const BINARY_NAME = '{{.BinaryName}}'; + +function getBinaryPath() { + const binDir = path.join(__dirname); + const isWindows = process.platform === 'win32'; + const binaryName = isWindows ? `${BINARY_NAME}.exe` : BINARY_NAME; + return path.join(binDir, binaryName); +} + +function main() { + const binaryPath = getBinaryPath(); + + if (!fs.existsSync(binaryPath)) { + console.error(`Binary not found at ${binaryPath}`); + console.error('Try reinstalling the package: npm install -g {{.Package}}'); + process.exit(1); + } + + const child = spawn(binaryPath, process.argv.slice(2), { + stdio: 'inherit', + windowsHide: true, + }); + + child.on('error', (err) => { + console.error(`Failed to start ${BINARY_NAME}: ${err.message}`); + process.exit(1); + }); + + child.on('exit', (code, signal) => { + if (signal) { + process.kill(process.pid, signal); + } else { + process.exit(code ?? 0); + } + }); +} + +main(); diff --git a/release/publishers/templates/scoop/manifest.json.tmpl b/release/publishers/templates/scoop/manifest.json.tmpl new file mode 100644 index 0000000..6455225 --- /dev/null +++ b/release/publishers/templates/scoop/manifest.json.tmpl @@ -0,0 +1,30 @@ +{ + "version": "{{.Version}}", + "description": "{{.Description}}", + "homepage": "https://github.com/{{.Repository}}", + "license": "{{.License}}", + "architecture": { + "64bit": { + "url": "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-windows-amd64.zip", + "hash": "{{.Checksums.WindowsAmd64}}" + }, + "arm64": { + "url": "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-windows-arm64.zip", + "hash": "{{.Checksums.WindowsArm64}}" + } + }, + "bin": "{{.BinaryName}}.exe", + "checkver": { + "github": "https://github.com/{{.Repository}}" + }, + "autoupdate": { + "architecture": { + "64bit": { + "url": 
"https://github.com/{{.Repository}}/releases/download/v$version/{{.BinaryName}}-windows-amd64.zip" + }, + "arm64": { + "url": "https://github.com/{{.Repository}}/releases/download/v$version/{{.BinaryName}}-windows-arm64.zip" + } + } + } +} diff --git a/release/release.go b/release/release.go new file mode 100644 index 0000000..8ece33f --- /dev/null +++ b/release/release.go @@ -0,0 +1,439 @@ +// Package release provides release automation with changelog generation and publishing. +// It orchestrates the build system, changelog generation, and publishing to targets +// like GitHub Releases. +package release + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go-devops/build/builders" + "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go-devops/release/publishers" +) + +// Release represents a release with its version, artifacts, and changelog. +type Release struct { + // Version is the semantic version string (e.g., "v1.2.3"). + Version string + // Artifacts are the built release artifacts (archives with checksums). + Artifacts []build.Artifact + // Changelog is the generated markdown changelog. + Changelog string + // ProjectDir is the root directory of the project. + ProjectDir string + // FS is the medium for file operations. + FS io.Medium +} + +// Publish publishes pre-built artifacts from dist/ to configured targets. +// Use this after `core build` to separate build and publish concerns. +// If dryRun is true, it will show what would be done without actually publishing. +func Publish(ctx context.Context, cfg *Config, dryRun bool) (*Release, error) { + if cfg == nil { + return nil, fmt.Errorf("release.Publish: config is nil") + } + + m := io.Local + + projectDir := cfg.projectDir + if projectDir == "" { + projectDir = "." 
+ } + + // Resolve to absolute path + absProjectDir, err := filepath.Abs(projectDir) + if err != nil { + return nil, fmt.Errorf("release.Publish: failed to resolve project directory: %w", err) + } + + // Step 1: Determine version + version := cfg.version + if version == "" { + version, err = DetermineVersion(absProjectDir) + if err != nil { + return nil, fmt.Errorf("release.Publish: failed to determine version: %w", err) + } + } + + // Step 2: Find pre-built artifacts in dist/ + distDir := filepath.Join(absProjectDir, "dist") + artifacts, err := findArtifacts(m, distDir) + if err != nil { + return nil, fmt.Errorf("release.Publish: %w", err) + } + + if len(artifacts) == 0 { + return nil, fmt.Errorf("release.Publish: no artifacts found in dist/\nRun 'core build' first to create artifacts") + } + + // Step 3: Generate changelog + changelog, err := Generate(absProjectDir, "", version) + if err != nil { + // Non-fatal: continue with empty changelog + changelog = fmt.Sprintf("Release %s", version) + } + + release := &Release{ + Version: version, + Artifacts: artifacts, + Changelog: changelog, + ProjectDir: absProjectDir, + FS: m, + } + + // Step 4: Publish to configured targets + if len(cfg.Publishers) > 0 { + pubRelease := publishers.NewRelease(release.Version, release.Artifacts, release.Changelog, release.ProjectDir, release.FS) + + for _, pubCfg := range cfg.Publishers { + publisher, err := getPublisher(pubCfg.Type) + if err != nil { + return release, fmt.Errorf("release.Publish: %w", err) + } + + extendedCfg := buildExtendedConfig(pubCfg) + publisherCfg := publishers.NewPublisherConfig(pubCfg.Type, pubCfg.Prerelease, pubCfg.Draft, extendedCfg) + if err := publisher.Publish(ctx, pubRelease, publisherCfg, cfg, dryRun); err != nil { + return release, fmt.Errorf("release.Publish: publish to %s failed: %w", pubCfg.Type, err) + } + } + } + + return release, nil +} + +// findArtifacts discovers pre-built artifacts in the dist directory. 
+func findArtifacts(m io.Medium, distDir string) ([]build.Artifact, error) { + if !m.IsDir(distDir) { + return nil, fmt.Errorf("dist/ directory not found") + } + + var artifacts []build.Artifact + + entries, err := m.List(distDir) + if err != nil { + return nil, fmt.Errorf("failed to read dist/: %w", err) + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + path := filepath.Join(distDir, name) + + // Include archives and checksums + if strings.HasSuffix(name, ".tar.gz") || + strings.HasSuffix(name, ".zip") || + strings.HasSuffix(name, ".txt") || + strings.HasSuffix(name, ".sig") { + artifacts = append(artifacts, build.Artifact{Path: path}) + } + } + + return artifacts, nil +} + +// Run executes the full release process: determine version, build artifacts, +// generate changelog, and publish to configured targets. +// For separated concerns, prefer using `core build` then `core ci` (Publish). +// If dryRun is true, it will show what would be done without actually publishing. +func Run(ctx context.Context, cfg *Config, dryRun bool) (*Release, error) { + if cfg == nil { + return nil, fmt.Errorf("release.Run: config is nil") + } + + m := io.Local + + projectDir := cfg.projectDir + if projectDir == "" { + projectDir = "." 
+ } + + // Resolve to absolute path + absProjectDir, err := filepath.Abs(projectDir) + if err != nil { + return nil, fmt.Errorf("release.Run: failed to resolve project directory: %w", err) + } + + // Step 1: Determine version + version := cfg.version + if version == "" { + version, err = DetermineVersion(absProjectDir) + if err != nil { + return nil, fmt.Errorf("release.Run: failed to determine version: %w", err) + } + } + + // Step 2: Generate changelog + changelog, err := Generate(absProjectDir, "", version) + if err != nil { + // Non-fatal: continue with empty changelog + changelog = fmt.Sprintf("Release %s", version) + } + + // Step 3: Build artifacts + artifacts, err := buildArtifacts(ctx, m, cfg, absProjectDir, version) + if err != nil { + return nil, fmt.Errorf("release.Run: build failed: %w", err) + } + + release := &Release{ + Version: version, + Artifacts: artifacts, + Changelog: changelog, + ProjectDir: absProjectDir, + FS: m, + } + + // Step 4: Publish to configured targets + if len(cfg.Publishers) > 0 { + // Convert to publisher types + pubRelease := publishers.NewRelease(release.Version, release.Artifacts, release.Changelog, release.ProjectDir, release.FS) + + for _, pubCfg := range cfg.Publishers { + publisher, err := getPublisher(pubCfg.Type) + if err != nil { + return release, fmt.Errorf("release.Run: %w", err) + } + + // Build extended config for publisher-specific settings + extendedCfg := buildExtendedConfig(pubCfg) + publisherCfg := publishers.NewPublisherConfig(pubCfg.Type, pubCfg.Prerelease, pubCfg.Draft, extendedCfg) + if err := publisher.Publish(ctx, pubRelease, publisherCfg, cfg, dryRun); err != nil { + return release, fmt.Errorf("release.Run: publish to %s failed: %w", pubCfg.Type, err) + } + } + } + + return release, nil +} + +// buildArtifacts builds all artifacts for the release. 
+func buildArtifacts(ctx context.Context, fs io.Medium, cfg *Config, projectDir, version string) ([]build.Artifact, error) { + // Load build configuration + buildCfg, err := build.LoadConfig(fs, projectDir) + if err != nil { + return nil, fmt.Errorf("failed to load build config: %w", err) + } + + // Determine targets + var targets []build.Target + if len(cfg.Build.Targets) > 0 { + for _, t := range cfg.Build.Targets { + targets = append(targets, build.Target{OS: t.OS, Arch: t.Arch}) + } + } else if len(buildCfg.Targets) > 0 { + targets = buildCfg.ToTargets() + } else { + // Default targets + targets = []build.Target{ + {OS: "linux", Arch: "amd64"}, + {OS: "linux", Arch: "arm64"}, + {OS: "darwin", Arch: "arm64"}, + {OS: "windows", Arch: "amd64"}, + } + } + + // Determine binary name + binaryName := cfg.Project.Name + if binaryName == "" { + binaryName = buildCfg.Project.Binary + } + if binaryName == "" { + binaryName = buildCfg.Project.Name + } + if binaryName == "" { + binaryName = filepath.Base(projectDir) + } + + // Determine output directory + outputDir := filepath.Join(projectDir, "dist") + + // Get builder (detect project type) + projectType, err := build.PrimaryType(fs, projectDir) + if err != nil { + return nil, fmt.Errorf("failed to detect project type: %w", err) + } + + builder, err := getBuilder(projectType) + if err != nil { + return nil, err + } + + // Build configuration + buildConfig := &build.Config{ + FS: fs, + ProjectDir: projectDir, + OutputDir: outputDir, + Name: binaryName, + Version: version, + LDFlags: buildCfg.Build.LDFlags, + } + + // Build + artifacts, err := builder.Build(ctx, buildConfig, targets) + if err != nil { + return nil, fmt.Errorf("build failed: %w", err) + } + + // Archive artifacts + archivedArtifacts, err := build.ArchiveAll(fs, artifacts) + if err != nil { + return nil, fmt.Errorf("archive failed: %w", err) + } + + // Compute checksums + checksummedArtifacts, err := build.ChecksumAll(fs, archivedArtifacts) + if err != nil { + 
return nil, fmt.Errorf("checksum failed: %w", err) + } + + // Write CHECKSUMS.txt + checksumPath := filepath.Join(outputDir, "CHECKSUMS.txt") + if err := build.WriteChecksumFile(fs, checksummedArtifacts, checksumPath); err != nil { + return nil, fmt.Errorf("failed to write checksums file: %w", err) + } + + // Add CHECKSUMS.txt as an artifact + checksumArtifact := build.Artifact{ + Path: checksumPath, + } + checksummedArtifacts = append(checksummedArtifacts, checksumArtifact) + + return checksummedArtifacts, nil +} + +// getBuilder returns the appropriate builder for the project type. +func getBuilder(projectType build.ProjectType) (build.Builder, error) { + switch projectType { + case build.ProjectTypeWails: + return builders.NewWailsBuilder(), nil + case build.ProjectTypeGo: + return builders.NewGoBuilder(), nil + case build.ProjectTypeNode: + return nil, fmt.Errorf("node.js builder not yet implemented") + case build.ProjectTypePHP: + return nil, fmt.Errorf("PHP builder not yet implemented") + default: + return nil, fmt.Errorf("unsupported project type: %s", projectType) + } +} + +// getPublisher returns the publisher for the given type. +func getPublisher(pubType string) (publishers.Publisher, error) { + switch pubType { + case "github": + return publishers.NewGitHubPublisher(), nil + case "linuxkit": + return publishers.NewLinuxKitPublisher(), nil + case "docker": + return publishers.NewDockerPublisher(), nil + case "npm": + return publishers.NewNpmPublisher(), nil + case "homebrew": + return publishers.NewHomebrewPublisher(), nil + case "scoop": + return publishers.NewScoopPublisher(), nil + case "aur": + return publishers.NewAURPublisher(), nil + case "chocolatey": + return publishers.NewChocolateyPublisher(), nil + default: + return nil, fmt.Errorf("unsupported publisher type: %s", pubType) + } +} + +// buildExtendedConfig builds a map of extended configuration for a publisher. 
+func buildExtendedConfig(pubCfg PublisherConfig) map[string]any { + ext := make(map[string]any) + + // LinuxKit-specific config + if pubCfg.Config != "" { + ext["config"] = pubCfg.Config + } + if len(pubCfg.Formats) > 0 { + ext["formats"] = toAnySlice(pubCfg.Formats) + } + if len(pubCfg.Platforms) > 0 { + ext["platforms"] = toAnySlice(pubCfg.Platforms) + } + + // Docker-specific config + if pubCfg.Registry != "" { + ext["registry"] = pubCfg.Registry + } + if pubCfg.Image != "" { + ext["image"] = pubCfg.Image + } + if pubCfg.Dockerfile != "" { + ext["dockerfile"] = pubCfg.Dockerfile + } + if len(pubCfg.Tags) > 0 { + ext["tags"] = toAnySlice(pubCfg.Tags) + } + if len(pubCfg.BuildArgs) > 0 { + args := make(map[string]any) + for k, v := range pubCfg.BuildArgs { + args[k] = v + } + ext["build_args"] = args + } + + // npm-specific config + if pubCfg.Package != "" { + ext["package"] = pubCfg.Package + } + if pubCfg.Access != "" { + ext["access"] = pubCfg.Access + } + + // Homebrew-specific config + if pubCfg.Tap != "" { + ext["tap"] = pubCfg.Tap + } + if pubCfg.Formula != "" { + ext["formula"] = pubCfg.Formula + } + + // Scoop-specific config + if pubCfg.Bucket != "" { + ext["bucket"] = pubCfg.Bucket + } + + // AUR-specific config + if pubCfg.Maintainer != "" { + ext["maintainer"] = pubCfg.Maintainer + } + + // Chocolatey-specific config + if pubCfg.Push { + ext["push"] = pubCfg.Push + } + + // Official repo config (shared by multiple publishers) + if pubCfg.Official != nil { + official := make(map[string]any) + official["enabled"] = pubCfg.Official.Enabled + if pubCfg.Official.Output != "" { + official["output"] = pubCfg.Official.Output + } + ext["official"] = official + } + + return ext +} + +// toAnySlice converts a string slice to an any slice. 
+func toAnySlice(s []string) []any { + result := make([]any, len(s)) + for i, v := range s { + result[i] = v + } + return result +} diff --git a/release/release_test.go b/release/release_test.go new file mode 100644 index 0000000..17d0d39 --- /dev/null +++ b/release/release_test.go @@ -0,0 +1,704 @@ +package release + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "testing" + + "forge.lthn.ai/core/go-devops/build" + "forge.lthn.ai/core/go/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFindArtifacts_Good(t *testing.T) { + t.Run("finds tar.gz artifacts", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + + // Create test artifact files + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-linux-amd64.tar.gz"), []byte("test"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-darwin-arm64.tar.gz"), []byte("test"), 0644)) + + artifacts, err := findArtifacts(io.Local, distDir) + require.NoError(t, err) + + assert.Len(t, artifacts, 2) + }) + + t.Run("finds zip artifacts", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-windows-amd64.zip"), []byte("test"), 0644)) + + artifacts, err := findArtifacts(io.Local, distDir) + require.NoError(t, err) + + assert.Len(t, artifacts, 1) + assert.Contains(t, artifacts[0].Path, "app-windows-amd64.zip") + }) + + t.Run("finds checksum files", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + + require.NoError(t, os.WriteFile(filepath.Join(distDir, "CHECKSUMS.txt"), []byte("checksums"), 0644)) + + artifacts, err := findArtifacts(io.Local, distDir) + require.NoError(t, err) + + assert.Len(t, artifacts, 1) + assert.Contains(t, 
artifacts[0].Path, "CHECKSUMS.txt") + }) + + t.Run("finds signature files", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz.sig"), []byte("signature"), 0644)) + + artifacts, err := findArtifacts(io.Local, distDir) + require.NoError(t, err) + + assert.Len(t, artifacts, 1) + }) + + t.Run("finds mixed artifact types", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-linux.tar.gz"), []byte("test"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-windows.zip"), []byte("test"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "CHECKSUMS.txt"), []byte("checksums"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.sig"), []byte("sig"), 0644)) + + artifacts, err := findArtifacts(io.Local, distDir) + require.NoError(t, err) + + assert.Len(t, artifacts, 4) + }) + + t.Run("ignores non-artifact files", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + + require.NoError(t, os.WriteFile(filepath.Join(distDir, "README.md"), []byte("readme"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.exe"), []byte("binary"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("artifact"), 0644)) + + artifacts, err := findArtifacts(io.Local, distDir) + require.NoError(t, err) + + assert.Len(t, artifacts, 1) + assert.Contains(t, artifacts[0].Path, "app.tar.gz") + }) + + t.Run("ignores subdirectories", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + require.NoError(t, os.MkdirAll(filepath.Join(distDir, "subdir"), 0755)) + + 
require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("artifact"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "subdir", "nested.tar.gz"), []byte("nested"), 0644)) + + artifacts, err := findArtifacts(io.Local, distDir) + require.NoError(t, err) + + // Should only find the top-level artifact + assert.Len(t, artifacts, 1) + }) + + t.Run("returns empty slice for empty dist directory", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + + artifacts, err := findArtifacts(io.Local, distDir) + require.NoError(t, err) + + assert.Empty(t, artifacts) + }) +} + +func TestFindArtifacts_Bad(t *testing.T) { + t.Run("returns error when dist directory does not exist", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + + _, err := findArtifacts(io.Local, distDir) + assert.Error(t, err) + assert.Contains(t, err.Error(), "dist/ directory not found") + }) + + t.Run("returns error when dist directory is unreadable", func(t *testing.T) { + if os.Geteuid() == 0 { + t.Skip("root can read any directory") + } + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + + // Create a file that looks like dist but will cause ReadDir to fail + // by making the directory unreadable + require.NoError(t, os.Chmod(distDir, 0000)) + defer func() { _ = os.Chmod(distDir, 0755) }() + + _, err := findArtifacts(io.Local, distDir) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to read dist/") + }) +} + +func TestGetBuilder_Good(t *testing.T) { + t.Run("returns Go builder for go project type", func(t *testing.T) { + builder, err := getBuilder(build.ProjectTypeGo) + require.NoError(t, err) + assert.NotNil(t, builder) + assert.Equal(t, "go", builder.Name()) + }) + + t.Run("returns Wails builder for wails project type", func(t *testing.T) { + builder, err := 
getBuilder(build.ProjectTypeWails) + require.NoError(t, err) + assert.NotNil(t, builder) + assert.Equal(t, "wails", builder.Name()) + }) +} + +func TestGetBuilder_Bad(t *testing.T) { + t.Run("returns error for Node project type", func(t *testing.T) { + _, err := getBuilder(build.ProjectTypeNode) + assert.Error(t, err) + assert.Contains(t, err.Error(), "node.js builder not yet implemented") + }) + + t.Run("returns error for PHP project type", func(t *testing.T) { + _, err := getBuilder(build.ProjectTypePHP) + assert.Error(t, err) + assert.Contains(t, err.Error(), "PHP builder not yet implemented") + }) + + t.Run("returns error for unsupported project type", func(t *testing.T) { + _, err := getBuilder(build.ProjectType("unknown")) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported project type") + }) +} + +func TestGetPublisher_Good(t *testing.T) { + tests := []struct { + pubType string + expectedName string + }{ + {"github", "github"}, + {"linuxkit", "linuxkit"}, + {"docker", "docker"}, + {"npm", "npm"}, + {"homebrew", "homebrew"}, + {"scoop", "scoop"}, + {"aur", "aur"}, + {"chocolatey", "chocolatey"}, + } + + for _, tc := range tests { + t.Run(tc.pubType, func(t *testing.T) { + publisher, err := getPublisher(tc.pubType) + require.NoError(t, err) + assert.NotNil(t, publisher) + assert.Equal(t, tc.expectedName, publisher.Name()) + }) + } +} + +func TestGetPublisher_Bad(t *testing.T) { + t.Run("returns error for unsupported publisher type", func(t *testing.T) { + _, err := getPublisher("unsupported") + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported publisher type: unsupported") + }) + + t.Run("returns error for empty publisher type", func(t *testing.T) { + _, err := getPublisher("") + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported publisher type") + }) +} + +func TestBuildExtendedConfig_Good(t *testing.T) { + t.Run("returns empty map for minimal config", func(t *testing.T) { + cfg := PublisherConfig{ + Type: 
"github", + } + + ext := buildExtendedConfig(cfg) + assert.Empty(t, ext) + }) + + t.Run("includes LinuxKit config", func(t *testing.T) { + cfg := PublisherConfig{ + Type: "linuxkit", + Config: "linuxkit.yaml", + Formats: []string{"iso", "qcow2"}, + Platforms: []string{"linux/amd64", "linux/arm64"}, + } + + ext := buildExtendedConfig(cfg) + + assert.Equal(t, "linuxkit.yaml", ext["config"]) + assert.Equal(t, []any{"iso", "qcow2"}, ext["formats"]) + assert.Equal(t, []any{"linux/amd64", "linux/arm64"}, ext["platforms"]) + }) + + t.Run("includes Docker config", func(t *testing.T) { + cfg := PublisherConfig{ + Type: "docker", + Registry: "ghcr.io", + Image: "owner/repo", + Dockerfile: "Dockerfile.prod", + Tags: []string{"latest", "v1.0.0"}, + BuildArgs: map[string]string{"VERSION": "1.0.0"}, + } + + ext := buildExtendedConfig(cfg) + + assert.Equal(t, "ghcr.io", ext["registry"]) + assert.Equal(t, "owner/repo", ext["image"]) + assert.Equal(t, "Dockerfile.prod", ext["dockerfile"]) + assert.Equal(t, []any{"latest", "v1.0.0"}, ext["tags"]) + buildArgs := ext["build_args"].(map[string]any) + assert.Equal(t, "1.0.0", buildArgs["VERSION"]) + }) + + t.Run("includes npm config", func(t *testing.T) { + cfg := PublisherConfig{ + Type: "npm", + Package: "@host-uk/core", + Access: "public", + } + + ext := buildExtendedConfig(cfg) + + assert.Equal(t, "@host-uk/core", ext["package"]) + assert.Equal(t, "public", ext["access"]) + }) + + t.Run("includes Homebrew config", func(t *testing.T) { + cfg := PublisherConfig{ + Type: "homebrew", + Tap: "host-uk/tap", + Formula: "core", + } + + ext := buildExtendedConfig(cfg) + + assert.Equal(t, "host-uk/tap", ext["tap"]) + assert.Equal(t, "core", ext["formula"]) + }) + + t.Run("includes Scoop config", func(t *testing.T) { + cfg := PublisherConfig{ + Type: "scoop", + Bucket: "host-uk/bucket", + } + + ext := buildExtendedConfig(cfg) + + assert.Equal(t, "host-uk/bucket", ext["bucket"]) + }) + + t.Run("includes AUR config", func(t *testing.T) { + cfg 
:= PublisherConfig{ + Type: "aur", + Maintainer: "John Doe ", + } + + ext := buildExtendedConfig(cfg) + + assert.Equal(t, "John Doe ", ext["maintainer"]) + }) + + t.Run("includes Chocolatey config", func(t *testing.T) { + cfg := PublisherConfig{ + Type: "chocolatey", + Push: true, + } + + ext := buildExtendedConfig(cfg) + + assert.True(t, ext["push"].(bool)) + }) + + t.Run("includes Official config", func(t *testing.T) { + cfg := PublisherConfig{ + Type: "homebrew", + Official: &OfficialConfig{ + Enabled: true, + Output: "/path/to/output", + }, + } + + ext := buildExtendedConfig(cfg) + + official := ext["official"].(map[string]any) + assert.True(t, official["enabled"].(bool)) + assert.Equal(t, "/path/to/output", official["output"]) + }) + + t.Run("Official config without output", func(t *testing.T) { + cfg := PublisherConfig{ + Type: "scoop", + Official: &OfficialConfig{ + Enabled: true, + }, + } + + ext := buildExtendedConfig(cfg) + + official := ext["official"].(map[string]any) + assert.True(t, official["enabled"].(bool)) + _, hasOutput := official["output"] + assert.False(t, hasOutput) + }) +} + +func TestToAnySlice_Good(t *testing.T) { + t.Run("converts string slice to any slice", func(t *testing.T) { + input := []string{"a", "b", "c"} + + result := toAnySlice(input) + + assert.Len(t, result, 3) + assert.Equal(t, "a", result[0]) + assert.Equal(t, "b", result[1]) + assert.Equal(t, "c", result[2]) + }) + + t.Run("handles empty slice", func(t *testing.T) { + input := []string{} + + result := toAnySlice(input) + + assert.Empty(t, result) + }) + + t.Run("handles single element", func(t *testing.T) { + input := []string{"only"} + + result := toAnySlice(input) + + assert.Len(t, result, 1) + assert.Equal(t, "only", result[0]) + }) +} + +func TestPublish_Good(t *testing.T) { + t.Run("returns release with version from config", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + 
require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + cfg.SetVersion("v1.0.0") + cfg.Publishers = nil // No publishers to avoid network calls + + release, err := Publish(context.Background(), cfg, true) + require.NoError(t, err) + + assert.Equal(t, "v1.0.0", release.Version) + assert.Len(t, release.Artifacts, 1) + }) + + t.Run("finds artifacts in dist directory", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-linux.tar.gz"), []byte("test"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-darwin.tar.gz"), []byte("test"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "CHECKSUMS.txt"), []byte("checksums"), 0644)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + cfg.SetVersion("v1.0.0") + cfg.Publishers = nil + + release, err := Publish(context.Background(), cfg, true) + require.NoError(t, err) + + assert.Len(t, release.Artifacts, 3) + }) +} + +func TestPublish_Bad(t *testing.T) { + t.Run("returns error when config is nil", func(t *testing.T) { + _, err := Publish(context.Background(), nil, true) + assert.Error(t, err) + assert.Contains(t, err.Error(), "config is nil") + }) + + t.Run("returns error when dist directory missing", func(t *testing.T) { + dir := t.TempDir() + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + cfg.SetVersion("v1.0.0") + + _, err := Publish(context.Background(), cfg, true) + assert.Error(t, err) + assert.Contains(t, err.Error(), "dist/ directory not found") + }) + + t.Run("returns error when no artifacts found", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + cfg.SetVersion("v1.0.0") + + _, err := Publish(context.Background(), 
cfg, true) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no artifacts found") + }) + + t.Run("returns error for unsupported publisher", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + cfg.SetVersion("v1.0.0") + cfg.Publishers = []PublisherConfig{ + {Type: "unsupported"}, + } + + _, err := Publish(context.Background(), cfg, true) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported publisher type") + }) + + t.Run("returns error when version determination fails in non-git dir", func(t *testing.T) { + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + // Don't set version - let it try to determine from git + cfg.Publishers = nil + + // In a non-git directory, DetermineVersion returns v0.0.1 as default + // so we verify that the publish proceeds without error + release, err := Publish(context.Background(), cfg, true) + require.NoError(t, err) + assert.Equal(t, "v0.0.1", release.Version) + }) +} + +func TestRun_Good(t *testing.T) { + t.Run("returns release with version from config", func(t *testing.T) { + // Create a minimal Go project for testing + dir := t.TempDir() + + // Create go.mod + goMod := `module testapp + +go 1.21 +` + require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)) + + // Create main.go + mainGo := `package main + +func main() {} +` + require.NoError(t, os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + cfg.SetVersion("v1.0.0") + cfg.Project.Name = "testapp" + cfg.Build.Targets = 
[]TargetConfig{} // Empty targets to use defaults + cfg.Publishers = nil // No publishers to avoid network calls + + // Note: This test will actually try to build, which may fail in CI + // So we just test that the function accepts the config properly + release, err := Run(context.Background(), cfg, true) + if err != nil { + // Build might fail in test environment, but we still verify the error message + assert.Contains(t, err.Error(), "build") + } else { + assert.Equal(t, "v1.0.0", release.Version) + } + }) +} + +func TestRun_Bad(t *testing.T) { + t.Run("returns error when config is nil", func(t *testing.T) { + _, err := Run(context.Background(), nil, true) + assert.Error(t, err) + assert.Contains(t, err.Error(), "config is nil") + }) +} + +func TestRelease_Structure(t *testing.T) { + t.Run("Release struct holds expected fields", func(t *testing.T) { + release := &Release{ + Version: "v1.0.0", + Artifacts: []build.Artifact{{Path: "/path/to/artifact"}}, + Changelog: "## v1.0.0\n\nChanges", + ProjectDir: "/project", + } + + assert.Equal(t, "v1.0.0", release.Version) + assert.Len(t, release.Artifacts, 1) + assert.Contains(t, release.Changelog, "v1.0.0") + assert.Equal(t, "/project", release.ProjectDir) + }) +} + +func TestPublish_VersionFromGit(t *testing.T) { + t.Run("determines version from git when not set", func(t *testing.T) { + dir := setupPublishGitRepo(t) + createPublishCommit(t, dir, "feat: initial commit") + createPublishTag(t, dir, "v1.2.3") + + // Create dist directory with artifact + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + // Don't set version - let it be determined from git + cfg.Publishers = nil + + release, err := Publish(context.Background(), cfg, true) + require.NoError(t, err) + + assert.Equal(t, "v1.2.3", release.Version) + }) +} + +func 
TestPublish_ChangelogGeneration(t *testing.T) { + t.Run("generates changelog from git commits when available", func(t *testing.T) { + dir := setupPublishGitRepo(t) + createPublishCommit(t, dir, "feat: add feature") + createPublishTag(t, dir, "v1.0.0") + createPublishCommit(t, dir, "fix: fix bug") + createPublishTag(t, dir, "v1.0.1") + + // Create dist directory with artifact + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + cfg.SetVersion("v1.0.1") + cfg.Publishers = nil + + release, err := Publish(context.Background(), cfg, true) + require.NoError(t, err) + + // Changelog should contain either the commit message or the version + assert.Contains(t, release.Changelog, "v1.0.1") + }) + + t.Run("uses fallback changelog on error", func(t *testing.T) { + dir := t.TempDir() // Not a git repo + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + cfg.SetVersion("v1.0.0") + cfg.Publishers = nil + + release, err := Publish(context.Background(), cfg, true) + require.NoError(t, err) + + // Should use fallback changelog + assert.Contains(t, release.Changelog, "Release v1.0.0") + }) +} + +func TestPublish_DefaultProjectDir(t *testing.T) { + t.Run("uses current directory when projectDir is empty", func(t *testing.T) { + // Create artifacts in current directory's dist folder + dir := t.TempDir() + distDir := filepath.Join(dir, "dist") + require.NoError(t, os.MkdirAll(distDir, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644)) + + cfg := DefaultConfig() + cfg.SetProjectDir(dir) + cfg.SetVersion("v1.0.0") + cfg.Publishers = nil + + release, err := 
Publish(context.Background(), cfg, true) + require.NoError(t, err) + + assert.NotEmpty(t, release.ProjectDir) + }) +} + +// Helper functions for publish tests +func setupPublishGitRepo(t *testing.T) string { + t.Helper() + dir := t.TempDir() + + cmd := exec.Command("git", "init") + cmd.Dir = dir + require.NoError(t, cmd.Run()) + + cmd = exec.Command("git", "config", "user.email", "test@example.com") + cmd.Dir = dir + require.NoError(t, cmd.Run()) + + cmd = exec.Command("git", "config", "user.name", "Test User") + cmd.Dir = dir + require.NoError(t, cmd.Run()) + + return dir +} + +func createPublishCommit(t *testing.T, dir, message string) { + t.Helper() + + filePath := filepath.Join(dir, "publish_test.txt") + content, _ := os.ReadFile(filePath) + content = append(content, []byte(message+"\n")...) + require.NoError(t, os.WriteFile(filePath, content, 0644)) + + cmd := exec.Command("git", "add", ".") + cmd.Dir = dir + require.NoError(t, cmd.Run()) + + cmd = exec.Command("git", "commit", "-m", message) + cmd.Dir = dir + require.NoError(t, cmd.Run()) +} + +func createPublishTag(t *testing.T, dir, tag string) { + t.Helper() + cmd := exec.Command("git", "tag", tag) + cmd.Dir = dir + require.NoError(t, cmd.Run()) +} diff --git a/release/sdk.go b/release/sdk.go new file mode 100644 index 0000000..30c2540 --- /dev/null +++ b/release/sdk.go @@ -0,0 +1,133 @@ +// Package release provides release automation with changelog generation and publishing. +package release + +import ( + "context" + "fmt" + + "forge.lthn.ai/core/go-devops/sdk" +) + +// SDKRelease holds the result of an SDK release. +type SDKRelease struct { + // Version is the SDK version. + Version string + // Languages that were generated. + Languages []string + // Output directory. + Output string +} + +// RunSDK executes SDK-only release: diff check + generate. +// If dryRun is true, it shows what would be done without generating. 
+func RunSDK(ctx context.Context, cfg *Config, dryRun bool) (*SDKRelease, error) { + if cfg == nil { + return nil, fmt.Errorf("release.RunSDK: config is nil") + } + if cfg.SDK == nil { + return nil, fmt.Errorf("release.RunSDK: sdk not configured in .core/release.yaml") + } + + projectDir := cfg.projectDir + if projectDir == "" { + projectDir = "." + } + + // Determine version + version := cfg.version + if version == "" { + var err error + version, err = DetermineVersion(projectDir) + if err != nil { + return nil, fmt.Errorf("release.RunSDK: failed to determine version: %w", err) + } + } + + // Run diff check if enabled + if cfg.SDK.Diff.Enabled { + breaking, err := checkBreakingChanges(projectDir, cfg.SDK) + if err != nil { + // Non-fatal: warn and continue + fmt.Printf("Warning: diff check failed: %v\n", err) + } else if breaking { + if cfg.SDK.Diff.FailOnBreaking { + return nil, fmt.Errorf("release.RunSDK: breaking API changes detected") + } + fmt.Printf("Warning: breaking API changes detected\n") + } + } + + // Prepare result + output := cfg.SDK.Output + if output == "" { + output = "sdk" + } + + result := &SDKRelease{ + Version: version, + Languages: cfg.SDK.Languages, + Output: output, + } + + if dryRun { + return result, nil + } + + // Generate SDKs + sdkCfg := toSDKConfig(cfg.SDK) + s := sdk.New(projectDir, sdkCfg) + s.SetVersion(version) + + if err := s.Generate(ctx); err != nil { + return nil, fmt.Errorf("release.RunSDK: generation failed: %w", err) + } + + return result, nil +} + +// checkBreakingChanges runs oasdiff to detect breaking changes. 
+func checkBreakingChanges(projectDir string, cfg *SDKConfig) (bool, error) { + // Get previous tag for comparison (uses getPreviousTag from changelog.go) + prevTag, err := getPreviousTag(projectDir, "HEAD") + if err != nil { + return false, fmt.Errorf("no previous tag found: %w", err) + } + + // Detect spec path + specPath := cfg.Spec + if specPath == "" { + s := sdk.New(projectDir, nil) + specPath, err = s.DetectSpec() + if err != nil { + return false, err + } + } + + // Run diff + result, err := sdk.Diff(prevTag, specPath) + if err != nil { + return false, err + } + + return result.Breaking, nil +} + +// toSDKConfig converts release.SDKConfig to sdk.Config. +func toSDKConfig(cfg *SDKConfig) *sdk.Config { + if cfg == nil { + return nil + } + return &sdk.Config{ + Spec: cfg.Spec, + Languages: cfg.Languages, + Output: cfg.Output, + Package: sdk.PackageConfig{ + Name: cfg.Package.Name, + Version: cfg.Package.Version, + }, + Diff: sdk.DiffConfig{ + Enabled: cfg.Diff.Enabled, + FailOnBreaking: cfg.Diff.FailOnBreaking, + }, + } +} diff --git a/release/sdk_test.go b/release/sdk_test.go new file mode 100644 index 0000000..f800beb --- /dev/null +++ b/release/sdk_test.go @@ -0,0 +1,229 @@ +package release + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRunSDK_Bad_NilConfig(t *testing.T) { + _, err := RunSDK(context.Background(), nil, true) + assert.Error(t, err) + assert.Contains(t, err.Error(), "config is nil") +} + +func TestRunSDK_Bad_NoSDKConfig(t *testing.T) { + cfg := &Config{ + SDK: nil, + } + cfg.projectDir = "/tmp" + + _, err := RunSDK(context.Background(), cfg, true) + assert.Error(t, err) + assert.Contains(t, err.Error(), "sdk not configured") +} + +func TestRunSDK_Good_DryRun(t *testing.T) { + cfg := &Config{ + SDK: &SDKConfig{ + Languages: []string{"typescript", "python"}, + Output: "sdk", + }, + } + cfg.projectDir = "/tmp" + cfg.version = "v1.0.0" + + result, err := 
RunSDK(context.Background(), cfg, true) + require.NoError(t, err) + + assert.Equal(t, "v1.0.0", result.Version) + assert.Len(t, result.Languages, 2) + assert.Contains(t, result.Languages, "typescript") + assert.Contains(t, result.Languages, "python") + assert.Equal(t, "sdk", result.Output) +} + +func TestRunSDK_Good_DryRunDefaultOutput(t *testing.T) { + cfg := &Config{ + SDK: &SDKConfig{ + Languages: []string{"go"}, + Output: "", // Empty output, should default to "sdk" + }, + } + cfg.projectDir = "/tmp" + cfg.version = "v2.0.0" + + result, err := RunSDK(context.Background(), cfg, true) + require.NoError(t, err) + + assert.Equal(t, "sdk", result.Output) +} + +func TestRunSDK_Good_DryRunDefaultProjectDir(t *testing.T) { + cfg := &Config{ + SDK: &SDKConfig{ + Languages: []string{"typescript"}, + Output: "out", + }, + } + // projectDir is empty, should default to "." + cfg.version = "v1.0.0" + + result, err := RunSDK(context.Background(), cfg, true) + require.NoError(t, err) + + assert.Equal(t, "v1.0.0", result.Version) +} + +func TestRunSDK_Bad_BreakingChangesFailOnBreaking(t *testing.T) { + // This test verifies that when diff.FailOnBreaking is true and breaking changes + // are detected, RunSDK returns an error. However, since we can't easily mock + // the diff check, this test verifies the config is correctly processed. + // The actual breaking change detection is tested in pkg/sdk/diff_test.go. 
+ cfg := &Config{ + SDK: &SDKConfig{ + Languages: []string{"typescript"}, + Output: "sdk", + Diff: SDKDiffConfig{ + Enabled: true, + FailOnBreaking: true, + }, + }, + } + cfg.projectDir = "/tmp" + cfg.version = "v1.0.0" + + // In dry run mode with no git repo, diff check will fail gracefully + // (non-fatal warning), so this should succeed + result, err := RunSDK(context.Background(), cfg, true) + require.NoError(t, err) + assert.Equal(t, "v1.0.0", result.Version) +} + +func TestToSDKConfig_Good(t *testing.T) { + sdkCfg := &SDKConfig{ + Spec: "api/openapi.yaml", + Languages: []string{"typescript", "go"}, + Output: "sdk", + Package: SDKPackageConfig{ + Name: "myapi", + Version: "v1.0.0", + }, + Diff: SDKDiffConfig{ + Enabled: true, + FailOnBreaking: true, + }, + } + + result := toSDKConfig(sdkCfg) + + assert.Equal(t, "api/openapi.yaml", result.Spec) + assert.Equal(t, []string{"typescript", "go"}, result.Languages) + assert.Equal(t, "sdk", result.Output) + assert.Equal(t, "myapi", result.Package.Name) + assert.Equal(t, "v1.0.0", result.Package.Version) + assert.True(t, result.Diff.Enabled) + assert.True(t, result.Diff.FailOnBreaking) +} + +func TestToSDKConfig_Good_NilInput(t *testing.T) { + result := toSDKConfig(nil) + assert.Nil(t, result) +} + +func TestRunSDK_Good_WithDiffEnabledNoFailOnBreaking(t *testing.T) { + // Tests diff enabled but FailOnBreaking=false (should warn but not fail) + cfg := &Config{ + SDK: &SDKConfig{ + Languages: []string{"typescript"}, + Output: "sdk", + Diff: SDKDiffConfig{ + Enabled: true, + FailOnBreaking: false, + }, + }, + } + cfg.projectDir = "/tmp" + cfg.version = "v1.0.0" + + // Dry run should succeed even without git repo (diff check fails gracefully) + result, err := RunSDK(context.Background(), cfg, true) + require.NoError(t, err) + assert.Equal(t, "v1.0.0", result.Version) + assert.Contains(t, result.Languages, "typescript") +} + +func TestRunSDK_Good_MultipleLanguages(t *testing.T) { + // Tests multiple language support + cfg 
:= &Config{ + SDK: &SDKConfig{ + Languages: []string{"typescript", "python", "go", "java"}, + Output: "multi-sdk", + }, + } + cfg.projectDir = "/tmp" + cfg.version = "v3.0.0" + + result, err := RunSDK(context.Background(), cfg, true) + require.NoError(t, err) + + assert.Equal(t, "v3.0.0", result.Version) + assert.Len(t, result.Languages, 4) + assert.Equal(t, "multi-sdk", result.Output) +} + +func TestRunSDK_Good_WithPackageConfig(t *testing.T) { + // Tests that package config is properly handled + cfg := &Config{ + SDK: &SDKConfig{ + Spec: "openapi.yaml", + Languages: []string{"typescript"}, + Output: "sdk", + Package: SDKPackageConfig{ + Name: "my-custom-sdk", + Version: "v2.5.0", + }, + }, + } + cfg.projectDir = "/tmp" + cfg.version = "v1.0.0" + + result, err := RunSDK(context.Background(), cfg, true) + require.NoError(t, err) + assert.Equal(t, "v1.0.0", result.Version) +} + +func TestToSDKConfig_Good_EmptyPackageConfig(t *testing.T) { + // Tests conversion with empty package config + sdkCfg := &SDKConfig{ + Languages: []string{"go"}, + Output: "sdk", + // Package is empty struct + } + + result := toSDKConfig(sdkCfg) + + assert.Equal(t, []string{"go"}, result.Languages) + assert.Equal(t, "sdk", result.Output) + assert.Empty(t, result.Package.Name) + assert.Empty(t, result.Package.Version) +} + +func TestToSDKConfig_Good_DiffDisabled(t *testing.T) { + // Tests conversion with diff disabled + sdkCfg := &SDKConfig{ + Languages: []string{"typescript"}, + Output: "sdk", + Diff: SDKDiffConfig{ + Enabled: false, + FailOnBreaking: false, + }, + } + + result := toSDKConfig(sdkCfg) + + assert.False(t, result.Diff.Enabled) + assert.False(t, result.Diff.FailOnBreaking) +} diff --git a/release/testdata/.core/release.yaml b/release/testdata/.core/release.yaml new file mode 100644 index 0000000..b9c9fd7 --- /dev/null +++ b/release/testdata/.core/release.yaml @@ -0,0 +1,35 @@ +version: 1 + +project: + name: myapp + repository: owner/repo + +build: + targets: + - os: linux + 
arch: amd64 + - os: linux + arch: arm64 + - os: darwin + arch: amd64 + - os: darwin + arch: arm64 + - os: windows + arch: amd64 + +publishers: + - type: github + prerelease: false + draft: false + +changelog: + include: + - feat + - fix + - perf + exclude: + - chore + - docs + - style + - test + - ci diff --git a/release/version.go b/release/version.go new file mode 100644 index 0000000..335ced7 --- /dev/null +++ b/release/version.go @@ -0,0 +1,195 @@ +// Package release provides release automation with changelog generation and publishing. +package release + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" +) + +// semverRegex matches semantic version strings with or without 'v' prefix. +var semverRegex = regexp.MustCompile(`^v?(\d+)\.(\d+)\.(\d+)(?:-([a-zA-Z0-9.-]+))?(?:\+([a-zA-Z0-9.-]+))?$`) + +// DetermineVersion determines the version for a release. +// It checks in order: +// 1. Git tag on HEAD +// 2. Most recent tag + increment patch +// 3. Default to v0.0.1 if no tags exist +func DetermineVersion(dir string) (string, error) { + // Check if HEAD has a tag + headTag, err := getTagOnHead(dir) + if err == nil && headTag != "" { + return normalizeVersion(headTag), nil + } + + // Get most recent tag + latestTag, err := getLatestTag(dir) + if err != nil || latestTag == "" { + // No tags exist, return default + return "v0.0.1", nil + } + + // Increment patch version + return IncrementVersion(latestTag), nil +} + +// IncrementVersion increments the patch version of a semver string. 
+// Examples: +// - "v1.2.3" -> "v1.2.4" +// - "1.2.3" -> "v1.2.4" +// - "v1.2.3-alpha" -> "v1.2.4" (strips prerelease) +func IncrementVersion(current string) string { + matches := semverRegex.FindStringSubmatch(current) + if matches == nil { + // Not a valid semver, return as-is with increment suffix + return current + ".1" + } + + major, _ := strconv.Atoi(matches[1]) + minor, _ := strconv.Atoi(matches[2]) + patch, _ := strconv.Atoi(matches[3]) + + // Increment patch + patch++ + + return fmt.Sprintf("v%d.%d.%d", major, minor, patch) +} + +// IncrementMinor increments the minor version of a semver string. +// Examples: +// - "v1.2.3" -> "v1.3.0" +// - "1.2.3" -> "v1.3.0" +func IncrementMinor(current string) string { + matches := semverRegex.FindStringSubmatch(current) + if matches == nil { + return current + ".1" + } + + major, _ := strconv.Atoi(matches[1]) + minor, _ := strconv.Atoi(matches[2]) + + // Increment minor, reset patch + minor++ + + return fmt.Sprintf("v%d.%d.0", major, minor) +} + +// IncrementMajor increments the major version of a semver string. +// Examples: +// - "v1.2.3" -> "v2.0.0" +// - "1.2.3" -> "v2.0.0" +func IncrementMajor(current string) string { + matches := semverRegex.FindStringSubmatch(current) + if matches == nil { + return current + ".1" + } + + major, _ := strconv.Atoi(matches[1]) + + // Increment major, reset minor and patch + major++ + + return fmt.Sprintf("v%d.0.0", major) +} + +// ParseVersion parses a semver string into its components. +// Returns (major, minor, patch, prerelease, build, error). 
+func ParseVersion(version string) (int, int, int, string, string, error) { + matches := semverRegex.FindStringSubmatch(version) + if matches == nil { + return 0, 0, 0, "", "", fmt.Errorf("invalid semver: %s", version) + } + + major, _ := strconv.Atoi(matches[1]) + minor, _ := strconv.Atoi(matches[2]) + patch, _ := strconv.Atoi(matches[3]) + prerelease := matches[4] + build := matches[5] + + return major, minor, patch, prerelease, build, nil +} + +// ValidateVersion checks if a string is a valid semver. +func ValidateVersion(version string) bool { + return semverRegex.MatchString(version) +} + +// normalizeVersion ensures the version starts with 'v'. +func normalizeVersion(version string) string { + if !strings.HasPrefix(version, "v") { + return "v" + version + } + return version +} + +// getTagOnHead returns the tag on HEAD, if any. +func getTagOnHead(dir string) (string, error) { + cmd := exec.Command("git", "describe", "--tags", "--exact-match", "HEAD") + cmd.Dir = dir + output, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(output)), nil +} + +// getLatestTag returns the most recent tag in the repository. +func getLatestTag(dir string) (string, error) { + cmd := exec.Command("git", "describe", "--tags", "--abbrev=0") + cmd.Dir = dir + output, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(output)), nil +} + +// CompareVersions compares two semver strings. 
+// Returns: +// +// -1 if a < b +// 0 if a == b +// 1 if a > b +func CompareVersions(a, b string) int { + aMajor, aMinor, aPatch, _, _, errA := ParseVersion(a) + bMajor, bMinor, bPatch, _, _, errB := ParseVersion(b) + + // Invalid versions are considered less than valid ones + if errA != nil && errB != nil { + return strings.Compare(a, b) + } + if errA != nil { + return -1 + } + if errB != nil { + return 1 + } + + // Compare major + if aMajor != bMajor { + if aMajor < bMajor { + return -1 + } + return 1 + } + + // Compare minor + if aMinor != bMinor { + if aMinor < bMinor { + return -1 + } + return 1 + } + + // Compare patch + if aPatch != bPatch { + if aPatch < bPatch { + return -1 + } + return 1 + } + + return 0 +} diff --git a/release/version_test.go b/release/version_test.go new file mode 100644 index 0000000..b170a98 --- /dev/null +++ b/release/version_test.go @@ -0,0 +1,520 @@ +package release + +import ( + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// setupGitRepo creates a temporary directory with an initialized git repository. +func setupGitRepo(t *testing.T) string { + t.Helper() + dir := t.TempDir() + + // Initialize git repo + cmd := exec.Command("git", "init") + cmd.Dir = dir + require.NoError(t, cmd.Run()) + + // Configure git user for commits + cmd = exec.Command("git", "config", "user.email", "test@example.com") + cmd.Dir = dir + require.NoError(t, cmd.Run()) + + cmd = exec.Command("git", "config", "user.name", "Test User") + cmd.Dir = dir + require.NoError(t, cmd.Run()) + + return dir +} + +// createCommit creates a commit in the given directory. +func createCommit(t *testing.T, dir, message string) { + t.Helper() + + // Create or modify a file + filePath := filepath.Join(dir, "test.txt") + content, _ := os.ReadFile(filePath) + content = append(content, []byte(message+"\n")...) 
+ require.NoError(t, os.WriteFile(filePath, content, 0644)) + + // Stage and commit + cmd := exec.Command("git", "add", ".") + cmd.Dir = dir + require.NoError(t, cmd.Run()) + + cmd = exec.Command("git", "commit", "-m", message) + cmd.Dir = dir + require.NoError(t, cmd.Run()) +} + +// createTag creates a tag in the given directory. +func createTag(t *testing.T, dir, tag string) { + t.Helper() + cmd := exec.Command("git", "tag", tag) + cmd.Dir = dir + require.NoError(t, cmd.Run()) +} + +func TestDetermineVersion_Good(t *testing.T) { + t.Run("returns tag when HEAD has tag", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + createTag(t, dir, "v1.0.0") + + version, err := DetermineVersion(dir) + require.NoError(t, err) + assert.Equal(t, "v1.0.0", version) + }) + + t.Run("normalizes tag without v prefix", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + createTag(t, dir, "1.0.0") + + version, err := DetermineVersion(dir) + require.NoError(t, err) + assert.Equal(t, "v1.0.0", version) + }) + + t.Run("increments patch when commits after tag", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + createTag(t, dir, "v1.0.0") + createCommit(t, dir, "feat: new feature") + + version, err := DetermineVersion(dir) + require.NoError(t, err) + assert.Equal(t, "v1.0.1", version) + }) + + t.Run("returns v0.0.1 when no tags exist", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + + version, err := DetermineVersion(dir) + require.NoError(t, err) + assert.Equal(t, "v0.0.1", version) + }) + + t.Run("handles multiple tags with increments", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: first") + createTag(t, dir, "v1.0.0") + createCommit(t, dir, "feat: second") + createTag(t, dir, "v1.0.1") + createCommit(t, dir, "feat: third") + + version, err := DetermineVersion(dir) + 
require.NoError(t, err) + assert.Equal(t, "v1.0.2", version) + }) +} + +func TestDetermineVersion_Bad(t *testing.T) { + t.Run("returns v0.0.1 for empty repo", func(t *testing.T) { + dir := setupGitRepo(t) + + // No commits, git describe will fail + version, err := DetermineVersion(dir) + require.NoError(t, err) + assert.Equal(t, "v0.0.1", version) + }) +} + +func TestGetTagOnHead_Good(t *testing.T) { + t.Run("returns tag when HEAD has tag", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + createTag(t, dir, "v1.2.3") + + tag, err := getTagOnHead(dir) + require.NoError(t, err) + assert.Equal(t, "v1.2.3", tag) + }) + + t.Run("returns latest tag when multiple tags on HEAD", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + createTag(t, dir, "v1.0.0") + createTag(t, dir, "v1.0.0-beta") + + tag, err := getTagOnHead(dir) + require.NoError(t, err) + // Git returns one of the tags + assert.Contains(t, []string{"v1.0.0", "v1.0.0-beta"}, tag) + }) +} + +func TestGetTagOnHead_Bad(t *testing.T) { + t.Run("returns error when HEAD has no tag", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + + _, err := getTagOnHead(dir) + assert.Error(t, err) + }) + + t.Run("returns error when commits after tag", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + createTag(t, dir, "v1.0.0") + createCommit(t, dir, "feat: new feature") + + _, err := getTagOnHead(dir) + assert.Error(t, err) + }) +} + +func TestGetLatestTag_Good(t *testing.T) { + t.Run("returns latest tag", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + createTag(t, dir, "v1.0.0") + + tag, err := getLatestTag(dir) + require.NoError(t, err) + assert.Equal(t, "v1.0.0", tag) + }) + + t.Run("returns most recent tag after multiple commits", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, 
"feat: first") + createTag(t, dir, "v1.0.0") + createCommit(t, dir, "feat: second") + createTag(t, dir, "v1.1.0") + createCommit(t, dir, "feat: third") + + tag, err := getLatestTag(dir) + require.NoError(t, err) + assert.Equal(t, "v1.1.0", tag) + }) +} + +func TestGetLatestTag_Bad(t *testing.T) { + t.Run("returns error when no tags exist", func(t *testing.T) { + dir := setupGitRepo(t) + createCommit(t, dir, "feat: initial commit") + + _, err := getLatestTag(dir) + assert.Error(t, err) + }) + + t.Run("returns error for empty repo", func(t *testing.T) { + dir := setupGitRepo(t) + + _, err := getLatestTag(dir) + assert.Error(t, err) + }) +} + +func TestIncrementMinor_Bad(t *testing.T) { + t.Run("returns fallback for invalid version", func(t *testing.T) { + result := IncrementMinor("not-valid") + assert.Equal(t, "not-valid.1", result) + }) +} + +func TestIncrementMajor_Bad(t *testing.T) { + t.Run("returns fallback for invalid version", func(t *testing.T) { + result := IncrementMajor("not-valid") + assert.Equal(t, "not-valid.1", result) + }) +} + +func TestCompareVersions_Ugly(t *testing.T) { + t.Run("handles both invalid versions", func(t *testing.T) { + result := CompareVersions("invalid-a", "invalid-b") + // Should do string comparison for invalid versions + assert.Equal(t, -1, result) // "invalid-a" < "invalid-b" + }) + + t.Run("invalid a returns -1", func(t *testing.T) { + result := CompareVersions("invalid", "v1.0.0") + assert.Equal(t, -1, result) + }) + + t.Run("invalid b returns 1", func(t *testing.T) { + result := CompareVersions("v1.0.0", "invalid") + assert.Equal(t, 1, result) + }) +} + +func TestIncrementVersion_Good(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "increment patch with v prefix", + input: "v1.2.3", + expected: "v1.2.4", + }, + { + name: "increment patch without v prefix", + input: "1.2.3", + expected: "v1.2.4", + }, + { + name: "increment from zero", + input: "v0.0.0", + expected: 
"v0.0.1", + }, + { + name: "strips prerelease", + input: "v1.2.3-alpha", + expected: "v1.2.4", + }, + { + name: "strips build metadata", + input: "v1.2.3+build123", + expected: "v1.2.4", + }, + { + name: "strips prerelease and build", + input: "v1.2.3-beta.1+build456", + expected: "v1.2.4", + }, + { + name: "handles large numbers", + input: "v10.20.99", + expected: "v10.20.100", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := IncrementVersion(tc.input) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestIncrementVersion_Bad(t *testing.T) { + t.Run("invalid semver returns original with suffix", func(t *testing.T) { + result := IncrementVersion("not-a-version") + assert.Equal(t, "not-a-version.1", result) + }) +} + +func TestIncrementMinor_Good(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "increment minor resets patch", + input: "v1.2.3", + expected: "v1.3.0", + }, + { + name: "increment minor from zero", + input: "v1.0.5", + expected: "v1.1.0", + }, + { + name: "handles large numbers", + input: "v5.99.50", + expected: "v5.100.0", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := IncrementMinor(tc.input) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestIncrementMajor_Good(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "increment major resets minor and patch", + input: "v1.2.3", + expected: "v2.0.0", + }, + { + name: "increment major from zero", + input: "v0.5.10", + expected: "v1.0.0", + }, + { + name: "handles large numbers", + input: "v99.50.25", + expected: "v100.0.0", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := IncrementMajor(tc.input) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestParseVersion_Good(t *testing.T) { + tests := []struct { + name string + input string + major int + minor 
int + patch int + prerelease string + build string + }{ + { + name: "simple version with v", + input: "v1.2.3", + major: 1, minor: 2, patch: 3, + }, + { + name: "simple version without v", + input: "1.2.3", + major: 1, minor: 2, patch: 3, + }, + { + name: "with prerelease", + input: "v1.2.3-alpha", + major: 1, minor: 2, patch: 3, + prerelease: "alpha", + }, + { + name: "with prerelease and build", + input: "v1.2.3-beta.1+build.456", + major: 1, minor: 2, patch: 3, + prerelease: "beta.1", + build: "build.456", + }, + { + name: "with build only", + input: "v1.2.3+sha.abc123", + major: 1, minor: 2, patch: 3, + build: "sha.abc123", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + major, minor, patch, prerelease, build, err := ParseVersion(tc.input) + assert.NoError(t, err) + assert.Equal(t, tc.major, major) + assert.Equal(t, tc.minor, minor) + assert.Equal(t, tc.patch, patch) + assert.Equal(t, tc.prerelease, prerelease) + assert.Equal(t, tc.build, build) + }) + } +} + +func TestParseVersion_Bad(t *testing.T) { + tests := []struct { + name string + input string + }{ + {"empty string", ""}, + {"not a version", "not-a-version"}, + {"missing minor", "v1"}, + {"missing patch", "v1.2"}, + {"letters in version", "v1.2.x"}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + _, _, _, _, _, err := ParseVersion(tc.input) + assert.Error(t, err) + }) + } +} + +func TestValidateVersion_Good(t *testing.T) { + validVersions := []string{ + "v1.0.0", + "1.0.0", + "v0.0.1", + "v10.20.30", + "v1.2.3-alpha", + "v1.2.3+build", + "v1.2.3-alpha.1+build.123", + } + + for _, v := range validVersions { + t.Run(v, func(t *testing.T) { + assert.True(t, ValidateVersion(v)) + }) + } +} + +func TestValidateVersion_Bad(t *testing.T) { + invalidVersions := []string{ + "", + "v1", + "v1.2", + "1.2", + "not-a-version", + "v1.2.x", + "version1.0.0", + } + + for _, v := range invalidVersions { + t.Run(v, func(t *testing.T) { + assert.False(t, 
ValidateVersion(v)) + }) + } +} + +func TestCompareVersions_Good(t *testing.T) { + tests := []struct { + name string + a string + b string + expected int + }{ + {"equal versions", "v1.0.0", "v1.0.0", 0}, + {"a less than b major", "v1.0.0", "v2.0.0", -1}, + {"a greater than b major", "v2.0.0", "v1.0.0", 1}, + {"a less than b minor", "v1.1.0", "v1.2.0", -1}, + {"a greater than b minor", "v1.2.0", "v1.1.0", 1}, + {"a less than b patch", "v1.0.1", "v1.0.2", -1}, + {"a greater than b patch", "v1.0.2", "v1.0.1", 1}, + {"with and without v prefix", "v1.0.0", "1.0.0", 0}, + {"different scales", "v1.10.0", "v1.9.0", 1}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := CompareVersions(tc.a, tc.b) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestNormalizeVersion_Good(t *testing.T) { + tests := []struct { + input string + expected string + }{ + {"1.0.0", "v1.0.0"}, + {"v1.0.0", "v1.0.0"}, + {"0.0.1", "v0.0.1"}, + {"v10.20.30", "v10.20.30"}, + } + + for _, tc := range tests { + t.Run(tc.input, func(t *testing.T) { + result := normalizeVersion(tc.input) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/sdk/detect.go b/sdk/detect.go new file mode 100644 index 0000000..2b1b1a8 --- /dev/null +++ b/sdk/detect.go @@ -0,0 +1,78 @@ +package sdk + +import ( + "fmt" + "path/filepath" + "strings" + + coreio "forge.lthn.ai/core/go/pkg/io" +) + +// commonSpecPaths are checked in order when no spec is configured. +var commonSpecPaths = []string{ + "api/openapi.yaml", + "api/openapi.json", + "openapi.yaml", + "openapi.json", + "docs/api.yaml", + "docs/api.json", + "swagger.yaml", + "swagger.json", +} + +// DetectSpec finds the OpenAPI spec file. +// Priority: config path -> common paths -> Laravel Scramble. +func (s *SDK) DetectSpec() (string, error) { + // 1. 
Check configured path + if s.config.Spec != "" { + specPath := filepath.Join(s.projectDir, s.config.Spec) + if coreio.Local.IsFile(specPath) { + return specPath, nil + } + return "", fmt.Errorf("sdk.DetectSpec: configured spec not found: %s", s.config.Spec) + } + + // 2. Check common paths + for _, p := range commonSpecPaths { + specPath := filepath.Join(s.projectDir, p) + if coreio.Local.IsFile(specPath) { + return specPath, nil + } + } + + // 3. Try Laravel Scramble detection + specPath, err := s.detectScramble() + if err == nil { + return specPath, nil + } + + return "", fmt.Errorf("sdk.DetectSpec: no OpenAPI spec found (checked config, common paths, Scramble)") +} + +// detectScramble checks for Laravel Scramble and exports the spec. +func (s *SDK) detectScramble() (string, error) { + composerPath := filepath.Join(s.projectDir, "composer.json") + if !coreio.Local.IsFile(composerPath) { + return "", fmt.Errorf("no composer.json") + } + + // Check for scramble in composer.json + data, err := coreio.Local.Read(composerPath) + if err != nil { + return "", err + } + + // Simple check for scramble package + if !containsScramble(data) { + return "", fmt.Errorf("scramble not found in composer.json") + } + + // TODO: Run php artisan scramble:export + return "", fmt.Errorf("scramble export not implemented") +} + +// containsScramble checks if composer.json includes scramble. 
+func containsScramble(content string) bool { + return strings.Contains(content, "dedoc/scramble") || + strings.Contains(content, "\"scramble\"") +} diff --git a/sdk/detect_test.go b/sdk/detect_test.go new file mode 100644 index 0000000..fef2dbc --- /dev/null +++ b/sdk/detect_test.go @@ -0,0 +1,87 @@ +package sdk + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDetectSpec_Good_ConfigPath(t *testing.T) { + tmpDir := t.TempDir() + specPath := filepath.Join(tmpDir, "api", "spec.yaml") + err := os.MkdirAll(filepath.Dir(specPath), 0755) + require.NoError(t, err) + err = os.WriteFile(specPath, []byte("openapi: 3.0.0"), 0644) + require.NoError(t, err) + + sdk := New(tmpDir, &Config{Spec: "api/spec.yaml"}) + got, err := sdk.DetectSpec() + assert.NoError(t, err) + assert.Equal(t, specPath, got) +} + +func TestDetectSpec_Good_CommonPath(t *testing.T) { + tmpDir := t.TempDir() + specPath := filepath.Join(tmpDir, "openapi.yaml") + err := os.WriteFile(specPath, []byte("openapi: 3.0.0"), 0644) + require.NoError(t, err) + + sdk := New(tmpDir, nil) + got, err := sdk.DetectSpec() + assert.NoError(t, err) + assert.Equal(t, specPath, got) +} + +func TestDetectSpec_Bad_NotFound(t *testing.T) { + tmpDir := t.TempDir() + sdk := New(tmpDir, nil) + _, err := sdk.DetectSpec() + assert.Error(t, err) + assert.Contains(t, err.Error(), "no OpenAPI spec found") +} + +func TestDetectSpec_Bad_ConfigNotFound(t *testing.T) { + tmpDir := t.TempDir() + sdk := New(tmpDir, &Config{Spec: "non-existent.yaml"}) + _, err := sdk.DetectSpec() + assert.Error(t, err) + assert.Contains(t, err.Error(), "configured spec not found") +} + +func TestContainsScramble(t *testing.T) { + tests := []struct { + data string + expected bool + }{ + {`{"require": {"dedoc/scramble": "^0.1"}}`, true}, + {`{"require": {"scramble": "^0.1"}}`, true}, + {`{"require": {"laravel/framework": "^11.0"}}`, false}, + } + + for _, tt := range 
tests { + assert.Equal(t, tt.expected, containsScramble(tt.data)) + } +} + +func TestDetectScramble_Bad(t *testing.T) { + t.Run("no composer.json", func(t *testing.T) { + sdk := New(t.TempDir(), nil) + _, err := sdk.detectScramble() + assert.Error(t, err) + assert.Contains(t, err.Error(), "no composer.json") + }) + + t.Run("no scramble in composer.json", func(t *testing.T) { + tmpDir := t.TempDir() + err := os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{}`), 0644) + require.NoError(t, err) + + sdk := New(tmpDir, nil) + _, err = sdk.detectScramble() + assert.Error(t, err) + assert.Contains(t, err.Error(), "scramble not found") + }) +} diff --git a/sdk/diff.go b/sdk/diff.go new file mode 100644 index 0000000..ebd4f6c --- /dev/null +++ b/sdk/diff.go @@ -0,0 +1,83 @@ +package sdk + +import ( + "fmt" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/oasdiff/oasdiff/checker" + "github.com/oasdiff/oasdiff/diff" + "github.com/oasdiff/oasdiff/load" +) + +// DiffResult holds the result of comparing two OpenAPI specs. +type DiffResult struct { + // Breaking is true if breaking changes were detected. + Breaking bool + // Changes is the list of breaking changes. + Changes []string + // Summary is a human-readable summary. + Summary string +} + +// Diff compares two OpenAPI specs and detects breaking changes. 
+func Diff(basePath, revisionPath string) (*DiffResult, error) { + loader := openapi3.NewLoader() + loader.IsExternalRefsAllowed = true + + // Load specs + baseSpec, err := load.NewSpecInfo(loader, load.NewSource(basePath)) + if err != nil { + return nil, fmt.Errorf("sdk.Diff: failed to load base spec: %w", err) + } + + revSpec, err := load.NewSpecInfo(loader, load.NewSource(revisionPath)) + if err != nil { + return nil, fmt.Errorf("sdk.Diff: failed to load revision spec: %w", err) + } + + // Compute diff with operations sources map for better error reporting + diffResult, operationsSources, err := diff.GetWithOperationsSourcesMap(diff.NewConfig(), baseSpec, revSpec) + if err != nil { + return nil, fmt.Errorf("sdk.Diff: failed to compute diff: %w", err) + } + + // Check for breaking changes + config := checker.NewConfig(checker.GetAllChecks()) + breaks := checker.CheckBackwardCompatibilityUntilLevel( + config, + diffResult, + operationsSources, + checker.ERR, // Only errors (breaking changes) + ) + + // Build result + result := &DiffResult{ + Breaking: len(breaks) > 0, + Changes: make([]string, 0, len(breaks)), + } + + localizer := checker.NewDefaultLocalizer() + for _, b := range breaks { + result.Changes = append(result.Changes, b.GetUncolorizedText(localizer)) + } + + if result.Breaking { + result.Summary = fmt.Sprintf("%d breaking change(s) detected", len(breaks)) + } else { + result.Summary = "No breaking changes" + } + + return result, nil +} + +// DiffExitCode returns the exit code for CI integration. 
+// 0 = no breaking changes, 1 = breaking changes, 2 = error +func DiffExitCode(result *DiffResult, err error) int { + if err != nil { + return 2 + } + if result.Breaking { + return 1 + } + return 0 +} diff --git a/sdk/diff_test.go b/sdk/diff_test.go new file mode 100644 index 0000000..f1b3a20 --- /dev/null +++ b/sdk/diff_test.go @@ -0,0 +1,101 @@ +package sdk + +import ( + "os" + "path/filepath" + "testing" +) + +func TestDiff_Good_NoBreaking(t *testing.T) { + tmpDir := t.TempDir() + + baseSpec := `openapi: "3.0.0" +info: + title: Test API + version: "1.0.0" +paths: + /health: + get: + operationId: getHealth + responses: + "200": + description: OK +` + revSpec := `openapi: "3.0.0" +info: + title: Test API + version: "1.1.0" +paths: + /health: + get: + operationId: getHealth + responses: + "200": + description: OK + /status: + get: + operationId: getStatus + responses: + "200": + description: OK +` + basePath := filepath.Join(tmpDir, "base.yaml") + revPath := filepath.Join(tmpDir, "rev.yaml") + _ = os.WriteFile(basePath, []byte(baseSpec), 0644) + _ = os.WriteFile(revPath, []byte(revSpec), 0644) + + result, err := Diff(basePath, revPath) + if err != nil { + t.Fatalf("Diff failed: %v", err) + } + if result.Breaking { + t.Error("expected no breaking changes for adding endpoint") + } +} + +func TestDiff_Good_Breaking(t *testing.T) { + tmpDir := t.TempDir() + + baseSpec := `openapi: "3.0.0" +info: + title: Test API + version: "1.0.0" +paths: + /health: + get: + operationId: getHealth + responses: + "200": + description: OK + /users: + get: + operationId: getUsers + responses: + "200": + description: OK +` + revSpec := `openapi: "3.0.0" +info: + title: Test API + version: "2.0.0" +paths: + /health: + get: + operationId: getHealth + responses: + "200": + description: OK +` + basePath := filepath.Join(tmpDir, "base.yaml") + revPath := filepath.Join(tmpDir, "rev.yaml") + _ = os.WriteFile(basePath, []byte(baseSpec), 0644) + _ = os.WriteFile(revPath, []byte(revSpec), 0644) + 
+ result, err := Diff(basePath, revPath) + if err != nil { + t.Fatalf("Diff failed: %v", err) + } + if !result.Breaking { + t.Error("expected breaking change for removed endpoint") + } +} diff --git a/sdk/generators/generator.go b/sdk/generators/generator.go new file mode 100644 index 0000000..3a37f2e --- /dev/null +++ b/sdk/generators/generator.go @@ -0,0 +1,79 @@ +// Package generators provides SDK code generators for different languages. +package generators + +import ( + "context" + "fmt" + "os" + "runtime" +) + +// Options holds common generation options. +type Options struct { + // SpecPath is the path to the OpenAPI spec file. + SpecPath string + // OutputDir is where to write the generated SDK. + OutputDir string + // PackageName is the package/module name. + PackageName string + // Version is the SDK version. + Version string +} + +// Generator defines the interface for SDK generators. +type Generator interface { + // Language returns the generator's target language identifier. + Language() string + + // Generate creates SDK from OpenAPI spec. + Generate(ctx context.Context, opts Options) error + + // Available checks if generator dependencies are installed. + Available() bool + + // Install returns instructions for installing the generator. + Install() string +} + +// Registry holds available generators. +type Registry struct { + generators map[string]Generator +} + +// NewRegistry creates a registry with all available generators. +func NewRegistry() *Registry { + r := &Registry{ + generators: make(map[string]Generator), + } + // Generators will be registered in subsequent tasks + return r +} + +// Get returns a generator by language. +func (r *Registry) Get(lang string) (Generator, bool) { + g, ok := r.generators[lang] + return g, ok +} + +// Register adds a generator to the registry. +func (r *Registry) Register(g Generator) { + r.generators[g.Language()] = g +} + +// Languages returns all registered language identifiers. 
+func (r *Registry) Languages() []string { + langs := make([]string, 0, len(r.generators)) + for lang := range r.generators { + langs = append(langs, lang) + } + return langs +} + +// dockerUserArgs returns Docker --user args for the current user on Unix systems. +// On Windows, Docker handles permissions differently, so no args are returned. +func dockerUserArgs() []string { + if runtime.GOOS == "windows" { + return nil + } + return []string{"--user", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid())} +} diff --git a/sdk/generators/go.go b/sdk/generators/go.go new file mode 100644 index 0000000..b772063 --- /dev/null +++ b/sdk/generators/go.go @@ -0,0 +1,90 @@ +package generators + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + + coreio "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/log" +) + +// GoGenerator generates Go SDKs from OpenAPI specs. +type GoGenerator struct{} + +// NewGoGenerator creates a new Go generator. +func NewGoGenerator() *GoGenerator { + return &GoGenerator{} +} + +// Language returns the generator's target language identifier. +func (g *GoGenerator) Language() string { + return "go" +} + +// Available checks if generator dependencies are installed. +func (g *GoGenerator) Available() bool { + _, err := exec.LookPath("oapi-codegen") + return err == nil +} + +// Install returns instructions for installing the generator. +func (g *GoGenerator) Install() string { + return "go install github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@latest" +} + +// Generate creates SDK from OpenAPI spec. 
+func (g *GoGenerator) Generate(ctx context.Context, opts Options) error { + if err := coreio.Local.EnsureDir(opts.OutputDir); err != nil { + return log.E("go.Generate", "failed to create output dir", err) + } + + if g.Available() { + return g.generateNative(ctx, opts) + } + return g.generateDocker(ctx, opts) +} + +func (g *GoGenerator) generateNative(ctx context.Context, opts Options) error { + outputFile := filepath.Join(opts.OutputDir, "client.go") + + cmd := exec.CommandContext(ctx, "oapi-codegen", + "-package", opts.PackageName, + "-generate", "types,client", + "-o", outputFile, + opts.SpecPath, + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return log.E("go.generateNative", "oapi-codegen failed", err) + } + + goMod := fmt.Sprintf("module %s\n\ngo 1.21\n", opts.PackageName) + return coreio.Local.Write(filepath.Join(opts.OutputDir, "go.mod"), goMod) +} + +func (g *GoGenerator) generateDocker(ctx context.Context, opts Options) error { + specDir := filepath.Dir(opts.SpecPath) + specName := filepath.Base(opts.SpecPath) + + args := []string{"run", "--rm"} + args = append(args, dockerUserArgs()...) + args = append(args, + "-v", specDir+":/spec", + "-v", opts.OutputDir+":/out", + "openapitools/openapi-generator-cli", "generate", + "-i", "/spec/"+specName, + "-g", "go", + "-o", "/out", + "--additional-properties=packageName="+opts.PackageName, + ) + + cmd := exec.CommandContext(ctx, "docker", args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/sdk/generators/go_test.go b/sdk/generators/go_test.go new file mode 100644 index 0000000..708b7dd --- /dev/null +++ b/sdk/generators/go_test.go @@ -0,0 +1,58 @@ +package generators + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" +) + +func TestGoGenerator_Good_Available(t *testing.T) { + g := NewGoGenerator() + + // These should not panic + lang := g.Language() + if lang != "go" { + t.Errorf("expected language 'go', got '%s'", lang) + } + + _ = g.Available() + + install := g.Install() + if install == "" { + t.Error("expected non-empty install instructions") + } +} + +func TestGoGenerator_Good_Generate(t *testing.T) { + g := NewGoGenerator() + if !g.Available() && !dockerAvailable() { + t.Skip("no Go generator available (neither native nor docker)") + } + + // Create temp directories + tmpDir := t.TempDir() + specPath := createTestSpec(t, tmpDir) + outputDir := filepath.Join(tmpDir, "output") + + opts := Options{ + SpecPath: specPath, + OutputDir: outputDir, + PackageName: "testclient", + Version: "1.0.0", + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + err := g.Generate(ctx, opts) + if err != nil { + t.Fatalf("Generate failed: %v", err) + } + + // Verify output directory was created + if _, err := os.Stat(outputDir); os.IsNotExist(err) { + t.Error("output directory was not created") + } +} diff --git a/sdk/generators/php.go b/sdk/generators/php.go new file mode 100644 index 0000000..0c7a569 --- /dev/null +++ b/sdk/generators/php.go @@ -0,0 +1,70 @@ +package generators + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + + coreio "forge.lthn.ai/core/go/pkg/io" +) + +// PHPGenerator generates PHP SDKs from OpenAPI specs. +type PHPGenerator struct{} + +// NewPHPGenerator creates a new PHP generator. 
+func NewPHPGenerator() *PHPGenerator { + return &PHPGenerator{} +} + +// Language returns the generator's target language identifier. +func (g *PHPGenerator) Language() string { + return "php" +} + +// Available checks if generator dependencies are installed. +func (g *PHPGenerator) Available() bool { + _, err := exec.LookPath("docker") + return err == nil +} + +// Install returns instructions for installing the generator. +func (g *PHPGenerator) Install() string { + return "Docker is required for PHP SDK generation" +} + +// Generate creates SDK from OpenAPI spec. +func (g *PHPGenerator) Generate(ctx context.Context, opts Options) error { + if !g.Available() { + return fmt.Errorf("php.Generate: Docker is required but not available") + } + + if err := coreio.Local.EnsureDir(opts.OutputDir); err != nil { + return fmt.Errorf("php.Generate: failed to create output dir: %w", err) + } + + specDir := filepath.Dir(opts.SpecPath) + specName := filepath.Base(opts.SpecPath) + + args := []string{"run", "--rm"} + args = append(args, dockerUserArgs()...) + args = append(args, + "-v", specDir+":/spec", + "-v", opts.OutputDir+":/out", + "openapitools/openapi-generator-cli", "generate", + "-i", "/spec/"+specName, + "-g", "php", + "-o", "/out", + "--additional-properties=invokerPackage="+opts.PackageName, + ) + + cmd := exec.CommandContext(ctx, "docker", args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("php.Generate: %w", err) + } + return nil +} diff --git a/sdk/generators/php_test.go b/sdk/generators/php_test.go new file mode 100644 index 0000000..a3a6e4a --- /dev/null +++ b/sdk/generators/php_test.go @@ -0,0 +1,58 @@ +package generators + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" +) + +func TestPHPGenerator_Good_Available(t *testing.T) { + g := NewPHPGenerator() + + // These should not panic + lang := g.Language() + if lang != "php" { + t.Errorf("expected language 'php', got '%s'", lang) + } + + _ = g.Available() + + install := g.Install() + if install == "" { + t.Error("expected non-empty install instructions") + } +} + +func TestPHPGenerator_Good_Generate(t *testing.T) { + g := NewPHPGenerator() + if !g.Available() { + t.Skip("no PHP generator available (docker not installed)") + } + + // Create temp directories + tmpDir := t.TempDir() + specPath := createTestSpec(t, tmpDir) + outputDir := filepath.Join(tmpDir, "output") + + opts := Options{ + SpecPath: specPath, + OutputDir: outputDir, + PackageName: "TestClient", + Version: "1.0.0", + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + err := g.Generate(ctx, opts) + if err != nil { + t.Fatalf("Generate failed: %v", err) + } + + // Verify output directory was created + if _, err := os.Stat(outputDir); os.IsNotExist(err) { + t.Error("output directory was not created") + } +} diff --git a/sdk/generators/python.go b/sdk/generators/python.go new file mode 100644 index 0000000..e14df5e --- /dev/null +++ b/sdk/generators/python.go @@ -0,0 +1,82 @@ +package generators + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + + coreio "forge.lthn.ai/core/go/pkg/io" +) + +// PythonGenerator generates Python SDKs from OpenAPI specs. +type PythonGenerator struct{} + +// NewPythonGenerator creates a new Python generator. 
+func NewPythonGenerator() *PythonGenerator { + return &PythonGenerator{} +} + +// Language returns the generator's target language identifier. +func (g *PythonGenerator) Language() string { + return "python" +} + +// Available checks if generator dependencies are installed. +func (g *PythonGenerator) Available() bool { + _, err := exec.LookPath("openapi-python-client") + return err == nil +} + +// Install returns instructions for installing the generator. +func (g *PythonGenerator) Install() string { + return "pip install openapi-python-client" +} + +// Generate creates SDK from OpenAPI spec. +func (g *PythonGenerator) Generate(ctx context.Context, opts Options) error { + if err := coreio.Local.EnsureDir(opts.OutputDir); err != nil { + return fmt.Errorf("python.Generate: failed to create output dir: %w", err) + } + + if g.Available() { + return g.generateNative(ctx, opts) + } + return g.generateDocker(ctx, opts) +} + +func (g *PythonGenerator) generateNative(ctx context.Context, opts Options) error { + parentDir := filepath.Dir(opts.OutputDir) + + cmd := exec.CommandContext(ctx, "openapi-python-client", "generate", + "--path", opts.SpecPath, + "--output-path", opts.OutputDir, + ) + cmd.Dir = parentDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func (g *PythonGenerator) generateDocker(ctx context.Context, opts Options) error { + specDir := filepath.Dir(opts.SpecPath) + specName := filepath.Base(opts.SpecPath) + + args := []string{"run", "--rm"} + args = append(args, dockerUserArgs()...) + args = append(args, + "-v", specDir+":/spec", + "-v", opts.OutputDir+":/out", + "openapitools/openapi-generator-cli", "generate", + "-i", "/spec/"+specName, + "-g", "python", + "-o", "/out", + "--additional-properties=packageName="+opts.PackageName, + ) + + cmd := exec.CommandContext(ctx, "docker", args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/sdk/generators/python_test.go b/sdk/generators/python_test.go new file mode 100644 index 0000000..5b03a76 --- /dev/null +++ b/sdk/generators/python_test.go @@ -0,0 +1,58 @@ +package generators + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" +) + +func TestPythonGenerator_Good_Available(t *testing.T) { + g := NewPythonGenerator() + + // These should not panic + lang := g.Language() + if lang != "python" { + t.Errorf("expected language 'python', got '%s'", lang) + } + + _ = g.Available() + + install := g.Install() + if install == "" { + t.Error("expected non-empty install instructions") + } +} + +func TestPythonGenerator_Good_Generate(t *testing.T) { + g := NewPythonGenerator() + if !g.Available() && !dockerAvailable() { + t.Skip("no Python generator available (neither native nor docker)") + } + + // Create temp directories + tmpDir := t.TempDir() + specPath := createTestSpec(t, tmpDir) + outputDir := filepath.Join(tmpDir, "output") + + opts := Options{ + SpecPath: specPath, + OutputDir: outputDir, + PackageName: "testclient", + Version: "1.0.0", + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + err := g.Generate(ctx, opts) + if err != nil { + t.Fatalf("Generate failed: %v", err) + } + + // Verify output directory was created + if _, err := os.Stat(outputDir); os.IsNotExist(err) { + t.Error("output directory was not created") + } +} diff --git a/sdk/generators/typescript.go b/sdk/generators/typescript.go new file mode 100644 index 0000000..b177c98 --- /dev/null +++ b/sdk/generators/typescript.go @@ -0,0 +1,112 @@ +package generators + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + + coreio "forge.lthn.ai/core/go/pkg/io" +) + +// TypeScriptGenerator generates TypeScript SDKs from OpenAPI specs. 
+type TypeScriptGenerator struct{} + +// NewTypeScriptGenerator creates a new TypeScript generator. +func NewTypeScriptGenerator() *TypeScriptGenerator { + return &TypeScriptGenerator{} +} + +// Language returns the generator's target language identifier. +func (g *TypeScriptGenerator) Language() string { + return "typescript" +} + +// Available checks if generator dependencies are installed. +func (g *TypeScriptGenerator) Available() bool { + _, err := exec.LookPath("openapi-typescript-codegen") + if err == nil { + return true + } + _, err = exec.LookPath("npx") + return err == nil +} + +// Install returns instructions for installing the generator. +func (g *TypeScriptGenerator) Install() string { + return "npm install -g openapi-typescript-codegen" +} + +// Generate creates SDK from OpenAPI spec. +func (g *TypeScriptGenerator) Generate(ctx context.Context, opts Options) error { + if err := coreio.Local.EnsureDir(opts.OutputDir); err != nil { + return fmt.Errorf("typescript.Generate: failed to create output dir: %w", err) + } + + if g.nativeAvailable() { + return g.generateNative(ctx, opts) + } + if g.npxAvailable() { + return g.generateNpx(ctx, opts) + } + return g.generateDocker(ctx, opts) +} + +func (g *TypeScriptGenerator) nativeAvailable() bool { + _, err := exec.LookPath("openapi-typescript-codegen") + return err == nil +} + +func (g *TypeScriptGenerator) npxAvailable() bool { + _, err := exec.LookPath("npx") + return err == nil +} + +func (g *TypeScriptGenerator) generateNative(ctx context.Context, opts Options) error { + cmd := exec.CommandContext(ctx, "openapi-typescript-codegen", + "--input", opts.SpecPath, + "--output", opts.OutputDir, + "--name", opts.PackageName, + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func (g *TypeScriptGenerator) generateNpx(ctx context.Context, opts Options) error { + cmd := exec.CommandContext(ctx, "npx", "openapi-typescript-codegen", + "--input", opts.SpecPath, + "--output", opts.OutputDir, 
+ "--name", opts.PackageName, + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func (g *TypeScriptGenerator) generateDocker(ctx context.Context, opts Options) error { + specDir := filepath.Dir(opts.SpecPath) + specName := filepath.Base(opts.SpecPath) + + args := []string{"run", "--rm"} + args = append(args, dockerUserArgs()...) + args = append(args, + "-v", specDir+":/spec", + "-v", opts.OutputDir+":/out", + "openapitools/openapi-generator-cli", "generate", + "-i", "/spec/"+specName, + "-g", "typescript-fetch", + "-o", "/out", + "--additional-properties=npmName="+opts.PackageName, + ) + + cmd := exec.CommandContext(ctx, "docker", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("typescript.generateDocker: %w", err) + } + return nil +} diff --git a/sdk/generators/typescript_test.go b/sdk/generators/typescript_test.go new file mode 100644 index 0000000..3a40443 --- /dev/null +++ b/sdk/generators/typescript_test.go @@ -0,0 +1,87 @@ +package generators + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "testing" + "time" +) + +// dockerAvailable checks if docker is available for fallback generation. +func dockerAvailable() bool { + _, err := exec.LookPath("docker") + return err == nil +} + +// createTestSpec creates a minimal OpenAPI spec for testing. 
func createTestSpec(t *testing.T, dir string) string {
	t.Helper()
	// Minimal but valid 3.0 spec: one GET endpoint with a 200 response.
	spec := `openapi: "3.0.0"
info:
  title: Test API
  version: "1.0.0"
paths:
  /health:
    get:
      summary: Health check
      responses:
        "200":
          description: OK
`
	specPath := filepath.Join(dir, "openapi.yaml")
	if err := os.WriteFile(specPath, []byte(spec), 0644); err != nil {
		t.Fatalf("failed to write test spec: %v", err)
	}
	return specPath
}

// TestTypeScriptGenerator_Good_Available checks the metadata accessors.
func TestTypeScriptGenerator_Good_Available(t *testing.T) {
	g := NewTypeScriptGenerator()

	// These should not panic
	lang := g.Language()
	if lang != "typescript" {
		t.Errorf("expected language 'typescript', got '%s'", lang)
	}

	_ = g.Available()

	install := g.Install()
	if install == "" {
		t.Error("expected non-empty install instructions")
	}
}

// TestTypeScriptGenerator_Good_Generate runs a real generation; skipped
// when neither a native toolchain nor docker is installed.
func TestTypeScriptGenerator_Good_Generate(t *testing.T) {
	g := NewTypeScriptGenerator()
	if !g.Available() && !dockerAvailable() {
		t.Skip("no TypeScript generator available (neither native nor docker)")
	}

	// Create temp directories
	tmpDir := t.TempDir()
	specPath := createTestSpec(t, tmpDir)
	outputDir := filepath.Join(tmpDir, "output")

	opts := Options{
		SpecPath:    specPath,
		OutputDir:   outputDir,
		PackageName: "testclient",
		Version:     "1.0.0",
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	err := g.Generate(ctx, opts)
	if err != nil {
		t.Fatalf("Generate failed: %v", err)
	}

	// Verify output directory was created
	if _, err := os.Stat(outputDir); os.IsNotExist(err) {
		t.Error("output directory was not created")
	}
}
diff --git a/sdk/sdk.go b/sdk/sdk.go
new file mode 100644
index 0000000..c94b5ed
--- /dev/null
+++ b/sdk/sdk.go
@@ -0,0 +1,141 @@
// Package sdk provides OpenAPI SDK generation and diff capabilities.
package sdk

import (
	"context"
	"fmt"
	"path/filepath"

	"forge.lthn.ai/core/go-devops/sdk/generators"
)

// Config holds SDK generation configuration from .core/release.yaml.
type Config struct {
	// Spec is the path to the OpenAPI spec file (auto-detected if empty).
	Spec string `yaml:"spec,omitempty"`
	// Languages to generate SDKs for.
	Languages []string `yaml:"languages,omitempty"`
	// Output directory (default: sdk/).
	Output string `yaml:"output,omitempty"`
	// Package naming configuration.
	Package PackageConfig `yaml:"package,omitempty"`
	// Diff configuration for breaking change detection.
	Diff DiffConfig `yaml:"diff,omitempty"`
	// Publish configuration for monorepo publishing.
	Publish PublishConfig `yaml:"publish,omitempty"`
}

// PackageConfig holds package naming configuration.
type PackageConfig struct {
	// Name is the base package name.
	Name string `yaml:"name,omitempty"`
	// Version is the SDK version (supports templates like {{.Version}}).
	Version string `yaml:"version,omitempty"`
}

// DiffConfig holds breaking change detection configuration.
type DiffConfig struct {
	// Enabled determines whether to run diff checks.
	Enabled bool `yaml:"enabled,omitempty"`
	// FailOnBreaking fails the release if breaking changes are detected.
	FailOnBreaking bool `yaml:"fail_on_breaking,omitempty"`
}

// PublishConfig holds monorepo publishing configuration.
type PublishConfig struct {
	// Repo is the SDK monorepo (e.g., "myorg/sdks").
	Repo string `yaml:"repo,omitempty"`
	// Path is the subdirectory for this SDK (e.g., "packages/myapi").
	Path string `yaml:"path,omitempty"`
}

// SDK orchestrates OpenAPI SDK generation.
type SDK struct {
	config     *Config // generation settings; never nil after New
	projectDir string  // project root that output paths are joined against
	version    string  // version set via SetVersion
}

// New creates a new SDK instance.
+func New(projectDir string, config *Config) *SDK { + if config == nil { + config = DefaultConfig() + } + return &SDK{ + config: config, + projectDir: projectDir, + } +} + +// SetVersion sets the SDK version for generation. +// This updates both the internal version field and the config's Package.Version. +func (s *SDK) SetVersion(version string) { + s.version = version + if s.config != nil { + s.config.Package.Version = version + } +} + +// DefaultConfig returns sensible defaults for SDK configuration. +func DefaultConfig() *Config { + return &Config{ + Languages: []string{"typescript", "python", "go", "php"}, + Output: "sdk", + Diff: DiffConfig{ + Enabled: true, + FailOnBreaking: false, + }, + } +} + +// Generate generates SDKs for all configured languages. +func (s *SDK) Generate(ctx context.Context) error { + // Generate for each language + for _, lang := range s.config.Languages { + if err := s.GenerateLanguage(ctx, lang); err != nil { + return err + } + } + + return nil +} + +// GenerateLanguage generates SDK for a specific language. +func (s *SDK) GenerateLanguage(ctx context.Context, lang string) error { + specPath, err := s.DetectSpec() + if err != nil { + return err + } + + registry := generators.NewRegistry() + registry.Register(generators.NewTypeScriptGenerator()) + registry.Register(generators.NewPythonGenerator()) + registry.Register(generators.NewGoGenerator()) + registry.Register(generators.NewPHPGenerator()) + + gen, ok := registry.Get(lang) + if !ok { + return fmt.Errorf("sdk.GenerateLanguage: unknown language: %s", lang) + } + + if !gen.Available() { + fmt.Printf("Warning: %s generator not available. 
Install with: %s\n", lang, gen.Install()) + fmt.Printf("Falling back to Docker...\n") + } + + outputDir := filepath.Join(s.projectDir, s.config.Output, lang) + opts := generators.Options{ + SpecPath: specPath, + OutputDir: outputDir, + PackageName: s.config.Package.Name, + Version: s.config.Package.Version, + } + + fmt.Printf("Generating %s SDK...\n", lang) + if err := gen.Generate(ctx, opts); err != nil { + return fmt.Errorf("sdk.GenerateLanguage: %s generation failed: %w", lang, err) + } + fmt.Printf("Generated %s SDK at %s\n", lang, outputDir) + + return nil +} diff --git a/sdk/sdk_test.go b/sdk/sdk_test.go new file mode 100644 index 0000000..ced3b91 --- /dev/null +++ b/sdk/sdk_test.go @@ -0,0 +1,77 @@ +package sdk + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSDK_Good_SetVersion(t *testing.T) { + s := New("/tmp", nil) + s.SetVersion("v1.2.3") + + assert.Equal(t, "v1.2.3", s.version) +} + +func TestSDK_Good_VersionPassedToGenerator(t *testing.T) { + config := &Config{ + Languages: []string{"typescript"}, + Output: "sdk", + Package: PackageConfig{ + Name: "test-sdk", + }, + } + s := New("/tmp", config) + s.SetVersion("v2.0.0") + + assert.Equal(t, "v2.0.0", s.config.Package.Version) +} + +func TestDefaultConfig(t *testing.T) { + cfg := DefaultConfig() + assert.Contains(t, cfg.Languages, "typescript") + assert.Equal(t, "sdk", cfg.Output) + assert.True(t, cfg.Diff.Enabled) +} + +func TestSDK_New(t *testing.T) { + t.Run("with nil config", func(t *testing.T) { + s := New("/tmp", nil) + assert.NotNil(t, s.config) + assert.Equal(t, "sdk", s.config.Output) + }) + + t.Run("with custom config", func(t *testing.T) { + cfg := &Config{Output: "custom"} + s := New("/tmp", cfg) + assert.Equal(t, "custom", s.config.Output) + }) +} + +func TestSDK_GenerateLanguage_Bad(t *testing.T) { + + t.Run("unknown language", func(t *testing.T) { + + tmpDir := t.TempDir() + + 
specPath := filepath.Join(tmpDir, "openapi.yaml") + + err := os.WriteFile(specPath, []byte("openapi: 3.0.0"), 0644) + + require.NoError(t, err) + + s := New(tmpDir, nil) + + err = s.GenerateLanguage(context.Background(), "invalid-lang") + + assert.Error(t, err) + + assert.Contains(t, err.Error(), "unknown language") + + }) + +}