refactor: move cmd/ packages to their respective repos
- cmd/vm/ → go-container/cmd/vm/ - cmd/prod/ → go-infra/cmd/prod/ - cmd/monitor/ → go-infra/cmd/monitor/ - cmd/qa/ → lint/cmd/qa/ - cmd/deploy/cmd_ansible.go → go-ansible/cmd/ansible/ Each repo now owns its CLI commands alongside its library code. Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
0737b83196
commit
f293e71a04
20 changed files with 0 additions and 4467 deletions
|
|
@ -1,311 +0,0 @@
|
|||
package deploy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-ansible"
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
)
|
||||
|
||||
// Command-line flags for the "ansible" command, bound in init().
var (
	ansibleInventory string   // -i: inventory file or directory
	ansibleLimit     string   // -l: restrict run to matching hosts
	ansibleTags      string   // -t: only run plays/tasks with these tags (comma-separated)
	ansibleSkipTags  string   // --skip-tags: skip plays/tasks with these tags
	ansibleVars      []string // -e: extra variables as "key=value" entries
	ansibleVerbose   int      // -v: verbosity level (repeatable count flag)
	ansibleCheck     bool     // --check: dry-run mode, make no changes
)
|
||||
|
||||
// ansibleCmd runs an Ansible playbook through the pure-Go executor.
// The single positional argument is the playbook path; see runAnsible.
var ansibleCmd = &cli.Command{
	Use:   "ansible <playbook>",
	Short: "Run Ansible playbooks natively (no Python required)",
	Long: `Execute Ansible playbooks using a pure Go implementation.

This command parses Ansible YAML playbooks and executes them natively,
without requiring Python or ansible-playbook to be installed.

Supported modules:
  - shell, command, raw, script
  - copy, template, file, lineinfile, stat, slurp, fetch, get_url
  - apt, apt_key, apt_repository, package, pip
  - service, systemd
  - user, group
  - uri, wait_for, git, unarchive
  - debug, fail, assert, set_fact, pause

Examples:
  core deploy ansible playbooks/coolify/create.yml -i inventory/
  core deploy ansible site.yml -l production
  core deploy ansible deploy.yml -e "version=1.2.3" -e "env=prod"`,
	Args: cli.ExactArgs(1),
	RunE: runAnsible,
}
|
||||
|
||||
// ansibleTestCmd checks SSH connectivity to a single host and prints
// basic gathered facts; see runAnsibleTest.
var ansibleTestCmd = &cli.Command{
	Use:   "test <host>",
	Short: "Test SSH connectivity to a host",
	Long: `Test SSH connection and gather facts from a host.

Examples:
  core deploy ansible test linux.snider.dev -u claude -p claude
  core deploy ansible test server.example.com -i ~/.ssh/id_rsa`,
	Args: cli.ExactArgs(1),
	RunE: runAnsibleTest,
}
|
||||
|
||||
// Command-line flags for the "ansible test" subcommand.
var (
	testUser     string // -u: SSH user (default "root")
	testPassword string // -p: SSH password (empty means key-based auth)
	testKeyFile  string // -i: SSH private key file
	testPort     int    // --port: SSH port (default 22)
)
|
||||
|
||||
// init wires flags onto the ansible commands and attaches them to the
// package-level deploy command (Cmd).
func init() {
	// ansible command flags
	ansibleCmd.Flags().StringVarP(&ansibleInventory, "inventory", "i", "", "Inventory file or directory")
	ansibleCmd.Flags().StringVarP(&ansibleLimit, "limit", "l", "", "Limit to specific hosts")
	ansibleCmd.Flags().StringVarP(&ansibleTags, "tags", "t", "", "Only run plays and tasks tagged with these values")
	ansibleCmd.Flags().StringVar(&ansibleSkipTags, "skip-tags", "", "Skip plays and tasks tagged with these values")
	ansibleCmd.Flags().StringArrayVarP(&ansibleVars, "extra-vars", "e", nil, "Set additional variables (key=value)")
	ansibleCmd.Flags().CountVarP(&ansibleVerbose, "verbose", "v", "Increase verbosity")
	ansibleCmd.Flags().BoolVar(&ansibleCheck, "check", false, "Don't make any changes (dry run)")

	// test command flags. Note -i means "key file" here, mirroring ssh(1);
	// it does not clash with the parent because the parent's -i is local.
	ansibleTestCmd.Flags().StringVarP(&testUser, "user", "u", "root", "SSH user")
	ansibleTestCmd.Flags().StringVarP(&testPassword, "password", "p", "", "SSH password")
	ansibleTestCmd.Flags().StringVarP(&testKeyFile, "key", "i", "", "SSH private key file")
	ansibleTestCmd.Flags().IntVar(&testPort, "port", 22, "SSH port")

	// Add subcommands
	ansibleCmd.AddCommand(ansibleTestCmd)
	Cmd.AddCommand(ansibleCmd)
}
|
||||
|
||||
func runAnsible(cmd *cli.Command, args []string) error {
|
||||
playbookPath := args[0]
|
||||
|
||||
// Resolve playbook path
|
||||
if !filepath.IsAbs(playbookPath) {
|
||||
cwd, _ := os.Getwd()
|
||||
playbookPath = filepath.Join(cwd, playbookPath)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(playbookPath); os.IsNotExist(err) {
|
||||
return fmt.Errorf("playbook not found: %s", playbookPath)
|
||||
}
|
||||
|
||||
// Create executor
|
||||
basePath := filepath.Dir(playbookPath)
|
||||
executor := ansible.NewExecutor(basePath)
|
||||
defer executor.Close()
|
||||
|
||||
// Set options
|
||||
executor.Limit = ansibleLimit
|
||||
executor.CheckMode = ansibleCheck
|
||||
executor.Verbose = ansibleVerbose
|
||||
|
||||
if ansibleTags != "" {
|
||||
executor.Tags = strings.Split(ansibleTags, ",")
|
||||
}
|
||||
if ansibleSkipTags != "" {
|
||||
executor.SkipTags = strings.Split(ansibleSkipTags, ",")
|
||||
}
|
||||
|
||||
// Parse extra vars
|
||||
for _, v := range ansibleVars {
|
||||
parts := strings.SplitN(v, "=", 2)
|
||||
if len(parts) == 2 {
|
||||
executor.SetVar(parts[0], parts[1])
|
||||
}
|
||||
}
|
||||
|
||||
// Load inventory
|
||||
if ansibleInventory != "" {
|
||||
invPath := ansibleInventory
|
||||
if !filepath.IsAbs(invPath) {
|
||||
cwd, _ := os.Getwd()
|
||||
invPath = filepath.Join(cwd, invPath)
|
||||
}
|
||||
|
||||
// Check if it's a directory
|
||||
info, err := os.Stat(invPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("inventory not found: %s", invPath)
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
// Look for inventory.yml or hosts.yml
|
||||
for _, name := range []string{"inventory.yml", "hosts.yml", "inventory.yaml", "hosts.yaml"} {
|
||||
p := filepath.Join(invPath, name)
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
invPath = p
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := executor.SetInventory(invPath); err != nil {
|
||||
return fmt.Errorf("load inventory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set up callbacks
|
||||
executor.OnPlayStart = func(play *ansible.Play) {
|
||||
fmt.Printf("\n%s %s\n", cli.TitleStyle.Render("PLAY"), cli.BoldStyle.Render("["+play.Name+"]"))
|
||||
fmt.Println(strings.Repeat("*", 70))
|
||||
}
|
||||
|
||||
executor.OnTaskStart = func(host string, task *ansible.Task) {
|
||||
taskName := task.Name
|
||||
if taskName == "" {
|
||||
taskName = task.Module
|
||||
}
|
||||
fmt.Printf("\n%s %s\n", cli.TitleStyle.Render("TASK"), cli.BoldStyle.Render("["+taskName+"]"))
|
||||
if ansibleVerbose > 0 {
|
||||
fmt.Printf("%s\n", cli.DimStyle.Render("host: "+host))
|
||||
}
|
||||
}
|
||||
|
||||
executor.OnTaskEnd = func(host string, task *ansible.Task, result *ansible.TaskResult) {
|
||||
status := "ok"
|
||||
style := cli.SuccessStyle
|
||||
|
||||
if result.Failed {
|
||||
status = "failed"
|
||||
style = cli.ErrorStyle
|
||||
} else if result.Skipped {
|
||||
status = "skipping"
|
||||
style = cli.DimStyle
|
||||
} else if result.Changed {
|
||||
status = "changed"
|
||||
style = cli.WarningStyle
|
||||
}
|
||||
|
||||
fmt.Printf("%s: [%s]", style.Render(status), host)
|
||||
if result.Msg != "" && ansibleVerbose > 0 {
|
||||
fmt.Printf(" => %s", result.Msg)
|
||||
}
|
||||
if result.Duration > 0 && ansibleVerbose > 1 {
|
||||
fmt.Printf(" (%s)", result.Duration.Round(time.Millisecond))
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
if result.Failed && result.Stderr != "" {
|
||||
fmt.Printf("%s\n", cli.ErrorStyle.Render(result.Stderr))
|
||||
}
|
||||
|
||||
if ansibleVerbose > 1 {
|
||||
if result.Stdout != "" {
|
||||
fmt.Printf("stdout: %s\n", strings.TrimSpace(result.Stdout))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
executor.OnPlayEnd = func(play *ansible.Play) {
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Run playbook
|
||||
ctx := context.Background()
|
||||
start := time.Now()
|
||||
|
||||
fmt.Printf("%s Running playbook: %s\n", cli.BoldStyle.Render("▶"), playbookPath)
|
||||
|
||||
if err := executor.Run(ctx, playbookPath); err != nil {
|
||||
return fmt.Errorf("playbook failed: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s Playbook completed in %s\n",
|
||||
cli.SuccessStyle.Render("✓"),
|
||||
time.Since(start).Round(time.Millisecond))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runAnsibleTest opens an SSH connection to args[0] using the -u/-p/-i
// credentials and prints basic host facts (hostname, OS, kernel,
// architecture, memory, disk, Docker and Coolify status). Fact probes
// are best-effort: most command errors are deliberately ignored.
func runAnsibleTest(cmd *cli.Command, args []string) error {
	host := args[0]

	fmt.Printf("Testing SSH connection to %s...\n", cli.BoldStyle.Render(host))

	cfg := ansible.SSHConfig{
		Host:     host,
		Port:     testPort,
		User:     testUser,
		Password: testPassword,
		KeyFile:  testKeyFile,
		Timeout:  30 * time.Second,
	}

	client, err := ansible.NewSSHClient(cfg)
	if err != nil {
		return fmt.Errorf("create client: %w", err)
	}
	// Close error is intentionally discarded: best-effort cleanup.
	defer func() { _ = client.Close() }()

	// One 30s budget covers the connect plus all fact-gathering runs.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Test connection
	start := time.Now()
	if err := client.Connect(ctx); err != nil {
		return fmt.Errorf("connect failed: %w", err)
	}
	connectTime := time.Since(start)

	fmt.Printf("%s Connected in %s\n", cli.SuccessStyle.Render("✓"), connectTime.Round(time.Millisecond))

	// Gather facts
	fmt.Println("\nGathering facts...")

	// Hostname (FQDN preferred, short name as fallback)
	stdout, _, _, _ := client.Run(ctx, "hostname -f 2>/dev/null || hostname")
	fmt.Printf(" Hostname: %s\n", cli.BoldStyle.Render(strings.TrimSpace(stdout)))

	// OS (pretty name from os-release, if present)
	stdout, _, _, _ = client.Run(ctx, "cat /etc/os-release 2>/dev/null | grep PRETTY_NAME | cut -d'\"' -f2")
	if stdout != "" {
		fmt.Printf(" OS: %s\n", strings.TrimSpace(stdout))
	}

	// Kernel
	stdout, _, _, _ = client.Run(ctx, "uname -r")
	fmt.Printf(" Kernel: %s\n", strings.TrimSpace(stdout))

	// Architecture
	stdout, _, _, _ = client.Run(ctx, "uname -m")
	fmt.Printf(" Architecture: %s\n", strings.TrimSpace(stdout))

	// Memory (total)
	stdout, _, _, _ = client.Run(ctx, "free -h | grep Mem | awk '{print $2}'")
	fmt.Printf(" Memory: %s\n", strings.TrimSpace(stdout))

	// Disk (root filesystem total/available)
	stdout, _, _, _ = client.Run(ctx, "df -h / | tail -1 | awk '{print $2 \" total, \" $4 \" available\"}'")
	fmt.Printf(" Disk: %s\n", strings.TrimSpace(stdout))

	// Docker: this probe checks err because a non-zero exit means
	// docker is not installed.
	stdout, _, _, err = client.Run(ctx, "docker --version 2>/dev/null")
	if err == nil {
		fmt.Printf(" Docker: %s\n", cli.SuccessStyle.Render(strings.TrimSpace(stdout)))
	} else {
		fmt.Printf(" Docker: %s\n", cli.DimStyle.Render("not installed"))
	}

	// Check if Coolify is running (a container name match in docker ps)
	stdout, _, _, _ = client.Run(ctx, "docker ps 2>/dev/null | grep -q coolify && echo 'running' || echo 'not running'")
	if strings.TrimSpace(stdout) == "running" {
		fmt.Printf(" Coolify: %s\n", cli.SuccessStyle.Render("running"))
	} else {
		fmt.Printf(" Coolify: %s\n", cli.DimStyle.Render("not installed"))
	}

	fmt.Printf("\n%s SSH test passed\n", cli.SuccessStyle.Render("✓"))

	return nil
}
|
||||
|
|
@ -1,47 +0,0 @@
|
|||
// Package monitor provides security monitoring commands.
|
||||
//
|
||||
// Commands:
|
||||
// - monitor: Aggregate security findings from GitHub Security Tab, workflow artifacts, and PR comments
|
||||
//
|
||||
// Data sources (all free tier):
|
||||
// - Code scanning: Semgrep, Trivy, Gitleaks, OSV-Scanner, Checkov, CodeQL
|
||||
// - Dependabot: Dependency vulnerability alerts
|
||||
// - Secret scanning: Exposed secrets/credentials
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
)
|
||||
|
||||
// init registers this package's commands with the shared CLI registry
// so importing the package is enough to expose 'monitor'.
func init() {
	cli.RegisterCommands(AddMonitorCommands)
}
|
||||
|
||||
// Style aliases from shared package
|
||||
var (
|
||||
successStyle = cli.SuccessStyle
|
||||
errorStyle = cli.ErrorStyle
|
||||
warningStyle = cli.WarningStyle
|
||||
dimStyle = cli.DimStyle
|
||||
)
|
||||
|
||||
// AddMonitorCommands registers the 'monitor' command.
|
||||
func AddMonitorCommands(root *cli.Command) {
|
||||
monitorCmd := &cli.Command{
|
||||
Use: "monitor",
|
||||
Short: i18n.T("cmd.monitor.short"),
|
||||
Long: i18n.T("cmd.monitor.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return runMonitor()
|
||||
},
|
||||
}
|
||||
|
||||
// Flags
|
||||
monitorCmd.Flags().StringVarP(&monitorRepo, "repo", "r", "", i18n.T("cmd.monitor.flag.repo"))
|
||||
monitorCmd.Flags().StringSliceVarP(&monitorSeverity, "severity", "s", []string{}, i18n.T("cmd.monitor.flag.severity"))
|
||||
monitorCmd.Flags().BoolVar(&monitorJSON, "json", false, i18n.T("cmd.monitor.flag.json"))
|
||||
monitorCmd.Flags().BoolVar(&monitorAll, "all", false, i18n.T("cmd.monitor.flag.all"))
|
||||
|
||||
root.AddCommand(monitorCmd)
|
||||
}
|
||||
|
|
@ -1,586 +0,0 @@
|
|||
// cmd_monitor.go implements the 'monitor' command for aggregating security findings.
|
||||
//
|
||||
// Usage:
|
||||
// core monitor # Monitor current repo
|
||||
// core monitor --repo X # Monitor specific repo
|
||||
// core monitor --all # Monitor all repos in registry
|
||||
// core monitor --severity high # Filter by severity
|
||||
// core monitor --json # Output as JSON
|
||||
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"os/exec"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-scm/repos"
|
||||
)
|
||||
|
||||
// Command flags
|
||||
var (
|
||||
monitorRepo string
|
||||
monitorSeverity []string
|
||||
monitorJSON bool
|
||||
monitorAll bool
|
||||
)
|
||||
|
||||
// Finding represents a security finding from any source
|
||||
type Finding struct {
|
||||
Source string `json:"source"` // semgrep, trivy, dependabot, secret-scanning, etc.
|
||||
Severity string `json:"severity"` // critical, high, medium, low
|
||||
Rule string `json:"rule"` // Rule ID or CVE
|
||||
File string `json:"file"` // Affected file path
|
||||
Line int `json:"line"` // Line number (0 if N/A)
|
||||
Message string `json:"message"` // Description
|
||||
URL string `json:"url"` // Link to finding
|
||||
State string `json:"state"` // open, dismissed, fixed
|
||||
RepoName string `json:"repo"` // Repository name
|
||||
CreatedAt string `json:"created_at"` // When found
|
||||
Labels []string `json:"suggested_labels,omitempty"`
|
||||
}
|
||||
|
||||
// CodeScanningAlert represents a GitHub code scanning alert
|
||||
type CodeScanningAlert struct {
|
||||
Number int `json:"number"`
|
||||
State string `json:"state"` // open, dismissed, fixed
|
||||
Rule struct {
|
||||
ID string `json:"id"`
|
||||
Severity string `json:"severity"`
|
||||
Description string `json:"description"`
|
||||
} `json:"rule"`
|
||||
Tool struct {
|
||||
Name string `json:"name"`
|
||||
} `json:"tool"`
|
||||
MostRecentInstance struct {
|
||||
Location struct {
|
||||
Path string `json:"path"`
|
||||
StartLine int `json:"start_line"`
|
||||
} `json:"location"`
|
||||
Message struct {
|
||||
Text string `json:"text"`
|
||||
} `json:"message"`
|
||||
} `json:"most_recent_instance"`
|
||||
HTMLURL string `json:"html_url"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
|
||||
// DependabotAlert represents a GitHub Dependabot alert
|
||||
type DependabotAlert struct {
|
||||
Number int `json:"number"`
|
||||
State string `json:"state"` // open, dismissed, fixed
|
||||
SecurityVulnerability struct {
|
||||
Severity string `json:"severity"`
|
||||
Package struct {
|
||||
Name string `json:"name"`
|
||||
Ecosystem string `json:"ecosystem"`
|
||||
} `json:"package"`
|
||||
} `json:"security_vulnerability"`
|
||||
SecurityAdvisory struct {
|
||||
CVEID string `json:"cve_id"`
|
||||
Summary string `json:"summary"`
|
||||
Description string `json:"description"`
|
||||
} `json:"security_advisory"`
|
||||
Dependency struct {
|
||||
ManifestPath string `json:"manifest_path"`
|
||||
} `json:"dependency"`
|
||||
HTMLURL string `json:"html_url"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
|
||||
// SecretScanningAlert represents a GitHub secret scanning alert
|
||||
type SecretScanningAlert struct {
|
||||
Number int `json:"number"`
|
||||
State string `json:"state"` // open, resolved
|
||||
SecretType string `json:"secret_type"`
|
||||
Secret string `json:"secret"` // Partial, redacted
|
||||
HTMLURL string `json:"html_url"`
|
||||
LocationType string `json:"location_type"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
|
||||
// runMonitor aggregates open security findings for the selected repos
// (current repo by default, --repo, or --all from the registry) and
// prints them as a table or, with --json, as JSON. Per-repo fetch
// failures are shown as warnings rather than aborting the scan.
func runMonitor() error {
	// The GitHub CLI performs the API calls; bail out early if absent.
	if _, err := exec.LookPath("gh"); err != nil {
		return log.E("monitor", i18n.T("error.gh_not_found"), err)
	}

	// Determine repos to scan
	repoList, err := resolveRepos()
	if err != nil {
		return err
	}

	if len(repoList) == 0 {
		return log.E("monitor", i18n.T("cmd.monitor.error.no_repos"), nil)
	}

	// Collect all findings and errors
	var allFindings []Finding
	var fetchErrors []string
	for _, repo := range repoList {
		if !monitorJSON {
			// "\033[2K\r" erases the terminal line so the progress
			// message overwrites itself for each repo.
			cli.Print("\033[2K\r%s %s...", dimStyle.Render(i18n.T("cmd.monitor.scanning")), repo)
		}

		findings, errs := fetchRepoFindings(repo)
		allFindings = append(allFindings, findings...)
		fetchErrors = append(fetchErrors, errs...)
	}

	// Filter by severity if specified
	if len(monitorSeverity) > 0 {
		allFindings = filterBySeverity(allFindings, monitorSeverity)
	}

	// Sort by severity (critical first)
	sortBySeverity(allFindings)

	// Output
	if monitorJSON {
		return outputJSON(allFindings)
	}

	cli.Print("\033[2K\r") // Clear scanning line

	// Show any fetch errors as warnings
	if len(fetchErrors) > 0 {
		for _, e := range fetchErrors {
			cli.Print("%s %s\n", warningStyle.Render("!"), e)
		}
		cli.Blank()
	}

	return outputTable(allFindings)
}
|
||||
|
||||
// resolveRepos determines which repos to scan
|
||||
func resolveRepos() ([]string, error) {
|
||||
if monitorRepo != "" {
|
||||
// Specific repo - if fully qualified (org/repo), use as-is
|
||||
if strings.Contains(monitorRepo, "/") {
|
||||
return []string{monitorRepo}, nil
|
||||
}
|
||||
// Otherwise, try to detect org from git remote, fallback to host-uk
|
||||
// Note: Users outside host-uk org should use fully qualified names
|
||||
org := detectOrgFromGit()
|
||||
if org == "" {
|
||||
org = "host-uk"
|
||||
}
|
||||
return []string{org + "/" + monitorRepo}, nil
|
||||
}
|
||||
|
||||
if monitorAll {
|
||||
// All repos from registry
|
||||
registry, err := repos.FindRegistry(io.Local)
|
||||
if err != nil {
|
||||
return nil, log.E("monitor", "failed to find registry", err)
|
||||
}
|
||||
|
||||
loaded, err := repos.LoadRegistry(io.Local, registry)
|
||||
if err != nil {
|
||||
return nil, log.E("monitor", "failed to load registry", err)
|
||||
}
|
||||
|
||||
var repoList []string
|
||||
for _, r := range loaded.Repos {
|
||||
repoList = append(repoList, loaded.Org+"/"+r.Name)
|
||||
}
|
||||
return repoList, nil
|
||||
}
|
||||
|
||||
// Default to current repo
|
||||
repo, err := detectRepoFromGit()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []string{repo}, nil
|
||||
}
|
||||
|
||||
// fetchRepoFindings fetches all security findings for a repo
|
||||
// Returns findings and any errors encountered (errors don't stop other fetches)
|
||||
func fetchRepoFindings(repoFullName string) ([]Finding, []string) {
|
||||
var findings []Finding
|
||||
var errs []string
|
||||
repoName := strings.Split(repoFullName, "/")[1]
|
||||
|
||||
// Fetch code scanning alerts
|
||||
codeFindings, err := fetchCodeScanningAlerts(repoFullName)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Sprintf("%s: code-scanning: %s", repoName, err))
|
||||
}
|
||||
findings = append(findings, codeFindings...)
|
||||
|
||||
// Fetch Dependabot alerts
|
||||
depFindings, err := fetchDependabotAlerts(repoFullName)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Sprintf("%s: dependabot: %s", repoName, err))
|
||||
}
|
||||
findings = append(findings, depFindings...)
|
||||
|
||||
// Fetch secret scanning alerts
|
||||
secretFindings, err := fetchSecretScanningAlerts(repoFullName)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Sprintf("%s: secret-scanning: %s", repoName, err))
|
||||
}
|
||||
findings = append(findings, secretFindings...)
|
||||
|
||||
return findings, errs
|
||||
}
|
||||
|
||||
// fetchCodeScanningAlerts fetches code scanning alerts
|
||||
func fetchCodeScanningAlerts(repoFullName string) ([]Finding, error) {
|
||||
args := []string{
|
||||
"api",
|
||||
fmt.Sprintf("repos/%s/code-scanning/alerts", repoFullName),
|
||||
}
|
||||
|
||||
cmd := exec.Command("gh", args...)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
// Check for expected "not enabled" responses vs actual errors
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
stderr := string(exitErr.Stderr)
|
||||
// These are expected conditions, not errors
|
||||
if strings.Contains(stderr, "Advanced Security must be enabled") ||
|
||||
strings.Contains(stderr, "no analysis found") ||
|
||||
strings.Contains(stderr, "Not Found") {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
return nil, log.E("monitor.fetchCodeScanning", "API request failed", err)
|
||||
}
|
||||
|
||||
var alerts []CodeScanningAlert
|
||||
if err := json.Unmarshal(output, &alerts); err != nil {
|
||||
return nil, log.E("monitor.fetchCodeScanning", "failed to parse response", err)
|
||||
}
|
||||
|
||||
repoName := strings.Split(repoFullName, "/")[1]
|
||||
var findings []Finding
|
||||
for _, alert := range alerts {
|
||||
if alert.State != "open" {
|
||||
continue
|
||||
}
|
||||
f := Finding{
|
||||
Source: alert.Tool.Name,
|
||||
Severity: normalizeSeverity(alert.Rule.Severity),
|
||||
Rule: alert.Rule.ID,
|
||||
File: alert.MostRecentInstance.Location.Path,
|
||||
Line: alert.MostRecentInstance.Location.StartLine,
|
||||
Message: alert.MostRecentInstance.Message.Text,
|
||||
URL: alert.HTMLURL,
|
||||
State: alert.State,
|
||||
RepoName: repoName,
|
||||
CreatedAt: alert.CreatedAt,
|
||||
Labels: []string{"type:security"},
|
||||
}
|
||||
if f.Message == "" {
|
||||
f.Message = alert.Rule.Description
|
||||
}
|
||||
findings = append(findings, f)
|
||||
}
|
||||
|
||||
return findings, nil
|
||||
}
|
||||
|
||||
// fetchDependabotAlerts fetches Dependabot alerts
|
||||
func fetchDependabotAlerts(repoFullName string) ([]Finding, error) {
|
||||
args := []string{
|
||||
"api",
|
||||
fmt.Sprintf("repos/%s/dependabot/alerts", repoFullName),
|
||||
}
|
||||
|
||||
cmd := exec.Command("gh", args...)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
stderr := string(exitErr.Stderr)
|
||||
// Dependabot not enabled is expected
|
||||
if strings.Contains(stderr, "Dependabot alerts are not enabled") ||
|
||||
strings.Contains(stderr, "Not Found") {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
return nil, log.E("monitor.fetchDependabot", "API request failed", err)
|
||||
}
|
||||
|
||||
var alerts []DependabotAlert
|
||||
if err := json.Unmarshal(output, &alerts); err != nil {
|
||||
return nil, log.E("monitor.fetchDependabot", "failed to parse response", err)
|
||||
}
|
||||
|
||||
repoName := strings.Split(repoFullName, "/")[1]
|
||||
var findings []Finding
|
||||
for _, alert := range alerts {
|
||||
if alert.State != "open" {
|
||||
continue
|
||||
}
|
||||
f := Finding{
|
||||
Source: "dependabot",
|
||||
Severity: normalizeSeverity(alert.SecurityVulnerability.Severity),
|
||||
Rule: alert.SecurityAdvisory.CVEID,
|
||||
File: alert.Dependency.ManifestPath,
|
||||
Line: 0,
|
||||
Message: fmt.Sprintf("%s: %s", alert.SecurityVulnerability.Package.Name, alert.SecurityAdvisory.Summary),
|
||||
URL: alert.HTMLURL,
|
||||
State: alert.State,
|
||||
RepoName: repoName,
|
||||
CreatedAt: alert.CreatedAt,
|
||||
Labels: []string{"type:security", "dependencies"},
|
||||
}
|
||||
findings = append(findings, f)
|
||||
}
|
||||
|
||||
return findings, nil
|
||||
}
|
||||
|
||||
// fetchSecretScanningAlerts fetches secret scanning alerts
|
||||
func fetchSecretScanningAlerts(repoFullName string) ([]Finding, error) {
|
||||
args := []string{
|
||||
"api",
|
||||
fmt.Sprintf("repos/%s/secret-scanning/alerts", repoFullName),
|
||||
}
|
||||
|
||||
cmd := exec.Command("gh", args...)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
stderr := string(exitErr.Stderr)
|
||||
// Secret scanning not enabled is expected
|
||||
if strings.Contains(stderr, "Secret scanning is disabled") ||
|
||||
strings.Contains(stderr, "Not Found") {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
return nil, log.E("monitor.fetchSecretScanning", "API request failed", err)
|
||||
}
|
||||
|
||||
var alerts []SecretScanningAlert
|
||||
if err := json.Unmarshal(output, &alerts); err != nil {
|
||||
return nil, log.E("monitor.fetchSecretScanning", "failed to parse response", err)
|
||||
}
|
||||
|
||||
repoName := strings.Split(repoFullName, "/")[1]
|
||||
var findings []Finding
|
||||
for _, alert := range alerts {
|
||||
if alert.State != "open" {
|
||||
continue
|
||||
}
|
||||
f := Finding{
|
||||
Source: "secret-scanning",
|
||||
Severity: "critical", // Secrets are always critical
|
||||
Rule: alert.SecretType,
|
||||
File: alert.LocationType,
|
||||
Line: 0,
|
||||
Message: fmt.Sprintf("Exposed %s detected", alert.SecretType),
|
||||
URL: alert.HTMLURL,
|
||||
State: alert.State,
|
||||
RepoName: repoName,
|
||||
CreatedAt: alert.CreatedAt,
|
||||
Labels: []string{"type:security", "secrets"},
|
||||
}
|
||||
findings = append(findings, f)
|
||||
}
|
||||
|
||||
return findings, nil
|
||||
}
|
||||
|
||||
// normalizeSeverity normalizes severity strings to standard values
|
||||
func normalizeSeverity(s string) string {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
case "critical", "crit":
|
||||
return "critical"
|
||||
case "high", "error":
|
||||
return "high"
|
||||
case "medium", "moderate", "warning":
|
||||
return "medium"
|
||||
case "low", "info", "note":
|
||||
return "low"
|
||||
default:
|
||||
return "medium"
|
||||
}
|
||||
}
|
||||
|
||||
// filterBySeverity filters findings by severity
|
||||
func filterBySeverity(findings []Finding, severities []string) []Finding {
|
||||
sevSet := make(map[string]bool)
|
||||
for _, s := range severities {
|
||||
sevSet[strings.ToLower(s)] = true
|
||||
}
|
||||
|
||||
var filtered []Finding
|
||||
for _, f := range findings {
|
||||
if sevSet[f.Severity] {
|
||||
filtered = append(filtered, f)
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
// sortBySeverity sorts findings by severity (critical first)
|
||||
func sortBySeverity(findings []Finding) {
|
||||
severityOrder := map[string]int{
|
||||
"critical": 0,
|
||||
"high": 1,
|
||||
"medium": 2,
|
||||
"low": 3,
|
||||
}
|
||||
|
||||
slices.SortFunc(findings, func(a, b Finding) int {
|
||||
return cmp.Or(
|
||||
cmp.Compare(severityOrder[a.Severity], severityOrder[b.Severity]),
|
||||
cmp.Compare(a.RepoName, b.RepoName),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// outputJSON outputs findings as JSON
|
||||
func outputJSON(findings []Finding) error {
|
||||
data, err := json.MarshalIndent(findings, "", " ")
|
||||
if err != nil {
|
||||
return log.E("monitor", "failed to marshal findings", err)
|
||||
}
|
||||
cli.Print("%s\n", string(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
// outputTable outputs findings as a formatted table
|
||||
func outputTable(findings []Finding) error {
|
||||
if len(findings) == 0 {
|
||||
cli.Print("%s\n", successStyle.Render(i18n.T("cmd.monitor.no_findings")))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Count by severity
|
||||
counts := make(map[string]int)
|
||||
for _, f := range findings {
|
||||
counts[f.Severity]++
|
||||
}
|
||||
|
||||
// Header summary
|
||||
var parts []string
|
||||
if counts["critical"] > 0 {
|
||||
parts = append(parts, errorStyle.Render(fmt.Sprintf("%d critical", counts["critical"])))
|
||||
}
|
||||
if counts["high"] > 0 {
|
||||
parts = append(parts, errorStyle.Render(fmt.Sprintf("%d high", counts["high"])))
|
||||
}
|
||||
if counts["medium"] > 0 {
|
||||
parts = append(parts, warningStyle.Render(fmt.Sprintf("%d medium", counts["medium"])))
|
||||
}
|
||||
if counts["low"] > 0 {
|
||||
parts = append(parts, dimStyle.Render(fmt.Sprintf("%d low", counts["low"])))
|
||||
}
|
||||
cli.Print("%s: %s\n", i18n.T("cmd.monitor.found"), strings.Join(parts, ", "))
|
||||
cli.Blank()
|
||||
|
||||
// Group by repo
|
||||
byRepo := make(map[string][]Finding)
|
||||
for _, f := range findings {
|
||||
byRepo[f.RepoName] = append(byRepo[f.RepoName], f)
|
||||
}
|
||||
|
||||
// Sort repos for consistent output
|
||||
repoNames := slices.Sorted(maps.Keys(byRepo))
|
||||
|
||||
// Print by repo
|
||||
for _, repo := range repoNames {
|
||||
repoFindings := byRepo[repo]
|
||||
cli.Print("%s\n", cli.BoldStyle.Render(repo))
|
||||
for _, f := range repoFindings {
|
||||
sevStyle := dimStyle
|
||||
switch f.Severity {
|
||||
case "critical", "high":
|
||||
sevStyle = errorStyle
|
||||
case "medium":
|
||||
sevStyle = warningStyle
|
||||
}
|
||||
|
||||
// Format: [severity] source: message (file:line)
|
||||
location := ""
|
||||
if f.File != "" {
|
||||
location = f.File
|
||||
if f.Line > 0 {
|
||||
location = fmt.Sprintf("%s:%d", f.File, f.Line)
|
||||
}
|
||||
}
|
||||
|
||||
cli.Print(" %s %s: %s",
|
||||
sevStyle.Render(fmt.Sprintf("[%s]", f.Severity)),
|
||||
dimStyle.Render(f.Source),
|
||||
truncate(f.Message, 60))
|
||||
if location != "" {
|
||||
cli.Print(" %s", dimStyle.Render("("+location+")"))
|
||||
}
|
||||
cli.Blank()
|
||||
}
|
||||
cli.Blank()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// truncate truncates a string to max runes (Unicode-safe)
|
||||
func truncate(s string, max int) string {
|
||||
runes := []rune(s)
|
||||
if len(runes) <= max {
|
||||
return s
|
||||
}
|
||||
return string(runes[:max-3]) + "..."
|
||||
}
|
||||
|
||||
// detectRepoFromGit detects the repo from git remote
|
||||
func detectRepoFromGit() (string, error) {
|
||||
cmd := exec.Command("git", "remote", "get-url", "origin")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", log.E("monitor", i18n.T("cmd.monitor.error.not_git_repo"), err)
|
||||
}
|
||||
|
||||
url := strings.TrimSpace(string(output))
|
||||
return parseGitHubRepo(url)
|
||||
}
|
||||
|
||||
// detectOrgFromGit tries to detect the org from git remote
|
||||
func detectOrgFromGit() string {
|
||||
repo, err := detectRepoFromGit()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
parts := strings.Split(repo, "/")
|
||||
if len(parts) >= 1 {
|
||||
return parts[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// parseGitHubRepo extracts org/repo from a git URL
|
||||
func parseGitHubRepo(url string) (string, error) {
|
||||
// Handle SSH URLs: git@github.com:org/repo.git
|
||||
if strings.HasPrefix(url, "git@github.com:") {
|
||||
path := strings.TrimPrefix(url, "git@github.com:")
|
||||
path = strings.TrimSuffix(path, ".git")
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// Handle HTTPS URLs: https://github.com/org/repo.git
|
||||
if strings.Contains(url, "github.com/") {
|
||||
parts := strings.Split(url, "github.com/")
|
||||
if len(parts) >= 2 {
|
||||
path := strings.TrimSuffix(parts[1], ".git")
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("could not parse GitHub repo from URL: %s", url)
|
||||
}
|
||||
|
|
@ -1,14 +0,0 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cli.RegisterCommands(AddProdCommands)
|
||||
}
|
||||
|
||||
// AddProdCommands registers the 'prod' command and all subcommands.
|
||||
func AddProdCommands(root *cli.Command) {
|
||||
root.AddCommand(Cmd)
|
||||
}
|
||||
|
|
@ -1,129 +0,0 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-infra"
|
||||
)
|
||||
|
||||
var dnsCmd = &cli.Command{
|
||||
Use: "dns",
|
||||
Short: "Manage DNS records via CloudNS",
|
||||
Long: `View and manage DNS records for host.uk.com via CloudNS API.
|
||||
|
||||
Requires:
|
||||
CLOUDNS_AUTH_ID CloudNS auth ID
|
||||
CLOUDNS_AUTH_PASSWORD CloudNS auth password`,
|
||||
}
|
||||
|
||||
var dnsListCmd = &cli.Command{
|
||||
Use: "list [zone]",
|
||||
Short: "List DNS records",
|
||||
Args: cli.MaximumNArgs(1),
|
||||
RunE: runDNSList,
|
||||
}
|
||||
|
||||
var dnsSetCmd = &cli.Command{
|
||||
Use: "set <host> <type> <value>",
|
||||
Short: "Create or update a DNS record",
|
||||
Long: `Create or update a DNS record. Example:
|
||||
core prod dns set hermes.lb A 1.2.3.4
|
||||
core prod dns set "*.host.uk.com" CNAME hermes.lb.host.uk.com`,
|
||||
Args: cli.ExactArgs(3),
|
||||
RunE: runDNSSet,
|
||||
}
|
||||
|
||||
var (
|
||||
dnsZone string
|
||||
dnsTTL int
|
||||
)
|
||||
|
||||
func init() {
|
||||
dnsCmd.PersistentFlags().StringVar(&dnsZone, "zone", "host.uk.com", "DNS zone")
|
||||
|
||||
dnsSetCmd.Flags().IntVar(&dnsTTL, "ttl", 300, "Record TTL in seconds")
|
||||
|
||||
dnsCmd.AddCommand(dnsListCmd)
|
||||
dnsCmd.AddCommand(dnsSetCmd)
|
||||
}
|
||||
|
||||
func getDNSClient() (*infra.CloudNSClient, error) {
|
||||
authID := os.Getenv("CLOUDNS_AUTH_ID")
|
||||
authPass := os.Getenv("CLOUDNS_AUTH_PASSWORD")
|
||||
if authID == "" || authPass == "" {
|
||||
return nil, errors.New("CLOUDNS_AUTH_ID and CLOUDNS_AUTH_PASSWORD required")
|
||||
}
|
||||
return infra.NewCloudNSClient(authID, authPass), nil
|
||||
}
|
||||
|
||||
func runDNSList(cmd *cli.Command, args []string) error {
|
||||
dns, err := getDNSClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
zone := dnsZone
|
||||
if len(args) > 0 {
|
||||
zone = args[0]
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
records, err := dns.ListRecords(ctx, zone)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list records: %w", err)
|
||||
}
|
||||
|
||||
cli.Print("%s DNS records for %s\n\n", cli.BoldStyle.Render("▶"), cli.TitleStyle.Render(zone))
|
||||
|
||||
if len(records) == 0 {
|
||||
cli.Print(" No records found\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
for id, r := range records {
|
||||
cli.Print(" %s %-6s %-30s %s TTL:%s\n",
|
||||
cli.DimStyle.Render(id),
|
||||
cli.BoldStyle.Render(r.Type),
|
||||
r.Host,
|
||||
r.Record,
|
||||
r.TTL)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runDNSSet(cmd *cli.Command, args []string) error {
|
||||
dns, err := getDNSClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host := args[0]
|
||||
recordType := args[1]
|
||||
value := args[2]
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
changed, err := dns.EnsureRecord(ctx, dnsZone, host, recordType, value, dnsTTL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("set record: %w", err)
|
||||
}
|
||||
|
||||
if changed {
|
||||
cli.Print("%s %s %s %s -> %s\n",
|
||||
cli.SuccessStyle.Render("✓"),
|
||||
recordType, host, dnsZone, value)
|
||||
} else {
|
||||
cli.Print("%s Record already correct\n", cli.DimStyle.Render("·"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,113 +0,0 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-infra"
|
||||
)
|
||||
|
||||
var lbCmd = &cli.Command{
|
||||
Use: "lb",
|
||||
Short: "Manage Hetzner load balancer",
|
||||
Long: `View and manage the Hetzner Cloud managed load balancer.
|
||||
|
||||
Requires: HCLOUD_TOKEN`,
|
||||
}
|
||||
|
||||
var lbStatusCmd = &cli.Command{
|
||||
Use: "status",
|
||||
Short: "Show load balancer status and target health",
|
||||
RunE: runLBStatus,
|
||||
}
|
||||
|
||||
var lbCreateCmd = &cli.Command{
|
||||
Use: "create",
|
||||
Short: "Create load balancer from infra.yaml",
|
||||
RunE: runLBCreate,
|
||||
}
|
||||
|
||||
func init() {
|
||||
lbCmd.AddCommand(lbStatusCmd)
|
||||
lbCmd.AddCommand(lbCreateCmd)
|
||||
}
|
||||
|
||||
func getHCloudClient() (*infra.HCloudClient, error) {
|
||||
token := os.Getenv("HCLOUD_TOKEN")
|
||||
if token == "" {
|
||||
return nil, errors.New("HCLOUD_TOKEN environment variable required")
|
||||
}
|
||||
return infra.NewHCloudClient(token), nil
|
||||
}
|
||||
|
||||
func runLBStatus(cmd *cli.Command, args []string) error {
|
||||
hc, err := getHCloudClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
lbs, err := hc.ListLoadBalancers(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list load balancers: %w", err)
|
||||
}
|
||||
|
||||
if len(lbs) == 0 {
|
||||
cli.Print("No load balancers found\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, lb := range lbs {
|
||||
cli.Print("%s %s\n", cli.BoldStyle.Render("▶"), cli.TitleStyle.Render(lb.Name))
|
||||
cli.Print(" ID: %d\n", lb.ID)
|
||||
cli.Print(" IP: %s\n", lb.PublicNet.IPv4.IP)
|
||||
cli.Print(" Algorithm: %s\n", lb.Algorithm.Type)
|
||||
cli.Print(" Location: %s\n", lb.Location.Name)
|
||||
|
||||
if len(lb.Services) > 0 {
|
||||
cli.Print("\n Services:\n")
|
||||
for _, s := range lb.Services {
|
||||
cli.Print(" %s :%d -> :%d proxy_protocol=%v\n",
|
||||
s.Protocol, s.ListenPort, s.DestinationPort, s.Proxyprotocol)
|
||||
}
|
||||
}
|
||||
|
||||
if len(lb.Targets) > 0 {
|
||||
cli.Print("\n Targets:\n")
|
||||
for _, t := range lb.Targets {
|
||||
ip := ""
|
||||
if t.IP != nil {
|
||||
ip = t.IP.IP
|
||||
}
|
||||
for _, hs := range t.HealthStatus {
|
||||
icon := cli.SuccessStyle.Render("●")
|
||||
if hs.Status != "healthy" {
|
||||
icon = cli.ErrorStyle.Render("○")
|
||||
}
|
||||
cli.Print(" %s %s :%d %s\n", icon, ip, hs.ListenPort, hs.Status)
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runLBCreate(cmd *cli.Command, args []string) error {
|
||||
cfg, _, err := loadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
return stepLoadBalancer(ctx, cfg)
|
||||
}
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
infraFile string
|
||||
)
|
||||
|
||||
// Cmd is the root prod command.
|
||||
var Cmd = &cli.Command{
|
||||
Use: "prod",
|
||||
Short: "Production infrastructure management",
|
||||
Long: `Manage the Host UK production infrastructure.
|
||||
|
||||
Commands:
|
||||
status Show infrastructure health and connectivity
|
||||
setup Phase 1: discover topology, create LB, configure DNS
|
||||
dns Manage DNS records via CloudNS
|
||||
lb Manage Hetzner load balancer
|
||||
ssh SSH into a production host
|
||||
|
||||
Configuration is read from infra.yaml in the project root.`,
|
||||
}
|
||||
|
||||
func init() {
|
||||
Cmd.PersistentFlags().StringVar(&infraFile, "config", "", "Path to infra.yaml (auto-discovered if not set)")
|
||||
|
||||
Cmd.AddCommand(statusCmd)
|
||||
Cmd.AddCommand(setupCmd)
|
||||
Cmd.AddCommand(dnsCmd)
|
||||
Cmd.AddCommand(lbCmd)
|
||||
Cmd.AddCommand(sshCmd)
|
||||
}
|
||||
|
|
@ -1,284 +0,0 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-infra"
|
||||
)
|
||||
|
||||
var setupCmd = &cli.Command{
|
||||
Use: "setup",
|
||||
Short: "Phase 1: discover topology, create LB, configure DNS",
|
||||
Long: `Run the Phase 1 foundation setup:
|
||||
|
||||
1. Discover Hetzner topology (Cloud + Robot servers)
|
||||
2. Create Hetzner managed load balancer
|
||||
3. Configure DNS records via CloudNS
|
||||
4. Verify connectivity to all hosts
|
||||
|
||||
Required environment variables:
|
||||
HCLOUD_TOKEN Hetzner Cloud API token
|
||||
HETZNER_ROBOT_USER Hetzner Robot username
|
||||
HETZNER_ROBOT_PASS Hetzner Robot password
|
||||
CLOUDNS_AUTH_ID CloudNS auth ID
|
||||
CLOUDNS_AUTH_PASSWORD CloudNS auth password`,
|
||||
RunE: runSetup,
|
||||
}
|
||||
|
||||
var (
|
||||
setupDryRun bool
|
||||
setupStep string
|
||||
)
|
||||
|
||||
func init() {
|
||||
setupCmd.Flags().BoolVar(&setupDryRun, "dry-run", false, "Show what would be done without making changes")
|
||||
setupCmd.Flags().StringVar(&setupStep, "step", "", "Run a specific step only (discover, lb, dns)")
|
||||
}
|
||||
|
||||
func runSetup(cmd *cli.Command, args []string) error {
|
||||
cfg, cfgPath, err := loadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cli.Print("%s Production setup from %s\n\n",
|
||||
cli.BoldStyle.Render("▶"),
|
||||
cli.DimStyle.Render(cfgPath))
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
steps := []struct {
|
||||
name string
|
||||
fn func(context.Context, *infra.Config) error
|
||||
}{
|
||||
{"discover", stepDiscover},
|
||||
{"lb", stepLoadBalancer},
|
||||
{"dns", stepDNS},
|
||||
}
|
||||
|
||||
for _, step := range steps {
|
||||
if setupStep != "" && setupStep != step.name {
|
||||
continue
|
||||
}
|
||||
|
||||
cli.Print("\n%s Step: %s\n", cli.BoldStyle.Render("━━"), cli.TitleStyle.Render(step.name))
|
||||
|
||||
if err := step.fn(ctx, cfg); err != nil {
|
||||
cli.Print(" %s %s: %s\n", cli.ErrorStyle.Render("✗"), step.name, err)
|
||||
return fmt.Errorf("step %s failed: %w", step.name, err)
|
||||
}
|
||||
|
||||
cli.Print(" %s %s complete\n", cli.SuccessStyle.Render("✓"), step.name)
|
||||
}
|
||||
|
||||
cli.Print("\n%s Setup complete\n", cli.SuccessStyle.Render("✓"))
|
||||
return nil
|
||||
}
|
||||
|
||||
func stepDiscover(ctx context.Context, cfg *infra.Config) error {
|
||||
// Discover HCloud servers
|
||||
hcloudToken := os.Getenv("HCLOUD_TOKEN")
|
||||
if hcloudToken != "" {
|
||||
cli.Print(" Discovering Hetzner Cloud servers...\n")
|
||||
|
||||
hc := infra.NewHCloudClient(hcloudToken)
|
||||
servers, err := hc.ListServers(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list HCloud servers: %w", err)
|
||||
}
|
||||
|
||||
for _, s := range servers {
|
||||
cli.Print(" %s %s %s %s %s\n",
|
||||
cli.SuccessStyle.Render("●"),
|
||||
cli.BoldStyle.Render(s.Name),
|
||||
s.PublicNet.IPv4.IP,
|
||||
s.ServerType.Name,
|
||||
cli.DimStyle.Render(s.Datacenter.Name))
|
||||
}
|
||||
} else {
|
||||
cli.Print(" %s HCLOUD_TOKEN not set — skipping Cloud discovery\n",
|
||||
cli.WarningStyle.Render("⚠"))
|
||||
}
|
||||
|
||||
// Discover Robot servers
|
||||
robotUser := os.Getenv("HETZNER_ROBOT_USER")
|
||||
robotPass := os.Getenv("HETZNER_ROBOT_PASS")
|
||||
if robotUser != "" && robotPass != "" {
|
||||
cli.Print(" Discovering Hetzner Robot servers...\n")
|
||||
|
||||
hr := infra.NewHRobotClient(robotUser, robotPass)
|
||||
servers, err := hr.ListServers(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list Robot servers: %w", err)
|
||||
}
|
||||
|
||||
for _, s := range servers {
|
||||
status := cli.SuccessStyle.Render("●")
|
||||
if s.Status != "ready" {
|
||||
status = cli.WarningStyle.Render("○")
|
||||
}
|
||||
cli.Print(" %s %s %s %s %s\n",
|
||||
status,
|
||||
cli.BoldStyle.Render(s.ServerName),
|
||||
s.ServerIP,
|
||||
s.Product,
|
||||
cli.DimStyle.Render(s.Datacenter))
|
||||
}
|
||||
} else {
|
||||
cli.Print(" %s HETZNER_ROBOT_USER/PASS not set — skipping Robot discovery\n",
|
||||
cli.WarningStyle.Render("⚠"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func stepLoadBalancer(ctx context.Context, cfg *infra.Config) error {
|
||||
hcloudToken := os.Getenv("HCLOUD_TOKEN")
|
||||
if hcloudToken == "" {
|
||||
return errors.New("HCLOUD_TOKEN required for load balancer management")
|
||||
}
|
||||
|
||||
hc := infra.NewHCloudClient(hcloudToken)
|
||||
|
||||
// Check if LB already exists
|
||||
lbs, err := hc.ListLoadBalancers(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list load balancers: %w", err)
|
||||
}
|
||||
|
||||
for _, lb := range lbs {
|
||||
if lb.Name == cfg.LoadBalancer.Name {
|
||||
cli.Print(" Load balancer '%s' already exists (ID: %d, IP: %s)\n",
|
||||
lb.Name, lb.ID, lb.PublicNet.IPv4.IP)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if setupDryRun {
|
||||
cli.Print(" [dry-run] Would create load balancer '%s' (%s) in %s\n",
|
||||
cfg.LoadBalancer.Name, cfg.LoadBalancer.Type, cfg.LoadBalancer.Location)
|
||||
for _, b := range cfg.LoadBalancer.Backends {
|
||||
if host, ok := cfg.Hosts[b.Host]; ok {
|
||||
cli.Print(" [dry-run] Backend: %s (%s:%d)\n", b.Host, host.IP, b.Port)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build targets from config
|
||||
targets := make([]infra.HCloudLBCreateTarget, 0, len(cfg.LoadBalancer.Backends))
|
||||
for _, b := range cfg.LoadBalancer.Backends {
|
||||
host, ok := cfg.Hosts[b.Host]
|
||||
if !ok {
|
||||
return fmt.Errorf("backend host '%s' not found in config", b.Host)
|
||||
}
|
||||
targets = append(targets, infra.HCloudLBCreateTarget{
|
||||
Type: "ip",
|
||||
IP: &infra.HCloudLBTargetIP{IP: host.IP},
|
||||
})
|
||||
}
|
||||
|
||||
// Build services
|
||||
services := make([]infra.HCloudLBService, 0, len(cfg.LoadBalancer.Listeners))
|
||||
for _, l := range cfg.LoadBalancer.Listeners {
|
||||
svc := infra.HCloudLBService{
|
||||
Protocol: l.Protocol,
|
||||
ListenPort: l.Frontend,
|
||||
DestinationPort: l.Backend,
|
||||
Proxyprotocol: l.ProxyProtocol,
|
||||
HealthCheck: &infra.HCloudLBHealthCheck{
|
||||
Protocol: cfg.LoadBalancer.Health.Protocol,
|
||||
Port: l.Backend,
|
||||
Interval: cfg.LoadBalancer.Health.Interval,
|
||||
Timeout: 10,
|
||||
Retries: 3,
|
||||
HTTP: &infra.HCloudLBHCHTTP{
|
||||
Path: cfg.LoadBalancer.Health.Path,
|
||||
StatusCode: "2??",
|
||||
},
|
||||
},
|
||||
}
|
||||
services = append(services, svc)
|
||||
}
|
||||
|
||||
req := infra.HCloudLBCreateRequest{
|
||||
Name: cfg.LoadBalancer.Name,
|
||||
LoadBalancerType: cfg.LoadBalancer.Type,
|
||||
Location: cfg.LoadBalancer.Location,
|
||||
Algorithm: infra.HCloudLBAlgorithm{Type: cfg.LoadBalancer.Algorithm},
|
||||
Services: services,
|
||||
Targets: targets,
|
||||
Labels: map[string]string{
|
||||
"project": "host-uk",
|
||||
"managed": "core-cli",
|
||||
},
|
||||
}
|
||||
|
||||
cli.Print(" Creating load balancer '%s'...\n", cfg.LoadBalancer.Name)
|
||||
|
||||
lb, err := hc.CreateLoadBalancer(ctx, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create load balancer: %w", err)
|
||||
}
|
||||
|
||||
cli.Print(" Created: %s (ID: %d, IP: %s)\n",
|
||||
cli.BoldStyle.Render(lb.Name), lb.ID, lb.PublicNet.IPv4.IP)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func stepDNS(ctx context.Context, cfg *infra.Config) error {
|
||||
authID := os.Getenv("CLOUDNS_AUTH_ID")
|
||||
authPass := os.Getenv("CLOUDNS_AUTH_PASSWORD")
|
||||
if authID == "" || authPass == "" {
|
||||
return errors.New("CLOUDNS_AUTH_ID and CLOUDNS_AUTH_PASSWORD required")
|
||||
}
|
||||
|
||||
dns := infra.NewCloudNSClient(authID, authPass)
|
||||
|
||||
for zoneName, zone := range cfg.DNS.Zones {
|
||||
cli.Print(" Zone: %s\n", cli.BoldStyle.Render(zoneName))
|
||||
|
||||
for _, rec := range zone.Records {
|
||||
value := rec.Value
|
||||
// Skip templated values (need LB IP first)
|
||||
if value == "{{.lb_ip}}" {
|
||||
cli.Print(" %s %s %s %s — %s\n",
|
||||
cli.WarningStyle.Render("⚠"),
|
||||
rec.Name, rec.Type, value,
|
||||
cli.DimStyle.Render("needs LB IP (run setup --step=lb first)"))
|
||||
continue
|
||||
}
|
||||
|
||||
if setupDryRun {
|
||||
cli.Print(" [dry-run] %s %s -> %s (TTL: %d)\n",
|
||||
rec.Type, rec.Name, value, rec.TTL)
|
||||
continue
|
||||
}
|
||||
|
||||
changed, err := dns.EnsureRecord(ctx, zoneName, rec.Name, rec.Type, value, rec.TTL)
|
||||
if err != nil {
|
||||
cli.Print(" %s %s %s: %s\n", cli.ErrorStyle.Render("✗"), rec.Type, rec.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if changed {
|
||||
cli.Print(" %s %s %s -> %s\n",
|
||||
cli.SuccessStyle.Render("✓"),
|
||||
rec.Type, rec.Name, value)
|
||||
} else {
|
||||
cli.Print(" %s %s %s (no change)\n",
|
||||
cli.DimStyle.Render("·"),
|
||||
rec.Type, rec.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,63 +0,0 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
)
|
||||
|
||||
var sshCmd = &cli.Command{
|
||||
Use: "ssh <host>",
|
||||
Short: "SSH into a production host",
|
||||
Long: `Open an SSH session to a production host defined in infra.yaml.
|
||||
|
||||
Examples:
|
||||
core prod ssh noc
|
||||
core prod ssh de
|
||||
core prod ssh de2
|
||||
core prod ssh build`,
|
||||
Args: cli.ExactArgs(1),
|
||||
RunE: runSSH,
|
||||
}
|
||||
|
||||
func runSSH(cmd *cli.Command, args []string) error {
|
||||
cfg, _, err := loadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := args[0]
|
||||
host, ok := cfg.Hosts[name]
|
||||
if !ok {
|
||||
// List available hosts
|
||||
cli.Print("Unknown host '%s'. Available:\n", name)
|
||||
for n, h := range cfg.Hosts {
|
||||
cli.Print(" %s %s (%s)\n", cli.BoldStyle.Render(n), h.IP, h.Role)
|
||||
}
|
||||
return fmt.Errorf("host '%s' not found in infra.yaml", name)
|
||||
}
|
||||
|
||||
sshArgs := []string{
|
||||
"ssh",
|
||||
"-i", host.SSH.Key,
|
||||
"-p", fmt.Sprintf("%d", host.SSH.Port),
|
||||
"-o", "StrictHostKeyChecking=accept-new",
|
||||
fmt.Sprintf("%s@%s", host.SSH.User, host.IP),
|
||||
}
|
||||
|
||||
cli.Print("%s %s@%s (%s)\n",
|
||||
cli.BoldStyle.Render("▶"),
|
||||
host.SSH.User, host.FQDN,
|
||||
cli.DimStyle.Render(host.IP))
|
||||
|
||||
sshPath, err := exec.LookPath("ssh")
|
||||
if err != nil {
|
||||
return fmt.Errorf("ssh not found: %w", err)
|
||||
}
|
||||
|
||||
// Replace current process with SSH
|
||||
return syscall.Exec(sshPath, sshArgs, os.Environ())
|
||||
}
|
||||
|
|
@ -1,324 +0,0 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-ansible"
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-infra"
|
||||
)
|
||||
|
||||
var statusCmd = &cli.Command{
|
||||
Use: "status",
|
||||
Short: "Show production infrastructure health",
|
||||
Long: `Check connectivity, services, and cluster health across all production hosts.
|
||||
|
||||
Tests:
|
||||
- SSH connectivity to all hosts
|
||||
- Docker daemon status
|
||||
- Coolify controller (noc)
|
||||
- Galera cluster state (de, de2)
|
||||
- Redis Sentinel status (de, de2)
|
||||
- Load balancer health (if HCLOUD_TOKEN set)`,
|
||||
RunE: runStatus,
|
||||
}
|
||||
|
||||
type hostStatus struct {
|
||||
Name string
|
||||
Host *infra.Host
|
||||
Connected bool
|
||||
ConnTime time.Duration
|
||||
OS string
|
||||
Docker string
|
||||
Services map[string]string
|
||||
Error error
|
||||
}
|
||||
|
||||
func runStatus(cmd *cli.Command, args []string) error {
|
||||
cfg, cfgPath, err := loadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cli.Print("%s Infrastructure status from %s\n\n",
|
||||
cli.BoldStyle.Render("▶"),
|
||||
cli.DimStyle.Render(cfgPath))
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Check all hosts in parallel
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
mu sync.Mutex
|
||||
statuses []hostStatus
|
||||
)
|
||||
|
||||
for name, host := range cfg.Hosts {
|
||||
wg.Add(1)
|
||||
go func(name string, host *infra.Host) {
|
||||
defer wg.Done()
|
||||
s := checkHost(ctx, name, host)
|
||||
mu.Lock()
|
||||
statuses = append(statuses, s)
|
||||
mu.Unlock()
|
||||
}(name, host)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Print results in consistent order
|
||||
order := []string{"noc", "de", "de2", "build"}
|
||||
for _, name := range order {
|
||||
for _, s := range statuses {
|
||||
if s.Name == name {
|
||||
printHostStatus(s)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check LB if token available
|
||||
if token := os.Getenv("HCLOUD_TOKEN"); token != "" {
|
||||
fmt.Println()
|
||||
checkLoadBalancer(ctx, token)
|
||||
} else {
|
||||
fmt.Println()
|
||||
cli.Print("%s Load balancer: %s\n",
|
||||
cli.DimStyle.Render(" ○"),
|
||||
cli.DimStyle.Render("HCLOUD_TOKEN not set (skipped)"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkHost(ctx context.Context, name string, host *infra.Host) hostStatus {
|
||||
s := hostStatus{
|
||||
Name: name,
|
||||
Host: host,
|
||||
Services: make(map[string]string),
|
||||
}
|
||||
|
||||
sshCfg := ansible.SSHConfig{
|
||||
Host: host.IP,
|
||||
Port: host.SSH.Port,
|
||||
User: host.SSH.User,
|
||||
KeyFile: host.SSH.Key,
|
||||
Timeout: 15 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ansible.NewSSHClient(sshCfg)
|
||||
if err != nil {
|
||||
s.Error = fmt.Errorf("create SSH client: %w", err)
|
||||
return s
|
||||
}
|
||||
defer func() { _ = client.Close() }()
|
||||
|
||||
start := time.Now()
|
||||
if err := client.Connect(ctx); err != nil {
|
||||
s.Error = fmt.Errorf("SSH connect: %w", err)
|
||||
return s
|
||||
}
|
||||
s.Connected = true
|
||||
s.ConnTime = time.Since(start)
|
||||
|
||||
// OS info
|
||||
stdout, _, _, _ := client.Run(ctx, "cat /etc/os-release 2>/dev/null | grep PRETTY_NAME | cut -d'\"' -f2")
|
||||
s.OS = strings.TrimSpace(stdout)
|
||||
|
||||
// Docker
|
||||
stdout, _, _, err = client.Run(ctx, "docker --version 2>/dev/null | head -1")
|
||||
if err == nil && stdout != "" {
|
||||
s.Docker = strings.TrimSpace(stdout)
|
||||
}
|
||||
|
||||
// Check each expected service
|
||||
for _, svc := range host.Services {
|
||||
status := checkService(ctx, client, svc)
|
||||
s.Services[svc] = status
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func checkService(ctx context.Context, client *ansible.SSHClient, service string) string {
|
||||
switch service {
|
||||
case "coolify":
|
||||
stdout, _, _, _ := client.Run(ctx, "docker ps --format '{{.Names}}' 2>/dev/null | grep -c coolify")
|
||||
if strings.TrimSpace(stdout) != "0" && strings.TrimSpace(stdout) != "" {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
|
||||
case "traefik":
|
||||
stdout, _, _, _ := client.Run(ctx, "docker ps --format '{{.Names}}' 2>/dev/null | grep -c traefik")
|
||||
if strings.TrimSpace(stdout) != "0" && strings.TrimSpace(stdout) != "" {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
|
||||
case "galera":
|
||||
// Check Galera cluster state
|
||||
stdout, _, _, _ := client.Run(ctx,
|
||||
"docker exec $(docker ps -q --filter name=mariadb 2>/dev/null || echo none) "+
|
||||
"mariadb -u root -e \"SHOW STATUS LIKE 'wsrep_cluster_size'\" --skip-column-names 2>/dev/null | awk '{print $2}'")
|
||||
size := strings.TrimSpace(stdout)
|
||||
if size != "" && size != "0" {
|
||||
return fmt.Sprintf("cluster_size=%s", size)
|
||||
}
|
||||
// Try non-Docker
|
||||
stdout, _, _, _ = client.Run(ctx,
|
||||
"mariadb -u root -e \"SHOW STATUS LIKE 'wsrep_cluster_size'\" --skip-column-names 2>/dev/null | awk '{print $2}'")
|
||||
size = strings.TrimSpace(stdout)
|
||||
if size != "" && size != "0" {
|
||||
return fmt.Sprintf("cluster_size=%s", size)
|
||||
}
|
||||
return "not running"
|
||||
|
||||
case "redis":
|
||||
stdout, _, _, _ := client.Run(ctx,
|
||||
"docker exec $(docker ps -q --filter name=redis 2>/dev/null || echo none) "+
|
||||
"redis-cli ping 2>/dev/null")
|
||||
if strings.TrimSpace(stdout) == "PONG" {
|
||||
return "running"
|
||||
}
|
||||
stdout, _, _, _ = client.Run(ctx, "redis-cli ping 2>/dev/null")
|
||||
if strings.TrimSpace(stdout) == "PONG" {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
|
||||
case "forgejo-runner":
|
||||
stdout, _, _, _ := client.Run(ctx, "systemctl is-active forgejo-runner 2>/dev/null || docker ps --format '{{.Names}}' 2>/dev/null | grep -c runner")
|
||||
val := strings.TrimSpace(stdout)
|
||||
if val == "active" || (val != "0" && val != "") {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
|
||||
default:
|
||||
// Generic docker container check
|
||||
stdout, _, _, _ := client.Run(ctx,
|
||||
fmt.Sprintf("docker ps --format '{{.Names}}' 2>/dev/null | grep -c %s", service))
|
||||
if strings.TrimSpace(stdout) != "0" && strings.TrimSpace(stdout) != "" {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
}
|
||||
}
|
||||
|
||||
func printHostStatus(s hostStatus) {
|
||||
// Host header
|
||||
roleStyle := cli.DimStyle
|
||||
switch s.Host.Role {
|
||||
case "app":
|
||||
roleStyle = cli.SuccessStyle
|
||||
case "bastion":
|
||||
roleStyle = cli.WarningStyle
|
||||
case "builder":
|
||||
roleStyle = cli.InfoStyle
|
||||
}
|
||||
|
||||
cli.Print(" %s %s %s %s\n",
|
||||
cli.BoldStyle.Render(s.Name),
|
||||
cli.DimStyle.Render(s.Host.IP),
|
||||
roleStyle.Render(s.Host.Role),
|
||||
cli.DimStyle.Render(s.Host.FQDN))
|
||||
|
||||
if s.Error != nil {
|
||||
cli.Print(" %s %s\n", cli.ErrorStyle.Render("✗"), s.Error)
|
||||
return
|
||||
}
|
||||
|
||||
if !s.Connected {
|
||||
cli.Print(" %s SSH unreachable\n", cli.ErrorStyle.Render("✗"))
|
||||
return
|
||||
}
|
||||
|
||||
// Connection info
|
||||
cli.Print(" %s SSH %s",
|
||||
cli.SuccessStyle.Render("✓"),
|
||||
cli.DimStyle.Render(s.ConnTime.Round(time.Millisecond).String()))
|
||||
if s.OS != "" {
|
||||
cli.Print(" %s", cli.DimStyle.Render(s.OS))
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
if s.Docker != "" {
|
||||
cli.Print(" %s %s\n", cli.SuccessStyle.Render("✓"), cli.DimStyle.Render(s.Docker))
|
||||
}
|
||||
|
||||
// Services
|
||||
for _, svc := range s.Host.Services {
|
||||
status, ok := s.Services[svc]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
icon := cli.SuccessStyle.Render("●")
|
||||
style := cli.SuccessStyle
|
||||
if status == "not running" {
|
||||
icon = cli.ErrorStyle.Render("○")
|
||||
style = cli.ErrorStyle
|
||||
}
|
||||
|
||||
cli.Print(" %s %s %s\n", icon, svc, style.Render(status))
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
func checkLoadBalancer(ctx context.Context, token string) {
|
||||
hc := infra.NewHCloudClient(token)
|
||||
lbs, err := hc.ListLoadBalancers(ctx)
|
||||
if err != nil {
|
||||
cli.Print(" %s Load balancer: %s\n", cli.ErrorStyle.Render("✗"), err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(lbs) == 0 {
|
||||
cli.Print(" %s No load balancers found\n", cli.DimStyle.Render("○"))
|
||||
return
|
||||
}
|
||||
|
||||
for _, lb := range lbs {
|
||||
cli.Print(" %s LB: %s IP: %s Targets: %d\n",
|
||||
cli.SuccessStyle.Render("●"),
|
||||
cli.BoldStyle.Render(lb.Name),
|
||||
lb.PublicNet.IPv4.IP,
|
||||
len(lb.Targets))
|
||||
|
||||
for _, t := range lb.Targets {
|
||||
for _, hs := range t.HealthStatus {
|
||||
icon := cli.SuccessStyle.Render("●")
|
||||
if hs.Status != "healthy" {
|
||||
icon = cli.ErrorStyle.Render("○")
|
||||
}
|
||||
ip := ""
|
||||
if t.IP != nil {
|
||||
ip = t.IP.IP
|
||||
}
|
||||
cli.Print(" %s :%d %s %s\n", icon, hs.ListenPort, hs.Status, cli.DimStyle.Render(ip))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func loadConfig() (*infra.Config, string, error) {
|
||||
if infraFile != "" {
|
||||
cfg, err := infra.Load(infraFile)
|
||||
return cfg, infraFile, err
|
||||
}
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return infra.Discover(cwd)
|
||||
}
|
||||
|
|
@ -1,354 +0,0 @@
|
|||
// cmd_docblock.go implements docblock/docstring coverage checking for Go code.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// core qa docblock # Check current directory
|
||||
// core qa docblock ./pkg/... # Check specific packages
|
||||
// core qa docblock --threshold=80 # Require 80% coverage
|
||||
package qa
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
)
|
||||
|
||||
// Docblock command flags
|
||||
var (
|
||||
docblockThreshold float64
|
||||
docblockVerbose bool
|
||||
docblockJSON bool
|
||||
)
|
||||
|
||||
// addDocblockCommand adds the 'docblock' command to qa.
|
||||
func addDocblockCommand(parent *cli.Command) {
|
||||
docblockCmd := &cli.Command{
|
||||
Use: "docblock [packages...]",
|
||||
Short: i18n.T("cmd.qa.docblock.short"),
|
||||
Long: i18n.T("cmd.qa.docblock.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
paths := args
|
||||
if len(paths) == 0 {
|
||||
paths = []string{"./..."}
|
||||
}
|
||||
return RunDocblockCheck(paths, docblockThreshold, docblockVerbose, docblockJSON)
|
||||
},
|
||||
}
|
||||
|
||||
docblockCmd.Flags().Float64Var(&docblockThreshold, "threshold", 80, i18n.T("cmd.qa.docblock.flag.threshold"))
|
||||
docblockCmd.Flags().BoolVarP(&docblockVerbose, "verbose", "v", false, i18n.T("common.flag.verbose"))
|
||||
docblockCmd.Flags().BoolVar(&docblockJSON, "json", false, i18n.T("common.flag.json"))
|
||||
|
||||
parent.AddCommand(docblockCmd)
|
||||
}
|
||||
|
||||
// DocblockResult holds the result of a docblock coverage check.
type DocblockResult struct {
	Coverage   float64           `json:"coverage"`          // documented/total as a percentage
	Threshold  float64           `json:"threshold"`         // required minimum percentage
	Total      int               `json:"total"`             // exported symbols found
	Documented int               `json:"documented"`        // exported symbols with docs
	Missing    []MissingDocblock `json:"missing,omitempty"` // undocumented symbols
	Passed     bool              `json:"passed"`            // Coverage >= Threshold
}

// MissingDocblock represents an exported symbol without documentation.
type MissingDocblock struct {
	File   string `json:"file"`
	Line   int    `json:"line"`
	Name   string `json:"name"`
	Kind   string `json:"kind"` // func, type, const, var
	Reason string `json:"reason,omitempty"`
}
|
||||
|
||||
// RunDocblockCheck checks docblock coverage for the given packages.
|
||||
func RunDocblockCheck(paths []string, threshold float64, verbose, jsonOutput bool) error {
|
||||
result, err := CheckDocblockCoverage(paths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result.Threshold = threshold
|
||||
result.Passed = result.Coverage >= threshold
|
||||
|
||||
if jsonOutput {
|
||||
data, err := json.MarshalIndent(result, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(string(data))
|
||||
if !result.Passed {
|
||||
return cli.Err("docblock coverage %.1f%% below threshold %.1f%%", result.Coverage, threshold)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sort missing by file then line
|
||||
slices.SortFunc(result.Missing, func(a, b MissingDocblock) int {
|
||||
return cmp.Or(
|
||||
cmp.Compare(a.File, b.File),
|
||||
cmp.Compare(a.Line, b.Line),
|
||||
)
|
||||
})
|
||||
|
||||
// Print result
|
||||
if verbose && len(result.Missing) > 0 {
|
||||
cli.Print("%s\n\n", i18n.T("cmd.qa.docblock.missing_docs"))
|
||||
for _, m := range result.Missing {
|
||||
cli.Print(" %s:%d: %s %s\n",
|
||||
dimStyle.Render(m.File),
|
||||
m.Line,
|
||||
dimStyle.Render(m.Kind),
|
||||
m.Name,
|
||||
)
|
||||
}
|
||||
cli.Blank()
|
||||
}
|
||||
|
||||
// Summary
|
||||
coverageStr := fmt.Sprintf("%.1f%%", result.Coverage)
|
||||
thresholdStr := fmt.Sprintf("%.1f%%", threshold)
|
||||
|
||||
if result.Passed {
|
||||
cli.Print("%s %s %s/%s (%s >= %s)\n",
|
||||
successStyle.Render(i18n.T("common.label.success")),
|
||||
i18n.T("cmd.qa.docblock.coverage"),
|
||||
fmt.Sprintf("%d", result.Documented),
|
||||
fmt.Sprintf("%d", result.Total),
|
||||
successStyle.Render(coverageStr),
|
||||
thresholdStr,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
cli.Print("%s %s %s/%s (%s < %s)\n",
|
||||
errorStyle.Render(i18n.T("common.label.error")),
|
||||
i18n.T("cmd.qa.docblock.coverage"),
|
||||
fmt.Sprintf("%d", result.Documented),
|
||||
fmt.Sprintf("%d", result.Total),
|
||||
errorStyle.Render(coverageStr),
|
||||
thresholdStr,
|
||||
)
|
||||
|
||||
// Always show compact file:line list when failing (token-efficient for AI agents)
|
||||
if len(result.Missing) > 0 {
|
||||
cli.Blank()
|
||||
for _, m := range result.Missing {
|
||||
cli.Print("%s:%d\n", m.File, m.Line)
|
||||
}
|
||||
}
|
||||
|
||||
return cli.Err("docblock coverage %.1f%% below threshold %.1f%%", result.Coverage, threshold)
|
||||
}
|
||||
|
||||
// CheckDocblockCoverage analyzes Go packages for docblock coverage.
|
||||
func CheckDocblockCoverage(patterns []string) (*DocblockResult, error) {
|
||||
result := &DocblockResult{}
|
||||
|
||||
// Expand patterns to actual directories
|
||||
dirs, err := expandPatterns(patterns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fset := token.NewFileSet()
|
||||
|
||||
for _, dir := range dirs {
|
||||
pkgs, err := parser.ParseDir(fset, dir, func(fi os.FileInfo) bool {
|
||||
return !strings.HasSuffix(fi.Name(), "_test.go")
|
||||
}, parser.ParseComments)
|
||||
if err != nil {
|
||||
// Log parse errors but continue to check other directories
|
||||
cli.Warnf("failed to parse %s: %v", dir, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, pkg := range pkgs {
|
||||
for filename, file := range pkg.Files {
|
||||
checkFile(fset, filename, file, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if result.Total > 0 {
|
||||
result.Coverage = float64(result.Documented) / float64(result.Total) * 100
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// expandPatterns expands Go package patterns like ./... to actual directories.
|
||||
func expandPatterns(patterns []string) ([]string, error) {
|
||||
var dirs []string
|
||||
seen := make(map[string]bool)
|
||||
|
||||
for _, pattern := range patterns {
|
||||
if strings.HasSuffix(pattern, "/...") {
|
||||
// Recursive pattern
|
||||
base := strings.TrimSuffix(pattern, "/...")
|
||||
if base == "." {
|
||||
base = "."
|
||||
}
|
||||
err := filepath.Walk(base, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil // Skip errors
|
||||
}
|
||||
if !info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
// Skip vendor, testdata, and hidden directories (but not "." itself)
|
||||
name := info.Name()
|
||||
if name == "vendor" || name == "testdata" || (strings.HasPrefix(name, ".") && name != ".") {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
// Check if directory has Go files
|
||||
if hasGoFiles(path) && !seen[path] {
|
||||
dirs = append(dirs, path)
|
||||
seen[path] = true
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// Single directory
|
||||
path := pattern
|
||||
if !seen[path] && hasGoFiles(path) {
|
||||
dirs = append(dirs, path)
|
||||
seen[path] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dirs, nil
|
||||
}
|
||||
|
||||
// hasGoFiles checks if a directory contains Go files.
|
||||
func hasGoFiles(dir string) bool {
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".go") && !strings.HasSuffix(entry.Name(), "_test.go") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkFile analyzes a single parsed file, incrementing result.Total for each
// exported declaration and either result.Documented (when a doc comment is
// present) or result.Missing (when it is not).
//
// Counted symbols: exported functions and methods (methods on unexported
// receiver types are skipped), exported types, and each exported const/var
// name. For grouped declarations, a doc comment on either the GenDecl group
// or the individual spec counts as documentation.
func checkFile(fset *token.FileSet, filename string, file *ast.File, result *DocblockResult) {
	// Make filename relative if possible
	if cwd, err := os.Getwd(); err == nil {
		if rel, err := filepath.Rel(cwd, filename); err == nil {
			filename = rel
		}
	}

	for _, decl := range file.Decls {
		switch d := decl.(type) {
		case *ast.FuncDecl:
			// Skip unexported functions
			if !ast.IsExported(d.Name.Name) {
				continue
			}
			// Skip methods on unexported types
			if d.Recv != nil && len(d.Recv.List) > 0 {
				if recvType := getReceiverTypeName(d.Recv.List[0].Type); recvType != "" && !ast.IsExported(recvType) {
					continue
				}
			}

			result.Total++
			if d.Doc != nil && len(d.Doc.List) > 0 {
				result.Documented++
			} else {
				pos := fset.Position(d.Pos())
				result.Missing = append(result.Missing, MissingDocblock{
					File: filename,
					Line: pos.Line,
					Name: d.Name.Name,
					Kind: "func",
				})
			}

		case *ast.GenDecl:
			for _, spec := range d.Specs {
				switch s := spec.(type) {
				case *ast.TypeSpec:
					if !ast.IsExported(s.Name.Name) {
						continue
					}
					result.Total++
					// Type can have doc on GenDecl or TypeSpec
					if (d.Doc != nil && len(d.Doc.List) > 0) || (s.Doc != nil && len(s.Doc.List) > 0) {
						result.Documented++
					} else {
						pos := fset.Position(s.Pos())
						result.Missing = append(result.Missing, MissingDocblock{
							File: filename,
							Line: pos.Line,
							Name: s.Name.Name,
							Kind: "type",
						})
					}

				case *ast.ValueSpec:
					// Check exported consts and vars
					for _, name := range s.Names {
						if !ast.IsExported(name.Name) {
							continue
						}
						result.Total++
						// Value can have doc on GenDecl or ValueSpec
						if (d.Doc != nil && len(d.Doc.List) > 0) || (s.Doc != nil && len(s.Doc.List) > 0) {
							result.Documented++
						} else {
							pos := fset.Position(name.Pos())
							result.Missing = append(result.Missing, MissingDocblock{
								File: filename,
								Line: pos.Line,
								Name: name.Name,
								Kind: kindFromToken(d.Tok),
							})
						}
					}
				}
			}
		}
	}
}
|
||||
|
||||
// getReceiverTypeName extracts the type name from a method receiver.
|
||||
func getReceiverTypeName(expr ast.Expr) string {
|
||||
switch t := expr.(type) {
|
||||
case *ast.Ident:
|
||||
return t.Name
|
||||
case *ast.StarExpr:
|
||||
return getReceiverTypeName(t.X)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// kindFromToken returns a string representation of the token kind.
|
||||
func kindFromToken(tok token.Token) string {
|
||||
switch tok {
|
||||
case token.CONST:
|
||||
return "const"
|
||||
case token.VAR:
|
||||
return "var"
|
||||
default:
|
||||
return "value"
|
||||
}
|
||||
}
|
||||
|
|
@ -1,290 +0,0 @@
|
|||
// cmd_health.go implements the 'qa health' command for aggregate CI health.
|
||||
//
|
||||
// Usage:
|
||||
// core qa health # Show CI health summary
|
||||
// core qa health --problems # Show only repos with problems
|
||||
|
||||
package qa
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"os/exec"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-scm/repos"
|
||||
)
|
||||
|
||||
// Health command flags
var (
	healthProblems bool   // --problems: show only repos whose status is not "passing"
	healthRegistry string // --registry: explicit registry path (auto-discovered when empty)
)
|
||||
|
||||
// HealthWorkflowRun represents a GitHub Actions workflow run
|
||||
type HealthWorkflowRun struct {
|
||||
Status string `json:"status"`
|
||||
Conclusion string `json:"conclusion"`
|
||||
Name string `json:"name"`
|
||||
HeadSha string `json:"headSha"`
|
||||
UpdatedAt string `json:"updatedAt"`
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
// RepoHealth represents the CI health of a single repo.
type RepoHealth struct {
	Name         string // repository name (without org prefix)
	Status       string // "passing", "failing", "pending", "no_ci", "disabled"
	Message      string // human-readable status detail
	URL          string // link to the latest workflow run, when known
	FailingSince string // when the repo started failing; not populated in this file — TODO confirm callers
}
|
||||
|
||||
// addHealthCommand adds the 'health' subcommand to qa.
|
||||
func addHealthCommand(parent *cli.Command) {
|
||||
healthCmd := &cli.Command{
|
||||
Use: "health",
|
||||
Short: i18n.T("cmd.qa.health.short"),
|
||||
Long: i18n.T("cmd.qa.health.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return runHealth()
|
||||
},
|
||||
}
|
||||
|
||||
healthCmd.Flags().BoolVarP(&healthProblems, "problems", "p", false, i18n.T("cmd.qa.health.flag.problems"))
|
||||
healthCmd.Flags().StringVar(&healthRegistry, "registry", "", i18n.T("common.flag.registry"))
|
||||
|
||||
parent.AddCommand(healthCmd)
|
||||
}
|
||||
|
||||
// runHealth implements 'qa health': it fetches the most recent workflow run
// for every repo in the registry via the GitHub CLI, then prints a summary
// line and the repos grouped by status (problems first).
func runHealth() error {
	// Check gh is available
	if _, err := exec.LookPath("gh"); err != nil {
		return log.E("qa.health", i18n.T("error.gh_not_found"), nil)
	}

	// Load registry: explicit --registry path wins, otherwise auto-discover.
	var reg *repos.Registry
	var err error

	if healthRegistry != "" {
		reg, err = repos.LoadRegistry(io.Local, healthRegistry)
	} else {
		registryPath, findErr := repos.FindRegistry(io.Local)
		if findErr != nil {
			return log.E("qa.health", i18n.T("error.registry_not_found"), nil)
		}
		reg, err = repos.LoadRegistry(io.Local, registryPath)
	}
	if err != nil {
		return log.E("qa.health", "failed to load registry", err)
	}

	// Fetch CI status from all repos, with a single-line progress indicator
	// (ESC[2K clears the terminal line, \r rewinds so the next print overwrites).
	var healthResults []RepoHealth
	repoList := reg.List()

	for i, repo := range repoList {
		cli.Print("\033[2K\r%s %d/%d %s",
			dimStyle.Render(i18n.T("cmd.qa.issues.fetching")),
			i+1, len(repoList), repo.Name)

		health := fetchRepoHealth(reg.Org, repo.Name)
		healthResults = append(healthResults, health)
	}
	cli.Print("\033[2K\r") // Clear progress

	// Sort: problems first, then passing
	slices.SortFunc(healthResults, func(a, b RepoHealth) int {
		return cmp.Compare(healthPriority(a.Status), healthPriority(b.Status))
	})

	// Filter if --problems flag
	if healthProblems {
		var problems []RepoHealth
		for _, h := range healthResults {
			if h.Status != "passing" {
				problems = append(problems, h)
			}
		}
		healthResults = problems
	}

	// Calculate summary.
	// NOTE(review): "passing" is counted over healthResults — the filtered
	// list when --problems is set — but divided by the full repo count, so
	// the percentage reads 0% under --problems. Confirm whether intended.
	passing := 0
	for _, h := range healthResults {
		if h.Status == "passing" {
			passing++
		}
	}
	total := len(repoList)
	percentage := 0
	if total > 0 {
		percentage = (passing * 100) / total
	}

	// Print summary
	cli.Print("%s: %d/%d repos healthy (%d%%)\n\n",
		i18n.T("cmd.qa.health.summary"),
		passing, total, percentage)

	if len(healthResults) == 0 {
		cli.Text(i18n.T("cmd.qa.health.all_healthy"))
		return nil
	}

	// Group by status
	grouped := make(map[string][]RepoHealth)
	for _, h := range healthResults {
		grouped[h.Status] = append(grouped[h.Status], h)
	}

	// Print problems first
	printHealthGroup("failing", grouped["failing"], errorStyle)
	printHealthGroup("pending", grouped["pending"], warningStyle)
	printHealthGroup("no_ci", grouped["no_ci"], dimStyle)
	printHealthGroup("disabled", grouped["disabled"], dimStyle)

	if !healthProblems {
		printHealthGroup("passing", grouped["passing"], successStyle)
	}

	return nil
}
|
||||
|
||||
// fetchRepoHealth queries the latest GitHub Actions run for org/repoName via
// `gh run list` and maps it onto a RepoHealth.
//
// All fetch/parse failures degrade to status "no_ci" rather than an error so
// one broken repo cannot abort the aggregate report.
func fetchRepoHealth(org, repoName string) RepoHealth {
	repoFullName := cli.Sprintf("%s/%s", org, repoName)

	args := []string{
		"run", "list",
		"--repo", repoFullName,
		"--limit", "1",
		"--json", "status,conclusion,name,headSha,updatedAt,url",
	}

	cmd := exec.Command("gh", args...)
	output, err := cmd.Output()
	if err != nil {
		// Check if it's a 404 (no workflows)
		if exitErr, ok := err.(*exec.ExitError); ok {
			stderr := string(exitErr.Stderr)
			if strings.Contains(stderr, "no workflows") || strings.Contains(stderr, "not found") {
				return RepoHealth{
					Name:    repoName,
					Status:  "no_ci",
					Message: i18n.T("cmd.qa.health.no_ci_configured"),
				}
			}
		}
		return RepoHealth{
			Name:    repoName,
			Status:  "no_ci",
			Message: i18n.T("cmd.qa.health.fetch_error"),
		}
	}

	var runs []HealthWorkflowRun
	if err := json.Unmarshal(output, &runs); err != nil {
		return RepoHealth{
			Name:    repoName,
			Status:  "no_ci",
			Message: i18n.T("cmd.qa.health.parse_error"),
		}
	}

	if len(runs) == 0 {
		return RepoHealth{
			Name:    repoName,
			Status:  "no_ci",
			Message: i18n.T("cmd.qa.health.no_ci_configured"),
		}
	}

	run := runs[0]
	health := RepoHealth{
		Name: repoName,
		URL:  run.URL,
	}

	// Map gh's status/conclusion pair onto our coarse status buckets.
	switch run.Status {
	case "completed":
		switch run.Conclusion {
		case "success":
			health.Status = "passing"
			health.Message = i18n.T("cmd.qa.health.passing")
		case "failure":
			health.Status = "failing"
			health.Message = i18n.T("cmd.qa.health.tests_failing")
		case "cancelled":
			health.Status = "pending"
			health.Message = i18n.T("cmd.qa.health.cancelled")
		case "skipped":
			// A skipped run is treated as healthy rather than a failure.
			health.Status = "passing"
			health.Message = i18n.T("cmd.qa.health.skipped")
		default:
			health.Status = "failing"
			health.Message = run.Conclusion
		}
	case "in_progress", "queued", "waiting":
		health.Status = "pending"
		health.Message = i18n.T("cmd.qa.health.running")
	default:
		health.Status = "no_ci"
		health.Message = run.Status
	}

	return health
}
|
||||
|
||||
// healthPriority maps a RepoHealth status to a sort rank; lower ranks sort
// first, putting problem repos at the top. Unknown statuses sort last.
func healthPriority(status string) int {
	ranked := [...]string{"failing", "pending", "no_ci", "disabled", "passing"}
	for rank, s := range ranked {
		if s == status {
			return rank
		}
	}
	return 5
}
|
||||
|
||||
func printHealthGroup(status string, repos []RepoHealth, style *cli.AnsiStyle) {
|
||||
if len(repos) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var label string
|
||||
switch status {
|
||||
case "failing":
|
||||
label = i18n.T("cmd.qa.health.count_failing")
|
||||
case "pending":
|
||||
label = i18n.T("cmd.qa.health.count_pending")
|
||||
case "no_ci":
|
||||
label = i18n.T("cmd.qa.health.count_no_ci")
|
||||
case "disabled":
|
||||
label = i18n.T("cmd.qa.health.count_disabled")
|
||||
case "passing":
|
||||
label = i18n.T("cmd.qa.health.count_passing")
|
||||
}
|
||||
|
||||
cli.Print("%s (%d):\n", style.Render(label), len(repos))
|
||||
for _, repo := range repos {
|
||||
cli.Print(" %s %s\n",
|
||||
cli.RepoStyle.Render(repo.Name),
|
||||
dimStyle.Render(repo.Message))
|
||||
if repo.URL != "" && status == "failing" {
|
||||
cli.Print(" -> %s\n", dimStyle.Render(repo.URL))
|
||||
}
|
||||
}
|
||||
cli.Blank()
|
||||
}
|
||||
|
|
@ -1,395 +0,0 @@
|
|||
// cmd_issues.go implements the 'qa issues' command for intelligent issue triage.
|
||||
//
|
||||
// Usage:
|
||||
// core qa issues # Show prioritised, actionable issues
|
||||
// core qa issues --mine # Show issues assigned to you
|
||||
// core qa issues --triage # Show issues needing triage (no labels/assignee)
|
||||
// core qa issues --blocked # Show blocked issues
|
||||
|
||||
package qa
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"os/exec"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-scm/repos"
|
||||
)
|
||||
|
||||
// Issue command flags
var (
	issuesMine     bool   // --mine: only issues assigned to the current gh user
	issuesTriage   bool   // --triage: only issues with no labels and no assignee
	issuesBlocked  bool   // --blocked: only issues categorised as blocked
	issuesRegistry string // --registry: explicit registry path (auto-discovered when empty)
	issuesLimit    int    // --limit: max issues fetched per repo (default 50)
)
|
||||
|
||||
// Issue represents a GitHub issue with triage metadata.
//
// The JSON-tagged fields mirror the `gh issue list --json` payload; the
// trailing untagged fields are computed locally during triage.
type Issue struct {
	Number    int       `json:"number"`
	Title     string    `json:"title"`
	State     string    `json:"state"`
	Body      string    `json:"body"`
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"updatedAt"`
	// Author is the user who opened the issue.
	Author struct {
		Login string `json:"login"`
	} `json:"author"`
	// Assignees lists users currently assigned to the issue.
	Assignees struct {
		Nodes []struct {
			Login string `json:"login"`
		} `json:"nodes"`
	} `json:"assignees"`
	// Labels lists the issue's label names.
	Labels struct {
		Nodes []struct {
			Name string `json:"name"`
		} `json:"nodes"`
	} `json:"labels"`
	// Comments carries the total count plus per-comment author/timestamps,
	// used to decide whether a reply is owed.
	Comments struct {
		TotalCount int `json:"totalCount"`
		Nodes      []struct {
			Author struct {
				Login string `json:"login"`
			} `json:"author"`
			CreatedAt time.Time `json:"createdAt"`
		} `json:"nodes"`
	} `json:"comments"`
	URL string `json:"url"`

	// Computed fields
	RepoName   string // repository the issue was fetched from
	Priority   int    // Lower = higher priority
	Category   string // "needs_response", "ready", "blocked", "triage"
	ActionHint string // optional one-line suggestion shown under the issue
}
|
||||
|
||||
// addIssuesCommand adds the 'issues' subcommand to qa.
|
||||
func addIssuesCommand(parent *cli.Command) {
|
||||
issuesCmd := &cli.Command{
|
||||
Use: "issues",
|
||||
Short: i18n.T("cmd.qa.issues.short"),
|
||||
Long: i18n.T("cmd.qa.issues.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return runQAIssues()
|
||||
},
|
||||
}
|
||||
|
||||
issuesCmd.Flags().BoolVarP(&issuesMine, "mine", "m", false, i18n.T("cmd.qa.issues.flag.mine"))
|
||||
issuesCmd.Flags().BoolVarP(&issuesTriage, "triage", "t", false, i18n.T("cmd.qa.issues.flag.triage"))
|
||||
issuesCmd.Flags().BoolVarP(&issuesBlocked, "blocked", "b", false, i18n.T("cmd.qa.issues.flag.blocked"))
|
||||
issuesCmd.Flags().StringVar(&issuesRegistry, "registry", "", i18n.T("common.flag.registry"))
|
||||
issuesCmd.Flags().IntVarP(&issuesLimit, "limit", "l", 50, i18n.T("cmd.qa.issues.flag.limit"))
|
||||
|
||||
parent.AddCommand(issuesCmd)
|
||||
}
|
||||
|
||||
// runQAIssues implements 'qa issues': it fetches open issues across every
// repo in the registry, categorises and prioritises them, applies the
// --mine/--triage/--blocked filters, and prints the grouped result.
func runQAIssues() error {
	// Check gh is available
	if _, err := exec.LookPath("gh"); err != nil {
		return log.E("qa.issues", i18n.T("error.gh_not_found"), nil)
	}

	// Load registry: explicit --registry path wins, otherwise auto-discover.
	var reg *repos.Registry
	var err error

	if issuesRegistry != "" {
		reg, err = repos.LoadRegistry(io.Local, issuesRegistry)
	} else {
		registryPath, findErr := repos.FindRegistry(io.Local)
		if findErr != nil {
			return log.E("qa.issues", i18n.T("error.registry_not_found"), nil)
		}
		reg, err = repos.LoadRegistry(io.Local, registryPath)
	}
	if err != nil {
		return log.E("qa.issues", "failed to load registry", err)
	}

	// Fetch issues from all repos, with a single-line progress indicator
	// (ESC[2K clears the terminal line, \r rewinds for the next overwrite).
	var allIssues []Issue
	repoList := reg.List()

	for i, repo := range repoList {
		cli.Print("\033[2K\r%s %d/%d %s",
			dimStyle.Render(i18n.T("cmd.qa.issues.fetching")),
			i+1, len(repoList), repo.Name)

		issues, err := fetchQAIssues(reg.Org, repo.Name, issuesLimit)
		if err != nil {
			continue // Skip repos with errors
		}
		allIssues = append(allIssues, issues...)
	}
	cli.Print("\033[2K\r") // Clear progress

	if len(allIssues) == 0 {
		cli.Text(i18n.T("cmd.qa.issues.no_issues"))
		return nil
	}

	// Categorise and prioritise issues
	categorised := categoriseIssues(allIssues)

	// Filter based on flags (filters narrow the map successively).
	if issuesMine {
		categorised = filterMine(categorised)
	}
	if issuesTriage {
		categorised = filterCategory(categorised, "triage")
	}
	if issuesBlocked {
		categorised = filterCategory(categorised, "blocked")
	}

	// Print categorised issues
	printCategorisedIssues(categorised)

	return nil
}
|
||||
|
||||
func fetchQAIssues(org, repoName string, limit int) ([]Issue, error) {
|
||||
repoFullName := cli.Sprintf("%s/%s", org, repoName)
|
||||
|
||||
args := []string{
|
||||
"issue", "list",
|
||||
"--repo", repoFullName,
|
||||
"--state", "open",
|
||||
"--limit", cli.Sprintf("%d", limit),
|
||||
"--json", "number,title,state,body,createdAt,updatedAt,author,assignees,labels,comments,url",
|
||||
}
|
||||
|
||||
cmd := exec.Command("gh", args...)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []Issue
|
||||
if err := json.Unmarshal(output, &issues); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Tag with repo name
|
||||
for i := range issues {
|
||||
issues[i].RepoName = repoName
|
||||
}
|
||||
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
func categoriseIssues(issues []Issue) map[string][]Issue {
|
||||
result := map[string][]Issue{
|
||||
"needs_response": {},
|
||||
"ready": {},
|
||||
"blocked": {},
|
||||
"triage": {},
|
||||
}
|
||||
|
||||
currentUser := getCurrentUser()
|
||||
|
||||
for i := range issues {
|
||||
issue := &issues[i]
|
||||
categoriseIssue(issue, currentUser)
|
||||
result[issue.Category] = append(result[issue.Category], *issue)
|
||||
}
|
||||
|
||||
// Sort each category by priority
|
||||
for cat := range result {
|
||||
slices.SortFunc(result[cat], func(a, b Issue) int {
|
||||
return cmp.Compare(a.Priority, b.Priority)
|
||||
})
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// categoriseIssue assigns Category, Priority and ActionHint on issue.
//
// Precedence — the first matching rule wins:
//  1. blocked        — any label starting with "blocked", or exactly "waiting"
//  2. triage         — no labels and no assignees
//  3. needs_response — last comment is by someone other than currentUser and
//     is less than 48 hours old
//  4. ready          — everything else; priority derived from labels
func categoriseIssue(issue *Issue, currentUser string) {
	labels := getLabels(issue)

	// Check if blocked
	for _, l := range labels {
		if strings.HasPrefix(l, "blocked") || l == "waiting" {
			issue.Category = "blocked"
			issue.Priority = 30
			issue.ActionHint = i18n.T("cmd.qa.issues.hint.blocked")
			return
		}
	}

	// Check if needs triage (no labels, no assignee)
	if len(issue.Labels.Nodes) == 0 && len(issue.Assignees.Nodes) == 0 {
		issue.Category = "triage"
		issue.Priority = 20
		issue.ActionHint = i18n.T("cmd.qa.issues.hint.triage")
		return
	}

	// Check if needs response (recent comment from someone else)
	if issue.Comments.TotalCount > 0 && len(issue.Comments.Nodes) > 0 {
		lastComment := issue.Comments.Nodes[len(issue.Comments.Nodes)-1]
		// If last comment is not from current user and is recent
		if lastComment.Author.Login != currentUser {
			age := time.Since(lastComment.CreatedAt)
			if age < 48*time.Hour {
				issue.Category = "needs_response"
				issue.Priority = 10
				issue.ActionHint = cli.Sprintf("@%s %s", lastComment.Author.Login, i18n.T("cmd.qa.issues.hint.needs_response"))
				return
			}
		}
	}

	// Default: ready to work
	issue.Category = "ready"
	issue.Priority = calculatePriority(issue, labels)
	issue.ActionHint = ""
}
|
||||
|
||||
func calculatePriority(issue *Issue, labels []string) int {
|
||||
priority := 50
|
||||
|
||||
// Priority labels
|
||||
for _, l := range labels {
|
||||
switch {
|
||||
case strings.Contains(l, "critical") || strings.Contains(l, "urgent"):
|
||||
priority = 1
|
||||
case strings.Contains(l, "high"):
|
||||
priority = 10
|
||||
case strings.Contains(l, "medium"):
|
||||
priority = 30
|
||||
case strings.Contains(l, "low"):
|
||||
priority = 70
|
||||
case l == "good-first-issue" || l == "good first issue":
|
||||
priority = min(priority, 15) // Boost good first issues
|
||||
case l == "help-wanted" || l == "help wanted":
|
||||
priority = min(priority, 20)
|
||||
case l == "agent:ready" || l == "agentic":
|
||||
priority = min(priority, 5) // AI-ready issues are high priority
|
||||
}
|
||||
}
|
||||
|
||||
return priority
|
||||
}
|
||||
|
||||
func getLabels(issue *Issue) []string {
|
||||
var labels []string
|
||||
for _, l := range issue.Labels.Nodes {
|
||||
labels = append(labels, strings.ToLower(l.Name))
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// getCurrentUser returns the authenticated GitHub login via `gh api user`,
// or "" when the lookup fails.
func getCurrentUser() string {
	out, err := exec.Command("gh", "api", "user", "--jq", ".login").Output()
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(out))
}
|
||||
|
||||
func filterMine(categorised map[string][]Issue) map[string][]Issue {
|
||||
currentUser := getCurrentUser()
|
||||
result := make(map[string][]Issue)
|
||||
|
||||
for cat, issues := range categorised {
|
||||
var filtered []Issue
|
||||
for _, issue := range issues {
|
||||
for _, a := range issue.Assignees.Nodes {
|
||||
if a.Login == currentUser {
|
||||
filtered = append(filtered, issue)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(filtered) > 0 {
|
||||
result[cat] = filtered
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func filterCategory(categorised map[string][]Issue, category string) map[string][]Issue {
|
||||
if issues, ok := categorised[category]; ok && len(issues) > 0 {
|
||||
return map[string][]Issue{category: issues}
|
||||
}
|
||||
return map[string][]Issue{}
|
||||
}
|
||||
|
||||
func printCategorisedIssues(categorised map[string][]Issue) {
|
||||
// Print in order: needs_response, ready, blocked, triage
|
||||
categories := []struct {
|
||||
key string
|
||||
title string
|
||||
style *cli.AnsiStyle
|
||||
}{
|
||||
{"needs_response", i18n.T("cmd.qa.issues.category.needs_response"), warningStyle},
|
||||
{"ready", i18n.T("cmd.qa.issues.category.ready"), successStyle},
|
||||
{"blocked", i18n.T("cmd.qa.issues.category.blocked"), errorStyle},
|
||||
{"triage", i18n.T("cmd.qa.issues.category.triage"), dimStyle},
|
||||
}
|
||||
|
||||
first := true
|
||||
for _, cat := range categories {
|
||||
issues := categorised[cat.key]
|
||||
if len(issues) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if !first {
|
||||
cli.Blank()
|
||||
}
|
||||
first = false
|
||||
|
||||
cli.Print("%s (%d):\n", cat.style.Render(cat.title), len(issues))
|
||||
|
||||
for _, issue := range issues {
|
||||
printTriagedIssue(issue)
|
||||
}
|
||||
}
|
||||
|
||||
if first {
|
||||
cli.Text(i18n.T("cmd.qa.issues.no_issues"))
|
||||
}
|
||||
}
|
||||
|
||||
func printTriagedIssue(issue Issue) {
|
||||
// #42 [core-bio] Fix avatar upload
|
||||
num := cli.TitleStyle.Render(cli.Sprintf("#%d", issue.Number))
|
||||
repo := dimStyle.Render(cli.Sprintf("[%s]", issue.RepoName))
|
||||
title := cli.ValueStyle.Render(truncate(issue.Title, 50))
|
||||
|
||||
cli.Print(" %s %s %s", num, repo, title)
|
||||
|
||||
// Add labels if priority-related
|
||||
var importantLabels []string
|
||||
for _, l := range issue.Labels.Nodes {
|
||||
name := strings.ToLower(l.Name)
|
||||
if strings.Contains(name, "priority") || strings.Contains(name, "critical") ||
|
||||
name == "good-first-issue" || name == "agent:ready" || name == "agentic" {
|
||||
importantLabels = append(importantLabels, l.Name)
|
||||
}
|
||||
}
|
||||
if len(importantLabels) > 0 {
|
||||
cli.Print(" %s", warningStyle.Render("["+strings.Join(importantLabels, ", ")+"]"))
|
||||
}
|
||||
|
||||
// Add age
|
||||
age := cli.FormatAge(issue.UpdatedAt)
|
||||
cli.Print(" %s\n", dimStyle.Render(age))
|
||||
|
||||
// Add action hint if present
|
||||
if issue.ActionHint != "" {
|
||||
cli.Print(" %s %s\n", dimStyle.Render("->"), issue.ActionHint)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,45 +0,0 @@
|
|||
// Package qa provides quality assurance workflow commands.
|
||||
//
|
||||
// Unlike `core dev` which is about doing work (commit, push, pull),
|
||||
// `core qa` is about verifying work (CI status, reviews, issues).
|
||||
//
|
||||
// Commands:
|
||||
// - watch: Monitor GitHub Actions after a push, report actionable data
|
||||
// - review: PR review status with actionable next steps
|
||||
// - health: Aggregate CI health across all repos
|
||||
// - issues: Intelligent issue triage
|
||||
package qa
|
||||
|
||||
import (
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
)
|
||||
|
||||
// init registers the qa command tree with the CLI at program start-up.
func init() {
	cli.RegisterCommands(AddQACommands)
}
|
||||
|
||||
// Style aliases from shared package
var (
	successStyle = cli.SuccessStyle // used for passing/healthy groups
	errorStyle   = cli.ErrorStyle   // used for failing/blocked groups
	warningStyle = cli.WarningStyle // used for pending/needs-attention output
	dimStyle     = cli.DimStyle     // used for metadata, hints and progress text
)
|
||||
|
||||
// AddQACommands registers the 'qa' command and all subcommands.
|
||||
func AddQACommands(root *cli.Command) {
|
||||
qaCmd := &cli.Command{
|
||||
Use: "qa",
|
||||
Short: i18n.T("cmd.qa.short"),
|
||||
Long: i18n.T("cmd.qa.long"),
|
||||
}
|
||||
root.AddCommand(qaCmd)
|
||||
|
||||
// Subcommands
|
||||
addWatchCommand(qaCmd)
|
||||
addReviewCommand(qaCmd)
|
||||
addHealthCommand(qaCmd)
|
||||
addIssuesCommand(qaCmd)
|
||||
addDocblockCommand(qaCmd)
|
||||
}
|
||||
|
|
@ -1,322 +0,0 @@
|
|||
// cmd_review.go implements the 'qa review' command for PR review status.
|
||||
//
|
||||
// Usage:
|
||||
// core qa review # Show all PRs needing attention
|
||||
// core qa review --mine # Show status of your open PRs
|
||||
// core qa review --requested # Show PRs you need to review
|
||||
|
||||
package qa
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-log"
|
||||
)
|
||||
|
||||
// Review command flags
var (
	reviewMine      bool   // --mine: show the status of your own open PRs
	reviewRequested bool   // --requested: show PRs awaiting your review
	reviewRepo      string // --repo: explicit repo instead of detecting from git
)
|
||||
|
||||
// PullRequest represents a GitHub pull request as returned by the gh CLI's
// JSON output, including CI rollup, review requests and submitted reviews.
type PullRequest struct {
	Number         int                `json:"number"`
	Title          string             `json:"title"`
	Author         Author             `json:"author"`
	State          string             `json:"state"`
	IsDraft        bool               `json:"isDraft"`
	Mergeable      string             `json:"mergeable"`      // merge-conflict status reported by GitHub
	ReviewDecision string             `json:"reviewDecision"` // overall review outcome (e.g. approval state)
	URL            string             `json:"url"`
	HeadRefName    string             `json:"headRefName"` // source branch name
	CreatedAt      time.Time          `json:"createdAt"`
	UpdatedAt      time.Time          `json:"updatedAt"`
	Additions      int                `json:"additions"`    // lines added
	Deletions      int                `json:"deletions"`    // lines removed
	ChangedFiles   int                `json:"changedFiles"` // files touched
	StatusChecks   *StatusCheckRollup `json:"statusCheckRollup"` // nil when no checks reported
	ReviewRequests ReviewRequests     `json:"reviewRequests"`
	Reviews        []Review           `json:"reviews"`
}
|
||||
|
||||
// Author represents a GitHub user, identified by login name.
type Author struct {
	Login string `json:"login"`
}
|
||||
|
||||
// StatusCheckRollup contains CI check status — one context per check.
type StatusCheckRollup struct {
	Contexts []StatusContext `json:"contexts"`
}
|
||||
|
||||
// StatusContext represents a single CI check within a rollup.
type StatusContext struct {
	State      string `json:"state"`      // current state of the check
	Conclusion string `json:"conclusion"` // final outcome once the check completes
	Name       string `json:"name"`       // check name
}
|
||||
|
||||
// ReviewRequests contains pending review requests
|
||||
type ReviewRequests struct {
|
||||
Nodes []ReviewRequest `json:"nodes"`
|
||||
}
|
||||
|
||||
// ReviewRequest represents a review request
|
||||
type ReviewRequest struct {
|
||||
RequestedReviewer Author `json:"requestedReviewer"`
|
||||
}
|
||||
|
||||
// Review represents a PR review
|
||||
type Review struct {
|
||||
Author Author `json:"author"`
|
||||
State string `json:"state"`
|
||||
}
|
||||
|
||||
// addReviewCommand adds the 'review' subcommand to the qa command.
func addReviewCommand(parent *cli.Command) {
	reviewCmd := &cli.Command{
		Use:   "review",
		Short: i18n.T("cmd.qa.review.short"),
		Long:  i18n.T("cmd.qa.review.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			return runReview()
		},
	}

	// Flags are bound to the package-level vars read by runReview.
	reviewCmd.Flags().BoolVarP(&reviewMine, "mine", "m", false, i18n.T("cmd.qa.review.flag.mine"))
	reviewCmd.Flags().BoolVarP(&reviewRequested, "requested", "r", false, i18n.T("cmd.qa.review.flag.requested"))
	reviewCmd.Flags().StringVar(&reviewRepo, "repo", "", i18n.T("cmd.qa.review.flag.repo"))

	parent.AddCommand(reviewCmd)
}
|
||||
|
||||
func runReview() error {
|
||||
// Check gh is available
|
||||
if _, err := exec.LookPath("gh"); err != nil {
|
||||
return log.E("qa.review", i18n.T("error.gh_not_found"), nil)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Determine repo
|
||||
repoFullName := reviewRepo
|
||||
if repoFullName == "" {
|
||||
var err error
|
||||
repoFullName, err = detectRepoFromGit()
|
||||
if err != nil {
|
||||
return log.E("qa.review", i18n.T("cmd.qa.review.error.no_repo"), nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Default: show both mine and requested if neither flag is set
|
||||
showMine := reviewMine || (!reviewMine && !reviewRequested)
|
||||
showRequested := reviewRequested || (!reviewMine && !reviewRequested)
|
||||
|
||||
if showMine {
|
||||
if err := showMyPRs(ctx, repoFullName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if showRequested {
|
||||
if showMine {
|
||||
cli.Blank()
|
||||
}
|
||||
if err := showRequestedReviews(ctx, repoFullName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// showMyPRs shows the user's open PRs with status
|
||||
func showMyPRs(ctx context.Context, repo string) error {
|
||||
prs, err := fetchPRs(ctx, repo, "author:@me")
|
||||
if err != nil {
|
||||
return log.E("qa.review", "failed to fetch your PRs", err)
|
||||
}
|
||||
|
||||
if len(prs) == 0 {
|
||||
cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.qa.review.no_prs")))
|
||||
return nil
|
||||
}
|
||||
|
||||
cli.Print("%s (%d):\n", i18n.T("cmd.qa.review.your_prs"), len(prs))
|
||||
|
||||
for _, pr := range prs {
|
||||
printPRStatus(pr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// showRequestedReviews shows PRs where the current user's review is requested,
// printing size stats and a checkout hint for each.
func showRequestedReviews(ctx context.Context, repo string) error {
	prs, err := fetchPRs(ctx, repo, "review-requested:@me")
	if err != nil {
		return log.E("qa.review", "failed to fetch review requests", err)
	}

	// Nothing pending: print the dimmed placeholder and stop.
	if len(prs) == 0 {
		cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.qa.review.no_reviews")))
		return nil
	}

	cli.Print("%s (%d):\n", i18n.T("cmd.qa.review.review_requested"), len(prs))

	for _, pr := range prs {
		printPRForReview(pr)
	}

	return nil
}
|
||||
|
||||
// fetchPRs fetches PRs matching the search query
|
||||
func fetchPRs(ctx context.Context, repo, search string) ([]PullRequest, error) {
|
||||
args := []string{
|
||||
"pr", "list",
|
||||
"--state", "open",
|
||||
"--search", search,
|
||||
"--json", "number,title,author,state,isDraft,mergeable,reviewDecision,url,headRefName,createdAt,updatedAt,additions,deletions,changedFiles,statusCheckRollup,reviewRequests,reviews",
|
||||
}
|
||||
|
||||
if repo != "" {
|
||||
args = append(args, "--repo", repo)
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "gh", args...)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return nil, fmt.Errorf("%s", strings.TrimSpace(string(exitErr.Stderr)))
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var prs []PullRequest
|
||||
if err := json.Unmarshal(output, &prs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return prs, nil
|
||||
}
|
||||
|
||||
// printPRStatus prints one PR line with a status glyph, followed by an
// optional suggested next action on a second, dimmed line.
func printPRStatus(pr PullRequest) {
	// Determine status icon and color
	status, style, action := analyzePRStatus(pr)

	cli.Print(" %s #%d %s\n",
		style.Render(status),
		pr.Number,
		truncate(pr.Title, 50))

	if action != "" {
		cli.Print(" %s %s\n", dimStyle.Render("->"), action)
	}
}
|
||||
|
||||
// printPRForReview prints a PR that needs the user's review: title line,
// author plus diff-size stats, and a `gh pr checkout` hint.
func printPRForReview(pr PullRequest) {
	// Show PR info with stats
	stats := fmt.Sprintf("+%d/-%d, %d files",
		pr.Additions, pr.Deletions, pr.ChangedFiles)

	cli.Print(" %s #%d %s\n",
		warningStyle.Render("◯"),
		pr.Number,
		truncate(pr.Title, 50))
	cli.Print(" %s @%s, %s\n",
		dimStyle.Render("->"),
		pr.Author.Login,
		stats)
	cli.Print(" %s gh pr checkout %d\n",
		dimStyle.Render("->"),
		pr.Number)
}
|
||||
|
||||
// analyzePRStatus determines the status glyph, display style, and suggested
// next action for a PR. Conditions are evaluated in priority order:
// draft, merge conflicts, CI failure, requested changes, CI pending,
// awaiting review, ready to merge.
func analyzePRStatus(pr PullRequest) (status string, style *cli.AnsiStyle, action string) {
	// Check if draft
	if pr.IsDraft {
		return "◯", dimStyle, "Draft - convert to ready when done"
	}

	// Check CI status.
	// NOTE(review): conclusions other than FAILURE/PENDING (e.g. cancelled,
	// skipped) fall through and leave ciPassed true - confirm that is intended.
	ciPassed := true
	ciFailed := false
	ciPending := false
	var failedCheck string

	if pr.StatusChecks != nil {
		for _, check := range pr.StatusChecks.Contexts {
			switch check.Conclusion {
			case "FAILURE", "failure":
				ciFailed = true
				ciPassed = false
				// Remember only the first failing check for the action text.
				if failedCheck == "" {
					failedCheck = check.Name
				}
			case "PENDING", "pending", "":
				// An empty conclusion only counts as pending while the
				// check's state is also pending/unset.
				if check.State == "PENDING" || check.State == "" {
					ciPending = true
					ciPassed = false
				}
			}
		}
	}

	// Check review status
	approved := pr.ReviewDecision == "APPROVED"
	changesRequested := pr.ReviewDecision == "CHANGES_REQUESTED"

	// Check mergeable status
	hasConflicts := pr.Mergeable == "CONFLICTING"

	// Determine overall status
	if hasConflicts {
		return "✗", errorStyle, "Needs rebase - has merge conflicts"
	}

	if ciFailed {
		return "✗", errorStyle, fmt.Sprintf("CI failed: %s", failedCheck)
	}

	if changesRequested {
		return "✗", warningStyle, "Changes requested - address review feedback"
	}

	if ciPending {
		return "◯", warningStyle, "CI running..."
	}

	// A non-empty, non-approved decision means review is still outstanding.
	if !approved && pr.ReviewDecision != "" {
		return "◯", warningStyle, "Awaiting review"
	}

	if approved && ciPassed {
		return "✓", successStyle, "Ready to merge"
	}

	// No decisive signal: neutral glyph, no suggested action.
	return "◯", dimStyle, ""
}
|
||||
|
||||
// truncate shortens s to at most max runes, appending "..." when trimmed.
// It is rune-safe for UTF-8 and tolerates max values too small to hold the
// ellipsis (previously runes[:max-3] panicked for max <= 3 via a negative
// slice index).
func truncate(s string, max int) string {
	runes := []rune(s)
	if len(runes) <= max {
		return s
	}
	if max <= 3 {
		// Not enough room for the ellipsis; return a bare prefix instead
		// of panicking on a negative slice bound.
		if max < 0 {
			max = 0
		}
		return string(runes[:max])
	}
	return string(runes[:max-3]) + "..."
}
|
||||
|
|
@ -1,444 +0,0 @@
|
|||
// cmd_watch.go implements the 'qa watch' command for monitoring GitHub Actions.
|
||||
//
|
||||
// Usage:
|
||||
// core qa watch # Watch current repo's latest push
|
||||
// core qa watch --repo X # Watch specific repo
|
||||
// core qa watch --commit SHA # Watch specific commit
|
||||
// core qa watch --timeout 5m # Custom timeout (default: 10m)
|
||||
|
||||
package qa
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-log"
|
||||
)
|
||||
|
||||
// Watch command flags, bound in addWatchCommand and read by runWatch.
var (
	watchRepo    string        // --repo/-r: owner/name or bare name (org auto-detected)
	watchCommit  string        // --commit/-c: commit SHA; defaults to HEAD of the cwd
	watchTimeout time.Duration // --timeout/-t: overall polling deadline (default 10m)
)
|
||||
|
||||
// WorkflowRun represents a GitHub Actions workflow run as returned by
// `gh run list --json`.
type WorkflowRun struct {
	ID           int64     `json:"databaseId"`
	Name         string    `json:"name"`
	DisplayTitle string    `json:"displayTitle"`
	Status       string    `json:"status"`     // "completed" once finished
	Conclusion   string    `json:"conclusion"` // "success" or a failure-like value
	HeadSha      string    `json:"headSha"`
	URL          string    `json:"url"`
	CreatedAt    time.Time `json:"createdAt"`
	UpdatedAt    time.Time `json:"updatedAt"`
}

// WorkflowJob represents a job within a workflow run.
// NOTE(review): not referenced in this file's visible code - confirm it is
// used elsewhere before removing.
type WorkflowJob struct {
	ID         int64  `json:"databaseId"`
	Name       string `json:"name"`
	Status     string `json:"status"`
	Conclusion string `json:"conclusion"`
	URL        string `json:"url"`
}

// JobStep represents a step within a job.
// NOTE(review): not referenced in this file's visible code - confirm it is
// used elsewhere before removing.
type JobStep struct {
	Name       string `json:"name"`
	Status     string `json:"status"`
	Conclusion string `json:"conclusion"`
	Number     int    `json:"number"`
}
|
||||
|
||||
// addWatchCommand adds the 'watch' subcommand to the qa command.
func addWatchCommand(parent *cli.Command) {
	watchCmd := &cli.Command{
		Use:   "watch",
		Short: i18n.T("cmd.qa.watch.short"),
		Long:  i18n.T("cmd.qa.watch.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			return runWatch()
		},
	}

	// Flags are bound to the package-level vars read by runWatch.
	watchCmd.Flags().StringVarP(&watchRepo, "repo", "r", "", i18n.T("cmd.qa.watch.flag.repo"))
	watchCmd.Flags().StringVarP(&watchCommit, "commit", "c", "", i18n.T("cmd.qa.watch.flag.commit"))
	watchCmd.Flags().DurationVarP(&watchTimeout, "timeout", "t", 10*time.Minute, i18n.T("cmd.qa.watch.flag.timeout"))

	parent.AddCommand(watchCmd)
}
|
||||
|
||||
// runWatch polls GitHub Actions workflow runs for a commit until they all
// complete (or the --timeout elapses), redrawing a single status line in
// place, then prints a per-workflow summary via printResults.
func runWatch() error {
	// Check gh is available
	if _, err := exec.LookPath("gh"); err != nil {
		return log.E("qa.watch", i18n.T("error.gh_not_found"), nil)
	}

	// Determine repo
	repoFullName, err := resolveRepo(watchRepo)
	if err != nil {
		return err
	}

	// Determine commit
	commitSha, err := resolveCommit(watchCommit)
	if err != nil {
		return err
	}

	cli.Print("%s %s\n", dimStyle.Render(i18n.Label("repo")), repoFullName)
	// Safe prefix for display - handle short SHAs gracefully
	shaPrefix := commitSha
	if len(commitSha) > 8 {
		shaPrefix = commitSha[:8]
	}
	cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.qa.watch.commit")), shaPrefix)
	cli.Blank()

	// Create context with timeout for all gh commands
	ctx, cancel := context.WithTimeout(context.Background(), watchTimeout)
	defer cancel()

	// Poll for workflow runs
	pollInterval := 3 * time.Second
	var lastStatus string

	for {
		// Check if context deadline exceeded
		if ctx.Err() != nil {
			cli.Blank()
			return log.E("qa.watch", i18n.T("cmd.qa.watch.timeout", map[string]any{"Duration": watchTimeout}), nil)
		}

		runs, err := fetchWorkflowRunsForCommit(ctx, repoFullName, commitSha)
		if err != nil {
			return log.Wrap(err, "qa.watch", "failed to fetch workflow runs")
		}

		if len(runs) == 0 {
			// No workflows triggered yet, keep waiting.
			// "\033[2K\r" clears the line and returns the cursor so the
			// status redraws in place.
			cli.Print("\033[2K\r%s", dimStyle.Render(i18n.T("cmd.qa.watch.waiting_for_workflows")))
			time.Sleep(pollInterval)
			continue
		}

		// Check status of all runs
		allComplete := true
		var pending, success, failed int
		for _, run := range runs {
			switch run.Status {
			case "completed":
				if run.Conclusion == "success" {
					success++
				} else {
					// Count all non-success conclusions as failed
					// (failure, cancelled, timed_out, action_required, stale, etc.)
					failed++
				}
			default:
				allComplete = false
				pending++
			}
		}

		// Build status line
		status := fmt.Sprintf("%d workflow(s): ", len(runs))
		if pending > 0 {
			status += warningStyle.Render(fmt.Sprintf("%d running", pending))
			if success > 0 || failed > 0 {
				status += ", "
			}
		}
		if success > 0 {
			status += successStyle.Render(fmt.Sprintf("%d passed", success))
			if failed > 0 {
				status += ", "
			}
		}
		if failed > 0 {
			status += errorStyle.Render(fmt.Sprintf("%d failed", failed))
		}

		// Only print if status changed (avoids redraw flicker)
		if status != lastStatus {
			cli.Print("\033[2K\r%s", status)
			lastStatus = status
		}

		if allComplete {
			cli.Blank()
			cli.Blank()
			return printResults(ctx, repoFullName, runs)
		}

		time.Sleep(pollInterval)
	}
}
|
||||
|
||||
// resolveRepo determines the repo to watch. An explicit "owner/name" is used
// verbatim; a bare name is prefixed with the org detected from the current
// directory's git remote; empty falls back to full detection from git.
func resolveRepo(specified string) (string, error) {
	if specified != "" {
		// If it contains /, assume it's already full name
		if strings.Contains(specified, "/") {
			return specified, nil
		}
		// Try to get org from current directory
		org := detectOrgFromGit()
		if org != "" {
			return org + "/" + specified, nil
		}
		return "", log.E("qa.watch", i18n.T("cmd.qa.watch.error.repo_format"), nil)
	}

	// Detect from current directory
	return detectRepoFromGit()
}
|
||||
|
||||
// resolveCommit determines the commit to watch
|
||||
func resolveCommit(specified string) (string, error) {
|
||||
if specified != "" {
|
||||
return specified, nil
|
||||
}
|
||||
|
||||
// Get HEAD commit
|
||||
cmd := exec.Command("git", "rev-parse", "HEAD")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", log.Wrap(err, "qa.watch", "failed to get HEAD commit")
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(output)), nil
|
||||
}
|
||||
|
||||
// detectRepoFromGit detects the "owner/name" repo from the origin remote of
// the git repository in the current working directory.
func detectRepoFromGit() (string, error) {
	cmd := exec.Command("git", "remote", "get-url", "origin")
	output, err := cmd.Output()
	if err != nil {
		return "", log.E("qa.watch", i18n.T("cmd.qa.watch.error.not_git_repo"), nil)
	}

	url := strings.TrimSpace(string(output))
	return parseGitHubRepo(url)
}
|
||||
|
||||
// detectOrgFromGit tries to detect the org from git remote
|
||||
func detectOrgFromGit() string {
|
||||
repo, err := detectRepoFromGit()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
parts := strings.Split(repo, "/")
|
||||
if len(parts) >= 1 {
|
||||
return parts[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// parseGitHubRepo extracts the "org/repo" path from a git remote URL,
// recognising both the SSH form (git@github.com:org/repo.git) and the HTTPS
// form (https://github.com/org/repo.git).
func parseGitHubRepo(url string) (string, error) {
	// SSH form: everything after the colon, minus a trailing ".git".
	const sshPrefix = "git@github.com:"
	if strings.HasPrefix(url, sshPrefix) {
		return strings.TrimSuffix(url[len(sshPrefix):], ".git"), nil
	}

	// HTTPS form: the segment after the first "github.com/",
	// minus a trailing ".git".
	if pieces := strings.Split(url, "github.com/"); len(pieces) >= 2 {
		return strings.TrimSuffix(pieces[1], ".git"), nil
	}

	return "", fmt.Errorf("could not parse GitHub repo from URL: %s", url)
}
|
||||
|
||||
// fetchWorkflowRunsForCommit fetches workflow runs for a specific commit
|
||||
func fetchWorkflowRunsForCommit(ctx context.Context, repoFullName, commitSha string) ([]WorkflowRun, error) {
|
||||
args := []string{
|
||||
"run", "list",
|
||||
"--repo", repoFullName,
|
||||
"--commit", commitSha,
|
||||
"--json", "databaseId,name,displayTitle,status,conclusion,headSha,url,createdAt,updatedAt",
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "gh", args...)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
// Check if context was cancelled/deadline exceeded
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return nil, cli.Err("%s", strings.TrimSpace(string(exitErr.Stderr)))
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var runs []WorkflowRun
|
||||
if err := json.Unmarshal(output, &runs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return runs, nil
|
||||
}
|
||||
|
||||
// printResults prints the final per-workflow results: successes briefly,
// failures with the failing job/step, a first error line, and the run URL.
// It returns a non-nil error when any workflow did not succeed so the
// command exits non-zero.
func printResults(ctx context.Context, repoFullName string, runs []WorkflowRun) error {
	var failures []WorkflowRun
	var successes []WorkflowRun

	for _, run := range runs {
		if run.Conclusion == "success" {
			successes = append(successes, run)
		} else {
			// Treat all non-success as failures (failure, cancelled, timed_out, etc.)
			failures = append(failures, run)
		}
	}

	// Print successes briefly
	for _, run := range successes {
		cli.Print("%s %s\n", successStyle.Render(cli.Glyph(":check:")), run.Name)
	}

	// Print failures with details
	for _, run := range failures {
		cli.Print("%s %s\n", errorStyle.Render(cli.Glyph(":cross:")), run.Name)

		// Fetch failed job details (best-effort; empty strings when unavailable)
		failedJob, failedStep, errorLine := fetchFailureDetails(ctx, repoFullName, run.ID)
		if failedJob != "" {
			cli.Print(" %s Job: %s", dimStyle.Render("->"), failedJob)
			if failedStep != "" {
				cli.Print(" (step: %s)", failedStep)
			}
			cli.Blank()
		}
		if errorLine != "" {
			cli.Print(" %s Error: %s\n", dimStyle.Render("->"), errorLine)
		}
		cli.Print(" %s %s\n", dimStyle.Render("->"), run.URL)
	}

	// Exit with error if any failures
	if len(failures) > 0 {
		cli.Blank()
		return cli.Err("%s", i18n.T("cmd.qa.watch.workflows_failed", map[string]any{"Count": len(failures)}))
	}

	cli.Blank()
	cli.Print("%s\n", successStyle.Render(i18n.T("cmd.qa.watch.all_passed")))
	return nil
}
|
||||
|
||||
// fetchFailureDetails fetches details about why a workflow failed: the first
// failed job's name, its first failed step, and a representative error line
// from the logs. All results are best-effort; failures to fetch return
// empty strings rather than an error.
func fetchFailureDetails(ctx context.Context, repoFullName string, runID int64) (jobName, stepName, errorLine string) {
	// Fetch jobs for this run
	args := []string{
		"run", "view", fmt.Sprintf("%d", runID),
		"--repo", repoFullName,
		"--json", "jobs",
	}

	cmd := exec.CommandContext(ctx, "gh", args...)
	output, err := cmd.Output()
	if err != nil {
		return "", "", ""
	}

	// Anonymous struct mirroring the subset of gh's JSON we need.
	var result struct {
		Jobs []struct {
			Name       string `json:"name"`
			Conclusion string `json:"conclusion"`
			Steps      []struct {
				Name       string `json:"name"`
				Conclusion string `json:"conclusion"`
				Number     int    `json:"number"`
			} `json:"steps"`
		} `json:"jobs"`
	}

	if err := json.Unmarshal(output, &result); err != nil {
		return "", "", ""
	}

	// Find the first failed job and its first failed step.
	for _, job := range result.Jobs {
		if job.Conclusion == "failure" {
			jobName = job.Name
			for _, step := range job.Steps {
				if step.Conclusion == "failure" {
					stepName = fmt.Sprintf("%d: %s", step.Number, step.Name)
					break
				}
			}
			break
		}
	}

	// Try to get the error line from logs (if available)
	errorLine = fetchErrorFromLogs(ctx, repoFullName, runID)

	return jobName, stepName, errorLine
}
|
||||
|
||||
// fetchErrorFromLogs attempts to extract the first meaningful error line from
// a run's failed-step logs (`gh run view --log-failed`). It skips blank lines,
// GitHub Actions command markers, and "Run ..." echo lines, then returns the
// first line that looks like an error. Best-effort: returns "" on any failure.
//
// Fix: long lines are now truncated rune-safely; the previous byte slice
// line[:117] could split a multi-byte UTF-8 character mid-sequence
// (the sibling truncate helper is already rune-safe).
func fetchErrorFromLogs(ctx context.Context, repoFullName string, runID int64) string {
	// Use gh run view --log-failed to get failed step logs
	args := []string{
		"run", "view", fmt.Sprintf("%d", runID),
		"--repo", repoFullName,
		"--log-failed",
	}

	cmd := exec.CommandContext(ctx, "gh", args...)
	output, err := cmd.Output()
	if err != nil {
		return ""
	}

	// Parse output to find the first meaningful error line
	lines := strings.Split(string(output), "\n")
	for _, line := range lines {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}

		// Skip common metadata/progress lines
		lower := strings.ToLower(line)
		if strings.HasPrefix(lower, "##[") { // GitHub Actions command markers
			continue
		}
		if strings.HasPrefix(line, "Run ") || strings.HasPrefix(line, "Running ") {
			continue
		}

		// Look for error indicators
		if strings.Contains(lower, "error") ||
			strings.Contains(lower, "failed") ||
			strings.Contains(lower, "fatal") ||
			strings.Contains(lower, "panic") ||
			strings.Contains(line, ": ") { // Likely a file:line or key: value format
			// Truncate long lines rune-safely so multi-byte UTF-8
			// characters are never split mid-sequence.
			if runes := []rune(line); len(runes) > 120 {
				line = string(runes[:117]) + "..."
			}
			return line
		}
	}

	return ""
}
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
// Package vm provides LinuxKit virtual machine management commands.
|
||||
//
|
||||
// Commands:
|
||||
// - run: Run a VM from image (.iso, .qcow2, .vmdk, .raw) or template
|
||||
// - ps: List running VMs
|
||||
// - stop: Stop a running VM
|
||||
// - logs: View VM logs
|
||||
// - exec: Execute command in VM via SSH
|
||||
// - templates: Manage LinuxKit templates (list, build)
|
||||
//
|
||||
// Uses qemu or hyperkit depending on system availability.
|
||||
// Templates are built from YAML definitions and can include variables.
|
||||
package vm
|
||||
|
|
@ -1,345 +0,0 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
goio "io"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-container"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Flags for the 'vm run' command, bound in addVMRunCommand.
var (
	runName         string   // --name: friendly VM name
	runDetach       bool     // --detach/-d: run in the background
	runMemory       int      // --memory: RAM amount (0 = manager default) - unit presumed MB, TODO confirm
	runCPUs         int      // --cpus: CPU count (0 = manager default)
	runSSHPort      int      // --ssh-port: host port forwarded to the VM's SSH
	runTemplateName string   // --template: build and run from a LinuxKit template instead of an image
	runVarFlags     []string // --var: template variables (repeatable)
)
|
||||
|
||||
// addVMRunCommand adds the 'run' command under vm. A VM can be started either
// from an image path argument or from a --template (with optional --var
// key=value pairs); the template path takes precedence when both are given.
func addVMRunCommand(parent *cli.Command) {
	runCmd := &cli.Command{
		Use:   "run [image]",
		Short: i18n.T("cmd.vm.run.short"),
		Long:  i18n.T("cmd.vm.run.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			opts := container.RunOptions{
				Name:    runName,
				Detach:  runDetach,
				Memory:  runMemory,
				CPUs:    runCPUs,
				SSHPort: runSSHPort,
			}

			// If template is specified, build and run from template
			if runTemplateName != "" {
				vars := ParseVarFlags(runVarFlags)
				return RunFromTemplate(runTemplateName, vars, opts)
			}

			// Otherwise, require an image path
			if len(args) == 0 {
				return errors.New(i18n.T("cmd.vm.run.error.image_required"))
			}
			image := args[0]

			return runContainer(image, runName, runDetach, runMemory, runCPUs, runSSHPort)
		},
	}

	runCmd.Flags().StringVar(&runName, "name", "", i18n.T("cmd.vm.run.flag.name"))
	runCmd.Flags().BoolVarP(&runDetach, "detach", "d", false, i18n.T("cmd.vm.run.flag.detach"))
	runCmd.Flags().IntVar(&runMemory, "memory", 0, i18n.T("cmd.vm.run.flag.memory"))
	runCmd.Flags().IntVar(&runCPUs, "cpus", 0, i18n.T("cmd.vm.run.flag.cpus"))
	runCmd.Flags().IntVar(&runSSHPort, "ssh-port", 0, i18n.T("cmd.vm.run.flag.ssh_port"))
	runCmd.Flags().StringVar(&runTemplateName, "template", "", i18n.T("cmd.vm.run.flag.template"))
	runCmd.Flags().StringArrayVar(&runVarFlags, "var", nil, i18n.T("cmd.vm.run.flag.var"))

	parent.AddCommand(runCmd)
}
|
||||
|
||||
// runContainer starts a VM from the given image via the LinuxKit manager,
// printing progress and (when detached) follow-up hints for logs/stop.
func runContainer(image, name string, detach bool, memory, cpus, sshPort int) error {
	manager, err := container.NewLinuxKitManager(io.Local)
	if err != nil {
		return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
	}

	opts := container.RunOptions{
		Name:    name,
		Detach:  detach,
		Memory:  memory,
		CPUs:    cpus,
		SSHPort: sshPort,
	}

	fmt.Printf("%s %s\n", dimStyle.Render(i18n.Label("image")), image)
	if name != "" {
		fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.name")), name)
	}
	fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.hypervisor")), manager.Hypervisor().Name())
	fmt.Println()

	ctx := context.Background()
	c, err := manager.Run(ctx, image, opts)
	if err != nil {
		return fmt.Errorf(i18n.T("i18n.fail.run", "container")+": %w", err)
	}

	if detach {
		fmt.Printf("%s %s\n", successStyle.Render(i18n.Label("started")), c.ID)
		fmt.Printf("%s %d\n", dimStyle.Render(i18n.T("cmd.vm.label.pid")), c.PID)
		fmt.Println()
		// NOTE(review): c.ID[:8] assumes IDs are always at least 8 chars - confirm.
		fmt.Println(i18n.T("cmd.vm.hint.view_logs", map[string]any{"ID": c.ID[:8]}))
		fmt.Println(i18n.T("cmd.vm.hint.stop", map[string]any{"ID": c.ID[:8]}))
	} else {
		// Foreground run: Run returned, so the VM has already exited.
		fmt.Printf("\n%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.container_stopped")), c.ID)
	}

	return nil
}
|
||||
|
||||
// psAll controls whether 'vm ps' includes non-running containers (--all/-a).
var psAll bool

// addVMPsCommand adds the 'ps' command under vm.
func addVMPsCommand(parent *cli.Command) {
	psCmd := &cli.Command{
		Use:   "ps",
		Short: i18n.T("cmd.vm.ps.short"),
		Long:  i18n.T("cmd.vm.ps.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			return listContainers(psAll)
		},
	}

	psCmd.Flags().BoolVarP(&psAll, "all", "a", false, i18n.T("cmd.vm.ps.flag.all"))

	parent.AddCommand(psCmd)
}
|
||||
|
||||
// listContainers prints a table of VMs (ID, name, image, status, uptime,
// PID). By default only running containers are shown; all=true includes
// stopped and errored ones.
func listContainers(all bool) error {
	manager, err := container.NewLinuxKitManager(io.Local)
	if err != nil {
		return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
	}

	ctx := context.Background()
	containers, err := manager.List(ctx)
	if err != nil {
		return fmt.Errorf(i18n.T("i18n.fail.list", "containers")+": %w", err)
	}

	// Filter if not showing all
	if !all {
		filtered := make([]*container.Container, 0)
		for _, c := range containers {
			if c.Status == container.StatusRunning {
				filtered = append(filtered, c)
			}
		}
		containers = filtered
	}

	if len(containers) == 0 {
		// Distinguish "none exist" from "none running" in the message.
		if all {
			fmt.Println(i18n.T("cmd.vm.ps.no_containers"))
		} else {
			fmt.Println(i18n.T("cmd.vm.ps.no_running"))
		}
		return nil
	}

	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	_, _ = fmt.Fprintln(w, i18n.T("cmd.vm.ps.header"))
	_, _ = fmt.Fprintln(w, "--\t----\t-----\t------\t-------\t---")

	for _, c := range containers {
		// Shorten image path (keep the tail, which is the distinctive part)
		imageName := c.Image
		if len(imageName) > 30 {
			imageName = "..." + imageName[len(imageName)-27:]
		}

		// Format duration since start as a single coarse unit
		duration := formatDuration(time.Since(c.StartedAt))

		// Status with color
		status := string(c.Status)
		switch c.Status {
		case container.StatusRunning:
			status = successStyle.Render(status)
		case container.StatusStopped:
			status = dimStyle.Render(status)
		case container.StatusError:
			status = errorStyle.Render(status)
		}

		_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%d\n",
			c.ID[:8], c.Name, imageName, status, duration, c.PID)
	}

	_ = w.Flush()
	return nil
}
|
||||
|
||||
func formatDuration(d time.Duration) string {
|
||||
if d < time.Minute {
|
||||
return fmt.Sprintf("%ds", int(d.Seconds()))
|
||||
}
|
||||
if d < time.Hour {
|
||||
return fmt.Sprintf("%dm", int(d.Minutes()))
|
||||
}
|
||||
if d < 24*time.Hour {
|
||||
return fmt.Sprintf("%dh", int(d.Hours()))
|
||||
}
|
||||
return fmt.Sprintf("%dd", int(d.Hours()/24))
|
||||
}
|
||||
|
||||
// addVMStopCommand adds the 'stop' command under vm.
func addVMStopCommand(parent *cli.Command) {
	stopCmd := &cli.Command{
		Use:   "stop <container-id>",
		Short: i18n.T("cmd.vm.stop.short"),
		Long:  i18n.T("cmd.vm.stop.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			if len(args) == 0 {
				return errors.New(i18n.T("cmd.vm.error.id_required"))
			}
			// Accepts a full or partial container ID/name prefix.
			return stopContainer(args[0])
		},
	}

	parent.AddCommand(stopCmd)
}
|
||||
|
||||
// stopContainer stops the container matching id (full or partial ID/name).
func stopContainer(id string) error {
	manager, err := container.NewLinuxKitManager(io.Local)
	if err != nil {
		return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
	}

	// Support partial ID matching
	fullID, err := resolveContainerID(manager, id)
	if err != nil {
		return err
	}

	// NOTE(review): fullID[:8] assumes IDs are always at least 8 chars - confirm.
	fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.stop.stopping")), fullID[:8])

	ctx := context.Background()
	if err := manager.Stop(ctx, fullID); err != nil {
		return fmt.Errorf(i18n.T("i18n.fail.stop", "container")+": %w", err)
	}

	fmt.Printf("%s\n", successStyle.Render(i18n.T("common.status.stopped")))
	return nil
}
|
||||
|
||||
// resolveContainerID resolves a partial ID to a full ID.
|
||||
func resolveContainerID(manager *container.LinuxKitManager, partialID string) (string, error) {
|
||||
ctx := context.Background()
|
||||
containers, err := manager.List(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var matches []*container.Container
|
||||
for _, c := range containers {
|
||||
if strings.HasPrefix(c.ID, partialID) || strings.HasPrefix(c.Name, partialID) {
|
||||
matches = append(matches, c)
|
||||
}
|
||||
}
|
||||
|
||||
switch len(matches) {
|
||||
case 0:
|
||||
return "", errors.New(i18n.T("cmd.vm.error.no_match", map[string]any{"ID": partialID}))
|
||||
case 1:
|
||||
return matches[0].ID, nil
|
||||
default:
|
||||
return "", errors.New(i18n.T("cmd.vm.error.multiple_match", map[string]any{"ID": partialID}))
|
||||
}
|
||||
}
|
||||
|
||||
// logsFollow controls whether 'vm logs' streams continuously (--follow/-f).
var logsFollow bool

// addVMLogsCommand adds the 'logs' command under vm.
func addVMLogsCommand(parent *cli.Command) {
	logsCmd := &cli.Command{
		Use:   "logs <container-id>",
		Short: i18n.T("cmd.vm.logs.short"),
		Long:  i18n.T("cmd.vm.logs.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			if len(args) == 0 {
				return errors.New(i18n.T("cmd.vm.error.id_required"))
			}
			// Accepts a full or partial container ID/name prefix.
			return viewLogs(args[0], logsFollow)
		},
	}

	logsCmd.Flags().BoolVarP(&logsFollow, "follow", "f", false, i18n.T("common.flag.follow"))

	parent.AddCommand(logsCmd)
}
|
||||
|
||||
func viewLogs(id string, follow bool) error {
|
||||
manager, err := container.NewLinuxKitManager(io.Local)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
|
||||
}
|
||||
|
||||
fullID, err := resolveContainerID(manager, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
reader, err := manager.Logs(ctx, fullID, follow)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.get", "logs")+": %w", err)
|
||||
}
|
||||
defer func() { _ = reader.Close() }()
|
||||
|
||||
_, err = goio.Copy(os.Stdout, reader)
|
||||
return err
|
||||
}
|
||||
|
||||
// addVMExecCommand adds the 'exec' command under vm.
|
||||
func addVMExecCommand(parent *cli.Command) {
|
||||
execCmd := &cli.Command{
|
||||
Use: "exec <container-id> <command> [args...]",
|
||||
Short: i18n.T("cmd.vm.exec.short"),
|
||||
Long: i18n.T("cmd.vm.exec.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return errors.New(i18n.T("cmd.vm.error.id_and_cmd_required"))
|
||||
}
|
||||
return execInContainer(args[0], args[1:])
|
||||
},
|
||||
}
|
||||
|
||||
parent.AddCommand(execCmd)
|
||||
}
|
||||
|
||||
func execInContainer(id string, cmd []string) error {
|
||||
manager, err := container.NewLinuxKitManager(io.Local)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
|
||||
}
|
||||
|
||||
fullID, err := resolveContainerID(manager, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
return manager.Exec(ctx, fullID, cmd)
|
||||
}
|
||||
|
|
@ -1,311 +0,0 @@
|
|||
package vm
|
||||
|
||||
import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"sort"
	"strings"
	"text/tabwriter"

	"forge.lthn.ai/core/cli/pkg/cli"
	"forge.lthn.ai/core/go-container"
	"forge.lthn.ai/core/go-i18n"
	"forge.lthn.ai/core/go-io"
)
|
||||
|
||||
// addVMTemplatesCommand adds the 'templates' command under vm.
|
||||
func addVMTemplatesCommand(parent *cli.Command) {
|
||||
templatesCmd := &cli.Command{
|
||||
Use: "templates",
|
||||
Short: i18n.T("cmd.vm.templates.short"),
|
||||
Long: i18n.T("cmd.vm.templates.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return listTemplates()
|
||||
},
|
||||
}
|
||||
|
||||
// Add subcommands
|
||||
addTemplatesShowCommand(templatesCmd)
|
||||
addTemplatesVarsCommand(templatesCmd)
|
||||
|
||||
parent.AddCommand(templatesCmd)
|
||||
}
|
||||
|
||||
// addTemplatesShowCommand adds the 'templates show' subcommand.
|
||||
func addTemplatesShowCommand(parent *cli.Command) {
|
||||
showCmd := &cli.Command{
|
||||
Use: "show <template-name>",
|
||||
Short: i18n.T("cmd.vm.templates.show.short"),
|
||||
Long: i18n.T("cmd.vm.templates.show.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
return errors.New(i18n.T("cmd.vm.error.template_required"))
|
||||
}
|
||||
return showTemplate(args[0])
|
||||
},
|
||||
}
|
||||
|
||||
parent.AddCommand(showCmd)
|
||||
}
|
||||
|
||||
// addTemplatesVarsCommand adds the 'templates vars' subcommand.
|
||||
func addTemplatesVarsCommand(parent *cli.Command) {
|
||||
varsCmd := &cli.Command{
|
||||
Use: "vars <template-name>",
|
||||
Short: i18n.T("cmd.vm.templates.vars.short"),
|
||||
Long: i18n.T("cmd.vm.templates.vars.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
return errors.New(i18n.T("cmd.vm.error.template_required"))
|
||||
}
|
||||
return showTemplateVars(args[0])
|
||||
},
|
||||
}
|
||||
|
||||
parent.AddCommand(varsCmd)
|
||||
}
|
||||
|
||||
func listTemplates() error {
|
||||
templates := container.ListTemplates()
|
||||
|
||||
if len(templates) == 0 {
|
||||
fmt.Println(i18n.T("cmd.vm.templates.no_templates"))
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("%s\n\n", repoNameStyle.Render(i18n.T("cmd.vm.templates.title")))
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
_, _ = fmt.Fprintln(w, i18n.T("cmd.vm.templates.header"))
|
||||
_, _ = fmt.Fprintln(w, "----\t-----------")
|
||||
|
||||
for _, tmpl := range templates {
|
||||
desc := tmpl.Description
|
||||
if len(desc) > 60 {
|
||||
desc = desc[:57] + "..."
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\n", repoNameStyle.Render(tmpl.Name), desc)
|
||||
}
|
||||
_ = w.Flush()
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.vm.templates.hint.show"), dimStyle.Render("core vm templates show <name>"))
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.vm.templates.hint.vars"), dimStyle.Render("core vm templates vars <name>"))
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.vm.templates.hint.run"), dimStyle.Render("core vm run --template <name> --var SSH_KEY=\"...\""))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func showTemplate(name string) error {
|
||||
content, err := container.GetTemplate(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n\n", dimStyle.Render(i18n.T("common.label.template")), repoNameStyle.Render(name))
|
||||
fmt.Println(content)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func showTemplateVars(name string) error {
|
||||
content, err := container.GetTemplate(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
required, optional := container.ExtractVariables(content)
|
||||
|
||||
fmt.Printf("%s %s\n\n", dimStyle.Render(i18n.T("common.label.template")), repoNameStyle.Render(name))
|
||||
|
||||
if len(required) > 0 {
|
||||
fmt.Printf("%s\n", errorStyle.Render(i18n.T("cmd.vm.templates.vars.required")))
|
||||
for _, v := range required {
|
||||
fmt.Printf(" %s\n", varStyle.Render("${"+v+"}"))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
if len(optional) > 0 {
|
||||
fmt.Printf("%s\n", successStyle.Render(i18n.T("cmd.vm.templates.vars.optional")))
|
||||
for v, def := range optional {
|
||||
fmt.Printf(" %s = %s\n",
|
||||
varStyle.Render("${"+v+"}"),
|
||||
defaultStyle.Render(def))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
if len(required) == 0 && len(optional) == 0 {
|
||||
fmt.Println(i18n.T("cmd.vm.templates.vars.none"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunFromTemplate builds and runs a LinuxKit image from a template.
//
// The template named templateName is rendered with vars, written to a
// temporary directory, built into a bootable image by the external
// `linuxkit` binary, and finally started through the container manager
// with runOpts. The temporary build directory (template YAML and built
// image) is removed when the function returns, whether it succeeds or not.
func RunFromTemplate(templateName string, vars map[string]string, runOpts container.RunOptions) error {
	// Apply template with variables
	content, err := container.ApplyTemplate(templateName, vars)
	if err != nil {
		return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "apply template"})+": %w", err)
	}

	// Create a temporary directory for the build
	tmpDir, err := os.MkdirTemp("", "core-linuxkit-*")
	if err != nil {
		return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "create temp directory"})+": %w", err)
	}
	defer func() { _ = os.RemoveAll(tmpDir) }()

	// Write the YAML file
	yamlPath := filepath.Join(tmpDir, templateName+".yml")
	if err := os.WriteFile(yamlPath, []byte(content), 0644); err != nil {
		return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "write template"})+": %w", err)
	}

	fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("common.label.template")), repoNameStyle.Render(templateName))
	fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.building")), yamlPath)

	// Build the image using linuxkit
	outputPath := filepath.Join(tmpDir, templateName)
	if err := buildLinuxKitImage(yamlPath, outputPath); err != nil {
		return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "build image"})+": %w", err)
	}

	// Find the built image (linuxkit creates .iso or other format)
	imagePath := findBuiltImage(outputPath)
	if imagePath == "" {
		return errors.New(i18n.T("cmd.vm.error.no_image_found"))
	}

	fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("common.label.image")), imagePath)
	fmt.Println()

	// Run the image
	manager, err := container.NewLinuxKitManager(io.Local)
	if err != nil {
		return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "initialize container manager"})+": %w", err)
	}

	fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.hypervisor")), manager.Hypervisor().Name())
	fmt.Println()

	ctx := context.Background()
	c, err := manager.Run(ctx, imagePath, runOpts)
	if err != nil {
		return fmt.Errorf(i18n.T("i18n.fail.run", "container")+": %w", err)
	}

	if runOpts.Detach {
		// Detached: report the new container's identity and how to manage it.
		fmt.Printf("%s %s\n", successStyle.Render(i18n.T("common.label.started")), c.ID)
		fmt.Printf("%s %d\n", dimStyle.Render(i18n.T("cmd.vm.label.pid")), c.PID)
		fmt.Println()
		// NOTE(review): c.ID[:8] assumes container IDs are always at least
		// 8 bytes long — confirm the manager guarantees this, otherwise a
		// short ID would panic here.
		fmt.Println(i18n.T("cmd.vm.hint.view_logs", map[string]any{"ID": c.ID[:8]}))
		fmt.Println(i18n.T("cmd.vm.hint.stop", map[string]any{"ID": c.ID[:8]}))
	} else {
		// Foreground: Run returned, so the container has already exited.
		fmt.Printf("\n%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.container_stopped")), c.ID)
	}

	return nil
}
|
||||
|
||||
// buildLinuxKitImage builds a LinuxKit image from a YAML file.
|
||||
func buildLinuxKitImage(yamlPath, outputPath string) error {
|
||||
// Check if linuxkit is available
|
||||
lkPath, err := lookupLinuxKit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Build the image
|
||||
// linuxkit build --format iso-bios --name <output> <yaml>
|
||||
cmd := exec.Command(lkPath, "build",
|
||||
"--format", "iso-bios",
|
||||
"--name", outputPath,
|
||||
yamlPath)
|
||||
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// findBuiltImage finds the built image file.
|
||||
func findBuiltImage(basePath string) string {
|
||||
// LinuxKit can create different formats
|
||||
extensions := []string{".iso", "-bios.iso", ".qcow2", ".raw", ".vmdk"}
|
||||
|
||||
for _, ext := range extensions {
|
||||
path := basePath + ext
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return path
|
||||
}
|
||||
}
|
||||
|
||||
// Check directory for any image file
|
||||
dir := filepath.Dir(basePath)
|
||||
base := filepath.Base(basePath)
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
name := entry.Name()
|
||||
if strings.HasPrefix(name, base) {
|
||||
for _, ext := range []string{".iso", ".qcow2", ".raw", ".vmdk"} {
|
||||
if strings.HasSuffix(name, ext) {
|
||||
return filepath.Join(dir, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// lookupLinuxKit finds the linuxkit binary.
|
||||
func lookupLinuxKit() (string, error) {
|
||||
// Check PATH first
|
||||
if path, err := exec.LookPath("linuxkit"); err == nil {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// Check common locations
|
||||
paths := []string{
|
||||
"/usr/local/bin/linuxkit",
|
||||
"/opt/homebrew/bin/linuxkit",
|
||||
}
|
||||
|
||||
for _, p := range paths {
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", errors.New(i18n.T("cmd.vm.error.linuxkit_not_found"))
|
||||
}
|
||||
|
||||
// ParseVarFlags parses --var flags into a map.
// Format: --var KEY=VALUE or --var KEY="VALUE"
//
// Entries without an '=' are silently skipped. Keys and values are
// whitespace-trimmed, and surrounding single or double quotes are removed
// from the value.
func ParseVarFlags(varFlags []string) map[string]string {
	vars := make(map[string]string)

	for _, flag := range varFlags {
		key, rawValue, found := strings.Cut(flag, "=")
		if !found {
			continue
		}
		cleaned := strings.Trim(strings.TrimSpace(rawValue), `"'`)
		vars[strings.TrimSpace(key)] = cleaned
	}

	return vars
}
|
||||
|
|
@ -1,42 +0,0 @@
|
|||
// Package vm provides LinuxKit VM management commands.
|
||||
package vm
|
||||
|
||||
import (
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
)
|
||||
|
||||
// init registers AddVMCommands with the CLI framework so the 'vm' command
// tree is attached when the root command is assembled.
func init() {
	cli.RegisterCommands(AddVMCommands)
}
|
||||
|
||||
// Style aliases from shared CLI styles, so this package's output matches
// the rest of the tool's rendering.
var (
	repoNameStyle = cli.RepoStyle
	successStyle  = cli.SuccessStyle
	errorStyle    = cli.ErrorStyle
	dimStyle      = cli.DimStyle
)

// VM-specific styles used when rendering template variables.
var (
	// varStyle highlights ${VAR} placeholders.
	varStyle = cli.NewStyle().Foreground(cli.ColourAmber500)
	// defaultStyle renders a variable's default value dimmed and italic.
	defaultStyle = cli.NewStyle().Foreground(cli.ColourGray500).Italic()
)
|
||||
|
||||
// AddVMCommands adds container-related commands under 'vm' to the CLI.
|
||||
func AddVMCommands(root *cli.Command) {
|
||||
vmCmd := &cli.Command{
|
||||
Use: "vm",
|
||||
Short: i18n.T("cmd.vm.short"),
|
||||
Long: i18n.T("cmd.vm.long"),
|
||||
}
|
||||
|
||||
root.AddCommand(vmCmd)
|
||||
addVMRunCommand(vmCmd)
|
||||
addVMPsCommand(vmCmd)
|
||||
addVMStopCommand(vmCmd)
|
||||
addVMLogsCommand(vmCmd)
|
||||
addVMExecCommand(vmCmd)
|
||||
addVMTemplatesCommand(vmCmd)
|
||||
}
|
||||
Loading…
Add table
Reference in a new issue