feat: migrate deploy, prod, vm commands from CLI
Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
4b5739fbd7
commit
400d8a7690
14 changed files with 2284 additions and 0 deletions
312
cmd/deploy/cmd_ansible.go
Normal file
312
cmd/deploy/cmd_ansible.go
Normal file
|
|
@ -0,0 +1,312 @@
|
|||
package deploy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/ansible"
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
ansibleInventory string
|
||||
ansibleLimit string
|
||||
ansibleTags string
|
||||
ansibleSkipTags string
|
||||
ansibleVars []string
|
||||
ansibleVerbose int
|
||||
ansibleCheck bool
|
||||
)
|
||||
|
||||
var ansibleCmd = &cobra.Command{
|
||||
Use: "ansible <playbook>",
|
||||
Short: "Run Ansible playbooks natively (no Python required)",
|
||||
Long: `Execute Ansible playbooks using a pure Go implementation.
|
||||
|
||||
This command parses Ansible YAML playbooks and executes them natively,
|
||||
without requiring Python or ansible-playbook to be installed.
|
||||
|
||||
Supported modules:
|
||||
- shell, command, raw, script
|
||||
- copy, template, file, lineinfile, stat, slurp, fetch, get_url
|
||||
- apt, apt_key, apt_repository, package, pip
|
||||
- service, systemd
|
||||
- user, group
|
||||
- uri, wait_for, git, unarchive
|
||||
- debug, fail, assert, set_fact, pause
|
||||
|
||||
Examples:
|
||||
core deploy ansible playbooks/coolify/create.yml -i inventory/
|
||||
core deploy ansible site.yml -l production
|
||||
core deploy ansible deploy.yml -e "version=1.2.3" -e "env=prod"`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runAnsible,
|
||||
}
|
||||
|
||||
var ansibleTestCmd = &cobra.Command{
|
||||
Use: "test <host>",
|
||||
Short: "Test SSH connectivity to a host",
|
||||
Long: `Test SSH connection and gather facts from a host.
|
||||
|
||||
Examples:
|
||||
core deploy ansible test linux.snider.dev -u claude -p claude
|
||||
core deploy ansible test server.example.com -i ~/.ssh/id_rsa`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runAnsibleTest,
|
||||
}
|
||||
|
||||
var (
|
||||
testUser string
|
||||
testPassword string
|
||||
testKeyFile string
|
||||
testPort int
|
||||
)
|
||||
|
||||
func init() {
|
||||
// ansible command flags
|
||||
ansibleCmd.Flags().StringVarP(&ansibleInventory, "inventory", "i", "", "Inventory file or directory")
|
||||
ansibleCmd.Flags().StringVarP(&ansibleLimit, "limit", "l", "", "Limit to specific hosts")
|
||||
ansibleCmd.Flags().StringVarP(&ansibleTags, "tags", "t", "", "Only run plays and tasks tagged with these values")
|
||||
ansibleCmd.Flags().StringVar(&ansibleSkipTags, "skip-tags", "", "Skip plays and tasks tagged with these values")
|
||||
ansibleCmd.Flags().StringArrayVarP(&ansibleVars, "extra-vars", "e", nil, "Set additional variables (key=value)")
|
||||
ansibleCmd.Flags().CountVarP(&ansibleVerbose, "verbose", "v", "Increase verbosity")
|
||||
ansibleCmd.Flags().BoolVar(&ansibleCheck, "check", false, "Don't make any changes (dry run)")
|
||||
|
||||
// test command flags
|
||||
ansibleTestCmd.Flags().StringVarP(&testUser, "user", "u", "root", "SSH user")
|
||||
ansibleTestCmd.Flags().StringVarP(&testPassword, "password", "p", "", "SSH password")
|
||||
ansibleTestCmd.Flags().StringVarP(&testKeyFile, "key", "i", "", "SSH private key file")
|
||||
ansibleTestCmd.Flags().IntVar(&testPort, "port", 22, "SSH port")
|
||||
|
||||
// Add subcommands
|
||||
ansibleCmd.AddCommand(ansibleTestCmd)
|
||||
Cmd.AddCommand(ansibleCmd)
|
||||
}
|
||||
|
||||
func runAnsible(cmd *cobra.Command, args []string) error {
|
||||
playbookPath := args[0]
|
||||
|
||||
// Resolve playbook path
|
||||
if !filepath.IsAbs(playbookPath) {
|
||||
cwd, _ := os.Getwd()
|
||||
playbookPath = filepath.Join(cwd, playbookPath)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(playbookPath); os.IsNotExist(err) {
|
||||
return fmt.Errorf("playbook not found: %s", playbookPath)
|
||||
}
|
||||
|
||||
// Create executor
|
||||
basePath := filepath.Dir(playbookPath)
|
||||
executor := ansible.NewExecutor(basePath)
|
||||
defer executor.Close()
|
||||
|
||||
// Set options
|
||||
executor.Limit = ansibleLimit
|
||||
executor.CheckMode = ansibleCheck
|
||||
executor.Verbose = ansibleVerbose
|
||||
|
||||
if ansibleTags != "" {
|
||||
executor.Tags = strings.Split(ansibleTags, ",")
|
||||
}
|
||||
if ansibleSkipTags != "" {
|
||||
executor.SkipTags = strings.Split(ansibleSkipTags, ",")
|
||||
}
|
||||
|
||||
// Parse extra vars
|
||||
for _, v := range ansibleVars {
|
||||
parts := strings.SplitN(v, "=", 2)
|
||||
if len(parts) == 2 {
|
||||
executor.SetVar(parts[0], parts[1])
|
||||
}
|
||||
}
|
||||
|
||||
// Load inventory
|
||||
if ansibleInventory != "" {
|
||||
invPath := ansibleInventory
|
||||
if !filepath.IsAbs(invPath) {
|
||||
cwd, _ := os.Getwd()
|
||||
invPath = filepath.Join(cwd, invPath)
|
||||
}
|
||||
|
||||
// Check if it's a directory
|
||||
info, err := os.Stat(invPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("inventory not found: %s", invPath)
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
// Look for inventory.yml or hosts.yml
|
||||
for _, name := range []string{"inventory.yml", "hosts.yml", "inventory.yaml", "hosts.yaml"} {
|
||||
p := filepath.Join(invPath, name)
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
invPath = p
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := executor.SetInventory(invPath); err != nil {
|
||||
return fmt.Errorf("load inventory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set up callbacks
|
||||
executor.OnPlayStart = func(play *ansible.Play) {
|
||||
fmt.Printf("\n%s %s\n", cli.TitleStyle.Render("PLAY"), cli.BoldStyle.Render("["+play.Name+"]"))
|
||||
fmt.Println(strings.Repeat("*", 70))
|
||||
}
|
||||
|
||||
executor.OnTaskStart = func(host string, task *ansible.Task) {
|
||||
taskName := task.Name
|
||||
if taskName == "" {
|
||||
taskName = task.Module
|
||||
}
|
||||
fmt.Printf("\n%s %s\n", cli.TitleStyle.Render("TASK"), cli.BoldStyle.Render("["+taskName+"]"))
|
||||
if ansibleVerbose > 0 {
|
||||
fmt.Printf("%s\n", cli.DimStyle.Render("host: "+host))
|
||||
}
|
||||
}
|
||||
|
||||
executor.OnTaskEnd = func(host string, task *ansible.Task, result *ansible.TaskResult) {
|
||||
status := "ok"
|
||||
style := cli.SuccessStyle
|
||||
|
||||
if result.Failed {
|
||||
status = "failed"
|
||||
style = cli.ErrorStyle
|
||||
} else if result.Skipped {
|
||||
status = "skipping"
|
||||
style = cli.DimStyle
|
||||
} else if result.Changed {
|
||||
status = "changed"
|
||||
style = cli.WarningStyle
|
||||
}
|
||||
|
||||
fmt.Printf("%s: [%s]", style.Render(status), host)
|
||||
if result.Msg != "" && ansibleVerbose > 0 {
|
||||
fmt.Printf(" => %s", result.Msg)
|
||||
}
|
||||
if result.Duration > 0 && ansibleVerbose > 1 {
|
||||
fmt.Printf(" (%s)", result.Duration.Round(time.Millisecond))
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
if result.Failed && result.Stderr != "" {
|
||||
fmt.Printf("%s\n", cli.ErrorStyle.Render(result.Stderr))
|
||||
}
|
||||
|
||||
if ansibleVerbose > 1 {
|
||||
if result.Stdout != "" {
|
||||
fmt.Printf("stdout: %s\n", strings.TrimSpace(result.Stdout))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
executor.OnPlayEnd = func(play *ansible.Play) {
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Run playbook
|
||||
ctx := context.Background()
|
||||
start := time.Now()
|
||||
|
||||
fmt.Printf("%s Running playbook: %s\n", cli.BoldStyle.Render("▶"), playbookPath)
|
||||
|
||||
if err := executor.Run(ctx, playbookPath); err != nil {
|
||||
return fmt.Errorf("playbook failed: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s Playbook completed in %s\n",
|
||||
cli.SuccessStyle.Render("✓"),
|
||||
time.Since(start).Round(time.Millisecond))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runAnsibleTest(cmd *cobra.Command, args []string) error {
|
||||
host := args[0]
|
||||
|
||||
fmt.Printf("Testing SSH connection to %s...\n", cli.BoldStyle.Render(host))
|
||||
|
||||
cfg := ansible.SSHConfig{
|
||||
Host: host,
|
||||
Port: testPort,
|
||||
User: testUser,
|
||||
Password: testPassword,
|
||||
KeyFile: testKeyFile,
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ansible.NewSSHClient(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create client: %w", err)
|
||||
}
|
||||
defer func() { _ = client.Close() }()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Test connection
|
||||
start := time.Now()
|
||||
if err := client.Connect(ctx); err != nil {
|
||||
return fmt.Errorf("connect failed: %w", err)
|
||||
}
|
||||
connectTime := time.Since(start)
|
||||
|
||||
fmt.Printf("%s Connected in %s\n", cli.SuccessStyle.Render("✓"), connectTime.Round(time.Millisecond))
|
||||
|
||||
// Gather facts
|
||||
fmt.Println("\nGathering facts...")
|
||||
|
||||
// Hostname
|
||||
stdout, _, _, _ := client.Run(ctx, "hostname -f 2>/dev/null || hostname")
|
||||
fmt.Printf(" Hostname: %s\n", cli.BoldStyle.Render(strings.TrimSpace(stdout)))
|
||||
|
||||
// OS
|
||||
stdout, _, _, _ = client.Run(ctx, "cat /etc/os-release 2>/dev/null | grep PRETTY_NAME | cut -d'\"' -f2")
|
||||
if stdout != "" {
|
||||
fmt.Printf(" OS: %s\n", strings.TrimSpace(stdout))
|
||||
}
|
||||
|
||||
// Kernel
|
||||
stdout, _, _, _ = client.Run(ctx, "uname -r")
|
||||
fmt.Printf(" Kernel: %s\n", strings.TrimSpace(stdout))
|
||||
|
||||
// Architecture
|
||||
stdout, _, _, _ = client.Run(ctx, "uname -m")
|
||||
fmt.Printf(" Architecture: %s\n", strings.TrimSpace(stdout))
|
||||
|
||||
// Memory
|
||||
stdout, _, _, _ = client.Run(ctx, "free -h | grep Mem | awk '{print $2}'")
|
||||
fmt.Printf(" Memory: %s\n", strings.TrimSpace(stdout))
|
||||
|
||||
// Disk
|
||||
stdout, _, _, _ = client.Run(ctx, "df -h / | tail -1 | awk '{print $2 \" total, \" $4 \" available\"}'")
|
||||
fmt.Printf(" Disk: %s\n", strings.TrimSpace(stdout))
|
||||
|
||||
// Docker
|
||||
stdout, _, _, err = client.Run(ctx, "docker --version 2>/dev/null")
|
||||
if err == nil {
|
||||
fmt.Printf(" Docker: %s\n", cli.SuccessStyle.Render(strings.TrimSpace(stdout)))
|
||||
} else {
|
||||
fmt.Printf(" Docker: %s\n", cli.DimStyle.Render("not installed"))
|
||||
}
|
||||
|
||||
// Check if Coolify is running
|
||||
stdout, _, _, _ = client.Run(ctx, "docker ps 2>/dev/null | grep -q coolify && echo 'running' || echo 'not running'")
|
||||
if strings.TrimSpace(stdout) == "running" {
|
||||
fmt.Printf(" Coolify: %s\n", cli.SuccessStyle.Render("running"))
|
||||
} else {
|
||||
fmt.Printf(" Coolify: %s\n", cli.DimStyle.Render("not installed"))
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s SSH test passed\n", cli.SuccessStyle.Render("✓"))
|
||||
|
||||
return nil
|
||||
}
|
||||
15
cmd/deploy/cmd_commands.go
Normal file
15
cmd/deploy/cmd_commands.go
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
package deploy
|
||||
|
||||
import (
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cli.RegisterCommands(AddDeployCommands)
|
||||
}
|
||||
|
||||
// AddDeployCommands registers the 'deploy' command and all subcommands.
|
||||
func AddDeployCommands(root *cobra.Command) {
|
||||
root.AddCommand(Cmd)
|
||||
}
|
||||
280
cmd/deploy/cmd_deploy.go
Normal file
280
cmd/deploy/cmd_deploy.go
Normal file
|
|
@ -0,0 +1,280 @@
|
|||
package deploy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"forge.lthn.ai/core/go-devops/deploy/coolify"
|
||||
"forge.lthn.ai/core/go/pkg/i18n"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
coolifyURL string
|
||||
coolifyToken string
|
||||
outputJSON bool
|
||||
)
|
||||
|
||||
// Cmd is the root deploy command.
|
||||
var Cmd = &cobra.Command{
|
||||
Use: "deploy",
|
||||
Short: i18n.T("cmd.deploy.short"),
|
||||
Long: i18n.T("cmd.deploy.long"),
|
||||
}
|
||||
|
||||
var serversCmd = &cobra.Command{
|
||||
Use: "servers",
|
||||
Short: "List Coolify servers",
|
||||
RunE: runListServers,
|
||||
}
|
||||
|
||||
var projectsCmd = &cobra.Command{
|
||||
Use: "projects",
|
||||
Short: "List Coolify projects",
|
||||
RunE: runListProjects,
|
||||
}
|
||||
|
||||
var appsCmd = &cobra.Command{
|
||||
Use: "apps",
|
||||
Short: "List Coolify applications",
|
||||
RunE: runListApps,
|
||||
}
|
||||
|
||||
var dbsCmd = &cobra.Command{
|
||||
Use: "databases",
|
||||
Short: "List Coolify databases",
|
||||
Aliases: []string{"dbs", "db"},
|
||||
RunE: runListDatabases,
|
||||
}
|
||||
|
||||
var servicesCmd = &cobra.Command{
|
||||
Use: "services",
|
||||
Short: "List Coolify services",
|
||||
RunE: runListServices,
|
||||
}
|
||||
|
||||
var teamCmd = &cobra.Command{
|
||||
Use: "team",
|
||||
Short: "Show current team info",
|
||||
RunE: runTeam,
|
||||
}
|
||||
|
||||
var callCmd = &cobra.Command{
|
||||
Use: "call <operation> [params-json]",
|
||||
Short: "Call any Coolify API operation",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: runCall,
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Global flags
|
||||
Cmd.PersistentFlags().StringVar(&coolifyURL, "url", os.Getenv("COOLIFY_URL"), "Coolify API URL")
|
||||
Cmd.PersistentFlags().StringVar(&coolifyToken, "token", os.Getenv("COOLIFY_TOKEN"), "Coolify API token")
|
||||
Cmd.PersistentFlags().BoolVar(&outputJSON, "json", false, "Output as JSON")
|
||||
|
||||
// Add subcommands
|
||||
Cmd.AddCommand(serversCmd)
|
||||
Cmd.AddCommand(projectsCmd)
|
||||
Cmd.AddCommand(appsCmd)
|
||||
Cmd.AddCommand(dbsCmd)
|
||||
Cmd.AddCommand(servicesCmd)
|
||||
Cmd.AddCommand(teamCmd)
|
||||
Cmd.AddCommand(callCmd)
|
||||
}
|
||||
|
||||
func getClient() (*coolify.Client, error) {
|
||||
cfg := coolify.Config{
|
||||
BaseURL: coolifyURL,
|
||||
APIToken: coolifyToken,
|
||||
Timeout: 30,
|
||||
VerifySSL: true,
|
||||
}
|
||||
|
||||
if cfg.BaseURL == "" {
|
||||
cfg.BaseURL = os.Getenv("COOLIFY_URL")
|
||||
}
|
||||
if cfg.APIToken == "" {
|
||||
cfg.APIToken = os.Getenv("COOLIFY_TOKEN")
|
||||
}
|
||||
|
||||
return coolify.NewClient(cfg)
|
||||
}
|
||||
|
||||
func outputResult(data any) error {
|
||||
if outputJSON {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(data)
|
||||
}
|
||||
|
||||
// Pretty print based on type
|
||||
switch v := data.(type) {
|
||||
case []map[string]any:
|
||||
for _, item := range v {
|
||||
printItem(item)
|
||||
}
|
||||
case map[string]any:
|
||||
printItem(v)
|
||||
default:
|
||||
fmt.Printf("%v\n", data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func printItem(item map[string]any) {
|
||||
// Common fields to display
|
||||
if uuid, ok := item["uuid"].(string); ok {
|
||||
fmt.Printf("%s ", cli.DimStyle.Render(uuid[:8]))
|
||||
}
|
||||
if name, ok := item["name"].(string); ok {
|
||||
fmt.Printf("%s", cli.TitleStyle.Render(name))
|
||||
}
|
||||
if desc, ok := item["description"].(string); ok && desc != "" {
|
||||
fmt.Printf(" %s", cli.DimStyle.Render(desc))
|
||||
}
|
||||
if status, ok := item["status"].(string); ok {
|
||||
switch status {
|
||||
case "running":
|
||||
fmt.Printf(" %s", cli.SuccessStyle.Render("●"))
|
||||
case "stopped":
|
||||
fmt.Printf(" %s", cli.ErrorStyle.Render("○"))
|
||||
default:
|
||||
fmt.Printf(" %s", cli.DimStyle.Render(status))
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
func runListServers(cmd *cobra.Command, args []string) error {
|
||||
client, err := getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
servers, err := client.ListServers(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(servers) == 0 {
|
||||
fmt.Println("No servers found")
|
||||
return nil
|
||||
}
|
||||
|
||||
return outputResult(servers)
|
||||
}
|
||||
|
||||
func runListProjects(cmd *cobra.Command, args []string) error {
|
||||
client, err := getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
projects, err := client.ListProjects(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(projects) == 0 {
|
||||
fmt.Println("No projects found")
|
||||
return nil
|
||||
}
|
||||
|
||||
return outputResult(projects)
|
||||
}
|
||||
|
||||
func runListApps(cmd *cobra.Command, args []string) error {
|
||||
client, err := getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
apps, err := client.ListApplications(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(apps) == 0 {
|
||||
fmt.Println("No applications found")
|
||||
return nil
|
||||
}
|
||||
|
||||
return outputResult(apps)
|
||||
}
|
||||
|
||||
func runListDatabases(cmd *cobra.Command, args []string) error {
|
||||
client, err := getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dbs, err := client.ListDatabases(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(dbs) == 0 {
|
||||
fmt.Println("No databases found")
|
||||
return nil
|
||||
}
|
||||
|
||||
return outputResult(dbs)
|
||||
}
|
||||
|
||||
func runListServices(cmd *cobra.Command, args []string) error {
|
||||
client, err := getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
services, err := client.ListServices(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(services) == 0 {
|
||||
fmt.Println("No services found")
|
||||
return nil
|
||||
}
|
||||
|
||||
return outputResult(services)
|
||||
}
|
||||
|
||||
func runTeam(cmd *cobra.Command, args []string) error {
|
||||
client, err := getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
team, err := client.GetTeam(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return outputResult(team)
|
||||
}
|
||||
|
||||
func runCall(cmd *cobra.Command, args []string) error {
|
||||
client, err := getClient()
|
||||
if err != nil {
|
||||
return cli.WrapVerb(err, "initialize", "client")
|
||||
}
|
||||
|
||||
operation := args[0]
|
||||
var params map[string]any
|
||||
if len(args) > 1 {
|
||||
if err := json.Unmarshal([]byte(args[1]), ¶ms); err != nil {
|
||||
return fmt.Errorf("invalid JSON params: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
result, err := client.Call(context.Background(), operation, params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return outputResult(result)
|
||||
}
|
||||
15
cmd/prod/cmd_commands.go
Normal file
15
cmd/prod/cmd_commands.go
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cli.RegisterCommands(AddProdCommands)
|
||||
}
|
||||
|
||||
// AddProdCommands registers the 'prod' command and all subcommands.
|
||||
func AddProdCommands(root *cobra.Command) {
|
||||
root.AddCommand(Cmd)
|
||||
}
|
||||
129
cmd/prod/cmd_dns.go
Normal file
129
cmd/prod/cmd_dns.go
Normal file
|
|
@ -0,0 +1,129 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"forge.lthn.ai/core/go-devops/infra"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var dnsCmd = &cobra.Command{
|
||||
Use: "dns",
|
||||
Short: "Manage DNS records via CloudNS",
|
||||
Long: `View and manage DNS records for host.uk.com via CloudNS API.
|
||||
|
||||
Requires:
|
||||
CLOUDNS_AUTH_ID CloudNS auth ID
|
||||
CLOUDNS_AUTH_PASSWORD CloudNS auth password`,
|
||||
}
|
||||
|
||||
var dnsListCmd = &cobra.Command{
|
||||
Use: "list [zone]",
|
||||
Short: "List DNS records",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: runDNSList,
|
||||
}
|
||||
|
||||
var dnsSetCmd = &cobra.Command{
|
||||
Use: "set <host> <type> <value>",
|
||||
Short: "Create or update a DNS record",
|
||||
Long: `Create or update a DNS record. Example:
|
||||
core prod dns set hermes.lb A 1.2.3.4
|
||||
core prod dns set "*.host.uk.com" CNAME hermes.lb.host.uk.com`,
|
||||
Args: cobra.ExactArgs(3),
|
||||
RunE: runDNSSet,
|
||||
}
|
||||
|
||||
var (
|
||||
dnsZone string
|
||||
dnsTTL int
|
||||
)
|
||||
|
||||
func init() {
|
||||
dnsCmd.PersistentFlags().StringVar(&dnsZone, "zone", "host.uk.com", "DNS zone")
|
||||
|
||||
dnsSetCmd.Flags().IntVar(&dnsTTL, "ttl", 300, "Record TTL in seconds")
|
||||
|
||||
dnsCmd.AddCommand(dnsListCmd)
|
||||
dnsCmd.AddCommand(dnsSetCmd)
|
||||
}
|
||||
|
||||
func getDNSClient() (*infra.CloudNSClient, error) {
|
||||
authID := os.Getenv("CLOUDNS_AUTH_ID")
|
||||
authPass := os.Getenv("CLOUDNS_AUTH_PASSWORD")
|
||||
if authID == "" || authPass == "" {
|
||||
return nil, fmt.Errorf("CLOUDNS_AUTH_ID and CLOUDNS_AUTH_PASSWORD required")
|
||||
}
|
||||
return infra.NewCloudNSClient(authID, authPass), nil
|
||||
}
|
||||
|
||||
func runDNSList(cmd *cobra.Command, args []string) error {
|
||||
dns, err := getDNSClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
zone := dnsZone
|
||||
if len(args) > 0 {
|
||||
zone = args[0]
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
records, err := dns.ListRecords(ctx, zone)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list records: %w", err)
|
||||
}
|
||||
|
||||
cli.Print("%s DNS records for %s\n\n", cli.BoldStyle.Render("▶"), cli.TitleStyle.Render(zone))
|
||||
|
||||
if len(records) == 0 {
|
||||
cli.Print(" No records found\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
for id, r := range records {
|
||||
cli.Print(" %s %-6s %-30s %s TTL:%s\n",
|
||||
cli.DimStyle.Render(id),
|
||||
cli.BoldStyle.Render(r.Type),
|
||||
r.Host,
|
||||
r.Record,
|
||||
r.TTL)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runDNSSet(cmd *cobra.Command, args []string) error {
|
||||
dns, err := getDNSClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host := args[0]
|
||||
recordType := args[1]
|
||||
value := args[2]
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
changed, err := dns.EnsureRecord(ctx, dnsZone, host, recordType, value, dnsTTL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("set record: %w", err)
|
||||
}
|
||||
|
||||
if changed {
|
||||
cli.Print("%s %s %s %s -> %s\n",
|
||||
cli.SuccessStyle.Render("✓"),
|
||||
recordType, host, dnsZone, value)
|
||||
} else {
|
||||
cli.Print("%s Record already correct\n", cli.DimStyle.Render("·"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
113
cmd/prod/cmd_lb.go
Normal file
113
cmd/prod/cmd_lb.go
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"forge.lthn.ai/core/go-devops/infra"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var lbCmd = &cobra.Command{
|
||||
Use: "lb",
|
||||
Short: "Manage Hetzner load balancer",
|
||||
Long: `View and manage the Hetzner Cloud managed load balancer.
|
||||
|
||||
Requires: HCLOUD_TOKEN`,
|
||||
}
|
||||
|
||||
var lbStatusCmd = &cobra.Command{
|
||||
Use: "status",
|
||||
Short: "Show load balancer status and target health",
|
||||
RunE: runLBStatus,
|
||||
}
|
||||
|
||||
var lbCreateCmd = &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Create load balancer from infra.yaml",
|
||||
RunE: runLBCreate,
|
||||
}
|
||||
|
||||
func init() {
|
||||
lbCmd.AddCommand(lbStatusCmd)
|
||||
lbCmd.AddCommand(lbCreateCmd)
|
||||
}
|
||||
|
||||
func getHCloudClient() (*infra.HCloudClient, error) {
|
||||
token := os.Getenv("HCLOUD_TOKEN")
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("HCLOUD_TOKEN environment variable required")
|
||||
}
|
||||
return infra.NewHCloudClient(token), nil
|
||||
}
|
||||
|
||||
func runLBStatus(cmd *cobra.Command, args []string) error {
|
||||
hc, err := getHCloudClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
lbs, err := hc.ListLoadBalancers(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list load balancers: %w", err)
|
||||
}
|
||||
|
||||
if len(lbs) == 0 {
|
||||
cli.Print("No load balancers found\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, lb := range lbs {
|
||||
cli.Print("%s %s\n", cli.BoldStyle.Render("▶"), cli.TitleStyle.Render(lb.Name))
|
||||
cli.Print(" ID: %d\n", lb.ID)
|
||||
cli.Print(" IP: %s\n", lb.PublicNet.IPv4.IP)
|
||||
cli.Print(" Algorithm: %s\n", lb.Algorithm.Type)
|
||||
cli.Print(" Location: %s\n", lb.Location.Name)
|
||||
|
||||
if len(lb.Services) > 0 {
|
||||
cli.Print("\n Services:\n")
|
||||
for _, s := range lb.Services {
|
||||
cli.Print(" %s :%d -> :%d proxy_protocol=%v\n",
|
||||
s.Protocol, s.ListenPort, s.DestinationPort, s.Proxyprotocol)
|
||||
}
|
||||
}
|
||||
|
||||
if len(lb.Targets) > 0 {
|
||||
cli.Print("\n Targets:\n")
|
||||
for _, t := range lb.Targets {
|
||||
ip := ""
|
||||
if t.IP != nil {
|
||||
ip = t.IP.IP
|
||||
}
|
||||
for _, hs := range t.HealthStatus {
|
||||
icon := cli.SuccessStyle.Render("●")
|
||||
if hs.Status != "healthy" {
|
||||
icon = cli.ErrorStyle.Render("○")
|
||||
}
|
||||
cli.Print(" %s %s :%d %s\n", icon, ip, hs.ListenPort, hs.Status)
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runLBCreate(cmd *cobra.Command, args []string) error {
|
||||
cfg, _, err := loadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
return stepLoadBalancer(ctx, cfg)
|
||||
}
|
||||
35
cmd/prod/cmd_prod.go
Normal file
35
cmd/prod/cmd_prod.go
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
infraFile string
|
||||
)
|
||||
|
||||
// Cmd is the root prod command.
|
||||
var Cmd = &cobra.Command{
|
||||
Use: "prod",
|
||||
Short: "Production infrastructure management",
|
||||
Long: `Manage the Host UK production infrastructure.
|
||||
|
||||
Commands:
|
||||
status Show infrastructure health and connectivity
|
||||
setup Phase 1: discover topology, create LB, configure DNS
|
||||
dns Manage DNS records via CloudNS
|
||||
lb Manage Hetzner load balancer
|
||||
ssh SSH into a production host
|
||||
|
||||
Configuration is read from infra.yaml in the project root.`,
|
||||
}
|
||||
|
||||
func init() {
|
||||
Cmd.PersistentFlags().StringVar(&infraFile, "config", "", "Path to infra.yaml (auto-discovered if not set)")
|
||||
|
||||
Cmd.AddCommand(statusCmd)
|
||||
Cmd.AddCommand(setupCmd)
|
||||
Cmd.AddCommand(dnsCmd)
|
||||
Cmd.AddCommand(lbCmd)
|
||||
Cmd.AddCommand(sshCmd)
|
||||
}
|
||||
284
cmd/prod/cmd_setup.go
Normal file
284
cmd/prod/cmd_setup.go
Normal file
|
|
@ -0,0 +1,284 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"forge.lthn.ai/core/go-devops/infra"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var setupCmd = &cobra.Command{
|
||||
Use: "setup",
|
||||
Short: "Phase 1: discover topology, create LB, configure DNS",
|
||||
Long: `Run the Phase 1 foundation setup:
|
||||
|
||||
1. Discover Hetzner topology (Cloud + Robot servers)
|
||||
2. Create Hetzner managed load balancer
|
||||
3. Configure DNS records via CloudNS
|
||||
4. Verify connectivity to all hosts
|
||||
|
||||
Required environment variables:
|
||||
HCLOUD_TOKEN Hetzner Cloud API token
|
||||
HETZNER_ROBOT_USER Hetzner Robot username
|
||||
HETZNER_ROBOT_PASS Hetzner Robot password
|
||||
CLOUDNS_AUTH_ID CloudNS auth ID
|
||||
CLOUDNS_AUTH_PASSWORD CloudNS auth password`,
|
||||
RunE: runSetup,
|
||||
}
|
||||
|
||||
var (
|
||||
setupDryRun bool
|
||||
setupStep string
|
||||
)
|
||||
|
||||
func init() {
|
||||
setupCmd.Flags().BoolVar(&setupDryRun, "dry-run", false, "Show what would be done without making changes")
|
||||
setupCmd.Flags().StringVar(&setupStep, "step", "", "Run a specific step only (discover, lb, dns)")
|
||||
}
|
||||
|
||||
func runSetup(cmd *cobra.Command, args []string) error {
|
||||
cfg, cfgPath, err := loadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cli.Print("%s Production setup from %s\n\n",
|
||||
cli.BoldStyle.Render("▶"),
|
||||
cli.DimStyle.Render(cfgPath))
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
steps := []struct {
|
||||
name string
|
||||
fn func(context.Context, *infra.Config) error
|
||||
}{
|
||||
{"discover", stepDiscover},
|
||||
{"lb", stepLoadBalancer},
|
||||
{"dns", stepDNS},
|
||||
}
|
||||
|
||||
for _, step := range steps {
|
||||
if setupStep != "" && setupStep != step.name {
|
||||
continue
|
||||
}
|
||||
|
||||
cli.Print("\n%s Step: %s\n", cli.BoldStyle.Render("━━"), cli.TitleStyle.Render(step.name))
|
||||
|
||||
if err := step.fn(ctx, cfg); err != nil {
|
||||
cli.Print(" %s %s: %s\n", cli.ErrorStyle.Render("✗"), step.name, err)
|
||||
return fmt.Errorf("step %s failed: %w", step.name, err)
|
||||
}
|
||||
|
||||
cli.Print(" %s %s complete\n", cli.SuccessStyle.Render("✓"), step.name)
|
||||
}
|
||||
|
||||
cli.Print("\n%s Setup complete\n", cli.SuccessStyle.Render("✓"))
|
||||
return nil
|
||||
}
|
||||
|
||||
func stepDiscover(ctx context.Context, cfg *infra.Config) error {
|
||||
// Discover HCloud servers
|
||||
hcloudToken := os.Getenv("HCLOUD_TOKEN")
|
||||
if hcloudToken != "" {
|
||||
cli.Print(" Discovering Hetzner Cloud servers...\n")
|
||||
|
||||
hc := infra.NewHCloudClient(hcloudToken)
|
||||
servers, err := hc.ListServers(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list HCloud servers: %w", err)
|
||||
}
|
||||
|
||||
for _, s := range servers {
|
||||
cli.Print(" %s %s %s %s %s\n",
|
||||
cli.SuccessStyle.Render("●"),
|
||||
cli.BoldStyle.Render(s.Name),
|
||||
s.PublicNet.IPv4.IP,
|
||||
s.ServerType.Name,
|
||||
cli.DimStyle.Render(s.Datacenter.Name))
|
||||
}
|
||||
} else {
|
||||
cli.Print(" %s HCLOUD_TOKEN not set — skipping Cloud discovery\n",
|
||||
cli.WarningStyle.Render("⚠"))
|
||||
}
|
||||
|
||||
// Discover Robot servers
|
||||
robotUser := os.Getenv("HETZNER_ROBOT_USER")
|
||||
robotPass := os.Getenv("HETZNER_ROBOT_PASS")
|
||||
if robotUser != "" && robotPass != "" {
|
||||
cli.Print(" Discovering Hetzner Robot servers...\n")
|
||||
|
||||
hr := infra.NewHRobotClient(robotUser, robotPass)
|
||||
servers, err := hr.ListServers(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("list Robot servers: %w", err)
|
||||
}
|
||||
|
||||
for _, s := range servers {
|
||||
status := cli.SuccessStyle.Render("●")
|
||||
if s.Status != "ready" {
|
||||
status = cli.WarningStyle.Render("○")
|
||||
}
|
||||
cli.Print(" %s %s %s %s %s\n",
|
||||
status,
|
||||
cli.BoldStyle.Render(s.ServerName),
|
||||
s.ServerIP,
|
||||
s.Product,
|
||||
cli.DimStyle.Render(s.Datacenter))
|
||||
}
|
||||
} else {
|
||||
cli.Print(" %s HETZNER_ROBOT_USER/PASS not set — skipping Robot discovery\n",
|
||||
cli.WarningStyle.Render("⚠"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// stepLoadBalancer ensures the Hetzner Cloud load balancer described in the
// config exists, creating it when missing. It is idempotent: an existing LB
// with the configured name short-circuits with an informational message.
// Requires HCLOUD_TOKEN in the environment. Honors the global setupDryRun
// flag by printing the would-be creation instead of calling the API.
func stepLoadBalancer(ctx context.Context, cfg *infra.Config) error {
	hcloudToken := os.Getenv("HCLOUD_TOKEN")
	if hcloudToken == "" {
		return fmt.Errorf("HCLOUD_TOKEN required for load balancer management")
	}

	hc := infra.NewHCloudClient(hcloudToken)

	// Check if LB already exists
	lbs, err := hc.ListLoadBalancers(ctx)
	if err != nil {
		return fmt.Errorf("list load balancers: %w", err)
	}

	for _, lb := range lbs {
		if lb.Name == cfg.LoadBalancer.Name {
			cli.Print(" Load balancer '%s' already exists (ID: %d, IP: %s)\n",
				lb.Name, lb.ID, lb.PublicNet.IPv4.IP)
			return nil
		}
	}

	if setupDryRun {
		cli.Print(" [dry-run] Would create load balancer '%s' (%s) in %s\n",
			cfg.LoadBalancer.Name, cfg.LoadBalancer.Type, cfg.LoadBalancer.Location)
		for _, b := range cfg.LoadBalancer.Backends {
			if host, ok := cfg.Hosts[b.Host]; ok {
				cli.Print(" [dry-run] Backend: %s (%s:%d)\n", b.Host, host.IP, b.Port)
			}
		}
		return nil
	}

	// Build targets from config. Every backend host must resolve to an entry
	// in cfg.Hosts; targets are attached by IP rather than by HCloud server ID.
	targets := make([]infra.HCloudLBCreateTarget, 0, len(cfg.LoadBalancer.Backends))
	for _, b := range cfg.LoadBalancer.Backends {
		host, ok := cfg.Hosts[b.Host]
		if !ok {
			return fmt.Errorf("backend host '%s' not found in config", b.Host)
		}
		targets = append(targets, infra.HCloudLBCreateTarget{
			Type: "ip",
			IP:   &infra.HCloudLBTargetIP{IP: host.IP},
		})
	}

	// Build services: one LB service per configured listener, each with a
	// health check on the backend port. Timeout/retries are fixed here;
	// interval, protocol, and path come from cfg.LoadBalancer.Health.
	services := make([]infra.HCloudLBService, 0, len(cfg.LoadBalancer.Listeners))
	for _, l := range cfg.LoadBalancer.Listeners {
		svc := infra.HCloudLBService{
			Protocol:        l.Protocol,
			ListenPort:      l.Frontend,
			DestinationPort: l.Backend,
			Proxyprotocol:   l.ProxyProtocol,
			HealthCheck: &infra.HCloudLBHealthCheck{
				Protocol: cfg.LoadBalancer.Health.Protocol,
				Port:     l.Backend,
				Interval: cfg.LoadBalancer.Health.Interval,
				Timeout:  10,
				Retries:  3,
				// "2??" accepts any 2xx status as healthy.
				// NOTE(review): the HTTP sub-check is always populated, even
				// when the health protocol is tcp — confirm the API ignores it.
				HTTP: &infra.HCloudLBHCHTTP{
					Path:       cfg.LoadBalancer.Health.Path,
					StatusCode: "2??",
				},
			},
		}
		services = append(services, svc)
	}

	req := infra.HCloudLBCreateRequest{
		Name:             cfg.LoadBalancer.Name,
		LoadBalancerType: cfg.LoadBalancer.Type,
		Location:         cfg.LoadBalancer.Location,
		Algorithm:        infra.HCloudLBAlgorithm{Type: cfg.LoadBalancer.Algorithm},
		Services:         services,
		Targets:          targets,
		// Labels mark the LB as owned by this tool so it can be found later.
		Labels: map[string]string{
			"project": "host-uk",
			"managed": "core-cli",
		},
	}

	cli.Print(" Creating load balancer '%s'...\n", cfg.LoadBalancer.Name)

	lb, err := hc.CreateLoadBalancer(ctx, req)
	if err != nil {
		return fmt.Errorf("create load balancer: %w", err)
	}

	cli.Print(" Created: %s (ID: %d, IP: %s)\n",
		cli.BoldStyle.Render(lb.Name), lb.ID, lb.PublicNet.IPv4.IP)

	return nil
}
|
||||
|
||||
func stepDNS(ctx context.Context, cfg *infra.Config) error {
|
||||
authID := os.Getenv("CLOUDNS_AUTH_ID")
|
||||
authPass := os.Getenv("CLOUDNS_AUTH_PASSWORD")
|
||||
if authID == "" || authPass == "" {
|
||||
return fmt.Errorf("CLOUDNS_AUTH_ID and CLOUDNS_AUTH_PASSWORD required")
|
||||
}
|
||||
|
||||
dns := infra.NewCloudNSClient(authID, authPass)
|
||||
|
||||
for zoneName, zone := range cfg.DNS.Zones {
|
||||
cli.Print(" Zone: %s\n", cli.BoldStyle.Render(zoneName))
|
||||
|
||||
for _, rec := range zone.Records {
|
||||
value := rec.Value
|
||||
// Skip templated values (need LB IP first)
|
||||
if value == "{{.lb_ip}}" {
|
||||
cli.Print(" %s %s %s %s — %s\n",
|
||||
cli.WarningStyle.Render("⚠"),
|
||||
rec.Name, rec.Type, value,
|
||||
cli.DimStyle.Render("needs LB IP (run setup --step=lb first)"))
|
||||
continue
|
||||
}
|
||||
|
||||
if setupDryRun {
|
||||
cli.Print(" [dry-run] %s %s -> %s (TTL: %d)\n",
|
||||
rec.Type, rec.Name, value, rec.TTL)
|
||||
continue
|
||||
}
|
||||
|
||||
changed, err := dns.EnsureRecord(ctx, zoneName, rec.Name, rec.Type, value, rec.TTL)
|
||||
if err != nil {
|
||||
cli.Print(" %s %s %s: %s\n", cli.ErrorStyle.Render("✗"), rec.Type, rec.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if changed {
|
||||
cli.Print(" %s %s %s -> %s\n",
|
||||
cli.SuccessStyle.Render("✓"),
|
||||
rec.Type, rec.Name, value)
|
||||
} else {
|
||||
cli.Print(" %s %s %s (no change)\n",
|
||||
cli.DimStyle.Render("·"),
|
||||
rec.Type, rec.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
64
cmd/prod/cmd_ssh.go
Normal file
64
cmd/prod/cmd_ssh.go
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// sshCmd is the `prod ssh <host>` command. It looks the host up in the
// infra.yaml config and replaces the current process with an ssh session
// (see runSSH). Exactly one positional argument — the host key — is required.
var sshCmd = &cobra.Command{
	Use:   "ssh <host>",
	Short: "SSH into a production host",
	Long: `Open an SSH session to a production host defined in infra.yaml.

Examples:
  core prod ssh noc
  core prod ssh de
  core prod ssh de2
  core prod ssh build`,
	Args: cobra.ExactArgs(1),
	RunE: runSSH,
}
|
||||
|
||||
func runSSH(cmd *cobra.Command, args []string) error {
|
||||
cfg, _, err := loadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := args[0]
|
||||
host, ok := cfg.Hosts[name]
|
||||
if !ok {
|
||||
// List available hosts
|
||||
cli.Print("Unknown host '%s'. Available:\n", name)
|
||||
for n, h := range cfg.Hosts {
|
||||
cli.Print(" %s %s (%s)\n", cli.BoldStyle.Render(n), h.IP, h.Role)
|
||||
}
|
||||
return fmt.Errorf("host '%s' not found in infra.yaml", name)
|
||||
}
|
||||
|
||||
sshArgs := []string{
|
||||
"ssh",
|
||||
"-i", host.SSH.Key,
|
||||
"-p", fmt.Sprintf("%d", host.SSH.Port),
|
||||
"-o", "StrictHostKeyChecking=accept-new",
|
||||
fmt.Sprintf("%s@%s", host.SSH.User, host.IP),
|
||||
}
|
||||
|
||||
cli.Print("%s %s@%s (%s)\n",
|
||||
cli.BoldStyle.Render("▶"),
|
||||
host.SSH.User, host.FQDN,
|
||||
cli.DimStyle.Render(host.IP))
|
||||
|
||||
sshPath, err := exec.LookPath("ssh")
|
||||
if err != nil {
|
||||
return fmt.Errorf("ssh not found: %w", err)
|
||||
}
|
||||
|
||||
// Replace current process with SSH
|
||||
return syscall.Exec(sshPath, sshArgs, os.Environ())
|
||||
}
|
||||
325
cmd/prod/cmd_status.go
Normal file
325
cmd/prod/cmd_status.go
Normal file
|
|
@ -0,0 +1,325 @@
|
|||
package prod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/ansible"
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"forge.lthn.ai/core/go-devops/infra"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// statusCmd is the `prod status` command: it probes every host defined in
// infra.yaml in parallel and prints SSH, Docker, and service health, plus
// Hetzner load balancer state when HCLOUD_TOKEN is available (see runStatus).
var statusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show production infrastructure health",
	Long: `Check connectivity, services, and cluster health across all production hosts.

Tests:
  - SSH connectivity to all hosts
  - Docker daemon status
  - Coolify controller (noc)
  - Galera cluster state (de, de2)
  - Redis Sentinel status (de, de2)
  - Load balancer health (if HCLOUD_TOKEN set)`,
	RunE: runStatus,
}
|
||||
|
||||
// hostStatus aggregates the results of probing one production host.
// It is produced by checkHost and rendered by printHostStatus.
type hostStatus struct {
	Name      string            // host key from the config (e.g. "noc", "de")
	Host      *infra.Host       // host definition the probe ran against
	Connected bool              // set once the SSH connection succeeded
	ConnTime  time.Duration     // time taken to establish the SSH connection
	OS        string            // PRETTY_NAME from /etc/os-release, if readable
	Docker    string            // `docker --version` first line, empty when unavailable
	Services  map[string]string // service name -> state string (e.g. "running")
	Error     error             // fatal probe error (SSH client creation/connect)
}
|
||||
|
||||
func runStatus(cmd *cobra.Command, args []string) error {
|
||||
cfg, cfgPath, err := loadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cli.Print("%s Infrastructure status from %s\n\n",
|
||||
cli.BoldStyle.Render("▶"),
|
||||
cli.DimStyle.Render(cfgPath))
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Check all hosts in parallel
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
mu sync.Mutex
|
||||
statuses []hostStatus
|
||||
)
|
||||
|
||||
for name, host := range cfg.Hosts {
|
||||
wg.Add(1)
|
||||
go func(name string, host *infra.Host) {
|
||||
defer wg.Done()
|
||||
s := checkHost(ctx, name, host)
|
||||
mu.Lock()
|
||||
statuses = append(statuses, s)
|
||||
mu.Unlock()
|
||||
}(name, host)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Print results in consistent order
|
||||
order := []string{"noc", "de", "de2", "build"}
|
||||
for _, name := range order {
|
||||
for _, s := range statuses {
|
||||
if s.Name == name {
|
||||
printHostStatus(s)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check LB if token available
|
||||
if token := os.Getenv("HCLOUD_TOKEN"); token != "" {
|
||||
fmt.Println()
|
||||
checkLoadBalancer(ctx, token)
|
||||
} else {
|
||||
fmt.Println()
|
||||
cli.Print("%s Load balancer: %s\n",
|
||||
cli.DimStyle.Render(" ○"),
|
||||
cli.DimStyle.Render("HCLOUD_TOKEN not set (skipped)"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkHost connects to one host over SSH and gathers a hostStatus snapshot:
// connection latency, OS name, Docker version, and the state of each service
// listed in host.Services. Probe command errors are deliberately ignored
// (best-effort status); only client creation/connect failures set s.Error.
func checkHost(ctx context.Context, name string, host *infra.Host) hostStatus {
	s := hostStatus{
		Name:     name,
		Host:     host,
		Services: make(map[string]string),
	}

	sshCfg := ansible.SSHConfig{
		Host:    host.IP,
		Port:    host.SSH.Port,
		User:    host.SSH.User,
		KeyFile: host.SSH.Key,
		// Per-host connect budget; the caller's ctx caps the overall run.
		Timeout: 15 * time.Second,
	}

	client, err := ansible.NewSSHClient(sshCfg)
	if err != nil {
		s.Error = fmt.Errorf("create SSH client: %w", err)
		return s
	}
	defer func() { _ = client.Close() }()

	start := time.Now()
	if err := client.Connect(ctx); err != nil {
		s.Error = fmt.Errorf("SSH connect: %w", err)
		return s
	}
	s.Connected = true
	s.ConnTime = time.Since(start)

	// OS info: PRETTY_NAME from /etc/os-release (empty if unreadable).
	stdout, _, _, _ := client.Run(ctx, "cat /etc/os-release 2>/dev/null | grep PRETTY_NAME | cut -d'\"' -f2")
	s.OS = strings.TrimSpace(stdout)

	// Docker version banner, first line only.
	stdout, _, _, err = client.Run(ctx, "docker --version 2>/dev/null | head -1")
	if err == nil && stdout != "" {
		s.Docker = strings.TrimSpace(stdout)
	}

	// Check each expected service declared for this host.
	for _, svc := range host.Services {
		status := checkService(ctx, client, svc)
		s.Services[svc] = status
	}

	return s
}
|
||||
|
||||
func checkService(ctx context.Context, client *ansible.SSHClient, service string) string {
|
||||
switch service {
|
||||
case "coolify":
|
||||
stdout, _, _, _ := client.Run(ctx, "docker ps --format '{{.Names}}' 2>/dev/null | grep -c coolify")
|
||||
if strings.TrimSpace(stdout) != "0" && strings.TrimSpace(stdout) != "" {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
|
||||
case "traefik":
|
||||
stdout, _, _, _ := client.Run(ctx, "docker ps --format '{{.Names}}' 2>/dev/null | grep -c traefik")
|
||||
if strings.TrimSpace(stdout) != "0" && strings.TrimSpace(stdout) != "" {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
|
||||
case "galera":
|
||||
// Check Galera cluster state
|
||||
stdout, _, _, _ := client.Run(ctx,
|
||||
"docker exec $(docker ps -q --filter name=mariadb 2>/dev/null || echo none) "+
|
||||
"mariadb -u root -e \"SHOW STATUS LIKE 'wsrep_cluster_size'\" --skip-column-names 2>/dev/null | awk '{print $2}'")
|
||||
size := strings.TrimSpace(stdout)
|
||||
if size != "" && size != "0" {
|
||||
return fmt.Sprintf("cluster_size=%s", size)
|
||||
}
|
||||
// Try non-Docker
|
||||
stdout, _, _, _ = client.Run(ctx,
|
||||
"mariadb -u root -e \"SHOW STATUS LIKE 'wsrep_cluster_size'\" --skip-column-names 2>/dev/null | awk '{print $2}'")
|
||||
size = strings.TrimSpace(stdout)
|
||||
if size != "" && size != "0" {
|
||||
return fmt.Sprintf("cluster_size=%s", size)
|
||||
}
|
||||
return "not running"
|
||||
|
||||
case "redis":
|
||||
stdout, _, _, _ := client.Run(ctx,
|
||||
"docker exec $(docker ps -q --filter name=redis 2>/dev/null || echo none) "+
|
||||
"redis-cli ping 2>/dev/null")
|
||||
if strings.TrimSpace(stdout) == "PONG" {
|
||||
return "running"
|
||||
}
|
||||
stdout, _, _, _ = client.Run(ctx, "redis-cli ping 2>/dev/null")
|
||||
if strings.TrimSpace(stdout) == "PONG" {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
|
||||
case "forgejo-runner":
|
||||
stdout, _, _, _ := client.Run(ctx, "systemctl is-active forgejo-runner 2>/dev/null || docker ps --format '{{.Names}}' 2>/dev/null | grep -c runner")
|
||||
val := strings.TrimSpace(stdout)
|
||||
if val == "active" || (val != "0" && val != "") {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
|
||||
default:
|
||||
// Generic docker container check
|
||||
stdout, _, _, _ := client.Run(ctx,
|
||||
fmt.Sprintf("docker ps --format '{{.Names}}' 2>/dev/null | grep -c %s", service))
|
||||
if strings.TrimSpace(stdout) != "0" && strings.TrimSpace(stdout) != "" {
|
||||
return "running"
|
||||
}
|
||||
return "not running"
|
||||
}
|
||||
}
|
||||
|
||||
// printHostStatus renders one host's probe results: a header line with name,
// IP, role (color-coded), and FQDN, then connection, Docker, and per-service
// lines. Fatal probe errors and unreachable hosts short-circuit the output.
func printHostStatus(s hostStatus) {
	// Host header: pick a role color, defaulting to dim for unknown roles.
	roleStyle := cli.DimStyle
	switch s.Host.Role {
	case "app":
		roleStyle = cli.SuccessStyle
	case "bastion":
		roleStyle = cli.WarningStyle
	case "builder":
		roleStyle = cli.InfoStyle
	}

	cli.Print(" %s %s %s %s\n",
		cli.BoldStyle.Render(s.Name),
		cli.DimStyle.Render(s.Host.IP),
		roleStyle.Render(s.Host.Role),
		cli.DimStyle.Render(s.Host.FQDN))

	if s.Error != nil {
		cli.Print(" %s %s\n", cli.ErrorStyle.Render("✗"), s.Error)
		return
	}

	if !s.Connected {
		cli.Print(" %s SSH unreachable\n", cli.ErrorStyle.Render("✗"))
		return
	}

	// Connection info: latency, plus the OS name when it was readable.
	cli.Print(" %s SSH %s",
		cli.SuccessStyle.Render("✓"),
		cli.DimStyle.Render(s.ConnTime.Round(time.Millisecond).String()))
	if s.OS != "" {
		cli.Print(" %s", cli.DimStyle.Render(s.OS))
	}
	fmt.Println()

	if s.Docker != "" {
		cli.Print(" %s %s\n", cli.SuccessStyle.Render("✓"), cli.DimStyle.Render(s.Docker))
	}

	// Services: iterate the host's declared service list (stable order)
	// rather than the result map (random order).
	for _, svc := range s.Host.Services {
		status, ok := s.Services[svc]
		if !ok {
			continue
		}

		icon := cli.SuccessStyle.Render("●")
		style := cli.SuccessStyle
		if status == "not running" {
			icon = cli.ErrorStyle.Render("○")
			style = cli.ErrorStyle
		}

		cli.Print(" %s %s %s\n", icon, svc, style.Render(status))
	}

	fmt.Println()
}
|
||||
|
||||
// checkLoadBalancer prints the state of every Hetzner Cloud load balancer,
// including per-target, per-listener health as reported by the HCloud API.
// Failures are printed rather than returned: status output is best-effort.
func checkLoadBalancer(ctx context.Context, token string) {
	hc := infra.NewHCloudClient(token)
	lbs, err := hc.ListLoadBalancers(ctx)
	if err != nil {
		cli.Print(" %s Load balancer: %s\n", cli.ErrorStyle.Render("✗"), err)
		return
	}

	if len(lbs) == 0 {
		cli.Print(" %s No load balancers found\n", cli.DimStyle.Render("○"))
		return
	}

	for _, lb := range lbs {
		cli.Print(" %s LB: %s IP: %s Targets: %d\n",
			cli.SuccessStyle.Render("●"),
			cli.BoldStyle.Render(lb.Name),
			lb.PublicNet.IPv4.IP,
			len(lb.Targets))

		for _, t := range lb.Targets {
			for _, hs := range t.HealthStatus {
				// Healthy listeners get a filled dot, anything else hollow.
				icon := cli.SuccessStyle.Render("●")
				if hs.Status != "healthy" {
					icon = cli.ErrorStyle.Render("○")
				}
				// IP-type targets carry their address; other types print blank.
				ip := ""
				if t.IP != nil {
					ip = t.IP.IP
				}
				cli.Print(" %s :%d %s %s\n", icon, hs.ListenPort, hs.Status, cli.DimStyle.Render(ip))
			}
		}
	}
}
|
||||
|
||||
func loadConfig() (*infra.Config, string, error) {
|
||||
if infraFile != "" {
|
||||
cfg, err := infra.Load(infraFile)
|
||||
return cfg, infraFile, err
|
||||
}
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return infra.Discover(cwd)
|
||||
}
|
||||
13
cmd/vm/cmd_commands.go
Normal file
13
cmd/vm/cmd_commands.go
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
// Package vm provides LinuxKit virtual machine management commands.
|
||||
//
|
||||
// Commands:
|
||||
// - run: Run a VM from image (.iso, .qcow2, .vmdk, .raw) or template
|
||||
// - ps: List running VMs
|
||||
// - stop: Stop a running VM
|
||||
// - logs: View VM logs
|
||||
// - exec: Execute command in VM via SSH
|
||||
// - templates: Manage LinuxKit templates (list, build)
|
||||
//
|
||||
// Uses qemu or hyperkit depending on system availability.
|
||||
// Templates are built from YAML definitions and can include variables.
|
||||
package vm
|
||||
345
cmd/vm/cmd_container.go
Normal file
345
cmd/vm/cmd_container.go
Normal file
|
|
@ -0,0 +1,345 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
goio "io"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/container"
|
||||
"forge.lthn.ai/core/go/pkg/i18n"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Flag storage for the `vm run` command (bound in addVMRunCommand).
var (
	runName         string   // --name: container/VM name
	runDetach       bool     // --detach/-d: run in the background
	runMemory       int      // --memory: passed to container.RunOptions.Memory (0 = default; units per manager — TODO confirm)
	runCPUs         int      // --cpus: CPU count (0 = default)
	runSSHPort      int      // --ssh-port: host port forwarded to the VM's SSH (0 = none/default)
	runTemplateName string   // --template: build+run from a named LinuxKit template instead of an image path
	runVarFlags     []string // --var KEY=VALUE entries for template substitution
)
|
||||
|
||||
// addVMRunCommand adds the 'run' command under vm.
//
// `vm run` boots a VM either from a positional image path or, when
// --template is given, from a named LinuxKit template with --var
// substitutions applied (no image argument needed on that path).
func addVMRunCommand(parent *cobra.Command) {
	runCmd := &cobra.Command{
		Use:   "run [image]",
		Short: i18n.T("cmd.vm.run.short"),
		Long:  i18n.T("cmd.vm.run.long"),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Collect the run options from the bound flag variables.
			opts := container.RunOptions{
				Name:    runName,
				Detach:  runDetach,
				Memory:  runMemory,
				CPUs:    runCPUs,
				SSHPort: runSSHPort,
			}

			// If template is specified, build and run from template
			if runTemplateName != "" {
				vars := ParseVarFlags(runVarFlags)
				return RunFromTemplate(runTemplateName, vars, opts)
			}

			// Otherwise, require an image path
			if len(args) == 0 {
				return errors.New(i18n.T("cmd.vm.run.error.image_required"))
			}
			image := args[0]

			// NOTE(review): runContainer rebuilds the same options from the
			// raw flag values; opts above is only used on the template path.
			return runContainer(image, runName, runDetach, runMemory, runCPUs, runSSHPort)
		},
	}

	runCmd.Flags().StringVar(&runName, "name", "", i18n.T("cmd.vm.run.flag.name"))
	runCmd.Flags().BoolVarP(&runDetach, "detach", "d", false, i18n.T("cmd.vm.run.flag.detach"))
	runCmd.Flags().IntVar(&runMemory, "memory", 0, i18n.T("cmd.vm.run.flag.memory"))
	runCmd.Flags().IntVar(&runCPUs, "cpus", 0, i18n.T("cmd.vm.run.flag.cpus"))
	runCmd.Flags().IntVar(&runSSHPort, "ssh-port", 0, i18n.T("cmd.vm.run.flag.ssh_port"))
	runCmd.Flags().StringVar(&runTemplateName, "template", "", i18n.T("cmd.vm.run.flag.template"))
	runCmd.Flags().StringArrayVar(&runVarFlags, "var", nil, i18n.T("cmd.vm.run.flag.var"))

	parent.AddCommand(runCmd)
}
|
||||
|
||||
// runContainer boots a VM from the given image via the LinuxKit manager.
// It prints the image, optional name, and chosen hypervisor, then either
// reports the background container's ID/PID plus follow-up hints (detach)
// or, in the foreground case, blocks in manager.Run until the VM exits.
func runContainer(image, name string, detach bool, memory, cpus, sshPort int) error {
	manager, err := container.NewLinuxKitManager(io.Local)
	if err != nil {
		return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
	}

	opts := container.RunOptions{
		Name:    name,
		Detach:  detach,
		Memory:  memory,
		CPUs:    cpus,
		SSHPort: sshPort,
	}

	fmt.Printf("%s %s\n", dimStyle.Render(i18n.Label("image")), image)
	if name != "" {
		fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.name")), name)
	}
	fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.hypervisor")), manager.Hypervisor().Name())
	fmt.Println()

	ctx := context.Background()
	c, err := manager.Run(ctx, image, opts)
	if err != nil {
		return fmt.Errorf(i18n.T("i18n.fail.run", "container")+": %w", err)
	}

	if detach {
		fmt.Printf("%s %s\n", successStyle.Render(i18n.Label("started")), c.ID)
		fmt.Printf("%s %d\n", dimStyle.Render(i18n.T("cmd.vm.label.pid")), c.PID)
		fmt.Println()
		// Hints use the 8-char short ID, matching `vm ps` output.
		// NOTE(review): c.ID[:8] assumes IDs are always >= 8 chars — confirm.
		fmt.Println(i18n.T("cmd.vm.hint.view_logs", map[string]interface{}{"ID": c.ID[:8]}))
		fmt.Println(i18n.T("cmd.vm.hint.stop", map[string]interface{}{"ID": c.ID[:8]}))
	} else {
		fmt.Printf("\n%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.container_stopped")), c.ID)
	}

	return nil
}
|
||||
|
||||
// psAll mirrors the `vm ps --all/-a` flag: include stopped containers.
var psAll bool

// addVMPsCommand adds the 'ps' command under vm.
//
// `vm ps` lists running VMs; with --all it also shows stopped ones.
func addVMPsCommand(parent *cobra.Command) {
	psCmd := &cobra.Command{
		Use:   "ps",
		Short: i18n.T("cmd.vm.ps.short"),
		Long:  i18n.T("cmd.vm.ps.long"),
		RunE: func(cmd *cobra.Command, args []string) error {
			return listContainers(psAll)
		},
	}

	psCmd.Flags().BoolVarP(&psAll, "all", "a", false, i18n.T("cmd.vm.ps.flag.all"))

	parent.AddCommand(psCmd)
}
|
||||
|
||||
func listContainers(all bool) error {
|
||||
manager, err := container.NewLinuxKitManager(io.Local)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
containers, err := manager.List(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.list", "containers")+": %w", err)
|
||||
}
|
||||
|
||||
// Filter if not showing all
|
||||
if !all {
|
||||
filtered := make([]*container.Container, 0)
|
||||
for _, c := range containers {
|
||||
if c.Status == container.StatusRunning {
|
||||
filtered = append(filtered, c)
|
||||
}
|
||||
}
|
||||
containers = filtered
|
||||
}
|
||||
|
||||
if len(containers) == 0 {
|
||||
if all {
|
||||
fmt.Println(i18n.T("cmd.vm.ps.no_containers"))
|
||||
} else {
|
||||
fmt.Println(i18n.T("cmd.vm.ps.no_running"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
_, _ = fmt.Fprintln(w, i18n.T("cmd.vm.ps.header"))
|
||||
_, _ = fmt.Fprintln(w, "--\t----\t-----\t------\t-------\t---")
|
||||
|
||||
for _, c := range containers {
|
||||
// Shorten image path
|
||||
imageName := c.Image
|
||||
if len(imageName) > 30 {
|
||||
imageName = "..." + imageName[len(imageName)-27:]
|
||||
}
|
||||
|
||||
// Format duration
|
||||
duration := formatDuration(time.Since(c.StartedAt))
|
||||
|
||||
// Status with color
|
||||
status := string(c.Status)
|
||||
switch c.Status {
|
||||
case container.StatusRunning:
|
||||
status = successStyle.Render(status)
|
||||
case container.StatusStopped:
|
||||
status = dimStyle.Render(status)
|
||||
case container.StatusError:
|
||||
status = errorStyle.Render(status)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%d\n",
|
||||
c.ID[:8], c.Name, imageName, status, duration, c.PID)
|
||||
}
|
||||
|
||||
_ = w.Flush()
|
||||
return nil
|
||||
}
|
||||
|
||||
func formatDuration(d time.Duration) string {
|
||||
if d < time.Minute {
|
||||
return fmt.Sprintf("%ds", int(d.Seconds()))
|
||||
}
|
||||
if d < time.Hour {
|
||||
return fmt.Sprintf("%dm", int(d.Minutes()))
|
||||
}
|
||||
if d < 24*time.Hour {
|
||||
return fmt.Sprintf("%dh", int(d.Hours()))
|
||||
}
|
||||
return fmt.Sprintf("%dd", int(d.Hours()/24))
|
||||
}
|
||||
|
||||
// addVMStopCommand adds the 'stop' command under vm.
//
// `vm stop <container-id>` accepts a full or partial ID (or name prefix);
// resolution happens in stopContainer via resolveContainerID.
func addVMStopCommand(parent *cobra.Command) {
	stopCmd := &cobra.Command{
		Use:   "stop <container-id>",
		Short: i18n.T("cmd.vm.stop.short"),
		Long:  i18n.T("cmd.vm.stop.long"),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 {
				return errors.New(i18n.T("cmd.vm.error.id_required"))
			}
			return stopContainer(args[0])
		},
	}

	parent.AddCommand(stopCmd)
}
|
||||
|
||||
func stopContainer(id string) error {
|
||||
manager, err := container.NewLinuxKitManager(io.Local)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
|
||||
}
|
||||
|
||||
// Support partial ID matching
|
||||
fullID, err := resolveContainerID(manager, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.stop.stopping")), fullID[:8])
|
||||
|
||||
ctx := context.Background()
|
||||
if err := manager.Stop(ctx, fullID); err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.stop", "container")+": %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s\n", successStyle.Render(i18n.T("common.status.stopped")))
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolveContainerID resolves a partial ID to a full ID.
|
||||
func resolveContainerID(manager *container.LinuxKitManager, partialID string) (string, error) {
|
||||
ctx := context.Background()
|
||||
containers, err := manager.List(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var matches []*container.Container
|
||||
for _, c := range containers {
|
||||
if strings.HasPrefix(c.ID, partialID) || strings.HasPrefix(c.Name, partialID) {
|
||||
matches = append(matches, c)
|
||||
}
|
||||
}
|
||||
|
||||
switch len(matches) {
|
||||
case 0:
|
||||
return "", errors.New(i18n.T("cmd.vm.error.no_match", map[string]interface{}{"ID": partialID}))
|
||||
case 1:
|
||||
return matches[0].ID, nil
|
||||
default:
|
||||
return "", errors.New(i18n.T("cmd.vm.error.multiple_match", map[string]interface{}{"ID": partialID}))
|
||||
}
|
||||
}
|
||||
|
||||
// logsFollow mirrors the `vm logs --follow/-f` flag: stream instead of dump.
var logsFollow bool

// addVMLogsCommand adds the 'logs' command under vm.
//
// `vm logs <container-id>` accepts a full/partial ID or name prefix and
// copies the container's log output to stdout (see viewLogs).
func addVMLogsCommand(parent *cobra.Command) {
	logsCmd := &cobra.Command{
		Use:   "logs <container-id>",
		Short: i18n.T("cmd.vm.logs.short"),
		Long:  i18n.T("cmd.vm.logs.long"),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 {
				return errors.New(i18n.T("cmd.vm.error.id_required"))
			}
			return viewLogs(args[0], logsFollow)
		},
	}

	logsCmd.Flags().BoolVarP(&logsFollow, "follow", "f", false, i18n.T("common.flag.follow"))

	parent.AddCommand(logsCmd)
}
|
||||
|
||||
func viewLogs(id string, follow bool) error {
|
||||
manager, err := container.NewLinuxKitManager(io.Local)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
|
||||
}
|
||||
|
||||
fullID, err := resolveContainerID(manager, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
reader, err := manager.Logs(ctx, fullID, follow)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.get", "logs")+": %w", err)
|
||||
}
|
||||
defer func() { _ = reader.Close() }()
|
||||
|
||||
_, err = goio.Copy(os.Stdout, reader)
|
||||
return err
|
||||
}
|
||||
|
||||
// addVMExecCommand adds the 'exec' command under vm.
//
// `vm exec <container-id> <command> [args...]` resolves the container by
// ID/name prefix and runs the command inside it (see execInContainer).
func addVMExecCommand(parent *cobra.Command) {
	execCmd := &cobra.Command{
		Use:   "exec <container-id> <command> [args...]",
		Short: i18n.T("cmd.vm.exec.short"),
		Long:  i18n.T("cmd.vm.exec.long"),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) < 2 {
				return errors.New(i18n.T("cmd.vm.error.id_and_cmd_required"))
			}
			return execInContainer(args[0], args[1:])
		},
	}

	parent.AddCommand(execCmd)
}
|
||||
|
||||
func execInContainer(id string, cmd []string) error {
|
||||
manager, err := container.NewLinuxKitManager(io.Local)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.init", "container manager")+": %w", err)
|
||||
}
|
||||
|
||||
fullID, err := resolveContainerID(manager, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
return manager.Exec(ctx, fullID, cmd)
|
||||
}
|
||||
311
cmd/vm/cmd_templates.go
Normal file
311
cmd/vm/cmd_templates.go
Normal file
|
|
@ -0,0 +1,311 @@
|
|||
package vm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/container"
|
||||
"forge.lthn.ai/core/go/pkg/i18n"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// addVMTemplatesCommand adds the 'templates' command under vm.
//
// Bare `vm templates` lists the available LinuxKit templates; the `show`
// and `vars` subcommands inspect a single template.
func addVMTemplatesCommand(parent *cobra.Command) {
	templatesCmd := &cobra.Command{
		Use:   "templates",
		Short: i18n.T("cmd.vm.templates.short"),
		Long:  i18n.T("cmd.vm.templates.long"),
		RunE: func(cmd *cobra.Command, args []string) error {
			return listTemplates()
		},
	}

	// Add subcommands
	addTemplatesShowCommand(templatesCmd)
	addTemplatesVarsCommand(templatesCmd)

	parent.AddCommand(templatesCmd)
}
|
||||
|
||||
// addTemplatesShowCommand adds the 'templates show' subcommand.
//
// `vm templates show <name>` prints the raw template body (see showTemplate).
func addTemplatesShowCommand(parent *cobra.Command) {
	showCmd := &cobra.Command{
		Use:   "show <template-name>",
		Short: i18n.T("cmd.vm.templates.show.short"),
		Long:  i18n.T("cmd.vm.templates.show.long"),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 {
				return errors.New(i18n.T("cmd.vm.error.template_required"))
			}
			return showTemplate(args[0])
		},
	}

	parent.AddCommand(showCmd)
}
|
||||
|
||||
// addTemplatesVarsCommand adds the 'templates vars' subcommand.
//
// `vm templates vars <name>` lists the template's required and optional
// substitution variables (see showTemplateVars).
func addTemplatesVarsCommand(parent *cobra.Command) {
	varsCmd := &cobra.Command{
		Use:   "vars <template-name>",
		Short: i18n.T("cmd.vm.templates.vars.short"),
		Long:  i18n.T("cmd.vm.templates.vars.long"),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 {
				return errors.New(i18n.T("cmd.vm.error.template_required"))
			}
			return showTemplateVars(args[0])
		},
	}

	parent.AddCommand(varsCmd)
}
|
||||
|
||||
func listTemplates() error {
|
||||
templates := container.ListTemplates()
|
||||
|
||||
if len(templates) == 0 {
|
||||
fmt.Println(i18n.T("cmd.vm.templates.no_templates"))
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("%s\n\n", repoNameStyle.Render(i18n.T("cmd.vm.templates.title")))
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
_, _ = fmt.Fprintln(w, i18n.T("cmd.vm.templates.header"))
|
||||
_, _ = fmt.Fprintln(w, "----\t-----------")
|
||||
|
||||
for _, tmpl := range templates {
|
||||
desc := tmpl.Description
|
||||
if len(desc) > 60 {
|
||||
desc = desc[:57] + "..."
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\n", repoNameStyle.Render(tmpl.Name), desc)
|
||||
}
|
||||
_ = w.Flush()
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.vm.templates.hint.show"), dimStyle.Render("core vm templates show <name>"))
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.vm.templates.hint.vars"), dimStyle.Render("core vm templates vars <name>"))
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.vm.templates.hint.run"), dimStyle.Render("core vm run --template <name> --var SSH_KEY=\"...\""))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func showTemplate(name string) error {
|
||||
content, err := container.GetTemplate(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n\n", dimStyle.Render(i18n.T("common.label.template")), repoNameStyle.Render(name))
|
||||
fmt.Println(content)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func showTemplateVars(name string) error {
|
||||
content, err := container.GetTemplate(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
required, optional := container.ExtractVariables(content)
|
||||
|
||||
fmt.Printf("%s %s\n\n", dimStyle.Render(i18n.T("common.label.template")), repoNameStyle.Render(name))
|
||||
|
||||
if len(required) > 0 {
|
||||
fmt.Printf("%s\n", errorStyle.Render(i18n.T("cmd.vm.templates.vars.required")))
|
||||
for _, v := range required {
|
||||
fmt.Printf(" %s\n", varStyle.Render("${"+v+"}"))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
if len(optional) > 0 {
|
||||
fmt.Printf("%s\n", successStyle.Render(i18n.T("cmd.vm.templates.vars.optional")))
|
||||
for v, def := range optional {
|
||||
fmt.Printf(" %s = %s\n",
|
||||
varStyle.Render("${"+v+"}"),
|
||||
defaultStyle.Render(def))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
if len(required) == 0 && len(optional) == 0 {
|
||||
fmt.Println(i18n.T("cmd.vm.templates.vars.none"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunFromTemplate builds and runs a LinuxKit image from a template.
|
||||
func RunFromTemplate(templateName string, vars map[string]string, runOpts container.RunOptions) error {
|
||||
// Apply template with variables
|
||||
content, err := container.ApplyTemplate(templateName, vars)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "apply template"})+": %w", err)
|
||||
}
|
||||
|
||||
// Create a temporary directory for the build
|
||||
tmpDir, err := os.MkdirTemp("", "core-linuxkit-*")
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "create temp directory"})+": %w", err)
|
||||
}
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
// Write the YAML file
|
||||
yamlPath := filepath.Join(tmpDir, templateName+".yml")
|
||||
if err := os.WriteFile(yamlPath, []byte(content), 0644); err != nil {
|
||||
return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "write template"})+": %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("common.label.template")), repoNameStyle.Render(templateName))
|
||||
fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.building")), yamlPath)
|
||||
|
||||
// Build the image using linuxkit
|
||||
outputPath := filepath.Join(tmpDir, templateName)
|
||||
if err := buildLinuxKitImage(yamlPath, outputPath); err != nil {
|
||||
return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "build image"})+": %w", err)
|
||||
}
|
||||
|
||||
// Find the built image (linuxkit creates .iso or other format)
|
||||
imagePath := findBuiltImage(outputPath)
|
||||
if imagePath == "" {
|
||||
return errors.New(i18n.T("cmd.vm.error.no_image_found"))
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("common.label.image")), imagePath)
|
||||
fmt.Println()
|
||||
|
||||
// Run the image
|
||||
manager, err := container.NewLinuxKitManager(io.Local)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "initialize container manager"})+": %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.hypervisor")), manager.Hypervisor().Name())
|
||||
fmt.Println()
|
||||
|
||||
ctx := context.Background()
|
||||
c, err := manager.Run(ctx, imagePath, runOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf(i18n.T("i18n.fail.run", "container")+": %w", err)
|
||||
}
|
||||
|
||||
if runOpts.Detach {
|
||||
fmt.Printf("%s %s\n", successStyle.Render(i18n.T("common.label.started")), c.ID)
|
||||
fmt.Printf("%s %d\n", dimStyle.Render(i18n.T("cmd.vm.label.pid")), c.PID)
|
||||
fmt.Println()
|
||||
fmt.Println(i18n.T("cmd.vm.hint.view_logs", map[string]interface{}{"ID": c.ID[:8]}))
|
||||
fmt.Println(i18n.T("cmd.vm.hint.stop", map[string]interface{}{"ID": c.ID[:8]}))
|
||||
} else {
|
||||
fmt.Printf("\n%s %s\n", dimStyle.Render(i18n.T("cmd.vm.label.container_stopped")), c.ID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildLinuxKitImage builds a LinuxKit image from a YAML file.
|
||||
func buildLinuxKitImage(yamlPath, outputPath string) error {
|
||||
// Check if linuxkit is available
|
||||
lkPath, err := lookupLinuxKit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Build the image
|
||||
// linuxkit build --format iso-bios --name <output> <yaml>
|
||||
cmd := exec.Command(lkPath, "build",
|
||||
"--format", "iso-bios",
|
||||
"--name", outputPath,
|
||||
yamlPath)
|
||||
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// findBuiltImage finds the built image file.
|
||||
func findBuiltImage(basePath string) string {
|
||||
// LinuxKit can create different formats
|
||||
extensions := []string{".iso", "-bios.iso", ".qcow2", ".raw", ".vmdk"}
|
||||
|
||||
for _, ext := range extensions {
|
||||
path := basePath + ext
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return path
|
||||
}
|
||||
}
|
||||
|
||||
// Check directory for any image file
|
||||
dir := filepath.Dir(basePath)
|
||||
base := filepath.Base(basePath)
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
name := entry.Name()
|
||||
if strings.HasPrefix(name, base) {
|
||||
for _, ext := range []string{".iso", ".qcow2", ".raw", ".vmdk"} {
|
||||
if strings.HasSuffix(name, ext) {
|
||||
return filepath.Join(dir, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// lookupLinuxKit finds the linuxkit binary.
|
||||
func lookupLinuxKit() (string, error) {
|
||||
// Check PATH first
|
||||
if path, err := exec.LookPath("linuxkit"); err == nil {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// Check common locations
|
||||
paths := []string{
|
||||
"/usr/local/bin/linuxkit",
|
||||
"/opt/homebrew/bin/linuxkit",
|
||||
}
|
||||
|
||||
for _, p := range paths {
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", errors.New(i18n.T("cmd.vm.error.linuxkit_not_found"))
|
||||
}
|
||||
|
||||
// ParseVarFlags parses --var flags of the form KEY=VALUE (or KEY="VALUE" /
// KEY='VALUE') into a map. Entries without an "=" are silently skipped; keys
// and values are trimmed of surrounding whitespace, and exactly one matching
// pair of surrounding quotes is stripped from the value.
func ParseVarFlags(varFlags []string) map[string]string {
	vars := make(map[string]string, len(varFlags))

	for _, flag := range varFlags {
		key, value, ok := strings.Cut(flag, "=")
		if !ok {
			continue
		}
		key = strings.TrimSpace(key)
		value = strings.TrimSpace(value)

		// Strip one matching pair of surrounding quotes. The previous
		// strings.Trim(value, "\"'") removed runs of either quote
		// character from both ends, which corrupted values with
		// mismatched or stacked quotes (e.g. `'a"` became `a`).
		if len(value) >= 2 {
			first, last := value[0], value[len(value)-1]
			if first == last && (first == '"' || first == '\'') {
				value = value[1 : len(value)-1]
			}
		}
		vars[key] = value
	}

	return vars
}
|
||||
43
cmd/vm/cmd_vm.go
Normal file
43
cmd/vm/cmd_vm.go
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
// Package vm provides LinuxKit VM management commands.
|
||||
package vm
|
||||
|
||||
import (
|
||||
"forge.lthn.ai/core/go/pkg/cli"
|
||||
"forge.lthn.ai/core/go/pkg/i18n"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// init registers the vm command tree with the shared CLI at program start-up,
// so importing this package is enough to make the commands available.
func init() {
	cli.RegisterCommands(AddVMCommands)
}
|
||||
|
||||
// Style aliases re-exported from the shared cli package so output from this
// package matches the rest of the tool.
var (
	repoNameStyle = cli.RepoStyle
	successStyle = cli.SuccessStyle
	errorStyle = cli.ErrorStyle
	dimStyle = cli.DimStyle
)

// VM-specific styles used when printing template variables.
var (
	// varStyle highlights ${VAR} placeholders.
	varStyle = cli.NewStyle().Foreground(cli.ColourAmber500)
	// defaultStyle renders default values in dim italics.
	defaultStyle = cli.NewStyle().Foreground(cli.ColourGray500).Italic()
)
|
||||
|
||||
// AddVMCommands adds container-related commands under 'vm' to the CLI.
|
||||
func AddVMCommands(root *cobra.Command) {
|
||||
vmCmd := &cobra.Command{
|
||||
Use: "vm",
|
||||
Short: i18n.T("cmd.vm.short"),
|
||||
Long: i18n.T("cmd.vm.long"),
|
||||
}
|
||||
|
||||
root.AddCommand(vmCmd)
|
||||
addVMRunCommand(vmCmd)
|
||||
addVMPsCommand(vmCmd)
|
||||
addVMStopCommand(vmCmd)
|
||||
addVMLogsCommand(vmCmd)
|
||||
addVMExecCommand(vmCmd)
|
||||
addVMTemplatesCommand(vmCmd)
|
||||
}
|
||||
Loading…
Add table
Reference in a new issue