diff --git a/CLAUDE.md b/CLAUDE.md index b94a857..39db777 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## Project Overview -`core/go-ansible` is a pure Go Ansible playbook engine. It parses YAML playbooks, inventories, and roles, then executes tasks on remote hosts via SSH. 41 module handler implementations (plus 3 community modules), Jinja2-compatible templating, privilege escalation (become), and event-driven callbacks. This is a library — there is no standalone binary. The CLI integration lives in `cmd/ansible/` and is compiled as part of the `core` CLI binary. +`core/go-ansible` is a pure Go Ansible playbook engine. It parses YAML playbooks, inventories, and roles, then executes tasks on remote hosts via SSH. It ships 42 module handler implementations (plus 3 community modules), Jinja2-compatible templating, privilege escalation (become), and event-driven callbacks. This is a library — there is no standalone binary. The CLI integration lives in `cmd/ansible/` and is compiled as part of the `core` CLI binary. 
## Build & Test diff --git a/docs/index.md b/docs/index.md index 505b0db..a448dfd 100644 --- a/docs/index.md +++ b/docs/index.md @@ -126,7 +126,7 @@ go-ansible/ ## Supported Modules -41 module handlers are implemented, covering the most commonly used Ansible modules: +42 module handlers are implemented, covering the most commonly used Ansible modules: | Category | Modules | |----------|---------| @@ -139,7 +139,7 @@ go-ansible/ | **Source control** | `git` | | **Archive** | `unarchive` | | **System** | `hostname`, `sysctl`, `cron`, `reboot`, `setup` | -| **Flow control** | `debug`, `fail`, `assert`, `set_fact`, `pause`, `wait_for`, `meta`, `include_vars` | +| **Flow control** | `debug`, `fail`, `assert`, `set_fact`, `add_host`, `pause`, `wait_for`, `meta`, `include_vars` | | **Community** | `community.general.ufw`, `ansible.posix.authorized_key`, `community.docker.docker_compose` | Both fully-qualified collection names (e.g. `ansible.builtin.shell`) and short-form names (e.g. `shell`) are accepted. 
diff --git a/executor_extra_test.go b/executor_extra_test.go index d9e843f..4fa19f6 100644 --- a/executor_extra_test.go +++ b/executor_extra_test.go @@ -1,6 +1,7 @@ package ansible import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -147,6 +148,72 @@ func TestExecutorExtra_ModuleSetFact_Good_SkipsCacheable(t *testing.T) { assert.False(t, hasCacheable) } +// --- moduleAddHost --- + +func TestExecutorExtra_ModuleAddHost_Good_AddsHostAndGroups(t *testing.T) { + e := NewExecutor("/tmp") + + result, err := e.moduleAddHost(map[string]any{ + "name": "db1", + "groups": "databases,production", + "ansible_host": "10.0.0.5", + "ansible_port": "2222", + "ansible_user": "deploy", + "ansible_connection": "ssh", + "ansible_become_password": "secret", + "environment": "prod", + "custom_var": "custom-value", + }) + + require.NoError(t, err) + assert.True(t, result.Changed) + assert.Equal(t, "db1", result.Data["host"]) + assert.Contains(t, result.Msg, "db1") + + require.NotNil(t, e.inventory) + require.NotNil(t, e.inventory.All) + require.NotNil(t, e.inventory.All.Hosts["db1"]) + + host := e.inventory.All.Hosts["db1"] + assert.Equal(t, "10.0.0.5", host.AnsibleHost) + assert.Equal(t, 2222, host.AnsiblePort) + assert.Equal(t, "deploy", host.AnsibleUser) + assert.Equal(t, "ssh", host.AnsibleConnection) + assert.Equal(t, "secret", host.AnsibleBecomePassword) + assert.Equal(t, "custom-value", host.Vars["custom_var"]) + + require.NotNil(t, e.inventory.All.Children["databases"]) + require.NotNil(t, e.inventory.All.Children["production"]) + assert.Same(t, host, e.inventory.All.Children["databases"].Hosts["db1"]) + assert.Same(t, host, e.inventory.All.Children["production"].Hosts["db1"]) + + assert.Equal(t, []string{"db1"}, GetHosts(e.inventory, "all")) + assert.Equal(t, []string{"db1"}, GetHosts(e.inventory, "databases")) + assert.Equal(t, []string{"db1"}, GetHosts(e.inventory, "production")) +} + +func TestExecutorExtra_ModuleAddHost_Good_ThroughDispatcher(t *testing.T) 
{ + e := NewExecutor("/tmp") + task := &Task{ + Module: "add_host", + Args: map[string]any{ + "name": "cache1", + "group": "caches", + "role": "redis", + }, + } + + result, err := e.executeModule(context.Background(), "host1", &SSHClient{}, task, &Play{}) + + require.NoError(t, err) + assert.True(t, result.Changed) + assert.Equal(t, "cache1", result.Data["host"]) + assert.Equal(t, []string{"caches"}, result.Data["groups"]) + assert.Equal(t, []string{"cache1"}, GetHosts(e.inventory, "all")) + assert.Equal(t, []string{"cache1"}, GetHosts(e.inventory, "caches")) + assert.Equal(t, "redis", e.inventory.All.Hosts["cache1"].Vars["role"]) +} + // --- moduleIncludeVars --- func TestExecutorExtra_ModuleIncludeVars_Good_WithFile(t *testing.T) { diff --git a/modules.go b/modules.go index 9a4d0b0..facd0ab 100644 --- a/modules.go +++ b/modules.go @@ -99,6 +99,8 @@ func (e *Executor) executeModule(ctx context.Context, host string, client *SSHCl return e.moduleAssert(args, host) case "ansible.builtin.set_fact": return e.moduleSetFact(args) + case "ansible.builtin.add_host": + return e.moduleAddHost(args) case "ansible.builtin.pause": return e.modulePause(ctx, args) case "ansible.builtin.wait_for": @@ -939,6 +941,126 @@ func (e *Executor) moduleSetFact(args map[string]any) (*TaskResult, error) { return &TaskResult{Changed: true}, nil } +func (e *Executor) moduleAddHost(args map[string]any) (*TaskResult, error) { + name := getStringArg(args, "name", "") + if name == "" { + name = getStringArg(args, "hostname", "") + } + if name == "" { + return nil, coreerr.E("Executor.moduleAddHost", "name required", nil) + } + + groups := normalizeStringList(args["groups"]) + if len(groups) == 0 { + groups = normalizeStringList(args["group"]) + } + + e.mu.Lock() + defer e.mu.Unlock() + + if e.inventory == nil { + e.inventory = &Inventory{} + } + if e.inventory.All == nil { + e.inventory.All = &InventoryGroup{} + } + + host := findInventoryHost(e.inventory.All, name) + if host == nil { + host = 
&Host{} + } + if host.Vars == nil { + host.Vars = make(map[string]any) + } + + if v := getStringArg(args, "ansible_host", ""); v != "" { + host.AnsibleHost = v + } + switch v := args["ansible_port"].(type) { + case int: + host.AnsiblePort = v + case int8: + host.AnsiblePort = int(v) + case int16: + host.AnsiblePort = int(v) + case int32: + host.AnsiblePort = int(v) + case int64: + host.AnsiblePort = int(v) + case uint: + host.AnsiblePort = int(v) + case uint8: + host.AnsiblePort = int(v) + case uint16: + host.AnsiblePort = int(v) + case uint32: + host.AnsiblePort = int(v) + case uint64: + host.AnsiblePort = int(v) + case string: + if port, err := strconv.Atoi(v); err == nil { + host.AnsiblePort = port + } + } + if v := getStringArg(args, "ansible_user", ""); v != "" { + host.AnsibleUser = v + } + if v := getStringArg(args, "ansible_password", ""); v != "" { + host.AnsiblePassword = v + } + if v := getStringArg(args, "ansible_ssh_private_key_file", ""); v != "" { + host.AnsibleSSHPrivateKeyFile = v + } + if v := getStringArg(args, "ansible_connection", ""); v != "" { + host.AnsibleConnection = v + } + if v := getStringArg(args, "ansible_become_password", ""); v != "" { + host.AnsibleBecomePassword = v + } + + reserved := map[string]bool{ + "name": true, "hostname": true, "groups": true, "group": true, + "ansible_host": true, "ansible_port": true, "ansible_user": true, + "ansible_password": true, "ansible_ssh_private_key_file": true, + "ansible_connection": true, "ansible_become_password": true, + } + for key, val := range args { + if reserved[key] { + continue + } + host.Vars[key] = val + } + + if e.inventory.All.Hosts == nil { + e.inventory.All.Hosts = make(map[string]*Host) + } + e.inventory.All.Hosts[name] = host + + for _, groupName := range groups { + if groupName == "" { + continue + } + + group := ensureInventoryGroup(e.inventory.All, groupName) + if group.Hosts == nil { + group.Hosts = make(map[string]*Host) + } + group.Hosts[name] = host + } + + msg := 
sprintf("host %s added", name) + if len(groups) > 0 { + msg += " to groups: " + join(", ", groups) + } + + data := map[string]any{"host": name} + if len(groups) > 0 { + data["groups"] = groups + } + + return &TaskResult{Changed: true, Msg: msg, Data: data}, nil +} + func (e *Executor) modulePause(ctx context.Context, args map[string]any) (*TaskResult, error) { seconds := 0 if s, ok := args["seconds"].(int); ok { @@ -987,6 +1109,86 @@ func sleepChan(seconds int) <-chan struct{} { return ch } +func normalizeStringList(value any) []string { + switch v := value.(type) { + case nil: + return nil + case string: + if v == "" { + return nil + } + parts := corexSplit(v, ",") + out := make([]string, 0, len(parts)) + for _, part := range parts { + if trimmed := corexTrimSpace(part); trimmed != "" { + out = append(out, trimmed) + } + } + if len(out) == 0 && corexTrimSpace(v) != "" { + return []string{corexTrimSpace(v)} + } + return out + case []string: + out := make([]string, 0, len(v)) + for _, item := range v { + if trimmed := corexTrimSpace(item); trimmed != "" { + out = append(out, trimmed) + } + } + return out + case []any: + out := make([]string, 0, len(v)) + for _, item := range v { + if s, ok := item.(string); ok { + if trimmed := corexTrimSpace(s); trimmed != "" { + out = append(out, trimmed) + } + } + } + return out + default: + s := corexTrimSpace(corexSprint(v)) + if s == "" { + return nil + } + return []string{s} + } +} + +func ensureInventoryGroup(parent *InventoryGroup, name string) *InventoryGroup { + if parent == nil { + return nil + } + if parent.Children == nil { + parent.Children = make(map[string]*InventoryGroup) + } + if group, ok := parent.Children[name]; ok && group != nil { + return group + } + + group := &InventoryGroup{} + parent.Children[name] = group + return group +} + +func findInventoryHost(group *InventoryGroup, name string) *Host { + if group == nil { + return nil + } + + if host, ok := group.Hosts[name]; ok { + return host + } + + for _, 
child := range group.Children { + if host := findInventoryHost(child, name); host != nil { + return host + } + } + + return nil +} + func (e *Executor) moduleWaitFor(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) { port := 0 if p, ok := args["port"].(int); ok { diff --git a/parser.go b/parser.go index aa4bc18..865461d 100644 --- a/parser.go +++ b/parser.go @@ -416,15 +416,32 @@ func getAllHosts(group *InventoryGroup) []string { } var hosts []string - for name := range group.Hosts { - hosts = append(hosts, name) - } - for _, child := range group.Children { - hosts = append(hosts, getAllHosts(child)...) - } + seen := make(map[string]bool) + collectAllHosts(group, seen, &hosts) return hosts } +func collectAllHosts(group *InventoryGroup, seen map[string]bool, hosts *[]string) { + if group == nil { + return + } + + // Sort keys for deterministic traversal. + hostKeys := slices.Sorted(maps.Keys(group.Hosts)) + for _, name := range hostKeys { + if seen[name] { + continue + } + seen[name] = true + *hosts = append(*hosts, name) + } + + childKeys := slices.Sorted(maps.Keys(group.Children)) + for _, name := range childKeys { + collectAllHosts(group.Children[name], seen, hosts) + } +} + // AllHostsIter returns an iterator for all hosts in an inventory group. 
// // Example: @@ -432,27 +449,11 @@ func getAllHosts(group *InventoryGroup) []string { // seq := AllHostsIter(inv.All) func AllHostsIter(group *InventoryGroup) iter.Seq[string] { return func(yield func(string) bool) { - if group == nil { - return - } - // Sort keys for deterministic iteration - keys := slices.Sorted(maps.Keys(group.Hosts)) - for _, name := range keys { - if !yield(name) { + for _, host := range getAllHosts(group) { + if !yield(host) { return } } - - // Sort children keys for deterministic iteration - childKeys := slices.Sorted(maps.Keys(group.Children)) - for _, name := range childKeys { - child := group.Children[name] - for host := range AllHostsIter(child) { - if !yield(host) { - return - } - } - } } }