[agent/codex:gpt-5.4-mini] Read ~/spec/code/core/go/ansible/RFC.md fully. Find ONE feat... #36

Merged
Virgil merged 37 commits from main into dev 2026-04-01 08:53:49 +00:00
24 changed files with 2202 additions and 217 deletions

View file

@ -16,8 +16,8 @@ go tool cover -func=/tmp/ansible.cover
No direct `stdlib`-to-`core.*` wrapper drift was found in the Go implementation. The remaining drift is stale migration residue around the `core.*` move:
- `go.mod:15`, `go.sum:7`, `go.sum:8`
Legacy `forge.lthn.ai/core/go-log` references still remain in the dependency graph.
- Resolved on the current branch:
Legacy `forge.lthn.ai/core/go-log` references were removed by replacing `dappco.re/go/core/io` usage with the filesystem primitives in `dappco.re/go/core`.
- `CLAUDE.md:37`, `docs/development.md:169`
Repository guidance still refers to `core/cli`, while the current command registration lives on the `dappco.re/go/core` API at `cmd/ansible/cmd.go:8`.
- `CLAUDE.md:66`, `docs/development.md:86`
@ -25,12 +25,8 @@ No direct `stdlib`-to-`core.*` wrapper drift was found in the Go implementation.
## UK English
- `executor.go:248`
Comment uses US spelling: `Initialize host results`.
- `parser.go:321`
Comment uses US spelling: `NormalizeModule normalizes a module name to its canonical form.`
- `types.go:110`
Comment uses US spelling: `LoopControl controls loop behavior.`
- Resolved on the current branch:
Comment spelling now uses `Initialise`, `normalises`, and `behaviour` in the affected code paths.
## Missing Tests

View file

@ -6,7 +6,6 @@ import (
"dappco.re/go/core"
"dappco.re/go/core/ansible"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
)
@ -35,7 +34,7 @@ func runAnsible(opts core.Options) core.Result {
playbookPath = absPath(playbookPath)
}
if !coreio.Local.Exists(playbookPath) {
if !localFS.Exists(playbookPath) {
return core.Result{Value: coreerr.E("runAnsible", sprintf("playbook not found: %s", playbookPath), nil)}
}
@ -47,6 +46,7 @@ func runAnsible(opts core.Options) core.Result {
// Set options
executor.Limit = opts.String("limit")
executor.CheckMode = opts.Bool("check")
executor.Diff = opts.Bool("diff")
executor.Verbose = opts.Int("verbose")
if tags := opts.String("tags"); tags != "" {
@ -72,14 +72,14 @@ func runAnsible(opts core.Options) core.Result {
invPath = absPath(invPath)
}
if !coreio.Local.Exists(invPath) {
if !localFS.Exists(invPath) {
return core.Result{Value: coreerr.E("runAnsible", sprintf("inventory not found: %s", invPath), nil)}
}
if coreio.Local.IsDir(invPath) {
if localFS.IsDir(invPath) {
for _, name := range []string{"inventory.yml", "hosts.yml", "inventory.yaml", "hosts.yaml"} {
p := joinPath(invPath, name)
if coreio.Local.Exists(p) {
if localFS.Exists(p) {
invPath = p
break
}

View file

@ -22,6 +22,7 @@ func Register(c *core.Core) {
core.Option{Key: "extra-vars", Value: ""},
core.Option{Key: "verbose", Value: 0},
core.Option{Key: "check", Value: false},
core.Option{Key: "diff", Value: false},
),
})

22
cmd/ansible/cmd_test.go Normal file
View file

@ -0,0 +1,22 @@
package anscmd
import (
"testing"
"dappco.re/go/core"
"github.com/stretchr/testify/require"
)
// TestRegister_AnsibleCommandExposesDiffFlag verifies that Register wires the
// "ansible" command with a "diff" flag whose default value is false.
func TestRegister_AnsibleCommandExposesDiffFlag(t *testing.T) {
	app := core.New()
	Register(app)

	lookup := app.Command("ansible")
	require.True(t, lookup.OK)

	cmd, isCommand := lookup.Value.(*core.Command)
	require.True(t, isCommand)

	require.True(t, cmd.Flags.Has("diff"))
	require.False(t, cmd.Flags.Bool("diff"))
}

View file

@ -1,12 +1,56 @@
package anscmd
import (
"io/fs"
"unicode"
"unicode/utf8"
"dappco.re/go/core"
)
// localFileSystem adapts the unrestricted core filesystem to the small
// error-returning API this package uses internally.
type localFileSystem struct {
	fs *core.Fs
}

// localFS is the package-wide handle onto the local filesystem.
var localFS = localFileSystem{fs: (&core.Fs{}).NewUnrestricted()}

// Exists reports whether path exists, delegating to the core filesystem.
func (l localFileSystem) Exists(path string) bool {
	return l.fs.Exists(path)
}

// IsDir reports whether path is a directory, delegating to the core filesystem.
func (l localFileSystem) IsDir(path string) bool {
	return l.fs.IsDir(path)
}

// Write stores content at path, converting a failed core.Result into an error.
func (l localFileSystem) Write(path, content string) error {
	if result := l.fs.Write(path, content); !result.OK {
		return resultError("write "+path, result)
	}
	return nil
}

// WriteMode stores content at path with the given file mode, converting a
// failed core.Result into an error.
func (l localFileSystem) WriteMode(path, content string, mode fs.FileMode) error {
	if result := l.fs.WriteMode(path, content, mode); !result.OK {
		return resultError("write "+path, result)
	}
	return nil
}

// resultError converts a failed core.Result into an error, preferring an
// error value carried in the result over a synthesised message.
func resultError(op string, result core.Result) error {
	if result.OK {
		return nil
	}
	if err, ok := result.Value.(error); ok {
		return err
	}
	if result.Value == nil {
		return core.E(op, "operation failed", nil)
	}
	return core.E(op, core.Sprint(result.Value), nil)
}
func absPath(path string) string {
if path == "" {
return core.Env("DIR_CWD")

15
cmd/ansible/specs/RFC.md Normal file
View file

@ -0,0 +1,15 @@
# anscmd
**Import:** `dappco.re/go/core/ansible/cmd/ansible`
**Files:** 3
## Types
This package has no exported structs, interfaces, or type aliases.
## Functions
### Register
`func Register(c *core.Core)`
Registers two CLI commands on `c`:
- `ansible`: Runs a playbook through `ansible.Executor`. The command exposes `inventory`, `limit`, `tags`, `skip-tags`, `extra-vars`, `verbose`, `check`, and `diff` flags.
- `ansible/test`: Opens an SSH connection to a host, prints basic host facts, and exposes `user`, `password`, `key`, and `port` flags.

View file

@ -1,6 +1,7 @@
package ansible
import (
"io/fs"
"unicode"
"unicode/utf8"
@ -13,6 +14,66 @@ type stringBuffer interface {
String() string
}
// corexLocalFS adapts the unrestricted core filesystem to the small
// error-returning API this package uses internally.
type corexLocalFS struct {
	fs *core.Fs
}

// localFS is the package-wide handle onto the local filesystem.
var localFS = corexLocalFS{fs: (&core.Fs{}).NewUnrestricted()}

// Read returns the file content at path; failures are converted to errors.
func (l corexLocalFS) Read(path string) (string, error) {
	result := l.fs.Read(path)
	if !result.OK {
		return "", corexResultError("read "+path, result)
	}
	content, _ := result.Value.(string)
	return content, nil
}

// Write stores content at path, converting a failed core.Result into an error.
func (l corexLocalFS) Write(path, content string) error {
	if result := l.fs.Write(path, content); !result.OK {
		return corexResultError("write "+path, result)
	}
	return nil
}

// WriteMode stores content at path with the given file mode, converting a
// failed core.Result into an error.
func (l corexLocalFS) WriteMode(path, content string, mode fs.FileMode) error {
	if result := l.fs.WriteMode(path, content, mode); !result.OK {
		return corexResultError("write "+path, result)
	}
	return nil
}

// EnsureDir delegates to the core filesystem's EnsureDir, converting a
// failed core.Result into an error.
func (l corexLocalFS) EnsureDir(path string) error {
	if result := l.fs.EnsureDir(path); !result.OK {
		return corexResultError("ensure dir "+path, result)
	}
	return nil
}

// Exists reports whether path exists, delegating to the core filesystem.
func (l corexLocalFS) Exists(path string) bool {
	return l.fs.Exists(path)
}

// IsDir reports whether path is a directory, delegating to the core filesystem.
func (l corexLocalFS) IsDir(path string) bool {
	return l.fs.IsDir(path)
}

// corexResultError converts a failed core.Result into an error, preferring an
// error value carried in the result over a synthesised message.
func corexResultError(op string, result core.Result) error {
	if result.OK {
		return nil
	}
	if err, ok := result.Value.(error); ok {
		return err
	}
	if result.Value == nil {
		return core.E(op, "operation failed", nil)
	}
	return core.E(op, core.Sprint(result.Value), nil)
}
func dirSep() string {
ds := core.Env("DS")
if ds == "" {

View file

@ -2,13 +2,15 @@ package ansible
import (
"context"
"errors"
"regexp"
"slices"
"strconv"
"sync"
"text/template"
"time"
"unicode"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
)
@ -43,6 +45,10 @@ type Executor struct {
Verbose int
}
type commandRunner interface {
Run(ctx context.Context, cmd string) (stdout, stderr string, exitCode int, err error)
}
// NewExecutor creates a new playbook executor.
//
// Example:
@ -130,6 +136,7 @@ func (e *Executor) runPlay(ctx context.Context, play *Play) error {
if len(hosts) == 0 {
return nil // No hosts matched
}
failedHosts := make(map[string]bool)
// Merge play vars
for k, v := range play.Vars {
@ -149,38 +156,41 @@ func (e *Executor) runPlay(ctx context.Context, play *Play) error {
}
}
// Execute pre_tasks
for _, task := range play.PreTasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
return err
// Execute hosts in serial batches when requested.
for _, batch := range e.hostBatches(hosts, play.Serial) {
// Execute pre_tasks
for _, task := range play.PreTasks {
if err := e.runTaskOnHosts(ctx, batch, &task, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Execute roles
for _, roleRef := range play.Roles {
if err := e.runRole(ctx, hosts, &roleRef, play); err != nil {
return err
// Execute roles
for _, roleRef := range play.Roles {
if err := e.runRole(ctx, batch, &roleRef, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Execute tasks
for _, task := range play.Tasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
return err
// Execute tasks
for _, task := range play.Tasks {
if err := e.runTaskOnHosts(ctx, batch, &task, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Execute post_tasks
for _, task := range play.PostTasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
return err
// Execute post_tasks
for _, task := range play.PostTasks {
if err := e.runTaskOnHosts(ctx, batch, &task, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Run notified handlers
for _, handler := range play.Handlers {
if e.notified[handler.Name] {
if err := e.runTaskOnHosts(ctx, hosts, &handler, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &handler, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
@ -189,8 +199,56 @@ func (e *Executor) runPlay(ctx context.Context, play *Play) error {
return nil
}
// hostBatches splits hosts into serial batches sized by the play's serial
// setting. When batching is disabled (or the batch covers every host) the
// original slice is returned as a single batch; otherwise each batch is an
// independent copy of its slice segment.
func (e *Executor) hostBatches(hosts []string, serial any) [][]string {
	size := serialBatchSize(serial, len(hosts))
	if size >= len(hosts) {
		return [][]string{hosts}
	}
	batches := make([][]string, 0, (len(hosts)+size-1)/size)
	for start := 0; start < len(hosts); start += size {
		end := start + size
		if end > len(hosts) {
			end = len(hosts)
		}
		// Copy so later mutation of one batch cannot alias another.
		batches = append(batches, append([]string(nil), hosts[start:end]...))
	}
	return batches
}
// serialBatchSize normalises the play serial value to a usable batch size.
//
// Accepted forms are positive int, int64, float64 (YAML/JSON decoders may
// deliver whole numbers as float64), and numeric strings. The result is
// capped at hostCount. Any other value — or a non-positive number — disables
// batching by returning the full host count.
func serialBatchSize(serial any, hostCount int) int {
	switch v := serial.(type) {
	case int:
		if v > 0 {
			return min(v, hostCount)
		}
	case int64:
		if v > 0 {
			return min(int(v), hostCount)
		}
	case float64:
		// Only whole, positive values are honoured; fractions fall through.
		if n := int(v); n > 0 && float64(n) == v {
			return min(n, hostCount)
		}
	case string:
		if n, err := strconv.Atoi(corexTrimSpace(v)); err == nil && n > 0 {
			return min(n, hostCount)
		}
	}
	return hostCount
}
// runRole executes a role on hosts.
func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef, play *Play) error {
func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
// Check when condition
if roleRef.When != nil {
if !e.evaluateWhen(roleRef.When, "", nil) {
@ -215,7 +273,12 @@ func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef
// Execute tasks
for _, task := range tasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
task := task
task.Tags = append(append([]string{}, roleRef.Tags...), task.Tags...)
if err := e.runTaskOnHosts(ctx, hosts, &task, play, failedHosts, totalHosts, maxFailPercent); err != nil {
if isHostFailureError(err) {
continue
}
// Restore vars
e.vars = oldVars
return err
@ -228,36 +291,113 @@ func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef
}
// runTaskOnHosts runs a task on all hosts.
func (e *Executor) runTaskOnHosts(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runTaskOnHosts(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
hosts = filterFailedHosts(hosts, failedHosts)
if len(hosts) == 0 {
return nil
}
// run_once executes the task only on the first host in the current batch.
if task.RunOnce && len(hosts) > 1 {
hosts = hosts[:1]
}
// Check tags
if !e.matchesTags(task.Tags) {
tags := append(append([]string{}, play.Tags...), task.Tags...)
if !e.matchesTags(tags) {
return nil
}
// Handle block tasks
if len(task.Block) > 0 {
return e.runBlock(ctx, hosts, task, play)
return e.runBlock(ctx, hosts, task, play, failedHosts, totalHosts, maxFailPercent)
}
// Handle include/import
if task.IncludeTasks != "" || task.ImportTasks != "" {
return e.runIncludeTasks(ctx, hosts, task, play)
return e.runIncludeTasks(ctx, hosts, task, play, failedHosts, totalHosts, maxFailPercent)
}
if task.IncludeRole != nil || task.ImportRole != nil {
return e.runIncludeRole(ctx, hosts, task, play)
return e.runIncludeRole(ctx, hosts, task, play, failedHosts, totalHosts, maxFailPercent)
}
var (
haveFailure bool
lastErr error
)
for _, host := range hosts {
if failedHosts[host] {
continue
}
if err := e.runTaskOnHost(ctx, host, task, play); err != nil {
if !task.IgnoreErrors {
return err
failedHosts[host] = true
taskErr := coreerr.E("Executor.runTaskOnHosts", sprintf("task failed on %s: %s", host, err.Error()), err)
if maxFailPercent > 0 && exceedsMaxFailPercent(len(failedHosts), totalHosts, maxFailPercent) {
return coreerr.E("Executor.runTaskOnHosts", sprintf("max fail percentage exceeded: %d%% failed hosts of %d", len(failedHosts), totalHosts), taskErr)
}
if maxFailPercent > 0 {
haveFailure = true
lastErr = taskErr
continue
}
return taskErr
}
}
}
if haveFailure {
return &hostFailureError{err: lastErr}
}
return nil
}
// filterFailedHosts returns hosts with every entry marked true in
// failedHosts removed. When either input is empty the original slice is
// returned untouched.
func filterFailedHosts(hosts []string, failedHosts map[string]bool) []string {
	if len(hosts) == 0 || len(failedHosts) == 0 {
		return hosts
	}
	remaining := make([]string, 0, len(hosts))
	for _, h := range hosts {
		if failedHosts[h] {
			continue
		}
		remaining = append(remaining, h)
	}
	return remaining
}
// exceedsMaxFailPercent reports whether the failed-host ratio is strictly
// above maxFailPercent. Non-positive counts or thresholds never exceed.
func exceedsMaxFailPercent(failedHosts, totalHosts, maxFailPercent int) bool {
	if failedHosts <= 0 || totalHosts <= 0 || maxFailPercent <= 0 {
		return false
	}
	// Integer cross-multiplication avoids float rounding:
	// failed/total > max/100  <=>  failed*100 > total*max.
	return failedHosts*100 > totalHosts*maxFailPercent
}
// hostFailureError marks task errors that only failed individual hosts and
// therefore should not abort the whole play.
type hostFailureError struct {
	err error
}

// Error returns the wrapped error's message, falling back to a generic
// message for a nil receiver or nil wrapped error.
func (e *hostFailureError) Error() string {
	if e != nil && e.err != nil {
		return e.err.Error()
	}
	return "host failure"
}

// Unwrap exposes the wrapped error for errors.Is / errors.As chains.
func (e *hostFailureError) Unwrap() error {
	if e == nil {
		return nil
	}
	return e.err
}

// isHostFailureError reports whether err is, or wraps, a hostFailureError.
func isHostFailureError(err error) bool {
	var hf *hostFailureError
	return errors.As(err, &hf)
}
// runTaskOnHost runs a task on a single host.
func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, play *Play) error {
start := time.Now()
@ -266,7 +406,7 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
e.OnTaskStart(host, task)
}
// Initialize host results
// Initialise host results
if e.results[host] == nil {
e.results[host] = make(map[string]*TaskResult)
}
@ -285,10 +425,20 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
}
}
// delegate_to changes the execution target but keeps the task context
// anchored to the original host for templating and registration.
executionHost := host
if task.Delegate != "" {
executionHost = e.templateString(task.Delegate, host, task)
if executionHost == "" {
executionHost = host
}
}
// Get SSH client
client, err := e.getClient(host, play)
client, err := e.getClient(executionHost, play)
if err != nil {
return coreerr.E("Executor.runTaskOnHost", sprintf("get client for %s", host), err)
return coreerr.E("Executor.runTaskOnHost", sprintf("get client for %s", executionHost), err)
}
// Handle loops
@ -296,11 +446,12 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
return e.runLoop(ctx, host, client, task, play)
}
// Execute the task
result, err := e.executeModule(ctx, host, client, task, play)
// Execute the task, retrying when an until condition is present.
result, err := e.runTaskWithUntil(ctx, host, client, task, play)
if err != nil {
result = &TaskResult{Failed: true, Msg: err.Error()}
}
result = e.applyTaskResultConditions(host, task, result)
result.Duration = time.Since(start)
// Store result
@ -324,6 +475,137 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
return nil
}
// runTaskWithUntil executes a task once or retries it until the until
// condition evaluates to true.
//
// Defaults are 3 retries and a 1-second delay when the task does not set
// them. Each attempt is published under the task's register name and the
// `result` alias so the until expression can reference it; when the task
// registers under a different name, any pre-existing `result` entry is saved
// up front and restored on return.
func (e *Executor) runTaskWithUntil(ctx context.Context, host string, client *SSHClient, task *Task, play *Play) (*TaskResult, error) {
	if task.Until == "" {
		return e.executeModule(ctx, host, client, task, play)
	}
	retries := task.Retries
	if retries <= 0 {
		retries = 3
	}
	delay := task.Delay
	if delay <= 0 {
		delay = 1
	}
	restoreAlias := task.Register != "result"
	var (
		previousAlias *TaskResult
		hadAlias      bool
	)
	if restoreAlias {
		e.mu.RLock()
		if hostResults, ok := e.results[host]; ok {
			previousAlias, hadAlias = hostResults["result"]
		}
		e.mu.RUnlock()
		defer func() {
			e.mu.Lock()
			defer e.mu.Unlock()
			if e.results[host] == nil {
				e.results[host] = make(map[string]*TaskResult)
			}
			if hadAlias {
				e.results[host]["result"] = previousAlias
			} else {
				delete(e.results[host], "result")
			}
		}()
	}
	lastResult, lastErr := retryTask(ctx, retries, delay, func() (*TaskResult, error) {
		result, err := e.executeModule(ctx, host, client, task, play)
		if err != nil {
			result = &TaskResult{Failed: true, Msg: err.Error()}
		}
		result = e.applyTaskResultConditions(host, task, result)
		e.setTempResult(host, task.Register, result)
		return result, nil
	}, func(result *TaskResult) bool {
		return e.evaluateWhen(task.Until, host, task)
	})
	if lastErr != nil {
		return lastResult, lastErr
	}
	if lastResult == nil {
		lastResult = &TaskResult{}
	}
	// BUG FIX: retryTask returns a nil error both when the until condition
	// was met and when the retry budget ran out, and the previous code
	// unconditionally marked the result failed — so even successful retries
	// were reported as failures. Re-evaluate the condition (the final
	// attempt's temporary registration is still visible here) to distinguish
	// success from exhaustion.
	if e.evaluateWhen(task.Until, host, task) {
		return lastResult, nil
	}
	lastResult.Failed = true
	if lastResult.Msg == "" {
		lastResult.Msg = sprintf("until condition not met: %s", task.Until)
	}
	return lastResult, nil
}
// setTempResult exposes the latest task result for until evaluation without
// leaving stale state behind when a different register name is used. The
// result is always published under the `result` alias and, when register is
// non-empty, under that name as well.
func (e *Executor) setTempResult(host string, register string, result *TaskResult) {
	e.mu.Lock()
	defer e.mu.Unlock()
	hostResults := e.results[host]
	if hostResults == nil {
		hostResults = make(map[string]*TaskResult)
		e.results[host] = hostResults
	}
	hostResults["result"] = result
	if register != "" {
		hostResults[register] = result
	}
}
// retryTask runs fn up to retries+1 times, stopping early when done reports
// success or fn returns an error. A positive delay (in seconds) is slept
// between attempts — never after the last one — honouring context
// cancellation via sleepWithContext. A nil done accepts the first attempt.
func retryTask(ctx context.Context, retries, delay int, fn func() (*TaskResult, error), done func(*TaskResult) bool) (*TaskResult, error) {
	if retries < 0 {
		retries = 0
	}
	if delay < 0 {
		delay = 0
	}
	var (
		result *TaskResult
		err    error
	)
	for attempt := 0; attempt <= retries; attempt++ {
		result, err = fn()
		if err != nil {
			return result, err
		}
		if result == nil {
			result = &TaskResult{}
		}
		if done == nil || done(result) {
			return result, nil
		}
		if attempt < retries && delay > 0 {
			if sleepErr := sleepWithContext(ctx, time.Duration(delay)*time.Second); sleepErr != nil {
				return result, sleepErr
			}
		}
	}
	return result, err
}
// sleepWithContext pauses for the requested duration or stops early when the
// context is cancelled.
func sleepWithContext(ctx context.Context, d time.Duration) error {
timer := time.NewTimer(d)
defer timer.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
return nil
}
}
// runLoop handles task loops.
func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient, task *Task, play *Play) error {
items := e.resolveLoop(task.Loop, host)
@ -345,6 +627,10 @@ func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient,
savedVars[indexVar] = v
}
}
extendedLoop := task.LoopControl != nil && task.LoopControl.Extended
if v, ok := e.vars["ansible_loop"]; ok {
savedVars["ansible_loop"] = v
}
var results []TaskResult
for i, item := range items {
@ -353,16 +639,26 @@ func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient,
if indexVar != "" {
e.vars[indexVar] = i
}
if extendedLoop {
e.vars["ansible_loop"] = e.loopMetadata(item, i, len(items), task.LoopControl.Label, host, task)
} else {
delete(e.vars, "ansible_loop")
}
result, err := e.executeModule(ctx, host, client, task, play)
if err != nil {
result = &TaskResult{Failed: true, Msg: err.Error()}
}
result = e.applyTaskResultConditions(host, task, result)
results = append(results, *result)
if result.Failed && !task.IgnoreErrors {
break
}
if task.LoopControl != nil && task.LoopControl.Pause > 0 && i < len(items)-1 {
time.Sleep(time.Duration(task.LoopControl.Pause) * time.Second)
}
}
// Restore loop variables
@ -378,6 +674,11 @@ func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient,
delete(e.vars, indexVar)
}
}
if v, ok := savedVars["ansible_loop"]; ok {
e.vars["ansible_loop"] = v
} else {
delete(e.vars, "ansible_loop")
}
// Store combined result
if task.Register != "" {
@ -399,13 +700,32 @@ func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient,
return nil
}
// applyTaskResultConditions applies changed_when and failed_when overrides to
// a task result. A nil result is replaced with an empty one; a nil task
// leaves the result untouched.
func (e *Executor) applyTaskResultConditions(host string, task *Task, result *TaskResult) *TaskResult {
	if result == nil {
		result = &TaskResult{}
	}
	if task == nil {
		return result
	}
	if cond := task.ChangedWhen; cond != nil {
		result.Changed = e.evaluateWhen(cond, host, task)
	}
	if cond := task.FailedWhen; cond != nil {
		result.Failed = e.evaluateWhen(cond, host, task)
	}
	return result
}
// runBlock handles block/rescue/always.
func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
var blockErr error
// Try block
for _, t := range task.Block {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
blockErr = err
break
}
@ -414,7 +734,7 @@ func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, pla
// Run rescue if block failed
if blockErr != nil && len(task.Rescue) > 0 {
for _, t := range task.Rescue {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
// Rescue also failed
break
}
@ -423,7 +743,7 @@ func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, pla
// Always run always block
for _, t := range task.Always {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
if blockErr == nil {
blockErr = err
}
@ -438,14 +758,17 @@ func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, pla
}
// runIncludeTasks handles include_tasks/import_tasks.
func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
path := task.IncludeTasks
if path == "" {
path = task.ImportTasks
}
// Resolve path relative to playbook
// Resolve path relative to the playbook root when it is not absolute.
path = e.templateString(path, "", nil)
if path != "" && !pathIsAbs(path) {
path = joinPath(e.parser.basePath, path)
}
tasks, err := e.parser.ParseTasks(path)
if err != nil {
@ -453,7 +776,10 @@ func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Ta
}
for _, t := range tasks {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
if isHostFailureError(err) {
continue
}
return err
}
}
@ -462,7 +788,7 @@ func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Ta
}
// runIncludeRole handles include_role/import_role.
func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
var roleName, tasksFrom string
var roleVars map[string]any
@ -482,7 +808,7 @@ func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Tas
Vars: roleVars,
}
return e.runRole(ctx, hosts, roleRef, play)
return e.runRole(ctx, hosts, roleRef, play, failedHosts, totalHosts, maxFailPercent)
}
// getHosts returns hosts matching the pattern.
@ -584,7 +910,16 @@ func (e *Executor) getClient(host string, play *Play) (*SSHClient, error) {
// gatherFacts collects facts from a host.
func (e *Executor) gatherFacts(ctx context.Context, host string, play *Play) error {
if play.Connection == "local" || host == "localhost" {
client, err := e.getClient(host, play)
if err != nil {
return err
}
return e.populateFacts(ctx, host, play, client)
}
func (e *Executor) populateFacts(ctx context.Context, host string, play *Play, client commandRunner) error {
if (play != nil && play.Connection == "local") || host == "localhost" {
// Local facts
e.facts[host] = &Facts{
Hostname: "localhost",
@ -592,11 +927,6 @@ func (e *Executor) gatherFacts(ctx context.Context, host string, play *Play) err
return nil
}
client, err := e.getClient(host, play)
if err != nil {
return err
}
// Gather basic facts
facts := &Facts{}
@ -617,10 +947,16 @@ func (e *Executor) gatherFacts(ctx context.Context, host string, play *Play) err
if corexHasPrefix(line, "ID=") {
facts.Distribution = trimCutset(corexTrimPrefix(line, "ID="), "\"")
}
if corexHasPrefix(line, "ID_LIKE=") && facts.OS == "" {
facts.OS = titleFactFamily(trimCutset(corexTrimPrefix(line, "ID_LIKE="), "\""))
}
if corexHasPrefix(line, "VERSION_ID=") {
facts.Version = trimCutset(corexTrimPrefix(line, "VERSION_ID="), "\"")
}
}
if facts.OS == "" {
facts.OS = titleFactFamily(facts.Distribution)
}
// Architecture
stdout, _, _, _ = client.Run(ctx, "uname -m")
@ -630,6 +966,16 @@ func (e *Executor) gatherFacts(ctx context.Context, host string, play *Play) err
stdout, _, _, _ = client.Run(ctx, "uname -r")
facts.Kernel = corexTrimSpace(stdout)
// Memory, CPU count, and primary IPv4 address
stdout, _, _, _ = client.Run(ctx, "awk '/MemTotal:/ { print int($2 / 1024) }' /proc/meminfo")
facts.Memory = parseFactInt64(stdout)
stdout, _, _, _ = client.Run(ctx, "nproc --all 2>/dev/null || getconf _NPROCESSORS_ONLN")
facts.CPUs = int(parseFactInt64(stdout))
stdout, _, _, _ = client.Run(ctx, "hostname -I 2>/dev/null | awk '{ print $1 }'")
facts.IPv4 = firstNonEmptyField(stdout)
e.mu.Lock()
e.facts[host] = facts
e.mu.Unlock()
@ -669,6 +1015,60 @@ func normalizeConditions(when any) []string {
return nil
}
// parseFactInt64 parses the first whitespace-separated field of stdout as a
// base-10 int64, returning 0 when output is empty or unparseable.
func parseFactInt64(stdout string) int64 {
	parts := fields(stdout)
	if len(parts) == 0 {
		return 0
	}
	value, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return 0
	}
	return value
}
// firstNonEmptyField returns the first non-empty whitespace-separated field
// of stdout, or "" when there is none.
func firstNonEmptyField(stdout string) string {
	for _, candidate := range fields(stdout) {
		if candidate == "" {
			continue
		}
		return candidate
	}
	return ""
}
func titleFactFamily(value string) string {
value = corexTrimSpace(value)
if value == "" {
return ""
}
fields := fields(value)
if len(fields) == 0 {
return ""
}
family := lower(fields[0])
switch family {
case "debian", "ubuntu", "linuxmint":
return "Debian"
case "rhel", "redhat", "centos", "fedora", "rocky", "almalinux", "alma", "ol", "oracle":
return "RedHat"
case "sles", "suse", "opensuse":
return "Suse"
case "arch", "manjaro":
return "Archlinux"
case "alpine":
return "Alpine"
}
runes := []rune(family)
if len(runes) == 0 {
return ""
}
runes[0] = unicode.ToUpper(runes[0])
return string(runes)
}
// evalCondition evaluates a single condition.
func (e *Executor) evalCondition(cond string, host string) bool {
cond = corexTrimSpace(cond)
@ -763,6 +1163,112 @@ func (e *Executor) getRegisteredVar(host string, name string) *TaskResult {
return nil
}
// lookupTemplateValue resolves a template variable name for host, checking in
// precedence order: executor vars, task vars, inventory host vars, and
// finally the gathered-fact aliases (ansible_hostname, ansible_os_family,
// etc.). The boolean reports whether the name resolved.
func (e *Executor) lookupTemplateValue(name string, host string, task *Task) (any, bool) {
	if v, ok := e.vars[name]; ok {
		return v, true
	}
	if task != nil {
		if v, ok := task.Vars[name]; ok {
			return v, true
		}
	}
	if e.inventory != nil {
		if v, ok := GetHostVars(e.inventory, host)[name]; ok {
			return v, true
		}
	}
	// Fact aliases all require gathered facts for the host; look up once.
	facts, ok := e.facts[host]
	if !ok {
		return nil, false
	}
	switch name {
	case "ansible_hostname":
		return facts.Hostname, true
	case "ansible_fqdn":
		return facts.FQDN, true
	case "ansible_distribution":
		return facts.Distribution, true
	case "ansible_distribution_version":
		return facts.Version, true
	case "ansible_os_family":
		return facts.OS, true
	case "ansible_architecture":
		return facts.Architecture, true
	case "ansible_kernel":
		return facts.Kernel, true
	case "ansible_memtotal_mb":
		return facts.Memory, true
	case "ansible_processor_vcpus":
		return facts.CPUs, true
	case "ansible_default_ipv4_address":
		return facts.IPv4, true
	}
	return nil, false
}
// resolveDottedValue walks a dotted path (e.g. "a.b.c") through nested
// string-keyed maps, returning the final value and whether every segment
// resolved. Any non-map intermediate or missing key aborts with false.
func resolveDottedValue(value any, path string) (any, bool) {
	current := value
	for _, key := range split(path, ".") {
		var (
			next  any
			found bool
		)
		switch m := current.(type) {
		case map[string]any:
			next, found = m[key]
		case map[string]string:
			next, found = m[key]
		default:
			return nil, false
		}
		if !found {
			return nil, false
		}
		current = next
	}
	return current, true
}
// loopMetadata builds the extended ansible_loop variable for one iteration:
// 1-based and 0-based indices, reverse indices, first/last flags, the total
// length, and the raw item. A non-empty loop_control label is templated
// against the host/task context and included under "label".
func (e *Executor) loopMetadata(item any, index, length int, label string, host string, task *Task) map[string]any {
	meta := map[string]any{
		"index":     index + 1,
		"index0":    index,
		"revindex":  length - index,
		"revindex0": length - index - 1,
		"first":     index == 0,
		"last":      index == length-1,
		"length":    length,
		"item":      item,
	}
	if label != "" {
		meta["label"] = e.templateString(label, host, task)
	}
	return meta
}
// templateString applies Jinja2-like templating.
func (e *Executor) templateString(s string, host string, task *Task) string {
// Handle {{ var }} syntax
@ -805,46 +1311,19 @@ func (e *Executor) resolveExpr(expr string, host string, task *Task) string {
return sprintf("%t", result.Failed)
}
}
if root, ok := e.lookupTemplateValue(parts[0], host, task); ok {
if resolved, ok := resolveDottedValue(root, parts[1]); ok {
return sprintf("%v", resolved)
}
}
}
// Check vars
if val, ok := e.vars[expr]; ok {
// Check vars, task vars, and host vars
if val, ok := e.lookupTemplateValue(expr, host, task); ok {
return sprintf("%v", val)
}
// Check task vars
if task != nil {
if val, ok := task.Vars[expr]; ok {
return sprintf("%v", val)
}
}
// Check host vars
if e.inventory != nil {
hostVars := GetHostVars(e.inventory, host)
if val, ok := hostVars[expr]; ok {
return sprintf("%v", val)
}
}
// Check facts
if facts, ok := e.facts[host]; ok {
switch expr {
case "ansible_hostname":
return facts.Hostname
case "ansible_fqdn":
return facts.FQDN
case "ansible_distribution":
return facts.Distribution
case "ansible_distribution_version":
return facts.Version
case "ansible_architecture":
return facts.Architecture
case "ansible_kernel":
return facts.Kernel
}
}
return "{{ " + expr + " }}" // Return as-is if unresolved
}
@ -903,7 +1382,7 @@ func (e *Executor) handleLookup(expr string) string {
case "env":
return env(arg)
case "file":
if data, err := coreio.Local.Read(arg); err == nil {
if data, err := localFS.Read(arg); err == nil {
return data
}
}
@ -1000,7 +1479,7 @@ func (e *Executor) Close() {
//
// content, err := exec.TemplateFile("/workspace/templates/app.conf.j2", "web1", &Task{})
func (e *Executor) TemplateFile(src, host string, task *Task) (string, error) {
content, err := coreio.Local.Read(src)
content, err := localFS.Read(src)
if err != nil {
return "", err
}
@ -1033,10 +1512,14 @@ func (e *Executor) TemplateFile(src, host string, task *Task) (string, error) {
if facts, ok := e.facts[host]; ok {
context["ansible_hostname"] = facts.Hostname
context["ansible_fqdn"] = facts.FQDN
context["ansible_os_family"] = facts.OS
context["ansible_distribution"] = facts.Distribution
context["ansible_distribution_version"] = facts.Version
context["ansible_architecture"] = facts.Architecture
context["ansible_kernel"] = facts.Kernel
context["ansible_memtotal_mb"] = facts.Memory
context["ansible_processor_vcpus"] = facts.CPUs
context["ansible_default_ipv4_address"] = facts.IPv4
}
buf := newBuilder()

View file

@ -410,18 +410,26 @@ func TestExecutorExtra_ResolveExpr_Good_Facts(t *testing.T) {
e.facts["host1"] = &Facts{
Hostname: "web01",
FQDN: "web01.example.com",
OS: "Debian",
Distribution: "ubuntu",
Version: "22.04",
Architecture: "x86_64",
Kernel: "5.15.0",
Memory: 32768,
CPUs: 16,
IPv4: "10.0.0.10",
}
assert.Equal(t, "web01", e.resolveExpr("ansible_hostname", "host1", nil))
assert.Equal(t, "web01.example.com", e.resolveExpr("ansible_fqdn", "host1", nil))
assert.Equal(t, "Debian", e.resolveExpr("ansible_os_family", "host1", nil))
assert.Equal(t, "ubuntu", e.resolveExpr("ansible_distribution", "host1", nil))
assert.Equal(t, "22.04", e.resolveExpr("ansible_distribution_version", "host1", nil))
assert.Equal(t, "x86_64", e.resolveExpr("ansible_architecture", "host1", nil))
assert.Equal(t, "5.15.0", e.resolveExpr("ansible_kernel", "host1", nil))
assert.Equal(t, "32768", e.resolveExpr("ansible_memtotal_mb", "host1", nil))
assert.Equal(t, "16", e.resolveExpr("ansible_processor_vcpus", "host1", nil))
assert.Equal(t, "10.0.0.10", e.resolveExpr("ansible_default_ipv4_address", "host1", nil))
}
// --- applyFilter additional coverage ---
@ -459,3 +467,33 @@ func TestExecutorExtra_ResolveExpr_Good_WithFilter(t *testing.T) {
result := e.resolveExpr("raw_value | trim", "host1", nil)
assert.Equal(t, "trimmed", result)
}
// TestExecutorExtra_LoopMetadata_Good_ExtendedMetadata verifies that
// loopMetadata exposes the full ansible_loop-style metadata set (index,
// index0, revindex, revindex0, first, last, length, item) and templates
// the loop_control label against the current item.
func TestExecutorExtra_LoopMetadata_Good_ExtendedMetadata(t *testing.T) {
	e := NewExecutor("/tmp")
	// "item" must resolve so the "item {{ item }}" label can be templated.
	e.vars["item"] = "alpha"
	// Middle item (zero-based index 1) of a three-item loop.
	metadata := e.loopMetadata("alpha", 1, 3, "item {{ item }}", "host1", &Task{})
	assert.Equal(t, 2, metadata["index"])     // 1-based position
	assert.Equal(t, 1, metadata["index0"])    // 0-based position
	assert.Equal(t, 2, metadata["revindex"])  // 1-based position from the end
	assert.Equal(t, 1, metadata["revindex0"]) // 0-based position from the end
	assert.False(t, metadata["first"].(bool))
	assert.False(t, metadata["last"].(bool))
	assert.Equal(t, 3, metadata["length"])
	assert.Equal(t, "alpha", metadata["item"])
	assert.Equal(t, "item alpha", metadata["label"])
}
// TestExecutorExtra_TemplateString_Good_DottedMapAccess verifies that
// templateString resolves dotted access into a map-valued variable
// (e.g. "ansible_loop.index0") for each expression in the template.
func TestExecutorExtra_TemplateString_Good_DottedMapAccess(t *testing.T) {
	e := NewExecutor("/tmp")
	e.vars["ansible_loop"] = map[string]any{
		"index0": 1,
		"first":  false,
		"last":   true,
	}
	got := e.templateString("{{ ansible_loop.index0 }} {{ ansible_loop.first }} {{ ansible_loop.last }}", "host1", nil)
	assert.Equal(t, "1 false true", got)
}

View file

@ -1,9 +1,13 @@
package ansible
import (
"context"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// --- NewExecutor ---
@ -32,6 +36,33 @@ func TestExecutor_SetVar_Good(t *testing.T) {
assert.Equal(t, 42, e.vars["count"])
}
// TestExecutor_TaskEnvironment_Good_MergesAndTemplates verifies that
// taskEnvironment merges play- and task-level environment maps, templates
// each value, and lets a task-level entry override the play-level entry
// with the same key (SHARED ends up as "task").
func TestExecutor_TaskEnvironment_Good_MergesAndTemplates(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetVar("tier", "blue")
	e.vars["inventory_hostname"] = "web1"
	play := &Play{
		Environment: map[string]string{
			"PLAY_ONLY": "{{ tier }}",
			"SHARED":    "play",
		},
	}
	task := &Task{
		Environment: map[string]string{
			"TASK_ONLY": "on-{{ inventory_hostname }}",
			"SHARED":    "task",
		},
	}
	env := e.taskEnvironment("web1", play, task)
	assert.Equal(t, map[string]string{
		"PLAY_ONLY": "blue",
		"TASK_ONLY": "on-web1",
		"SHARED":    "task",
	}, env)
}
// --- SetInventoryDirect ---
func TestExecutor_SetInventoryDirect_Good(t *testing.T) {
@ -48,6 +79,27 @@ func TestExecutor_SetInventoryDirect_Good(t *testing.T) {
assert.Equal(t, inv, e.inventory)
}
// TestExecutor_GetClient_Good_UsesInventoryBecomePassword verifies that
// getClient picks up ansible_become_password from the host's inventory
// entry when the play enables become.
func TestExecutor_GetClient_Good_UsesInventoryBecomePassword(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {
					AnsibleHost:           "10.0.0.1",
					AnsibleUser:           "deploy",
					AnsibleBecomePassword: "inventory-secret",
				},
			},
		},
	})
	play := &Play{Become: true}
	client, err := e.getClient("host1", play)
	require.NoError(t, err)
	require.NotNil(t, client)
	assert.Equal(t, "inventory-secret", client.becomePass)
}
// --- getHosts ---
func TestExecutor_GetHosts_Good_WithInventory(t *testing.T) {
@ -98,6 +150,318 @@ func TestExecutor_GetHosts_Good_WithLimit(t *testing.T) {
assert.Contains(t, hosts, "host2")
}
// TestExecutor_RunPlay_Good_SerialBatchesHosts verifies that serial: "1"
// runs the whole task list to completion on one host before starting the
// next one (host1 finishes both tasks before host2 begins).
// NOTE(review): the expected ordering also assumes hosts are processed in
// sorted name order — confirm against getHosts.
func TestExecutor_RunPlay_Good_SerialBatchesHosts(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {},
				"host2": {},
			},
		},
	})
	// Record every (host, task) start so the batching order can be asserted.
	var gathered []string
	e.OnTaskStart = func(host string, task *Task) {
		gathered = append(gathered, host+":"+task.Name)
	}
	gatherFacts := false
	play := &Play{
		Name:        "serial",
		Hosts:       "all",
		GatherFacts: &gatherFacts,
		Serial:      "1",
		Tasks: []Task{
			{Name: "first", Module: "debug", Args: map[string]any{"msg": "one"}},
			{Name: "second", Module: "debug", Args: map[string]any{"msg": "two"}},
		},
	}
	require.NoError(t, e.runPlay(context.Background(), play))
	assert.Equal(t, []string{
		"host1:first",
		"host1:second",
		"host2:first",
		"host2:second",
	}, gathered)
}
// TestExecutor_RunPlay_Good_MaxFailPercentStopsAfterThreshold verifies
// max_fail_percentage handling: with 3 hosts and a 50% threshold, one
// failure (33%) lets the play continue (minus the failed host), but a
// second failure (66%) aborts the play before remaining hosts/tasks run.
func TestExecutor_RunPlay_Good_MaxFailPercentStopsAfterThreshold(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {Vars: map[string]any{"fail_first": true, "fail_second": false}},
				"host2": {Vars: map[string]any{"fail_first": false, "fail_second": true}},
				"host3": {Vars: map[string]any{"fail_first": false, "fail_second": false}},
			},
		},
	})
	var executed []string
	e.OnTaskStart = func(host string, task *Task) {
		executed = append(executed, host+":"+task.Name)
	}
	gatherFacts := false
	play := &Play{
		Name:           "max fail",
		Hosts:          "all",
		GatherFacts:    &gatherFacts,
		MaxFailPercent: 50,
		Tasks: []Task{
			{
				Name:   "first failure",
				Module: "fail",
				Args:   map[string]any{"msg": "first"},
				When:   "{{ fail_first }}",
			},
			{
				Name:   "second failure",
				Module: "fail",
				Args:   map[string]any{"msg": "second"},
				When:   "{{ fail_second }}",
			},
			{
				Name:   "final task",
				Module: "debug",
				Args:   map[string]any{"msg": "ok"},
			},
		},
	}
	err := e.runPlay(context.Background(), play)
	require.Error(t, err)
	// host1 fails task 1 and is removed; host2 then fails task 2, tripping
	// the 50% threshold, so host3 never reaches task 2 and nobody reaches
	// the final task.
	assert.Equal(t, []string{
		"host1:first failure",
		"host2:first failure",
		"host3:first failure",
		"host2:second failure",
	}, executed)
	assert.NotContains(t, executed, "host3:second failure")
	assert.NotContains(t, executed, "host1:final task")
	assert.NotContains(t, executed, "host2:final task")
	assert.NotContains(t, executed, "host3:final task")
}
// TestExecutor_RunPlay_Good_RunOnceTaskOnlyRunsOnFirstHost verifies that a
// run_once task executes on only the first host in the batch, and that its
// registered result is recorded for that host only.
func TestExecutor_RunPlay_Good_RunOnceTaskOnlyRunsOnFirstHost(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {},
				"host2": {},
			},
		},
	})
	var executed []string
	e.OnTaskStart = func(host string, task *Task) {
		executed = append(executed, host)
	}
	gatherFacts := false
	play := &Play{
		Name:        "run once",
		Hosts:       "all",
		GatherFacts: &gatherFacts,
		Tasks: []Task{
			{
				Name:     "single host",
				Module:   "debug",
				Args:     map[string]any{"msg": "ok"},
				Register: "result",
				RunOnce:  true,
			},
		},
	}
	require.NoError(t, e.runPlay(context.Background(), play))
	// Only the first host executed the task...
	assert.Equal(t, []string{"host1"}, executed)
	assert.NotNil(t, e.results["host1"]["result"])
	// ...and host2 has no results at all.
	_, ok := e.results["host2"]
	assert.False(t, ok)
}
// TestExecutor_RunPlay_Good_IncludeTasksResolvesRelativeToPlaybookRoot
// verifies that a relative include_tasks path is resolved against the
// executor's root directory (the playbook's directory), not the CWD.
// NOTE(review): the YAML indentation inside the raw strings was mangled by
// the diff renderer; reconstructed here as standard 2-space YAML — confirm
// against the committed file.
func TestExecutor_RunPlay_Good_IncludeTasksResolvesRelativeToPlaybookRoot(t *testing.T) {
	dir := t.TempDir()
	includedPath := joinPath(dir, "included.yml")
	playbookPath := joinPath(dir, "playbook.yml")
	require.NoError(t, os.WriteFile(includedPath, []byte(`
- name: included debug
  debug:
    msg: hello
`), 0o644))
	require.NoError(t, os.WriteFile(playbookPath, []byte(`
- name: include relative path
  hosts: localhost
  gather_facts: false
  tasks:
    - name: load relative tasks
      include_tasks: included.yml
`), 0o644))
	e := NewExecutor(dir)
	var executed []string
	e.OnTaskStart = func(host string, task *Task) {
		executed = append(executed, task.Name)
	}
	require.NoError(t, e.Run(context.Background(), playbookPath))
	// The task defined in the included file must have run.
	assert.Contains(t, executed, "included debug")
}
// TestExecutor_RunTaskOnHost_Good_ChangedWhenOverridesResult verifies that
// changed_when: "true" forces Changed on the registered result even though
// the debug module itself reports no change.
func TestExecutor_RunTaskOnHost_Good_ChangedWhenOverridesResult(t *testing.T) {
	e := NewExecutor("/tmp")
	task := &Task{
		Name:        "force changed",
		Module:      "debug",
		Args:        map[string]any{"msg": "ok"},
		Register:    "result",
		ChangedWhen: "true",
	}
	play := &Play{}
	require.NoError(t, e.runTaskOnHost(context.Background(), "localhost", task, play))
	result := e.results["localhost"]["result"]
	require.NotNil(t, result)
	assert.True(t, result.Changed)
	assert.False(t, result.Failed)
}
// TestExecutor_RunTaskOnHost_Good_FailedWhenOverridesResult verifies that
// failed_when: "true" marks an otherwise-successful task as failed: the
// call returns an error and the registered result has Failed set.
func TestExecutor_RunTaskOnHost_Good_FailedWhenOverridesResult(t *testing.T) {
	e := NewExecutor("/tmp")
	task := &Task{
		Name:       "force fail",
		Module:     "debug",
		Args:       map[string]any{"msg": "ok"},
		Register:   "result",
		FailedWhen: "true",
	}
	play := &Play{}
	err := e.runTaskOnHost(context.Background(), "localhost", task, play)
	require.Error(t, err)
	result := e.results["localhost"]["result"]
	require.NotNil(t, result)
	assert.True(t, result.Failed)
	assert.False(t, result.Changed)
}
// TestExecutor_RunTaskOnHost_Good_DelegateToUsesDelegateHostClient verifies
// that delegate_to routes execution through a client for the delegate host:
// a client is cached for "delegate-host" and none for the original host.
func TestExecutor_RunTaskOnHost_Good_DelegateToUsesDelegateHostClient(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"source": {},
			},
		},
	})
	task := &Task{
		Name:     "delegated debug",
		Module:   "debug",
		Args:     map[string]any{"msg": "ok"},
		Delegate: "delegate-host",
	}
	play := &Play{}
	require.NoError(t, e.runTaskOnHost(context.Background(), "source", task, play))
	_, ok := e.clients["delegate-host"]
	assert.True(t, ok)
	_, ok = e.clients["source"]
	assert.False(t, ok)
}
// TestExecutor_RunPlay_Good_PlayTagsApplyToUntaggedTasks verifies that tags
// set at the play level are inherited by untagged tasks, so a task with no
// tags of its own still runs when the executor filters on the play's tag.
func TestExecutor_RunPlay_Good_PlayTagsApplyToUntaggedTasks(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {},
			},
		},
	})
	// Filter execution to the "deploy" tag only.
	e.Tags = []string{"deploy"}
	var executed []string
	e.OnTaskStart = func(host string, task *Task) {
		executed = append(executed, host+":"+task.Name)
	}
	gatherFacts := false
	play := &Play{
		Name:        "tagged play",
		Hosts:       "all",
		GatherFacts: &gatherFacts,
		Tags:        []string{"deploy"},
		Tasks: []Task{
			{Name: "untagged task", Module: "debug", Args: map[string]any{"msg": "ok"}},
		},
	}
	require.NoError(t, e.runPlay(context.Background(), play))
	assert.Equal(t, []string{"host1:untagged task"}, executed)
}
// TestExecutor_RunTaskOnHost_Good_LoopControlPause verifies that
// loop_control.pause inserts a delay between loop iterations: a two-item
// loop with a 1-second pause must take at least ~1 second overall.
func TestExecutor_RunTaskOnHost_Good_LoopControlPause(t *testing.T) {
	e := NewExecutor("/tmp")
	task := &Task{
		Name:   "pause loop",
		Module: "debug",
		Args:   map[string]any{"msg": "{{ item }}"},
		Loop:   []any{"one", "two"},
		LoopControl: &LoopControl{
			Pause: 1,
		},
	}
	play := &Play{}
	start := time.Now()
	require.NoError(t, e.runTaskOnHost(context.Background(), "localhost", task, play))
	// 900ms rather than a full second to avoid flakiness from timer slop.
	assert.GreaterOrEqual(t, time.Since(start), 900*time.Millisecond)
}
// TestExecutor_SetTempResult_Good_ResultAliasSupportsUntil verifies that a
// temp result stored under the empty name is reachable via the implicit
// "result" alias in when/until expressions, tracking the latest attempt.
func TestExecutor_SetTempResult_Good_ResultAliasSupportsUntil(t *testing.T) {
	e := NewExecutor("/tmp")
	e.setTempResult("host1", "", &TaskResult{Failed: true, RC: 1})
	assert.False(t, e.evaluateWhen("result is success", "host1", &Task{}))
	e.setTempResult("host1", "", &TaskResult{Failed: false, RC: 0})
	assert.True(t, e.evaluateWhen("result is success", "host1", &Task{}))
}
// TestRetryTask_Good_RetriesAndWaits verifies that retryTask re-runs the
// task after an unsatisfied until-condition, waits the configured delay
// between attempts (~1s, asserted with slack), and returns the result of
// the first attempt that satisfies the condition.
func TestRetryTask_Good_RetriesAndWaits(t *testing.T) {
	attempts := 0
	start := time.Now()
	result, err := retryTask(context.Background(), 1, 1, func() (*TaskResult, error) {
		attempts++
		// First attempt fails; second succeeds.
		if attempts == 1 {
			return &TaskResult{Failed: true, RC: 1}, nil
		}
		return &TaskResult{Failed: false, RC: 0}, nil
	}, func(result *TaskResult) bool {
		return result.RC == 0
	})
	require.NoError(t, err)
	assert.Equal(t, 2, attempts)
	assert.NotNil(t, result)
	assert.False(t, result.Failed)
	assert.GreaterOrEqual(t, time.Since(start), 900*time.Millisecond)
}
// --- matchesTags ---
func TestExecutor_MatchesTags_Good_NoTagsFilter(t *testing.T) {

3
go.mod
View file

@ -4,7 +4,6 @@ go 1.26.0
require (
dappco.re/go/core v0.8.0-alpha.1
dappco.re/go/core/io v0.2.0
dappco.re/go/core/log v0.1.0
github.com/stretchr/testify v1.11.1
golang.org/x/crypto v0.49.0
@ -12,8 +11,8 @@ require (
)
require (
forge.lthn.ai/core/go-log v0.0.4 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/kr/text v0.2.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
golang.org/x/sys v0.42.0 // indirect
)

5
go.sum
View file

@ -1,11 +1,8 @@
dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk=
dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
dappco.re/go/core/io v0.2.0 h1:zuudgIiTsQQ5ipVt97saWdGLROovbEB/zdVyy9/l+I4=
dappco.re/go/core/io v0.2.0/go.mod h1:1QnQV6X9LNgFKfm8SkOtR9LLaj3bDcsOIeJOOyjbL5E=
dappco.re/go/core/log v0.1.0 h1:pa71Vq2TD2aoEUQWFKwNcaJ3GBY8HbaNGqtE688Unyc=
dappco.re/go/core/log v0.1.0/go.mod h1:Nkqb8gsXhZAO8VLpx7B8i1iAmohhzqA20b9Zr8VUcJs=
forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0=
forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=

View file

@ -4,7 +4,9 @@ import (
"context"
"io"
"io/fs"
"maps"
"regexp"
"slices"
"strconv"
"sync"
@ -36,6 +38,9 @@ type MockSSHClient struct {
becomeUser string
becomePass string
// Remote environment overrides
environment map[string]string
// Execution log: every command that was executed
executed []executedCommand
@ -73,8 +78,9 @@ type uploadRecord struct {
// mock.expectCommand("echo ok", "ok", "", 0)
func NewMockSSHClient() *MockSSHClient {
return &MockSSHClient{
files: make(map[string][]byte),
stats: make(map[string]map[string]any),
files: make(map[string][]byte),
stats: make(map[string]map[string]any),
environment: make(map[string]string),
}
}
@ -133,6 +139,7 @@ func (m *MockSSHClient) Run(_ context.Context, cmd string) (string, string, int,
m.mu.Lock()
defer m.mu.Unlock()
cmd = m.commandWithEnvironment(cmd)
m.executed = append(m.executed, executedCommand{Method: "Run", Cmd: cmd})
// Search expectations in reverse order (last registered wins)
@ -156,6 +163,7 @@ func (m *MockSSHClient) RunScript(_ context.Context, script string) (string, str
m.mu.Lock()
defer m.mu.Unlock()
script = m.commandWithEnvironment(script)
m.executed = append(m.executed, executedCommand{Method: "RunScript", Cmd: script})
// Match against the script content
@ -260,6 +268,60 @@ func (m *MockSSHClient) SetBecome(become bool, user, password string) {
}
}
// Environment returns a copy of the configured remote environment
// overrides. It returns nil when no overrides are set so callers can treat
// "no environment" and "empty environment" uniformly, and clones otherwise
// so callers cannot mutate the mock's internal state.
func (m *MockSSHClient) Environment() map[string]string {
	m.mu.Lock()
	defer m.mu.Unlock()
	if len(m.environment) == 0 {
		return nil
	}
	// maps.Clone replaces the previous hand-rolled copy loop; the file
	// already imports "maps".
	return maps.Clone(m.environment)
}
// SetEnvironment replaces the configured remote environment overrides.
// The input is copied, so later mutation by the caller has no effect;
// a nil or empty map clears any existing overrides (leaving a fresh,
// non-nil internal map, as before).
func (m *MockSSHClient) SetEnvironment(environment map[string]string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	// make + maps.Copy covers both the empty and non-empty cases that were
	// previously two separate branches; behaviour is identical.
	m.environment = make(map[string]string, len(environment))
	maps.Copy(m.environment, environment)
}
// commandWithEnvironment prefixes cmd with "export KEY=VALUE; " assignments
// for every configured override, shell-quoting each value. Keys are emitted
// in sorted order so the rendered command is deterministic. The command is
// returned untouched when no overrides are configured.
func (m *MockSSHClient) commandWithEnvironment(cmd string) string {
	if len(m.environment) == 0 {
		return cmd
	}
	out := newBuilder()
	for _, name := range sortedStringKeys(m.environment) {
		out.WriteString("export ")
		out.WriteString(name + "=" + shellQuote(m.environment[name]))
		out.WriteString("; ")
	}
	out.WriteString(cmd)
	return out.String()
}
func sortedStringKeys(m map[string]string) []string {
return slices.Sorted(maps.Keys(m))
}
// Close is a no-op for the mock.
//
// Example:
@ -362,6 +424,7 @@ func (m *MockSSHClient) reset() {
defer m.mu.Unlock()
m.executed = nil
m.uploads = nil
m.environment = make(map[string]string)
}
// --- Test helper: create executor with mock client ---
@ -397,8 +460,15 @@ func newTestExecutorWithMock(host string) (*Executor, *MockSSHClient) {
// and goes straight to module execution.
func executeModuleWithMock(e *Executor, mock *MockSSHClient, host string, task *Task) (*TaskResult, error) {
module := NormalizeModule(task.Module)
e.vars["inventory_hostname"] = host
args := e.templateArgs(task.Args, host, task)
if environment := e.taskEnvironment(host, nil, task); len(environment) > 0 {
oldEnvironment := mock.Environment()
mock.SetEnvironment(environment)
defer mock.SetEnvironment(oldEnvironment)
}
// Dispatch directly to module handlers using the mock
switch module {
case "ansible.builtin.shell":
@ -449,6 +519,10 @@ func executeModuleWithMock(e *Executor, mock *MockSSHClient, host string, task *
case "ansible.builtin.cron":
return moduleCronWithClient(e, mock, args)
// Inventory mutation
case "ansible.builtin.add_host":
return e.moduleAddHost(args)
// SSH keys
case "ansible.posix.authorized_key", "ansible.builtin.authorized_key":
return moduleAuthorizedKeyWithClient(e, mock, args)
@ -458,6 +532,8 @@ func executeModuleWithMock(e *Executor, mock *MockSSHClient, host string, task *
return moduleGitWithClient(e, mock, args)
// Archive
case "ansible.builtin.archive":
return moduleArchiveWithClient(e, mock, args)
case "ansible.builtin.unarchive":
return moduleUnarchiveWithClient(e, mock, args)
@ -1250,6 +1326,65 @@ func moduleGitWithClient(_ *Executor, client sshFileRunner, args map[string]any)
return &TaskResult{Changed: true}, nil
}
// --- Archive module shim ---
func moduleArchiveWithClient(_ *Executor, client sshRunner, args map[string]any) (*TaskResult, error) {
src := getStringArg(args, "src", "")
if src == "" {
src = getStringArg(args, "path", "")
}
dest := getStringArg(args, "dest", "")
if src == "" || dest == "" {
return nil, mockError("moduleArchiveWithClient", "archive: src and dest required")
}
// Create the destination directory first so the archive command can write the file.
_, _, _, _ = client.Run(context.Background(), sprintf("mkdir -p %q", pathDir(dest)))
format := lower(getStringArg(args, "format", ""))
if format == "" {
switch {
case hasSuffix(dest, ".tar.gz") || hasSuffix(dest, ".tgz"):
format = "gz"
case hasSuffix(dest, ".tar.xz"):
format = "xz"
case hasSuffix(dest, ".tar.bz2"):
format = "bz2"
case hasSuffix(dest, ".tar"):
format = "tar"
case hasSuffix(dest, ".zip"):
format = "zip"
default:
format = "gz"
}
}
srcDir := pathDir(src)
srcBase := pathBase(src)
var cmd string
switch format {
case "zip":
cmd = sprintf("cd %q && zip -r %q %q", srcDir, dest, srcBase)
case "xz":
cmd = sprintf("tar -cJf %q -C %q %q", dest, srcDir, srcBase)
case "bz2":
cmd = sprintf("tar -cjf %q -C %q %q", dest, srcDir, srcBase)
case "tar":
cmd = sprintf("tar -cf %q -C %q %q", dest, srcDir, srcBase)
default:
cmd = sprintf("tar -czf %q -C %q %q", dest, srcDir, srcBase)
}
stdout, stderr, rc, err := client.Run(context.Background(), cmd)
if err != nil || rc != 0 {
return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
}
return &TaskResult{Changed: true}, nil
}
// --- Unarchive module shim ---
func moduleUnarchiveWithClient(_ *Executor, client sshFileRunner, args map[string]any) (*TaskResult, error) {

View file

@ -6,7 +6,6 @@ import (
"io/fs"
"strconv"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
)
@ -14,18 +13,31 @@ import (
func (e *Executor) executeModule(ctx context.Context, host string, client *SSHClient, task *Task, play *Play) (*TaskResult, error) {
module := NormalizeModule(task.Module)
// Apply task-level become
if task.Become != nil && *task.Become {
// Save old state to restore
// Apply task-level become override.
if task.Become != nil {
// Save old state to restore.
oldBecome := client.become
oldUser := client.becomeUser
oldPass := client.becomePass
client.SetBecome(true, task.BecomeUser, "")
if *task.Become {
client.SetBecome(true, task.BecomeUser, "")
} else {
client.SetBecome(false, "", "")
}
defer client.SetBecome(oldBecome, oldUser, oldPass)
}
e.vars["inventory_hostname"] = host
// Apply play/task environment overrides for the duration of the module run.
if environment := e.taskEnvironment(host, play, task); len(environment) > 0 {
oldEnvironment := client.Environment()
client.SetEnvironment(environment)
defer client.SetEnvironment(oldEnvironment)
}
// Template the args
args := e.templateArgs(task.Args, host, task)
@ -103,6 +115,8 @@ func (e *Executor) executeModule(ctx context.Context, host string, client *SSHCl
return e.moduleGit(ctx, client, args)
case "ansible.builtin.unarchive":
return e.moduleUnarchive(ctx, client, args)
case "ansible.builtin.archive":
return e.moduleArchive(ctx, client, args)
// Additional modules
case "ansible.builtin.hostname":
@ -115,6 +129,8 @@ func (e *Executor) executeModule(ctx context.Context, host string, client *SSHCl
return e.moduleBlockinfile(ctx, client, args)
case "ansible.builtin.include_vars":
return e.moduleIncludeVars(args)
case "ansible.builtin.add_host":
return e.moduleAddHost(args)
case "ansible.builtin.meta":
return e.moduleMeta(args)
case "ansible.builtin.setup":
@ -139,6 +155,31 @@ func (e *Executor) executeModule(ctx context.Context, host string, client *SSHCl
}
}
// taskEnvironment merges play-level and task-level environment overrides,
// templating each value for the given host. Task entries take precedence
// over play entries with the same key. It returns nil when neither level
// defines any environment variables.
func (e *Executor) taskEnvironment(host string, play *Play, task *Task) map[string]string {
	merged := make(map[string]string)
	if play != nil {
		for name, raw := range play.Environment {
			merged[name] = e.templateString(raw, host, task)
		}
	}
	if task != nil {
		// Applied second so task values override play values.
		for name, raw := range task.Environment {
			merged[name] = e.templateString(raw, host, task)
		}
	}
	if len(merged) == 0 {
		return nil
	}
	return merged
}
// templateArgs templates all string values in args.
func (e *Executor) templateArgs(args map[string]any, host string, task *Task) map[string]any {
// Set inventory_hostname for templating
@ -254,7 +295,7 @@ func (e *Executor) moduleScript(ctx context.Context, client *SSHClient, args map
}
// Read local script
data, err := coreio.Local.Read(script)
data, err := localFS.Read(script)
if err != nil {
return nil, coreerr.E("Executor.moduleScript", "read script", err)
}
@ -285,7 +326,7 @@ func (e *Executor) moduleCopy(ctx context.Context, client *SSHClient, args map[s
var err error
if src := getStringArg(args, "src", ""); src != "" {
content, err = coreio.Local.Read(src)
content, err = localFS.Read(src)
if err != nil {
return nil, coreerr.E("Executor.moduleCopy", "read src", err)
}
@ -509,11 +550,11 @@ func (e *Executor) moduleFetch(ctx context.Context, client *SSHClient, args map[
}
// Create dest directory
if err := coreio.Local.EnsureDir(pathDir(dest)); err != nil {
if err := localFS.EnsureDir(pathDir(dest)); err != nil {
return nil, err
}
if err := coreio.Local.Write(dest, string(content)); err != nil {
if err := localFS.Write(dest, string(content)); err != nil {
return nil, err
}
@ -1051,7 +1092,7 @@ func (e *Executor) moduleUnarchive(ctx context.Context, client *SSHClient, args
var cmd string
if !remote {
// Upload local file first
data, err := coreio.Local.Read(src)
data, err := localFS.Read(src)
if err != nil {
return nil, coreerr.E("Executor.moduleUnarchive", "read src", err)
}
@ -1087,6 +1128,63 @@ func (e *Executor) moduleUnarchive(ctx context.Context, client *SSHClient, args
return &TaskResult{Changed: true}, nil
}
// moduleArchive implements ansible.builtin.archive: it packs src (or path)
// into an archive at dest on the remote host, inferring the compression
// format from the dest suffix when no explicit "format" argument is given.
func (e *Executor) moduleArchive(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
	source := getStringArg(args, "src", "")
	if source == "" {
		source = getStringArg(args, "path", "")
	}
	target := getStringArg(args, "dest", "")
	if source == "" || target == "" {
		return nil, coreerr.E("Executor.moduleArchive", "src and dest required", nil)
	}

	// Create the destination directory first so the archive command can write the file.
	_, _, _, _ = client.Run(ctx, sprintf("mkdir -p %q", pathDir(target)))

	format := lower(getStringArg(args, "format", ""))
	if format == "" {
		// Infer the format from the destination suffix; gzip is the default.
		format = "gz"
		for _, probe := range []struct{ suffix, format string }{
			{".tar.gz", "gz"},
			{".tgz", "gz"},
			{".tar.xz", "xz"},
			{".tar.bz2", "bz2"},
			{".tar", "tar"},
			{".zip", "zip"},
		} {
			if hasSuffix(target, probe.suffix) {
				format = probe.format
				break
			}
		}
	}

	sourceDir := pathDir(source)
	sourceBase := pathBase(source)
	var cmd string
	switch format {
	case "zip":
		cmd = sprintf("cd %q && zip -r %q %q", sourceDir, target, sourceBase)
	case "xz":
		cmd = sprintf("tar -cJf %q -C %q %q", target, sourceDir, sourceBase)
	case "bz2":
		cmd = sprintf("tar -cjf %q -C %q %q", target, sourceDir, sourceBase)
	case "tar":
		cmd = sprintf("tar -cf %q -C %q %q", target, sourceDir, sourceBase)
	default:
		// gzip, and any unrecognised explicit format, fall back to tar -czf.
		cmd = sprintf("tar -czf %q -C %q %q", target, sourceDir, sourceBase)
	}

	stdout, stderr, rc, err := client.Run(ctx, cmd)
	if err != nil || rc != 0 {
		return &TaskResult{Failed: true, Msg: stderr, Stdout: stdout, RC: rc}, nil
	}
	return &TaskResult{Changed: true}, nil
}
// --- Helpers ---
func getStringArg(args map[string]any, key, def string) string {
@ -1112,6 +1210,34 @@ func getBoolArg(args map[string]any, key string, def bool) bool {
return def
}
func normalizeGroups(v any) []string {
switch val := v.(type) {
case string:
if val == "" {
return nil
}
parts := split(val, ",")
groups := make([]string, 0, len(parts))
for _, part := range parts {
if trimmed := corexTrimSpace(part); trimmed != "" {
groups = append(groups, trimmed)
}
}
return groups
case []string:
return val
case []any:
groups := make([]string, 0, len(val))
for _, item := range val {
if s, ok := item.(string); ok && corexTrimSpace(s) != "" {
groups = append(groups, corexTrimSpace(s))
}
}
return groups
}
return nil
}
// --- Additional Modules ---
func (e *Executor) moduleHostname(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
@ -1271,6 +1397,83 @@ func (e *Executor) moduleIncludeVars(args map[string]any) (*TaskResult, error) {
return &TaskResult{Changed: false}, nil
}
// moduleAddHost implements ansible.builtin.add_host: it registers (or
// updates) a host in this executor's in-memory inventory and optionally
// attaches it to one or more groups. The mutation is local to the current
// run; it is not persisted anywhere.
func (e *Executor) moduleAddHost(args map[string]any) (*TaskResult, error) {
	// "name" is canonical; "host" is accepted as an alias.
	name := getStringArg(args, "name", "")
	if name == "" {
		name = getStringArg(args, "host", "")
	}
	if name == "" {
		return nil, coreerr.E("Executor.moduleAddHost", "name required", nil)
	}
	// Lazily build the inventory skeleton so add_host also works when no
	// inventory was loaded at all.
	if e.inventory == nil {
		e.inventory = &Inventory{}
	}
	if e.inventory.All == nil {
		e.inventory.All = &InventoryGroup{}
	}
	if e.inventory.All.Hosts == nil {
		e.inventory.All.Hosts = make(map[string]*Host)
	}
	if e.inventory.All.Children == nil {
		e.inventory.All.Children = make(map[string]*InventoryGroup)
	}
	// Reuse an existing host entry so repeated add_host calls update in place.
	host := e.inventory.All.Hosts[name]
	if host == nil {
		host = &Host{}
		e.inventory.All.Hosts[name] = host
	}
	// Each connection setting is only applied when provided and non-empty,
	// so an update never clobbers an existing value with a blank.
	if v, ok := args["ansible_host"].(string); ok && v != "" {
		host.AnsibleHost = v
	}
	if v, ok := args["ansible_port"].(int); ok && v > 0 {
		host.AnsiblePort = v
	}
	if v, ok := args["ansible_user"].(string); ok && v != "" {
		host.AnsibleUser = v
	}
	if v, ok := args["ansible_password"].(string); ok && v != "" {
		host.AnsiblePassword = v
	}
	if v, ok := args["ansible_ssh_private_key_file"].(string); ok && v != "" {
		host.AnsibleSSHPrivateKeyFile = v
	}
	if v, ok := args["ansible_connection"].(string); ok && v != "" {
		host.AnsibleConnection = v
	}
	if v, ok := args["ansible_become_password"].(string); ok && v != "" {
		host.AnsibleBecomePassword = v
	}
	if host.Vars == nil {
		host.Vars = make(map[string]any)
	}
	// Inline "vars" are merged into (not replacing) the host's variables.
	if inlineVars, ok := args["vars"].(map[string]any); ok {
		for k, v := range inlineVars {
			host.Vars[k] = v
		}
	}
	// Attach the same *Host pointer to each requested group, creating
	// groups on demand; group membership therefore shares host state.
	for _, groupName := range normalizeGroups(args["groups"]) {
		if groupName == "" {
			continue
		}
		group := e.inventory.All.Children[groupName]
		if group == nil {
			group = &InventoryGroup{}
			e.inventory.All.Children[groupName] = group
		}
		if group.Hosts == nil {
			group.Hosts = make(map[string]*Host)
		}
		group.Hosts[name] = host
	}
	return &TaskResult{Changed: true, Msg: "host added: " + name}, nil
}
func (e *Executor) moduleMeta(args map[string]any) (*TaskResult, error) {
// meta module controls play execution
// Most actions are no-ops for us

View file

@ -702,6 +702,45 @@ func TestModulesAdv_ModuleUnarchive_Bad_LocalFileNotFound(t *testing.T) {
assert.Contains(t, err.Error(), "read src")
}
// --- archive module ---
// TestModulesAdv_ModuleArchive_Good_CreateTarGz verifies the archive shim's
// default path: a .tar.gz dest with no explicit format produces a
// "tar -czf" command rooted at the source's parent directory ("-C /opt").
func TestModulesAdv_ModuleArchive_Good_CreateTarGz(t *testing.T) {
	e, mock := newTestExecutorWithMock("host1")
	mock.expectCommand(`mkdir -p`, "", "", 0)
	mock.expectCommand(`tar -czf`, "", "", 0)
	result, err := moduleArchiveWithClient(e, mock, map[string]any{
		"src":  "/opt/app",
		"dest": "/tmp/app.tar.gz",
	})
	require.NoError(t, err)
	assert.True(t, result.Changed)
	assert.False(t, result.Failed)
	assert.True(t, mock.containsSubstring("tar -czf"))
	assert.True(t, mock.containsSubstring("/tmp/app.tar.gz"))
	assert.True(t, mock.containsSubstring(`-C "/opt"`))
}
// TestModulesAdv_ModuleArchive_Good_CreateZip verifies the archive shim's
// zip path: the "path" alias for src is accepted, and format: zip produces
// a "cd <srcdir> && zip -r" command instead of tar.
func TestModulesAdv_ModuleArchive_Good_CreateZip(t *testing.T) {
	e, mock := newTestExecutorWithMock("host1")
	mock.expectCommand(`mkdir -p`, "", "", 0)
	mock.expectCommand(`zip -r`, "", "", 0)
	result, err := moduleArchiveWithClient(e, mock, map[string]any{
		"path":   "/srv/releases/app",
		"dest":   "/tmp/app.zip",
		"format": "zip",
	})
	require.NoError(t, err)
	assert.True(t, result.Changed)
	assert.False(t, result.Failed)
	assert.True(t, mock.containsSubstring("zip -r"))
	assert.True(t, mock.containsSubstring("/tmp/app.zip"))
	assert.True(t, mock.containsSubstring(`cd "/srv/releases"`))
}
// --- uri module ---
func TestModulesAdv_ModuleURI_Good_GetRequestDefault(t *testing.T) {
@ -1016,6 +1055,54 @@ func TestModulesAdv_ModuleDockerCompose_Good_DefaultStateIsPresent(t *testing.T)
assert.True(t, mock.hasExecuted(`docker compose up -d`))
}
// --- add_host module ---
// TestModulesAdv_ModuleAddHost_Good_AddsHostAndGroups verifies that
// moduleAddHost creates the host entry with every supported ansible_*
// connection setting, merges inline vars, and attaches the same *Host
// pointer to each requested group so the host is visible via GetHosts and
// GetHostVars afterwards.
func TestModulesAdv_ModuleAddHost_Good_AddsHostAndGroups(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts:    map[string]*Host{},
			Children: map[string]*InventoryGroup{},
			Vars:     map[string]any{},
		},
	})
	result, err := e.moduleAddHost(map[string]any{
		"name":                         "web2",
		"ansible_host":                 "10.0.0.2",
		"ansible_user":                 "deploy",
		"ansible_connection":           "ssh",
		"ansible_become_password":      "sudo-secret",
		"groups":                       []any{"webservers", "frontend"},
		"vars":                         map[string]any{"role": "app"},
		"ansible_ssh_private_key_file": "~/.ssh/id_ed25519",
		"ansible_password":             "ssh-secret",
	})
	require.NoError(t, err)
	assert.True(t, result.Changed)
	assert.Equal(t, "host added: web2", result.Msg)
	host := e.inventory.All.Hosts["web2"]
	require.NotNil(t, host)
	assert.Equal(t, "10.0.0.2", host.AnsibleHost)
	assert.Equal(t, "deploy", host.AnsibleUser)
	assert.Equal(t, "ssh", host.AnsibleConnection)
	assert.Equal(t, "sudo-secret", host.AnsibleBecomePassword)
	assert.Equal(t, "~/.ssh/id_ed25519", host.AnsibleSSHPrivateKeyFile)
	assert.Equal(t, "ssh-secret", host.AnsiblePassword)
	assert.Equal(t, "app", host.Vars["role"])
	require.NotNil(t, e.inventory.All.Children["webservers"])
	require.NotNil(t, e.inventory.All.Children["frontend"])
	assert.Contains(t, e.inventory.All.Children["webservers"].Hosts, "web2")
	assert.Contains(t, e.inventory.All.Children["frontend"].Hosts, "web2")
	// Group entries share the same *Host pointer as the all-group entry.
	assert.Equal(t, host, e.inventory.All.Children["webservers"].Hosts["web2"])
	assert.Equal(t, host, e.inventory.All.Children["frontend"].Hosts["web2"])
	assert.Equal(t, []string{"web2"}, GetHosts(e.inventory, "webservers"))
	assert.Equal(t, "app", GetHostVars(e.inventory, "web2")["role"])
}
// --- Cross-module dispatch tests for advanced modules ---
func TestModulesAdv_ExecuteModuleWithMock_Good_DispatchUser(t *testing.T) {

View file

@ -1,6 +1,7 @@
package ansible
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
@ -812,51 +813,35 @@ func TestModulesInfra_Facts_Good_UbuntuParsing(t *testing.T) {
// Mock os-release output for Ubuntu
mock.expectCommand(`hostname -f`, "web1.example.com\n", "", 0)
mock.expectCommand(`hostname -s`, "web1\n", "", 0)
mock.expectCommand(`cat /etc/os-release`, "ID=ubuntu\nVERSION_ID=\"24.04\"\n", "", 0)
mock.expectCommand(`cat /etc/os-release`, "ID=ubuntu\nVERSION_ID=\"24.04\"\nID_LIKE=debian\n", "", 0)
mock.expectCommand(`uname -m`, "x86_64\n", "", 0)
mock.expectCommand(`uname -r`, "6.5.0-44-generic\n", "", 0)
mock.expectCommand(`MemTotal`, "16384\n", "", 0)
mock.expectCommand(`nproc`, "8\n", "", 0)
mock.expectCommand(`hostname -I`, "10.0.0.1 10.0.0.2\n", "", 0)
// Simulate fact gathering by directly populating facts
// using the same parsing logic as gatherFacts
facts := &Facts{}
stdout, _, _, _ := mock.Run(nil, "hostname -f 2>/dev/null || hostname")
facts.FQDN = trimFactSpace(stdout)
stdout, _, _, _ = mock.Run(nil, "hostname -s 2>/dev/null || hostname")
facts.Hostname = trimFactSpace(stdout)
stdout, _, _, _ = mock.Run(nil, "cat /etc/os-release 2>/dev/null | grep -E '^(ID|VERSION_ID)=' | head -2")
for _, line := range splitLines(stdout) {
if hasPrefix(line, "ID=") {
facts.Distribution = trimQuotes(trimPrefix(line, "ID="))
}
if hasPrefix(line, "VERSION_ID=") {
facts.Version = trimQuotes(trimPrefix(line, "VERSION_ID="))
}
}
stdout, _, _, _ = mock.Run(nil, "uname -m")
facts.Architecture = trimFactSpace(stdout)
stdout, _, _, _ = mock.Run(nil, "uname -r")
facts.Kernel = trimFactSpace(stdout)
e.facts["web1"] = facts
require.NoError(t, e.populateFacts(context.Background(), "web1", &Play{}, mock))
facts := e.facts["web1"]
require.NotNil(t, facts)
assert.Equal(t, "web1.example.com", facts.FQDN)
assert.Equal(t, "web1", facts.Hostname)
assert.Equal(t, "Debian", facts.OS)
assert.Equal(t, "ubuntu", facts.Distribution)
assert.Equal(t, "24.04", facts.Version)
assert.Equal(t, "x86_64", facts.Architecture)
assert.Equal(t, "6.5.0-44-generic", facts.Kernel)
assert.Equal(t, int64(16384), facts.Memory)
assert.Equal(t, 8, facts.CPUs)
assert.Equal(t, "10.0.0.1", facts.IPv4)
// Now verify template resolution with these facts
result := e.templateString("{{ ansible_hostname }}", "web1", nil)
assert.Equal(t, "web1", result)
result = e.templateString("{{ ansible_distribution }}", "web1", nil)
assert.Equal(t, "ubuntu", result)
assert.Equal(t, "web1", e.templateString("{{ ansible_hostname }}", "web1", nil))
assert.Equal(t, "ubuntu", e.templateString("{{ ansible_distribution }}", "web1", nil))
assert.Equal(t, "Debian", e.templateString("{{ ansible_os_family }}", "web1", nil))
assert.Equal(t, "16384", e.templateString("{{ ansible_memtotal_mb }}", "web1", nil))
assert.Equal(t, "8", e.templateString("{{ ansible_processor_vcpus }}", "web1", nil))
assert.Equal(t, "10.0.0.1", e.templateString("{{ ansible_default_ipv4_address }}", "web1", nil))
}
func TestModulesInfra_Facts_Good_CentOSParsing(t *testing.T) {

View file

@ -5,7 +5,6 @@ import (
"maps"
"slices"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
"gopkg.in/yaml.v3"
)
@ -38,7 +37,7 @@ func NewParser(basePath string) *Parser {
//
// plays, err := parser.ParsePlaybook("/workspace/playbooks/site.yml")
func (p *Parser) ParsePlaybook(path string) ([]Play, error) {
data, err := coreio.Local.Read(path)
data, err := localFS.Read(path)
if err != nil {
return nil, coreerr.E("Parser.ParsePlaybook", "read playbook", err)
}
@ -83,7 +82,7 @@ func (p *Parser) ParsePlaybookIter(path string) (iter.Seq[Play], error) {
//
// inv, err := parser.ParseInventory("/workspace/inventory.yml")
func (p *Parser) ParseInventory(path string) (*Inventory, error) {
data, err := coreio.Local.Read(path)
data, err := localFS.Read(path)
if err != nil {
return nil, coreerr.E("Parser.ParseInventory", "read inventory", err)
}
@ -102,7 +101,7 @@ func (p *Parser) ParseInventory(path string) (*Inventory, error) {
//
// tasks, err := parser.ParseTasks("/workspace/roles/web/tasks/main.yml")
func (p *Parser) ParseTasks(path string) ([]Task, error) {
data, err := coreio.Local.Read(path)
data, err := localFS.Read(path)
if err != nil {
return nil, coreerr.E("Parser.ParseTasks", "read tasks", err)
}
@ -168,7 +167,7 @@ func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
for _, sp := range searchPaths {
// Clean the path to resolve .. segments
sp = cleanPath(sp)
if coreio.Local.Exists(sp) {
if localFS.Exists(sp) {
tasksPath = sp
break
}
@ -180,7 +179,7 @@ func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
// Load role defaults
defaultsPath := joinPath(pathDir(pathDir(tasksPath)), "defaults", "main.yml")
if data, err := coreio.Local.Read(defaultsPath); err == nil {
if data, err := localFS.Read(defaultsPath); err == nil {
var defaults map[string]any
if yaml.Unmarshal([]byte(data), &defaults) == nil {
for k, v := range defaults {
@ -193,7 +192,7 @@ func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
// Load role vars
varsPath := joinPath(pathDir(pathDir(tasksPath)), "vars", "main.yml")
if data, err := coreio.Local.Read(varsPath); err == nil {
if data, err := localFS.Read(varsPath); err == nil {
var roleVars map[string]any
if yaml.Unmarshal([]byte(data), &roleVars) == nil {
for k, v := range roleVars {
@ -288,7 +287,7 @@ func (t *Task) UnmarshalYAML(node *yaml.Node) error {
*t = Task(raw)
t.raw = m
// Find the module key
// Find the module key in YAML order so the first recognised module wins.
knownKeys := map[string]bool{
"name": true, "register": true, "when": true, "loop": true,
"loop_control": true, "vars": true, "environment": true,
@ -302,28 +301,43 @@ func (t *Task) UnmarshalYAML(node *yaml.Node) error {
"with_items": true, "with_dict": true, "with_file": true,
}
for key, val := range m {
if knownKeys[key] {
continue
}
// Check if this is a module
if isModule(key) {
t.Module = key
t.Args = make(map[string]any)
switch v := val.(type) {
case string:
// Free-form args (e.g., shell: echo hello)
t.Args["_raw_params"] = v
case map[string]any:
t.Args = v
case nil:
// Module with no args
default:
t.Args["_raw_params"] = v
if node.Kind == yaml.MappingNode {
for i := 0; i+1 < len(node.Content); i += 2 {
keyNode := node.Content[i]
valueNode := node.Content[i+1]
key := keyNode.Value
if knownKeys[key] {
continue
}
// Check if this is a module.
if isModule(key) {
t.Module = key
t.Args = make(map[string]any)
switch valueNode.Kind {
case yaml.MappingNode:
var args map[string]any
if err := valueNode.Decode(&args); err == nil {
t.Args = args
}
case yaml.SequenceNode:
var args any
if err := valueNode.Decode(&args); err == nil {
t.Args["_raw_params"] = args
}
case yaml.ScalarNode:
if valueNode.Tag != "!!null" {
t.Args["_raw_params"] = valueNode.Value
}
default:
var args any
if err := valueNode.Decode(&args); err == nil && args != nil {
t.Args["_raw_params"] = args
}
}
break
}
break
}
}
@ -352,7 +366,7 @@ func isModule(key string) bool {
return contains(key, ".")
}
// NormalizeModule normalizes a module name to its canonical form.
// NormalizeModule normalises a module name to its canonical form.
//
// Example:
//
@ -416,11 +430,11 @@ func getAllHosts(group *InventoryGroup) []string {
}
var hosts []string
for name := range group.Hosts {
for _, name := range slices.Sorted(maps.Keys(group.Hosts)) {
hosts = append(hosts, name)
}
for _, child := range group.Children {
hosts = append(hosts, getAllHosts(child)...)
for _, name := range slices.Sorted(maps.Keys(group.Children)) {
hosts = append(hosts, getAllHosts(group.Children[name])...)
}
return hosts
}
@ -541,6 +555,9 @@ func collectHostVars(group *InventoryGroup, hostname string, vars map[string]any
if host.AnsibleConnection != "" {
vars["ansible_connection"] = host.AnsibleConnection
}
if host.AnsibleBecomePassword != "" {
vars["ansible_become_password"] = host.AnsibleBecomePassword
}
for k, v := range host.Vars {
vars[k] = v
}

View file

@ -673,9 +673,12 @@ func TestParser_GetHostVars_Good_DirectHost(t *testing.T) {
Vars: map[string]any{"global_var": "global"},
Hosts: map[string]*Host{
"myhost": {
AnsibleHost: "10.0.0.1",
AnsiblePort: 2222,
AnsibleUser: "deploy",
AnsibleHost: "10.0.0.1",
AnsiblePort: 2222,
AnsibleUser: "deploy",
AnsibleBecomePassword: "sudo-secret",
AnsibleSSHPrivateKeyFile: "/tmp/id_rsa",
AnsibleConnection: "ssh",
},
},
},
@ -685,6 +688,9 @@ func TestParser_GetHostVars_Good_DirectHost(t *testing.T) {
assert.Equal(t, "10.0.0.1", vars["ansible_host"])
assert.Equal(t, 2222, vars["ansible_port"])
assert.Equal(t, "deploy", vars["ansible_user"])
assert.Equal(t, "sudo-secret", vars["ansible_become_password"])
assert.Equal(t, "/tmp/id_rsa", vars["ansible_ssh_private_key_file"])
assert.Equal(t, "ssh", vars["ansible_connection"])
assert.Equal(t, "global", vars["global_var"])
}

364
specs/RFC.md Normal file
View file

@ -0,0 +1,364 @@
# ansible
**Import:** `dappco.re/go/core/ansible`
**Files:** 6
## Types
This package exports structs only. It has no exported interfaces or type aliases.
### Playbook
`type Playbook struct`
Top-level playbook wrapper for YAML documents that decode to an inline list of plays.
Fields:
- `Plays []Play`: Inlined play definitions in declaration order.
### Play
`type Play struct`
One play in a playbook, including host targeting, privilege settings, vars, task lists, roles, handlers, and run controls.
Fields:
- `Name string`: Human-readable play name used in output and callbacks.
- `Hosts string`: Inventory pattern resolved before the play runs.
- `Connection string`: Optional connection type; `"local"` skips SSH fact gathering.
- `Become bool`: Enables privilege escalation for tasks in the play.
- `BecomeUser string`: Override user for privilege escalation.
- `GatherFacts *bool`: Optional fact-gathering switch; `nil` means facts are gathered.
- `Vars map[string]any`: Play-scoped variables merged into parser and executor state.
- `PreTasks []Task`: Tasks run before roles and main tasks.
- `Tasks []Task`: Main task list.
- `PostTasks []Task`: Tasks run after the main task list.
- `Roles []RoleRef`: Role references executed between `PreTasks` and `Tasks`.
- `Handlers []Task`: Handler tasks that may run after normal tasks when notified.
- `Tags []string`: Tags attached to the play.
- `Environment map[string]string`: Environment variables attached to the play.
- `Serial any`: Serial batch setting; the YAML accepts either an `int` or a `string`.
- `MaxFailPercent int`: Maximum tolerated failure percentage for the play.
### RoleRef
`type RoleRef struct`
Role entry used in `Play.Roles`. The YAML may be either a scalar role name or a mapping.
Fields:
- `Role string`: Canonical role name used by the executor.
- `Name string`: Alternate YAML field that is normalised into `Role` during unmarshalling.
- `TasksFrom string`: Task file loaded from the role's `tasks/` directory.
- `Vars map[string]any`: Variables merged while the role runs.
- `When any`: Optional condition evaluated before the role runs.
- `Tags []string`: Tags declared on the role reference.
### Task
`type Task struct`
Single Ansible task, including the selected module, module args, flow-control settings, includes, blocks, notifications, and privilege settings.
Fields:
- `Name string`: Optional task name.
- `Module string`: Module name extracted from the YAML key rather than a fixed field.
- `Args map[string]any`: Module arguments extracted from the YAML value.
- `Register string`: Variable name used to store the task result.
- `When any`: Conditional expression or expression list.
- `Loop any`: Loop source, typically a slice or templated string.
- `LoopControl *LoopControl`: Optional loop metadata such as custom variable names.
- `Vars map[string]any`: Task-local variables.
- `Environment map[string]string`: Task-local environment overrides.
- `ChangedWhen any`: Override for changed-state evaluation.
- `FailedWhen any`: Override for failure evaluation.
- `IgnoreErrors bool`: Continue after task failure when true.
- `NoLog bool`: Marker for suppressing logging.
- `Become *bool`: Per-task privilege-escalation override.
- `BecomeUser string`: Per-task privilege-escalation user.
- `Delegate string`: `delegate_to` target host.
- `RunOnce bool`: Runs the task once rather than on every host.
- `Tags []string`: Tags attached to the task.
- `Block []Task`: Main block tasks for `block` syntax.
- `Rescue []Task`: Rescue tasks run after a block failure.
- `Always []Task`: Tasks that always run after a block.
- `Notify any`: Handler notification target, either a string or a list.
- `Retries int`: Retry count for `until` loops.
- `Delay int`: Delay between retries.
- `Until string`: Condition checked for retry loops.
- `IncludeTasks string`: Path used by `include_tasks`.
- `ImportTasks string`: Path used by `import_tasks`.
- `IncludeRole *struct{...}`: Role inclusion payload with `Name`, optional `TasksFrom`, and optional `Vars`.
- `ImportRole *struct{...}`: Role import payload with `Name`, optional `TasksFrom`, and optional `Vars`.
### LoopControl
`type LoopControl struct`
Loop metadata attached to a task.
Fields:
- `LoopVar string`: Name assigned to the current loop item.
- `IndexVar string`: Name assigned to the current loop index.
- `Label string`: Display label for loop items.
- `Pause int`: Pause between loop iterations.
- `Extended bool`: Enables extended loop metadata.
### TaskResult
`type TaskResult struct`
Normalised execution result returned by module handlers and stored in registered variables.
Fields:
- `Changed bool`: Whether the task changed remote state.
- `Failed bool`: Whether the task failed.
- `Skipped bool`: Whether the task was skipped.
- `Msg string`: Summary message.
- `Stdout string`: Standard output captured from command-based modules.
- `Stderr string`: Standard error captured from command-based modules.
- `RC int`: Command exit status when applicable.
- `Results []TaskResult`: Per-item loop results.
- `Data map[string]any`: Module-specific result payload.
- `Duration time.Duration`: Execution duration recorded by the executor.
### Inventory
`type Inventory struct`
Root inventory object.
Fields:
- `All *InventoryGroup`: Root inventory group decoded from the `all` key.
### InventoryGroup
`type InventoryGroup struct`
Inventory group containing hosts, child groups, and inherited variables.
Fields:
- `Hosts map[string]*Host`: Hosts defined directly in the group.
- `Children map[string]*InventoryGroup`: Nested child groups.
- `Vars map[string]any`: Variables inherited by descendant hosts unless overridden.
### Host
`type Host struct`
Per-host inventory entry with Ansible connection settings and inline custom vars.
Fields:
- `AnsibleHost string`: Remote address or hostname to connect to.
- `AnsiblePort int`: SSH port.
- `AnsibleUser string`: SSH user.
- `AnsiblePassword string`: SSH password.
- `AnsibleSSHPrivateKeyFile string`: Private key path for SSH authentication.
- `AnsibleConnection string`: Connection transport hint.
- `AnsibleBecomePassword string`: Password used for privilege escalation.
- `Vars map[string]any`: Additional host variables stored inline in YAML.
### Facts
`type Facts struct`
Subset of gathered host facts stored by the executor.
Fields:
- `Hostname string`: Short hostname.
- `FQDN string`: Fully qualified domain name.
- `OS string`: OS family.
- `Distribution string`: Distribution identifier.
- `Version string`: Distribution version.
- `Architecture string`: Machine architecture.
- `Kernel string`: Kernel release.
- `Memory int64`: Total memory in MiB.
- `CPUs int`: Virtual CPU count.
- `IPv4 string`: Default IPv4 address.
### Parser
`type Parser struct`
Stateful YAML parser for playbooks, inventories, task files, and roles. Its internal path and variable cache are unexported.
### Executor
`type Executor struct`
Playbook execution engine that combines parser state, inventory, vars, gathered facts, registered results, handler notifications, and SSH client reuse.
Fields:
- `OnPlayStart func(play *Play)`: Optional callback fired before a play starts.
- `OnTaskStart func(host string, task *Task)`: Optional callback fired before a task runs on a host.
- `OnTaskEnd func(host string, task *Task, result *TaskResult)`: Optional callback fired after a task result is produced.
- `OnPlayEnd func(play *Play)`: Optional callback fired after a play finishes.
- `Limit string`: Additional host filter applied after normal play host resolution.
- `Tags []string`: Inclusive tag filter for task execution.
- `SkipTags []string`: Exclusive tag filter that always skips matching tasks.
- `CheckMode bool`: Public execution flag exposed for callers and CLI wiring.
- `Diff bool`: Public execution flag exposed for callers and CLI wiring.
- `Verbose int`: Verbosity level used by executor logging and CLI callbacks.
### SSHClient
`type SSHClient struct`
Lazy SSH client that owns connection, authentication, privilege-escalation, and timeout state. All fields are unexported.
### SSHConfig
`type SSHConfig struct`
Configuration used to construct an `SSHClient`.
Fields:
- `Host string`: Target host.
- `Port int`: Target SSH port; defaults to `22`.
- `User string`: SSH user; defaults to `"root"`.
- `Password string`: SSH password.
- `KeyFile string`: Private key path.
- `Become bool`: Enables privilege escalation on the client.
- `BecomeUser string`: User used for privilege escalation.
- `BecomePass string`: Password used for privilege escalation.
- `Timeout time.Duration`: Connection timeout; defaults to `30 * time.Second`.
## Functions
### NewParser
`func NewParser(basePath string) *Parser`
Constructs a parser rooted at `basePath` and initialises its internal variable map. `basePath` is later used to resolve role search paths.
### (*Parser).ParsePlaybook
`func (p *Parser) ParsePlaybook(path string) ([]Play, error)`
Reads a playbook YAML file, unmarshals it into `[]Play`, and post-processes every `PreTasks`, `Tasks`, `PostTasks`, and handler entry to extract `Task.Module` and `Task.Args`.
### (*Parser).ParsePlaybookIter
`func (p *Parser) ParsePlaybookIter(path string) (iter.Seq[Play], error)`
Wrapper around `ParsePlaybook` that yields parsed plays through an `iter.Seq`.
### (*Parser).ParseInventory
`func (p *Parser) ParseInventory(path string) (*Inventory, error)`
Reads an inventory YAML file and unmarshals it into the public `Inventory` model.
### (*Parser).ParseTasks
`func (p *Parser) ParseTasks(path string) ([]Task, error)`
Reads a task file, unmarshals it into `[]Task`, and extracts module names and args for every task entry.
### (*Parser).ParseTasksIter
`func (p *Parser) ParseTasksIter(path string) (iter.Seq[Task], error)`
Wrapper around `ParseTasks` that yields parsed tasks through an `iter.Seq`.
### (*Parser).ParseRole
`func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error)`
Resolves `roles/<name>/tasks/<tasksFrom>` across several search patterns rooted around `basePath`, defaults `tasksFrom` to `main.yml`, merges role defaults without overwriting existing parser vars, merges role vars with overwrite semantics, and then parses the resolved task file.
### (*RoleRef).UnmarshalYAML
`func (r *RoleRef) UnmarshalYAML(unmarshal func(any) error) error`
Accepts either a scalar role name or a structured role mapping. When the mapping only sets `Name`, the method copies it into `Role`.
### (*Task).UnmarshalYAML
`func (t *Task) UnmarshalYAML(node *yaml.Node) error`
Decodes the standard task fields, scans the remaining YAML keys for the first recognised module name, stores free-form arguments in `Args["_raw_params"]`, accepts module mappings and nil-valued modules, and maps `with_items` into `Loop` when `Loop` is unset.
### NormalizeModule
`func NormalizeModule(name string) string`
Returns `ansible.builtin.<name>` for short module names and leaves dotted names unchanged.
### GetHosts
`func GetHosts(inv *Inventory, pattern string) []string`
Resolves hosts from a non-nil inventory by handling `all`, `localhost`, group names, and explicit host names. Patterns containing `:` are recognised as future work and currently return `nil`.
### GetHostsIter
`func GetHostsIter(inv *Inventory, pattern string) iter.Seq[string]`
Iterator wrapper around `GetHosts`.
### AllHostsIter
`func AllHostsIter(group *InventoryGroup) iter.Seq[string]`
Yields every host reachable from a group tree in deterministic order by sorting host keys and child-group keys at each level.
### GetHostVars
`func GetHostVars(inv *Inventory, hostname string) map[string]any`
Builds the effective variable map for `hostname` by walking the group tree, applying direct-group vars, host connection settings, inline host vars, and then parent-group vars for keys not already set by a nearer scope.
### NewExecutor
`func NewExecutor(basePath string) *Executor`
Constructs an executor with a parser rooted at `basePath` and fresh maps for vars, facts, registered results, handlers, notifications, and SSH clients.
### (*Executor).SetInventory
`func (e *Executor) SetInventory(path string) error`
Parses an inventory file through the embedded parser and stores the resulting `Inventory` on the executor.
### (*Executor).SetInventoryDirect
`func (e *Executor) SetInventoryDirect(inv *Inventory)`
Stores a caller-supplied inventory pointer on the executor without parsing.
### (*Executor).SetVar
`func (e *Executor) SetVar(key string, value any)`
Stores an executor-scoped variable under a write lock.
### (*Executor).Run
`func (e *Executor) Run(ctx context.Context, playbookPath string) error`
Parses the playbook at `playbookPath` and runs plays sequentially. Each play resolves hosts, merges play vars, gathers facts by default, runs `PreTasks`, roles, `Tasks`, `PostTasks`, and finally any handlers that were notified during the play.
### (*Executor).Close
`func (e *Executor) Close()`
Closes every cached `SSHClient` and replaces the client cache with a fresh empty map.
### (*Executor).TemplateFile
`func (e *Executor) TemplateFile(src, host string, task *Task) (string, error)`
Reads a template file, performs a basic Jinja2-to-Go-template token conversion, and executes it against a context built from executor vars, host vars, and gathered facts. If parsing or execution fails, it falls back to the executor's simpler string-templating path.
### NewSSHClient
`func NewSSHClient(cfg SSHConfig) (*SSHClient, error)`
Applies defaults for `Port`, `User`, and `Timeout`, then constructs an `SSHClient` from `cfg`.
### (*SSHClient).Connect
`func (c *SSHClient) Connect(ctx context.Context) error`
Lazily establishes the SSH connection. Authentication is attempted in this order: explicit key file, default keys from `~/.ssh`, then password-based auth. The method also ensures `known_hosts` exists and uses it for host-key verification.
### (*SSHClient).Close
`func (c *SSHClient) Close() error`
Closes the active SSH connection, if any, and clears the cached client pointer.
### (*SSHClient).Run
`func (c *SSHClient) Run(ctx context.Context, cmd string) (stdout, stderr string, exitCode int, err error)`
Runs a command on the remote host, opening a new SSH session after calling `Connect`. When privilege escalation is enabled, the command is wrapped with `sudo`, using either the become password, the SSH password, or passwordless `sudo -n`. The method returns stdout, stderr, an exit code, and honours context cancellation by signalling the session.
### (*SSHClient).RunScript
`func (c *SSHClient) RunScript(ctx context.Context, script string) (stdout, stderr string, exitCode int, err error)`
Wraps `script` in a heredoc passed to `bash` and delegates execution to `Run`.
### (*SSHClient).Upload
`func (c *SSHClient) Upload(ctx context.Context, local io.Reader, remote string, mode fs.FileMode) error`
Reads all content from `local`, creates the remote parent directory, writes the file via `cat >`, applies the requested mode with `chmod`, and handles both normal and `sudo`-mediated uploads.
### (*SSHClient).Download
`func (c *SSHClient) Download(ctx context.Context, remote string) ([]byte, error)`
Downloads a remote file by running `cat` and returning the captured bytes. A non-zero remote exit status is reported as an error.
### (*SSHClient).FileExists
`func (c *SSHClient) FileExists(ctx context.Context, path string) (bool, error)`
Checks remote path existence with `test -e`.
### (*SSHClient).Stat
`func (c *SSHClient) Stat(ctx context.Context, path string) (map[string]any, error)`
Returns a minimal stat map parsed from remote shell output. The current implementation reports boolean `exists` and `isdir` keys.
### (*SSHClient).SetBecome
`func (c *SSHClient) SetBecome(become bool, user, password string)`
Updates the client's privilege-escalation flag and replaces the stored become user and password only when non-empty override values are supplied.

114
ssh.go
View file

@ -5,11 +5,12 @@ import (
"context"
"io"
"io/fs"
"maps"
"net"
"slices"
"sync"
"time"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
@ -21,17 +22,18 @@ import (
//
// client, _ := NewSSHClient(SSHConfig{Host: "web1"})
type SSHClient struct {
host string
port int
user string
password string
keyFile string
client *ssh.Client
mu sync.Mutex
become bool
becomeUser string
becomePass string
timeout time.Duration
host string
port int
user string
password string
keyFile string
client *ssh.Client
mu sync.Mutex
become bool
becomeUser string
becomePass string
environment map[string]string
timeout time.Duration
}
// SSHConfig holds SSH connection configuration.
@ -68,20 +70,53 @@ func NewSSHClient(cfg SSHConfig) (*SSHClient, error) {
}
client := &SSHClient{
host: cfg.Host,
port: cfg.Port,
user: cfg.User,
password: cfg.Password,
keyFile: cfg.KeyFile,
become: cfg.Become,
becomeUser: cfg.BecomeUser,
becomePass: cfg.BecomePass,
timeout: cfg.Timeout,
host: cfg.Host,
port: cfg.Port,
user: cfg.User,
password: cfg.Password,
keyFile: cfg.KeyFile,
become: cfg.Become,
becomeUser: cfg.BecomeUser,
becomePass: cfg.BecomePass,
timeout: cfg.Timeout,
environment: make(map[string]string),
}
return client, nil
}
// Environment returns a copy of the current remote environment overrides.
// A nil map is returned when no overrides are set, so callers can range
// over the result without a nil check.
func (c *SSHClient) Environment() map[string]string {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.environment) == 0 {
		return nil
	}
	// Shallow copy so callers cannot mutate the client's internal state.
	return maps.Clone(c.environment)
}
// SetEnvironment replaces the current remote environment overrides.
// The supplied map is copied, so later mutations by the caller do not
// affect the client.
func (c *SSHClient) SetEnvironment(environment map[string]string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	next := make(map[string]string, len(environment))
	maps.Copy(next, environment)
	c.environment = next
}
// Connect establishes the SSH connection.
//
// Example:
@ -104,7 +139,7 @@ func (c *SSHClient) Connect(ctx context.Context) error {
keyPath = joinPath(env("DIR_HOME"), keyPath[1:])
}
if key, err := coreio.Local.Read(keyPath); err == nil {
if key, err := localFS.Read(keyPath); err == nil {
if signer, err := ssh.ParsePrivateKey([]byte(key)); err == nil {
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
@ -119,7 +154,7 @@ func (c *SSHClient) Connect(ctx context.Context) error {
joinPath(home, ".ssh", "id_rsa"),
}
for _, keyPath := range defaultKeys {
if key, err := coreio.Local.Read(keyPath); err == nil {
if key, err := localFS.Read(keyPath); err == nil {
if signer, err := ssh.ParsePrivateKey([]byte(key)); err == nil {
authMethods = append(authMethods, ssh.PublicKeys(signer))
break
@ -154,11 +189,11 @@ func (c *SSHClient) Connect(ctx context.Context) error {
knownHostsPath := joinPath(home, ".ssh", "known_hosts")
// Ensure known_hosts file exists
if !coreio.Local.Exists(knownHostsPath) {
if err := coreio.Local.EnsureDir(pathDir(knownHostsPath)); err != nil {
if !localFS.Exists(knownHostsPath) {
if err := localFS.EnsureDir(pathDir(knownHostsPath)); err != nil {
return coreerr.E("ssh.Connect", "failed to create .ssh dir", err)
}
if err := coreio.Local.Write(knownHostsPath, ""); err != nil {
if err := localFS.Write(knownHostsPath, ""); err != nil {
return coreerr.E("ssh.Connect", "failed to create known_hosts file", err)
}
}
@ -222,6 +257,8 @@ func (c *SSHClient) Run(ctx context.Context, cmd string) (stdout, stderr string,
return "", "", -1, err
}
cmd = c.commandWithEnvironment(cmd)
session, err := c.client.NewSession()
if err != nil {
return "", "", -1, coreerr.E("ssh.Run", "new session", err)
@ -494,3 +531,28 @@ func (c *SSHClient) SetBecome(become bool, user, password string) {
c.becomePass = password
}
}
// commandWithEnvironment prefixes cmd with `export K='v'; ` statements for
// every configured environment override, in sorted key order so the output
// is deterministic. When no overrides are set, cmd is returned unchanged.
func (c *SSHClient) commandWithEnvironment(cmd string) string {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.environment) == 0 {
		return cmd
	}
	out := newBuilder()
	for _, name := range slices.Sorted(maps.Keys(c.environment)) {
		// Values are single-quoted so whitespace and shell metacharacters
		// survive the remote shell intact.
		out.WriteString("export " + name + "=" + shellQuote(c.environment[name]) + "; ")
	}
	out.WriteString(cmd)
	return out.String()
}
// shellQuote wraps value in single quotes for safe use in a POSIX shell,
// escaping any embedded single quote with the standard '\'' sequence.
func shellQuote(value string) string {
	escaped := replaceAll(value, "'", `'\''`)
	return "'" + escaped + "'"
}

View file

@ -34,3 +34,15 @@ func TestSSH_NewSSHClient_Good_Defaults(t *testing.T) {
assert.Equal(t, "root", client.user)
assert.Equal(t, 30*time.Second, client.timeout)
}
// TestSSH_SSHClient_Good_CommandWithEnvironment checks that environment
// overrides are exported in sorted key order with safe single-quoting.
func TestSSH_SSHClient_Good_CommandWithEnvironment(t *testing.T) {
	c := &SSHClient{}
	c.SetEnvironment(map[string]string{
		"ALPHA": "O'Reilly",
		"BETA":  "two words",
	})
	got := c.commandWithEnvironment("echo done")
	want := "export ALPHA='O'\\''Reilly'; export BETA='two words'; echo done"
	assert.Equal(t, want, got)
}

View file

@ -2,12 +2,10 @@ package ansible
import (
"io/fs"
coreio "dappco.re/go/core/io"
)
func readTestFile(path string) ([]byte, error) {
content, err := coreio.Local.Read(path)
content, err := localFS.Read(path)
if err != nil {
return nil, err
}
@ -15,7 +13,7 @@ func readTestFile(path string) ([]byte, error) {
}
func writeTestFile(path string, content []byte, mode fs.FileMode) error {
return coreio.Local.WriteMode(path, string(content), mode)
return localFS.WriteMode(path, string(content), mode)
}
func joinStrings(parts []string, sep string) string {

View file

@ -2,6 +2,8 @@ package ansible
import (
"time"
"gopkg.in/yaml.v3"
)
// Playbook represents an Ansible playbook.
@ -13,6 +15,30 @@ type Playbook struct {
Plays []Play `yaml:",inline"`
}
// UnmarshalYAML decodes either a top-level YAML sequence of plays or a
// mapping carrying a "plays" key into the Playbook wrapper. Any other node
// kind is decoded straight into the play slice, matching yaml.v3 defaults.
func (p *Playbook) UnmarshalYAML(node *yaml.Node) error {
	if p == nil {
		// Nothing to populate; treat as a no-op rather than panicking.
		return nil
	}
	if node.Kind == yaml.MappingNode {
		var wrapper struct {
			Plays []Play `yaml:"plays"`
		}
		if err := node.Decode(&wrapper); err != nil {
			return err
		}
		p.Plays = wrapper.Plays
		return nil
	}
	// Sequence nodes (the common playbook shape) and anything else decode
	// directly into the inline play list.
	return node.Decode(&p.Plays)
}
// Play represents a single play in a playbook.
//
// Example:
@ -128,7 +154,7 @@ type Task struct {
raw map[string]any
}
// LoopControl controls loop behavior.
// LoopControl controls loop behaviour.
//
// Example:
//

View file

@ -8,6 +8,26 @@ import (
"gopkg.in/yaml.v3"
)
// --- Playbook UnmarshalYAML ---
// TestTypes_Playbook_UnmarshalYAML_Good_Sequence confirms that a bare YAML
// sequence unmarshals directly into Playbook.Plays.
func TestTypes_Playbook_UnmarshalYAML_Good_Sequence(t *testing.T) {
	input := `
- name: Bootstrap
  hosts: all
  tasks:
    - debug:
        msg: hello
`
	var book Playbook
	require.NoError(t, yaml.Unmarshal([]byte(input), &book))
	require.Len(t, book.Plays, 1)
	play := book.Plays[0]
	assert.Equal(t, "Bootstrap", play.Name)
	assert.Equal(t, "all", play.Hosts)
}
// --- RoleRef UnmarshalYAML ---
func TestTypes_RoleRef_UnmarshalYAML_Good_StringForm(t *testing.T) {
@ -176,6 +196,21 @@ with_items:
assert.Len(t, items, 2)
}
// TestTypes_Task_UnmarshalYAML_Good_FirstRecognisedModuleWins ensures that
// when a task mapping names two modules, the one appearing first in YAML
// order is selected and its free-form args land in _raw_params.
func TestTypes_Task_UnmarshalYAML_Good_FirstRecognisedModuleWins(t *testing.T) {
	input := `
name: Multiple modules
shell: echo first
debug:
  msg: "second"
`
	var task Task
	require.NoError(t, yaml.Unmarshal([]byte(input), &task))
	assert.Equal(t, "shell", task.Module)
	assert.Equal(t, "echo first", task.Args["_raw_params"])
}
func TestTypes_Task_UnmarshalYAML_Good_WithNotify(t *testing.T) {
input := `
name: Install package
@ -236,6 +271,41 @@ include_role:
assert.Equal(t, "setup.yml", task.IncludeRole.TasksFrom)
}
// TestTypes_Task_UnmarshalYAML_Good_Environment verifies that a task-level
// environment mapping is decoded into Task.Environment.
func TestTypes_Task_UnmarshalYAML_Good_Environment(t *testing.T) {
	input := `
name: Env task
shell: echo hi
environment:
  APP_ENV: production
`
	var task Task
	require.NoError(t, yaml.Unmarshal([]byte(input), &task))
	require.NotNil(t, task.Environment)
	assert.Equal(t, "production", task.Environment["APP_ENV"])
}
// TestTypes_Play_UnmarshalYAML_Good_Environment verifies that a play-level
// environment mapping is decoded into Play.Environment.
func TestTypes_Play_UnmarshalYAML_Good_Environment(t *testing.T) {
	input := `
---
- name: Env play
  hosts: all
  environment:
    APP_ENV: production
  tasks:
    - debug:
        msg: hello
`
	var plays []Play
	require.NoError(t, yaml.Unmarshal([]byte(input), &plays))
	require.Len(t, plays, 1)
	require.NotNil(t, plays[0].Environment)
	assert.Equal(t, "production", plays[0].Environment["APP_ENV"])
}
func TestTypes_Task_UnmarshalYAML_Good_BecomeFields(t *testing.T) {
input := `
name: Privileged task