Merge pull request '[agent/codex:gpt-5.4-mini] Read ~/spec/code/core/go/ansible/RFC.md fully. Find ONE feat...' (#28) from main into dev
Some checks failed
CI / auto-fix (push) Failing after 0s
CI / test (push) Failing after 3s
CI / auto-merge (push) Failing after 0s

This commit is contained in:
Virgil 2026-04-01 06:54:16 +00:00
commit fafc0febdf
22 changed files with 1569 additions and 116 deletions

View file

@ -16,8 +16,8 @@ go tool cover -func=/tmp/ansible.cover
No direct `stdlib`-to-`core.*` wrapper drift was found in the Go implementation. The remaining drift is stale migration residue around the `core.*` move:
- `go.mod:15`, `go.sum:7`, `go.sum:8`
Legacy `forge.lthn.ai/core/go-log` references still remain in the dependency graph.
- Resolved on the current branch:
Legacy `forge.lthn.ai/core/go-log` references were removed by replacing `dappco.re/go/core/io` usage with the filesystem primitives in `dappco.re/go/core`.
- `CLAUDE.md:37`, `docs/development.md:169`
Repository guidance still refers to `core/cli`, while the current command registration lives on the `dappco.re/go/core` API at `cmd/ansible/cmd.go:8`.
- `CLAUDE.md:66`, `docs/development.md:86`
@ -25,12 +25,8 @@ No direct `stdlib`-to-`core.*` wrapper drift was found in the Go implementation.
## UK English
- `executor.go:248`
Comment uses US spelling: `Initialize host results`.
- `parser.go:321`
Comment uses US spelling: `NormalizeModule normalizes a module name to its canonical form.`
- `types.go:110`
Comment uses US spelling: `LoopControl controls loop behavior.`
- Resolved on the current branch:
Comment spelling now uses `Initialise`, `normalises`, and `behaviour` in the affected code paths.
## Missing Tests

View file

@ -6,7 +6,6 @@ import (
"dappco.re/go/core"
"dappco.re/go/core/ansible"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
)
@ -35,7 +34,7 @@ func runAnsible(opts core.Options) core.Result {
playbookPath = absPath(playbookPath)
}
if !coreio.Local.Exists(playbookPath) {
if !localFS.Exists(playbookPath) {
return core.Result{Value: coreerr.E("runAnsible", sprintf("playbook not found: %s", playbookPath), nil)}
}
@ -47,6 +46,7 @@ func runAnsible(opts core.Options) core.Result {
// Set options
executor.Limit = opts.String("limit")
executor.CheckMode = opts.Bool("check")
executor.Diff = opts.Bool("diff")
executor.Verbose = opts.Int("verbose")
if tags := opts.String("tags"); tags != "" {
@ -72,14 +72,14 @@ func runAnsible(opts core.Options) core.Result {
invPath = absPath(invPath)
}
if !coreio.Local.Exists(invPath) {
if !localFS.Exists(invPath) {
return core.Result{Value: coreerr.E("runAnsible", sprintf("inventory not found: %s", invPath), nil)}
}
if coreio.Local.IsDir(invPath) {
if localFS.IsDir(invPath) {
for _, name := range []string{"inventory.yml", "hosts.yml", "inventory.yaml", "hosts.yaml"} {
p := joinPath(invPath, name)
if coreio.Local.Exists(p) {
if localFS.Exists(p) {
invPath = p
break
}

View file

@ -22,6 +22,7 @@ func Register(c *core.Core) {
core.Option{Key: "extra-vars", Value: ""},
core.Option{Key: "verbose", Value: 0},
core.Option{Key: "check", Value: false},
core.Option{Key: "diff", Value: false},
),
})

22
cmd/ansible/cmd_test.go Normal file
View file

@ -0,0 +1,22 @@
package anscmd
import (
"testing"
"dappco.re/go/core"
"github.com/stretchr/testify/require"
)
// TestRegister_AnsibleCommandExposesDiffFlag verifies that Register wires a
// "diff" boolean flag onto the ansible command.
func TestRegister_AnsibleCommandExposesDiffFlag(t *testing.T) {
	app := core.New()
	Register(app)
	result := app.Command("ansible")
	require.True(t, result.OK)
	cmd, ok := result.Value.(*core.Command)
	require.True(t, ok)
	require.True(t, cmd.Flags.Has("diff"))
	// The flag must default to off so plain runs do not emit diffs.
	require.False(t, cmd.Flags.Bool("diff"))
}

View file

@ -1,12 +1,56 @@
package anscmd
import (
"io/fs"
"unicode"
"unicode/utf8"
"dappco.re/go/core"
)
// localFileSystem adapts core.Fs to the small filesystem surface the
// ansible command needs: existence checks, directory checks, and writes.
type localFileSystem struct {
	fs *core.Fs
}
// localFS is the package-wide, unrestricted local-filesystem handle used by
// the ansible command in place of the removed dappco.re/go/core/io package.
var localFS = localFileSystem{fs: (&core.Fs{}).NewUnrestricted()}
// Exists reports whether path exists on the local filesystem.
func (l localFileSystem) Exists(path string) bool {
	return l.fs.Exists(path)
}
// IsDir reports whether path exists and is a directory.
func (l localFileSystem) IsDir(path string) bool {
	return l.fs.IsDir(path)
}
// Write stores content at path, translating a failed core.Result into an
// error tagged with the operation and path.
func (l localFileSystem) Write(path, content string) error {
	if res := l.fs.Write(path, content); !res.OK {
		return resultError("write "+path, res)
	}
	return nil
}
// WriteMode stores content at path with the given file mode, translating a
// failed core.Result into an error tagged with the operation and path.
func (l localFileSystem) WriteMode(path, content string, mode fs.FileMode) error {
	if res := l.fs.WriteMode(path, content, mode); !res.OK {
		return resultError("write "+path, res)
	}
	return nil
}
// resultError converts a failed core.Result into an error tagged with op.
// A successful result yields nil; an error value is passed through, a nil
// value becomes a generic failure, anything else is stringified.
func resultError(op string, result core.Result) error {
	if result.OK {
		return nil
	}
	switch v := result.Value.(type) {
	case error:
		return v
	case nil:
		return core.E(op, "operation failed", nil)
	default:
		return core.E(op, core.Sprint(v), nil)
	}
}
func absPath(path string) string {
if path == "" {
return core.Env("DIR_CWD")

15
cmd/ansible/specs/RFC.md Normal file
View file

@ -0,0 +1,15 @@
# anscmd
**Import:** `dappco.re/go/core/ansible/cmd/ansible`
**Files:** 3
## Types
This package has no exported structs, interfaces, or type aliases.
## Functions
### Register
`func Register(c *core.Core)`
Registers two CLI commands on `c`:
- `ansible`: Runs a playbook through `ansible.Executor`. The command exposes `inventory`, `limit`, `tags`, `skip-tags`, `extra-vars`, `verbose`, `check`, and `diff` flags.
- `ansible/test`: Opens an SSH connection to a host, prints basic host facts, and exposes `user`, `password`, `key`, and `port` flags.

View file

@ -1,6 +1,7 @@
package ansible
import (
"io/fs"
"unicode"
"unicode/utf8"
@ -13,6 +14,66 @@ type stringBuffer interface {
String() string
}
// corexLocalFS adapts core.Fs to the read/write/exists surface the ansible
// executor uses for playbook, template, and file-lookup access.
type corexLocalFS struct {
	fs *core.Fs
}
// localFS is the package-wide, unrestricted local-filesystem handle used in
// place of the removed dappco.re/go/core/io package.
var localFS = corexLocalFS{fs: (&core.Fs{}).NewUnrestricted()}
// Read returns the content of the file at path as a string.
func (l corexLocalFS) Read(path string) (string, error) {
	res := l.fs.Read(path)
	if !res.OK {
		return "", corexResultError("read "+path, res)
	}
	// A non-string result value degrades to "" rather than panicking.
	text, _ := res.Value.(string)
	return text, nil
}
// Write stores content at path, translating a failed core.Result into an
// error tagged with the operation and path.
func (l corexLocalFS) Write(path, content string) error {
	if res := l.fs.Write(path, content); !res.OK {
		return corexResultError("write "+path, res)
	}
	return nil
}
// WriteMode stores content at path with the given file mode, translating a
// failed core.Result into an error tagged with the operation and path.
func (l corexLocalFS) WriteMode(path, content string, mode fs.FileMode) error {
	if res := l.fs.WriteMode(path, content, mode); !res.OK {
		return corexResultError("write "+path, res)
	}
	return nil
}
// EnsureDir creates the directory at path if it does not already exist,
// translating a failed core.Result into an error.
func (l corexLocalFS) EnsureDir(path string) error {
	if res := l.fs.EnsureDir(path); !res.OK {
		return corexResultError("ensure dir "+path, res)
	}
	return nil
}
// Exists reports whether path exists on the local filesystem.
func (l corexLocalFS) Exists(path string) bool {
	return l.fs.Exists(path)
}
// IsDir reports whether path exists and is a directory.
func (l corexLocalFS) IsDir(path string) bool {
	return l.fs.IsDir(path)
}
// corexResultError converts a failed core.Result into an error tagged with
// op. A successful result yields nil; an error value is passed through, a
// nil value becomes a generic failure, anything else is stringified.
func corexResultError(op string, result core.Result) error {
	if result.OK {
		return nil
	}
	switch v := result.Value.(type) {
	case error:
		return v
	case nil:
		return core.E(op, "operation failed", nil)
	default:
		return core.E(op, core.Sprint(v), nil)
	}
}
func dirSep() string {
ds := core.Env("DS")
if ds == "" {

View file

@ -2,13 +2,14 @@ package ansible
import (
"context"
"errors"
"regexp"
"slices"
"strconv"
"sync"
"text/template"
"time"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
)
@ -130,6 +131,7 @@ func (e *Executor) runPlay(ctx context.Context, play *Play) error {
if len(hosts) == 0 {
return nil // No hosts matched
}
failedHosts := make(map[string]bool)
// Merge play vars
for k, v := range play.Vars {
@ -149,38 +151,41 @@ func (e *Executor) runPlay(ctx context.Context, play *Play) error {
}
}
// Execute pre_tasks
for _, task := range play.PreTasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
return err
// Execute hosts in serial batches when requested.
for _, batch := range e.hostBatches(hosts, play.Serial) {
// Execute pre_tasks
for _, task := range play.PreTasks {
if err := e.runTaskOnHosts(ctx, batch, &task, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Execute roles
for _, roleRef := range play.Roles {
if err := e.runRole(ctx, hosts, &roleRef, play); err != nil {
return err
// Execute roles
for _, roleRef := range play.Roles {
if err := e.runRole(ctx, batch, &roleRef, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Execute tasks
for _, task := range play.Tasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
return err
// Execute tasks
for _, task := range play.Tasks {
if err := e.runTaskOnHosts(ctx, batch, &task, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Execute post_tasks
for _, task := range play.PostTasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
return err
// Execute post_tasks
for _, task := range play.PostTasks {
if err := e.runTaskOnHosts(ctx, batch, &task, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Run notified handlers
for _, handler := range play.Handlers {
if e.notified[handler.Name] {
if err := e.runTaskOnHosts(ctx, hosts, &handler, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &handler, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
@ -189,8 +194,56 @@ func (e *Executor) runPlay(ctx context.Context, play *Play) error {
return nil
}
// hostBatches splits hosts into serial batches. A serial value that does not
// limit the run (unset, non-positive, or >= the host count) yields a single
// batch containing every host.
func (e *Executor) hostBatches(hosts []string, serial any) [][]string {
	size := serialBatchSize(serial, len(hosts))
	if size >= len(hosts) {
		return [][]string{hosts}
	}
	out := make([][]string, 0, (len(hosts)+size-1)/size)
	for i := 0; i < len(hosts); i += size {
		j := min(i+size, len(hosts))
		// Copy each batch so later mutation of hosts cannot alias into it.
		out = append(out, append([]string(nil), hosts[i:j]...))
	}
	return out
}
// serialBatchSize normalises the play serial value to a usable batch size.
//
// Supported forms: int, int64, float64 (whole numbers only — JSON and some
// YAML decode paths deliver integers as float64), and numeric strings.
// Anything else, or a non-positive value, disables batching by returning the
// full host count. The result is always clamped to hostCount.
func serialBatchSize(serial any, hostCount int) int {
	// clamp caps a positive batch size at the number of available hosts.
	clamp := func(n int) int {
		if n > hostCount {
			return hostCount
		}
		return n
	}
	switch v := serial.(type) {
	case int:
		if v > 0 {
			return clamp(v)
		}
	case int64:
		if v > 0 {
			return clamp(int(v))
		}
	case float64:
		// Accept whole-number floats so `serial: 2` survives decoders that
		// produce float64; fractional values fall through to "no batching".
		if v > 0 && v == float64(int(v)) {
			return clamp(int(v))
		}
	case string:
		if n, err := strconv.Atoi(corexTrimSpace(v)); err == nil && n > 0 {
			return clamp(n)
		}
	}
	return hostCount
}
// runRole executes a role on hosts.
func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef, play *Play) error {
func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
// Check when condition
if roleRef.When != nil {
if !e.evaluateWhen(roleRef.When, "", nil) {
@ -215,7 +268,12 @@ func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef
// Execute tasks
for _, task := range tasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
task := task
task.Tags = append(append([]string{}, roleRef.Tags...), task.Tags...)
if err := e.runTaskOnHosts(ctx, hosts, &task, play, failedHosts, totalHosts, maxFailPercent); err != nil {
if isHostFailureError(err) {
continue
}
// Restore vars
e.vars = oldVars
return err
@ -228,36 +286,113 @@ func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef
}
// runTaskOnHosts runs a task on all hosts.
func (e *Executor) runTaskOnHosts(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runTaskOnHosts(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
hosts = filterFailedHosts(hosts, failedHosts)
if len(hosts) == 0 {
return nil
}
// run_once executes the task only on the first host in the current batch.
if task.RunOnce && len(hosts) > 1 {
hosts = hosts[:1]
}
// Check tags
if !e.matchesTags(task.Tags) {
tags := append(append([]string{}, play.Tags...), task.Tags...)
if !e.matchesTags(tags) {
return nil
}
// Handle block tasks
if len(task.Block) > 0 {
return e.runBlock(ctx, hosts, task, play)
return e.runBlock(ctx, hosts, task, play, failedHosts, totalHosts, maxFailPercent)
}
// Handle include/import
if task.IncludeTasks != "" || task.ImportTasks != "" {
return e.runIncludeTasks(ctx, hosts, task, play)
return e.runIncludeTasks(ctx, hosts, task, play, failedHosts, totalHosts, maxFailPercent)
}
if task.IncludeRole != nil || task.ImportRole != nil {
return e.runIncludeRole(ctx, hosts, task, play)
return e.runIncludeRole(ctx, hosts, task, play, failedHosts, totalHosts, maxFailPercent)
}
var (
haveFailure bool
lastErr error
)
for _, host := range hosts {
if failedHosts[host] {
continue
}
if err := e.runTaskOnHost(ctx, host, task, play); err != nil {
if !task.IgnoreErrors {
return err
failedHosts[host] = true
taskErr := coreerr.E("Executor.runTaskOnHosts", sprintf("task failed on %s: %s", host, err.Error()), err)
if maxFailPercent > 0 && exceedsMaxFailPercent(len(failedHosts), totalHosts, maxFailPercent) {
return coreerr.E("Executor.runTaskOnHosts", sprintf("max fail percentage exceeded: %d%% failed hosts of %d", len(failedHosts), totalHosts), taskErr)
}
if maxFailPercent > 0 {
haveFailure = true
lastErr = taskErr
continue
}
return taskErr
}
}
}
if haveFailure {
return &hostFailureError{err: lastErr}
}
return nil
}
// filterFailedHosts returns hosts with every already-failed host removed.
// When there is nothing to filter, the input slice is returned untouched.
func filterFailedHosts(hosts []string, failedHosts map[string]bool) []string {
	if len(hosts) == 0 || len(failedHosts) == 0 {
		return hosts
	}
	kept := make([]string, 0, len(hosts))
	for _, h := range hosts {
		if failedHosts[h] {
			continue
		}
		kept = append(kept, h)
	}
	return kept
}
// exceedsMaxFailPercent reports whether the failed-host ratio is strictly
// above maxFailPercent. Non-positive counts or thresholds never exceed.
func exceedsMaxFailPercent(failedHosts, totalHosts, maxFailPercent int) bool {
	switch {
	case maxFailPercent <= 0, totalHosts <= 0, failedHosts <= 0:
		return false
	default:
		// Cross-multiplied form of failed/total > max/100, avoiding floats.
		return failedHosts*100 > totalHosts*maxFailPercent
	}
}
// hostFailureError wraps a per-host task failure that should stop execution
// for that host without aborting the remaining hosts in the play.
type hostFailureError struct {
	err error
}
// Error returns the wrapped message, or a generic label when none is set.
func (e *hostFailureError) Error() string {
	if e != nil && e.err != nil {
		return e.err.Error()
	}
	return "host failure"
}
// Unwrap exposes the underlying error for errors.Is/errors.As chains.
func (e *hostFailureError) Unwrap() error {
	if e == nil {
		return nil
	}
	return e.err
}
// isHostFailureError reports whether err has a *hostFailureError anywhere in
// its unwrap chain.
func isHostFailureError(err error) bool {
	var hf *hostFailureError
	return errors.As(err, &hf)
}
// runTaskOnHost runs a task on a single host.
func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, play *Play) error {
start := time.Now()
@ -266,7 +401,7 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
e.OnTaskStart(host, task)
}
// Initialize host results
// Initialise host results
if e.results[host] == nil {
e.results[host] = make(map[string]*TaskResult)
}
@ -285,10 +420,20 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
}
}
// delegate_to changes the execution target but keeps the task context
// anchored to the original host for templating and registration.
executionHost := host
if task.Delegate != "" {
executionHost = e.templateString(task.Delegate, host, task)
if executionHost == "" {
executionHost = host
}
}
// Get SSH client
client, err := e.getClient(host, play)
client, err := e.getClient(executionHost, play)
if err != nil {
return coreerr.E("Executor.runTaskOnHost", sprintf("get client for %s", host), err)
return coreerr.E("Executor.runTaskOnHost", sprintf("get client for %s", executionHost), err)
}
// Handle loops
@ -296,8 +441,8 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
return e.runLoop(ctx, host, client, task, play)
}
// Execute the task
result, err := e.executeModule(ctx, host, client, task, play)
// Execute the task, retrying when an until condition is present.
result, err := e.runTaskWithUntil(ctx, host, client, task, play)
if err != nil {
result = &TaskResult{Failed: true, Msg: err.Error()}
}
@ -324,6 +469,139 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
return nil
}
// runTaskWithUntil executes a task once, or — when an until condition is
// present — retries it until the condition evaluates to true or the retry
// budget is exhausted (task.Retries, default 3, with task.Delay seconds
// between attempts, default 1).
//
// While retrying, each attempt's result is published under the alias
// "result" so the until expression can reference it. When the task registers
// under a different name, any pre-existing "result" entry is saved and
// restored on return so the alias does not leak into later tasks.
func (e *Executor) runTaskWithUntil(ctx context.Context, host string, client *SSHClient, task *Task, play *Play) (*TaskResult, error) {
	// No until condition: a single execution with no retry bookkeeping.
	if task.Until == "" {
		return e.executeModule(ctx, host, client, task, play)
	}
	retries := task.Retries
	if retries <= 0 {
		retries = 3
	}
	delay := task.Delay
	if delay <= 0 {
		delay = 1
	}
	restoreAlias := task.Register != "result"
	var (
		previousAlias *TaskResult
		hadAlias      bool
	)
	if restoreAlias {
		// Snapshot any existing "result" entry under the read lock.
		e.mu.RLock()
		if hostResults, ok := e.results[host]; ok {
			previousAlias, hadAlias = hostResults["result"]
		}
		e.mu.RUnlock()
		defer func() {
			e.mu.Lock()
			defer e.mu.Unlock()
			if e.results[host] == nil {
				e.results[host] = make(map[string]*TaskResult)
			}
			if hadAlias {
				e.results[host]["result"] = previousAlias
			} else {
				delete(e.results[host], "result")
			}
		}()
	}
	lastResult, lastErr := retryTask(ctx, retries, delay, func() (*TaskResult, error) {
		result, err := e.executeModule(ctx, host, client, task, play)
		if err != nil {
			// Fold execution errors into a failed result so the until
			// expression can still inspect the attempt.
			result = &TaskResult{Failed: true, Msg: err.Error()}
		}
		if result == nil {
			result = &TaskResult{}
		}
		e.setTempResult(host, task.Register, result)
		return result, nil
	}, func(result *TaskResult) bool {
		return e.evaluateWhen(task.Until, host, task)
	})
	if lastErr != nil {
		return lastResult, lastErr
	}
	if lastResult == nil {
		lastResult = &TaskResult{}
	}
	// BUG FIX: retryTask returns a nil error both when the until condition
	// was satisfied and when the retry budget ran out, so the old code
	// marked every outcome — including success — as failed. Re-evaluate the
	// condition to let a satisfied until pass through as success.
	if e.evaluateWhen(task.Until, host, task) {
		return lastResult, nil
	}
	lastResult.Failed = true
	if lastResult.Msg == "" {
		lastResult.Msg = sprintf("until condition not met: %s", task.Until)
	}
	return lastResult, nil
}
// setTempResult publishes the latest task result for until evaluation. It is
// always stored under the "result" alias and, when a register name is given,
// under that name too, without leaving stale state for other names.
func (e *Executor) setTempResult(host string, register string, result *TaskResult) {
	e.mu.Lock()
	defer e.mu.Unlock()
	hostResults := e.results[host]
	if hostResults == nil {
		hostResults = make(map[string]*TaskResult)
		e.results[host] = hostResults
	}
	hostResults["result"] = result
	if register != "" {
		hostResults[register] = result
	}
}
// retryTask runs fn until done reports success or the retry budget is
// exhausted. fn is invoked at most retries+1 times, sleeping delay seconds
// between attempts (honouring ctx cancellation). The final result plus any
// error from fn or the sleep are returned; exhaustion alone is not an error.
func retryTask(ctx context.Context, retries, delay int, fn func() (*TaskResult, error), done func(*TaskResult) bool) (*TaskResult, error) {
	retries = max(retries, 0)
	delay = max(delay, 0)
	var (
		res *TaskResult
		err error
	)
	for attempt := 0; ; attempt++ {
		res, err = fn()
		if err != nil {
			return res, err
		}
		if res == nil {
			res = &TaskResult{}
		}
		// A nil done callback means any completed attempt counts as success.
		if done == nil || done(res) {
			return res, nil
		}
		if attempt >= retries {
			return res, nil
		}
		if delay > 0 {
			if serr := sleepWithContext(ctx, time.Duration(delay)*time.Second); serr != nil {
				return res, serr
			}
		}
	}
}
// sleepWithContext pauses for the requested duration or stops early when the
// context is cancelled.
func sleepWithContext(ctx context.Context, d time.Duration) error {
timer := time.NewTimer(d)
defer timer.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
return nil
}
}
// runLoop handles task loops.
func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient, task *Task, play *Play) error {
items := e.resolveLoop(task.Loop, host)
@ -363,6 +641,10 @@ func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient,
if result.Failed && !task.IgnoreErrors {
break
}
if task.LoopControl != nil && task.LoopControl.Pause > 0 && i < len(items)-1 {
time.Sleep(time.Duration(task.LoopControl.Pause) * time.Second)
}
}
// Restore loop variables
@ -400,12 +682,12 @@ func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient,
}
// runBlock handles block/rescue/always.
func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
var blockErr error
// Try block
for _, t := range task.Block {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
blockErr = err
break
}
@ -414,7 +696,7 @@ func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, pla
// Run rescue if block failed
if blockErr != nil && len(task.Rescue) > 0 {
for _, t := range task.Rescue {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
// Rescue also failed
break
}
@ -423,7 +705,7 @@ func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, pla
// Always run always block
for _, t := range task.Always {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
if blockErr == nil {
blockErr = err
}
@ -438,7 +720,7 @@ func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, pla
}
// runIncludeTasks handles include_tasks/import_tasks.
func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
path := task.IncludeTasks
if path == "" {
path = task.ImportTasks
@ -453,7 +735,10 @@ func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Ta
}
for _, t := range tasks {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
if isHostFailureError(err) {
continue
}
return err
}
}
@ -462,7 +747,7 @@ func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Ta
}
// runIncludeRole handles include_role/import_role.
func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
var roleName, tasksFrom string
var roleVars map[string]any
@ -482,7 +767,7 @@ func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Tas
Vars: roleVars,
}
return e.runRole(ctx, hosts, roleRef, play)
return e.runRole(ctx, hosts, roleRef, play, failedHosts, totalHosts, maxFailPercent)
}
// getHosts returns hosts matching the pattern.
@ -903,7 +1188,7 @@ func (e *Executor) handleLookup(expr string) string {
case "env":
return env(arg)
case "file":
if data, err := coreio.Local.Read(arg); err == nil {
if data, err := localFS.Read(arg); err == nil {
return data
}
}
@ -1000,7 +1285,7 @@ func (e *Executor) Close() {
//
// content, err := exec.TemplateFile("/workspace/templates/app.conf.j2", "web1", &Task{})
func (e *Executor) TemplateFile(src, host string, task *Task) (string, error) {
content, err := coreio.Local.Read(src)
content, err := localFS.Read(src)
if err != nil {
return "", err
}

View file

@ -1,9 +1,12 @@
package ansible
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// --- NewExecutor ---
@ -32,6 +35,33 @@ func TestExecutor_SetVar_Good(t *testing.T) {
assert.Equal(t, 42, e.vars["count"])
}
// TestExecutor_TaskEnvironment_Good_MergesAndTemplates checks that
// task-level environment entries are templated and override play-level
// entries with the same key, while unique keys from both levels are kept.
func TestExecutor_TaskEnvironment_Good_MergesAndTemplates(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetVar("tier", "blue")
	e.vars["inventory_hostname"] = "web1"
	play := &Play{
		Environment: map[string]string{
			"PLAY_ONLY": "{{ tier }}",
			"SHARED":    "play",
		},
	}
	task := &Task{
		Environment: map[string]string{
			"TASK_ONLY": "on-{{ inventory_hostname }}",
			"SHARED":    "task",
		},
	}
	env := e.taskEnvironment("web1", play, task)
	// Task value wins for SHARED; templates resolve against executor vars.
	assert.Equal(t, map[string]string{
		"PLAY_ONLY": "blue",
		"TASK_ONLY": "on-web1",
		"SHARED":    "task",
	}, env)
}
// --- SetInventoryDirect ---
func TestExecutor_SetInventoryDirect_Good(t *testing.T) {
@ -48,6 +78,27 @@ func TestExecutor_SetInventoryDirect_Good(t *testing.T) {
assert.Equal(t, inv, e.inventory)
}
// TestExecutor_GetClient_Good_UsesInventoryBecomePassword checks that when a
// play enables become, the SSH client picks up the become password declared
// on the inventory host entry.
func TestExecutor_GetClient_Good_UsesInventoryBecomePassword(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {
					AnsibleHost:           "10.0.0.1",
					AnsibleUser:           "deploy",
					AnsibleBecomePassword: "inventory-secret",
				},
			},
		},
	})
	play := &Play{Become: true}
	client, err := e.getClient("host1", play)
	require.NoError(t, err)
	require.NotNil(t, client)
	assert.Equal(t, "inventory-secret", client.becomePass)
}
// --- getHosts ---
func TestExecutor_GetHosts_Good_WithInventory(t *testing.T) {
@ -98,6 +149,247 @@ func TestExecutor_GetHosts_Good_WithLimit(t *testing.T) {
assert.Contains(t, hosts, "host2")
}
// TestExecutor_RunPlay_Good_SerialBatchesHosts checks that serial: "1" runs
// both tasks to completion on host1 before host2 starts, rather than
// interleaving each task across all hosts.
func TestExecutor_RunPlay_Good_SerialBatchesHosts(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {},
				"host2": {},
			},
		},
	})
	var gathered []string
	e.OnTaskStart = func(host string, task *Task) {
		gathered = append(gathered, host+":"+task.Name)
	}
	gatherFacts := false
	play := &Play{
		Name:        "serial",
		Hosts:       "all",
		GatherFacts: &gatherFacts,
		Serial:      "1",
		Tasks: []Task{
			{Name: "first", Module: "debug", Args: map[string]any{"msg": "one"}},
			{Name: "second", Module: "debug", Args: map[string]any{"msg": "two"}},
		},
	}
	require.NoError(t, e.runPlay(context.Background(), play))
	// Batch-by-batch ordering: all of host1's tasks, then all of host2's.
	assert.Equal(t, []string{
		"host1:first",
		"host1:second",
		"host2:first",
		"host2:second",
	}, gathered)
}
// TestExecutor_RunPlay_Good_MaxFailPercentStopsAfterThreshold checks that
// once failed hosts exceed max_fail_percentage (here host2's second failure
// makes 2 of 3 > 50%), the play aborts: the remaining host's second task and
// the final task on every host never run.
func TestExecutor_RunPlay_Good_MaxFailPercentStopsAfterThreshold(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {Vars: map[string]any{"fail_first": true, "fail_second": false}},
				"host2": {Vars: map[string]any{"fail_first": false, "fail_second": true}},
				"host3": {Vars: map[string]any{"fail_first": false, "fail_second": false}},
			},
		},
	})
	var executed []string
	e.OnTaskStart = func(host string, task *Task) {
		executed = append(executed, host+":"+task.Name)
	}
	gatherFacts := false
	play := &Play{
		Name:           "max fail",
		Hosts:          "all",
		GatherFacts:    &gatherFacts,
		MaxFailPercent: 50,
		Tasks: []Task{
			{
				Name:   "first failure",
				Module: "fail",
				Args:   map[string]any{"msg": "first"},
				When:   "{{ fail_first }}",
			},
			{
				Name:   "second failure",
				Module: "fail",
				Args:   map[string]any{"msg": "second"},
				When:   "{{ fail_second }}",
			},
			{
				Name:   "final task",
				Module: "debug",
				Args:   map[string]any{"msg": "ok"},
			},
		},
	}
	err := e.runPlay(context.Background(), play)
	require.Error(t, err)
	// host1 fails the first task; host2's second-task failure tips 2/3 over
	// the 50% threshold, so execution stops before host3's second task.
	assert.Equal(t, []string{
		"host1:first failure",
		"host2:first failure",
		"host3:first failure",
		"host2:second failure",
	}, executed)
	assert.NotContains(t, executed, "host3:second failure")
	assert.NotContains(t, executed, "host1:final task")
	assert.NotContains(t, executed, "host2:final task")
	assert.NotContains(t, executed, "host3:final task")
}
// TestExecutor_RunPlay_Good_RunOnceTaskOnlyRunsOnFirstHost checks that a
// run_once task executes on the first host only and registers its result
// there, leaving other hosts without any results entry.
func TestExecutor_RunPlay_Good_RunOnceTaskOnlyRunsOnFirstHost(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {},
				"host2": {},
			},
		},
	})
	var executed []string
	e.OnTaskStart = func(host string, task *Task) {
		executed = append(executed, host)
	}
	gatherFacts := false
	play := &Play{
		Name:        "run once",
		Hosts:       "all",
		GatherFacts: &gatherFacts,
		Tasks: []Task{
			{
				Name:     "single host",
				Module:   "debug",
				Args:     map[string]any{"msg": "ok"},
				Register: "result",
				RunOnce:  true,
			},
		},
	}
	require.NoError(t, e.runPlay(context.Background(), play))
	assert.Equal(t, []string{"host1"}, executed)
	assert.NotNil(t, e.results["host1"]["result"])
	// host2 must not even have a results map, proving the task was skipped.
	_, ok := e.results["host2"]
	assert.False(t, ok)
}
// TestExecutor_RunTaskOnHost_Good_DelegateToUsesDelegateHostClient checks
// that delegate_to opens the SSH client for the delegate host instead of the
// task's nominal host.
func TestExecutor_RunTaskOnHost_Good_DelegateToUsesDelegateHostClient(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"source": {},
			},
		},
	})
	task := &Task{
		Name:     "delegated debug",
		Module:   "debug",
		Args:     map[string]any{"msg": "ok"},
		Delegate: "delegate-host",
	}
	play := &Play{}
	require.NoError(t, e.runTaskOnHost(context.Background(), "source", task, play))
	// Only the delegate host should have acquired a cached client.
	_, ok := e.clients["delegate-host"]
	assert.True(t, ok)
	_, ok = e.clients["source"]
	assert.False(t, ok)
}
// TestExecutor_RunPlay_Good_PlayTagsApplyToUntaggedTasks checks that a task
// with no tags of its own inherits the play's tags and therefore still runs
// when the executor filters by that tag.
func TestExecutor_RunPlay_Good_PlayTagsApplyToUntaggedTasks(t *testing.T) {
	e := NewExecutor("/tmp")
	e.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts: map[string]*Host{
				"host1": {},
			},
		},
	})
	e.Tags = []string{"deploy"}
	var executed []string
	e.OnTaskStart = func(host string, task *Task) {
		executed = append(executed, host+":"+task.Name)
	}
	gatherFacts := false
	play := &Play{
		Name:        "tagged play",
		Hosts:       "all",
		GatherFacts: &gatherFacts,
		Tags:        []string{"deploy"},
		Tasks: []Task{
			{Name: "untagged task", Module: "debug", Args: map[string]any{"msg": "ok"}},
		},
	}
	require.NoError(t, e.runPlay(context.Background(), play))
	assert.Equal(t, []string{"host1:untagged task"}, executed)
}
// TestExecutor_RunTaskOnHost_Good_LoopControlPause checks that
// loop_control.pause sleeps between loop items: two items with a one-second
// pause must take roughly a second overall (timing-based assertion).
func TestExecutor_RunTaskOnHost_Good_LoopControlPause(t *testing.T) {
	e := NewExecutor("/tmp")
	task := &Task{
		Name:   "pause loop",
		Module: "debug",
		Args:   map[string]any{"msg": "{{ item }}"},
		Loop:   []any{"one", "two"},
		LoopControl: &LoopControl{
			Pause: 1,
		},
	}
	play := &Play{}
	start := time.Now()
	require.NoError(t, e.runTaskOnHost(context.Background(), "localhost", task, play))
	// 900ms leaves headroom for timer slop while still proving the pause ran.
	assert.GreaterOrEqual(t, time.Since(start), 900*time.Millisecond)
}
// TestExecutor_SetTempResult_Good_ResultAliasSupportsUntil checks that
// setTempResult publishes the latest result under the "result" alias so an
// until expression like "result is success" tracks each attempt.
func TestExecutor_SetTempResult_Good_ResultAliasSupportsUntil(t *testing.T) {
	e := NewExecutor("/tmp")
	e.setTempResult("host1", "", &TaskResult{Failed: true, RC: 1})
	assert.False(t, e.evaluateWhen("result is success", "host1", &Task{}))
	e.setTempResult("host1", "", &TaskResult{Failed: false, RC: 0})
	assert.True(t, e.evaluateWhen("result is success", "host1", &Task{}))
}
// TestRetryTask_Good_RetriesAndWaits checks that retryTask re-invokes fn
// after a failed done check, waits the configured delay between attempts,
// and returns the final (successful) result.
func TestRetryTask_Good_RetriesAndWaits(t *testing.T) {
	attempts := 0
	start := time.Now()
	result, err := retryTask(context.Background(), 1, 1, func() (*TaskResult, error) {
		attempts++
		if attempts == 1 {
			return &TaskResult{Failed: true, RC: 1}, nil
		}
		return &TaskResult{Failed: false, RC: 0}, nil
	}, func(result *TaskResult) bool {
		return result.RC == 0
	})
	require.NoError(t, err)
	assert.Equal(t, 2, attempts)
	assert.NotNil(t, result)
	assert.False(t, result.Failed)
	// The one-second inter-attempt delay should dominate the elapsed time.
	assert.GreaterOrEqual(t, time.Since(start), 900*time.Millisecond)
}
// --- matchesTags ---
func TestExecutor_MatchesTags_Good_NoTagsFilter(t *testing.T) {

3
go.mod
View file

@ -4,7 +4,6 @@ go 1.26.0
require (
dappco.re/go/core v0.8.0-alpha.1
dappco.re/go/core/io v0.2.0
dappco.re/go/core/log v0.1.0
github.com/stretchr/testify v1.11.1
golang.org/x/crypto v0.49.0
@ -12,8 +11,8 @@ require (
)
require (
forge.lthn.ai/core/go-log v0.0.4 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/kr/text v0.2.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
golang.org/x/sys v0.42.0 // indirect
)

5
go.sum
View file

@ -1,11 +1,8 @@
dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk=
dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
dappco.re/go/core/io v0.2.0 h1:zuudgIiTsQQ5ipVt97saWdGLROovbEB/zdVyy9/l+I4=
dappco.re/go/core/io v0.2.0/go.mod h1:1QnQV6X9LNgFKfm8SkOtR9LLaj3bDcsOIeJOOyjbL5E=
dappco.re/go/core/log v0.1.0 h1:pa71Vq2TD2aoEUQWFKwNcaJ3GBY8HbaNGqtE688Unyc=
dappco.re/go/core/log v0.1.0/go.mod h1:Nkqb8gsXhZAO8VLpx7B8i1iAmohhzqA20b9Zr8VUcJs=
forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0=
forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=

View file

@ -4,7 +4,9 @@ import (
"context"
"io"
"io/fs"
"maps"
"regexp"
"slices"
"strconv"
"sync"
@ -36,6 +38,9 @@ type MockSSHClient struct {
becomeUser string
becomePass string
// Remote environment overrides
environment map[string]string
// Execution log: every command that was executed
executed []executedCommand
@ -73,8 +78,9 @@ type uploadRecord struct {
// mock.expectCommand("echo ok", "ok", "", 0)
func NewMockSSHClient() *MockSSHClient {
return &MockSSHClient{
files: make(map[string][]byte),
stats: make(map[string]map[string]any),
files: make(map[string][]byte),
stats: make(map[string]map[string]any),
environment: make(map[string]string),
}
}
@ -133,6 +139,7 @@ func (m *MockSSHClient) Run(_ context.Context, cmd string) (string, string, int,
m.mu.Lock()
defer m.mu.Unlock()
cmd = m.commandWithEnvironment(cmd)
m.executed = append(m.executed, executedCommand{Method: "Run", Cmd: cmd})
// Search expectations in reverse order (last registered wins)
@ -156,6 +163,7 @@ func (m *MockSSHClient) RunScript(_ context.Context, script string) (string, str
m.mu.Lock()
defer m.mu.Unlock()
script = m.commandWithEnvironment(script)
m.executed = append(m.executed, executedCommand{Method: "RunScript", Cmd: script})
// Match against the script content
@ -260,6 +268,60 @@ func (m *MockSSHClient) SetBecome(become bool, user, password string) {
}
}
// Environment returns a copy of the configured remote environment overrides.
// A nil map is returned when no overrides are set, so callers can use the
// result directly as a "nothing to restore" sentinel.
func (m *MockSSHClient) Environment() map[string]string {
	m.mu.Lock()
	defer m.mu.Unlock()
	if len(m.environment) == 0 {
		return nil
	}
	// Clone so callers cannot mutate the mock's internal state.
	return maps.Clone(m.environment)
}
// SetEnvironment replaces the configured remote environment overrides.
// The supplied map is copied, never retained, so later caller-side mutation
// cannot leak into the mock.
func (m *MockSSHClient) SetEnvironment(environment map[string]string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Always start from a fresh map: passing nil/empty clears the overrides.
	m.environment = make(map[string]string, len(environment))
	maps.Copy(m.environment, environment)
}
// commandWithEnvironment prefixes cmd with "export K='v'; " statements for
// every configured environment override, in sorted key order so the rendered
// command is deterministic. With no overrides, cmd is returned unchanged.
func (m *MockSSHClient) commandWithEnvironment(cmd string) string {
	if len(m.environment) == 0 {
		return cmd
	}
	out := newBuilder()
	for _, key := range sortedStringKeys(m.environment) {
		out.WriteString("export " + key + "=" + shellQuote(m.environment[key]) + "; ")
	}
	out.WriteString(cmd)
	return out.String()
}
func sortedStringKeys(m map[string]string) []string {
return slices.Sorted(maps.Keys(m))
}
// Close is a no-op for the mock.
//
// Example:
@ -362,6 +424,7 @@ func (m *MockSSHClient) reset() {
defer m.mu.Unlock()
m.executed = nil
m.uploads = nil
m.environment = make(map[string]string)
}
// --- Test helper: create executor with mock client ---
@ -397,8 +460,15 @@ func newTestExecutorWithMock(host string) (*Executor, *MockSSHClient) {
// and goes straight to module execution.
func executeModuleWithMock(e *Executor, mock *MockSSHClient, host string, task *Task) (*TaskResult, error) {
module := NormalizeModule(task.Module)
e.vars["inventory_hostname"] = host
args := e.templateArgs(task.Args, host, task)
if environment := e.taskEnvironment(host, nil, task); len(environment) > 0 {
oldEnvironment := mock.Environment()
mock.SetEnvironment(environment)
defer mock.SetEnvironment(oldEnvironment)
}
// Dispatch directly to module handlers using the mock
switch module {
case "ansible.builtin.shell":
@ -449,6 +519,10 @@ func executeModuleWithMock(e *Executor, mock *MockSSHClient, host string, task *
case "ansible.builtin.cron":
return moduleCronWithClient(e, mock, args)
// Inventory mutation
case "ansible.builtin.add_host":
return e.moduleAddHost(args)
// SSH keys
case "ansible.posix.authorized_key", "ansible.builtin.authorized_key":
return moduleAuthorizedKeyWithClient(e, mock, args)

View file

@ -6,7 +6,6 @@ import (
"io/fs"
"strconv"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
)
@ -26,6 +25,15 @@ func (e *Executor) executeModule(ctx context.Context, host string, client *SSHCl
defer client.SetBecome(oldBecome, oldUser, oldPass)
}
e.vars["inventory_hostname"] = host
// Apply play/task environment overrides for the duration of the module run.
if environment := e.taskEnvironment(host, play, task); len(environment) > 0 {
oldEnvironment := client.Environment()
client.SetEnvironment(environment)
defer client.SetEnvironment(oldEnvironment)
}
// Template the args
args := e.templateArgs(task.Args, host, task)
@ -115,6 +123,8 @@ func (e *Executor) executeModule(ctx context.Context, host string, client *SSHCl
return e.moduleBlockinfile(ctx, client, args)
case "ansible.builtin.include_vars":
return e.moduleIncludeVars(args)
case "ansible.builtin.add_host":
return e.moduleAddHost(args)
case "ansible.builtin.meta":
return e.moduleMeta(args)
case "ansible.builtin.setup":
@ -139,6 +149,31 @@ func (e *Executor) executeModule(ctx context.Context, host string, client *SSHCl
}
}
// taskEnvironment merges play-level and task-level environment overrides for
// host, templating each value. Task entries win over play entries for the
// same key. Returns nil when neither scope declares any environment, so
// callers can skip the client environment swap entirely.
func (e *Executor) taskEnvironment(host string, play *Play, task *Task) map[string]string {
	merged := make(map[string]string)
	if play != nil {
		for key, value := range play.Environment {
			merged[key] = e.templateString(value, host, task)
		}
	}
	if task != nil {
		// Task overrides are applied last so they shadow play-level keys.
		for key, value := range task.Environment {
			merged[key] = e.templateString(value, host, task)
		}
	}
	if len(merged) == 0 {
		return nil
	}
	return merged
}
// templateArgs templates all string values in args.
func (e *Executor) templateArgs(args map[string]any, host string, task *Task) map[string]any {
// Set inventory_hostname for templating
@ -254,7 +289,7 @@ func (e *Executor) moduleScript(ctx context.Context, client *SSHClient, args map
}
// Read local script
data, err := coreio.Local.Read(script)
data, err := localFS.Read(script)
if err != nil {
return nil, coreerr.E("Executor.moduleScript", "read script", err)
}
@ -285,7 +320,7 @@ func (e *Executor) moduleCopy(ctx context.Context, client *SSHClient, args map[s
var err error
if src := getStringArg(args, "src", ""); src != "" {
content, err = coreio.Local.Read(src)
content, err = localFS.Read(src)
if err != nil {
return nil, coreerr.E("Executor.moduleCopy", "read src", err)
}
@ -509,11 +544,11 @@ func (e *Executor) moduleFetch(ctx context.Context, client *SSHClient, args map[
}
// Create dest directory
if err := coreio.Local.EnsureDir(pathDir(dest)); err != nil {
if err := localFS.EnsureDir(pathDir(dest)); err != nil {
return nil, err
}
if err := coreio.Local.Write(dest, string(content)); err != nil {
if err := localFS.Write(dest, string(content)); err != nil {
return nil, err
}
@ -1051,7 +1086,7 @@ func (e *Executor) moduleUnarchive(ctx context.Context, client *SSHClient, args
var cmd string
if !remote {
// Upload local file first
data, err := coreio.Local.Read(src)
data, err := localFS.Read(src)
if err != nil {
return nil, coreerr.E("Executor.moduleUnarchive", "read src", err)
}
@ -1112,6 +1147,34 @@ func getBoolArg(args map[string]any, key string, def bool) bool {
return def
}
func normalizeGroups(v any) []string {
switch val := v.(type) {
case string:
if val == "" {
return nil
}
parts := split(val, ",")
groups := make([]string, 0, len(parts))
for _, part := range parts {
if trimmed := corexTrimSpace(part); trimmed != "" {
groups = append(groups, trimmed)
}
}
return groups
case []string:
return val
case []any:
groups := make([]string, 0, len(val))
for _, item := range val {
if s, ok := item.(string); ok && corexTrimSpace(s) != "" {
groups = append(groups, corexTrimSpace(s))
}
}
return groups
}
return nil
}
// --- Additional Modules ---
func (e *Executor) moduleHostname(ctx context.Context, client *SSHClient, args map[string]any) (*TaskResult, error) {
@ -1271,6 +1334,83 @@ func (e *Executor) moduleIncludeVars(args map[string]any) (*TaskResult, error) {
return &TaskResult{Changed: false}, nil
}
// moduleAddHost implements ansible.builtin.add_host: it registers a new host
// (or updates an existing one) in the in-memory inventory and attaches it to
// the requested groups. The host name comes from "name" or, as a fallback,
// "host"; a missing name is an error.
func (e *Executor) moduleAddHost(args map[string]any) (*TaskResult, error) {
	name := getStringArg(args, "name", "")
	if name == "" {
		name = getStringArg(args, "host", "")
	}
	if name == "" {
		return nil, coreerr.E("Executor.moduleAddHost", "name required", nil)
	}

	// Lazily materialise the inventory tree so add_host works even when no
	// inventory file was loaded before the play started.
	if e.inventory == nil {
		e.inventory = &Inventory{}
	}
	root := e.inventory
	if root.All == nil {
		root.All = &InventoryGroup{}
	}
	if root.All.Hosts == nil {
		root.All.Hosts = make(map[string]*Host)
	}
	if root.All.Children == nil {
		root.All.Children = make(map[string]*InventoryGroup)
	}

	// Reuse an existing entry so repeated add_host calls update in place.
	entry := root.All.Hosts[name]
	if entry == nil {
		entry = &Host{}
		root.All.Hosts[name] = entry
	}

	// Connection settings: only non-empty string args override the host.
	setString := func(dst *string, key string) {
		if v, ok := args[key].(string); ok && v != "" {
			*dst = v
		}
	}
	setString(&entry.AnsibleHost, "ansible_host")
	if v, ok := args["ansible_port"].(int); ok && v > 0 {
		entry.AnsiblePort = v
	}
	setString(&entry.AnsibleUser, "ansible_user")
	setString(&entry.AnsiblePassword, "ansible_password")
	setString(&entry.AnsibleSSHPrivateKeyFile, "ansible_ssh_private_key_file")
	setString(&entry.AnsibleConnection, "ansible_connection")
	setString(&entry.AnsibleBecomePassword, "ansible_become_password")

	// Inline vars are merged over any existing host vars.
	if entry.Vars == nil {
		entry.Vars = make(map[string]any)
	}
	if inlineVars, ok := args["vars"].(map[string]any); ok {
		for k, v := range inlineVars {
			entry.Vars[k] = v
		}
	}

	// Attach the host to each requested group, creating groups on demand.
	for _, groupName := range normalizeGroups(args["groups"]) {
		if groupName == "" {
			continue
		}
		group := root.All.Children[groupName]
		if group == nil {
			group = &InventoryGroup{}
			root.All.Children[groupName] = group
		}
		if group.Hosts == nil {
			group.Hosts = make(map[string]*Host)
		}
		// Share the same *Host so group membership and host vars stay in sync.
		group.Hosts[name] = entry
	}

	return &TaskResult{Changed: true, Msg: "host added: " + name}, nil
}
func (e *Executor) moduleMeta(args map[string]any) (*TaskResult, error) {
// meta module controls play execution
// Most actions are no-ops for us

View file

@ -1016,6 +1016,54 @@ func TestModulesAdv_ModuleDockerCompose_Good_DefaultStateIsPresent(t *testing.T)
assert.True(t, mock.hasExecuted(`docker compose up -d`))
}
// --- add_host module ---
// Verifies that add_host registers the host with its connection settings and
// inline vars, and links the same *Host entry into every requested group.
func TestModulesAdv_ModuleAddHost_Good_AddsHostAndGroups(t *testing.T) {
	exec := NewExecutor("/tmp")
	exec.SetInventoryDirect(&Inventory{
		All: &InventoryGroup{
			Hosts:    map[string]*Host{},
			Children: map[string]*InventoryGroup{},
			Vars:     map[string]any{},
		},
	})

	res, err := exec.moduleAddHost(map[string]any{
		"name":                         "web2",
		"ansible_host":                 "10.0.0.2",
		"ansible_user":                 "deploy",
		"ansible_connection":           "ssh",
		"ansible_become_password":      "sudo-secret",
		"groups":                       []any{"webservers", "frontend"},
		"vars":                         map[string]any{"role": "app"},
		"ansible_ssh_private_key_file": "~/.ssh/id_ed25519",
		"ansible_password":             "ssh-secret",
	})
	require.NoError(t, err)
	assert.True(t, res.Changed)
	assert.Equal(t, "host added: web2", res.Msg)

	// Connection settings and inline vars land on the inventory host entry.
	added := exec.inventory.All.Hosts["web2"]
	require.NotNil(t, added)
	assert.Equal(t, "10.0.0.2", added.AnsibleHost)
	assert.Equal(t, "deploy", added.AnsibleUser)
	assert.Equal(t, "ssh", added.AnsibleConnection)
	assert.Equal(t, "sudo-secret", added.AnsibleBecomePassword)
	assert.Equal(t, "~/.ssh/id_ed25519", added.AnsibleSSHPrivateKeyFile)
	assert.Equal(t, "ssh-secret", added.AnsiblePassword)
	assert.Equal(t, "app", added.Vars["role"])

	// Both groups were created and share the exact same host pointer.
	require.NotNil(t, exec.inventory.All.Children["webservers"])
	require.NotNil(t, exec.inventory.All.Children["frontend"])
	assert.Contains(t, exec.inventory.All.Children["webservers"].Hosts, "web2")
	assert.Contains(t, exec.inventory.All.Children["frontend"].Hosts, "web2")
	assert.Equal(t, added, exec.inventory.All.Children["webservers"].Hosts["web2"])
	assert.Equal(t, added, exec.inventory.All.Children["frontend"].Hosts["web2"])

	// Inventory lookups resolve the new host through the group tree.
	assert.Equal(t, []string{"web2"}, GetHosts(exec.inventory, "webservers"))
	assert.Equal(t, "app", GetHostVars(exec.inventory, "web2")["role"])
}
// --- Cross-module dispatch tests for advanced modules ---
func TestModulesAdv_ExecuteModuleWithMock_Good_DispatchUser(t *testing.T) {

View file

@ -5,7 +5,6 @@ import (
"maps"
"slices"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
"gopkg.in/yaml.v3"
)
@ -38,7 +37,7 @@ func NewParser(basePath string) *Parser {
//
// plays, err := parser.ParsePlaybook("/workspace/playbooks/site.yml")
func (p *Parser) ParsePlaybook(path string) ([]Play, error) {
data, err := coreio.Local.Read(path)
data, err := localFS.Read(path)
if err != nil {
return nil, coreerr.E("Parser.ParsePlaybook", "read playbook", err)
}
@ -83,7 +82,7 @@ func (p *Parser) ParsePlaybookIter(path string) (iter.Seq[Play], error) {
//
// inv, err := parser.ParseInventory("/workspace/inventory.yml")
func (p *Parser) ParseInventory(path string) (*Inventory, error) {
data, err := coreio.Local.Read(path)
data, err := localFS.Read(path)
if err != nil {
return nil, coreerr.E("Parser.ParseInventory", "read inventory", err)
}
@ -102,7 +101,7 @@ func (p *Parser) ParseInventory(path string) (*Inventory, error) {
//
// tasks, err := parser.ParseTasks("/workspace/roles/web/tasks/main.yml")
func (p *Parser) ParseTasks(path string) ([]Task, error) {
data, err := coreio.Local.Read(path)
data, err := localFS.Read(path)
if err != nil {
return nil, coreerr.E("Parser.ParseTasks", "read tasks", err)
}
@ -168,7 +167,7 @@ func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
for _, sp := range searchPaths {
// Clean the path to resolve .. segments
sp = cleanPath(sp)
if coreio.Local.Exists(sp) {
if localFS.Exists(sp) {
tasksPath = sp
break
}
@ -180,7 +179,7 @@ func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
// Load role defaults
defaultsPath := joinPath(pathDir(pathDir(tasksPath)), "defaults", "main.yml")
if data, err := coreio.Local.Read(defaultsPath); err == nil {
if data, err := localFS.Read(defaultsPath); err == nil {
var defaults map[string]any
if yaml.Unmarshal([]byte(data), &defaults) == nil {
for k, v := range defaults {
@ -193,7 +192,7 @@ func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
// Load role vars
varsPath := joinPath(pathDir(pathDir(tasksPath)), "vars", "main.yml")
if data, err := coreio.Local.Read(varsPath); err == nil {
if data, err := localFS.Read(varsPath); err == nil {
var roleVars map[string]any
if yaml.Unmarshal([]byte(data), &roleVars) == nil {
for k, v := range roleVars {
@ -352,7 +351,7 @@ func isModule(key string) bool {
return contains(key, ".")
}
// NormalizeModule normalizes a module name to its canonical form.
// NormalizeModule normalises a module name to its canonical form.
//
// Example:
//
@ -416,11 +415,11 @@ func getAllHosts(group *InventoryGroup) []string {
}
var hosts []string
for name := range group.Hosts {
for _, name := range slices.Sorted(maps.Keys(group.Hosts)) {
hosts = append(hosts, name)
}
for _, child := range group.Children {
hosts = append(hosts, getAllHosts(child)...)
for _, name := range slices.Sorted(maps.Keys(group.Children)) {
hosts = append(hosts, getAllHosts(group.Children[name])...)
}
return hosts
}
@ -541,6 +540,9 @@ func collectHostVars(group *InventoryGroup, hostname string, vars map[string]any
if host.AnsibleConnection != "" {
vars["ansible_connection"] = host.AnsibleConnection
}
if host.AnsibleBecomePassword != "" {
vars["ansible_become_password"] = host.AnsibleBecomePassword
}
for k, v := range host.Vars {
vars[k] = v
}

View file

@ -673,9 +673,12 @@ func TestParser_GetHostVars_Good_DirectHost(t *testing.T) {
Vars: map[string]any{"global_var": "global"},
Hosts: map[string]*Host{
"myhost": {
AnsibleHost: "10.0.0.1",
AnsiblePort: 2222,
AnsibleUser: "deploy",
AnsibleHost: "10.0.0.1",
AnsiblePort: 2222,
AnsibleUser: "deploy",
AnsibleBecomePassword: "sudo-secret",
AnsibleSSHPrivateKeyFile: "/tmp/id_rsa",
AnsibleConnection: "ssh",
},
},
},
@ -685,6 +688,9 @@ func TestParser_GetHostVars_Good_DirectHost(t *testing.T) {
assert.Equal(t, "10.0.0.1", vars["ansible_host"])
assert.Equal(t, 2222, vars["ansible_port"])
assert.Equal(t, "deploy", vars["ansible_user"])
assert.Equal(t, "sudo-secret", vars["ansible_become_password"])
assert.Equal(t, "/tmp/id_rsa", vars["ansible_ssh_private_key_file"])
assert.Equal(t, "ssh", vars["ansible_connection"])
assert.Equal(t, "global", vars["global_var"])
}

364
specs/RFC.md Normal file
View file

@ -0,0 +1,364 @@
# ansible
**Import:** `dappco.re/go/core/ansible`
**Files:** 6
## Types
This package exports structs only. It has no exported interfaces or type aliases.
### Playbook
`type Playbook struct`
Top-level playbook wrapper for YAML documents that decode to an inline list of plays.
Fields:
- `Plays []Play`: Inlined play definitions in declaration order.
### Play
`type Play struct`
One play in a playbook, including host targeting, privilege settings, vars, task lists, roles, handlers, and run controls.
Fields:
- `Name string`: Human-readable play name used in output and callbacks.
- `Hosts string`: Inventory pattern resolved before the play runs.
- `Connection string`: Optional connection type; `"local"` skips SSH fact gathering.
- `Become bool`: Enables privilege escalation for tasks in the play.
- `BecomeUser string`: Override user for privilege escalation.
- `GatherFacts *bool`: Optional fact-gathering switch; `nil` means facts are gathered.
- `Vars map[string]any`: Play-scoped variables merged into parser and executor state.
- `PreTasks []Task`: Tasks run before roles and main tasks.
- `Tasks []Task`: Main task list.
- `PostTasks []Task`: Tasks run after the main task list.
- `Roles []RoleRef`: Role references executed between `PreTasks` and `Tasks`.
- `Handlers []Task`: Handler tasks that may run after normal tasks when notified.
- `Tags []string`: Tags attached to the play.
- `Environment map[string]string`: Environment variables attached to the play.
- `Serial any`: Serial batch setting; the YAML accepts either an `int` or a `string`.
- `MaxFailPercent int`: Maximum tolerated failure percentage for the play.
### RoleRef
`type RoleRef struct`
Role entry used in `Play.Roles`. The YAML may be either a scalar role name or a mapping.
Fields:
- `Role string`: Canonical role name used by the executor.
- `Name string`: Alternate YAML field that is normalised into `Role` during unmarshalling.
- `TasksFrom string`: Task file loaded from the role's `tasks/` directory.
- `Vars map[string]any`: Variables merged while the role runs.
- `When any`: Optional condition evaluated before the role runs.
- `Tags []string`: Tags declared on the role reference.
### Task
`type Task struct`
Single Ansible task, including the selected module, module args, flow-control settings, includes, blocks, notifications, and privilege settings.
Fields:
- `Name string`: Optional task name.
- `Module string`: Module name extracted from the YAML key rather than a fixed field.
- `Args map[string]any`: Module arguments extracted from the YAML value.
- `Register string`: Variable name used to store the task result.
- `When any`: Conditional expression or expression list.
- `Loop any`: Loop source, typically a slice or templated string.
- `LoopControl *LoopControl`: Optional loop metadata such as custom variable names.
- `Vars map[string]any`: Task-local variables.
- `Environment map[string]string`: Task-local environment overrides.
- `ChangedWhen any`: Override for changed-state evaluation.
- `FailedWhen any`: Override for failure evaluation.
- `IgnoreErrors bool`: Continue after task failure when true.
- `NoLog bool`: Marker for suppressing logging.
- `Become *bool`: Per-task privilege-escalation override.
- `BecomeUser string`: Per-task privilege-escalation user.
- `Delegate string`: `delegate_to` target host.
- `RunOnce bool`: Runs the task once rather than on every host.
- `Tags []string`: Tags attached to the task.
- `Block []Task`: Main block tasks for `block` syntax.
- `Rescue []Task`: Rescue tasks run after a block failure.
- `Always []Task`: Tasks that always run after a block.
- `Notify any`: Handler notification target, either a string or a list.
- `Retries int`: Retry count for `until` loops.
- `Delay int`: Delay between retries.
- `Until string`: Condition checked for retry loops.
- `IncludeTasks string`: Path used by `include_tasks`.
- `ImportTasks string`: Path used by `import_tasks`.
- `IncludeRole *struct{...}`: Role inclusion payload with `Name`, optional `TasksFrom`, and optional `Vars`.
- `ImportRole *struct{...}`: Role import payload with `Name`, optional `TasksFrom`, and optional `Vars`.
### LoopControl
`type LoopControl struct`
Loop metadata attached to a task.
Fields:
- `LoopVar string`: Name assigned to the current loop item.
- `IndexVar string`: Name assigned to the current loop index.
- `Label string`: Display label for loop items.
- `Pause int`: Pause between loop iterations.
- `Extended bool`: Enables extended loop metadata.
### TaskResult
`type TaskResult struct`
Normalised execution result returned by module handlers and stored in registered variables.
Fields:
- `Changed bool`: Whether the task changed remote state.
- `Failed bool`: Whether the task failed.
- `Skipped bool`: Whether the task was skipped.
- `Msg string`: Summary message.
- `Stdout string`: Standard output captured from command-based modules.
- `Stderr string`: Standard error captured from command-based modules.
- `RC int`: Command exit status when applicable.
- `Results []TaskResult`: Per-item loop results.
- `Data map[string]any`: Module-specific result payload.
- `Duration time.Duration`: Execution duration recorded by the executor.
### Inventory
`type Inventory struct`
Root inventory object.
Fields:
- `All *InventoryGroup`: Root inventory group decoded from the `all` key.
### InventoryGroup
`type InventoryGroup struct`
Inventory group containing hosts, child groups, and inherited variables.
Fields:
- `Hosts map[string]*Host`: Hosts defined directly in the group.
- `Children map[string]*InventoryGroup`: Nested child groups.
- `Vars map[string]any`: Variables inherited by descendant hosts unless overridden.
### Host
`type Host struct`
Per-host inventory entry with Ansible connection settings and inline custom vars.
Fields:
- `AnsibleHost string`: Remote address or hostname to connect to.
- `AnsiblePort int`: SSH port.
- `AnsibleUser string`: SSH user.
- `AnsiblePassword string`: SSH password.
- `AnsibleSSHPrivateKeyFile string`: Private key path for SSH authentication.
- `AnsibleConnection string`: Connection transport hint.
- `AnsibleBecomePassword string`: Password used for privilege escalation.
- `Vars map[string]any`: Additional host variables stored inline in YAML.
### Facts
`type Facts struct`
Subset of gathered host facts stored by the executor.
Fields:
- `Hostname string`: Short hostname.
- `FQDN string`: Fully qualified domain name.
- `OS string`: OS family.
- `Distribution string`: Distribution identifier.
- `Version string`: Distribution version.
- `Architecture string`: Machine architecture.
- `Kernel string`: Kernel release.
- `Memory int64`: Total memory in MiB.
- `CPUs int`: Virtual CPU count.
- `IPv4 string`: Default IPv4 address.
### Parser
`type Parser struct`
Stateful YAML parser for playbooks, inventories, task files, and roles. Its internal path and variable cache are unexported.
### Executor
`type Executor struct`
Playbook execution engine that combines parser state, inventory, vars, gathered facts, registered results, handler notifications, and SSH client reuse.
Fields:
- `OnPlayStart func(play *Play)`: Optional callback fired before a play starts.
- `OnTaskStart func(host string, task *Task)`: Optional callback fired before a task runs on a host.
- `OnTaskEnd func(host string, task *Task, result *TaskResult)`: Optional callback fired after a task result is produced.
- `OnPlayEnd func(play *Play)`: Optional callback fired after a play finishes.
- `Limit string`: Additional host filter applied after normal play host resolution.
- `Tags []string`: Inclusive tag filter for task execution.
- `SkipTags []string`: Exclusive tag filter that always skips matching tasks.
- `CheckMode bool`: Public execution flag exposed for callers and CLI wiring.
- `Diff bool`: Public execution flag exposed for callers and CLI wiring.
- `Verbose int`: Verbosity level used by executor logging and CLI callbacks.
### SSHClient
`type SSHClient struct`
Lazy SSH client that owns connection, authentication, privilege-escalation, and timeout state. All fields are unexported.
### SSHConfig
`type SSHConfig struct`
Configuration used to construct an `SSHClient`.
Fields:
- `Host string`: Target host.
- `Port int`: Target SSH port; defaults to `22`.
- `User string`: SSH user; defaults to `"root"`.
- `Password string`: SSH password.
- `KeyFile string`: Private key path.
- `Become bool`: Enables privilege escalation on the client.
- `BecomeUser string`: User used for privilege escalation.
- `BecomePass string`: Password used for privilege escalation.
- `Timeout time.Duration`: Connection timeout; defaults to `30 * time.Second`.
## Functions
### NewParser
`func NewParser(basePath string) *Parser`
Constructs a parser rooted at `basePath` and initialises its internal variable map. `basePath` is later used to resolve role search paths.
### (*Parser).ParsePlaybook
`func (p *Parser) ParsePlaybook(path string) ([]Play, error)`
Reads a playbook YAML file, unmarshals it into `[]Play`, and post-processes every `PreTasks`, `Tasks`, `PostTasks`, and handler entry to extract `Task.Module` and `Task.Args`.
### (*Parser).ParsePlaybookIter
`func (p *Parser) ParsePlaybookIter(path string) (iter.Seq[Play], error)`
Wrapper around `ParsePlaybook` that yields parsed plays through an `iter.Seq`.
### (*Parser).ParseInventory
`func (p *Parser) ParseInventory(path string) (*Inventory, error)`
Reads an inventory YAML file and unmarshals it into the public `Inventory` model.
### (*Parser).ParseTasks
`func (p *Parser) ParseTasks(path string) ([]Task, error)`
Reads a task file, unmarshals it into `[]Task`, and extracts module names and args for every task entry.
### (*Parser).ParseTasksIter
`func (p *Parser) ParseTasksIter(path string) (iter.Seq[Task], error)`
Wrapper around `ParseTasks` that yields parsed tasks through an `iter.Seq`.
### (*Parser).ParseRole
`func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error)`
Resolves `roles/<name>/tasks/<tasksFrom>` across several search patterns rooted around `basePath`, defaults `tasksFrom` to `main.yml`, merges role defaults without overwriting existing parser vars, merges role vars with overwrite semantics, and then parses the resolved task file.
### (*RoleRef).UnmarshalYAML
`func (r *RoleRef) UnmarshalYAML(unmarshal func(any) error) error`
Accepts either a scalar role name or a structured role mapping. When the mapping only sets `Name`, the method copies it into `Role`.
### (*Task).UnmarshalYAML
`func (t *Task) UnmarshalYAML(node *yaml.Node) error`
Decodes the standard task fields, scans the remaining YAML keys for the first recognised module name, stores free-form arguments in `Args["_raw_params"]`, accepts module mappings and nil-valued modules, and maps `with_items` into `Loop` when `Loop` is unset.
### NormalizeModule
`func NormalizeModule(name string) string`
Returns `ansible.builtin.<name>` for short module names and leaves dotted names unchanged.
### GetHosts
`func GetHosts(inv *Inventory, pattern string) []string`
Resolves hosts from a non-nil inventory by handling `all`, `localhost`, group names, and explicit host names. Compound patterns containing `:` are not yet supported; they currently return `nil`.
### GetHostsIter
`func GetHostsIter(inv *Inventory, pattern string) iter.Seq[string]`
Iterator wrapper around `GetHosts`.
### AllHostsIter
`func AllHostsIter(group *InventoryGroup) iter.Seq[string]`
Yields every host reachable from a group tree in deterministic order by sorting host keys and child-group keys at each level.
### GetHostVars
`func GetHostVars(inv *Inventory, hostname string) map[string]any`
Builds the effective variable map for `hostname` by walking the group tree, applying direct-group vars, host connection settings, inline host vars, and then parent-group vars for keys not already set by a nearer scope.
### NewExecutor
`func NewExecutor(basePath string) *Executor`
Constructs an executor with a parser rooted at `basePath` and fresh maps for vars, facts, registered results, handlers, notifications, and SSH clients.
### (*Executor).SetInventory
`func (e *Executor) SetInventory(path string) error`
Parses an inventory file through the embedded parser and stores the resulting `Inventory` on the executor.
### (*Executor).SetInventoryDirect
`func (e *Executor) SetInventoryDirect(inv *Inventory)`
Stores a caller-supplied inventory pointer on the executor without parsing.
### (*Executor).SetVar
`func (e *Executor) SetVar(key string, value any)`
Stores an executor-scoped variable under a write lock.
### (*Executor).Run
`func (e *Executor) Run(ctx context.Context, playbookPath string) error`
Parses the playbook at `playbookPath` and runs plays sequentially. Each play resolves hosts, merges play vars, gathers facts by default, runs `PreTasks`, roles, `Tasks`, `PostTasks`, and finally any handlers that were notified during the play.
### (*Executor).Close
`func (e *Executor) Close()`
Closes every cached `SSHClient` and replaces the client cache with a fresh empty map.
### (*Executor).TemplateFile
`func (e *Executor) TemplateFile(src, host string, task *Task) (string, error)`
Reads a template file, performs a basic Jinja2-to-Go-template token conversion, and executes it against a context built from executor vars, host vars, and gathered facts. If parsing or execution fails, it falls back to the executor's simpler string-templating path.
### NewSSHClient
`func NewSSHClient(cfg SSHConfig) (*SSHClient, error)`
Applies defaults for `Port`, `User`, and `Timeout`, then constructs an `SSHClient` from `cfg`.
### (*SSHClient).Connect
`func (c *SSHClient) Connect(ctx context.Context) error`
Lazily establishes the SSH connection. Authentication is attempted in this order: explicit key file, default keys from `~/.ssh`, then password-based auth. The method also ensures `known_hosts` exists and uses it for host-key verification.
### (*SSHClient).Close
`func (c *SSHClient) Close() error`
Closes the active SSH connection, if any, and clears the cached client pointer.
### (*SSHClient).Run
`func (c *SSHClient) Run(ctx context.Context, cmd string) (stdout, stderr string, exitCode int, err error)`
Runs a command on the remote host, opening a new SSH session after calling `Connect`. When privilege escalation is enabled, the command is wrapped with `sudo`, using either the become password, the SSH password, or passwordless `sudo -n`. The method returns stdout, stderr, an exit code, and honours context cancellation by signalling the session.
### (*SSHClient).RunScript
`func (c *SSHClient) RunScript(ctx context.Context, script string) (stdout, stderr string, exitCode int, err error)`
Wraps `script` in a heredoc passed to `bash` and delegates execution to `Run`.
### (*SSHClient).Upload
`func (c *SSHClient) Upload(ctx context.Context, local io.Reader, remote string, mode fs.FileMode) error`
Reads all content from `local`, creates the remote parent directory, writes the file via `cat >`, applies the requested mode with `chmod`, and handles both normal and `sudo`-mediated uploads.
### (*SSHClient).Download
`func (c *SSHClient) Download(ctx context.Context, remote string) ([]byte, error)`
Downloads a remote file by running `cat` and returning the captured bytes. A non-zero remote exit status is reported as an error.
### (*SSHClient).FileExists
`func (c *SSHClient) FileExists(ctx context.Context, path string) (bool, error)`
Checks remote path existence with `test -e`.
### (*SSHClient).Stat
`func (c *SSHClient) Stat(ctx context.Context, path string) (map[string]any, error)`
Returns a minimal stat map parsed from remote shell output. The current implementation reports boolean `exists` and `isdir` keys.
### (*SSHClient).SetBecome
`func (c *SSHClient) SetBecome(become bool, user, password string)`
Updates the client's privilege-escalation flag and replaces the stored become user and password only when non-empty override values are supplied.

114
ssh.go
View file

@ -5,11 +5,12 @@ import (
"context"
"io"
"io/fs"
"maps"
"net"
"slices"
"sync"
"time"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
@ -21,17 +22,18 @@ import (
//
// client, _ := NewSSHClient(SSHConfig{Host: "web1"})
type SSHClient struct {
host string
port int
user string
password string
keyFile string
client *ssh.Client
mu sync.Mutex
become bool
becomeUser string
becomePass string
timeout time.Duration
host string
port int
user string
password string
keyFile string
client *ssh.Client
mu sync.Mutex
become bool
becomeUser string
becomePass string
environment map[string]string
timeout time.Duration
}
// SSHConfig holds SSH connection configuration.
@ -68,20 +70,53 @@ func NewSSHClient(cfg SSHConfig) (*SSHClient, error) {
}
client := &SSHClient{
host: cfg.Host,
port: cfg.Port,
user: cfg.User,
password: cfg.Password,
keyFile: cfg.KeyFile,
become: cfg.Become,
becomeUser: cfg.BecomeUser,
becomePass: cfg.BecomePass,
timeout: cfg.Timeout,
host: cfg.Host,
port: cfg.Port,
user: cfg.User,
password: cfg.Password,
keyFile: cfg.KeyFile,
become: cfg.Become,
becomeUser: cfg.BecomeUser,
becomePass: cfg.BecomePass,
timeout: cfg.Timeout,
environment: make(map[string]string),
}
return client, nil
}
// Environment returns a copy of the current remote environment overrides.
// It returns nil when no overrides are configured, and otherwise an
// independent clone so callers can mutate the result without affecting
// the client's internal state.
func (c *SSHClient) Environment() map[string]string {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.environment) == 0 {
		return nil
	}
	// maps.Clone keeps the returned map detached from the guarded original.
	return maps.Clone(c.environment)
}
// SetEnvironment replaces the current remote environment overrides.
// The supplied map is copied so later mutations by the caller do not
// leak into the client; passing nil or an empty map clears all overrides.
func (c *SSHClient) SetEnvironment(environment map[string]string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// Pre-size and copy in one step; for an empty input this yields the
	// same empty non-nil map the previous implementation produced.
	c.environment = make(map[string]string, len(environment))
	maps.Copy(c.environment, environment)
}
// Connect establishes the SSH connection.
//
// Example:
@ -104,7 +139,7 @@ func (c *SSHClient) Connect(ctx context.Context) error {
keyPath = joinPath(env("DIR_HOME"), keyPath[1:])
}
if key, err := coreio.Local.Read(keyPath); err == nil {
if key, err := localFS.Read(keyPath); err == nil {
if signer, err := ssh.ParsePrivateKey([]byte(key)); err == nil {
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
@ -119,7 +154,7 @@ func (c *SSHClient) Connect(ctx context.Context) error {
joinPath(home, ".ssh", "id_rsa"),
}
for _, keyPath := range defaultKeys {
if key, err := coreio.Local.Read(keyPath); err == nil {
if key, err := localFS.Read(keyPath); err == nil {
if signer, err := ssh.ParsePrivateKey([]byte(key)); err == nil {
authMethods = append(authMethods, ssh.PublicKeys(signer))
break
@ -154,11 +189,11 @@ func (c *SSHClient) Connect(ctx context.Context) error {
knownHostsPath := joinPath(home, ".ssh", "known_hosts")
// Ensure known_hosts file exists
if !coreio.Local.Exists(knownHostsPath) {
if err := coreio.Local.EnsureDir(pathDir(knownHostsPath)); err != nil {
if !localFS.Exists(knownHostsPath) {
if err := localFS.EnsureDir(pathDir(knownHostsPath)); err != nil {
return coreerr.E("ssh.Connect", "failed to create .ssh dir", err)
}
if err := coreio.Local.Write(knownHostsPath, ""); err != nil {
if err := localFS.Write(knownHostsPath, ""); err != nil {
return coreerr.E("ssh.Connect", "failed to create known_hosts file", err)
}
}
@ -222,6 +257,8 @@ func (c *SSHClient) Run(ctx context.Context, cmd string) (stdout, stderr string,
return "", "", -1, err
}
cmd = c.commandWithEnvironment(cmd)
session, err := c.client.NewSession()
if err != nil {
return "", "", -1, coreerr.E("ssh.Run", "new session", err)
@ -494,3 +531,28 @@ func (c *SSHClient) SetBecome(become bool, user, password string) {
c.becomePass = password
}
}
// commandWithEnvironment prefixes cmd with `export KEY='value'; ` statements
// for every configured environment override. Keys are emitted in sorted
// order so the generated command line is deterministic, and values are
// single-quoted via shellQuote for safe shell interpolation.
func (c *SSHClient) commandWithEnvironment(cmd string) string {
	c.mu.Lock()
	defer c.mu.Unlock()

	if len(c.environment) == 0 {
		return cmd
	}

	b := newBuilder()
	for _, name := range slices.Sorted(maps.Keys(c.environment)) {
		b.WriteString("export ")
		b.WriteString(name)
		b.WriteString("=")
		b.WriteString(shellQuote(c.environment[name]))
		b.WriteString("; ")
	}
	b.WriteString(cmd)
	return b.String()
}
// shellQuote wraps value in single quotes for safe use in a POSIX shell
// command, escaping embedded single quotes with the standard '\'' idiom.
func shellQuote(value string) string {
	const quote = "'"
	escaped := replaceAll(value, quote, `'\''`)
	return quote + escaped + quote
}

View file

@ -34,3 +34,15 @@ func TestSSH_NewSSHClient_Good_Defaults(t *testing.T) {
assert.Equal(t, "root", client.user)
assert.Equal(t, 30*time.Second, client.timeout)
}
// TestSSH_SSHClient_Good_CommandWithEnvironment verifies that environment
// overrides are exported in sorted key order, with shell-safe quoting,
// ahead of the command itself.
func TestSSH_SSHClient_Good_CommandWithEnvironment(t *testing.T) {
	c := &SSHClient{}
	c.SetEnvironment(map[string]string{
		"ALPHA": "O'Reilly",
		"BETA":  "two words",
	})

	got := c.commandWithEnvironment("echo done")
	want := "export ALPHA='O'\\''Reilly'; export BETA='two words'; echo done"
	assert.Equal(t, want, got)
}

View file

@ -2,12 +2,10 @@ package ansible
import (
"io/fs"
coreio "dappco.re/go/core/io"
)
func readTestFile(path string) ([]byte, error) {
content, err := coreio.Local.Read(path)
content, err := localFS.Read(path)
if err != nil {
return nil, err
}
@ -15,7 +13,7 @@ func readTestFile(path string) ([]byte, error) {
}
func writeTestFile(path string, content []byte, mode fs.FileMode) error {
return coreio.Local.WriteMode(path, string(content), mode)
return localFS.WriteMode(path, string(content), mode)
}
func joinStrings(parts []string, sep string) string {

View file

@ -128,7 +128,7 @@ type Task struct {
raw map[string]any
}
// LoopControl controls loop behavior.
// LoopControl controls loop behaviour.
//
// Example:
//

View file

@ -236,6 +236,41 @@ include_role:
assert.Equal(t, "setup.yml", task.IncludeRole.TasksFrom)
}
// TestTypes_Task_UnmarshalYAML_Good_Environment checks that a task-level
// `environment` mapping is decoded into Task.Environment.
func TestTypes_Task_UnmarshalYAML_Good_Environment(t *testing.T) {
	source := `
name: Env task
shell: echo hi
environment:
  APP_ENV: production
`
	var tsk Task
	require.NoError(t, yaml.Unmarshal([]byte(source), &tsk))
	require.NotNil(t, tsk.Environment)
	assert.Equal(t, "production", tsk.Environment["APP_ENV"])
}
// TestTypes_Play_UnmarshalYAML_Good_Environment checks that a play-level
// `environment` mapping is decoded into Play.Environment.
func TestTypes_Play_UnmarshalYAML_Good_Environment(t *testing.T) {
	source := `
---
- name: Env play
  hosts: all
  environment:
    APP_ENV: production
  tasks:
    - debug:
        msg: hello
`
	var plays []Play
	require.NoError(t, yaml.Unmarshal([]byte(source), &plays))
	require.Len(t, plays, 1)
	require.NotNil(t, plays[0].Environment)
	assert.Equal(t, "production", plays[0].Environment["APP_ENV"])
}
func TestTypes_Task_UnmarshalYAML_Good_BecomeFields(t *testing.T) {
input := `
name: Privileged task