[agent/codex:gpt-5.4-mini] Read ~/spec/code/core/go/ansible/RFC.md fully. Find ONE feat... #24

Merged
Virgil merged 17 commits from main into dev 2026-04-01 06:24:09 +00:00
17 changed files with 1076 additions and 89 deletions

View file

@ -16,8 +16,8 @@ go tool cover -func=/tmp/ansible.cover
No direct `stdlib`-to-`core.*` wrapper drift was found in the Go implementation. The remaining drift is stale migration residue around the `core.*` move:
- `go.mod:15`, `go.sum:7`, `go.sum:8`
Legacy `forge.lthn.ai/core/go-log` references still remain in the dependency graph.
- Resolved on the current branch:
Legacy `forge.lthn.ai/core/go-log` references were removed by replacing `dappco.re/go/core/io` usage with the filesystem primitives in `dappco.re/go/core`.
- `CLAUDE.md:37`, `docs/development.md:169`
Repository guidance still refers to `core/cli`, while the current command registration lives on the `dappco.re/go/core` API at `cmd/ansible/cmd.go:8`.
- `CLAUDE.md:66`, `docs/development.md:86`
@ -25,12 +25,8 @@ No direct `stdlib`-to-`core.*` wrapper drift was found in the Go implementation.
## UK English
- `executor.go:248`
Comment uses US spelling: `Initialize host results`.
- `parser.go:321`
Comment uses US spelling: `NormalizeModule normalizes a module name to its canonical form.`
- `types.go:110`
Comment uses US spelling: `LoopControl controls loop behavior.`
- Resolved on the current branch:
Comment spelling now uses `Initialise`, `normalises`, and `behaviour` in the affected code paths.
## Missing Tests

View file

@ -6,7 +6,6 @@ import (
"dappco.re/go/core"
"dappco.re/go/core/ansible"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
)
@ -35,7 +34,7 @@ func runAnsible(opts core.Options) core.Result {
playbookPath = absPath(playbookPath)
}
if !coreio.Local.Exists(playbookPath) {
if !localFS.Exists(playbookPath) {
return core.Result{Value: coreerr.E("runAnsible", sprintf("playbook not found: %s", playbookPath), nil)}
}
@ -47,6 +46,7 @@ func runAnsible(opts core.Options) core.Result {
// Set options
executor.Limit = opts.String("limit")
executor.CheckMode = opts.Bool("check")
executor.Diff = opts.Bool("diff")
executor.Verbose = opts.Int("verbose")
if tags := opts.String("tags"); tags != "" {
@ -72,14 +72,14 @@ func runAnsible(opts core.Options) core.Result {
invPath = absPath(invPath)
}
if !coreio.Local.Exists(invPath) {
if !localFS.Exists(invPath) {
return core.Result{Value: coreerr.E("runAnsible", sprintf("inventory not found: %s", invPath), nil)}
}
if coreio.Local.IsDir(invPath) {
if localFS.IsDir(invPath) {
for _, name := range []string{"inventory.yml", "hosts.yml", "inventory.yaml", "hosts.yaml"} {
p := joinPath(invPath, name)
if coreio.Local.Exists(p) {
if localFS.Exists(p) {
invPath = p
break
}

View file

@ -22,6 +22,7 @@ func Register(c *core.Core) {
core.Option{Key: "extra-vars", Value: ""},
core.Option{Key: "verbose", Value: 0},
core.Option{Key: "check", Value: false},
core.Option{Key: "diff", Value: false},
),
})

22
cmd/ansible/cmd_test.go Normal file
View file

@ -0,0 +1,22 @@
package anscmd
import (
"testing"
"dappco.re/go/core"
"github.com/stretchr/testify/require"
)
// TestRegister_AnsibleCommandExposesDiffFlag verifies that Register wires the
// `ansible` command with a `diff` flag whose default value is false.
func TestRegister_AnsibleCommandExposesDiffFlag(t *testing.T) {
app := core.New()
Register(app)
// Look the command back up through the core API rather than poking at
// package internals.
result := app.Command("ansible")
require.True(t, result.OK)
cmd, ok := result.Value.(*core.Command)
require.True(t, ok)
require.True(t, cmd.Flags.Has("diff"))
require.False(t, cmd.Flags.Bool("diff"))
}

View file

@ -1,12 +1,56 @@
package anscmd
import (
"io/fs"
"unicode"
"unicode/utf8"
"dappco.re/go/core"
)
// localFileSystem adapts core.Fs's Result-based API to the plain
// error-returning filesystem helpers used in this package.
type localFileSystem struct {
fs *core.Fs
}
// localFS is the package-wide handle to the unrestricted local filesystem.
var localFS = localFileSystem{fs: (&core.Fs{}).NewUnrestricted()}
// Exists reports whether path exists on the local filesystem.
func (l localFileSystem) Exists(path string) bool {
return l.fs.Exists(path)
}
// IsDir reports whether path is a directory on the local filesystem.
func (l localFileSystem) IsDir(path string) bool {
return l.fs.IsDir(path)
}
// Write writes content to path, converting a failed core.Result to an error.
func (l localFileSystem) Write(path, content string) error {
result := l.fs.Write(path, content)
if !result.OK {
return resultError("write "+path, result)
}
return nil
}
// WriteMode writes content to path with the given file mode, converting a
// failed core.Result to an error.
func (l localFileSystem) WriteMode(path, content string, mode fs.FileMode) error {
result := l.fs.WriteMode(path, content, mode)
if !result.OK {
return resultError("write "+path, result)
}
return nil
}
// resultError converts a failed core.Result into an error for op. It returns
// nil when the result is OK, unwraps an error stored in result.Value, and
// otherwise builds a core.E error from the stringified value.
// NOTE(review): this mirrors corexResultError in the ansible package —
// consider sharing one helper if a common home exists.
func resultError(op string, result core.Result) error {
if result.OK {
return nil
}
if err, ok := result.Value.(error); ok {
return err
}
if result.Value == nil {
return core.E(op, "operation failed", nil)
}
return core.E(op, core.Sprint(result.Value), nil)
}
func absPath(path string) string {
if path == "" {
return core.Env("DIR_CWD")

15
cmd/ansible/specs/RFC.md Normal file
View file

@ -0,0 +1,15 @@
# anscmd
**Import:** `dappco.re/go/core/ansible/cmd/ansible`
**Files:** 3
## Types
This package has no exported structs, interfaces, or type aliases.
## Functions
### Register
`func Register(c *core.Core)`
Registers two CLI commands on `c`:
- `ansible`: Runs a playbook through `ansible.Executor`. The command exposes `inventory`, `limit`, `tags`, `skip-tags`, `extra-vars`, `verbose`, `check`, and `diff` flags.
- `ansible/test`: Opens an SSH connection to a host, prints basic host facts, and exposes `user`, `password`, `key`, and `port` flags.

View file

@ -1,6 +1,7 @@
package ansible
import (
"io/fs"
"unicode"
"unicode/utf8"
@ -13,6 +14,66 @@ type stringBuffer interface {
String() string
}
// corexLocalFS adapts core.Fs's Result-based API to the error-returning
// filesystem helpers this package uses for playbooks, roles, and modules.
type corexLocalFS struct {
fs *core.Fs
}
// localFS is the package-wide handle to the unrestricted local filesystem.
var localFS = corexLocalFS{fs: (&core.Fs{}).NewUnrestricted()}
// Read returns the contents of path as a string, converting a failed
// core.Result to an error. A non-string result value yields "" with nil
// error only when the underlying read reported OK.
func (l corexLocalFS) Read(path string) (string, error) {
result := l.fs.Read(path)
if !result.OK {
return "", corexResultError("read "+path, result)
}
content, _ := result.Value.(string)
return content, nil
}
// Write writes content to path, converting a failed core.Result to an error.
func (l corexLocalFS) Write(path, content string) error {
result := l.fs.Write(path, content)
if !result.OK {
return corexResultError("write "+path, result)
}
return nil
}
// WriteMode writes content to path with the given file mode, converting a
// failed core.Result to an error.
func (l corexLocalFS) WriteMode(path, content string, mode fs.FileMode) error {
result := l.fs.WriteMode(path, content, mode)
if !result.OK {
return corexResultError("write "+path, result)
}
return nil
}
// EnsureDir creates path (and any missing parents, presumably — confirm
// against core.Fs.EnsureDir), converting a failed core.Result to an error.
func (l corexLocalFS) EnsureDir(path string) error {
result := l.fs.EnsureDir(path)
if !result.OK {
return corexResultError("ensure dir "+path, result)
}
return nil
}
// Exists reports whether path exists on the local filesystem.
func (l corexLocalFS) Exists(path string) bool {
return l.fs.Exists(path)
}
// IsDir reports whether path is a directory on the local filesystem.
func (l corexLocalFS) IsDir(path string) bool {
return l.fs.IsDir(path)
}
// corexResultError converts a failed core.Result into an error for op. It
// returns nil when the result is OK, unwraps an error stored in
// result.Value, and otherwise builds a core.E error from the stringified
// value. NOTE(review): duplicated as resultError in cmd/ansible.
func corexResultError(op string, result core.Result) error {
if result.OK {
return nil
}
if err, ok := result.Value.(error); ok {
return err
}
if result.Value == nil {
return core.E(op, "operation failed", nil)
}
return core.E(op, core.Sprint(result.Value), nil)
}
func dirSep() string {
ds := core.Env("DS")
if ds == "" {

View file

@ -2,13 +2,14 @@ package ansible
import (
"context"
"errors"
"regexp"
"slices"
"strconv"
"sync"
"text/template"
"time"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
)
@ -130,6 +131,7 @@ func (e *Executor) runPlay(ctx context.Context, play *Play) error {
if len(hosts) == 0 {
return nil // No hosts matched
}
failedHosts := make(map[string]bool)
// Merge play vars
for k, v := range play.Vars {
@ -149,38 +151,41 @@ func (e *Executor) runPlay(ctx context.Context, play *Play) error {
}
}
// Execute pre_tasks
for _, task := range play.PreTasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
return err
// Execute hosts in serial batches when requested.
for _, batch := range e.hostBatches(hosts, play.Serial) {
// Execute pre_tasks
for _, task := range play.PreTasks {
if err := e.runTaskOnHosts(ctx, batch, &task, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Execute roles
for _, roleRef := range play.Roles {
if err := e.runRole(ctx, hosts, &roleRef, play); err != nil {
return err
// Execute roles
for _, roleRef := range play.Roles {
if err := e.runRole(ctx, batch, &roleRef, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Execute tasks
for _, task := range play.Tasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
return err
// Execute tasks
for _, task := range play.Tasks {
if err := e.runTaskOnHosts(ctx, batch, &task, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Execute post_tasks
for _, task := range play.PostTasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
return err
// Execute post_tasks
for _, task := range play.PostTasks {
if err := e.runTaskOnHosts(ctx, batch, &task, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
}
// Run notified handlers
for _, handler := range play.Handlers {
if e.notified[handler.Name] {
if err := e.runTaskOnHosts(ctx, hosts, &handler, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &handler, play, failedHosts, len(hosts), play.MaxFailPercent); err != nil && !isHostFailureError(err) {
return err
}
}
@ -189,8 +194,56 @@ func (e *Executor) runPlay(ctx context.Context, play *Play) error {
return nil
}
// hostBatches splits hosts into the serial batches requested by the play.
// A serial value that resolves to zero or to the full host count yields a
// single batch containing the original slice (no copy); smaller batch
// sizes produce copied sub-slices so later mutation cannot alias hosts.
func (e *Executor) hostBatches(hosts []string, serial any) [][]string {
	size := serialBatchSize(serial, len(hosts))
	if size >= len(hosts) {
		return [][]string{hosts}
	}
	var batches [][]string
	for start := 0; start < len(hosts); start += size {
		end := min(start+size, len(hosts))
		batch := append([]string(nil), hosts[start:end]...)
		batches = append(batches, batch)
	}
	return batches
}
// serialBatchSize normalises the play's serial value (int, int64, or a
// numeric string) to a usable batch size clamped to at most hostCount.
// Any other type, a non-positive value, or an unparsable string selects
// hostCount, i.e. one batch with every host. Percentage strings such as
// "30%" are not handled here.
func serialBatchSize(serial any, hostCount int) int {
	clamp := func(n int) int {
		if n > hostCount {
			return hostCount
		}
		return n
	}
	switch v := serial.(type) {
	case int:
		if v > 0 {
			return clamp(v)
		}
	case int64:
		if v > 0 {
			return clamp(int(v))
		}
	case string:
		// Tolerate surrounding whitespace in YAML scalar values.
		if n, err := strconv.Atoi(corexTrimSpace(v)); err == nil && n > 0 {
			return clamp(n)
		}
	}
	return hostCount
}
// runRole executes a role on hosts.
func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef, play *Play) error {
func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
// Check when condition
if roleRef.When != nil {
if !e.evaluateWhen(roleRef.When, "", nil) {
@ -215,7 +268,12 @@ func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef
// Execute tasks
for _, task := range tasks {
if err := e.runTaskOnHosts(ctx, hosts, &task, play); err != nil {
task := task
task.Tags = append(append([]string{}, roleRef.Tags...), task.Tags...)
if err := e.runTaskOnHosts(ctx, hosts, &task, play, failedHosts, totalHosts, maxFailPercent); err != nil {
if isHostFailureError(err) {
continue
}
// Restore vars
e.vars = oldVars
return err
@ -228,36 +286,113 @@ func (e *Executor) runRole(ctx context.Context, hosts []string, roleRef *RoleRef
}
// runTaskOnHosts runs a task on all hosts.
func (e *Executor) runTaskOnHosts(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runTaskOnHosts(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
hosts = filterFailedHosts(hosts, failedHosts)
if len(hosts) == 0 {
return nil
}
// run_once executes the task only on the first host in the current batch.
if task.RunOnce && len(hosts) > 1 {
hosts = hosts[:1]
}
// Check tags
if !e.matchesTags(task.Tags) {
tags := append(append([]string{}, play.Tags...), task.Tags...)
if !e.matchesTags(tags) {
return nil
}
// Handle block tasks
if len(task.Block) > 0 {
return e.runBlock(ctx, hosts, task, play)
return e.runBlock(ctx, hosts, task, play, failedHosts, totalHosts, maxFailPercent)
}
// Handle include/import
if task.IncludeTasks != "" || task.ImportTasks != "" {
return e.runIncludeTasks(ctx, hosts, task, play)
return e.runIncludeTasks(ctx, hosts, task, play, failedHosts, totalHosts, maxFailPercent)
}
if task.IncludeRole != nil || task.ImportRole != nil {
return e.runIncludeRole(ctx, hosts, task, play)
return e.runIncludeRole(ctx, hosts, task, play, failedHosts, totalHosts, maxFailPercent)
}
var (
haveFailure bool
lastErr error
)
for _, host := range hosts {
if failedHosts[host] {
continue
}
if err := e.runTaskOnHost(ctx, host, task, play); err != nil {
if !task.IgnoreErrors {
return err
failedHosts[host] = true
taskErr := coreerr.E("Executor.runTaskOnHosts", sprintf("task failed on %s: %s", host, err.Error()), err)
if maxFailPercent > 0 && exceedsMaxFailPercent(len(failedHosts), totalHosts, maxFailPercent) {
return coreerr.E("Executor.runTaskOnHosts", sprintf("max fail percentage exceeded: %d%% failed hosts of %d", len(failedHosts), totalHosts), taskErr)
}
if maxFailPercent > 0 {
haveFailure = true
lastErr = taskErr
continue
}
return taskErr
}
}
}
if haveFailure {
return &hostFailureError{err: lastErr}
}
return nil
}
// filterFailedHosts returns hosts with every entry marked true in
// failedHosts removed, preserving the original order. When either input
// is empty the hosts slice is returned unchanged (no copy is made).
func filterFailedHosts(hosts []string, failedHosts map[string]bool) []string {
	if len(failedHosts) == 0 || len(hosts) == 0 {
		return hosts
	}
	remaining := make([]string, 0, len(hosts))
	for _, h := range hosts {
		if failedHosts[h] {
			continue
		}
		remaining = append(remaining, h)
	}
	return remaining
}
// exceedsMaxFailPercent reports whether failedHosts pushes the failure
// ratio strictly above maxFailPercent of totalHosts. A non-positive value
// for any argument disables the check and reports false.
func exceedsMaxFailPercent(failedHosts, totalHosts, maxFailPercent int) bool {
	switch {
	case failedHosts <= 0, totalHosts <= 0, maxFailPercent <= 0:
		return false
	default:
		// Cross-multiplied form of failed/total > maxFailPercent/100,
		// avoiding integer division and rounding.
		return 100*failedHosts > maxFailPercent*totalHosts
	}
}
type hostFailureError struct {
err error
}
func (e *hostFailureError) Error() string {
if e == nil || e.err == nil {
return "host failure"
}
return e.err.Error()
}
func (e *hostFailureError) Unwrap() error {
if e == nil {
return nil
}
return e.err
}
func isHostFailureError(err error) bool {
var target *hostFailureError
return errors.As(err, &target)
}
// runTaskOnHost runs a task on a single host.
func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, play *Play) error {
start := time.Now()
@ -266,7 +401,7 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
e.OnTaskStart(host, task)
}
// Initialize host results
// Initialise host results
if e.results[host] == nil {
e.results[host] = make(map[string]*TaskResult)
}
@ -296,8 +431,8 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
return e.runLoop(ctx, host, client, task, play)
}
// Execute the task
result, err := e.executeModule(ctx, host, client, task, play)
// Execute the task, retrying when an until condition is present.
result, err := e.runTaskWithUntil(ctx, host, client, task, play)
if err != nil {
result = &TaskResult{Failed: true, Msg: err.Error()}
}
@ -324,6 +459,139 @@ func (e *Executor) runTaskOnHost(ctx context.Context, host string, task *Task, p
return nil
}
// runTaskWithUntil executes a task once or, when the task declares an
// `until` condition, retries it until the condition evaluates to true or
// the retry budget is exhausted (task.Retries attempts beyond the first,
// defaulting to 3, with task.Delay seconds between attempts, defaulting
// to 1).
//
// Each attempt's result is published via setTempResult under the fixed
// alias "result" so the until expression can reference it. When the task
// registers under a different name, any pre-existing "result" entry is
// saved and restored (or removed) on return so no stale alias leaks out.
func (e *Executor) runTaskWithUntil(ctx context.Context, host string, client *SSHClient, task *Task, play *Play) (*TaskResult, error) {
	if task.Until == "" {
		return e.executeModule(ctx, host, client, task, play)
	}
	retries := task.Retries
	if retries <= 0 {
		retries = 3
	}
	delay := task.Delay
	if delay <= 0 {
		delay = 1
	}
	restoreAlias := task.Register != "result"
	var (
		previousAlias *TaskResult
		hadAlias      bool
	)
	if restoreAlias {
		e.mu.RLock()
		if hostResults, ok := e.results[host]; ok {
			previousAlias, hadAlias = hostResults["result"]
		}
		e.mu.RUnlock()
		defer func() {
			e.mu.Lock()
			defer e.mu.Unlock()
			if e.results[host] == nil {
				e.results[host] = make(map[string]*TaskResult)
			}
			if hadAlias {
				e.results[host]["result"] = previousAlias
			} else {
				delete(e.results[host], "result")
			}
		}()
	}
	// Track whether the until condition was ever satisfied. retryTask
	// returns a nil error both on success and on an exhausted budget, so
	// without this flag a run whose until condition passed would still be
	// marked failed below.
	conditionMet := false
	lastResult, lastErr := retryTask(ctx, retries, delay, func() (*TaskResult, error) {
		result, err := e.executeModule(ctx, host, client, task, play)
		if err != nil {
			// Fold the module error into a failed result so the until
			// expression can inspect it and retrying can continue.
			result = &TaskResult{Failed: true, Msg: err.Error()}
		}
		if result == nil {
			result = &TaskResult{}
		}
		e.setTempResult(host, task.Register, result)
		return result, nil
	}, func(result *TaskResult) bool {
		if e.evaluateWhen(task.Until, host, task) {
			conditionMet = true
		}
		return conditionMet
	})
	if lastErr != nil {
		return lastResult, lastErr
	}
	if lastResult == nil {
		lastResult = &TaskResult{}
	}
	if conditionMet {
		// The until condition passed on some attempt; report the final
		// result as-is.
		return lastResult, nil
	}
	// Retry budget exhausted without the condition passing: surface failure.
	lastResult.Failed = true
	if lastResult.Msg == "" {
		lastResult.Msg = sprintf("until condition not met: %s", task.Until)
	}
	return lastResult, nil
}
// setTempResult exposes the latest task result for until evaluation without
// leaving stale state behind when a different register name is used.
// The result is always published under the fixed alias "result" so an
// `until: result is ...` expression can see it; when register is non-empty
// the same pointer is stored under that name as well. Callers that use a
// register other than "result" are expected to restore or remove the alias
// afterwards (see the deferred restore in runTaskWithUntil).
func (e *Executor) setTempResult(host string, register string, result *TaskResult) {
e.mu.Lock()
defer e.mu.Unlock()
// Lazily create the per-host result map on first write.
if e.results[host] == nil {
e.results[host] = make(map[string]*TaskResult)
}
e.results[host]["result"] = result
if register != "" {
e.results[host][register] = result
}
}
// retryTask runs fn until done returns true or the retry budget is exhausted.
// retries counts additional attempts beyond the first (so retries+1 total
// calls to fn); delay is the pause in seconds between attempts. A non-nil
// error from fn aborts immediately with that error. A nil done function
// means a single successful fn call suffices. Note that both a satisfied
// done and an exhausted budget return a nil error — the caller must
// distinguish the two (e.g. by re-checking the condition). Cancellation of
// ctx during an inter-attempt delay returns the context error.
func retryTask(ctx context.Context, retries, delay int, fn func() (*TaskResult, error), done func(*TaskResult) bool) (*TaskResult, error) {
// Clamp negative inputs to safe values.
if retries < 0 {
retries = 0
}
if delay < 0 {
delay = 0
}
var lastResult *TaskResult
var lastErr error
for attempt := 0; attempt <= retries; attempt++ {
lastResult, lastErr = fn()
if lastErr != nil {
return lastResult, lastErr
}
// Normalise a nil result so done never sees nil.
if lastResult == nil {
lastResult = &TaskResult{}
}
if done == nil || done(lastResult) {
return lastResult, nil
}
// Sleep only between attempts, never after the final one.
if attempt < retries && delay > 0 {
if err := sleepWithContext(ctx, time.Duration(delay)*time.Second); err != nil {
return lastResult, err
}
}
}
return lastResult, nil
}
// sleepWithContext pauses for the requested duration or stops early when the
// context is cancelled.
func sleepWithContext(ctx context.Context, d time.Duration) error {
timer := time.NewTimer(d)
defer timer.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
return nil
}
}
// runLoop handles task loops.
func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient, task *Task, play *Play) error {
items := e.resolveLoop(task.Loop, host)
@ -363,6 +631,10 @@ func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient,
if result.Failed && !task.IgnoreErrors {
break
}
if task.LoopControl != nil && task.LoopControl.Pause > 0 && i < len(items)-1 {
time.Sleep(time.Duration(task.LoopControl.Pause) * time.Second)
}
}
// Restore loop variables
@ -400,12 +672,12 @@ func (e *Executor) runLoop(ctx context.Context, host string, client *SSHClient,
}
// runBlock handles block/rescue/always.
func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
var blockErr error
// Try block
for _, t := range task.Block {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
blockErr = err
break
}
@ -414,7 +686,7 @@ func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, pla
// Run rescue if block failed
if blockErr != nil && len(task.Rescue) > 0 {
for _, t := range task.Rescue {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
// Rescue also failed
break
}
@ -423,7 +695,7 @@ func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, pla
// Always run always block
for _, t := range task.Always {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
if blockErr == nil {
blockErr = err
}
@ -438,7 +710,7 @@ func (e *Executor) runBlock(ctx context.Context, hosts []string, task *Task, pla
}
// runIncludeTasks handles include_tasks/import_tasks.
func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
path := task.IncludeTasks
if path == "" {
path = task.ImportTasks
@ -453,7 +725,10 @@ func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Ta
}
for _, t := range tasks {
if err := e.runTaskOnHosts(ctx, hosts, &t, play); err != nil {
if err := e.runTaskOnHosts(ctx, hosts, &t, play, failedHosts, totalHosts, maxFailPercent); err != nil {
if isHostFailureError(err) {
continue
}
return err
}
}
@ -462,7 +737,7 @@ func (e *Executor) runIncludeTasks(ctx context.Context, hosts []string, task *Ta
}
// runIncludeRole handles include_role/import_role.
func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Task, play *Play) error {
func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Task, play *Play, failedHosts map[string]bool, totalHosts int, maxFailPercent int) error {
var roleName, tasksFrom string
var roleVars map[string]any
@ -482,7 +757,7 @@ func (e *Executor) runIncludeRole(ctx context.Context, hosts []string, task *Tas
Vars: roleVars,
}
return e.runRole(ctx, hosts, roleRef, play)
return e.runRole(ctx, hosts, roleRef, play, failedHosts, totalHosts, maxFailPercent)
}
// getHosts returns hosts matching the pattern.
@ -903,7 +1178,7 @@ func (e *Executor) handleLookup(expr string) string {
case "env":
return env(arg)
case "file":
if data, err := coreio.Local.Read(arg); err == nil {
if data, err := localFS.Read(arg); err == nil {
return data
}
}
@ -1000,7 +1275,7 @@ func (e *Executor) Close() {
//
// content, err := exec.TemplateFile("/workspace/templates/app.conf.j2", "web1", &Task{})
func (e *Executor) TemplateFile(src, host string, task *Task) (string, error) {
content, err := coreio.Local.Read(src)
content, err := localFS.Read(src)
if err != nil {
return "", err
}

View file

@ -1,9 +1,12 @@
package ansible
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// --- NewExecutor ---
@ -98,6 +101,221 @@ func TestExecutor_GetHosts_Good_WithLimit(t *testing.T) {
assert.Contains(t, hosts, "host2")
}
func TestExecutor_RunPlay_Good_SerialBatchesHosts(t *testing.T) {
e := NewExecutor("/tmp")
e.SetInventoryDirect(&Inventory{
All: &InventoryGroup{
Hosts: map[string]*Host{
"host1": {},
"host2": {},
},
},
})
var gathered []string
e.OnTaskStart = func(host string, task *Task) {
gathered = append(gathered, host+":"+task.Name)
}
gatherFacts := false
play := &Play{
Name: "serial",
Hosts: "all",
GatherFacts: &gatherFacts,
Serial: "1",
Tasks: []Task{
{Name: "first", Module: "debug", Args: map[string]any{"msg": "one"}},
{Name: "second", Module: "debug", Args: map[string]any{"msg": "two"}},
},
}
require.NoError(t, e.runPlay(context.Background(), play))
assert.Equal(t, []string{
"host1:first",
"host1:second",
"host2:first",
"host2:second",
}, gathered)
}
func TestExecutor_RunPlay_Good_MaxFailPercentStopsAfterThreshold(t *testing.T) {
e := NewExecutor("/tmp")
e.SetInventoryDirect(&Inventory{
All: &InventoryGroup{
Hosts: map[string]*Host{
"host1": {Vars: map[string]any{"fail_first": true, "fail_second": false}},
"host2": {Vars: map[string]any{"fail_first": false, "fail_second": true}},
"host3": {Vars: map[string]any{"fail_first": false, "fail_second": false}},
},
},
})
var executed []string
e.OnTaskStart = func(host string, task *Task) {
executed = append(executed, host+":"+task.Name)
}
gatherFacts := false
play := &Play{
Name: "max fail",
Hosts: "all",
GatherFacts: &gatherFacts,
MaxFailPercent: 50,
Tasks: []Task{
{
Name: "first failure",
Module: "fail",
Args: map[string]any{"msg": "first"},
When: "{{ fail_first }}",
},
{
Name: "second failure",
Module: "fail",
Args: map[string]any{"msg": "second"},
When: "{{ fail_second }}",
},
{
Name: "final task",
Module: "debug",
Args: map[string]any{"msg": "ok"},
},
},
}
err := e.runPlay(context.Background(), play)
require.Error(t, err)
assert.Equal(t, []string{
"host1:first failure",
"host2:first failure",
"host3:first failure",
"host2:second failure",
}, executed)
assert.NotContains(t, executed, "host3:second failure")
assert.NotContains(t, executed, "host1:final task")
assert.NotContains(t, executed, "host2:final task")
assert.NotContains(t, executed, "host3:final task")
}
func TestExecutor_RunPlay_Good_RunOnceTaskOnlyRunsOnFirstHost(t *testing.T) {
e := NewExecutor("/tmp")
e.SetInventoryDirect(&Inventory{
All: &InventoryGroup{
Hosts: map[string]*Host{
"host1": {},
"host2": {},
},
},
})
var executed []string
e.OnTaskStart = func(host string, task *Task) {
executed = append(executed, host)
}
gatherFacts := false
play := &Play{
Name: "run once",
Hosts: "all",
GatherFacts: &gatherFacts,
Tasks: []Task{
{
Name: "single host",
Module: "debug",
Args: map[string]any{"msg": "ok"},
Register: "result",
RunOnce: true,
},
},
}
require.NoError(t, e.runPlay(context.Background(), play))
assert.Equal(t, []string{"host1"}, executed)
assert.NotNil(t, e.results["host1"]["result"])
_, ok := e.results["host2"]
assert.False(t, ok)
}
func TestExecutor_RunPlay_Good_PlayTagsApplyToUntaggedTasks(t *testing.T) {
e := NewExecutor("/tmp")
e.SetInventoryDirect(&Inventory{
All: &InventoryGroup{
Hosts: map[string]*Host{
"host1": {},
},
},
})
e.Tags = []string{"deploy"}
var executed []string
e.OnTaskStart = func(host string, task *Task) {
executed = append(executed, host+":"+task.Name)
}
gatherFacts := false
play := &Play{
Name: "tagged play",
Hosts: "all",
GatherFacts: &gatherFacts,
Tags: []string{"deploy"},
Tasks: []Task{
{Name: "untagged task", Module: "debug", Args: map[string]any{"msg": "ok"}},
},
}
require.NoError(t, e.runPlay(context.Background(), play))
assert.Equal(t, []string{"host1:untagged task"}, executed)
}
func TestExecutor_RunTaskOnHost_Good_LoopControlPause(t *testing.T) {
e := NewExecutor("/tmp")
task := &Task{
Name: "pause loop",
Module: "debug",
Args: map[string]any{"msg": "{{ item }}"},
Loop: []any{"one", "two"},
LoopControl: &LoopControl{
Pause: 1,
},
}
play := &Play{}
start := time.Now()
require.NoError(t, e.runTaskOnHost(context.Background(), "localhost", task, play))
assert.GreaterOrEqual(t, time.Since(start), 900*time.Millisecond)
}
func TestExecutor_SetTempResult_Good_ResultAliasSupportsUntil(t *testing.T) {
e := NewExecutor("/tmp")
e.setTempResult("host1", "", &TaskResult{Failed: true, RC: 1})
assert.False(t, e.evaluateWhen("result is success", "host1", &Task{}))
e.setTempResult("host1", "", &TaskResult{Failed: false, RC: 0})
assert.True(t, e.evaluateWhen("result is success", "host1", &Task{}))
}
func TestRetryTask_Good_RetriesAndWaits(t *testing.T) {
attempts := 0
start := time.Now()
result, err := retryTask(context.Background(), 1, 1, func() (*TaskResult, error) {
attempts++
if attempts == 1 {
return &TaskResult{Failed: true, RC: 1}, nil
}
return &TaskResult{Failed: false, RC: 0}, nil
}, func(result *TaskResult) bool {
return result.RC == 0
})
require.NoError(t, err)
assert.Equal(t, 2, attempts)
assert.NotNil(t, result)
assert.False(t, result.Failed)
assert.GreaterOrEqual(t, time.Since(start), 900*time.Millisecond)
}
// --- matchesTags ---
func TestExecutor_MatchesTags_Good_NoTagsFilter(t *testing.T) {

3
go.mod
View file

@ -4,7 +4,6 @@ go 1.26.0
require (
dappco.re/go/core v0.8.0-alpha.1
dappco.re/go/core/io v0.2.0
dappco.re/go/core/log v0.1.0
github.com/stretchr/testify v1.11.1
golang.org/x/crypto v0.49.0
@ -12,8 +11,8 @@ require (
)
require (
forge.lthn.ai/core/go-log v0.0.4 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/kr/text v0.2.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
golang.org/x/sys v0.42.0 // indirect
)

5
go.sum
View file

@ -1,11 +1,8 @@
dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk=
dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
dappco.re/go/core/io v0.2.0 h1:zuudgIiTsQQ5ipVt97saWdGLROovbEB/zdVyy9/l+I4=
dappco.re/go/core/io v0.2.0/go.mod h1:1QnQV6X9LNgFKfm8SkOtR9LLaj3bDcsOIeJOOyjbL5E=
dappco.re/go/core/log v0.1.0 h1:pa71Vq2TD2aoEUQWFKwNcaJ3GBY8HbaNGqtE688Unyc=
dappco.re/go/core/log v0.1.0/go.mod h1:Nkqb8gsXhZAO8VLpx7B8i1iAmohhzqA20b9Zr8VUcJs=
forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0=
forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=

View file

@ -6,7 +6,6 @@ import (
"io/fs"
"strconv"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
)
@ -254,7 +253,7 @@ func (e *Executor) moduleScript(ctx context.Context, client *SSHClient, args map
}
// Read local script
data, err := coreio.Local.Read(script)
data, err := localFS.Read(script)
if err != nil {
return nil, coreerr.E("Executor.moduleScript", "read script", err)
}
@ -285,7 +284,7 @@ func (e *Executor) moduleCopy(ctx context.Context, client *SSHClient, args map[s
var err error
if src := getStringArg(args, "src", ""); src != "" {
content, err = coreio.Local.Read(src)
content, err = localFS.Read(src)
if err != nil {
return nil, coreerr.E("Executor.moduleCopy", "read src", err)
}
@ -509,11 +508,11 @@ func (e *Executor) moduleFetch(ctx context.Context, client *SSHClient, args map[
}
// Create dest directory
if err := coreio.Local.EnsureDir(pathDir(dest)); err != nil {
if err := localFS.EnsureDir(pathDir(dest)); err != nil {
return nil, err
}
if err := coreio.Local.Write(dest, string(content)); err != nil {
if err := localFS.Write(dest, string(content)); err != nil {
return nil, err
}
@ -1051,7 +1050,7 @@ func (e *Executor) moduleUnarchive(ctx context.Context, client *SSHClient, args
var cmd string
if !remote {
// Upload local file first
data, err := coreio.Local.Read(src)
data, err := localFS.Read(src)
if err != nil {
return nil, coreerr.E("Executor.moduleUnarchive", "read src", err)
}

View file

@ -5,7 +5,6 @@ import (
"maps"
"slices"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
"gopkg.in/yaml.v3"
)
@ -38,7 +37,7 @@ func NewParser(basePath string) *Parser {
//
// plays, err := parser.ParsePlaybook("/workspace/playbooks/site.yml")
func (p *Parser) ParsePlaybook(path string) ([]Play, error) {
data, err := coreio.Local.Read(path)
data, err := localFS.Read(path)
if err != nil {
return nil, coreerr.E("Parser.ParsePlaybook", "read playbook", err)
}
@ -83,7 +82,7 @@ func (p *Parser) ParsePlaybookIter(path string) (iter.Seq[Play], error) {
//
// inv, err := parser.ParseInventory("/workspace/inventory.yml")
func (p *Parser) ParseInventory(path string) (*Inventory, error) {
data, err := coreio.Local.Read(path)
data, err := localFS.Read(path)
if err != nil {
return nil, coreerr.E("Parser.ParseInventory", "read inventory", err)
}
@ -102,7 +101,7 @@ func (p *Parser) ParseInventory(path string) (*Inventory, error) {
//
// tasks, err := parser.ParseTasks("/workspace/roles/web/tasks/main.yml")
func (p *Parser) ParseTasks(path string) ([]Task, error) {
data, err := coreio.Local.Read(path)
data, err := localFS.Read(path)
if err != nil {
return nil, coreerr.E("Parser.ParseTasks", "read tasks", err)
}
@ -168,7 +167,7 @@ func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
for _, sp := range searchPaths {
// Clean the path to resolve .. segments
sp = cleanPath(sp)
if coreio.Local.Exists(sp) {
if localFS.Exists(sp) {
tasksPath = sp
break
}
@ -180,7 +179,7 @@ func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
// Load role defaults
defaultsPath := joinPath(pathDir(pathDir(tasksPath)), "defaults", "main.yml")
if data, err := coreio.Local.Read(defaultsPath); err == nil {
if data, err := localFS.Read(defaultsPath); err == nil {
var defaults map[string]any
if yaml.Unmarshal([]byte(data), &defaults) == nil {
for k, v := range defaults {
@ -193,7 +192,7 @@ func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
// Load role vars
varsPath := joinPath(pathDir(pathDir(tasksPath)), "vars", "main.yml")
if data, err := coreio.Local.Read(varsPath); err == nil {
if data, err := localFS.Read(varsPath); err == nil {
var roleVars map[string]any
if yaml.Unmarshal([]byte(data), &roleVars) == nil {
for k, v := range roleVars {
@ -352,7 +351,7 @@ func isModule(key string) bool {
return contains(key, ".")
}
// NormalizeModule normalizes a module name to its canonical form.
// NormalizeModule normalises a module name to its canonical form.
//
// Example:
//
@ -416,11 +415,11 @@ func getAllHosts(group *InventoryGroup) []string {
}
var hosts []string
for name := range group.Hosts {
for _, name := range slices.Sorted(maps.Keys(group.Hosts)) {
hosts = append(hosts, name)
}
for _, child := range group.Children {
hosts = append(hosts, getAllHosts(child)...)
for _, name := range slices.Sorted(maps.Keys(group.Children)) {
hosts = append(hosts, getAllHosts(group.Children[name])...)
}
return hosts
}

364
specs/RFC.md Normal file
View file

@ -0,0 +1,364 @@
# ansible
**Import:** `dappco.re/go/core/ansible`
**Files:** 6
## Types
This package exports structs only. It has no exported interfaces or type aliases.
### Playbook
`type Playbook struct`
Top-level playbook wrapper for YAML documents that decode to an inline list of plays.
Fields:
- `Plays []Play`: Inlined play definitions in declaration order.
### Play
`type Play struct`
One play in a playbook, including host targeting, privilege settings, vars, task lists, roles, handlers, and run controls.
Fields:
- `Name string`: Human-readable play name used in output and callbacks.
- `Hosts string`: Inventory pattern resolved before the play runs.
- `Connection string`: Optional connection type; `"local"` skips SSH fact gathering.
- `Become bool`: Enables privilege escalation for tasks in the play.
- `BecomeUser string`: Override user for privilege escalation.
- `GatherFacts *bool`: Optional fact-gathering switch; `nil` means facts are gathered.
- `Vars map[string]any`: Play-scoped variables merged into parser and executor state.
- `PreTasks []Task`: Tasks run before roles and main tasks.
- `Tasks []Task`: Main task list.
- `PostTasks []Task`: Tasks run after the main task list.
- `Roles []RoleRef`: Role references executed between `PreTasks` and `Tasks`.
- `Handlers []Task`: Handler tasks that may run after normal tasks when notified.
- `Tags []string`: Tags attached to the play.
- `Environment map[string]string`: Environment variables attached to the play.
- `Serial any`: Serial batch setting; the YAML accepts either an `int` or a `string`.
- `MaxFailPercent int`: Maximum tolerated failure percentage for the play.
### RoleRef
`type RoleRef struct`
Role entry used in `Play.Roles`. The YAML may be either a scalar role name or a mapping.
Fields:
- `Role string`: Canonical role name used by the executor.
- `Name string`: Alternate YAML field that is normalised into `Role` during unmarshalling.
- `TasksFrom string`: Task file loaded from the role's `tasks/` directory.
- `Vars map[string]any`: Variables merged while the role runs.
- `When any`: Optional condition evaluated before the role runs.
- `Tags []string`: Tags declared on the role reference.
### Task
`type Task struct`
Single Ansible task, including the selected module, module args, flow-control settings, includes, blocks, notifications, and privilege settings.
Fields:
- `Name string`: Optional task name.
- `Module string`: Module name extracted from the YAML key rather than a fixed field.
- `Args map[string]any`: Module arguments extracted from the YAML value.
- `Register string`: Variable name used to store the task result.
- `When any`: Conditional expression or expression list.
- `Loop any`: Loop source, typically a slice or templated string.
- `LoopControl *LoopControl`: Optional loop metadata such as custom variable names.
- `Vars map[string]any`: Task-local variables.
- `Environment map[string]string`: Task-local environment overrides.
- `ChangedWhen any`: Override for changed-state evaluation.
- `FailedWhen any`: Override for failure evaluation.
- `IgnoreErrors bool`: Continue after task failure when true.
- `NoLog bool`: Marker for suppressing logging.
- `Become *bool`: Per-task privilege-escalation override.
- `BecomeUser string`: Per-task privilege-escalation user.
- `Delegate string`: `delegate_to` target host.
- `RunOnce bool`: Runs the task once rather than on every host.
- `Tags []string`: Tags attached to the task.
- `Block []Task`: Main block tasks for `block` syntax.
- `Rescue []Task`: Rescue tasks run after a block failure.
- `Always []Task`: Tasks that always run after a block.
- `Notify any`: Handler notification target, either a string or a list.
- `Retries int`: Retry count for `until` loops.
- `Delay int`: Delay between retries.
- `Until string`: Condition checked for retry loops.
- `IncludeTasks string`: Path used by `include_tasks`.
- `ImportTasks string`: Path used by `import_tasks`.
- `IncludeRole *struct{...}`: Role inclusion payload with `Name`, optional `TasksFrom`, and optional `Vars`.
- `ImportRole *struct{...}`: Role import payload with `Name`, optional `TasksFrom`, and optional `Vars`.
### LoopControl
`type LoopControl struct`
Loop metadata attached to a task.
Fields:
- `LoopVar string`: Name assigned to the current loop item.
- `IndexVar string`: Name assigned to the current loop index.
- `Label string`: Display label for loop items.
- `Pause int`: Pause between loop iterations.
- `Extended bool`: Enables extended loop metadata.
### TaskResult
`type TaskResult struct`
Normalised execution result returned by module handlers and stored in registered variables.
Fields:
- `Changed bool`: Whether the task changed remote state.
- `Failed bool`: Whether the task failed.
- `Skipped bool`: Whether the task was skipped.
- `Msg string`: Summary message.
- `Stdout string`: Standard output captured from command-based modules.
- `Stderr string`: Standard error captured from command-based modules.
- `RC int`: Command exit status when applicable.
- `Results []TaskResult`: Per-item loop results.
- `Data map[string]any`: Module-specific result payload.
- `Duration time.Duration`: Execution duration recorded by the executor.
### Inventory
`type Inventory struct`
Root inventory object.
Fields:
- `All *InventoryGroup`: Root inventory group decoded from the `all` key.
### InventoryGroup
`type InventoryGroup struct`
Inventory group containing hosts, child groups, and inherited variables.
Fields:
- `Hosts map[string]*Host`: Hosts defined directly in the group.
- `Children map[string]*InventoryGroup`: Nested child groups.
- `Vars map[string]any`: Variables inherited by descendant hosts unless overridden.
### Host
`type Host struct`
Per-host inventory entry with Ansible connection settings and inline custom vars.
Fields:
- `AnsibleHost string`: Remote address or hostname to connect to.
- `AnsiblePort int`: SSH port.
- `AnsibleUser string`: SSH user.
- `AnsiblePassword string`: SSH password.
- `AnsibleSSHPrivateKeyFile string`: Private key path for SSH authentication.
- `AnsibleConnection string`: Connection transport hint.
- `AnsibleBecomePassword string`: Password used for privilege escalation.
- `Vars map[string]any`: Additional host variables stored inline in YAML.
### Facts
`type Facts struct`
Subset of gathered host facts stored by the executor.
Fields:
- `Hostname string`: Short hostname.
- `FQDN string`: Fully qualified domain name.
- `OS string`: OS family.
- `Distribution string`: Distribution identifier.
- `Version string`: Distribution version.
- `Architecture string`: Machine architecture.
- `Kernel string`: Kernel release.
- `Memory int64`: Total memory in MiB.
- `CPUs int`: Virtual CPU count.
- `IPv4 string`: Default IPv4 address.
### Parser
`type Parser struct`
Stateful YAML parser for playbooks, inventories, task files, and roles. Its internal state — the base path used for role resolution and the variable map — is unexported.
### Executor
`type Executor struct`
Playbook execution engine that combines parser state, inventory, vars, gathered facts, registered results, handler notifications, and SSH client reuse.
Fields:
- `OnPlayStart func(play *Play)`: Optional callback fired before a play starts.
- `OnTaskStart func(host string, task *Task)`: Optional callback fired before a task runs on a host.
- `OnTaskEnd func(host string, task *Task, result *TaskResult)`: Optional callback fired after a task result is produced.
- `OnPlayEnd func(play *Play)`: Optional callback fired after a play finishes.
- `Limit string`: Additional host filter applied after normal play host resolution.
- `Tags []string`: Inclusive tag filter for task execution.
- `SkipTags []string`: Exclusive tag filter that always skips matching tasks.
- `CheckMode bool`: Public execution flag exposed for callers and CLI wiring.
- `Diff bool`: Public execution flag exposed for callers and CLI wiring.
- `Verbose int`: Verbosity level used by executor logging and CLI callbacks.
### SSHClient
`type SSHClient struct`
Lazy SSH client that owns connection, authentication, privilege-escalation, and timeout state. All fields are unexported.
### SSHConfig
`type SSHConfig struct`
Configuration used to construct an `SSHClient`.
Fields:
- `Host string`: Target host.
- `Port int`: Target SSH port; defaults to `22`.
- `User string`: SSH user; defaults to `"root"`.
- `Password string`: SSH password.
- `KeyFile string`: Private key path.
- `Become bool`: Enables privilege escalation on the client.
- `BecomeUser string`: User used for privilege escalation.
- `BecomePass string`: Password used for privilege escalation.
- `Timeout time.Duration`: Connection timeout; defaults to `30 * time.Second`.
## Functions
### NewParser
`func NewParser(basePath string) *Parser`
Constructs a parser rooted at `basePath` and initialises its internal variable map. `basePath` is later used to resolve role search paths.
### (*Parser).ParsePlaybook
`func (p *Parser) ParsePlaybook(path string) ([]Play, error)`
Reads a playbook YAML file, unmarshals it into `[]Play`, and post-processes every `PreTasks`, `Tasks`, `PostTasks`, and handler entry to extract `Task.Module` and `Task.Args`.
### (*Parser).ParsePlaybookIter
`func (p *Parser) ParsePlaybookIter(path string) (iter.Seq[Play], error)`
Wrapper around `ParsePlaybook` that yields parsed plays through an `iter.Seq`.
### (*Parser).ParseInventory
`func (p *Parser) ParseInventory(path string) (*Inventory, error)`
Reads an inventory YAML file and unmarshals it into the public `Inventory` model.
### (*Parser).ParseTasks
`func (p *Parser) ParseTasks(path string) ([]Task, error)`
Reads a task file, unmarshals it into `[]Task`, and extracts module names and args for every task entry.
### (*Parser).ParseTasksIter
`func (p *Parser) ParseTasksIter(path string) (iter.Seq[Task], error)`
Wrapper around `ParseTasks` that yields parsed tasks through an `iter.Seq`.
### (*Parser).ParseRole
`func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error)`
Resolves `roles/<name>/tasks/<tasksFrom>` across several search patterns rooted around `basePath`, defaults `tasksFrom` to `main.yml`, merges role defaults without overwriting existing parser vars, merges role vars with overwrite semantics, and then parses the resolved task file.
### (*RoleRef).UnmarshalYAML
`func (r *RoleRef) UnmarshalYAML(unmarshal func(any) error) error`
Accepts either a scalar role name or a structured role mapping. When the mapping only sets `Name`, the method copies it into `Role`.
### (*Task).UnmarshalYAML
`func (t *Task) UnmarshalYAML(node *yaml.Node) error`
Decodes the standard task fields, scans the remaining YAML keys for the first recognised module name, stores free-form arguments in `Args["_raw_params"]`, accepts module mappings and nil-valued modules, and maps `with_items` into `Loop` when `Loop` is unset.
### NormalizeModule
`func NormalizeModule(name string) string`
Returns `ansible.builtin.<name>` for short module names and leaves dotted names unchanged.
### GetHosts
`func GetHosts(inv *Inventory, pattern string) []string`
Resolves hosts from a non-nil inventory by handling `all`, `localhost`, group names, and explicit host names. Patterns containing `:` (union/intersection syntax) are not yet supported and currently return `nil`.
### GetHostsIter
`func GetHostsIter(inv *Inventory, pattern string) iter.Seq[string]`
Iterator wrapper around `GetHosts`.
### AllHostsIter
`func AllHostsIter(group *InventoryGroup) iter.Seq[string]`
Yields every host reachable from a group tree in deterministic order by sorting host keys and child-group keys at each level.
### GetHostVars
`func GetHostVars(inv *Inventory, hostname string) map[string]any`
Builds the effective variable map for `hostname` by walking the group tree, applying direct-group vars, host connection settings, inline host vars, and then parent-group vars for keys not already set by a nearer scope.
### NewExecutor
`func NewExecutor(basePath string) *Executor`
Constructs an executor with a parser rooted at `basePath` and fresh maps for vars, facts, registered results, handlers, notifications, and SSH clients.
### (*Executor).SetInventory
`func (e *Executor) SetInventory(path string) error`
Parses an inventory file through the embedded parser and stores the resulting `Inventory` on the executor.
### (*Executor).SetInventoryDirect
`func (e *Executor) SetInventoryDirect(inv *Inventory)`
Stores a caller-supplied inventory pointer on the executor without parsing.
### (*Executor).SetVar
`func (e *Executor) SetVar(key string, value any)`
Stores an executor-scoped variable under a write lock.
### (*Executor).Run
`func (e *Executor) Run(ctx context.Context, playbookPath string) error`
Parses the playbook at `playbookPath` and runs plays sequentially. Each play resolves hosts, merges play vars, gathers facts by default, runs `PreTasks`, roles, `Tasks`, `PostTasks`, and finally any handlers that were notified during the play.
### (*Executor).Close
`func (e *Executor) Close()`
Closes every cached `SSHClient` and replaces the client cache with a fresh empty map.
### (*Executor).TemplateFile
`func (e *Executor) TemplateFile(src, host string, task *Task) (string, error)`
Reads a template file, performs a basic Jinja2-to-Go-template token conversion, and executes it against a context built from executor vars, host vars, and gathered facts. If parsing or execution fails, it falls back to the executor's simpler string-templating path.
### NewSSHClient
`func NewSSHClient(cfg SSHConfig) (*SSHClient, error)`
Applies defaults for `Port`, `User`, and `Timeout`, then constructs an `SSHClient` from `cfg`.
### (*SSHClient).Connect
`func (c *SSHClient) Connect(ctx context.Context) error`
Lazily establishes the SSH connection. Authentication is attempted in this order: explicit key file, default keys from `~/.ssh`, then password-based auth. The method also ensures `known_hosts` exists and uses it for host-key verification.
### (*SSHClient).Close
`func (c *SSHClient) Close() error`
Closes the active SSH connection, if any, and clears the cached client pointer.
### (*SSHClient).Run
`func (c *SSHClient) Run(ctx context.Context, cmd string) (stdout, stderr string, exitCode int, err error)`
Runs a command on the remote host, opening a new SSH session after calling `Connect`. When privilege escalation is enabled, the command is wrapped with `sudo`, using either the become password, the SSH password, or passwordless `sudo -n`. The method returns stdout, stderr, an exit code, and honours context cancellation by signalling the session.
### (*SSHClient).RunScript
`func (c *SSHClient) RunScript(ctx context.Context, script string) (stdout, stderr string, exitCode int, err error)`
Wraps `script` in a heredoc passed to `bash` and delegates execution to `Run`.
### (*SSHClient).Upload
`func (c *SSHClient) Upload(ctx context.Context, local io.Reader, remote string, mode fs.FileMode) error`
Reads all content from `local`, creates the remote parent directory, writes the file via `cat >`, applies the requested mode with `chmod`, and handles both normal and `sudo`-mediated uploads.
### (*SSHClient).Download
`func (c *SSHClient) Download(ctx context.Context, remote string) ([]byte, error)`
Downloads a remote file by running `cat` and returning the captured bytes. A non-zero remote exit status is reported as an error.
### (*SSHClient).FileExists
`func (c *SSHClient) FileExists(ctx context.Context, path string) (bool, error)`
Checks remote path existence with `test -e`.
### (*SSHClient).Stat
`func (c *SSHClient) Stat(ctx context.Context, path string) (map[string]any, error)`
Returns a minimal stat map parsed from remote shell output. The current implementation reports boolean `exists` and `isdir` keys.
### (*SSHClient).SetBecome
`func (c *SSHClient) SetBecome(become bool, user, password string)`
Updates the client's privilege-escalation flag and replaces the stored become user and password only when non-empty override values are supplied.

11
ssh.go
View file

@ -9,7 +9,6 @@ import (
"sync"
"time"
coreio "dappco.re/go/core/io"
coreerr "dappco.re/go/core/log"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
@ -104,7 +103,7 @@ func (c *SSHClient) Connect(ctx context.Context) error {
keyPath = joinPath(env("DIR_HOME"), keyPath[1:])
}
if key, err := coreio.Local.Read(keyPath); err == nil {
if key, err := localFS.Read(keyPath); err == nil {
if signer, err := ssh.ParsePrivateKey([]byte(key)); err == nil {
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
@ -119,7 +118,7 @@ func (c *SSHClient) Connect(ctx context.Context) error {
joinPath(home, ".ssh", "id_rsa"),
}
for _, keyPath := range defaultKeys {
if key, err := coreio.Local.Read(keyPath); err == nil {
if key, err := localFS.Read(keyPath); err == nil {
if signer, err := ssh.ParsePrivateKey([]byte(key)); err == nil {
authMethods = append(authMethods, ssh.PublicKeys(signer))
break
@ -154,11 +153,11 @@ func (c *SSHClient) Connect(ctx context.Context) error {
knownHostsPath := joinPath(home, ".ssh", "known_hosts")
// Ensure known_hosts file exists
if !coreio.Local.Exists(knownHostsPath) {
if err := coreio.Local.EnsureDir(pathDir(knownHostsPath)); err != nil {
if !localFS.Exists(knownHostsPath) {
if err := localFS.EnsureDir(pathDir(knownHostsPath)); err != nil {
return coreerr.E("ssh.Connect", "failed to create .ssh dir", err)
}
if err := coreio.Local.Write(knownHostsPath, ""); err != nil {
if err := localFS.Write(knownHostsPath, ""); err != nil {
return coreerr.E("ssh.Connect", "failed to create known_hosts file", err)
}
}

View file

@ -2,12 +2,10 @@ package ansible
import (
"io/fs"
coreio "dappco.re/go/core/io"
)
func readTestFile(path string) ([]byte, error) {
content, err := coreio.Local.Read(path)
content, err := localFS.Read(path)
if err != nil {
return nil, err
}
@ -15,7 +13,7 @@ func readTestFile(path string) ([]byte, error) {
}
func writeTestFile(path string, content []byte, mode fs.FileMode) error {
return coreio.Local.WriteMode(path, string(content), mode)
return localFS.WriteMode(path, string(content), mode)
}
func joinStrings(parts []string, sep string) string {

View file

@ -128,7 +128,7 @@ type Task struct {
raw map[string]any
}
// LoopControl controls loop behavior.
// LoopControl controls loop behaviour.
//
// Example:
//