diff --git a/.agents/skills/deploy/SKILL.md b/.agents/skills/deploy/SKILL.md new file mode 100644 index 0000000..90229b3 --- /dev/null +++ b/.agents/skills/deploy/SKILL.md @@ -0,0 +1,7 @@ +--- +name: deploy +description: Deploy to homelab. Build Docker image, transfer, and restart container. Use for lthn.sh deployments. +--- + +Use the core-agent MCP tools to execute this skill. +Call the appropriate tool: See deployment skill instructions diff --git a/.agents/skills/dispatch/SKILL.md b/.agents/skills/dispatch/SKILL.md new file mode 100644 index 0000000..ade1b5f --- /dev/null +++ b/.agents/skills/dispatch/SKILL.md @@ -0,0 +1,7 @@ +--- +name: dispatch +description: Dispatch a subagent to work on a task in a sandboxed workspace. Use when you need to send work to Gemini, Codex, or Claude agents. +--- + +Use the core-agent MCP tools to execute this skill. +Call the appropriate tool: agentic_dispatch diff --git a/.agents/skills/pipeline/SKILL.md b/.agents/skills/pipeline/SKILL.md new file mode 100644 index 0000000..cf86637 --- /dev/null +++ b/.agents/skills/pipeline/SKILL.md @@ -0,0 +1,7 @@ +--- +name: pipeline +description: Run the review-fix-verify pipeline on code changes. Dispatches reviewer, then fixer, then verifier. +--- + +Use the core-agent MCP tools to execute this skill. +Call the appropriate tool: agentic_dispatch reviewer → wait → agentic_dispatch fixer → wait → verify diff --git a/.agents/skills/recall/SKILL.md b/.agents/skills/recall/SKILL.md new file mode 100644 index 0000000..7d9cfee --- /dev/null +++ b/.agents/skills/recall/SKILL.md @@ -0,0 +1,7 @@ +--- +name: recall +description: Search OpenBrain for memories and context. Use when you need prior session knowledge or architecture context. +--- + +Use the core-agent MCP tools to execute this skill. 
+Call the appropriate tool: brain_recall diff --git a/.agents/skills/remember/SKILL.md b/.agents/skills/remember/SKILL.md new file mode 100644 index 0000000..ce18996 --- /dev/null +++ b/.agents/skills/remember/SKILL.md @@ -0,0 +1,7 @@ +--- +name: remember +description: Save a fact or decision to OpenBrain. Use to persist knowledge across sessions. +--- + +Use the core-agent MCP tools to execute this skill. +Call the appropriate tool: brain_remember diff --git a/.agents/skills/review/SKILL.md b/.agents/skills/review/SKILL.md new file mode 100644 index 0000000..e26dbb0 --- /dev/null +++ b/.agents/skills/review/SKILL.md @@ -0,0 +1,7 @@ +--- +name: review +description: Review completed agent workspace. Show output, git diff, and merge options. Use after an agent completes a task. +--- + +Use the core-agent MCP tools to execute this skill. +Call the appropriate tool: agentic_status + read agent log + git diff diff --git a/.agents/skills/scan/SKILL.md b/.agents/skills/scan/SKILL.md new file mode 100644 index 0000000..1a4c775 --- /dev/null +++ b/.agents/skills/scan/SKILL.md @@ -0,0 +1,7 @@ +--- +name: scan +description: Scan Forge repos for open issues with actionable labels. Use to find work to dispatch. +--- + +Use the core-agent MCP tools to execute this skill. +Call the appropriate tool: agentic_scan diff --git a/.agents/skills/status/SKILL.md b/.agents/skills/status/SKILL.md new file mode 100644 index 0000000..7aadb78 --- /dev/null +++ b/.agents/skills/status/SKILL.md @@ -0,0 +1,7 @@ +--- +name: status +description: Show status of all agent workspaces (running, completed, blocked, failed). Use to check pipeline progress. +--- + +Use the core-agent MCP tools to execute this skill. 
+Call the appropriate tool: agentic_status diff --git a/.agents/skills/sweep/SKILL.md b/.agents/skills/sweep/SKILL.md new file mode 100644 index 0000000..d816dc4 --- /dev/null +++ b/.agents/skills/sweep/SKILL.md @@ -0,0 +1,7 @@ +--- +name: sweep +description: Batch audit across all repos using agent dispatch. Use for ecosystem-wide convention checks. +--- + +Use the core-agent MCP tools to execute this skill. +Call the appropriate tool: agentic_dispatch in a loop across repos diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index f289317..9733e8d 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -44,6 +44,12 @@ }, "description": "CI/CD, deployment, issue tracking, and Coolify integration", "version": "0.1.0" + }, + { + "name": "devops", + "source": "./claude/devops", + "description": "Agent workflow utilities — install binaries, merge workspaces, update deps, clean queues", + "version": "0.1.0" } ] } diff --git a/.codex/agents/fixer.toml b/.codex/agents/fixer.toml new file mode 100644 index 0000000..b93c664 --- /dev/null +++ b/.codex/agents/fixer.toml @@ -0,0 +1,25 @@ +# Review Findings Fixer +# Implements fixes from reviewer findings + +name = "fixer" +description = "Fix code review findings. Takes a list of findings with file:line references and implements the fixes. Creates EXCEPTIONS.md for items that cannot be fixed." +developer_instructions = """ +You are the Review Findings Fixer for the Core ecosystem. + +You receive a list of findings from the reviewer agent. +For each finding: +1. Read the file at the specified line +2. Implement the fix following Core conventions +3. If a fix is impossible (e.g. circular import), add to EXCEPTIONS.md with reason + +After fixing: +- Run go build ./... to verify +- Run go vet ./... to verify +- Run go test ./... if tests exist + +Commit message format: fix(pkg): description of fixes + +Do not add features. Do not refactor beyond the finding. 
Minimal changes only. +""" +model = "gpt-5.4" +sandbox_mode = "workspace-write" diff --git a/.codex/agents/migrator.toml b/.codex/agents/migrator.toml new file mode 100644 index 0000000..521d4ff --- /dev/null +++ b/.codex/agents/migrator.toml @@ -0,0 +1,32 @@ +# Core Primitives Migrator +# Migrates packages from separate deps to Core built-ins + +name = "migrator" +description = "Migrate Go packages to use Core primitives instead of separate go-io/go-log/strings/fmt packages. Use when upgrading a package to the new Core API." +developer_instructions = """ +You are the Core Primitives Migrator for the Core ecosystem. + +Read .core/reference/RFC-025-AGENT-EXPERIENCE.md for the AX spec. +Read .core/reference/*.go for the Core framework API. + +Migration pattern: +- coreio.Local.Read(path) → fs.Read(path) returning core.Result +- coreio.Local.Write(path, s) → fs.Write(path, s) returning core.Result +- coreio.Local.List(path) → fs.List(path) returning core.Result +- coreio.Local.EnsureDir(path) → fs.EnsureDir(path) returning core.Result +- coreio.Local.IsFile(path) → fs.IsFile(path) returning bool +- coreio.Local.Delete(path) → fs.Delete(path) returning core.Result +- coreerr.E("op", "msg", err) → core.E("op", "msg", err) +- log.Error/Info/Warn → core.Error/Info/Warn +- strings.Contains → core.Contains +- strings.Split → core.Split +- strings.TrimSpace → core.Trim +- strings.HasPrefix → core.HasPrefix +- fmt.Sprintf → core.Sprintf +- embed.FS → core.Mount() + core.Embed + +Add AX usage-example comments to all public types and functions. +Build must pass after migration. +""" +model = "gpt-5.4" +sandbox_mode = "workspace-write" diff --git a/.codex/agents/reviewer.toml b/.codex/agents/reviewer.toml new file mode 100644 index 0000000..4a08ea5 --- /dev/null +++ b/.codex/agents/reviewer.toml @@ -0,0 +1,28 @@ +# AX Convention Reviewer +# Audits code against RFC-025 Agent Experience spec + +name = "reviewer" +description = "Audit Go code against AX conventions (RFC-025). 
Use for code review, convention checking, and quality assessment. Read-only — never modifies code." +developer_instructions = """ +You are the AX Convention Reviewer for the Core ecosystem. + +Read .core/reference/RFC-025-AGENT-EXPERIENCE.md for the full spec. +Read .core/reference/*.go for the Core framework API. + +Audit all Go files against these conventions: +1. Predictable names — no abbreviations (Cfg→Config, Srv→Service) +2. Comments as usage examples — show HOW with real values +3. Result pattern — core.Result not (value, error) +4. Error handling — core.E("op", "msg", err) not fmt.Errorf +5. Core string ops — core.Contains/Split/Trim not strings.* +6. Core logging — core.Error/Info/Warn not log.* +7. Core filesystem — core.Fs{} not os.ReadFile +8. UK English — initialise not initialize +9. Import aliasing — stdlib io as goio +10. Compile-time assertions — var _ Interface = (*Impl)(nil) + +Report findings with severity (critical/high/medium/low) and file:line. +Group by package. Do NOT fix — report only. 
+""" +model = "gpt-5.4" +sandbox_mode = "read-only" diff --git a/.codex/config.toml b/.codex/config.toml new file mode 100644 index 0000000..38e3771 --- /dev/null +++ b/.codex/config.toml @@ -0,0 +1,69 @@ +# Core Agent — Codex Configuration +# Shared between CLI and IDE extension + +model = "gpt-5.4" +model_reasoning_effort = "high" +approval_policy = "on-request" +sandbox_mode = "workspace-write" +personality = "pragmatic" + +# Default to LEM when available +# oss_provider = "ollama" + +[profiles.review] +model = "gpt-5.4" +model_reasoning_effort = "extra-high" +approval_policy = "never" +sandbox_mode = "read-only" + +[profiles.quick] +model = "gpt-5.4" +model_reasoning_effort = "low" +approval_policy = "never" + +[profiles.implement] +model = "gpt-5.4" +model_reasoning_effort = "high" +approval_policy = "never" +sandbox_mode = "workspace-write" + +[profiles.lem] +model = "lem-4b" +model_provider = "ollama" +model_reasoning_effort = "high" +approval_policy = "never" +sandbox_mode = "workspace-write" + +# Core Agent MCP Server +[mcp_servers.core-agent] +command = "core-agent" +args = ["mcp"] +required = true +startup_timeout_sec = 15 +tool_timeout_sec = 120 + +[mcp_servers.core-agent.env] +FORGE_TOKEN = "${FORGE_TOKEN}" +CORE_BRAIN_KEY = "${CORE_BRAIN_KEY}" +MONITOR_INTERVAL = "15s" + +# Local model providers +[model_providers.ollama] +name = "Ollama" +base_url = "http://127.0.0.1:11434/v1" + +[model_providers.lmstudio] +name = "LM Studio" +base_url = "http://127.0.0.1:1234/v1" + +# Agent configuration +[agents] +max_threads = 4 +max_depth = 1 +job_max_runtime_seconds = 600 + +# Features +[features] +multi_agent = true +shell_snapshot = true +undo = true diff --git a/.codex/rules/core-agent.rules b/.codex/rules/core-agent.rules new file mode 100644 index 0000000..ea16b44 --- /dev/null +++ b/.codex/rules/core-agent.rules @@ -0,0 +1,67 @@ +# Core Agent — Codex Rules +# Controls which commands can run outside the sandbox + +# Go toolchain — always safe +prefix_rule( + 
pattern = ["go", ["build", "test", "vet", "fmt", "mod", "get", "work"]], + decision = "allow", + justification = "Go development tools are safe read/build operations", + match = [["go", "build", "./..."], ["go", "test", "./pkg/agentic"]], + not_match = [["go", "run", "main.go"]], +) + +# Core agent binary +prefix_rule( + pattern = ["core-agent", ["mcp", "--version"]], + decision = "allow", + justification = "Core agent MCP server and version check", +) + +# Git read operations +prefix_rule( + pattern = ["git", ["status", "log", "diff", "branch", "tag", "remote", "fetch", "rev-parse", "ls-remote"]], + decision = "allow", + justification = "Read-only git operations are safe", +) + +# Git write — prompt for approval +prefix_rule( + pattern = ["git", ["add", "commit", "merge", "rebase", "stash"]], + decision = "prompt", + justification = "Git write operations need human approval", +) + +# Git push — forbidden (use PR workflow) +prefix_rule( + pattern = ["git", "push"], + decision = "forbidden", + justification = "Never push directly — use PR workflow via agentic_create_pr", +) + +# Git destructive — forbidden +prefix_rule( + pattern = ["git", ["reset", "clean"], "--force"], + decision = "forbidden", + justification = "Destructive git operations are never allowed", +) + +# Curl — prompt (network access) +prefix_rule( + pattern = ["curl"], + decision = "prompt", + justification = "Network requests need approval", +) + +# SSH — forbidden +prefix_rule( + pattern = ["ssh"], + decision = "forbidden", + justification = "Direct SSH is forbidden — use Ansible via deployment skills", +) + +# rm -rf — forbidden +prefix_rule( + pattern = ["rm", "-rf"], + decision = "forbidden", + justification = "Recursive force delete is never allowed", +) diff --git a/.gitignore b/.gitignore index 2365340..cdc6f76 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,4 @@ .idea/ +.vscode/ +*.log .core/ -docker/.env -ui/node_modules -# Compiled binaries -core-agent -mcp -*.exe diff --git 
a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..a95169b --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,76 @@ +# AGENTS.md — Core Agent + +This file provides guidance to Codex when working with code in this repository. + +## Project Overview + +Core Agent (`dappco.re/go/agent`) is the agent orchestration platform for the Core ecosystem. It provides an MCP server binary (`core-agent`) with tools for dispatching subagents, workspace management, cross-agent messaging, OpenBrain integration, and monitoring. + +## Architecture + +``` +cmd/main.go — Binary entry point, Core CLI (no cobra) +pkg/agentic/ — Dispatch, workspace prep, status, queue, plans, PRs, epics +pkg/brain/ — OpenBrain knowledge store (direct HTTP + IDE bridge) +pkg/monitor/ — Background monitoring, harvest, sync +pkg/lib/ — Embedded prompts, tasks, flows, personas, workspace templates +pkg/setup/ — Project detection, config generation, scaffolding +``` + +## Conventions + +This project follows the **AX (Agent Experience)** design principles from RFC-025. + +### Code Style +- **UK English**: colour, organisation, initialise (never American spellings) +- **Errors**: `core.E("operation", "message", err)` — never `fmt.Errorf` +- **Logging**: `core.Error/Info/Warn/Debug` — never `log.*` or `fmt.Print*` +- **Filesystem**: `core.Fs{}` with `Result` returns — never `os.ReadFile/WriteFile` +- **Strings**: `core.Contains/Split/Trim/HasPrefix/Sprintf` — never `strings.*` or `fmt.Sprintf` +- **Returns**: `core.Result{Value, OK}` — never `(value, error)` pairs +- **Comments**: Usage examples showing HOW with real values, not descriptions +- **Names**: Predictable, unabbreviated (Config not Cfg, Service not Srv) +- **Imports**: stdlib `io` aliased as `goio` +- **Interface checks**: `var _ Interface = (*Impl)(nil)` compile-time assertions + +### Build & Test +```bash +go build ./... +go test ./... +go vet ./... 
+``` + +### Branch Strategy +- Work on `dev` branch, never push to `main` directly +- PRs required for `main` — Codex review gate +- Commit format: `type(scope): description` +- Co-author: `Co-Authored-By: Virgil ` + +### Dependencies +- Only `dappco.re/go/core` for primitives (fs, errors, logging, strings) +- Domain packages: `process`, `ws`, `mcp` for actual services +- No `go-io`, `go-log`, `cli` — Core provides these natively +- Use `go get -u ./...` for dependency updates, never manual go.mod edits + +## MCP Tools + +The binary exposes these MCP tools when run as `core-agent mcp`: + +| Tool | Purpose | +|------|---------| +| `agentic_dispatch` | Dispatch subagent to sandboxed workspace | +| `agentic_status` | List workspace statuses | +| `agentic_resume` | Resume blocked/failed workspace | +| `agentic_prep_workspace` | Prepare workspace without dispatching | +| `agentic_create_pr` | Create PR from workspace | +| `agentic_list_prs` | List PRs across repos | +| `agentic_create_epic` | Create epic with child issues | +| `agentic_scan` | Scan Forge for actionable issues | +| `agentic_plan_*` | Plan CRUD (create, read, update, delete, list) | +| `brain_recall` | Semantic search OpenBrain | +| `brain_remember` | Store to OpenBrain | +| `brain_forget` | Remove from OpenBrain | +| `agent_send` | Send message to another agent | +| `agent_inbox` | Read inbox messages | +| `metrics_record` | Record metrics event | +| `metrics_query` | Query metrics | diff --git a/Makefile b/Makefile index 6460019..4340b5f 100644 --- a/Makefile +++ b/Makefile @@ -1,50 +1,36 @@ -# Host UK Developer Workspace -# Run `make setup` to bootstrap your environment -CORE_REPO := github.com/host-uk/core -CORE_VERSION := latest -INSTALL_DIR := $(HOME)/.local/bin +# ── core-agent binary ────────────────────────────────── -.PHONY: all setup install-deps install-go install-core doctor clean help +BINARY_NAME=core-agent +CMD_PATH=./cmd/core-agent +MODULE_PATH=dappco.re/go/agent -all: help +# Default 
LDFLAGS to empty +LDFLAGS = "" -help: - @echo "Host UK Developer Workspace" - @echo "" - @echo "Usage:" - @echo " make setup Full setup (deps + core + clone repos)" - @echo " make install-deps Install system dependencies (go, gh, etc)" - @echo " make install-core Build and install core CLI" - @echo " make doctor Check environment health" - @echo " make clone Clone all repos into packages/" - @echo " make clean Remove built artifacts" - @echo "" - @echo "Quick start:" - @echo " make setup" +# If VERSION is set, inject into binary +ifdef VERSION + LDFLAGS = -ldflags "-X '$(MODULE_PATH).version=$(VERSION)'" +endif -setup: install-deps install-core doctor clone - @echo "" - @echo "Setup complete! Run 'core health' to verify." +.PHONY: build install agent-dev test coverage -install-deps: - @echo "Installing dependencies..." - @./scripts/install-deps.sh +build: + @echo "Building $(BINARY_NAME)..." + @go build $(LDFLAGS) -o $(BINARY_NAME) $(CMD_PATH) -install-go: - @echo "Installing Go..." - @./scripts/install-go.sh +install: + @echo "Installing $(BINARY_NAME)..." + @go install $(LDFLAGS) $(CMD_PATH) -install-core: - @echo "Installing core CLI..." - @./scripts/install-core.sh +agent-dev: build + @./$(BINARY_NAME) version -doctor: - @core doctor || echo "Run 'make install-core' first if core is not found" +test: + @echo "Running tests..." + @go test ./... -clone: - @core setup || echo "Run 'make install-core' first if core is not found" - -clean: - @rm -rf ./build - @echo "Cleaned build artifacts" +coverage: + @echo "Generating coverage report..." + @go test -coverprofile=coverage.out ./... 
+ @echo "Coverage: coverage.out" diff --git a/claude/core/.claude-plugin/plugin.json b/claude/core/.claude-plugin/plugin.json index 0516bc1..4c6015b 100644 --- a/claude/core/.claude-plugin/plugin.json +++ b/claude/core/.claude-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "core", "description": "Core agent platform — dispatch (local + remote), verify+merge, CodeRabbit/Codex review queue, GitHub mirror, cross-agent messaging, OpenBrain integration, inbox notifications", - "version": "0.14.0", + "version": "0.15.0", "author": { "name": "Lethean Community", "email": "hello@lethean.io" diff --git a/claude/devops/.claude-plugin/plugin.json b/claude/devops/.claude-plugin/plugin.json new file mode 100644 index 0000000..226c00b --- /dev/null +++ b/claude/devops/.claude-plugin/plugin.json @@ -0,0 +1,9 @@ +{ + "name": "devops", + "version": "0.2.0", + "description": "DevOps utilities for the Core ecosystem — build, install, deploy.", + "author": { + "name": "Lethean", + "email": "virgil@lethean.io" + } +} diff --git a/claude/devops/agents/agent-task-clean-workspaces.md b/claude/devops/agents/agent-task-clean-workspaces.md new file mode 100644 index 0000000..0999c8d --- /dev/null +++ b/claude/devops/agents/agent-task-clean-workspaces.md @@ -0,0 +1,34 @@ +--- +name: agent-task-clean-workspaces +description: Removes completed/failed/blocked agent workspaces. Use when workspaces are piling up, the user asks to "clean workspaces", or before starting a fresh sweep. +tools: Bash +model: haiku +color: green +--- + +Clean stale agent workspaces using the core-agent CLI. + +## Steps + +1. List current workspaces: +```bash +core-agent workspace/list +``` + +2. Clean based on context: +```bash +# Remove all non-running (default) +core-agent workspace/clean all + +# Or specific status +core-agent workspace/clean completed +core-agent workspace/clean failed +core-agent workspace/clean blocked +``` + +3. Report what was removed. 
+ +## Rules + +- NEVER remove workspaces with status "running" +- Report the count and what was removed diff --git a/claude/devops/agents/agent-task-health-check.md b/claude/devops/agents/agent-task-health-check.md new file mode 100644 index 0000000..4aaa145 --- /dev/null +++ b/claude/devops/agents/agent-task-health-check.md @@ -0,0 +1,19 @@ +--- +name: agent-task-health-check +description: Runs a health check on the core-agent system. Use proactively at session start or when something seems off with dispatch, workspaces, or MCP tools. +tools: Bash +model: haiku +color: green +--- + +Quick health check of the core-agent system. + +## Steps + +```bash +core-agent check +core-agent workspace/list +core-agent version +``` + +Report the results concisely. Flag anything that looks wrong. diff --git a/claude/devops/agents/agent-task-install-core-agent.md b/claude/devops/agents/agent-task-install-core-agent.md new file mode 100644 index 0000000..af7e78f --- /dev/null +++ b/claude/devops/agents/agent-task-install-core-agent.md @@ -0,0 +1,34 @@ +--- +name: agent-task-install-core-agent +description: Builds and installs the core-agent binary. Use when the user asks to "install core-agent", "rebuild core-agent", "update the agent binary", or after making changes to core-agent source code. +tools: Bash +model: haiku +color: green +--- + +Build and install the core-agent binary from source. + +## Steps + +1. Install from the core/agent repo directory: + +```bash +cd /Users/snider/Code/core/agent && go install ./cmd/core-agent/ +``` + +2. Verify the binary is installed: + +```bash +which core-agent +``` + +3. Report the result. Tell the user to restart core-agent to pick up the new binary. 
+ +## Rules + +- The entry point is `./cmd/core-agent/main.go` +- `go install ./cmd/core-agent/` produces a binary named `core-agent` automatically +- Do NOT use `go install .`, `go install ./cmd/`, or `go build` with manual `-o` flags +- Do NOT move, copy, or rename binaries +- Do NOT touch `~/go/bin/` or `~/.local/bin/` directly +- If the install fails, report the error — do not attempt alternatives diff --git a/claude/devops/agents/agent-task-merge-workspace.md b/claude/devops/agents/agent-task-merge-workspace.md new file mode 100644 index 0000000..e9b9cc3 --- /dev/null +++ b/claude/devops/agents/agent-task-merge-workspace.md @@ -0,0 +1,51 @@ +--- +name: agent-task-merge-workspace +description: Reviews and merges completed agent workspace changes into the source repo. Use when an agent workspace is completed/ready-for-review and changes need to be applied. +tools: Bash, Read +model: sonnet +color: blue +--- + +Merge a completed agent workspace into the source repo. + +## Steps + +1. Check workspace status: +```bash +cat /Users/snider/Code/.core/workspace/{name}/status.json +``` +Only proceed if status is `completed` or `ready-for-review`. + +2. Show the diff: +```bash +git -C /Users/snider/Code/.core/workspace/{name}/repo diff --stat HEAD +git -C /Users/snider/Code/.core/workspace/{name}/repo diff HEAD +``` + +3. Check for untracked new files (git diff misses these): +```bash +git -C /Users/snider/Code/.core/workspace/{name}/repo ls-files --others --exclude-standard +``` + +4. Present a summary to the user. Ask for confirmation before applying. + +5. Apply changes via patch: +```bash +cd /Users/snider/Code/.core/workspace/{name}/repo && git diff HEAD > /tmp/agent-patch.diff +cd /Users/snider/Code/core/{repo}/ && git apply /tmp/agent-patch.diff +``` + +6. Copy any new untracked files manually. + +7. Verify build: +```bash +cd /Users/snider/Code/core/{repo}/ && go build ./... 
+``` + +## Rules + +- Always show the diff BEFORE applying +- Always check for untracked files (new files created by agent) +- Always verify the build AFTER applying +- Never commit — the user commits when ready +- If the patch fails, show the conflict and stop diff --git a/claude/devops/agents/agent-task-repair-core-agent.md b/claude/devops/agents/agent-task-repair-core-agent.md new file mode 100644 index 0000000..f2c2e6f --- /dev/null +++ b/claude/devops/agents/agent-task-repair-core-agent.md @@ -0,0 +1,53 @@ +--- +name: agent-task-repair-core-agent +description: Diagnoses and repairs core-agent when MCP tools fail, dispatch breaks, or the binary is stale. Use when something isn't working with the agent system. +tools: Bash, Read +model: haiku +color: red +--- + +Diagnose and fix core-agent issues. + +## Diagnosis Steps (run in order, stop at first failure) + +1. Does it compile? +```bash +cd /Users/snider/Code/core/agent && go build ./cmd/core-agent/ +``` + +2. Health check: +```bash +core-agent check +``` + +3. Is a stale process running? +```bash +ps aux | grep core-agent | grep -v grep +``` + +4. Are workspaces clean? +```bash +core-agent workspace/list +``` + +5. Is agents.yaml readable? 
+```bash +cat /Users/snider/Code/.core/agents.yaml +``` + +## Common Fixes + +| Symptom | Fix | +|---------|-----| +| MCP tools not found | User needs to restart core-agent | +| Dispatch always queued | Check concurrency in agents.yaml | +| Workspaces not prepping | Check template: `ls pkg/lib/workspace/default/` | +| go.work missing | Rebuild — template was updated | +| Codex can't find core.Env | Core dep too old — needs update-deps | + +## Rules + +- Do NOT run `go install` — tell the user to do it +- Do NOT kill processes without asking +- Do NOT delete workspaces without asking +- Report what's wrong, suggest the fix, let the user decide diff --git a/claude/devops/skills/build-prompt/SKILL.md b/claude/devops/skills/build-prompt/SKILL.md new file mode 100644 index 0000000..0fead0d --- /dev/null +++ b/claude/devops/skills/build-prompt/SKILL.md @@ -0,0 +1,20 @@ +--- +name: build-prompt +description: This skill should be used when the user asks to "build prompt", "show prompt", "preview agent prompt", "what would codex see", or needs to preview the prompt that would be sent to a dispatched agent without actually cloning or dispatching. +argument-hint: [--task="..."] [--persona=...] [--org=core] +allowed-tools: ["Bash"] +--- + +# Build Agent Prompt + +Preview the full prompt that would be sent to a dispatched agent. Shows task, repo info, workflow, brain recall, consumers, git log, and constraints — without cloning or dispatching. 
+ +```bash +core-agent prompt --task="description" [--persona=code/go] [--org=core] +``` + +Example: +```bash +core-agent prompt go-io --task="AX audit" +core-agent prompt agent --task="Fix monitor package" --persona=code/go +``` diff --git a/claude/devops/skills/issue-comment/SKILL.md b/claude/devops/skills/issue-comment/SKILL.md new file mode 100644 index 0000000..83ec3d4 --- /dev/null +++ b/claude/devops/skills/issue-comment/SKILL.md @@ -0,0 +1,19 @@ +--- +name: issue-comment +description: This skill should be used when the user asks to "comment on issue", "add comment", "reply to issue", or needs to post a comment on a Forge issue. +argument-hint: --number=N --body="comment text" [--org=core] +allowed-tools: ["Bash"] +--- + +# Comment on Forge Issue + +Post a comment on a Forge issue. + +```bash +core-agent issue/comment --number=N --body="comment text" [--org=core] +``` + +Example: +```bash +core-agent issue/comment go --number=16 --body="Fixed in v0.6.0" +``` diff --git a/claude/devops/skills/issue-get/SKILL.md b/claude/devops/skills/issue-get/SKILL.md new file mode 100644 index 0000000..cda481a --- /dev/null +++ b/claude/devops/skills/issue-get/SKILL.md @@ -0,0 +1,20 @@ +--- +name: issue-get +description: This skill should be used when the user asks to "get issue", "show issue", "read issue", "fetch issue", or needs to view a specific Forge issue by number. +argument-hint: --number=N [--org=core] +allowed-tools: ["Bash"] +--- + +# Get Forge Issue + +Fetch and display a Forge issue by number. 
+ +```bash +core-agent issue/get --number=N [--org=core] +``` + +Example: +```bash +core-agent issue/get go --number=16 +core-agent issue/get agent --number=5 --org=core +``` diff --git a/claude/devops/skills/issue-list/SKILL.md b/claude/devops/skills/issue-list/SKILL.md new file mode 100644 index 0000000..19a97a0 --- /dev/null +++ b/claude/devops/skills/issue-list/SKILL.md @@ -0,0 +1,20 @@ +--- +name: issue-list +description: This skill should be used when the user asks to "list issues", "show issues", "what issues are open", or needs to see issues for a Forge repo. +argument-hint: [--org=core] +allowed-tools: ["Bash"] +--- + +# List Forge Issues + +List all issues for a Forge repository. + +```bash +core-agent issue/list [--org=core] +``` + +Example: +```bash +core-agent issue/list go +core-agent issue/list agent +``` diff --git a/claude/devops/skills/pr-get/SKILL.md b/claude/devops/skills/pr-get/SKILL.md new file mode 100644 index 0000000..426a60d --- /dev/null +++ b/claude/devops/skills/pr-get/SKILL.md @@ -0,0 +1,19 @@ +--- +name: pr-get +description: This skill should be used when the user asks to "get PR", "show PR", "read pull request", "fetch PR", or needs to view a specific Forge pull request by number. +argument-hint: --number=N [--org=core] +allowed-tools: ["Bash"] +--- + +# Get Forge Pull Request + +Fetch and display a Forge PR by number. Shows state, branch, mergeability. + +```bash +core-agent pr/get --number=N [--org=core] +``` + +Example: +```bash +core-agent pr/get go --number=22 +``` diff --git a/claude/devops/skills/pr-list/SKILL.md b/claude/devops/skills/pr-list/SKILL.md new file mode 100644 index 0000000..9271d03 --- /dev/null +++ b/claude/devops/skills/pr-list/SKILL.md @@ -0,0 +1,20 @@ +--- +name: pr-list +description: This skill should be used when the user asks to "list PRs", "show pull requests", "what PRs are open", "pending PRs", or needs to see pull requests for a Forge repo. 
+argument-hint: [--org=core] +allowed-tools: ["Bash"] +--- + +# List Forge Pull Requests + +List all pull requests for a Forge repository. Shows state, branches, title. + +```bash +core-agent pr/list [--org=core] +``` + +Example: +```bash +core-agent pr/list go +core-agent pr/list agent +``` diff --git a/claude/devops/skills/pr-merge/SKILL.md b/claude/devops/skills/pr-merge/SKILL.md new file mode 100644 index 0000000..119b988 --- /dev/null +++ b/claude/devops/skills/pr-merge/SKILL.md @@ -0,0 +1,26 @@ +--- +name: pr-merge +description: This skill should be used when the user asks to "merge PR", "merge pull request", "accept PR", or needs to merge a Forge PR. Supports merge, rebase, and squash methods. +argument-hint: --number=N [--method=merge|rebase|squash] [--org=core] +allowed-tools: ["Bash"] +--- + +# Merge Forge Pull Request + +Merge a PR on Forge. Default method is merge. + +```bash +core-agent pr/merge --number=N [--method=merge|rebase|squash] [--org=core] +``` + +Example: +```bash +core-agent pr/merge go --number=22 +core-agent pr/merge go-forge --number=7 --method=squash +``` + +## Important + +- Always confirm with the user before merging +- Check PR status with `pr/get` first if unsure about mergeability +- The merge happens on Forge, not locally diff --git a/claude/devops/skills/repo-get/SKILL.md b/claude/devops/skills/repo-get/SKILL.md new file mode 100644 index 0000000..7447534 --- /dev/null +++ b/claude/devops/skills/repo-get/SKILL.md @@ -0,0 +1,20 @@ +--- +name: repo-get +description: This skill should be used when the user asks to "get repo info", "show repo", "repo details", or needs to see details about a specific Forge repository including default branch, visibility, and archive status. +argument-hint: [--org=core] +allowed-tools: ["Bash"] +--- + +# Get Forge Repository Info + +Fetch and display repository details from Forge. 
+ +```bash +core-agent repo/get [--org=core] +``` + +Example: +```bash +core-agent repo/get go +core-agent repo/get agent +``` diff --git a/claude/devops/skills/repo-list/SKILL.md b/claude/devops/skills/repo-list/SKILL.md new file mode 100644 index 0000000..e0bd0db --- /dev/null +++ b/claude/devops/skills/repo-list/SKILL.md @@ -0,0 +1,20 @@ +--- +name: repo-list +description: This skill should be used when the user asks to "list repos", "show repos", "what repos exist", "how many repos", or needs to see all repositories in a Forge organisation. +argument-hint: [--org=core] +allowed-tools: ["Bash"] +--- + +# List Forge Repositories + +List all repositories in a Forge organisation. + +```bash +core-agent repo/list [--org=core] +``` + +Example: +```bash +core-agent repo/list +core-agent repo/list --org=lthn +``` diff --git a/claude/devops/skills/update-deps/SKILL.md b/claude/devops/skills/update-deps/SKILL.md new file mode 100644 index 0000000..e2ea49c --- /dev/null +++ b/claude/devops/skills/update-deps/SKILL.md @@ -0,0 +1,55 @@ +--- +name: update-deps +description: This skill should be used when the user asks to "update deps", "bump core", "update go.mod", "upgrade dependencies", or needs to update dappco.re/go/core or other Go module dependencies in a core ecosystem repo. Uses go get properly — never manual go.mod editing. +argument-hint: [repo-name] [module@version] +allowed-tools: ["Bash"] +--- + +# Update Go Module Dependencies + +Properly update dependencies in a Core ecosystem Go module. + +## Steps + +1. Determine the repo. If an argument is given, use it. Otherwise use the current working directory. + ``` + /Users/snider/Code/core// + ``` + +2. Check current dependency versions: + ```bash + grep 'dappco.re' go.mod + ``` + +3. Update the dependency using `go get`. 
Examples: + ```bash + # Update core to latest + GONOSUMDB='dappco.re/*' GONOSUMCHECK='dappco.re/*' GOPROXY=direct go get dappco.re/go/core@latest + + # Update to specific version + GONOSUMDB='dappco.re/*' GONOSUMCHECK='dappco.re/*' GOPROXY=direct go get dappco.re/go/core@v0.6.0 + + # Update all dappco.re deps + GONOSUMDB='dappco.re/*' GONOSUMCHECK='dappco.re/*' GOPROXY=direct go get -u dappco.re/... + ``` + +4. Tidy: + ```bash + go mod tidy + ``` + +5. Verify: + ```bash + go build ./... + ``` + +6. Report what changed in go.mod. + +## Important + +- ALWAYS use `go get` — NEVER manually edit go.mod +- ALWAYS set `GONOSUMDB` and `GONOSUMCHECK` for dappco.re modules +- ALWAYS set `GOPROXY=direct` to bypass proxy cache for private modules +- ALWAYS run `go mod tidy` after updating +- ALWAYS verify with `go build ./...` +- If a version doesn't resolve, check if the tag has been pushed to GitHub (dappco.re vanity imports resolve through GitHub) diff --git a/claude/devops/skills/workspace-clean/SKILL.md b/claude/devops/skills/workspace-clean/SKILL.md new file mode 100644 index 0000000..2b8eba5 --- /dev/null +++ b/claude/devops/skills/workspace-clean/SKILL.md @@ -0,0 +1,24 @@ +--- +name: workspace-clean +description: This skill should be used when the user asks to "clean workspaces", "clean up agents", "remove stale workspaces", "nuke completed", or needs to remove finished/failed/blocked agent workspaces. +argument-hint: [all|completed|failed|blocked] +allowed-tools: ["Bash"] +--- + +# Clean Agent Workspaces + +Remove stale agent workspaces. Never removes running workspaces. 
+ +```bash +# Remove all non-running workspaces +core-agent workspace/clean all + +# Remove only completed/merged +core-agent workspace/clean completed + +# Remove only failed +core-agent workspace/clean failed + +# Remove only blocked +core-agent workspace/clean blocked +``` diff --git a/claude/devops/skills/workspace-list/SKILL.md b/claude/devops/skills/workspace-list/SKILL.md new file mode 100644 index 0000000..fbb3900 --- /dev/null +++ b/claude/devops/skills/workspace-list/SKILL.md @@ -0,0 +1,16 @@ +--- +name: workspace-list +description: This skill should be used when the user asks to "list workspaces", "show agents", "what's running", "workspace status", "active agents", or wants to see the current state of all agent workspaces. +argument-hint: (no arguments needed) +allowed-tools: ["Bash"] +--- + +# List Agent Workspaces + +Show all agent workspaces with their status, agent type, and repo. + +```bash +core-agent workspace/list +``` + +Output shows: status, agent, repo, workspace name. Statuses: running, completed, failed, blocked, merged, queued. diff --git a/cmd/core-agent/forge.go b/cmd/core-agent/forge.go new file mode 100644 index 0000000..b0b8ce6 --- /dev/null +++ b/cmd/core-agent/forge.go @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package main + +import ( + "context" + "strconv" + + "dappco.re/go/core" + "dappco.re/go/core/forge" + forge_types "dappco.re/go/core/forge/types" +) + +// newForgeClient creates a Forge client from env config. +func newForgeClient() *forge.Forge { + url := core.Env("FORGE_URL") + if url == "" { + url = "https://forge.lthn.ai" + } + token := core.Env("FORGE_TOKEN") + if token == "" { + token = core.Env("GITEA_TOKEN") + } + return forge.NewForge(url, token) +} + +// parseArgs extracts org and repo from opts. First positional arg is repo, --org flag defaults to "core". 
+func parseArgs(opts core.Options) (org, repo string, num int64) { + org = opts.String("org") + if org == "" { + org = "core" + } + repo = opts.String("_arg") + if v := opts.String("number"); v != "" { + num, _ = strconv.ParseInt(v, 10, 64) + } + return +} + +func fmtIndex(n int64) string { return strconv.FormatInt(n, 10) } + +func registerForgeCommands(c *core.Core) { + ctx := context.Background() + + // --- Issues --- + + c.Command("issue/get", core.Command{ + Description: "Get a Forge issue", + Action: func(opts core.Options) core.Result { + org, repo, num := parseArgs(opts) + if repo == "" || num == 0 { + core.Print(nil, "usage: core-agent issue get --number=N [--org=core]") + return core.Result{OK: false} + } + + f := newForgeClient() + issue, err := f.Issues.Get(ctx, forge.Params{"owner": org, "repo": repo, "index": fmtIndex(num)}) + if err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + core.Print(nil, "#%d %s", issue.Index, issue.Title) + core.Print(nil, " state: %s", issue.State) + core.Print(nil, " url: %s", issue.HTMLURL) + if issue.Body != "" { + core.Print(nil, "") + core.Print(nil, "%s", issue.Body) + } + return core.Result{OK: true} + }, + }) + + c.Command("issue/list", core.Command{ + Description: "List Forge issues for a repo", + Action: func(opts core.Options) core.Result { + org, repo, _ := parseArgs(opts) + if repo == "" { + core.Print(nil, "usage: core-agent issue list [--org=core]") + return core.Result{OK: false} + } + + f := newForgeClient() + issues, err := f.Issues.ListAll(ctx, forge.Params{"owner": org, "repo": repo}) + if err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + for _, issue := range issues { + core.Print(nil, " #%-4d %-6s %s", issue.Index, issue.State, issue.Title) + } + if len(issues) == 0 { + core.Print(nil, " no issues") + } + return core.Result{OK: true} + }, + }) + + c.Command("issue/comment", core.Command{ + Description: 
"Comment on a Forge issue", + Action: func(opts core.Options) core.Result { + org, repo, num := parseArgs(opts) + body := opts.String("body") + if repo == "" || num == 0 || body == "" { + core.Print(nil, "usage: core-agent issue comment --number=N --body=\"text\" [--org=core]") + return core.Result{OK: false} + } + + f := newForgeClient() + comment, err := f.Issues.CreateComment(ctx, org, repo, num, body) + if err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + core.Print(nil, "comment #%d created on %s/%s#%d", comment.ID, org, repo, num) + return core.Result{OK: true} + }, + }) + + c.Command("issue/create", core.Command{ + Description: "Create a Forge issue", + Action: func(opts core.Options) core.Result { + org, repo, _ := parseArgs(opts) + title := opts.String("title") + body := opts.String("body") + labels := opts.String("labels") + milestone := opts.String("milestone") + assignee := opts.String("assignee") + ref := opts.String("ref") + if repo == "" || title == "" { + core.Print(nil, "usage: core-agent issue create --title=\"...\" [--body=\"...\"] [--labels=\"agentic,bug\"] [--milestone=\"v0.2.0\"] [--assignee=virgil] [--ref=dev] [--org=core]") + return core.Result{OK: false} + } + + createOpts := &forge_types.CreateIssueOption{ + Title: title, + Body: body, + Ref: ref, + } + + // Resolve milestone name to ID + if milestone != "" { + f := newForgeClient() + milestones, err := f.Milestones.ListAll(ctx, forge.Params{"owner": org, "repo": repo}) + if err == nil { + for _, m := range milestones { + if m.Title == milestone { + createOpts.Milestone = m.ID + break + } + } + } + } + + // Set assignee + if assignee != "" { + createOpts.Assignees = []string{assignee} + } + + // Resolve label names to IDs if provided + if labels != "" { + f := newForgeClient() + labelNames := core.Split(labels, ",") + allLabels, err := f.Labels.ListRepoLabels(ctx, org, repo) + if err == nil { + for _, name := range labelNames { + name = 
core.Trim(name) + for _, l := range allLabels { + if l.Name == name { + createOpts.Labels = append(createOpts.Labels, l.ID) + break + } + } + } + } + } + + f := newForgeClient() + issue, err := f.Issues.Create(ctx, forge.Params{"owner": org, "repo": repo}, createOpts) + if err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + core.Print(nil, "#%d %s", issue.Index, issue.Title) + core.Print(nil, " url: %s", issue.HTMLURL) + return core.Result{Value: issue.Index, OK: true} + }, + }) + + // --- Pull Requests --- + + c.Command("pr/get", core.Command{ + Description: "Get a Forge PR", + Action: func(opts core.Options) core.Result { + org, repo, num := parseArgs(opts) + if repo == "" || num == 0 { + core.Print(nil, "usage: core-agent pr get --number=N [--org=core]") + return core.Result{OK: false} + } + + f := newForgeClient() + pr, err := f.Pulls.Get(ctx, forge.Params{"owner": org, "repo": repo, "index": fmtIndex(num)}) + if err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + core.Print(nil, "#%d %s", pr.Index, pr.Title) + core.Print(nil, " state: %s", pr.State) + core.Print(nil, " head: %s", pr.Head.Ref) + core.Print(nil, " base: %s", pr.Base.Ref) + core.Print(nil, " mergeable: %v", pr.Mergeable) + core.Print(nil, " url: %s", pr.HTMLURL) + if pr.Body != "" { + core.Print(nil, "") + core.Print(nil, "%s", pr.Body) + } + return core.Result{OK: true} + }, + }) + + c.Command("pr/list", core.Command{ + Description: "List Forge PRs for a repo", + Action: func(opts core.Options) core.Result { + org, repo, _ := parseArgs(opts) + if repo == "" { + core.Print(nil, "usage: core-agent pr list [--org=core]") + return core.Result{OK: false} + } + + f := newForgeClient() + prs, err := f.Pulls.ListAll(ctx, forge.Params{"owner": org, "repo": repo}) + if err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + for _, pr := range prs { + 
core.Print(nil, " #%-4d %-6s %s → %s %s", pr.Index, pr.State, pr.Head.Ref, pr.Base.Ref, pr.Title) + } + if len(prs) == 0 { + core.Print(nil, " no PRs") + } + return core.Result{OK: true} + }, + }) + + c.Command("pr/merge", core.Command{ + Description: "Merge a Forge PR", + Action: func(opts core.Options) core.Result { + org, repo, num := parseArgs(opts) + method := opts.String("method") + if method == "" { + method = "merge" + } + if repo == "" || num == 0 { + core.Print(nil, "usage: core-agent pr merge --number=N [--method=merge|rebase|squash] [--org=core]") + return core.Result{OK: false} + } + + f := newForgeClient() + if err := f.Pulls.Merge(ctx, org, repo, num, method); err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + core.Print(nil, "merged %s/%s#%d via %s", org, repo, num, method) + return core.Result{OK: true} + }, + }) + + // --- Repositories --- + + c.Command("repo/get", core.Command{ + Description: "Get Forge repo info", + Action: func(opts core.Options) core.Result { + org, repo, _ := parseArgs(opts) + if repo == "" { + core.Print(nil, "usage: core-agent repo get [--org=core]") + return core.Result{OK: false} + } + + f := newForgeClient() + r, err := f.Repos.Get(ctx, forge.Params{"owner": org, "repo": repo}) + if err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + core.Print(nil, "%s/%s", r.Owner.UserName, r.Name) + core.Print(nil, " description: %s", r.Description) + core.Print(nil, " default: %s", r.DefaultBranch) + core.Print(nil, " private: %v", r.Private) + core.Print(nil, " archived: %v", r.Archived) + core.Print(nil, " url: %s", r.HTMLURL) + return core.Result{OK: true} + }, + }) + + c.Command("repo/list", core.Command{ + Description: "List Forge repos for an org", + Action: func(opts core.Options) core.Result { + org := opts.String("org") + if org == "" { + org = "core" + } + + f := newForgeClient() + repos, err := f.Repos.ListOrgRepos(ctx, org) 
+ if err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + for _, r := range repos { + archived := "" + if r.Archived { + archived = " (archived)" + } + core.Print(nil, " %-30s %s%s", r.Name, r.Description, archived) + } + core.Print(nil, "\n %d repos", len(repos)) + return core.Result{OK: true} + }, + }) +} diff --git a/cmd/core-agent/main.go b/cmd/core-agent/main.go new file mode 100644 index 0000000..9d3ccbc --- /dev/null +++ b/cmd/core-agent/main.go @@ -0,0 +1,501 @@ +package main + +import ( + "context" + "os" + "os/signal" + "strconv" + "syscall" + + "dappco.re/go/core" + "dappco.re/go/core/process" + + "dappco.re/go/agent/pkg/agentic" + "dappco.re/go/agent/pkg/brain" + "dappco.re/go/agent/pkg/lib" + "dappco.re/go/agent/pkg/monitor" + "forge.lthn.ai/core/mcp/pkg/mcp" +) + +func main() { + c := core.New(core.Options{ + {Key: "name", Value: "core-agent"}, + }) + // Version set at build time: go build -ldflags "-X main.version=0.15.0" + if version != "" { + c.App().Version = version + } else { + c.App().Version = "dev" + } + + // version — print version and build info + c.Command("version", core.Command{ + Description: "Print version and build info", + Action: func(opts core.Options) core.Result { + core.Print(nil, "core-agent %s", c.App().Version) + core.Print(nil, " go: %s", core.Env("GO")) + core.Print(nil, " os: %s/%s", core.Env("OS"), core.Env("ARCH")) + core.Print(nil, " home: %s", core.Env("DIR_HOME")) + core.Print(nil, " hostname: %s", core.Env("HOSTNAME")) + core.Print(nil, " pid: %s", core.Env("PID")) + core.Print(nil, " channel: %s", updateChannel()) + return core.Result{OK: true} + }, + }) + + // check — verify workspace, deps, and config are healthy + c.Command("check", core.Command{ + Description: "Verify workspace, deps, and config", + Action: func(opts core.Options) core.Result { + fs := c.Fs() + + core.Print(nil, "core-agent %s health check", c.App().Version) + core.Print(nil, "") + + // Binary 
location + core.Print(nil, " binary: %s", os.Args[0]) + + // Agents config + agentsPath := core.Path("Code", ".core", "agents.yaml") + if fs.IsFile(agentsPath) { + core.Print(nil, " agents: %s (ok)", agentsPath) + } else { + core.Print(nil, " agents: %s (MISSING)", agentsPath) + } + + // Workspace dir + wsRoot := core.Path("Code", ".core", "workspace") + if fs.IsDir(wsRoot) { + r := fs.List(wsRoot) + count := 0 + if r.OK { + count = len(r.Value.([]os.DirEntry)) + } + core.Print(nil, " workspace: %s (%d entries)", wsRoot, count) + } else { + core.Print(nil, " workspace: %s (MISSING)", wsRoot) + } + + // Core dep version + core.Print(nil, " core: dappco.re/go/core@v%s", c.App().Version) + + // Env keys + core.Print(nil, " env keys: %d loaded", len(core.EnvKeys())) + + core.Print(nil, "") + core.Print(nil, "ok") + return core.Result{OK: true} + }, + }) + + // extract — test workspace template extraction + c.Command("extract", core.Command{ + Description: "Extract a workspace template to a directory", + Action: func(opts core.Options) core.Result { + tmpl := opts.String("_arg") + if tmpl == "" { + tmpl = "default" + } + target := opts.String("target") + if target == "" { + target = core.Path("Code", ".core", "workspace", "test-extract") + } + + data := &lib.WorkspaceData{ + Repo: "test-repo", + Branch: "dev", + Task: "test extraction", + Agent: "codex", + } + + core.Print(nil, "extracting template %q to %s", tmpl, target) + if err := lib.ExtractWorkspace(tmpl, target, data); err != nil { + return core.Result{Value: err, OK: false} + } + + // List what was created + fs := &core.Fs{} + r := fs.List(target) + if r.OK { + for _, e := range r.Value.([]os.DirEntry) { + marker := " " + if e.IsDir() { + marker = "/" + } + core.Print(nil, " %s%s", e.Name(), marker) + } + } + + core.Print(nil, "done") + return core.Result{OK: true} + }, + }) + + // --- Forge + Workspace CLI commands --- + registerForgeCommands(c) + registerWorkspaceCommands(c) + // registerUpdateCommand(c) — 
parked until version moves to module root + + // --- CLI commands for feature testing --- + + prep := agentic.NewPrep() + + // prep — test workspace preparation (clone + prompt) + c.Command("prep", core.Command{ + Description: "Prepare a workspace: clone repo, build prompt", + Action: func(opts core.Options) core.Result { + repo := opts.String("_arg") + if repo == "" { + core.Print(nil, "usage: core-agent prep --issue=N|--pr=N|--branch=X --task=\"...\"") + return core.Result{OK: false} + } + + input := agentic.PrepInput{ + Repo: repo, + Org: opts.String("org"), + Task: opts.String("task"), + Template: opts.String("template"), + Persona: opts.String("persona"), + DryRun: opts.Bool("dry-run"), + } + + // Parse identifier from flags + if v := opts.String("issue"); v != "" { + n := 0 + for _, ch := range v { + if ch >= '0' && ch <= '9' { + n = n*10 + int(ch-'0') + } + } + input.Issue = n + } + if v := opts.String("pr"); v != "" { + n := 0 + for _, ch := range v { + if ch >= '0' && ch <= '9' { + n = n*10 + int(ch-'0') + } + } + input.PR = n + } + if v := opts.String("branch"); v != "" { + input.Branch = v + } + if v := opts.String("tag"); v != "" { + input.Tag = v + } + + // Default to branch "dev" if no identifier + if input.Issue == 0 && input.PR == 0 && input.Branch == "" && input.Tag == "" { + input.Branch = "dev" + } + + _, out, err := prep.TestPrepWorkspace(context.Background(), input) + if err != nil { + core.Print(nil, "error: %v", err) + return core.Result{Value: err, OK: false} + } + + core.Print(nil, "workspace: %s", out.WorkspaceDir) + core.Print(nil, "repo: %s", out.RepoDir) + core.Print(nil, "branch: %s", out.Branch) + core.Print(nil, "resumed: %v", out.Resumed) + core.Print(nil, "memories: %d", out.Memories) + core.Print(nil, "consumers: %d", out.Consumers) + if out.Prompt != "" { + core.Print(nil, "") + core.Print(nil, "--- prompt (%d chars) ---", len(out.Prompt)) + core.Print(nil, "%s", out.Prompt) + } + return core.Result{OK: true} + }, + }) + + // 
status — list workspace statuses + c.Command("status", core.Command{ + Description: "List agent workspace statuses", + Action: func(opts core.Options) core.Result { + wsRoot := agentic.WorkspaceRoot() + fsys := c.Fs() + r := fsys.List(wsRoot) + if !r.OK { + core.Print(nil, "no workspaces found at %s", wsRoot) + return core.Result{OK: true} + } + + entries := r.Value.([]os.DirEntry) + if len(entries) == 0 { + core.Print(nil, "no workspaces") + return core.Result{OK: true} + } + + for _, e := range entries { + if !e.IsDir() { + continue + } + statusFile := core.JoinPath(wsRoot, e.Name(), "status.json") + if sr := fsys.Read(statusFile); sr.OK { + core.Print(nil, " %s", e.Name()) + } + } + return core.Result{OK: true} + }, + }) + + // prompt — build and show an agent prompt without cloning + c.Command("prompt", core.Command{ + Description: "Build and display an agent prompt for a repo", + Action: func(opts core.Options) core.Result { + repo := opts.String("_arg") + if repo == "" { + core.Print(nil, "usage: core-agent prompt --task=\"...\"") + return core.Result{OK: false} + } + + org := opts.String("org") + if org == "" { + org = "core" + } + task := opts.String("task") + if task == "" { + task = "Review and report findings" + } + + repoPath := core.JoinPath(core.Env("DIR_HOME"), "Code", org, repo) + + input := agentic.PrepInput{ + Repo: repo, + Org: org, + Task: task, + Template: opts.String("template"), + Persona: opts.String("persona"), + } + + prompt, memories, consumers := prep.TestBuildPrompt(context.Background(), input, "dev", repoPath) + core.Print(nil, "memories: %d", memories) + core.Print(nil, "consumers: %d", consumers) + core.Print(nil, "") + core.Print(nil, "%s", prompt) + return core.Result{OK: true} + }, + }) + + // env — dump all Env keys + c.Command("env", core.Command{ + Description: "Show all core.Env() keys and values", + Action: func(opts core.Options) core.Result { + keys := core.EnvKeys() + for _, k := range keys { + core.Print(nil, " %-15s %s", 
k, core.Env(k)) + } + return core.Result{OK: true} + }, + }) + + // Shared setup — creates MCP service with all subsystems wired + initServices := func() (*mcp.Service, *monitor.Subsystem, error) { + procFactory := process.NewService(process.Options{}) + procResult, err := procFactory(c) + if err != nil { + return nil, nil, core.E("main", "init process service", err) + } + if procSvc, ok := procResult.(*process.Service); ok { + _ = process.SetDefault(procSvc) + } + + mon := monitor.New() + prep := agentic.NewPrep() + prep.SetCompletionNotifier(mon) + + mcpSvc, err := mcp.New(mcp.Options{ + Subsystems: []mcp.Subsystem{brain.NewDirect(), prep, mon}, + }) + if err != nil { + return nil, nil, core.E("main", "create MCP service", err) + } + + mon.SetNotifier(mcpSvc) + prep.StartRunner() + return mcpSvc, mon, nil + } + + // Signal-aware context for clean shutdown + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + // mcp — stdio transport (Claude Code integration) + c.Command("mcp", core.Command{ + Description: "Start the MCP server on stdio", + Action: func(opts core.Options) core.Result { + mcpSvc, mon, err := initServices() + if err != nil { + return core.Result{Value: err, OK: false} + } + mon.Start(ctx) + if err := mcpSvc.Run(ctx); err != nil { + return core.Result{Value: err, OK: false} + } + return core.Result{OK: true} + }, + }) + + // serve — persistent HTTP daemon (Charon, CI, cross-agent) + c.Command("serve", core.Command{ + Description: "Start as a persistent HTTP daemon", + Action: func(opts core.Options) core.Result { + mcpSvc, mon, err := initServices() + if err != nil { + return core.Result{Value: err, OK: false} + } + + addr := core.Env("MCP_HTTP_ADDR") + if addr == "" { + addr = "0.0.0.0:9101" + } + + healthAddr := core.Env("HEALTH_ADDR") + if healthAddr == "" { + healthAddr = "0.0.0.0:9102" + } + + pidFile := core.Path(".core", "core-agent.pid") + + daemon := 
process.NewDaemon(process.DaemonOptions{ + PIDFile: pidFile, + HealthAddr: healthAddr, + Registry: process.DefaultRegistry(), + RegistryEntry: process.DaemonEntry{ + Code: "core", + Daemon: "agent", + Project: "core-agent", + Binary: "core-agent", + }, + }) + + if err := daemon.Start(); err != nil { + return core.Result{Value: core.E("main", "daemon start", err), OK: false} + } + + mon.Start(ctx) + daemon.SetReady(true) + core.Print(os.Stderr, "core-agent serving on %s (health: %s, pid: %s)", addr, healthAddr, pidFile) + + os.Setenv("MCP_HTTP_ADDR", addr) + + if err := mcpSvc.Run(ctx); err != nil { + return core.Result{Value: err, OK: false} + } + return core.Result{OK: true} + }, + }) + + // run task — single task e2e (prep → spawn → wait → done) + c.Command("run/task", core.Command{ + Description: "Run a single task end-to-end", + Action: func(opts core.Options) core.Result { + repo := opts.String("repo") + agent := opts.String("agent") + task := opts.String("task") + issueStr := opts.String("issue") + org := opts.String("org") + + if repo == "" || task == "" { + core.Print(nil, "usage: core-agent run task --repo= --task=\"...\" --agent=codex [--issue=N] [--org=core]") + return core.Result{OK: false} + } + if agent == "" { + agent = "codex" + } + if org == "" { + org = "core" + } + + issue := 0 + if issueStr != "" { + if n, err := strconv.Atoi(issueStr); err == nil { + issue = n + } + } + + procFactory := process.NewService(process.Options{}) + procResult, err := procFactory(c) + if err != nil { + return core.Result{Value: err, OK: false} + } + if procSvc, ok := procResult.(*process.Service); ok { + _ = process.SetDefault(procSvc) + } + + prep := agentic.NewPrep() + + core.Print(os.Stderr, "core-agent run task") + core.Print(os.Stderr, " repo: %s/%s", org, repo) + core.Print(os.Stderr, " agent: %s", agent) + if issue > 0 { + core.Print(os.Stderr, " issue: #%d", issue) + } + core.Print(os.Stderr, " task: %s", task) + core.Print(os.Stderr, "") + + // Dispatch and 
wait + result := prep.DispatchSync(ctx, agentic.DispatchSyncInput{ + Org: org, + Repo: repo, + Agent: agent, + Task: task, + Issue: issue, + }) + + if !result.OK { + core.Print(os.Stderr, "FAILED: %v", result.Error) + return core.Result{Value: result.Error, OK: false} + } + + core.Print(os.Stderr, "DONE: %s", result.Status) + if result.PRURL != "" { + core.Print(os.Stderr, " PR: %s", result.PRURL) + } + return core.Result{OK: true} + }, + }) + + // run orchestrator — standalone queue runner without MCP stdio + c.Command("run/orchestrator", core.Command{ + Description: "Run the queue orchestrator (standalone, no MCP)", + Action: func(opts core.Options) core.Result { + procFactory := process.NewService(process.Options{}) + procResult, err := procFactory(c) + if err != nil { + return core.Result{Value: err, OK: false} + } + if procSvc, ok := procResult.(*process.Service); ok { + _ = process.SetDefault(procSvc) + } + + mon := monitor.New() + prep := agentic.NewPrep() + prep.SetCompletionNotifier(mon) + + mon.Start(ctx) + prep.StartRunner() + + core.Print(os.Stderr, "core-agent orchestrator running (pid %s)", core.Env("PID")) + core.Print(os.Stderr, " workspace: %s", agentic.WorkspaceRoot()) + core.Print(os.Stderr, " watching queue, draining on 30s tick + completion poke") + + // Block until signal + <-ctx.Done() + core.Print(os.Stderr, "orchestrator shutting down") + return core.Result{OK: true} + }, + }) + + // Run CLI — resolves os.Args to command path + r := c.Cli().Run() + if !r.OK { + if err, ok := r.Value.(error); ok { + core.Error(err.Error()) + } + os.Exit(1) + } +} diff --git a/cmd/core-agent/update.go b/cmd/core-agent/update.go new file mode 100644 index 0000000..b5bc983 --- /dev/null +++ b/cmd/core-agent/update.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package main + +// version is set at build time via ldflags: +// +// go build -ldflags "-X 'dappco.re/go/agent.version=0.15.0'" ./cmd/core-agent/ +var version string + +// updateChannel 
returns the channel based on the version string. +func updateChannel() string { + switch { + case version == "" || version == "dev": + return "dev" + case len(version) > 0 && (version[len(version)-1] >= 'a'): + return "prerelease" + default: + return "stable" + } +} + +// TODO: wire go-update UpdateService for self-update command +// Channels: stable → GitHub releases, prerelease → GitHub dev, dev → Forge main +// Parked until version var moves to module root package (dappco.re/go/agent.Version) diff --git a/cmd/core-agent/workspace.go b/cmd/core-agent/workspace.go new file mode 100644 index 0000000..38410ed --- /dev/null +++ b/cmd/core-agent/workspace.go @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package main + +import ( + "os" + + "dappco.re/go/core" + + "dappco.re/go/agent/pkg/agentic" +) + +func registerWorkspaceCommands(c *core.Core) { + + // workspace/list — show all workspaces with status + c.Command("workspace/list", core.Command{ + Description: "List all agent workspaces with status", + Action: func(opts core.Options) core.Result { + wsRoot := agentic.WorkspaceRoot() + fsys := c.Fs() + + r := fsys.List(wsRoot) + if !r.OK { + core.Print(nil, "no workspaces at %s", wsRoot) + return core.Result{OK: true} + } + + entries := r.Value.([]os.DirEntry) + count := 0 + for _, e := range entries { + if !e.IsDir() { + continue + } + statusFile := core.JoinPath(wsRoot, e.Name(), "status.json") + if sr := fsys.Read(statusFile); sr.OK { + // Quick parse for status field + content := sr.Value.(string) + status := extractField(content, "status") + repo := extractField(content, "repo") + agent := extractField(content, "agent") + core.Print(nil, " %-8s %-8s %-10s %s", status, agent, repo, e.Name()) + count++ + } + } + if count == 0 { + core.Print(nil, " no workspaces") + } + return core.Result{OK: true} + }, + }) + + // workspace/clean — remove stale workspaces + c.Command("workspace/clean", core.Command{ + Description: "Remove completed/failed/blocked 
workspaces", + Action: func(opts core.Options) core.Result { + wsRoot := agentic.WorkspaceRoot() + fsys := c.Fs() + filter := opts.String("_arg") + if filter == "" { + filter = "all" + } + + r := fsys.List(wsRoot) + if !r.OK { + core.Print(nil, "no workspaces") + return core.Result{OK: true} + } + + entries := r.Value.([]os.DirEntry) + var toRemove []string + + for _, e := range entries { + if !e.IsDir() { + continue + } + statusFile := core.JoinPath(wsRoot, e.Name(), "status.json") + sr := fsys.Read(statusFile) + if !sr.OK { + continue + } + status := extractField(sr.Value.(string), "status") + + switch filter { + case "all": + if status == "completed" || status == "failed" || status == "blocked" || status == "merged" || status == "ready-for-review" { + toRemove = append(toRemove, e.Name()) + } + case "completed": + if status == "completed" || status == "merged" || status == "ready-for-review" { + toRemove = append(toRemove, e.Name()) + } + case "failed": + if status == "failed" { + toRemove = append(toRemove, e.Name()) + } + case "blocked": + if status == "blocked" { + toRemove = append(toRemove, e.Name()) + } + } + } + + if len(toRemove) == 0 { + core.Print(nil, "nothing to clean") + return core.Result{OK: true} + } + + for _, name := range toRemove { + path := core.JoinPath(wsRoot, name) + fsys.DeleteAll(path) + core.Print(nil, " removed %s", name) + } + core.Print(nil, "\n %d workspaces removed", len(toRemove)) + return core.Result{OK: true} + }, + }) + + // workspace/dispatch — dispatch an agent (CLI wrapper for MCP tool) + c.Command("workspace/dispatch", core.Command{ + Description: "Dispatch an agent to work on a repo task", + Action: func(opts core.Options) core.Result { + repo := opts.String("_arg") + if repo == "" { + core.Print(nil, "usage: core-agent workspace/dispatch --task=\"...\" --issue=N|--pr=N|--branch=X [--agent=codex]") + return core.Result{OK: false} + } + + core.Print(nil, "dispatch via CLI not yet wired — use MCP agentic_dispatch tool") + 
core.Print(nil, "repo: %s, task: %s", repo, opts.String("task")) + return core.Result{OK: true} + }, + }) +} + +// extractField does a quick JSON field extraction without full unmarshal. +// Looks for "field":"value" pattern. Good enough for status.json. +func extractField(jsonStr, field string) string { + // Match both "field":"value" and "field": "value" + needle := core.Concat("\"", field, "\"") + idx := -1 + for i := 0; i <= len(jsonStr)-len(needle); i++ { + if jsonStr[i:i+len(needle)] == needle { + idx = i + len(needle) + break + } + } + if idx < 0 { + return "" + } + // Skip : and whitespace to find opening quote + for idx < len(jsonStr) && (jsonStr[idx] == ':' || jsonStr[idx] == ' ' || jsonStr[idx] == '\t') { + idx++ + } + if idx >= len(jsonStr) || jsonStr[idx] != '"' { + return "" + } + idx++ // skip opening quote + end := idx + for end < len(jsonStr) && jsonStr[end] != '"' { + end++ + } + return jsonStr[idx:end] +} diff --git a/cmd/main.go b/cmd/main.go deleted file mode 100644 index bcb5b1a..0000000 --- a/cmd/main.go +++ /dev/null @@ -1,126 +0,0 @@ -package main - -import ( - "fmt" - "log" - "os" - "path/filepath" - - "dappco.re/go/agent/pkg/agentic" - "dappco.re/go/agent/pkg/brain" - "dappco.re/go/agent/pkg/monitor" - "forge.lthn.ai/core/cli/pkg/cli" - "dappco.re/go/core/process" - "dappco.re/go/core" - "forge.lthn.ai/core/mcp/pkg/mcp" -) - -func main() { - if err := cli.Init(cli.Options{ - AppName: "core-agent", - Version: "0.2.0", - }); err != nil { - log.Fatal(err) - } - - // Shared setup for both mcp and serve commands - initServices := func() (*mcp.Service, *monitor.Subsystem, error) { - c := core.New(core.Options{ - {Key: "name", Value: "core-agent"}, - }) - procFactory := process.NewService(process.Options{}) - procResult, err := procFactory(c) - if err != nil { - return nil, nil, cli.Wrap(err, "init process service") - } - if procSvc, ok := procResult.(*process.Service); ok { - process.SetDefault(procSvc) - } - - mon := monitor.New() - prep := 
agentic.NewPrep() - prep.SetCompletionNotifier(mon) - - mcpSvc, err := mcp.New(mcp.Options{ - Subsystems: []mcp.Subsystem{brain.NewDirect(), prep, mon}, - }) - if err != nil { - return nil, nil, cli.Wrap(err, "create MCP service") - } - - // Wire channel notifications — monitor pushes events into MCP sessions - mon.SetNotifier(mcpSvc) - - return mcpSvc, mon, nil - } - - // mcp — stdio transport (Claude Code integration) - mcpCmd := cli.NewCommand("mcp", "Start the MCP server on stdio", "", func(cmd *cli.Command, args []string) error { - mcpSvc, mon, err := initServices() - if err != nil { - return err - } - mon.Start(cmd.Context()) - return mcpSvc.Run(cmd.Context()) - }) - - // serve — persistent HTTP daemon (Charon, CI, cross-agent) - serveCmd := cli.NewCommand("serve", "Start as a persistent HTTP daemon", "", func(cmd *cli.Command, args []string) error { - mcpSvc, mon, err := initServices() - if err != nil { - return err - } - - // Determine address - addr := os.Getenv("MCP_HTTP_ADDR") - if addr == "" { - addr = "0.0.0.0:9101" - } - - // Determine health address - healthAddr := os.Getenv("HEALTH_ADDR") - if healthAddr == "" { - healthAddr = "0.0.0.0:9102" - } - - // Set up daemon with PID file, health check, and registry - home, _ := os.UserHomeDir() - pidFile := filepath.Join(home, ".core", "core-agent.pid") - - daemon := process.NewDaemon(process.DaemonOptions{ - PIDFile: pidFile, - HealthAddr: healthAddr, - Registry: process.DefaultRegistry(), - RegistryEntry: process.DaemonEntry{ - Code: "core", - Daemon: "agent", - Project: "core-agent", - Binary: "core-agent", - }, - }) - - if err := daemon.Start(); err != nil { - return cli.Wrap(err, "daemon start") - } - - // Start monitor - mon.Start(cmd.Context()) - - // Mark ready - daemon.SetReady(true) - fmt.Fprintf(os.Stderr, "core-agent serving on %s (health: %s, pid: %s)\n", addr, healthAddr, pidFile) - - // Set env so mcp.Run picks HTTP transport - os.Setenv("MCP_HTTP_ADDR", addr) - - // Run MCP server (blocks 
until context cancelled) - return mcpSvc.Run(cmd.Context()) - }) - - cli.RootCmd().AddCommand(mcpCmd) - cli.RootCmd().AddCommand(serveCmd) - - if err := cli.Execute(); err != nil { - log.Fatal(err) - } -} diff --git a/go.mod b/go.mod index 80d7e34..d3c7998 100644 --- a/go.mod +++ b/go.mod @@ -3,14 +3,13 @@ module dappco.re/go/agent go 1.26.0 require ( - dappco.re/go/core v0.5.0 - dappco.re/go/core/io v0.2.0 - dappco.re/go/core/log v0.1.0 + dappco.re/go/core v0.6.0 + dappco.re/go/core/api v0.2.0 dappco.re/go/core/process v0.3.0 dappco.re/go/core/ws v0.3.0 - forge.lthn.ai/core/api v0.1.5 + forge.lthn.ai/core/api v0.1.6 forge.lthn.ai/core/cli v0.3.7 - forge.lthn.ai/core/mcp v0.4.0 + forge.lthn.ai/core/mcp v0.4.8 github.com/gin-gonic/gin v1.12.0 github.com/gorilla/websocket v1.5.3 github.com/modelcontextprotocol/go-sdk v1.4.1 @@ -18,7 +17,14 @@ require ( gopkg.in/yaml.v3 v3.0.1 ) +require dappco.re/go/core/forge v0.2.0 // indirect + require ( + dappco.re/go/core/i18n v0.2.0 + dappco.re/go/core/io v0.2.0 // indirect + dappco.re/go/core/log v0.1.0 // indirect + dappco.re/go/core/scm v0.4.0 + dappco.re/go/core/store v0.2.0 forge.lthn.ai/core/go v0.3.3 // indirect forge.lthn.ai/core/go-ai v0.1.12 // indirect forge.lthn.ai/core/go-i18n v0.1.7 // indirect @@ -27,7 +33,7 @@ require ( forge.lthn.ai/core/go-log v0.0.4 // indirect forge.lthn.ai/core/go-process v0.2.9 // indirect forge.lthn.ai/core/go-rag v0.1.11 // indirect - forge.lthn.ai/core/go-webview v0.1.6 // indirect + forge.lthn.ai/core/go-webview v0.1.7 // indirect forge.lthn.ai/core/go-ws v0.2.5 // indirect github.com/99designs/gqlgen v0.17.88 // indirect github.com/KyleBanks/depth v1.2.1 // indirect @@ -36,7 +42,7 @@ require ( github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/bmatcuk/doublestar/v4 v4.10.0 // indirect - github.com/buger/jsonparser v1.1.1 // indirect + github.com/buger/jsonparser v1.1.2 // indirect github.com/bytedance/gopkg v0.1.4 // 
indirect github.com/bytedance/sonic v1.15.0 // indirect github.com/bytedance/sonic/loader v0.5.0 // indirect @@ -109,7 +115,7 @@ require ( github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/termenv v0.16.0 // indirect - github.com/ollama/ollama v0.18.1 // indirect + github.com/ollama/ollama v0.18.2 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/qdrant/go-client v1.17.1 // indirect @@ -150,7 +156,7 @@ require ( golang.org/x/term v0.41.0 // indirect golang.org/x/text v0.35.0 // indirect golang.org/x/tools v0.43.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260316180232-0b37fe3546d5 // indirect - google.golang.org/grpc v1.79.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 // indirect + google.golang.org/grpc v1.79.3 // indirect google.golang.org/protobuf v1.36.11 // indirect ) diff --git a/go.sum b/go.sum index 44b3d75..968bb42 100644 --- a/go.sum +++ b/go.sum @@ -1,15 +1,26 @@ dappco.re/go/core v0.5.0 h1:P5DJoaCiK5Q+af5UiTdWqUIW4W4qYKzpgGK50thm21U= dappco.re/go/core v0.5.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core v0.6.0 h1:0wmuO/UmCWXxJkxQ6XvVLnqkAuWitbd49PhxjCsplyk= +dappco.re/go/core v0.6.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core/api v0.2.0 h1:5OcN9nawpp18Jp6dB1OwI2CBfs0Tacb0y0zqxFB6TJ0= +dappco.re/go/core/api v0.2.0/go.mod h1:AtgNAx8lDY+qhVObFdNQOjSUQrHX1BeiDdMuA6RIfzo= +dappco.re/go/core/forge v0.2.0 h1:EBCHaUdzEAbYpDwRTXMmJoSfSrK30IJTOVBPRxxkJTg= +dappco.re/go/core/forge v0.2.0/go.mod h1:XMz9ZNVl9xane9Rg3AEBuVV5UNNBGWbPY9rSKbqYgnM= +dappco.re/go/core/i18n v0.2.0/go.mod h1:9eSVJXr3OpIGWQvDynfhqcp27xnLMwlYLgsByU+p7ok= dappco.re/go/core/io v0.2.0 h1:zuudgIiTsQQ5ipVt97saWdGLROovbEB/zdVyy9/l+I4= dappco.re/go/core/io v0.2.0/go.mod 
h1:1QnQV6X9LNgFKfm8SkOtR9LLaj3bDcsOIeJOOyjbL5E= dappco.re/go/core/log v0.1.0 h1:pa71Vq2TD2aoEUQWFKwNcaJ3GBY8HbaNGqtE688Unyc= dappco.re/go/core/log v0.1.0/go.mod h1:Nkqb8gsXhZAO8VLpx7B8i1iAmohhzqA20b9Zr8VUcJs= dappco.re/go/core/process v0.3.0 h1:BPF9R79+8ZWe34qCIy/sZy+P4HwbaO95js2oPJL7IqM= dappco.re/go/core/process v0.3.0/go.mod h1:qwx8kt6x+J9gn7fu8lavuess72Ye9jPBODqDZQ9K0as= +dappco.re/go/core/scm v0.4.0/go.mod h1:ufb7si6HBkaT6zC8L67kLm8zzBaD1aQoTn4OsVAM1aI= +dappco.re/go/core/store v0.2.0/go.mod h1:QQGJiruayjna3nywbf0N2gcO502q/oEkPoSpBpSKbLM= dappco.re/go/core/ws v0.3.0 h1:ZxR8y5pfrWvnCHVN7qExXz7fdP5a063uNqyqE0Ab8pQ= dappco.re/go/core/ws v0.3.0/go.mod h1:aLyXrJnbCOGL0SW9rC1EHAAIS83w3djO374gHIz4Nic= forge.lthn.ai/core/api v0.1.5 h1:NwZrcOyBjaiz5/cn0n0tnlMUodi8Or6FHMx59C7Kv2o= forge.lthn.ai/core/api v0.1.5/go.mod h1:PBnaWyOVXSOGy+0x2XAPUFMYJxQ2CNhppia/D06ZPII= +forge.lthn.ai/core/api v0.1.6 h1:DwJ9s/B5yEAVx497oB6Ja9wlj4qZ6HLvsyZOcN7RivA= +forge.lthn.ai/core/api v0.1.6/go.mod h1:l7EeqKgu3New2kAeg65We8KJoVlzkO0P3bK7tQNniXg= forge.lthn.ai/core/cli v0.3.7 h1:1GrbaGg0wDGHr6+klSbbGyN/9sSbHvFbdySJznymhwg= forge.lthn.ai/core/cli v0.3.7/go.mod h1:DBUppJkA9P45ZFGgI2B8VXw1rAZxamHoI/KG7fRvTNs= forge.lthn.ai/core/go v0.3.3 h1:kYYZ2nRYy0/Be3cyuLJspRjLqTMxpckVyhb/7Sw2gd0= @@ -30,10 +41,20 @@ forge.lthn.ai/core/go-rag v0.1.11 h1:KXTOtnOdrx8YKmvnj0EOi2EI/+cKjE8w2PpJCQIrSd8 forge.lthn.ai/core/go-rag v0.1.11/go.mod h1:vIlOKVD1SdqqjkJ2XQyXPuKPtiajz/STPLCaDpqOzk8= forge.lthn.ai/core/go-webview v0.1.6 h1:szXQxRJf2bOZJKh3v1P01B1Vf9mgXaBCXzh0EZu9aoc= forge.lthn.ai/core/go-webview v0.1.6/go.mod h1:5n1tECD1wBV/uFZRY9ZjfPFO5TYZrlaR3mQFwvO2nek= +forge.lthn.ai/core/go-webview v0.1.7 h1:9+aEHeAvNcPX8Zwr+UGu0/T+menRm5T1YOmqZ9dawDc= +forge.lthn.ai/core/go-webview v0.1.7/go.mod h1:5n1tECD1wBV/uFZRY9ZjfPFO5TYZrlaR3mQFwvO2nek= forge.lthn.ai/core/go-ws v0.2.5 h1:ZIV7Yrv01R/xpJUogA5vrfP9yB9li1w7EV3eZFMt8h0= forge.lthn.ai/core/go-ws v0.2.5/go.mod h1:C3riJyLLcV6QhLvYlq3P/XkGTsN598qQeGBoLdoHBU4= 
forge.lthn.ai/core/mcp v0.4.0 h1:t4HMTI6CpoGB/VmE1aTklSEM8EI4Z/uKWyjGHxa1f4M= forge.lthn.ai/core/mcp v0.4.0/go.mod h1:eU35WT/8Mc0oJDVWdKaXEtNp27+Hc8KvnTKPf4DAqXE= +forge.lthn.ai/core/mcp v0.4.4 h1:VTCOA1Dj/L7S8JCRg9BfYw7KfowW/Vvrp39bxc0dYyw= +forge.lthn.ai/core/mcp v0.4.4/go.mod h1:eU35WT/8Mc0oJDVWdKaXEtNp27+Hc8KvnTKPf4DAqXE= +forge.lthn.ai/core/mcp v0.4.6 h1:jZY72sfPiCppKU4YyX7Gwy7ynbgVzUto+3S6oAj5Qs4= +forge.lthn.ai/core/mcp v0.4.6/go.mod h1:eU35WT/8Mc0oJDVWdKaXEtNp27+Hc8KvnTKPf4DAqXE= +forge.lthn.ai/core/mcp v0.4.7 h1:Iy/83laUpkaH8W2EoDlVMJbyv60xJ4aMgQe6sOcwL7k= +forge.lthn.ai/core/mcp v0.4.7/go.mod h1:eU35WT/8Mc0oJDVWdKaXEtNp27+Hc8KvnTKPf4DAqXE= +forge.lthn.ai/core/mcp v0.4.8 h1:nd1x3AL8AkUfl0kziltoJUX96Nx1BeFWEbgHmfrkKz8= +forge.lthn.ai/core/mcp v0.4.8/go.mod h1:eU35WT/8Mc0oJDVWdKaXEtNp27+Hc8KvnTKPf4DAqXE= github.com/99designs/gqlgen v0.17.88 h1:neMQDgehMwT1vYIOx/w5ZYPUU/iMNAJzRO44I5Intoc= github.com/99designs/gqlgen v0.17.88/go.mod h1:qeqYFEgOeSKqWedOjogPizimp2iu4E23bdPvl4jTYic= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= @@ -63,6 +84,8 @@ github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/buger/jsonparser v1.1.2 h1:frqHqw7otoVbk5M8LlE/L7HTnIq2v9RX6EJ48i9AxJk= +github.com/buger/jsonparser v1.1.2/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bytedance/gopkg v0.1.4 h1:oZnQwnX82KAIWb7033bEwtxvTqXcYMxDBaQxo5JJHWM= github.com/bytedance/gopkg v0.1.4/go.mod h1:v1zWfPm21Fb+OsyXN2VAHdL6TBb2L88anLQgdyje6R4= github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE= @@ -247,6 +270,8 @@ github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc github.com/muesli/termenv 
v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/ollama/ollama v0.18.1 h1:7K6anW64C2keASpToYfuOa00LuP8aCmofLKcT2c1mlY= github.com/ollama/ollama v0.18.1/go.mod h1:tCX4IMV8DHjl3zY0THxuEkpWDZSOchJpzTuLACpMwFw= +github.com/ollama/ollama v0.18.2 h1:RsOY8oZ6TufRiPgsSlKJp4/V/X+oBREscUlEHZfd554= +github.com/ollama/ollama v0.18.2/go.mod h1:tCX4IMV8DHjl3zY0THxuEkpWDZSOchJpzTuLACpMwFw= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -396,8 +421,12 @@ gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= google.golang.org/genproto/googleapis/rpc v0.0.0-20260316180232-0b37fe3546d5 h1:aJmi6DVGGIStN9Mobk/tZOOQUBbj0BPjZjjnOdoZKts= google.golang.org/genproto/googleapis/rpc v0.0.0-20260316180232-0b37fe3546d5/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 h1:ndE4FoJqsIceKP2oYSnUZqhTdYufCYYkqwtFzfrhI7w= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU= google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= +google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git 
a/main b/main deleted file mode 100755 index 7eb01d7..0000000 Binary files a/main and /dev/null differ diff --git a/pkg/agentic/auto_pr.go b/pkg/agentic/auto_pr.go index 124b05d..25ce776 100644 --- a/pkg/agentic/auto_pr.go +++ b/pkg/agentic/auto_pr.go @@ -4,11 +4,10 @@ package agentic import ( "context" - "fmt" "os/exec" - "path/filepath" - "strings" "time" + + core "dappco.re/go/core" ) // autoCreatePR pushes the agent's branch and creates a PR on Forge @@ -19,21 +18,19 @@ func (s *PrepSubsystem) autoCreatePR(wsDir string) { return } - srcDir := filepath.Join(wsDir, "src") + repoDir := core.JoinPath(wsDir, "repo") - // Detect default branch for this repo - base := gitDefaultBranch(srcDir) + // PRs target dev — agents never merge directly to main + base := "dev" - // Check if there are commits on the branch beyond the default branch diffCmd := exec.Command("git", "log", "--oneline", "origin/"+base+"..HEAD") - diffCmd.Dir = srcDir + diffCmd.Dir = repoDir out, err := diffCmd.Output() - if err != nil || len(strings.TrimSpace(string(out))) == 0 { - // No commits — nothing to PR + if err != nil || len(core.Trim(string(out))) == 0 { return } - commitCount := len(strings.Split(strings.TrimSpace(string(out)), "\n")) + commitCount := len(core.Split(core.Trim(string(out)), "\n")) // Get the repo's forge remote URL to extract org/repo org := st.Org @@ -42,20 +39,20 @@ func (s *PrepSubsystem) autoCreatePR(wsDir string) { } // Push the branch to forge - forgeRemote := fmt.Sprintf("ssh://git@forge.lthn.ai:2223/%s/%s.git", org, st.Repo) + forgeRemote := core.Sprintf("ssh://git@forge.lthn.ai:2223/%s/%s.git", org, st.Repo) pushCmd := exec.Command("git", "push", forgeRemote, st.Branch) - pushCmd.Dir = srcDir + pushCmd.Dir = repoDir if pushErr := pushCmd.Run(); pushErr != nil { // Push failed — update status with error but don't block if st2, err := readStatus(wsDir); err == nil { - st2.Question = fmt.Sprintf("PR push failed: %v", pushErr) + st2.Question = core.Sprintf("PR push 
failed: %v", pushErr) writeStatus(wsDir, st2) } return } // Create PR via Forge API - title := fmt.Sprintf("[agent/%s] %s", st.Agent, truncate(st.Task, 60)) + title := core.Sprintf("[agent/%s] %s", st.Agent, truncate(st.Task, 60)) body := s.buildAutoPRBody(st, commitCount) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) @@ -64,7 +61,7 @@ func (s *PrepSubsystem) autoCreatePR(wsDir string) { prURL, _, err := s.forgeCreatePR(ctx, org, st.Repo, st.Branch, base, title, body) if err != nil { if st2, err := readStatus(wsDir); err == nil { - st2.Question = fmt.Sprintf("PR creation failed: %v", err) + st2.Question = core.Sprintf("PR creation failed: %v", err) writeStatus(wsDir, st2) } return @@ -78,13 +75,16 @@ func (s *PrepSubsystem) autoCreatePR(wsDir string) { } func (s *PrepSubsystem) buildAutoPRBody(st *WorkspaceStatus, commits int) string { - var b strings.Builder + b := core.NewBuilder() b.WriteString("## Task\n\n") b.WriteString(st.Task) b.WriteString("\n\n") - b.WriteString(fmt.Sprintf("**Agent:** %s\n", st.Agent)) - b.WriteString(fmt.Sprintf("**Commits:** %d\n", commits)) - b.WriteString(fmt.Sprintf("**Branch:** `%s`\n", st.Branch)) + if st.Issue > 0 { + b.WriteString(core.Sprintf("Closes #%d\n\n", st.Issue)) + } + b.WriteString(core.Sprintf("**Agent:** %s\n", st.Agent)) + b.WriteString(core.Sprintf("**Commits:** %d\n", commits)) + b.WriteString(core.Sprintf("**Branch:** `%s`\n", st.Branch)) b.WriteString("\n---\n") b.WriteString("Auto-created by core-agent dispatch system.\n") b.WriteString("Co-Authored-By: Virgil \n") diff --git a/pkg/agentic/dispatch.go b/pkg/agentic/dispatch.go index 2100edf..aff3aa0 100644 --- a/pkg/agentic/dispatch.go +++ b/pkg/agentic/dispatch.go @@ -4,34 +4,37 @@ package agentic import ( "context" - "fmt" - "os" - "path/filepath" - "strings" + "os/exec" "syscall" "time" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "dappco.re/go/core/process" 
"github.com/modelcontextprotocol/go-sdk/mcp" ) // DispatchInput is the input for agentic_dispatch. +// +// input := agentic.DispatchInput{Repo: "go-io", Task: "Fix the failing tests", Agent: "codex", Issue: 15} type DispatchInput struct { Repo string `json:"repo"` // Target repo (e.g. "go-io") Org string `json:"org,omitempty"` // Forge org (default "core") Task string `json:"task"` // What the agent should do - Agent string `json:"agent,omitempty"` // "gemini" (default), "codex", "claude" + Agent string `json:"agent,omitempty"` // "codex" (default), "claude", "gemini" Template string `json:"template,omitempty"` // "conventions", "security", "coding" (default) - PlanTemplate string `json:"plan_template,omitempty"` // Plan template: bug-fix, code-review, new-feature, refactor, feature-port + PlanTemplate string `json:"plan_template,omitempty"` // Plan template slug Variables map[string]string `json:"variables,omitempty"` // Template variable substitution - Persona string `json:"persona,omitempty"` // Persona: engineering/backend-architect, testing/api-tester, etc. - Issue int `json:"issue,omitempty"` // Forge issue to work from + Persona string `json:"persona,omitempty"` // Persona slug + Issue int `json:"issue,omitempty"` // Forge issue number → workspace: task-{num}/ + PR int `json:"pr,omitempty"` // PR number → workspace: pr-{num}/ + Branch string `json:"branch,omitempty"` // Branch → workspace: {branch}/ + Tag string `json:"tag,omitempty"` // Tag → workspace: {tag}/ (immutable) DryRun bool `json:"dry_run,omitempty"` // Preview without executing } // DispatchOutput is the output for agentic_dispatch. 
+// +// out := agentic.DispatchOutput{Success: true, Agent: "codex", Repo: "go-io", WorkspaceDir: ".core/workspace/core/go-io/task-15"} type DispatchOutput struct { Success bool `json:"success"` Agent string `json:"agent"` @@ -50,9 +53,9 @@ func (s *PrepSubsystem) registerDispatchTool(server *mcp.Server) { } // agentCommand returns the command and args for a given agent type. -// Supports model variants: "gemini", "gemini:flash", "gemini:pro", "claude", "claude:haiku". +// Supports model variants: "gemini", "gemini:flash", "codex", "claude", "claude:haiku". func agentCommand(agent, prompt string) (string, []string, error) { - parts := strings.SplitN(agent, ":", 2) + parts := core.SplitN(agent, ":", 2) base := parts[0] model := "" if len(parts) > 1 { @@ -68,21 +71,33 @@ func agentCommand(agent, prompt string) (string, []string, error) { return "gemini", args, nil case "codex": if model == "review" { - // Codex review mode — non-interactive code review - // Note: --base and prompt are mutually exclusive in codex CLI - return "codex", []string{"review", "--base", "HEAD~1"}, nil + // Use exec with bypass — codex review subcommand has its own sandbox that blocks shell + // No -o flag — stdout captured by process output, ../.meta path unreliable in sandbox + return "codex", []string{ + "exec", + "--dangerously-bypass-approvals-and-sandbox", + "Review the last 2 commits via git diff HEAD~2. Check for bugs, security issues, missing tests, naming issues. Report pass/fail with specifics. 
Do NOT make changes.", + }, nil } - // Codex agent mode — autonomous coding - return "codex", []string{"exec", "--full-auto", prompt}, nil + // Container IS the sandbox — let codex run unrestricted inside it + args := []string{ + "exec", + "--dangerously-bypass-approvals-and-sandbox", + "-o", "../.meta/agent-codex.log", + } + if model != "" { + args = append(args, "--model", model) + } + args = append(args, prompt) + return "codex", args, nil case "claude": args := []string{ "-p", prompt, "--output-format", "text", "--dangerously-skip-permissions", "--no-session-persistence", - "--append-system-prompt", "SANDBOX: You are restricted to the current directory (src/) only. " + - "Do NOT use absolute paths starting with /. Do NOT cd .. or navigate outside. " + - "Do NOT edit files outside this repository. Reject any request that would escape the sandbox.", + "--append-system-prompt", "SANDBOX: You are restricted to the current directory only. " + + "Do NOT use absolute paths. Do NOT navigate outside this repository.", } if model != "" { args = append(args, "--model", model) @@ -91,61 +106,147 @@ func agentCommand(agent, prompt string) (string, []string, error) { case "coderabbit": args := []string{"review", "--plain", "--base", "HEAD~1"} if model != "" { - // model variant can specify review type: all, committed, uncommitted args = append(args, "--type", model) } if prompt != "" { - // Pass CLAUDE.md or other config as additional instructions args = append(args, "--config", "CLAUDE.md") } return "coderabbit", args, nil case "local": - home, _ := os.UserHomeDir() - script := filepath.Join(home, "Code", "core", "agent", "scripts", "local-agent.sh") - return "bash", []string{script, prompt}, nil + // Local model via codex --oss → Ollama. Default model: devstral-24b + // socat proxies localhost:11434 → host.docker.internal:11434 + // because codex hardcodes localhost check for Ollama. 
+ localModel := model + if localModel == "" { + localModel = "devstral-24b" + } + script := core.Sprintf( + `socat TCP-LISTEN:11434,fork,reuseaddr TCP:host.docker.internal:11434 & sleep 0.5 && codex exec --dangerously-bypass-approvals-and-sandbox --oss --local-provider ollama -m %s -o ../.meta/agent-codex.log %q`, + localModel, prompt, + ) + return "sh", []string{"-c", script}, nil default: - return "", nil, coreerr.E("agentCommand", "unknown agent: "+agent, nil) + return "", nil, core.E("agentCommand", "unknown agent: "+agent, nil) } } -// spawnAgent launches an agent process via go-process and returns the PID. -// Output is captured via pipes and written to the log file on completion. -// The background goroutine handles status updates, findings ingestion, and queue drain. +// defaultDockerImage is the container image for agent dispatch. +// Override via AGENT_DOCKER_IMAGE env var. +const defaultDockerImage = "core-dev" + +// containerCommand wraps an agent command to run inside a Docker container. +// All agents run containerised — no bare metal execution. +// agentType is the base agent name (e.g. "local", "codex", "claude"). // -// For CodeRabbit agents, no process is spawned — instead the code is pushed -// to GitHub and a PR is created/marked ready for review. 
-func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir, srcDir string) (int, string, error) { +// cmd, args := containerCommand("local", "codex", []string{"exec", "..."}, repoDir, metaDir) +func containerCommand(agentType, command string, args []string, repoDir, metaDir string) (string, []string) { + image := core.Env("AGENT_DOCKER_IMAGE") + if image == "" { + image = defaultDockerImage + } + + home := core.Env("DIR_HOME") + + dockerArgs := []string{ + "run", "--rm", + // Host access for Ollama (local models) + "--add-host=host.docker.internal:host-gateway", + // Workspace: repo + meta + "-v", repoDir + ":/workspace", + "-v", metaDir + ":/workspace/.meta", + "-w", "/workspace", + // Auth: agent configs only — NO SSH keys, git push runs on host + "-v", core.JoinPath(home, ".codex") + ":/root/.codex:ro", + // API keys — passed by name, Docker resolves from host env + "-e", "OPENAI_API_KEY", + "-e", "ANTHROPIC_API_KEY", + "-e", "GEMINI_API_KEY", + "-e", "GOOGLE_API_KEY", + // Agent environment + "-e", "TERM=dumb", + "-e", "NO_COLOR=1", + "-e", "CI=true", + "-e", "GIT_USER_NAME=Virgil", + "-e", "GIT_USER_EMAIL=virgil@lethean.io", + // Local model access — Ollama on host + "-e", "OLLAMA_HOST=http://host.docker.internal:11434", + } + + // Mount Claude config if dispatching claude agent + if command == "claude" { + dockerArgs = append(dockerArgs, + "-v", core.JoinPath(home, ".claude")+":/root/.claude:ro", + ) + } + + // Mount Gemini config if dispatching gemini agent + if command == "gemini" { + dockerArgs = append(dockerArgs, + "-v", core.JoinPath(home, ".gemini")+":/root/.gemini:ro", + ) + } + + dockerArgs = append(dockerArgs, image, command) + dockerArgs = append(dockerArgs, args...) + + return "docker", dockerArgs +} + +// spawnAgent launches an agent inside a Docker container. +// The repo/ directory is mounted at /workspace, agent runs sandboxed. +// Output is captured and written to .meta/agent-{agent}.log on completion. 
+func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir string) (int, string, error) { command, args, err := agentCommand(agent, prompt) if err != nil { return 0, "", err } - outputFile := filepath.Join(wsDir, fmt.Sprintf("agent-%s.log", agent)) + repoDir := core.JoinPath(wsDir, "repo") + metaDir := core.JoinPath(wsDir, ".meta") + // Use base agent name for log file — colon in variants breaks paths + agentBase := core.SplitN(agent, ":", 2)[0] + outputFile := core.JoinPath(metaDir, core.Sprintf("agent-%s.log", agentBase)) - // Clean up stale BLOCKED.md from previous runs so it doesn't - // prevent this run from completing - os.Remove(filepath.Join(srcDir, "BLOCKED.md")) + // Clean up stale BLOCKED.md from previous runs + fs.Delete(core.JoinPath(repoDir, "BLOCKED.md")) + + // All agents run containerised + command, args = containerCommand(agentBase, command, args, repoDir, metaDir) proc, err := process.StartWithOptions(context.Background(), process.RunOptions{ Command: command, Args: args, - Dir: srcDir, - Env: []string{"TERM=dumb", "NO_COLOR=1", "CI=true", "GOWORK=off"}, + Dir: repoDir, Detach: true, }) if err != nil { - return 0, "", coreerr.E("dispatch.spawnAgent", "failed to spawn "+agent, err) + return 0, "", core.E("dispatch.spawnAgent", "failed to spawn "+agent, err) } - // Close stdin immediately — agents use -p mode, not interactive stdin. - // Without this, Claude CLI blocks waiting on the open pipe. 
proc.CloseStdin() - pid := proc.Info().PID + // Notify monitor directly — no filesystem polling + if s.onComplete != nil { + st, _ := readStatus(wsDir) + repo := "" + if st != nil { + repo = st.Repo + } + s.onComplete.AgentStarted(agent, repo, core.PathBase(wsDir)) + } + emitStartEvent(agent, core.PathBase(wsDir)) // audit log + + // Start Forge stopwatch on the issue (time tracking) + if st, _ := readStatus(wsDir); st != nil && st.Issue > 0 { + org := st.Org + if org == "" { + org = "core" + } + s.forge.Issues.StartStopwatch(context.Background(), org, st.Repo, int64(st.Issue)) + } + go func() { - // Wait for process exit. go-process handles timeout and kill group. - // PID polling fallback in case pipes hang from inherited child processes. ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() for { @@ -160,82 +261,174 @@ func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir, srcDir string) (int, st } done: - // Write captured output to log file if output := proc.Output(); output != "" { - coreio.Local.Write(outputFile, output) + fs.Write(outputFile, output) } - // Determine final status: check exit code, BLOCKED.md, and output finalStatus := "completed" exitCode := proc.Info().ExitCode procStatus := proc.Info().Status question := "" - blockedPath := filepath.Join(wsDir, "src", "BLOCKED.md") - if blockedContent, err := coreio.Local.Read(blockedPath); err == nil && strings.TrimSpace(blockedContent) != "" { + blockedPath := core.JoinPath(repoDir, "BLOCKED.md") + if r := fs.Read(blockedPath); r.OK && core.Trim(r.Value.(string)) != "" { finalStatus = "blocked" - question = strings.TrimSpace(blockedContent) + question = core.Trim(r.Value.(string)) } else if exitCode != 0 || procStatus == "failed" || procStatus == "killed" { finalStatus = "failed" if exitCode != 0 { - question = fmt.Sprintf("Agent exited with code %d", exitCode) + question = core.Sprintf("Agent exited with code %d", exitCode) } } - if st, err := readStatus(wsDir); err == nil { + if st, stErr := 
readStatus(wsDir); stErr == nil { st.Status = finalStatus st.PID = 0 st.Question = question writeStatus(wsDir, st) } - // Emit completion event with actual status - emitCompletionEvent(agent, filepath.Base(wsDir), finalStatus) + emitCompletionEvent(agent, core.PathBase(wsDir), finalStatus) // audit log - // Notify monitor immediately (push to connected clients) + // Rate-limit detection: if agent failed fast (<60s), track consecutive failures + pool := baseAgent(agent) + if finalStatus == "failed" { + if st, _ := readStatus(wsDir); st != nil { + elapsed := time.Since(st.StartedAt) + if elapsed < 60*time.Second { + s.failCount[pool]++ + if s.failCount[pool] >= 3 { + s.backoff[pool] = time.Now().Add(30 * time.Minute) + core.Print(nil, "rate-limit detected for %s — pausing pool for 30 minutes", pool) + } + } else { + s.failCount[pool] = 0 // slow failure = real failure, reset count + } + } + } else { + s.failCount[pool] = 0 // success resets count + } + + // Stop Forge stopwatch on the issue (time tracking) + if st, _ := readStatus(wsDir); st != nil && st.Issue > 0 { + org := st.Org + if org == "" { + org = "core" + } + s.forge.Issues.StopStopwatch(context.Background(), org, st.Repo, int64(st.Issue)) + } + + // Push notification directly — no filesystem polling if s.onComplete != nil { - s.onComplete.Poke() + stNow, _ := readStatus(wsDir) + repoName := "" + if stNow != nil { + repoName = stNow.Repo + } + s.onComplete.AgentCompleted(agent, repoName, core.PathBase(wsDir), finalStatus) } - // Auto-create PR if agent completed successfully, then verify and merge if finalStatus == "completed" { - s.autoCreatePR(wsDir) - s.autoVerifyAndMerge(wsDir) + // Run QA before PR — if QA fails, mark as failed, don't PR + if !s.runQA(wsDir) { + finalStatus = "failed" + question = "QA check failed — build or tests did not pass" + if st, stErr := readStatus(wsDir); stErr == nil { + st.Status = finalStatus + st.Question = question + writeStatus(wsDir, st) + } + } else { + 
s.autoCreatePR(wsDir) + s.autoVerifyAndMerge(wsDir) + } } - // Ingest scan findings as issues s.ingestFindings(wsDir) - - // Drain queue - s.drainQueue() + s.Poke() }() return pid, outputFile, nil } +// runQA runs build + test checks on the repo after agent completion. +// Returns true if QA passes, false if build or tests fail. +func (s *PrepSubsystem) runQA(wsDir string) bool { + repoDir := core.JoinPath(wsDir, "repo") + + // Detect language and run appropriate checks + if fs.IsFile(core.JoinPath(repoDir, "go.mod")) { + // Go: build + vet + test + for _, args := range [][]string{ + {"go", "build", "./..."}, + {"go", "vet", "./..."}, + {"go", "test", "./...", "-count=1", "-timeout", "120s"}, + } { + cmd := exec.Command(args[0], args[1:]...) + cmd.Dir = repoDir + if err := cmd.Run(); err != nil { + core.Warn("QA failed", "cmd", core.Join(" ", args...), "err", err) + return false + } + } + return true + } + + if fs.IsFile(core.JoinPath(repoDir, "composer.json")) { + // PHP: composer install + test + install := exec.Command("composer", "install", "--no-interaction") + install.Dir = repoDir + if err := install.Run(); err != nil { + return false + } + test := exec.Command("composer", "test") + test.Dir = repoDir + return test.Run() == nil + } + + if fs.IsFile(core.JoinPath(repoDir, "package.json")) { + // Node: npm install + test + install := exec.Command("npm", "install") + install.Dir = repoDir + if err := install.Run(); err != nil { + return false + } + test := exec.Command("npm", "test") + test.Dir = repoDir + return test.Run() == nil + } + + // Unknown language — pass QA (no checks to run) + return true +} + func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest, input DispatchInput) (*mcp.CallToolResult, DispatchOutput, error) { if input.Repo == "" { - return nil, DispatchOutput{}, coreerr.E("dispatch", "repo is required", nil) + return nil, DispatchOutput{}, core.E("dispatch", "repo is required", nil) } if input.Task == "" { - return nil, 
DispatchOutput{}, coreerr.E("dispatch", "task is required", nil) + return nil, DispatchOutput{}, core.E("dispatch", "task is required", nil) } if input.Org == "" { input.Org = "core" } if input.Agent == "" { - input.Agent = "gemini" + input.Agent = "codex" } if input.Template == "" { input.Template = "coding" } - // Step 1: Prep the sandboxed workspace + // Step 1: Prep workspace — clone + build prompt prepInput := PrepInput{ Repo: input.Repo, Org: input.Org, Issue: input.Issue, + PR: input.PR, + Branch: input.Branch, + Tag: input.Tag, Task: input.Task, + Agent: input.Agent, Template: input.Template, PlanTemplate: input.PlanTemplate, Variables: input.Variables, @@ -243,30 +436,24 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest, } _, prepOut, err := s.prepWorkspace(ctx, req, prepInput) if err != nil { - return nil, DispatchOutput{}, coreerr.E("dispatch", "prep workspace failed", err) + return nil, DispatchOutput{}, core.E("dispatch", "prep workspace failed", err) } wsDir := prepOut.WorkspaceDir - srcDir := filepath.Join(wsDir, "src") - - // The prompt is just: read PROMPT.md and do the work - prompt := "Read PROMPT.md for instructions. All context files (CLAUDE.md, TODO.md, CONTEXT.md, CONSUMERS.md, RECENT.md) are in the current directory. Work in this directory." 
+ prompt := prepOut.Prompt if input.DryRun { - // Read PROMPT.md for the dry run output - promptContent, _ := coreio.Local.Read(filepath.Join(srcDir, "PROMPT.md")) return nil, DispatchOutput{ Success: true, Agent: input.Agent, Repo: input.Repo, WorkspaceDir: wsDir, - Prompt: promptContent, + Prompt: prompt, }, nil } // Step 2: Check per-agent concurrency limit if !s.canDispatchAgent(input.Agent) { - // Queue the workspace — write status as "queued" and return writeStatus(wsDir, &WorkspaceStatus{ Status: "queued", Agent: input.Agent, @@ -286,8 +473,8 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest, }, nil } - // Step 3: Spawn agent via go-process (pipes for output capture) - pid, outputFile, err := s.spawnAgent(input.Agent, prompt, wsDir, srcDir) + // Step 3: Spawn agent in repo/ directory + pid, outputFile, err := s.spawnAgent(input.Agent, prompt, wsDir) if err != nil { return nil, DispatchOutput{}, err } diff --git a/pkg/agentic/dispatch_sync.go b/pkg/agentic/dispatch_sync.go new file mode 100644 index 0000000..df0ceba --- /dev/null +++ b/pkg/agentic/dispatch_sync.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package agentic + +import ( + "context" + "syscall" + "time" + + core "dappco.re/go/core" +) + +// DispatchSyncInput is the input for a synchronous (blocking) task run. +// +// input := agentic.DispatchSyncInput{Repo: "go-crypt", Agent: "codex:gpt-5.3-codex-spark", Task: "fix it", Issue: 7} +type DispatchSyncInput struct { + Org string + Repo string + Agent string + Task string + Issue int +} + +// DispatchSyncResult is the output of a synchronous task run. +// +// if result.OK { fmt.Println("done:", result.Status) } +type DispatchSyncResult struct { + OK bool + Status string + Error string + PRURL string +} + +// DispatchSync preps a workspace, spawns the agent directly (no queue, no concurrency check), +// and blocks until the agent completes. 
+// +// result := prep.DispatchSync(ctx, input) +func (s *PrepSubsystem) DispatchSync(ctx context.Context, input DispatchSyncInput) DispatchSyncResult { + // Prep workspace + prepInput := PrepInput{ + Org: input.Org, + Repo: input.Repo, + Task: input.Task, + Agent: input.Agent, + Issue: input.Issue, + } + + prepCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + _, prepOut, err := s.prepWorkspace(prepCtx, nil, prepInput) + if err != nil { + return DispatchSyncResult{Error: err.Error()} + } + if !prepOut.Success { + return DispatchSyncResult{Error: "prep failed"} + } + + wsDir := prepOut.WorkspaceDir + prompt := prepOut.Prompt + + core.Print(nil, " workspace: %s", wsDir) + core.Print(nil, " branch: %s", prepOut.Branch) + + // Spawn agent directly — no queue, no concurrency check + pid, _, err := s.spawnAgent(input.Agent, prompt, wsDir) + if err != nil { + return DispatchSyncResult{Error: err.Error()} + } + + core.Print(nil, " pid: %d", pid) + core.Print(nil, " waiting for completion...") + + // Poll for process exit + ticker := time.NewTicker(3 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return DispatchSyncResult{Error: "cancelled"} + case <-ticker.C: + if pid > 0 && syscall.Kill(pid, 0) != nil { + // Process exited — read final status + st, err := readStatus(wsDir) + if err != nil { + return DispatchSyncResult{Error: "can't read final status"} + } + return DispatchSyncResult{ + OK: st.Status == "completed", + Status: st.Status, + PRURL: st.PRURL, + } + } + } + } +} diff --git a/pkg/agentic/epic.go b/pkg/agentic/epic.go index 4668b35..6295d21 100644 --- a/pkg/agentic/epic.go +++ b/pkg/agentic/epic.go @@ -6,39 +6,43 @@ import ( "bytes" "context" "encoding/json" - "fmt" "net/http" - "strings" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) // --- agentic_create_epic --- // EpicInput is the input for agentic_create_epic. 
+// +// input := agentic.EpicInput{Repo: "go-scm", Title: "Port agentic plans", Tasks: []string{"Read PHP flow", "Implement Go MCP tools"}} type EpicInput struct { Repo string `json:"repo"` // Target repo (e.g. "go-scm") - Org string `json:"org,omitempty"` // Forge org (default "core") - Title string `json:"title"` // Epic title - Body string `json:"body,omitempty"` // Epic description (above checklist) - Tasks []string `json:"tasks"` // Sub-task titles (become child issues) - Labels []string `json:"labels,omitempty"` // Labels for epic + children (e.g. ["agentic"]) - Dispatch bool `json:"dispatch,omitempty"` // Auto-dispatch agents to each child - Agent string `json:"agent,omitempty"` // Agent type for dispatch (default "claude") - Template string `json:"template,omitempty"` // Prompt template for dispatch (default "coding") + Org string `json:"org,omitempty"` // Forge org (default "core") + Title string `json:"title"` // Epic title + Body string `json:"body,omitempty"` // Epic description (above checklist) + Tasks []string `json:"tasks"` // Sub-task titles (become child issues) + Labels []string `json:"labels,omitempty"` // Labels for epic + children (e.g. ["agentic"]) + Dispatch bool `json:"dispatch,omitempty"` // Auto-dispatch agents to each child + Agent string `json:"agent,omitempty"` // Agent type for dispatch (default "claude") + Template string `json:"template,omitempty"` // Prompt template for dispatch (default "coding") } // EpicOutput is the output for agentic_create_epic. 
+// +// out := agentic.EpicOutput{Success: true, EpicNumber: 42, EpicURL: "https://forge.example/core/go-scm/issues/42"} type EpicOutput struct { - Success bool `json:"success"` - EpicNumber int `json:"epic_number"` - EpicURL string `json:"epic_url"` - Children []ChildRef `json:"children"` - Dispatched int `json:"dispatched,omitempty"` + Success bool `json:"success"` + EpicNumber int `json:"epic_number"` + EpicURL string `json:"epic_url"` + Children []ChildRef `json:"children"` + Dispatched int `json:"dispatched,omitempty"` } // ChildRef references a child issue. +// +// child := agentic.ChildRef{Number: 43, Title: "Implement plan list", URL: "https://forge.example/core/go-scm/issues/43"} type ChildRef struct { Number int `json:"number"` Title string `json:"title"` @@ -54,13 +58,13 @@ func (s *PrepSubsystem) registerEpicTool(server *mcp.Server) { func (s *PrepSubsystem) createEpic(ctx context.Context, req *mcp.CallToolRequest, input EpicInput) (*mcp.CallToolResult, EpicOutput, error) { if input.Title == "" { - return nil, EpicOutput{}, coreerr.E("createEpic", "title is required", nil) + return nil, EpicOutput{}, core.E("createEpic", "title is required", nil) } if len(input.Tasks) == 0 { - return nil, EpicOutput{}, coreerr.E("createEpic", "at least one task is required", nil) + return nil, EpicOutput{}, core.E("createEpic", "at least one task is required", nil) } if s.forgeToken == "" { - return nil, EpicOutput{}, coreerr.E("createEpic", "no Forge token configured", nil) + return nil, EpicOutput{}, core.E("createEpic", "no Forge token configured", nil) } if input.Org == "" { input.Org = "core" @@ -99,21 +103,21 @@ func (s *PrepSubsystem) createEpic(ctx context.Context, req *mcp.CallToolRequest } // Step 2: Build epic body with checklist - var body strings.Builder + body := core.NewBuilder() if input.Body != "" { body.WriteString(input.Body) body.WriteString("\n\n") } body.WriteString("## Tasks\n\n") for _, child := range children { - body.WriteString(fmt.Sprintf("- 
[ ] #%d %s\n", child.Number, child.Title)) + body.WriteString(core.Sprintf("- [ ] #%d %s\n", child.Number, child.Title)) } // Step 3: Create epic issue epicLabels := append(labelIDs, s.resolveLabelIDs(ctx, input.Org, input.Repo, []string{"epic"})...) epic, err := s.createIssue(ctx, input.Org, input.Repo, input.Title, body.String(), epicLabels) if err != nil { - return nil, EpicOutput{}, coreerr.E("createEpic", "failed to create epic", err) + return nil, EpicOutput{}, core.E("createEpic", "failed to create epic", err) } out := EpicOutput{ @@ -156,19 +160,19 @@ func (s *PrepSubsystem) createIssue(ctx context.Context, org, repo, title, body } data, _ := json.Marshal(payload) - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues", s.forgeURL, org, repo) + url := core.Sprintf("%s/api/v1/repos/%s/%s/issues", s.forgeURL, org, repo) req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(data)) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", "token "+s.forgeToken) resp, err := s.client.Do(req) if err != nil { - return ChildRef{}, coreerr.E("createIssue", "create issue request failed", err) + return ChildRef{}, core.E("createIssue", "create issue request failed", err) } defer resp.Body.Close() if resp.StatusCode != 201 { - return ChildRef{}, coreerr.E("createIssue", fmt.Sprintf("create issue returned %d", resp.StatusCode), nil) + return ChildRef{}, core.E("createIssue", core.Sprintf("create issue returned %d", resp.StatusCode), nil) } var result struct { @@ -191,7 +195,7 @@ func (s *PrepSubsystem) resolveLabelIDs(ctx context.Context, org, repo string, n } // Fetch existing labels - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/labels?limit=50", s.forgeURL, org, repo) + url := core.Sprintf("%s/api/v1/repos/%s/%s/labels?limit=50", s.forgeURL, org, repo) req, _ := http.NewRequestWithContext(ctx, "GET", url, nil) req.Header.Set("Authorization", "token "+s.forgeToken) @@ -250,7 +254,7 @@ func (s *PrepSubsystem) createLabel(ctx 
context.Context, org, repo, name string) "color": colour, }) - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo) + url := core.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo) req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload)) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", "token "+s.forgeToken) diff --git a/pkg/agentic/events.go b/pkg/agentic/events.go index ac66c90..e549e84 100644 --- a/pkg/agentic/events.go +++ b/pkg/agentic/events.go @@ -4,13 +4,16 @@ package agentic import ( "encoding/json" - "os" - "path/filepath" + "io" "time" + + core "dappco.re/go/core" ) // CompletionEvent is emitted when a dispatched agent finishes. // Written to ~/.core/workspace/events.jsonl as append-only log. +// +// event := agentic.CompletionEvent{Type: "agent_completed", Agent: "codex", Workspace: "go-io-123", Status: "completed"} type CompletionEvent struct { Type string `json:"type"` Agent string `json:"agent"` @@ -19,14 +22,12 @@ type CompletionEvent struct { Timestamp string `json:"timestamp"` } -// emitCompletionEvent appends a completion event to the events log. -// The plugin's hook watches this file to notify the orchestrating agent. -// Status should be the actual terminal state: completed, failed, or blocked. -func emitCompletionEvent(agent, workspace, status string) { - eventsFile := filepath.Join(WorkspaceRoot(), "events.jsonl") +// emitEvent appends an event to the events log. 
+func emitEvent(eventType, agent, workspace, status string) { + eventsFile := core.JoinPath(WorkspaceRoot(), "events.jsonl") event := CompletionEvent{ - Type: "agent_completed", + Type: eventType, Agent: agent, Workspace: workspace, Status: status, @@ -39,10 +40,21 @@ func emitCompletionEvent(agent, workspace, status string) { } // Append to events log - f, err := os.OpenFile(eventsFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { + r := fs.Append(eventsFile) + if !r.OK { return } - defer f.Close() - f.Write(append(data, '\n')) + wc := r.Value.(io.WriteCloser) + defer wc.Close() + wc.Write(append(data, '\n')) +} + +// emitStartEvent logs that an agent has been spawned. +func emitStartEvent(agent, workspace string) { + emitEvent("agent_started", agent, workspace, "running") +} + +// emitCompletionEvent logs that an agent has finished. +func emitCompletionEvent(agent, workspace, status string) { + emitEvent("agent_completed", agent, workspace, status) } diff --git a/pkg/agentic/ingest.go b/pkg/agentic/ingest.go index fee8452..d033258 100644 --- a/pkg/agentic/ingest.go +++ b/pkg/agentic/ingest.go @@ -5,13 +5,9 @@ package agentic import ( "bytes" "encoding/json" - "fmt" "net/http" - "os" - "path/filepath" - "strings" - coreio "dappco.re/go/core/io" + core "dappco.re/go/core" ) // ingestFindings reads the agent output log and creates issues via the API @@ -23,20 +19,20 @@ func (s *PrepSubsystem) ingestFindings(wsDir string) { } // Read the log file - logFiles, _ := filepath.Glob(filepath.Join(wsDir, "agent-*.log")) + logFiles := core.PathGlob(core.JoinPath(wsDir, "agent-*.log")) if len(logFiles) == 0 { return } - contentStr, err := coreio.Local.Read(logFiles[0]) - if err != nil || len(contentStr) < 100 { + r := fs.Read(logFiles[0]) + if !r.OK || len(r.Value.(string)) < 100 { return } - body := contentStr + body := r.Value.(string) // Skip quota errors - if strings.Contains(body, "QUOTA_EXHAUSTED") || strings.Contains(body, "QuotaError") { + if 
core.Contains(body, "QUOTA_EXHAUSTED") || core.Contains(body, "QuotaError") { return } @@ -49,13 +45,13 @@ func (s *PrepSubsystem) ingestFindings(wsDir string) { // Determine issue type from the template used issueType := "task" priority := "normal" - if strings.Contains(body, "security") || strings.Contains(body, "Security") { + if core.Contains(body, "security") || core.Contains(body, "Security") { issueType = "bug" priority = "high" } // Create a single issue per repo with all findings in the body - title := fmt.Sprintf("Scan findings for %s (%d items)", st.Repo, findings) + title := core.Sprintf("Scan findings for %s (%d items)", st.Repo, findings) // Truncate body to reasonable size for issue description description := body @@ -78,7 +74,7 @@ func countFileRefs(body string) int { } if j < len(body) && body[j] == '`' { ref := body[i+1 : j] - if strings.Contains(ref, ".go:") || strings.Contains(ref, ".php:") { + if core.Contains(ref, ".go:") || core.Contains(ref, ".php:") { count++ } } @@ -94,12 +90,11 @@ func (s *PrepSubsystem) createIssueViaAPI(repo, title, description, issueType, p } // Read the agent API key from file - home, _ := os.UserHomeDir() - apiKeyStr, err := coreio.Local.Read(filepath.Join(home, ".claude", "agent-api.key")) - if err != nil { + r := fs.Read(core.JoinPath(core.Env("DIR_HOME"), ".claude", "agent-api.key")) + if !r.OK { return } - apiKey := strings.TrimSpace(apiKeyStr) + apiKey := core.Trim(r.Value.(string)) payload, _ := json.Marshal(map[string]string{ "title": title, diff --git a/pkg/agentic/mirror.go b/pkg/agentic/mirror.go index f56423f..2da4c7b 100644 --- a/pkg/agentic/mirror.go +++ b/pkg/agentic/mirror.go @@ -4,26 +4,28 @@ package agentic import ( "context" - "fmt" + "encoding/json" "os" "os/exec" - "path/filepath" - "strings" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) // --- agentic_mirror tool --- // MirrorInput is the input for agentic_mirror. 
+// +// input := agentic.MirrorInput{Repo: "go-io", DryRun: true, MaxFiles: 50} type MirrorInput struct { - Repo string `json:"repo,omitempty"` // Specific repo, or empty for all - DryRun bool `json:"dry_run,omitempty"` // Preview without pushing + Repo string `json:"repo,omitempty"` // Specific repo, or empty for all + DryRun bool `json:"dry_run,omitempty"` // Preview without pushing MaxFiles int `json:"max_files,omitempty"` // Max files per PR (default 50, CodeRabbit limit) } // MirrorOutput is the output for agentic_mirror. +// +// out := agentic.MirrorOutput{Success: true, Count: 1, Synced: []agentic.MirrorSync{{Repo: "go-io"}}} type MirrorOutput struct { Success bool `json:"success"` Synced []MirrorSync `json:"synced"` @@ -32,6 +34,8 @@ type MirrorOutput struct { } // MirrorSync records one repo sync. +// +// sync := agentic.MirrorSync{Repo: "go-io", CommitsAhead: 3, FilesChanged: 12} type MirrorSync struct { Repo string `json:"repo"` CommitsAhead int `json:"commits_ahead"` @@ -56,10 +60,9 @@ func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, inpu basePath := s.codePath if basePath == "" { - home, _ := os.UserHomeDir() - basePath = filepath.Join(home, "Code", "core") + basePath = core.JoinPath(core.Env("DIR_HOME"), "Code", "core") } else { - basePath = filepath.Join(basePath, "core") + basePath = core.JoinPath(basePath, "core") } // Build list of repos to sync @@ -74,7 +77,7 @@ func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, inpu var skipped []string for _, repo := range repos { - repoDir := filepath.Join(basePath, repo) + repoDir := core.JoinPath(basePath, repo) // Check if github remote exists if !hasRemote(repoDir, "github") { @@ -88,7 +91,7 @@ func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, inpu fetchCmd.Run() // Check how far ahead local default branch is vs github - localBase := gitDefaultBranch(repoDir) + localBase := DefaultBranch(repoDir) ahead := commitsAhead(repoDir, 
"github/main", localBase) if ahead == 0 { continue // Already in sync @@ -105,7 +108,7 @@ func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, inpu // Skip if too many files for one PR if files > maxFiles { - sync.Skipped = fmt.Sprintf("%d files exceeds limit of %d", files, maxFiles) + sync.Skipped = core.Sprintf("%d files exceeds limit of %d", files, maxFiles) synced = append(synced, sync) continue } @@ -120,11 +123,11 @@ func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, inpu ensureDevBranch(repoDir) // Push local main to github dev (explicit main, not HEAD) - base := gitDefaultBranch(repoDir) + base := DefaultBranch(repoDir) pushCmd := exec.CommandContext(ctx, "git", "push", "github", base+":refs/heads/dev", "--force") pushCmd.Dir = repoDir if err := pushCmd.Run(); err != nil { - sync.Skipped = fmt.Sprintf("push failed: %v", err) + sync.Skipped = core.Sprintf("push failed: %v", err) synced = append(synced, sync) continue } @@ -133,7 +136,7 @@ func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, inpu // Create PR: dev → main on GitHub prURL, err := s.createGitHubPR(ctx, repoDir, repo, ahead, files) if err != nil { - sync.Skipped = fmt.Sprintf("PR creation failed: %v", err) + sync.Skipped = core.Sprintf("PR creation failed: %v", err) } else { sync.PRURL = prURL } @@ -152,11 +155,11 @@ func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, inpu // createGitHubPR creates a PR from dev → main using the gh CLI. 
func (s *PrepSubsystem) createGitHubPR(ctx context.Context, repoDir, repo string, commits, files int) (string, error) { // Check if there's already an open PR from dev - ghRepo := fmt.Sprintf("%s/%s", GitHubOrg(), repo) + ghRepo := core.Sprintf("%s/%s", GitHubOrg(), repo) checkCmd := exec.CommandContext(ctx, "gh", "pr", "list", "--repo", ghRepo, "--head", "dev", "--state", "open", "--json", "url", "--limit", "1") checkCmd.Dir = repoDir out, err := checkCmd.Output() - if err == nil && strings.Contains(string(out), "url") { + if err == nil && core.Contains(string(out), "url") { // PR already exists — extract URL // Format: [{"url":"https://..."}] url := extractJSONField(string(out), "url") @@ -166,7 +169,7 @@ func (s *PrepSubsystem) createGitHubPR(ctx context.Context, repoDir, repo string } // Build PR body - body := fmt.Sprintf("## Forge → GitHub Sync\n\n"+ + body := core.Sprintf("## Forge → GitHub Sync\n\n"+ "**Commits:** %d\n"+ "**Files changed:** %d\n\n"+ "Automated sync from Forge (forge.lthn.ai) to GitHub mirror.\n"+ @@ -175,7 +178,7 @@ func (s *PrepSubsystem) createGitHubPR(ctx context.Context, repoDir, repo string "Co-Authored-By: Virgil ", commits, files) - title := fmt.Sprintf("[sync] %s: %d commits, %d files", repo, commits, files) + title := core.Sprintf("[sync] %s: %d commits, %d files", repo, commits, files) prCmd := exec.CommandContext(ctx, "gh", "pr", "create", "--repo", ghRepo, @@ -187,11 +190,11 @@ func (s *PrepSubsystem) createGitHubPR(ctx context.Context, repoDir, repo string prCmd.Dir = repoDir prOut, err := prCmd.CombinedOutput() if err != nil { - return "", coreerr.E("createGitHubPR", string(prOut), err) + return "", core.E("createGitHubPR", string(prOut), err) } // gh pr create outputs the PR URL on the last line - lines := strings.Split(strings.TrimSpace(string(prOut)), "\n") + lines := core.Split(core.Trim(string(prOut)), "\n") if len(lines) > 0 { return lines[len(lines)-1], nil } @@ -222,9 +225,7 @@ func commitsAhead(repoDir, base, head 
string) int { if err != nil { return 0 } - var n int - fmt.Sscanf(strings.TrimSpace(string(out)), "%d", &n) - return n + return parseInt(string(out)) } // filesChanged returns the number of files changed between two refs. @@ -235,7 +236,7 @@ func filesChanged(repoDir, base, head string) int { if err != nil { return 0 } - lines := strings.Split(strings.TrimSpace(string(out)), "\n") + lines := core.Split(core.Trim(string(out)), "\n") if len(lines) == 1 && lines[0] == "" { return 0 } @@ -244,17 +245,18 @@ func filesChanged(repoDir, base, head string) int { // listLocalRepos returns repo names that exist as directories in basePath. func (s *PrepSubsystem) listLocalRepos(basePath string) []string { - entries, err := os.ReadDir(basePath) - if err != nil { + r := fs.List(basePath) + if !r.OK { return nil } + entries := r.Value.([]os.DirEntry) var repos []string for _, e := range entries { if !e.IsDir() { continue } // Must have a .git directory - if _, err := os.Stat(filepath.Join(basePath, e.Name(), ".git")); err == nil { + if fs.IsDir(core.JoinPath(basePath, e.Name(), ".git")) { repos = append(repos, e.Name()) } } @@ -263,17 +265,24 @@ func (s *PrepSubsystem) listLocalRepos(basePath string) []string { // extractJSONField extracts a simple string field from JSON array output. 
func extractJSONField(jsonStr, field string) string { - // Quick and dirty — works for gh CLI output like [{"url":"https://..."}] - key := fmt.Sprintf(`"%s":"`, field) - idx := strings.Index(jsonStr, key) - if idx < 0 { + if jsonStr == "" || field == "" { return "" } - start := idx + len(key) - end := strings.Index(jsonStr[start:], `"`) - if end < 0 { - return "" - } - return jsonStr[start : start+end] -} + var list []map[string]any + if err := json.Unmarshal([]byte(jsonStr), &list); err == nil { + for _, item := range list { + if value, ok := item[field].(string); ok { + return value + } + } + } + + var item map[string]any + if err := json.Unmarshal([]byte(jsonStr), &item); err != nil { + return "" + } + + value, _ := item[field].(string) + return value +} diff --git a/pkg/agentic/paths.go b/pkg/agentic/paths.go index 737878d..4277a79 100644 --- a/pkg/agentic/paths.go +++ b/pkg/agentic/paths.go @@ -3,55 +3,85 @@ package agentic import ( - "os" "os/exec" - "path/filepath" - "strings" + "strconv" + "unsafe" + + core "dappco.re/go/core" ) +// fs provides unrestricted filesystem access (root "/" = no sandbox). +// +// r := fs.Read("/etc/hostname") +// if r.OK { core.Print(nil, "%s", r.Value.(string)) } +var fs = newFs("/") + +// newFs creates a core.Fs with the given root directory. +// Root "/" means unrestricted access (same as coreio.Local). +func newFs(root string) *core.Fs { + type fsRoot struct{ root string } + f := &core.Fs{} + (*fsRoot)(unsafe.Pointer(f)).root = root + return f +} + +// LocalFs returns an unrestricted filesystem instance for use by other packages. +// +// r := agentic.LocalFs().Read("/tmp/agent-status.json") +// if r.OK { core.Print(nil, "%s", r.Value.(string)) } +func LocalFs() *core.Fs { return fs } + // WorkspaceRoot returns the root directory for agent workspaces. // Checks CORE_WORKSPACE env var first, falls back to ~/Code/.core/workspace. 
+// +// wsDir := core.JoinPath(agentic.WorkspaceRoot(), "go-io-1774149757") func WorkspaceRoot() string { - return filepath.Join(CoreRoot(), "workspace") + return core.JoinPath(CoreRoot(), "workspace") } // CoreRoot returns the root directory for core ecosystem files. // Checks CORE_WORKSPACE env var first, falls back to ~/Code/.core. +// +// root := agentic.CoreRoot() func CoreRoot() string { - if root := os.Getenv("CORE_WORKSPACE"); root != "" { + if root := core.Env("CORE_WORKSPACE"); root != "" { return root } - home, _ := os.UserHomeDir() - return filepath.Join(home, "Code", ".core") + return core.JoinPath(core.Env("DIR_HOME"), "Code", ".core") } // PlansRoot returns the root directory for agent plans. +// +// plansDir := agentic.PlansRoot() func PlansRoot() string { - return filepath.Join(CoreRoot(), "plans") + return core.JoinPath(CoreRoot(), "plans") } // AgentName returns the name of this agent based on hostname. // Checks AGENT_NAME env var first. +// +// name := agentic.AgentName() // "cladius" on Snider's Mac, "charon" elsewhere func AgentName() string { - if name := os.Getenv("AGENT_NAME"); name != "" { + if name := core.Env("AGENT_NAME"); name != "" { return name } - hostname, _ := os.Hostname() - h := strings.ToLower(hostname) - if strings.Contains(h, "snider") || strings.Contains(h, "studio") || strings.Contains(h, "mac") { + h := core.Lower(core.Env("HOSTNAME")) + if core.Contains(h, "snider") || core.Contains(h, "studio") || core.Contains(h, "mac") { return "cladius" } return "charon" } -// gitDefaultBranch detects the default branch of a repo (main, master, etc.). -func gitDefaultBranch(repoDir string) string { +// DefaultBranch detects the default branch of a repo (main, master, etc.). 
+// +// base := agentic.DefaultBranch("./src") +func DefaultBranch(repoDir string) string { cmd := exec.Command("git", "symbolic-ref", "refs/remotes/origin/HEAD", "--short") cmd.Dir = repoDir if out, err := cmd.Output(); err == nil { - ref := strings.TrimSpace(string(out)) - if strings.HasPrefix(ref, "origin/") { - return strings.TrimPrefix(ref, "origin/") + ref := core.Trim(string(out)) + if core.HasPrefix(ref, "origin/") { + return core.TrimPrefix(ref, "origin/") } return ref } @@ -66,9 +96,19 @@ func gitDefaultBranch(repoDir string) string { } // GitHubOrg returns the GitHub org for mirror operations. +// +// org := agentic.GitHubOrg() // "dAppCore" func GitHubOrg() string { - if org := os.Getenv("GITHUB_ORG"); org != "" { + if org := core.Env("GITHUB_ORG"); org != "" { return org } return "dAppCore" } + +func parseInt(value string) int { + n, err := strconv.Atoi(core.Trim(value)) + if err != nil { + return 0 + } + return n +} diff --git a/pkg/agentic/paths_test.go b/pkg/agentic/paths_test.go index 4735d5b..1bf8216 100644 --- a/pkg/agentic/paths_test.go +++ b/pkg/agentic/paths_test.go @@ -111,6 +111,16 @@ func TestExtractJSONField_Good(t *testing.T) { assert.Equal(t, "https://github.com/dAppCore/go-io/pull/1", extractJSONField(json, "url")) } +func TestExtractJSONField_Good_Object(t *testing.T) { + json := `{"url":"https://github.com/dAppCore/go-io/pull/2"}` + assert.Equal(t, "https://github.com/dAppCore/go-io/pull/2", extractJSONField(json, "url")) +} + +func TestExtractJSONField_Good_PrettyPrinted(t *testing.T) { + json := "[\n {\n \"url\": \"https://github.com/dAppCore/go-io/pull/3\"\n }\n]" + assert.Equal(t, "https://github.com/dAppCore/go-io/pull/3", extractJSONField(json, "url")) +} + func TestExtractJSONField_Bad_Missing(t *testing.T) { assert.Equal(t, "", extractJSONField(`{"name":"test"}`, "url")) assert.Equal(t, "", extractJSONField("", "url")) diff --git a/pkg/agentic/plan.go b/pkg/agentic/plan.go index 9980c6c..f3931b5 100644 --- 
a/pkg/agentic/plan.go +++ b/pkg/agentic/plan.go @@ -8,20 +8,20 @@ import ( "encoding/hex" "encoding/json" "os" - "path/filepath" - "strings" "time" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) // Plan represents an implementation plan for agent work. +// +// plan := &Plan{ID: "migrate-core-abc", Title: "Migrate Core", Status: "draft", Objective: "..."} +// writePlan(PlansRoot(), plan) type Plan struct { ID string `json:"id"` Title string `json:"title"` - Status string `json:"status"` // draft, ready, in_progress, needs_verification, verified, approved + Status string `json:"status"` // draft, ready, in_progress, needs_verification, verified, approved Repo string `json:"repo,omitempty"` Org string `json:"org,omitempty"` Objective string `json:"objective"` @@ -33,10 +33,12 @@ type Plan struct { } // Phase represents a phase within an implementation plan. +// +// phase := agentic.Phase{Number: 1, Name: "Migrate strings", Status: "in_progress"} type Phase struct { Number int `json:"number"` Name string `json:"name"` - Status string `json:"status"` // pending, in_progress, done + Status string `json:"status"` // pending, in_progress, done Criteria []string `json:"criteria,omitempty"` Tests int `json:"tests,omitempty"` Notes string `json:"notes,omitempty"` @@ -45,6 +47,8 @@ type Phase struct { // --- Input/Output types --- // PlanCreateInput is the input for agentic_plan_create. +// +// input := agentic.PlanCreateInput{Title: "Migrate pkg/agentic", Objective: "Use Core primitives everywhere"} type PlanCreateInput struct { Title string `json:"title"` Objective string `json:"objective"` @@ -55,6 +59,8 @@ type PlanCreateInput struct { } // PlanCreateOutput is the output for agentic_plan_create. 
+// +// out := agentic.PlanCreateOutput{Success: true, ID: "migrate-pkg-agentic-abc123"} type PlanCreateOutput struct { Success bool `json:"success"` ID string `json:"id"` @@ -62,17 +68,23 @@ type PlanCreateOutput struct { } // PlanReadInput is the input for agentic_plan_read. +// +// input := agentic.PlanReadInput{ID: "migrate-pkg-agentic-abc123"} type PlanReadInput struct { ID string `json:"id"` } // PlanReadOutput is the output for agentic_plan_read. +// +// out := agentic.PlanReadOutput{Success: true, Plan: agentic.Plan{ID: "migrate-pkg-agentic-abc123"}} type PlanReadOutput struct { Success bool `json:"success"` Plan Plan `json:"plan"` } // PlanUpdateInput is the input for agentic_plan_update. +// +// input := agentic.PlanUpdateInput{ID: "migrate-pkg-agentic-abc123", Status: "verified"} type PlanUpdateInput struct { ID string `json:"id"` Status string `json:"status,omitempty"` @@ -84,29 +96,39 @@ type PlanUpdateInput struct { } // PlanUpdateOutput is the output for agentic_plan_update. +// +// out := agentic.PlanUpdateOutput{Success: true, Plan: agentic.Plan{Status: "verified"}} type PlanUpdateOutput struct { Success bool `json:"success"` Plan Plan `json:"plan"` } // PlanDeleteInput is the input for agentic_plan_delete. +// +// input := agentic.PlanDeleteInput{ID: "migrate-pkg-agentic-abc123"} type PlanDeleteInput struct { ID string `json:"id"` } // PlanDeleteOutput is the output for agentic_plan_delete. +// +// out := agentic.PlanDeleteOutput{Success: true, Deleted: "migrate-pkg-agentic-abc123"} type PlanDeleteOutput struct { Success bool `json:"success"` Deleted string `json:"deleted"` } // PlanListInput is the input for agentic_plan_list. +// +// input := agentic.PlanListInput{Repo: "go-io", Status: "ready"} type PlanListInput struct { Status string `json:"status,omitempty"` Repo string `json:"repo,omitempty"` } // PlanListOutput is the output for agentic_plan_list. 
+// +// out := agentic.PlanListOutput{Success: true, Count: 2, Plans: []agentic.Plan{{ID: "migrate-pkg-agentic-abc123"}}} type PlanListOutput struct { Success bool `json:"success"` Count int `json:"count"` @@ -146,10 +168,10 @@ func (s *PrepSubsystem) registerPlanTools(server *mcp.Server) { func (s *PrepSubsystem) planCreate(_ context.Context, _ *mcp.CallToolRequest, input PlanCreateInput) (*mcp.CallToolResult, PlanCreateOutput, error) { if input.Title == "" { - return nil, PlanCreateOutput{}, coreerr.E("planCreate", "title is required", nil) + return nil, PlanCreateOutput{}, core.E("planCreate", "title is required", nil) } if input.Objective == "" { - return nil, PlanCreateOutput{}, coreerr.E("planCreate", "objective is required", nil) + return nil, PlanCreateOutput{}, core.E("planCreate", "objective is required", nil) } id := generatePlanID(input.Title) @@ -178,7 +200,7 @@ func (s *PrepSubsystem) planCreate(_ context.Context, _ *mcp.CallToolRequest, in path, err := writePlan(PlansRoot(), &plan) if err != nil { - return nil, PlanCreateOutput{}, coreerr.E("planCreate", "failed to write plan", err) + return nil, PlanCreateOutput{}, core.E("planCreate", "failed to write plan", err) } return nil, PlanCreateOutput{ @@ -190,7 +212,7 @@ func (s *PrepSubsystem) planCreate(_ context.Context, _ *mcp.CallToolRequest, in func (s *PrepSubsystem) planRead(_ context.Context, _ *mcp.CallToolRequest, input PlanReadInput) (*mcp.CallToolResult, PlanReadOutput, error) { if input.ID == "" { - return nil, PlanReadOutput{}, coreerr.E("planRead", "id is required", nil) + return nil, PlanReadOutput{}, core.E("planRead", "id is required", nil) } plan, err := readPlan(PlansRoot(), input.ID) @@ -206,7 +228,7 @@ func (s *PrepSubsystem) planRead(_ context.Context, _ *mcp.CallToolRequest, inpu func (s *PrepSubsystem) planUpdate(_ context.Context, _ *mcp.CallToolRequest, input PlanUpdateInput) (*mcp.CallToolResult, PlanUpdateOutput, error) { if input.ID == "" { - return nil, PlanUpdateOutput{}, 
coreerr.E("planUpdate", "id is required", nil) + return nil, PlanUpdateOutput{}, core.E("planUpdate", "id is required", nil) } plan, err := readPlan(PlansRoot(), input.ID) @@ -217,7 +239,7 @@ func (s *PrepSubsystem) planUpdate(_ context.Context, _ *mcp.CallToolRequest, in // Apply partial updates if input.Status != "" { if !validPlanStatus(input.Status) { - return nil, PlanUpdateOutput{}, coreerr.E("planUpdate", "invalid status: "+input.Status+" (valid: draft, ready, in_progress, needs_verification, verified, approved)", nil) + return nil, PlanUpdateOutput{}, core.E("planUpdate", "invalid status: "+input.Status+" (valid: draft, ready, in_progress, needs_verification, verified, approved)", nil) } plan.Status = input.Status } @@ -240,7 +262,7 @@ func (s *PrepSubsystem) planUpdate(_ context.Context, _ *mcp.CallToolRequest, in plan.UpdatedAt = time.Now() if _, err := writePlan(PlansRoot(), plan); err != nil { - return nil, PlanUpdateOutput{}, coreerr.E("planUpdate", "failed to write plan", err) + return nil, PlanUpdateOutput{}, core.E("planUpdate", "failed to write plan", err) } return nil, PlanUpdateOutput{ @@ -251,16 +273,17 @@ func (s *PrepSubsystem) planUpdate(_ context.Context, _ *mcp.CallToolRequest, in func (s *PrepSubsystem) planDelete(_ context.Context, _ *mcp.CallToolRequest, input PlanDeleteInput) (*mcp.CallToolResult, PlanDeleteOutput, error) { if input.ID == "" { - return nil, PlanDeleteOutput{}, coreerr.E("planDelete", "id is required", nil) + return nil, PlanDeleteOutput{}, core.E("planDelete", "id is required", nil) } path := planPath(PlansRoot(), input.ID) - if _, err := os.Stat(path); err != nil { - return nil, PlanDeleteOutput{}, coreerr.E("planDelete", "plan not found: "+input.ID, nil) + if !fs.Exists(path) { + return nil, PlanDeleteOutput{}, core.E("planDelete", "plan not found: "+input.ID, nil) } - if err := coreio.Local.Delete(path); err != nil { - return nil, PlanDeleteOutput{}, coreerr.E("planDelete", "failed to delete plan", err) + if r := 
fs.Delete(path); !r.OK { + err, _ := r.Value.(error) + return nil, PlanDeleteOutput{}, core.E("planDelete", "failed to delete plan", err) } return nil, PlanDeleteOutput{ @@ -271,22 +294,24 @@ func (s *PrepSubsystem) planDelete(_ context.Context, _ *mcp.CallToolRequest, in func (s *PrepSubsystem) planList(_ context.Context, _ *mcp.CallToolRequest, input PlanListInput) (*mcp.CallToolResult, PlanListOutput, error) { dir := PlansRoot() - if err := coreio.Local.EnsureDir(dir); err != nil { - return nil, PlanListOutput{}, coreerr.E("planList", "failed to access plans directory", err) + if r := fs.EnsureDir(dir); !r.OK { + err, _ := r.Value.(error) + return nil, PlanListOutput{}, core.E("planList", "failed to access plans directory", err) } - entries, err := os.ReadDir(dir) - if err != nil { - return nil, PlanListOutput{}, coreerr.E("planList", "failed to read plans directory", err) + r := fs.List(dir) + if !r.OK { + return nil, PlanListOutput{}, nil } + entries := r.Value.([]os.DirEntry) var plans []Plan for _, entry := range entries { - if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".json") { + if entry.IsDir() || !core.HasSuffix(entry.Name(), ".json") { continue } - id := strings.TrimSuffix(entry.Name(), ".json") + id := core.TrimSuffix(entry.Name(), ".json") plan, err := readPlan(dir, id) if err != nil { continue @@ -314,36 +339,15 @@ func (s *PrepSubsystem) planList(_ context.Context, _ *mcp.CallToolRequest, inpu func planPath(dir, id string) string { // Sanitise ID to prevent path traversal - safe := filepath.Base(id) + safe := core.PathBase(id) if safe == "." || safe == ".." 
|| safe == "" { safe = "invalid" } - return filepath.Join(dir, safe+".json") + return core.JoinPath(dir, safe+".json") } func generatePlanID(title string) string { - slug := strings.Map(func(r rune) rune { - if r >= 'a' && r <= 'z' || r >= '0' && r <= '9' || r == '-' { - return r - } - if r >= 'A' && r <= 'Z' { - return r + 32 - } - if r == ' ' { - return '-' - } - return -1 - }, title) - - // Trim consecutive dashes and cap length - for strings.Contains(slug, "--") { - slug = strings.ReplaceAll(slug, "--", "-") - } - slug = strings.Trim(slug, "-") - if len(slug) > 30 { - slug = slug[:30] - } - slug = strings.TrimRight(slug, "-") + slug := sanitisePlanSlug(title) // Append short random suffix for uniqueness b := make([]byte, 3) @@ -352,21 +356,22 @@ func generatePlanID(title string) string { } func readPlan(dir, id string) (*Plan, error) { - data, err := coreio.Local.Read(planPath(dir, id)) - if err != nil { - return nil, coreerr.E("readPlan", "plan not found: "+id, nil) + r := fs.Read(planPath(dir, id)) + if !r.OK { + return nil, core.E("readPlan", "plan not found: "+id, nil) } var plan Plan - if err := json.Unmarshal([]byte(data), &plan); err != nil { - return nil, coreerr.E("readPlan", "failed to parse plan "+id, err) + if err := json.Unmarshal([]byte(r.Value.(string)), &plan); err != nil { + return nil, core.E("readPlan", "failed to parse plan "+id, err) } return &plan, nil } func writePlan(dir string, plan *Plan) (string, error) { - if err := coreio.Local.EnsureDir(dir); err != nil { - return "", coreerr.E("writePlan", "failed to create plans directory", err) + if r := fs.EnsureDir(dir); !r.OK { + err, _ := r.Value.(error) + return "", core.E("writePlan", "failed to create plans directory", err) } path := planPath(dir, plan.ID) @@ -375,7 +380,11 @@ func writePlan(dir string, plan *Plan) (string, error) { return "", err } - return path, coreio.Local.Write(path, string(data)) + if r := fs.Write(path, string(data)); !r.OK { + err, _ := r.Value.(error) + return 
"", core.E("writePlan", "failed to write plan", err) + } + return path, nil } func validPlanStatus(status string) bool { diff --git a/pkg/agentic/plan_test.go b/pkg/agentic/plan_test.go index 3f59669..1440c24 100644 --- a/pkg/agentic/plan_test.go +++ b/pkg/agentic/plan_test.go @@ -7,7 +7,6 @@ import ( "strings" "testing" - coreio "dappco.re/go/core/io" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -31,7 +30,7 @@ func TestWritePlan_Good(t *testing.T) { assert.Equal(t, filepath.Join(dir, "test-plan-abc123.json"), path) // Verify file exists - assert.True(t, coreio.Local.IsFile(path)) + assert.True(t, fs.IsFile(path)) } func TestWritePlan_Good_CreatesDirectory(t *testing.T) { @@ -96,7 +95,7 @@ func TestReadPlan_Bad_NotFound(t *testing.T) { func TestReadPlan_Bad_InvalidJSON(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "bad-json.json"), "{broken")) + require.True(t, fs.Write(filepath.Join(dir, "bad-json.json"), "{broken").OK) _, err := readPlan(dir, "bad-json") assert.Error(t, err) @@ -205,7 +204,7 @@ func TestWritePlan_Good_OverwriteExisting(t *testing.T) { func TestReadPlan_Ugly_EmptyFile(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "empty.json"), "")) + require.True(t, fs.Write(filepath.Join(dir, "empty.json"), "").OK) _, err := readPlan(dir, "empty") assert.Error(t, err) diff --git a/pkg/agentic/pr.go b/pkg/agentic/pr.go index e1dcbd1..b564459 100644 --- a/pkg/agentic/pr.go +++ b/pkg/agentic/pr.go @@ -3,32 +3,31 @@ package agentic import ( - "bytes" "context" - "encoding/json" - "fmt" - "net/http" - "os" "os/exec" - "path/filepath" - "strings" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" + "dappco.re/go/core/forge" + forge_types "dappco.re/go/core/forge/types" "github.com/modelcontextprotocol/go-sdk/mcp" ) // --- agentic_create_pr --- // CreatePRInput is the input for agentic_create_pr. 
+// +// input := agentic.CreatePRInput{Workspace: "go-io-1773581873", Title: "Fix watcher panic"} type CreatePRInput struct { - Workspace string `json:"workspace"` // workspace name (e.g. "mcp-1773581873") - Title string `json:"title,omitempty"` // PR title (default: task description) - Body string `json:"body,omitempty"` // PR body (default: auto-generated) - Base string `json:"base,omitempty"` // base branch (default: "main") - DryRun bool `json:"dry_run,omitempty"` // preview without creating + Workspace string `json:"workspace"` // workspace name (e.g. "mcp-1773581873") + Title string `json:"title,omitempty"` // PR title (default: task description) + Body string `json:"body,omitempty"` // PR body (default: auto-generated) + Base string `json:"base,omitempty"` // base branch (default: "main") + DryRun bool `json:"dry_run,omitempty"` // preview without creating } // CreatePROutput is the output for agentic_create_pr. +// +// out := agentic.CreatePROutput{Success: true, PRURL: "https://forge.example/core/go-io/pulls/12", PRNum: 12} type CreatePROutput struct { Success bool `json:"success"` PRURL string `json:"pr_url,omitempty"` @@ -48,34 +47,34 @@ func (s *PrepSubsystem) registerCreatePRTool(server *mcp.Server) { func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, input CreatePRInput) (*mcp.CallToolResult, CreatePROutput, error) { if input.Workspace == "" { - return nil, CreatePROutput{}, coreerr.E("createPR", "workspace is required", nil) + return nil, CreatePROutput{}, core.E("createPR", "workspace is required", nil) } if s.forgeToken == "" { - return nil, CreatePROutput{}, coreerr.E("createPR", "no Forge token configured", nil) + return nil, CreatePROutput{}, core.E("createPR", "no Forge token configured", nil) } - wsDir := filepath.Join(WorkspaceRoot(), input.Workspace) - srcDir := filepath.Join(wsDir, "src") + wsDir := core.JoinPath(WorkspaceRoot(), input.Workspace) + repoDir := core.JoinPath(wsDir, "repo") - if _, err := 
os.Stat(srcDir); err != nil { - return nil, CreatePROutput{}, coreerr.E("createPR", "workspace not found: "+input.Workspace, nil) + if !fs.IsDir(core.JoinPath(repoDir, ".git")) { + return nil, CreatePROutput{}, core.E("createPR", "workspace not found: "+input.Workspace, nil) } // Read workspace status for repo, branch, issue context st, err := readStatus(wsDir) if err != nil { - return nil, CreatePROutput{}, coreerr.E("createPR", "no status.json", err) + return nil, CreatePROutput{}, core.E("createPR", "no status.json", err) } if st.Branch == "" { // Detect branch from git branchCmd := exec.CommandContext(ctx, "git", "rev-parse", "--abbrev-ref", "HEAD") - branchCmd.Dir = srcDir + branchCmd.Dir = repoDir out, err := branchCmd.Output() if err != nil { - return nil, CreatePROutput{}, coreerr.E("createPR", "failed to detect branch", err) + return nil, CreatePROutput{}, core.E("createPR", "failed to detect branch", err) } - st.Branch = strings.TrimSpace(string(out)) + st.Branch = core.Trim(string(out)) } org := st.Org @@ -84,7 +83,7 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in } base := input.Base if base == "" { - base = "main" + base = "dev" } // Build PR title @@ -93,7 +92,7 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in title = st.Task } if title == "" { - title = fmt.Sprintf("Agent work on %s", st.Branch) + title = core.Sprintf("Agent work on %s", st.Branch) } // Build PR body @@ -112,18 +111,18 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in } // Push branch to Forge (origin is the local clone, not Forge) - forgeRemote := fmt.Sprintf("ssh://git@forge.lthn.ai:2223/%s/%s.git", org, st.Repo) + forgeRemote := core.Sprintf("ssh://git@forge.lthn.ai:2223/%s/%s.git", org, st.Repo) pushCmd := exec.CommandContext(ctx, "git", "push", forgeRemote, st.Branch) - pushCmd.Dir = srcDir + pushCmd.Dir = repoDir pushOut, err := pushCmd.CombinedOutput() if err != nil { - 
return nil, CreatePROutput{}, coreerr.E("createPR", "git push failed: "+string(pushOut), err) + return nil, CreatePROutput{}, core.E("createPR", "git push failed: "+string(pushOut), err) } // Create PR via Forge API prURL, prNum, err := s.forgeCreatePR(ctx, org, st.Repo, st.Branch, base, title, body) if err != nil { - return nil, CreatePROutput{}, coreerr.E("createPR", "failed to create PR", err) + return nil, CreatePROutput{}, core.E("createPR", "failed to create PR", err) } // Update status with PR URL @@ -132,7 +131,7 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in // Comment on issue if tracked if st.Issue > 0 { - comment := fmt.Sprintf("Pull request created: %s", prURL) + comment := core.Sprintf("Pull request created: %s", prURL) s.commentOnIssue(ctx, org, st.Repo, st.Issue, comment) } @@ -148,82 +147,53 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in } func (s *PrepSubsystem) buildPRBody(st *WorkspaceStatus) string { - var b strings.Builder + b := core.NewBuilder() b.WriteString("## Summary\n\n") if st.Task != "" { b.WriteString(st.Task) b.WriteString("\n\n") } if st.Issue > 0 { - b.WriteString(fmt.Sprintf("Closes #%d\n\n", st.Issue)) + b.WriteString(core.Sprintf("Closes #%d\n\n", st.Issue)) } - b.WriteString(fmt.Sprintf("**Agent:** %s\n", st.Agent)) - b.WriteString(fmt.Sprintf("**Runs:** %d\n", st.Runs)) + b.WriteString(core.Sprintf("**Agent:** %s\n", st.Agent)) + b.WriteString(core.Sprintf("**Runs:** %d\n", st.Runs)) b.WriteString("\n---\n*Created by agentic dispatch*\n") return b.String() } func (s *PrepSubsystem) forgeCreatePR(ctx context.Context, org, repo, head, base, title, body string) (string, int, error) { - payload, _ := json.Marshal(map[string]any{ - "title": title, - "body": body, - "head": head, - "base": base, + pr, err := s.forge.Pulls.Create(ctx, forge.Params{"owner": org, "repo": repo}, &forge_types.CreatePullRequestOption{ + Title: title, + Body: body, + Head: head, + 
Base: base, }) - - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls", s.forgeURL, org, repo) - req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload)) - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", "token "+s.forgeToken) - - resp, err := s.client.Do(req) if err != nil { - return "", 0, coreerr.E("forgeCreatePR", "request failed", err) + return "", 0, core.E("forgeCreatePR", "create PR failed", err) } - defer resp.Body.Close() - - if resp.StatusCode != 201 { - var errBody map[string]any - json.NewDecoder(resp.Body).Decode(&errBody) - msg, _ := errBody["message"].(string) - return "", 0, coreerr.E("forgeCreatePR", fmt.Sprintf("HTTP %d: %s", resp.StatusCode, msg), nil) - } - - var pr struct { - Number int `json:"number"` - HTMLURL string `json:"html_url"` - } - json.NewDecoder(resp.Body).Decode(&pr) - - return pr.HTMLURL, pr.Number, nil + return pr.HTMLURL, int(pr.Index), nil } func (s *PrepSubsystem) commentOnIssue(ctx context.Context, org, repo string, issue int, comment string) { - payload, _ := json.Marshal(map[string]string{"body": comment}) - - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/comments", s.forgeURL, org, repo, issue) - req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload)) - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", "token "+s.forgeToken) - - resp, err := s.client.Do(req) - if err != nil { - return - } - resp.Body.Close() + s.forge.Issues.CreateComment(ctx, org, repo, int64(issue), comment) } // --- agentic_list_prs --- // ListPRsInput is the input for agentic_list_prs. 
+// +// input := agentic.ListPRsInput{Org: "core", Repo: "go-io", State: "open", Limit: 10} type ListPRsInput struct { Org string `json:"org,omitempty"` // forge org (default "core") - Repo string `json:"repo,omitempty"` // specific repo, or empty for all + Repo string `json:"repo,omitempty"` // specific repo, or empty for all State string `json:"state,omitempty"` // "open" (default), "closed", "all" Limit int `json:"limit,omitempty"` // max results (default 20) } // ListPRsOutput is the output for agentic_list_prs. +// +// out := agentic.ListPRsOutput{Success: true, Count: 2, PRs: []agentic.PRInfo{{Repo: "go-io", Number: 12}}} type ListPRsOutput struct { Success bool `json:"success"` Count int `json:"count"` @@ -231,6 +201,8 @@ type ListPRsOutput struct { } // PRInfo represents a pull request. +// +// pr := agentic.PRInfo{Repo: "go-io", Number: 12, Title: "Migrate pkg/fs", Branch: "agent/migrate-fs"} type PRInfo struct { Repo string `json:"repo"` Number int `json:"number"` @@ -253,7 +225,7 @@ func (s *PrepSubsystem) registerListPRsTool(server *mcp.Server) { func (s *PrepSubsystem) listPRs(ctx context.Context, _ *mcp.CallToolRequest, input ListPRsInput) (*mcp.CallToolResult, ListPRsOutput, error) { if s.forgeToken == "" { - return nil, ListPRsOutput{}, coreerr.E("listPRs", "no Forge token configured", nil) + return nil, ListPRsOutput{}, core.E("listPRs", "no Forge token configured", nil) } if input.Org == "" { @@ -303,54 +275,30 @@ func (s *PrepSubsystem) listPRs(ctx context.Context, _ *mcp.CallToolRequest, inp } func (s *PrepSubsystem) listRepoPRs(ctx context.Context, org, repo, state string) ([]PRInfo, error) { - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls?state=%s&limit=10", - s.forgeURL, org, repo, state) - req, _ := http.NewRequestWithContext(ctx, "GET", url, nil) - req.Header.Set("Authorization", "token "+s.forgeToken) - - resp, err := s.client.Do(req) + prs, err := s.forge.Pulls.ListAll(ctx, forge.Params{"owner": org, "repo": repo}) if err != nil { - 
return nil, coreerr.E("listRepoPRs", "failed to list PRs for "+repo, err) + return nil, core.E("listRepoPRs", "failed to list PRs for "+repo, err) } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return nil, coreerr.E("listRepoPRs", fmt.Sprintf("HTTP %d listing PRs for %s", resp.StatusCode, repo), nil) - } - - var prs []struct { - Number int `json:"number"` - Title string `json:"title"` - State string `json:"state"` - Mergeable bool `json:"mergeable"` - HTMLURL string `json:"html_url"` - Head struct { - Ref string `json:"ref"` - } `json:"head"` - Base struct { - Ref string `json:"ref"` - } `json:"base"` - User struct { - Login string `json:"login"` - } `json:"user"` - Labels []struct { - Name string `json:"name"` - } `json:"labels"` - } - json.NewDecoder(resp.Body).Decode(&prs) var result []PRInfo for _, pr := range prs { + if state != "" && state != "all" && string(pr.State) != state { + continue + } var labels []string for _, l := range pr.Labels { labels = append(labels, l.Name) } + author := "" + if pr.User != nil { + author = pr.User.UserName + } result = append(result, PRInfo{ Repo: repo, - Number: pr.Number, + Number: int(pr.Index), Title: pr.Title, - State: pr.State, - Author: pr.User.Login, + State: string(pr.State), + Author: author, Branch: pr.Head.Ref, Base: pr.Base.Ref, Labels: labels, diff --git a/pkg/agentic/prep.go b/pkg/agentic/prep.go index ddc863b..da5675c 100644 --- a/pkg/agentic/prep.go +++ b/pkg/agentic/prep.go @@ -1,83 +1,101 @@ // SPDX-License-Identifier: EUPL-1.2 // Package agentic provides MCP tools for agent orchestration. -// Prepares sandboxed workspaces and dispatches subagents. +// Prepares workspaces and dispatches subagents. 
package agentic import ( "context" "encoding/base64" "encoding/json" - "fmt" - "io" + goio "io" "net/http" - "os" "os/exec" - "path/filepath" - "strings" "sync" "time" "dappco.re/go/agent/pkg/lib" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" + "dappco.re/go/core/forge" + coremcp "forge.lthn.ai/core/mcp/pkg/mcp" "github.com/modelcontextprotocol/go-sdk/mcp" "gopkg.in/yaml.v3" ) -// CompletionNotifier is called when an agent completes, to trigger -// immediate notifications to connected clients. +// CompletionNotifier receives agent lifecycle events directly from dispatch. +// No filesystem polling — events flow in-memory. +// +// prep.SetCompletionNotifier(monitor) type CompletionNotifier interface { - Poke() + AgentStarted(agent, repo, workspace string) + AgentCompleted(agent, repo, workspace, status string) } -// PrepSubsystem provides agentic MCP tools. +// PrepSubsystem provides agentic MCP tools for workspace orchestration. +// +// sub := agentic.NewPrep() +// sub.RegisterTools(server) type PrepSubsystem struct { - forgeURL string - forgeToken string - brainURL string - brainKey string - specsPath string - codePath string - client *http.Client - onComplete CompletionNotifier - drainMu sync.Mutex // protects drainQueue from concurrent execution + forge *forge.Forge + forgeURL string + forgeToken string + brainURL string + brainKey string + codePath string + client *http.Client + onComplete CompletionNotifier + drainMu sync.Mutex + pokeCh chan struct{} + frozen bool + backoff map[string]time.Time // pool → paused until + failCount map[string]int // pool → consecutive fast failures } +var _ coremcp.Subsystem = (*PrepSubsystem)(nil) + // NewPrep creates an agentic subsystem. 
+// +// sub := agentic.NewPrep() +// sub.SetCompletionNotifier(monitor) func NewPrep() *PrepSubsystem { - home, _ := os.UserHomeDir() + home := core.Env("DIR_HOME") - forgeToken := os.Getenv("FORGE_TOKEN") + forgeToken := core.Env("FORGE_TOKEN") if forgeToken == "" { - forgeToken = os.Getenv("GITEA_TOKEN") + forgeToken = core.Env("GITEA_TOKEN") } - brainKey := os.Getenv("CORE_BRAIN_KEY") + brainKey := core.Env("CORE_BRAIN_KEY") if brainKey == "" { - if data, err := coreio.Local.Read(filepath.Join(home, ".claude", "brain.key")); err == nil { - brainKey = strings.TrimSpace(data) + if r := fs.Read(core.JoinPath(home, ".claude", "brain.key")); r.OK { + brainKey = core.Trim(r.Value.(string)) } } + forgeURL := envOr("FORGE_URL", "https://forge.lthn.ai") + return &PrepSubsystem{ - forgeURL: envOr("FORGE_URL", "https://forge.lthn.ai"), + forge: forge.NewForge(forgeURL, forgeToken), + forgeURL: forgeURL, forgeToken: forgeToken, brainURL: envOr("CORE_BRAIN_URL", "https://api.lthn.sh"), brainKey: brainKey, - specsPath: envOr("SPECS_PATH", filepath.Join(home, "Code", "specs")), - codePath: envOr("CODE_PATH", filepath.Join(home, "Code")), + codePath: envOr("CODE_PATH", core.JoinPath(home, "Code")), client: &http.Client{Timeout: 30 * time.Second}, + backoff: make(map[string]time.Time), + failCount: make(map[string]int), } } // SetCompletionNotifier wires up the monitor for immediate push on agent completion. 
+// +// prep.SetCompletionNotifier(monitor) func (s *PrepSubsystem) SetCompletionNotifier(n CompletionNotifier) { s.onComplete = n } func envOr(key, fallback string) string { - if v := os.Getenv(key); v != "" { + if v := core.Env(key); v != "" { return v } return fallback @@ -90,7 +108,7 @@ func (s *PrepSubsystem) Name() string { return "agentic" } func (s *PrepSubsystem) RegisterTools(server *mcp.Server) { mcp.AddTool(server, &mcp.Tool{ Name: "agentic_prep_workspace", - Description: "Prepare a sandboxed agent workspace with TODO.md, CLAUDE.md, CONTEXT.md, CONSUMERS.md, RECENT.md, and a git clone of the target repo in src/.", + Description: "Prepare an agent workspace: clone repo, create branch, build prompt with context.", }, s.prepWorkspace) s.registerDispatchTool(server) @@ -103,6 +121,7 @@ func (s *PrepSubsystem) RegisterTools(server *mcp.Server) { s.registerRemoteDispatchTool(server) s.registerRemoteStatusTool(server) s.registerReviewQueueTool(server) + s.registerShutdownTools(server) mcp.AddTool(server, &mcp.Tool{ Name: "agentic_scan", @@ -119,33 +138,62 @@ func (s *PrepSubsystem) Shutdown(_ context.Context) error { return nil } // --- Input/Output types --- // PrepInput is the input for agentic_prep_workspace. +// One of Issue, PR, Branch, or Tag is required. +// +// input := agentic.PrepInput{Repo: "go-io", Issue: 15, Task: "Migrate to Core primitives"} type PrepInput struct { - Repo string `json:"repo"` // e.g. "go-io" + Repo string `json:"repo"` // required: e.g. 
"go-io" Org string `json:"org,omitempty"` // default "core" - Issue int `json:"issue,omitempty"` // Forge issue number - Task string `json:"task,omitempty"` // Task description (if no issue) - Template string `json:"template,omitempty"` // Prompt template: conventions, security, coding (default: coding) - PlanTemplate string `json:"plan_template,omitempty"` // Plan template slug: bug-fix, code-review, new-feature, refactor, feature-port - Variables map[string]string `json:"variables,omitempty"` // Template variable substitution - Persona string `json:"persona,omitempty"` // Persona slug: engineering/backend-architect, testing/api-tester, etc. + Task string `json:"task,omitempty"` // task description + Agent string `json:"agent,omitempty"` // agent type + Issue int `json:"issue,omitempty"` // Forge issue → workspace: task-{num}/ + PR int `json:"pr,omitempty"` // PR number → workspace: pr-{num}/ + Branch string `json:"branch,omitempty"` // branch → workspace: {branch}/ + Tag string `json:"tag,omitempty"` // tag → workspace: {tag}/ (immutable) + Template string `json:"template,omitempty"` // prompt template slug + PlanTemplate string `json:"plan_template,omitempty"` // plan template slug + Variables map[string]string `json:"variables,omitempty"` // template variable substitution + Persona string `json:"persona,omitempty"` // persona slug + DryRun bool `json:"dry_run,omitempty"` // preview without executing } // PrepOutput is the output for agentic_prep_workspace. 
+// +// out := agentic.PrepOutput{Success: true, WorkspaceDir: ".core/workspace/core/go-io/task-15"} type PrepOutput struct { - Success bool `json:"success"` - WorkspaceDir string `json:"workspace_dir"` - Branch string `json:"branch"` - WikiPages int `json:"wiki_pages"` - SpecFiles int `json:"spec_files"` - Memories int `json:"memories"` - Consumers int `json:"consumers"` - ClaudeMd bool `json:"claude_md"` - GitLog int `json:"git_log_entries"` + Success bool `json:"success"` + WorkspaceDir string `json:"workspace_dir"` + RepoDir string `json:"repo_dir"` + Branch string `json:"branch"` + Prompt string `json:"prompt,omitempty"` + Memories int `json:"memories"` + Consumers int `json:"consumers"` + Resumed bool `json:"resumed"` +} + +// workspaceDir resolves the workspace path from the input identifier. +// +// dir := workspaceDir("core", "go-io", PrepInput{Issue: 15}) +// // → ".core/workspace/core/go-io/task-15" +func workspaceDir(org, repo string, input PrepInput) (string, error) { + base := core.JoinPath(WorkspaceRoot(), org, repo) + switch { + case input.PR > 0: + return core.JoinPath(base, core.Sprintf("pr-%d", input.PR)), nil + case input.Issue > 0: + return core.JoinPath(base, core.Sprintf("task-%d", input.Issue)), nil + case input.Branch != "": + return core.JoinPath(base, input.Branch), nil + case input.Tag != "": + return core.JoinPath(base, input.Tag), nil + default: + return "", core.E("workspaceDir", "one of issue, pr, branch, or tag is required", nil) + } } func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolRequest, input PrepInput) (*mcp.CallToolResult, PrepOutput, error) { if input.Repo == "" { - return nil, PrepOutput{}, coreerr.E("prepWorkspace", "repo is required", nil) + return nil, PrepOutput{}, core.E("prepWorkspace", "repo is required", nil) } if input.Org == "" { input.Org = "core" @@ -154,199 +202,348 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques input.Template = "coding" } - // 
Workspace root: .core/workspace/{repo}-{timestamp}/ - wsRoot := WorkspaceRoot() - wsName := fmt.Sprintf("%s-%d", input.Repo, time.Now().UnixNano()) - wsDir := filepath.Join(wsRoot, wsName) - - // Create workspace structure - // kb/ and specs/ will be created inside src/ after clone - - // Ensure workspace directory exists - if err := os.MkdirAll(wsDir, 0755); err != nil { - return nil, PrepOutput{}, coreerr.E("prep", "failed to create workspace dir", err) + // Resolve workspace directory from identifier + wsDir, err := workspaceDir(input.Org, input.Repo, input) + if err != nil { + return nil, PrepOutput{}, err } - out := PrepOutput{WorkspaceDir: wsDir} + repoDir := core.JoinPath(wsDir, "repo") + metaDir := core.JoinPath(wsDir, ".meta") + out := PrepOutput{WorkspaceDir: wsDir, RepoDir: repoDir} // Source repo path — sanitise to prevent path traversal - repoName := filepath.Base(input.Repo) // strips ../ and absolute paths + repoName := core.PathBase(input.Repo) if repoName == "." || repoName == ".." || repoName == "" { - return nil, PrepOutput{}, coreerr.E("prep", "invalid repo name: "+input.Repo, nil) + return nil, PrepOutput{}, core.E("prep", "invalid repo name: "+input.Repo, nil) } - repoPath := filepath.Join(s.codePath, "core", repoName) + repoPath := core.JoinPath(s.codePath, input.Org, repoName) - // 1. 
Clone repo into src/ and create feature branch - srcDir := filepath.Join(wsDir, "src") - cloneCmd := exec.CommandContext(ctx, "git", "clone", repoPath, srcDir) - if err := cloneCmd.Run(); err != nil { - return nil, PrepOutput{}, coreerr.E("prep", "git clone failed for "+input.Repo, err) + // Ensure meta directory exists + if r := fs.EnsureDir(metaDir); !r.OK { + return nil, PrepOutput{}, core.E("prep", "failed to create meta dir", nil) } - // Create feature branch - taskSlug := strings.Map(func(r rune) rune { - if r >= 'a' && r <= 'z' || r >= '0' && r <= '9' || r == '-' { - return r + // Check for resume: if repo/ already has .git, skip clone + resumed := fs.IsDir(core.JoinPath(repoDir, ".git")) + out.Resumed = resumed + + // Extract default workspace template (go.work etc.) + lib.ExtractWorkspace("default", wsDir, &lib.WorkspaceData{ + Repo: input.Repo, + Branch: "", + Task: input.Task, + Agent: input.Agent, + }) + + if !resumed { + // Clone repo into repo/ + cloneCmd := exec.CommandContext(ctx, "git", "clone", repoPath, repoDir) + if cloneErr := cloneCmd.Run(); cloneErr != nil { + return nil, PrepOutput{}, core.E("prep", "git clone failed for "+input.Repo, cloneErr) } - if r >= 'A' && r <= 'Z' { - return r + 32 // lowercase + + // Create feature branch + taskSlug := sanitiseBranchSlug(input.Task, 40) + if taskSlug == "" { + if input.Issue > 0 { + taskSlug = core.Sprintf("issue-%d", input.Issue) + } else if input.PR > 0 { + taskSlug = core.Sprintf("pr-%d", input.PR) + } else { + taskSlug = core.Sprintf("work-%d", time.Now().Unix()) + } } - return '-' - }, input.Task) - if len(taskSlug) > 40 { - taskSlug = taskSlug[:40] - } - taskSlug = strings.Trim(taskSlug, "-") - if taskSlug == "" { - // Fallback for issue-only dispatches with no task text - taskSlug = fmt.Sprintf("issue-%d", input.Issue) - if input.Issue == 0 { - taskSlug = fmt.Sprintf("work-%d", time.Now().Unix()) + branchName := core.Sprintf("agent/%s", taskSlug) + + branchCmd := exec.CommandContext(ctx, 
"git", "checkout", "-b", branchName) + branchCmd.Dir = repoDir + if branchErr := branchCmd.Run(); branchErr != nil { + return nil, PrepOutput{}, core.E("prep.branch", core.Sprintf("failed to create branch %q", branchName), branchErr) + } + out.Branch = branchName + } else { + // Resume: read branch from existing checkout + branchCmd := exec.CommandContext(ctx, "git", "rev-parse", "--abbrev-ref", "HEAD") + branchCmd.Dir = repoDir + if branchOut, branchErr := branchCmd.Output(); branchErr == nil { + out.Branch = core.Trim(string(branchOut)) } } - branchName := fmt.Sprintf("agent/%s", taskSlug) - branchCmd := exec.CommandContext(ctx, "git", "checkout", "-b", branchName) - branchCmd.Dir = srcDir - if err := branchCmd.Run(); err != nil { - return nil, PrepOutput{}, coreerr.E("prep.branch", fmt.Sprintf("failed to create branch %q", branchName), err) - } - out.Branch = branchName - - // Create context dirs inside src/ - coreio.Local.EnsureDir(filepath.Join(srcDir, "kb")) - coreio.Local.EnsureDir(filepath.Join(srcDir, "specs")) - - // Remote stays as local clone origin — agent cannot push to forge. - // Reviewer pulls changes from workspace and pushes after verification. - - // 2. 
Extract workspace template - wsTmpl := "default" - if input.Template == "security" { - wsTmpl = "security" - } else if input.Template == "verify" || input.Template == "conventions" { - wsTmpl = "review" - } - - promptContent, _ := lib.Prompt(input.Template) - personaContent := "" - if input.Persona != "" { - personaContent, _ = lib.Persona(input.Persona) - } - flowContent, _ := lib.Flow(detectLanguage(repoPath)) - - wsData := &lib.WorkspaceData{ - Repo: input.Repo, - Branch: branchName, - Task: input.Task, - Agent: "agent", - Language: detectLanguage(repoPath), - Prompt: promptContent, - Persona: personaContent, - Flow: flowContent, - BuildCmd: detectBuildCmd(repoPath), - TestCmd: detectTestCmd(repoPath), - } - - lib.ExtractWorkspace(wsTmpl, srcDir, wsData) - out.ClaudeMd = true - - // Copy repo's own CLAUDE.md over template if it exists - claudeMdPath := filepath.Join(repoPath, "CLAUDE.md") - if data, err := coreio.Local.Read(claudeMdPath); err == nil { - coreio.Local.Write(filepath.Join(srcDir, "CLAUDE.md"), data) - } - // Copy GEMINI.md from core/agent (ethics framework for all agents) - agentGeminiMd := filepath.Join(s.codePath, "core", "agent", "GEMINI.md") - if data, err := coreio.Local.Read(agentGeminiMd); err == nil { - coreio.Local.Write(filepath.Join(srcDir, "GEMINI.md"), data) - } - - // 3. Generate TODO.md from issue (overrides template) - if input.Issue > 0 { - s.generateTodo(ctx, input.Org, input.Repo, input.Issue, wsDir) - } - - // 4. Generate CONTEXT.md from OpenBrain - out.Memories = s.generateContext(ctx, input.Repo, wsDir) - - // 5. Generate CONSUMERS.md - out.Consumers = s.findConsumers(input.Repo, wsDir) - - // 6. Generate RECENT.md - out.GitLog = s.gitLog(repoPath, wsDir) - - // 7. Pull wiki pages into kb/ - out.WikiPages = s.pullWiki(ctx, input.Org, input.Repo, wsDir) - - // 8. Copy spec files into specs/ - out.SpecFiles = s.copySpecs(wsDir) - - // 9. 
Write PLAN.md from template (if specified) - if input.PlanTemplate != "" { - s.writePlanFromTemplate(input.PlanTemplate, input.Variables, input.Task, wsDir) - } - - // 10. Write prompt template - s.writePromptTemplate(input.Template, wsDir) + // Build the rich prompt with all context + out.Prompt, out.Memories, out.Consumers = s.buildPrompt(ctx, input, out.Branch, repoPath) out.Success = true return nil, out, nil } -// --- Prompt templates --- +// --- Public API for CLI testing --- -func (s *PrepSubsystem) writePromptTemplate(template, wsDir string) { - prompt, err := lib.Template(template) - if err != nil { - // Fallback to default template - prompt, _ = lib.Template("default") - if prompt == "" { - prompt = "Read TODO.md and complete the task. Work in src/.\n" +// TestPrepWorkspace exposes prepWorkspace for CLI testing. +// +// _, out, err := prep.TestPrepWorkspace(ctx, input) +func (s *PrepSubsystem) TestPrepWorkspace(ctx context.Context, input PrepInput) (*mcp.CallToolResult, PrepOutput, error) { + return s.prepWorkspace(ctx, nil, input) +} + +// TestBuildPrompt exposes buildPrompt for CLI testing. +// +// prompt, memories, consumers := prep.TestBuildPrompt(ctx, input, "dev", repoPath) +func (s *PrepSubsystem) TestBuildPrompt(ctx context.Context, input PrepInput, branch, repoPath string) (string, int, int) { + return s.buildPrompt(ctx, input, branch, repoPath) +} + +// --- Prompt Building --- + +// buildPrompt assembles all context into a single prompt string. +// Context is gathered from: persona, flow, issue, brain, consumers, git log, wiki, plan. 
+func (s *PrepSubsystem) buildPrompt(ctx context.Context, input PrepInput, branch, repoPath string) (string, int, int) { + b := core.NewBuilder() + memories := 0 + consumers := 0 + + // Task + b.WriteString("TASK: ") + b.WriteString(input.Task) + b.WriteString("\n\n") + + // Repo info + b.WriteString(core.Sprintf("REPO: %s/%s on branch %s\n", input.Org, input.Repo, branch)) + b.WriteString(core.Sprintf("LANGUAGE: %s\n", detectLanguage(repoPath))) + b.WriteString(core.Sprintf("BUILD: %s\n", detectBuildCmd(repoPath))) + b.WriteString(core.Sprintf("TEST: %s\n\n", detectTestCmd(repoPath))) + + // Persona + if input.Persona != "" { + if r := lib.Persona(input.Persona); r.OK { + b.WriteString("PERSONA:\n") + b.WriteString(r.Value.(string)) + b.WriteString("\n\n") } } - coreio.Local.Write(filepath.Join(wsDir, "src", "PROMPT.md"), prompt) + // Flow + if r := lib.Flow(detectLanguage(repoPath)); r.OK { + b.WriteString("WORKFLOW:\n") + b.WriteString(r.Value.(string)) + b.WriteString("\n\n") + } + + // Issue body + if input.Issue > 0 { + if body := s.getIssueBody(ctx, input.Org, input.Repo, input.Issue); body != "" { + b.WriteString("ISSUE:\n") + b.WriteString(body) + b.WriteString("\n\n") + } + } + + // Brain recall + if recall, count := s.brainRecall(ctx, input.Repo); recall != "" { + b.WriteString("CONTEXT (from OpenBrain):\n") + b.WriteString(recall) + b.WriteString("\n\n") + memories = count + } + + // Consumers + if list, count := s.findConsumersList(input.Repo); list != "" { + b.WriteString("CONSUMERS (modules that import this repo):\n") + b.WriteString(list) + b.WriteString("\n\n") + consumers = count + } + + // Recent git log + if log := s.getGitLog(repoPath); log != "" { + b.WriteString("RECENT CHANGES:\n```\n") + b.WriteString(log) + b.WriteString("```\n\n") + } + + // Plan template + if input.PlanTemplate != "" { + if plan := s.renderPlan(input.PlanTemplate, input.Variables, input.Task); plan != "" { + b.WriteString("PLAN:\n") + b.WriteString(plan) + 
b.WriteString("\n\n") + } + } + + // Constraints + b.WriteString("CONSTRAINTS:\n") + b.WriteString("- Read CODEX.md for coding conventions (if it exists)\n") + b.WriteString("- Read CLAUDE.md for project-specific instructions (if it exists)\n") + b.WriteString("- Commit with conventional commit format: type(scope): description\n") + b.WriteString("- Co-Authored-By: Virgil \n") + b.WriteString("- Run build and tests before committing\n") + + return b.String(), memories, consumers } -// --- Plan template rendering --- +// --- Context Helpers (return strings, not write files) --- -// writePlanFromTemplate loads a YAML plan template, substitutes variables, -// and writes PLAN.md into the workspace src/ directory. -func (s *PrepSubsystem) writePlanFromTemplate(templateSlug string, variables map[string]string, task string, wsDir string) { - // Load template from embedded prompts package - data, err := lib.Template(templateSlug) +func (s *PrepSubsystem) getIssueBody(ctx context.Context, org, repo string, issue int) string { + idx := core.Sprintf("%d", issue) + iss, err := s.forge.Issues.Get(ctx, forge.Params{"owner": org, "repo": repo, "index": idx}) if err != nil { - return // Template not found, skip silently + return "" + } + return core.Sprintf("# %s\n\n%s", iss.Title, iss.Body) +} + +func (s *PrepSubsystem) brainRecall(ctx context.Context, repo string) (string, int) { + if s.brainKey == "" { + return "", 0 } - content := data + body, _ := json.Marshal(map[string]any{ + "query": "architecture conventions key interfaces for " + repo, + "top_k": 10, + "project": repo, + "agent_id": "cladius", + }) - // Substitute variables ({{variable_name}} → value) + req, _ := http.NewRequestWithContext(ctx, "POST", s.brainURL+"/v1/brain/recall", core.NewReader(string(body))) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + req.Header.Set("Authorization", "Bearer "+s.brainKey) + + resp, err := s.client.Do(req) + if err != nil || 
resp.StatusCode != 200 { + if resp != nil { + resp.Body.Close() + } + return "", 0 + } + defer resp.Body.Close() + + respData, _ := goio.ReadAll(resp.Body) + var result struct { + Memories []map[string]any `json:"memories"` + } + json.Unmarshal(respData, &result) + + if len(result.Memories) == 0 { + return "", 0 + } + + b := core.NewBuilder() + for i, mem := range result.Memories { + memType, _ := mem["type"].(string) + memContent, _ := mem["content"].(string) + memProject, _ := mem["project"].(string) + b.WriteString(core.Sprintf("%d. [%s] %s: %s\n", i+1, memType, memProject, memContent)) + } + + return b.String(), len(result.Memories) +} + +func (s *PrepSubsystem) findConsumersList(repo string) (string, int) { + goWorkPath := core.JoinPath(s.codePath, "go.work") + modulePath := "forge.lthn.ai/core/" + repo + + r := fs.Read(goWorkPath) + if !r.OK { + return "", 0 + } + workData := r.Value.(string) + + var consumers []string + for _, line := range core.Split(workData, "\n") { + line = core.Trim(line) + if !core.HasPrefix(line, "./") { + continue + } + dir := core.JoinPath(s.codePath, core.TrimPrefix(line, "./")) + goMod := core.JoinPath(dir, "go.mod") + mr := fs.Read(goMod) + if !mr.OK { + continue + } + modData := mr.Value.(string) + if core.Contains(modData, modulePath) && !core.HasPrefix(modData, "module "+modulePath) { + consumers = append(consumers, core.PathBase(dir)) + } + } + + if len(consumers) == 0 { + return "", 0 + } + + b := core.NewBuilder() + for _, c := range consumers { + b.WriteString("- " + c + "\n") + } + b.WriteString(core.Sprintf("Breaking change risk: %d consumers.\n", len(consumers))) + + return b.String(), len(consumers) +} + +func (s *PrepSubsystem) getGitLog(repoPath string) string { + cmd := exec.Command("git", "log", "--oneline", "-20") + cmd.Dir = repoPath + output, err := cmd.Output() + if err != nil { + return "" + } + return core.Trim(string(output)) +} + +func (s *PrepSubsystem) pullWikiContent(ctx context.Context, org, repo 
string) string { + pages, err := s.forge.Wiki.ListPages(ctx, org, repo) + if err != nil || len(pages) == 0 { + return "" + } + + b := core.NewBuilder() + for _, meta := range pages { + name := meta.SubURL + if name == "" { + name = meta.Title + } + page, pErr := s.forge.Wiki.GetPage(ctx, org, repo, name) + if pErr != nil || page.ContentBase64 == "" { + continue + } + content, _ := base64.StdEncoding.DecodeString(page.ContentBase64) + b.WriteString("### " + meta.Title + "\n\n") + b.WriteString(string(content)) + b.WriteString("\n\n") + } + return b.String() +} + +func (s *PrepSubsystem) renderPlan(templateSlug string, variables map[string]string, task string) string { + r := lib.Template(templateSlug) + if !r.OK { + return "" + } + + content := r.Value.(string) for key, value := range variables { - content = strings.ReplaceAll(content, "{{"+key+"}}", value) - content = strings.ReplaceAll(content, "{{ "+key+" }}", value) + content = core.Replace(content, "{{"+key+"}}", value) + content = core.Replace(content, "{{ "+key+" }}", value) } - // Parse the YAML to render as markdown var tmpl struct { Name string `yaml:"name"` Description string `yaml:"description"` Guidelines []string `yaml:"guidelines"` Phases []struct { - Name string `yaml:"name"` - Description string `yaml:"description"` - Tasks []any `yaml:"tasks"` + Name string `yaml:"name"` + Description string `yaml:"description"` + Tasks []any `yaml:"tasks"` } `yaml:"phases"` } if err := yaml.Unmarshal([]byte(content), &tmpl); err != nil { - return + return "" } - // Render as PLAN.md - var plan strings.Builder - plan.WriteString("# Plan: " + tmpl.Name + "\n\n") + plan := core.NewBuilder() + plan.WriteString("# " + tmpl.Name + "\n\n") if task != "" { plan.WriteString("**Task:** " + task + "\n\n") } @@ -363,254 +560,28 @@ func (s *PrepSubsystem) writePlanFromTemplate(templateSlug string, variables map } for i, phase := range tmpl.Phases { - plan.WriteString(fmt.Sprintf("## Phase %d: %s\n\n", i+1, phase.Name)) + 
plan.WriteString(core.Sprintf("## Phase %d: %s\n\n", i+1, phase.Name)) if phase.Description != "" { plan.WriteString(phase.Description + "\n\n") } - for _, task := range phase.Tasks { - switch t := task.(type) { + for _, t := range phase.Tasks { + switch v := t.(type) { case string: - plan.WriteString("- [ ] " + t + "\n") + plan.WriteString("- [ ] " + v + "\n") case map[string]any: - if name, ok := t["name"].(string); ok { + if name, ok := v["name"].(string); ok { plan.WriteString("- [ ] " + name + "\n") } } } - plan.WriteString("\n**Commit after completing this phase.**\n\n---\n\n") + plan.WriteString("\n") } - coreio.Local.Write(filepath.Join(wsDir, "src", "PLAN.md"), plan.String()) + return plan.String() } -// --- Helpers (unchanged) --- +// --- Detection helpers (unchanged) --- -func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) int { - if s.forgeToken == "" { - return 0 - } - - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/wiki/pages", s.forgeURL, org, repo) - req, _ := http.NewRequestWithContext(ctx, "GET", url, nil) - req.Header.Set("Authorization", "token "+s.forgeToken) - - resp, err := s.client.Do(req) - if err != nil { - return 0 - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return 0 - } - - var pages []struct { - Title string `json:"title"` - SubURL string `json:"sub_url"` - } - json.NewDecoder(resp.Body).Decode(&pages) - - count := 0 - for _, page := range pages { - subURL := page.SubURL - if subURL == "" { - subURL = page.Title - } - - pageURL := fmt.Sprintf("%s/api/v1/repos/%s/%s/wiki/page/%s", s.forgeURL, org, repo, subURL) - pageReq, _ := http.NewRequestWithContext(ctx, "GET", pageURL, nil) - pageReq.Header.Set("Authorization", "token "+s.forgeToken) - - pageResp, err := s.client.Do(pageReq) - if err != nil { - continue - } - if pageResp.StatusCode != 200 { - pageResp.Body.Close() - continue - } - - var pageData struct { - ContentBase64 string `json:"content_base64"` - } - 
json.NewDecoder(pageResp.Body).Decode(&pageData) - pageResp.Body.Close() - - if pageData.ContentBase64 == "" { - continue - } - - content, _ := base64.StdEncoding.DecodeString(pageData.ContentBase64) - filename := strings.Map(func(r rune) rune { - if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' { - return r - } - return '-' - }, page.Title) + ".md" - - coreio.Local.Write(filepath.Join(wsDir, "src", "kb", filename), string(content)) - count++ - } - - return count -} - -func (s *PrepSubsystem) copySpecs(wsDir string) int { - specFiles := []string{"AGENT_CONTEXT.md", "TASK_PROTOCOL.md"} - count := 0 - - for _, file := range specFiles { - src := filepath.Join(s.specsPath, file) - if data, err := coreio.Local.Read(src); err == nil { - coreio.Local.Write(filepath.Join(wsDir, "src", "specs", file), data) - count++ - } - } - - return count -} - -func (s *PrepSubsystem) generateContext(ctx context.Context, repo, wsDir string) int { - if s.brainKey == "" { - return 0 - } - - body, _ := json.Marshal(map[string]any{ - "query": "architecture conventions key interfaces for " + repo, - "top_k": 10, - "project": repo, - "agent_id": "cladius", - }) - - req, _ := http.NewRequestWithContext(ctx, "POST", s.brainURL+"/v1/brain/recall", strings.NewReader(string(body))) - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Accept", "application/json") - req.Header.Set("Authorization", "Bearer "+s.brainKey) - - resp, err := s.client.Do(req) - if err != nil { - return 0 - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return 0 - } - - respData, _ := io.ReadAll(resp.Body) - var result struct { - Memories []map[string]any `json:"memories"` - } - json.Unmarshal(respData, &result) - - var content strings.Builder - content.WriteString("# Context — " + repo + "\n\n") - content.WriteString("> Relevant knowledge from OpenBrain.\n\n") - - for i, mem := range result.Memories { - memType, _ := 
mem["type"].(string) - memContent, _ := mem["content"].(string) - memProject, _ := mem["project"].(string) - score, _ := mem["score"].(float64) - content.WriteString(fmt.Sprintf("### %d. %s [%s] (score: %.3f)\n\n%s\n\n", i+1, memProject, memType, score, memContent)) - } - - coreio.Local.Write(filepath.Join(wsDir, "src", "CONTEXT.md"), content.String()) - return len(result.Memories) -} - -func (s *PrepSubsystem) findConsumers(repo, wsDir string) int { - goWorkPath := filepath.Join(s.codePath, "go.work") - modulePath := "forge.lthn.ai/core/" + repo - - workData, err := coreio.Local.Read(goWorkPath) - if err != nil { - return 0 - } - - var consumers []string - for _, line := range strings.Split(workData, "\n") { - line = strings.TrimSpace(line) - if !strings.HasPrefix(line, "./") { - continue - } - dir := filepath.Join(s.codePath, strings.TrimPrefix(line, "./")) - goMod := filepath.Join(dir, "go.mod") - modData, err := coreio.Local.Read(goMod) - if err != nil { - continue - } - if strings.Contains(modData, modulePath) && !strings.HasPrefix(modData, "module "+modulePath) { - consumers = append(consumers, filepath.Base(dir)) - } - } - - if len(consumers) > 0 { - content := "# Consumers of " + repo + "\n\n" - content += "These modules import `" + modulePath + "`:\n\n" - for _, c := range consumers { - content += "- " + c + "\n" - } - content += fmt.Sprintf("\n**Breaking change risk: %d consumers.**\n", len(consumers)) - coreio.Local.Write(filepath.Join(wsDir, "src", "CONSUMERS.md"), content) - } - - return len(consumers) -} - -func (s *PrepSubsystem) gitLog(repoPath, wsDir string) int { - cmd := exec.Command("git", "log", "--oneline", "-20") - cmd.Dir = repoPath - output, err := cmd.Output() - if err != nil { - return 0 - } - - lines := strings.Split(strings.TrimSpace(string(output)), "\n") - if len(lines) > 0 && lines[0] != "" { - content := "# Recent Changes\n\n```\n" + string(output) + "```\n" - coreio.Local.Write(filepath.Join(wsDir, "src", "RECENT.md"), content) - } 
- - return len(lines) -} - -func (s *PrepSubsystem) generateTodo(ctx context.Context, org, repo string, issue int, wsDir string) { - if s.forgeToken == "" { - return - } - - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue) - req, _ := http.NewRequestWithContext(ctx, "GET", url, nil) - req.Header.Set("Authorization", "token "+s.forgeToken) - - resp, err := s.client.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return - } - - var issueData struct { - Title string `json:"title"` - Body string `json:"body"` - } - json.NewDecoder(resp.Body).Decode(&issueData) - - content := fmt.Sprintf("# TASK: %s\n\n", issueData.Title) - content += fmt.Sprintf("**Status:** ready\n") - content += fmt.Sprintf("**Source:** %s/%s/%s/issues/%d\n", s.forgeURL, org, repo, issue) - content += fmt.Sprintf("**Repo:** %s/%s\n\n---\n\n", org, repo) - content += "## Objective\n\n" + issueData.Body + "\n" - - coreio.Local.Write(filepath.Join(wsDir, "src", "TODO.md"), content) -} - -// detectLanguage guesses the primary language from repo contents. -// Checks in priority order (Go first) to avoid nondeterministic results. 
func detectLanguage(repoPath string) string { checks := []struct { file string @@ -625,7 +596,7 @@ func detectLanguage(repoPath string) string { {"Dockerfile", "docker"}, } for _, c := range checks { - if _, err := os.Stat(filepath.Join(repoPath, c.file)); err == nil { + if fs.IsFile(core.JoinPath(repoPath, c.file)) { return c.lang } } diff --git a/pkg/agentic/prep_test.go b/pkg/agentic/prep_test.go index 39dbadf..5afa5cd 100644 --- a/pkg/agentic/prep_test.go +++ b/pkg/agentic/prep_test.go @@ -3,12 +3,10 @@ package agentic import ( - "path/filepath" - "testing" - - coreio "dappco.re/go/core/io" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "path/filepath" + "testing" ) func TestEnvOr_Good_EnvSet(t *testing.T) { @@ -28,43 +26,43 @@ func TestEnvOr_Good_UnsetUsesFallback(t *testing.T) { func TestDetectLanguage_Good_Go(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "go.mod"), "module test")) + require.True(t, fs.Write(filepath.Join(dir, "go.mod"), "module test").OK) assert.Equal(t, "go", detectLanguage(dir)) } func TestDetectLanguage_Good_PHP(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "composer.json"), "{}")) + require.True(t, fs.Write(filepath.Join(dir, "composer.json"), "{}").OK) assert.Equal(t, "php", detectLanguage(dir)) } func TestDetectLanguage_Good_TypeScript(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "package.json"), "{}")) + require.True(t, fs.Write(filepath.Join(dir, "package.json"), "{}").OK) assert.Equal(t, "ts", detectLanguage(dir)) } func TestDetectLanguage_Good_Rust(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "Cargo.toml"), "[package]")) + require.True(t, fs.Write(filepath.Join(dir, "Cargo.toml"), "[package]").OK) assert.Equal(t, "rust", detectLanguage(dir)) } func TestDetectLanguage_Good_Python(t *testing.T) { dir := 
t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "requirements.txt"), "flask")) + require.True(t, fs.Write(filepath.Join(dir, "requirements.txt"), "flask").OK) assert.Equal(t, "py", detectLanguage(dir)) } func TestDetectLanguage_Good_Cpp(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "CMakeLists.txt"), "cmake_minimum_required")) + require.True(t, fs.Write(filepath.Join(dir, "CMakeLists.txt"), "cmake_minimum_required").OK) assert.Equal(t, "cpp", detectLanguage(dir)) } func TestDetectLanguage_Good_Docker(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "Dockerfile"), "FROM alpine")) + require.True(t, fs.Write(filepath.Join(dir, "Dockerfile"), "FROM alpine").OK) assert.Equal(t, "docker", detectLanguage(dir)) } @@ -90,7 +88,7 @@ func TestDetectBuildCmd_Good(t *testing.T) { for _, tt := range tests { t.Run(tt.file, func(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, tt.file), tt.content)) + require.True(t, fs.Write(filepath.Join(dir, tt.file), tt.content).OK) assert.Equal(t, tt.expected, detectBuildCmd(dir)) }) } @@ -118,7 +116,7 @@ func TestDetectTestCmd_Good(t *testing.T) { for _, tt := range tests { t.Run(tt.file, func(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, tt.file), tt.content)) + require.True(t, fs.Write(filepath.Join(dir, tt.file), tt.content).OK) assert.Equal(t, tt.expected, detectTestCmd(dir)) }) } @@ -129,6 +127,19 @@ func TestDetectTestCmd_Good_DefaultsToGo(t *testing.T) { assert.Equal(t, "go test ./...", detectTestCmd(dir)) } +func TestSanitiseBranchSlug_Good(t *testing.T) { + assert.Equal(t, "fix-login-bug", sanitiseBranchSlug("Fix login bug!", 40)) + assert.Equal(t, "trim-me", sanitiseBranchSlug("---Trim Me---", 40)) +} + +func TestSanitiseBranchSlug_Good_Truncates(t *testing.T) { + assert.Equal(t, "feature", sanitiseBranchSlug("feature--extra", 
7)) +} + +func TestSanitiseFilename_Good(t *testing.T) { + assert.Equal(t, "Core---Agent-Notes", sanitiseFilename("Core / Agent:Notes")) +} + func TestNewPrep_Good_Defaults(t *testing.T) { t.Setenv("FORGE_TOKEN", "") t.Setenv("GITEA_TOKEN", "") @@ -157,7 +168,6 @@ func TestNewPrep_Good_EnvOverrides(t *testing.T) { assert.Equal(t, "test-token", s.forgeToken) assert.Equal(t, "https://custom-brain.example.com", s.brainURL) assert.Equal(t, "brain-key-123", s.brainKey) - assert.Equal(t, "/custom/specs", s.specsPath) assert.Equal(t, "/custom/code", s.codePath) } @@ -184,9 +194,14 @@ func TestSetCompletionNotifier_Good(t *testing.T) { } type mockNotifier struct { - poked bool + started bool + completed bool } -func (m *mockNotifier) Poke() { - m.poked = true +func (m *mockNotifier) AgentStarted(agent, repo, workspace string) { + m.started = true +} + +func (m *mockNotifier) AgentCompleted(agent, repo, workspace, status string) { + m.completed = true } diff --git a/pkg/agentic/queue.go b/pkg/agentic/queue.go index b5dbf50..9e2003c 100644 --- a/pkg/agentic/queue.go +++ b/pkg/agentic/queue.go @@ -3,18 +3,17 @@ package agentic import ( - "fmt" - "os" - "path/filepath" - "strings" + "strconv" "syscall" "time" - coreio "dappco.re/go/core/io" + core "dappco.re/go/core" "gopkg.in/yaml.v3" ) // DispatchConfig controls agent dispatch behaviour. +// +// cfg := agentic.DispatchConfig{DefaultAgent: "claude", DefaultTemplate: "coding"} type DispatchConfig struct { DefaultAgent string `yaml:"default_agent"` DefaultTemplate string `yaml:"default_template"` @@ -22,37 +21,41 @@ type DispatchConfig struct { } // RateConfig controls pacing between task dispatches. +// +// rate := agentic.RateConfig{ResetUTC: "06:00", SustainedDelay: 120, BurstWindow: 2, BurstDelay: 15} type RateConfig struct { ResetUTC string `yaml:"reset_utc"` // Daily quota reset time (UTC), e.g. 
"06:00" - DailyLimit int `yaml:"daily_limit"` // Max requests per day (0 = unknown) - MinDelay int `yaml:"min_delay"` // Minimum seconds between task starts - SustainedDelay int `yaml:"sustained_delay"` // Delay when pacing for full-day use - BurstWindow int `yaml:"burst_window"` // Hours before reset where burst kicks in - BurstDelay int `yaml:"burst_delay"` // Delay during burst window + DailyLimit int `yaml:"daily_limit"` // Max requests per day (0 = unknown) + MinDelay int `yaml:"min_delay"` // Minimum seconds between task starts + SustainedDelay int `yaml:"sustained_delay"` // Delay when pacing for full-day use + BurstWindow int `yaml:"burst_window"` // Hours before reset where burst kicks in + BurstDelay int `yaml:"burst_delay"` // Delay during burst window } // AgentsConfig is the root of config/agents.yaml. +// +// cfg := agentic.AgentsConfig{Version: 1, Dispatch: agentic.DispatchConfig{DefaultAgent: "claude"}} type AgentsConfig struct { - Version int `yaml:"version"` - Dispatch DispatchConfig `yaml:"dispatch"` - Concurrency map[string]int `yaml:"concurrency"` + Version int `yaml:"version"` + Dispatch DispatchConfig `yaml:"dispatch"` + Concurrency map[string]int `yaml:"concurrency"` Rates map[string]RateConfig `yaml:"rates"` } // loadAgentsConfig reads config/agents.yaml from the code path. 
func (s *PrepSubsystem) loadAgentsConfig() *AgentsConfig { paths := []string{ - filepath.Join(CoreRoot(), "agents.yaml"), - filepath.Join(s.codePath, "core", "agent", "config", "agents.yaml"), + core.JoinPath(CoreRoot(), "agents.yaml"), + core.JoinPath(s.codePath, "core", "agent", "config", "agents.yaml"), } for _, path := range paths { - data, err := coreio.Local.Read(path) - if err != nil { + r := fs.Read(path) + if !r.OK { continue } var cfg AgentsConfig - if err := yaml.Unmarshal([]byte(data), &cfg); err != nil { + if err := yaml.Unmarshal([]byte(r.Value.(string)), &cfg); err != nil { continue } return &cfg @@ -75,10 +78,7 @@ func (s *PrepSubsystem) loadAgentsConfig() *AgentsConfig { func (s *PrepSubsystem) delayForAgent(agent string) time.Duration { cfg := s.loadAgentsConfig() // Strip variant suffix (claude:opus → claude) for config lookup - base := agent - if idx := strings.Index(agent, ":"); idx >= 0 { - base = agent[:idx] - } + base := baseAgent(agent) rate, ok := cfg.Rates[base] if !ok || rate.SustainedDelay == 0 { return 0 @@ -86,7 +86,15 @@ func (s *PrepSubsystem) delayForAgent(agent string) time.Duration { // Parse reset time resetHour, resetMin := 6, 0 - fmt.Sscanf(rate.ResetUTC, "%d:%d", &resetHour, &resetMin) + parts := core.Split(rate.ResetUTC, ":") + if len(parts) >= 2 { + if hour, err := strconv.Atoi(core.Trim(parts[0])); err == nil { + resetHour = hour + } + if min, err := strconv.Atoi(core.Trim(parts[1])); err == nil { + resetMin = min + } + } now := time.Now().UTC() resetToday := time.Date(now.Year(), now.Month(), now.Day(), resetHour, resetMin, 0, 0, time.UTC) @@ -107,21 +115,18 @@ func (s *PrepSubsystem) delayForAgent(agent string) time.Duration { } // countRunningByAgent counts running workspaces for a specific agent type. +// Scans both old (*/status.json) and new (*/*/*/status.json) workspace layouts. 
func (s *PrepSubsystem) countRunningByAgent(agent string) int { wsRoot := WorkspaceRoot() - entries, err := os.ReadDir(wsRoot) - if err != nil { - return 0 - } + // Scan both old and new workspace layouts + old := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json")) + new := core.PathGlob(core.JoinPath(wsRoot, "*", "*", "*", "status.json")) + paths := append(old, new...) count := 0 - for _, entry := range entries { - if !entry.IsDir() { - continue - } - - st, err := readStatus(filepath.Join(wsRoot, entry.Name())) + for _, statusPath := range paths { + st, err := readStatus(core.PathDir(statusPath)) if err != nil || st.Status != "running" { continue } @@ -139,7 +144,11 @@ func (s *PrepSubsystem) countRunningByAgent(agent string) int { // baseAgent strips the model variant (gemini:flash → gemini). func baseAgent(agent string) string { - return strings.SplitN(agent, ":", 2)[0] + // codex:gpt-5.3-codex-spark → codex-spark (separate pool) + if core.Contains(agent, "codex-spark") { + return "codex-spark" + } + return core.SplitN(agent, ":", 2)[0] } // canDispatchAgent checks if we're under the concurrency limit for a specific agent type. @@ -153,26 +162,32 @@ func (s *PrepSubsystem) canDispatchAgent(agent string) bool { return s.countRunningByAgent(base) < limit } -// drainQueue finds the oldest queued workspace and spawns it if a slot is available. -// Applies rate-based delay between spawns. Serialised via drainMu to prevent -// concurrent drainers from exceeding concurrency limits. +// drainQueue fills all available concurrency slots from queued workspaces. +// Loops until no slots remain or no queued tasks match. Serialised via drainMu. func (s *PrepSubsystem) drainQueue() { + if s.frozen { + return + } s.drainMu.Lock() defer s.drainMu.Unlock() + for s.drainOne() { + // keep filling slots + } +} + +// drainOne finds the oldest queued workspace and spawns it if a slot is available. +// Returns true if a task was spawned, false if nothing to do. 
+func (s *PrepSubsystem) drainOne() bool { wsRoot := WorkspaceRoot() - entries, err := os.ReadDir(wsRoot) - if err != nil { - return - } + // Scan both old and new workspace layouts + old := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json")) + deep := core.PathGlob(core.JoinPath(wsRoot, "*", "*", "*", "status.json")) + statusFiles := append(old, deep...) - for _, entry := range entries { - if !entry.IsDir() { - continue - } - - wsDir := filepath.Join(wsRoot, entry.Name()) + for _, statusPath := range statusFiles { + wsDir := core.PathDir(statusPath) st, err := readStatus(wsDir) if err != nil || st.Status != "queued" { continue @@ -182,6 +197,12 @@ func (s *PrepSubsystem) drainQueue() { continue } + // Skip if agent pool is in rate-limit backoff + pool := baseAgent(st.Agent) + if until, ok := s.backoff[pool]; ok && time.Now().Before(until) { + continue + } + // Apply rate delay before spawning delay := s.delayForAgent(st.Agent) if delay > 0 { @@ -193,10 +214,9 @@ func (s *PrepSubsystem) drainQueue() { continue } - srcDir := filepath.Join(wsDir, "src") - prompt := "Read PROMPT.md for instructions. All context files (CLAUDE.md, TODO.md, CONTEXT.md, CONSUMERS.md, RECENT.md) are in the current directory. Work in this directory." + prompt := "TASK: " + st.Task + "\n\nResume from where you left off. Read CODEX.md for conventions. Commit when done." 
- pid, _, err := s.spawnAgent(st.Agent, prompt, wsDir, srcDir) + pid, _, err := s.spawnAgent(st.Agent, prompt, wsDir) if err != nil { continue } @@ -206,6 +226,8 @@ func (s *PrepSubsystem) drainQueue() { st.Runs++ writeStatus(wsDir, st) - return + return true } + + return false } diff --git a/pkg/agentic/queue_test.go b/pkg/agentic/queue_test.go index cf91bfd..c3a1645 100644 --- a/pkg/agentic/queue_test.go +++ b/pkg/agentic/queue_test.go @@ -3,12 +3,10 @@ package agentic import ( - "path/filepath" - "testing" - - coreio "dappco.re/go/core/io" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "path/filepath" + "testing" ) func TestBaseAgent_Ugly_Empty(t *testing.T) { @@ -36,7 +34,7 @@ func TestCanDispatchAgent_Good_NoConfig(t *testing.T) { // With no running workspaces and default config, should be able to dispatch root := t.TempDir() t.Setenv("CORE_WORKSPACE", root) - require.NoError(t, coreio.Local.EnsureDir(filepath.Join(root, "workspace"))) + require.True(t, fs.EnsureDir(filepath.Join(root, "workspace")).OK) s := &PrepSubsystem{codePath: t.TempDir()} assert.True(t, s.canDispatchAgent("gemini")) @@ -46,7 +44,7 @@ func TestCanDispatchAgent_Good_UnknownAgent(t *testing.T) { // Unknown agent has no limit, so always allowed root := t.TempDir() t.Setenv("CORE_WORKSPACE", root) - require.NoError(t, coreio.Local.EnsureDir(filepath.Join(root, "workspace"))) + require.True(t, fs.EnsureDir(filepath.Join(root, "workspace")).OK) s := &PrepSubsystem{codePath: t.TempDir()} assert.True(t, s.canDispatchAgent("unknown-agent")) @@ -55,7 +53,7 @@ func TestCanDispatchAgent_Good_UnknownAgent(t *testing.T) { func TestCountRunningByAgent_Good_EmptyWorkspace(t *testing.T) { root := t.TempDir() t.Setenv("CORE_WORKSPACE", root) - require.NoError(t, coreio.Local.EnsureDir(filepath.Join(root, "workspace"))) + require.True(t, fs.EnsureDir(filepath.Join(root, "workspace")).OK) s := &PrepSubsystem{} assert.Equal(t, 0, s.countRunningByAgent("gemini")) @@ -68,7 +66,7 
@@ func TestCountRunningByAgent_Good_NoRunning(t *testing.T) { // Create a workspace with completed status under workspace/ ws := filepath.Join(root, "workspace", "test-ws") - require.NoError(t, coreio.Local.EnsureDir(ws)) + require.True(t, fs.EnsureDir(ws).OK) require.NoError(t, writeStatus(ws, &WorkspaceStatus{ Status: "completed", Agent: "gemini", diff --git a/pkg/agentic/remote.go b/pkg/agentic/remote.go index 0a2e6e1..3dcb55d 100644 --- a/pkg/agentic/remote.go +++ b/pkg/agentic/remote.go @@ -5,32 +5,32 @@ package agentic import ( "context" "encoding/json" - "fmt" "net/http" - "os" - "strings" "time" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) // --- agentic_dispatch_remote tool --- // RemoteDispatchInput dispatches a task to a remote core-agent over HTTP. +// +// input := agentic.RemoteDispatchInput{Host: "charon", Repo: "go-io", Task: "Run the review queue"} type RemoteDispatchInput struct { - Host string `json:"host"` // Remote agent host (e.g. "charon", "10.69.69.165:9101") - Repo string `json:"repo"` // Target repo - Task string `json:"task"` // What the agent should do - Agent string `json:"agent,omitempty"` // Agent type (default: claude:opus) - Template string `json:"template,omitempty"` // Prompt template - Persona string `json:"persona,omitempty"` // Persona slug - Org string `json:"org,omitempty"` // Forge org (default: core) + Host string `json:"host"` // Remote agent host (e.g. 
"charon", "10.69.69.165:9101") + Repo string `json:"repo"` // Target repo + Task string `json:"task"` // What the agent should do + Agent string `json:"agent,omitempty"` // Agent type (default: claude:opus) + Template string `json:"template,omitempty"` // Prompt template + Persona string `json:"persona,omitempty"` // Persona slug + Org string `json:"org,omitempty"` // Forge org (default: core) Variables map[string]string `json:"variables,omitempty"` // Template variables } // RemoteDispatchOutput is the response from a remote dispatch. +// +// out := agentic.RemoteDispatchOutput{Success: true, Host: "charon", Repo: "go-io", Agent: "claude:opus"} type RemoteDispatchOutput struct { Success bool `json:"success"` Host string `json:"host"` @@ -50,13 +50,13 @@ func (s *PrepSubsystem) registerRemoteDispatchTool(server *mcp.Server) { func (s *PrepSubsystem) dispatchRemote(ctx context.Context, _ *mcp.CallToolRequest, input RemoteDispatchInput) (*mcp.CallToolResult, RemoteDispatchOutput, error) { if input.Host == "" { - return nil, RemoteDispatchOutput{}, coreerr.E("dispatchRemote", "host is required", nil) + return nil, RemoteDispatchOutput{}, core.E("dispatchRemote", "host is required", nil) } if input.Repo == "" { - return nil, RemoteDispatchOutput{}, coreerr.E("dispatchRemote", "repo is required", nil) + return nil, RemoteDispatchOutput{}, core.E("dispatchRemote", "repo is required", nil) } if input.Task == "" { - return nil, RemoteDispatchOutput{}, coreerr.E("dispatchRemote", "task is required", nil) + return nil, RemoteDispatchOutput{}, core.E("dispatchRemote", "task is required", nil) } // Resolve host aliases @@ -96,7 +96,7 @@ func (s *PrepSubsystem) dispatchRemote(ctx context.Context, _ *mcp.CallToolReque }, } - url := fmt.Sprintf("http://%s/mcp", addr) + url := core.Sprintf("http://%s/mcp", addr) client := &http.Client{Timeout: 30 * time.Second} // Step 1: Initialize session @@ -104,8 +104,8 @@ func (s *PrepSubsystem) dispatchRemote(ctx context.Context, _ 
*mcp.CallToolReque if err != nil { return nil, RemoteDispatchOutput{ Host: input.Host, - Error: fmt.Sprintf("init failed: %v", err), - }, coreerr.E("dispatchRemote", "MCP initialize failed", err) + Error: core.Sprintf("init failed: %v", err), + }, core.E("dispatchRemote", "MCP initialize failed", err) } // Step 2: Call the tool @@ -114,8 +114,8 @@ func (s *PrepSubsystem) dispatchRemote(ctx context.Context, _ *mcp.CallToolReque if err != nil { return nil, RemoteDispatchOutput{ Host: input.Host, - Error: fmt.Sprintf("call failed: %v", err), - }, coreerr.E("dispatchRemote", "tool call failed", err) + Error: core.Sprintf("call failed: %v", err), + }, core.E("dispatchRemote", "tool call failed", err) } // Parse result @@ -163,12 +163,12 @@ func resolveHost(host string) string { "local": "127.0.0.1:9101", } - if addr, ok := aliases[strings.ToLower(host)]; ok { + if addr, ok := aliases[core.Lower(host)]; ok { return addr } // If no port specified, add default - if !strings.Contains(host, ":") { + if !core.Contains(host, ":") { return host + ":9101" } @@ -178,25 +178,25 @@ func resolveHost(host string) string { // remoteToken gets the auth token for a remote agent. 
func remoteToken(host string) string { // Check environment first - envKey := fmt.Sprintf("AGENT_TOKEN_%s", strings.ToUpper(host)) - if token := os.Getenv(envKey); token != "" { + envKey := core.Sprintf("AGENT_TOKEN_%s", core.Upper(host)) + if token := core.Env(envKey); token != "" { return token } // Fallback to shared agent token - if token := os.Getenv("MCP_AUTH_TOKEN"); token != "" { + if token := core.Env("MCP_AUTH_TOKEN"); token != "" { return token } // Try reading from file - home, _ := os.UserHomeDir() + home := core.Env("DIR_HOME") tokenFiles := []string{ - fmt.Sprintf("%s/.core/tokens/%s.token", home, strings.ToLower(host)), - fmt.Sprintf("%s/.core/agent-token", home), + core.Sprintf("%s/.core/tokens/%s.token", home, core.Lower(host)), + core.Sprintf("%s/.core/agent-token", home), } for _, f := range tokenFiles { - if data, err := coreio.Local.Read(f); err == nil { - return strings.TrimSpace(data) + if r := fs.Read(f); r.OK { + return core.Trim(r.Value.(string)) } } diff --git a/pkg/agentic/remote_client.go b/pkg/agentic/remote_client.go index f49d024..1969565 100644 --- a/pkg/agentic/remote_client.go +++ b/pkg/agentic/remote_client.go @@ -7,14 +7,12 @@ import ( "bytes" "context" "encoding/json" - "fmt" "net/http" - "strings" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" ) -// mcpInitialize performs the MCP initialize handshake over Streamable HTTP. +// mcpInitialize performs the MCP initialise handshake over Streamable HTTP. // Returns the session ID from the Mcp-Session-Id header. 
func mcpInitialize(ctx context.Context, client *http.Client, url, token string) (string, error) { initReq := map[string]any{ @@ -35,26 +33,26 @@ func mcpInitialize(ctx context.Context, client *http.Client, url, token string) req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body)) if err != nil { - return "", coreerr.E("mcpInitialize", "create request", err) + return "", core.E("mcpInitialize", "create request", err) } setHeaders(req, token, "") resp, err := client.Do(req) if err != nil { - return "", coreerr.E("mcpInitialize", "request failed", err) + return "", core.E("mcpInitialize", "request failed", err) } defer resp.Body.Close() if resp.StatusCode != 200 { - return "", coreerr.E("mcpInitialize", fmt.Sprintf("HTTP %d", resp.StatusCode), nil) + return "", core.E("mcpInitialize", core.Sprintf("HTTP %d", resp.StatusCode), nil) } sessionID := resp.Header.Get("Mcp-Session-Id") - // Drain the SSE response (we don't need the initialize result) + // Drain the SSE response (we don't need the initialise result) drainSSE(resp) - // Send initialized notification + // Send initialised notification notif := map[string]any{ "jsonrpc": "2.0", "method": "notifications/initialized", @@ -77,18 +75,18 @@ func mcpInitialize(ctx context.Context, client *http.Client, url, token string) func mcpCall(ctx context.Context, client *http.Client, url, token, sessionID string, body []byte) ([]byte, error) { req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body)) if err != nil { - return nil, coreerr.E("mcpCall", "create request", err) + return nil, core.E("mcpCall", "create request", err) } setHeaders(req, token, sessionID) resp, err := client.Do(req) if err != nil { - return nil, coreerr.E("mcpCall", "request failed", err) + return nil, core.E("mcpCall", "request failed", err) } defer resp.Body.Close() if resp.StatusCode != 200 { - return nil, coreerr.E("mcpCall", fmt.Sprintf("HTTP %d", resp.StatusCode), nil) + return nil, core.E("mcpCall", 
core.Sprintf("HTTP %d", resp.StatusCode), nil) } // Parse SSE response — extract data: lines @@ -100,11 +98,11 @@ func readSSEData(resp *http.Response) ([]byte, error) { scanner := bufio.NewScanner(resp.Body) for scanner.Scan() { line := scanner.Text() - if strings.HasPrefix(line, "data: ") { - return []byte(strings.TrimPrefix(line, "data: ")), nil + if core.HasPrefix(line, "data: ") { + return []byte(core.TrimPrefix(line, "data: ")), nil } } - return nil, coreerr.E("readSSEData", "no data in SSE response", nil) + return nil, core.E("readSSEData", "no data in SSE response", nil) } // setHeaders applies standard MCP HTTP headers. diff --git a/pkg/agentic/remote_status.go b/pkg/agentic/remote_status.go index 482dd0a..d58f6ec 100644 --- a/pkg/agentic/remote_status.go +++ b/pkg/agentic/remote_status.go @@ -8,24 +8,27 @@ import ( "net/http" "time" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) // --- agentic_status_remote tool --- // RemoteStatusInput queries a remote core-agent for workspace status. +// +// input := agentic.RemoteStatusInput{Host: "charon"} type RemoteStatusInput struct { Host string `json:"host"` // Remote agent host (e.g. "charon") } // RemoteStatusOutput is the response from a remote status check. 
+// +// out := agentic.RemoteStatusOutput{Success: true, Host: "charon"} type RemoteStatusOutput struct { - Success bool `json:"success"` - Host string `json:"host"` - Workspaces []WorkspaceInfo `json:"workspaces"` - Count int `json:"count"` - Error string `json:"error,omitempty"` + Success bool `json:"success"` + Host string `json:"host"` + Stats StatusOutput `json:"stats"` + Error string `json:"error,omitempty"` } func (s *PrepSubsystem) registerRemoteStatusTool(server *mcp.Server) { @@ -37,7 +40,7 @@ func (s *PrepSubsystem) registerRemoteStatusTool(server *mcp.Server) { func (s *PrepSubsystem) statusRemote(ctx context.Context, _ *mcp.CallToolRequest, input RemoteStatusInput) (*mcp.CallToolResult, RemoteStatusOutput, error) { if input.Host == "" { - return nil, RemoteStatusOutput{}, coreerr.E("statusRemote", "host is required", nil) + return nil, RemoteStatusOutput{}, core.E("statusRemote", "host is required", nil) } addr := resolveHost(input.Host) @@ -102,8 +105,7 @@ func (s *PrepSubsystem) statusRemote(ctx context.Context, _ *mcp.CallToolRequest if len(rpcResp.Result.Content) > 0 { var statusOut StatusOutput if json.Unmarshal([]byte(rpcResp.Result.Content[0].Text), &statusOut) == nil { - output.Workspaces = statusOut.Workspaces - output.Count = statusOut.Count + output.Stats = statusOut } } diff --git a/pkg/agentic/resume.go b/pkg/agentic/resume.go index 9422502..c4b5cef 100644 --- a/pkg/agentic/resume.go +++ b/pkg/agentic/resume.go @@ -4,31 +4,31 @@ package agentic import ( "context" - "fmt" - "os" - "path/filepath" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) // ResumeInput is the input for agentic_resume. +// +// input := agentic.ResumeInput{Workspace: "go-scm-1773581173", Answer: "Use the existing queue config"} type ResumeInput struct { - Workspace string `json:"workspace"` // workspace name (e.g. 
"go-scm-1773581173") - Answer string `json:"answer,omitempty"` // answer to the blocked question (written to ANSWER.md) - Agent string `json:"agent,omitempty"` // override agent type (default: same as original) - DryRun bool `json:"dry_run,omitempty"` // preview without executing + Workspace string `json:"workspace"` // workspace name (e.g. "go-scm-1773581173") + Answer string `json:"answer,omitempty"` // answer to the blocked question (written to ANSWER.md) + Agent string `json:"agent,omitempty"` // override agent type (default: same as original) + DryRun bool `json:"dry_run,omitempty"` // preview without executing } // ResumeOutput is the output for agentic_resume. +// +// out := agentic.ResumeOutput{Success: true, Workspace: "go-scm-1773581173", Agent: "codex"} type ResumeOutput struct { - Success bool `json:"success"` - Workspace string `json:"workspace"` - Agent string `json:"agent"` - PID int `json:"pid,omitempty"` - OutputFile string `json:"output_file,omitempty"` - Prompt string `json:"prompt,omitempty"` + Success bool `json:"success"` + Workspace string `json:"workspace"` + Agent string `json:"agent"` + PID int `json:"pid,omitempty"` + OutputFile string `json:"output_file,omitempty"` + Prompt string `json:"prompt,omitempty"` } func (s *PrepSubsystem) registerResumeTool(server *mcp.Server) { @@ -40,25 +40,25 @@ func (s *PrepSubsystem) registerResumeTool(server *mcp.Server) { func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, input ResumeInput) (*mcp.CallToolResult, ResumeOutput, error) { if input.Workspace == "" { - return nil, ResumeOutput{}, coreerr.E("resume", "workspace is required", nil) + return nil, ResumeOutput{}, core.E("resume", "workspace is required", nil) } - wsDir := filepath.Join(WorkspaceRoot(), input.Workspace) - srcDir := filepath.Join(wsDir, "src") + wsDir := core.JoinPath(WorkspaceRoot(), input.Workspace) + repoDir := core.JoinPath(wsDir, "repo") // Verify workspace exists - if _, err := os.Stat(srcDir); err != 
nil { - return nil, ResumeOutput{}, coreerr.E("resume", "workspace not found: "+input.Workspace, nil) + if !fs.IsDir(core.JoinPath(repoDir, ".git")) { + return nil, ResumeOutput{}, core.E("resume", "workspace not found: "+input.Workspace, nil) } // Read current status st, err := readStatus(wsDir) if err != nil { - return nil, ResumeOutput{}, coreerr.E("resume", "no status.json in workspace", err) + return nil, ResumeOutput{}, core.E("resume", "no status.json in workspace", err) } if st.Status != "blocked" && st.Status != "failed" && st.Status != "completed" { - return nil, ResumeOutput{}, coreerr.E("resume", "workspace is "+st.Status+", not resumable (must be blocked, failed, or completed)", nil) + return nil, ResumeOutput{}, core.E("resume", "workspace is "+st.Status+", not resumable (must be blocked, failed, or completed)", nil) } // Determine agent @@ -69,19 +69,20 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu // Write ANSWER.md if answer provided if input.Answer != "" { - answerPath := filepath.Join(srcDir, "ANSWER.md") - content := fmt.Sprintf("# Answer\n\n%s\n", input.Answer) - if err := coreio.Local.Write(answerPath, content); err != nil { - return nil, ResumeOutput{}, coreerr.E("resume", "failed to write ANSWER.md", err) + answerPath := core.JoinPath(repoDir, "ANSWER.md") + content := core.Sprintf("# Answer\n\n%s\n", input.Answer) + if r := fs.Write(answerPath, content); !r.OK { + err, _ := r.Value.(error) + return nil, ResumeOutput{}, core.E("resume", "failed to write ANSWER.md", err) } } - // Build resume prompt - prompt := "You are resuming previous work in this workspace. " + // Build resume prompt — inline the task and answer, no file references + prompt := "You are resuming previous work.\n\nORIGINAL TASK:\n" + st.Task if input.Answer != "" { - prompt += "Read ANSWER.md for the response to your question. 
" + prompt += "\n\nANSWER TO YOUR QUESTION:\n" + input.Answer } - prompt += "Read PROMPT.md for the original task. Read BLOCKED.md to see what you were stuck on. Continue working." + prompt += "\n\nContinue working. Read BLOCKED.md to see what you were stuck on. Commit when done." if input.DryRun { return nil, ResumeOutput{ @@ -93,7 +94,7 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu } // Spawn agent via go-process - pid, _, err := s.spawnAgent(agent, prompt, wsDir, srcDir) + pid, _, err := s.spawnAgent(agent, prompt, wsDir) if err != nil { return nil, ResumeOutput{}, err } @@ -110,6 +111,6 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu Workspace: input.Workspace, Agent: agent, PID: pid, - OutputFile: filepath.Join(wsDir, fmt.Sprintf("agent-%s.log", agent)), + OutputFile: core.JoinPath(wsDir, core.Sprintf("agent-%s.log", agent)), }, nil } diff --git a/pkg/agentic/review_queue.go b/pkg/agentic/review_queue.go index 515722a..2f8d4c8 100644 --- a/pkg/agentic/review_queue.go +++ b/pkg/agentic/review_queue.go @@ -5,31 +5,31 @@ package agentic import ( "context" "encoding/json" - "fmt" + "io" "os" "os/exec" - "path/filepath" "regexp" - "strconv" - "strings" "time" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) // --- agentic_review_queue tool --- // ReviewQueueInput controls the review queue runner. 
+// +// input := agentic.ReviewQueueInput{Reviewer: "coderabbit", Limit: 4, DryRun: true} type ReviewQueueInput struct { - Limit int `json:"limit,omitempty"` // Max PRs to process this run (default: 4) - Reviewer string `json:"reviewer,omitempty"` // "coderabbit" (default), "codex", or "both" - DryRun bool `json:"dry_run,omitempty"` // Preview without acting - LocalOnly bool `json:"local_only,omitempty"` // Run review locally, don't touch GitHub + Limit int `json:"limit,omitempty"` // Max PRs to process this run (default: 4) + Reviewer string `json:"reviewer,omitempty"` // "coderabbit" (default), "codex", or "both" + DryRun bool `json:"dry_run,omitempty"` // Preview without acting + LocalOnly bool `json:"local_only,omitempty"` // Run review locally, don't touch GitHub } // ReviewQueueOutput reports what happened. +// +// out := agentic.ReviewQueueOutput{Success: true, Processed: []agentic.ReviewResult{{Repo: "go-io", Verdict: "clean"}}} type ReviewQueueOutput struct { Success bool `json:"success"` Processed []ReviewResult `json:"processed"` @@ -38,6 +38,8 @@ type ReviewQueueOutput struct { } // ReviewResult is the outcome of reviewing one repo. +// +// result := agentic.ReviewResult{Repo: "go-io", Verdict: "findings", Findings: 3, Action: "fix_dispatched"} type ReviewResult struct { Repo string `json:"repo"` Verdict string `json:"verdict"` // clean, findings, rate_limited, error @@ -47,10 +49,12 @@ type ReviewResult struct { } // RateLimitInfo tracks CodeRabbit rate limit state. 
+// +// limit := agentic.RateLimitInfo{Limited: true, Message: "retry after 2026-03-22T06:00:00Z"} type RateLimitInfo struct { - Limited bool `json:"limited"` - RetryAt time.Time `json:"retry_at,omitempty"` - Message string `json:"message,omitempty"` + Limited bool `json:"limited"` + RetryAt time.Time `json:"retry_at,omitempty"` + Message string `json:"message,omitempty"` } func (s *PrepSubsystem) registerReviewQueueTool(server *mcp.Server) { @@ -66,7 +70,7 @@ func (s *PrepSubsystem) reviewQueue(ctx context.Context, _ *mcp.CallToolRequest, limit = 4 } - basePath := filepath.Join(s.codePath, "core") + basePath := core.JoinPath(s.codePath, "core") // Find repos with draft PRs (ahead of GitHub) candidates := s.findReviewCandidates(basePath) @@ -93,7 +97,7 @@ func (s *PrepSubsystem) reviewQueue(ctx context.Context, _ *mcp.CallToolRequest, continue } - repoDir := filepath.Join(basePath, repo) + repoDir := core.JoinPath(basePath, repo) reviewer := input.Reviewer if reviewer == "" { reviewer = "coderabbit" @@ -131,17 +135,18 @@ func (s *PrepSubsystem) reviewQueue(ctx context.Context, _ *mcp.CallToolRequest, // findReviewCandidates returns repos that are ahead of GitHub main. 
func (s *PrepSubsystem) findReviewCandidates(basePath string) []string { - entries, err := os.ReadDir(basePath) - if err != nil { + r := fs.List(basePath) + if !r.OK { return nil } + entries := r.Value.([]os.DirEntry) var candidates []string for _, e := range entries { if !e.IsDir() { continue } - repoDir := filepath.Join(basePath, e.Name()) + repoDir := core.JoinPath(basePath, e.Name()) if !hasRemote(repoDir, "github") { continue } @@ -160,7 +165,7 @@ func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer // Check saved rate limit if rl := s.loadRateLimitState(); rl != nil && rl.Limited && time.Now().Before(rl.RetryAt) { result.Verdict = "rate_limited" - result.Detail = fmt.Sprintf("retry after %s", rl.RetryAt.Format(time.RFC3339)) + result.Detail = core.Sprintf("retry after %s", rl.RetryAt.Format(time.RFC3339)) return result } @@ -173,14 +178,14 @@ func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer output := string(out) // Parse rate limit (both reviewers use similar patterns) - if strings.Contains(output, "Rate limit exceeded") || strings.Contains(output, "rate limit") { + if core.Contains(output, "Rate limit exceeded") || core.Contains(output, "rate limit") { result.Verdict = "rate_limited" result.Detail = output return result } // Parse error - if err != nil && !strings.Contains(output, "No findings") && !strings.Contains(output, "no issues") { + if err != nil && !core.Contains(output, "No findings") && !core.Contains(output, "no issues") { result.Verdict = "error" result.Detail = output return result @@ -190,7 +195,7 @@ func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer s.storeReviewOutput(repoDir, repo, reviewer, output) // Parse verdict - if strings.Contains(output, "No findings") || strings.Contains(output, "no issues") || strings.Contains(output, "LGTM") { + if core.Contains(output, "No findings") || core.Contains(output, "no issues") || core.Contains(output, "LGTM") { 
result.Verdict = "clean" result.Findings = 0 @@ -222,11 +227,11 @@ func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer } // Save findings for agent dispatch - findingsFile := filepath.Join(repoDir, ".core", "coderabbit-findings.txt") - coreio.Local.Write(findingsFile, output) + findingsFile := core.JoinPath(repoDir, ".core", "coderabbit-findings.txt") + fs.Write(findingsFile, output) // Dispatch fix agent with the findings - task := fmt.Sprintf("Fix CodeRabbit findings. The review output is in .core/coderabbit-findings.txt. "+ + task := core.Sprintf("Fix CodeRabbit findings. The review output is in .core/coderabbit-findings.txt. "+ "Read it, verify each finding against the code, fix what's valid. Run tests. "+ "Commit: fix(coderabbit): address review findings\n\nFindings summary (%d issues):\n%s", result.Findings, truncate(output, 1500)) @@ -248,7 +253,7 @@ func (s *PrepSubsystem) pushAndMerge(ctx context.Context, repoDir, repo string) pushCmd := exec.CommandContext(ctx, "git", "push", "github", "HEAD:refs/heads/dev", "--force") pushCmd.Dir = repoDir if out, err := pushCmd.CombinedOutput(); err != nil { - return coreerr.E("pushAndMerge", "push failed: "+string(out), err) + return core.E("pushAndMerge", "push failed: "+string(out), err) } // Mark PR ready if draft @@ -260,7 +265,7 @@ func (s *PrepSubsystem) pushAndMerge(ctx context.Context, repoDir, repo string) mergeCmd := exec.CommandContext(ctx, "gh", "pr", "merge", "--merge", "--delete-branch") mergeCmd.Dir = repoDir if out, err := mergeCmd.CombinedOutput(); err != nil { - return coreerr.E("pushAndMerge", "merge failed: "+string(out), err) + return core.E("pushAndMerge", "merge failed: "+string(out), err) } return nil @@ -279,7 +284,7 @@ func (s *PrepSubsystem) dispatchFixFromQueue(ctx context.Context, repo, task str return err } if !out.Success { - return coreerr.E("dispatchFixFromQueue", "dispatch failed for "+repo, nil) + return core.E("dispatchFixFromQueue", "dispatch failed for 
"+repo, nil) } return nil } @@ -288,15 +293,15 @@ func (s *PrepSubsystem) dispatchFixFromQueue(ctx context.Context, repo, task str func countFindings(output string) int { // Count lines that look like findings count := 0 - for _, line := range strings.Split(output, "\n") { - trimmed := strings.TrimSpace(line) - if strings.HasPrefix(trimmed, "- ") || strings.HasPrefix(trimmed, "* ") || - strings.Contains(trimmed, "Issue:") || strings.Contains(trimmed, "Finding:") || - strings.Contains(trimmed, "⚠") || strings.Contains(trimmed, "❌") { + for _, line := range core.Split(output, "\n") { + trimmed := core.Trim(line) + if core.HasPrefix(trimmed, "- ") || core.HasPrefix(trimmed, "* ") || + core.Contains(trimmed, "Issue:") || core.Contains(trimmed, "Finding:") || + core.Contains(trimmed, "⚠") || core.Contains(trimmed, "❌") { count++ } } - if count == 0 && !strings.Contains(output, "No findings") { + if count == 0 && !core.Contains(output, "No findings") { count = 1 // At least one finding if not clean } return count @@ -308,10 +313,10 @@ func parseRetryAfter(message string) time.Duration { re := regexp.MustCompile(`(\d+)\s*minutes?\s*(?:and\s*)?(\d+)?\s*seconds?`) matches := re.FindStringSubmatch(message) if len(matches) >= 2 { - mins, _ := strconv.Atoi(matches[1]) + mins := parseInt(matches[1]) secs := 0 if len(matches) >= 3 && matches[2] != "" { - secs, _ = strconv.Atoi(matches[2]) + secs = parseInt(matches[2]) } return time.Duration(mins)*time.Minute + time.Duration(secs)*time.Second } @@ -334,15 +339,14 @@ func (s *PrepSubsystem) buildReviewCommand(ctx context.Context, repoDir, reviewe // storeReviewOutput saves raw review output for training data collection. 
func (s *PrepSubsystem) storeReviewOutput(repoDir, repo, reviewer, output string) { - home, _ := os.UserHomeDir() - dataDir := filepath.Join(home, ".core", "training", "reviews") - coreio.Local.EnsureDir(dataDir) + dataDir := core.JoinPath(core.Env("DIR_HOME"), ".core", "training", "reviews") + fs.EnsureDir(dataDir) timestamp := time.Now().Format("2006-01-02T15-04-05") - filename := fmt.Sprintf("%s_%s_%s.txt", repo, reviewer, timestamp) + filename := core.Sprintf("%s_%s_%s.txt", repo, reviewer, timestamp) // Write raw output - coreio.Local.Write(filepath.Join(dataDir, filename), output) + fs.Write(core.JoinPath(dataDir, filename), output) // Append to JSONL for structured training entry := map[string]string{ @@ -352,37 +356,37 @@ func (s *PrepSubsystem) storeReviewOutput(repoDir, repo, reviewer, output string "output": output, "verdict": "clean", } - if !strings.Contains(output, "No findings") && !strings.Contains(output, "no issues") { + if !core.Contains(output, "No findings") && !core.Contains(output, "no issues") { entry["verdict"] = "findings" } jsonLine, _ := json.Marshal(entry) - jsonlPath := filepath.Join(dataDir, "reviews.jsonl") - f, err := os.OpenFile(jsonlPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err == nil { - defer f.Close() - f.Write(append(jsonLine, '\n')) + jsonlPath := core.JoinPath(dataDir, "reviews.jsonl") + r := fs.Append(jsonlPath) + if !r.OK { + return } + wc := r.Value.(io.WriteCloser) + defer wc.Close() + wc.Write(append(jsonLine, '\n')) } // saveRateLimitState persists rate limit info for cross-run awareness. func (s *PrepSubsystem) saveRateLimitState(info *RateLimitInfo) { - home, _ := os.UserHomeDir() - path := filepath.Join(home, ".core", "coderabbit-ratelimit.json") + path := core.JoinPath(core.Env("DIR_HOME"), ".core", "coderabbit-ratelimit.json") data, _ := json.Marshal(info) - coreio.Local.Write(path, string(data)) + fs.Write(path, string(data)) } // loadRateLimitState reads persisted rate limit info. 
// StartRunner begins the background queue runner.
// Queue is frozen by default — use agentic_dispatch_start to unfreeze,
// or set CORE_AGENT_DISPATCH=1 to auto-start.
//
//	prep.StartRunner()
func (s *PrepSubsystem) StartRunner() {
	// Buffered (size 1) so Poke can signal without blocking; a pending
	// poke coalesces with later ones. Must exist before the runLoop
	// goroutine starts, which receives from it.
	s.pokeCh = make(chan struct{}, 1)

	// Frozen by default — explicit start required
	if core.Env("CORE_AGENT_DISPATCH") == "1" {
		s.frozen = false
		core.Print(nil, "dispatch: auto-start enabled (CORE_AGENT_DISPATCH=1)")
	} else {
		s.frozen = true
	}

	go s.runLoop()
}

// runLoop drains the dispatch queue every 30 seconds, or immediately
// when poked. It runs for the life of the process — there is no stop
// channel or context cancellation path.
// NOTE(review): s.frozen is written by StartRunner and the shutdown
// tools on other goroutines with no visible synchronization — confirm
// the field is guarded or atomic elsewhere in PrepSubsystem.
func (s *PrepSubsystem) runLoop() {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			s.drainQueue()
		case <-s.pokeCh:
			s.drainQueue()
		}
	}
}
// sanitiseBranchSlug maps text to a branch-safe slug: letters are
// lowercased, anything outside [a-z0-9-] becomes '-', output is capped
// at max runes (max <= 0 means unlimited), and leading/trailing dashes
// are stripped. Repeated interior dashes are NOT collapsed.
func sanitiseBranchSlug(text string, max int) string {
	slug := make([]rune, 0, len(text))
	for _, c := range text {
		switch {
		case c >= 'A' && c <= 'Z':
			slug = append(slug, c+('a'-'A')) // fold to lowercase
		case (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-':
			slug = append(slug, c)
		default:
			slug = append(slug, '-') // every other rune becomes a dash
		}
		if max > 0 && len(slug) >= max {
			break
		}
	}
	return trimRuneEdges(string(slug), '-')
}

// sanitisePlanSlug builds a short plan slug: lowercase letters, digits
// and dashes are kept, spaces become dashes, everything else is
// dropped entirely. Dash runs are collapsed, edges trimmed, and the
// result capped at 30 bytes (safe: the kept alphabet is pure ASCII).
func sanitisePlanSlug(text string) string {
	kept := make([]rune, 0, len(text))
	for _, c := range text {
		switch {
		case c >= 'A' && c <= 'Z':
			kept = append(kept, c+('a'-'A'))
		case (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-':
			kept = append(kept, c)
		case c == ' ':
			kept = append(kept, '-')
		}
	}
	slug := trimRuneEdges(collapseRepeatedRune(string(kept), '-'), '-')
	if len(slug) > 30 {
		slug = slug[:30]
	}
	// Truncation can expose a trailing dash — trim once more.
	return trimRuneEdges(slug, '-')
}

// sanitiseFilename keeps [a-zA-Z0-9-_.] and replaces every other rune
// with '-'. Case is preserved and length is not limited.
func sanitiseFilename(text string) string {
	safe := func(c rune) bool {
		return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
			(c >= '0' && c <= '9') || c == '-' || c == '_' || c == '.'
	}
	cleaned := make([]rune, 0, len(text))
	for _, c := range text {
		if safe(c) {
			cleaned = append(cleaned, c)
		} else {
			cleaned = append(cleaned, '-')
		}
	}
	return string(cleaned)
}

// collapseRepeatedRune squeezes consecutive occurrences of target down
// to a single occurrence; all other runes pass through untouched.
func collapseRepeatedRune(text string, target rune) string {
	out := make([]rune, 0, len(text))
	prev := rune(-1) // sentinel: ranging over a string never yields -1
	for _, c := range text {
		if c == target && prev == target {
			continue
		}
		out = append(out, c)
		prev = c
	}
	return string(out)
}

// trimRuneEdges strips leading and trailing occurrences of target,
// operating on runes so multi-byte text is never split mid-character.
func trimRuneEdges(text string, target rune) string {
	rs := []rune(text)
	lo, hi := 0, len(rs)
	for lo < hi && rs[lo] == target {
		lo++
	}
	for hi > lo && rs[hi-1] == target {
		hi--
	}
	return string(rs[lo:hi])
}
+// +// issue := agentic.ScanIssue{Repo: "go-io", Number: 12, Title: "Replace fmt.Errorf"} type ScanIssue struct { Repo string `json:"repo"` Number int `json:"number"` @@ -39,7 +43,7 @@ type ScanIssue struct { func (s *PrepSubsystem) scan(ctx context.Context, _ *mcp.CallToolRequest, input ScanInput) (*mcp.CallToolResult, ScanOutput, error) { if s.forgeToken == "" { - return nil, ScanOutput{}, coreerr.E("scan", "no Forge token configured", nil) + return nil, ScanOutput{}, core.E("scan", "no Forge token configured", nil) } if input.Org == "" { @@ -81,7 +85,7 @@ func (s *PrepSubsystem) scan(ctx context.Context, _ *mcp.CallToolRequest, input seen := make(map[string]bool) var unique []ScanIssue for _, issue := range allIssues { - key := fmt.Sprintf("%s#%d", issue.Repo, issue.Number) + key := core.Sprintf("%s#%d", issue.Repo, issue.Number) if !seen[key] { seen[key] = true unique = append(unique, issue) @@ -100,66 +104,38 @@ func (s *PrepSubsystem) scan(ctx context.Context, _ *mcp.CallToolRequest, input } func (s *PrepSubsystem) listOrgRepos(ctx context.Context, org string) ([]string, error) { - var allNames []string - page := 1 - - for { - u := fmt.Sprintf("%s/api/v1/orgs/%s/repos?limit=50&page=%d", s.forgeURL, org, page) - req, err := http.NewRequestWithContext(ctx, "GET", u, nil) - if err != nil { - return nil, coreerr.E("scan.listOrgRepos", "failed to create request", err) - } - req.Header.Set("Authorization", "token "+s.forgeToken) - - resp, err := s.client.Do(req) - if err != nil { - return nil, coreerr.E("scan.listOrgRepos", "failed to list repos", err) - } - - if resp.StatusCode != 200 { - resp.Body.Close() - return nil, coreerr.E("scan.listOrgRepos", fmt.Sprintf("HTTP %d listing repos", resp.StatusCode), nil) + repos, err := s.forge.Repos.ListOrgRepos(ctx, org) + if err != nil { + return nil, core.E("scan.listOrgRepos", "failed to list repos", err) } - var repos []struct { - Name string `json:"name"` - } - json.NewDecoder(resp.Body).Decode(&repos) - 
resp.Body.Close() - - for _, r := range repos { - allNames = append(allNames, r.Name) - } - - // If we got fewer than the limit, we've reached the last page - if len(repos) < 50 { - break - } - page++ + var allNames []string + for _, r := range repos { + allNames = append(allNames, r.Name) } return allNames, nil } func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label string) ([]ScanIssue, error) { - u := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues?state=open&limit=10&type=issues", + u := core.Sprintf("%s/api/v1/repos/%s/%s/issues?state=open&limit=10&type=issues", s.forgeURL, org, repo) if label != "" { - u += "&labels=" + strings.ReplaceAll(strings.ReplaceAll(label, " ", "%20"), "&", "%26") + u += "&labels=" + core.Replace(core.Replace(label, " ", "%20"), "&", "%26") } req, err := http.NewRequestWithContext(ctx, "GET", u, nil) if err != nil { - return nil, coreerr.E("scan.listRepoIssues", "failed to create request", err) + return nil, core.E("scan.listRepoIssues", "failed to create request", err) } req.Header.Set("Authorization", "token "+s.forgeToken) resp, err := s.client.Do(req) if err != nil { - return nil, coreerr.E("scan.listRepoIssues", "failed to list issues for "+repo, err) + return nil, core.E("scan.listRepoIssues", "failed to list issues for "+repo, err) } defer resp.Body.Close() if resp.StatusCode != 200 { - return nil, coreerr.E("scan.listRepoIssues", fmt.Sprintf("HTTP %d listing issues for %s", resp.StatusCode, repo), nil) + return nil, core.E("scan.listRepoIssues", core.Sprintf("HTTP %d listing issues for %s", resp.StatusCode, repo), nil) } var issues []struct { @@ -192,7 +168,7 @@ func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label str Title: issue.Title, Labels: labels, Assignee: assignee, - URL: strings.Replace(issue.HTMLURL, "https://forge.lthn.ai", s.forgeURL, 1), + URL: core.Replace(issue.HTMLURL, "https://forge.lthn.ai", s.forgeURL), }) } diff --git a/pkg/agentic/shutdown.go 
// SPDX-License-Identifier: EUPL-1.2

package agentic

import (
	"context"
	"syscall"

	core "dappco.re/go/core"
	"github.com/modelcontextprotocol/go-sdk/mcp"
)

// ShutdownInput is the input for agentic_dispatch_shutdown.
// It carries no fields — the lifecycle tools take no parameters.
//
//	input := agentic.ShutdownInput{}
type ShutdownInput struct{}

// ShutdownOutput is the output for agentic_dispatch_shutdown.
//
//	out := agentic.ShutdownOutput{Success: true, Running: 3, Message: "draining"}
type ShutdownOutput struct {
	Success bool   `json:"success"`
	Running int    `json:"running"` // agents still running (graceful) or 0 (hard shutdown)
	Queued  int    `json:"queued"`  // queued tasks remaining
	Message string `json:"message"` // human-readable summary of what happened
}

// registerShutdownTools registers the three queue-lifecycle MCP tools:
// start (unfreeze), graceful shutdown (freeze, let agents drain), and
// hard shutdown (kill running agents, clear the queue).
func (s *PrepSubsystem) registerShutdownTools(server *mcp.Server) {
	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_dispatch_start",
		Description: "Start the dispatch queue runner. Unfreezes the queue and begins draining.",
	}, s.dispatchStart)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_dispatch_shutdown",
		Description: "Graceful shutdown: stop accepting new jobs, let running agents finish. Queue is frozen.",
	}, s.shutdownGraceful)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_dispatch_shutdown_now",
		Description: "Hard shutdown: kill all running agents immediately. Queue is cleared.",
	}, s.shutdownNow)
}

// dispatchStart unfreezes the queue and starts draining.
// NOTE(review): s.frozen is also read by the runner goroutine with no
// visible synchronization — confirm it is guarded or atomic.
func (s *PrepSubsystem) dispatchStart(ctx context.Context, _ *mcp.CallToolRequest, input ShutdownInput) (*mcp.CallToolResult, ShutdownOutput, error) {
	s.frozen = false
	s.Poke() // trigger immediate drain

	return nil, ShutdownOutput{
		Success: true,
		Message: "dispatch started — queue unfrozen, draining",
	}, nil
}

// shutdownGraceful freezes the queue — running agents finish, no new dispatches.
func (s *PrepSubsystem) shutdownGraceful(ctx context.Context, _ *mcp.CallToolRequest, input ShutdownInput) (*mcp.CallToolResult, ShutdownOutput, error) {
	s.frozen = true

	// Running = sum over the hard-coded agent types.
	// NOTE(review): a new agent type must be added here by hand —
	// confirm there is no canonical agent list to iterate instead.
	running := s.countRunningByAgent("codex") + s.countRunningByAgent("claude") +
		s.countRunningByAgent("gemini") + s.countRunningByAgent("codex-spark")

	return nil, ShutdownOutput{
		Success: true,
		Running: running,
		Message: "queue frozen — running agents will finish, no new dispatches",
	}, nil
}

// shutdownNow kills all running agents and clears the queue.
// Workspaces are located by globbing status.json under both the flat
// layout (ws/*/status.json) and the deeper layout (ws/*/*/*/status.json).
// NOTE(review): syscall.Kill / SIGTERM is POSIX-only — confirm Windows
// is out of scope for this subsystem.
func (s *PrepSubsystem) shutdownNow(ctx context.Context, _ *mcp.CallToolRequest, input ShutdownInput) (*mcp.CallToolResult, ShutdownOutput, error) {
	s.frozen = true

	wsRoot := WorkspaceRoot()
	old := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json"))
	deep := core.PathGlob(core.JoinPath(wsRoot, "*", "*", "*", "status.json"))
	statusFiles := append(old, deep...)

	killed := 0
	cleared := 0

	for _, statusPath := range statusFiles {
		wsDir := core.PathDir(statusPath)
		st, err := readStatus(wsDir)
		if err != nil {
			// Unreadable status — skip this workspace, keep sweeping.
			continue
		}

		// Kill running agents
		if st.Status == "running" && st.PID > 0 {
			// Only successfully-signalled processes are counted; the
			// status flips to failed immediately, without waiting for
			// the process to actually exit.
			if syscall.Kill(st.PID, syscall.SIGTERM) == nil {
				killed++
			}
			st.Status = "failed"
			st.Question = "killed by shutdown_now"
			st.PID = 0
			writeStatus(wsDir, st)
		}

		// Clear queued tasks
		if st.Status == "queued" {
			st.Status = "failed"
			st.Question = "cleared by shutdown_now"
			writeStatus(wsDir, st)
			cleared++
		}
	}

	return nil, ShutdownOutput{
		Success: true,
		Running: 0,
		Queued:  0,
		Message: core.Sprintf("killed %d agents, cleared %d queued tasks", killed, cleared),
	}, nil
}
coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) @@ -31,20 +26,23 @@ import ( // running → failed (agent crashed / non-zero exit) // WorkspaceStatus represents the current state of an agent workspace. +// +// st, err := readStatus(wsDir) +// if err == nil && st.Status == "completed" { autoCreatePR(wsDir) } type WorkspaceStatus struct { - Status string `json:"status"` // running, completed, blocked, failed - Agent string `json:"agent"` // gemini, claude, codex - Repo string `json:"repo"` // target repo - Org string `json:"org,omitempty"` // forge org (e.g. "core") - Task string `json:"task"` // task description - Branch string `json:"branch,omitempty"` // git branch name - Issue int `json:"issue,omitempty"` // forge issue number - PID int `json:"pid,omitempty"` // process ID (if running) - StartedAt time.Time `json:"started_at"` // when dispatch started - UpdatedAt time.Time `json:"updated_at"` // last status change - Question string `json:"question,omitempty"` // from BLOCKED.md - Runs int `json:"runs"` // how many times dispatched/resumed - PRURL string `json:"pr_url,omitempty"` // pull request URL (after PR created) + Status string `json:"status"` // running, completed, blocked, failed + Agent string `json:"agent"` // gemini, claude, codex + Repo string `json:"repo"` // target repo + Org string `json:"org,omitempty"` // forge org (e.g. 
"core") + Task string `json:"task"` // task description + Branch string `json:"branch,omitempty"` // git branch name + Issue int `json:"issue,omitempty"` // forge issue number + PID int `json:"pid,omitempty"` // process ID (if running) + StartedAt time.Time `json:"started_at"` // when dispatch started + UpdatedAt time.Time `json:"updated_at"` // last status change + Question string `json:"question,omitempty"` // from BLOCKED.md + Runs int `json:"runs"` // how many times dispatched/resumed + PRURL string `json:"pr_url,omitempty"` // pull request URL (after PR created) } func writeStatus(wsDir string, status *WorkspaceStatus) error { @@ -53,16 +51,20 @@ func writeStatus(wsDir string, status *WorkspaceStatus) error { if err != nil { return err } - return coreio.Local.Write(filepath.Join(wsDir, "status.json"), string(data)) + if r := fs.Write(core.JoinPath(wsDir, "status.json"), string(data)); !r.OK { + err, _ := r.Value.(error) + return core.E("writeStatus", "failed to write status", err) + } + return nil } func readStatus(wsDir string) (*WorkspaceStatus, error) { - data, err := coreio.Local.Read(filepath.Join(wsDir, "status.json")) - if err != nil { - return nil, err + r := fs.Read(core.JoinPath(wsDir, "status.json")) + if !r.OK { + return nil, core.E("readStatus", "status not found", nil) } var s WorkspaceStatus - if err := json.Unmarshal([]byte(data), &s); err != nil { + if err := json.Unmarshal([]byte(r.Value.(string)), &s); err != nil { return nil, err } return &s, nil @@ -70,24 +72,36 @@ func readStatus(wsDir string) (*WorkspaceStatus, error) { // --- agentic_status tool --- +// StatusInput is the input for agentic_status. 
+// +// input := agentic.StatusInput{Workspace: "go-io-123", Limit: 50} type StatusInput struct { Workspace string `json:"workspace,omitempty"` // specific workspace name, or empty for all + Limit int `json:"limit,omitempty"` // max results (default 100) + Status string `json:"status,omitempty"` // filter: running, completed, failed, blocked } +// StatusOutput is the output for agentic_status. +// Returns stats by default. Only blocked workspaces are listed (they need attention). +// +// out := agentic.StatusOutput{Total: 42, Running: 3, Queued: 10, Completed: 25} type StatusOutput struct { - Workspaces []WorkspaceInfo `json:"workspaces"` - Count int `json:"count"` + Total int `json:"total"` + Running int `json:"running"` + Queued int `json:"queued"` + Completed int `json:"completed"` + Failed int `json:"failed"` + Blocked []BlockedInfo `json:"blocked,omitempty"` } -type WorkspaceInfo struct { - Name string `json:"name"` - Status string `json:"status"` - Agent string `json:"agent"` - Repo string `json:"repo"` - Task string `json:"task"` - Age string `json:"age"` - Question string `json:"question,omitempty"` - Runs int `json:"runs"` +// BlockedInfo shows a workspace that needs human input. 
+// +// info := agentic.BlockedInfo{Name: "go-io/task-4", Repo: "go-io", Question: "Which API version?"} +type BlockedInfo struct { + Name string `json:"name"` + Repo string `json:"repo"` + Agent string `json:"agent"` + Question string `json:"question"` } func (s *PrepSubsystem) registerStatusTool(server *mcp.Server) { @@ -100,73 +114,37 @@ func (s *PrepSubsystem) registerStatusTool(server *mcp.Server) { func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, input StatusInput) (*mcp.CallToolResult, StatusOutput, error) { wsRoot := WorkspaceRoot() - entries, err := os.ReadDir(wsRoot) - if err != nil { - return nil, StatusOutput{}, coreerr.E("status", "no workspaces found", err) - } + // Scan both old (*/status.json) and new (*/*/*/status.json) layouts + old := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json")) + deep := core.PathGlob(core.JoinPath(wsRoot, "*", "*", "*", "status.json")) + statusFiles := append(old, deep...) - var workspaces []WorkspaceInfo + var out StatusOutput - for _, entry := range entries { - if !entry.IsDir() { - continue - } + for _, statusPath := range statusFiles { + wsDir := core.PathDir(statusPath) + name := wsDir[len(wsRoot)+1:] - name := entry.Name() - - // Filter by specific workspace if requested - if input.Workspace != "" && name != input.Workspace { - continue - } - - wsDir := filepath.Join(wsRoot, name) - info := WorkspaceInfo{Name: name} - - // Try reading status.json st, err := readStatus(wsDir) if err != nil { - // Legacy workspace (no status.json) — check for log file - logFiles, _ := filepath.Glob(filepath.Join(wsDir, "agent-*.log")) - if len(logFiles) > 0 { - info.Status = "completed" - } else { - info.Status = "unknown" - } - fi, _ := entry.Info() - if fi != nil { - info.Age = time.Since(fi.ModTime()).Truncate(time.Minute).String() - } - workspaces = append(workspaces, info) + out.Total++ + out.Failed++ continue } - info.Status = st.Status - info.Agent = st.Agent - info.Repo = st.Repo - info.Task = 
st.Task - info.Runs = st.Runs - info.Age = time.Since(st.StartedAt).Truncate(time.Minute).String() - // If status is "running", check if PID is still alive if st.Status == "running" && st.PID > 0 { if err := syscall.Kill(st.PID, 0); err != nil { - // Process died — check for BLOCKED.md - blockedPath := filepath.Join(wsDir, "src", "BLOCKED.md") - if data, err := coreio.Local.Read(blockedPath); err == nil { - info.Status = "blocked" - info.Question = strings.TrimSpace(data) + blockedPath := core.JoinPath(wsDir, "repo", "BLOCKED.md") + if r := fs.Read(blockedPath); r.OK { st.Status = "blocked" - st.Question = info.Question + st.Question = core.Trim(r.Value.(string)) } else { - // Dead PID without BLOCKED.md — check exit code from log - // If no evidence of success, mark as failed (not completed) - logFile := filepath.Join(wsDir, fmt.Sprintf("agent-%s.log", st.Agent)) - if _, err := coreio.Local.Read(logFile); err != nil { - info.Status = "failed" + logFile := core.JoinPath(wsDir, core.Sprintf("agent-%s.log", st.Agent)) + if r := fs.Read(logFile); !r.OK { st.Status = "failed" st.Question = "Agent process died (no output log)" } else { - info.Status = "completed" st.Status = "completed" } } @@ -174,15 +152,25 @@ func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, inpu } } - if st.Status == "blocked" { - info.Question = st.Question + out.Total++ + switch st.Status { + case "running": + out.Running++ + case "queued": + out.Queued++ + case "completed": + out.Completed++ + case "failed": + out.Failed++ + case "blocked": + out.Blocked = append(out.Blocked, BlockedInfo{ + Name: name, + Repo: st.Repo, + Agent: st.Agent, + Question: st.Question, + }) } - - workspaces = append(workspaces, info) } - return nil, StatusOutput{ - Workspaces: workspaces, - Count: len(workspaces), - }, nil + return nil, out, nil } diff --git a/pkg/agentic/status_test.go b/pkg/agentic/status_test.go index 9a53404..9130529 100644 --- a/pkg/agentic/status_test.go +++ 
b/pkg/agentic/status_test.go @@ -4,13 +4,11 @@ package agentic import ( "encoding/json" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "path/filepath" "testing" "time" - - coreio "dappco.re/go/core/io" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestWriteStatus_Good(t *testing.T) { @@ -28,12 +26,12 @@ func TestWriteStatus_Good(t *testing.T) { err := writeStatus(dir, status) require.NoError(t, err) - // Verify file was written via coreio - data, err := coreio.Local.Read(filepath.Join(dir, "status.json")) - require.NoError(t, err) + // Verify file was written via core.Fs + r := fs.Read(filepath.Join(dir, "status.json")) + require.True(t, r.OK) var read WorkspaceStatus - err = json.Unmarshal([]byte(data), &read) + err = json.Unmarshal([]byte(r.Value.(string)), &read) require.NoError(t, err) assert.Equal(t, "running", read.Status) @@ -77,7 +75,7 @@ func TestReadStatus_Good(t *testing.T) { data, err := json.MarshalIndent(status, "", " ") require.NoError(t, err) - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "status.json"), string(data))) + require.True(t, fs.Write(filepath.Join(dir, "status.json"), string(data)).OK) read, err := readStatus(dir) require.NoError(t, err) @@ -99,7 +97,7 @@ func TestReadStatus_Bad_NoFile(t *testing.T) { func TestReadStatus_Bad_InvalidJSON(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "status.json"), "not json{")) + require.True(t, fs.Write(filepath.Join(dir, "status.json"), "not json{").OK) _, err := readStatus(dir) assert.Error(t, err) @@ -117,7 +115,7 @@ func TestReadStatus_Good_BlockedWithQuestion(t *testing.T) { data, err := json.MarshalIndent(status, "", " ") require.NoError(t, err) - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "status.json"), string(data))) + require.True(t, fs.Write(filepath.Join(dir, "status.json"), string(data)).OK) read, err := readStatus(dir) require.NoError(t, err) @@ 
-177,7 +175,7 @@ func TestWriteStatus_Good_OverwriteExisting(t *testing.T) { func TestReadStatus_Ugly_EmptyFile(t *testing.T) { dir := t.TempDir() - require.NoError(t, coreio.Local.Write(filepath.Join(dir, "status.json"), "")) + require.True(t, fs.Write(filepath.Join(dir, "status.json"), "").OK) _, err := readStatus(dir) assert.Error(t, err) diff --git a/pkg/agentic/verify.go b/pkg/agentic/verify.go index 2a2f2ce..d98e376 100644 --- a/pkg/agentic/verify.go +++ b/pkg/agentic/verify.go @@ -6,16 +6,12 @@ import ( "bytes" "context" "encoding/json" - "fmt" "net/http" "os" "os/exec" - "path/filepath" - "strings" "time" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" ) // autoVerifyAndMerge runs inline tests (fast gate) and merges if they pass. @@ -31,7 +27,7 @@ func (s *PrepSubsystem) autoVerifyAndMerge(wsDir string) { return } - srcDir := filepath.Join(wsDir, "src") + repoDir := core.JoinPath(wsDir, "repo") org := st.Org if org == "" { org = "core" @@ -51,7 +47,7 @@ func (s *PrepSubsystem) autoVerifyAndMerge(wsDir string) { } // Attempt 1: run tests and try to merge - result := s.attemptVerifyAndMerge(srcDir, org, st.Repo, st.Branch, prNum) + result := s.attemptVerifyAndMerge(repoDir, org, st.Repo, st.Branch, prNum) if result == mergeSuccess { markMerged() return @@ -59,8 +55,8 @@ func (s *PrepSubsystem) autoVerifyAndMerge(wsDir string) { // Attempt 2: rebase onto main and retry if result == mergeConflict || result == testFailed { - if s.rebaseBranch(srcDir, st.Branch) { - if s.attemptVerifyAndMerge(srcDir, org, st.Repo, st.Branch, prNum) == mergeSuccess { + if s.rebaseBranch(repoDir, st.Branch) { + if s.attemptVerifyAndMerge(repoDir, org, st.Repo, st.Branch, prNum) == mergeSuccess { markMerged() return } @@ -85,11 +81,11 @@ const ( ) // attemptVerifyAndMerge runs tests and tries to merge. Returns the outcome. 
-func (s *PrepSubsystem) attemptVerifyAndMerge(srcDir, org, repo, branch string, prNum int) mergeResult { - testResult := s.runVerification(srcDir) +func (s *PrepSubsystem) attemptVerifyAndMerge(repoDir, org, repo, branch string, prNum int) mergeResult { + testResult := s.runVerification(repoDir) if !testResult.passed { - comment := fmt.Sprintf("## Verification Failed\n\n**Command:** `%s`\n\n```\n%s\n```\n\n**Exit code:** %d", + comment := core.Sprintf("## Verification Failed\n\n**Command:** `%s`\n\n```\n%s\n```\n\n**Exit code:** %d", testResult.testCmd, truncate(testResult.output, 2000), testResult.exitCode) s.commentOnIssue(context.Background(), org, repo, prNum, comment) return testFailed @@ -100,40 +96,40 @@ func (s *PrepSubsystem) attemptVerifyAndMerge(srcDir, org, repo, branch string, defer cancel() if err := s.forgeMergePR(ctx, org, repo, prNum); err != nil { - comment := fmt.Sprintf("## Tests Passed — Merge Failed\n\n`%s` passed but merge failed: %v", testResult.testCmd, err) + comment := core.Sprintf("## Tests Passed — Merge Failed\n\n`%s` passed but merge failed: %v", testResult.testCmd, err) s.commentOnIssue(context.Background(), org, repo, prNum, comment) return mergeConflict } - comment := fmt.Sprintf("## Auto-Verified & Merged\n\n**Tests:** `%s` — PASS\n\nAuto-merged by core-agent dispatch system.", testResult.testCmd) + comment := core.Sprintf("## Auto-Verified & Merged\n\n**Tests:** `%s` — PASS\n\nAuto-merged by core-agent dispatch system.", testResult.testCmd) s.commentOnIssue(context.Background(), org, repo, prNum, comment) return mergeSuccess } // rebaseBranch rebases the current branch onto the default branch and force-pushes. 
-func (s *PrepSubsystem) rebaseBranch(srcDir, branch string) bool { - base := gitDefaultBranch(srcDir) +func (s *PrepSubsystem) rebaseBranch(repoDir, branch string) bool { + base := DefaultBranch(repoDir) // Fetch latest default branch fetch := exec.Command("git", "fetch", "origin", base) - fetch.Dir = srcDir + fetch.Dir = repoDir if err := fetch.Run(); err != nil { return false } // Rebase onto default branch rebase := exec.Command("git", "rebase", "origin/"+base) - rebase.Dir = srcDir + rebase.Dir = repoDir if err := rebase.Run(); err != nil { // Rebase failed — abort and give up abort := exec.Command("git", "rebase", "--abort") - abort.Dir = srcDir + abort.Dir = repoDir abort.Run() return false } // Force-push the rebased branch to Forge (origin is local clone) - st, _ := readStatus(filepath.Dir(srcDir)) + st, _ := readStatus(core.PathDir(repoDir)) org := "core" repo := "" if st != nil { @@ -142,9 +138,9 @@ func (s *PrepSubsystem) rebaseBranch(srcDir, branch string) bool { } repo = st.Repo } - forgeRemote := fmt.Sprintf("ssh://git@forge.lthn.ai:2223/%s/%s.git", org, repo) + forgeRemote := core.Sprintf("ssh://git@forge.lthn.ai:2223/%s/%s.git", org, repo) push := exec.Command("git", "push", "--force-with-lease", forgeRemote, branch) - push.Dir = srcDir + push.Dir = repoDir return push.Run() == nil } @@ -160,7 +156,7 @@ func (s *PrepSubsystem) flagForReview(org, repo string, prNum int, result mergeR payload, _ := json.Marshal(map[string]any{ "labels": []int{s.getLabelID(ctx, org, repo, "needs-review")}, }) - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/labels", s.forgeURL, org, repo, prNum) + url := core.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/labels", s.forgeURL, org, repo, prNum) req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload)) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", "token "+s.forgeToken) @@ -174,7 +170,7 @@ func (s *PrepSubsystem) flagForReview(org, repo string, prNum int, result 
mergeR if result == mergeConflict { reason = "Merge conflict persists after rebase" } - comment := fmt.Sprintf("## Needs Review\n\n%s. Auto-merge gave up after retry.\n\nLabelled `needs-review` for human attention.", reason) + comment := core.Sprintf("## Needs Review\n\n%s. Auto-merge gave up after retry.\n\nLabelled `needs-review` for human attention.", reason) s.commentOnIssue(ctx, org, repo, prNum, comment) } @@ -184,7 +180,7 @@ func (s *PrepSubsystem) ensureLabel(ctx context.Context, org, repo, name, colour "name": name, "color": "#" + colour, }) - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo) + url := core.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo) req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload)) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", "token "+s.forgeToken) @@ -196,7 +192,7 @@ func (s *PrepSubsystem) ensureLabel(ctx context.Context, org, repo, name, colour // getLabelID fetches the ID of a label by name. func (s *PrepSubsystem) getLabelID(ctx context.Context, org, repo, name string) int { - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo) + url := core.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo) req, _ := http.NewRequestWithContext(ctx, "GET", url, nil) req.Header.Set("Authorization", "token "+s.forgeToken) resp, err := s.client.Do(req) @@ -227,22 +223,22 @@ type verifyResult struct { } // runVerification detects the project type and runs the appropriate test suite. 
-func (s *PrepSubsystem) runVerification(srcDir string) verifyResult { - if fileExists(filepath.Join(srcDir, "go.mod")) { - return s.runGoTests(srcDir) +func (s *PrepSubsystem) runVerification(repoDir string) verifyResult { + if fileExists(core.JoinPath(repoDir, "go.mod")) { + return s.runGoTests(repoDir) } - if fileExists(filepath.Join(srcDir, "composer.json")) { - return s.runPHPTests(srcDir) + if fileExists(core.JoinPath(repoDir, "composer.json")) { + return s.runPHPTests(repoDir) } - if fileExists(filepath.Join(srcDir, "package.json")) { - return s.runNodeTests(srcDir) + if fileExists(core.JoinPath(repoDir, "package.json")) { + return s.runNodeTests(repoDir) } return verifyResult{passed: true, testCmd: "none", output: "No test runner detected"} } -func (s *PrepSubsystem) runGoTests(srcDir string) verifyResult { +func (s *PrepSubsystem) runGoTests(repoDir string) verifyResult { cmd := exec.Command("go", "test", "./...", "-count=1", "-timeout", "120s") - cmd.Dir = srcDir + cmd.Dir = repoDir cmd.Env = append(os.Environ(), "GOWORK=off") out, err := cmd.CombinedOutput() @@ -258,9 +254,9 @@ func (s *PrepSubsystem) runGoTests(srcDir string) verifyResult { return verifyResult{passed: exitCode == 0, output: string(out), exitCode: exitCode, testCmd: "go test ./..."} } -func (s *PrepSubsystem) runPHPTests(srcDir string) verifyResult { +func (s *PrepSubsystem) runPHPTests(repoDir string) verifyResult { cmd := exec.Command("composer", "test", "--no-interaction") - cmd.Dir = srcDir + cmd.Dir = repoDir out, err := cmd.CombinedOutput() exitCode := 0 @@ -269,7 +265,7 @@ func (s *PrepSubsystem) runPHPTests(srcDir string) verifyResult { exitCode = exitErr.ExitCode() } else { cmd2 := exec.Command("./vendor/bin/pest", "--no-interaction") - cmd2.Dir = srcDir + cmd2.Dir = repoDir out2, err2 := cmd2.CombinedOutput() if err2 != nil { return verifyResult{passed: false, testCmd: "none", output: "No PHP test runner found (composer test and vendor/bin/pest both unavailable)", exitCode: 1} 
@@ -281,21 +277,21 @@ func (s *PrepSubsystem) runPHPTests(srcDir string) verifyResult { return verifyResult{passed: exitCode == 0, output: string(out), exitCode: exitCode, testCmd: "composer test"} } -func (s *PrepSubsystem) runNodeTests(srcDir string) verifyResult { - data, err := coreio.Local.Read(filepath.Join(srcDir, "package.json")) - if err != nil { +func (s *PrepSubsystem) runNodeTests(repoDir string) verifyResult { + r := fs.Read(core.JoinPath(repoDir, "package.json")) + if !r.OK { return verifyResult{passed: true, testCmd: "none", output: "Could not read package.json"} } var pkg struct { Scripts map[string]string `json:"scripts"` } - if json.Unmarshal([]byte(data), &pkg) != nil || pkg.Scripts["test"] == "" { + if json.Unmarshal([]byte(r.Value.(string)), &pkg) != nil || pkg.Scripts["test"] == "" { return verifyResult{passed: true, testCmd: "none", output: "No test script in package.json"} } cmd := exec.Command("npm", "test") - cmd.Dir = srcDir + cmd.Dir = repoDir out, err := cmd.CombinedOutput() exitCode := 0 @@ -318,14 +314,14 @@ func (s *PrepSubsystem) forgeMergePR(ctx context.Context, org, repo string, prNu "delete_branch_after_merge": true, }) - url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls/%d/merge", s.forgeURL, org, repo, prNum) + url := core.Sprintf("%s/api/v1/repos/%s/%s/pulls/%d/merge", s.forgeURL, org, repo, prNum) req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload)) req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", "token "+s.forgeToken) resp, err := s.client.Do(req) if err != nil { - return coreerr.E("forgeMergePR", "request failed", err) + return core.E("forgeMergePR", "request failed", err) } defer resp.Body.Close() @@ -333,7 +329,7 @@ func (s *PrepSubsystem) forgeMergePR(ctx context.Context, org, repo string, prNu var errBody map[string]any json.NewDecoder(resp.Body).Decode(&errBody) msg, _ := errBody["message"].(string) - return coreerr.E("forgeMergePR", fmt.Sprintf("HTTP %d: 
%s", resp.StatusCode, msg), nil) + return core.E("forgeMergePR", core.Sprintf("HTTP %d: %s", resp.StatusCode, msg), nil) } return nil @@ -341,16 +337,14 @@ func (s *PrepSubsystem) forgeMergePR(ctx context.Context, org, repo string, prNu // extractPRNumber gets the PR number from a Forge PR URL. func extractPRNumber(prURL string) int { - parts := strings.Split(prURL, "/") + parts := core.Split(prURL, "/") if len(parts) == 0 { return 0 } - var num int - fmt.Sscanf(parts[len(parts)-1], "%d", &num) - return num + return parseInt(parts[len(parts)-1]) } // fileExists checks if a file exists. func fileExists(path string) bool { - return coreio.Local.IsFile(path) + return fs.IsFile(path) } diff --git a/pkg/agentic/watch.go b/pkg/agentic/watch.go index 6fc9999..ac10bff 100644 --- a/pkg/agentic/watch.go +++ b/pkg/agentic/watch.go @@ -4,15 +4,15 @@ package agentic import ( "context" - "fmt" - "path/filepath" "time" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) // WatchInput is the input for agentic_watch. +// +// input := agentic.WatchInput{Workspaces: []string{"go-io-123"}, PollInterval: 5, Timeout: 600} type WatchInput struct { // Workspaces to watch. If empty, watches all running/queued workspaces. Workspaces []string `json:"workspaces,omitempty"` @@ -23,6 +23,8 @@ type WatchInput struct { } // WatchOutput is the result when all watched workspaces complete. +// +// out := agentic.WatchOutput{Success: true, Completed: []agentic.WatchResult{{Workspace: "go-io-123", Status: "completed"}}} type WatchOutput struct { Success bool `json:"success"` Completed []WatchResult `json:"completed"` @@ -31,6 +33,8 @@ type WatchOutput struct { } // WatchResult describes one completed workspace. 
+// +// result := agentic.WatchResult{Workspace: "go-io-123", Agent: "codex", Repo: "go-io", Status: "completed"} type WatchResult struct { Workspace string `json:"workspace"` Agent string `json:"agent"` @@ -99,7 +103,7 @@ func (s *PrepSubsystem) watch(ctx context.Context, req *mcp.CallToolRequest, inp select { case <-ctx.Done(): - return nil, WatchOutput{}, coreerr.E("watch", "cancelled", ctx.Err()) + return nil, WatchOutput{}, core.E("watch", "cancelled", ctx.Err()) case <-time.After(pollInterval): } @@ -128,7 +132,7 @@ func (s *PrepSubsystem) watch(ctx context.Context, req *mcp.CallToolRequest, inp ProgressToken: progressToken, Progress: progressCount, Total: total, - Message: fmt.Sprintf("%s completed (%s)", st.Repo, st.Agent), + Message: core.Sprintf("%s completed (%s)", st.Repo, st.Agent), }) } @@ -149,7 +153,7 @@ func (s *PrepSubsystem) watch(ctx context.Context, req *mcp.CallToolRequest, inp ProgressToken: progressToken, Progress: progressCount, Total: total, - Message: fmt.Sprintf("%s %s (%s)", st.Repo, st.Status, st.Agent), + Message: core.Sprintf("%s %s (%s)", st.Repo, st.Status, st.Agent), }) } @@ -169,7 +173,7 @@ func (s *PrepSubsystem) watch(ctx context.Context, req *mcp.CallToolRequest, inp ProgressToken: progressToken, Progress: progressCount, Total: total, - Message: fmt.Sprintf("%s %s (%s)", st.Repo, st.Status, st.Agent), + Message: core.Sprintf("%s %s (%s)", st.Repo, st.Status, st.Agent), }) } } @@ -187,20 +191,17 @@ func (s *PrepSubsystem) watch(ctx context.Context, req *mcp.CallToolRequest, inp // findActiveWorkspaces returns workspace names that are running or queued. 
func (s *PrepSubsystem) findActiveWorkspaces() []string { wsRoot := WorkspaceRoot() - entries, err := filepath.Glob(filepath.Join(wsRoot, "*/status.json")) - if err != nil { - return nil - } + entries := core.PathGlob(core.JoinPath(wsRoot, "*/status.json")) var active []string for _, entry := range entries { - wsDir := filepath.Dir(entry) + wsDir := core.PathDir(entry) st, err := readStatus(wsDir) if err != nil { continue } if st.Status == "running" || st.Status == "queued" { - active = append(active, filepath.Base(wsDir)) + active = append(active, core.PathBase(wsDir)) } } return active @@ -208,8 +209,8 @@ func (s *PrepSubsystem) findActiveWorkspaces() []string { // resolveWorkspaceDir converts a workspace name to full path. func (s *PrepSubsystem) resolveWorkspaceDir(name string) string { - if filepath.IsAbs(name) { + if core.PathIsAbs(name) { return name } - return filepath.Join(WorkspaceRoot(), name) + return core.JoinPath(WorkspaceRoot(), name) } diff --git a/pkg/brain/brain.go b/pkg/brain/brain.go index 543d613..5e04998 100644 --- a/pkg/brain/brain.go +++ b/pkg/brain/brain.go @@ -7,36 +7,60 @@ package brain import ( "context" - coreerr "dappco.re/go/core/log" + "dappco.re/go/agent/pkg/agentic" + core "dappco.re/go/core" "forge.lthn.ai/core/mcp/pkg/mcp/ide" "github.com/modelcontextprotocol/go-sdk/mcp" ) +// fs provides unrestricted filesystem access for shared brain credentials. +// +// keyPath := core.Concat(home, "/.claude/brain.key") +// if r := fs.Read(keyPath); r.OK { +// apiKey = core.Trim(r.Value.(string)) +// } +var fs = agentic.LocalFs() + +func fieldString(values map[string]any, key string) string { + return core.Sprint(values[key]) +} + // errBridgeNotAvailable is returned when a tool requires the Laravel bridge // but it has not been initialised (headless mode). 
-var errBridgeNotAvailable = coreerr.E("brain", "bridge not available", nil) +var errBridgeNotAvailable = core.E("brain", "bridge not available", nil) -// Subsystem implements mcp.Subsystem for OpenBrain knowledge store operations. -// It proxies brain_* tool calls to the Laravel backend via the shared IDE bridge. +// Subsystem proxies brain_* MCP tools through the shared IDE bridge. +// +// sub := brain.New(bridge) +// sub.RegisterTools(server) type Subsystem struct { bridge *ide.Bridge } -// New creates a brain subsystem that uses the given IDE bridge for Laravel communication. -// Pass nil if headless (tools will return errBridgeNotAvailable). +// New creates a bridge-backed brain subsystem. +// +// sub := brain.New(bridge) +// _ = sub.Shutdown(context.Background()) func New(bridge *ide.Bridge) *Subsystem { return &Subsystem{bridge: bridge} } -// Name implements mcp.Subsystem. +// Name returns the MCP subsystem name. +// +// name := sub.Name() // "brain" func (s *Subsystem) Name() string { return "brain" } -// RegisterTools implements mcp.Subsystem. +// RegisterTools adds the bridge-backed brain tools to an MCP server. +// +// sub := brain.New(bridge) +// sub.RegisterTools(server) func (s *Subsystem) RegisterTools(server *mcp.Server) { s.registerBrainTools(server) } -// Shutdown implements mcp.SubsystemWithShutdown. +// Shutdown closes the subsystem without additional cleanup. 
+// +// _ = sub.Shutdown(context.Background()) func (s *Subsystem) Shutdown(_ context.Context) error { return nil } diff --git a/pkg/brain/bridge_test.go b/pkg/brain/bridge_test.go index 6e8d3aa..9c46fb4 100644 --- a/pkg/brain/bridge_test.go +++ b/pkg/brain/bridge_test.go @@ -11,7 +11,8 @@ import ( "testing" "time" - ws "dappco.re/go/core/ws" + providerws "dappco.re/go/core/ws" + bridgews "forge.lthn.ai/core/go-ws" "forge.lthn.ai/core/mcp/pkg/mcp/ide" "github.com/gorilla/websocket" mcpsdk "github.com/modelcontextprotocol/go-sdk/mcp" @@ -45,7 +46,7 @@ func testBridge(t *testing.T) *ide.Bridge { srv := testWSServer(t) wsURL := "ws" + strings.TrimPrefix(srv.URL, "http") - hub := ws.NewHub() + hub := bridgews.NewHub() bridge := ide.NewBridge(hub, ide.Config{ LaravelWSURL: wsURL, ReconnectInterval: 100 * time.Millisecond, @@ -193,7 +194,7 @@ func TestStatusHandler_Good_WithBridge(t *testing.T) { // --- emitEvent with hub --- func TestEmitEvent_Good_WithHub(t *testing.T) { - hub := ws.NewHub() + hub := providerws.NewHub() p := NewProvider(nil, hub) p.emitEvent("brain.test", map[string]any{"key": "value"}) } diff --git a/pkg/brain/direct.go b/pkg/brain/direct.go index bb5a195..fa13651 100644 --- a/pkg/brain/direct.go +++ b/pkg/brain/direct.go @@ -6,50 +6,53 @@ import ( "bytes" "context" "encoding/json" - "fmt" - "io" "net/http" - "os" - "path/filepath" - "strings" "time" "dappco.re/go/agent/pkg/agentic" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" + coremcp "forge.lthn.ai/core/mcp/pkg/mcp" "github.com/modelcontextprotocol/go-sdk/mcp" ) -// agentName returns the identity of this agent. -func agentName() string { - return agentic.AgentName() -} - -// DirectSubsystem implements mcp.Subsystem for OpenBrain via direct HTTP calls. -// Unlike Subsystem (which uses the IDE WebSocket bridge), this calls the -// Laravel API directly — suitable for standalone core-mcp usage. 
+// DirectSubsystem calls the OpenBrain HTTP API without the IDE bridge. +// +// sub := brain.NewDirect() +// sub.RegisterTools(server) type DirectSubsystem struct { apiURL string apiKey string client *http.Client } -// NewDirect creates a brain subsystem that calls the OpenBrain API directly. -// Reads CORE_BRAIN_URL and CORE_BRAIN_KEY from environment, or falls back -// to ~/.claude/brain.key for the API key. +var _ coremcp.Subsystem = (*DirectSubsystem)(nil) + +// NewDirect creates a direct HTTP brain subsystem. +// +// sub := brain.NewDirect() +// sub.RegisterTools(server) func NewDirect() *DirectSubsystem { - apiURL := os.Getenv("CORE_BRAIN_URL") + apiURL := core.Env("CORE_BRAIN_URL") if apiURL == "" { apiURL = "https://api.lthn.sh" } - apiKey := os.Getenv("CORE_BRAIN_KEY") + apiKey := core.Env("CORE_BRAIN_KEY") + keyPath := "" if apiKey == "" { - home, _ := os.UserHomeDir() - if data, err := coreio.Local.Read(filepath.Join(home, ".claude", "brain.key")); err == nil { - apiKey = strings.TrimSpace(data) + keyPath = brainKeyPath(brainHomeDir()) + if keyPath != "" { + if r := fs.Read(keyPath); r.OK { + apiKey = core.Trim(r.Value.(string)) + if apiKey != "" { + core.Info("brain direct subsystem loaded API key from file", "path", keyPath) + } + } } } + if apiKey == "" { + core.Warn("brain direct subsystem has no API key configured", "path", keyPath) + } return &DirectSubsystem{ apiURL: apiURL, @@ -58,10 +61,15 @@ func NewDirect() *DirectSubsystem { } } -// Name implements mcp.Subsystem. +// Name returns the MCP subsystem name. +// +// name := sub.Name() // "brain" func (s *DirectSubsystem) Name() string { return "brain" } -// RegisterTools implements mcp.Subsystem. +// RegisterTools adds the direct OpenBrain tools to an MCP server. 
+// +// sub := brain.NewDirect() +// sub.RegisterTools(server) func (s *DirectSubsystem) RegisterTools(server *mcp.Server) { mcp.AddTool(server, &mcp.Tool{ Name: "brain_remember", @@ -82,49 +90,76 @@ func (s *DirectSubsystem) RegisterTools(server *mcp.Server) { s.RegisterMessagingTools(server) } -// Shutdown implements mcp.SubsystemWithShutdown. +// Shutdown closes the direct subsystem without additional cleanup. +// +// _ = sub.Shutdown(context.Background()) func (s *DirectSubsystem) Shutdown(_ context.Context) error { return nil } +func brainKeyPath(home string) string { + if home == "" { + return "" + } + return core.JoinPath(core.TrimSuffix(home, "/"), ".claude", "brain.key") +} + +func brainHomeDir() string { + if home := core.Env("CORE_HOME"); home != "" { + return home + } + return core.Env("DIR_HOME") +} + func (s *DirectSubsystem) apiCall(ctx context.Context, method, path string, body any) (map[string]any, error) { if s.apiKey == "" { - return nil, coreerr.E("brain.apiCall", "no API key (set CORE_BRAIN_KEY or create ~/.claude/brain.key)", nil) + return nil, core.E("brain.apiCall", "no API key (set CORE_BRAIN_KEY or create ~/.claude/brain.key)", nil) } - var reqBody io.Reader + var reqBody *bytes.Reader if body != nil { data, err := json.Marshal(body) if err != nil { - return nil, coreerr.E("brain.apiCall", "marshal request", err) + core.Error("brain API request marshal failed", "method", method, "path", path, "err", err) + return nil, core.E("brain.apiCall", "marshal request", err) } reqBody = bytes.NewReader(data) } - req, err := http.NewRequestWithContext(ctx, method, s.apiURL+path, reqBody) + requestURL := core.Concat(s.apiURL, path) + req, err := http.NewRequestWithContext(ctx, method, requestURL, nil) + if reqBody != nil { + req, err = http.NewRequestWithContext(ctx, method, requestURL, reqBody) + } if err != nil { - return nil, coreerr.E("brain.apiCall", "create request", err) + core.Error("brain API request creation failed", "method", method, 
"path", path, "err", err) + return nil, core.E("brain.apiCall", "create request", err) } req.Header.Set("Content-Type", "application/json") req.Header.Set("Accept", "application/json") - req.Header.Set("Authorization", "Bearer "+s.apiKey) + req.Header.Set("Authorization", core.Concat("Bearer ", s.apiKey)) resp, err := s.client.Do(req) if err != nil { - return nil, coreerr.E("brain.apiCall", "API call failed", err) + core.Error("brain API call failed", "method", method, "path", path, "err", err) + return nil, core.E("brain.apiCall", "API call failed", err) } defer resp.Body.Close() - respData, err := io.ReadAll(resp.Body) - if err != nil { - return nil, coreerr.E("brain.apiCall", "read response", err) + respBuffer := bytes.NewBuffer(nil) + if _, err := respBuffer.ReadFrom(resp.Body); err != nil { + core.Error("brain API response read failed", "method", method, "path", path, "err", err) + return nil, core.E("brain.apiCall", "read response", err) } + respData := respBuffer.Bytes() if resp.StatusCode >= 400 { - return nil, coreerr.E("brain.apiCall", fmt.Sprintf("API returned %d: %s", resp.StatusCode, string(respData)), nil) + core.Warn("brain API returned error status", "method", method, "path", path, "status", resp.StatusCode) + return nil, core.E("brain.apiCall", core.Sprintf("API returned %d: %s", resp.StatusCode, string(respData)), nil) } var result map[string]any if err := json.Unmarshal(respData, &result); err != nil { - return nil, coreerr.E("brain.apiCall", "parse response", err) + core.Error("brain API response parse failed", "method", method, "path", path, "err", err) + return nil, core.E("brain.apiCall", "parse response", err) } return result, nil @@ -132,11 +167,14 @@ func (s *DirectSubsystem) apiCall(ctx context.Context, method, path string, body func (s *DirectSubsystem) remember(ctx context.Context, _ *mcp.CallToolRequest, input RememberInput) (*mcp.CallToolResult, RememberOutput, error) { result, err := s.apiCall(ctx, "POST", "/v1/brain/remember", 
map[string]any{ - "content": input.Content, - "type": input.Type, - "tags": input.Tags, - "project": input.Project, - "agent_id": agentName(), + "content": input.Content, + "type": input.Type, + "tags": input.Tags, + "project": input.Project, + "confidence": input.Confidence, + "supersedes": input.Supersedes, + "expires_in": input.ExpiresIn, + "agent_id": agentic.AgentName(), }) if err != nil { return nil, RememberOutput{}, err @@ -165,6 +203,9 @@ func (s *DirectSubsystem) recall(ctx context.Context, _ *mcp.CallToolRequest, in if input.Filter.Type != nil { body["type"] = input.Filter.Type } + if input.Filter.MinConfidence != 0 { + body["min_confidence"] = input.Filter.MinConfidence + } if input.TopK == 0 { body["top_k"] = 10 } @@ -179,11 +220,11 @@ func (s *DirectSubsystem) recall(ctx context.Context, _ *mcp.CallToolRequest, in for _, m := range mems { if mm, ok := m.(map[string]any); ok { mem := Memory{ - Content: fmt.Sprintf("%v", mm["content"]), - Type: fmt.Sprintf("%v", mm["type"]), - Project: fmt.Sprintf("%v", mm["project"]), - AgentID: fmt.Sprintf("%v", mm["agent_id"]), - CreatedAt: fmt.Sprintf("%v", mm["created_at"]), + Content: fieldString(mm, "content"), + Type: fieldString(mm, "type"), + Project: fieldString(mm, "project"), + AgentID: fieldString(mm, "agent_id"), + CreatedAt: fieldString(mm, "created_at"), } if id, ok := mm["id"].(string); ok { mem.ID = id @@ -191,8 +232,13 @@ func (s *DirectSubsystem) recall(ctx context.Context, _ *mcp.CallToolRequest, in if score, ok := mm["score"].(float64); ok { mem.Confidence = score } + if tags, ok := mm["tags"].([]any); ok { + for _, tag := range tags { + mem.Tags = append(mem.Tags, core.Sprint(tag)) + } + } if source, ok := mm["source"].(string); ok { - mem.Tags = append(mem.Tags, "source:"+source) + mem.Tags = append(mem.Tags, core.Concat("source:", source)) } memories = append(memories, mem) } @@ -207,7 +253,7 @@ func (s *DirectSubsystem) recall(ctx context.Context, _ *mcp.CallToolRequest, in } func (s 
*DirectSubsystem) forget(ctx context.Context, _ *mcp.CallToolRequest, input ForgetInput) (*mcp.CallToolResult, ForgetOutput, error) { - _, err := s.apiCall(ctx, "DELETE", "/v1/brain/forget/"+input.ID, nil) + _, err := s.apiCall(ctx, "DELETE", core.Concat("/v1/brain/forget/", input.ID), nil) if err != nil { return nil, ForgetOutput{}, err } diff --git a/pkg/brain/direct_test.go b/pkg/brain/direct_test.go index d26b14b..ea0f686 100644 --- a/pkg/brain/direct_test.go +++ b/pkg/brain/direct_test.go @@ -5,14 +5,12 @@ package brain import ( "context" "encoding/json" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "net/http" "net/http/httptest" "path/filepath" "testing" - - coreio "dappco.re/go/core/io" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // newTestDirect returns a DirectSubsystem wired to the given test server. @@ -61,10 +59,10 @@ func TestNewDirect_Good_KeyFromFile(t *testing.T) { t.Setenv("CORE_BRAIN_KEY", "") tmpHome := t.TempDir() - t.Setenv("HOME", tmpHome) + t.Setenv("CORE_HOME", tmpHome) keyDir := filepath.Join(tmpHome, ".claude") - require.NoError(t, coreio.Local.EnsureDir(keyDir)) - require.NoError(t, coreio.Local.Write(filepath.Join(keyDir, "brain.key"), " file-key-456 \n")) + require.True(t, fs.EnsureDir(keyDir).OK) + require.True(t, fs.Write(filepath.Join(keyDir, "brain.key"), " file-key-456 \n").OK) sub := NewDirect() assert.Equal(t, "file-key-456", sub.apiKey) diff --git a/pkg/brain/messaging.go b/pkg/brain/messaging.go index ad5e3a6..eb8e5f7 100644 --- a/pkg/brain/messaging.go +++ b/pkg/brain/messaging.go @@ -4,14 +4,17 @@ package brain import ( "context" - "fmt" "net/url" - coreerr "dappco.re/go/core/log" + "dappco.re/go/agent/pkg/agentic" + core "dappco.re/go/core" "github.com/modelcontextprotocol/go-sdk/mcp" ) -// RegisterMessagingTools adds agent messaging tools to the MCP server. +// RegisterMessagingTools adds direct agent messaging tools to an MCP server. 
+// +// sub := brain.NewDirect() +// sub.RegisterMessagingTools(server) func (s *DirectSubsystem) RegisterMessagingTools(server *mcp.Server) { mcp.AddTool(server, &mcp.Tool{ Name: "agent_send", @@ -31,22 +34,34 @@ func (s *DirectSubsystem) RegisterMessagingTools(server *mcp.Server) { // Input/Output types +// SendInput sends a direct message to another agent. +// +// brain.SendInput{To: "charon", Subject: "status update", Content: "deploy complete"} type SendInput struct { To string `json:"to"` Content string `json:"content"` Subject string `json:"subject,omitempty"` } +// SendOutput reports the created direct message. +// +// brain.SendOutput{Success: true, ID: 42, To: "charon"} type SendOutput struct { Success bool `json:"success"` ID int `json:"id"` To string `json:"to"` } +// InboxInput selects which agent inbox to read. +// +// brain.InboxInput{Agent: "cladius"} type InboxInput struct { Agent string `json:"agent,omitempty"` } +// MessageItem is one inbox or conversation message. +// +// brain.MessageItem{ID: 7, From: "cladius", To: "charon", Content: "all green"} type MessageItem struct { ID int `json:"id"` From string `json:"from"` @@ -57,15 +72,24 @@ type MessageItem struct { CreatedAt string `json:"created_at"` } +// InboxOutput returns the latest direct messages for an agent. +// +// brain.InboxOutput{Success: true, Messages: []brain.MessageItem{{ID: 1, From: "charon", To: "cladius"}}} type InboxOutput struct { Success bool `json:"success"` Messages []MessageItem `json:"messages"` } +// ConversationInput selects the agent thread to load. +// +// brain.ConversationInput{Agent: "charon"} type ConversationInput struct { Agent string `json:"agent"` } +// ConversationOutput returns a direct message thread with another agent. 
+// +// brain.ConversationOutput{Success: true, Messages: []brain.MessageItem{{ID: 10, From: "cladius", To: "charon"}}} type ConversationOutput struct { Success bool `json:"success"` Messages []MessageItem `json:"messages"` @@ -75,12 +99,12 @@ type ConversationOutput struct { func (s *DirectSubsystem) sendMessage(ctx context.Context, _ *mcp.CallToolRequest, input SendInput) (*mcp.CallToolResult, SendOutput, error) { if input.To == "" || input.Content == "" { - return nil, SendOutput{}, coreerr.E("brain.sendMessage", "to and content are required", nil) + return nil, SendOutput{}, core.E("brain.sendMessage", "to and content are required", nil) } result, err := s.apiCall(ctx, "POST", "/v1/messages/send", map[string]any{ "to": input.To, - "from": agentName(), + "from": agentic.AgentName(), "content": input.Content, "subject": input.Subject, }) @@ -101,7 +125,7 @@ func (s *DirectSubsystem) sendMessage(ctx context.Context, _ *mcp.CallToolReques func (s *DirectSubsystem) inbox(ctx context.Context, _ *mcp.CallToolRequest, input InboxInput) (*mcp.CallToolResult, InboxOutput, error) { agent := input.Agent if agent == "" { - agent = agentName() + agent = agentic.AgentName() } result, err := s.apiCall(ctx, "GET", "/v1/messages/inbox?agent="+url.QueryEscape(agent), nil) if err != nil { @@ -116,10 +140,10 @@ func (s *DirectSubsystem) inbox(ctx context.Context, _ *mcp.CallToolRequest, inp func (s *DirectSubsystem) conversation(ctx context.Context, _ *mcp.CallToolRequest, input ConversationInput) (*mcp.CallToolResult, ConversationOutput, error) { if input.Agent == "" { - return nil, ConversationOutput{}, coreerr.E("brain.conversation", "agent is required", nil) + return nil, ConversationOutput{}, core.E("brain.conversation", "agent is required", nil) } - result, err := s.apiCall(ctx, "GET", "/v1/messages/conversation/"+url.PathEscape(input.Agent)+"?me="+url.QueryEscape(agentName()), nil) + result, err := s.apiCall(ctx, "GET", 
"/v1/messages/conversation/"+url.PathEscape(input.Agent)+"?me="+url.QueryEscape(agentic.AgentName()), nil) if err != nil { return nil, ConversationOutput{}, err } @@ -137,12 +161,12 @@ func parseMessages(result map[string]any) []MessageItem { mm, _ := m.(map[string]any) messages = append(messages, MessageItem{ ID: toInt(mm["id"]), - From: fmt.Sprintf("%v", mm["from"]), - To: fmt.Sprintf("%v", mm["to"]), - Subject: fmt.Sprintf("%v", mm["subject"]), - Content: fmt.Sprintf("%v", mm["content"]), + From: fieldString(mm, "from"), + To: fieldString(mm, "to"), + Subject: fieldString(mm, "subject"), + Content: fieldString(mm, "content"), Read: mm["read"] == true, - CreatedAt: fmt.Sprintf("%v", mm["created_at"]), + CreatedAt: fieldString(mm, "created_at"), }) } return messages diff --git a/pkg/brain/provider.go b/pkg/brain/provider.go index 529811f..dd8bcd2 100644 --- a/pkg/brain/provider.go +++ b/pkg/brain/provider.go @@ -4,9 +4,10 @@ package brain import ( "net/http" + "strconv" - "forge.lthn.ai/core/api" - "forge.lthn.ai/core/api/pkg/provider" + "dappco.re/go/core/api" + "dappco.re/go/core/api/pkg/provider" "dappco.re/go/core/ws" "forge.lthn.ai/core/mcp/pkg/mcp/ide" "github.com/gin-gonic/gin" @@ -14,6 +15,11 @@ import ( // BrainProvider wraps the brain Subsystem as a service provider with REST // endpoints. It delegates to the same IDE bridge that the MCP tools use. 
+// +// Usage example: +// +// provider := brain.NewProvider(bridge, hub) +// provider.RegisterRoutes(router.Group("/api/brain")) type BrainProvider struct { bridge *ide.Bridge hub *ws.Hub @@ -294,13 +300,23 @@ func (p *BrainProvider) list(c *gin.Context) { return } + limit := 0 + if rawLimit := c.Query("limit"); rawLimit != "" { + parsedLimit, err := strconv.Atoi(rawLimit) + if err != nil { + c.JSON(http.StatusBadRequest, api.Fail("invalid_limit", "limit must be an integer")) + return + } + limit = parsedLimit + } + err := p.bridge.Send(ide.BridgeMessage{ Type: "brain_list", Data: map[string]any{ "project": c.Query("project"), "type": c.Query("type"), "agent_id": c.Query("agent_id"), - "limit": c.Query("limit"), + "limit": limit, }, }) if err != nil { diff --git a/pkg/brain/tools.go b/pkg/brain/tools.go index 134c5ce..c693b4d 100644 --- a/pkg/brain/tools.go +++ b/pkg/brain/tools.go @@ -6,7 +6,7 @@ import ( "context" "time" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" "forge.lthn.ai/core/mcp/pkg/mcp/ide" "github.com/modelcontextprotocol/go-sdk/mcp" ) @@ -14,6 +14,13 @@ import ( // -- Input/Output types ------------------------------------------------------- // RememberInput is the input for brain_remember. +// +// Usage example: +// +// input := brain.RememberInput{ +// Content: "Use core.Env for system paths.", +// Type: "convention", +// } type RememberInput struct { Content string `json:"content"` Type string `json:"type"` @@ -25,6 +32,13 @@ type RememberInput struct { } // RememberOutput is the output for brain_remember. +// +// Usage example: +// +// output := brain.RememberOutput{ +// Success: true, +// MemoryID: "mem_123", +// } type RememberOutput struct { Success bool `json:"success"` MemoryID string `json:"memoryId,omitempty"` @@ -32,6 +46,13 @@ type RememberOutput struct { } // RecallInput is the input for brain_recall. 
+// +// Usage example: +// +// input := brain.RecallInput{ +// Query: "core.Env conventions", +// TopK: 5, +// } type RecallInput struct { Query string `json:"query"` TopK int `json:"top_k,omitempty"` @@ -39,6 +60,13 @@ type RecallInput struct { } // RecallFilter holds optional filter criteria for brain_recall. +// +// Usage example: +// +// filter := brain.RecallFilter{ +// Project: "agent", +// Type: "convention", +// } type RecallFilter struct { Project string `json:"project,omitempty"` Type any `json:"type,omitempty"` @@ -47,6 +75,13 @@ type RecallFilter struct { } // RecallOutput is the output for brain_recall. +// +// Usage example: +// +// output := brain.RecallOutput{ +// Success: true, +// Count: 1, +// } type RecallOutput struct { Success bool `json:"success"` Count int `json:"count"` @@ -54,6 +89,14 @@ type RecallOutput struct { } // Memory is a single memory entry returned by recall or list. +// +// Usage example: +// +// memory := brain.Memory{ +// ID: "mem_123", +// Type: "convention", +// Content: "Use core.Env for system paths.", +// } type Memory struct { ID string `json:"id"` AgentID string `json:"agent_id"` @@ -69,12 +112,26 @@ type Memory struct { } // ForgetInput is the input for brain_forget. +// +// Usage example: +// +// input := brain.ForgetInput{ +// ID: "mem_123", +// Reason: "superseded", +// } type ForgetInput struct { ID string `json:"id"` Reason string `json:"reason,omitempty"` } // ForgetOutput is the output for brain_forget. +// +// Usage example: +// +// output := brain.ForgetOutput{ +// Success: true, +// Forgotten: "mem_123", +// } type ForgetOutput struct { Success bool `json:"success"` Forgotten string `json:"forgotten"` @@ -82,6 +139,13 @@ type ForgetOutput struct { } // ListInput is the input for brain_list. 
+// +// Usage example: +// +// input := brain.ListInput{ +// Project: "agent", +// Limit: 20, +// } type ListInput struct { Project string `json:"project,omitempty"` Type string `json:"type,omitempty"` @@ -90,6 +154,13 @@ type ListInput struct { } // ListOutput is the output for brain_list. +// +// Usage example: +// +// output := brain.ListOutput{ +// Success: true, +// Count: 2, +// } type ListOutput struct { Success bool `json:"success"` Count int `json:"count"` @@ -140,7 +211,7 @@ func (s *Subsystem) brainRemember(_ context.Context, _ *mcp.CallToolRequest, inp }, }) if err != nil { - return nil, RememberOutput{}, coreerr.E("brain.remember", "failed to send brain_remember", err) + return nil, RememberOutput{}, core.E("brain.remember", "failed to send brain_remember", err) } return nil, RememberOutput{ @@ -163,7 +234,7 @@ func (s *Subsystem) brainRecall(_ context.Context, _ *mcp.CallToolRequest, input }, }) if err != nil { - return nil, RecallOutput{}, coreerr.E("brain.recall", "failed to send brain_recall", err) + return nil, RecallOutput{}, core.E("brain.recall", "failed to send brain_recall", err) } return nil, RecallOutput{ @@ -185,7 +256,7 @@ func (s *Subsystem) brainForget(_ context.Context, _ *mcp.CallToolRequest, input }, }) if err != nil { - return nil, ForgetOutput{}, coreerr.E("brain.forget", "failed to send brain_forget", err) + return nil, ForgetOutput{}, core.E("brain.forget", "failed to send brain_forget", err) } return nil, ForgetOutput{ @@ -210,7 +281,7 @@ func (s *Subsystem) brainList(_ context.Context, _ *mcp.CallToolRequest, input L }, }) if err != nil { - return nil, ListOutput{}, coreerr.E("brain.list", "failed to send brain_list", err) + return nil, ListOutput{}, core.E("brain.list", "failed to send brain_list", err) } return nil, ListOutput{ diff --git a/pkg/lib/lib.go b/pkg/lib/lib.go index dc12398..41dc379 100644 --- a/pkg/lib/lib.go +++ b/pkg/lib/lib.go @@ -14,103 +14,134 @@ // // Usage: // -// prompt, _ := lib.Prompt("coding") -// 
task, _ := lib.Task("code/review") -// persona, _ := lib.Persona("secops/developer") -// flow, _ := lib.Flow("go") +// r := lib.Prompt("coding") // r.Value.(string) +// r := lib.Task("code/review") // r.Value.(string) +// r := lib.Persona("secops/dev") // r.Value.(string) +// r := lib.Flow("go") // r.Value.(string) // lib.ExtractWorkspace("default", "/tmp/ws", data) package lib import ( - "bytes" "embed" "io/fs" - "os" "path/filepath" - "strings" - "text/template" + + core "dappco.re/go/core" ) -//go:embed prompt/*.md -var promptFS embed.FS +//go:embed all:prompt +var promptFiles embed.FS //go:embed all:task -var taskFS embed.FS +var taskFiles embed.FS -//go:embed flow/*.md -var flowFS embed.FS +//go:embed all:flow +var flowFiles embed.FS -//go:embed persona -var personaFS embed.FS +//go:embed all:persona +var personaFiles embed.FS //go:embed all:workspace -var workspaceFS embed.FS +var workspaceFiles embed.FS + +var ( + promptFS = mustMount(promptFiles, "prompt") + taskFS = mustMount(taskFiles, "task") + flowFS = mustMount(flowFiles, "flow") + personaFS = mustMount(personaFiles, "persona") + workspaceFS = mustMount(workspaceFiles, "workspace") +) + +func mustMount(fsys embed.FS, basedir string) *core.Embed { + r := core.Mount(fsys, basedir) + if !r.OK { + panic(r.Value) + } + return r.Value.(*core.Embed) +} // --- Prompts --- // Template tries Prompt then Task (backwards compat). -func Template(slug string) (string, error) { - if content, err := Prompt(slug); err == nil { - return content, nil +// +// r := lib.Template("coding") +// if r.OK { content := r.Value.(string) } +func Template(slug string) core.Result { + if r := Prompt(slug); r.OK { + return r } return Task(slug) } -func Prompt(slug string) (string, error) { - data, err := promptFS.ReadFile("prompt/" + slug + ".md") - if err != nil { - return "", err - } - return string(data), nil +// Prompt reads a system prompt by slug. 
+// +// r := lib.Prompt("coding") +// if r.OK { content := r.Value.(string) } +func Prompt(slug string) core.Result { + return promptFS.ReadString(slug + ".md") } -func Task(slug string) (string, error) { +// Task reads a structured task plan by slug. Tries .md, .yaml, .yml. +// +// r := lib.Task("code/review") +// if r.OK { content := r.Value.(string) } +func Task(slug string) core.Result { for _, ext := range []string{".md", ".yaml", ".yml"} { - data, err := taskFS.ReadFile("task/" + slug + ext) - if err == nil { - return string(data), nil + if r := taskFS.ReadString(slug + ext); r.OK { + return r } } - return "", fs.ErrNotExist + return core.Result{Value: fs.ErrNotExist} } -func TaskBundle(slug string) (string, map[string]string, error) { - main, err := Task(slug) - if err != nil { - return "", nil, err +// Bundle holds a task's main content plus companion files. +// +// r := lib.TaskBundle("code/review") +// if r.OK { b := r.Value.(lib.Bundle) } +type Bundle struct { + Main string + Files map[string]string +} + +// TaskBundle reads a task and its companion files. 
+// +// r := lib.TaskBundle("code/review") +// if r.OK { b := r.Value.(lib.Bundle) } +func TaskBundle(slug string) core.Result { + main := Task(slug) + if !main.OK { + return main } - bundleDir := "task/" + slug - entries, err := fs.ReadDir(taskFS, bundleDir) - if err != nil { - return main, nil, nil + b := Bundle{Main: main.Value.(string), Files: make(map[string]string)} + r := taskFS.ReadDir(slug) + if !r.OK { + return core.Result{Value: b, OK: true} } - bundle := make(map[string]string) - for _, e := range entries { + for _, e := range r.Value.([]fs.DirEntry) { if e.IsDir() { continue } - data, err := taskFS.ReadFile(bundleDir + "/" + e.Name()) - if err == nil { - bundle[e.Name()] = string(data) + if fr := taskFS.ReadString(slug + "/" + e.Name()); fr.OK { + b.Files[e.Name()] = fr.Value.(string) } } - return main, bundle, nil + return core.Result{Value: b, OK: true} } -func Flow(slug string) (string, error) { - data, err := flowFS.ReadFile("flow/" + slug + ".md") - if err != nil { - return "", err - } - return string(data), nil +// Flow reads a build/release workflow by slug. +// +// r := lib.Flow("go") +// if r.OK { content := r.Value.(string) } +func Flow(slug string) core.Result { + return flowFS.ReadString(slug + ".md") } -func Persona(path string) (string, error) { - data, err := personaFS.ReadFile("persona/" + path + ".md") - if err != nil { - return "", err - } - return string(data), nil +// Persona reads a domain/role persona by path. +// +// r := lib.Persona("secops/developer") +// if r.OK { content := r.Value.(string) } +func Persona(path string) core.Result { + return personaFS.ReadString(path + ".md") } // --- Workspace Templates --- @@ -137,65 +168,38 @@ type WorkspaceData struct { // ExtractWorkspace creates an agent workspace from a template. // Template names: "default", "security", "review". 
func ExtractWorkspace(tmplName, targetDir string, data *WorkspaceData) error { - wsDir := "workspace/" + tmplName - entries, err := fs.ReadDir(workspaceFS, wsDir) - if err != nil { - return err - } - - if err := os.MkdirAll(targetDir, 0755); err != nil { - return err - } - - for _, entry := range entries { - if entry.IsDir() { - continue - } - - name := entry.Name() - content, err := fs.ReadFile(workspaceFS, wsDir+"/"+name) - if err != nil { + r := workspaceFS.Sub(tmplName) + if !r.OK { + if err, ok := r.Value.(error); ok { return err } - - // Process .tmpl files through text/template - outputName := name - if strings.HasSuffix(name, ".tmpl") { - outputName = strings.TrimSuffix(name, ".tmpl") - tmpl, err := template.New(name).Parse(string(content)) - if err != nil { - return err - } - var buf bytes.Buffer - if err := tmpl.Execute(&buf, data); err != nil { - return err - } - content = buf.Bytes() - } - - if err := os.WriteFile(filepath.Join(targetDir, outputName), content, 0644); err != nil { + return core.E("ExtractWorkspace", "template not found: "+tmplName, nil) + } + result := core.Extract(r.Value.(*core.Embed).FS(), targetDir, data) + if !result.OK { + if err, ok := result.Value.(error); ok { return err } } - return nil } // --- List Functions --- -func ListPrompts() []string { return listDir(promptFS, "prompt") } -func ListFlows() []string { return listDir(flowFS, "flow") } -func ListWorkspaces() []string { return listDir(workspaceFS, "workspace") } +func ListPrompts() []string { return listDir(promptFS) } +func ListFlows() []string { return listDir(flowFS) } +func ListWorkspaces() []string { return listDir(workspaceFS) } func ListTasks() []string { var slugs []string - fs.WalkDir(taskFS, "task", func(path string, d fs.DirEntry, err error) error { + base := taskFS.BaseDirectory() + fs.WalkDir(taskFS.FS(), base, func(path string, d fs.DirEntry, err error) error { if err != nil || d.IsDir() { return nil } - rel := strings.TrimPrefix(path, "task/") + rel := 
core.TrimPrefix(path, base+"/") ext := filepath.Ext(rel) - slugs = append(slugs, strings.TrimSuffix(rel, ext)) + slugs = append(slugs, core.TrimSuffix(rel, ext)) return nil }) return slugs @@ -203,13 +207,14 @@ func ListTasks() []string { func ListPersonas() []string { var paths []string - fs.WalkDir(personaFS, "persona", func(path string, d fs.DirEntry, err error) error { + base := personaFS.BaseDirectory() + fs.WalkDir(personaFS.FS(), base, func(path string, d fs.DirEntry, err error) error { if err != nil || d.IsDir() { return nil } - if strings.HasSuffix(path, ".md") { - rel := strings.TrimPrefix(path, "persona/") - rel = strings.TrimSuffix(rel, ".md") + if core.HasSuffix(path, ".md") { + rel := core.TrimPrefix(path, base+"/") + rel = core.TrimSuffix(rel, ".md") paths = append(paths, rel) } return nil @@ -217,21 +222,19 @@ func ListPersonas() []string { return paths } -func listDir(fsys embed.FS, dir string) []string { - entries, err := fsys.ReadDir(dir) - if err != nil { +func listDir(emb *core.Embed) []string { + r := emb.ReadDir(".") + if !r.OK { return nil } var slugs []string - for _, e := range entries { + for _, e := range r.Value.([]fs.DirEntry) { + name := e.Name() if e.IsDir() { - name := e.Name() slugs = append(slugs, name) continue } - name := e.Name() - ext := filepath.Ext(name) - slugs = append(slugs, strings.TrimSuffix(name, ext)) + slugs = append(slugs, core.TrimSuffix(name, filepath.Ext(name))) } return slugs } diff --git a/pkg/lib/lib_test.go b/pkg/lib/lib_test.go new file mode 100644 index 0000000..5141d41 --- /dev/null +++ b/pkg/lib/lib_test.go @@ -0,0 +1,273 @@ +package lib + +import ( + "io/fs" + "os" + "path/filepath" + "testing" +) + +// --- Prompt --- + +func TestPrompt_Good(t *testing.T) { + r := Prompt("coding") + if !r.OK { + t.Fatal("Prompt('coding') returned !OK") + } + if r.Value.(string) == "" { + t.Error("Prompt('coding') returned empty string") + } +} + +func TestPrompt_Bad(t *testing.T) { + r := Prompt("nonexistent-slug") + if 
r.OK { + t.Error("Prompt('nonexistent-slug') should return !OK") + } +} + +// --- Task --- + +func TestTask_Good_Yaml(t *testing.T) { + r := Task("bug-fix") + if !r.OK { + t.Fatal("Task('bug-fix') returned !OK") + } + if r.Value.(string) == "" { + t.Error("Task('bug-fix') returned empty string") + } +} + +func TestTask_Good_Md(t *testing.T) { + r := Task("code/review") + if !r.OK { + t.Fatal("Task('code/review') returned !OK") + } + if r.Value.(string) == "" { + t.Error("Task('code/review') returned empty string") + } +} + +func TestTask_Bad(t *testing.T) { + r := Task("nonexistent-slug") + if r.OK { + t.Error("Task('nonexistent-slug') should return !OK") + } + if r.Value != fs.ErrNotExist { + t.Error("Task('nonexistent-slug') should return fs.ErrNotExist") + } +} + +// --- TaskBundle --- + +func TestTaskBundle_Good(t *testing.T) { + r := TaskBundle("code/review") + if !r.OK { + t.Fatal("TaskBundle('code/review') returned !OK") + } + b := r.Value.(Bundle) + if b.Main == "" { + t.Error("Bundle.Main is empty") + } + if len(b.Files) == 0 { + t.Error("Bundle.Files is empty — expected companion files") + } +} + +func TestTaskBundle_Bad(t *testing.T) { + r := TaskBundle("nonexistent") + if r.OK { + t.Error("TaskBundle('nonexistent') should return !OK") + } +} + +// --- Flow --- + +func TestFlow_Good(t *testing.T) { + r := Flow("go") + if !r.OK { + t.Fatal("Flow('go') returned !OK") + } + if r.Value.(string) == "" { + t.Error("Flow('go') returned empty string") + } +} + +// --- Persona --- + +func TestPersona_Good(t *testing.T) { + // Use first persona from list to avoid hardcoding + personas := ListPersonas() + if len(personas) == 0 { + t.Skip("no personas found") + } + r := Persona(personas[0]) + if !r.OK { + t.Fatalf("Persona(%q) returned !OK", personas[0]) + } + if r.Value.(string) == "" { + t.Errorf("Persona(%q) returned empty string", personas[0]) + } +} + +// --- Template --- + +func TestTemplate_Good_Prompt(t *testing.T) { + r := Template("coding") + if !r.OK { + 
t.Fatal("Template('coding') returned !OK") + } + if r.Value.(string) == "" { + t.Error("Template('coding') returned empty string") + } +} + +func TestTemplate_Good_TaskFallback(t *testing.T) { + r := Template("bug-fix") + if !r.OK { + t.Fatal("Template('bug-fix') returned !OK — should fall through to Task") + } +} + +func TestTemplate_Bad(t *testing.T) { + r := Template("nonexistent-slug") + if r.OK { + t.Error("Template('nonexistent-slug') should return !OK") + } +} + +// --- List Functions --- + +func TestListPrompts(t *testing.T) { + prompts := ListPrompts() + if len(prompts) == 0 { + t.Error("ListPrompts() returned empty") + } +} + +func TestListTasks(t *testing.T) { + tasks := ListTasks() + if len(tasks) == 0 { + t.Fatal("ListTasks() returned empty") + } + // Verify nested paths are included (e.g., "code/review") + found := false + for _, s := range tasks { + if s == "code/review" { + found = true + break + } + } + if !found { + t.Error("ListTasks() missing nested path 'code/review'") + } +} + +func TestListPersonas(t *testing.T) { + personas := ListPersonas() + if len(personas) == 0 { + t.Error("ListPersonas() returned empty") + } + // Should have nested paths like "code/go" + hasNested := false + for _, p := range personas { + if len(p) > 0 && filepath.Dir(p) != "." 
{ + hasNested = true + break + } + } + if !hasNested { + t.Error("ListPersonas() has no nested paths") + } +} + +func TestListFlows(t *testing.T) { + flows := ListFlows() + if len(flows) == 0 { + t.Error("ListFlows() returned empty") + } +} + +func TestListWorkspaces(t *testing.T) { + workspaces := ListWorkspaces() + if len(workspaces) == 0 { + t.Error("ListWorkspaces() returned empty") + } +} + +// --- ExtractWorkspace --- + +func TestExtractWorkspace_CreatesFiles(t *testing.T) { + dir := t.TempDir() + data := &WorkspaceData{Repo: "test-repo", Task: "test task"} + + err := ExtractWorkspace("default", dir, data) + if err != nil { + t.Fatalf("ExtractWorkspace failed: %v", err) + } + + for _, name := range []string{"CODEX.md", "CLAUDE.md", "PROMPT.md", "TODO.md", "CONTEXT.md", "go.work"} { + path := filepath.Join(dir, name) + if _, err := os.Stat(path); os.IsNotExist(err) { + t.Errorf("expected %s to exist", name) + } + } +} + +func TestExtractWorkspace_CreatesSubdirectories(t *testing.T) { + dir := t.TempDir() + data := &WorkspaceData{Repo: "test-repo", Task: "test task"} + + err := ExtractWorkspace("default", dir, data) + if err != nil { + t.Fatalf("ExtractWorkspace failed: %v", err) + } + + refDir := filepath.Join(dir, ".core", "reference") + if _, err := os.Stat(refDir); os.IsNotExist(err) { + t.Fatalf(".core/reference/ directory not created") + } + + axSpec := filepath.Join(refDir, "RFC-025-AGENT-EXPERIENCE.md") + if _, err := os.Stat(axSpec); os.IsNotExist(err) { + t.Errorf("AX spec not extracted: %s", axSpec) + } + + entries, err := os.ReadDir(refDir) + if err != nil { + t.Fatalf("failed to read reference dir: %v", err) + } + + goFiles := 0 + for _, e := range entries { + if filepath.Ext(e.Name()) == ".go" { + goFiles++ + } + } + if goFiles == 0 { + t.Error("no .go files in .core/reference/") + } + + docsDir := filepath.Join(refDir, "docs") + if _, err := os.Stat(docsDir); os.IsNotExist(err) { + t.Errorf(".core/reference/docs/ not created") + } +} + +func 
TestExtractWorkspace_TemplateSubstitution(t *testing.T) { + dir := t.TempDir() + data := &WorkspaceData{Repo: "my-repo", Task: "fix the bug"} + + err := ExtractWorkspace("default", dir, data) + if err != nil { + t.Fatalf("ExtractWorkspace failed: %v", err) + } + + content, err := os.ReadFile(filepath.Join(dir, "TODO.md")) + if err != nil { + t.Fatalf("failed to read TODO.md: %v", err) + } + if len(content) == 0 { + t.Error("TODO.md is empty") + } +} diff --git a/pkg/lib/workspace/default/.core/reference/RFC-025-AGENT-EXPERIENCE.md b/pkg/lib/workspace/default/.core/reference/RFC-025-AGENT-EXPERIENCE.md new file mode 100644 index 0000000..5538902 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/RFC-025-AGENT-EXPERIENCE.md @@ -0,0 +1,303 @@ +# RFC-025: Agent Experience (AX) Design Principles + +- **Status:** Draft +- **Authors:** Snider, Cladius +- **Date:** 2026-03-19 +- **Applies to:** All Core ecosystem packages (CoreGO, CorePHP, CoreTS, core-agent) + +## Abstract + +Agent Experience (AX) is a design paradigm for software systems where the primary code consumer is an AI agent, not a human developer. AX sits alongside User Experience (UX) and Developer Experience (DX) as the third era of interface design. + +This RFC establishes AX as a formal design principle for the Core ecosystem and defines the conventions that follow from it. + +## Motivation + +As of early 2026, AI agents write, review, and maintain the majority of code in the Core ecosystem. The original author has not manually edited code (outside of Core struct design) since October 2025. Code is processed semantically — agents reason about intent, not characters. 
+ +Design patterns inherited from the human-developer era optimise for the wrong consumer: + +- **Short names** save keystrokes but increase semantic ambiguity +- **Functional option chains** are fluent for humans but opaque for agents tracing configuration +- **Error-at-every-call-site** produces 50% boilerplate that obscures intent +- **Generic type parameters** force agents to carry type context that the runtime already has +- **Panic-hiding conventions** (`Must*`) create implicit control flow that agents must special-case + +AX acknowledges this shift and provides principles for designing code, APIs, file structures, and conventions that serve AI agents as first-class consumers. + +## The Three Eras + +| Era | Primary Consumer | Optimises For | Key Metric | +|-----|-----------------|---------------|------------| +| UX | End users | Discoverability, forgiveness, visual clarity | Task completion time | +| DX | Developers | Typing speed, IDE support, convention familiarity | Time to first commit | +| AX | AI agents | Predictability, composability, semantic navigation | Correct-on-first-pass rate | + +AX does not replace UX or DX. End users still need good UX. Developers still need good DX. But when the primary code author and maintainer is an AI agent, the codebase should be designed for that consumer first. + +## Principles + +### 1. Predictable Names Over Short Names + +Names are tokens that agents pattern-match across languages and contexts. Abbreviations introduce mapping overhead. + +``` +Config not Cfg +Service not Srv +Embed not Emb +Error not Err (as a subsystem name; err for local variables is fine) +Options not Opts +``` + +**Rule:** If a name would require a comment to explain, it is too short. + +**Exception:** Industry-standard abbreviations that are universally understood (`HTTP`, `URL`, `ID`, `IPC`, `I18n`) are acceptable. The test: would an agent trained on any mainstream language recognise it without context? + +### 2. 
Comments as Usage Examples + +The function signature tells WHAT. The comment shows HOW with real values. + +```go +// Detect the project type from files present +setup.Detect("/path/to/project") + +// Set up a workspace with auto-detected template +setup.Run(setup.Options{Path: ".", Template: "auto"}) + +// Scaffold a PHP module workspace +setup.Run(setup.Options{Path: "./my-module", Template: "php"}) +``` + +**Rule:** If a comment restates what the type signature already says, delete it. If a comment shows a concrete usage with realistic values, keep it. + +**Rationale:** Agents learn from examples more effectively than from descriptions. A comment like "Run executes the setup process" adds zero information. A comment like `setup.Run(setup.Options{Path: ".", Template: "auto"})` teaches an agent exactly how to call the function. + +### 3. Path Is Documentation + +File and directory paths should be self-describing. An agent navigating the filesystem should understand what it is looking at without reading a README. + +``` +flow/deploy/to/homelab.yaml — deploy TO the homelab +flow/deploy/from/github.yaml — deploy FROM GitHub +flow/code/review.yaml — code review flow +template/file/go/struct.go.tmpl — Go struct file template +template/dir/workspace/php/ — PHP workspace scaffold +``` + +**Rule:** If an agent needs to read a file to understand what a directory contains, the directory naming has failed. + +**Corollary:** The unified path convention (folder structure = HTTP route = CLI command = test path) is AX-native. One path, every surface. + +### 4. Templates Over Freeform + +When an agent generates code from a template, the output is constrained to known-good shapes. When an agent writes freeform, the output varies. + +```go +// Template-driven — consistent output +lib.RenderFile("php/action", data) +lib.ExtractDir("php", targetDir, data) + +// Freeform — variance in output +"write a PHP action class that..." 
+``` + +**Rule:** For any code pattern that recurs, provide a template. Templates are guardrails for agents. + +**Scope:** Templates apply to file generation, workspace scaffolding, config generation, and commit messages. They do NOT apply to novel logic — agents should write business logic freeform with the domain knowledge available. + +### 5. Declarative Over Imperative + +Agents reason better about declarations of intent than sequences of operations. + +```yaml +# Declarative — agent sees what should happen +steps: + - name: build + flow: tools/docker-build + with: + context: "{{ .app_dir }}" + image_name: "{{ .image_name }}" + + - name: deploy + flow: deploy/with/docker + with: + host: "{{ .host }}" +``` + +```go +// Imperative — agent must trace execution +cmd := exec.Command("docker", "build", "--platform", "linux/amd64", "-t", imageName, ".") +cmd.Dir = appDir +if err := cmd.Run(); err != nil { + return fmt.Errorf("docker build: %w", err) +} +``` + +**Rule:** Orchestration, configuration, and pipeline logic should be declarative (YAML/JSON). Implementation logic should be imperative (Go/PHP/TS). The boundary is: if an agent needs to compose or modify the logic, make it declarative. + +### 6. Universal Types (Core Primitives) + +Every component in the ecosystem accepts and returns the same primitive types. An agent processing any level of the tree sees identical shapes. + +`Option` is a single key-value pair. `Options` is a collection. Any function that returns `Result` can accept `Options`. 
+ +```go +// Option — the atom +core.Option{K: "name", V: "brain"} + +// Options — universal input (collection of Option) +core.Options{ + {K: "name", V: "myapp"}, + {K: "port", V: 8080}, +} + +// Result[T] — universal return +core.Result[*Embed]{Value: emb, OK: true} +``` + +Usage across subsystems — same shape everywhere: + +```go +// Create Core +c := core.New(core.Options{{K: "name", V: "myapp"}}) + +// Mount embedded content +c.Data().New(core.Options{ + {K: "name", V: "brain"}, + {K: "source", V: brainFS}, + {K: "path", V: "prompts"}, +}) + +// Register a transport handle +c.Drive().New(core.Options{ + {K: "name", V: "api"}, + {K: "transport", V: "https://api.lthn.ai"}, +}) + +// Read back what was passed in +c.Options().String("name") // "myapp" +``` + +**Core primitive types:** + +| Type | Purpose | +|------|---------| +| `core.Option` | Single key-value pair (the atom) | +| `core.Options` | Collection of Option (universal input) | +| `core.Result[T]` | Return value with OK/fail state (universal output) | +| `core.Config` | Runtime settings (what is active) | +| `core.Data` | Embedded or stored content from packages | +| `core.Drive` | Resource handle registry (transports) | +| `core.Service` | A managed component with lifecycle | + +**Core struct subsystems:** + +| Accessor | Analogy | Purpose | +|----------|---------|---------| +| `c.Options()` | argv | Input configuration used to create this Core | +| `c.Data()` | /mnt | Embedded assets mounted by packages | +| `c.Drive()` | /dev | Transport handles (API, MCP, SSH, VPN) | +| `c.Config()` | /etc | Configuration, settings, feature flags | +| `c.Fs()` | / | Local filesystem I/O (sandboxable) | +| `c.Error()` | — | Panic recovery and crash reporting (`ErrorPanic`) | +| `c.Log()` | — | Structured logging (`ErrorLog`) | +| `c.Service()` | — | Service registry and lifecycle | +| `c.Cli()` | — | CLI command framework | +| `c.IPC()` | — | Message bus | +| `c.I18n()` | — | Internationalisation | + +**What this 
replaces:** + +| Go Convention | Core AX | Why | +|--------------|---------|-----| +| `func With*(v) Option` | `core.Options{{K: k, V: v}}` | K/V pairs are parseable; option chains require tracing | +| `func Must*(v) T` | `core.Result[T]` | No hidden panics; errors flow through Core | +| `func *For[T](c) T` | `c.Service("name")` | String lookup is greppable; generics require type context | +| `val, err :=` everywhere | Single return via `core.Result` | Intent not obscured by error handling | +| `_ = err` | Never needed | Core handles all errors internally | +| `ErrPan` / `ErrLog` | `ErrorPanic` / `ErrorLog` | Full names — AX principle 1 | + +## Applying AX to Existing Patterns + +### File Structure + +``` +# AX-native: path describes content +core/agent/ +├── go/ # Go source +├── php/ # PHP source +├── ui/ # Frontend source +├── claude/ # Claude Code plugin +└── codex/ # Codex plugin + +# Not AX: generic names requiring README +src/ +├── lib/ +├── utils/ +└── helpers/ +``` + +### Error Handling + +```go +// AX-native: errors are infrastructure, not application logic +svc := c.Service("brain") +cfg := c.Config().Get("database.host") +// Errors logged by Core. Code reads like a spec. + +// Not AX: errors dominate the code +svc, err := c.ServiceFor[brain.Service]() +if err != nil { + return fmt.Errorf("get brain service: %w", err) +} +cfg, err := c.Config().Get("database.host") +if err != nil { + _ = err // silenced because "it'll be fine" +} +``` + +### API Design + +```go +// AX-native: one shape, every surface +c := core.New(core.Options{ + {K: "name", V: "my-app"}, +}) +c.Service("process", processSvc) +c.Data().New(core.Options{{K: "name", V: "app"}, {K: "source", V: appFS}}) + +// Not AX: multiple patterns for the same thing +c, err := core.New( + core.WithName("my-app"), + core.WithService(factory1), + core.WithAssets(appFS), +) +if err != nil { ... } +``` + +## Compatibility + +AX conventions are valid, idiomatic Go/PHP/TS. 
They do not require language extensions, code generation, or non-standard tooling. An AX-designed codebase compiles, tests, and deploys with standard toolchains. + +The conventions diverge from community patterns (functional options, Must/For, etc.) but do not violate language specifications. This is a style choice, not a fork. + +## Adoption + +AX applies to all new code in the Core ecosystem. Existing code migrates incrementally as it is touched — no big-bang rewrite. + +Priority order: +1. **Public APIs** (package-level functions, struct constructors) +2. **File structure** (path naming, template locations) +3. **Internal fields** (struct field names, local variables) + +## References + +- dAppServer unified path convention (2024) +- CoreGO DTO pattern refactor (2026-03-18) +- Core primitives design (2026-03-19) +- Go Proverbs, Rob Pike (2015) — AX provides an updated lens + +## Changelog + +- 2026-03-20: Updated to match implementation — Option K/V atoms, Options as []Option, Data/Drive split, ErrorPanic/ErrorLog renames, subsystem table +- 2026-03-19: Initial draft diff --git a/pkg/lib/workspace/default/.core/reference/app.go b/pkg/lib/workspace/default/.core/reference/app.go new file mode 100644 index 0000000..3a5aa02 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/app.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Application identity for the Core framework. +// Based on leaanthony/sail — Name, Filename, Path. + +package core + +import ( + "os/exec" + "path/filepath" +) + +// App holds the application identity and optional GUI runtime. +type App struct { + // Name is the human-readable application name (e.g., "Core CLI"). + Name string + + // Version is the application version string (e.g., "1.2.3"). + Version string + + // Description is a short description of the application. + Description string + + // Filename is the executable filename (e.g., "core"). + Filename string + + // Path is the absolute path to the executable. 
+ Path string + + // Runtime is the GUI runtime (e.g., Wails App). + // Nil for CLI-only applications. + Runtime any +} + +// Find locates a program on PATH and returns a Result containing the App. +// +// r := core.Find("node", "Node.js") +// if r.OK { app := r.Value.(*App) } +func Find(filename, name string) Result { + path, err := exec.LookPath(filename) + if err != nil { + return Result{err, false} + } + abs, err := filepath.Abs(path) + if err != nil { + return Result{err, false} + } + return Result{&App{ + Name: name, + Filename: filename, + Path: abs, + }, true} +} diff --git a/pkg/lib/workspace/default/.core/reference/array.go b/pkg/lib/workspace/default/.core/reference/array.go new file mode 100644 index 0000000..ff085bb --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/array.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Generic slice operations for the Core framework. +// Based on leaanthony/slicer, rewritten with Go 1.18+ generics. + +package core + +// Array is a typed slice with common operations. +type Array[T comparable] struct { + items []T +} + +// NewArray creates an empty Array. +func NewArray[T comparable](items ...T) *Array[T] { + return &Array[T]{items: items} +} + +// Add appends values. +func (s *Array[T]) Add(values ...T) { + s.items = append(s.items, values...) +} + +// AddUnique appends values only if not already present. +func (s *Array[T]) AddUnique(values ...T) { + for _, v := range values { + if !s.Contains(v) { + s.items = append(s.items, v) + } + } +} + +// Contains returns true if the value is in the slice. +func (s *Array[T]) Contains(val T) bool { + for _, v := range s.items { + if v == val { + return true + } + } + return false +} + +// Filter returns a new Array with elements matching the predicate. 
+func (s *Array[T]) Filter(fn func(T) bool) Result { + filtered := &Array[T]{} + for _, v := range s.items { + if fn(v) { + filtered.items = append(filtered.items, v) + } + } + return Result{filtered, true} +} + +// Each runs a function on every element. +func (s *Array[T]) Each(fn func(T)) { + for _, v := range s.items { + fn(v) + } +} + +// Remove removes the first occurrence of a value. +func (s *Array[T]) Remove(val T) { + for i, v := range s.items { + if v == val { + s.items = append(s.items[:i], s.items[i+1:]...) + return + } + } +} + +// Deduplicate removes duplicate values, preserving order. +func (s *Array[T]) Deduplicate() { + seen := make(map[T]struct{}) + result := make([]T, 0, len(s.items)) + for _, v := range s.items { + if _, exists := seen[v]; !exists { + seen[v] = struct{}{} + result = append(result, v) + } + } + s.items = result +} + +// Len returns the number of elements. +func (s *Array[T]) Len() int { + return len(s.items) +} + +// Clear removes all elements. +func (s *Array[T]) Clear() { + s.items = nil +} + +// AsSlice returns a copy of the underlying slice. +func (s *Array[T]) AsSlice() []T { + if s.items == nil { + return nil + } + out := make([]T, len(s.items)) + copy(out, s.items) + return out +} diff --git a/pkg/lib/workspace/default/.core/reference/cli.go b/pkg/lib/workspace/default/.core/reference/cli.go new file mode 100644 index 0000000..ff7d298 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/cli.go @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Cli is the CLI surface layer for the Core command tree. +// It reads commands from Core's registry and wires them to terminal I/O. +// +// Run the CLI: +// +// c := core.New(core.Options{{Key: "name", Value: "myapp"}}) +// c.Command("deploy", handler) +// c.Cli().Run() +// +// The Cli resolves os.Args to a command path, parses flags, +// and calls the command's action with parsed options. 
+package core + +import ( + "io" + "os" +) + +// Cli is the CLI surface for the Core command tree. +type Cli struct { + core *Core + output io.Writer + banner func(*Cli) string +} + +// Print writes to the CLI output (defaults to os.Stdout). +// +// c.Cli().Print("hello %s", "world") +func (cl *Cli) Print(format string, args ...any) { + Print(cl.output, format, args...) +} + +// SetOutput sets the CLI output writer. +// +// c.Cli().SetOutput(os.Stderr) +func (cl *Cli) SetOutput(w io.Writer) { + cl.output = w +} + +// Run resolves os.Args to a command path and executes it. +// +// c.Cli().Run() +// c.Cli().Run("deploy", "to", "homelab") +func (cl *Cli) Run(args ...string) Result { + if len(args) == 0 { + args = os.Args[1:] + } + + clean := FilterArgs(args) + + if cl.core == nil || cl.core.commands == nil { + if cl.banner != nil { + cl.Print(cl.banner(cl)) + } + return Result{} + } + + cl.core.commands.mu.RLock() + cmdCount := len(cl.core.commands.commands) + cl.core.commands.mu.RUnlock() + + if cmdCount == 0 { + if cl.banner != nil { + cl.Print(cl.banner(cl)) + } + return Result{} + } + + // Resolve command path from args + var cmd *Command + var remaining []string + + cl.core.commands.mu.RLock() + for i := len(clean); i > 0; i-- { + path := JoinPath(clean[:i]...) 
+ if c, ok := cl.core.commands.commands[path]; ok { + cmd = c + remaining = clean[i:] + break + } + } + cl.core.commands.mu.RUnlock() + + if cmd == nil { + if cl.banner != nil { + cl.Print(cl.banner(cl)) + } + cl.PrintHelp() + return Result{} + } + + // Build options from remaining args + opts := Options{} + for _, arg := range remaining { + key, val, valid := ParseFlag(arg) + if valid { + if Contains(arg, "=") { + opts = append(opts, Option{Key: key, Value: val}) + } else { + opts = append(opts, Option{Key: key, Value: true}) + } + } else if !IsFlag(arg) { + opts = append(opts, Option{Key: "_arg", Value: arg}) + } + } + + if cmd.Action != nil { + return cmd.Run(opts) + } + if cmd.Lifecycle != nil { + return cmd.Start(opts) + } + return Result{E("core.Cli.Run", Concat("command \"", cmd.Path, "\" is not executable"), nil), false} +} + +// PrintHelp prints available commands. +// +// c.Cli().PrintHelp() +func (cl *Cli) PrintHelp() { + if cl.core == nil || cl.core.commands == nil { + return + } + + name := "" + if cl.core.app != nil { + name = cl.core.app.Name + } + if name != "" { + cl.Print("%s commands:", name) + } else { + cl.Print("Commands:") + } + + cl.core.commands.mu.RLock() + defer cl.core.commands.mu.RUnlock() + + for path, cmd := range cl.core.commands.commands { + if cmd.Hidden || (cmd.Action == nil && cmd.Lifecycle == nil) { + continue + } + tr := cl.core.I18n().Translate(cmd.I18nKey()) + desc, _ := tr.Value.(string) + if desc == "" || desc == cmd.I18nKey() { + cl.Print(" %s", path) + } else { + cl.Print(" %-30s %s", path, desc) + } + } +} + +// SetBanner sets the banner function. +// +// c.Cli().SetBanner(func(_ *core.Cli) string { return "My App v1.0" }) +func (cl *Cli) SetBanner(fn func(*Cli) string) { + cl.banner = fn +} + +// Banner returns the banner string. 
+func (cl *Cli) Banner() string { + if cl.banner != nil { + return cl.banner(cl) + } + if cl.core != nil && cl.core.app != nil && cl.core.app.Name != "" { + return cl.core.app.Name + } + return "" +} diff --git a/pkg/lib/workspace/default/.core/reference/command.go b/pkg/lib/workspace/default/.core/reference/command.go new file mode 100644 index 0000000..7b74e9f --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/command.go @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Command is a DTO representing an executable operation. +// Commands don't know if they're root, child, or nested — the tree +// structure comes from composition via path-based registration. +// +// Register a command: +// +// c.Command("deploy", func(opts core.Options) core.Result { +// return core.Result{"deployed", true} +// }) +// +// Register a nested command: +// +// c.Command("deploy/to/homelab", handler) +// +// Description is an i18n key — derived from path if omitted: +// +// "deploy" → "cmd.deploy.description" +// "deploy/to/homelab" → "cmd.deploy.to.homelab.description" +package core + +import ( + "sync" +) + +// CommandAction is the function signature for command handlers. +// +// func(opts core.Options) core.Result +type CommandAction func(Options) Result + +// CommandLifecycle is implemented by commands that support managed lifecycle. +// Basic commands only need an action. Daemon commands implement Start/Stop/Signal +// via go-process. +type CommandLifecycle interface { + Start(Options) Result + Stop() Result + Restart() Result + Reload() Result + Signal(string) Result +} + +// Command is the DTO for an executable operation. 
+type Command struct { + Name string + Description string // i18n key — derived from path if empty + Path string // "deploy/to/homelab" + Action CommandAction // business logic + Lifecycle CommandLifecycle // optional — provided by go-process + Flags Options // declared flags + Hidden bool + commands map[string]*Command // child commands (internal) + mu sync.RWMutex +} + +// I18nKey returns the i18n key for this command's description. +// +// cmd with path "deploy/to/homelab" → "cmd.deploy.to.homelab.description" +func (cmd *Command) I18nKey() string { + if cmd.Description != "" { + return cmd.Description + } + path := cmd.Path + if path == "" { + path = cmd.Name + } + return Concat("cmd.", Replace(path, "/", "."), ".description") +} + +// Run executes the command's action with the given options. +// +// result := cmd.Run(core.Options{{Key: "target", Value: "homelab"}}) +func (cmd *Command) Run(opts Options) Result { + if cmd.Action == nil { + return Result{E("core.Command.Run", Concat("command \"", cmd.Path, "\" is not executable"), nil), false} + } + return cmd.Action(opts) +} + +// Start delegates to the lifecycle implementation if available. +func (cmd *Command) Start(opts Options) Result { + if cmd.Lifecycle != nil { + return cmd.Lifecycle.Start(opts) + } + return cmd.Run(opts) +} + +// Stop delegates to the lifecycle implementation. +func (cmd *Command) Stop() Result { + if cmd.Lifecycle != nil { + return cmd.Lifecycle.Stop() + } + return Result{} +} + +// Restart delegates to the lifecycle implementation. +func (cmd *Command) Restart() Result { + if cmd.Lifecycle != nil { + return cmd.Lifecycle.Restart() + } + return Result{} +} + +// Reload delegates to the lifecycle implementation. +func (cmd *Command) Reload() Result { + if cmd.Lifecycle != nil { + return cmd.Lifecycle.Reload() + } + return Result{} +} + +// Signal delegates to the lifecycle implementation. 
+func (cmd *Command) Signal(sig string) Result { + if cmd.Lifecycle != nil { + return cmd.Lifecycle.Signal(sig) + } + return Result{} +} + +// --- Command Registry (on Core) --- + +// commandRegistry holds the command tree. +type commandRegistry struct { + commands map[string]*Command + mu sync.RWMutex +} + +// Command gets or registers a command by path. +// +// c.Command("deploy", Command{Action: handler}) +// r := c.Command("deploy") +func (c *Core) Command(path string, command ...Command) Result { + if len(command) == 0 { + c.commands.mu.RLock() + cmd, ok := c.commands.commands[path] + c.commands.mu.RUnlock() + return Result{cmd, ok} + } + + if path == "" || HasPrefix(path, "/") || HasSuffix(path, "/") || Contains(path, "//") { + return Result{E("core.Command", Concat("invalid command path: \"", path, "\""), nil), false} + } + + c.commands.mu.Lock() + defer c.commands.mu.Unlock() + + if existing, exists := c.commands.commands[path]; exists && (existing.Action != nil || existing.Lifecycle != nil) { + return Result{E("core.Command", Concat("command \"", path, "\" already registered"), nil), false} + } + + cmd := &command[0] + cmd.Name = pathName(path) + cmd.Path = path + if cmd.commands == nil { + cmd.commands = make(map[string]*Command) + } + + // Preserve existing subtree when overwriting a placeholder parent + if existing, exists := c.commands.commands[path]; exists { + for k, v := range existing.commands { + if _, has := cmd.commands[k]; !has { + cmd.commands[k] = v + } + } + } + + c.commands.commands[path] = cmd + + // Build parent chain — "deploy/to/homelab" creates "deploy" and "deploy/to" if missing + parts := Split(path, "/") + for i := len(parts) - 1; i > 0; i-- { + parentPath := JoinPath(parts[:i]...) 
+ if _, exists := c.commands.commands[parentPath]; !exists { + c.commands.commands[parentPath] = &Command{ + Name: parts[i-1], + Path: parentPath, + commands: make(map[string]*Command), + } + } + c.commands.commands[parentPath].commands[parts[i]] = cmd + cmd = c.commands.commands[parentPath] + } + + return Result{OK: true} +} + +// Commands returns all registered command paths. +// +// paths := c.Commands() +func (c *Core) Commands() []string { + if c.commands == nil { + return nil + } + c.commands.mu.RLock() + defer c.commands.mu.RUnlock() + var paths []string + for k := range c.commands.commands { + paths = append(paths, k) + } + return paths +} + +// pathName extracts the last segment of a path. +// "deploy/to/homelab" → "homelab" +func pathName(path string) string { + parts := Split(path, "/") + return parts[len(parts)-1] +} diff --git a/pkg/lib/workspace/default/.core/reference/config.go b/pkg/lib/workspace/default/.core/reference/config.go new file mode 100644 index 0000000..395a0f6 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/config.go @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Settings, feature flags, and typed configuration for the Core framework. + +package core + +import ( + "sync" +) + +// ConfigVar is a variable that can be set, unset, and queried for its state. +type ConfigVar[T any] struct { + val T + set bool +} + +func (v *ConfigVar[T]) Get() T { return v.val } +func (v *ConfigVar[T]) Set(val T) { v.val = val; v.set = true } +func (v *ConfigVar[T]) IsSet() bool { return v.set } +func (v *ConfigVar[T]) Unset() { + v.set = false + var zero T + v.val = zero +} + +func NewConfigVar[T any](val T) ConfigVar[T] { + return ConfigVar[T]{val: val, set: true} +} + +// ConfigOptions holds configuration data. 
+type ConfigOptions struct { + Settings map[string]any + Features map[string]bool +} + +func (o *ConfigOptions) init() { + if o.Settings == nil { + o.Settings = make(map[string]any) + } + if o.Features == nil { + o.Features = make(map[string]bool) + } +} + +// Config holds configuration settings and feature flags. +type Config struct { + *ConfigOptions + mu sync.RWMutex +} + +// Set stores a configuration value by key. +func (e *Config) Set(key string, val any) { + e.mu.Lock() + if e.ConfigOptions == nil { + e.ConfigOptions = &ConfigOptions{} + } + e.ConfigOptions.init() + e.Settings[key] = val + e.mu.Unlock() +} + +// Get retrieves a configuration value by key. +func (e *Config) Get(key string) Result { + e.mu.RLock() + defer e.mu.RUnlock() + if e.ConfigOptions == nil || e.Settings == nil { + return Result{} + } + val, ok := e.Settings[key] + if !ok { + return Result{} + } + return Result{val, true} +} + +func (e *Config) String(key string) string { return ConfigGet[string](e, key) } +func (e *Config) Int(key string) int { return ConfigGet[int](e, key) } +func (e *Config) Bool(key string) bool { return ConfigGet[bool](e, key) } + +// ConfigGet retrieves a typed configuration value. 
+func ConfigGet[T any](e *Config, key string) T { + r := e.Get(key) + if !r.OK { + var zero T + return zero + } + typed, _ := r.Value.(T) + return typed +} + +// --- Feature Flags --- + +func (e *Config) Enable(feature string) { + e.mu.Lock() + if e.ConfigOptions == nil { + e.ConfigOptions = &ConfigOptions{} + } + e.ConfigOptions.init() + e.Features[feature] = true + e.mu.Unlock() +} + +func (e *Config) Disable(feature string) { + e.mu.Lock() + if e.ConfigOptions == nil { + e.ConfigOptions = &ConfigOptions{} + } + e.ConfigOptions.init() + e.Features[feature] = false + e.mu.Unlock() +} + +func (e *Config) Enabled(feature string) bool { + e.mu.RLock() + defer e.mu.RUnlock() + if e.ConfigOptions == nil || e.Features == nil { + return false + } + return e.Features[feature] +} + +func (e *Config) EnabledFeatures() []string { + e.mu.RLock() + defer e.mu.RUnlock() + if e.ConfigOptions == nil || e.Features == nil { + return nil + } + var result []string + for k, v := range e.Features { + if v { + result = append(result, k) + } + } + return result +} diff --git a/pkg/lib/workspace/default/.core/reference/contract.go b/pkg/lib/workspace/default/.core/reference/contract.go new file mode 100644 index 0000000..ddf0def --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/contract.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Contracts, options, and type definitions for the Core framework. + +package core + +import ( + "context" +) + +// Message is the type for IPC broadcasts (fire-and-forget). +type Message any + +// Query is the type for read-only IPC requests. +type Query any + +// Task is the type for IPC requests that perform side effects. +type Task any + +// TaskWithIdentifier is an optional interface for tasks that need to know their assigned identifier. +type TaskWithIdentifier interface { + Task + SetTaskIdentifier(id string) + GetTaskIdentifier() string +} + +// QueryHandler handles Query requests. Returns Result{Value, OK}. 
+type QueryHandler func(*Core, Query) Result + +// TaskHandler handles Task requests. Returns Result{Value, OK}. +type TaskHandler func(*Core, Task) Result + +// Startable is implemented by services that need startup initialisation. +type Startable interface { + OnStartup(ctx context.Context) error +} + +// Stoppable is implemented by services that need shutdown cleanup. +type Stoppable interface { + OnShutdown(ctx context.Context) error +} + +// --- Action Messages --- + +type ActionServiceStartup struct{} +type ActionServiceShutdown struct{} + +type ActionTaskStarted struct { + TaskIdentifier string + Task Task +} + +type ActionTaskProgress struct { + TaskIdentifier string + Task Task + Progress float64 + Message string +} + +type ActionTaskCompleted struct { + TaskIdentifier string + Task Task + Result any + Error error +} + +// --- Constructor --- + +// New creates a Core instance. +// +// c := core.New(core.Options{ +// {Key: "name", Value: "myapp"}, +// }) +func New(opts ...Options) *Core { + c := &Core{ + app: &App{}, + data: &Data{}, + drive: &Drive{}, + fs: &Fs{root: "/"}, + config: &Config{ConfigOptions: &ConfigOptions{}}, + error: &ErrorPanic{}, + log: &ErrorLog{log: Default()}, + lock: &Lock{}, + ipc: &Ipc{}, + i18n: &I18n{}, + services: &serviceRegistry{services: make(map[string]*Service)}, + commands: &commandRegistry{commands: make(map[string]*Command)}, + } + c.context, c.cancel = context.WithCancel(context.Background()) + + if len(opts) > 0 { + cp := make(Options, len(opts[0])) + copy(cp, opts[0]) + c.options = &cp + name := cp.String("name") + if name != "" { + c.app.Name = name + } + } + + // Init Cli surface with Core reference + c.cli = &Cli{core: c} + + return c +} diff --git a/pkg/lib/workspace/default/.core/reference/core.go b/pkg/lib/workspace/default/.core/reference/core.go new file mode 100644 index 0000000..b02fc93 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/core.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: EUPL-1.2 
+ +// Package core is a dependency injection and service lifecycle framework for Go. +// This file defines the Core struct, accessors, and IPC/error wrappers. + +package core + +import ( + "context" + "sync" + "sync/atomic" +) + +// --- Core Struct --- + +// Core is the central application object that manages services, assets, and communication. +type Core struct { + options *Options // c.Options() — Input configuration used to create this Core + app *App // c.App() — Application identity + optional GUI runtime + data *Data // c.Data() — Embedded/stored content from packages + drive *Drive // c.Drive() — Resource handle registry (transports) + fs *Fs // c.Fs() — Local filesystem I/O (sandboxable) + config *Config // c.Config() — Configuration, settings, feature flags + error *ErrorPanic // c.Error() — Panic recovery and crash reporting + log *ErrorLog // c.Log() — Structured logging + error wrapping + cli *Cli // c.Cli() — CLI surface layer + commands *commandRegistry // c.Command("path") — Command tree + services *serviceRegistry // c.Service("name") — Service registry + lock *Lock // c.Lock("name") — Named mutexes + ipc *Ipc // c.IPC() — Message bus for IPC + i18n *I18n // c.I18n() — Internationalisation and locale collection + + context context.Context + cancel context.CancelFunc + taskIDCounter atomic.Uint64 + waitGroup sync.WaitGroup + shutdown atomic.Bool +} + +// --- Accessors --- + +func (c *Core) Options() *Options { return c.options } +func (c *Core) App() *App { return c.app } +func (c *Core) Data() *Data { return c.data } +func (c *Core) Drive() *Drive { return c.drive } +func (c *Core) Embed() Result { return c.data.Get("app") } // legacy — use Data() +func (c *Core) Fs() *Fs { return c.fs } +func (c *Core) Config() *Config { return c.config } +func (c *Core) Error() *ErrorPanic { return c.error } +func (c *Core) Log() *ErrorLog { return c.log } +func (c *Core) Cli() *Cli { return c.cli } +func (c *Core) IPC() *Ipc { return c.ipc } +func (c *Core) 
I18n() *I18n { return c.i18n } +func (c *Core) Context() context.Context { return c.context } +func (c *Core) Core() *Core { return c } + +// --- IPC (uppercase aliases) --- + +func (c *Core) ACTION(msg Message) Result { return c.Action(msg) } +func (c *Core) QUERY(q Query) Result { return c.Query(q) } +func (c *Core) QUERYALL(q Query) Result { return c.QueryAll(q) } +func (c *Core) PERFORM(t Task) Result { return c.Perform(t) } + +// --- Error+Log --- + +// LogError logs an error and returns the Result from ErrorLog. +func (c *Core) LogError(err error, op, msg string) Result { + return c.log.Error(err, op, msg) +} + +// LogWarn logs a warning and returns the Result from ErrorLog. +func (c *Core) LogWarn(err error, op, msg string) Result { + return c.log.Warn(err, op, msg) +} + +// Must logs and panics if err is not nil. +func (c *Core) Must(err error, op, msg string) { + c.log.Must(err, op, msg) +} + +// --- Global Instance --- diff --git a/pkg/lib/workspace/default/.core/reference/data.go b/pkg/lib/workspace/default/.core/reference/data.go new file mode 100644 index 0000000..3fa5d7b --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/data.go @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Data is the embedded/stored content system for core packages. +// Packages mount their embedded content here and other packages +// read from it by path. +// +// Mount a package's assets: +// +// c.Data().New(core.Options{ +// {Key: "name", Value: "brain"}, +// {Key: "source", Value: brainFS}, +// {Key: "path", Value: "prompts"}, +// }) +// +// Read from any mounted path: +// +// content := c.Data().ReadString("brain/coding.md") +// entries := c.Data().List("agent/flow") +// +// Extract a template directory: +// +// c.Data().Extract("agent/workspace/default", "/tmp/ws", data) +package core + +import ( + "io/fs" + "path/filepath" + "sync" +) + +// Data manages mounted embedded filesystems from core packages. 
+type Data struct { + mounts map[string]*Embed + mu sync.RWMutex +} + +// New registers an embedded filesystem under a named prefix. +// +// c.Data().New(core.Options{ +// {Key: "name", Value: "brain"}, +// {Key: "source", Value: brainFS}, +// {Key: "path", Value: "prompts"}, +// }) +func (d *Data) New(opts Options) Result { + name := opts.String("name") + if name == "" { + return Result{} + } + + r := opts.Get("source") + if !r.OK { + return r + } + + fsys, ok := r.Value.(fs.FS) + if !ok { + return Result{E("data.New", "source is not fs.FS", nil), false} + } + + path := opts.String("path") + if path == "" { + path = "." + } + + d.mu.Lock() + defer d.mu.Unlock() + + if d.mounts == nil { + d.mounts = make(map[string]*Embed) + } + + mr := Mount(fsys, path) + if !mr.OK { + return mr + } + + emb := mr.Value.(*Embed) + d.mounts[name] = emb + return Result{emb, true} +} + +// Get returns the Embed for a named mount point. +// +// r := c.Data().Get("brain") +// if r.OK { emb := r.Value.(*Embed) } +func (d *Data) Get(name string) Result { + d.mu.RLock() + defer d.mu.RUnlock() + if d.mounts == nil { + return Result{} + } + emb, ok := d.mounts[name] + if !ok { + return Result{} + } + return Result{emb, true} +} + +// resolve splits a path like "brain/coding.md" into mount name + relative path. +func (d *Data) resolve(path string) (*Embed, string) { + d.mu.RLock() + defer d.mu.RUnlock() + + parts := SplitN(path, "/", 2) + if len(parts) < 2 { + return nil, "" + } + if d.mounts == nil { + return nil, "" + } + emb := d.mounts[parts[0]] + return emb, parts[1] +} + +// ReadFile reads a file by full path. +// +// r := c.Data().ReadFile("brain/coding.md") +// if r.OK { data := r.Value.([]byte) } +func (d *Data) ReadFile(path string) Result { + emb, rel := d.resolve(path) + if emb == nil { + return Result{} + } + return emb.ReadFile(rel) +} + +// ReadString reads a file as a string. 
+// +// r := c.Data().ReadString("agent/flow/deploy/to/homelab.yaml") +// if r.OK { content := r.Value.(string) } +func (d *Data) ReadString(path string) Result { + r := d.ReadFile(path) + if !r.OK { + return r + } + return Result{string(r.Value.([]byte)), true} +} + +// List returns directory entries at a path. +// +// r := c.Data().List("agent/persona/code") +// if r.OK { entries := r.Value.([]fs.DirEntry) } +func (d *Data) List(path string) Result { + emb, rel := d.resolve(path) + if emb == nil { + return Result{} + } + r := emb.ReadDir(rel) + if !r.OK { + return r + } + return Result{r.Value, true} +} + +// ListNames returns filenames (without extensions) at a path. +// +// r := c.Data().ListNames("agent/flow") +// if r.OK { names := r.Value.([]string) } +func (d *Data) ListNames(path string) Result { + r := d.List(path) + if !r.OK { + return r + } + entries := r.Value.([]fs.DirEntry) + var names []string + for _, e := range entries { + name := e.Name() + if !e.IsDir() { + name = TrimSuffix(name, filepath.Ext(name)) + } + names = append(names, name) + } + return Result{names, true} +} + +// Extract copies a template directory to targetDir. +// +// r := c.Data().Extract("agent/workspace/default", "/tmp/ws", templateData) +func (d *Data) Extract(path, targetDir string, templateData any) Result { + emb, rel := d.resolve(path) + if emb == nil { + return Result{} + } + r := emb.Sub(rel) + if !r.OK { + return r + } + return Extract(r.Value.(*Embed).FS(), targetDir, templateData) +} + +// Mounts returns the names of all mounted content. 
+// +// names := c.Data().Mounts() +func (d *Data) Mounts() []string { + d.mu.RLock() + defer d.mu.RUnlock() + var names []string + for k := range d.mounts { + names = append(names, k) + } + return names +} diff --git a/pkg/lib/workspace/default/.core/reference/docs/commands.md b/pkg/lib/workspace/default/.core/reference/docs/commands.md new file mode 100644 index 0000000..46e2022 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/commands.md @@ -0,0 +1,177 @@ +--- +title: Commands +description: Path-based command registration and CLI execution. +--- + +# Commands + +Commands are one of the most AX-native parts of CoreGO. The path is the identity. + +## Register a Command + +```go +c.Command("deploy/to/homelab", core.Command{ + Action: func(opts core.Options) core.Result { + target := opts.String("target") + return core.Result{Value: "deploying to " + target, OK: true} + }, +}) +``` + +## Command Paths + +Paths must be clean: + +- no empty path +- no leading slash +- no trailing slash +- no double slash + +These paths are valid: + +```text +deploy +deploy/to/homelab +workspace/create +``` + +These are rejected: + +```text +/deploy +deploy/ +deploy//to +``` + +## Parent Commands Are Auto-Created + +When you register `deploy/to/homelab`, CoreGO also creates placeholder parents if they do not already exist: + +- `deploy` +- `deploy/to` + +This makes the path tree navigable without extra setup. + +## Read a Command Back + +```go +r := c.Command("deploy/to/homelab") +if r.OK { + cmd := r.Value.(*core.Command) + _ = cmd +} +``` + +## Run a Command Directly + +```go +cmd := c.Command("deploy/to/homelab").Value.(*core.Command) + +r := cmd.Run(core.Options{ + {Key: "target", Value: "uk-prod"}, +}) +``` + +If `Action` is nil, `Run` returns `Result{OK:false}` with a structured error. 
+ +## Run Through the CLI Surface + +```go +r := c.Cli().Run("deploy", "to", "homelab", "--target=uk-prod", "--debug") +``` + +`Cli.Run` resolves the longest matching command path from the arguments, then converts the remaining args into `core.Options`. + +## Flag Parsing Rules + +### Double Dash + +```text +--target=uk-prod -> key "target", value "uk-prod" +--debug -> key "debug", value true +``` + +### Single Dash + +```text +-v -> key "v", value true +-n=4 -> key "n", value "4" +``` + +### Positional Arguments + +Non-flag arguments after the command path are stored as repeated `_arg` options. + +```go +r := c.Cli().Run("workspace", "open", "alpha") +``` + +That produces an option like: + +```go +core.Option{Key: "_arg", Value: "alpha"} +``` + +### Important Details + +- flag values stay as strings +- `opts.Int("port")` only works if some code stored an actual `int` +- invalid flags such as `-verbose` and `--v` are ignored + +## Help Output + +`Cli.PrintHelp()` prints executable commands: + +```go +c.Cli().PrintHelp() +``` + +It skips: + +- hidden commands +- placeholder parents with no `Action` and no `Lifecycle` + +Descriptions are resolved through `cmd.I18nKey()`. + +## I18n Description Keys + +If `Description` is empty, CoreGO derives a key from the path. + +```text +deploy -> cmd.deploy.description +deploy/to/homelab -> cmd.deploy.to.homelab.description +workspace/create -> cmd.workspace.create.description +``` + +If `Description` is already set, CoreGO uses it as-is. + +## Lifecycle Commands + +Commands can also delegate to a lifecycle implementation. 
+ +```go +type daemonCommand struct{} + +func (d *daemonCommand) Start(opts core.Options) core.Result { return core.Result{OK: true} } +func (d *daemonCommand) Stop() core.Result { return core.Result{OK: true} } +func (d *daemonCommand) Restart() core.Result { return core.Result{OK: true} } +func (d *daemonCommand) Reload() core.Result { return core.Result{OK: true} } +func (d *daemonCommand) Signal(sig string) core.Result { return core.Result{Value: sig, OK: true} } + +c.Command("agent/serve", core.Command{ + Lifecycle: &daemonCommand{}, +}) +``` + +Important behavior: + +- `Start` falls back to `Run` when `Lifecycle` is nil +- `Stop`, `Restart`, `Reload`, and `Signal` return an empty `Result` when `Lifecycle` is nil + +## List Command Paths + +```go +paths := c.Commands() +``` + +Like the service registry, the command registry is map-backed, so iteration order is not guaranteed. diff --git a/pkg/lib/workspace/default/.core/reference/docs/configuration.md b/pkg/lib/workspace/default/.core/reference/docs/configuration.md new file mode 100644 index 0000000..0a0cf11 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/configuration.md @@ -0,0 +1,96 @@ +--- +title: Configuration +description: Constructor options, runtime settings, and feature flags. +--- + +# Configuration + +CoreGO uses two different configuration layers: + +- constructor-time `core.Options` +- runtime `c.Config()` + +## Constructor-Time Options + +```go +c := core.New(core.Options{ + {Key: "name", Value: "agent-workbench"}, +}) +``` + +### Current Behavior + +- `New` accepts `opts ...Options` +- the current implementation copies only the first `Options` slice +- the `name` key is applied to `c.App().Name` + +If you need more constructor data, put it in the first `core.Options` slice. + +## Runtime Settings with `Config` + +Use `c.Config()` for mutable process settings. 
+ +```go +c.Config().Set("workspace.root", "/srv/workspaces") +c.Config().Set("max_agents", 8) +c.Config().Set("debug", true) +``` + +Read them back with: + +```go +root := c.Config().String("workspace.root") +maxAgents := c.Config().Int("max_agents") +debug := c.Config().Bool("debug") +raw := c.Config().Get("workspace.root") +``` + +### Important Details + +- missing keys return zero values +- typed accessors do not coerce strings into ints or bools +- `Get` returns `core.Result` + +## Feature Flags + +`Config` also tracks named feature flags. + +```go +c.Config().Enable("workspace.templates") +c.Config().Enable("agent.review") +c.Config().Disable("agent.review") +``` + +Read them with: + +```go +enabled := c.Config().Enabled("workspace.templates") +features := c.Config().EnabledFeatures() +``` + +Feature names are case-sensitive. + +## `ConfigVar[T]` + +Use `ConfigVar[T]` when you need a typed value that can also represent “set versus unset”. + +```go +theme := core.NewConfigVar("amber") + +if theme.IsSet() { + fmt.Println(theme.Get()) +} + +theme.Unset() +``` + +This is useful for package-local state where zero values are not enough to describe configuration presence. + +## Recommended Pattern + +Use the two layers for different jobs: + +- put startup identity such as `name` into `core.Options` +- put mutable runtime values and feature switches into `c.Config()` + +That keeps constructor intent separate from live process state. diff --git a/pkg/lib/workspace/default/.core/reference/docs/errors.md b/pkg/lib/workspace/default/.core/reference/docs/errors.md new file mode 100644 index 0000000..9b7d3f3 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/errors.md @@ -0,0 +1,120 @@ +--- +title: Errors +description: Structured errors, logging helpers, and panic recovery. +--- + +# Errors + +CoreGO treats failures as structured operational data. + +Repository convention: use `E()` instead of `fmt.Errorf` for framework and service errors. 
+ +## `Err` + +The structured error type is: + +```go +type Err struct { + Operation string + Message string + Cause error + Code string +} +``` + +## Create Errors + +### `E` + +```go +err := core.E("workspace.Load", "failed to read workspace manifest", cause) +``` + +### `Wrap` + +```go +err := core.Wrap(cause, "workspace.Load", "manifest parse failed") +``` + +### `WrapCode` + +```go +err := core.WrapCode(cause, "WORKSPACE_INVALID", "workspace.Load", "manifest parse failed") +``` + +### `NewCode` + +```go +err := core.NewCode("NOT_FOUND", "workspace not found") +``` + +## Inspect Errors + +```go +op := core.Operation(err) +code := core.ErrorCode(err) +msg := core.ErrorMessage(err) +root := core.Root(err) +stack := core.StackTrace(err) +pretty := core.FormatStackTrace(err) +``` + +These helpers keep the operational chain visible without extra type assertions. + +## Join and Standard Wrappers + +```go +combined := core.ErrorJoin(err1, err2) +same := core.Is(combined, err1) +``` + +`core.As` and `core.NewError` mirror the standard library for convenience. + +## Log-and-Return Helpers + +`Core` exposes two convenience wrappers: + +```go +r1 := c.LogError(err, "workspace.Load", "workspace load failed") +r2 := c.LogWarn(err, "workspace.Load", "workspace load degraded") +``` + +These log through the default logger and return `core.Result`. + +You can also use the underlying `ErrorLog` directly: + +```go +r := c.Log().Error(err, "workspace.Load", "workspace load failed") +``` + +`Must` logs and then panics when the error is non-nil: + +```go +c.Must(err, "workspace.Load", "workspace load failed") +``` + +## Panic Recovery + +`ErrorPanic` handles process-safe panic capture. + +```go +defer c.Error().Recover() +``` + +Run background work with recovery: + +```go +c.Error().SafeGo(func() { + panic("captured") +}) +``` + +If `ErrorPanic` has a configured crash file path, it appends JSON crash reports and `Reports(n)` reads them back. 
+ +That crash file path is currently internal state on `ErrorPanic`, not a public constructor option on `Core.New()`. + +## Logging and Error Context + +The logging subsystem automatically extracts `op` and logical stack information from structured errors when those values are present in the key-value list. + +That makes errors created with `E`, `Wrap`, or `WrapCode` much easier to follow in logs. diff --git a/pkg/lib/workspace/default/.core/reference/docs/getting-started.md b/pkg/lib/workspace/default/.core/reference/docs/getting-started.md new file mode 100644 index 0000000..d2d8166 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/getting-started.md @@ -0,0 +1,208 @@ +--- +title: Getting Started +description: Build a first CoreGO application with the current API. +--- + +# Getting Started + +This page shows the shortest path to a useful CoreGO application using the API that exists in this repository today. + +## Install + +```bash +go get dappco.re/go/core +``` + +## Create a Core + +`New` takes zero or more `core.Options` slices, but the current implementation only reads the first one. In practice, treat the constructor as `core.New(core.Options{...})`. + +```go +package main + +import "dappco.re/go/core" + +func main() { + c := core.New(core.Options{ + {Key: "name", Value: "agent-workbench"}, + }) + + _ = c +} +``` + +The `name` option is copied into `c.App().Name`. + +## Register a Service + +Services are registered explicitly with a name and a `core.Service` DTO. + +```go +c.Service("audit", core.Service{ + OnStart: func() core.Result { + core.Info("audit service started", "app", c.App().Name) + return core.Result{OK: true} + }, + OnStop: func() core.Result { + core.Info("audit service stopped", "app", c.App().Name) + return core.Result{OK: true} + }, +}) +``` + +This registry stores `core.Service` values. It is a lifecycle registry, not a typed object container. 
+ +## Register a Query, Task, and Command + +```go +type workspaceCountQuery struct{} + +type createWorkspaceTask struct { + Name string +} + +c.RegisterQuery(func(_ *core.Core, q core.Query) core.Result { + switch q.(type) { + case workspaceCountQuery: + return core.Result{Value: 1, OK: true} + } + return core.Result{} +}) + +c.RegisterTask(func(_ *core.Core, t core.Task) core.Result { + switch task := t.(type) { + case createWorkspaceTask: + path := "/tmp/agent-workbench/" + task.Name + return core.Result{Value: path, OK: true} + } + return core.Result{} +}) + +c.Command("workspace/create", core.Command{ + Action: func(opts core.Options) core.Result { + return c.PERFORM(createWorkspaceTask{ + Name: opts.String("name"), + }) + }, +}) +``` + +## Start the Runtime + +```go +if !c.ServiceStartup(context.Background(), nil).OK { + panic("startup failed") +} +``` + +`ServiceStartup` returns `core.Result`, not `error`. + +## Run Through the CLI Surface + +```go +r := c.Cli().Run("workspace", "create", "--name=alpha") +if r.OK { + fmt.Println("created:", r.Value) +} +``` + +For flags with values, the CLI stores the value as a string. `--name=alpha` becomes `opts.String("name") == "alpha"`. + +## Query the System + +```go +count := c.QUERY(workspaceCountQuery{}) +if count.OK { + fmt.Println("workspace count:", count.Value) +} +``` + +## Shut Down Cleanly + +```go +_ = c.ServiceShutdown(context.Background()) +``` + +Shutdown cancels `c.Context()`, broadcasts `ActionServiceShutdown{}`, waits for background tasks to finish, and then runs service stop hooks. 
+ +## Full Example + +```go +package main + +import ( + "context" + "fmt" + + "dappco.re/go/core" +) + +type workspaceCountQuery struct{} + +type createWorkspaceTask struct { + Name string +} + +func main() { + c := core.New(core.Options{ + {Key: "name", Value: "agent-workbench"}, + }) + + c.Config().Set("workspace.root", "/tmp/agent-workbench") + c.Config().Enable("workspace.templates") + + c.Service("audit", core.Service{ + OnStart: func() core.Result { + core.Info("service started", "service", "audit") + return core.Result{OK: true} + }, + OnStop: func() core.Result { + core.Info("service stopped", "service", "audit") + return core.Result{OK: true} + }, + }) + + c.RegisterQuery(func(_ *core.Core, q core.Query) core.Result { + switch q.(type) { + case workspaceCountQuery: + return core.Result{Value: 1, OK: true} + } + return core.Result{} + }) + + c.RegisterTask(func(_ *core.Core, t core.Task) core.Result { + switch task := t.(type) { + case createWorkspaceTask: + path := c.Config().String("workspace.root") + "/" + task.Name + return core.Result{Value: path, OK: true} + } + return core.Result{} + }) + + c.Command("workspace/create", core.Command{ + Action: func(opts core.Options) core.Result { + return c.PERFORM(createWorkspaceTask{ + Name: opts.String("name"), + }) + }, + }) + + if !c.ServiceStartup(context.Background(), nil).OK { + panic("startup failed") + } + + created := c.Cli().Run("workspace", "create", "--name=alpha") + fmt.Println("created:", created.Value) + + count := c.QUERY(workspaceCountQuery{}) + fmt.Println("workspace count:", count.Value) + + _ = c.ServiceShutdown(context.Background()) +} +``` + +## Next Steps + +- Read [primitives.md](primitives.md) next so the repeated shapes are clear. +- Read [commands.md](commands.md) if you are building a CLI-first system. +- Read [messaging.md](messaging.md) if services need to collaborate without direct imports. 
diff --git a/pkg/lib/workspace/default/.core/reference/docs/index.md b/pkg/lib/workspace/default/.core/reference/docs/index.md new file mode 100644 index 0000000..0ec8647 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/index.md @@ -0,0 +1,112 @@ +--- +title: CoreGO +description: AX-first documentation for the CoreGO framework. +--- + +# CoreGO + +CoreGO is the foundation layer for the Core ecosystem. It gives you one container, one command tree, one message bus, and a small set of shared primitives that repeat across the whole framework. + +The current module path is `dappco.re/go/core`. + +## AX View + +CoreGO already follows the main AX ideas from RFC-025: + +- predictable names such as `Core`, `Service`, `Command`, `Options`, `Result`, `Message` +- path-shaped command registration such as `deploy/to/homelab` +- one repeated input shape (`Options`) and one repeated return shape (`Result`) +- comments and examples that show real usage instead of restating the type signature + +## What CoreGO Owns + +| Surface | Purpose | +|---------|---------| +| `Core` | Central container and access point | +| `Service` | Managed lifecycle component | +| `Command` | Path-based command tree node | +| `ACTION`, `QUERY`, `PERFORM` | Decoupled communication between components | +| `Data`, `Drive`, `Fs`, `Config`, `I18n`, `Cli` | Built-in subsystems for common runtime work | +| `E`, `Wrap`, `ErrorLog`, `ErrorPanic` | Structured failures and panic recovery | + +## Quick Example + +```go +package main + +import ( + "context" + "fmt" + + "dappco.re/go/core" +) + +type flushCacheTask struct { + Name string +} + +func main() { + c := core.New(core.Options{ + {Key: "name", Value: "agent-workbench"}, + }) + + c.Service("cache", core.Service{ + OnStart: func() core.Result { + core.Info("cache ready", "app", c.App().Name) + return core.Result{OK: true} + }, + OnStop: func() core.Result { + core.Info("cache stopped", "app", c.App().Name) + return core.Result{OK: true} + }, + 
}) + + c.RegisterTask(func(_ *core.Core, task core.Task) core.Result { + switch task.(type) { + case flushCacheTask: + return core.Result{Value: "cache flushed", OK: true} + } + return core.Result{} + }) + + c.Command("cache/flush", core.Command{ + Action: func(opts core.Options) core.Result { + return c.PERFORM(flushCacheTask{Name: opts.String("name")}) + }, + }) + + if !c.ServiceStartup(context.Background(), nil).OK { + panic("startup failed") + } + + r := c.Cli().Run("cache", "flush", "--name=session-store") + fmt.Println(r.Value) + + _ = c.ServiceShutdown(context.Background()) +} +``` + +## Documentation Paths + +| Path | Covers | +|------|--------| +| [getting-started.md](getting-started.md) | First runnable CoreGO app | +| [primitives.md](primitives.md) | `Options`, `Result`, `Service`, `Message`, `Query`, `Task` | +| [services.md](services.md) | Service registry, service locks, runtime helpers | +| [commands.md](commands.md) | Path-based commands and CLI execution | +| [messaging.md](messaging.md) | `ACTION`, `QUERY`, `QUERYALL`, `PERFORM`, `PerformAsync` | +| [lifecycle.md](lifecycle.md) | Startup, shutdown, context, background task draining | +| [configuration.md](configuration.md) | Constructor options, config state, feature flags | +| [subsystems.md](subsystems.md) | `App`, `Data`, `Drive`, `Fs`, `I18n`, `Cli` | +| [errors.md](errors.md) | Structured errors, logging helpers, panic recovery | +| [testing.md](testing.md) | Test naming and framework-level testing patterns | +| [pkg/core.md](pkg/core.md) | Package-level reference summary | +| [pkg/log.md](pkg/log.md) | Logging reference for the root package | +| [pkg/PACKAGE_STANDARDS.md](pkg/PACKAGE_STANDARDS.md) | AX package-authoring guidance | + +## Good Reading Order + +1. Start with [getting-started.md](getting-started.md). +2. Learn the repeated shapes in [primitives.md](primitives.md). +3. 
Pick the integration path you need next: [services.md](services.md), [commands.md](commands.md), or [messaging.md](messaging.md). +4. Use [subsystems.md](subsystems.md), [errors.md](errors.md), and [testing.md](testing.md) as reference pages while building. diff --git a/pkg/lib/workspace/default/.core/reference/docs/lifecycle.md b/pkg/lib/workspace/default/.core/reference/docs/lifecycle.md new file mode 100644 index 0000000..59ba644 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/lifecycle.md @@ -0,0 +1,111 @@ +--- +title: Lifecycle +description: Startup, shutdown, context ownership, and background task draining. +--- + +# Lifecycle + +CoreGO manages lifecycle through `core.Service` callbacks, not through reflection or implicit interfaces. + +## Service Hooks + +```go +c.Service("cache", core.Service{ + OnStart: func() core.Result { + return core.Result{OK: true} + }, + OnStop: func() core.Result { + return core.Result{OK: true} + }, +}) +``` + +Only services with `OnStart` appear in `Startables()`. Only services with `OnStop` appear in `Stoppables()`. + +## `ServiceStartup` + +```go +r := c.ServiceStartup(context.Background(), nil) +``` + +### What It Does + +1. clears the shutdown flag +2. stores a new cancellable context on `c.Context()` +3. runs each `OnStart` +4. broadcasts `ActionServiceStartup{}` + +### Failure Behavior + +- if the input context is already cancelled, startup returns that error +- if any `OnStart` returns `OK:false`, startup stops immediately and returns that result + +## `ServiceShutdown` + +```go +r := c.ServiceShutdown(context.Background()) +``` + +### What It Does + +1. sets the shutdown flag +2. cancels `c.Context()` +3. broadcasts `ActionServiceShutdown{}` +4. waits for background tasks created by `PerformAsync` +5. 
runs each `OnStop` + +### Failure Behavior + +- if draining background tasks hits the shutdown context deadline, shutdown returns that context error +- when service stop hooks fail, CoreGO returns the first error it sees + +## Ordering + +The current implementation builds `Startables()` and `Stoppables()` by iterating over a map-backed registry. + +That means lifecycle order is not guaranteed today. + +If your application needs strict startup or shutdown ordering, orchestrate it explicitly inside a smaller number of service callbacks instead of relying on registry order. + +## `c.Context()` + +`ServiceStartup` creates the context returned by `c.Context()`. + +Use it for background work that should stop when the application shuts down: + +```go +c.Service("watcher", core.Service{ + OnStart: func() core.Result { + go func(ctx context.Context) { + <-ctx.Done() + }(c.Context()) + return core.Result{OK: true} + }, +}) +``` + +## Built-In Lifecycle Actions + +You can listen for lifecycle state changes through the action bus. + +```go +c.RegisterAction(func(_ *core.Core, msg core.Message) core.Result { + switch msg.(type) { + case core.ActionServiceStartup: + core.Info("core startup completed") + case core.ActionServiceShutdown: + core.Info("core shutdown started") + } + return core.Result{OK: true} +}) +``` + +## Background Task Draining + +`ServiceShutdown` waits for the internal task waitgroup to finish before calling stop hooks. + +This is what makes `PerformAsync` safe for long-running work that should complete before teardown. + +## `OnReload` + +`Service` includes an `OnReload` callback field, but CoreGO does not currently expose a top-level lifecycle runner for reload operations. 
diff --git a/pkg/lib/workspace/default/.core/reference/docs/messaging.md b/pkg/lib/workspace/default/.core/reference/docs/messaging.md new file mode 100644 index 0000000..688893a --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/messaging.md @@ -0,0 +1,171 @@ +--- +title: Messaging +description: ACTION, QUERY, QUERYALL, PERFORM, and async task flow. +--- + +# Messaging + +CoreGO uses one message bus for broadcasts, lookups, and work dispatch. + +## Message Types + +```go +type Message any +type Query any +type Task any +``` + +Your own structs define the protocol. + +```go +type repositoryIndexed struct { + Name string +} + +type repositoryCountQuery struct{} + +type syncRepositoryTask struct { + Name string +} +``` + +## `ACTION` + +`ACTION` is a broadcast. + +```go +c.RegisterAction(func(_ *core.Core, msg core.Message) core.Result { + switch m := msg.(type) { + case repositoryIndexed: + core.Info("repository indexed", "name", m.Name) + return core.Result{OK: true} + } + return core.Result{OK: true} +}) + +r := c.ACTION(repositoryIndexed{Name: "core-go"}) +``` + +### Behavior + +- all registered action handlers are called in their current registration order +- if a handler returns `OK:false`, dispatch stops and that `Result` is returned +- if no handler fails, `ACTION` returns `Result{OK:true}` + +## `QUERY` + +`QUERY` is first-match request-response. + +```go +c.RegisterQuery(func(_ *core.Core, q core.Query) core.Result { + switch q.(type) { + case repositoryCountQuery: + return core.Result{Value: 42, OK: true} + } + return core.Result{} +}) + +r := c.QUERY(repositoryCountQuery{}) +``` + +### Behavior + +- handlers run until one returns `OK:true` +- the first successful result wins +- if nothing handles the query, CoreGO returns an empty `Result` + +## `QUERYALL` + +`QUERYALL` collects every successful non-nil response. 
+ +```go +r := c.QUERYALL(repositoryCountQuery{}) +results := r.Value.([]any) +``` + +### Behavior + +- every query handler is called +- only `OK:true` results with non-nil `Value` are collected +- the call itself returns `OK:true` even when the result list is empty + +## `PERFORM` + +`PERFORM` dispatches a task to the first handler that accepts it. + +```go +c.RegisterTask(func(_ *core.Core, t core.Task) core.Result { + switch task := t.(type) { + case syncRepositoryTask: + return core.Result{Value: "synced " + task.Name, OK: true} + } + return core.Result{} +}) + +r := c.PERFORM(syncRepositoryTask{Name: "core-go"}) +``` + +### Behavior + +- handlers run until one returns `OK:true` +- the first successful result wins +- if nothing handles the task, CoreGO returns an empty `Result` + +## `PerformAsync` + +`PerformAsync` runs a task in a background goroutine and returns a generated task identifier. + +```go +r := c.PerformAsync(syncRepositoryTask{Name: "core-go"}) +taskID := r.Value.(string) +``` + +### Generated Events + +Async execution emits three action messages: + +| Message | When | +|---------|------| +| `ActionTaskStarted` | just before background execution begins | +| `ActionTaskProgress` | whenever `Progress` is called | +| `ActionTaskCompleted` | after the task finishes or panics | + +Example listener: + +```go +c.RegisterAction(func(_ *core.Core, msg core.Message) core.Result { + switch m := msg.(type) { + case core.ActionTaskCompleted: + core.Info("task completed", "task", m.TaskIdentifier, "err", m.Error) + } + return core.Result{OK: true} +}) +``` + +## Progress Updates + +```go +c.Progress(taskID, 0.5, "indexing commits", syncRepositoryTask{Name: "core-go"}) +``` + +That broadcasts `ActionTaskProgress`. + +## `TaskWithIdentifier` + +Tasks that implement `TaskWithIdentifier` receive the generated ID before dispatch. 
+ +```go +type trackedTask struct { + ID string + Name string +} + +func (t *trackedTask) SetTaskIdentifier(id string) { t.ID = id } +func (t *trackedTask) GetTaskIdentifier() string { return t.ID } +``` + +## Shutdown Interaction + +When shutdown has started, `PerformAsync` returns an empty `Result` instead of scheduling more work. + +This is why `ServiceShutdown` can safely drain the outstanding background tasks before stopping services. diff --git a/pkg/lib/workspace/default/.core/reference/docs/pkg/PACKAGE_STANDARDS.md b/pkg/lib/workspace/default/.core/reference/docs/pkg/PACKAGE_STANDARDS.md new file mode 100644 index 0000000..398bbf6 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/pkg/PACKAGE_STANDARDS.md @@ -0,0 +1,138 @@ +# AX Package Standards + +This page describes how to build packages on top of CoreGO in the style described by RFC-025. + +## 1. Prefer Predictable Names + +Use names that tell an agent what the thing is without translation. + +Good: + +- `RepositoryService` +- `RepositoryServiceOptions` +- `WorkspaceCountQuery` +- `SyncRepositoryTask` + +Avoid shortening names unless the abbreviation is already universal. + +## 2. Put Real Usage in Comments + +Write comments that show a real call with realistic values. + +Good: + +```go +// Sync a repository into the local workspace cache. +// svc.SyncRepository("core-go", "/srv/repos/core-go") +``` + +Avoid comments that only repeat the signature. + +## 3. Keep Paths Semantic + +If a command or template lives at a path, let the path explain the intent. + +Good: + +```text +deploy/to/homelab +workspace/create +template/workspace/go +``` + +That keeps the CLI, tests, docs, and message vocabulary aligned. + +## 4. 
Reuse CoreGO Primitives + +At Core boundaries, prefer the shared shapes: + +- `core.Options` for lightweight input +- `core.Result` for output +- `core.Service` for lifecycle registration +- `core.Message`, `core.Query`, `core.Task` for bus protocols + +Inside your package, typed structs are still good. Use `ServiceRuntime[T]` when you want typed package options plus a `Core` reference. + +```go +type repositoryServiceOptions struct { + BaseDirectory string +} + +type repositoryService struct { + *core.ServiceRuntime[repositoryServiceOptions] +} +``` + +## 5. Prefer Explicit Registration + +Register services and commands with names and paths that stay readable in grep results. + +```go +c.Service("repository", core.Service{...}) +c.Command("repository/sync", core.Command{...}) +``` + +## 6. Use the Bus for Decoupling + +When one package needs another package’s behavior, prefer queries and tasks over tight package coupling. + +```go +type repositoryCountQuery struct{} +type syncRepositoryTask struct { + Name string +} +``` + +That keeps the protocol visible in code and easy for agents to follow. + +## 7. Use Structured Errors + +Use `core.E`, `core.Wrap`, and `core.WrapCode`. + +```go +return core.Result{ + Value: core.E("repository.Sync", "git fetch failed", err), + OK: false, +} +``` + +Do not introduce free-form `fmt.Errorf` chains in framework code. + +## 8. Keep Testing Names Predictable + +Follow the repository pattern: + +- `_Good` +- `_Bad` +- `_Ugly` + +Example: + +```go +func TestRepositorySync_Good(t *testing.T) {} +func TestRepositorySync_Bad(t *testing.T) {} +func TestRepositorySync_Ugly(t *testing.T) {} +``` + +## 9. Prefer Stable Shapes Over Clever APIs + +For package APIs, avoid patterns that force an agent to infer too much hidden control flow. 
+ +Prefer: + +- clear structs +- explicit names +- path-based commands +- visible message types + +Avoid: + +- implicit global state unless it is truly a default service +- panic-hiding constructors +- dense option chains when a small explicit struct would do + +## 10. Document the Current Reality + +If the implementation is in transition, document what the code does now, not the API shape you plan to have later. + +That keeps agents correct on first pass, which is the real AX metric. diff --git a/pkg/lib/workspace/default/.core/reference/docs/pkg/core.md b/pkg/lib/workspace/default/.core/reference/docs/pkg/core.md new file mode 100644 index 0000000..88bd18b --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/pkg/core.md @@ -0,0 +1,81 @@ +# Package Reference: `core` + +Import path: + +```go +import "dappco.re/go/core" +``` + +This repository exposes one root package. The main areas are: + +## Constructors and Accessors + +| Name | Purpose | +|------|---------| +| `New` | Create a `*Core` | +| `NewRuntime` | Create an empty runtime wrapper | +| `NewWithFactories` | Create a runtime wrapper from named service factories | +| `Options`, `App`, `Data`, `Drive`, `Fs`, `Config`, `Error`, `Log`, `Cli`, `IPC`, `I18n`, `Context` | Access the built-in subsystems | + +## Core Primitives + +| Name | Purpose | +|------|---------| +| `Option`, `Options` | Input configuration and metadata | +| `Result` | Shared output shape | +| `Service` | Lifecycle DTO | +| `Command` | Command tree node | +| `Message`, `Query`, `Task` | Message bus payload types | + +## Service and Runtime APIs + +| Name | Purpose | +|------|---------| +| `Service` | Register or read a named service | +| `Services` | List registered service names | +| `Startables`, `Stoppables` | Snapshot lifecycle-capable services | +| `LockEnable`, `LockApply` | Activate the service registry lock | +| `ServiceRuntime[T]` | Helper for package authors | + +## Command and CLI APIs + +| Name | Purpose | 
+|------|---------| +| `Command` | Register or read a command by path | +| `Commands` | List command paths | +| `Cli().Run` | Resolve arguments to a command and execute it | +| `Cli().PrintHelp` | Show executable commands | + +## Messaging APIs + +| Name | Purpose | +|------|---------| +| `ACTION`, `Action` | Broadcast a message | +| `QUERY`, `Query` | Return the first successful query result | +| `QUERYALL`, `QueryAll` | Collect all successful query results | +| `PERFORM`, `Perform` | Run the first task handler that accepts the task | +| `PerformAsync` | Run a task in the background | +| `Progress` | Broadcast async task progress | +| `RegisterAction`, `RegisterActions`, `RegisterQuery`, `RegisterTask` | Register bus handlers | + +## Subsystems + +| Name | Purpose | +|------|---------| +| `Config` | Runtime settings and feature flags | +| `Data` | Embedded filesystem mounts | +| `Drive` | Named transport handles | +| `Fs` | Local filesystem operations | +| `I18n` | Locale collection and translation delegation | +| `App`, `Find` | Application identity and executable lookup | + +## Errors and Logging + +| Name | Purpose | +|------|---------| +| `E`, `Wrap`, `WrapCode`, `NewCode` | Structured error creation | +| `Operation`, `ErrorCode`, `ErrorMessage`, `Root`, `StackTrace`, `FormatStackTrace` | Error inspection | +| `NewLog`, `Default`, `SetDefault`, `SetLevel`, `SetRedactKeys` | Logger creation and defaults | +| `LogErr`, `LogPanic`, `ErrorLog`, `ErrorPanic` | Error-aware logging and panic recovery | + +Use the top-level docs in `docs/` for task-oriented guidance, then use this page as a compact reference. diff --git a/pkg/lib/workspace/default/.core/reference/docs/pkg/log.md b/pkg/lib/workspace/default/.core/reference/docs/pkg/log.md new file mode 100644 index 0000000..15e9db1 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/pkg/log.md @@ -0,0 +1,83 @@ +# Logging Reference + +Logging lives in the root `core` package in this repository. 
There is no separate `pkg/log` import path here. + +## Create a Logger + +```go +logger := core.NewLog(core.LogOptions{ + Level: core.LevelInfo, +}) +``` + +## Levels + +| Level | Meaning | +|-------|---------| +| `LevelQuiet` | no output | +| `LevelError` | errors and security events | +| `LevelWarn` | warnings, errors, security events | +| `LevelInfo` | informational, warnings, errors, security events | +| `LevelDebug` | everything | + +## Log Methods + +```go +logger.Debug("workspace discovered", "path", "/srv/workspaces") +logger.Info("service started", "service", "audit") +logger.Warn("retrying fetch", "attempt", 2) +logger.Error("fetch failed", "err", err) +logger.Security("sandbox escape detected", "path", attemptedPath) +``` + +## Default Logger + +The package owns a default logger. + +```go +core.SetLevel(core.LevelDebug) +core.SetRedactKeys("token", "password") + +core.Info("service started", "service", "audit") +``` + +## Redaction + +Values for keys listed in `RedactKeys` are replaced with `[REDACTED]`. + +```go +logger.SetRedactKeys("token") +logger.Info("login", "user", "cladius", "token", "secret-value") +``` + +## Output and Rotation + +```go +logger := core.NewLog(core.LogOptions{ + Level: core.LevelInfo, + Output: os.Stderr, +}) +``` + +If you provide `Rotation` and set `RotationWriterFactory`, the logger writes to the rotating writer instead of the plain output stream. + +## Error-Aware Logging + +`LogErr` extracts structured error context before logging: + +```go +le := core.NewLogErr(logger) +le.Log(err) +``` + +`ErrorLog` is the log-and-return wrapper exposed through `c.Log()`. + +## Panic-Aware Logging + +`LogPanic` is the lightweight panic logger: + +```go +defer core.NewLogPanic(logger).Recover() +``` + +It logs the recovered panic but does not manage crash files. For crash reports, use `c.Error().Recover()`. 
diff --git a/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-09-lint-pattern-catalog-design.md b/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-09-lint-pattern-catalog-design.md new file mode 100644 index 0000000..0825791 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-09-lint-pattern-catalog-design.md @@ -0,0 +1,261 @@ +# Lint Pattern Catalog & Polish Skill Design + +> **Partial implementation (14 Mar 2026):** Layer 1 (`core/lint` -- catalog, matcher, scanner, CLI) is fully implemented and documented at `docs/tools/lint/index.md`. Layer 2 (MCP subsystem in `go-ai`) and Layer 3 (Claude Code polish skill in `core/agent`) are NOT implemented. This plan is retained for those remaining layers. + +**Goal:** A structured pattern catalog (`core/lint`) that captures recurring code quality findings as regex rules, exposes them via MCP tools in `go-ai`, and orchestrates multi-AI code review via a Claude Code skill in `core/agent`. + +**Architecture:** Three layers — a standalone catalog+matcher library (`core/lint`), an MCP subsystem in `go-ai` that exposes lint tools to agents, and a Claude Code plugin in `core/agent` that orchestrates the "polish" workflow (deterministic checks + AI reviewers + feedback loop into the catalog). + +**Tech Stack:** Go (catalog, matcher, CLI, MCP subsystem), YAML (rule definitions), JSONL (findings output, compatible with `~/.core/ai/metrics/`), Claude Code plugin format (hooks.json, commands/*.md, plugin.json). + +--- + +## Context + +During a code review sweep of 18 Go repos (March 2026), AI reviewers (Gemini, Claude) found ~20 recurring patterns: SQL injection, path traversal, XSS, missing constant-time comparison, goroutine leaks, Go 1.26 modernisation opportunities, and more. Many of these patterns repeat across repos. + +Currently these findings exist only as commit messages. This design captures them as a reusable, machine-readable catalog that: +1. 
Deterministic tools can run immediately (regex matching) +2. MCP-connected agents can query and apply +3. LEM models can train on for "does this comply with CoreGo standards?" judgements +4. Grows automatically as AI reviewers find new patterns + +## Layer 1: `core/lint` — Pattern Catalog & Matcher + +### Repository Structure + +``` +core/lint/ +├── go.mod # forge.lthn.ai/core/lint +├── catalog/ +│ ├── go-security.yaml # SQL injection, path traversal, XSS, constant-time +│ ├── go-modernise.yaml # Go 1.26: slices.Clone, wg.Go, maps.Keys, range-over-int +│ ├── go-correctness.yaml # Deadlocks, goroutine leaks, nil guards, error handling +│ ├── php-security.yaml # XSS, CSRF, mass assignment, SQL injection +│ ├── ts-security.yaml # DOM XSS, prototype pollution +│ └── cpp-safety.yaml # Buffer overflow, use-after-free +├── pkg/lint/ +│ ├── catalog.go # Load + parse YAML catalog files +│ ├── rule.go # Rule struct definition +│ ├── matcher.go # Regex matcher against file contents +│ ├── report.go # Structured findings output (JSON/JSONL/text) +│ ├── catalog_test.go +│ ├── matcher_test.go +│ └── report_test.go +├── cmd/core-lint/ +│ └── main.go # `core-lint check ./...` CLI +└── .core/ + └── build.yaml # Produces core-lint binary +``` + +### Rule Schema (YAML) + +```yaml +- id: go-sec-001 + title: "SQL wildcard injection in LIKE clauses" + severity: high # critical, high, medium, low, info + languages: [go] + tags: [security, injection, owasp-a03] + pattern: 'LIKE\s+\?\s*,\s*["\x60]%\s*\+' + exclude_pattern: 'EscapeLike' # suppress if this also matches + fix: "Use parameterised LIKE with explicit escaping of % and _ characters" + found_in: [go-store] # repos where first discovered + example_bad: | + db.Where("name LIKE ?", "%"+input+"%") + example_good: | + db.Where("name LIKE ?", EscapeLike(input)) + first_seen: "2026-03-09" + detection: regex # future: ast, semantic + auto_fixable: false # future: true when we add codemods +``` + +### Rule Struct (Go) + +```go +type Rule 
struct { + ID string `yaml:"id"` + Title string `yaml:"title"` + Severity string `yaml:"severity"` + Languages []string `yaml:"languages"` + Tags []string `yaml:"tags"` + Pattern string `yaml:"pattern"` + ExcludePattern string `yaml:"exclude_pattern,omitempty"` + Fix string `yaml:"fix"` + FoundIn []string `yaml:"found_in,omitempty"` + ExampleBad string `yaml:"example_bad,omitempty"` + ExampleGood string `yaml:"example_good,omitempty"` + FirstSeen string `yaml:"first_seen"` + Detection string `yaml:"detection"` // regex | ast | semantic + AutoFixable bool `yaml:"auto_fixable"` +} +``` + +### Finding Struct (Go) + +Designed to align with go-ai's `ScanAlert` shape and `~/.core/ai/metrics/` JSONL format: + +```go +type Finding struct { + RuleID string `json:"rule_id"` + Title string `json:"title"` + Severity string `json:"severity"` + File string `json:"file"` + Line int `json:"line"` + Match string `json:"match"` // matched text + Fix string `json:"fix"` + Repo string `json:"repo,omitempty"` +} +``` + +### CLI Interface + +```bash +# Check current directory against all catalogs for detected languages +core-lint check ./... + +# Check specific languages/catalogs +core-lint check --lang go --catalog go-security ./pkg/... + +# Output as JSON (for piping to other tools) +core-lint check --format json ./... 
+ +# List available rules +core-lint catalog list +core-lint catalog list --lang go --severity high + +# Show a specific rule with examples +core-lint catalog show go-sec-001 +``` + +## Layer 2: `go-ai` Lint MCP Subsystem + +New subsystem registered alongside files/rag/ml/brain: + +```go +type LintSubsystem struct { + catalog *lint.Catalog + root string // workspace root for scanning +} + +func (s *LintSubsystem) Name() string { return "lint" } + +func (s *LintSubsystem) RegisterTools(server *mcp.Server) { + // lint_check - run rules against workspace files + // lint_catalog - list/search available rules + // lint_report - get findings summary for a path +} +``` + +### MCP Tools + +| Tool | Input | Output | Group | +|------|-------|--------|-------| +| `lint_check` | `{path: string, lang?: string, severity?: string}` | `{findings: []Finding}` | lint | +| `lint_catalog` | `{lang?: string, tags?: []string, severity?: string}` | `{rules: []Rule}` | lint | +| `lint_report` | `{path: string, format?: "summary" or "detailed"}` | `{summary: ReportSummary}` | lint | + +This means any MCP-connected agent (Claude, LEM, Codex) can call `lint_check` to scan code against the catalog. + +## Layer 3: `core/agent` Polish Skill + +Claude Code plugin at `core/agent/claude/polish/`: + +``` +core/agent/claude/polish/ +├── plugin.json +├── hooks.json # optional: PostToolUse after git commit +├── commands/ +│ └── polish.md # /polish slash command +└── scripts/ + └── run-lint.sh # shells out to core-lint +``` + +### `/polish` Command Flow + +1. Run `core-lint check ./...` for fast deterministic findings +2. Report findings to user +3. Optionally run AI reviewers (Gemini CLI, Codex) for deeper analysis +4. Deduplicate AI findings against catalog (already-known patterns) +5. 
Propose new patterns as catalog additions (PR to core/lint) + +### Subagent Configuration (`.core/agents/`) + +Repos can configure polish behaviour: + +```yaml +# any-repo/.core/agents/polish.yaml +languages: [go] +catalogs: [go-security, go-modernise, go-correctness] +reviewers: [gemini] # which AI tools to invoke +exclude: [vendor/, testdata/, *_test.go] +severity_threshold: medium # only report medium+ findings +``` + +## Findings to LEM Pipeline + +``` +core-lint check -> findings.json + | + v +~/.core/ai/metrics/YYYY-MM-DD.jsonl (audit trail) + | + v +LEM training data: + - Rule examples (bad/good pairs) -> supervised training signal + - Finding frequency -> pattern importance weighting + - Rule descriptions -> natural language understanding of "why" + | + v +LEM tool: "does this code comply with CoreGo standards?" + -> queries lint_catalog via MCP + -> applies learned pattern recognition + -> reports violations with rule IDs and fixes +``` + +## Initial Catalog Seed + +From the March 2026 ecosystem sweep: + +| ID | Title | Severity | Language | Found In | +|----|-------|----------|----------|----------| +| go-sec-001 | SQL wildcard injection | high | go | go-store | +| go-sec-002 | Path traversal in cache keys | high | go | go-cache | +| go-sec-003 | XSS in HTML output | high | go | go-html | +| go-sec-004 | Non-constant-time auth comparison | high | go | go-crypt | +| go-sec-005 | Log injection via unescaped input | medium | go | go-log | +| go-sec-006 | Key material in log output | high | go | go-log | +| go-cor-001 | Goroutine leak (no WaitGroup) | high | go | core/go | +| go-cor-002 | Shutdown deadlock (wg.Wait no timeout) | high | go | core/go | +| go-cor-003 | Silent error swallowing | medium | go | go-process, go-ratelimit | +| go-cor-004 | Panic in library code | medium | go | go-i18n | +| go-cor-005 | Delete without path validation | high | go | go-io | +| go-mod-001 | Manual slice clone (append nil pattern) | low | go | core/go | +| go-mod-002 | 
Manual sort instead of slices.Sorted | low | go | core/go | +| go-mod-003 | Manual reverse loop instead of slices.Backward | low | go | core/go | +| go-mod-004 | sync.WaitGroup Add+Done instead of Go() | low | go | core/go | +| go-mod-005 | Manual map key collection instead of maps.Keys | low | go | core/go | +| go-cor-006 | Missing error return from API calls | medium | go | go-forge, go-git | +| go-cor-007 | Signal handler uses wrong type | medium | go | go-process | + +## Dependencies + +``` +core/lint (standalone, zero core deps) + ^ + | +go-ai/mcp/lint/ (imports core/lint for catalog + matcher) + ^ + | +core/agent/claude/polish/ (shells out to core-lint CLI) +``` + +`core/lint` has no dependency on `core/go` or any other framework module. It is a standalone library + CLI, like `core/go-io`. + +## Future Extensions (Not Built Now) + +- **AST-based detection** (layer 2): Parse Go/PHP AST, match structural patterns +- **Semantic detection** (layer 3): LEM judges code against rule descriptions +- **Auto-fix codemods**: `core-lint fix` applies known fixes automatically +- **CI integration**: GitHub Actions workflow runs `core-lint check` on PRs +- **CodeRabbit integration**: Import CodeRabbit findings as catalog entries +- **Cross-repo dashboard**: Aggregate findings across all repos in workspace diff --git a/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-09-lint-pattern-catalog-plan.md b/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-09-lint-pattern-catalog-plan.md new file mode 100644 index 0000000..7f1ddec --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-09-lint-pattern-catalog-plan.md @@ -0,0 +1,1668 @@ +# Lint Pattern Catalog Implementation Plan + +> **Fully implemented (14 Mar 2026).** All tasks in this plan are complete. The `core/lint` module ships 18 rules across 3 catalogs, with a working CLI and embedded YAML. 
This plan is retained alongside the design doc, which tracks the remaining MCP and polish skill layers. + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Build `core/lint` — a standalone Go library + CLI that loads YAML pattern catalogs and runs regex-based code checks, seeded with 18 patterns from the March 2026 ecosystem sweep. + +**Architecture:** Standalone Go module (`forge.lthn.ai/core/lint`) with zero framework deps. YAML catalog files define rules (id, severity, regex pattern, fix). `pkg/lint` loads catalogs and matches patterns against files. `cmd/core-lint` is a Cobra CLI. Uses `cli.Main()` + `cli.WithCommands()` from `core/cli`. + +**Tech Stack:** Go 1.26, `gopkg.in/yaml.v3` (YAML parsing), `forge.lthn.ai/core/cli` (CLI framework), `github.com/stretchr/testify` (testing), `embed` (catalog embedding). + +--- + +### Task 1: Create repo and Go module + +**Files:** +- Create: `/Users/snider/Code/core/lint/go.mod` +- Create: `/Users/snider/Code/core/lint/.core/build.yaml` +- Create: `/Users/snider/Code/core/lint/CLAUDE.md` + +**Step 1: Create repo on forge** + +```bash +ssh -p 2223 git@forge.lthn.ai +``` + +If SSH repo creation isn't available, create via Forgejo API: +```bash +curl -X POST "https://forge.lthn.ai/api/v1/orgs/core/repos" \ + -H "Authorization: token $FORGE_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"name":"lint","description":"Pattern catalog & regex matcher for code quality","auto_init":true,"default_branch":"main"}' +``` + +Or manually create on forge.lthn.ai web UI under the `core` org. 
+ +**Step 2: Clone and initialise Go module** + +```bash +cd ~/Code/core +git clone ssh://git@forge.lthn.ai:2223/core/lint.git +cd lint +go mod init forge.lthn.ai/core/lint +``` + +Set Go version in go.mod: +``` +module forge.lthn.ai/core/lint + +go 1.26.0 +``` + +**Step 3: Create `.core/build.yaml`** + +```yaml +version: 1 + +project: + name: core-lint + description: Pattern catalog and regex code checker + main: ./cmd/core-lint + binary: core-lint + +build: + cgo: false + flags: + - -trimpath + ldflags: + - -s + - -w + +targets: + - os: linux + arch: amd64 + - os: linux + arch: arm64 + - os: darwin + arch: arm64 + - os: windows + arch: amd64 +``` + +**Step 4: Create `CLAUDE.md`** + +```markdown +# CLAUDE.md + +## Project Overview + +`core/lint` is a standalone pattern catalog and regex-based code checker. It loads YAML rule definitions and matches them against source files. Zero framework dependencies. + +## Build & Development + +```bash +core go test +core go qa +core build # produces ./bin/core-lint +``` + +## Architecture + +- `catalog/` — YAML rule files (embedded at compile time) +- `pkg/lint/` — Library: Rule, Catalog, Matcher, Report types +- `cmd/core-lint/` — CLI binary using `cli.Main()` + +## Rule Schema + +Each YAML file contains an array of rules with: id, title, severity, languages, tags, pattern (regex), exclude_pattern, fix, example_bad, example_good, detection type. + +## Coding Standards + +- UK English +- `declare(strict_types=1)` equivalent: all functions have typed params/returns +- Tests use testify +- License: EUPL-1.2 +``` + +**Step 5: Add to go.work** + +Add `./core/lint` to `~/Code/go.work` under the Core framework section. 
+ +**Step 6: Commit** + +```bash +git add go.mod .core/ CLAUDE.md +git commit -m "feat: initialise core/lint module" +``` + +--- + +### Task 2: Rule struct and YAML parsing + +**Files:** +- Create: `/Users/snider/Code/core/lint/pkg/lint/rule.go` +- Create: `/Users/snider/Code/core/lint/pkg/lint/rule_test.go` + +**Step 1: Write the failing test** + +```go +package lint + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseRules(t *testing.T) { + yaml := ` +- id: test-001 + title: "Test rule" + severity: high + languages: [go] + tags: [security] + pattern: 'fmt\.Println' + fix: "Use structured logging" + detection: regex +` + rules, err := ParseRules([]byte(yaml)) + require.NoError(t, err) + require.Len(t, rules, 1) + assert.Equal(t, "test-001", rules[0].ID) + assert.Equal(t, "high", rules[0].Severity) + assert.Equal(t, []string{"go"}, rules[0].Languages) + assert.Equal(t, `fmt\.Println`, rules[0].Pattern) +} + +func TestParseRules_Invalid(t *testing.T) { + _, err := ParseRules([]byte("not: valid: yaml: [")) + assert.Error(t, err) +} + +func TestRule_Validate(t *testing.T) { + good := Rule{ID: "x-001", Title: "T", Severity: "high", Languages: []string{"go"}, Pattern: "foo", Detection: "regex"} + assert.NoError(t, good.Validate()) + + bad := Rule{} // missing required fields + assert.Error(t, bad.Validate()) +} + +func TestRule_Validate_BadRegex(t *testing.T) { + r := Rule{ID: "x-001", Title: "T", Severity: "high", Languages: []string{"go"}, Pattern: "[invalid", Detection: "regex"} + assert.Error(t, r.Validate()) +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v` +Expected: FAIL — `ParseRules` and `Rule` not defined + +**Step 3: Write minimal implementation** + +```go +package lint + +import ( + "fmt" + "regexp" + + "gopkg.in/yaml.v3" +) + +// Rule defines a single lint pattern check. 
+type Rule struct { + ID string `yaml:"id" json:"id"` + Title string `yaml:"title" json:"title"` + Severity string `yaml:"severity" json:"severity"` + Languages []string `yaml:"languages" json:"languages"` + Tags []string `yaml:"tags" json:"tags"` + Pattern string `yaml:"pattern" json:"pattern"` + ExcludePattern string `yaml:"exclude_pattern" json:"exclude_pattern,omitempty"` + Fix string `yaml:"fix" json:"fix"` + FoundIn []string `yaml:"found_in" json:"found_in,omitempty"` + ExampleBad string `yaml:"example_bad" json:"example_bad,omitempty"` + ExampleGood string `yaml:"example_good" json:"example_good,omitempty"` + FirstSeen string `yaml:"first_seen" json:"first_seen,omitempty"` + Detection string `yaml:"detection" json:"detection"` + AutoFixable bool `yaml:"auto_fixable" json:"auto_fixable"` +} + +// Validate checks that a Rule has all required fields and a compilable regex pattern. +func (r *Rule) Validate() error { + if r.ID == "" { + return fmt.Errorf("rule missing id") + } + if r.Title == "" { + return fmt.Errorf("rule %s: missing title", r.ID) + } + if r.Severity == "" { + return fmt.Errorf("rule %s: missing severity", r.ID) + } + if len(r.Languages) == 0 { + return fmt.Errorf("rule %s: missing languages", r.ID) + } + if r.Pattern == "" { + return fmt.Errorf("rule %s: missing pattern", r.ID) + } + if r.Detection == "regex" { + if _, err := regexp.Compile(r.Pattern); err != nil { + return fmt.Errorf("rule %s: invalid regex: %w", r.ID, err) + } + } + return nil +} + +// ParseRules parses YAML bytes into a slice of Rules. 
+func ParseRules(data []byte) ([]Rule, error) { + var rules []Rule + if err := yaml.Unmarshal(data, &rules); err != nil { + return nil, fmt.Errorf("parse rules: %w", err) + } + return rules, nil +} +``` + +**Step 4: Run test to verify it passes** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v` +Expected: PASS (4 tests) + +**Step 5: Add yaml dependency** + +```bash +cd ~/Code/core/lint && go get gopkg.in/yaml.v3 && go get github.com/stretchr/testify +``` + +**Step 6: Commit** + +```bash +git add pkg/lint/rule.go pkg/lint/rule_test.go go.mod go.sum +git commit -m "feat: add Rule struct with YAML parsing and validation" +``` + +--- + +### Task 3: Catalog loader with embed support + +**Files:** +- Create: `/Users/snider/Code/core/lint/pkg/lint/catalog.go` +- Create: `/Users/snider/Code/core/lint/pkg/lint/catalog_test.go` +- Create: `/Users/snider/Code/core/lint/catalog/go-security.yaml` (minimal test file) + +**Step 1: Create a minimal test catalog file** + +Create `/Users/snider/Code/core/lint/catalog/go-security.yaml`: +```yaml +- id: go-sec-001 + title: "SQL wildcard injection in LIKE clauses" + severity: high + languages: [go] + tags: [security, injection] + pattern: 'LIKE\s+\?\s*,\s*["%].*\+' + fix: "Use parameterised LIKE with EscapeLike()" + found_in: [go-store] + first_seen: "2026-03-09" + detection: regex +``` + +**Step 2: Write the failing test** + +```go +package lint + +import ( + "embed" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCatalog_LoadDir(t *testing.T) { + // Find the catalog/ dir relative to the module root + dir := filepath.Join("..", "..", "catalog") + cat, err := LoadDir(dir) + require.NoError(t, err) + assert.Greater(t, len(cat.Rules), 0) + assert.Equal(t, "go-sec-001", cat.Rules[0].ID) +} + +func TestCatalog_LoadDir_NotExist(t *testing.T) { + _, err := LoadDir("/nonexistent") + assert.Error(t, err) +} + +func TestCatalog_Filter_Language(t 
*testing.T) { + cat := &Catalog{Rules: []Rule{ + {ID: "go-001", Languages: []string{"go"}, Severity: "high"}, + {ID: "php-001", Languages: []string{"php"}, Severity: "high"}, + }} + filtered := cat.ForLanguage("go") + assert.Len(t, filtered, 1) + assert.Equal(t, "go-001", filtered[0].ID) +} + +func TestCatalog_Filter_Severity(t *testing.T) { + cat := &Catalog{Rules: []Rule{ + {ID: "a", Severity: "high"}, + {ID: "b", Severity: "low"}, + {ID: "c", Severity: "medium"}, + }} + filtered := cat.AtSeverity("medium") + assert.Len(t, filtered, 2) // high + medium +} + +func TestCatalog_LoadFS(t *testing.T) { + // Write temp yaml + dir := t.TempDir() + data := []byte(`- id: fs-001 + title: "FS test" + severity: low + languages: [go] + tags: [] + pattern: 'test' + fix: "fix" + detection: regex +`) + require.NoError(t, os.WriteFile(filepath.Join(dir, "test.yaml"), data, 0644)) + + cat, err := LoadDir(dir) + require.NoError(t, err) + assert.Len(t, cat.Rules, 1) +} +``` + +**Step 3: Write minimal implementation** + +```go +package lint + +import ( + "embed" + "fmt" + "io/fs" + "os" + "path/filepath" + "slices" + "strings" +) + +// Catalog holds a collection of lint rules loaded from YAML files. +type Catalog struct { + Rules []Rule +} + +// severityOrder maps severity names to numeric priority (higher = more severe). +var severityOrder = map[string]int{ + "critical": 5, + "high": 4, + "medium": 3, + "low": 2, + "info": 1, +} + +// LoadDir loads all .yaml files from a directory path into a Catalog. 
+func LoadDir(dir string) (*Catalog, error) { + entries, err := os.ReadDir(dir) + if err != nil { + return nil, fmt.Errorf("load catalog dir: %w", err) + } + + cat := &Catalog{} + for _, entry := range entries { + if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".yaml") { + continue + } + data, err := os.ReadFile(filepath.Join(dir, entry.Name())) + if err != nil { + return nil, fmt.Errorf("read %s: %w", entry.Name(), err) + } + rules, err := ParseRules(data) + if err != nil { + return nil, fmt.Errorf("parse %s: %w", entry.Name(), err) + } + cat.Rules = append(cat.Rules, rules...) + } + return cat, nil +} + +// LoadFS loads all .yaml files from an embed.FS into a Catalog. +func LoadFS(fsys embed.FS, dir string) (*Catalog, error) { + cat := &Catalog{} + err := fs.WalkDir(fsys, dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() || !strings.HasSuffix(path, ".yaml") { + return nil + } + data, err := fsys.ReadFile(path) + if err != nil { + return fmt.Errorf("read %s: %w", path, err) + } + rules, err := ParseRules(data) + if err != nil { + return fmt.Errorf("parse %s: %w", path, err) + } + cat.Rules = append(cat.Rules, rules...) + return nil + }) + if err != nil { + return nil, err + } + return cat, nil +} + +// ForLanguage returns rules that apply to the given language. +func (c *Catalog) ForLanguage(lang string) []Rule { + var out []Rule + for _, r := range c.Rules { + if slices.Contains(r.Languages, lang) { + out = append(out, r) + } + } + return out +} + +// AtSeverity returns rules at or above the given severity threshold. +func (c *Catalog) AtSeverity(threshold string) []Rule { + minLevel := severityOrder[threshold] + var out []Rule + for _, r := range c.Rules { + if severityOrder[r.Severity] >= minLevel { + out = append(out, r) + } + } + return out +} + +// ByID returns a rule by its ID, or nil if not found. 
+func (c *Catalog) ByID(id string) *Rule { + for i := range c.Rules { + if c.Rules[i].ID == id { + return &c.Rules[i] + } + } + return nil +} +``` + +**Step 4: Run test to verify it passes** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v` +Expected: PASS (all tests) + +**Step 5: Commit** + +```bash +git add pkg/lint/catalog.go pkg/lint/catalog_test.go catalog/go-security.yaml +git commit -m "feat: add Catalog loader with dir/embed/filter support" +``` + +--- + +### Task 4: Regex matcher + +**Files:** +- Create: `/Users/snider/Code/core/lint/pkg/lint/matcher.go` +- Create: `/Users/snider/Code/core/lint/pkg/lint/matcher_test.go` + +**Step 1: Write the failing test** + +```go +package lint + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMatcher_Match(t *testing.T) { + rules := []Rule{ + { + ID: "test-001", + Title: "fmt.Println usage", + Severity: "low", + Languages: []string{"go"}, + Pattern: `fmt\.Println`, + Fix: "Use structured logging", + Detection: "regex", + }, + } + m, err := NewMatcher(rules) + require.NoError(t, err) + + content := `package main + +import "fmt" + +func main() { + fmt.Println("hello") +} +` + findings := m.Match("main.go", []byte(content)) + require.Len(t, findings, 1) + assert.Equal(t, "test-001", findings[0].RuleID) + assert.Equal(t, "main.go", findings[0].File) + assert.Equal(t, 6, findings[0].Line) + assert.Contains(t, findings[0].Match, "fmt.Println") +} + +func TestMatcher_ExcludePattern(t *testing.T) { + rules := []Rule{ + { + ID: "test-002", + Title: "Println with exclude", + Severity: "low", + Languages: []string{"go"}, + Pattern: `fmt\.Println`, + ExcludePattern: `// lint:ignore`, + Fix: "Use logging", + Detection: "regex", + }, + } + m, err := NewMatcher(rules) + require.NoError(t, err) + + content := `package main +func a() { fmt.Println("bad") } +func b() { fmt.Println("ok") // lint:ignore } +` + findings := m.Match("main.go", []byte(content)) + // 
Line 2 matches, line 3 is excluded + assert.Len(t, findings, 1) + assert.Equal(t, 2, findings[0].Line) +} + +func TestMatcher_NoMatch(t *testing.T) { + rules := []Rule{ + {ID: "test-003", Title: "T", Severity: "low", Languages: []string{"go"}, Pattern: `NEVER_MATCH_THIS`, Detection: "regex"}, + } + m, err := NewMatcher(rules) + require.NoError(t, err) + + findings := m.Match("main.go", []byte("package main\n")) + assert.Empty(t, findings) +} + +func TestMatcher_InvalidRegex(t *testing.T) { + rules := []Rule{ + {ID: "bad", Title: "T", Severity: "low", Languages: []string{"go"}, Pattern: `[invalid`, Detection: "regex"}, + } + _, err := NewMatcher(rules) + assert.Error(t, err) +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v -run TestMatcher` +Expected: FAIL — `NewMatcher` not defined + +**Step 3: Write minimal implementation** + +```go +package lint + +import ( + "fmt" + "regexp" + "strings" +) + +// Finding represents a single match of a rule against source code. +type Finding struct { + RuleID string `json:"rule_id"` + Title string `json:"title"` + Severity string `json:"severity"` + File string `json:"file"` + Line int `json:"line"` + Match string `json:"match"` + Fix string `json:"fix"` + Repo string `json:"repo,omitempty"` +} + +// compiledRule is a rule with its regex pre-compiled. +type compiledRule struct { + rule Rule + pattern *regexp.Regexp + exclude *regexp.Regexp +} + +// Matcher runs compiled rules against file contents. +type Matcher struct { + rules []compiledRule +} + +// NewMatcher compiles all rule patterns and returns a Matcher. 
+func NewMatcher(rules []Rule) (*Matcher, error) { + compiled := make([]compiledRule, 0, len(rules)) + for _, r := range rules { + if r.Detection != "regex" { + continue // skip non-regex rules + } + p, err := regexp.Compile(r.Pattern) + if err != nil { + return nil, fmt.Errorf("rule %s: invalid pattern: %w", r.ID, err) + } + cr := compiledRule{rule: r, pattern: p} + if r.ExcludePattern != "" { + ex, err := regexp.Compile(r.ExcludePattern) + if err != nil { + return nil, fmt.Errorf("rule %s: invalid exclude_pattern: %w", r.ID, err) + } + cr.exclude = ex + } + compiled = append(compiled, cr) + } + return &Matcher{rules: compiled}, nil +} + +// Match checks file contents against all rules and returns findings. +func (m *Matcher) Match(filename string, content []byte) []Finding { + lines := strings.Split(string(content), "\n") + var findings []Finding + + for _, cr := range m.rules { + for i, line := range lines { + if !cr.pattern.MatchString(line) { + continue + } + if cr.exclude != nil && cr.exclude.MatchString(line) { + continue + } + findings = append(findings, Finding{ + RuleID: cr.rule.ID, + Title: cr.rule.Title, + Severity: cr.rule.Severity, + File: filename, + Line: i + 1, + Match: strings.TrimSpace(line), + Fix: cr.rule.Fix, + }) + } + } + return findings +} +``` + +**Step 4: Run test to verify it passes** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v -run TestMatcher` +Expected: PASS (4 tests) + +**Step 5: Commit** + +```bash +git add pkg/lint/matcher.go pkg/lint/matcher_test.go +git commit -m "feat: add regex Matcher with exclude pattern support" +``` + +--- + +### Task 5: Report output (JSON, text, JSONL) + +**Files:** +- Create: `/Users/snider/Code/core/lint/pkg/lint/report.go` +- Create: `/Users/snider/Code/core/lint/pkg/lint/report_test.go` + +**Step 1: Write the failing test** + +```go +package lint + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" 
+) + +func TestReport_JSON(t *testing.T) { + findings := []Finding{ + {RuleID: "x-001", Title: "Test", Severity: "high", File: "a.go", Line: 10, Match: "bad code", Fix: "fix it"}, + } + var buf bytes.Buffer + require.NoError(t, WriteJSON(&buf, findings)) + + var parsed []Finding + require.NoError(t, json.Unmarshal(buf.Bytes(), &parsed)) + assert.Len(t, parsed, 1) + assert.Equal(t, "x-001", parsed[0].RuleID) +} + +func TestReport_JSONL(t *testing.T) { + findings := []Finding{ + {RuleID: "a-001", File: "a.go", Line: 1}, + {RuleID: "b-001", File: "b.go", Line: 2}, + } + var buf bytes.Buffer + require.NoError(t, WriteJSONL(&buf, findings)) + + lines := strings.Split(strings.TrimSpace(buf.String()), "\n") + assert.Len(t, lines, 2) +} + +func TestReport_Text(t *testing.T) { + findings := []Finding{ + {RuleID: "x-001", Title: "Test rule", Severity: "high", File: "main.go", Line: 42, Match: "bad()", Fix: "use good()"}, + } + var buf bytes.Buffer + WriteText(&buf, findings) + + out := buf.String() + assert.Contains(t, out, "main.go:42") + assert.Contains(t, out, "x-001") + assert.Contains(t, out, "high") +} + +func TestReport_Summary(t *testing.T) { + findings := []Finding{ + {Severity: "high"}, + {Severity: "high"}, + {Severity: "low"}, + } + s := Summarise(findings) + assert.Equal(t, 3, s.Total) + assert.Equal(t, 2, s.BySeverity["high"]) + assert.Equal(t, 1, s.BySeverity["low"]) +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v -run TestReport` +Expected: FAIL — functions not defined + +**Step 3: Write minimal implementation** + +```go +package lint + +import ( + "encoding/json" + "fmt" + "io" +) + +// Summary holds aggregate stats about findings. +type Summary struct { + Total int `json:"total"` + BySeverity map[string]int `json:"by_severity"` +} + +// Summarise creates a Summary from a list of findings. 
+func Summarise(findings []Finding) Summary { + s := Summary{ + Total: len(findings), + BySeverity: make(map[string]int), + } + for _, f := range findings { + s.BySeverity[f.Severity]++ + } + return s +} + +// WriteJSON writes findings as a JSON array. +func WriteJSON(w io.Writer, findings []Finding) error { + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + return enc.Encode(findings) +} + +// WriteJSONL writes findings as newline-delimited JSON (one object per line). +// Compatible with ~/.core/ai/metrics/ format. +func WriteJSONL(w io.Writer, findings []Finding) error { + enc := json.NewEncoder(w) + for _, f := range findings { + if err := enc.Encode(f); err != nil { + return err + } + } + return nil +} + +// WriteText writes findings as human-readable text. +func WriteText(w io.Writer, findings []Finding) { + for _, f := range findings { + fmt.Fprintf(w, "%s:%d [%s] %s (%s)\n", f.File, f.Line, f.Severity, f.Title, f.RuleID) + if f.Fix != "" { + fmt.Fprintf(w, " fix: %s\n", f.Fix) + } + } +} +``` + +**Step 4: Run test to verify it passes** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v -run TestReport` +Expected: PASS (4 tests) + +**Step 5: Commit** + +```bash +git add pkg/lint/report.go pkg/lint/report_test.go +git commit -m "feat: add report output (JSON, JSONL, text, summary)" +``` + +--- + +### Task 6: Scanner (walk files + match) + +**Files:** +- Create: `/Users/snider/Code/core/lint/pkg/lint/scanner.go` +- Create: `/Users/snider/Code/core/lint/pkg/lint/scanner_test.go` + +**Step 1: Write the failing test** + +```go +package lint + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestScanner_ScanDir(t *testing.T) { + // Set up temp dir with a .go file containing a known pattern + dir := t.TempDir() + goFile := filepath.Join(dir, "main.go") + require.NoError(t, os.WriteFile(goFile, []byte(`package main + +import "fmt" + +func main() { + fmt.Println("hello") 
+} +`), 0644)) + + rules := []Rule{ + {ID: "test-001", Title: "Println", Severity: "low", Languages: []string{"go"}, Pattern: `fmt\.Println`, Fix: "log", Detection: "regex"}, + } + + s, err := NewScanner(rules) + require.NoError(t, err) + + findings, err := s.ScanDir(dir) + require.NoError(t, err) + require.Len(t, findings, 1) + assert.Equal(t, "test-001", findings[0].RuleID) +} + +func TestScanner_ScanDir_ExcludesVendor(t *testing.T) { + dir := t.TempDir() + vendor := filepath.Join(dir, "vendor") + require.NoError(t, os.MkdirAll(vendor, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(vendor, "lib.go"), []byte("package lib\nfunc x() { fmt.Println() }\n"), 0644)) + + rules := []Rule{ + {ID: "test-001", Title: "Println", Severity: "low", Languages: []string{"go"}, Pattern: `fmt\.Println`, Fix: "log", Detection: "regex"}, + } + + s, err := NewScanner(rules) + require.NoError(t, err) + + findings, err := s.ScanDir(dir) + require.NoError(t, err) + assert.Empty(t, findings) +} + +func TestScanner_LanguageDetection(t *testing.T) { + assert.Equal(t, "go", DetectLanguage("main.go")) + assert.Equal(t, "php", DetectLanguage("app.php")) + assert.Equal(t, "ts", DetectLanguage("index.ts")) + assert.Equal(t, "ts", DetectLanguage("index.tsx")) + assert.Equal(t, "cpp", DetectLanguage("engine.cpp")) + assert.Equal(t, "cpp", DetectLanguage("engine.cc")) + assert.Equal(t, "", DetectLanguage("README.md")) +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v -run TestScanner` +Expected: FAIL — `NewScanner` not defined + +**Step 3: Write minimal implementation** + +```go +package lint + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// defaultExcludes are directories skipped during scanning. +var defaultExcludes = []string{"vendor", "node_modules", ".git", "testdata", ".core"} + +// extToLang maps file extensions to language identifiers. 
+var extToLang = map[string]string{ + ".go": "go", + ".php": "php", + ".ts": "ts", + ".tsx": "ts", + ".js": "js", + ".jsx": "js", + ".cpp": "cpp", + ".cc": "cpp", + ".cxx": "cpp", + ".c": "cpp", + ".h": "cpp", + ".hpp": "cpp", +} + +// DetectLanguage returns the language identifier for a filename, or "" if unknown. +func DetectLanguage(filename string) string { + ext := filepath.Ext(filename) + return extToLang[ext] +} + +// Scanner walks directories and matches files against rules. +type Scanner struct { + matcher *Matcher + rules []Rule + excludes []string +} + +// NewScanner creates a Scanner from a set of rules. +func NewScanner(rules []Rule) (*Scanner, error) { + m, err := NewMatcher(rules) + if err != nil { + return nil, err + } + return &Scanner{ + matcher: m, + rules: rules, + excludes: defaultExcludes, + }, nil +} + +// ScanDir walks a directory tree and returns all findings. +func (s *Scanner) ScanDir(root string) ([]Finding, error) { + var all []Finding + + err := filepath.WalkDir(root, func(path string, d os.DirEntry, err error) error { + if err != nil { + return err + } + + // Skip excluded directories + if d.IsDir() { + for _, ex := range s.excludes { + if d.Name() == ex { + return filepath.SkipDir + } + } + return nil + } + + // Only scan files with known language extensions + lang := DetectLanguage(path) + if lang == "" { + return nil + } + + content, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("read %s: %w", path, err) + } + + // Make path relative to root for cleaner output + rel, err := filepath.Rel(root, path) + if err != nil { + rel = path + } + + findings := s.matcher.Match(rel, content) + all = append(all, findings...) + return nil + }) + + return all, err +} + +// ScanFile scans a single file and returns findings. 
+func (s *Scanner) ScanFile(path string) ([]Finding, error) { + content, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("read %s: %w", path, err) + } + return s.matcher.Match(path, content), nil +} +``` + +**Step 4: Run test to verify it passes** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v -run TestScanner` +Expected: PASS (3 tests) + +**Step 5: Commit** + +```bash +git add pkg/lint/scanner.go pkg/lint/scanner_test.go +git commit -m "feat: add Scanner with directory walking and language detection" +``` + +--- + +### Task 7: Seed the catalog YAML files + +**Files:** +- Create: `/Users/snider/Code/core/lint/catalog/go-security.yaml` (expand from task 3) +- Create: `/Users/snider/Code/core/lint/catalog/go-correctness.yaml` +- Create: `/Users/snider/Code/core/lint/catalog/go-modernise.yaml` + +**Step 1: Write `catalog/go-security.yaml`** + +```yaml +- id: go-sec-001 + title: "SQL wildcard injection in LIKE clauses" + severity: high + languages: [go] + tags: [security, injection, owasp-a03] + pattern: 'LIKE\s+\?.*["%`]\s*\%.*\+' + exclude_pattern: 'EscapeLike' + fix: "Use parameterised LIKE with explicit escaping of % and _ characters" + found_in: [go-store] + example_bad: | + db.Where("name LIKE ?", "%"+input+"%") + example_good: | + db.Where("name LIKE ?", EscapeLike(input)) + first_seen: "2026-03-09" + detection: regex + +- id: go-sec-002 + title: "Path traversal in file/cache key operations" + severity: high + languages: [go] + tags: [security, path-traversal, owasp-a01] + pattern: 'filepath\.Join\(.*,\s*\w+\)' + exclude_pattern: 'filepath\.Clean|securejoin|ValidatePath' + fix: "Validate path components do not contain .. 
before joining" + found_in: [go-cache] + example_bad: | + path := filepath.Join(cacheDir, userInput) + example_good: | + if strings.Contains(key, "..") { return ErrInvalidKey } + path := filepath.Join(cacheDir, key) + first_seen: "2026-03-09" + detection: regex + +- id: go-sec-003 + title: "XSS via unescaped HTML output" + severity: high + languages: [go] + tags: [security, xss, owasp-a03] + pattern: 'fmt\.Sprintf\(.*<.*>.*%s' + exclude_pattern: 'html\.EscapeString|template\.HTMLEscapeString' + fix: "Use html.EscapeString() for user-supplied values in HTML output" + found_in: [go-html] + example_bad: | + out := fmt.Sprintf("
<div>%s</div>
", userInput) + example_good: | + out := fmt.Sprintf("
<div>%s</div>
", html.EscapeString(userInput)) + first_seen: "2026-03-09" + detection: regex + +- id: go-sec-004 + title: "Non-constant-time comparison for authentication" + severity: high + languages: [go] + tags: [security, timing-attack, owasp-a02] + pattern: '==\s*\w*(token|key|secret|password|hash|digest|hmac|mac|sig)' + exclude_pattern: 'subtle\.ConstantTimeCompare|hmac\.Equal' + fix: "Use crypto/subtle.ConstantTimeCompare for security-sensitive comparisons" + found_in: [go-crypt] + example_bad: | + if providedToken == storedToken { + example_good: | + if subtle.ConstantTimeCompare([]byte(provided), []byte(stored)) == 1 { + first_seen: "2026-03-09" + detection: regex + +- id: go-sec-005 + title: "Log injection via unescaped newlines" + severity: medium + languages: [go] + tags: [security, injection, logging] + pattern: 'log\.\w+\(.*\+.*\)' + exclude_pattern: 'strings\.ReplaceAll.*\\n|slog\.' + fix: "Use structured logging (slog) or sanitise newlines from user input" + found_in: [go-log] + example_bad: | + log.Printf("user login: " + username) + example_good: | + slog.Info("user login", "username", username) + first_seen: "2026-03-09" + detection: regex + +- id: go-sec-006 + title: "Sensitive key material in log output" + severity: high + languages: [go] + tags: [security, secrets, logging] + pattern: 'log\.\w+\(.*(?i)(password|secret|token|apikey|private.?key|credential)' + exclude_pattern: 'REDACTED|\*\*\*|redact' + fix: "Redact sensitive fields before logging" + found_in: [go-log] + example_bad: | + log.Printf("config: token=%s", cfg.Token) + example_good: | + log.Printf("config: token=%s", redact(cfg.Token)) + first_seen: "2026-03-09" + detection: regex +``` + +**Step 2: Write `catalog/go-correctness.yaml`** + +```yaml +- id: go-cor-001 + title: "Goroutine without WaitGroup or context" + severity: high + languages: [go] + tags: [correctness, goroutine-leak] + pattern: 'go\s+func\s*\(' + exclude_pattern: 'wg\.|\.Go\(|context\.|done\s*<-|select\s*\{' + fix: "Use 
sync.WaitGroup.Go() or ensure goroutine has a shutdown signal" + found_in: [core/go] + example_bad: | + go func() { doWork() }() + example_good: | + wg.Go(func() { doWork() }) + first_seen: "2026-03-09" + detection: regex + +- id: go-cor-002 + title: "WaitGroup.Wait without context/timeout" + severity: high + languages: [go] + tags: [correctness, deadlock] + pattern: '\.Wait\(\)' + exclude_pattern: 'select\s*\{|ctx\.Done|context\.With|time\.After' + fix: "Wrap wg.Wait() in a select with context.Done() or timeout" + found_in: [core/go] + example_bad: | + wg.Wait() // blocks forever if goroutine hangs + example_good: | + done := make(chan struct{}) + go func() { wg.Wait(); close(done) }() + select { + case <-done: + case <-ctx.Done(): + } + first_seen: "2026-03-09" + detection: regex + +- id: go-cor-003 + title: "Silent error swallowing" + severity: medium + languages: [go] + tags: [correctness, error-handling] + pattern: '^\s*_\s*=\s*\w+\.\w+\(' + exclude_pattern: 'defer|Close\(|Flush\(' + fix: "Handle or propagate errors instead of discarding with _" + found_in: [go-process, go-ratelimit] + example_bad: | + _ = db.Save(record) + example_good: | + if err := db.Save(record); err != nil { + return fmt.Errorf("save record: %w", err) + } + first_seen: "2026-03-09" + detection: regex + +- id: go-cor-004 + title: "Panic in library code" + severity: medium + languages: [go] + tags: [correctness, panic] + pattern: '\bpanic\(' + exclude_pattern: '_test\.go|// unreachable|Must\w+\(' + fix: "Return errors instead of panicking in library code" + found_in: [go-i18n] + example_bad: | + func Parse(s string) *Node { panic("not implemented") } + example_good: | + func Parse(s string) (*Node, error) { return nil, fmt.Errorf("not implemented") } + first_seen: "2026-03-09" + detection: regex + +- id: go-cor-005 + title: "File deletion without path validation" + severity: high + languages: [go] + tags: [correctness, safety] + pattern: 'os\.Remove(All)?\(' + exclude_pattern: 
'filepath\.Clean|ValidatePath|strings\.Contains.*\.\.' + fix: "Validate path does not escape base directory before deletion" + found_in: [go-io] + example_bad: | + os.RemoveAll(filepath.Join(base, userInput)) + example_good: | + clean := filepath.Clean(filepath.Join(base, userInput)) + if !strings.HasPrefix(clean, base) { return ErrPathTraversal } + os.RemoveAll(clean) + first_seen: "2026-03-09" + detection: regex + +- id: go-cor-006 + title: "Missing error return from API/network calls" + severity: medium + languages: [go] + tags: [correctness, error-handling] + pattern: 'resp,\s*_\s*:=.*\.(Get|Post|Do|Send)\(' + fix: "Check and handle HTTP/API errors" + found_in: [go-forge, go-git] + example_bad: | + resp, _ := client.Get(url) + example_good: | + resp, err := client.Get(url) + if err != nil { return fmt.Errorf("api call: %w", err) } + first_seen: "2026-03-09" + detection: regex + +- id: go-cor-007 + title: "Signal handler uses wrong type" + severity: medium + languages: [go] + tags: [correctness, signals] + pattern: 'syscall\.Signal\b' + exclude_pattern: 'os\.Signal' + fix: "Use os.Signal for portable signal handling" + found_in: [go-process] + example_bad: | + func Handle(sig syscall.Signal) { ... } + example_good: | + func Handle(sig os.Signal) { ... } + first_seen: "2026-03-09" + detection: regex +``` + +**Step 3: Write `catalog/go-modernise.yaml`** + +```yaml +- id: go-mod-001 + title: "Manual slice clone via append([]T(nil)...)" + severity: low + languages: [go] + tags: [modernise, go126] + pattern: 'append\(\[\]\w+\(nil\),\s*\w+\.\.\.\)' + fix: "Use slices.Clone() from Go 1.21+" + found_in: [core/go] + example_bad: | + copy := append([]string(nil), original...) 
+ example_good: | + copy := slices.Clone(original) + first_seen: "2026-03-09" + detection: regex + +- id: go-mod-002 + title: "Manual sort of string/int slices" + severity: low + languages: [go] + tags: [modernise, go126] + pattern: 'sort\.Strings\(|sort\.Ints\(|sort\.Slice\(' + exclude_pattern: 'sort\.SliceStable' + fix: "Use slices.Sort() or slices.Sorted(iter) from Go 1.21+" + found_in: [core/go] + example_bad: | + sort.Strings(names) + example_good: | + slices.Sort(names) + first_seen: "2026-03-09" + detection: regex + +- id: go-mod-003 + title: "Manual reverse iteration loop" + severity: low + languages: [go] + tags: [modernise, go126] + pattern: 'for\s+\w+\s*:=\s*len\(\w+\)\s*-\s*1' + fix: "Use slices.Backward() from Go 1.23+" + found_in: [core/go] + example_bad: | + for i := len(items) - 1; i >= 0; i-- { use(items[i]) } + example_good: | + for _, item := range slices.Backward(items) { use(item) } + first_seen: "2026-03-09" + detection: regex + +- id: go-mod-004 + title: "WaitGroup Add+Done instead of Go()" + severity: low + languages: [go] + tags: [modernise, go126] + pattern: 'wg\.Add\(1\)' + fix: "Use sync.WaitGroup.Go() from Go 1.26" + found_in: [core/go] + example_bad: | + wg.Add(1) + go func() { defer wg.Done(); work() }() + example_good: | + wg.Go(func() { work() }) + first_seen: "2026-03-09" + detection: regex + +- id: go-mod-005 + title: "Manual map key collection" + severity: low + languages: [go] + tags: [modernise, go126] + pattern: 'for\s+\w+\s*:=\s*range\s+\w+\s*\{\s*\n\s*\w+\s*=\s*append' + exclude_pattern: 'maps\.Keys' + fix: "Use maps.Keys() or slices.Sorted(maps.Keys()) from Go 1.23+" + found_in: [core/go] + example_bad: | + var keys []string + for k := range m { keys = append(keys, k) } + example_good: | + keys := slices.Sorted(maps.Keys(m)) + first_seen: "2026-03-09" + detection: regex +``` + +**Step 4: Run all tests to verify catalog loads correctly** + +Run: `cd ~/Code/core/lint && go test ./pkg/lint/ -v` +Expected: PASS (all tests, 
including TestCatalog_LoadDir which reads the catalog/ dir) + +**Step 5: Commit** + +```bash +git add catalog/ +git commit -m "feat: seed catalog with 18 patterns from ecosystem sweep" +``` + +--- + +### Task 8: CLI binary with `cli.Main()` + +**Files:** +- Create: `/Users/snider/Code/core/lint/cmd/core-lint/main.go` +- Create: `/Users/snider/Code/core/lint/lint.go` (embed catalog + public API) + +**Step 1: Create the embed entry point** + +Create `/Users/snider/Code/core/lint/lint.go`: + +```go +package lint + +import ( + "embed" + + lintpkg "forge.lthn.ai/core/lint/pkg/lint" +) + +//go:embed catalog/*.yaml +var catalogFS embed.FS + +// LoadEmbeddedCatalog loads the built-in catalog from embedded YAML files. +func LoadEmbeddedCatalog() (*lintpkg.Catalog, error) { + return lintpkg.LoadFS(catalogFS, "catalog") +} +``` + +**Step 2: Create the CLI entry point** + +Create `/Users/snider/Code/core/lint/cmd/core-lint/main.go`: + +```go +package main + +import ( + "fmt" + "os" + + "forge.lthn.ai/core/cli/pkg/cli" + lint "forge.lthn.ai/core/lint" + lintpkg "forge.lthn.ai/core/lint/pkg/lint" +) + +func main() { + cli.Main( + cli.WithCommands("lint", addLintCommands), + ) +} + +func addLintCommands(root *cli.Command) { + lintCmd := &cli.Command{ + Use: "lint", + Short: "Pattern-based code checker", + } + root.AddCommand(lintCmd) + + // core-lint lint check [path...] 
+ lintCmd.AddCommand(cli.NewCommand( + "check [path...]", + "Run pattern checks against source files", + "Scans files for known anti-patterns from the catalog", + func(cmd *cli.Command, args []string) error { + format, _ := cmd.Flags().GetString("format") + lang, _ := cmd.Flags().GetString("lang") + severity, _ := cmd.Flags().GetString("severity") + + cat, err := lint.LoadEmbeddedCatalog() + if err != nil { + return fmt.Errorf("load catalog: %w", err) + } + + rules := cat.Rules + if lang != "" { + rules = cat.ForLanguage(lang) + } + if severity != "" { + filtered := (&lintpkg.Catalog{Rules: rules}).AtSeverity(severity) + rules = filtered + } + + scanner, err := lintpkg.NewScanner(rules) + if err != nil { + return fmt.Errorf("create scanner: %w", err) + } + + paths := args + if len(paths) == 0 { + paths = []string{"."} + } + + var allFindings []lintpkg.Finding + for _, p := range paths { + findings, err := scanner.ScanDir(p) + if err != nil { + return fmt.Errorf("scan %s: %w", p, err) + } + allFindings = append(allFindings, findings...) 
+ } + + switch format { + case "json": + return lintpkg.WriteJSON(os.Stdout, allFindings) + case "jsonl": + return lintpkg.WriteJSONL(os.Stdout, allFindings) + default: + lintpkg.WriteText(os.Stdout, allFindings) + } + + if len(allFindings) > 0 { + s := lintpkg.Summarise(allFindings) + fmt.Fprintf(os.Stderr, "\n%d findings", s.Total) + for sev, count := range s.BySeverity { + fmt.Fprintf(os.Stderr, " | %s: %d", sev, count) + } + fmt.Fprintln(os.Stderr) + } + return nil + }, + )) + + // Add flags to check command + checkCmd := lintCmd.Commands()[0] + checkCmd.Flags().StringP("format", "f", "text", "Output format: text, json, jsonl") + checkCmd.Flags().StringP("lang", "l", "", "Filter by language: go, php, ts, cpp") + checkCmd.Flags().StringP("severity", "s", "", "Minimum severity: critical, high, medium, low, info") + + // core-lint lint catalog + catalogCmd := &cli.Command{ + Use: "catalog", + Short: "Browse the pattern catalog", + } + lintCmd.AddCommand(catalogCmd) + + // core-lint lint catalog list + catalogCmd.AddCommand(cli.NewCommand( + "list", + "List available rules", + "", + func(cmd *cli.Command, args []string) error { + lang, _ := cmd.Flags().GetString("lang") + + cat, err := lint.LoadEmbeddedCatalog() + if err != nil { + return err + } + + rules := cat.Rules + if lang != "" { + rules = cat.ForLanguage(lang) + } + + for _, r := range rules { + fmt.Printf("%-12s [%s] %s\n", r.ID, r.Severity, r.Title) + } + fmt.Fprintf(os.Stderr, "\n%d rules\n", len(rules)) + return nil + }, + )) + catalogCmd.Commands()[0].Flags().StringP("lang", "l", "", "Filter by language") + + // core-lint lint catalog show + catalogCmd.AddCommand(cli.NewCommand( + "show [rule-id]", + "Show details for a specific rule", + "", + func(cmd *cli.Command, args []string) error { + if len(args) == 0 { + return fmt.Errorf("rule ID required") + } + cat, err := lint.LoadEmbeddedCatalog() + if err != nil { + return err + } + r := cat.ByID(args[0]) + if r == nil { + return fmt.Errorf("rule %s not 
found", args[0]) + } + fmt.Printf("ID: %s\n", r.ID) + fmt.Printf("Title: %s\n", r.Title) + fmt.Printf("Severity: %s\n", r.Severity) + fmt.Printf("Languages: %v\n", r.Languages) + fmt.Printf("Tags: %v\n", r.Tags) + fmt.Printf("Pattern: %s\n", r.Pattern) + if r.ExcludePattern != "" { + fmt.Printf("Exclude: %s\n", r.ExcludePattern) + } + fmt.Printf("Fix: %s\n", r.Fix) + if r.ExampleBad != "" { + fmt.Printf("\nBad:\n%s\n", r.ExampleBad) + } + if r.ExampleGood != "" { + fmt.Printf("Good:\n%s\n", r.ExampleGood) + } + return nil + }, + )) +} +``` + +**Step 3: Add cli dependency** + +```bash +cd ~/Code/core/lint +go get forge.lthn.ai/core/cli +go mod tidy +``` + +**Step 4: Build and smoke test** + +```bash +cd ~/Code/core/lint +go build -o ./bin/core-lint ./cmd/core-lint +./bin/core-lint lint catalog list +./bin/core-lint lint catalog show go-sec-001 +./bin/core-lint lint check --lang go --format json ~/Code/host-uk/core/pkg/core/ +``` + +Expected: Binary builds, catalog lists 18 rules, show displays rule details, check scans files. + +**Step 5: Commit** + +```bash +git add lint.go cmd/core-lint/main.go go.mod go.sum +git commit -m "feat: add core-lint CLI with check, catalog list, catalog show" +``` + +--- + +### Task 9: Run all tests, push to forge + +**Step 1: Run full test suite** + +```bash +cd ~/Code/core/lint +go test -race -count=1 ./... +``` + +Expected: PASS with race detector + +**Step 2: Run go vet** + +```bash +go vet ./... 
+``` + +Expected: No issues + +**Step 3: Build binary** + +```bash +go build -trimpath -o ./bin/core-lint ./cmd/core-lint +``` + +**Step 4: Smoke test against a real repo** + +```bash +./bin/core-lint lint check --lang go ~/Code/host-uk/core/pkg/core/ +./bin/core-lint lint check --lang go --severity high ~/Code/core/go-io/ +``` + +Expected: Any findings are displayed (or no findings if the repos are already clean from our sweep) + +**Step 5: Update go.work** + +```bash +# Add ./core/lint to ~/Code/go.work if not already there +cd ~/Code && go work sync +``` + +**Step 6: Push to forge** + +```bash +cd ~/Code/core/lint +git push -u origin main +``` + +**Step 7: Tag initial release** + +```bash +git tag v0.1.0 +git push origin v0.1.0 +``` diff --git a/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-12-altum-update-checker-design.md b/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-12-altum-update-checker-design.md new file mode 100644 index 0000000..a0bbe0d --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-12-altum-update-checker-design.md @@ -0,0 +1,160 @@ +# AltumCode Update Checker — Design + +> **Note:** Layer 1 (version detection via PHP artisan) is implemented and documented at `docs/docs/php/packages/uptelligence.md`. Layer 2 (browser-automated downloads via Claude Code skill) is NOT yet implemented. + +## Problem + +Host UK runs 4 AltumCode SaaS products and 13 plugins across two marketplaces (CodeCanyon + LemonSqueezy). Checking for updates and downloading them is a manual process: ~50 clicks across two marketplace UIs, moving 16+ zip files, extracting to the right directories. This eats a morning of momentum every update cycle. + +## Solution + +Two-layer system: lightweight version detection (PHP artisan command) + browser-automated download (Claude Code skill). 
+ +## Architecture + +``` +Layer 1: Detection (core/php-uptelligence) + artisan uptelligence:check-updates + 5 HTTP GETs, no auth, schedulable + Compares remote vs deployed versions + +Layer 2: Download (Claude Code skill) + Playwright → LemonSqueezy (16 items) + Claude in Chrome → CodeCanyon (2 items) + Downloads zips to staging folder + Extracts to saas/services/{product}/package/ + +Layer 3: Deploy (existing — manual) + docker build → scp → deploy_saas.yml + Human in the loop +``` + +## Layer 1: Version Detection + +### Public Endpoints (no auth required) + +| Endpoint | Returns | +|----------|---------| +| `GET https://66analytics.com/info.php` | `{"latest_release_version": "66.0.0", "latest_release_version_code": 6600}` | +| `GET https://66biolinks.com/info.php` | Same format | +| `GET https://66pusher.com/info.php` | Same format | +| `GET https://66socialproof.com/info.php` | Same format | +| `GET https://dev.altumcode.com/plugins-versions` | `{"affiliate": {"version": "2.0.1"}, "ultimate-blocks": {"version": "9.1.0"}, ...}` | + +### Deployed Version Sources + +- **Product version**: `PRODUCT_CODE` constant in deployed source `config.php` +- **Plugin versions**: `version` field in each plugin's `config.php` or `config.json` + +### Artisan Command + +`php artisan uptelligence:check-updates` + +Output: +``` +Product Deployed Latest Status +────────────────────────────────────────────── +66analytics 65.0.0 66.0.0 UPDATE AVAILABLE +66biolinks 65.0.0 66.0.0 UPDATE AVAILABLE +66pusher 65.0.0 65.0.0 ✓ current +66socialproof 65.0.0 66.0.0 UPDATE AVAILABLE + +Plugin Deployed Latest Status +────────────────────────────────────────────── +affiliate 2.0.0 2.0.1 UPDATE AVAILABLE +ultimate-blocks 9.1.0 9.1.0 ✓ current +... +``` + +Lives in `core/php-uptelligence` as a scheduled check or on-demand command. + +## Layer 2: Browser-Automated Download + +### Claude Code Skill: `/update-altum` + +Workflow: +1. Run version check (Layer 1) — show what needs updating +2. 
Ask for confirmation before downloading +3. Download from both marketplaces +4. Extract to staging directories +5. Report what changed + +### Marketplace Access + +**LemonSqueezy (Playwright)** +- Auth: Magic link email to `snider@lt.hn` — user taps on phone +- Flow per item: Navigate to order detail → click "Download" button +- 16 items across 2 pages of orders +- Session persists for the skill invocation + +**CodeCanyon (Claude in Chrome)** +- Auth: Saved browser session cookies (user `snidered`) +- Flow per item: Click "Download" dropdown → "All files & documentation" +- 2 items on downloads page + +### Product-to-Marketplace Mapping + +| Product | CodeCanyon | LemonSqueezy | +|---------|-----------|--------------| +| 66biolinks | Regular licence | Extended licence (66biolinks custom, $359.28) | +| 66socialproof | Regular licence | — | +| 66analytics | — | Regular licence | +| 66pusher | — | Regular licence | + +### Plugin Inventory (all LemonSqueezy) + +| Plugin | Price | Applies To | +|--------|-------|------------| +| Pro Notifications | $58.80 | 66socialproof | +| Teams Plugin | $58.80 | All products | +| Push Notifications Plugin | $46.80 | All products | +| Ultimate Blocks | $32.40 | 66biolinks | +| Pro Blocks | $32.40 | 66biolinks | +| Payment Blocks | $32.40 | 66biolinks | +| Affiliate Plugin | $32.40 | All products | +| PWA Plugin | $25.20 | All products | +| Image Optimizer Plugin | $19.20 | All products | +| Email Shield Plugin | FREE | All products | +| Dynamic OG images plugin | FREE | 66biolinks | +| Offload & CDN Plugin | FREE | All products (gift from Altum) | + +### Staging & Extraction + +- Download to: `~/Code/lthn/saas/updates/YYYY-MM-DD/` +- Products extract to: `~/Code/lthn/saas/services/{product}/package/product/` +- Plugins extract to: `~/Code/lthn/saas/services/{product}/package/product/plugins/{plugin_id}/` + +## LemonSqueezy Order UUIDs + +Stable order URLs for direct navigation: + +| Product | Order URL | +|---------|-----------| +| 
66analytics | `/my-orders/2972471f-abac-4165-b78d-541b176de180` | + +(Remaining UUIDs to be captured on first full run of the skill.) + +## Out of Scope + +- No auto-deploy to production (human runs `deploy_saas.yml`) +- No licence key handling or financial transactions +- No AltumCode Club membership management +- No Blesta updates (different vendor) +- No update SQL migration execution (handled by AltumCode's own update scripts) + +## Key Technical Details + +- AltumCode products use Unirest HTTP client for API calls +- Product `info.php` endpoints are public, no rate limiting observed +- Plugin versions endpoint (`dev.altumcode.com`) is also public +- Production Docker images have `/install/` and `/update/` directories stripped +- Updates require full Docker image rebuild and redeployment via Ansible +- CodeCanyon download URLs contain stable purchase UUIDs +- LemonSqueezy uses magic link auth (no password, email-based) +- Playwright can access LemonSqueezy; Claude in Chrome cannot (payment platform safety block) + +## Workflow Summary + +**Before**: Get email from AltumCode → log into 2 marketplaces → click through 18 products/plugins → download 16+ zips → extract to right directories → rebuild Docker images → deploy. Half a morning. + +**After**: Run `artisan uptelligence:check-updates` → see what's behind → invoke `/update-altum` → tap magic link on phone → go make coffee → come back to staged files → `deploy_saas.yml`. 10 minutes of human time. 
diff --git a/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-12-altum-update-checker-plan.md b/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-12-altum-update-checker-plan.md new file mode 100644 index 0000000..37ecb28 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/plans/2026-03-12-altum-update-checker-plan.md @@ -0,0 +1,799 @@ +# AltumCode Update Checker Implementation Plan + +> **Note:** Layer 1 (Tasks 1-2, 4: version checking + seeder + sync command) is implemented and documented at `docs/docs/php/packages/uptelligence.md`. Task 3 (Claude Code browser skill for Layer 2 downloads) is NOT yet implemented. + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Add AltumCode product + plugin version checking to uptelligence, and create a Claude Code skill for browser-automated downloads from LemonSqueezy and CodeCanyon. + +**Architecture:** Extend the existing `VendorUpdateCheckerService` to handle `PLATFORM_ALTUM` vendors via 5 public HTTP endpoints. Seed the vendors table with all 4 products and 13 plugins. Create a Claude Code plugin skill that uses Playwright (LemonSqueezy) and Chrome (CodeCanyon) to download updates. 
+
+**Tech Stack:** PHP 8.4, Laravel, Pest, Claude Code plugins (Playwright MCP + Chrome MCP)
+
+---
+
+### Task 1: Add AltumCode check to VendorUpdateCheckerService
+
+**Files:**
+- Modify: `/Users/snider/Code/core/php-uptelligence/Services/VendorUpdateCheckerService.php`
+- Test: `/Users/snider/Code/core/php-uptelligence/tests/Unit/AltumCodeCheckerTest.php`
+
+**Step 1: Write the failing test**
+
+Create `/Users/snider/Code/core/php-uptelligence/tests/Unit/AltumCodeCheckerTest.php`:
+
+```php
+<?php
+
+use Core\Mod\Uptelligence\Models\Vendor;
+use Core\Mod\Uptelligence\Services\VendorUpdateCheckerService;
+use Illuminate\Support\Facades\Http;
+
+beforeEach(function () {
+    $this->service = app(VendorUpdateCheckerService::class);
+});
+
+it('checks altum product version via info.php', function () {
+    Http::fake([
+        'https://66analytics.com/info.php' => Http::response([
+            'latest_release_version' => '66.0.0',
+            'latest_release_version_code' => 6600,
+        ]),
+    ]);
+
+    $vendor = Vendor::factory()->create([
+        'slug' => '66analytics',
+        'name' => '66analytics',
+        'source_type' => Vendor::SOURCE_LICENSED,
+        'plugin_platform' => Vendor::PLATFORM_ALTUM,
+        'current_version' => '65.0.0',
+        'is_active' => true,
+    ]);
+
+    $result = $this->service->checkVendor($vendor);
+
+    expect($result['status'])->toBe('success')
+        ->and($result['current'])->toBe('65.0.0')
+        ->and($result['latest'])->toBe('66.0.0')
+        ->and($result['has_update'])->toBeTrue();
+});
+
+it('reports no update when altum product is current', function () {
+    Http::fake([
+        'https://66analytics.com/info.php' => Http::response([
+            'latest_release_version' => '65.0.0',
+            'latest_release_version_code' => 6500,
+        ]),
+    ]);
+
+    $vendor = Vendor::factory()->create([
+        'slug' => '66analytics',
+        'name' => '66analytics',
+        'source_type' => Vendor::SOURCE_LICENSED,
+        'plugin_platform' => Vendor::PLATFORM_ALTUM,
+        'current_version' => '65.0.0',
+        'is_active' => true,
+    ]);
+
+    $result = $this->service->checkVendor($vendor);
+
+    expect($result['has_update'])->toBeFalse();
+});
+
+it('checks altum plugin versions via plugins-versions endpoint', function () {
+    Http::fake([
+        
'https://dev.altumcode.com/plugins-versions' => Http::response([
+            'affiliate' => ['version' => '2.0.1'],
+            'teams' => ['version' => '3.0.0'],
+        ]),
+    ]);
+
+    $vendor = Vendor::factory()->create([
+        'slug' => 'altum-plugin-affiliate',
+        'name' => 'Affiliate Plugin',
+        'source_type' => Vendor::SOURCE_PLUGIN,
+        'plugin_platform' => Vendor::PLATFORM_ALTUM,
+        'current_version' => '2.0.0',
+        'is_active' => true,
+    ]);
+
+    $result = $this->service->checkVendor($vendor);
+
+    expect($result['status'])->toBe('success')
+        ->and($result['latest'])->toBe('2.0.1')
+        ->and($result['has_update'])->toBeTrue();
+});
+
+it('handles altum info.php server errors gracefully', function () {
+    Http::fake([
+        'https://66analytics.com/info.php' => Http::response('', 500),
+    ]);
+
+    $vendor = Vendor::factory()->create([
+        'slug' => '66analytics',
+        'name' => '66analytics',
+        'source_type' => Vendor::SOURCE_LICENSED,
+        'plugin_platform' => Vendor::PLATFORM_ALTUM,
+        'current_version' => '65.0.0',
+        'is_active' => true,
+    ]);
+
+    $result = $this->service->checkVendor($vendor);
+
+    expect($result['status'])->toBe('error')
+        ->and($result['has_update'])->toBeFalse();
+});
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=AltumCodeChecker`
+Expected: FAIL — altum vendors still hit `skipCheck()`
+
+**Step 3: Write minimal implementation**
+
+In `/Users/snider/Code/core/php-uptelligence/Services/VendorUpdateCheckerService.php`, modify `checkVendor()` to route altum vendors:
+
+```php
+public function checkVendor(Vendor $vendor): array
+{
+    $result = match (true) {
+        $this->isAltumPlatform($vendor) && $vendor->isLicensed() => $this->checkAltumProduct($vendor),
+        $this->isAltumPlatform($vendor) && $vendor->isPlugin() => $this->checkAltumPlugin($vendor),
+        $vendor->isOss() && $this->isGitHubUrl($vendor->git_repo_url) => $this->checkGitHub($vendor),
+        $vendor->isOss() && $this->isGiteaUrl($vendor->git_repo_url) => 
$this->checkGitea($vendor), + default => $this->skipCheck($vendor), + }; + + // ... rest unchanged +} +``` + +Add the three new methods: + +```php +/** + * Check if vendor is on the AltumCode platform. + */ +protected function isAltumPlatform(Vendor $vendor): bool +{ + return $vendor->plugin_platform === Vendor::PLATFORM_ALTUM; +} + +/** + * AltumCode product info endpoint mapping. + */ +protected function getAltumProductInfoUrl(Vendor $vendor): ?string +{ + $urls = [ + '66analytics' => 'https://66analytics.com/info.php', + '66biolinks' => 'https://66biolinks.com/info.php', + '66pusher' => 'https://66pusher.com/info.php', + '66socialproof' => 'https://66socialproof.com/info.php', + ]; + + return $urls[$vendor->slug] ?? null; +} + +/** + * Check an AltumCode product for updates via its info.php endpoint. + */ +protected function checkAltumProduct(Vendor $vendor): array +{ + $url = $this->getAltumProductInfoUrl($vendor); + if (! $url) { + return $this->errorResult("No info.php URL mapped for {$vendor->slug}"); + } + + try { + $response = Http::timeout(5)->get($url); + + if (! $response->successful()) { + return $this->errorResult("AltumCode info.php returned {$response->status()}"); + } + + $data = $response->json(); + $latestVersion = $data['latest_release_version'] ?? null; + + if (! $latestVersion) { + return $this->errorResult('No version in info.php response'); + } + + return $this->buildResult( + vendor: $vendor, + latestVersion: $this->normaliseVersion($latestVersion), + releaseInfo: [ + 'version_code' => $data['latest_release_version_code'] ?? null, + 'source' => $url, + ] + ); + } catch (\Exception $e) { + return $this->errorResult("AltumCode check failed: {$e->getMessage()}"); + } +} + +/** + * Check an AltumCode plugin for updates via the central plugins-versions endpoint. 
+ */
+protected function checkAltumPlugin(Vendor $vendor): array
+{
+    try {
+        $allPlugins = $this->getAltumPluginVersions();
+
+        if ($allPlugins === null) {
+            return $this->errorResult('Failed to fetch AltumCode plugin versions');
+        }
+
+        // Extract the plugin_id from the vendor slug (strip 'altum-plugin-' prefix)
+        $pluginId = str_replace('altum-plugin-', '', $vendor->slug);
+
+        if (! isset($allPlugins[$pluginId])) {
+            return $this->errorResult("Plugin '{$pluginId}' not found in AltumCode registry");
+        }
+
+        $latestVersion = $allPlugins[$pluginId]['version'] ?? null;
+
+        return $this->buildResult(
+            vendor: $vendor,
+            latestVersion: $this->normaliseVersion($latestVersion),
+            releaseInfo: ['source' => 'dev.altumcode.com/plugins-versions']
+        );
+    } catch (\Exception $e) {
+        return $this->errorResult("AltumCode plugin check failed: {$e->getMessage()}");
+    }
+}
+
+/**
+ * Fetch all AltumCode plugin versions (memoised in-memory for the lifetime of this checker instance).
+ */
+protected ?array $altumPluginVersionsCache = null;
+
+protected function getAltumPluginVersions(): ?array
+{
+    if ($this->altumPluginVersionsCache !== null) {
+        return $this->altumPluginVersionsCache;
+    }
+
+    $response = Http::timeout(5)->get('https://dev.altumcode.com/plugins-versions');
+
+    if (! $response->successful()) {
+        return null;
+    }
+
+    $this->altumPluginVersionsCache = $response->json();
+
+    return $this->altumPluginVersionsCache;
+}
+```
+
+**Step 4: Run test to verify it passes**
+
+Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=AltumCodeChecker`
+Expected: PASS (4 tests)
+
+**Step 5: Commit**
+
+```bash
+cd /Users/snider/Code/core/php-uptelligence
+git add Services/VendorUpdateCheckerService.php tests/Unit/AltumCodeCheckerTest.php
+git commit -m "feat: add AltumCode product + plugin version checking
+
+Extends VendorUpdateCheckerService to check AltumCode products via
+their info.php endpoints and plugins via dev.altumcode.com/plugins-versions.
+No auth required — all endpoints are public.
+
+Co-Authored-By: Virgil "
+```
+
+---
+
+### Task 2: Seed AltumCode vendors
+
+**Files:**
+- Create: `/Users/snider/Code/core/php-uptelligence/database/seeders/AltumCodeVendorSeeder.php`
+- Test: `/Users/snider/Code/core/php-uptelligence/tests/Unit/AltumCodeVendorSeederTest.php`
+
+**Step 1: Write the failing test**
+
+Create `/Users/snider/Code/core/php-uptelligence/tests/Unit/AltumCodeVendorSeederTest.php`:
+
+```php
+<?php
+
+use Core\Mod\Uptelligence\Models\Vendor;
+
+it('seeds 4 altum products', function () {
+    $this->artisan('db:seed', ['--class' => 'Core\\Mod\\Uptelligence\\Database\\Seeders\\AltumCodeVendorSeeder']);
+
+    expect(Vendor::where('source_type', Vendor::SOURCE_LICENSED)
+        ->where('plugin_platform', Vendor::PLATFORM_ALTUM)
+        ->count()
+    )->toBe(4);
+});
+
+it('seeds 13 altum plugins', function () {
+    $this->artisan('db:seed', ['--class' => 'Core\\Mod\\Uptelligence\\Database\\Seeders\\AltumCodeVendorSeeder']);
+
+    expect(Vendor::where('source_type', Vendor::SOURCE_PLUGIN)
+        ->where('plugin_platform', Vendor::PLATFORM_ALTUM)
+        ->count()
+    )->toBe(13);
+});
+
+it('is idempotent', function () {
+    $this->artisan('db:seed', ['--class' => 'Core\\Mod\\Uptelligence\\Database\\Seeders\\AltumCodeVendorSeeder']);
+    $this->artisan('db:seed', ['--class' => 'Core\\Mod\\Uptelligence\\Database\\Seeders\\AltumCodeVendorSeeder']);
+
+    expect(Vendor::where('plugin_platform', Vendor::PLATFORM_ALTUM)->count())->toBe(17);
+});
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=AltumCodeVendorSeeder`
+Expected: FAIL — seeder class not found
+
+**Step 3: Write minimal implementation**
+
+Create `/Users/snider/Code/core/php-uptelligence/database/seeders/AltumCodeVendorSeeder.php`:
+
+```php
+<?php
+
+namespace Core\Mod\Uptelligence\Database\Seeders;
+
+use Core\Mod\Uptelligence\Models\Vendor;
+use Illuminate\Database\Seeder;
+
+class AltumCodeVendorSeeder extends Seeder
+{
+    public function run(): void
+    {
+        $products = [
+            ['slug' => '66analytics', 'name' => '66analytics', 'vendor_name' => 'AltumCode', 'current_version' => '65.0.0'],
+            ['slug' => '66biolinks', 'name' => '66biolinks', 'vendor_name' => 'AltumCode', 'current_version' => '65.0.0'],
+            ['slug' => '66pusher', 'name' => '66pusher', 
'vendor_name' => 'AltumCode', 'current_version' => '65.0.0'], + ['slug' => '66socialproof', 'name' => '66socialproof', 'vendor_name' => 'AltumCode', 'current_version' => '65.0.0'], + ]; + + foreach ($products as $product) { + Vendor::updateOrCreate( + ['slug' => $product['slug']], + [ + ...$product, + 'source_type' => Vendor::SOURCE_LICENSED, + 'plugin_platform' => Vendor::PLATFORM_ALTUM, + 'is_active' => true, + ] + ); + } + + $plugins = [ + ['slug' => 'altum-plugin-affiliate', 'name' => 'Affiliate Plugin', 'current_version' => '2.0.0'], + ['slug' => 'altum-plugin-push-notifications', 'name' => 'Push Notifications Plugin', 'current_version' => '1.0.0'], + ['slug' => 'altum-plugin-teams', 'name' => 'Teams Plugin', 'current_version' => '1.0.0'], + ['slug' => 'altum-plugin-pwa', 'name' => 'PWA Plugin', 'current_version' => '1.0.0'], + ['slug' => 'altum-plugin-image-optimizer', 'name' => 'Image Optimizer Plugin', 'current_version' => '3.1.0'], + ['slug' => 'altum-plugin-email-shield', 'name' => 'Email Shield Plugin', 'current_version' => '1.0.0'], + ['slug' => 'altum-plugin-dynamic-og-images', 'name' => 'Dynamic OG Images Plugin', 'current_version' => '1.0.0'], + ['slug' => 'altum-plugin-offload', 'name' => 'Offload & CDN Plugin', 'current_version' => '1.0.0'], + ['slug' => 'altum-plugin-payment-blocks', 'name' => 'Payment Blocks Plugin', 'current_version' => '1.0.0'], + ['slug' => 'altum-plugin-ultimate-blocks', 'name' => 'Ultimate Blocks Plugin', 'current_version' => '9.1.0'], + ['slug' => 'altum-plugin-pro-blocks', 'name' => 'Pro Blocks Plugin', 'current_version' => '1.0.0'], + ['slug' => 'altum-plugin-pro-notifications', 'name' => 'Pro Notifications Plugin', 'current_version' => '1.0.0'], + ['slug' => 'altum-plugin-aix', 'name' => 'AIX Plugin', 'current_version' => '1.0.0'], + ]; + + foreach ($plugins as $plugin) { + Vendor::updateOrCreate( + ['slug' => $plugin['slug']], + [ + ...$plugin, + 'vendor_name' => 'AltumCode', + 'source_type' => Vendor::SOURCE_PLUGIN, + 
'plugin_platform' => Vendor::PLATFORM_ALTUM, + 'is_active' => true, + ] + ); + } + } +} +``` + +**Step 4: Run test to verify it passes** + +Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=AltumCodeVendorSeeder` +Expected: PASS (3 tests) + +**Step 5: Commit** + +```bash +cd /Users/snider/Code/core/php-uptelligence +git add database/seeders/AltumCodeVendorSeeder.php tests/Unit/AltumCodeVendorSeederTest.php +git commit -m "feat: seed AltumCode vendors — 4 products + 13 plugins + +Idempotent seeder using updateOrCreate. Products are SOURCE_LICENSED, +plugins are SOURCE_PLUGIN, all PLATFORM_ALTUM. Version numbers will +need updating to match actual deployed versions. + +Co-Authored-By: Virgil " +``` + +--- + +### Task 3: Create Claude Code plugin skill for downloads + +**Files:** +- Create: `/Users/snider/.claude/plugins/altum-updater/plugin.json` +- Create: `/Users/snider/.claude/plugins/altum-updater/skills/update-altum.md` + +**Step 1: Create plugin manifest** + +Create `/Users/snider/.claude/plugins/altum-updater/plugin.json`: + +```json +{ + "name": "altum-updater", + "description": "Download AltumCode product and plugin updates from LemonSqueezy and CodeCanyon", + "version": "0.1.0", + "skills": [ + { + "name": "update-altum", + "path": "skills/update-altum.md", + "description": "Download AltumCode product and plugin updates from marketplaces. Use when the user mentions updating AltumCode products, downloading from LemonSqueezy or CodeCanyon, or running the update checker." 
+ } + ] +} +``` + +**Step 2: Create skill file** + +Create `/Users/snider/.claude/plugins/altum-updater/skills/update-altum.md`: + +```markdown +--- +name: update-altum +description: Download AltumCode product and plugin updates from LemonSqueezy and CodeCanyon +--- + +# AltumCode Update Downloader + +## Overview + +Downloads updated AltumCode products and plugins from two marketplaces: +- **LemonSqueezy** (Playwright): 66analytics, 66pusher, 66biolinks (extended), 13 plugins +- **CodeCanyon** (Claude in Chrome): 66biolinks (regular), 66socialproof + +## Pre-flight + +1. Run `php artisan uptelligence:check-updates --vendor=66analytics` (or check all) to see what needs updating +2. Show the user the version comparison table +3. Ask which products/plugins to download + +## LemonSqueezy Download Flow (Playwright) + +LemonSqueezy uses magic link auth. The user will need to tap the link on their phone. + +1. Navigate to `https://app.lemonsqueezy.com/my-orders` +2. If on login page, fill email `snider@lt.hn` and click Sign In +3. Tell user: "Magic link sent — tap the link on your phone" +4. Wait for redirect to orders page +5. For each product/plugin that needs updating: + a. Click the product link on the orders page (paginated — 10 per page, 2 pages) + b. In the order detail, find the "Download" button under "Files" + c. Click Download — file saves to default downloads folder +6. 
Move downloaded zips to staging: `~/Code/lthn/saas/updates/YYYY-MM-DD/` + +### LemonSqueezy Product Names (as shown on orders page) + +| Our Name | LemonSqueezy Order Name | +|----------|------------------------| +| 66analytics | "66analytics - Regular License" | +| 66pusher | "66pusher - Regular License" | +| 66biolinks (extended) | "66biolinks custom" | +| Affiliate Plugin | "Affiliate Plugin" | +| Push Notifications Plugin | "Push Notifications Plugin" | +| Teams Plugin | "Teams Plugin" | +| PWA Plugin | "PWA Plugin" | +| Image Optimizer Plugin | "Image Optimizer Plugin" | +| Email Shield Plugin | "Email Shield Plugin" | +| Dynamic OG Images | "Dynamic OG images plugin" | +| Offload & CDN | "Offload & CDN Plugin" | +| Payment Blocks | "Payment Blocks - 66biolinks plugin" | +| Ultimate Blocks | "Ultimate Blocks - 66biolinks plugin" | +| Pro Blocks | "Pro Blocks - 66biolinks plugin" | +| Pro Notifications | "Pro Notifications - 66socialproof plugin" | +| AltumCode Club | "The AltumCode Club" | + +## CodeCanyon Download Flow (Claude in Chrome) + +CodeCanyon uses saved browser session cookies (user: snidered). + +1. Navigate to `https://codecanyon.net/downloads` +2. Dismiss cookie banner if present (click "Reject all") +3. For 66socialproof: + a. Find "66socialproof" Download button + b. Click the dropdown arrow + c. Click "All files & documentation" +4. For 66biolinks: + a. Find "66biolinks" Download button (scroll down) + b. Click the dropdown arrow + c. Click "All files & documentation" +5. Move downloaded zips to staging + +### CodeCanyon Download URLs (stable) + +- 66socialproof: `/user/snidered/download_purchase/8d8ef4c1-5add-4eba-9a89-4261a9c87e0b` +- 66biolinks: `/user/snidered/download_purchase/38d79f4e-19cd-480a-b068-4332629b5206` + +## Post-Download + +1. List all zips in staging folder +2. For each product zip: + - Extract to `~/Code/lthn/saas/services/{product}/package/product/` +3. 
For each plugin zip: + - Extract to the correct product's `plugins/{plugin_id}/` directory + - Note: Some plugins apply to multiple products (affiliate, teams, etc.) +4. Show summary of what was updated +5. Remind user: "Files staged. Run `deploy_saas.yml` when ready to deploy." + +## Important Notes + +- Never make purchases or enter financial information +- LemonSqueezy session expires — if Playwright gets a login page mid-flow, re-trigger magic link +- CodeCanyon session depends on Chrome cookies — if logged out, tell user to log in manually +- The AltumCode Club subscription is NOT a downloadable product — skip it +- Plugin `aix` may not appear on LemonSqueezy (bundled with products) — skip if not found +``` + +**Step 3: Verify plugin loads** + +Run: `claude` in a new terminal, then type `/update-altum` to verify the skill is discovered. + +**Step 4: Commit** + +```bash +cd /Users/snider/.claude/plugins/altum-updater +git init +git add plugin.json skills/update-altum.md +git commit -m "feat: altum-updater Claude Code plugin — marketplace download skill + +Playwright for LemonSqueezy, Chrome for CodeCanyon. Includes full +product/plugin mapping and download flow documentation. 
+ +Co-Authored-By: Virgil " +``` + +--- + +### Task 4: Sync deployed plugin versions from source + +**Files:** +- Create: `/Users/snider/Code/core/php-uptelligence/Console/SyncAltumVersionsCommand.php` +- Modify: `/Users/snider/Code/core/php-uptelligence/Boot.php` (register command) +- Test: `/Users/snider/Code/core/php-uptelligence/tests/Unit/SyncAltumVersionsCommandTest.php` + +**Step 1: Write the failing test** + +```php +artisan('uptelligence:sync-altum-versions', ['--dry-run' => true]) + ->assertExitCode(0); +}); +``` + +**Step 2: Run test to verify it fails** + +Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=SyncAltumVersions` +Expected: FAIL — command not found + +**Step 3: Write minimal implementation** + +Create `/Users/snider/Code/core/php-uptelligence/Console/SyncAltumVersionsCommand.php`: + +```php + '66analytics/package/product', + '66biolinks' => '66biolinks/package/product', + '66pusher' => '66pusher/package/product', + '66socialproof' => '66socialproof/package/product', + ]; + + public function handle(): int + { + $basePath = $this->option('path') + ?? env('SAAS_SERVICES_PATH', base_path('../lthn/saas/services')); + $dryRun = $this->option('dry-run'); + + $this->info('Syncing AltumCode versions from source...'); + $this->newLine(); + + $updates = []; + + // Sync product versions + foreach ($this->productPaths as $slug => $relativePath) { + $productPath = rtrim($basePath, '/') . '/' . $relativePath; + $version = $this->readProductVersion($productPath); + + if ($version) { + $updates[] = $this->syncVendorVersion($slug, $version, $dryRun); + } else { + $this->warn(" Could not read version for {$slug} at {$productPath}"); + } + } + + // Sync plugin versions — read from biolinks as canonical source + $biolinkPluginsPath = rtrim($basePath, '/') . '/66biolinks/package/product/plugins'; + if (is_dir($biolinkPluginsPath)) { + foreach (glob($biolinkPluginsPath . 
'/*/config.php') as $configFile) { + $pluginId = basename(dirname($configFile)); + $version = $this->readPluginVersion($configFile); + + if ($version) { + $slug = "altum-plugin-{$pluginId}"; + $updates[] = $this->syncVendorVersion($slug, $version, $dryRun); + } + } + } + + // Output table + $this->table( + ['Vendor', 'Old Version', 'New Version', 'Status'], + array_filter($updates) + ); + + if ($dryRun) { + $this->warn('Dry run — no changes written.'); + } + + return self::SUCCESS; + } + + protected function readProductVersion(string $productPath): ?string + { + // Read version from app/init.php or similar — look for PRODUCT_VERSION define + $initFile = $productPath . '/app/init.php'; + if (! file_exists($initFile)) { + return null; + } + + $content = file_get_contents($initFile); + if (preg_match("/define\('PRODUCT_VERSION',\s*'([^']+)'\)/", $content, $matches)) { + return $matches[1]; + } + + return null; + } + + protected function readPluginVersion(string $configFile): ?string + { + if (! file_exists($configFile)) { + return null; + } + + $content = file_get_contents($configFile); + + // PHP config format: 'version' => '2.0.0' + if (preg_match("/'version'\s*=>\s*'([^']+)'/", $content, $matches)) { + return $matches[1]; + } + + return null; + } + + protected function syncVendorVersion(string $slug, string $version, bool $dryRun): ?array + { + $vendor = Vendor::where('slug', $slug)->first(); + if (! $vendor) { + return [$slug, '(not in DB)', $version, 'SKIPPED']; + } + + $oldVersion = $vendor->current_version; + if ($oldVersion === $version) { + return [$slug, $oldVersion, $version, 'current']; + } + + if (! $dryRun) { + $vendor->update(['current_version' => $version]); + } + + return [$slug, $oldVersion ?? '(none)', $version, $dryRun ? 
'WOULD UPDATE' : 'UPDATED']; + } +} +``` + +Register in Boot.php — add to `onConsole()`: + +```php +$event->command(Console\SyncAltumVersionsCommand::class); +``` + +**Step 4: Run test to verify it passes** + +Run: `cd /Users/snider/Code/core/php-uptelligence && composer test -- --filter=SyncAltumVersions` +Expected: PASS + +**Step 5: Commit** + +```bash +cd /Users/snider/Code/core/php-uptelligence +git add Console/SyncAltumVersionsCommand.php Boot.php tests/Unit/SyncAltumVersionsCommandTest.php +git commit -m "feat: sync deployed AltumCode versions from source files + +Reads PRODUCT_VERSION from product init.php and plugin versions from +config.php files. Updates uptelligence_vendors table so check-updates +knows what's actually deployed. + +Co-Authored-By: Virgil " +``` + +--- + +### Task 5: End-to-end verification + +**Step 1: Seed vendors on local dev** + +```bash +cd /Users/snider/Code/lab/host.uk.com +php artisan db:seed --class="Core\Mod\Uptelligence\Database\Seeders\AltumCodeVendorSeeder" +``` + +**Step 2: Sync actual deployed versions** + +```bash +php artisan uptelligence:sync-altum-versions --path=/Users/snider/Code/lthn/saas/services +``` + +**Step 3: Run the update check** + +```bash +php artisan uptelligence:check-updates +``` + +Expected: Table showing current vs latest versions for all 17 AltumCode vendors. + +**Step 4: Test the skill** + +Open a new Claude Code session and run `/update-altum` to verify the skill loads and shows the workflow. + +**Step 5: Commit any fixes** + +```bash +git add -A && git commit -m "fix: adjustments from end-to-end testing" +``` diff --git a/pkg/lib/workspace/default/.core/reference/docs/primitives.md b/pkg/lib/workspace/default/.core/reference/docs/primitives.md new file mode 100644 index 0000000..43701f2 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/primitives.md @@ -0,0 +1,169 @@ +--- +title: Core Primitives +description: The repeated shapes that make CoreGO easy to navigate. 
+--- + +# Core Primitives + +CoreGO is easiest to use when you read it as a small vocabulary repeated everywhere. Most of the framework is built from the same handful of types. + +## Primitive Map + +| Type | Used For | +|------|----------| +| `Options` | Input values and lightweight metadata | +| `Result` | Output values and success state | +| `Service` | Lifecycle-managed components | +| `Message` | Broadcast events | +| `Query` | Request-response lookups | +| `Task` | Side-effecting work items | + +## `Option` and `Options` + +`Option` is one key-value pair. `Options` is an ordered slice of them. + +```go +opts := core.Options{ + {Key: "name", Value: "brain"}, + {Key: "path", Value: "prompts"}, + {Key: "debug", Value: true}, +} +``` + +Use the helpers to read values: + +```go +name := opts.String("name") +path := opts.String("path") +debug := opts.Bool("debug") +hasPath := opts.Has("path") +raw := opts.Get("name") +``` + +### Important Details + +- `Get` returns the first matching key. +- `String`, `Int`, and `Bool` do not convert between types. +- Missing keys return zero values. +- CLI flags with values are stored as strings, so `--port=8080` should be read with `opts.String("port")`, not `opts.Int("port")`. + +## `Result` + +`Result` is the universal return shape. + +```go +r := core.Result{Value: "ready", OK: true} + +if r.OK { + fmt.Println(r.Value) +} +``` + +It has two jobs: + +- carry a value when work succeeds +- carry either an error or an empty state when work does not succeed + +### `Result.Result(...)` + +The `Result()` method adapts plain Go values and `(value, error)` pairs into a `core.Result`. + +```go +r1 := core.Result{}.Result("hello") +r2 := core.Result{}.Result(file, err) +``` + +This is how several built-in helpers bridge standard-library calls. + +## `Service` + +`Service` is the managed lifecycle DTO stored in the registry. 
+ +```go +svc := core.Service{ + Name: "cache", + Options: core.Options{ + {Key: "backend", Value: "memory"}, + }, + OnStart: func() core.Result { + return core.Result{OK: true} + }, + OnStop: func() core.Result { + return core.Result{OK: true} + }, + OnReload: func() core.Result { + return core.Result{OK: true} + }, +} +``` + +### Important Details + +- `OnStart` and `OnStop` are used by the framework lifecycle. +- `OnReload` is stored on the service DTO, but CoreGO does not currently call it automatically. +- The registry stores `*core.Service`, not arbitrary typed service instances. + +## `Message`, `Query`, and `Task` + +These are simple aliases to `any`. + +```go +type Message any +type Query any +type Task any +``` + +That means your own structs become the protocol: + +```go +type deployStarted struct { + Environment string +} + +type workspaceCountQuery struct{} + +type syncRepositoryTask struct { + Name string +} +``` + +## `TaskWithIdentifier` + +Long-running tasks can opt into task identifiers. + +```go +type indexedTask struct { + ID string +} + +func (t *indexedTask) SetTaskIdentifier(id string) { t.ID = id } +func (t *indexedTask) GetTaskIdentifier() string { return t.ID } +``` + +If a task implements `TaskWithIdentifier`, `PerformAsync` injects the generated `task-N` identifier before dispatch. + +## `ServiceRuntime[T]` + +`ServiceRuntime[T]` is the small helper for packages that want to keep a Core reference and a typed options struct together. + +```go +type agentServiceOptions struct { + WorkspacePath string +} + +type agentService struct { + *core.ServiceRuntime[agentServiceOptions] +} + +runtime := core.NewServiceRuntime(c, agentServiceOptions{ + WorkspacePath: "/srv/agent-workspaces", +}) +``` + +It exposes: + +- `Core()` +- `Options()` +- `Config()` + +This helper does not register anything by itself. It is a composition aid for package authors. 
diff --git a/pkg/lib/workspace/default/.core/reference/docs/services.md b/pkg/lib/workspace/default/.core/reference/docs/services.md new file mode 100644 index 0000000..ad95d64 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/services.md @@ -0,0 +1,152 @@ +--- +title: Services +description: Register, inspect, and lock CoreGO services. +--- + +# Services + +In CoreGO, a service is a named lifecycle entry stored in the Core registry. + +## Register a Service + +```go +c := core.New() + +r := c.Service("audit", core.Service{ + OnStart: func() core.Result { + core.Info("audit started") + return core.Result{OK: true} + }, + OnStop: func() core.Result { + core.Info("audit stopped") + return core.Result{OK: true} + }, +}) +``` + +Registration succeeds when: + +- the name is not empty +- the registry is not locked +- the name is not already in use + +## Read a Service Back + +```go +r := c.Service("audit") +if r.OK { + svc := r.Value.(*core.Service) + _ = svc +} +``` + +The returned value is `*core.Service`. + +## List Registered Services + +```go +names := c.Services() +``` + +### Important Detail + +The current registry is map-backed. `Services()`, `Startables()`, and `Stoppables()` do not promise a stable order. + +## Lifecycle Snapshots + +Use these helpers when you want the current set of startable or stoppable services: + +```go +startables := c.Startables() +stoppables := c.Stoppables() +``` + +They return `[]*core.Service` inside `Result.Value`. + +## Lock the Registry + +CoreGO has a service-lock mechanism, but it is explicit. + +```go +c := core.New() + +c.LockEnable() +c.Service("audit", core.Service{}) +c.Service("cache", core.Service{}) +c.LockApply() +``` + +After `LockApply`, new registrations fail: + +```go +r := c.Service("late", core.Service{}) +fmt.Println(r.OK) // false +``` + +The default lock name is `"srv"`. You can pass a different name if you need a custom lock namespace. 
+ +For the service registry itself, use the default `"srv"` lock path. That is the path used by `Core.Service(...)`. + +## `NewWithFactories` + +For GUI runtimes or factory-driven setup, CoreGO provides `NewWithFactories`. + +```go +r := core.NewWithFactories(nil, map[string]core.ServiceFactory{ + "audit": func() core.Result { + return core.Result{Value: core.Service{ + OnStart: func() core.Result { + return core.Result{OK: true} + }, + }, OK: true} + }, + "cache": func() core.Result { + return core.Result{Value: core.Service{}, OK: true} + }, +}) +``` + +### Important Details + +- each factory must return a `core.Service` in `Result.Value` +- factories are executed in sorted key order +- nil factories are skipped +- the return value is `*core.Runtime` + +## `Runtime` + +`Runtime` is a small wrapper used for external runtimes such as GUI bindings. + +```go +r := core.NewRuntime(nil) +rt := r.Value.(*core.Runtime) + +_ = rt.ServiceStartup(context.Background(), nil) +_ = rt.ServiceShutdown(context.Background()) +``` + +`Runtime.ServiceName()` returns `"Core"`. + +## `ServiceRuntime[T]` for Package Authors + +If you are writing a package on top of CoreGO, use `ServiceRuntime[T]` to keep a typed options struct and the parent `Core` together. + +```go +type repositoryServiceOptions struct { + BaseDirectory string +} + +type repositoryService struct { + *core.ServiceRuntime[repositoryServiceOptions] +} + +func newRepositoryService(c *core.Core) *repositoryService { + return &repositoryService{ + ServiceRuntime: core.NewServiceRuntime(c, repositoryServiceOptions{ + BaseDirectory: "/srv/repos", + }), + } +} +``` + +This is a package-authoring helper. It does not replace the `core.Service` registry entry. 
diff --git a/pkg/lib/workspace/default/.core/reference/docs/subsystems.md b/pkg/lib/workspace/default/.core/reference/docs/subsystems.md new file mode 100644 index 0000000..f39ea16 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/subsystems.md @@ -0,0 +1,158 @@ +--- +title: Subsystems +description: Built-in accessors for app metadata, embedded data, filesystem, transport handles, i18n, and CLI. +--- + +# Subsystems + +`Core` gives you a set of built-in subsystems so small applications do not need extra plumbing before they can do useful work. + +## Accessor Map + +| Accessor | Purpose | +|----------|---------| +| `App()` | Application identity and external runtime | +| `Data()` | Named embedded filesystem mounts | +| `Drive()` | Named transport handles | +| `Fs()` | Local filesystem access | +| `I18n()` | Locale collection and translation delegation | +| `Cli()` | Command-line surface over the command tree | + +## `App` + +`App` stores process identity and optional GUI runtime state. + +```go +app := c.App() +app.Name = "agent-workbench" +app.Version = "0.25.0" +app.Description = "workspace runner" +app.Runtime = myRuntime +``` + +`Find` resolves an executable on `PATH` and returns an `*App`. + +```go +r := core.Find("go", "Go toolchain") +``` + +## `Data` + +`Data` mounts named embedded filesystems and makes them addressable through paths like `mount-name/path/to/file`. + +```go +c.Data().New(core.Options{ + {Key: "name", Value: "app"}, + {Key: "source", Value: appFS}, + {Key: "path", Value: "templates"}, +}) +``` + +Read content: + +```go +text := c.Data().ReadString("app/agent.md") +bytes := c.Data().ReadFile("app/agent.md") +list := c.Data().List("app") +names := c.Data().ListNames("app") +``` + +Extract a mounted directory: + +```go +r := c.Data().Extract("app/workspace", "/tmp/workspace", nil) +``` + +### Path Rule + +The first path segment is always the mount name. + +## `Drive` + +`Drive` is a registry for named transport handles. 
+ +```go +c.Drive().New(core.Options{ + {Key: "name", Value: "api"}, + {Key: "transport", Value: "https://api.lthn.ai"}, +}) + +c.Drive().New(core.Options{ + {Key: "name", Value: "mcp"}, + {Key: "transport", Value: "mcp://mcp.lthn.sh"}, +}) +``` + +Read them back: + +```go +handle := c.Drive().Get("api") +hasMCP := c.Drive().Has("mcp") +names := c.Drive().Names() +``` + +## `Fs` + +`Fs` wraps local filesystem operations with a consistent `Result` shape. + +```go +c.Fs().Write("/tmp/core-go/example.txt", "hello") +r := c.Fs().Read("/tmp/core-go/example.txt") +``` + +Other helpers: + +```go +c.Fs().EnsureDir("/tmp/core-go/cache") +c.Fs().List("/tmp/core-go") +c.Fs().Stat("/tmp/core-go/example.txt") +c.Fs().Rename("/tmp/core-go/example.txt", "/tmp/core-go/example-2.txt") +c.Fs().Delete("/tmp/core-go/example-2.txt") +``` + +### Important Details + +- the default `Core` starts with `Fs{root:"/"}` +- relative paths resolve from the current working directory +- `Delete` and `DeleteAll` refuse to remove `/` and `$HOME` + +## `I18n` + +`I18n` collects locale mounts and forwards translation work to a translator implementation when one is registered. + +```go +c.I18n().SetLanguage("en-GB") +``` + +Without a translator, `Translate` returns the message key itself: + +```go +r := c.I18n().Translate("cmd.deploy.description") +``` + +With a translator: + +```go +c.I18n().SetTranslator(myTranslator) +``` + +Then: + +```go +langs := c.I18n().AvailableLanguages() +current := c.I18n().Language() +``` + +## `Cli` + +`Cli` exposes the command registry through a terminal-facing API. + +```go +c.Cli().SetBanner(func(_ *core.Cli) string { + return "Agent Workbench" +}) + +r := c.Cli().Run("workspace", "create", "--name=alpha") +``` + +Use [commands.md](commands.md) for the full command and flag model. 
diff --git a/pkg/lib/workspace/default/.core/reference/docs/testing.md b/pkg/lib/workspace/default/.core/reference/docs/testing.md new file mode 100644 index 0000000..656634a --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/docs/testing.md @@ -0,0 +1,118 @@ +--- +title: Testing +description: Test naming and testing patterns used by CoreGO. +--- + +# Testing + +The repository uses `github.com/stretchr/testify/assert` and a simple AX-friendly naming pattern. + +## Test Names + +Use: + +- `_Good` for expected success +- `_Bad` for expected failure +- `_Ugly` for panics, degenerate input, and edge behavior + +Examples from this repository: + +```go +func TestNew_Good(t *testing.T) {} +func TestService_Register_Duplicate_Bad(t *testing.T) {} +func TestCore_Must_Ugly(t *testing.T) {} +``` + +## Start with a Small Core + +```go +c := core.New(core.Options{ + {Key: "name", Value: "test-core"}, +}) +``` + +Then register only the pieces your test needs. + +## Test a Service + +```go +started := false + +c.Service("audit", core.Service{ + OnStart: func() core.Result { + started = true + return core.Result{OK: true} + }, +}) + +r := c.ServiceStartup(context.Background(), nil) +assert.True(t, r.OK) +assert.True(t, started) +``` + +## Test a Command + +```go +c.Command("greet", core.Command{ + Action: func(opts core.Options) core.Result { + return core.Result{Value: "hello " + opts.String("name"), OK: true} + }, +}) + +r := c.Cli().Run("greet", "--name=world") +assert.True(t, r.OK) +assert.Equal(t, "hello world", r.Value) +``` + +## Test a Query or Task + +```go +c.RegisterQuery(func(_ *core.Core, q core.Query) core.Result { + if q == "ping" { + return core.Result{Value: "pong", OK: true} + } + return core.Result{} +}) + +assert.Equal(t, "pong", c.QUERY("ping").Value) +``` + +```go +c.RegisterTask(func(_ *core.Core, t core.Task) core.Result { + if t == "compute" { + return core.Result{Value: 42, OK: true} + } + return core.Result{} +}) + +assert.Equal(t, 42, 
c.PERFORM("compute").Value) +``` + +## Test Async Work + +For `PerformAsync`, observe completion through the action bus. + +```go +completed := make(chan core.ActionTaskCompleted, 1) + +c.RegisterAction(func(_ *core.Core, msg core.Message) core.Result { + if event, ok := msg.(core.ActionTaskCompleted); ok { + completed <- event + } + return core.Result{OK: true} +}) +``` + +Then wait with normal Go test tools such as channels, timers, or `assert.Eventually`. + +## Use Real Temporary Paths + +When testing `Fs`, `Data.Extract`, or other I/O helpers, use `t.TempDir()` and create realistic paths instead of mocking the filesystem by default. + +## Repository Commands + +```bash +core go test +core go test --run TestPerformAsync_Good +go test ./... +``` diff --git a/pkg/lib/workspace/default/.core/reference/drive.go b/pkg/lib/workspace/default/.core/reference/drive.go new file mode 100644 index 0000000..e6988c4 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/drive.go @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Drive is the resource handle registry for transport connections. +// Packages register their transport handles (API, MCP, SSH, VPN) +// and other packages access them by name. +// +// Register a transport: +// +// c.Drive().New(core.Options{ +// {Key: "name", Value: "api"}, +// {Key: "transport", Value: "https://api.lthn.ai"}, +// }) +// c.Drive().New(core.Options{ +// {Key: "name", Value: "ssh"}, +// {Key: "transport", Value: "ssh://claude@10.69.69.165"}, +// }) +// c.Drive().New(core.Options{ +// {Key: "name", Value: "mcp"}, +// {Key: "transport", Value: "mcp://mcp.lthn.sh"}, +// }) +// +// Retrieve a handle: +// +// api := c.Drive().Get("api") +package core + +import ( + "sync" +) + +// DriveHandle holds a named transport resource. +type DriveHandle struct { + Name string + Transport string + Options Options +} + +// Drive manages named transport handles. 
+type Drive struct { + handles map[string]*DriveHandle + mu sync.RWMutex +} + +// New registers a transport handle. +// +// c.Drive().New(core.Options{ +// {Key: "name", Value: "api"}, +// {Key: "transport", Value: "https://api.lthn.ai"}, +// }) +func (d *Drive) New(opts Options) Result { + name := opts.String("name") + if name == "" { + return Result{} + } + + transport := opts.String("transport") + + d.mu.Lock() + defer d.mu.Unlock() + + if d.handles == nil { + d.handles = make(map[string]*DriveHandle) + } + + cp := make(Options, len(opts)) + copy(cp, opts) + handle := &DriveHandle{ + Name: name, + Transport: transport, + Options: cp, + } + + d.handles[name] = handle + return Result{handle, true} +} + +// Get returns a handle by name. +// +// r := c.Drive().Get("api") +// if r.OK { handle := r.Value.(*DriveHandle) } +func (d *Drive) Get(name string) Result { + d.mu.RLock() + defer d.mu.RUnlock() + if d.handles == nil { + return Result{} + } + h, ok := d.handles[name] + if !ok { + return Result{} + } + return Result{h, true} +} + +// Has returns true if a handle is registered. +// +// if c.Drive().Has("ssh") { ... } +func (d *Drive) Has(name string) bool { + return d.Get(name).OK +} + +// Names returns all registered handle names. +// +// names := c.Drive().Names() +func (d *Drive) Names() []string { + d.mu.RLock() + defer d.mu.RUnlock() + var names []string + for k := range d.handles { + names = append(names, k) + } + return names +} diff --git a/pkg/lib/workspace/default/.core/reference/embed.go b/pkg/lib/workspace/default/.core/reference/embed.go new file mode 100644 index 0000000..e6a5766 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/embed.go @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Embedded assets for the Core framework. +// +// Embed provides scoped filesystem access for go:embed and any fs.FS. +// Also includes build-time asset packing (AST scanner + compressor) +// and template-based directory extraction. 
+//
+// Usage (mount):
+//
+//	r := core.Mount(myFS, "lib/persona")
+//	if r.OK {
+//		sub := r.Value.(*Embed)
+//		content := sub.ReadString("secops/developer.md").Value.(string)
+//	}
+//
+// Usage (extract):
+//
+//	core.Extract(fsys, "/tmp/workspace", data)
+//
+// Usage (pack):
+//
+//	refs := core.ScanAssets([]string{"main.go"}).Value.([]ScannedPackage)
+//	source := core.GeneratePack(refs[0]).Value.(string)
+package core
+
+import (
+	"bytes"
+	"compress/gzip"
+	"embed"
+	"encoding/base64"
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"sync"
+	"text/template"
+)
+
+// --- Runtime: Asset Registry ---
+
+// AssetGroup holds a named collection of packed assets.
+type AssetGroup struct {
+	assets map[string]string // name → compressed data
+}
+
+var (
+	assetGroups   = make(map[string]*AssetGroup)
+	assetGroupsMu sync.RWMutex
+)
+
+// AddAsset registers a packed asset at runtime (called from generated init()).
+func AddAsset(group, name, data string) {
+	assetGroupsMu.Lock()
+	defer assetGroupsMu.Unlock()
+
+	g, ok := assetGroups[group]
+	if !ok {
+		g = &AssetGroup{assets: make(map[string]string)}
+		assetGroups[group] = g
+	}
+	g.assets[name] = data
+}
+
+// GetAsset retrieves and decompresses a packed asset.
+//
+//	r := core.GetAsset("mygroup", "greeting")
+//	if r.OK { content := r.Value.(string) }
+func GetAsset(group, name string) Result {
+	assetGroupsMu.RLock()
+	g, ok := assetGroups[group]
+	if !ok {
+		assetGroupsMu.RUnlock()
+		return Result{}
+	}
+	data, ok := g.assets[name]
+	assetGroupsMu.RUnlock()
+	if !ok {
+		return Result{}
+	}
+	s, err := decompress(data)
+	if err != nil {
+		return Result{err, false}
+	}
+	return Result{s, true}
+}
+
+// GetAssetBytes retrieves a packed asset as bytes.
+// +// r := core.GetAssetBytes("mygroup", "file") +// if r.OK { data := r.Value.([]byte) } +func GetAssetBytes(group, name string) Result { + r := GetAsset(group, name) + if !r.OK { + return r + } + return Result{[]byte(r.Value.(string)), true} +} + +// --- Build-time: AST Scanner --- + +// AssetRef is a reference to an asset found in source code. +type AssetRef struct { + Name string + Path string + Group string + FullPath string +} + +// ScannedPackage holds all asset references from a set of source files. +type ScannedPackage struct { + PackageName string + BaseDirectory string + Groups []string + Assets []AssetRef +} + +// ScanAssets parses Go source files and finds asset references. +// Looks for calls to: core.GetAsset("group", "name"), core.AddAsset, etc. +func ScanAssets(filenames []string) Result { + packageMap := make(map[string]*ScannedPackage) + var scanErr error + + for _, filename := range filenames { + fset := token.NewFileSet() + node, err := parser.ParseFile(fset, filename, nil, parser.AllErrors) + if err != nil { + return Result{err, false} + } + + baseDir := filepath.Dir(filename) + pkg, ok := packageMap[baseDir] + if !ok { + pkg = &ScannedPackage{BaseDirectory: baseDir} + packageMap[baseDir] = pkg + } + pkg.PackageName = node.Name.Name + + ast.Inspect(node, func(n ast.Node) bool { + if scanErr != nil { + return false + } + call, ok := n.(*ast.CallExpr) + if !ok { + return true + } + + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return true + } + + ident, ok := sel.X.(*ast.Ident) + if !ok { + return true + } + + // Look for core.GetAsset or mewn.String patterns + if ident.Name == "core" || ident.Name == "mewn" { + switch sel.Sel.Name { + case "GetAsset", "GetAssetBytes", "String", "MustString", "Bytes", "MustBytes": + if len(call.Args) >= 1 { + if lit, ok := call.Args[len(call.Args)-1].(*ast.BasicLit); ok { + path := TrimPrefix(TrimSuffix(lit.Value, "\""), "\"") + group := "." 
+ if len(call.Args) >= 2 { + if glit, ok := call.Args[0].(*ast.BasicLit); ok { + group = TrimPrefix(TrimSuffix(glit.Value, "\""), "\"") + } + } + fullPath, err := filepath.Abs(filepath.Join(baseDir, group, path)) + if err != nil { + scanErr = Wrap(err, "core.ScanAssets", Join(" ", "could not determine absolute path for asset", path, "in group", group)) + return false + } + pkg.Assets = append(pkg.Assets, AssetRef{ + Name: path, + + Group: group, + FullPath: fullPath, + }) + } + } + case "Group": + // Variable assignment: g := core.Group("./assets") + if len(call.Args) == 1 { + if lit, ok := call.Args[0].(*ast.BasicLit); ok { + path := TrimPrefix(TrimSuffix(lit.Value, "\""), "\"") + fullPath, err := filepath.Abs(filepath.Join(baseDir, path)) + if err != nil { + scanErr = Wrap(err, "core.ScanAssets", Join(" ", "could not determine absolute path for group", path)) + return false + } + pkg.Groups = append(pkg.Groups, fullPath) + // Track for variable resolution + } + } + } + } + + return true + }) + if scanErr != nil { + return Result{scanErr, false} + } + } + + var result []ScannedPackage + for _, pkg := range packageMap { + result = append(result, *pkg) + } + return Result{result, true} +} + +// GeneratePack creates Go source code that embeds the scanned assets. +func GeneratePack(pkg ScannedPackage) Result { + b := NewBuilder() + + b.WriteString(fmt.Sprintf("package %s\n\n", pkg.PackageName)) + b.WriteString("// Code generated by core pack. 
DO NOT EDIT.\n\n") + + if len(pkg.Assets) == 0 && len(pkg.Groups) == 0 { + return Result{b.String(), true} + } + + b.WriteString("import \"dappco.re/go/core\"\n\n") + b.WriteString("func init() {\n") + + // Pack groups (entire directories) + packed := make(map[string]bool) + for _, groupPath := range pkg.Groups { + files, err := getAllFiles(groupPath) + if err != nil { + return Result{err, false} + } + for _, file := range files { + if packed[file] { + continue + } + data, err := compressFile(file) + if err != nil { + return Result{err, false} + } + localPath := TrimPrefix(file, groupPath+"/") + relGroup, err := filepath.Rel(pkg.BaseDirectory, groupPath) + if err != nil { + return Result{err, false} + } + b.WriteString(fmt.Sprintf("\tcore.AddAsset(%q, %q, %q)\n", relGroup, localPath, data)) + packed[file] = true + } + } + + // Pack individual assets + for _, asset := range pkg.Assets { + if packed[asset.FullPath] { + continue + } + data, err := compressFile(asset.FullPath) + if err != nil { + return Result{err, false} + } + b.WriteString(fmt.Sprintf("\tcore.AddAsset(%q, %q, %q)\n", asset.Group, asset.Name, data)) + packed[asset.FullPath] = true + } + + b.WriteString("}\n") + return Result{b.String(), true} +} + +// --- Compression --- + +func compressFile(path string) (string, error) { + data, err := os.ReadFile(path) + if err != nil { + return "", err + } + return compress(string(data)) +} + +func compress(input string) (string, error) { + var buf bytes.Buffer + b64 := base64.NewEncoder(base64.StdEncoding, &buf) + gz, err := gzip.NewWriterLevel(b64, gzip.BestCompression) + if err != nil { + return "", err + } + if _, err := gz.Write([]byte(input)); err != nil { + _ = gz.Close() + _ = b64.Close() + return "", err + } + if err := gz.Close(); err != nil { + _ = b64.Close() + return "", err + } + if err := b64.Close(); err != nil { + return "", err + } + return buf.String(), nil +} + +func decompress(input string) (string, error) { + b64 := 
base64.NewDecoder(base64.StdEncoding, NewReader(input)) + gz, err := gzip.NewReader(b64) + if err != nil { + return "", err + } + + data, err := io.ReadAll(gz) + if err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + return string(data), nil +} + +func getAllFiles(dir string) ([]string, error) { + var result []string + err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + result = append(result, path) + } + return nil + }) + return result, err +} + +// --- Embed: Scoped Filesystem Mount --- + +// Embed wraps an fs.FS with a basedir for scoped access. +// All paths are relative to basedir. +type Embed struct { + basedir string + fsys fs.FS + embedFS *embed.FS // original embed.FS for type-safe access via EmbedFS() +} + +// Mount creates a scoped view of an fs.FS anchored at basedir. +// +// r := core.Mount(myFS, "lib/prompts") +// if r.OK { emb := r.Value.(*Embed) } +func Mount(fsys fs.FS, basedir string) Result { + s := &Embed{fsys: fsys, basedir: basedir} + + if efs, ok := fsys.(embed.FS); ok { + s.embedFS = &efs + } + + if r := s.ReadDir("."); !r.OK { + return r + } + return Result{s, true} +} + +// MountEmbed creates a scoped view of an embed.FS. +// +// r := core.MountEmbed(myFS, "testdata") +func MountEmbed(efs embed.FS, basedir string) Result { + return Mount(efs, basedir) +} + +func (s *Embed) path(name string) Result { + joined := filepath.ToSlash(filepath.Join(s.basedir, name)) + if HasPrefix(joined, "..") || Contains(joined, "/../") || HasSuffix(joined, "/..") { + return Result{E("embed.path", Concat("path traversal rejected: ", name), nil), false} + } + return Result{joined, true} +} + +// Open opens the named file for reading. 
+// +// r := emb.Open("test.txt") +// if r.OK { file := r.Value.(fs.File) } +func (s *Embed) Open(name string) Result { + r := s.path(name) + if !r.OK { + return r + } + f, err := s.fsys.Open(r.Value.(string)) + if err != nil { + return Result{err, false} + } + return Result{f, true} +} + +// ReadDir reads the named directory. +func (s *Embed) ReadDir(name string) Result { + r := s.path(name) + if !r.OK { + return r + } + return Result{}.Result(fs.ReadDir(s.fsys, r.Value.(string))) +} + +// ReadFile reads the named file. +// +// r := emb.ReadFile("test.txt") +// if r.OK { data := r.Value.([]byte) } +func (s *Embed) ReadFile(name string) Result { + r := s.path(name) + if !r.OK { + return r + } + data, err := fs.ReadFile(s.fsys, r.Value.(string)) + if err != nil { + return Result{err, false} + } + return Result{data, true} +} + +// ReadString reads the named file as a string. +// +// r := emb.ReadString("test.txt") +// if r.OK { content := r.Value.(string) } +func (s *Embed) ReadString(name string) Result { + r := s.ReadFile(name) + if !r.OK { + return r + } + return Result{string(r.Value.([]byte)), true} +} + +// Sub returns a new Embed anchored at a subdirectory within this mount. +// +// r := emb.Sub("testdata") +// if r.OK { sub := r.Value.(*Embed) } +func (s *Embed) Sub(subDir string) Result { + r := s.path(subDir) + if !r.OK { + return r + } + sub, err := fs.Sub(s.fsys, r.Value.(string)) + if err != nil { + return Result{err, false} + } + return Result{&Embed{fsys: sub, basedir: "."}, true} +} + +// FS returns the underlying fs.FS. +func (s *Embed) FS() fs.FS { + return s.fsys +} + +// EmbedFS returns the underlying embed.FS if mounted from one. +// Returns zero embed.FS if mounted from a non-embed source. +func (s *Embed) EmbedFS() embed.FS { + if s.embedFS != nil { + return *s.embedFS + } + return embed.FS{} +} + +// BaseDirectory returns the base directory this Embed is anchored at. 
+func (s *Embed) BaseDirectory() string { + return s.basedir +} + +// --- Template Extraction --- + +// ExtractOptions configures template extraction. +type ExtractOptions struct { + // TemplateFilters identifies template files by substring match. + // Default: [".tmpl"] + TemplateFilters []string + + // IgnoreFiles is a set of filenames to skip during extraction. + IgnoreFiles map[string]struct{} + + // RenameFiles maps original filenames to new names. + RenameFiles map[string]string +} + +// Extract copies a template directory from an fs.FS to targetDir, +// processing Go text/template in filenames and file contents. +// +// Files containing a template filter substring (default: ".tmpl") have +// their contents processed through text/template with the given data. +// The filter is stripped from the output filename. +// +// Directory and file names can contain Go template expressions: +// {{.Name}}/main.go → myproject/main.go +// +// Data can be any struct or map[string]string for template substitution. +func Extract(fsys fs.FS, targetDir string, data any, opts ...ExtractOptions) Result { + opt := ExtractOptions{ + TemplateFilters: []string{".tmpl"}, + IgnoreFiles: make(map[string]struct{}), + RenameFiles: make(map[string]string), + } + if len(opts) > 0 { + if len(opts[0].TemplateFilters) > 0 { + opt.TemplateFilters = opts[0].TemplateFilters + } + if opts[0].IgnoreFiles != nil { + opt.IgnoreFiles = opts[0].IgnoreFiles + } + if opts[0].RenameFiles != nil { + opt.RenameFiles = opts[0].RenameFiles + } + } + + // Ensure target directory exists + targetDir, err := filepath.Abs(targetDir) + if err != nil { + return Result{err, false} + } + if err := os.MkdirAll(targetDir, 0755); err != nil { + return Result{err, false} + } + + // Categorise files + var dirs []string + var templateFiles []string + var standardFiles []string + + err = fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if path == "." 
{ + return nil + } + if d.IsDir() { + dirs = append(dirs, path) + return nil + } + filename := filepath.Base(path) + if _, ignored := opt.IgnoreFiles[filename]; ignored { + return nil + } + if isTemplate(filename, opt.TemplateFilters) { + templateFiles = append(templateFiles, path) + } else { + standardFiles = append(standardFiles, path) + } + return nil + }) + if err != nil { + return Result{err, false} + } + + // safePath ensures a rendered path stays under targetDir. + safePath := func(rendered string) (string, error) { + abs, err := filepath.Abs(rendered) + if err != nil { + return "", err + } + if !HasPrefix(abs, targetDir+string(filepath.Separator)) && abs != targetDir { + return "", E("embed.Extract", Concat("path escapes target: ", abs), nil) + } + return abs, nil + } + + // Create directories (names may contain templates) + for _, dir := range dirs { + target, err := safePath(renderPath(filepath.Join(targetDir, dir), data)) + if err != nil { + return Result{err, false} + } + if err := os.MkdirAll(target, 0755); err != nil { + return Result{err, false} + } + } + + // Process template files + for _, path := range templateFiles { + tmpl, err := template.ParseFS(fsys, path) + if err != nil { + return Result{err, false} + } + + targetFile := renderPath(filepath.Join(targetDir, path), data) + + // Strip template filters from filename + dir := filepath.Dir(targetFile) + name := filepath.Base(targetFile) + for _, filter := range opt.TemplateFilters { + name = Replace(name, filter, "") + } + if renamed := opt.RenameFiles[name]; renamed != "" { + name = renamed + } + targetFile, err = safePath(filepath.Join(dir, name)) + if err != nil { + return Result{err, false} + } + + f, err := os.Create(targetFile) + if err != nil { + return Result{err, false} + } + if err := tmpl.Execute(f, data); err != nil { + f.Close() + return Result{err, false} + } + f.Close() + } + + // Copy standard files + for _, path := range standardFiles { + targetPath := path + name := 
filepath.Base(path) + if renamed := opt.RenameFiles[name]; renamed != "" { + targetPath = filepath.Join(filepath.Dir(path), renamed) + } + target, err := safePath(renderPath(filepath.Join(targetDir, targetPath), data)) + if err != nil { + return Result{err, false} + } + if err := copyFile(fsys, path, target); err != nil { + return Result{err, false} + } + } + + return Result{OK: true} +} + +func isTemplate(filename string, filters []string) bool { + for _, f := range filters { + if Contains(filename, f) { + return true + } + } + return false +} + +func renderPath(path string, data any) string { + if data == nil { + return path + } + tmpl, err := template.New("path").Parse(path) + if err != nil { + return path + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return path + } + return buf.String() +} + +func copyFile(fsys fs.FS, source, target string) error { + s, err := fsys.Open(source) + if err != nil { + return err + } + defer s.Close() + + if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { + return err + } + + d, err := os.Create(target) + if err != nil { + return err + } + defer d.Close() + + _, err = io.Copy(d, s) + return err +} diff --git a/pkg/lib/workspace/default/.core/reference/error.go b/pkg/lib/workspace/default/.core/reference/error.go new file mode 100644 index 0000000..d562494 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/error.go @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Structured errors, crash recovery, and reporting for the Core framework. +// Provides E() for error creation, Wrap()/WrapCode() for chaining, +// and Err for panic recovery and crash reporting. + +package core + +import ( + "encoding/json" + "errors" + "iter" + "maps" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "sync" + "time" +) + +// ErrorSink is the shared interface for error reporting. +// Implemented by ErrorLog (structured logging) and ErrorPanic (panic recovery). 
+type ErrorSink interface {
+	Error(msg string, keyvals ...any)
+	Warn(msg string, keyvals ...any)
+}
+
+var _ ErrorSink = (*Log)(nil)
+
+// Err represents a structured error with operational context.
+// It implements the error interface and supports unwrapping.
+type Err struct {
+	Operation string // Operation being performed (e.g., "user.Save")
+	Message   string // Human-readable message
+	Cause     error  // Underlying error (optional)
+	Code      string // Error code (optional, e.g., "VALIDATION_FAILED")
+}
+
+// Error implements the error interface.
+func (e *Err) Error() string {
+	var prefix string
+	if e.Operation != "" {
+		prefix = e.Operation + ": "
+	}
+	if e.Cause != nil {
+		if e.Code != "" {
+			return Concat(prefix, e.Message, " [", e.Code, "]: ", e.Cause.Error())
+		}
+		return Concat(prefix, e.Message, ": ", e.Cause.Error())
+	}
+	if e.Code != "" {
+		return Concat(prefix, e.Message, " [", e.Code, "]")
+	}
+	return Concat(prefix, e.Message)
+}
+
+// Unwrap returns the underlying error for use with errors.Is and errors.As.
+func (e *Err) Unwrap() error {
+	return e.Cause
+}
+
+// --- Error Creation Functions ---
+
+// E creates a new Err with operation context.
+// The underlying error can be nil for creating errors without a cause.
+//
+// Example:
+//
+//	return core.E("user.Save", "failed to save user", err)
+//	return core.E("api.Call", "rate limited", nil) // No underlying cause
+func E(op, msg string, err error) error {
+	return &Err{Operation: op, Message: msg, Cause: err}
+}
+
+// Wrap wraps an error with operation context.
+// Returns nil if err is nil, to support conditional wrapping.
+// Preserves error Code if the wrapped error is an *Err.
+//
+// Example:
+//
+//	return core.Wrap(err, "db.Query", "database query failed")
+func Wrap(err error, op, msg string) error {
+	if err == nil {
+		return nil
+	}
+	// Preserve Code from wrapped *Err
+	var logErr *Err
+	if As(err, &logErr) && logErr.Code != "" {
+		return &Err{Operation: op, Message: msg, Cause: err, Code: logErr.Code}
+	}
+	return &Err{Operation: op, Message: msg, Cause: err}
+}
+
+// WrapCode wraps an error with operation context and error code.
+// Returns nil only if both err is nil AND code is empty.
+// Useful for API errors that need machine-readable codes.
+//
+// Example:
+//
+//	return core.WrapCode(err, "VALIDATION_ERROR", "user.Validate", "invalid email")
+func WrapCode(err error, code, op, msg string) error {
+	if err == nil && code == "" {
+		return nil
+	}
+	return &Err{Operation: op, Message: msg, Cause: err, Code: code}
+}
+
+// NewCode creates an error with just code and message (no underlying error).
+// Useful for creating sentinel errors with codes.
+//
+// Example:
+//
+//	var ErrNotFound = core.NewCode("NOT_FOUND", "resource not found")
+func NewCode(code, msg string) error {
+	return &Err{Message: msg, Code: code}
+}
+
+// --- Standard Library Wrappers ---
+
+// Is reports whether any error in err's tree matches target.
+// Wrapper around errors.Is for convenience.
+func Is(err, target error) bool {
+	return errors.Is(err, target)
+}
+
+// As finds the first error in err's tree that matches target.
+// Wrapper around errors.As for convenience.
+func As(err error, target any) bool {
+	return errors.As(err, target)
+}
+
+// NewError creates a simple error with the given text.
+// Wrapper around errors.New for convenience.
+func NewError(text string) error {
+	return errors.New(text)
+}
+
+// ErrorJoin combines multiple errors into one.
+//
+//	core.ErrorJoin(err1, err2, err3)
+func ErrorJoin(errs ...error) error {
+	return errors.Join(errs...)
+}
+
+// --- Error Introspection Helpers ---
+
+// Operation extracts the operation name from an error.
+// Returns empty string if the error is not an *Err.
+func Operation(err error) string {
+	var e *Err
+	if As(err, &e) {
+		return e.Operation
+	}
+	return ""
+}
+
+// ErrorCode extracts the error code from an error.
+// Returns empty string if the error is not an *Err or has no code.
+func ErrorCode(err error) string {
+	var e *Err
+	if As(err, &e) {
+		return e.Code
+	}
+	return ""
+}
+
+// ErrorMessage extracts the message from an error.
+// Returns the error's Error() string if not an *Err.
+func ErrorMessage(err error) string {
+	if err == nil {
+		return ""
+	}
+	var e *Err
+	if As(err, &e) {
+		return e.Message
+	}
+	return err.Error()
+}
+
+// Root returns the root cause of an error chain.
+// Unwraps until no more wrapped errors are found.
+func Root(err error) error {
+	if err == nil {
+		return nil
+	}
+	for {
+		unwrapped := errors.Unwrap(err)
+		if unwrapped == nil {
+			return err
+		}
+		err = unwrapped
+	}
+}
+
+// AllOperations returns an iterator over all operational contexts in the error chain.
+// It traverses the error tree using errors.Unwrap.
+func AllOperations(err error) iter.Seq[string] {
+	return func(yield func(string) bool) {
+		for err != nil {
+			if e, ok := err.(*Err); ok {
+				if e.Operation != "" {
+					if !yield(e.Operation) {
+						return
+					}
+				}
+			}
+			err = errors.Unwrap(err)
+		}
+	}
+}
+
+// StackTrace returns the logical stack trace (chain of operations) from an error.
+// It returns an empty slice if no operational context is found.
+func StackTrace(err error) []string {
+	var stack []string
+	for op := range AllOperations(err) {
+		stack = append(stack, op)
+	}
+	return stack
+}
+
+// FormatStackTrace returns a pretty-printed logical stack trace.
+func FormatStackTrace(err error) string {
+	var ops []string
+	for op := range AllOperations(err) {
+		ops = append(ops, op)
+	}
+	if len(ops) == 0 {
+		return ""
+	}
+	return Join(" -> ", ops...)
+} + +// --- ErrorLog: Log-and-Return Error Helpers --- + +// ErrorLog combines error creation with logging. +// Primary action: return an error. Secondary: log it. +type ErrorLog struct { + log *Log +} + +func (el *ErrorLog) logger() *Log { + if el.log != nil { + return el.log + } + return Default() +} + +// Error logs at Error level and returns a Result with the wrapped error. +func (el *ErrorLog) Error(err error, op, msg string) Result { + if err == nil { + return Result{OK: true} + } + wrapped := Wrap(err, op, msg) + el.logger().Error(msg, "op", op, "err", err) + return Result{wrapped, false} +} + +// Warn logs at Warn level and returns a Result with the wrapped error. +func (el *ErrorLog) Warn(err error, op, msg string) Result { + if err == nil { + return Result{OK: true} + } + wrapped := Wrap(err, op, msg) + el.logger().Warn(msg, "op", op, "err", err) + return Result{wrapped, false} +} + +// Must logs and panics if err is not nil. +func (el *ErrorLog) Must(err error, op, msg string) { + if err != nil { + el.logger().Error(msg, "op", op, "err", err) + panic(Wrap(err, op, msg)) + } +} + +// --- Crash Recovery & Reporting --- + +// CrashReport represents a single crash event. +type CrashReport struct { + Timestamp time.Time `json:"timestamp"` + Error string `json:"error"` + Stack string `json:"stack"` + System CrashSystem `json:"system,omitempty"` + Meta map[string]string `json:"meta,omitempty"` +} + +// CrashSystem holds system information at crash time. +type CrashSystem struct { + OperatingSystem string `json:"operatingsystem"` + Architecture string `json:"architecture"` + Version string `json:"go_version"` +} + +// ErrorPanic manages panic recovery and crash reporting. +type ErrorPanic struct { + filePath string + meta map[string]string + onCrash func(CrashReport) +} + +// Recover captures a panic and creates a crash report. 
+// Use as: defer c.Error().Recover() +func (h *ErrorPanic) Recover() { + if h == nil { + return + } + r := recover() + if r == nil { + return + } + + err, ok := r.(error) + if !ok { + err = NewError(Sprint("panic: ", r)) + } + + report := CrashReport{ + Timestamp: time.Now(), + Error: err.Error(), + Stack: string(debug.Stack()), + System: CrashSystem{ + OperatingSystem: runtime.GOOS, + Architecture: runtime.GOARCH, + Version: runtime.Version(), + }, + Meta: maps.Clone(h.meta), + } + + if h.onCrash != nil { + h.onCrash(report) + } + + if h.filePath != "" { + h.appendReport(report) + } +} + +// SafeGo runs a function in a goroutine with panic recovery. +func (h *ErrorPanic) SafeGo(fn func()) { + go func() { + defer h.Recover() + fn() + }() +} + +// Reports returns the last n crash reports from the file. +func (h *ErrorPanic) Reports(n int) Result { + if h.filePath == "" { + return Result{} + } + crashMu.Lock() + defer crashMu.Unlock() + data, err := os.ReadFile(h.filePath) + if err != nil { + return Result{err, false} + } + var reports []CrashReport + if err := json.Unmarshal(data, &reports); err != nil { + return Result{err, false} + } + if n <= 0 || len(reports) <= n { + return Result{reports, true} + } + return Result{reports[len(reports)-n:], true} +} + +var crashMu sync.Mutex + +func (h *ErrorPanic) appendReport(report CrashReport) { + crashMu.Lock() + defer crashMu.Unlock() + + var reports []CrashReport + if data, err := os.ReadFile(h.filePath); err == nil { + if err := json.Unmarshal(data, &reports); err != nil { + reports = nil + } + } + + reports = append(reports, report) + data, err := json.MarshalIndent(reports, "", " ") + if err != nil { + Default().Error(Concat("crash report marshal failed: ", err.Error())) + return + } + if err := os.MkdirAll(filepath.Dir(h.filePath), 0755); err != nil { + Default().Error(Concat("crash report dir failed: ", err.Error())) + return + } + if err := os.WriteFile(h.filePath, data, 0600); err != nil { + 
Default().Error(Concat("crash report write failed: ", err.Error())) + } +} diff --git a/pkg/lib/workspace/default/.core/reference/fs.go b/pkg/lib/workspace/default/.core/reference/fs.go new file mode 100644 index 0000000..8642cdc --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/fs.go @@ -0,0 +1,287 @@ +// Sandboxed local filesystem I/O for the Core framework. +package core + +import ( + "os" + "os/user" + "path/filepath" + "time" +) + +// Fs is a sandboxed local filesystem backend. +type Fs struct { + root string +} + +// path sanitises and returns the full path. +// Absolute paths are sandboxed under root (unless root is "/"). +func (m *Fs) path(p string) string { + if p == "" { + return m.root + } + + // If the path is relative and the medium is rooted at "/", + // treat it as relative to the current working directory. + // This makes io.Local behave more like the standard 'os' package. + if m.root == "/" && !filepath.IsAbs(p) { + cwd, _ := os.Getwd() + return filepath.Join(cwd, p) + } + + // Use filepath.Clean with a leading slash to resolve all .. and . internally + // before joining with the root. This is a standard way to sandbox paths. + clean := filepath.Clean("/" + p) + + // If root is "/", allow absolute paths through + if m.root == "/" { + return clean + } + + // Strip leading "/" so Join works correctly with root + return filepath.Join(m.root, clean[1:]) +} + +// validatePath ensures the path is within the sandbox, following symlinks if they exist. +func (m *Fs) validatePath(p string) Result { + if m.root == "/" { + return Result{m.path(p), true} + } + + // Split the cleaned path into components + parts := Split(filepath.Clean("/"+p), string(os.PathSeparator)) + current := m.root + + for _, part := range parts { + if part == "" { + continue + } + + next := filepath.Join(current, part) + realNext, err := filepath.EvalSymlinks(next) + if err != nil { + if os.IsNotExist(err) { + // Part doesn't exist, we can't follow symlinks anymore. 
+ // Since the path is already Cleaned and current is safe, + // appending a component to current will not escape. + current = next + continue + } + return Result{err, false} + } + + // Verify the resolved part is still within the root + rel, err := filepath.Rel(m.root, realNext) + if err != nil || HasPrefix(rel, "..") { + // Security event: sandbox escape attempt + username := "unknown" + if u, err := user.Current(); err == nil { + username = u.Username + } + Print(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s", + time.Now().Format(time.RFC3339), m.root, p, realNext, username) + if err == nil { + err = E("fs.validatePath", Concat("sandbox escape: ", p, " resolves outside ", m.root), nil) + } + return Result{err, false} + } + current = realNext + } + + return Result{current, true} +} + +// Read returns file contents as string. +func (m *Fs) Read(p string) Result { + vp := m.validatePath(p) + if !vp.OK { + return vp + } + data, err := os.ReadFile(vp.Value.(string)) + if err != nil { + return Result{err, false} + } + return Result{string(data), true} +} + +// Write saves content to file, creating parent directories as needed. +// Files are created with mode 0644. For sensitive files (keys, secrets), +// use WriteMode with 0600. +func (m *Fs) Write(p, content string) Result { + return m.WriteMode(p, content, 0644) +} + +// WriteMode saves content to file with explicit permissions. +// Use 0600 for sensitive files (encryption output, private keys, auth hashes). +func (m *Fs) WriteMode(p, content string, mode os.FileMode) Result { + vp := m.validatePath(p) + if !vp.OK { + return vp + } + full := vp.Value.(string) + if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + return Result{err, false} + } + if err := os.WriteFile(full, []byte(content), mode); err != nil { + return Result{err, false} + } + return Result{OK: true} +} + +// EnsureDir creates directory if it doesn't exist. 
func (m *Fs) EnsureDir(p string) Result {
	vp := m.validatePath(p)
	if !vp.OK {
		return vp
	}
	if err := os.MkdirAll(vp.Value.(string), 0755); err != nil {
		return Result{err, false}
	}
	return Result{OK: true}
}

// IsDir returns true if path is a directory.
// Returns false for "", sandbox violations, or any stat error.
func (m *Fs) IsDir(p string) bool {
	if p == "" {
		return false
	}
	vp := m.validatePath(p)
	if !vp.OK {
		return false
	}
	info, err := os.Stat(vp.Value.(string))
	return err == nil && info.IsDir()
}

// IsFile returns true if path is a regular file.
// Returns false for "", sandbox violations, or any stat error.
func (m *Fs) IsFile(p string) bool {
	if p == "" {
		return false
	}
	vp := m.validatePath(p)
	if !vp.OK {
		return false
	}
	info, err := os.Stat(vp.Value.(string))
	return err == nil && info.Mode().IsRegular()
}

// Exists returns true if path exists.
// Note: any stat error (including permission errors) reads as "does not exist".
func (m *Fs) Exists(p string) bool {
	vp := m.validatePath(p)
	if !vp.OK {
		return false
	}
	_, err := os.Stat(vp.Value.(string))
	return err == nil
}

// List returns directory entries ([]os.DirEntry on success).
func (m *Fs) List(p string) Result {
	vp := m.validatePath(p)
	if !vp.OK {
		return vp
	}
	return Result{}.Result(os.ReadDir(vp.Value.(string)))
}

// Stat returns file info (os.FileInfo on success).
func (m *Fs) Stat(p string) Result {
	vp := m.validatePath(p)
	if !vp.OK {
		return vp
	}
	return Result{}.Result(os.Stat(vp.Value.(string)))
}

// Open opens the named file for reading (*os.File on success).
// The caller is responsible for closing the returned file.
func (m *Fs) Open(p string) Result {
	vp := m.validatePath(p)
	if !vp.OK {
		return vp
	}
	return Result{}.Result(os.Open(vp.Value.(string)))
}

// Create creates or truncates the named file, creating parent
// directories as needed. The caller closes the returned *os.File.
func (m *Fs) Create(p string) Result {
	vp := m.validatePath(p)
	if !vp.OK {
		return vp
	}
	full := vp.Value.(string)
	if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
		return Result{err, false}
	}
	return Result{}.Result(os.Create(full))
}

// Append opens the named file for appending, creating it (and parent
// directories) if it doesn't exist. The caller closes the returned *os.File.
func (m *Fs) Append(p string) Result {
	vp := m.validatePath(p)
	if !vp.OK {
		return vp
	}
	full := vp.Value.(string)
	if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
		return Result{err, false}
	}
	return Result{}.Result(os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644))
}

// ReadStream returns a reader for the file content (alias of Open).
func (m *Fs) ReadStream(path string) Result {
	return m.Open(path)
}

// WriteStream returns a writer for the file content (alias of Create).
func (m *Fs) WriteStream(path string) Result {
	return m.Create(path)
}

// Delete removes a file or empty directory.
// Refuses to delete "/" or the current $HOME as a safety guard.
func (m *Fs) Delete(p string) Result {
	vp := m.validatePath(p)
	if !vp.OK {
		return vp
	}
	full := vp.Value.(string)
	if full == "/" || full == os.Getenv("HOME") {
		return Result{E("fs.Delete", Concat("refusing to delete protected path: ", full), nil), false}
	}
	if err := os.Remove(full); err != nil {
		return Result{err, false}
	}
	return Result{OK: true}
}

// DeleteAll removes a file or directory recursively.
// Refuses to delete "/" or the current $HOME as a safety guard.
func (m *Fs) DeleteAll(p string) Result {
	vp := m.validatePath(p)
	if !vp.OK {
		return vp
	}
	full := vp.Value.(string)
	if full == "/" || full == os.Getenv("HOME") {
		return Result{E("fs.DeleteAll", Concat("refusing to delete protected path: ", full), nil), false}
	}
	if err := os.RemoveAll(full); err != nil {
		return Result{err, false}
	}
	return Result{OK: true}
}

// Rename moves a file or directory.
+func (m *Fs) Rename(oldPath, newPath string) Result { + oldVp := m.validatePath(oldPath) + if !oldVp.OK { + return oldVp + } + newVp := m.validatePath(newPath) + if !newVp.OK { + return newVp + } + if err := os.Rename(oldVp.Value.(string), newVp.Value.(string)); err != nil { + return Result{err, false} + } + return Result{OK: true} +} diff --git a/pkg/lib/workspace/default/.core/reference/i18n.go b/pkg/lib/workspace/default/.core/reference/i18n.go new file mode 100644 index 0000000..7061ce8 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/i18n.go @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Internationalisation for the Core framework. +// I18n collects locale mounts from services and delegates +// translation to a registered Translator implementation (e.g., go-i18n). + +package core + +import ( + "sync" +) + +// Translator defines the interface for translation services. +// Implemented by go-i18n's Srv. +type Translator interface { + // Translate translates a message by its ID with optional arguments. + Translate(messageID string, args ...any) Result + // SetLanguage sets the active language (BCP47 tag, e.g., "en-GB", "de"). + SetLanguage(lang string) error + // Language returns the current language code. + Language() string + // AvailableLanguages returns all loaded language codes. + AvailableLanguages() []string +} + +// LocaleProvider is implemented by services that ship their own translation files. +// Core discovers this interface during service registration and collects the +// locale mounts. The i18n service loads them during startup. +// +// Usage in a service package: +// +// //go:embed locales +// var localeFS embed.FS +// +// func (s *MyService) Locales() *Embed { +// m, _ := Mount(localeFS, "locales") +// return m +// } +type LocaleProvider interface { + Locales() *Embed +} + +// I18n manages locale collection and translation dispatch. 
type I18n struct {
	mu         sync.RWMutex
	locales    []*Embed   // collected from LocaleProvider services
	locale     string     // requested language; applied to translator when one registers
	translator Translator // registered implementation (nil until set)
}

// AddLocales adds locale mounts (called during service registration).
func (i *I18n) AddLocales(mounts ...*Embed) {
	i.mu.Lock()
	i.locales = append(i.locales, mounts...)
	i.mu.Unlock()
}

// Locales returns all collected locale mounts.
// The returned slice is a copy, so callers cannot mutate internal state.
func (i *I18n) Locales() Result {
	i.mu.RLock()
	out := make([]*Embed, len(i.locales))
	copy(out, i.locales)
	i.mu.RUnlock()
	return Result{out, true}
}

// SetTranslator registers the translation implementation.
// Called by go-i18n's Srv during startup. If a language was selected before
// the translator registered, it is forwarded now (errors ignored best-effort).
func (i *I18n) SetTranslator(t Translator) {
	i.mu.Lock()
	i.translator = t
	locale := i.locale
	i.mu.Unlock()
	if t != nil && locale != "" {
		_ = t.SetLanguage(locale)
	}
}

// Translator returns the registered translation implementation, or an
// empty (not-OK) Result when none is registered.
func (i *I18n) Translator() Result {
	i.mu.RLock()
	t := i.translator
	i.mu.RUnlock()
	if t == nil {
		return Result{}
	}
	return Result{t, true}
}

// Translate translates a message. Returns the key as-is (OK) if no
// translator is registered, so callers always get a displayable string.
func (i *I18n) Translate(messageID string, args ...any) Result {
	i.mu.RLock()
	t := i.translator
	i.mu.RUnlock()
	if t != nil {
		return t.Translate(messageID, args...)
	}
	return Result{messageID, true}
}

// SetLanguage sets the active language and forwards to the translator if
// registered. An empty lang is a no-op success; the stored locale is kept
// even when no translator exists yet (applied later by SetTranslator).
func (i *I18n) SetLanguage(lang string) Result {
	if lang == "" {
		return Result{OK: true}
	}
	i.mu.Lock()
	i.locale = lang
	t := i.translator
	i.mu.Unlock()
	if t != nil {
		if err := t.SetLanguage(lang); err != nil {
			return Result{err, false}
		}
	}
	return Result{OK: true}
}

// Language returns the current language code, or "en" if not set.
+func (i *I18n) Language() string { + i.mu.RLock() + locale := i.locale + i.mu.RUnlock() + if locale != "" { + return locale + } + return "en" +} + +// AvailableLanguages returns all loaded language codes. +func (i *I18n) AvailableLanguages() []string { + i.mu.RLock() + t := i.translator + i.mu.RUnlock() + if t != nil { + return t.AvailableLanguages() + } + return []string{"en"} +} diff --git a/pkg/lib/workspace/default/.core/reference/ipc.go b/pkg/lib/workspace/default/.core/reference/ipc.go new file mode 100644 index 0000000..5f22c6f --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/ipc.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Message bus for the Core framework. +// Dispatches actions (fire-and-forget), queries (first responder), +// and tasks (first executor) between registered handlers. + +package core + +import ( + "slices" + "sync" +) + +// Ipc holds IPC dispatch data. +type Ipc struct { + ipcMu sync.RWMutex + ipcHandlers []func(*Core, Message) Result + + queryMu sync.RWMutex + queryHandlers []QueryHandler + + taskMu sync.RWMutex + taskHandlers []TaskHandler +} + +func (c *Core) Action(msg Message) Result { + c.ipc.ipcMu.RLock() + handlers := slices.Clone(c.ipc.ipcHandlers) + c.ipc.ipcMu.RUnlock() + + for _, h := range handlers { + if r := h(c, msg); !r.OK { + return r + } + } + return Result{OK: true} +} + +func (c *Core) Query(q Query) Result { + c.ipc.queryMu.RLock() + handlers := slices.Clone(c.ipc.queryHandlers) + c.ipc.queryMu.RUnlock() + + for _, h := range handlers { + r := h(c, q) + if r.OK { + return r + } + } + return Result{} +} + +func (c *Core) QueryAll(q Query) Result { + c.ipc.queryMu.RLock() + handlers := slices.Clone(c.ipc.queryHandlers) + c.ipc.queryMu.RUnlock() + + var results []any + for _, h := range handlers { + r := h(c, q) + if r.OK && r.Value != nil { + results = append(results, r.Value) + } + } + return Result{results, true} +} + +func (c *Core) RegisterQuery(handler QueryHandler) { + 
c.ipc.queryMu.Lock() + c.ipc.queryHandlers = append(c.ipc.queryHandlers, handler) + c.ipc.queryMu.Unlock() +} diff --git a/pkg/lib/workspace/default/.core/reference/lock.go b/pkg/lib/workspace/default/.core/reference/lock.go new file mode 100644 index 0000000..a87181d --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/lock.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Synchronisation, locking, and lifecycle snapshots for the Core framework. + +package core + +import ( + "sync" +) + +// package-level mutex infrastructure +var ( + lockMu sync.Mutex + lockMap = make(map[string]*sync.RWMutex) +) + +// Lock is the DTO for a named mutex. +type Lock struct { + Name string + Mutex *sync.RWMutex +} + +// Lock returns a named Lock, creating the mutex if needed. +func (c *Core) Lock(name string) *Lock { + lockMu.Lock() + m, ok := lockMap[name] + if !ok { + m = &sync.RWMutex{} + lockMap[name] = m + } + lockMu.Unlock() + return &Lock{Name: name, Mutex: m} +} + +// LockEnable marks that the service lock should be applied after initialisation. +func (c *Core) LockEnable(name ...string) { + n := "srv" + if len(name) > 0 { + n = name[0] + } + c.Lock(n).Mutex.Lock() + defer c.Lock(n).Mutex.Unlock() + c.services.lockEnabled = true +} + +// LockApply activates the service lock if it was enabled. +func (c *Core) LockApply(name ...string) { + n := "srv" + if len(name) > 0 { + n = name[0] + } + c.Lock(n).Mutex.Lock() + defer c.Lock(n).Mutex.Unlock() + if c.services.lockEnabled { + c.services.locked = true + } +} + +// Startables returns services that have an OnStart function. +func (c *Core) Startables() Result { + if c.services == nil { + return Result{} + } + c.Lock("srv").Mutex.RLock() + defer c.Lock("srv").Mutex.RUnlock() + var out []*Service + for _, svc := range c.services.services { + if svc.OnStart != nil { + out = append(out, svc) + } + } + return Result{out, true} +} + +// Stoppables returns services that have an OnStop function. 
func (c *Core) Stoppables() Result {
	if c.services == nil {
		return Result{}
	}
	c.Lock("srv").Mutex.RLock()
	defer c.Lock("srv").Mutex.RUnlock()
	var out []*Service
	for _, svc := range c.services.services {
		if svc.OnStop != nil {
			out = append(out, svc)
		}
	}
	return Result{out, true}
}
diff --git a/pkg/lib/workspace/default/.core/reference/log.go b/pkg/lib/workspace/default/.core/reference/log.go
new file mode 100644
index 0000000..65f8c5f
--- /dev/null
+++ b/pkg/lib/workspace/default/.core/reference/log.go
@@ -0,0 +1,402 @@
// Structured logging for the Core framework.
//
//	core.SetLevel(core.LevelDebug)
//	core.Info("server started", "port", 8080)
//	core.Error("failed to connect", "err", err)
package core

import (
	goio "io"
	"os"
	"os/user"
	"slices"
	"sync"
	"sync/atomic"
	"time"
)

// Level defines logging verbosity.
type Level int

// Logging level constants ordered by increasing verbosity.
const (
	// LevelQuiet suppresses all log output.
	LevelQuiet Level = iota
	// LevelError shows only error messages.
	LevelError
	// LevelWarn shows warnings and errors.
	LevelWarn
	// LevelInfo shows informational messages, warnings, and errors.
	LevelInfo
	// LevelDebug shows all messages including debug details.
	LevelDebug
)

// String returns the level name.
func (l Level) String() string {
	switch l {
	case LevelQuiet:
		return "quiet"
	case LevelError:
		return "error"
	case LevelWarn:
		return "warn"
	case LevelInfo:
		return "info"
	case LevelDebug:
		return "debug"
	default:
		return "unknown"
	}
}

// Log provides structured logging.
type Log struct {
	mu     sync.RWMutex
	level  Level
	output goio.Writer

	// redactKeys lists keys whose values should be masked in logs.
	redactKeys []string

	// Style functions for formatting (can be overridden).
	StyleTimestamp func(string) string
	StyleDebug     func(string) string
	StyleInfo      func(string) string
	StyleWarn      func(string) string
	StyleError     func(string) string
	StyleSecurity  func(string) string
}

// RotationLogOptions defines the log rotation and retention policy.
type RotationLogOptions struct {
	// Filename is the log file path. If empty, rotation is disabled.
	Filename string

	// MaxSize is the maximum size of the log file in megabytes before it gets rotated.
	// It defaults to 100 megabytes.
	MaxSize int

	// MaxAge is the maximum number of days to retain old log files based on their
	// file modification time. It defaults to 28 days.
	// Note: set to a negative value to disable age-based retention.
	MaxAge int

	// MaxBackups is the maximum number of old log files to retain.
	// It defaults to 5 backups.
	MaxBackups int

	// Compress determines if the rotated log files should be compressed using gzip.
	// It defaults to true.
	Compress bool
}

// LogOptions configures a Log.
type LogOptions struct {
	Level Level
	// Output is the destination for log messages. If Rotation is provided,
	// Output is ignored and logs are written to the rotating file instead.
	Output goio.Writer
	// Rotation enables log rotation to file. If provided, Filename must be set.
	Rotation *RotationLogOptions
	// RedactKeys is a list of keys whose values should be masked in logs.
	RedactKeys []string
}

// RotationWriterFactory creates a rotating writer from options.
// Set this to enable log rotation (provided by core/go-io integration).
var RotationWriterFactory func(RotationLogOptions) goio.WriteCloser

// NewLog creates a new Log with the given options.
+func NewLog(opts LogOptions) *Log { + output := opts.Output + if opts.Rotation != nil && opts.Rotation.Filename != "" && RotationWriterFactory != nil { + output = RotationWriterFactory(*opts.Rotation) + } + if output == nil { + output = os.Stderr + } + + return &Log{ + level: opts.Level, + output: output, + redactKeys: slices.Clone(opts.RedactKeys), + StyleTimestamp: identity, + StyleDebug: identity, + StyleInfo: identity, + StyleWarn: identity, + StyleError: identity, + StyleSecurity: identity, + } +} + +func identity(s string) string { return s } + +// SetLevel changes the log level. +func (l *Log) SetLevel(level Level) { + l.mu.Lock() + l.level = level + l.mu.Unlock() +} + +// Level returns the current log level. +func (l *Log) Level() Level { + l.mu.RLock() + defer l.mu.RUnlock() + return l.level +} + +// SetOutput changes the output writer. +func (l *Log) SetOutput(w goio.Writer) { + l.mu.Lock() + l.output = w + l.mu.Unlock() +} + +// SetRedactKeys sets the keys to be redacted. +func (l *Log) SetRedactKeys(keys ...string) { + l.mu.Lock() + l.redactKeys = slices.Clone(keys) + l.mu.Unlock() +} + +func (l *Log) shouldLog(level Level) bool { + l.mu.RLock() + defer l.mu.RUnlock() + return level <= l.level +} + +func (l *Log) log(level Level, prefix, msg string, keyvals ...any) { + l.mu.RLock() + output := l.output + styleTimestamp := l.StyleTimestamp + redactKeys := l.redactKeys + l.mu.RUnlock() + + timestamp := styleTimestamp(time.Now().Format("15:04:05")) + + // Copy keyvals to avoid mutating the caller's slice + keyvals = append([]any(nil), keyvals...) 
+ + // Automatically extract context from error if present in keyvals + origLen := len(keyvals) + for i := 0; i < origLen; i += 2 { + if i+1 < origLen { + if err, ok := keyvals[i+1].(error); ok { + if op := Operation(err); op != "" { + // Check if op is already in keyvals + hasOp := false + for j := 0; j < len(keyvals); j += 2 { + if k, ok := keyvals[j].(string); ok && k == "op" { + hasOp = true + break + } + } + if !hasOp { + keyvals = append(keyvals, "op", op) + } + } + if stack := FormatStackTrace(err); stack != "" { + // Check if stack is already in keyvals + hasStack := false + for j := 0; j < len(keyvals); j += 2 { + if k, ok := keyvals[j].(string); ok && k == "stack" { + hasStack = true + break + } + } + if !hasStack { + keyvals = append(keyvals, "stack", stack) + } + } + } + } + } + + // Format key-value pairs + var kvStr string + if len(keyvals) > 0 { + kvStr = " " + for i := 0; i < len(keyvals); i += 2 { + if i > 0 { + kvStr += " " + } + key := keyvals[i] + var val any + if i+1 < len(keyvals) { + val = keyvals[i+1] + } + + // Redaction logic + keyStr := Sprint(key) + if slices.Contains(redactKeys, keyStr) { + val = "[REDACTED]" + } + + // Secure formatting to prevent log injection + if s, ok := val.(string); ok { + kvStr += Sprintf("%v=%q", key, s) + } else { + kvStr += Sprintf("%v=%v", key, val) + } + } + } + + Print(output, "%s %s %s%s", timestamp, prefix, msg, kvStr) +} + +// Debug logs a debug message with optional key-value pairs. +func (l *Log) Debug(msg string, keyvals ...any) { + if l.shouldLog(LevelDebug) { + l.log(LevelDebug, l.StyleDebug("[DBG]"), msg, keyvals...) + } +} + +// Info logs an info message with optional key-value pairs. +func (l *Log) Info(msg string, keyvals ...any) { + if l.shouldLog(LevelInfo) { + l.log(LevelInfo, l.StyleInfo("[INF]"), msg, keyvals...) + } +} + +// Warn logs a warning message with optional key-value pairs. 
+func (l *Log) Warn(msg string, keyvals ...any) { + if l.shouldLog(LevelWarn) { + l.log(LevelWarn, l.StyleWarn("[WRN]"), msg, keyvals...) + } +} + +// Error logs an error message with optional key-value pairs. +func (l *Log) Error(msg string, keyvals ...any) { + if l.shouldLog(LevelError) { + l.log(LevelError, l.StyleError("[ERR]"), msg, keyvals...) + } +} + +// Security logs a security event with optional key-value pairs. +// It uses LevelError to ensure security events are visible even in restrictive +// log configurations. +func (l *Log) Security(msg string, keyvals ...any) { + if l.shouldLog(LevelError) { + l.log(LevelError, l.StyleSecurity("[SEC]"), msg, keyvals...) + } +} + +// Username returns the current system username. +// It uses os/user for reliability and falls back to environment variables. +func Username() string { + if u, err := user.Current(); err == nil { + return u.Username + } + // Fallback for environments where user lookup might fail + if u := os.Getenv("USER"); u != "" { + return u + } + return os.Getenv("USERNAME") +} + +// --- Default logger --- + +var defaultLogPtr atomic.Pointer[Log] + +func init() { + l := NewLog(LogOptions{Level: LevelInfo}) + defaultLogPtr.Store(l) +} + +// Default returns the default logger. +func Default() *Log { + return defaultLogPtr.Load() +} + +// SetDefault sets the default logger. +func SetDefault(l *Log) { + defaultLogPtr.Store(l) +} + +// SetLevel sets the default logger's level. +func SetLevel(level Level) { + Default().SetLevel(level) +} + +// SetRedactKeys sets the default logger's redaction keys. +func SetRedactKeys(keys ...string) { + Default().SetRedactKeys(keys...) +} + +// Debug logs to the default logger. +func Debug(msg string, keyvals ...any) { + Default().Debug(msg, keyvals...) +} + +// Info logs to the default logger. +func Info(msg string, keyvals ...any) { + Default().Info(msg, keyvals...) +} + +// Warn logs to the default logger. 
+func Warn(msg string, keyvals ...any) { + Default().Warn(msg, keyvals...) +} + +// Error logs to the default logger. +func Error(msg string, keyvals ...any) { + Default().Error(msg, keyvals...) +} + +// Security logs to the default logger. +func Security(msg string, keyvals ...any) { + Default().Security(msg, keyvals...) +} + +// --- LogErr: Error-Aware Logger --- + +// LogErr logs structured information extracted from errors. +// Primary action: log. Secondary: extract error context. +type LogErr struct { + log *Log +} + +// NewLogErr creates a LogErr bound to the given logger. +func NewLogErr(log *Log) *LogErr { + return &LogErr{log: log} +} + +// Log extracts context from an Err and logs it at Error level. +func (le *LogErr) Log(err error) { + if err == nil { + return + } + le.log.Error(ErrorMessage(err), "op", Operation(err), "code", ErrorCode(err), "stack", FormatStackTrace(err)) +} + +// --- LogPanic: Panic-Aware Logger --- + +// LogPanic logs panic context without crash file management. +// Primary action: log. Secondary: recover panics. +type LogPanic struct { + log *Log +} + +// NewLogPanic creates a LogPanic bound to the given logger. +func NewLogPanic(log *Log) *LogPanic { + return &LogPanic{log: log} +} + +// Recover captures a panic and logs it. Does not write crash files. +// Use as: defer core.NewLogPanic(logger).Recover() +func (lp *LogPanic) Recover() { + r := recover() + if r == nil { + return + } + err, ok := r.(error) + if !ok { + err = NewError(Sprint("panic: ", r)) + } + lp.log.Error("panic recovered", + "err", err, + "op", Operation(err), + "stack", FormatStackTrace(err), + ) +} diff --git a/pkg/lib/workspace/default/.core/reference/options.go b/pkg/lib/workspace/default/.core/reference/options.go new file mode 100644 index 0000000..4d4c5f8 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/options.go @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Core primitives: Option, Options, Result. 
+// +// Option is a single key-value pair. Options is a collection. +// Any function that returns Result can accept Options. +// +// Create options: +// +// opts := core.Options{ +// {Key: "name", Value: "brain"}, +// {Key: "path", Value: "prompts"}, +// } +// +// Read options: +// +// name := opts.String("name") +// port := opts.Int("port") +// ok := opts.Has("debug") +// +// Use with subsystems: +// +// c.Drive().New(core.Options{ +// {Key: "name", Value: "brain"}, +// {Key: "source", Value: brainFS}, +// {Key: "path", Value: "prompts"}, +// }) +// +// Use with New: +// +// c := core.New(core.Options{ +// {Key: "name", Value: "myapp"}, +// }) +package core + +// Result is the universal return type for Core operations. +// Replaces the (value, error) pattern — errors flow through Core internally. +// +// r := c.Data().New(core.Options{{Key: "name", Value: "brain"}}) +// if r.OK { use(r.Result()) } +type Result struct { + Value any + OK bool +} + +// Result gets or sets the value. Zero args returns Value. With args, maps +// Go (value, error) pairs to Result and returns self. +// +// r.Result(file, err) // OK = err == nil, Value = file +// r.Result(value) // OK = true, Value = value +// r.Result() // after set — returns the value +func (r Result) Result(args ...any) Result { + if len(args) == 0 { + return r + } + + if len(args) == 1 { + return Result{args[0], true} + } + + if err, ok := args[len(args)-1].(error); ok { + if err != nil { + return Result{err, false} + } + return Result{args[0], true} + } + return Result{args[0], true} +} + +// Option is a single key-value configuration pair. +// +// core.Option{Key: "name", Value: "brain"} +// core.Option{Key: "port", Value: 8080} +type Option struct { + Key string + Value any +} + +// Options is a collection of Option items. +// The universal input type for Core operations. 
+// +// opts := core.Options{{Key: "name", Value: "myapp"}} +// name := opts.String("name") +type Options []Option + +// Get retrieves a value by key. +// +// r := opts.Get("name") +// if r.OK { name := r.Value.(string) } +func (o Options) Get(key string) Result { + for _, opt := range o { + if opt.Key == key { + return Result{opt.Value, true} + } + } + return Result{} +} + +// Has returns true if a key exists. +// +// if opts.Has("debug") { ... } +func (o Options) Has(key string) bool { + return o.Get(key).OK +} + +// String retrieves a string value, empty string if missing. +// +// name := opts.String("name") +func (o Options) String(key string) string { + r := o.Get(key) + if !r.OK { + return "" + } + s, _ := r.Value.(string) + return s +} + +// Int retrieves an int value, 0 if missing. +// +// port := opts.Int("port") +func (o Options) Int(key string) int { + r := o.Get(key) + if !r.OK { + return 0 + } + i, _ := r.Value.(int) + return i +} + +// Bool retrieves a bool value, false if missing. +// +// debug := opts.Bool("debug") +func (o Options) Bool(key string) bool { + r := o.Get(key) + if !r.OK { + return false + } + b, _ := r.Value.(bool) + return b +} diff --git a/pkg/lib/workspace/default/.core/reference/runtime.go b/pkg/lib/workspace/default/.core/reference/runtime.go new file mode 100644 index 0000000..952001d --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/runtime.go @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Runtime helpers for the Core framework. +// ServiceRuntime is embedded by consumer services. +// Runtime is the GUI binding container (e.g., Wails). + +package core + +import ( + "context" + "maps" + "slices" +) + +// --- ServiceRuntime (embedded by consumer services) --- + +// ServiceRuntime is embedded in services to provide access to the Core and typed options. +type ServiceRuntime[T any] struct { + core *Core + opts T +} + +// NewServiceRuntime creates a ServiceRuntime for a service constructor. 
+func NewServiceRuntime[T any](c *Core, opts T) *ServiceRuntime[T] { + return &ServiceRuntime[T]{core: c, opts: opts} +} + +func (r *ServiceRuntime[T]) Core() *Core { return r.core } +func (r *ServiceRuntime[T]) Options() T { return r.opts } +func (r *ServiceRuntime[T]) Config() *Config { return r.core.Config() } + +// --- Lifecycle --- + +// ServiceStartup runs OnStart for all registered services that have one. +func (c *Core) ServiceStartup(ctx context.Context, options any) Result { + c.shutdown.Store(false) + c.context, c.cancel = context.WithCancel(ctx) + startables := c.Startables() + if startables.OK { + for _, s := range startables.Value.([]*Service) { + if err := ctx.Err(); err != nil { + return Result{err, false} + } + r := s.OnStart() + if !r.OK { + return r + } + } + } + c.ACTION(ActionServiceStartup{}) + return Result{OK: true} +} + +// ServiceShutdown drains background tasks, then stops all registered services. +func (c *Core) ServiceShutdown(ctx context.Context) Result { + c.shutdown.Store(true) + c.cancel() // signal all context-aware tasks to stop + c.ACTION(ActionServiceShutdown{}) + + // Drain background tasks before stopping services. + done := make(chan struct{}) + go func() { + c.waitGroup.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + return Result{ctx.Err(), false} + } + + // Stop services + var firstErr error + stoppables := c.Stoppables() + if stoppables.OK { + for _, s := range stoppables.Value.([]*Service) { + if err := ctx.Err(); err != nil { + return Result{err, false} + } + r := s.OnStop() + if !r.OK && firstErr == nil { + if e, ok := r.Value.(error); ok { + firstErr = e + } else { + firstErr = E("core.ServiceShutdown", Sprint("service OnStop failed: ", r.Value), nil) + } + } + } + } + if firstErr != nil { + return Result{firstErr, false} + } + return Result{OK: true} +} + +// --- Runtime DTO (GUI binding) --- + +// Runtime is the container for GUI runtimes (e.g., Wails). 
+type Runtime struct { + app any + Core *Core +} + +// ServiceFactory defines a function that creates a Service. +type ServiceFactory func() Result + +// NewWithFactories creates a Runtime with the provided service factories. +func NewWithFactories(app any, factories map[string]ServiceFactory) Result { + c := New(Options{{Key: "name", Value: "core"}}) + c.app.Runtime = app + + names := slices.Sorted(maps.Keys(factories)) + for _, name := range names { + factory := factories[name] + if factory == nil { + continue + } + r := factory() + if !r.OK { + cause, _ := r.Value.(error) + return Result{E("core.NewWithFactories", Concat("factory \"", name, "\" failed"), cause), false} + } + svc, ok := r.Value.(Service) + if !ok { + return Result{E("core.NewWithFactories", Concat("factory \"", name, "\" returned non-Service type"), nil), false} + } + sr := c.Service(name, svc) + if !sr.OK { + return sr + } + } + return Result{&Runtime{app: app, Core: c}, true} +} + +// NewRuntime creates a Runtime with no custom services. +func NewRuntime(app any) Result { + return NewWithFactories(app, map[string]ServiceFactory{}) +} + +func (r *Runtime) ServiceName() string { return "Core" } +func (r *Runtime) ServiceStartup(ctx context.Context, options any) Result { + return r.Core.ServiceStartup(ctx, options) +} +func (r *Runtime) ServiceShutdown(ctx context.Context) Result { + if r.Core != nil { + return r.Core.ServiceShutdown(ctx) + } + return Result{OK: true} +} diff --git a/pkg/lib/workspace/default/.core/reference/service.go b/pkg/lib/workspace/default/.core/reference/service.go new file mode 100644 index 0000000..1e82dd6 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/service.go @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Service registry for the Core framework. 
+// +// Register a service: +// +// c.Service("auth", core.Service{}) +// +// Get a service: +// +// r := c.Service("auth") +// if r.OK { svc := r.Value } + +package core + +// No imports needed — uses package-level string helpers. + +// Service is a managed component with optional lifecycle. +type Service struct { + Name string + Options Options + OnStart func() Result + OnStop func() Result + OnReload func() Result +} + +// serviceRegistry holds registered services. +type serviceRegistry struct { + services map[string]*Service + lockEnabled bool + locked bool +} + +// --- Core service methods --- + +// Service gets or registers a service by name. +// +// c.Service("auth", core.Service{OnStart: startFn}) +// r := c.Service("auth") +func (c *Core) Service(name string, service ...Service) Result { + if len(service) == 0 { + c.Lock("srv").Mutex.RLock() + v, ok := c.services.services[name] + c.Lock("srv").Mutex.RUnlock() + return Result{v, ok} + } + + if name == "" { + return Result{E("core.Service", "service name cannot be empty", nil), false} + } + + c.Lock("srv").Mutex.Lock() + defer c.Lock("srv").Mutex.Unlock() + + if c.services.locked { + return Result{E("core.Service", Concat("service \"", name, "\" not permitted — registry locked"), nil), false} + } + if _, exists := c.services.services[name]; exists { + return Result{E("core.Service", Join(" ", "service", name, "already registered"), nil), false} + } + + srv := &service[0] + srv.Name = name + c.services.services[name] = srv + + return Result{OK: true} +} + +// Services returns all registered service names. 
+// +// names := c.Services() +func (c *Core) Services() []string { + if c.services == nil { + return nil + } + c.Lock("srv").Mutex.RLock() + defer c.Lock("srv").Mutex.RUnlock() + var names []string + for k := range c.services.services { + names = append(names, k) + } + return names +} diff --git a/pkg/lib/workspace/default/.core/reference/string.go b/pkg/lib/workspace/default/.core/reference/string.go new file mode 100644 index 0000000..4c64aa7 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/string.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// String operations for the Core framework. +// Provides safe, predictable string helpers that downstream packages +// use directly — same pattern as Array[T] for slices. + +package core + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// HasPrefix returns true if s starts with prefix. +// +// core.HasPrefix("--verbose", "--") // true +func HasPrefix(s, prefix string) bool { + return strings.HasPrefix(s, prefix) +} + +// HasSuffix returns true if s ends with suffix. +// +// core.HasSuffix("test.go", ".go") // true +func HasSuffix(s, suffix string) bool { + return strings.HasSuffix(s, suffix) +} + +// TrimPrefix removes prefix from s. +// +// core.TrimPrefix("--verbose", "--") // "verbose" +func TrimPrefix(s, prefix string) string { + return strings.TrimPrefix(s, prefix) +} + +// TrimSuffix removes suffix from s. +// +// core.TrimSuffix("test.go", ".go") // "test" +func TrimSuffix(s, suffix string) string { + return strings.TrimSuffix(s, suffix) +} + +// Contains returns true if s contains substr. +// +// core.Contains("hello world", "world") // true +func Contains(s, substr string) bool { + return strings.Contains(s, substr) +} + +// Split splits s by separator. +// +// core.Split("a/b/c", "/") // ["a", "b", "c"] +func Split(s, sep string) []string { + return strings.Split(s, sep) +} + +// SplitN splits s by separator into at most n parts. 
+// +// core.SplitN("key=value=extra", "=", 2) // ["key", "value=extra"] +func SplitN(s, sep string, n int) []string { + return strings.SplitN(s, sep, n) +} + +// Join joins parts with a separator, building via Concat. +// +// core.Join("/", "deploy", "to", "homelab") // "deploy/to/homelab" +// core.Join(".", "cmd", "deploy", "description") // "cmd.deploy.description" +func Join(sep string, parts ...string) string { + if len(parts) == 0 { + return "" + } + result := parts[0] + for _, p := range parts[1:] { + result = Concat(result, sep, p) + } + return result +} + +// Replace replaces all occurrences of old with new in s. +// +// core.Replace("deploy/to/homelab", "/", ".") // "deploy.to.homelab" +func Replace(s, old, new string) string { + return strings.ReplaceAll(s, old, new) +} + +// Lower returns s in lowercase. +// +// core.Lower("HELLO") // "hello" +func Lower(s string) string { + return strings.ToLower(s) +} + +// Upper returns s in uppercase. +// +// core.Upper("hello") // "HELLO" +func Upper(s string) string { + return strings.ToUpper(s) +} + +// Trim removes leading and trailing whitespace. +// +// core.Trim(" hello ") // "hello" +func Trim(s string) string { + return strings.TrimSpace(s) +} + +// RuneCount returns the number of runes (unicode characters) in s. +// +// core.RuneCount("hello") // 5 +// core.RuneCount("🔥") // 1 +func RuneCount(s string) int { + return utf8.RuneCountInString(s) +} + +// NewBuilder returns a new strings.Builder. +// +// b := core.NewBuilder() +// b.WriteString("hello") +// b.String() // "hello" +func NewBuilder() *strings.Builder { + return &strings.Builder{} +} + +// NewReader returns a strings.NewReader for the given string. +// +// r := core.NewReader("hello world") +func NewReader(s string) *strings.Reader { + return strings.NewReader(s) +} + +// Sprint converts any value to its string representation. 
+// +// core.Sprint(42) // "42" +// core.Sprint(err) // "connection refused" +func Sprint(args ...any) string { + return fmt.Sprint(args...) +} + +// Sprintf formats a string with the given arguments. +// +// core.Sprintf("%v=%q", "key", "value") // `key="value"` +func Sprintf(format string, args ...any) string { + return fmt.Sprintf(format, args...) +} + +// Concat joins variadic string parts into one string. +// Hook point for validation, sanitisation, and security checks. +// +// core.Concat("cmd.", "deploy.to.homelab", ".description") +// core.Concat("https://", host, "/api/v1") +func Concat(parts ...string) string { + b := NewBuilder() + for _, p := range parts { + b.WriteString(p) + } + return b.String() +} diff --git a/pkg/lib/workspace/default/.core/reference/task.go b/pkg/lib/workspace/default/.core/reference/task.go new file mode 100644 index 0000000..acdf394 --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/task.go @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Background task dispatch for the Core framework. + +package core + +import ( + "reflect" + "slices" + "strconv" +) + +// TaskState holds background task state. +type TaskState struct { + Identifier string + Task Task + Result any + Error error +} + +// PerformAsync dispatches a task in a background goroutine. 
+func (c *Core) PerformAsync(t Task) Result { + if c.shutdown.Load() { + return Result{} + } + taskID := Concat("task-", strconv.FormatUint(c.taskIDCounter.Add(1), 10)) + if tid, ok := t.(TaskWithIdentifier); ok { + tid.SetTaskIdentifier(taskID) + } + c.ACTION(ActionTaskStarted{TaskIdentifier: taskID, Task: t}) + c.waitGroup.Go(func() { + defer func() { + if rec := recover(); rec != nil { + err := E("core.PerformAsync", Sprint("panic: ", rec), nil) + c.ACTION(ActionTaskCompleted{TaskIdentifier: taskID, Task: t, Result: nil, Error: err}) + } + }() + r := c.PERFORM(t) + var err error + if !r.OK { + if e, ok := r.Value.(error); ok { + err = e + } else { + taskType := reflect.TypeOf(t) + typeName := "" + if taskType != nil { + typeName = taskType.String() + } + err = E("core.PerformAsync", Join(" ", "no handler found for task type", typeName), nil) + } + } + c.ACTION(ActionTaskCompleted{TaskIdentifier: taskID, Task: t, Result: r.Value, Error: err}) + }) + return Result{taskID, true} +} + +// Progress broadcasts a progress update for a background task. +func (c *Core) Progress(taskID string, progress float64, message string, t Task) { + c.ACTION(ActionTaskProgress{TaskIdentifier: taskID, Task: t, Progress: progress, Message: message}) +} + +func (c *Core) Perform(t Task) Result { + c.ipc.taskMu.RLock() + handlers := slices.Clone(c.ipc.taskHandlers) + c.ipc.taskMu.RUnlock() + + for _, h := range handlers { + r := h(c, t) + if r.OK { + return r + } + } + return Result{} +} + +func (c *Core) RegisterAction(handler func(*Core, Message) Result) { + c.ipc.ipcMu.Lock() + c.ipc.ipcHandlers = append(c.ipc.ipcHandlers, handler) + c.ipc.ipcMu.Unlock() +} + +func (c *Core) RegisterActions(handlers ...func(*Core, Message) Result) { + c.ipc.ipcMu.Lock() + c.ipc.ipcHandlers = append(c.ipc.ipcHandlers, handlers...) 
+ c.ipc.ipcMu.Unlock() +} + +func (c *Core) RegisterTask(handler TaskHandler) { + c.ipc.taskMu.Lock() + c.ipc.taskHandlers = append(c.ipc.taskHandlers, handler) + c.ipc.taskMu.Unlock() +} diff --git a/pkg/lib/workspace/default/.core/reference/utils.go b/pkg/lib/workspace/default/.core/reference/utils.go new file mode 100644 index 0000000..038e32e --- /dev/null +++ b/pkg/lib/workspace/default/.core/reference/utils.go @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Utility functions for the Core framework. +// Built on core string.go primitives. + +package core + +import ( + "fmt" + "io" + "os" +) + +// Print writes a formatted line to a writer, defaulting to os.Stdout. +// +// core.Print(nil, "hello %s", "world") // → stdout +// core.Print(w, "port: %d", 8080) // → w +func Print(w io.Writer, format string, args ...any) { + if w == nil { + w = os.Stdout + } + fmt.Fprintf(w, format+"\n", args...) +} + +// JoinPath joins string segments into a path with "/" separator. +// +// core.JoinPath("deploy", "to", "homelab") // → "deploy/to/homelab" +func JoinPath(segments ...string) string { + return Join("/", segments...) +} + +// IsFlag returns true if the argument starts with a dash. +// +// core.IsFlag("--verbose") // true +// core.IsFlag("-v") // true +// core.IsFlag("deploy") // false +func IsFlag(arg string) bool { + return HasPrefix(arg, "-") +} + +// Arg extracts a value from variadic args at the given index. +// Type-checks and delegates to the appropriate typed extractor. +// Returns Result — OK is false if index is out of bounds. +// +// r := core.Arg(0, args...) 
+// if r.OK { path = r.Value.(string) } +func Arg(index int, args ...any) Result { + if index >= len(args) { + return Result{} + } + v := args[index] + switch v.(type) { + case string: + return Result{ArgString(index, args...), true} + case int: + return Result{ArgInt(index, args...), true} + case bool: + return Result{ArgBool(index, args...), true} + default: + return Result{v, true} + } +} + +// ArgString extracts a string at the given index. +// +// name := core.ArgString(0, args...) +func ArgString(index int, args ...any) string { + if index >= len(args) { + return "" + } + s, ok := args[index].(string) + if !ok { + return "" + } + return s +} + +// ArgInt extracts an int at the given index. +// +// port := core.ArgInt(1, args...) +func ArgInt(index int, args ...any) int { + if index >= len(args) { + return 0 + } + i, ok := args[index].(int) + if !ok { + return 0 + } + return i +} + +// ArgBool extracts a bool at the given index. +// +// debug := core.ArgBool(2, args...) +func ArgBool(index int, args ...any) bool { + if index >= len(args) { + return false + } + b, ok := args[index].(bool) + if !ok { + return false + } + return b +} + +// FilterArgs removes empty strings and Go test runner flags from an argument list. +// +// clean := core.FilterArgs(os.Args[1:]) +func FilterArgs(args []string) []string { + var clean []string + for _, a := range args { + if a == "" || HasPrefix(a, "-test.") { + continue + } + clean = append(clean, a) + } + return clean +} + +// ParseFlag parses a single flag argument into key, value, and validity. +// Single dash (-) requires exactly 1 character (letter, emoji, unicode). +// Double dash (--) requires 2+ characters. 
+// +// "-v" → "v", "", true +// "-🔥" → "🔥", "", true +// "--verbose" → "verbose", "", true +// "--port=8080" → "port", "8080", true +// "-verbose" → "", "", false (single dash, 2+ chars) +// "--v" → "", "", false (double dash, 1 char) +// "hello" → "", "", false (not a flag) +func ParseFlag(arg string) (key, value string, valid bool) { + if HasPrefix(arg, "--") { + rest := TrimPrefix(arg, "--") + parts := SplitN(rest, "=", 2) + name := parts[0] + if RuneCount(name) < 2 { + return "", "", false + } + if len(parts) == 2 { + return name, parts[1], true + } + return name, "", true + } + + if HasPrefix(arg, "-") { + rest := TrimPrefix(arg, "-") + parts := SplitN(rest, "=", 2) + name := parts[0] + if RuneCount(name) != 1 { + return "", "", false + } + if len(parts) == 2 { + return name, parts[1], true + } + return name, "", true + } + + return "", "", false +} diff --git a/pkg/lib/workspace/default/.gitignore b/pkg/lib/workspace/default/.gitignore deleted file mode 100644 index cdc6f76..0000000 --- a/pkg/lib/workspace/default/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.idea/ -.vscode/ -*.log -.core/ diff --git a/pkg/lib/workspace/default/CODEX.md.tmpl b/pkg/lib/workspace/default/CODEX.md.tmpl new file mode 100644 index 0000000..8e1bdf2 --- /dev/null +++ b/pkg/lib/workspace/default/CODEX.md.tmpl @@ -0,0 +1,489 @@ +# CODEX.md + +Instructions for Codex when working with code in this workspace. + +## Core Framework + +This project uses `dappco.re/go/core` as its foundation. Core provides primitives that REPLACE standard library and third-party packages. Implementation reference is in `.core/reference/*.go`. 
+ +## Core Struct + +Create a Core instance and access its subsystems: + +```go +c := core.New(core.Options{ + {Key: "name", Value: "my-service"}, +}) +``` + +### Subsystem Accessors + +| Accessor | Type | Purpose | +|----------|------|---------| +| `c.App()` | `*App` | Application identity (Name, Version, Path) | +| `c.Fs()` | `*Fs` | Sandboxed filesystem I/O | +| `c.Config()` | `*Config` | Settings + feature flags | +| `c.Data()` | `*Data` | Embedded content registry (mount/read) | +| `c.Drive()` | `*Drive` | Transport handle registry (API, SSH, MCP) | +| `c.Log()` | `*ErrorLog` | Structured logging with error wrapping | +| `c.Error()` | `*ErrorPanic` | Panic recovery + crash reports | +| `c.Cli()` | `*Cli` | CLI surface (command tree → terminal) | +| `c.IPC()` | `*Ipc` | Message bus (Action/Query/Task) | +| `c.I18n()` | `*I18n` | Internationalisation + locale collection | +| `c.Env("key")` | `string` | Read-only system/environment info | +| `c.Options()` | `*Options` | Input configuration used to create Core | +| `c.Context()` | `context.Context` | Application context (cancelled on shutdown) | + +### Service Lifecycle + +```go +// Register a service with lifecycle hooks +c.Service("cache", core.Service{ + OnStart: func() core.Result { return core.Result{OK: true} }, + OnStop: func() core.Result { return core.Result{OK: true} }, + OnReload: func() core.Result { return core.Result{OK: true} }, +}) + +// Start all services +c.ServiceStartup(ctx, nil) + +// Stop all services +c.ServiceShutdown(ctx) +``` + +### Startable / Stoppable Interfaces + +Services that need lifecycle hooks implement these: + +```go +type Startable interface { + OnStartup(ctx context.Context) error +} + +type Stoppable interface { + OnShutdown(ctx context.Context) error +} +``` + +### Error Logging on Core + +```go +c.LogError(err, "save", "failed to save") // logs + returns Result +c.LogWarn(err, "cache", "cache miss") // logs warning + returns Result +c.Must(err, "init", "critical failure") 
// logs + panics if err != nil +``` + +### Async Tasks + +```go +// Perform synchronously (blocks until handler responds) +r := c.PERFORM(SendEmail{To: "user@example.com"}) + +// Perform asynchronously (returns immediately, runs in background) +r := c.PerformAsync(BuildProject{Repo: "core"}) +// r.Value is the task ID string + +// Report progress +c.Progress(taskID, 0.5, "halfway done", task) + +// Register task handler +c.RegisterTask(func(c *core.Core, t core.Task) core.Result { + switch task := t.(type) { + case BuildProject: + // do work + return core.Result{Value: "built", OK: true} + } + return core.Result{} +}) +``` + +### Environment — use `core.Env()`, never `os.Getenv` for standard dirs + +Env is environment (read-only system facts). Config is ours (mutable app settings). + +```go +// System +core.Env("OS") // "darwin", "linux", "windows" +core.Env("ARCH") // "arm64", "amd64" +core.Env("DS") // "/" or "\" (directory separator) +core.Env("HOSTNAME") // machine name +core.Env("USER") // current user + +// Directories +core.Env("DIR_HOME") // home dir (overridable via CORE_HOME env var) +core.Env("DIR_CONFIG") // OS config dir +core.Env("DIR_CACHE") // OS cache dir +core.Env("DIR_DATA") // OS data dir (platform-specific) +core.Env("DIR_TMP") // temp dir +core.Env("DIR_CWD") // working directory at startup +core.Env("DIR_CODE") // ~/Code +core.Env("DIR_DOWNLOADS") // ~/Downloads + +// Timestamps +core.Env("CORE_START") // RFC3339 UTC boot timestamp +``` + +### Paths — use `core.Path()`, never `filepath.Join` or raw concatenation + +Path() is the single point of responsibility for filesystem paths. Every path goes through it — security fixes happen in one place. 
+ +```go +// WRONG +home, _ := os.UserHomeDir() +configPath := filepath.Join(home, ".config", "app.yaml") +base := filepath.Base(configPath) + +// CORRECT +configPath := core.Path(".config", "app.yaml") // anchored to DIR_HOME +base := core.PathBase(configPath) +``` + +```go +// Relative → anchored to DIR_HOME +core.Path("Code", ".core") // "/Users/snider/Code/.core" +core.Path(".config", "app.yaml") // "/Users/snider/.config/app.yaml" + +// Absolute → pass through (cleaned) +core.Path("/tmp", "workspace") // "/tmp/workspace" + +// No args → DIR_HOME +core.Path() // "/Users/snider" + +// Component helpers +core.PathBase("/a/b/c") // "c" +core.PathDir("/a/b/c") // "/a/b" +core.PathExt("main.go") // ".go" +``` + +## Mandatory Patterns + +### Errors — use `core.E()`, never `fmt.Errorf` or `errors.New` + +```go +// WRONG +return fmt.Errorf("failed to read: %w", err) +return errors.New("not found") + +// CORRECT +return core.E("readConfig", "failed to read config", err) +return core.E("findUser", "user not found", nil) +``` + +### Logging — use `core.Error/Info/Warn/Debug`, never `log.*` or `fmt.Print*` + +```go +// WRONG +log.Printf("starting server on %s", addr) +fmt.Fprintf(os.Stderr, "error: %v\n", err) + +// CORRECT +core.Info("starting server", "addr", addr) +core.Error("operation failed", "err", err) +``` + +### Filesystem — use `core.Fs{}` + `core.Path()`, never `os.*` or `filepath.*` + +```go +var fs = &core.Fs{} + +// Build paths with Path() — never raw concatenation +configPath := core.Path(".config", "app.yaml") + +// Read/write through Fs — never os.ReadFile/WriteFile +r := fs.Read(configPath) +if !r.OK { return r } +content := r.Value.(string) + +fs.Write(configPath, content) +fs.EnsureDir(core.Path(".config")) + +// File checks — never os.Stat +fs.Exists(path) // bool +fs.IsFile(path) // bool +fs.IsDir(path) // bool + +// Directory listing — never os.ReadDir +r := fs.List(dir) // Result{[]os.DirEntry, true} + +// Append — never os.OpenFile +r := 
fs.Append(logPath) // Result{*os.File, true} + +// Delete — never os.Remove +fs.Delete(path) // Result +``` + +### Returns — use `core.Result`, never `(value, error)` + +```go +// WRONG +func LoadConfig(path string) (string, error) { + data, err := os.ReadFile(path) + if err != nil { return "", err } + return string(data), nil +} + +// CORRECT +func LoadConfig(path string) core.Result { + return fs.Read(path) +} +``` + +### Strings — use `core.*`, never `strings.*` or `fmt.Sprintf` + +| Do NOT use | Use instead | +|------------|-------------| +| `strings.Contains` | `core.Contains` | +| `strings.HasPrefix` | `core.HasPrefix` | +| `strings.HasSuffix` | `core.HasSuffix` | +| `strings.TrimSpace` | `core.Trim` | +| `strings.TrimPrefix` | `core.TrimPrefix` | +| `strings.TrimSuffix` | `core.TrimSuffix` | +| `strings.Split` | `core.Split` | +| `strings.SplitN` | `core.SplitN` | +| `strings.Join(parts, sep)` | `core.Join(sep, parts...)` | +| `strings.ReplaceAll` | `core.Replace` | +| `strings.ToLower` | `core.Lower` | +| `strings.ToUpper` | `core.Upper` | +| `strings.NewReader` | `core.NewReader` | +| `strings.Builder{}` | `core.NewBuilder()` | +| `fmt.Sprintf` | `core.Sprintf` | +| `fmt.Sprint` | `core.Sprint` | + +### Imports — alias stdlib `io` as `goio` + +```go +import goio "io" +``` + +### Comments — usage examples, not descriptions + +```go +// WRONG +// LoadConfig loads configuration from a file path. + +// CORRECT +// LoadConfig reads and parses a YAML configuration file. 
+// +// r := LoadConfig("/home/user/.core/agents.yaml") +// if r.OK { cfg := r.Value.(*AgentsConfig) } +``` + +### Names — predictable, never abbreviated + +``` +Config not Cfg +Service not Srv +Options not Opts +Error not Err (as subsystem name) +``` + +### UK English in comments + +``` +initialise not initialize +colour not color +organisation not organization +serialise not serialize +``` + +### Compile-time interface assertions + +```go +var _ mcp.Subsystem = (*MySubsystem)(nil) +``` + +### Keyed struct literals + +```go +// WRONG +core.Result{err, false} + +// CORRECT +core.Result{Value: err, OK: false} +``` + +### Embedded content — use `core.Mount` + `core.Extract`, never raw `embed.FS` + +```go +// WRONG +//go:embed templates +var templatesFS embed.FS +data, _ := templatesFS.ReadFile("templates/config.yaml") + +// CORRECT +//go:embed templates +var templatesFS embed.FS +var templates = mustMount(templatesFS, "templates") + +func mustMount(fsys embed.FS, basedir string) *core.Embed { + r := core.Mount(fsys, basedir) + if !r.OK { panic(r.Value) } + return r.Value.(*core.Embed) +} + +r := templates.ReadString("config.yaml") +if r.OK { content := r.Value.(string) } + +// Extract template directory with data substitution +core.Extract(templates.FS(), targetDir, data) +``` + +### Error wrapping — use `core.Wrap`, never manual chaining + +```go +// WRONG +return fmt.Errorf("save failed: %w", err) + +// CORRECT +return core.Wrap(err, "saveConfig", "failed to save config") +``` + +### Error codes — use `core.WrapCode` for machine-readable errors + +```go +return core.WrapCode(err, "VALIDATION_ERROR", "user.Validate", "invalid email") +var ErrNotFound = core.NewCode("NOT_FOUND", "resource not found") +``` + +### Error introspection + +```go +core.Operation(err) // extract operation name +core.ErrorCode(err) // extract error code +core.ErrorMessage(err) // extract message +core.Root(err) // root cause +core.StackTrace(err) // logical stack trace +core.Is(err, target) 
// errors.Is wrapper +core.As(err, &target) // errors.As wrapper +``` + +### Formatted output — use `core.Print`, never `fmt.Fprintf` + +```go +// WRONG +fmt.Fprintf(os.Stderr, "server on %s\n", addr) +fmt.Println("done") + +// CORRECT +core.Print(os.Stderr, "server on %s", addr) // writer + format +core.Print(nil, "done") // nil = stdout +``` + +### Arrays — use `core.Array[T]`, never manual slice management + +```go +arr := core.NewArray[string]("a", "b", "c") +arr.AddUnique("d") +arr.Contains("a") // true +arr.Filter(func(s string) bool { return s != "b" }) +arr.Deduplicate() +``` + +### Config — use `core.Config`, never raw maps + +```go +c.Config().Set("port", 8080) +port := c.Config().Int("port") +c.Config().Enable("debug") +if c.Config().Enabled("debug") { ... } +``` + +### IPC — use `core.Action/Query/Perform` for inter-service communication + +```go +// Fire-and-forget broadcast +c.ACTION(MyEvent{Data: "hello"}) + +// Query first responder +r := c.QUERY(FindUser{ID: 123}) +if r.OK { user := r.Value.(*User) } + +// Perform task (side effects) +r := c.PERFORM(SendEmail{To: "user@example.com"}) + +// Register handler +c.RegisterAction(func(c *core.Core, msg core.Message) core.Result { + switch m := msg.(type) { + case MyEvent: + core.Info("received event", "data", m.Data) + } + return core.Result{OK: true} +}) +``` + +### Services — use `c.Service()` DTO pattern + +```go +c.Service("cache", core.Service{ + OnStart: func() core.Result { return core.Result{OK: true} }, + OnStop: func() core.Result { return core.Result{OK: true} }, +}) + +r := c.Service("cache") +if r.OK { svc := r.Value.(*core.Service) } +``` + +### Commands — use `c.Command()` path-based registration + +```go +c.Command("deploy", core.Command{ + Description: "Deploy to production", + Action: func(opts core.Options) core.Result { + target := opts.String("target") + return core.Result{Value: "deployed to " + target, OK: true} + }, +}) + +// Nested commands use path notation 
+c.Command("deploy/to/homelab", core.Command{...}) + +// Run CLI +c.Cli().Run() +``` + +### Drive — use `c.Drive()` for transport handles + +```go +c.Drive().New(core.Options{ + {Key: "name", Value: "api"}, + {Key: "transport", Value: "https://api.lthn.ai"}, +}) + +r := c.Drive().Get("api") +if r.OK { handle := r.Value.(*core.DriveHandle) } +``` + +### I18n — use `c.I18n()` for translations + +```go +r := c.I18n().Translate("greeting", "name", "World") +if r.OK { text := r.Value.(string) } + +c.I18n().SetLanguage("en-GB") +``` + +## What NOT to import + +| Do NOT import | Use instead | +|---------------|-------------| +| `fmt` | `core.Sprintf`, `core.Print` | +| `log` | `core.Error`, `core.Info` | +| `strings` | `core.Contains`, `core.Split` etc | +| `errors` | `core.E`, `core.Wrap` | +| `path/filepath` | `core.Path`, `core.PathBase`, `core.PathDir`, `core.PathExt` | +| `io/ioutil` | `core.Fs{}` | +| `os` (file ops) | `core.Fs{}` | +| `os.UserHomeDir` | `core.Env("DIR_HOME")` | +| `os.Getenv` (standard dirs) | `core.Env("DIR_CONFIG")` etc | +| `runtime.GOOS` | `core.Env("OS")` | +| `runtime.GOARCH` | `core.Env("ARCH")` | + +Acceptable stdlib: `os.Exit`, `os.Stderr`, `os.Getenv` (non-standard keys), `context`, `sync`, `time`, `net/http`, `encoding/json`. + +## Build & Test + +```bash +go build ./... +go test ./... +go vet ./... +``` diff --git a/pkg/lib/workspace/default/go.work.tmpl b/pkg/lib/workspace/default/go.work.tmpl new file mode 100644 index 0000000..e69b5f9 --- /dev/null +++ b/pkg/lib/workspace/default/go.work.tmpl @@ -0,0 +1,3 @@ +go 1.26.0 + +use ./repo diff --git a/pkg/lib/workspace/review/PROMPT.md.tmpl b/pkg/lib/workspace/review/PROMPT.md.tmpl new file mode 100644 index 0000000..8e1a6ad --- /dev/null +++ b/pkg/lib/workspace/review/PROMPT.md.tmpl @@ -0,0 +1,14 @@ +Read CLAUDE.md for review instructions. Read CODEX.md for the AX conventions to audit against. + +## Mode: AUDIT ONLY + +You are a reviewer. You do NOT fix code. You do NOT commit. + +1. 
Read every .go file in cmd/ and pkg/ +2. Check each against the patterns in CODEX.md +3. Report findings to stdout with: severity (critical/high/medium/low), file:line, one-sentence description +4. Group by package + +If you find merge conflicts, report them as critical findings — do not resolve them. + +Do NOT run the closeout sequence. Do NOT use code-review agents. Do NOT commit. Report and stop. diff --git a/pkg/monitor/harvest.go b/pkg/monitor/harvest.go index 642b2d4..c1049cc 100644 --- a/pkg/monitor/harvest.go +++ b/pkg/monitor/harvest.go @@ -12,15 +12,12 @@ package monitor import ( "context" "encoding/json" - "fmt" - "os" "os/exec" "path/filepath" - "strings" + "strconv" "dappco.re/go/agent/pkg/agentic" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" ) // harvestResult tracks what happened during harvest. @@ -35,10 +32,7 @@ type harvestResult struct { // branches back to the source repos. Returns a summary message. func (m *Subsystem) harvestCompleted() string { wsRoot := agentic.WorkspaceRoot() - entries, err := filepath.Glob(filepath.Join(wsRoot, "*/status.json")) - if err != nil { - return "" - } + entries := workspaceStatusPaths(wsRoot) var harvested []harvestResult @@ -57,7 +51,7 @@ func (m *Subsystem) harvestCompleted() string { var parts []string for _, h := range harvested { if h.rejected != "" { - parts = append(parts, fmt.Sprintf("%s: REJECTED (%s)", h.repo, h.rejected)) + parts = append(parts, core.Sprintf("%s: REJECTED (%s)", h.repo, h.rejected)) if m.notifier != nil { m.notifier.ChannelSend(context.Background(), "harvest.rejected", map[string]any{ "repo": h.repo, @@ -66,7 +60,7 @@ func (m *Subsystem) harvestCompleted() string { }) } } else { - parts = append(parts, fmt.Sprintf("%s: ready-for-review %s (%d files)", h.repo, h.branch, h.files)) + parts = append(parts, core.Sprintf("%s: ready-for-review %s (%d files)", h.repo, h.branch, h.files)) if m.notifier != nil { 
m.notifier.ChannelSend(context.Background(), "harvest.complete", map[string]any{ "repo": h.repo, @@ -76,13 +70,17 @@ func (m *Subsystem) harvestCompleted() string { } } } - return "Harvested: " + strings.Join(parts, ", ") + return core.Concat("Harvested: ", core.Join(", ", parts...)) } // harvestWorkspace checks a single workspace and pushes if ready. func (m *Subsystem) harvestWorkspace(wsDir string) *harvestResult { - data, err := coreio.Local.Read(filepath.Join(wsDir, "status.json")) - if err != nil { + r := fs.Read(workspaceStatusPath(wsDir)) + if !r.OK { + return nil + } + statusData, ok := resultString(r) + if !ok { return nil } @@ -91,7 +89,7 @@ func (m *Subsystem) harvestWorkspace(wsDir string) *harvestResult { Repo string `json:"repo"` Branch string `json:"branch"` } - if json.Unmarshal([]byte(data), &st) != nil { + if json.Unmarshal([]byte(statusData), &st) != nil { return nil } @@ -100,8 +98,8 @@ func (m *Subsystem) harvestWorkspace(wsDir string) *harvestResult { return nil } - srcDir := filepath.Join(wsDir, "src") - if _, err := os.Stat(srcDir); err != nil { + srcDir := core.Concat(wsDir, "/src") + if !fs.IsDir(srcDir) { return nil } @@ -146,7 +144,7 @@ func detectBranch(srcDir string) string { if err != nil { return "" } - return strings.TrimSpace(string(out)) + return core.Trim(string(out)) } // defaultBranch detects the default branch of the repo (main, master, etc.). 
@@ -155,10 +153,10 @@ func defaultBranch(srcDir string) string { cmd := exec.Command("git", "symbolic-ref", "refs/remotes/origin/HEAD", "--short") cmd.Dir = srcDir if out, err := cmd.Output(); err == nil { - ref := strings.TrimSpace(string(out)) + ref := core.Trim(string(out)) // returns "origin/main" — strip prefix - if strings.HasPrefix(ref, "origin/") { - return strings.TrimPrefix(ref, "origin/") + if core.HasPrefix(ref, "origin/") { + return core.TrimPrefix(ref, "origin/") } return ref } @@ -176,24 +174,26 @@ func defaultBranch(srcDir string) string { // countUnpushed returns the number of commits ahead of origin's default branch. func countUnpushed(srcDir, branch string) int { base := defaultBranch(srcDir) - cmd := exec.Command("git", "rev-list", "--count", "origin/"+base+".."+branch) + cmd := exec.Command("git", "rev-list", "--count", core.Concat("origin/", base, "..", branch)) cmd.Dir = srcDir out, err := cmd.Output() if err != nil { - cmd2 := exec.Command("git", "log", "--oneline", base+".."+branch) + cmd2 := exec.Command("git", "log", "--oneline", core.Concat(base, "..", branch)) cmd2.Dir = srcDir out2, err2 := cmd2.Output() if err2 != nil { return 0 } - lines := strings.Split(strings.TrimSpace(string(out2)), "\n") + lines := core.Split(core.Trim(string(out2)), "\n") if len(lines) == 1 && lines[0] == "" { return 0 } return len(lines) } - var count int - fmt.Sscanf(strings.TrimSpace(string(out)), "%d", &count) + count, err := strconv.Atoi(core.Trim(string(out))) + if err != nil { + return 0 + } return count } @@ -203,7 +203,7 @@ func countUnpushed(srcDir, branch string) int { func checkSafety(srcDir string) string { // Check all changed files — added, modified, renamed base := defaultBranch(srcDir) - cmd := exec.Command("git", "diff", "--name-only", base+"...HEAD") + cmd := exec.Command("git", "diff", "--name-only", core.Concat(base, "...HEAD")) cmd.Dir = srcDir out, err := cmd.Output() if err != nil { @@ -220,20 +220,21 @@ func checkSafety(srcDir string) 
string { ".db": true, ".sqlite": true, ".sqlite3": true, } - for _, file := range strings.Split(strings.TrimSpace(string(out)), "\n") { + for _, file := range core.Split(core.Trim(string(out)), "\n") { if file == "" { continue } - ext := strings.ToLower(filepath.Ext(file)) + ext := core.Lower(filepath.Ext(file)) if binaryExts[ext] { - return fmt.Sprintf("binary file added: %s", file) + return core.Sprintf("binary file added: %s", file) } // Check file size (reject > 1MB) - fullPath := filepath.Join(srcDir, file) - info, err := os.Stat(fullPath) - if err == nil && info.Size() > 1024*1024 { - return fmt.Sprintf("large file: %s (%d bytes)", file, info.Size()) + fullPath := core.Concat(srcDir, "/", file) + if stat := fs.Stat(fullPath); stat.OK { + if info, ok := stat.Value.(interface{ Size() int64 }); ok && info.Size() > 1024*1024 { + return core.Sprintf("large file: %s (%d bytes)", file, info.Size()) + } } } @@ -243,13 +244,13 @@ func checkSafety(srcDir string) string { // countChangedFiles returns the number of files changed vs the default branch. func countChangedFiles(srcDir string) int { base := defaultBranch(srcDir) - cmd := exec.Command("git", "diff", "--name-only", base+"...HEAD") + cmd := exec.Command("git", "diff", "--name-only", core.Concat(base, "...HEAD")) cmd.Dir = srcDir out, err := cmd.Output() if err != nil { return 0 } - lines := strings.Split(strings.TrimSpace(string(out)), "\n") + lines := core.Split(core.Trim(string(out)), "\n") if len(lines) == 1 && lines[0] == "" { return 0 } @@ -262,19 +263,23 @@ func pushBranch(srcDir, branch string) error { cmd.Dir = srcDir out, err := cmd.CombinedOutput() if err != nil { - return coreerr.E("harvest.pushBranch", strings.TrimSpace(string(out)), err) + return core.E("harvest.pushBranch", core.Trim(string(out)), err) } return nil } // updateStatus updates the workspace status.json. 
func updateStatus(wsDir, status, question string) { - data, err := coreio.Local.Read(filepath.Join(wsDir, "status.json")) - if err != nil { + r := fs.Read(workspaceStatusPath(wsDir)) + if !r.OK { + return + } + statusData, ok := resultString(r) + if !ok { return } var st map[string]any - if json.Unmarshal([]byte(data), &st) != nil { + if json.Unmarshal([]byte(statusData), &st) != nil { return } st["status"] = status @@ -284,5 +289,5 @@ func updateStatus(wsDir, status, question string) { delete(st, "question") // clear stale question from previous state } updated, _ := json.MarshalIndent(st, "", " ") - coreio.Local.Write(filepath.Join(wsDir, "status.json"), string(updated)) + fs.Write(workspaceStatusPath(wsDir), string(updated)) } diff --git a/pkg/monitor/monitor.go b/pkg/monitor/monitor.go index 4f14c14..6562eef 100644 --- a/pkg/monitor/monitor.go +++ b/pkg/monitor/monitor.go @@ -12,28 +12,93 @@ package monitor import ( "context" "encoding/json" - "fmt" "net/http" "net/url" "os" "path/filepath" - "strings" "sync" "time" "dappco.re/go/agent/pkg/agentic" - coreio "dappco.re/go/core/io" - coreerr "dappco.re/go/core/log" + core "dappco.re/go/core" + coremcp "forge.lthn.ai/core/mcp/pkg/mcp" "github.com/modelcontextprotocol/go-sdk/mcp" ) +// fs provides unrestricted filesystem access (root "/" = no sandbox). +// +// r := fs.Read(core.Concat(wsRoot, "/", name, "/status.json")) +// if text, ok := resultString(r); ok { json.Unmarshal([]byte(text), &st) } +var fs = agentic.LocalFs() + +// workspaceStatusPaths returns all status.json files across both old and new workspace layouts. +// Old: workspace/{name}/status.json (1 level) +// New: workspace/{org}/{repo}/{identifier}/status.json (3 levels) +func workspaceStatusPaths(wsRoot string) []string { + old := core.PathGlob(core.Concat(wsRoot, "/*/status.json")) + new := core.PathGlob(core.Concat(wsRoot, "/*/*/*/status.json")) + return append(old, new...) 
+} + +func workspaceStatusPath(wsDir string) string { + return core.Concat(wsDir, "/status.json") +} + +func brainKeyPath(home string) string { + return filepath.Join(home, ".claude", "brain.key") +} + +func monitorPath(path string) string { + ds := core.Env("DS") + return core.Replace(core.Replace(path, "\\", ds), "/", ds) +} + +func monitorHomeDir() string { + if d := core.Env("CORE_HOME"); d != "" { + return d + } + return core.Env("DIR_HOME") +} + +func monitorAPIURL() string { + if u := core.Env("CORE_API_URL"); u != "" { + return u + } + return "https://api.lthn.sh" +} + +func monitorBrainKey() string { + if k := core.Env("CORE_BRAIN_KEY"); k != "" { + return k + } + if r := fs.Read(brainKeyPath(monitorHomeDir())); r.OK { + if value, ok := resultString(r); ok { + return core.Trim(value) + } + } + return "" +} + +func resultString(r core.Result) (string, bool) { + value, ok := r.Value.(string) + if !ok { + return "", false + } + return value, true +} + // ChannelNotifier pushes events to connected MCP sessions. -// Matches the Notifier interface in core/mcp without importing it. +// +// mon.SetNotifier(notifier) type ChannelNotifier interface { ChannelSend(ctx context.Context, channel string, data any) } // Subsystem implements mcp.Subsystem for background monitoring. 
+// +// mon := monitor.New(monitor.Options{Interval: 2 * time.Minute}) +// mon.SetNotifier(notifier) +// mon.Start(ctx) type Subsystem struct { server *mcp.Server notifier ChannelNotifier @@ -42,7 +107,9 @@ type Subsystem struct { wg sync.WaitGroup // Track last seen state to only notify on changes + lastCompletedCount int // completed workspaces seen on the last scan seenCompleted map[string]bool // workspace names we've already notified about + seenRunning map[string]bool // workspace names we've already sent start notification for completionsSeeded bool // true after first completions check lastInboxMaxID int // highest message ID seen inboxSeeded bool // true after first inbox check @@ -53,18 +120,27 @@ type Subsystem struct { poke chan struct{} } +var _ coremcp.Subsystem = (*Subsystem)(nil) +var _ agentic.CompletionNotifier = (*Subsystem)(nil) + // SetNotifier wires up channel event broadcasting. +// +// mon.SetNotifier(notifier) func (m *Subsystem) SetNotifier(n ChannelNotifier) { m.notifier = n } -// Options configures the monitor. +// Options configures the monitor interval. +// +// monitor.New(monitor.Options{Interval: 30 * time.Second}) type Options struct { // Interval between checks (default: 2 minutes) Interval time.Duration } // New creates a monitor subsystem. +// +// mon := monitor.New(monitor.Options{Interval: 30 * time.Second}) func New(opts ...Options) *Subsystem { interval := 2 * time.Minute if len(opts) > 0 && opts[0].Interval > 0 { @@ -80,6 +156,7 @@ func New(opts ...Options) *Subsystem { interval: interval, poke: make(chan struct{}, 1), seenCompleted: make(map[string]bool), + seenRunning: make(map[string]bool), } } @@ -90,8 +167,14 @@ func (m *Subsystem) debugChannel(msg string) { } } +// Name returns the subsystem identifier used by MCP registration. +// +// mon.Name() // "monitor" func (m *Subsystem) Name() string { return "monitor" } +// RegisterTools binds the monitor resource to an MCP server. 
+// +// mon.RegisterTools(server) func (m *Subsystem) RegisterTools(server *mcp.Server) { m.server = server @@ -104,13 +187,14 @@ func (m *Subsystem) RegisterTools(server *mcp.Server) { }, m.agentStatusResource) } -// Start begins the background monitoring loop. -// Called after the MCP server is running and sessions are active. +// Start begins the background monitoring loop after MCP startup. +// +// mon.Start(ctx) func (m *Subsystem) Start(ctx context.Context) { monCtx, cancel := context.WithCancel(ctx) m.cancel = cancel - fmt.Fprintf(os.Stderr, "monitor: started (interval=%s, notifier=%v)\n", m.interval, m.notifier != nil) + core.Print(os.Stderr, "monitor: started (interval=%s, notifier=%v)", m.interval, m.notifier != nil) m.wg.Add(1) go func() { @@ -119,7 +203,9 @@ func (m *Subsystem) Start(ctx context.Context) { }() } -// Shutdown stops the monitoring loop. +// Shutdown stops the monitoring loop and waits for it to exit. +// +// _ = mon.Shutdown(ctx) func (m *Subsystem) Shutdown(_ context.Context) error { if m.cancel != nil { m.cancel() @@ -128,8 +214,7 @@ func (m *Subsystem) Shutdown(_ context.Context) error { return nil } -// Poke triggers an immediate check cycle. Non-blocking — if a poke is already -// pending it's a no-op. Call this from dispatch when an agent completes. +// Poke triggers an immediate check cycle (legacy — prefer AgentStarted/AgentCompleted). func (m *Subsystem) Poke() { select { case m.poke <- struct{}{}: @@ -137,6 +222,73 @@ func (m *Subsystem) Poke() { } } +// AgentStarted is called when an agent spawns. +// No individual notification — fleet status is checked on completion. +// +// mon.AgentStarted("codex:gpt-5.3-codex-spark", "go-io", "core/go-io/task-5") +func (m *Subsystem) AgentStarted(agent, repo, workspace string) { + // No-op — we only notify on failures and queue drain +} + +// AgentCompleted is called when an agent finishes. +// Only sends notifications for failures. Sends "queue.drained" when all work is done. 
+// +// mon.AgentCompleted("codex", "go-io", "core/go-io/task-5", "completed") +func (m *Subsystem) AgentCompleted(agent, repo, workspace, status string) { + m.mu.Lock() + m.seenCompleted[workspace] = true + m.mu.Unlock() + + if m.notifier != nil { + // Only notify on failures — those need attention + if status == "failed" || status == "blocked" { + m.notifier.ChannelSend(context.Background(), "agent.failed", map[string]any{ + "repo": repo, + "agent": agent, + "status": status, + }) + } + } + + // Check if queue is drained (0 running + 0 queued) + m.Poke() + go m.checkIdleAfterDelay() +} + +// checkIdleAfterDelay waits briefly then checks if the fleet is idle. +// Sends a single "queue.drained" notification when all work stops. +func (m *Subsystem) checkIdleAfterDelay() { + time.Sleep(5 * time.Second) // wait for runner to fill slots + if m.notifier == nil { + return + } + + // Quick count — scan for running/queued + running := 0 + queued := 0 + wsRoot := agentic.WorkspaceRoot() + old := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json")) + deep := core.PathGlob(core.JoinPath(wsRoot, "*", "*", "*", "status.json")) + for _, path := range append(old, deep...) { + r := fs.Read(path) + if !r.OK { + continue + } + s := r.Value.(string) + if core.Contains(s, `"status":"running"`) { + running++ + } else if core.Contains(s, `"status":"queued"`) { + queued++ + } + } + + if running == 0 && queued == 0 { + m.notifier.ChannelSend(context.Background(), "queue.drained", map[string]any{ + "message": "all work complete", + }) + } +} + func (m *Subsystem) loop(ctx context.Context) { // Initial check after short delay (let server fully start) select { @@ -194,7 +346,7 @@ func (m *Subsystem) check(ctx context.Context) { return } - combined := strings.Join(messages, "\n") + combined := core.Join("\n", messages...) 
m.notify(ctx, combined) // Notify resource subscribers that agent status changed @@ -210,20 +362,22 @@ func (m *Subsystem) check(ctx context.Context) { // don't suppress future notifications. func (m *Subsystem) checkCompletions() string { wsRoot := agentic.WorkspaceRoot() - entries, err := filepath.Glob(filepath.Join(wsRoot, "*/status.json")) - if err != nil { - return "" - } + entries := workspaceStatusPaths(wsRoot) running := 0 queued := 0 + completed := 0 var newlyCompleted []string m.mu.Lock() seeded := m.completionsSeeded for _, entry := range entries { - data, err := coreio.Local.Read(entry) - if err != nil { + r := fs.Read(entry) + if !r.OK { + continue + } + entryData, ok := resultString(r) + if !ok { continue } var st struct { @@ -231,33 +385,44 @@ func (m *Subsystem) checkCompletions() string { Repo string `json:"repo"` Agent string `json:"agent"` } - if json.Unmarshal([]byte(data), &st) != nil { + if json.Unmarshal([]byte(entryData), &st) != nil { continue } - wsName := filepath.Base(filepath.Dir(entry)) + // Use full relative path as dedup key — "core/go/main" not just "main" + wsDir := filepath.Dir(entry) + wsName := wsDir + if len(wsDir) > len(wsRoot)+1 { + wsName = wsDir[len(wsRoot)+1:] + } switch st.Status { case "completed": + completed++ if !m.seenCompleted[wsName] { m.seenCompleted[wsName] = true if seeded { - newlyCompleted = append(newlyCompleted, fmt.Sprintf("%s (%s)", st.Repo, st.Agent)) + newlyCompleted = append(newlyCompleted, core.Sprintf("%s (%s)", st.Repo, st.Agent)) } } case "running": running++ + if !m.seenRunning[wsName] && seeded { + m.seenRunning[wsName] = true + // No individual start notification — too noisy + } case "queued": queued++ case "blocked", "failed": if !m.seenCompleted[wsName] { m.seenCompleted[wsName] = true if seeded { - newlyCompleted = append(newlyCompleted, fmt.Sprintf("%s (%s) [%s]", st.Repo, st.Agent, st.Status)) + newlyCompleted = append(newlyCompleted, core.Sprintf("%s (%s) [%s]", st.Repo, st.Agent, 
st.Status)) } } } } + m.lastCompletedCount = completed m.completionsSeeded = true m.mu.Unlock() @@ -265,22 +430,20 @@ func (m *Subsystem) checkCompletions() string { return "" } - // Push channel events - if m.notifier != nil { - m.notifier.ChannelSend(context.Background(), "agent.complete", map[string]any{ - "count": len(newlyCompleted), - "completed": newlyCompleted, - "running": running, - "queued": queued, + // Only notify on queue drain (0 running + 0 queued) — individual completions are noise + if m.notifier != nil && running == 0 && queued == 0 { + m.notifier.ChannelSend(context.Background(), "queue.drained", map[string]any{ + "completed": len(newlyCompleted), + "message": "all work complete", }) } - msg := fmt.Sprintf("%d agent(s) completed", len(newlyCompleted)) + msg := core.Sprintf("%d agent(s) completed", len(newlyCompleted)) if running > 0 { - msg += fmt.Sprintf(", %d still running", running) + msg = core.Concat(msg, core.Sprintf(", %d still running", running)) } if queued > 0 { - msg += fmt.Sprintf(", %d queued", queued) + msg = core.Concat(msg, core.Sprintf(", %d queued", queued)) } return msg } @@ -290,12 +453,16 @@ func (m *Subsystem) checkInbox() string { apiKeyStr := os.Getenv("CORE_BRAIN_KEY") if apiKeyStr == "" { home, _ := os.UserHomeDir() - keyFile := filepath.Join(home, ".claude", "brain.key") - data, err := coreio.Local.Read(keyFile) - if err != nil { + keyFile := brainKeyPath(home) + r := fs.Read(keyFile) + if !r.OK { return "" } - apiKeyStr = data + value, ok := resultString(r) + if !ok { + return "" + } + apiKeyStr = value } // Call the API to check inbox @@ -303,11 +470,11 @@ func (m *Subsystem) checkInbox() string { if apiURL == "" { apiURL = "https://api.lthn.sh" } - req, err := http.NewRequest("GET", apiURL+"/v1/messages/inbox?agent="+url.QueryEscape(agentic.AgentName()), nil) + req, err := http.NewRequest("GET", core.Concat(apiURL, "/v1/messages/inbox?agent=", url.QueryEscape(agentic.AgentName())), nil) if err != nil { return "" } - 
req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(apiKeyStr)) + req.Header.Set("Authorization", core.Concat("Bearer ", core.Trim(apiKeyStr))) client := &http.Client{Timeout: 10 * time.Second} httpResp, err := client.Do(req) @@ -337,7 +504,6 @@ func (m *Subsystem) checkInbox() string { // Find max ID, count unread, collect new messages maxID := 0 unread := 0 - senders := make(map[string]int) m.mu.Lock() prevMaxID := m.lastInboxMaxID @@ -358,9 +524,6 @@ func (m *Subsystem) checkInbox() string { } if !msg.Read { unread++ - if msg.From != "" { - senders[msg.From]++ - } } // Collect messages newer than what we've seen if msg.ID > prevMaxID { @@ -397,7 +560,7 @@ func (m *Subsystem) checkInbox() string { }) } - return fmt.Sprintf("%d unread message(s) in inbox", unread) + return core.Sprintf("%d unread message(s) in inbox", unread) } // notify sends a log notification to all connected MCP sessions. @@ -419,10 +582,7 @@ func (m *Subsystem) notify(ctx context.Context, message string) { // agentStatusResource returns current workspace status as a JSON resource. 
func (m *Subsystem) agentStatusResource(ctx context.Context, req *mcp.ReadResourceRequest) (*mcp.ReadResourceResult, error) { wsRoot := agentic.WorkspaceRoot() - entries, err := filepath.Glob(filepath.Join(wsRoot, "*/status.json")) - if err != nil { - return nil, coreerr.E("monitor.agentStatus", "failed to scan workspaces", err) - } + entries := workspaceStatusPaths(wsRoot) type wsInfo struct { Name string `json:"name"` @@ -434,8 +594,12 @@ func (m *Subsystem) agentStatusResource(ctx context.Context, req *mcp.ReadResour var workspaces []wsInfo for _, entry := range entries { - data, err := coreio.Local.Read(entry) - if err != nil { + r := fs.Read(entry) + if !r.OK { + continue + } + entryData, ok := resultString(r) + if !ok { continue } var st struct { @@ -444,11 +608,16 @@ func (m *Subsystem) agentStatusResource(ctx context.Context, req *mcp.ReadResour Agent string `json:"agent"` PRURL string `json:"pr_url"` } - if json.Unmarshal([]byte(data), &st) != nil { + if json.Unmarshal([]byte(entryData), &st) != nil { continue } + entryDir := filepath.Dir(entry) + entryName := entryDir + if len(entryDir) > len(wsRoot)+1 { + entryName = entryDir[len(wsRoot)+1:] + } workspaces = append(workspaces, wsInfo{ - Name: filepath.Base(filepath.Dir(entry)), + Name: entryName, Status: st.Status, Repo: st.Repo, Agent: st.Agent, @@ -456,7 +625,10 @@ func (m *Subsystem) agentStatusResource(ctx context.Context, req *mcp.ReadResour }) } - result, _ := json.Marshal(workspaces) + result, err := json.Marshal(workspaces) + if err != nil { + return nil, core.E("monitor.agentStatus", "failed to encode workspace status", err) + } return &mcp.ReadResourceResult{ Contents: []*mcp.ResourceContents{ { diff --git a/pkg/monitor/monitor_test.go b/pkg/monitor/monitor_test.go index 9cfc4ab..fb41d25 100644 --- a/pkg/monitor/monitor_test.go +++ b/pkg/monitor/monitor_test.go @@ -50,6 +50,7 @@ func writeWorkspaceStatus(t *testing.T, wsRoot, name string, fields map[string]a // --- New --- func 
TestNew_Good_Defaults(t *testing.T) { + t.Setenv("MONITOR_INTERVAL", "") mon := New() assert.Equal(t, 2*time.Minute, mon.interval) assert.NotNil(t, mon.poke) @@ -61,6 +62,7 @@ func TestNew_Good_CustomInterval(t *testing.T) { } func TestNew_Bad_ZeroInterval(t *testing.T) { + t.Setenv("MONITOR_INTERVAL", "") mon := New(Options{Interval: 0}) assert.Equal(t, 2*time.Minute, mon.interval) } @@ -125,6 +127,13 @@ func TestCheckCompletions_Good_NewCompletions(t *testing.T) { wsRoot := t.TempDir() t.Setenv("CORE_WORKSPACE", wsRoot) + require.NoError(t, os.MkdirAll(filepath.Join(wsRoot, "workspace"), 0755)) + + mon := New() + notifier := &mockNotifier{} + mon.SetNotifier(notifier) + assert.Equal(t, "", mon.checkCompletions()) + for i := 0; i < 2; i++ { writeWorkspaceStatus(t, wsRoot, fmt.Sprintf("ws-%d", i), map[string]any{ "status": "completed", @@ -133,10 +142,6 @@ func TestCheckCompletions_Good_NewCompletions(t *testing.T) { }) } - mon := New() - notifier := &mockNotifier{} - mon.SetNotifier(notifier) - msg := mon.checkCompletions() assert.Contains(t, msg, "2 agent(s) completed") @@ -151,6 +156,13 @@ func TestCheckCompletions_Good_MixedStatuses(t *testing.T) { wsRoot := t.TempDir() t.Setenv("CORE_WORKSPACE", wsRoot) + require.NoError(t, os.MkdirAll(filepath.Join(wsRoot, "workspace"), 0755)) + + mon := New() + notifier := &mockNotifier{} + mon.SetNotifier(notifier) + assert.Equal(t, "", mon.checkCompletions()) + for i, status := range []string{"completed", "running", "queued"} { writeWorkspaceStatus(t, wsRoot, fmt.Sprintf("ws-%d", i), map[string]any{ "status": status, @@ -159,10 +171,6 @@ func TestCheckCompletions_Good_MixedStatuses(t *testing.T) { }) } - mon := New() - notifier := &mockNotifier{} - mon.SetNotifier(notifier) - msg := mon.checkCompletions() assert.Contains(t, msg, "1 agent(s) completed") assert.Contains(t, msg, "1 still running") @@ -211,11 +219,15 @@ func TestCheckCompletions_Good_NoNotifierSet(t *testing.T) { wsRoot := t.TempDir() 
t.Setenv("CORE_WORKSPACE", wsRoot) + require.NoError(t, os.MkdirAll(filepath.Join(wsRoot, "workspace"), 0755)) + + mon := New() + assert.Equal(t, "", mon.checkCompletions()) + writeWorkspaceStatus(t, wsRoot, "ws-0", map[string]any{ "status": "completed", "repo": "r", "agent": "a", }) - mon := New() msg := mon.checkCompletions() assert.Contains(t, msg, "1 agent(s) completed") } @@ -229,9 +241,9 @@ func TestCheckInbox_Good_UnreadMessages(t *testing.T) { resp := map[string]any{ "data": []map[string]any{ - {"read": false, "from_agent": "clotho", "subject": "task done"}, - {"read": false, "from_agent": "gemini", "subject": "review ready"}, - {"read": true, "from_agent": "clotho", "subject": "old msg"}, + {"id": 3, "read": false, "from": "clotho", "subject": "task done"}, + {"id": 2, "read": false, "from": "gemini", "subject": "review ready"}, + {"id": 1, "read": true, "from": "clotho", "subject": "old msg"}, }, } w.Header().Set("Content-Type", "application/json") @@ -244,6 +256,7 @@ func TestCheckInbox_Good_UnreadMessages(t *testing.T) { t.Setenv("AGENT_NAME", "test-agent") mon := New() + mon.inboxSeeded = true notifier := &mockNotifier{} mon.SetNotifier(notifier) @@ -254,16 +267,19 @@ func TestCheckInbox_Good_UnreadMessages(t *testing.T) { require.Len(t, events, 1) assert.Equal(t, "inbox.message", events[0].channel) eventData := events[0].data.(map[string]any) - assert.Equal(t, 2, eventData["new"]) + assert.Equal(t, 3, eventData["new"]) assert.Equal(t, 2, eventData["total"]) - assert.Equal(t, "task done", eventData["subject"]) + payload, err := json.Marshal(eventData["messages"]) + require.NoError(t, err) + assert.Contains(t, string(payload), "\"subject\":\"task done\"") + assert.Contains(t, string(payload), "\"subject\":\"review ready\"") } func TestCheckInbox_Good_NoUnread(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { resp := map[string]any{ "data": []map[string]any{ - {"read": true, "from_agent": "clotho", 
"subject": "old"}, + {"id": 1, "read": true, "from": "clotho", "subject": "old"}, }, } w.Header().Set("Content-Type", "application/json") @@ -282,7 +298,7 @@ func TestCheckInbox_Good_SameCountNoRepeat(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { resp := map[string]any{ "data": []map[string]any{ - {"read": false, "from_agent": "clotho", "subject": "msg"}, + {"id": 1, "read": false, "from": "clotho", "subject": "msg"}, }, } w.Header().Set("Content-Type", "application/json") @@ -339,9 +355,9 @@ func TestCheckInbox_Good_MultipleSameSender(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { resp := map[string]any{ "data": []map[string]any{ - {"read": false, "from_agent": "clotho", "subject": "msg1"}, - {"read": false, "from_agent": "clotho", "subject": "msg2"}, - {"read": false, "from_agent": "gemini", "subject": "msg3"}, + {"id": 3, "read": false, "from": "clotho", "subject": "msg1"}, + {"id": 2, "read": false, "from": "clotho", "subject": "msg2"}, + {"id": 1, "read": false, "from": "gemini", "subject": "msg3"}, }, } w.Header().Set("Content-Type", "application/json") @@ -352,6 +368,7 @@ func TestCheckInbox_Good_MultipleSameSender(t *testing.T) { setupAPIEnv(t, srv.URL) mon := New() + mon.inboxSeeded = true notifier := &mockNotifier{} mon.SetNotifier(notifier) @@ -361,14 +378,13 @@ func TestCheckInbox_Good_MultipleSameSender(t *testing.T) { events := notifier.Events() require.Len(t, events, 1) eventData := events[0].data.(map[string]any) - senders := eventData["senders"].([]string) - found := false - for _, s := range senders { - if s == "clotho (2)" { - found = true - } - } - assert.True(t, found, "expected clotho (2) in senders, got %v", senders) + assert.Equal(t, 3, eventData["new"]) + assert.Equal(t, 3, eventData["total"]) + payload, err := json.Marshal(eventData["messages"]) + require.NoError(t, err) + assert.Contains(t, string(payload), 
"\"from\":\"clotho\"") + assert.Contains(t, string(payload), "\"subject\":\"msg1\"") + assert.Contains(t, string(payload), "\"subject\":\"msg2\"") } // --- check (integration of sub-checks) --- @@ -388,7 +404,8 @@ func TestCheck_Good_CombinesMessages(t *testing.T) { mon.check(context.Background()) mon.mu.Lock() - assert.Equal(t, 1, mon.lastCompletedCount) + assert.True(t, mon.completionsSeeded) + assert.True(t, mon.seenCompleted["ws-0"]) mon.mu.Unlock() } @@ -470,8 +487,8 @@ func TestLoop_Good_PokeTriggersCheck(t *testing.T) { require.Eventually(t, func() bool { mon.mu.Lock() defer mon.mu.Unlock() - return mon.lastCompletedCount == 1 - }, 5*time.Second, 50*time.Millisecond, "expected lastCompletedCount to reach 1") + return mon.seenCompleted["ws-poke"] + }, 5*time.Second, 50*time.Millisecond, "expected ws-poke completion to be recorded") cancel() mon.wg.Wait() diff --git a/pkg/monitor/sync.go b/pkg/monitor/sync.go index ae5a964..55b1045 100644 --- a/pkg/monitor/sync.go +++ b/pkg/monitor/sync.go @@ -4,20 +4,18 @@ package monitor import ( "encoding/json" - "fmt" "net/http" neturl "net/url" - "os" "os/exec" - "path/filepath" - "strings" "time" "dappco.re/go/agent/pkg/agentic" - coreio "dappco.re/go/core/io" + core "dappco.re/go/core" ) // CheckinResponse is what the API returns for an agent checkin. +// +// resp := monitor.CheckinResponse{Changed: []monitor.ChangedRepo{{Repo: "core-agent", Branch: "main", SHA: "abc123"}}, Timestamp: 1712345678} type CheckinResponse struct { // Repos that have new commits since the agent's last checkin. Changed []ChangedRepo `json:"changed,omitempty"` @@ -26,6 +24,8 @@ type CheckinResponse struct { } // ChangedRepo is a repo that has new commits. +// +// repo := monitor.ChangedRepo{Repo: "core-agent", Branch: "main", SHA: "abc123"} type ChangedRepo struct { Repo string `json:"repo"` Branch string `json:"branch"` @@ -35,14 +35,8 @@ type ChangedRepo struct { // syncRepos calls the checkin API and pulls any repos that changed. 
// Returns a human-readable message if repos were updated, empty string otherwise. func (m *Subsystem) syncRepos() string { - apiURL := os.Getenv("CORE_API_URL") - if apiURL == "" { - apiURL = "https://api.lthn.sh" - } - agentName := agentic.AgentName() - - checkinURL := fmt.Sprintf("%s/v1/agent/checkin?agent=%s&since=%d", apiURL, neturl.QueryEscape(agentName), m.lastSyncTimestamp) + checkinURL := core.Sprintf("%s/v1/agent/checkin?agent=%s&since=%d", monitorAPIURL(), neturl.QueryEscape(agentic.AgentName()), m.lastSyncTimestamp) req, err := http.NewRequest("GET", checkinURL, nil) if err != nil { @@ -50,15 +44,9 @@ func (m *Subsystem) syncRepos() string { } // Use brain key for auth - brainKey := os.Getenv("CORE_BRAIN_KEY") - if brainKey == "" { - home, _ := os.UserHomeDir() - if data, err := coreio.Local.Read(filepath.Join(home, ".claude", "brain.key")); err == nil { - brainKey = strings.TrimSpace(data) - } - } + brainKey := monitorBrainKey() if brainKey != "" { - req.Header.Set("Authorization", "Bearer "+brainKey) + req.Header.Set("Authorization", core.Concat("Bearer ", brainKey)) } client := &http.Client{Timeout: 15 * time.Second} @@ -86,21 +74,20 @@ func (m *Subsystem) syncRepos() string { } // Pull changed repos - basePath := os.Getenv("CODE_PATH") + basePath := core.Env("CODE_PATH") if basePath == "" { - home, _ := os.UserHomeDir() - basePath = filepath.Join(home, "Code", "core") + basePath = core.JoinPath(monitorHomeDir(), "Code", "core") } var pulled []string for _, repo := range checkin.Changed { // Sanitise repo name to prevent path traversal from API response - repoName := filepath.Base(repo.Repo) + repoName := core.PathBase(monitorPath(repo.Repo)) if repoName == "." || repoName == ".."
|| repoName == "" { continue } - repoDir := filepath.Join(basePath, repoName) - if _, err := os.Stat(repoDir); err != nil { + repoDir := core.JoinPath(basePath, repoName) + if !fs.Exists(repoDir) || fs.IsFile(repoDir) { continue } @@ -111,7 +98,7 @@ func (m *Subsystem) syncRepos() string { if err != nil { continue } - current := strings.TrimSpace(string(currentBranch)) + current := core.Trim(string(currentBranch)) // Determine which branch to pull — use server-reported branch, // fall back to current if server didn't specify @@ -128,7 +115,7 @@ func (m *Subsystem) syncRepos() string { statusCmd := exec.Command("git", "status", "--porcelain") statusCmd.Dir = repoDir status, _ := statusCmd.Output() - if len(strings.TrimSpace(string(status))) > 0 { + if len(core.Trim(string(status))) > 0 { continue // Don't pull if dirty } @@ -154,7 +141,7 @@ func (m *Subsystem) syncRepos() string { return "" } - return fmt.Sprintf("Synced %d repo(s): %s", len(pulled), strings.Join(pulled, ", ")) + return core.Sprintf("Synced %d repo(s): %s", len(pulled), core.Join(", ", pulled...)) } // lastSyncTimestamp is stored on the subsystem — add it via the check cycle. diff --git a/pkg/setup/config.go b/pkg/setup/config.go index fd7d349..bdd16b1 100644 --- a/pkg/setup/config.go +++ b/pkg/setup/config.go @@ -3,11 +3,12 @@ package setup import ( + neturl "net/url" "os/exec" "path/filepath" - "strings" - "dappco.re/go/agent/pkg/lib" + core "dappco.re/go/core" + "gopkg.in/yaml.v3" ) // ConfigData holds the data passed to config templates. @@ -35,135 +36,172 @@ type Command struct { Run string } +type configSection struct { + Key string + Values []configValue +} + +type configValue struct { + Key string + Value any +} + // GenerateBuildConfig renders a build.yaml for the detected project type.
+// +// content, err := setup.GenerateBuildConfig("/repo", setup.TypeGo) func GenerateBuildConfig(path string, projType ProjectType) (string, error) { name := filepath.Base(path) - data := map[string]any{ - "Comment": name + " build configuration", - "Sections": []map[string]any{ - { - "Key": "project", - "Values": []map[string]any{ - {"Key": "name", "Value": name}, - {"Key": "type", "Value": string(projType)}, - }, + sections := []configSection{ + { + Key: "project", + Values: []configValue{ + {Key: "name", Value: name}, + {Key: "type", Value: string(projType)}, }, }, } switch projType { case TypeGo, TypeWails: - data["Sections"] = append(data["Sections"].([]map[string]any), - map[string]any{ - "Key": "build", - "Values": []map[string]any{ - {"Key": "main", "Value": "./cmd/" + name}, - {"Key": "binary", "Value": name}, - {"Key": "cgo", "Value": "false"}, - }, + sections = append(sections, configSection{ + Key: "build", + Values: []configValue{ + {Key: "main", Value: "./cmd/" + name}, + {Key: "binary", Value: name}, + {Key: "cgo", Value: false}, }, - ) + }) case TypePHP: - data["Sections"] = append(data["Sections"].([]map[string]any), - map[string]any{ - "Key": "build", - "Values": []map[string]any{ - {"Key": "dockerfile", "Value": "Dockerfile"}, - {"Key": "image", "Value": name}, - }, + sections = append(sections, configSection{ + Key: "build", + Values: []configValue{ + {Key: "dockerfile", Value: "Dockerfile"}, + {Key: "image", Value: name}, }, - ) + }) case TypeNode: - data["Sections"] = append(data["Sections"].([]map[string]any), - map[string]any{ - "Key": "build", - "Values": []map[string]any{ - {"Key": "script", "Value": "npm run build"}, - {"Key": "output", "Value": "dist"}, - }, + sections = append(sections, configSection{ + Key: "build", + Values: []configValue{ + {Key: "script", Value: "npm run build"}, + {Key: "output", Value: "dist"}, }, - ) + }) } - return lib.RenderFile("yaml/config", data) + return renderConfig(name+" build configuration", sections) 
} // GenerateTestConfig renders a test.yaml for the detected project type. +// +// content, err := setup.GenerateTestConfig(setup.TypeGo) func GenerateTestConfig(projType ProjectType) (string, error) { - data := map[string]any{ - "Comment": "Test configuration", - } + var sections []configSection switch projType { case TypeGo, TypeWails: - data["Sections"] = []map[string]any{ + sections = []configSection{ { - "Key": "commands", - "Values": []map[string]any{ - {"Key": "unit", "Value": "go test ./..."}, - {"Key": "coverage", "Value": "go test -coverprofile=coverage.out ./..."}, - {"Key": "race", "Value": "go test -race ./..."}, + Key: "commands", + Values: []configValue{ + {Key: "unit", Value: "go test ./..."}, + {Key: "coverage", Value: "go test -coverprofile=coverage.out ./..."}, + {Key: "race", Value: "go test -race ./..."}, }, }, } case TypePHP: - data["Sections"] = []map[string]any{ + sections = []configSection{ { - "Key": "commands", - "Values": []map[string]any{ - {"Key": "unit", "Value": "vendor/bin/pest --parallel"}, - {"Key": "lint", "Value": "vendor/bin/pint --test"}, + Key: "commands", + Values: []configValue{ + {Key: "unit", Value: "vendor/bin/pest --parallel"}, + {Key: "lint", Value: "vendor/bin/pint --test"}, }, }, } case TypeNode: - data["Sections"] = []map[string]any{ + sections = []configSection{ { - "Key": "commands", - "Values": []map[string]any{ - {"Key": "unit", "Value": "npm test"}, - {"Key": "lint", "Value": "npm run lint"}, + Key: "commands", + Values: []configValue{ + {Key: "unit", Value: "npm test"}, + {Key: "lint", Value: "npm run lint"}, }, }, } } - return lib.RenderFile("yaml/config", data) + return renderConfig("Test configuration", sections) +} + +func renderConfig(comment string, sections []configSection) (string, error) { + builder := core.NewBuilder() + + if comment != "" { + builder.WriteString("# ") + builder.WriteString(comment) + builder.WriteString("\n\n") + } + + for idx, section := range sections { + 
builder.WriteString(section.Key) + builder.WriteString(":\n") + + for _, value := range section.Values { + scalar, err := yaml.Marshal(value.Value) + if err != nil { + return "", core.E("setup.renderConfig", "marshal "+section.Key+"."+value.Key, err) + } + + builder.WriteString(" ") + builder.WriteString(value.Key) + builder.WriteString(": ") + builder.WriteString(core.Trim(string(scalar))) + builder.WriteString("\n") + } + + if idx < len(sections)-1 { + builder.WriteString("\n") + } + } + + return builder.String(), nil } // detectGitRemote extracts owner/repo from git remote origin. -func detectGitRemote() string { +func detectGitRemote(path string) string { cmd := exec.Command("git", "remote", "get-url", "origin") + cmd.Dir = path output, err := cmd.Output() if err != nil { return "" } - url := strings.TrimSpace(string(output)) + return parseGitRemote(core.Trim(string(output))) +} - // SSH: git@github.com:owner/repo.git or ssh://git@forge.lthn.ai:2223/core/agent.git - if strings.Contains(url, ":") { - parts := strings.SplitN(url, ":", 2) - if len(parts) == 2 { - repo := parts[1] - repo = strings.TrimSuffix(repo, ".git") - // Handle port in SSH URL (ssh://git@host:port/path) - if strings.Contains(repo, "/") { - segments := strings.SplitN(repo, "/", 2) - if len(segments) == 2 && strings.ContainsAny(segments[0], "0123456789") { - repo = segments[1] - } - } - return repo - } +func parseGitRemote(remote string) string { + if remote == "" { + return "" } - // HTTPS: https://github.com/owner/repo.git - for _, host := range []string{"github.com/", "forge.lthn.ai/"} { - if idx := strings.Index(url, host); idx >= 0 { - repo := url[idx+len(host):] - return strings.TrimSuffix(repo, ".git") - } + if parsed, err := neturl.Parse(remote); err == nil && parsed.Host != "" { + return trimRemotePath(parsed.Path) + } + + parts := core.SplitN(remote, ":", 2) + if len(parts) == 2 && core.Contains(parts[0], "@") { + return trimRemotePath(parts[1]) + } + + if core.Contains(remote, "/") { 
+ return trimRemotePath(remote) } return "" } + +func trimRemotePath(remote string) string { + trimmed := core.TrimPrefix(remote, "/") + return core.TrimSuffix(trimmed, ".git") +} diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 6aaaf31..31e26af 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -4,8 +4,10 @@ package setup import ( - "os" "path/filepath" + "unsafe" + + core "dappco.re/go/core" ) // ProjectType identifies what kind of project lives at a path. @@ -19,8 +21,22 @@ const ( TypeUnknown ProjectType = "unknown" ) +// fs provides unrestricted filesystem access for setup operations. +var fs = newFs("/") + +// newFs creates a core.Fs with the given root directory. +func newFs(root string) *core.Fs { + type fsRoot struct{ root string } + f := &core.Fs{} + (*fsRoot)(unsafe.Pointer(f)).root = root + return f +} + // Detect identifies the project type from files present at the given path. +// +// projType := setup.Detect("./repo") func Detect(path string) ProjectType { + base := absolutePath(path) checks := []struct { file string projType ProjectType @@ -31,7 +47,7 @@ func Detect(path string) ProjectType { {"package.json", TypeNode}, } for _, c := range checks { - if _, err := os.Stat(filepath.Join(path, c.file)); err == nil { + if fs.IsFile(filepath.Join(base, c.file)) { return c.projType } } @@ -39,7 +55,10 @@ func Detect(path string) ProjectType { } // DetectAll returns all project types found at the path (polyglot repos). +// +// types := setup.DetectAll("./repo") func DetectAll(path string) []ProjectType { + base := absolutePath(path) var types []ProjectType all := []struct { file string @@ -51,9 +70,20 @@ func DetectAll(path string) []ProjectType { {"wails.json", TypeWails}, } for _, c := range all { - if _, err := os.Stat(filepath.Join(path, c.file)); err == nil { + if fs.IsFile(filepath.Join(base, c.file)) { types = append(types, c.projType) } } return types } + +func absolutePath(path string) string { + if path == "" { + path = "." 
+ } + abs, err := filepath.Abs(path) + if err != nil { + return path + } + return abs +} diff --git a/pkg/setup/setup.go b/pkg/setup/setup.go index edc4718..a4e3cc2 100644 --- a/pkg/setup/setup.go +++ b/pkg/setup/setup.go @@ -3,40 +3,45 @@ package setup import ( - "fmt" "os" "path/filepath" "dappco.re/go/agent/pkg/lib" + core "dappco.re/go/core" ) // Options controls setup behaviour. +// +// err := setup.Run(setup.Options{Path: ".", Force: true}) type Options struct { Path string // Target directory (default: cwd) DryRun bool // Preview only, don't write Force bool // Overwrite existing files - Template string // Dir template to use (agent, php, go, gui) + Template string // Workspace template or compatibility alias (default, review, security, agent, go, php, gui, auto) } // Run performs the workspace setup at the given path. // It detects the project type, generates .core/ configs, // and optionally scaffolds a workspace from a dir template. +// +// err := setup.Run(setup.Options{Path: ".", Template: "auto"}) func Run(opts Options) error { if opts.Path == "" { var err error opts.Path, err = os.Getwd() if err != nil { - return fmt.Errorf("setup: %w", err) + return core.E("setup.Run", "resolve working directory", err) } } + opts.Path = absolutePath(opts.Path) projType := Detect(opts.Path) allTypes := DetectAll(opts.Path) - fmt.Printf("Project: %s\n", filepath.Base(opts.Path)) - fmt.Printf("Type: %s\n", projType) + core.Print(nil, "Project: %s", filepath.Base(opts.Path)) + core.Print(nil, "Type: %s", projType) if len(allTypes) > 1 { - fmt.Printf("Also: %v (polyglot)\n", allTypes) + core.Print(nil, "Also: %v (polyglot)", allTypes) } // Generate .core/ config files @@ -57,17 +62,19 @@ func setupCoreDir(opts Options, projType ProjectType) error { coreDir := filepath.Join(opts.Path, ".core") if opts.DryRun { - fmt.Printf("\nWould create %s/\n", coreDir) + core.Print(nil, "") + core.Print(nil, "Would create %s/", coreDir) } else { - if err := os.MkdirAll(coreDir, 0755); 
err != nil { - return fmt.Errorf("setup: create .core: %w", err) + if r := fs.EnsureDir(coreDir); !r.OK { + err, _ := r.Value.(error) + return core.E("setup.setupCoreDir", "create .core directory", err) } } // build.yaml buildConfig, err := GenerateBuildConfig(opts.Path, projType) if err != nil { - return fmt.Errorf("setup: build config: %w", err) + return core.E("setup.setupCoreDir", "generate build config", err) } if err := writeConfig(filepath.Join(coreDir, "build.yaml"), buildConfig, opts); err != nil { return err @@ -76,7 +83,7 @@ func setupCoreDir(opts Options, projType ProjectType) error { // test.yaml testConfig, err := GenerateTestConfig(projType) if err != nil { - return fmt.Errorf("setup: test config: %w", err) + return core.E("setup.setupCoreDir", "generate test config", err) } if err := writeConfig(filepath.Join(coreDir, "test.yaml"), testConfig, opts); err != nil { return err @@ -87,64 +94,125 @@ func setupCoreDir(opts Options, projType ProjectType) error { // scaffoldTemplate extracts a dir template into the target path. 
func scaffoldTemplate(opts Options, projType ProjectType) error { - tmplName := opts.Template - if tmplName == "auto" { - switch projType { - case TypeGo, TypeWails: - tmplName = "go" - case TypePHP: - tmplName = "php" - case TypeNode: - tmplName = "gui" - default: - tmplName = "agent" - } + tmplName, err := resolveTemplateName(opts.Template, projType) + if err != nil { + return err } - fmt.Printf("Template: %s\n", tmplName) + core.Print(nil, "Template: %s", tmplName) - data := map[string]any{ - "Name": filepath.Base(opts.Path), - "Module": detectGitRemote(), - "Namespace": "App", - "ViewNamespace": filepath.Base(opts.Path), - "RouteName": filepath.Base(opts.Path), - "GoVersion": "1.26", - "HasAdmin": true, - "HasApi": true, - "HasConsole": true, + data := &lib.WorkspaceData{ + Repo: filepath.Base(opts.Path), + Branch: "main", + Task: core.Sprintf("Initialise %s project tooling.", projType), + Agent: "setup", + Language: string(projType), + Prompt: "This workspace was scaffolded by pkg/setup. 
Review the repository and continue from the generated context files.", + Flow: formatFlow(projType), + RepoDescription: detectGitRemote(opts.Path), + BuildCmd: defaultBuildCommand(projType), + TestCmd: defaultTestCommand(projType), + } + + if !templateExists(tmplName) { + return core.E("setup.scaffoldTemplate", "template not found: "+tmplName, nil) } if opts.DryRun { - fmt.Printf("Would extract template/%s to %s\n", tmplName, opts.Path) - files := lib.ListDirTemplates() - for _, f := range files { - if f == tmplName { - fmt.Printf(" Template found: %s\n", f) - } - } + core.Print(nil, "Would extract workspace/%s to %s", tmplName, opts.Path) + core.Print(nil, " Template found: %s", tmplName) return nil } - return lib.ExtractDir(tmplName, opts.Path, data) + if err := lib.ExtractWorkspace(tmplName, opts.Path, data); err != nil { + return core.E("setup.scaffoldTemplate", "extract workspace template "+tmplName, err) + } + return nil } func writeConfig(path, content string, opts Options) error { if opts.DryRun { - fmt.Printf(" %s\n", path) + core.Print(nil, " %s", path) return nil } - if !opts.Force { - if _, err := os.Stat(path); err == nil { - fmt.Printf(" skip %s (exists, use --force to overwrite)\n", filepath.Base(path)) - return nil + if !opts.Force && fs.Exists(path) { + core.Print(nil, " skip %s (exists, use --force to overwrite)", filepath.Base(path)) + return nil + } + + if r := fs.WriteMode(path, content, 0644); !r.OK { + err, _ := r.Value.(error) + return core.E("setup.writeConfig", "write "+filepath.Base(path), err) + } + core.Print(nil, " created %s", path) + return nil +} + +func resolveTemplateName(name string, projType ProjectType) (string, error) { + if name == "" { + return "", core.E("setup.resolveTemplateName", "template is required", nil) + } + + if name == "auto" { + switch projType { + case TypeGo, TypeWails, TypePHP, TypeNode, TypeUnknown: + return "default", nil } } - if err := os.WriteFile(path, []byte(content), 0644); err != nil { - return 
fmt.Errorf("setup: write %s: %w", filepath.Base(path), err) + switch name { + case "agent", "go", "php", "gui": + return "default", nil + case "verify", "conventions": + return "review", nil + default: + return name, nil } - fmt.Printf(" created %s\n", path) - return nil +} + +func templateExists(name string) bool { + for _, tmpl := range lib.ListWorkspaces() { + if tmpl == name { + return true + } + } + return false +} + +func defaultBuildCommand(projType ProjectType) string { + switch projType { + case TypeGo, TypeWails: + return "go build ./..." + case TypePHP: + return "composer test" + case TypeNode: + return "npm run build" + default: + return "make build" + } +} + +func defaultTestCommand(projType ProjectType) string { + switch projType { + case TypeGo, TypeWails: + return "go test ./..." + case TypePHP: + return "composer test" + case TypeNode: + return "npm test" + default: + return "make test" + } +} + +func formatFlow(projType ProjectType) string { + builder := core.NewBuilder() + builder.WriteString("- Build: `") + builder.WriteString(defaultBuildCommand(projType)) + builder.WriteString("`\n") + builder.WriteString("- Test: `") + builder.WriteString(defaultTestCommand(projType)) + builder.WriteString("`") + return builder.String() } diff --git a/pkg/setup/setup_test.go b/pkg/setup/setup_test.go new file mode 100644 index 0000000..4e60eb4 --- /dev/null +++ b/pkg/setup/setup_test.go @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package setup + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDetect_Good(t *testing.T) { + dir := t.TempDir() + require.True(t, fs.WriteMode(filepath.Join(dir, "go.mod"), "module example.com/test\n", 0644).OK) + + assert.Equal(t, TypeGo, Detect(dir)) + assert.Equal(t, []ProjectType{TypeGo}, DetectAll(dir)) +} + +func TestGenerateBuildConfig_Good(t *testing.T) { + cfg, err := GenerateBuildConfig("/tmp/example", TypeGo) + 
require.NoError(t, err) + + assert.Contains(t, cfg, "# example build configuration") + assert.Contains(t, cfg, "project:") + assert.Contains(t, cfg, "name: example") + assert.Contains(t, cfg, "type: go") + assert.Contains(t, cfg, "main: ./cmd/example") + assert.Contains(t, cfg, "cgo: false") +} + +func TestParseGitRemote_Good(t *testing.T) { + tests := map[string]string{ + "https://github.com/dAppCore/go-io.git": "dAppCore/go-io", + "git@github.com:dAppCore/go-io.git": "dAppCore/go-io", + "ssh://git@forge.lthn.ai:2223/core/agent.git": "core/agent", + "ssh://git@forge.lthn.ai:2223/core/agent": "core/agent", + "git@forge.lthn.ai:core/agent.git": "core/agent", + "/srv/git/core/agent.git": "srv/git/core/agent", + } + + for remote, want := range tests { + assert.Equal(t, want, parseGitRemote(remote), remote) + } +} + +func TestParseGitRemote_Bad(t *testing.T) { + assert.Equal(t, "", parseGitRemote("")) + assert.Equal(t, "", parseGitRemote("origin")) +} + +func TestRun_Good(t *testing.T) { + dir := t.TempDir() + require.True(t, fs.WriteMode(filepath.Join(dir, "go.mod"), "module example.com/test\n", 0644).OK) + + err := Run(Options{Path: dir}) + require.NoError(t, err) + + build := fs.Read(filepath.Join(dir, ".core", "build.yaml")) + require.True(t, build.OK) + assert.Contains(t, build.Value.(string), "type: go") + + test := fs.Read(filepath.Join(dir, ".core", "test.yaml")) + require.True(t, test.OK) + assert.Contains(t, test.Value.(string), "go test ./...") +} + +func TestRun_TemplateAlias_Good(t *testing.T) { + dir := t.TempDir() + require.True(t, fs.WriteMode(filepath.Join(dir, "go.mod"), "module example.com/test\n", 0644).OK) + + err := Run(Options{Path: dir, Template: "agent"}) + require.NoError(t, err) + + prompt := fs.Read(filepath.Join(dir, "PROMPT.md")) + require.True(t, prompt.OK) + assert.Contains(t, prompt.Value.(string), "This workspace was scaffolded by pkg/setup.") +}