feat(v0.8.0): full AX migration — ServiceRuntime, Actions, quality gates, transport
go-process:
- Register factory, Result lifecycle, 5 named Action handlers
- Start/Run/StartWithOptions/RunWithOptions all return core.Result
- core.ID() replaces fmt.Sprintf, core.As replaces errors.As
core/agent:
- PrepSubsystem + monitor.Subsystem + setup.Service embed ServiceRuntime[T]
- 22 named Actions + agent.completion Task pipeline in OnStartup
- ChannelNotifier removed — all IPC via c.ACTION(messages.X{})
- proc.go: all methods via s.Core().Process(), returns core.Result
- status.go: WriteAtomic + JSONMarshalString
- paths.go: Fs.NewUnrestricted() replaces unsafe.Pointer
- transport.go: ONE net/http file — HTTPGet/HTTPPost/HTTPDo/MCP transport
- All disallowed imports eliminated from source files (13 quality gates)
- String concat eliminated — core.Concat() throughout
- 1:1 _test.go + _example_test.go for every source file
- Reference docs synced from core/go v0.8.0
- RFC-025 updated with net/http, net/url, io/fs quality gates
- lib.go: io/fs eliminated via Data.ListNames, Array[T].Deduplicate
Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
96ac2d99cd
commit
f83c753277
157 changed files with 5039 additions and 2503 deletions
434
docs/RFC.md
434
docs/RFC.md
|
|
@ -3,7 +3,7 @@
|
|||
> `dappco.re/go/core/agent` — Agentic dispatch, orchestration, and pipeline management.
|
||||
> An agent should be able to understand core/agent's architecture from this document alone.
|
||||
|
||||
**Status:** v0.8.0
|
||||
**Status:** v0.8.0+alpha.1
|
||||
**Module:** `dappco.re/go/core/agent`
|
||||
**Depends on:** core/go v0.8.0, go-process v0.8.0
|
||||
|
||||
|
|
@ -15,38 +15,21 @@ core/agent dispatches AI agents (Claude, Codex, Gemini) to work on tasks in sand
|
|||
|
||||
core/go provides the primitives. core/agent composes them.
|
||||
|
||||
### Current State (2026-03-25)
|
||||
|
||||
The codebase is PRE-migration. The RFC describes the v0.8.0 target. What exists today:
|
||||
|
||||
- `pkg/agentic/proc.go` — standalone process helpers with `ensureProcess()`. **Delete** — replace with `s.Core().Process()`
|
||||
- `pkg/agentic/handlers.go` — nested `c.ACTION()` cascade 4 levels deep. **Replace** with `c.Task("agent.completion")`
|
||||
- `pkg/agentic/commands.go` — closures already extracted to named methods (done in prior session)
|
||||
- `pkg/agentic/commands_forge.go` — forge command methods (done)
|
||||
- `pkg/agentic/commands_workspace.go` — workspace command methods (done)
|
||||
- `pkg/agentic/dispatch.go` — `spawnAgent` decomposed into 7 functions (done)
|
||||
- `pkg/agentic/status.go` — uses `os.WriteFile` for status.json. **Replace** with `Fs.WriteAtomic`
|
||||
- `pkg/agentic/paths.go` — uses `unsafe.Pointer` to bypass Fs.root. **Replace** with `Fs.NewUnrestricted()`
|
||||
- `pkg/messages/` — typed IPC message structs (`AgentCompleted`, `QAResult`, etc.)
|
||||
- `pkg/brain/` — OpenBrain integration (recall/remember)
|
||||
- `pkg/monitor/` — agent monitoring + notifications
|
||||
- `pkg/setup/` — workspace scaffolding
|
||||
- `OnStartup`/`OnShutdown` — currently return `error`. **Change** to return `Result`
|
||||
|
||||
### File Layout
|
||||
|
||||
```
|
||||
cmd/core-agent/main.go — entry point: core.New + Run
|
||||
pkg/agentic/ — orchestration (dispatch, prep, verify, scan, commands)
|
||||
pkg/agentic/proc.go — DELETE (replace with c.Process())
|
||||
pkg/agentic/handlers.go — REWRITE (cascade → Task pipeline)
|
||||
pkg/agentic/status.go — MIGRATE (os.WriteFile → WriteAtomic)
|
||||
pkg/agentic/paths.go — MIGRATE (unsafe.Pointer → NewUnrestricted)
|
||||
pkg/agentic/actions.go — named Action handlers (ctx, Options) → Result
|
||||
pkg/agentic/proc.go — process helpers via s.Core().Process()
|
||||
pkg/agentic/handlers.go — IPC completion pipeline handlers
|
||||
pkg/agentic/status.go — workspace status (WriteAtomic + JSONMarshalString)
|
||||
pkg/agentic/paths.go — paths, fs (NewUnrestricted), helpers
|
||||
pkg/brain/ — OpenBrain (recall, remember, search)
|
||||
pkg/lib/ — embedded templates, personas, flows, plans
|
||||
pkg/messages/ — typed message structs for IPC broadcast
|
||||
pkg/monitor/ — agent monitoring + channel notifications
|
||||
pkg/setup/ — workspace detection + scaffolding
|
||||
pkg/monitor/ — agent monitoring via IPC (ServiceRuntime)
|
||||
pkg/setup/ — workspace detection + scaffolding (Service)
|
||||
claude/ — Claude Code plugin definitions
|
||||
docs/ — RFC, plans, architecture
|
||||
```
|
||||
|
|
@ -55,12 +38,19 @@ docs/ — RFC, plans, architecture
|
|||
|
||||
## 2. Service Registration
|
||||
|
||||
All services use `ServiceRuntime[T]` — no raw `core *core.Core` fields.
|
||||
|
||||
```go
|
||||
func Register(c *core.Core) core.Result {
|
||||
svc := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
}
|
||||
return core.Result{Value: svc, OK: true}
|
||||
prep := NewPrep()
|
||||
prep.ServiceRuntime = core.NewServiceRuntime(c, AgentOptions{})
|
||||
|
||||
cfg := prep.loadAgentsConfig()
|
||||
c.Config().Set("agents.concurrency", cfg.Concurrency)
|
||||
c.Config().Set("agents.rates", cfg.Rates)
|
||||
|
||||
RegisterHandlers(c, prep)
|
||||
return core.Result{Value: prep, OK: true}
|
||||
}
|
||||
|
||||
// In main:
|
||||
|
|
@ -84,12 +74,13 @@ All capabilities registered as named Actions during OnStartup. Inspectable, comp
|
|||
func (s *PrepSubsystem) OnStartup(ctx context.Context) core.Result {
|
||||
c := s.Core()
|
||||
|
||||
// Dispatch
|
||||
// Dispatch & workspace
|
||||
c.Action("agentic.dispatch", s.handleDispatch)
|
||||
c.Action("agentic.prep", s.handlePrep)
|
||||
c.Action("agentic.status", s.handleStatus)
|
||||
c.Action("agentic.resume", s.handleResume)
|
||||
c.Action("agentic.scan", s.handleScan)
|
||||
c.Action("agentic.watch", s.handleWatch)
|
||||
|
||||
// Pipeline
|
||||
c.Action("agentic.qa", s.handleQA)
|
||||
|
|
@ -107,11 +98,11 @@ func (s *PrepSubsystem) OnStartup(ctx context.Context) core.Result {
|
|||
c.Action("agentic.pr.list", s.handlePRList)
|
||||
c.Action("agentic.pr.merge", s.handlePRMerge)
|
||||
|
||||
// Brain
|
||||
c.Action("brain.recall", s.handleBrainRecall)
|
||||
c.Action("brain.remember", s.handleBrainRemember)
|
||||
// Review & Epic
|
||||
c.Action("agentic.review-queue", s.handleReviewQueue)
|
||||
c.Action("agentic.epic", s.handleEpic)
|
||||
|
||||
// Completion pipeline
|
||||
// Completion pipeline — Task composition
|
||||
c.Task("agent.completion", core.Task{
|
||||
Description: "QA → PR → Verify → Merge",
|
||||
Steps: []core.Step{
|
||||
|
|
@ -123,7 +114,10 @@ func (s *PrepSubsystem) OnStartup(ctx context.Context) core.Result {
|
|||
},
|
||||
})
|
||||
|
||||
s.StartRunner()
|
||||
s.registerCommands(ctx)
|
||||
s.registerWorkspaceCommands()
|
||||
s.registerForgeCommands()
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
```
|
||||
|
|
@ -132,38 +126,35 @@ func (s *PrepSubsystem) OnStartup(ctx context.Context) core.Result {
|
|||
|
||||
## 4. Completion Pipeline
|
||||
|
||||
When an agent completes, the Task runs sequentially. Async steps fire without blocking the queue drain.
|
||||
When an agent completes, the IPC handler chain fires. Registered in `RegisterHandlers()`:
|
||||
|
||||
```go
|
||||
c.RegisterAction(func(c *core.Core, msg core.Message) core.Result {
|
||||
if ev, ok := msg.(messages.AgentCompleted); ok {
|
||||
opts := core.NewOptions(
|
||||
core.Option{Key: "repo", Value: ev.Repo},
|
||||
core.Option{Key: "workspace", Value: ev.Workspace},
|
||||
)
|
||||
c.PerformAsync("agent.completion", opts)
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
})
|
||||
```
|
||||
AgentCompleted → QA handler → QAResult
|
||||
QAResult{Passed} → PR handler → PRCreated
|
||||
PRCreated → Verify handler → PRMerged | PRNeedsReview
|
||||
AgentCompleted → Ingest handler (findings → issues)
|
||||
AgentCompleted → Poke handler (drain queue)
|
||||
```
|
||||
|
||||
Steps: QA (build+test) → Auto-PR (git push + Forge API) → Verify (test + merge).
|
||||
Ingest and Poke run async — Poke drains the queue immediately.
|
||||
All handlers use `c.ACTION(messages.X{})` — no ChannelNotifier, no callbacks.
|
||||
|
||||
---
|
||||
|
||||
## 5. Process Execution
|
||||
|
||||
All commands via `c.Process()`. No `os/exec`, no `proc.go`, no `ensureProcess()`.
|
||||
All commands via `s.Core().Process()`. Returns `core.Result` — Value is always a string.
|
||||
|
||||
```go
|
||||
// Git operations
|
||||
func (s *PrepSubsystem) gitCmd(ctx context.Context, dir string, args ...string) core.Result {
|
||||
return s.Core().Process().RunIn(ctx, dir, "git", args...)
|
||||
func (s *PrepSubsystem) runCmd(ctx context.Context, dir, command string, args ...string) core.Result {
|
||||
return s.Core().Process().RunIn(ctx, dir, command, args...)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) gitOK(ctx context.Context, dir string, args ...string) bool {
|
||||
return s.gitCmd(ctx, dir, args...).OK
|
||||
func (s *PrepSubsystem) runCmdOK(ctx context.Context, dir, command string, args ...string) bool {
|
||||
return s.runCmd(ctx, dir, command, args...).OK
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) gitCmd(ctx context.Context, dir string, args ...string) core.Result {
|
||||
return s.runCmd(ctx, dir, "git", args...)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) gitOutput(ctx context.Context, dir string, args ...string) string {
|
||||
|
|
@ -173,243 +164,199 @@ func (s *PrepSubsystem) gitOutput(ctx context.Context, dir string, args ...strin
|
|||
}
|
||||
```
|
||||
|
||||
go-process is fully Result-native. `Start`, `Run`, `StartWithOptions`, `RunWithOptions` all return `core.Result`. Value is `*Process` for Start, `string` for Run. OK=true guarantees the type.
|
||||
|
||||
---
|
||||
|
||||
## 6. Status Management
|
||||
|
||||
Workspace status uses `WriteAtomic` for safe concurrent access + per-workspace mutex for read-modify-write:
|
||||
Workspace status uses `WriteAtomic` + `JSONMarshalString` for safe concurrent access:
|
||||
|
||||
```go
|
||||
// Write
|
||||
s.Core().Fs().WriteAtomic(statusPath, core.JSONMarshalString(status))
|
||||
|
||||
// Read-modify-write with lock
|
||||
s.withLock(wsDir, func() {
|
||||
var st WorkspaceStatus
|
||||
core.JSONUnmarshalString(s.Core().Fs().Read(statusPath).Value.(string), &st)
|
||||
st.Status = "completed"
|
||||
s.Core().Fs().WriteAtomic(statusPath, core.JSONMarshalString(st))
|
||||
})
|
||||
func writeStatus(wsDir string, status *WorkspaceStatus) error {
|
||||
status.UpdatedAt = time.Now()
|
||||
statusPath := core.JoinPath(wsDir, "status.json")
|
||||
if r := fs.WriteAtomic(statusPath, core.JSONMarshalString(status)); !r.OK {
|
||||
err, _ := r.Value.(error)
|
||||
return core.E("writeStatus", "failed to write status", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. Filesystem
|
||||
|
||||
No `unsafe.Pointer`. Sandboxed by default, unrestricted when needed:
|
||||
No `unsafe.Pointer`. Package-level unrestricted Fs via Core primitive:
|
||||
|
||||
```go
|
||||
// Sandboxed to workspace
|
||||
f := (&core.Fs{}).New(workspaceDir)
|
||||
|
||||
// Full access when required
|
||||
f := s.Core().Fs().NewUnrestricted()
|
||||
var fs = (&core.Fs{}).NewUnrestricted()
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 8. Validation and IDs
|
||||
## 8. IPC Messages
|
||||
|
||||
All inter-service communication via typed messages in `pkg/messages/`:
|
||||
|
||||
```go
|
||||
// Validate input
|
||||
if r := core.ValidateName(input.Repo); !r.OK { return r }
|
||||
safe := core.SanitisePath(userInput)
|
||||
// Agent lifecycle
|
||||
messages.AgentStarted{Agent, Repo, Workspace}
|
||||
messages.AgentCompleted{Agent, Repo, Workspace, Status}
|
||||
|
||||
// Generate unique identifiers
|
||||
id := core.ID() // "id-42-a3f2b1"
|
||||
// Pipeline
|
||||
messages.QAResult{Workspace, Repo, Passed}
|
||||
messages.PRCreated{Repo, Branch, PRURL, PRNum}
|
||||
messages.PRMerged{Repo, PRURL, PRNum}
|
||||
messages.PRNeedsReview{Repo, PRURL, PRNum, Reason}
|
||||
|
||||
// Queue
|
||||
messages.QueueDrained{Completed}
|
||||
messages.PokeQueue{}
|
||||
|
||||
// Monitor
|
||||
messages.HarvestComplete{Repo, Branch, Files}
|
||||
messages.HarvestRejected{Repo, Branch, Reason}
|
||||
messages.InboxMessage{New, Total}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 9. Entitlements
|
||||
## 9. Monitor
|
||||
|
||||
Embeds `*core.ServiceRuntime[MonitorOptions]`. All notifications via `m.Core().ACTION(messages.X{})` — no ChannelNotifier interface. Git operations via `m.Core().Process()`.
|
||||
|
||||
```go
|
||||
func Register(c *core.Core) core.Result {
|
||||
mon := New()
|
||||
mon.ServiceRuntime = core.NewServiceRuntime(c, MonitorOptions{})
|
||||
|
||||
c.RegisterAction(func(c *core.Core, msg core.Message) core.Result {
|
||||
switch ev := msg.(type) {
|
||||
case messages.AgentCompleted:
|
||||
mon.handleAgentCompleted(ev)
|
||||
case messages.AgentStarted:
|
||||
mon.handleAgentStarted(ev)
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
})
|
||||
|
||||
return core.Result{Value: mon, OK: true}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 10. Setup
|
||||
|
||||
Service with `*core.ServiceRuntime[SetupOptions]`. Detects project type, generates configs, scaffolds workspaces.
|
||||
|
||||
```go
|
||||
func Register(c *core.Core) core.Result {
|
||||
svc := &Service{
|
||||
ServiceRuntime: core.NewServiceRuntime(c, SetupOptions{}),
|
||||
}
|
||||
return core.Result{Value: svc, OK: true}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 11. Entitlements
|
||||
|
||||
Actions are gated by `c.Entitled()` — checked automatically in `Action.Run()`.
|
||||
|
||||
For explicit gating with quantity checks:
|
||||
|
||||
```go
|
||||
func (s *PrepSubsystem) handleDispatch(ctx context.Context, opts core.Options) core.Result {
|
||||
// Concurrency limit
|
||||
e := s.Core().Entitled("agentic.concurrency", 1)
|
||||
if !e.Allowed {
|
||||
return core.Result{Value: core.E("dispatch", e.Reason, nil), OK: false}
|
||||
}
|
||||
|
||||
// ... dispatch agent ...
|
||||
|
||||
s.Core().RecordUsage("agentic.dispatch")
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
```
|
||||
|
||||
Enables: SaaS tier gating, usage tracking, workspace isolation.
|
||||
---
|
||||
|
||||
## 12. MCP — Action Aggregator
|
||||
|
||||
MCP auto-exposes all registered Actions as tools via `c.Actions()`. Register an Action → it appears as an MCP tool. The API stream primitive (`c.API()`) handles transport.
|
||||
|
||||
---
|
||||
|
||||
## 10. MCP — Action Aggregator
|
||||
|
||||
MCP auto-exposes all registered Actions as tools:
|
||||
|
||||
```go
|
||||
func (s *MCPService) OnStartup(ctx context.Context) core.Result {
|
||||
for _, name := range s.Core().Actions() {
|
||||
name := name // capture loop variable
|
||||
action := s.Core().Action(name)
|
||||
s.server.AddTool(mcp.Tool{
|
||||
Name: name,
|
||||
Description: action.Description,
|
||||
InputSchema: schemaFromOptions(action.Schema),
|
||||
Handler: func(ctx context.Context, input map[string]any) (any, error) {
|
||||
// Re-resolve action at call time (not captured pointer)
|
||||
r := s.Core().Action(name).Run(ctx, optionsFromInput(input))
|
||||
if !r.OK { return nil, r.Value.(error) }
|
||||
return r.Value, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
```
|
||||
|
||||
Register an Action → it appears as an MCP tool. No hand-wiring.
|
||||
|
||||
---
|
||||
|
||||
## 11. Remote Dispatch
|
||||
## 13. Remote Dispatch
|
||||
|
||||
Transparent local/remote via `host:action` syntax:
|
||||
|
||||
```go
|
||||
// Local
|
||||
r := c.RemoteAction("agentic.status", ctx, opts)
|
||||
|
||||
// Remote — same API
|
||||
r := c.RemoteAction("charon:agentic.dispatch", ctx, opts)
|
||||
|
||||
// Web3
|
||||
r := c.RemoteAction("snider.lthn:brain.recall", ctx, opts)
|
||||
r := c.RemoteAction("agentic.status", ctx, opts) // local
|
||||
r := c.RemoteAction("charon:agentic.dispatch", ctx, opts) // remote
|
||||
r := c.RemoteAction("snider.lthn:brain.recall", ctx, opts) // web3
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 12. JSON Serialisation
|
||||
## 14. Quality Gates
|
||||
|
||||
```bash
|
||||
# No disallowed imports (source files only)
|
||||
grep -rn '"os"\|"os/exec"\|"io"\|"fmt"\|"errors"\|"log"\|"encoding/json"\|"path/filepath"\|"unsafe"\|"strings"' *.go **/*.go \
|
||||
| grep -v _test.go
|
||||
|
||||
# Test naming: TestFile_Function_{Good,Bad,Ugly}
|
||||
grep -rn "^func Test" *_test.go **/*_test.go \
|
||||
| grep -v "Test[A-Z][a-z]*_.*_\(Good\|Bad\|Ugly\)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 15. Validation and IDs
|
||||
|
||||
```go
|
||||
if r := core.ValidateName(input.Repo); !r.OK { return r }
|
||||
safe := core.SanitisePath(userInput)
|
||||
id := core.ID() // "id-42-a3f2b1"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 16. JSON Serialisation
|
||||
|
||||
All JSON via Core primitives. No `encoding/json` import.
|
||||
|
||||
```go
|
||||
data := core.JSONMarshalString(status)
|
||||
core.JSONUnmarshal(responseBytes, &result)
|
||||
core.JSONUnmarshalString(jsonStr, &result)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 13. Test Strategy
|
||||
|
||||
AX-7: `TestFile_Function_{Good,Bad,Ugly}` — 100% naming compliance target.
|
||||
|
||||
```
|
||||
TestHandlers_CompletionPipeline_Good — QA+PR+Verify succeed, Poke fires
|
||||
TestHandlers_CompletionPipeline_Bad — QA fails, chain stops
|
||||
TestHandlers_CompletionPipeline_Ugly — handler panics, pipeline recovers
|
||||
TestDispatch_Entitlement_Good — entitled workspace dispatches
|
||||
TestDispatch_Entitlement_Bad — denied workspace gets error
|
||||
TestPrep_GitCmd_Good — via c.Process()
|
||||
TestStatus_WriteAtomic_Ugly — concurrent writes don't corrupt
|
||||
TestMCP_ActionAggregator_Good — Actions appear as MCP tools
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 14. Error Handling and Logging
|
||||
|
||||
All errors via `core.E()`. All logging via Core. No `fmt`, `errors`, or `log` imports.
|
||||
## 17. Configuration
|
||||
|
||||
```go
|
||||
// Structured errors
|
||||
return core.E("dispatch.prep", "workspace not found", nil)
|
||||
return core.E("dispatch.prep", core.Concat("repo ", repo, " invalid"), cause)
|
||||
|
||||
// Error inspection
|
||||
core.Operation(err) // "dispatch.prep"
|
||||
core.ErrorMessage(err) // "workspace not found"
|
||||
core.Root(err) // unwrap to root cause
|
||||
|
||||
// Logging
|
||||
core.Info("agent dispatched", "repo", repo, "agent", agent)
|
||||
core.Warn("queue full", "pending", count)
|
||||
core.Error("dispatch failed", "err", err)
|
||||
core.Security("entitlement.denied", "action", action, "reason", reason)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 15. Configuration
|
||||
|
||||
```go
|
||||
// Runtime settings
|
||||
c.Config().Set("agents.concurrency", 5)
|
||||
c.Config().String("workspace.root")
|
||||
c.Config().Int("agents.concurrency")
|
||||
|
||||
// Feature flags
|
||||
c.Config().Enable("auto-merge")
|
||||
if c.Config().Enabled("auto-merge") { ... }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 16. Registry
|
||||
## 18. Registry
|
||||
|
||||
Use `Registry[T]` for any named collection. No `map[string]*T + sync.Mutex`.
|
||||
|
||||
```go
|
||||
// Workspace status tracking
|
||||
workspaces := core.NewRegistry[*WorkspaceStatus]()
|
||||
workspaces.Set(wsDir, status)
|
||||
workspaces.Get(wsDir)
|
||||
workspaces.Each(func(dir string, st *WorkspaceStatus) { ... })
|
||||
workspaces.Names() // insertion order
|
||||
|
||||
// Cross-cutting queries via Core
|
||||
c.RegistryOf("actions").List("agentic.*")
|
||||
c.RegistryOf("services").Names()
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 17. Stream Helpers
|
||||
|
||||
No `io` import. Core wraps all stream operations:
|
||||
|
||||
```go
|
||||
// Read entire stream
|
||||
r := c.Fs().ReadStream(path)
|
||||
content := core.ReadAll(r.Value)
|
||||
|
||||
// Write to stream
|
||||
w := c.Fs().WriteStream(path)
|
||||
core.WriteAll(w.Value, data)
|
||||
|
||||
// Close any stream
|
||||
core.CloseStream(handle)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 18. Data and Drive
|
||||
|
||||
```go
|
||||
// Embedded assets (prompts, templates, personas)
|
||||
r := c.Data().ReadString("prompts/coding.md")
|
||||
c.Data().List("templates/")
|
||||
c.Data().Mounts() // all mounted asset namespaces
|
||||
|
||||
// Transport configuration
|
||||
c.Drive().New(core.NewOptions(
|
||||
core.Option{Key: "name", Value: "charon"},
|
||||
core.Option{Key: "transport", Value: "http://10.69.69.165:9101"},
|
||||
))
|
||||
c.Drive().Get("charon")
|
||||
```
|
||||
|
||||
---
|
||||
|
|
@ -430,7 +377,34 @@ core.Trim(s) // not strings.TrimSpace
|
|||
|
||||
---
|
||||
|
||||
## 20. Comments (AX Principle 2)
|
||||
## 20. Error Handling and Logging
|
||||
|
||||
All errors via `core.E()`. All logging via Core. No `fmt`, `errors`, or `log` imports.
|
||||
|
||||
```go
|
||||
return core.E("dispatch.prep", "workspace not found", nil)
|
||||
return core.E("dispatch.prep", core.Concat("repo ", repo, " invalid"), cause)
|
||||
core.Info("agent dispatched", "repo", repo, "agent", agent)
|
||||
core.Error("dispatch failed", "err", err)
|
||||
core.Security("entitlement.denied", "action", action, "reason", reason)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 21. Stream Helpers and Data
|
||||
|
||||
```go
|
||||
r := c.Data().ReadString("prompts/coding.md")
|
||||
c.Data().List("templates/")
|
||||
c.Drive().New(core.NewOptions(
|
||||
core.Option{Key: "name", Value: "charon"},
|
||||
core.Option{Key: "transport", Value: "http://10.69.69.165:9101"},
|
||||
))
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 22. Comments (AX Principle 2)
|
||||
|
||||
Every exported function MUST have a usage-example comment:
|
||||
|
||||
|
|
@ -441,41 +415,11 @@ Every exported function MUST have a usage-example comment:
|
|||
func (s *PrepSubsystem) gitCmd(ctx context.Context, dir string, args ...string) core.Result {
|
||||
```
|
||||
|
||||
No exceptions. The comment is for every model that will ever read the code.
|
||||
|
||||
---
|
||||
|
||||
## 21. Example Tests (AX Principle 7b)
|
||||
## 23. Test Strategy (AX Principle 7)
|
||||
|
||||
One `{source}_example_test.go` per source file. Examples serve as test + documentation + godoc.
|
||||
|
||||
```go
|
||||
// file: dispatch_example_test.go
|
||||
|
||||
func ExamplePrepSubsystem_handleDispatch() {
|
||||
c := core.New(core.WithService(agentic.Register))
|
||||
r := c.Action("agentic.dispatch").Run(ctx, opts)
|
||||
core.Println(r.OK)
|
||||
// Output: true
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 22. Quality Gates (AX Principle 9)
|
||||
|
||||
```bash
|
||||
# No disallowed imports (all 10)
|
||||
grep -rn '"os"\|"os/exec"\|"io"\|"fmt"\|"errors"\|"log"\|"encoding/json"\|"path/filepath"\|"unsafe"\|"strings"' *.go **/*.go \
|
||||
| grep -v _test.go
|
||||
|
||||
# Test naming
|
||||
grep -rn "^func Test" *_test.go **/*_test.go \
|
||||
| grep -v "Test[A-Z][a-z]*_.*_\(Good\|Bad\|Ugly\)"
|
||||
|
||||
# String concat
|
||||
grep -rn '" + \| + "' *.go **/*.go | grep -v _test.go | grep -v "//"
|
||||
```
|
||||
`TestFile_Function_{Good,Bad,Ugly}` — 100% naming compliance target.
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -484,10 +428,12 @@ grep -rn '" + \| + "' *.go **/*.go | grep -v _test.go | grep -v "//"
|
|||
| Package | RFC | Role |
|
||||
|---------|-----|------|
|
||||
| core/go | `core/go/docs/RFC.md` | Primitives — all 21 sections |
|
||||
| go-process | `core/go-process/docs/RFC.md` | Process Action handlers |
|
||||
| go-process | `core/go-process/docs/RFC.md` | Process Action handlers (Result-native) |
|
||||
|
||||
---
|
||||
|
||||
## Changelog
|
||||
|
||||
- 2026-03-26: WIP — net/http consolidated to transport.go (ONE file). net/url + io/fs eliminated. RFC-025 updated with 3 new quality gates (net/http, net/url, io/fs). 1:1 test + example test coverage. Array[T].Deduplicate replaces custom helpers. Remaining: remove dead `client` field from test literals, brain/provider.go Gin handler.
|
||||
- 2026-03-25: Quality gates pass. Zero disallowed imports (all 10). encoding/json→Core JSON. path/filepath→Core Path. os→Core Env/Fs. io→Core ReadAll/WriteAll. go-process fully Result-native. ServiceRuntime on all subsystems. 22 named Actions + Task pipeline. ChannelNotifier→IPC. Reference docs synced.
|
||||
- 2026-03-25: Initial spec — written with full core/go v0.8.0 domain context.
|
||||
|
|
|
|||
302
pkg/agentic/actions.go
Normal file
302
pkg/agentic/actions.go
Normal file
|
|
@ -0,0 +1,302 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Named Action handlers for the agentic service.
|
||||
// Each handler adapts (ctx, Options) → Result to call the existing MCP tool method.
|
||||
// Registered during OnStartup — the Action registry IS the capability map.
|
||||
//
|
||||
// c.Action("agentic.dispatch").Run(ctx, opts)
|
||||
// c.Actions() // all registered capabilities
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// --- Dispatch & Workspace ---
|
||||
|
||||
// handleDispatch dispatches a subagent to work on a repo task.
|
||||
//
|
||||
// r := c.Action("agentic.dispatch").Run(ctx, core.NewOptions(
|
||||
// core.Option{Key: "repo", Value: "go-io"},
|
||||
// core.Option{Key: "task", Value: "Fix tests"},
|
||||
// ))
|
||||
func (s *PrepSubsystem) handleDispatch(ctx context.Context, opts core.Options) core.Result {
|
||||
input := DispatchInput{
|
||||
Repo: opts.String("repo"),
|
||||
Task: opts.String("task"),
|
||||
Agent: opts.String("agent"),
|
||||
Issue: opts.Int("issue"),
|
||||
}
|
||||
_, out, err := s.dispatch(ctx, nil, input)
|
||||
if err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
return core.Result{Value: out, OK: true}
|
||||
}
|
||||
|
||||
// handlePrep prepares a workspace without dispatching an agent.
|
||||
//
|
||||
// r := c.Action("agentic.prep").Run(ctx, core.NewOptions(
|
||||
// core.Option{Key: "repo", Value: "go-io"},
|
||||
// core.Option{Key: "issue", Value: 42},
|
||||
// ))
|
||||
func (s *PrepSubsystem) handlePrep(ctx context.Context, opts core.Options) core.Result {
|
||||
input := PrepInput{
|
||||
Repo: opts.String("repo"),
|
||||
Org: opts.String("org"),
|
||||
Issue: opts.Int("issue"),
|
||||
}
|
||||
_, out, err := s.prepWorkspace(ctx, nil, input)
|
||||
if err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
return core.Result{Value: out, OK: true}
|
||||
}
|
||||
|
||||
// handleStatus lists workspace statuses.
|
||||
//
|
||||
// r := c.Action("agentic.status").Run(ctx, core.NewOptions())
|
||||
func (s *PrepSubsystem) handleStatus(ctx context.Context, opts core.Options) core.Result {
|
||||
input := StatusInput{
|
||||
Workspace: opts.String("workspace"),
|
||||
Limit: opts.Int("limit"),
|
||||
Status: opts.String("status"),
|
||||
}
|
||||
_, out, err := s.status(ctx, nil, input)
|
||||
if err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
return core.Result{Value: out, OK: true}
|
||||
}
|
||||
|
||||
// handleResume resumes a blocked workspace.
|
||||
//
|
||||
// r := c.Action("agentic.resume").Run(ctx, core.NewOptions(
|
||||
// core.Option{Key: "workspace", Value: "core/go-io/task-5"},
|
||||
// ))
|
||||
func (s *PrepSubsystem) handleResume(ctx context.Context, opts core.Options) core.Result {
|
||||
input := ResumeInput{
|
||||
Workspace: opts.String("workspace"),
|
||||
Answer: opts.String("answer"),
|
||||
}
|
||||
_, out, err := s.resume(ctx, nil, input)
|
||||
if err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
return core.Result{Value: out, OK: true}
|
||||
}
|
||||
|
||||
// handleScan scans forge repos for actionable issues.
|
||||
//
|
||||
// r := c.Action("agentic.scan").Run(ctx, core.NewOptions())
|
||||
func (s *PrepSubsystem) handleScan(ctx context.Context, opts core.Options) core.Result {
|
||||
input := ScanInput{
|
||||
Org: opts.String("org"),
|
||||
Limit: opts.Int("limit"),
|
||||
}
|
||||
_, out, err := s.scan(ctx, nil, input)
|
||||
if err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
return core.Result{Value: out, OK: true}
|
||||
}
|
||||
|
||||
// handleWatch watches a workspace for completion.
|
||||
//
|
||||
// r := c.Action("agentic.watch").Run(ctx, core.NewOptions(
|
||||
// core.Option{Key: "workspace", Value: "core/go-io/task-5"},
|
||||
// ))
|
||||
func (s *PrepSubsystem) handleWatch(ctx context.Context, opts core.Options) core.Result {
|
||||
input := WatchInput{}
|
||||
_, out, err := s.watch(ctx, nil, input)
|
||||
if err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
return core.Result{Value: out, OK: true}
|
||||
}
|
||||
|
||||
// --- Pipeline ---
|
||||
|
||||
// handleQA runs build+test on a completed workspace.
|
||||
//
|
||||
// r := c.Action("agentic.qa").Run(ctx, core.NewOptions(
|
||||
// core.Option{Key: "workspace", Value: "/path/to/workspace"},
|
||||
// ))
|
||||
func (s *PrepSubsystem) handleQA(ctx context.Context, opts core.Options) core.Result {
|
||||
wsDir := opts.String("workspace")
|
||||
if wsDir == "" {
|
||||
return core.Result{Value: core.E("agentic.qa", "workspace is required", nil), OK: false}
|
||||
}
|
||||
passed := s.runQA(wsDir)
|
||||
return core.Result{Value: passed, OK: passed}
|
||||
}
|
||||
|
||||
// handleAutoPR creates a PR for a completed workspace.
|
||||
//
|
||||
// r := c.Action("agentic.auto-pr").Run(ctx, core.NewOptions(
|
||||
// core.Option{Key: "workspace", Value: "/path/to/workspace"},
|
||||
// ))
|
||||
func (s *PrepSubsystem) handleAutoPR(ctx context.Context, opts core.Options) core.Result {
|
||||
wsDir := opts.String("workspace")
|
||||
if wsDir == "" {
|
||||
return core.Result{Value: core.E("agentic.auto-pr", "workspace is required", nil), OK: false}
|
||||
}
|
||||
s.autoCreatePR(wsDir)
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
// handleVerify verifies and auto-merges a PR.
|
||||
//
|
||||
// r := c.Action("agentic.verify").Run(ctx, core.NewOptions(
|
||||
// core.Option{Key: "workspace", Value: "/path/to/workspace"},
|
||||
// ))
|
||||
func (s *PrepSubsystem) handleVerify(ctx context.Context, opts core.Options) core.Result {
|
||||
wsDir := opts.String("workspace")
|
||||
if wsDir == "" {
|
||||
return core.Result{Value: core.E("agentic.verify", "workspace is required", nil), OK: false}
|
||||
}
|
||||
s.autoVerifyAndMerge(wsDir)
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
// handleIngest creates issues from agent findings.
|
||||
//
|
||||
// r := c.Action("agentic.ingest").Run(ctx, core.NewOptions(
|
||||
// core.Option{Key: "workspace", Value: "/path/to/workspace"},
|
||||
// ))
|
||||
func (s *PrepSubsystem) handleIngest(ctx context.Context, opts core.Options) core.Result {
|
||||
wsDir := opts.String("workspace")
|
||||
if wsDir == "" {
|
||||
return core.Result{Value: core.E("agentic.ingest", "workspace is required", nil), OK: false}
|
||||
}
|
||||
s.ingestFindings(wsDir)
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
// handlePoke drains the dispatch queue.
//
// Takes no options; ctx and opts are accepted only to satisfy the
// (ctx, Options) → Result Action handler signature.
//
// r := c.Action("agentic.poke").Run(ctx, core.NewOptions())
func (s *PrepSubsystem) handlePoke(ctx context.Context, opts core.Options) core.Result {
	s.Poke()
	return core.Result{OK: true}
}
|
||||
|
||||
// handleMirror mirrors agent branches to GitHub.
|
||||
//
|
||||
// r := c.Action("agentic.mirror").Run(ctx, core.NewOptions(
|
||||
// core.Option{Key: "repo", Value: "go-io"},
|
||||
// ))
|
||||
func (s *PrepSubsystem) handleMirror(ctx context.Context, opts core.Options) core.Result {
|
||||
input := MirrorInput{
|
||||
Repo: opts.String("repo"),
|
||||
}
|
||||
_, out, err := s.mirror(ctx, nil, input)
|
||||
if err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
return core.Result{Value: out, OK: true}
|
||||
}
|
||||
|
||||
// --- Forge ---
|
||||
|
||||
// handleIssueGet retrieves a forge issue.
//
// NOTE(review): the sibling forge handlers document the repo via the "_arg"
// option; confirm that "repo" is also accepted here.
//
// r := c.Action("agentic.issue.get").Run(ctx, core.NewOptions(
//	core.Option{Key: "repo", Value: "go-io"},
//	core.Option{Key: "number", Value: "42"},
// ))
func (s *PrepSubsystem) handleIssueGet(ctx context.Context, opts core.Options) core.Result {
	// Thin delegate: the shared CLI command implementation does the work.
	return s.cmdIssueGet(opts)
}
|
||||
|
||||
// handleIssueList lists forge issues.
//
// r := c.Action("agentic.issue.list").Run(ctx, core.NewOptions(
//	core.Option{Key: "_arg", Value: "go-io"},
// ))
func (s *PrepSubsystem) handleIssueList(ctx context.Context, opts core.Options) core.Result {
	// Thin delegate: the shared CLI command implementation does the work.
	return s.cmdIssueList(opts)
}
|
||||
|
||||
// handleIssueCreate creates a forge issue.
//
// r := c.Action("agentic.issue.create").Run(ctx, core.NewOptions(
//	core.Option{Key: "_arg", Value: "go-io"},
//	core.Option{Key: "title", Value: "Bug report"},
// ))
func (s *PrepSubsystem) handleIssueCreate(ctx context.Context, opts core.Options) core.Result {
	// Thin delegate: the shared CLI command implementation does the work.
	return s.cmdIssueCreate(opts)
}
|
||||
|
||||
// handlePRGet retrieves a forge PR.
//
// r := c.Action("agentic.pr.get").Run(ctx, core.NewOptions(
//	core.Option{Key: "_arg", Value: "go-io"},
//	core.Option{Key: "number", Value: "12"},
// ))
func (s *PrepSubsystem) handlePRGet(ctx context.Context, opts core.Options) core.Result {
	// Thin delegate: the shared CLI command implementation does the work.
	return s.cmdPRGet(opts)
}
|
||||
|
||||
// handlePRList lists forge PRs.
//
// r := c.Action("agentic.pr.list").Run(ctx, core.NewOptions(
//	core.Option{Key: "_arg", Value: "go-io"},
// ))
func (s *PrepSubsystem) handlePRList(ctx context.Context, opts core.Options) core.Result {
	// Thin delegate: the shared CLI command implementation does the work.
	return s.cmdPRList(opts)
}
|
||||
|
||||
// handlePRMerge merges a forge PR.
//
// r := c.Action("agentic.pr.merge").Run(ctx, core.NewOptions(
//	core.Option{Key: "_arg", Value: "go-io"},
//	core.Option{Key: "number", Value: "12"},
// ))
func (s *PrepSubsystem) handlePRMerge(ctx context.Context, opts core.Options) core.Result {
	// Thin delegate: the shared CLI command implementation does the work.
	return s.cmdPRMerge(opts)
}
|
||||
|
||||
// --- Review ---
|
||||
|
||||
// handleReviewQueue runs CodeRabbit review on a workspace.
//
// Options read: "limit" (int), "reviewer" (string), "dry_run" (bool).
// (The previous example showed a "workspace" option, which this handler
// does not read.)
//
// r := c.Action("agentic.review-queue").Run(ctx, core.NewOptions(
//	core.Option{Key: "limit", Value: "5"},
//	core.Option{Key: "reviewer", Value: "coderabbit"},
//	core.Option{Key: "dry_run", Value: "true"},
// ))
func (s *PrepSubsystem) handleReviewQueue(ctx context.Context, opts core.Options) core.Result {
	input := ReviewQueueInput{
		Limit:    opts.Int("limit"),
		Reviewer: opts.String("reviewer"),
		DryRun:   opts.Bool("dry_run"),
	}
	_, out, err := s.reviewQueue(ctx, nil, input)
	if err != nil {
		return core.Result{Value: err, OK: false}
	}
	return core.Result{Value: out, OK: true}
}
|
||||
|
||||
// --- Epic ---
|
||||
|
||||
// handleEpic creates an epic (multi-repo task breakdown).
//
// Options read: "repo", "org", "title", "body". (The previous example
// showed a "task" option, which this handler does not read.)
//
// r := c.Action("agentic.epic").Run(ctx, core.NewOptions(
//	core.Option{Key: "org", Value: "core"},
//	core.Option{Key: "repo", Value: "go-io"},
//	core.Option{Key: "title", Value: "Update all repos to v0.8.0"},
// ))
func (s *PrepSubsystem) handleEpic(ctx context.Context, opts core.Options) core.Result {
	input := EpicInput{
		Repo:  opts.String("repo"),
		Org:   opts.String("org"),
		Title: opts.String("title"),
		Body:  opts.String("body"),
	}
	_, out, err := s.createEpic(ctx, nil, input)
	if err != nil {
		return core.Result{Value: err, OK: false}
	}
	return core.Result{Value: out, OK: true}
}
|
||||
54
pkg/agentic/actions_example_test.go
Normal file
54
pkg/agentic/actions_example_test.go
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/process"
|
||||
|
||||
"dappco.re/go/agent/pkg/agentic"
|
||||
)
|
||||
|
||||
func ExampleRegister() {
|
||||
c := core.New(
|
||||
core.WithService(process.Register),
|
||||
core.WithService(agentic.Register),
|
||||
)
|
||||
c.ServiceStartup(context.Background(), nil)
|
||||
|
||||
// All actions registered during OnStartup
|
||||
core.Println(c.Action("agentic.dispatch").Exists())
|
||||
core.Println(c.Action("agentic.status").Exists())
|
||||
core.Println(c.Action("agentic.qa").Exists())
|
||||
// Output:
|
||||
// true
|
||||
// true
|
||||
// true
|
||||
}
|
||||
|
||||
func ExampleRegister_actions() {
|
||||
c := core.New(
|
||||
core.WithService(process.Register),
|
||||
core.WithService(agentic.Register),
|
||||
)
|
||||
c.ServiceStartup(context.Background(), nil)
|
||||
|
||||
actions := c.Actions()
|
||||
core.Println(len(actions) > 0)
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExampleRegister_task() {
|
||||
c := core.New(
|
||||
core.WithService(process.Register),
|
||||
core.WithService(agentic.Register),
|
||||
)
|
||||
c.ServiceStartup(context.Background(), nil)
|
||||
|
||||
// Completion pipeline registered as a Task
|
||||
t := c.Task("agent.completion")
|
||||
core.Println(t.Description)
|
||||
// Output: QA → PR → Verify → Merge
|
||||
}
|
||||
53
pkg/agentic/actions_test.go
Normal file
53
pkg/agentic/actions_test.go
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestActions_HandleDispatch_Good(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
r := s.handleDispatch(context.Background(), core.NewOptions(
|
||||
core.Option{Key: "repo", Value: "go-io"},
|
||||
core.Option{Key: "task", Value: "fix tests"},
|
||||
))
|
||||
// Will fail (no local clone) but exercises the handler path
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
||||
func TestActions_HandleStatus_Good(t *testing.T) {
|
||||
t.Setenv("CORE_WORKSPACE", t.TempDir())
|
||||
s := newPrepWithProcess()
|
||||
r := s.handleStatus(context.Background(), core.NewOptions())
|
||||
assert.True(t, r.OK)
|
||||
}
|
||||
|
||||
func TestActions_HandlePoke_Good(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
s.pokeCh = make(chan struct{}, 1)
|
||||
r := s.handlePoke(context.Background(), core.NewOptions())
|
||||
assert.True(t, r.OK)
|
||||
}
|
||||
|
||||
func TestActions_HandleQA_Bad_NoWorkspace(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
r := s.handleQA(context.Background(), core.NewOptions())
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
||||
func TestActions_HandleVerify_Bad_NoWorkspace(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
r := s.handleVerify(context.Background(), core.NewOptions())
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
||||
func TestActions_HandleIngest_Bad_NoWorkspace(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
r := s.handleIngest(context.Background(), core.NewOptions())
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
|
@ -23,7 +23,7 @@ func (s *PrepSubsystem) autoCreatePR(wsDir string) {
|
|||
// PRs target dev — agents never merge directly to main
|
||||
base := "dev"
|
||||
|
||||
out := gitOutput(ctx, repoDir, "log", "--oneline", "origin/"+base+"..HEAD")
|
||||
out := s.gitOutput(ctx, repoDir, "log", "--oneline", "origin/"+base+"..HEAD")
|
||||
if out == "" {
|
||||
return
|
||||
}
|
||||
|
|
@ -37,7 +37,7 @@ func (s *PrepSubsystem) autoCreatePR(wsDir string) {
|
|||
|
||||
// Push the branch to forge
|
||||
forgeRemote := core.Sprintf("ssh://git@forge.lthn.ai:2223/%s/%s.git", org, st.Repo)
|
||||
if !gitCmdOK(ctx, repoDir, "push", forgeRemote, st.Branch) {
|
||||
if !s.gitCmdOK(ctx, repoDir, "push", forgeRemote, st.Branch) {
|
||||
if st2, err := ReadStatus(wsDir); err == nil {
|
||||
st2.Question = "PR push failed"
|
||||
writeStatus(wsDir, st2)
|
||||
|
|
@ -89,5 +89,5 @@ func truncate(s string, max int) string {
|
|||
if len(s) <= max {
|
||||
return s
|
||||
}
|
||||
return s[:max] + "..."
|
||||
return core.Concat(s[:max], "...")
|
||||
}
|
||||
|
|
|
|||
10
pkg/agentic/auto_pr_example_test.go
Normal file
10
pkg/agentic/auto_pr_example_test.go
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func Example_truncate() {
|
||||
core.Println(truncate("hello world", 5))
|
||||
// Output: hello...
|
||||
}
|
||||
|
|
@ -10,6 +10,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -23,6 +24,7 @@ func TestAutoPR_AutoCreatePR_Bad(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -103,6 +105,7 @@ func TestAutoPR_AutoCreatePR_Ugly(t *testing.T) {
|
|||
require.NoError(t, os.WriteFile(filepath.Join(wsDir, "status.json"), data, 0o644))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ package agentic
|
|||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"dappco.re/go/agent/pkg/lib"
|
||||
core "dappco.re/go/core"
|
||||
|
|
@ -14,7 +13,7 @@ import (
|
|||
|
||||
// registerCommands adds agentic CLI commands to Core's command tree.
|
||||
func (s *PrepSubsystem) registerCommands(ctx context.Context) {
|
||||
c := s.core
|
||||
c := s.Core()
|
||||
c.Command("run/task", core.Command{Description: "Run a single task end-to-end", Action: s.cmdRunTaskFactory(ctx)})
|
||||
c.Command("run/orchestrator", core.Command{Description: "Run the queue orchestrator (standalone, no MCP)", Action: s.cmdOrchestratorFactory(ctx)})
|
||||
c.Command("prep", core.Command{Description: "Prepare a workspace: clone repo, build prompt", Action: s.cmdPrep})
|
||||
|
|
@ -48,27 +47,27 @@ func (s *PrepSubsystem) cmdRunTask(ctx context.Context, opts core.Options) core.
|
|||
|
||||
issue := parseIntStr(issueStr)
|
||||
|
||||
core.Print(os.Stderr, "core-agent run task")
|
||||
core.Print(os.Stderr, " repo: %s/%s", org, repo)
|
||||
core.Print(os.Stderr, " agent: %s", agent)
|
||||
core.Print(nil, "core-agent run task")
|
||||
core.Print(nil, " repo: %s/%s", org, repo)
|
||||
core.Print(nil, " agent: %s", agent)
|
||||
if issue > 0 {
|
||||
core.Print(os.Stderr, " issue: #%d", issue)
|
||||
core.Print(nil, " issue: #%d", issue)
|
||||
}
|
||||
core.Print(os.Stderr, " task: %s", task)
|
||||
core.Print(os.Stderr, "")
|
||||
core.Print(nil, " task: %s", task)
|
||||
core.Print(nil, "")
|
||||
|
||||
result := s.DispatchSync(ctx, DispatchSyncInput{
|
||||
Org: org, Repo: repo, Agent: agent, Task: task, Issue: issue,
|
||||
})
|
||||
|
||||
if !result.OK {
|
||||
core.Print(os.Stderr, "FAILED: %v", result.Error)
|
||||
core.Print(nil, "FAILED: %v", result.Error)
|
||||
return core.Result{Value: result.Error, OK: false}
|
||||
}
|
||||
|
||||
core.Print(os.Stderr, "DONE: %s", result.Status)
|
||||
core.Print(nil, "DONE: %s", result.Status)
|
||||
if result.PRURL != "" {
|
||||
core.Print(os.Stderr, " PR: %s", result.PRURL)
|
||||
core.Print(nil, " PR: %s", result.PRURL)
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
|
@ -79,12 +78,12 @@ func (s *PrepSubsystem) cmdOrchestratorFactory(ctx context.Context) func(core.Op
|
|||
}
|
||||
|
||||
func (s *PrepSubsystem) cmdOrchestrator(ctx context.Context, _ core.Options) core.Result {
|
||||
core.Print(os.Stderr, "core-agent orchestrator running (pid %s)", core.Env("PID"))
|
||||
core.Print(os.Stderr, " workspace: %s", WorkspaceRoot())
|
||||
core.Print(os.Stderr, " watching queue, draining on 30s tick + completion poke")
|
||||
core.Print(nil, "core-agent orchestrator running (pid %s)", core.Env("PID"))
|
||||
core.Print(nil, " workspace: %s", WorkspaceRoot())
|
||||
core.Print(nil, " watching queue, draining on 30s tick + completion poke")
|
||||
|
||||
<-ctx.Done()
|
||||
core.Print(os.Stderr, "orchestrator shutting down")
|
||||
core.Print(nil, "orchestrator shutting down")
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
|
|
@ -143,27 +142,21 @@ func (s *PrepSubsystem) cmdPrep(opts core.Options) core.Result {
|
|||
|
||||
func (s *PrepSubsystem) cmdStatus(opts core.Options) core.Result {
|
||||
wsRoot := WorkspaceRoot()
|
||||
fsys := s.core.Fs()
|
||||
fsys := s.Core().Fs()
|
||||
r := fsys.List(wsRoot)
|
||||
if !r.OK {
|
||||
core.Print(nil, "no workspaces found at %s", wsRoot)
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
entries := r.Value.([]os.DirEntry)
|
||||
if len(entries) == 0 {
|
||||
statusFiles := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json"))
|
||||
if len(statusFiles) == 0 {
|
||||
core.Print(nil, "no workspaces")
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
for _, e := range entries {
|
||||
if !e.IsDir() {
|
||||
continue
|
||||
}
|
||||
statusFile := core.JoinPath(wsRoot, e.Name(), "status.json")
|
||||
if sr := fsys.Read(statusFile); sr.OK {
|
||||
core.Print(nil, " %s", e.Name())
|
||||
}
|
||||
for _, sf := range statusFiles {
|
||||
core.Print(nil, " %s", core.PathBase(core.PathDir(sf)))
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
|
@ -224,16 +217,15 @@ func (s *PrepSubsystem) cmdExtract(opts core.Options) core.Result {
|
|||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
|
||||
fsys := s.core.Fs()
|
||||
r := fsys.List(target)
|
||||
if r.OK {
|
||||
for _, e := range r.Value.([]os.DirEntry) {
|
||||
marker := " "
|
||||
if e.IsDir() {
|
||||
marker = "/"
|
||||
}
|
||||
core.Print(nil, " %s%s", e.Name(), marker)
|
||||
fsys := s.Core().Fs()
|
||||
paths := core.PathGlob(core.JoinPath(target, "*"))
|
||||
for _, p := range paths {
|
||||
name := core.PathBase(p)
|
||||
marker := " "
|
||||
if fsys.IsDir(p) {
|
||||
marker = "/"
|
||||
}
|
||||
core.Print(nil, " %s%s", name, marker)
|
||||
}
|
||||
|
||||
core.Print(nil, "done")
|
||||
|
|
|
|||
15
pkg/agentic/commands_example_test.go
Normal file
15
pkg/agentic/commands_example_test.go
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func Example_parseIntStr() {
|
||||
core.Println(parseIntStr("42"))
|
||||
core.Println(parseIntStr("abc"))
|
||||
core.Println(parseIntStr(""))
|
||||
// Output:
|
||||
// 42
|
||||
// 0
|
||||
// 0
|
||||
}
|
||||
|
|
@ -28,7 +28,7 @@ func fmtIndex(n int64) string { return strconv.FormatInt(n, 10) }
|
|||
|
||||
// registerForgeCommands adds Forge API commands to Core's command tree.
|
||||
func (s *PrepSubsystem) registerForgeCommands() {
|
||||
c := s.core
|
||||
c := s.Core()
|
||||
c.Command("issue/get", core.Command{Description: "Get a Forge issue", Action: s.cmdIssueGet})
|
||||
c.Command("issue/list", core.Command{Description: "List Forge issues for a repo", Action: s.cmdIssueList})
|
||||
c.Command("issue/comment", core.Command{Description: "Comment on a Forge issue", Action: s.cmdIssueComment})
|
||||
|
|
|
|||
19
pkg/agentic/commands_forge_example_test.go
Normal file
19
pkg/agentic/commands_forge_example_test.go
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func Example_parseForgeArgs() {
|
||||
org, repo, num := parseForgeArgs(core.NewOptions(
|
||||
core.Option{Key: "_arg", Value: "go-io"},
|
||||
core.Option{Key: "number", Value: "42"},
|
||||
))
|
||||
core.Println(org, repo, num)
|
||||
// Output: core go-io 42
|
||||
}
|
||||
|
||||
func Example_fmtIndex() {
|
||||
core.Println(fmtIndex(42))
|
||||
// Output: 42
|
||||
}
|
||||
|
|
@ -33,7 +33,7 @@ func testPrepWithCore(t *testing.T, srv *httptest.Server) (*PrepSubsystem, *core
|
|||
}
|
||||
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
forge: f,
|
||||
forgeURL: "",
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -828,7 +828,7 @@ func TestCommands_CmdStatus_Bad_NoWorkspaceDir(t *testing.T) {
|
|||
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,14 +5,12 @@
|
|||
package agentic
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// registerWorkspaceCommands adds workspace management commands.
|
||||
func (s *PrepSubsystem) registerWorkspaceCommands() {
|
||||
c := s.core
|
||||
c := s.Core()
|
||||
c.Command("workspace/list", core.Command{Description: "List all agent workspaces with status", Action: s.cmdWorkspaceList})
|
||||
c.Command("workspace/clean", core.Command{Description: "Remove completed/failed/blocked workspaces", Action: s.cmdWorkspaceClean})
|
||||
c.Command("workspace/dispatch", core.Command{Description: "Dispatch an agent to work on a repo task", Action: s.cmdWorkspaceDispatch})
|
||||
|
|
@ -20,27 +18,18 @@ func (s *PrepSubsystem) registerWorkspaceCommands() {
|
|||
|
||||
func (s *PrepSubsystem) cmdWorkspaceList(opts core.Options) core.Result {
|
||||
wsRoot := WorkspaceRoot()
|
||||
fsys := s.core.Fs()
|
||||
fsys := s.Core().Fs()
|
||||
|
||||
r := fsys.List(wsRoot)
|
||||
if !r.OK {
|
||||
core.Print(nil, "no workspaces at %s", wsRoot)
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
entries := r.Value.([]os.DirEntry)
|
||||
statusFiles := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json"))
|
||||
count := 0
|
||||
for _, e := range entries {
|
||||
if !e.IsDir() {
|
||||
continue
|
||||
}
|
||||
statusFile := core.JoinPath(wsRoot, e.Name(), "status.json")
|
||||
if sr := fsys.Read(statusFile); sr.OK {
|
||||
for _, sf := range statusFiles {
|
||||
wsName := core.PathBase(core.PathDir(sf))
|
||||
if sr := fsys.Read(sf); sr.OK {
|
||||
content := sr.Value.(string)
|
||||
status := extractField(content, "status")
|
||||
repo := extractField(content, "repo")
|
||||
agent := extractField(content, "agent")
|
||||
core.Print(nil, " %-8s %-8s %-10s %s", status, agent, repo, e.Name())
|
||||
core.Print(nil, " %-8s %-8s %-10s %s", status, agent, repo, wsName)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
|
@ -52,27 +41,18 @@ func (s *PrepSubsystem) cmdWorkspaceList(opts core.Options) core.Result {
|
|||
|
||||
func (s *PrepSubsystem) cmdWorkspaceClean(opts core.Options) core.Result {
|
||||
wsRoot := WorkspaceRoot()
|
||||
fsys := s.core.Fs()
|
||||
fsys := s.Core().Fs()
|
||||
filter := opts.String("_arg")
|
||||
if filter == "" {
|
||||
filter = "all"
|
||||
}
|
||||
|
||||
r := fsys.List(wsRoot)
|
||||
if !r.OK {
|
||||
core.Print(nil, "no workspaces")
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
entries := r.Value.([]os.DirEntry)
|
||||
statusFiles := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json"))
|
||||
var toRemove []string
|
||||
|
||||
for _, e := range entries {
|
||||
if !e.IsDir() {
|
||||
continue
|
||||
}
|
||||
statusFile := core.JoinPath(wsRoot, e.Name(), "status.json")
|
||||
sr := fsys.Read(statusFile)
|
||||
for _, sf := range statusFiles {
|
||||
wsName := core.PathBase(core.PathDir(sf))
|
||||
sr := fsys.Read(sf)
|
||||
if !sr.OK {
|
||||
continue
|
||||
}
|
||||
|
|
@ -81,19 +61,19 @@ func (s *PrepSubsystem) cmdWorkspaceClean(opts core.Options) core.Result {
|
|||
switch filter {
|
||||
case "all":
|
||||
if status == "completed" || status == "failed" || status == "blocked" || status == "merged" || status == "ready-for-review" {
|
||||
toRemove = append(toRemove, e.Name())
|
||||
toRemove = append(toRemove, wsName)
|
||||
}
|
||||
case "completed":
|
||||
if status == "completed" || status == "merged" || status == "ready-for-review" {
|
||||
toRemove = append(toRemove, e.Name())
|
||||
toRemove = append(toRemove, wsName)
|
||||
}
|
||||
case "failed":
|
||||
if status == "failed" {
|
||||
toRemove = append(toRemove, e.Name())
|
||||
toRemove = append(toRemove, wsName)
|
||||
}
|
||||
case "blocked":
|
||||
if status == "blocked" {
|
||||
toRemove = append(toRemove, e.Name())
|
||||
toRemove = append(toRemove, wsName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
14
pkg/agentic/commands_workspace_example_test.go
Normal file
14
pkg/agentic/commands_workspace_example_test.go
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func Example_extractField() {
|
||||
json := `{"status":"completed","repo":"go-io"}`
|
||||
core.Println(extractField(json, "status"))
|
||||
core.Println(extractField(json, "repo"))
|
||||
// Output:
|
||||
// completed
|
||||
// go-io
|
||||
}
|
||||
|
|
@ -77,7 +77,7 @@ func TestCommandsWorkspace_CmdWorkspaceList_Bad_NoWorkspaceRootDir(t *testing.T)
|
|||
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -108,7 +108,7 @@ func TestCommandsWorkspace_CmdWorkspaceList_Ugly_NonDirAndCorruptStatus(t *testi
|
|||
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -138,7 +138,7 @@ func TestCommandsWorkspace_CmdWorkspaceClean_Bad_UnknownFilterLeavesEverything(t
|
|||
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -175,7 +175,7 @@ func TestCommandsWorkspace_CmdWorkspaceClean_Ugly_MixedStatuses(t *testing.T) {
|
|||
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -204,7 +204,7 @@ func TestCommandsWorkspace_CmdWorkspaceDispatch_Ugly_AllFieldsSet(t *testing.T)
|
|||
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -151,11 +151,11 @@ func containerCommand(agentType, command string, args []string, repoDir, metaDir
|
|||
// Host access for Ollama (local models)
|
||||
"--add-host=host.docker.internal:host-gateway",
|
||||
// Workspace: repo + meta
|
||||
"-v", repoDir + ":/workspace",
|
||||
"-v", metaDir + ":/workspace/.meta",
|
||||
"-v", core.Concat(repoDir, ":/workspace"),
|
||||
"-v", core.Concat(metaDir, ":/workspace/.meta"),
|
||||
"-w", "/workspace",
|
||||
// Auth: agent configs only — NO SSH keys, git push runs on host
|
||||
"-v", core.JoinPath(home, ".codex") + ":/home/dev/.codex:ro",
|
||||
"-v", core.Concat(core.JoinPath(home, ".codex"), ":/home/dev/.codex:ro"),
|
||||
// API keys — passed by name, Docker resolves from host env
|
||||
"-e", "OPENAI_API_KEY",
|
||||
"-e", "ANTHROPIC_API_KEY",
|
||||
|
|
@ -272,13 +272,13 @@ func (s *PrepSubsystem) stopIssueTracking(wsDir string) {
|
|||
|
||||
// broadcastStart emits IPC + audit events for agent start.
|
||||
func (s *PrepSubsystem) broadcastStart(agent, wsDir string) {
|
||||
if s.core != nil {
|
||||
st, _ := ReadStatus(wsDir)
|
||||
repo := ""
|
||||
if st != nil {
|
||||
repo = st.Repo
|
||||
}
|
||||
s.core.ACTION(messages.AgentStarted{
|
||||
st, _ := ReadStatus(wsDir)
|
||||
repo := ""
|
||||
if st != nil {
|
||||
repo = st.Repo
|
||||
}
|
||||
if s.ServiceRuntime != nil {
|
||||
s.Core().ACTION(messages.AgentStarted{
|
||||
Agent: agent, Repo: repo, Workspace: core.PathBase(wsDir),
|
||||
})
|
||||
}
|
||||
|
|
@ -288,13 +288,13 @@ func (s *PrepSubsystem) broadcastStart(agent, wsDir string) {
|
|||
// broadcastComplete emits IPC + audit events for agent completion.
|
||||
func (s *PrepSubsystem) broadcastComplete(agent, wsDir, finalStatus string) {
|
||||
emitCompletionEvent(agent, core.PathBase(wsDir), finalStatus)
|
||||
if s.core != nil {
|
||||
if s.ServiceRuntime != nil {
|
||||
st, _ := ReadStatus(wsDir)
|
||||
repo := ""
|
||||
if st != nil {
|
||||
repo = st.Repo
|
||||
}
|
||||
s.core.ACTION(messages.AgentCompleted{
|
||||
s.Core().ACTION(messages.AgentCompleted{
|
||||
Agent: agent, Repo: repo,
|
||||
Workspace: core.PathBase(wsDir), Status: finalStatus,
|
||||
})
|
||||
|
|
@ -352,15 +352,16 @@ func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir string) (int, string, er
|
|||
agentBase := core.SplitN(agent, ":", 2)[0]
|
||||
command, args = containerCommand(agentBase, command, args, repoDir, metaDir)
|
||||
|
||||
proc, err := process.StartWithOptions(context.Background(), process.RunOptions{
|
||||
sr := process.StartWithOptions(context.Background(), process.RunOptions{
|
||||
Command: command,
|
||||
Args: args,
|
||||
Dir: repoDir,
|
||||
Detach: true,
|
||||
})
|
||||
if err != nil {
|
||||
return 0, "", core.E("dispatch.spawnAgent", "failed to spawn "+agent, err)
|
||||
if !sr.OK {
|
||||
return 0, "", core.E("dispatch.spawnAgent", core.Concat("failed to spawn ", agent), nil)
|
||||
}
|
||||
proc := sr.Value.(*process.Process)
|
||||
|
||||
proc.CloseStdin()
|
||||
pid := proc.Info().PID
|
||||
|
|
@ -389,7 +390,7 @@ func (s *PrepSubsystem) runQA(wsDir string) bool {
|
|||
{"go", "vet", "./..."},
|
||||
{"go", "test", "./...", "-count=1", "-timeout", "120s"},
|
||||
} {
|
||||
if !runCmdOK(ctx, repoDir, args[0], args[1:]...) {
|
||||
if !s.runCmdOK(ctx, repoDir, args[0], args[1:]...) {
|
||||
core.Warn("QA failed", "cmd", core.Join(" ", args...))
|
||||
return false
|
||||
}
|
||||
|
|
@ -398,17 +399,17 @@ func (s *PrepSubsystem) runQA(wsDir string) bool {
|
|||
}
|
||||
|
||||
if fs.IsFile(core.JoinPath(repoDir, "composer.json")) {
|
||||
if !runCmdOK(ctx, repoDir, "composer", "install", "--no-interaction") {
|
||||
if !s.runCmdOK(ctx, repoDir, "composer", "install", "--no-interaction") {
|
||||
return false
|
||||
}
|
||||
return runCmdOK(ctx, repoDir, "composer", "test")
|
||||
return s.runCmdOK(ctx, repoDir, "composer", "test")
|
||||
}
|
||||
|
||||
if fs.IsFile(core.JoinPath(repoDir, "package.json")) {
|
||||
if !runCmdOK(ctx, repoDir, "npm", "install") {
|
||||
if !s.runCmdOK(ctx, repoDir, "npm", "install") {
|
||||
return false
|
||||
}
|
||||
return runCmdOK(ctx, repoDir, "npm", "test")
|
||||
return s.runCmdOK(ctx, repoDir, "npm", "test")
|
||||
}
|
||||
|
||||
return true
|
||||
|
|
|
|||
27
pkg/agentic/dispatch_example_test.go
Normal file
27
pkg/agentic/dispatch_example_test.go
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func Example_detectFinalStatus() {
|
||||
dir := (&core.Fs{}).NewUnrestricted().TempDir("example-ws")
|
||||
defer (&core.Fs{}).NewUnrestricted().DeleteAll(dir)
|
||||
|
||||
// Exit code 0 → completed
|
||||
status, _ := detectFinalStatus(dir, 0, "exited")
|
||||
core.Println(status)
|
||||
// Output: completed
|
||||
}
|
||||
|
||||
func Example_detectFinalStatus_failed() {
|
||||
dir := (&core.Fs{}).NewUnrestricted().TempDir("example-ws")
|
||||
defer (&core.Fs{}).NewUnrestricted().DeleteAll(dir)
|
||||
|
||||
// Non-zero exit → failed
|
||||
status, _ := detectFinalStatus(dir, 1, "exited")
|
||||
core.Println(status)
|
||||
// Output: failed
|
||||
}
|
||||
14
pkg/agentic/dispatch_sync_example_test.go
Normal file
14
pkg/agentic/dispatch_sync_example_test.go
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func Example_containerCommand() {
|
||||
cmd, args := containerCommand("codex", "codex", []string{"--model", "gpt-5.4"}, "/workspace", "/meta")
|
||||
core.Println(cmd)
|
||||
core.Println(len(args) > 0)
|
||||
// Output:
|
||||
// docker
|
||||
// true
|
||||
}
|
||||
27
pkg/agentic/dispatch_sync_test.go
Normal file
27
pkg/agentic/dispatch_sync_test.go
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDispatchSync_ContainerCommand_Good(t *testing.T) {
|
||||
cmd, args := containerCommand("codex", "codex", []string{"--model", "gpt-5.4"}, "/workspace", "/meta")
|
||||
assert.Equal(t, "docker", cmd)
|
||||
assert.Contains(t, args, "run")
|
||||
}
|
||||
|
||||
func TestDispatchSync_ContainerCommand_Bad_UnknownAgent(t *testing.T) {
|
||||
cmd, args := containerCommand("unknown", "unknown", nil, "/workspace", "/meta")
|
||||
assert.Equal(t, "docker", cmd)
|
||||
assert.NotEmpty(t, args)
|
||||
}
|
||||
|
||||
func TestDispatchSync_ContainerCommand_Ugly_EmptyArgs(t *testing.T) {
|
||||
assert.NotPanics(t, func() {
|
||||
containerCommand("codex", "codex", nil, "", "")
|
||||
})
|
||||
}
|
||||
|
|
@ -95,7 +95,7 @@ func TestDispatch_DetectFinalStatus_Ugly(t *testing.T) {
|
|||
// --- trackFailureRate ---
|
||||
|
||||
func TestDispatch_TrackFailureRate_Good(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: map[string]int{"codex": 2}}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: map[string]int{"codex": 2}}
|
||||
|
||||
// Success resets count
|
||||
triggered := s.trackFailureRate("codex", "completed", time.Now().Add(-10*time.Second))
|
||||
|
|
@ -104,7 +104,7 @@ func TestDispatch_TrackFailureRate_Good(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDispatch_TrackFailureRate_Bad(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: map[string]int{"codex": 2}}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: map[string]int{"codex": 2}}
|
||||
|
||||
// 3rd fast failure triggers backoff
|
||||
triggered := s.trackFailureRate("codex", "failed", time.Now().Add(-10*time.Second))
|
||||
|
|
@ -113,7 +113,7 @@ func TestDispatch_TrackFailureRate_Bad(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDispatch_TrackFailureRate_Ugly(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := newPrepWithProcess()
|
||||
|
||||
// Slow failure (>60s) resets count instead of incrementing
|
||||
s.failCount["codex"] = 2
|
||||
|
|
@ -138,17 +138,17 @@ func TestDispatch_StartIssueTracking_Good(t *testing.T) {
|
|||
data, _ := json.Marshal(st)
|
||||
os.WriteFile(filepath.Join(dir, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{forge: forge.NewForge(srv.URL, "tok"), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), forge: forge.NewForge(srv.URL, "tok"), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.startIssueTracking(dir)
|
||||
}
|
||||
|
||||
func TestDispatch_StartIssueTracking_Bad(t *testing.T) {
|
||||
// No forge — returns early
|
||||
s := &PrepSubsystem{forge: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), forge: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.startIssueTracking(t.TempDir())
|
||||
|
||||
// No status file
|
||||
s2 := &PrepSubsystem{forge: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s2 := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), forge: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s2.startIssueTracking(t.TempDir())
|
||||
}
|
||||
|
||||
|
|
@ -159,7 +159,7 @@ func TestDispatch_StartIssueTracking_Ugly(t *testing.T) {
|
|||
data, _ := json.Marshal(st)
|
||||
os.WriteFile(filepath.Join(dir, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{forge: forge.NewForge("http://invalid", "tok"), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), forge: forge.NewForge("http://invalid", "tok"), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.startIssueTracking(dir) // no issue → skips API call
|
||||
}
|
||||
|
||||
|
|
@ -176,12 +176,12 @@ func TestDispatch_StopIssueTracking_Good(t *testing.T) {
|
|||
data, _ := json.Marshal(st)
|
||||
os.WriteFile(filepath.Join(dir, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{forge: forge.NewForge(srv.URL, "tok"), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), forge: forge.NewForge(srv.URL, "tok"), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.stopIssueTracking(dir)
|
||||
}
|
||||
|
||||
func TestDispatch_StopIssueTracking_Bad(t *testing.T) {
|
||||
s := &PrepSubsystem{forge: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), forge: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.stopIssueTracking(t.TempDir())
|
||||
}
|
||||
|
||||
|
|
@ -192,7 +192,7 @@ func TestDispatch_StopIssueTracking_Ugly(t *testing.T) {
|
|||
data, _ := json.Marshal(st)
|
||||
os.WriteFile(filepath.Join(dir, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{forge: forge.NewForge("http://invalid", "tok"), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), forge: forge.NewForge("http://invalid", "tok"), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.stopIssueTracking(dir)
|
||||
}
|
||||
|
||||
|
|
@ -208,20 +208,20 @@ func TestDispatch_BroadcastStart_Good(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(wsDir, "status.json"), data, 0o644)
|
||||
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{core: c, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.broadcastStart("codex", wsDir)
|
||||
}
|
||||
|
||||
func TestDispatch_BroadcastStart_Bad(t *testing.T) {
|
||||
// No Core — should not panic
|
||||
s := &PrepSubsystem{core: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.broadcastStart("codex", t.TempDir())
|
||||
}
|
||||
|
||||
func TestDispatch_BroadcastStart_Ugly(t *testing.T) {
|
||||
// No status file — broadcasts with empty repo
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{core: c, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.broadcastStart("codex", t.TempDir())
|
||||
}
|
||||
|
||||
|
|
@ -237,19 +237,19 @@ func TestDispatch_BroadcastComplete_Good(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(wsDir, "status.json"), data, 0o644)
|
||||
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{core: c, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.broadcastComplete("codex", wsDir, "completed")
|
||||
}
|
||||
|
||||
func TestDispatch_BroadcastComplete_Bad(t *testing.T) {
|
||||
s := &PrepSubsystem{core: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: nil, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.broadcastComplete("codex", t.TempDir(), "failed")
|
||||
}
|
||||
|
||||
func TestDispatch_BroadcastComplete_Ugly(t *testing.T) {
|
||||
// No status file
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{core: c, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s.broadcastComplete("codex", t.TempDir(), "completed")
|
||||
}
|
||||
|
||||
|
|
@ -269,7 +269,7 @@ func TestDispatch_OnAgentComplete_Good(t *testing.T) {
|
|||
data, _ := json.Marshal(st)
|
||||
os.WriteFile(filepath.Join(wsDir, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := newPrepWithProcess()
|
||||
outputFile := filepath.Join(metaDir, "agent-codex.log")
|
||||
s.onAgentComplete("codex", wsDir, outputFile, 0, "completed", "test output")
|
||||
|
||||
|
|
@ -296,7 +296,7 @@ func TestDispatch_OnAgentComplete_Bad(t *testing.T) {
|
|||
data, _ := json.Marshal(st)
|
||||
os.WriteFile(filepath.Join(wsDir, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := newPrepWithProcess()
|
||||
s.onAgentComplete("codex", wsDir, filepath.Join(metaDir, "agent-codex.log"), 1, "failed", "error")
|
||||
|
||||
updated, _ := ReadStatus(wsDir)
|
||||
|
|
@ -319,7 +319,7 @@ func TestDispatch_OnAgentComplete_Ugly(t *testing.T) {
|
|||
data, _ := json.Marshal(st)
|
||||
os.WriteFile(filepath.Join(wsDir, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := newPrepWithProcess()
|
||||
s.onAgentComplete("codex", wsDir, filepath.Join(metaDir, "agent-codex.log"), 0, "completed", "")
|
||||
|
||||
updated, _ := ReadStatus(wsDir)
|
||||
|
|
@ -340,7 +340,7 @@ func TestDispatch_RunQA_Good(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(repoDir, "go.mod"), []byte("module testmod\n\ngo 1.22\n"), 0o644)
|
||||
os.WriteFile(filepath.Join(repoDir, "main.go"), []byte("package main\nfunc main() {}\n"), 0o644)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := newPrepWithProcess()
|
||||
assert.True(t, s.runQA(wsDir))
|
||||
}
|
||||
|
||||
|
|
@ -353,7 +353,7 @@ func TestDispatch_RunQA_Bad(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(repoDir, "go.mod"), []byte("module testmod\n\ngo 1.22\n"), 0o644)
|
||||
os.WriteFile(filepath.Join(repoDir, "main.go"), []byte("package main\nfunc main( {\n}\n"), 0o644)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := newPrepWithProcess()
|
||||
assert.False(t, s.runQA(wsDir))
|
||||
|
||||
// PHP project — composer not available
|
||||
|
|
@ -370,7 +370,7 @@ func TestDispatch_RunQA_Ugly(t *testing.T) {
|
|||
wsDir := t.TempDir()
|
||||
os.MkdirAll(filepath.Join(wsDir, "repo"), 0o755)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := newPrepWithProcess()
|
||||
assert.True(t, s.runQA(wsDir))
|
||||
|
||||
// Go vet failure (compiles but bad printf)
|
||||
|
|
@ -408,10 +408,10 @@ func TestDispatch_Dispatch_Good(t *testing.T) {
|
|||
exec.Command("git", "-C", srcRepo, "add", ".").Run()
|
||||
exec.Command("git", "-C", srcRepo, "commit", "-m", "init").Run()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
forge: forge.NewForge(forgeSrv.URL, "tok"), codePath: filepath.Dir(filepath.Dir(srcRepo)),
|
||||
client: forgeSrv.Client(), backoff: make(map[string]time.Time), failCount: make(map[string]int),
|
||||
}
|
||||
s := newPrepWithProcess()
|
||||
s.forge = forge.NewForge(forgeSrv.URL, "tok")
|
||||
s.codePath = filepath.Dir(filepath.Dir(srcRepo))
|
||||
s.client = forgeSrv.Client()
|
||||
|
||||
_, out, err := s.dispatch(context.Background(), nil, DispatchInput{
|
||||
Repo: "go-io", Task: "Fix stuff", Issue: 42, DryRun: true,
|
||||
|
|
@ -423,7 +423,7 @@ func TestDispatch_Dispatch_Good(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDispatch_Dispatch_Bad(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := newPrepWithProcess()
|
||||
|
||||
// No repo
|
||||
_, _, err := s.dispatch(context.Background(), nil, DispatchInput{Task: "do"})
|
||||
|
|
@ -441,7 +441,7 @@ func TestDispatch_Dispatch_Ugly(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
// Prep fails (no local clone)
|
||||
s := &PrepSubsystem{codePath: t.TempDir(), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), codePath: t.TempDir(), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
_, _, err := s.dispatch(context.Background(), nil, DispatchInput{
|
||||
Repo: "nonexistent", Task: "do", Issue: 1,
|
||||
})
|
||||
|
|
|
|||
|
|
@ -3,10 +3,7 @@
|
|||
package agentic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
|
|
@ -159,27 +156,18 @@ func (s *PrepSubsystem) createIssue(ctx context.Context, org, repo, title, body
|
|||
payload["labels"] = labelIDs
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(payload)
|
||||
data := core.JSONMarshalString(payload)
|
||||
url := core.Sprintf("%s/api/v1/repos/%s/%s/issues", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(data))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return ChildRef{}, core.E("createIssue", "create issue request failed", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 201 {
|
||||
return ChildRef{}, core.E("createIssue", core.Sprintf("create issue returned %d", resp.StatusCode), nil)
|
||||
r := HTTPPost(ctx, url, data, s.forgeToken, "token")
|
||||
if !r.OK {
|
||||
return ChildRef{}, core.E("createIssue", "create issue request failed", nil)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Number int `json:"number"`
|
||||
HTMLURL string `json:"html_url"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&result)
|
||||
core.JSONUnmarshalString(r.Value.(string), &result)
|
||||
|
||||
return ChildRef{
|
||||
Number: result.Number,
|
||||
|
|
@ -196,16 +184,8 @@ func (s *PrepSubsystem) resolveLabelIDs(ctx context.Context, org, repo string, n
|
|||
|
||||
// Fetch existing labels
|
||||
url := core.Sprintf("%s/api/v1/repos/%s/%s/labels?limit=50", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
r := HTTPGet(ctx, url, s.forgeToken, "token")
|
||||
if !r.OK {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -213,7 +193,7 @@ func (s *PrepSubsystem) resolveLabelIDs(ctx context.Context, org, repo string, n
|
|||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&existing)
|
||||
core.JSONUnmarshalString(r.Value.(string), &existing)
|
||||
|
||||
nameToID := make(map[string]int64)
|
||||
for _, l := range existing {
|
||||
|
|
@ -249,29 +229,20 @@ func (s *PrepSubsystem) createLabel(ctx context.Context, org, repo, name string)
|
|||
colour = "#6b7280"
|
||||
}
|
||||
|
||||
payload, _ := json.Marshal(map[string]string{
|
||||
payload := core.JSONMarshalString(map[string]string{
|
||||
"name": name,
|
||||
"color": colour,
|
||||
})
|
||||
|
||||
url := core.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 201 {
|
||||
r := HTTPPost(ctx, url, payload, s.forgeToken, "token")
|
||||
if !r.OK {
|
||||
return 0
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID int64 `json:"id"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&result)
|
||||
core.JSONUnmarshalString(r.Value.(string), &result)
|
||||
return result.ID
|
||||
}
|
||||
|
|
|
|||
18
pkg/agentic/epic_example_test.go
Normal file
18
pkg/agentic/epic_example_test.go
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExampleEpicInput() {
|
||||
input := EpicInput{
|
||||
Repo: "go-io",
|
||||
Title: "Port agentic plans",
|
||||
Tasks: []string{"Read PHP flow", "Implement Go MCP tools"},
|
||||
}
|
||||
core.Println(input.Repo)
|
||||
core.Println(len(input.Tasks))
|
||||
// Output:
|
||||
// go-io
|
||||
// 2
|
||||
}
|
||||
|
|
@ -12,6 +12,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/forge"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
|
@ -128,6 +129,7 @@ func itoa(n int) string {
|
|||
func newTestSubsystem(t *testing.T, srv *httptest.Server) *PrepSubsystem {
|
||||
t.Helper()
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -178,6 +180,7 @@ func TestEpic_CreateIssue_Bad_ServerDown(t *testing.T) {
|
|||
srv.Close() // immediately close
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: &http.Client{},
|
||||
|
|
@ -196,6 +199,7 @@ func TestEpic_CreateIssue_Bad_Non201Response(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -243,6 +247,7 @@ func TestEpic_ResolveLabelIDs_Bad_ServerError(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -278,6 +283,7 @@ func TestEpic_CreateLabel_Bad_ServerDown(t *testing.T) {
|
|||
srv.Close()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: &http.Client{},
|
||||
|
|
@ -317,6 +323,7 @@ func TestEpic_CreateEpic_Bad_NoTasks(t *testing.T) {
|
|||
|
||||
func TestEpic_CreateEpic_Bad_NoToken(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeToken: "",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
|
|||
|
|
@ -3,8 +3,6 @@
|
|||
package agentic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
|
|
@ -34,19 +32,14 @@ func emitEvent(eventType, agent, workspace, status string) {
|
|||
Timestamp: time.Now().UTC().Format(time.RFC3339),
|
||||
}
|
||||
|
||||
data, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
line := core.Concat(core.JSONMarshalString(event), "\n")
|
||||
|
||||
// Append to events log
|
||||
r := fs.Append(eventsFile)
|
||||
if !r.OK {
|
||||
return
|
||||
}
|
||||
wc := r.Value.(io.WriteCloser)
|
||||
defer wc.Close()
|
||||
wc.Write(append(data, '\n'))
|
||||
core.WriteAll(r.Value, line)
|
||||
}
|
||||
|
||||
// emitStartEvent logs that an agent has been spawned.
|
||||
|
|
|
|||
15
pkg/agentic/events_example_test.go
Normal file
15
pkg/agentic/events_example_test.go
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func Example_emitStartEvent() {
|
||||
// Events are appended to workspace/events.jsonl
|
||||
// This exercises the path without requiring a real workspace
|
||||
root := WorkspaceRoot()
|
||||
core.Println(core.HasSuffix(root, "workspace"))
|
||||
// Output: true
|
||||
}
|
||||
33
pkg/agentic/events_test.go
Normal file
33
pkg/agentic/events_test.go
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestEvents_EmitEvent_Good(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
t.Setenv("CORE_WORKSPACE", root)
|
||||
fs.EnsureDir(core.JoinPath(root, "workspace"))
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
emitStartEvent("codex", "ws-1")
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvents_EmitEvent_Bad_NoWorkspace(t *testing.T) {
|
||||
t.Setenv("CORE_WORKSPACE", "/nonexistent")
|
||||
assert.NotPanics(t, func() {
|
||||
emitCompletionEvent("codex", "ws-1", "completed")
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvents_EmitEvent_Ugly_AllEmpty(t *testing.T) {
|
||||
assert.NotPanics(t, func() {
|
||||
emitEvent("", "", "", "")
|
||||
})
|
||||
}
|
||||
14
pkg/agentic/handlers_example_test.go
Normal file
14
pkg/agentic/handlers_example_test.go
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func Example_resolveWorkspace() {
|
||||
// Non-existent workspace → empty string
|
||||
resolved := resolveWorkspace("nonexistent/workspace")
|
||||
core.Println(resolved == "")
|
||||
// Output: true
|
||||
}
|
||||
|
|
@ -22,6 +22,7 @@ func newCoreForHandlerTests(t *testing.T) (*core.Core, *PrepSubsystem) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
pokeCh: make(chan struct{}, 1),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -29,7 +30,7 @@ func newCoreForHandlerTests(t *testing.T) (*core.Core, *PrepSubsystem) {
|
|||
}
|
||||
|
||||
c := core.New()
|
||||
s.core = c
|
||||
s.ServiceRuntime = core.NewServiceRuntime(c, AgentOptions{})
|
||||
RegisterHandlers(c, s)
|
||||
|
||||
return c, s
|
||||
|
|
@ -51,7 +52,7 @@ func TestHandlers_RegisterHandlers_Good_PokeOnCompletion(t *testing.T) {
|
|||
}
|
||||
|
||||
// Send AgentCompleted — should trigger poke
|
||||
s.core.ACTION(messages.AgentCompleted{
|
||||
s.Core().ACTION(messages.AgentCompleted{
|
||||
Workspace: "nonexistent",
|
||||
Repo: "test",
|
||||
Status: "completed",
|
||||
|
|
@ -155,7 +156,7 @@ func TestCommandsForge_RegisterForgeCommands_Good(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
core: core.New(),
|
||||
ServiceRuntime: core.NewServiceRuntime(core.New(), AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -168,7 +169,7 @@ func TestCommandsWorkspace_RegisterWorkspaceCommands_Good(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
core: core.New(),
|
||||
ServiceRuntime: core.NewServiceRuntime(core.New(), AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -183,7 +184,7 @@ func TestCommands_RegisterCommands_Good(t *testing.T) {
|
|||
defer cancel()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
core: core.New(),
|
||||
ServiceRuntime: core.NewServiceRuntime(core.New(), AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -207,8 +208,8 @@ func TestPrep_OnStartup_Good_Registers(t *testing.T) {
|
|||
c := core.New()
|
||||
s.SetCore(c)
|
||||
|
||||
err := s.OnStartup(context.Background())
|
||||
assert.NoError(t, err)
|
||||
r := s.OnStartup(context.Background())
|
||||
assert.True(t, r.OK)
|
||||
}
|
||||
|
||||
// --- RegisterTools (exercises all register*Tool functions) ---
|
||||
|
|
@ -228,6 +229,7 @@ func TestPrep_RegisterTools_Bad(t *testing.T) {
|
|||
// RegisterTools on prep without Core — should still register tools
|
||||
srv := mcp.NewServer(&mcp.Implementation{Name: "test", Version: "0.0.1"}, nil)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,9 +3,7 @@
|
|||
package agentic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"context"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
|
@ -56,7 +54,7 @@ func (s *PrepSubsystem) ingestFindings(wsDir string) {
|
|||
// Truncate body to reasonable size for issue description
|
||||
description := body
|
||||
if len(description) > 10000 {
|
||||
description = description[:10000] + "\n\n... (truncated, see full log in workspace)"
|
||||
description = core.Concat(description[:10000], "\n\n... (truncated, see full log in workspace)")
|
||||
}
|
||||
|
||||
s.createIssueViaAPI(st.Repo, title, description, issueType, priority, "scan")
|
||||
|
|
@ -96,7 +94,7 @@ func (s *PrepSubsystem) createIssueViaAPI(repo, title, description, issueType, p
|
|||
}
|
||||
apiKey := core.Trim(r.Value.(string))
|
||||
|
||||
payload, _ := json.Marshal(map[string]string{
|
||||
payload := core.JSONMarshalString(map[string]string{
|
||||
"title": title,
|
||||
"description": description,
|
||||
"type": issueType,
|
||||
|
|
@ -104,14 +102,5 @@ func (s *PrepSubsystem) createIssueViaAPI(repo, title, description, issueType, p
|
|||
"reporter": "cladius",
|
||||
})
|
||||
|
||||
req, _ := http.NewRequest("POST", s.brainURL+"/v1/issues", bytes.NewReader(payload))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
HTTPPost(context.Background(), core.Concat(s.brainURL, "/v1/issues"), payload, apiKey, "Bearer")
|
||||
}
|
||||
|
|
|
|||
11
pkg/agentic/ingest_example_test.go
Normal file
11
pkg/agentic/ingest_example_test.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func Example_ingestWorkspaceRoot() {
|
||||
root := WorkspaceRoot()
|
||||
core.Println(core.HasSuffix(root, "workspace"))
|
||||
// Output: true
|
||||
}
|
||||
|
|
@ -10,6 +10,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -55,6 +56,7 @@ func TestIngest_IngestFindings_Good_WithFindings(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(home, ".claude", "agent-api.key"), "test-api-key").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainURL: srv.URL,
|
||||
brainKey: "test-brain-key",
|
||||
client: srv.Client(),
|
||||
|
|
@ -74,6 +76,7 @@ func TestIngest_IngestFindings_Bad_NotCompleted(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -92,6 +95,7 @@ func TestIngest_IngestFindings_Bad_NoLogFile(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -114,6 +118,7 @@ func TestIngest_IngestFindings_Bad_TooFewFindings(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(wsDir, "agent-codex.log"), logContent).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -135,6 +140,7 @@ func TestIngest_IngestFindings_Bad_QuotaExhausted(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(wsDir, "agent-codex.log"), logContent).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -148,6 +154,7 @@ func TestIngest_IngestFindings_Bad_NoStatusFile(t *testing.T) {
|
|||
wsDir := t.TempDir()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -168,6 +175,7 @@ func TestIngest_IngestFindings_Bad_ShortLogFile(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(wsDir, "agent-codex.log"), "short").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -199,6 +207,7 @@ func TestIngest_CreateIssueViaAPI_Good_Success(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainURL: srv.URL,
|
||||
brainKey: "test-brain-key",
|
||||
client: srv.Client(),
|
||||
|
|
@ -212,6 +221,7 @@ func TestIngest_CreateIssueViaAPI_Good_Success(t *testing.T) {
|
|||
|
||||
func TestIngest_CreateIssueViaAPI_Bad_NoBrainKey(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainKey: "",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -229,6 +239,7 @@ func TestIngest_CreateIssueViaAPI_Bad_NoAPIKey(t *testing.T) {
|
|||
// No agent-api.key file
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainURL: "https://example.com",
|
||||
brainKey: "test-brain-key",
|
||||
client: &http.Client{},
|
||||
|
|
@ -254,6 +265,7 @@ func TestIngest_CreateIssueViaAPI_Bad_ServerError(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(home, ".claude", "agent-api.key"), "test-key").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainURL: srv.URL,
|
||||
brainKey: "test-brain-key",
|
||||
client: srv.Client(),
|
||||
|
|
@ -289,6 +301,7 @@ func TestIngest_IngestFindings_Ugly(t *testing.T) {
|
|||
// No agent-*.log files at all
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -321,6 +334,7 @@ func TestIngest_CreateIssueViaAPI_Ugly(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(home, ".claude", "agent-api.key"), "test-key").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainURL: srv.URL,
|
||||
brainKey: "test-brain-key",
|
||||
client: srv.Client(),
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -196,7 +197,7 @@ func TestDispatch_ContainerCommand_Ugly_EmptyDirs(t *testing.T) {
|
|||
// --- buildAutoPRBody ---
|
||||
|
||||
func TestAutoPr_BuildAutoPRBody_Good_Basic(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
st := &WorkspaceStatus{
|
||||
Task: "Fix the login bug",
|
||||
Agent: "codex",
|
||||
|
|
@ -211,7 +212,7 @@ func TestAutoPr_BuildAutoPRBody_Good_Basic(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAutoPr_BuildAutoPRBody_Good_WithIssue(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
st := &WorkspaceStatus{
|
||||
Task: "Add rate limiting",
|
||||
Agent: "claude",
|
||||
|
|
@ -223,7 +224,7 @@ func TestAutoPr_BuildAutoPRBody_Good_WithIssue(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAutoPr_BuildAutoPRBody_Good_NoIssue(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
st := &WorkspaceStatus{
|
||||
Task: "Refactor internals",
|
||||
Agent: "gemini",
|
||||
|
|
@ -234,7 +235,7 @@ func TestAutoPr_BuildAutoPRBody_Good_NoIssue(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAutoPr_BuildAutoPRBody_Good_CommitCount(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
st := &WorkspaceStatus{Agent: "codex", Branch: "agent/foo"}
|
||||
body1 := s.buildAutoPRBody(st, 1)
|
||||
body5 := s.buildAutoPRBody(st, 5)
|
||||
|
|
@ -243,7 +244,7 @@ func TestAutoPr_BuildAutoPRBody_Good_CommitCount(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAutoPr_BuildAutoPRBody_Bad_EmptyTask(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
st := &WorkspaceStatus{
|
||||
Task: "",
|
||||
Agent: "codex",
|
||||
|
|
@ -256,7 +257,7 @@ func TestAutoPr_BuildAutoPRBody_Bad_EmptyTask(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAutoPr_BuildAutoPRBody_Ugly_ZeroCommits(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
st := &WorkspaceStatus{Agent: "codex", Branch: "agent/test"}
|
||||
body := s.buildAutoPRBody(st, 0)
|
||||
assert.Contains(t, body, "**Commits:** 0")
|
||||
|
|
|
|||
|
|
@ -4,8 +4,6 @@ package agentic
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
|
|
@ -79,23 +77,23 @@ func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
|||
repoDir := core.JoinPath(basePath, repo)
|
||||
|
||||
// Check if github remote exists
|
||||
if !hasRemote(repoDir, "github") {
|
||||
if !s.hasRemote(repoDir, "github") {
|
||||
skipped = append(skipped, repo+": no github remote")
|
||||
continue
|
||||
}
|
||||
|
||||
// Fetch github to get current state
|
||||
gitCmdOK(ctx, repoDir, "fetch", "github")
|
||||
s.gitCmdOK(ctx, repoDir, "fetch", "github")
|
||||
|
||||
// Check how far ahead local default branch is vs github
|
||||
localBase := DefaultBranch(repoDir)
|
||||
ahead := commitsAhead(repoDir, "github/main", localBase)
|
||||
localBase := s.DefaultBranch(repoDir)
|
||||
ahead := s.commitsAhead(repoDir, "github/main", localBase)
|
||||
if ahead == 0 {
|
||||
continue // Already in sync
|
||||
}
|
||||
|
||||
// Count files changed
|
||||
files := filesChanged(repoDir, "github/main", localBase)
|
||||
files := s.filesChanged(repoDir, "github/main", localBase)
|
||||
|
||||
sync := MirrorSync{
|
||||
Repo: repo,
|
||||
|
|
@ -117,12 +115,12 @@ func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
|||
}
|
||||
|
||||
// Ensure dev branch exists on GitHub
|
||||
ensureDevBranch(repoDir)
|
||||
s.ensureDevBranch(repoDir)
|
||||
|
||||
// Push local main to github dev (explicit main, not HEAD)
|
||||
base := DefaultBranch(repoDir)
|
||||
if _, err := gitCmd(ctx, repoDir, "push", "github", base+":refs/heads/dev", "--force"); err != nil {
|
||||
sync.Skipped = core.Sprintf("push failed: %v", err)
|
||||
base := s.DefaultBranch(repoDir)
|
||||
if r := s.gitCmd(ctx, repoDir, "push", "github", base+":refs/heads/dev", "--force"); !r.OK {
|
||||
sync.Skipped = core.Sprintf("push failed: %s", r.Value)
|
||||
synced = append(synced, sync)
|
||||
continue
|
||||
}
|
||||
|
|
@ -152,10 +150,13 @@ func (s *PrepSubsystem) createGitHubPR(ctx context.Context, repoDir, repo string
|
|||
ghRepo := core.Sprintf("%s/%s", GitHubOrg(), repo)
|
||||
|
||||
// Check if there's already an open PR from dev
|
||||
out, err := runCmd(ctx, repoDir, "gh", "pr", "list", "--repo", ghRepo, "--head", "dev", "--state", "open", "--json", "url", "--limit", "1")
|
||||
if err == nil && core.Contains(out, "url") {
|
||||
if url := extractJSONField(out, "url"); url != "" {
|
||||
return url, nil
|
||||
r := s.runCmd(ctx, repoDir, "gh", "pr", "list", "--repo", ghRepo, "--head", "dev", "--state", "open", "--json", "url", "--limit", "1")
|
||||
if r.OK {
|
||||
out := r.Value.(string)
|
||||
if core.Contains(out, "url") {
|
||||
if url := extractJSONField(out, "url"); url != "" {
|
||||
return url, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -168,13 +169,14 @@ func (s *PrepSubsystem) createGitHubPR(ctx context.Context, repoDir, repo string
|
|||
|
||||
title := core.Sprintf("[sync] %s: %d commits, %d files", repo, commits, files)
|
||||
|
||||
prOut, err := runCmd(ctx, repoDir, "gh", "pr", "create",
|
||||
r = s.runCmd(ctx, repoDir, "gh", "pr", "create",
|
||||
"--repo", ghRepo, "--head", "dev", "--base", "main",
|
||||
"--title", title, "--body", body)
|
||||
if err != nil {
|
||||
return "", core.E("createGitHubPR", prOut, err)
|
||||
if !r.OK {
|
||||
return "", core.E("createGitHubPR", r.Value.(string), nil)
|
||||
}
|
||||
|
||||
prOut := r.Value.(string)
|
||||
lines := core.Split(core.Trim(prOut), "\n")
|
||||
if len(lines) > 0 {
|
||||
return lines[len(lines)-1], nil
|
||||
|
|
@ -183,24 +185,24 @@ func (s *PrepSubsystem) createGitHubPR(ctx context.Context, repoDir, repo string
|
|||
}
|
||||
|
||||
// ensureDevBranch creates the dev branch on GitHub if it doesn't exist.
|
||||
func ensureDevBranch(repoDir string) {
|
||||
gitCmdOK(context.Background(), repoDir, "push", "github", "HEAD:refs/heads/dev")
|
||||
func (s *PrepSubsystem) ensureDevBranch(repoDir string) {
|
||||
s.gitCmdOK(context.Background(), repoDir, "push", "github", "HEAD:refs/heads/dev")
|
||||
}
|
||||
|
||||
// hasRemote checks if a git remote exists.
|
||||
func hasRemote(repoDir, name string) bool {
|
||||
return gitCmdOK(context.Background(), repoDir, "remote", "get-url", name)
|
||||
func (s *PrepSubsystem) hasRemote(repoDir, name string) bool {
|
||||
return s.gitCmdOK(context.Background(), repoDir, "remote", "get-url", name)
|
||||
}
|
||||
|
||||
// commitsAhead returns how many commits HEAD is ahead of the ref.
|
||||
func commitsAhead(repoDir, base, head string) int {
|
||||
out := gitOutput(context.Background(), repoDir, "rev-list", base+".."+head, "--count")
|
||||
func (s *PrepSubsystem) commitsAhead(repoDir, base, head string) int {
|
||||
out := s.gitOutput(context.Background(), repoDir, "rev-list", base+".."+head, "--count")
|
||||
return parseInt(out)
|
||||
}
|
||||
|
||||
// filesChanged returns the number of files changed between two refs.
|
||||
func filesChanged(repoDir, base, head string) int {
|
||||
out := gitOutput(context.Background(), repoDir, "diff", "--name-only", base+".."+head)
|
||||
func (s *PrepSubsystem) filesChanged(repoDir, base, head string) int {
|
||||
out := s.gitOutput(context.Background(), repoDir, "diff", "--name-only", base+".."+head)
|
||||
if out == "" {
|
||||
return 0
|
||||
}
|
||||
|
|
@ -209,19 +211,16 @@ func filesChanged(repoDir, base, head string) int {
|
|||
|
||||
// listLocalRepos returns repo names that exist as directories in basePath.
|
||||
func (s *PrepSubsystem) listLocalRepos(basePath string) []string {
|
||||
r := fs.List(basePath)
|
||||
if !r.OK {
|
||||
return nil
|
||||
}
|
||||
entries := r.Value.([]os.DirEntry)
|
||||
paths := core.PathGlob(core.JoinPath(basePath, "*"))
|
||||
var repos []string
|
||||
for _, e := range entries {
|
||||
if !e.IsDir() {
|
||||
for _, p := range paths {
|
||||
name := core.PathBase(p)
|
||||
if !fs.IsDir(p) {
|
||||
continue
|
||||
}
|
||||
// Must have a .git directory
|
||||
if fs.IsDir(core.JoinPath(basePath, e.Name(), ".git")) {
|
||||
repos = append(repos, e.Name())
|
||||
if fs.IsDir(core.JoinPath(basePath, name, ".git")) {
|
||||
repos = append(repos, name)
|
||||
}
|
||||
}
|
||||
return repos
|
||||
|
|
@ -234,7 +233,7 @@ func extractJSONField(jsonStr, field string) string {
|
|||
}
|
||||
|
||||
var list []map[string]any
|
||||
if err := json.Unmarshal([]byte(jsonStr), &list); err == nil {
|
||||
if r := core.JSONUnmarshalString(jsonStr, &list); r.OK {
|
||||
for _, item := range list {
|
||||
if value, ok := item[field].(string); ok {
|
||||
return value
|
||||
|
|
@ -243,7 +242,7 @@ func extractJSONField(jsonStr, field string) string {
|
|||
}
|
||||
|
||||
var item map[string]any
|
||||
if err := json.Unmarshal([]byte(jsonStr), &item); err != nil {
|
||||
if r := core.JSONUnmarshalString(jsonStr, &item); !r.OK {
|
||||
return ""
|
||||
}
|
||||
|
||||
|
|
|
|||
11
pkg/agentic/mirror_example_test.go
Normal file
11
pkg/agentic/mirror_example_test.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExampleMirrorInput() {
|
||||
input := MirrorInput{Repo: "go-io"}
|
||||
core.Println(input.Repo)
|
||||
// Output: go-io
|
||||
}
|
||||
|
|
@ -7,6 +7,7 @@ import (
|
|||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -48,7 +49,7 @@ func TestMirror_HasRemote_Good_OriginExists(t *testing.T) {
|
|||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
assert.True(t, hasRemote(dir, "origin"))
|
||||
assert.True(t, testPrep.hasRemote(dir, "origin"))
|
||||
}
|
||||
|
||||
func TestMirror_HasRemote_Good_CustomRemote(t *testing.T) {
|
||||
|
|
@ -57,24 +58,24 @@ func TestMirror_HasRemote_Good_CustomRemote(t *testing.T) {
|
|||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
assert.True(t, hasRemote(dir, "github"))
|
||||
assert.True(t, testPrep.hasRemote(dir, "github"))
|
||||
}
|
||||
|
||||
func TestMirror_HasRemote_Bad_NoSuchRemote(t *testing.T) {
|
||||
dir := initBareRepo(t)
|
||||
assert.False(t, hasRemote(dir, "nonexistent"))
|
||||
assert.False(t, testPrep.hasRemote(dir, "nonexistent"))
|
||||
}
|
||||
|
||||
func TestMirror_HasRemote_Bad_NotAGitRepo(t *testing.T) {
|
||||
dir := t.TempDir() // plain directory, no .git
|
||||
assert.False(t, hasRemote(dir, "origin"))
|
||||
assert.False(t, testPrep.hasRemote(dir, "origin"))
|
||||
}
|
||||
|
||||
func TestMirror_HasRemote_Ugly_EmptyDir(t *testing.T) {
|
||||
// Empty dir defaults to cwd which may or may not be a repo.
|
||||
// Just ensure no panic.
|
||||
assert.NotPanics(t, func() {
|
||||
hasRemote("", "origin")
|
||||
testPrep.hasRemote("", "origin")
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -105,7 +106,7 @@ func TestMirror_CommitsAhead_Good_OneAhead(t *testing.T) {
|
|||
run("git", "add", "new.txt")
|
||||
run("git", "commit", "-m", "second commit")
|
||||
|
||||
ahead := commitsAhead(dir, "base", "main")
|
||||
ahead := testPrep.commitsAhead(dir, "base", "main")
|
||||
assert.Equal(t, 1, ahead)
|
||||
}
|
||||
|
||||
|
|
@ -134,30 +135,30 @@ func TestMirror_CommitsAhead_Good_ThreeAhead(t *testing.T) {
|
|||
run("git", "commit", "-m", "commit "+string(rune('0'+i)))
|
||||
}
|
||||
|
||||
ahead := commitsAhead(dir, "base", "main")
|
||||
ahead := testPrep.commitsAhead(dir, "base", "main")
|
||||
assert.Equal(t, 3, ahead)
|
||||
}
|
||||
|
||||
func TestMirror_CommitsAhead_Good_ZeroAhead(t *testing.T) {
|
||||
dir := initBareRepo(t)
|
||||
// Same ref on both sides
|
||||
ahead := commitsAhead(dir, "main", "main")
|
||||
ahead := testPrep.commitsAhead(dir, "main", "main")
|
||||
assert.Equal(t, 0, ahead)
|
||||
}
|
||||
|
||||
func TestMirror_CommitsAhead_Bad_InvalidRef(t *testing.T) {
|
||||
dir := initBareRepo(t)
|
||||
ahead := commitsAhead(dir, "nonexistent-ref", "main")
|
||||
ahead := testPrep.commitsAhead(dir, "nonexistent-ref", "main")
|
||||
assert.Equal(t, 0, ahead)
|
||||
}
|
||||
|
||||
func TestMirror_CommitsAhead_Bad_NotARepo(t *testing.T) {
|
||||
ahead := commitsAhead(t.TempDir(), "main", "dev")
|
||||
ahead := testPrep.commitsAhead(t.TempDir(), "main", "dev")
|
||||
assert.Equal(t, 0, ahead)
|
||||
}
|
||||
|
||||
func TestMirror_CommitsAhead_Ugly_EmptyDir(t *testing.T) {
|
||||
ahead := commitsAhead("", "a", "b")
|
||||
ahead := testPrep.commitsAhead("", "a", "b")
|
||||
assert.Equal(t, 0, ahead)
|
||||
}
|
||||
|
||||
|
|
@ -185,7 +186,7 @@ func TestMirror_FilesChanged_Good_OneFile(t *testing.T) {
|
|||
run("git", "add", "changed.txt")
|
||||
run("git", "commit", "-m", "add file")
|
||||
|
||||
files := filesChanged(dir, "base", "main")
|
||||
files := testPrep.filesChanged(dir, "base", "main")
|
||||
assert.Equal(t, 1, files)
|
||||
}
|
||||
|
||||
|
|
@ -213,29 +214,29 @@ func TestMirror_FilesChanged_Good_MultipleFiles(t *testing.T) {
|
|||
run("git", "add", ".")
|
||||
run("git", "commit", "-m", "add three files")
|
||||
|
||||
files := filesChanged(dir, "base", "main")
|
||||
files := testPrep.filesChanged(dir, "base", "main")
|
||||
assert.Equal(t, 3, files)
|
||||
}
|
||||
|
||||
func TestMirror_FilesChanged_Good_NoChanges(t *testing.T) {
|
||||
dir := initBareRepo(t)
|
||||
files := filesChanged(dir, "main", "main")
|
||||
files := testPrep.filesChanged(dir, "main", "main")
|
||||
assert.Equal(t, 0, files)
|
||||
}
|
||||
|
||||
func TestMirror_FilesChanged_Bad_InvalidRef(t *testing.T) {
|
||||
dir := initBareRepo(t)
|
||||
files := filesChanged(dir, "nonexistent", "main")
|
||||
files := testPrep.filesChanged(dir, "nonexistent", "main")
|
||||
assert.Equal(t, 0, files)
|
||||
}
|
||||
|
||||
func TestMirror_FilesChanged_Bad_NotARepo(t *testing.T) {
|
||||
files := filesChanged(t.TempDir(), "main", "dev")
|
||||
files := testPrep.filesChanged(t.TempDir(), "main", "dev")
|
||||
assert.Equal(t, 0, files)
|
||||
}
|
||||
|
||||
func TestMirror_FilesChanged_Ugly_EmptyDir(t *testing.T) {
|
||||
files := filesChanged("", "a", "b")
|
||||
files := testPrep.filesChanged("", "a", "b")
|
||||
assert.Equal(t, 0, files)
|
||||
}
|
||||
|
||||
|
|
@ -298,14 +299,14 @@ func TestMirror_ExtractJSONField_Ugly_NullValue(t *testing.T) {
|
|||
func TestPaths_DefaultBranch_Good_MainBranch(t *testing.T) {
|
||||
dir := initBareRepo(t)
|
||||
// initBareRepo creates with -b main
|
||||
branch := DefaultBranch(dir)
|
||||
branch := testPrep.DefaultBranch(dir)
|
||||
assert.Equal(t, "main", branch)
|
||||
}
|
||||
|
||||
func TestPaths_DefaultBranch_Bad_NotARepo(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Falls back to "main" when detection fails
|
||||
branch := DefaultBranch(dir)
|
||||
branch := testPrep.DefaultBranch(dir)
|
||||
assert.Equal(t, "main", branch)
|
||||
}
|
||||
|
||||
|
|
@ -324,7 +325,7 @@ func TestMirror_ListLocalRepos_Good_FindsRepos(t *testing.T) {
|
|||
// Create a non-repo directory
|
||||
require.True(t, fs.EnsureDir(filepath.Join(base, "not-a-repo")).OK)
|
||||
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
repos := s.listLocalRepos(base)
|
||||
assert.Contains(t, repos, "repo-a")
|
||||
assert.Contains(t, repos, "repo-b")
|
||||
|
|
@ -333,13 +334,13 @@ func TestMirror_ListLocalRepos_Good_FindsRepos(t *testing.T) {
|
|||
|
||||
func TestMirror_ListLocalRepos_Bad_EmptyDir(t *testing.T) {
|
||||
base := t.TempDir()
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
repos := s.listLocalRepos(base)
|
||||
assert.Empty(t, repos)
|
||||
}
|
||||
|
||||
func TestMirror_ListLocalRepos_Bad_NonExistentDir(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
repos := s.listLocalRepos("/nonexistent/path/that/doesnt/exist")
|
||||
assert.Nil(t, repos)
|
||||
}
|
||||
|
|
@ -376,7 +377,7 @@ func TestMirror_ListLocalRepos_Ugly(t *testing.T) {
|
|||
// Create a regular file (not a directory)
|
||||
require.True(t, fs.Write(filepath.Join(base, "some-file.txt"), "hello").OK)
|
||||
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
repos := s.listLocalRepos(base)
|
||||
assert.Contains(t, repos, "real-repo-a")
|
||||
assert.Contains(t, repos, "real-repo-b")
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@ package agentic
|
|||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
|
@ -14,21 +13,12 @@ import (
|
|||
//
|
||||
// r := fs.Read("/etc/hostname")
|
||||
// if r.OK { core.Print(nil, "%s", r.Value.(string)) }
|
||||
var fs = newFs("/")
|
||||
|
||||
// newFs creates a core.Fs with the given root directory.
|
||||
// Root "/" means unrestricted access (same as coreio.Local).
|
||||
func newFs(root string) *core.Fs {
|
||||
type fsRoot struct{ root string }
|
||||
f := &core.Fs{}
|
||||
(*fsRoot)(unsafe.Pointer(f)).root = root
|
||||
return f
|
||||
}
|
||||
var fs = (&core.Fs{}).NewUnrestricted()
|
||||
|
||||
// LocalFs returns an unrestricted filesystem instance for use by other packages.
|
||||
//
|
||||
// r := agentic.LocalFs().Read("/tmp/agent-status.json")
|
||||
// if r.OK { core.Print(nil, "%s", r.Value.(string)) }
|
||||
// f := agentic.LocalFs()
|
||||
// r := f.Read("/tmp/agent-status.json")
|
||||
func LocalFs() *core.Fs { return fs }
|
||||
|
||||
// WorkspaceRoot returns the root directory for agent workspaces.
|
||||
|
|
@ -74,17 +64,17 @@ func AgentName() string {
|
|||
|
||||
// DefaultBranch detects the default branch of a repo (main, master, etc.).
|
||||
//
|
||||
// base := agentic.DefaultBranch("./src")
|
||||
func DefaultBranch(repoDir string) string {
|
||||
// base := s.DefaultBranch("./src")
|
||||
func (s *PrepSubsystem) DefaultBranch(repoDir string) string {
|
||||
ctx := context.Background()
|
||||
if ref := gitOutput(ctx, repoDir, "symbolic-ref", "refs/remotes/origin/HEAD", "--short"); ref != "" {
|
||||
if ref := s.gitOutput(ctx, repoDir, "symbolic-ref", "refs/remotes/origin/HEAD", "--short"); ref != "" {
|
||||
if core.HasPrefix(ref, "origin/") {
|
||||
return core.TrimPrefix(ref, "origin/")
|
||||
}
|
||||
return ref
|
||||
}
|
||||
for _, branch := range []string{"main", "master"} {
|
||||
if gitCmdOK(ctx, repoDir, "rev-parse", "--verify", branch) {
|
||||
if s.gitCmdOK(ctx, repoDir, "rev-parse", "--verify", branch) {
|
||||
return branch
|
||||
}
|
||||
}
|
||||
|
|
|
|||
37
pkg/agentic/paths_example_test.go
Normal file
37
pkg/agentic/paths_example_test.go
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func ExampleWorkspaceRoot() {
|
||||
root := WorkspaceRoot()
|
||||
core.Println(core.HasSuffix(root, "workspace"))
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExampleCoreRoot() {
|
||||
root := CoreRoot()
|
||||
core.Println(core.HasSuffix(root, ".core"))
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExamplePlansRoot() {
|
||||
root := PlansRoot()
|
||||
core.Println(core.HasSuffix(root, "plans"))
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExampleAgentName() {
|
||||
name := AgentName()
|
||||
core.Println(name != "")
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExampleGitHubOrg() {
|
||||
org := GitHubOrg()
|
||||
core.Println(org)
|
||||
// Output: dAppCore
|
||||
}
|
||||
|
|
@ -167,14 +167,14 @@ func TestPaths_DefaultBranch_Good(t *testing.T) {
|
|||
cmd = exec.Command("git", "-C", dir, "commit", "-m", "init")
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
branch := DefaultBranch(dir)
|
||||
branch := testPrep.DefaultBranch(dir)
|
||||
assert.Equal(t, "main", branch)
|
||||
}
|
||||
|
||||
func TestPaths_DefaultBranch_Bad(t *testing.T) {
|
||||
// Non-git directory — should return "main" (default)
|
||||
dir := t.TempDir()
|
||||
branch := DefaultBranch(dir)
|
||||
branch := testPrep.DefaultBranch(dir)
|
||||
assert.Equal(t, "main", branch)
|
||||
}
|
||||
|
||||
|
|
@ -196,7 +196,7 @@ func TestPaths_DefaultBranch_Ugly(t *testing.T) {
|
|||
cmd = exec.Command("git", "-C", dir, "commit", "-m", "init")
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
branch := DefaultBranch(dir)
|
||||
branch := testPrep.DefaultBranch(dir)
|
||||
assert.Equal(t, "master", branch)
|
||||
}
|
||||
|
||||
|
|
@ -321,12 +321,11 @@ func TestPaths_ParseInt_Ugly_LeadingTrailingWhitespace(t *testing.T) {
|
|||
assert.Equal(t, 42, parseInt(" 42 "))
|
||||
}
|
||||
|
||||
// --- newFs Good/Bad/Ugly ---
|
||||
// --- fs (NewUnrestricted) Good ---
|
||||
|
||||
func TestPaths_NewFs_Good(t *testing.T) {
|
||||
f := newFs("/tmp")
|
||||
assert.NotNil(t, f, "newFs should return a non-nil Fs")
|
||||
assert.IsType(t, &core.Fs{}, f)
|
||||
func TestPaths_Fs_Good_Unrestricted(t *testing.T) {
|
||||
assert.NotNil(t, fs, "package-level fs should be non-nil")
|
||||
assert.IsType(t, &core.Fs{}, fs)
|
||||
}
|
||||
|
||||
// --- parseInt Good ---
|
||||
|
|
@ -335,20 +334,3 @@ func TestPaths_ParseInt_Good(t *testing.T) {
|
|||
assert.Equal(t, 42, parseInt("42"))
|
||||
assert.Equal(t, 0, parseInt("0"))
|
||||
}
|
||||
|
||||
func TestPaths_NewFs_Bad_EmptyRoot(t *testing.T) {
|
||||
f := newFs("")
|
||||
assert.NotNil(t, f, "newFs with empty root should not return nil")
|
||||
}
|
||||
|
||||
func TestPaths_NewFs_Ugly_UnicodeRoot(t *testing.T) {
|
||||
assert.NotPanics(t, func() {
|
||||
f := newFs("/tmp/\u00e9\u00e0\u00fc/\u00f1o\u00f0\u00e9s")
|
||||
assert.NotNil(t, f)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPaths_NewFs_Ugly_VerifyIsFs(t *testing.T) {
|
||||
f := newFs("/tmp")
|
||||
assert.IsType(t, &core.Fs{}, f)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,8 +6,6 @@ import (
|
|||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
|
|
@ -299,19 +297,11 @@ func (s *PrepSubsystem) planList(_ context.Context, _ *mcp.CallToolRequest, inpu
|
|||
return nil, PlanListOutput{}, core.E("planList", "failed to access plans directory", err)
|
||||
}
|
||||
|
||||
r := fs.List(dir)
|
||||
if !r.OK {
|
||||
return nil, PlanListOutput{}, nil
|
||||
}
|
||||
entries := r.Value.([]os.DirEntry)
|
||||
jsonFiles := core.PathGlob(core.JoinPath(dir, "*.json"))
|
||||
|
||||
var plans []Plan
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() || !core.HasSuffix(entry.Name(), ".json") {
|
||||
continue
|
||||
}
|
||||
|
||||
id := core.TrimSuffix(entry.Name(), ".json")
|
||||
for _, f := range jsonFiles {
|
||||
id := core.TrimSuffix(core.PathBase(f), ".json")
|
||||
plan, err := readPlan(dir, id)
|
||||
if err != nil {
|
||||
continue
|
||||
|
|
@ -352,7 +342,7 @@ func generatePlanID(title string) string {
|
|||
// Append short random suffix for uniqueness
|
||||
b := make([]byte, 3)
|
||||
rand.Read(b)
|
||||
return slug + "-" + hex.EncodeToString(b)
|
||||
return core.Concat(slug, "-", hex.EncodeToString(b))
|
||||
}
|
||||
|
||||
func readPlan(dir, id string) (*Plan, error) {
|
||||
|
|
@ -362,8 +352,8 @@ func readPlan(dir, id string) (*Plan, error) {
|
|||
}
|
||||
|
||||
var plan Plan
|
||||
if err := json.Unmarshal([]byte(r.Value.(string)), &plan); err != nil {
|
||||
return nil, core.E("readPlan", "failed to parse plan "+id, err)
|
||||
if ur := core.JSONUnmarshalString(r.Value.(string), &plan); !ur.OK {
|
||||
return nil, core.E("readPlan", "failed to parse plan "+id, nil)
|
||||
}
|
||||
return &plan, nil
|
||||
}
|
||||
|
|
@ -375,12 +365,8 @@ func writePlan(dir string, plan *Plan) (string, error) {
|
|||
}
|
||||
|
||||
path := planPath(dir, plan.ID)
|
||||
data, err := json.MarshalIndent(plan, "", " ")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if r := fs.Write(path, string(data)); !r.OK {
|
||||
if r := fs.Write(path, core.JSONMarshalString(plan)); !r.OK {
|
||||
err, _ := r.Value.(error)
|
||||
return "", core.E("writePlan", "failed to write plan", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,16 +9,18 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// newTestPrep creates a PrepSubsystem for testing.
|
||||
// newTestPrep creates a PrepSubsystem for testing with testCore wired in.
|
||||
func newTestPrep(t *testing.T) *PrepSubsystem {
|
||||
t.Helper()
|
||||
return &PrepSubsystem{
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
13
pkg/agentic/plan_example_test.go
Normal file
13
pkg/agentic/plan_example_test.go
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func Example_planPath() {
|
||||
path := planPath("/tmp/plans", "my-plan")
|
||||
core.Println(core.HasSuffix(path, "my-plan.json"))
|
||||
// Output: true
|
||||
}
|
||||
|
|
@ -66,7 +66,7 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in
|
|||
}
|
||||
|
||||
if st.Branch == "" {
|
||||
branch := gitOutput(ctx, repoDir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
branch := s.gitOutput(ctx, repoDir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
if branch == "" {
|
||||
return nil, CreatePROutput{}, core.E("createPR", "failed to detect branch", nil)
|
||||
}
|
||||
|
|
@ -108,9 +108,9 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in
|
|||
|
||||
// Push branch to Forge (origin is the local clone, not Forge)
|
||||
forgeRemote := core.Sprintf("ssh://git@forge.lthn.ai:2223/%s/%s.git", org, st.Repo)
|
||||
pushOut, pushErr := gitCmd(ctx, repoDir, "push", forgeRemote, st.Branch)
|
||||
if pushErr != nil {
|
||||
return nil, CreatePROutput{}, core.E("createPR", "git push failed: "+pushOut, pushErr)
|
||||
r := s.gitCmd(ctx, repoDir, "push", forgeRemote, st.Branch)
|
||||
if !r.OK {
|
||||
return nil, CreatePROutput{}, core.E("createPR", "git push failed: "+r.Value.(string), nil)
|
||||
}
|
||||
|
||||
// Create PR via Forge API
|
||||
|
|
|
|||
11
pkg/agentic/pr_example_test.go
Normal file
11
pkg/agentic/pr_example_test.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExampleCreatePRInput() {
|
||||
input := CreatePRInput{Workspace: "core/go-io/task-5"}
|
||||
core.Println(input.Workspace)
|
||||
// Output: core/go-io/task-5
|
||||
}
|
||||
|
|
@ -14,6 +14,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/forge"
|
||||
forge_types "dappco.re/go/core/forge/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@ -64,6 +65,7 @@ func mockPRForgeServer(t *testing.T) *httptest.Server {
|
|||
func TestPr_ForgeCreatePR_Good_Success(t *testing.T) {
|
||||
srv := mockPRForgeServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -91,6 +93,7 @@ func TestPr_ForgeCreatePR_Bad_ServerError(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -112,6 +115,7 @@ func TestPr_ForgeCreatePR_Bad_ServerError(t *testing.T) {
|
|||
|
||||
func TestPr_CreatePR_Bad_NoWorkspace(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeToken: "test-token",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -124,6 +128,7 @@ func TestPr_CreatePR_Bad_NoWorkspace(t *testing.T) {
|
|||
|
||||
func TestPr_CreatePR_Bad_NoToken(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeToken: "",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -141,6 +146,7 @@ func TestPr_CreatePR_Bad_WorkspaceNotFound(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeToken: "test-token",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -176,6 +182,7 @@ func TestPr_CreatePR_Good_DryRun(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeToken: "test-token",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -214,6 +221,7 @@ func TestPr_CreatePR_Good_CustomTitle(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeToken: "test-token",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -232,6 +240,7 @@ func TestPr_CreatePR_Good_CustomTitle(t *testing.T) {
|
|||
|
||||
func TestPr_ListPRs_Bad_NoToken(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeToken: "",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -255,6 +264,7 @@ func TestPr_CommentOnIssue_Good_PostsComment(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -270,7 +280,7 @@ func TestPr_CommentOnIssue_Good_PostsComment(t *testing.T) {
|
|||
// --- buildPRBody ---
|
||||
|
||||
func TestPr_BuildPRBody_Good(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
st := &WorkspaceStatus{
|
||||
Status: "completed",
|
||||
Repo: "go-io",
|
||||
|
|
@ -290,7 +300,7 @@ func TestPr_BuildPRBody_Good(t *testing.T) {
|
|||
|
||||
func TestPr_BuildPRBody_Bad(t *testing.T) {
|
||||
// Empty status struct
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
st := &WorkspaceStatus{}
|
||||
body := s.buildPRBody(st)
|
||||
assert.Contains(t, body, "## Summary")
|
||||
|
|
@ -300,7 +310,7 @@ func TestPr_BuildPRBody_Bad(t *testing.T) {
|
|||
|
||||
func TestPr_BuildPRBody_Ugly(t *testing.T) {
|
||||
// Very long task string
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
longTask := strings.Repeat("This is a very long task description. ", 100)
|
||||
st := &WorkspaceStatus{
|
||||
Task: longTask,
|
||||
|
|
@ -322,6 +332,7 @@ func TestPr_CommentOnIssue_Bad(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -348,6 +359,7 @@ func TestPr_CommentOnIssue_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -396,6 +408,7 @@ func TestPr_CreatePR_Ugly(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeToken: "test-token",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -428,6 +441,7 @@ func TestPr_ForgeCreatePR_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -463,6 +477,7 @@ func TestPr_ListPRs_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -484,6 +499,7 @@ func TestPr_ListRepoPRs_Good(t *testing.T) {
|
|||
// Specific repo with PRs
|
||||
srv := mockPRForgeServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -506,6 +522,7 @@ func TestPr_ListRepoPRs_Bad(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -526,6 +543,7 @@ func TestPr_ListRepoPRs_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
|
|||
|
|
@ -7,9 +7,6 @@ package agentic
|
|||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
goio "io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
|
@ -21,21 +18,21 @@ import (
|
|||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// AgentOptions configures the agentic service.
|
||||
type AgentOptions struct{}
|
||||
|
||||
// PrepSubsystem provides agentic MCP tools for workspace orchestration.
|
||||
// Agent lifecycle events are broadcast via c.ACTION(messages.AgentCompleted{}).
|
||||
// Agent lifecycle events are broadcast via s.Core().ACTION(messages.AgentCompleted{}).
|
||||
//
|
||||
// sub := agentic.NewPrep()
|
||||
// sub.SetCore(c)
|
||||
// sub.RegisterTools(server)
|
||||
// core.New(core.WithService(agentic.Register))
|
||||
type PrepSubsystem struct {
|
||||
core *core.Core // Core framework instance for IPC, Config, Lock
|
||||
*core.ServiceRuntime[AgentOptions]
|
||||
forge *forge.Forge
|
||||
forgeURL string
|
||||
forgeToken string
|
||||
brainURL string
|
||||
brainKey string
|
||||
codePath string
|
||||
client *http.Client
|
||||
drainMu sync.Mutex
|
||||
pokeCh chan struct{}
|
||||
frozen bool
|
||||
|
|
@ -73,34 +70,95 @@ func NewPrep() *PrepSubsystem {
|
|||
brainURL: envOr("CORE_BRAIN_URL", "https://api.lthn.sh"),
|
||||
brainKey: brainKey,
|
||||
codePath: envOr("CODE_PATH", core.JoinPath(home, "Code")),
|
||||
client: &http.Client{Timeout: 30 * time.Second},
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
}
|
||||
|
||||
// SetCore wires the Core framework instance for IPC, Config, and Lock access.
|
||||
// SetCore wires the Core framework instance via ServiceRuntime.
|
||||
// Deprecated: Use Register with core.WithService(agentic.Register) instead.
|
||||
//
|
||||
// prep.SetCore(c)
|
||||
func (s *PrepSubsystem) SetCore(c *core.Core) {
|
||||
s.core = c
|
||||
s.ServiceRuntime = core.NewServiceRuntime(c, AgentOptions{})
|
||||
}
|
||||
|
||||
// OnStartup implements core.Startable — starts the queue runner and registers commands.
|
||||
func (s *PrepSubsystem) OnStartup(ctx context.Context) error {
|
||||
// OnStartup implements core.Startable — registers named Actions, starts the queue runner,
|
||||
// and registers CLI commands. The Action registry IS the capability map.
|
||||
//
|
||||
// c.Action("agentic.dispatch").Run(ctx, opts)
|
||||
// c.Actions() // ["agentic.dispatch", "agentic.prep", "agentic.status", ...]
|
||||
func (s *PrepSubsystem) OnStartup(ctx context.Context) core.Result {
|
||||
c := s.Core()
|
||||
|
||||
// Transport — register HTTP protocol + Drive endpoints
|
||||
RegisterHTTPTransport(c)
|
||||
c.Drive().New(core.NewOptions(
|
||||
core.Option{Key: "name", Value: "forge"},
|
||||
core.Option{Key: "transport", Value: s.forgeURL},
|
||||
core.Option{Key: "token", Value: s.forgeToken},
|
||||
))
|
||||
c.Drive().New(core.NewOptions(
|
||||
core.Option{Key: "name", Value: "brain"},
|
||||
core.Option{Key: "transport", Value: s.brainURL},
|
||||
core.Option{Key: "token", Value: s.brainKey},
|
||||
))
|
||||
|
||||
// Dispatch & workspace
|
||||
c.Action("agentic.dispatch", s.handleDispatch)
|
||||
c.Action("agentic.prep", s.handlePrep)
|
||||
c.Action("agentic.status", s.handleStatus)
|
||||
c.Action("agentic.resume", s.handleResume)
|
||||
c.Action("agentic.scan", s.handleScan)
|
||||
c.Action("agentic.watch", s.handleWatch)
|
||||
|
||||
// Pipeline
|
||||
c.Action("agentic.qa", s.handleQA)
|
||||
c.Action("agentic.auto-pr", s.handleAutoPR)
|
||||
c.Action("agentic.verify", s.handleVerify)
|
||||
c.Action("agentic.ingest", s.handleIngest)
|
||||
c.Action("agentic.poke", s.handlePoke)
|
||||
c.Action("agentic.mirror", s.handleMirror)
|
||||
|
||||
// Forge
|
||||
c.Action("agentic.issue.get", s.handleIssueGet)
|
||||
c.Action("agentic.issue.list", s.handleIssueList)
|
||||
c.Action("agentic.issue.create", s.handleIssueCreate)
|
||||
c.Action("agentic.pr.get", s.handlePRGet)
|
||||
c.Action("agentic.pr.list", s.handlePRList)
|
||||
c.Action("agentic.pr.merge", s.handlePRMerge)
|
||||
|
||||
// Review
|
||||
c.Action("agentic.review-queue", s.handleReviewQueue)
|
||||
|
||||
// Epic
|
||||
c.Action("agentic.epic", s.handleEpic)
|
||||
|
||||
// Completion pipeline — Task composition
|
||||
c.Task("agent.completion", core.Task{
|
||||
Description: "QA → PR → Verify → Merge",
|
||||
Steps: []core.Step{
|
||||
{Action: "agentic.qa"},
|
||||
{Action: "agentic.auto-pr"},
|
||||
{Action: "agentic.verify"},
|
||||
{Action: "agentic.ingest", Async: true},
|
||||
{Action: "agentic.poke", Async: true},
|
||||
},
|
||||
})
|
||||
|
||||
s.StartRunner()
|
||||
s.registerCommands(ctx)
|
||||
s.registerWorkspaceCommands()
|
||||
s.registerForgeCommands()
|
||||
return nil
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
// registerCommands is in commands.go
|
||||
|
||||
// OnShutdown implements core.Stoppable — freezes the queue.
|
||||
func (s *PrepSubsystem) OnShutdown(ctx context.Context) error {
|
||||
func (s *PrepSubsystem) OnShutdown(ctx context.Context) core.Result {
|
||||
s.frozen = true
|
||||
return nil
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
func envOr(key, fallback string) string {
|
||||
|
|
@ -247,8 +305,8 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
|||
|
||||
if !resumed {
|
||||
// Clone repo into repo/
|
||||
if _, cloneErr := gitCmd(ctx, ".", "clone", repoPath, repoDir); cloneErr != nil {
|
||||
return nil, PrepOutput{}, core.E("prep", "git clone failed for "+input.Repo, cloneErr)
|
||||
if r := s.gitCmd(ctx, ".", "clone", repoPath, repoDir); !r.OK {
|
||||
return nil, PrepOutput{}, core.E("prep", "git clone failed for "+input.Repo, nil)
|
||||
}
|
||||
|
||||
// Create feature branch
|
||||
|
|
@ -264,13 +322,13 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
|||
}
|
||||
branchName := core.Sprintf("agent/%s", taskSlug)
|
||||
|
||||
if _, branchErr := gitCmd(ctx, repoDir, "checkout", "-b", branchName); branchErr != nil {
|
||||
return nil, PrepOutput{}, core.E("prep.branch", core.Sprintf("failed to create branch %q", branchName), branchErr)
|
||||
if r := s.gitCmd(ctx, repoDir, "checkout", "-b", branchName); !r.OK {
|
||||
return nil, PrepOutput{}, core.E("prep.branch", core.Sprintf("failed to create branch %q", branchName), nil)
|
||||
}
|
||||
out.Branch = branchName
|
||||
} else {
|
||||
// Resume: read branch from existing checkout
|
||||
out.Branch = gitOutput(ctx, repoDir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
out.Branch = s.gitOutput(ctx, repoDir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
}
|
||||
|
||||
// Build the rich prompt with all context
|
||||
|
|
@ -400,32 +458,22 @@ func (s *PrepSubsystem) brainRecall(ctx context.Context, repo string) (string, i
|
|||
return "", 0
|
||||
}
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"query": "architecture conventions key interfaces for " + repo,
|
||||
body := core.JSONMarshalString(map[string]any{
|
||||
"query": core.Concat("architecture conventions key interfaces for ", repo),
|
||||
"top_k": 10,
|
||||
"project": repo,
|
||||
"agent_id": "cladius",
|
||||
})
|
||||
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", s.brainURL+"/v1/brain/recall", core.NewReader(string(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Authorization", "Bearer "+s.brainKey)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil || resp.StatusCode != 200 {
|
||||
if resp != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
r := HTTPPost(ctx, core.Concat(s.brainURL, "/v1/brain/recall"), body, s.brainKey, "Bearer")
|
||||
if !r.OK {
|
||||
return "", 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respData, _ := goio.ReadAll(resp.Body)
|
||||
var result struct {
|
||||
Memories []map[string]any `json:"memories"`
|
||||
}
|
||||
json.Unmarshal(respData, &result)
|
||||
core.JSONUnmarshalString(r.Value.(string), &result)
|
||||
|
||||
if len(result.Memories) == 0 {
|
||||
return "", 0
|
||||
|
|
@ -444,7 +492,7 @@ func (s *PrepSubsystem) brainRecall(ctx context.Context, repo string) (string, i
|
|||
|
||||
func (s *PrepSubsystem) findConsumersList(repo string) (string, int) {
|
||||
goWorkPath := core.JoinPath(s.codePath, "go.work")
|
||||
modulePath := "forge.lthn.ai/core/" + repo
|
||||
modulePath := core.Concat("forge.lthn.ai/core/", repo)
|
||||
|
||||
r := fs.Read(goWorkPath)
|
||||
if !r.OK {
|
||||
|
|
@ -476,7 +524,7 @@ func (s *PrepSubsystem) findConsumersList(repo string) (string, int) {
|
|||
|
||||
b := core.NewBuilder()
|
||||
for _, c := range consumers {
|
||||
b.WriteString("- " + c + "\n")
|
||||
b.WriteString(core.Concat("- ", c, "\n"))
|
||||
}
|
||||
b.WriteString(core.Sprintf("Breaking change risk: %d consumers.\n", len(consumers)))
|
||||
|
||||
|
|
@ -484,7 +532,7 @@ func (s *PrepSubsystem) findConsumersList(repo string) (string, int) {
|
|||
}
|
||||
|
||||
func (s *PrepSubsystem) getGitLog(repoPath string) string {
|
||||
return gitOutput(context.Background(), repoPath, "log", "--oneline", "-20")
|
||||
return s.gitOutput(context.Background(), repoPath, "log", "--oneline", "-20")
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) pullWikiContent(ctx context.Context, org, repo string) string {
|
||||
|
|
@ -504,7 +552,7 @@ func (s *PrepSubsystem) pullWikiContent(ctx context.Context, org, repo string) s
|
|||
continue
|
||||
}
|
||||
content, _ := base64.StdEncoding.DecodeString(page.ContentBase64)
|
||||
b.WriteString("### " + meta.Title + "\n\n")
|
||||
b.WriteString(core.Concat("### ", meta.Title, "\n\n"))
|
||||
b.WriteString(string(content))
|
||||
b.WriteString("\n\n")
|
||||
}
|
||||
|
|
@ -539,18 +587,18 @@ func (s *PrepSubsystem) renderPlan(templateSlug string, variables map[string]str
|
|||
}
|
||||
|
||||
plan := core.NewBuilder()
|
||||
plan.WriteString("# " + tmpl.Name + "\n\n")
|
||||
plan.WriteString(core.Concat("# ", tmpl.Name, "\n\n"))
|
||||
if task != "" {
|
||||
plan.WriteString("**Task:** " + task + "\n\n")
|
||||
plan.WriteString(core.Concat("**Task:** ", task, "\n\n"))
|
||||
}
|
||||
if tmpl.Description != "" {
|
||||
plan.WriteString(tmpl.Description + "\n\n")
|
||||
plan.WriteString(core.Concat(tmpl.Description, "\n\n"))
|
||||
}
|
||||
|
||||
if len(tmpl.Guidelines) > 0 {
|
||||
plan.WriteString("## Guidelines\n\n")
|
||||
for _, g := range tmpl.Guidelines {
|
||||
plan.WriteString("- " + g + "\n")
|
||||
plan.WriteString(core.Concat("- ", g, "\n"))
|
||||
}
|
||||
plan.WriteString("\n")
|
||||
}
|
||||
|
|
@ -558,15 +606,15 @@ func (s *PrepSubsystem) renderPlan(templateSlug string, variables map[string]str
|
|||
for i, phase := range tmpl.Phases {
|
||||
plan.WriteString(core.Sprintf("## Phase %d: %s\n\n", i+1, phase.Name))
|
||||
if phase.Description != "" {
|
||||
plan.WriteString(phase.Description + "\n\n")
|
||||
plan.WriteString(core.Concat(phase.Description, "\n\n"))
|
||||
}
|
||||
for _, t := range phase.Tasks {
|
||||
switch v := t.(type) {
|
||||
case string:
|
||||
plan.WriteString("- [ ] " + v + "\n")
|
||||
plan.WriteString(core.Concat("- [ ] ", v, "\n"))
|
||||
case map[string]any:
|
||||
if name, ok := v["name"].(string); ok {
|
||||
plan.WriteString("- [ ] " + name + "\n")
|
||||
plan.WriteString(core.Concat("- [ ] ", name, "\n"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
17
pkg/agentic/prep_example_test.go
Normal file
17
pkg/agentic/prep_example_test.go
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExamplePrepInput() {
|
||||
input := PrepInput{Repo: "go-io", Issue: 42}
|
||||
core.Println(input.Repo, input.Issue)
|
||||
// Output: go-io 42
|
||||
}
|
||||
|
||||
func ExampleNewPrep() {
|
||||
prep := NewPrep()
|
||||
core.Println(prep != nil)
|
||||
// Output: true
|
||||
}
|
||||
|
|
@ -13,6 +13,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/forge"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
|
@ -21,6 +22,7 @@ import (
|
|||
|
||||
func TestPrep_Shutdown_Good(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -31,7 +33,7 @@ func TestPrep_Shutdown_Good(t *testing.T) {
|
|||
// --- Name ---
|
||||
|
||||
func TestPrep_Name_Good(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
assert.Equal(t, "agentic", s.Name())
|
||||
}
|
||||
|
||||
|
|
@ -65,6 +67,7 @@ use (
|
|||
}
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: dir,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -92,6 +95,7 @@ use (
|
|||
os.WriteFile(filepath.Join(modDir, "go.mod"), []byte("module forge.lthn.ai/core/go\n"), 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: dir,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -104,6 +108,7 @@ use (
|
|||
|
||||
func TestPrep_FindConsumersList_Bad_NoGoWork(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -142,6 +147,7 @@ func TestPrep_PullWikiContent_Good_WithPages(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
client: srv.Client(),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -162,6 +168,7 @@ func TestPrep_PullWikiContent_Good_NoPages(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
client: srv.Client(),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -185,6 +192,7 @@ func TestPrep_GetIssueBody_Good(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
client: srv.Client(),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -202,6 +210,7 @@ func TestPrep_GetIssueBody_Bad_NotFound(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
client: srv.Client(),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -220,6 +229,7 @@ func TestPrep_BuildPrompt_Good_BasicFields(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test\n\ngo 1.22\n"), 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -253,6 +263,7 @@ func TestPrep_BuildPrompt_Good_WithIssue(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
codePath: t.TempDir(),
|
||||
client: srv.Client(),
|
||||
|
|
@ -279,6 +290,7 @@ func TestPrep_BuildPrompt_Good(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test\n\ngo 1.22\n"), 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -303,6 +315,7 @@ func TestPrep_BuildPrompt_Good(t *testing.T) {
|
|||
func TestPrep_BuildPrompt_Bad(t *testing.T) {
|
||||
// Empty repo path — still produces a prompt (no crash)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -334,6 +347,7 @@ func TestPrep_BuildPrompt_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
codePath: t.TempDir(),
|
||||
client: srv.Client(),
|
||||
|
|
@ -370,6 +384,7 @@ func TestPrep_BuildPrompt_Ugly_WithGitLog(t *testing.T) {
|
|||
exec.Command("git", "-C", dir, "commit", "-m", "init").Run()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -393,6 +408,7 @@ func TestDispatch_RunQA_Good_PHPNoComposer(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(repoDir, "composer.json"), []byte(`{"name":"test"}`), 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -411,6 +427,7 @@ func TestPrep_PullWikiContent_Bad(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
client: srv.Client(),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -441,6 +458,7 @@ func TestPrep_PullWikiContent_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
client: srv.Client(),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -457,6 +475,7 @@ func TestPrep_PullWikiContent_Ugly(t *testing.T) {
|
|||
func TestPrep_RenderPlan_Ugly(t *testing.T) {
|
||||
// Template with variables that don't exist in template — variables just won't match
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -486,6 +505,7 @@ func TestPrep_BrainRecall_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainURL: srv.URL,
|
||||
brainKey: "test-key",
|
||||
client: srv.Client(),
|
||||
|
|
@ -505,6 +525,7 @@ func TestPrep_PrepWorkspace_Ugly(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -545,6 +566,7 @@ func TestPrep_FindConsumersList_Ugly(t *testing.T) {
|
|||
os.MkdirAll(filepath.Join(dir, "core", "missing"), 0o755)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: dir,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -570,6 +592,7 @@ func TestPrep_GetIssueBody_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
client: srv.Client(),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
|
|||
|
|
@ -190,17 +190,17 @@ func TestPrep_NewPrep_Good_GiteaTokenFallback(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestPrepSubsystem_Good_Name(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
assert.Equal(t, "agentic", s.Name())
|
||||
}
|
||||
|
||||
func TestPrep_SetCore_Good(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
assert.Nil(t, s.core)
|
||||
assert.Nil(t, s.ServiceRuntime)
|
||||
|
||||
c := core.New(core.WithOption("name", "test"))
|
||||
s.SetCore(c)
|
||||
assert.NotNil(t, s.core)
|
||||
assert.NotNil(t, s.ServiceRuntime)
|
||||
}
|
||||
|
||||
// --- sanitiseBranchSlug Bad/Ugly ---
|
||||
|
|
@ -338,7 +338,7 @@ func TestSanitise_TrimRuneEdges_Ugly_NoMatch(t *testing.T) {
|
|||
// --- PrepSubsystem Name Bad/Ugly ---
|
||||
|
||||
func TestPrep_Name_Bad(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
name := s.Name()
|
||||
assert.NotEmpty(t, name, "Name should never return empty")
|
||||
assert.Equal(t, "agentic", name)
|
||||
|
|
@ -387,34 +387,34 @@ func TestPrep_NewPrep_Ugly(t *testing.T) {
|
|||
|
||||
func TestPrep_SetCore_Bad(t *testing.T) {
|
||||
// SetCore with nil — should not panic
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
assert.NotPanics(t, func() {
|
||||
s.SetCore(nil)
|
||||
})
|
||||
assert.Nil(t, s.core)
|
||||
}
|
||||
|
||||
func TestPrep_SetCore_Ugly(t *testing.T) {
|
||||
// SetCore twice — second overwrites first
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
c1 := core.New(core.WithOption("name", "first"))
|
||||
c2 := core.New(core.WithOption("name", "second"))
|
||||
|
||||
s.SetCore(c1)
|
||||
assert.Equal(t, c1, s.core)
|
||||
assert.NotNil(t, s.ServiceRuntime)
|
||||
|
||||
s.SetCore(c2)
|
||||
assert.Equal(t, c2, s.core, "second SetCore should overwrite first")
|
||||
assert.Equal(t, c2, s.Core(), "second SetCore should overwrite first")
|
||||
}
|
||||
|
||||
// --- OnStartup Bad/Ugly ---
|
||||
|
||||
func TestPrep_OnStartup_Bad(t *testing.T) {
|
||||
// OnStartup without SetCore (nil core) — panics because registerCommands
|
||||
// needs core.Command(). Verify the panic is from nil core, not a logic error.
|
||||
// OnStartup without SetCore (nil ServiceRuntime) — panics because
|
||||
// registerCommands calls s.Core().Command().
|
||||
s := &PrepSubsystem{
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
ServiceRuntime: nil,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
assert.Panics(t, func() {
|
||||
_ = s.OnStartup(context.Background())
|
||||
|
|
@ -424,6 +424,7 @@ func TestPrep_OnStartup_Bad(t *testing.T) {
|
|||
func TestPrep_OnStartup_Ugly(t *testing.T) {
|
||||
// OnStartup called twice with valid core — second call should not panic
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -441,12 +442,13 @@ func TestPrep_OnStartup_Ugly(t *testing.T) {
|
|||
func TestPrep_OnShutdown_Bad(t *testing.T) {
|
||||
// OnShutdown without Core
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
assert.NotPanics(t, func() {
|
||||
err := s.OnShutdown(context.Background())
|
||||
assert.NoError(t, err)
|
||||
r := s.OnShutdown(context.Background())
|
||||
assert.True(t, r.OK)
|
||||
})
|
||||
assert.True(t, s.frozen)
|
||||
}
|
||||
|
|
@ -456,6 +458,7 @@ func TestPrep_OnShutdown_Bad(t *testing.T) {
|
|||
func TestPrep_Shutdown_Bad(t *testing.T) {
|
||||
// Shutdown always returns nil
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -525,6 +528,7 @@ func TestPrep_TestPrepWorkspace_Good(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -541,6 +545,7 @@ func TestPrep_TestPrepWorkspace_Good(t *testing.T) {
|
|||
|
||||
func TestPrep_TestPrepWorkspace_Bad(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -557,6 +562,7 @@ func TestPrep_TestPrepWorkspace_Ugly(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -578,6 +584,7 @@ func TestPrep_TestBuildPrompt_Good(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "go.mod"), "module test").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -598,6 +605,7 @@ func TestPrep_TestBuildPrompt_Good(t *testing.T) {
|
|||
|
||||
func TestPrep_TestBuildPrompt_Bad(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -616,6 +624,7 @@ func TestPrep_TestBuildPrompt_Ugly(t *testing.T) {
|
|||
dir := t.TempDir()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -686,6 +695,7 @@ func TestPrep_GetGitLog_Good(t *testing.T) {
|
|||
run("git", "commit", "-m", "initial commit")
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -698,6 +708,7 @@ func TestPrep_GetGitLog_Bad(t *testing.T) {
|
|||
// Non-git dir returns empty
|
||||
dir := t.TempDir()
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -713,6 +724,7 @@ func TestPrep_GetGitLog_Ugly(t *testing.T) {
|
|||
require.NoError(t, cmd.Run())
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -760,6 +772,7 @@ func TestPrep_PrepWorkspace_Good(t *testing.T) {
|
|||
run(srcRepo, "git", "commit", "-m", "initial commit")
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
codePath: filepath.Join(root, "src"),
|
||||
client: srv.Client(),
|
||||
|
|
|
|||
|
|
@ -1,132 +1,84 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Process execution helpers — wraps go-process for testable command execution.
|
||||
// All external command execution in the agentic package goes through these helpers.
|
||||
// Process execution helpers — routes all commands through s.Core().Process().
|
||||
// No direct os/exec or go-process imports.
|
||||
//
|
||||
// Requires go-process to be registered with Core via:
|
||||
//
|
||||
// core.New(core.WithService(agentic.ProcessRegister))
|
||||
//
|
||||
// If process service is not initialised (e.g. in tests), helpers will error.
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/process"
|
||||
)
|
||||
|
||||
var procOnce sync.Once
|
||||
|
||||
// ensureProcess lazily initialises go-process default service for tests
|
||||
// and standalone usage. In production, main.go registers ProcessRegister
|
||||
// with Core which calls SetDefault properly.
|
||||
func ensureProcess() {
|
||||
procOnce.Do(func() {
|
||||
if process.Default() != nil {
|
||||
return
|
||||
}
|
||||
c := core.New()
|
||||
svc, err := process.NewService(process.Options{})(c)
|
||||
if err == nil {
|
||||
if s, ok := svc.(*process.Service); ok {
|
||||
_ = process.SetDefault(s)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// runCmd executes a command in a directory and returns (output, error).
|
||||
// Uses go-process RunWithOptions — requires process service to be registered.
|
||||
// runCmd executes a command in a directory. Returns Result{Value: string, OK: bool}.
|
||||
//
|
||||
// out, err := runCmd(ctx, repoDir, "git", "log", "--oneline", "-20")
|
||||
func runCmd(ctx context.Context, dir string, command string, args ...string) (string, error) {
|
||||
ensureProcess()
|
||||
return process.RunWithOptions(ctx, process.RunOptions{
|
||||
Command: command,
|
||||
Args: args,
|
||||
Dir: dir,
|
||||
})
|
||||
// r := s.runCmd(ctx, repoDir, "git", "log", "--oneline", "-20")
|
||||
// if r.OK { output := r.Value.(string) }
|
||||
func (s *PrepSubsystem) runCmd(ctx context.Context, dir string, command string, args ...string) core.Result {
|
||||
return s.Core().Process().RunIn(ctx, dir, command, args...)
|
||||
}
|
||||
|
||||
// runCmdEnv executes a command with additional environment variables.
|
||||
//
|
||||
// out, err := runCmdEnv(ctx, repoDir, []string{"GOWORK=off"}, "go", "test", "./...")
|
||||
func runCmdEnv(ctx context.Context, dir string, env []string, command string, args ...string) (string, error) {
|
||||
ensureProcess()
|
||||
return process.RunWithOptions(ctx, process.RunOptions{
|
||||
Command: command,
|
||||
Args: args,
|
||||
Dir: dir,
|
||||
Env: env,
|
||||
})
|
||||
// r := s.runCmdEnv(ctx, repoDir, []string{"GOWORK=off"}, "go", "test", "./...")
|
||||
func (s *PrepSubsystem) runCmdEnv(ctx context.Context, dir string, env []string, command string, args ...string) core.Result {
|
||||
return s.Core().Process().RunWithEnv(ctx, dir, env, command, args...)
|
||||
}
|
||||
|
||||
// runCmdOK executes a command and returns true if it exits 0.
|
||||
//
|
||||
// if runCmdOK(ctx, repoDir, "go", "build", "./...") { ... }
|
||||
func runCmdOK(ctx context.Context, dir string, command string, args ...string) bool {
|
||||
_, err := runCmd(ctx, dir, command, args...)
|
||||
return err == nil
|
||||
// if s.runCmdOK(ctx, repoDir, "go", "build", "./...") { ... }
|
||||
func (s *PrepSubsystem) runCmdOK(ctx context.Context, dir string, command string, args ...string) bool {
|
||||
return s.runCmd(ctx, dir, command, args...).OK
|
||||
}
|
||||
|
||||
// gitCmd runs a git command in the given directory.
|
||||
//
|
||||
// out, err := gitCmd(ctx, repoDir, "log", "--oneline", "-20")
|
||||
func gitCmd(ctx context.Context, dir string, args ...string) (string, error) {
|
||||
return runCmd(ctx, dir, "git", args...)
|
||||
// r := s.gitCmd(ctx, repoDir, "log", "--oneline", "-20")
|
||||
func (s *PrepSubsystem) gitCmd(ctx context.Context, dir string, args ...string) core.Result {
|
||||
return s.runCmd(ctx, dir, "git", args...)
|
||||
}
|
||||
|
||||
// gitCmdOK runs a git command and returns true if it exits 0.
|
||||
//
|
||||
// if gitCmdOK(ctx, repoDir, "fetch", "origin", "main") { ... }
|
||||
func gitCmdOK(ctx context.Context, dir string, args ...string) bool {
|
||||
return runCmdOK(ctx, dir, "git", args...)
|
||||
// if s.gitCmdOK(ctx, repoDir, "fetch", "origin", "main") { ... }
|
||||
func (s *PrepSubsystem) gitCmdOK(ctx context.Context, dir string, args ...string) bool {
|
||||
return s.gitCmd(ctx, dir, args...).OK
|
||||
}
|
||||
|
||||
// gitOutput runs a git command and returns trimmed stdout.
|
||||
//
|
||||
// branch := gitOutput(ctx, repoDir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
func gitOutput(ctx context.Context, dir string, args ...string) string {
|
||||
out, err := gitCmd(ctx, dir, args...)
|
||||
if err != nil {
|
||||
// branch := s.gitOutput(ctx, repoDir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
func (s *PrepSubsystem) gitOutput(ctx context.Context, dir string, args ...string) string {
|
||||
r := s.gitCmd(ctx, dir, args...)
|
||||
if !r.OK {
|
||||
return ""
|
||||
}
|
||||
return core.Trim(out)
|
||||
return core.Trim(r.Value.(string))
|
||||
}
|
||||
|
||||
// --- Process lifecycle helpers ---
|
||||
|
||||
// processIsRunning checks if a process is still alive.
|
||||
// Uses go-process ProcessID if available, falls back to PID signal check.
|
||||
// processIsRunning checks if a process is still alive via PID signal check.
|
||||
//
|
||||
// if processIsRunning(st.ProcessID, st.PID) { ... }
|
||||
func processIsRunning(processID string, pid int) bool {
|
||||
if processID != "" {
|
||||
if proc, err := process.Get(processID); err == nil {
|
||||
return proc.IsRunning()
|
||||
}
|
||||
}
|
||||
if pid > 0 {
|
||||
return syscall.Kill(pid, 0) == nil
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// processKill terminates a process.
|
||||
// Uses go-process Kill if ProcessID available, falls back to SIGTERM.
|
||||
// processKill terminates a process via SIGTERM.
|
||||
//
|
||||
// processKill(st.ProcessID, st.PID)
|
||||
func processKill(processID string, pid int) bool {
|
||||
if processID != "" {
|
||||
if proc, err := process.Get(processID); err == nil {
|
||||
return proc.Kill() == nil
|
||||
}
|
||||
}
|
||||
if pid > 0 {
|
||||
return syscall.Kill(pid, syscall.SIGTERM) == nil
|
||||
}
|
||||
|
|
|
|||
39
pkg/agentic/proc_example_test.go
Normal file
39
pkg/agentic/proc_example_test.go
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func ExamplePrepSubsystem_runCmd() {
|
||||
r := testPrep.runCmd(context.Background(), ".", "echo", "hello")
|
||||
core.Println(r.OK)
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExamplePrepSubsystem_gitCmd() {
|
||||
r := testPrep.gitCmd(context.Background(), ".", "--version")
|
||||
core.Println(r.OK)
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExamplePrepSubsystem_gitOutput() {
|
||||
version := testPrep.gitOutput(context.Background(), ".", "--version")
|
||||
core.Println(core.HasPrefix(version, "git version"))
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExamplePrepSubsystem_runCmdOK() {
|
||||
ok := testPrep.runCmdOK(context.Background(), ".", "echo", "test")
|
||||
core.Println(ok)
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExamplePrepSubsystem_gitCmdOK() {
|
||||
ok := testPrep.gitCmdOK(context.Background(), ".", "--version")
|
||||
core.Println(ok)
|
||||
// Output: true
|
||||
}
|
||||
|
|
@ -6,159 +6,171 @@ import (
|
|||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/process"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testPrep is the package-level PrepSubsystem for tests that need process execution.
|
||||
var testPrep *PrepSubsystem
|
||||
|
||||
// testCore is the package-level Core with go-process registered.
|
||||
var testCore *core.Core
|
||||
|
||||
// TestMain sets up a PrepSubsystem with go-process registered for all tests in the package.
|
||||
func TestMain(m *testing.M) {
|
||||
testCore = core.New(
|
||||
core.WithService(process.Register),
|
||||
)
|
||||
testCore.ServiceStartup(context.Background(), nil)
|
||||
|
||||
testPrep = &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// newPrepWithProcess creates a PrepSubsystem wired to testCore for tests that
|
||||
// need process execution via s.Core().Process().
|
||||
func newPrepWithProcess() *PrepSubsystem {
|
||||
return &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
}
|
||||
|
||||
// --- runCmd ---
|
||||
|
||||
func TestProc_RunCmd_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
out, err := runCmd(context.Background(), dir, "echo", "hello")
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, strings.TrimSpace(out), "hello")
|
||||
r := testPrep.runCmd(context.Background(), dir, "echo", "hello")
|
||||
assert.True(t, r.OK)
|
||||
assert.Contains(t, core.Trim(r.Value.(string)), "hello")
|
||||
}
|
||||
|
||||
func TestProc_RunCmd_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
_, err := runCmd(context.Background(), dir, "nonexistent-command-xyz")
|
||||
assert.Error(t, err)
|
||||
r := testPrep.runCmd(context.Background(), dir, "nonexistent-command-xyz")
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
||||
func TestProc_RunCmd_Ugly(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Empty command string — should error
|
||||
_, err := runCmd(context.Background(), dir, "")
|
||||
assert.Error(t, err)
|
||||
r := testPrep.runCmd(context.Background(), dir, "")
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
||||
// --- runCmdEnv ---
|
||||
|
||||
func TestProc_RunCmdEnv_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
out, err := runCmdEnv(context.Background(), dir, []string{"MY_CUSTOM_VAR=hello_test"}, "env")
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, out, "MY_CUSTOM_VAR=hello_test")
|
||||
r := testPrep.runCmdEnv(context.Background(), dir, []string{"MY_CUSTOM_VAR=hello_test"}, "env")
|
||||
assert.True(t, r.OK)
|
||||
assert.Contains(t, r.Value.(string), "MY_CUSTOM_VAR=hello_test")
|
||||
}
|
||||
|
||||
func TestProc_RunCmdEnv_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
_, err := runCmdEnv(context.Background(), dir, []string{"FOO=bar"}, "nonexistent-command-xyz")
|
||||
assert.Error(t, err)
|
||||
r := testPrep.runCmdEnv(context.Background(), dir, []string{"FOO=bar"}, "nonexistent-command-xyz")
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
||||
func TestProc_RunCmdEnv_Ugly(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Empty env slice — should work fine, just no extra vars
|
||||
out, err := runCmdEnv(context.Background(), dir, []string{}, "echo", "works")
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, strings.TrimSpace(out), "works")
|
||||
r := testPrep.runCmdEnv(context.Background(), dir, []string{}, "echo", "works")
|
||||
assert.True(t, r.OK)
|
||||
assert.Contains(t, core.Trim(r.Value.(string)), "works")
|
||||
}
|
||||
|
||||
// --- runCmdOK ---
|
||||
|
||||
func TestProc_RunCmdOK_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
assert.True(t, runCmdOK(context.Background(), dir, "echo", "ok"))
|
||||
assert.True(t, testPrep.runCmdOK(context.Background(), dir, "echo", "ok"))
|
||||
}
|
||||
|
||||
func TestProc_RunCmdOK_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
assert.False(t, runCmdOK(context.Background(), dir, "nonexistent-command-xyz"))
|
||||
assert.False(t, testPrep.runCmdOK(context.Background(), dir, "nonexistent-command-xyz"))
|
||||
}
|
||||
|
||||
func TestProc_RunCmdOK_Ugly(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// "false" command returns exit 1
|
||||
assert.False(t, runCmdOK(context.Background(), dir, "false"))
|
||||
assert.False(t, testPrep.runCmdOK(context.Background(), dir, "false"))
|
||||
}
|
||||
|
||||
// --- gitCmd ---
|
||||
|
||||
func TestProc_GitCmd_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
_, err := gitCmd(context.Background(), dir, "--version")
|
||||
assert.NoError(t, err)
|
||||
r := testPrep.gitCmd(context.Background(), dir, "--version")
|
||||
assert.True(t, r.OK)
|
||||
}
|
||||
|
||||
func TestProc_GitCmd_Bad(t *testing.T) {
|
||||
// git log in a non-git dir should fail
|
||||
dir := t.TempDir()
|
||||
_, err := gitCmd(context.Background(), dir, "log")
|
||||
assert.Error(t, err)
|
||||
r := testPrep.gitCmd(context.Background(), dir, "log")
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
||||
func TestProc_GitCmd_Ugly(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Empty args — git with no arguments exits 1
|
||||
_, err := gitCmd(context.Background(), dir)
|
||||
assert.Error(t, err)
|
||||
r := testPrep.gitCmd(context.Background(), dir)
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
||||
// --- gitCmdOK ---
|
||||
|
||||
func TestProc_GitCmdOK_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
assert.True(t, gitCmdOK(context.Background(), dir, "--version"))
|
||||
assert.True(t, testPrep.gitCmdOK(context.Background(), dir, "--version"))
|
||||
}
|
||||
|
||||
func TestProc_GitCmdOK_Bad(t *testing.T) {
|
||||
// git log in non-git dir returns false
|
||||
dir := t.TempDir()
|
||||
assert.False(t, gitCmdOK(context.Background(), dir, "log"))
|
||||
assert.False(t, testPrep.gitCmdOK(context.Background(), dir, "log"))
|
||||
}
|
||||
|
||||
func TestProc_GitCmdOK_Ugly(t *testing.T) {
|
||||
// Empty dir string — git may use cwd, which may or may not be a repo
|
||||
// Just ensure no panic
|
||||
assert.NotPanics(t, func() {
|
||||
gitCmdOK(context.Background(), "", "--version")
|
||||
testPrep.gitCmdOK(context.Background(), "", "--version")
|
||||
})
|
||||
}
|
||||
|
||||
// --- gitOutput ---
|
||||
|
||||
func TestProc_GitOutput_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Init a git repo with a commit so we can read the branch
|
||||
run := func(args ...string) {
|
||||
t.Helper()
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
cmd.Dir = dir
|
||||
cmd.Env = append(cmd.Environ(),
|
||||
"GIT_AUTHOR_NAME=Test",
|
||||
"GIT_AUTHOR_EMAIL=test@test.com",
|
||||
"GIT_COMMITTER_NAME=Test",
|
||||
"GIT_COMMITTER_EMAIL=test@test.com",
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, "cmd %v failed: %s", args, string(out))
|
||||
}
|
||||
run("git", "init", "-b", "main")
|
||||
run("git", "config", "user.name", "Test")
|
||||
run("git", "config", "user.email", "test@test.com")
|
||||
run("git", "commit", "--allow-empty", "-m", "init")
|
||||
|
||||
branch := gitOutput(context.Background(), dir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
dir := initTestRepo(t)
|
||||
branch := testPrep.gitOutput(context.Background(), dir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
assert.Equal(t, "main", branch)
|
||||
}
|
||||
|
||||
func TestProc_GitOutput_Bad(t *testing.T) {
|
||||
// Non-git dir returns empty string
|
||||
dir := t.TempDir()
|
||||
out := gitOutput(context.Background(), dir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
out := testPrep.gitOutput(context.Background(), dir, "rev-parse", "--abbrev-ref", "HEAD")
|
||||
assert.Equal(t, "", out)
|
||||
}
|
||||
|
||||
func TestProc_GitOutput_Ugly(t *testing.T) {
|
||||
// Failed command returns empty string
|
||||
dir := t.TempDir()
|
||||
out := gitOutput(context.Background(), dir, "log", "--oneline", "-5")
|
||||
out := testPrep.gitOutput(context.Background(), dir, "log", "--oneline", "-5")
|
||||
assert.Equal(t, "", out)
|
||||
}
|
||||
|
||||
|
|
@ -178,9 +190,6 @@ func TestProc_ProcessIsRunning_Bad(t *testing.T) {
|
|||
func TestProc_ProcessIsRunning_Ugly(t *testing.T) {
|
||||
// PID 0 — should return false (invalid PID guard: pid > 0 is false for 0)
|
||||
assert.False(t, processIsRunning("", 0))
|
||||
|
||||
// Empty processID with PID 0 — both paths fail
|
||||
assert.False(t, processIsRunning("", 0))
|
||||
}
|
||||
|
||||
// --- processKill ---
|
||||
|
|
@ -197,41 +206,9 @@ func TestProc_ProcessKill_Bad(t *testing.T) {
|
|||
func TestProc_ProcessKill_Ugly(t *testing.T) {
|
||||
// PID 0 — pid > 0 guard returns false
|
||||
assert.False(t, processKill("", 0))
|
||||
|
||||
// Empty processID with PID 0 — both paths fail
|
||||
assert.False(t, processKill("", 0))
|
||||
}
|
||||
|
||||
// --- ensureProcess ---
|
||||
|
||||
func TestProc_EnsureProcess_Good(t *testing.T) {
|
||||
// Call twice — verify no panic (idempotent via sync.Once)
|
||||
assert.NotPanics(t, func() {
|
||||
ensureProcess()
|
||||
ensureProcess()
|
||||
})
|
||||
}
|
||||
|
||||
func TestProc_EnsureProcess_Bad(t *testing.T) {
|
||||
t.Skip("no bad path without mocking")
|
||||
}
|
||||
|
||||
func TestProc_EnsureProcess_Ugly(t *testing.T) {
|
||||
// Call from multiple goroutines concurrently — sync.Once should handle this
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 10; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
assert.NotPanics(t, func() {
|
||||
ensureProcess()
|
||||
})
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// --- initTestRepo is a helper to create a git repo with commits for proc tests ---
|
||||
// --- initTestRepo creates a git repo with commits for proc tests ---
|
||||
|
||||
func initTestRepo(t *testing.T) string {
|
||||
t.Helper()
|
||||
|
|
@ -252,7 +229,7 @@ func initTestRepo(t *testing.T) string {
|
|||
run("git", "init", "-b", "main")
|
||||
run("git", "config", "user.name", "Test")
|
||||
run("git", "config", "user.email", "test@test.com")
|
||||
require.True(t, fs.Write(filepath.Join(dir, "README.md"), "# Test").OK)
|
||||
require.True(t, fs.Write(core.JoinPath(dir, "README.md"), "# Test").OK)
|
||||
run("git", "add", "README.md")
|
||||
run("git", "commit", "-m", "initial commit")
|
||||
return dir
|
||||
|
|
|
|||
|
|
@ -7,19 +7,13 @@ import (
|
|||
"dappco.re/go/core/process"
|
||||
)
|
||||
|
||||
// ProcessRegister is the service factory for the process management service.
|
||||
// Wraps core/process for the v0.3.3→v0.4 factory pattern.
|
||||
// ProcessRegister is the service factory for go-process.
|
||||
// Delegates to process.Register — named Actions (process.run, process.start,
|
||||
// process.kill, process.list, process.get) are registered during OnStartup.
|
||||
//
|
||||
// core.New(
|
||||
// core.WithService(agentic.ProcessRegister),
|
||||
// )
|
||||
func ProcessRegister(c *core.Core) core.Result {
|
||||
svc, err := process.NewService(process.Options{})(c)
|
||||
if err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
if procSvc, ok := svc.(*process.Service); ok {
|
||||
_ = process.SetDefault(procSvc)
|
||||
}
|
||||
return core.Result{Value: svc, OK: true}
|
||||
return process.Register(c)
|
||||
}
|
||||
|
|
|
|||
21
pkg/agentic/process_register_example_test.go
Normal file
21
pkg/agentic/process_register_example_test.go
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/agent/pkg/agentic"
|
||||
)
|
||||
|
||||
func ExampleProcessRegister_exists() {
|
||||
c := core.New(core.WithService(agentic.ProcessRegister))
|
||||
c.ServiceStartup(context.Background(), nil)
|
||||
|
||||
core.Println(c.Process().Exists())
|
||||
core.Println(c.Action("process.run").Exists())
|
||||
// Output:
|
||||
// true
|
||||
// true
|
||||
}
|
||||
34
pkg/agentic/process_register_test.go
Normal file
34
pkg/agentic/process_register_test.go
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestProcessRegister_Good(t *testing.T) {
|
||||
c := core.New(core.WithService(ProcessRegister))
|
||||
c.ServiceStartup(context.Background(), nil)
|
||||
assert.True(t, c.Process().Exists())
|
||||
}
|
||||
|
||||
func TestProcessRegister_Bad_NilCore(t *testing.T) {
|
||||
// ProcessRegister delegates to process.Register
|
||||
// which needs a valid Core — verify it doesn't panic
|
||||
assert.NotPanics(t, func() {
|
||||
c := core.New()
|
||||
_ = ProcessRegister(c)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessRegister_Ugly_ActionsRegistered(t *testing.T) {
|
||||
c := core.New(core.WithService(ProcessRegister))
|
||||
c.ServiceStartup(context.Background(), nil)
|
||||
assert.True(t, c.Action("process.run").Exists())
|
||||
assert.True(t, c.Action("process.start").Exists())
|
||||
assert.True(t, c.Action("process.kill").Exists())
|
||||
}
|
||||
|
|
@ -214,8 +214,8 @@ func baseAgent(agent string) string {
|
|||
func (s *PrepSubsystem) canDispatchAgent(agent string) bool {
|
||||
// Read concurrency from shared config (loaded once at startup)
|
||||
var concurrency map[string]ConcurrencyLimit
|
||||
if s.core != nil {
|
||||
concurrency = core.ConfigGet[map[string]ConcurrencyLimit](s.core.Config(), "agents.concurrency")
|
||||
if s.ServiceRuntime != nil {
|
||||
concurrency = core.ConfigGet[map[string]ConcurrencyLimit](s.Core().Config(), "agents.concurrency")
|
||||
}
|
||||
if concurrency == nil {
|
||||
cfg := s.loadAgentsConfig()
|
||||
|
|
@ -267,9 +267,9 @@ func (s *PrepSubsystem) drainQueue() {
|
|||
if s.frozen {
|
||||
return
|
||||
}
|
||||
if s.core != nil {
|
||||
s.core.Lock("drain").Mutex.Lock()
|
||||
defer s.core.Lock("drain").Mutex.Unlock()
|
||||
if s.ServiceRuntime != nil {
|
||||
s.Core().Lock("drain").Mutex.Lock()
|
||||
defer s.Core().Lock("drain").Mutex.Unlock()
|
||||
} else {
|
||||
s.drainMu.Lock()
|
||||
defer s.drainMu.Unlock()
|
||||
|
|
@ -318,7 +318,7 @@ func (s *PrepSubsystem) drainOne() bool {
|
|||
continue
|
||||
}
|
||||
|
||||
prompt := "TASK: " + st.Task + "\n\nResume from where you left off. Read CODEX.md for conventions. Commit when done."
|
||||
prompt := core.Concat("TASK: ", st.Task, "\n\nResume from where you left off. Read CODEX.md for conventions. Commit when done.")
|
||||
|
||||
pid, _, err := s.spawnAgent(st.Agent, prompt, wsDir)
|
||||
if err != nil {
|
||||
|
|
|
|||
15
pkg/agentic/queue_example_test.go
Normal file
15
pkg/agentic/queue_example_test.go
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func Example_baseAgent() {
|
||||
core.Println(baseAgent("codex:gpt-5.4"))
|
||||
core.Println(baseAgent("claude"))
|
||||
// Output:
|
||||
// codex
|
||||
// claude
|
||||
}
|
||||
|
|
@ -94,6 +94,7 @@ rates:
|
|||
os.WriteFile(filepath.Join(root, "agents.yaml"), []byte(cfg), 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -112,6 +113,7 @@ func TestQueue_CountRunningByModel_Good_NoWorkspaces(t *testing.T) {
|
|||
os.MkdirAll(filepath.Join(root, "workspace"), 0o755)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -126,8 +128,8 @@ func TestQueue_DrainQueue_Good_NoCoreFallsBackToMutex(t *testing.T) {
|
|||
os.MkdirAll(filepath.Join(root, "workspace"), 0o755)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: nil,
|
||||
frozen: false,
|
||||
core: nil,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -140,6 +142,7 @@ func TestQueue_DrainOne_Good_NoWorkspaces(t *testing.T) {
|
|||
os.MkdirAll(filepath.Join(root, "workspace"), 0o755)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -159,6 +162,7 @@ func TestQueue_DrainOne_Good_SkipsNonQueued(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(ws, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -178,6 +182,7 @@ func TestQueue_DrainOne_Good_SkipsBackedOffPool(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(ws, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: map[string]time.Time{
|
||||
"codex": time.Now().Add(1 * time.Hour),
|
||||
|
|
@ -202,7 +207,7 @@ func TestQueue_CanDispatchAgent_Ugly(t *testing.T) {
|
|||
})
|
||||
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -223,7 +228,7 @@ func TestQueue_DrainQueue_Ugly(t *testing.T) {
|
|||
|
||||
c := core.New()
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
frozen: false,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -258,7 +263,7 @@ func TestQueue_CanDispatchAgent_Bad_AgentAtLimit(t *testing.T) {
|
|||
})
|
||||
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -287,6 +292,7 @@ func TestQueue_CountRunningByAgent_Bad_WrongAgentType(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(ws, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -306,6 +312,7 @@ func TestQueue_CountRunningByAgent_Ugly_CorruptStatusJSON(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(ws, "status.json"), []byte("{not valid json!!!"), 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -333,6 +340,7 @@ func TestQueue_CountRunningByModel_Bad_NoMatchingModel(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(ws, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -361,6 +369,7 @@ func TestQueue_CountRunningByModel_Ugly_ModelMismatch(t *testing.T) {
|
|||
}
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -387,6 +396,7 @@ rates:
|
|||
os.WriteFile(filepath.Join(root, "agents.yaml"), []byte(cfg), 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -411,6 +421,7 @@ rates:
|
|||
os.WriteFile(filepath.Join(root, "agents.yaml"), []byte(cfg), 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -455,7 +466,7 @@ func TestQueue_DrainOne_Bad_QueuedButAtConcurrencyLimit(t *testing.T) {
|
|||
})
|
||||
|
||||
s := &PrepSubsystem{
|
||||
core: c,
|
||||
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -478,6 +489,7 @@ func TestQueue_DrainOne_Ugly_QueuedButInBackoffWindow(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(ws, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: map[string]time.Time{
|
||||
"codex": time.Now().Add(1 * time.Hour), // pool is backed off
|
||||
|
|
@ -539,6 +551,7 @@ rates:
|
|||
require.True(t, fs.Write(core.JoinPath(root, "agents.yaml"), cfg).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -560,6 +573,7 @@ func TestQueue_LoadAgentsConfig_Bad(t *testing.T) {
|
|||
require.True(t, fs.Write(core.JoinPath(root, "agents.yaml"), "{{{not yaml!!!").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -577,6 +591,7 @@ func TestQueue_LoadAgentsConfig_Ugly(t *testing.T) {
|
|||
// No agents.yaml file at all — should return defaults
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -604,6 +619,7 @@ func TestQueue_DrainQueue_Bad_FrozenQueueDoesNothing(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(ws, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: true, // queue is frozen
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -18,7 +19,7 @@ func TestQueue_CountRunningByModel_Good_Empty(t *testing.T) {
|
|||
root := t.TempDir()
|
||||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
assert.Equal(t, 0, s.countRunningByModel("claude:opus"))
|
||||
}
|
||||
|
||||
|
|
@ -35,7 +36,7 @@ func TestQueue_CountRunningByModel_Good_SkipsNonRunning(t *testing.T) {
|
|||
PID: 0,
|
||||
}))
|
||||
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
assert.Equal(t, 0, s.countRunningByModel("codex:gpt-5.4"))
|
||||
}
|
||||
|
||||
|
|
@ -51,7 +52,7 @@ func TestQueue_CountRunningByModel_Good_SkipsMismatchedModel(t *testing.T) {
|
|||
PID: 0,
|
||||
}))
|
||||
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
// Asking for gemini:pro — must not count gemini:flash
|
||||
assert.Equal(t, 0, s.countRunningByModel("gemini:pro"))
|
||||
}
|
||||
|
|
@ -68,7 +69,7 @@ func TestQueue_CountRunningByModel_Good_DeepLayout(t *testing.T) {
|
|||
Agent: "codex:gpt-5.4",
|
||||
}))
|
||||
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
// Completed, so count is still 0
|
||||
assert.Equal(t, 0, s.countRunningByModel("codex:gpt-5.4"))
|
||||
}
|
||||
|
|
@ -79,7 +80,7 @@ func TestQueue_DrainQueue_Good_FrozenReturnsImmediately(t *testing.T) {
|
|||
root := t.TempDir()
|
||||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{frozen: true, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), frozen: true, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
// Must not panic and must not block
|
||||
assert.NotPanics(t, func() {
|
||||
s.drainQueue()
|
||||
|
|
@ -90,7 +91,7 @@ func TestQueue_DrainQueue_Good_EmptyWorkspace(t *testing.T) {
|
|||
root := t.TempDir()
|
||||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{frozen: false, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), frozen: false, backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
// No workspaces — must return without error/panic
|
||||
assert.NotPanics(t, func() {
|
||||
s.drainQueue()
|
||||
|
|
@ -100,7 +101,7 @@ func TestQueue_DrainQueue_Good_EmptyWorkspace(t *testing.T) {
|
|||
// --- Poke ---
|
||||
|
||||
func TestRunner_Poke_Good_NilChannel(t *testing.T) {
|
||||
s := &PrepSubsystem{pokeCh: nil}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), pokeCh: nil}
|
||||
// Must not panic when pokeCh is nil
|
||||
assert.NotPanics(t, func() {
|
||||
s.Poke()
|
||||
|
|
@ -108,7 +109,7 @@ func TestRunner_Poke_Good_NilChannel(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRunner_Poke_Good_ChannelReceivesSignal(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
s.pokeCh = make(chan struct{}, 1)
|
||||
|
||||
s.Poke()
|
||||
|
|
@ -116,7 +117,7 @@ func TestRunner_Poke_Good_ChannelReceivesSignal(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRunner_Poke_Good_NonBlockingWhenFull(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
s.pokeCh = make(chan struct{}, 1)
|
||||
// Pre-fill the channel
|
||||
s.pokeCh <- struct{}{}
|
||||
|
|
@ -169,7 +170,7 @@ func TestRunner_Poke_Ugly(t *testing.T) {
|
|||
// but closing + sending would panic. However, Poke uses non-blocking send,
|
||||
// so we test that pokeCh=nil is safe (already tested), and that
|
||||
// double-filling is safe (already tested). Here we test rapid multi-poke.
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
s.pokeCh = make(chan struct{}, 1)
|
||||
|
||||
// Rapid-fire pokes — should all be safe
|
||||
|
|
@ -218,7 +219,7 @@ func TestRunner_StartRunner_Ugly(t *testing.T) {
|
|||
func TestPaths_DefaultBranch_Good_DefaultsToMain(t *testing.T) {
|
||||
// Non-git temp dir — git commands fail, fallback is "main"
|
||||
dir := t.TempDir()
|
||||
branch := DefaultBranch(dir)
|
||||
branch := testPrep.DefaultBranch(dir)
|
||||
assert.Equal(t, "main", branch)
|
||||
}
|
||||
|
||||
|
|
@ -227,7 +228,7 @@ func TestPaths_DefaultBranch_Good_RealGitRepo(t *testing.T) {
|
|||
// Init a real git repo with a main branch
|
||||
require.NoError(t, runGitInit(dir))
|
||||
|
||||
branch := DefaultBranch(dir)
|
||||
branch := testPrep.DefaultBranch(dir)
|
||||
// Any valid branch name — just must not panic or be empty
|
||||
assert.NotEmpty(t, branch)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@
|
|||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"path/filepath"
|
||||
|
|
@ -20,7 +21,7 @@ func TestQueue_BaseAgent_Ugly_MultipleColons(t *testing.T) {
|
|||
|
||||
func TestDispatchConfig_Good_Defaults(t *testing.T) {
|
||||
// loadAgentsConfig falls back to defaults when no config file exists
|
||||
s := &PrepSubsystem{codePath: t.TempDir()}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), codePath: t.TempDir()}
|
||||
t.Setenv("CORE_WORKSPACE", t.TempDir())
|
||||
|
||||
cfg := s.loadAgentsConfig()
|
||||
|
|
@ -36,7 +37,7 @@ func TestQueue_CanDispatchAgent_Good_NoConfig(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
require.True(t, fs.EnsureDir(filepath.Join(root, "workspace")).OK)
|
||||
|
||||
s := &PrepSubsystem{codePath: t.TempDir()}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), codePath: t.TempDir()}
|
||||
assert.True(t, s.canDispatchAgent("gemini"))
|
||||
}
|
||||
|
||||
|
|
@ -46,7 +47,7 @@ func TestQueue_CanDispatchAgent_Good_UnknownAgent(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
require.True(t, fs.EnsureDir(filepath.Join(root, "workspace")).OK)
|
||||
|
||||
s := &PrepSubsystem{codePath: t.TempDir()}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), codePath: t.TempDir()}
|
||||
assert.True(t, s.canDispatchAgent("unknown-agent"))
|
||||
}
|
||||
|
||||
|
|
@ -55,7 +56,7 @@ func TestQueue_CountRunningByAgent_Good_EmptyWorkspace(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
require.True(t, fs.EnsureDir(filepath.Join(root, "workspace")).OK)
|
||||
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
assert.Equal(t, 0, s.countRunningByAgent("gemini"))
|
||||
assert.Equal(t, 0, s.countRunningByAgent("claude"))
|
||||
}
|
||||
|
|
@ -73,13 +74,13 @@ func TestQueue_CountRunningByAgent_Good_NoRunning(t *testing.T) {
|
|||
PID: 0,
|
||||
}))
|
||||
|
||||
s := &PrepSubsystem{}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
assert.Equal(t, 0, s.countRunningByAgent("gemini"))
|
||||
}
|
||||
|
||||
func TestQueue_DelayForAgent_Good_NoConfig(t *testing.T) {
|
||||
// With no config, delay should be 0
|
||||
t.Setenv("CORE_WORKSPACE", t.TempDir())
|
||||
s := &PrepSubsystem{codePath: t.TempDir()}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), codePath: t.TempDir()}
|
||||
assert.Equal(t, 0, int(s.delayForAgent("gemini").Seconds()))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,15 +8,15 @@ import (
|
|||
|
||||
// Register is the service factory for core.WithService.
|
||||
// Returns the PrepSubsystem instance — WithService auto-discovers the name
|
||||
// from the package path and registers it. Startable/Stoppable/HandleIPCEvents
|
||||
// are auto-discovered by RegisterService.
|
||||
// from the package path and registers it.
|
||||
//
|
||||
// core.New(
|
||||
// core.WithService(agentic.ProcessRegister),
|
||||
// core.WithService(agentic.Register),
|
||||
// )
|
||||
func Register(c *core.Core) core.Result {
|
||||
prep := NewPrep()
|
||||
prep.core = c
|
||||
prep.ServiceRuntime = core.NewServiceRuntime(c, AgentOptions{})
|
||||
|
||||
// Load agents config once into Core shared config
|
||||
cfg := prep.loadAgentsConfig()
|
||||
|
|
|
|||
21
pkg/agentic/register_example_test.go
Normal file
21
pkg/agentic/register_example_test.go
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
|
||||
"dappco.re/go/agent/pkg/agentic"
|
||||
)
|
||||
|
||||
func ExampleProcessRegister() {
|
||||
c := core.New(
|
||||
core.WithService(agentic.ProcessRegister),
|
||||
)
|
||||
c.ServiceStartup(context.Background(), nil)
|
||||
|
||||
core.Println(c.Process().Exists())
|
||||
// Output: true
|
||||
}
|
||||
|
|
@ -38,9 +38,9 @@ func TestRegister_Good_CoreWired(t *testing.T) {
|
|||
|
||||
prep, ok := core.ServiceFor[*PrepSubsystem](c, "agentic")
|
||||
require.True(t, ok)
|
||||
// Register must wire s.core — service needs it for config access
|
||||
assert.NotNil(t, prep.core, "Register must set prep.core")
|
||||
assert.Equal(t, c, prep.core)
|
||||
// Register must wire ServiceRuntime — service needs it for Core access
|
||||
assert.NotNil(t, prep.ServiceRuntime, "Register must set ServiceRuntime")
|
||||
assert.Equal(t, c, prep.Core())
|
||||
}
|
||||
|
||||
func TestRegister_Good_AgentsConfigLoaded(t *testing.T) {
|
||||
|
|
@ -97,8 +97,8 @@ func TestPrep_OnStartup_Good_CreatesPokeCh(t *testing.T) {
|
|||
|
||||
assert.Nil(t, s.pokeCh, "pokeCh should be nil before OnStartup")
|
||||
|
||||
err := s.OnStartup(context.Background())
|
||||
require.NoError(t, err)
|
||||
r := s.OnStartup(context.Background())
|
||||
assert.True(t, r.OK)
|
||||
|
||||
assert.NotNil(t, s.pokeCh, "OnStartup must initialise pokeCh via StartRunner")
|
||||
}
|
||||
|
|
@ -111,7 +111,7 @@ func TestPrep_OnStartup_Good_FrozenByDefault(t *testing.T) {
|
|||
s := NewPrep()
|
||||
s.SetCore(c)
|
||||
|
||||
require.NoError(t, s.OnStartup(context.Background()))
|
||||
assert.True(t, s.OnStartup(context.Background()).OK)
|
||||
assert.True(t, s.frozen, "queue must be frozen after OnStartup without CORE_AGENT_DISPATCH=1")
|
||||
}
|
||||
|
||||
|
|
@ -123,8 +123,7 @@ func TestPrep_OnStartup_Good_NoError(t *testing.T) {
|
|||
s := NewPrep()
|
||||
s.SetCore(c)
|
||||
|
||||
err := s.OnStartup(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, s.OnStartup(context.Background()).OK)
|
||||
}
|
||||
|
||||
// --- OnShutdown ---
|
||||
|
|
@ -132,28 +131,28 @@ func TestPrep_OnStartup_Good_NoError(t *testing.T) {
|
|||
func TestPrep_OnShutdown_Good_FreezesQueue(t *testing.T) {
|
||||
t.Setenv("CORE_WORKSPACE", t.TempDir())
|
||||
|
||||
s := &PrepSubsystem{frozen: false}
|
||||
err := s.OnShutdown(context.Background())
|
||||
require.NoError(t, err)
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), frozen: false}
|
||||
r := s.OnShutdown(context.Background())
|
||||
assert.True(t, r.OK)
|
||||
assert.True(t, s.frozen, "OnShutdown must set frozen=true")
|
||||
}
|
||||
|
||||
func TestPrep_OnShutdown_Good_AlreadyFrozen(t *testing.T) {
|
||||
// Calling OnShutdown twice must be idempotent
|
||||
s := &PrepSubsystem{frozen: true}
|
||||
err := s.OnShutdown(context.Background())
|
||||
require.NoError(t, err)
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), frozen: true}
|
||||
r := s.OnShutdown(context.Background())
|
||||
assert.True(t, r.OK)
|
||||
assert.True(t, s.frozen)
|
||||
}
|
||||
|
||||
func TestPrep_OnShutdown_Good_NoError(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
assert.NoError(t, s.OnShutdown(context.Background()))
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{})}
|
||||
assert.True(t, s.OnShutdown(context.Background()).OK)
|
||||
}
|
||||
|
||||
func TestPrep_OnShutdown_Ugly_NilCore(t *testing.T) {
|
||||
// OnShutdown must not panic even if s.core is nil
|
||||
s := &PrepSubsystem{core: nil, frozen: false}
|
||||
s := &PrepSubsystem{ServiceRuntime: nil, frozen: false}
|
||||
assert.NotPanics(t, func() {
|
||||
_ = s.OnShutdown(context.Background())
|
||||
})
|
||||
|
|
|
|||
|
|
@ -4,10 +4,6 @@ package agentic
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
|
@ -97,10 +93,9 @@ func (s *PrepSubsystem) dispatchRemote(ctx context.Context, _ *mcp.CallToolReque
|
|||
}
|
||||
|
||||
url := core.Sprintf("http://%s/mcp", addr)
|
||||
client := &http.Client{Timeout: 30 * time.Second}
|
||||
|
||||
// Step 1: Initialize session
|
||||
sessionID, err := mcpInitialize(ctx, client, url, token)
|
||||
sessionID, err := mcpInitialize(ctx, url, token)
|
||||
if err != nil {
|
||||
return nil, RemoteDispatchOutput{
|
||||
Host: input.Host,
|
||||
|
|
@ -109,8 +104,8 @@ func (s *PrepSubsystem) dispatchRemote(ctx context.Context, _ *mcp.CallToolReque
|
|||
}
|
||||
|
||||
// Step 2: Call the tool
|
||||
body, _ := json.Marshal(rpcReq)
|
||||
result, err := mcpCall(ctx, client, url, token, sessionID, body)
|
||||
body := []byte(core.JSONMarshalString(rpcReq))
|
||||
result, err := mcpCall(ctx, url, token, sessionID, body)
|
||||
if err != nil {
|
||||
return nil, RemoteDispatchOutput{
|
||||
Host: input.Host,
|
||||
|
|
@ -136,13 +131,13 @@ func (s *PrepSubsystem) dispatchRemote(ctx context.Context, _ *mcp.CallToolReque
|
|||
Message string `json:"message"`
|
||||
} `json:"error"`
|
||||
}
|
||||
if json.Unmarshal(result, &rpcResp) == nil {
|
||||
if r := core.JSONUnmarshal(result, &rpcResp); r.OK {
|
||||
if rpcResp.Error != nil {
|
||||
output.Success = false
|
||||
output.Error = rpcResp.Error.Message
|
||||
} else if len(rpcResp.Result.Content) > 0 {
|
||||
var dispatchOut DispatchOutput
|
||||
if json.Unmarshal([]byte(rpcResp.Result.Content[0].Text), &dispatchOut) == nil {
|
||||
if r := core.JSONUnmarshalString(rpcResp.Result.Content[0].Text, &dispatchOut); r.OK {
|
||||
output.Success = dispatchOut.Success
|
||||
output.WorkspaceDir = dispatchOut.WorkspaceDir
|
||||
output.PID = dispatchOut.PID
|
||||
|
|
@ -169,7 +164,7 @@ func resolveHost(host string) string {
|
|||
|
||||
// If no port specified, add default
|
||||
if !core.Contains(host, ":") {
|
||||
return host + ":9101"
|
||||
return core.Concat(host, ":9101")
|
||||
}
|
||||
|
||||
return host
|
||||
|
|
|
|||
|
|
@ -1,126 +1,6 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// MCP remote client helpers.
|
||||
// HTTP transport functions are in transport.go (the ONE net/http file).
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// mcpInitialize performs the MCP initialise handshake over Streamable HTTP.
|
||||
// Returns the session ID from the Mcp-Session-Id header.
|
||||
func mcpInitialize(ctx context.Context, client *http.Client, url, token string) (string, error) {
|
||||
initReq := map[string]any{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"method": "initialize",
|
||||
"params": map[string]any{
|
||||
"protocolVersion": "2025-03-26",
|
||||
"capabilities": map[string]any{},
|
||||
"clientInfo": map[string]any{
|
||||
"name": "core-agent-remote",
|
||||
"version": "0.2.0",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
body, _ := json.Marshal(initReq)
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body))
|
||||
if err != nil {
|
||||
return "", core.E("mcpInitialize", "create request", err)
|
||||
}
|
||||
setHeaders(req, token, "")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", core.E("mcpInitialize", "request failed", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return "", core.E("mcpInitialize", core.Sprintf("HTTP %d", resp.StatusCode), nil)
|
||||
}
|
||||
|
||||
sessionID := resp.Header.Get("Mcp-Session-Id")
|
||||
|
||||
// Drain the SSE response (we don't need the initialise result)
|
||||
drainSSE(resp)
|
||||
|
||||
// Send initialised notification
|
||||
notif := map[string]any{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "notifications/initialized",
|
||||
}
|
||||
notifBody, _ := json.Marshal(notif)
|
||||
|
||||
notifReq, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(notifBody))
|
||||
setHeaders(notifReq, token, sessionID)
|
||||
|
||||
notifResp, err := client.Do(notifReq)
|
||||
if err == nil {
|
||||
notifResp.Body.Close()
|
||||
}
|
||||
|
||||
return sessionID, nil
|
||||
}
|
||||
|
||||
// mcpCall sends a JSON-RPC request and returns the parsed response.
|
||||
// Handles the SSE response format (text/event-stream with data: lines).
|
||||
func mcpCall(ctx context.Context, client *http.Client, url, token, sessionID string, body []byte) ([]byte, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body))
|
||||
if err != nil {
|
||||
return nil, core.E("mcpCall", "create request", err)
|
||||
}
|
||||
setHeaders(req, token, sessionID)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, core.E("mcpCall", "request failed", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, core.E("mcpCall", core.Sprintf("HTTP %d", resp.StatusCode), nil)
|
||||
}
|
||||
|
||||
// Parse SSE response — extract data: lines
|
||||
return readSSEData(resp)
|
||||
}
|
||||
|
||||
// readSSEData reads an SSE response and extracts the JSON from data: lines.
|
||||
func readSSEData(resp *http.Response) ([]byte, error) {
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if core.HasPrefix(line, "data: ") {
|
||||
return []byte(core.TrimPrefix(line, "data: ")), nil
|
||||
}
|
||||
}
|
||||
return nil, core.E("readSSEData", "no data in SSE response", nil)
|
||||
}
|
||||
|
||||
// setHeaders applies standard MCP HTTP headers.
|
||||
func setHeaders(req *http.Request, token, sessionID string) {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json, text/event-stream")
|
||||
if token != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+token)
|
||||
}
|
||||
if sessionID != "" {
|
||||
req.Header.Set("Mcp-Session-Id", sessionID)
|
||||
}
|
||||
}
|
||||
|
||||
// drainSSE reads and discards an SSE response body.
|
||||
func drainSSE(resp *http.Response) {
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
for scanner.Scan() {
|
||||
// Discard
|
||||
}
|
||||
}
|
||||
|
|
|
|||
11
pkg/agentic/remote_client_example_test.go
Normal file
11
pkg/agentic/remote_client_example_test.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExampleDispatchInput_remote() {
|
||||
input := DispatchInput{Repo: "go-io", Task: "Fix tests", Agent: "codex"}
|
||||
core.Println(input.Agent)
|
||||
// Output: codex
|
||||
}
|
||||
|
|
@ -41,7 +41,7 @@ func TestRemoteClient_McpInitialize_Good(t *testing.T) {
|
|||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
sessionID, err := mcpInitialize(context.Background(), srv.Client(), srv.URL, "test-token")
|
||||
sessionID, err := mcpInitialize(context.Background(), srv.URL, "test-token")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "session-abc", sessionID)
|
||||
assert.Equal(t, 2, callCount, "should make init + notification requests")
|
||||
|
|
@ -53,13 +53,13 @@ func TestRemoteClient_McpInitialize_Bad_ServerError(t *testing.T) {
|
|||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
_, err := mcpInitialize(context.Background(), srv.Client(), srv.URL, "")
|
||||
_, err := mcpInitialize(context.Background(), srv.URL, "")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "HTTP 500")
|
||||
}
|
||||
|
||||
func TestRemoteClient_McpInitialize_Bad_Unreachable(t *testing.T) {
|
||||
_, err := mcpInitialize(context.Background(), http.DefaultClient, "http://127.0.0.1:1", "")
|
||||
_, err := mcpInitialize(context.Background(), "http://127.0.0.1:1", "")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "request failed")
|
||||
}
|
||||
|
|
@ -77,7 +77,7 @@ func TestRemoteClient_McpCall_Good(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
body := []byte(`{"jsonrpc":"2.0","id":1,"method":"tools/call"}`)
|
||||
result, err := mcpCall(context.Background(), srv.Client(), srv.URL, "mytoken", "sess-123", body)
|
||||
result, err := mcpCall(context.Background(), srv.URL, "mytoken", "sess-123", body)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(result), "hello")
|
||||
}
|
||||
|
|
@ -88,7 +88,7 @@ func TestRemoteClient_McpCall_Bad_HTTP500(t *testing.T) {
|
|||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
_, err := mcpCall(context.Background(), srv.Client(), srv.URL, "", "", nil)
|
||||
_, err := mcpCall(context.Background(), srv.URL, "", "", nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "HTTP 500")
|
||||
}
|
||||
|
|
@ -100,7 +100,7 @@ func TestRemoteClient_McpCall_Bad_NoSSEData(t *testing.T) {
|
|||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
_, err := mcpCall(context.Background(), srv.Client(), srv.URL, "", "", nil)
|
||||
_, err := mcpCall(context.Background(), srv.URL, "", "", nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no data")
|
||||
}
|
||||
|
|
@ -109,7 +109,7 @@ func TestRemoteClient_McpCall_Bad_NoSSEData(t *testing.T) {
|
|||
|
||||
func TestRemoteClient_SetHeaders_Good_All(t *testing.T) {
|
||||
req, _ := http.NewRequest("POST", "http://example.com", nil)
|
||||
setHeaders(req, "my-token", "my-session")
|
||||
mcpHeaders(req, "my-token", "my-session")
|
||||
|
||||
assert.Equal(t, "application/json", req.Header.Get("Content-Type"))
|
||||
assert.Equal(t, "application/json, text/event-stream", req.Header.Get("Accept"))
|
||||
|
|
@ -119,7 +119,7 @@ func TestRemoteClient_SetHeaders_Good_All(t *testing.T) {
|
|||
|
||||
func TestRemoteClient_SetHeaders_Good_NoToken(t *testing.T) {
|
||||
req, _ := http.NewRequest("POST", "http://example.com", nil)
|
||||
setHeaders(req, "", "")
|
||||
mcpHeaders(req, "", "")
|
||||
|
||||
assert.Empty(t, req.Header.Get("Authorization"))
|
||||
assert.Empty(t, req.Header.Get("Mcp-Session-Id"))
|
||||
|
|
@ -130,7 +130,7 @@ func TestRemoteClient_SetHeaders_Good_NoToken(t *testing.T) {
|
|||
func TestRemoteClient_SetHeaders_Bad(t *testing.T) {
|
||||
// Both token and session empty — only Content-Type and Accept are set
|
||||
req, _ := http.NewRequest("POST", "http://example.com", nil)
|
||||
setHeaders(req, "", "")
|
||||
mcpHeaders(req, "", "")
|
||||
|
||||
assert.Equal(t, "application/json", req.Header.Get("Content-Type"))
|
||||
assert.Equal(t, "application/json, text/event-stream", req.Header.Get("Accept"))
|
||||
|
|
@ -199,7 +199,7 @@ func TestRemoteClient_McpInitialize_Ugly_NonJSONSSE(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
// mcpInitialize drains the SSE body but doesn't parse it — should succeed
|
||||
sessionID, err := mcpInitialize(context.Background(), srv.Client(), srv.URL, "tok")
|
||||
sessionID, err := mcpInitialize(context.Background(), srv.URL, "tok")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "sess-ugly", sessionID)
|
||||
}
|
||||
|
|
@ -213,7 +213,7 @@ func TestRemoteClient_McpCall_Ugly_EmptyResponseBody(t *testing.T) {
|
|||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
_, err := mcpCall(context.Background(), srv.Client(), srv.URL, "", "", nil)
|
||||
_, err := mcpCall(context.Background(), srv.URL, "", "", nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no data")
|
||||
}
|
||||
|
|
@ -242,7 +242,7 @@ func TestRemoteClient_ReadSSEData_Ugly_OnlyEventLines(t *testing.T) {
|
|||
func TestRemoteClient_SetHeaders_Ugly_VeryLongToken(t *testing.T) {
|
||||
req, _ := http.NewRequest("POST", "http://example.com", nil)
|
||||
longToken := strings.Repeat("a", 10000)
|
||||
setHeaders(req, longToken, "sess-123")
|
||||
mcpHeaders(req, longToken, "sess-123")
|
||||
|
||||
assert.Equal(t, "Bearer "+longToken, req.Header.Get("Authorization"))
|
||||
assert.Equal(t, "sess-123", req.Header.Get("Mcp-Session-Id"))
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -44,7 +45,7 @@ func TestRemote_DispatchRemote_Good(t *testing.T) {
|
|||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
_, out, err := s.dispatchRemote(context.Background(), nil, RemoteDispatchInput{
|
||||
Host: srv.Listener.Addr().String(), Repo: "go-io", Task: "Fix tests",
|
||||
})
|
||||
|
|
@ -54,7 +55,7 @@ func TestRemote_DispatchRemote_Good(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRemote_DispatchRemote_Bad(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
|
||||
// Missing host
|
||||
_, _, err := s.dispatchRemote(context.Background(), nil, RemoteDispatchInput{Repo: "go-io", Task: "do"})
|
||||
|
|
@ -97,7 +98,7 @@ func TestRemote_DispatchRemote_Ugly(t *testing.T) {
|
|||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
_, out, err := s.dispatchRemote(context.Background(), nil, RemoteDispatchInput{
|
||||
Host: srv.Listener.Addr().String(), Repo: "go-io", Task: "test",
|
||||
Agent: "claude:opus", Org: "core", Template: "coding", Persona: "eng",
|
||||
|
|
|
|||
11
pkg/agentic/remote_example_test.go
Normal file
11
pkg/agentic/remote_example_test.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExampleDispatchInput() {
|
||||
input := DispatchInput{Repo: "go-io", Task: "Fix tests", Agent: "codex"}
|
||||
core.Println(input.Repo, input.Agent)
|
||||
// Output: go-io codex
|
||||
}
|
||||
|
|
@ -4,10 +4,6 @@ package agentic
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
|
@ -47,13 +43,11 @@ func (s *PrepSubsystem) statusRemote(ctx context.Context, _ *mcp.CallToolRequest
|
|||
token := remoteToken(input.Host)
|
||||
url := "http://" + addr + "/mcp"
|
||||
|
||||
client := &http.Client{Timeout: 15 * time.Second}
|
||||
|
||||
sessionID, err := mcpInitialize(ctx, client, url, token)
|
||||
sessionID, err := mcpInitialize(ctx, url, token)
|
||||
if err != nil {
|
||||
return nil, RemoteStatusOutput{
|
||||
Host: input.Host,
|
||||
Error: "unreachable: " + err.Error(),
|
||||
Error: core.Concat("unreachable: ", err.Error()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -66,13 +60,13 @@ func (s *PrepSubsystem) statusRemote(ctx context.Context, _ *mcp.CallToolRequest
|
|||
"arguments": map[string]any{},
|
||||
},
|
||||
}
|
||||
body, _ := json.Marshal(rpcReq)
|
||||
body := []byte(core.JSONMarshalString(rpcReq))
|
||||
|
||||
result, err := mcpCall(ctx, client, url, token, sessionID, body)
|
||||
result, err := mcpCall(ctx, url, token, sessionID, body)
|
||||
if err != nil {
|
||||
return nil, RemoteStatusOutput{
|
||||
Host: input.Host,
|
||||
Error: "call failed: " + err.Error(),
|
||||
Error: core.Concat("call failed: ", err.Error()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -92,7 +86,7 @@ func (s *PrepSubsystem) statusRemote(ctx context.Context, _ *mcp.CallToolRequest
|
|||
Message string `json:"message"`
|
||||
} `json:"error"`
|
||||
}
|
||||
if json.Unmarshal(result, &rpcResp) != nil {
|
||||
if r := core.JSONUnmarshal(result, &rpcResp); !r.OK {
|
||||
output.Success = false
|
||||
output.Error = "failed to parse response"
|
||||
return nil, output, nil
|
||||
|
|
@ -104,7 +98,7 @@ func (s *PrepSubsystem) statusRemote(ctx context.Context, _ *mcp.CallToolRequest
|
|||
}
|
||||
if len(rpcResp.Result.Content) > 0 {
|
||||
var statusOut StatusOutput
|
||||
if json.Unmarshal([]byte(rpcResp.Result.Content[0].Text), &statusOut) == nil {
|
||||
if r := core.JSONUnmarshalString(rpcResp.Result.Content[0].Text, &statusOut); r.OK {
|
||||
output.Stats = statusOut
|
||||
}
|
||||
}
|
||||
|
|
|
|||
11
pkg/agentic/remote_status_example_test.go
Normal file
11
pkg/agentic/remote_status_example_test.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExampleRemoteStatusOutput() {
|
||||
out := RemoteStatusOutput{Success: true}
|
||||
core.Println(out.Success)
|
||||
// Output: true
|
||||
}
|
||||
|
|
@ -13,6 +13,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -44,7 +45,7 @@ func TestRemoteStatus_StatusRemote_Good(t *testing.T) {
|
|||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
_, out, err := s.statusRemote(context.Background(), nil, RemoteStatusInput{
|
||||
Host: srv.Listener.Addr().String(),
|
||||
})
|
||||
|
|
@ -55,7 +56,7 @@ func TestRemoteStatus_StatusRemote_Good(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRemoteStatus_StatusRemote_Bad(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
|
||||
// Missing host
|
||||
_, _, err := s.statusRemote(context.Background(), nil, RemoteStatusInput{})
|
||||
|
|
@ -108,7 +109,7 @@ func TestRemoteStatus_StatusRemote_Ugly(t *testing.T) {
|
|||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
_, out, _ := s.statusRemote(context.Background(), nil, RemoteStatusInput{Host: srv.Listener.Addr().String()})
|
||||
assert.False(t, out.Success)
|
||||
assert.Contains(t, out.Error, "internal error")
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
|
@ -13,6 +14,7 @@ import (
|
|||
|
||||
func TestPrep_RenderPlan_Good_BugFix(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -28,6 +30,7 @@ func TestPrep_RenderPlan_Good_BugFix(t *testing.T) {
|
|||
|
||||
func TestPrep_RenderPlan_Good_WithVariables(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -44,6 +47,7 @@ func TestPrep_RenderPlan_Good_WithVariables(t *testing.T) {
|
|||
|
||||
func TestPrep_RenderPlan_Bad_UnknownTemplate(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -53,6 +57,7 @@ func TestPrep_RenderPlan_Bad_UnknownTemplate(t *testing.T) {
|
|||
|
||||
func TestPrep_RenderPlan_Good_NoTask(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -65,6 +70,7 @@ func TestPrep_RenderPlan_Good_NoTask(t *testing.T) {
|
|||
|
||||
func TestPrep_RenderPlan_Good_NewFeature(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -78,11 +78,11 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
|||
}
|
||||
|
||||
// Build resume prompt — inline the task and answer, no file references
|
||||
prompt := "You are resuming previous work.\n\nORIGINAL TASK:\n" + st.Task
|
||||
prompt := core.Concat("You are resuming previous work.\n\nORIGINAL TASK:\n", st.Task)
|
||||
if input.Answer != "" {
|
||||
prompt += "\n\nANSWER TO YOUR QUESTION:\n" + input.Answer
|
||||
prompt = core.Concat(prompt, "\n\nANSWER TO YOUR QUESTION:\n", input.Answer)
|
||||
}
|
||||
prompt += "\n\nContinue working. Read BLOCKED.md to see what you were stuck on. Commit when done."
|
||||
prompt = core.Concat(prompt, "\n\nContinue working. Read BLOCKED.md to see what you were stuck on. Commit when done.")
|
||||
|
||||
if input.DryRun {
|
||||
return nil, ResumeOutput{
|
||||
|
|
|
|||
11
pkg/agentic/resume_example_test.go
Normal file
11
pkg/agentic/resume_example_test.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExampleResumeInput() {
|
||||
input := ResumeInput{Workspace: "core/go-io/task-5", Answer: "Use v2 API"}
|
||||
core.Println(input.Workspace)
|
||||
// Output: core/go-io/task-5
|
||||
}
|
||||
|
|
@ -11,6 +11,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -31,7 +32,7 @@ func TestResume_Resume_Good(t *testing.T) {
|
|||
data, _ := json.Marshal(st)
|
||||
os.WriteFile(filepath.Join(ws, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
_, out, err := s.resume(context.Background(), nil, ResumeInput{
|
||||
Workspace: "ws-blocked", Answer: "Use the new Core API", DryRun: true,
|
||||
})
|
||||
|
|
@ -68,7 +69,7 @@ func TestResume_Resume_Bad(t *testing.T) {
|
|||
root := t.TempDir()
|
||||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
|
||||
// Empty workspace
|
||||
_, _, err := s.resume(context.Background(), nil, ResumeInput{})
|
||||
|
|
@ -102,7 +103,7 @@ func TestResume_Resume_Ugly(t *testing.T) {
|
|||
os.MkdirAll(filepath.Join(ws, "repo"), 0o755)
|
||||
exec.Command("git", "init", filepath.Join(ws, "repo")).Run()
|
||||
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
_, _, err := s.resume(context.Background(), nil, ResumeInput{Workspace: "ws-nostatus"})
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no status.json")
|
||||
|
|
|
|||
|
|
@ -4,9 +4,6 @@ package agentic
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
|
|
@ -134,24 +131,20 @@ func (s *PrepSubsystem) reviewQueue(ctx context.Context, _ *mcp.CallToolRequest,
|
|||
|
||||
// findReviewCandidates returns repos that are ahead of GitHub main.
|
||||
func (s *PrepSubsystem) findReviewCandidates(basePath string) []string {
|
||||
r := fs.List(basePath)
|
||||
if !r.OK {
|
||||
return nil
|
||||
}
|
||||
entries := r.Value.([]os.DirEntry)
|
||||
paths := core.PathGlob(core.JoinPath(basePath, "*"))
|
||||
|
||||
var candidates []string
|
||||
for _, e := range entries {
|
||||
if !e.IsDir() {
|
||||
for _, p := range paths {
|
||||
if !fs.IsDir(p) {
|
||||
continue
|
||||
}
|
||||
repoDir := core.JoinPath(basePath, e.Name())
|
||||
if !hasRemote(repoDir, "github") {
|
||||
name := core.PathBase(p)
|
||||
if !s.hasRemote(p, "github") {
|
||||
continue
|
||||
}
|
||||
ahead := commitsAhead(repoDir, "github/main", "HEAD")
|
||||
ahead := s.commitsAhead(p, "github/main", "HEAD")
|
||||
if ahead > 0 {
|
||||
candidates = append(candidates, e.Name())
|
||||
candidates = append(candidates, name)
|
||||
}
|
||||
}
|
||||
return candidates
|
||||
|
|
@ -173,7 +166,8 @@ func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer
|
|||
reviewer = "coderabbit"
|
||||
}
|
||||
command, args := s.buildReviewCommand(repoDir, reviewer)
|
||||
output, err := runCmd(ctx, repoDir, command, args...)
|
||||
r := s.runCmd(ctx, repoDir, command, args...)
|
||||
output, _ := r.Value.(string)
|
||||
|
||||
// Parse rate limit (both reviewers use similar patterns)
|
||||
if core.Contains(output, "Rate limit exceeded") || core.Contains(output, "rate limit") {
|
||||
|
|
@ -183,7 +177,7 @@ func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer
|
|||
}
|
||||
|
||||
// Parse error
|
||||
if err != nil && !core.Contains(output, "No findings") && !core.Contains(output, "no issues") {
|
||||
if !r.OK && !core.Contains(output, "No findings") && !core.Contains(output, "no issues") {
|
||||
result.Verdict = "error"
|
||||
result.Detail = output
|
||||
return result
|
||||
|
|
@ -209,7 +203,7 @@ func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer
|
|||
|
||||
// Push to GitHub and mark PR ready / merge
|
||||
if err := s.pushAndMerge(ctx, repoDir, repo); err != nil {
|
||||
result.Action = "push failed: " + err.Error()
|
||||
result.Action = core.Concat("push failed: ", err.Error())
|
||||
} else {
|
||||
result.Action = "merged"
|
||||
}
|
||||
|
|
@ -247,15 +241,15 @@ func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer
|
|||
|
||||
// pushAndMerge pushes to GitHub dev and merges the PR.
|
||||
func (s *PrepSubsystem) pushAndMerge(ctx context.Context, repoDir, repo string) error {
|
||||
if out, err := gitCmd(ctx, repoDir, "push", "github", "HEAD:refs/heads/dev", "--force"); err != nil {
|
||||
return core.E("pushAndMerge", "push failed: "+out, err)
|
||||
if r := s.gitCmd(ctx, repoDir, "push", "github", "HEAD:refs/heads/dev", "--force"); !r.OK {
|
||||
return core.E("pushAndMerge", "push failed: "+r.Value.(string), nil)
|
||||
}
|
||||
|
||||
// Mark PR ready if draft
|
||||
runCmdOK(ctx, repoDir, "gh", "pr", "ready", "--repo", GitHubOrg()+"/"+repo)
|
||||
s.runCmdOK(ctx, repoDir, "gh", "pr", "ready", "--repo", GitHubOrg()+"/"+repo)
|
||||
|
||||
if out, err := runCmd(ctx, repoDir, "gh", "pr", "merge", "--merge", "--delete-branch"); err != nil {
|
||||
return core.E("pushAndMerge", "merge failed: "+out, err)
|
||||
if r := s.runCmd(ctx, repoDir, "gh", "pr", "merge", "--merge", "--delete-branch"); !r.OK {
|
||||
return core.E("pushAndMerge", "merge failed: "+r.Value.(string), nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -348,23 +342,20 @@ func (s *PrepSubsystem) storeReviewOutput(repoDir, repo, reviewer, output string
|
|||
if !core.Contains(output, "No findings") && !core.Contains(output, "no issues") {
|
||||
entry["verdict"] = "findings"
|
||||
}
|
||||
jsonLine, _ := json.Marshal(entry)
|
||||
jsonLine := core.JSONMarshalString(entry)
|
||||
|
||||
jsonlPath := core.JoinPath(dataDir, "reviews.jsonl")
|
||||
r := fs.Append(jsonlPath)
|
||||
if !r.OK {
|
||||
return
|
||||
}
|
||||
wc := r.Value.(io.WriteCloser)
|
||||
defer wc.Close()
|
||||
wc.Write(append(jsonLine, '\n'))
|
||||
core.WriteAll(r.Value, core.Concat(jsonLine, "\n"))
|
||||
}
|
||||
|
||||
// saveRateLimitState persists rate limit info for cross-run awareness.
|
||||
func (s *PrepSubsystem) saveRateLimitState(info *RateLimitInfo) {
|
||||
path := core.JoinPath(core.Env("DIR_HOME"), ".core", "coderabbit-ratelimit.json")
|
||||
data, _ := json.Marshal(info)
|
||||
fs.Write(path, string(data))
|
||||
fs.Write(path, core.JSONMarshalString(info))
|
||||
}
|
||||
|
||||
// loadRateLimitState reads persisted rate limit info.
|
||||
|
|
@ -375,7 +366,7 @@ func (s *PrepSubsystem) loadRateLimitState() *RateLimitInfo {
|
|||
return nil
|
||||
}
|
||||
var info RateLimitInfo
|
||||
if json.Unmarshal([]byte(r.Value.(string)), &info) != nil {
|
||||
if ur := core.JSONUnmarshalString(r.Value.(string), &info); !ur.OK {
|
||||
return nil
|
||||
}
|
||||
return &info
|
||||
|
|
|
|||
11
pkg/agentic/review_queue_example_test.go
Normal file
11
pkg/agentic/review_queue_example_test.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExampleReviewQueueInput() {
|
||||
input := ReviewQueueInput{Limit: 4, Reviewer: "coderabbit"}
|
||||
core.Println(input.Reviewer, input.Limit)
|
||||
// Output: coderabbit 4
|
||||
}
|
||||
|
|
@ -19,7 +19,7 @@ import (
|
|||
// --- buildReviewCommand ---
|
||||
|
||||
func TestReviewQueue_BuildReviewCommand_Good_CodeRabbit(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
cmd, args := s.buildReviewCommand("/tmp/repo", "coderabbit")
|
||||
assert.Equal(t, "coderabbit", cmd)
|
||||
assert.Contains(t, args, "review")
|
||||
|
|
@ -28,7 +28,7 @@ func TestReviewQueue_BuildReviewCommand_Good_CodeRabbit(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestReviewQueue_BuildReviewCommand_Good_Codex(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
cmd, args := s.buildReviewCommand("/tmp/repo", "codex")
|
||||
assert.Equal(t, "codex", cmd)
|
||||
assert.Contains(t, args, "review")
|
||||
|
|
@ -36,7 +36,7 @@ func TestReviewQueue_BuildReviewCommand_Good_Codex(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestReviewQueue_BuildReviewCommand_Good_DefaultReviewer(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
cmd, args := s.buildReviewCommand("/tmp/repo", "")
|
||||
assert.Equal(t, "coderabbit", cmd)
|
||||
assert.Contains(t, args, "--plain")
|
||||
|
|
@ -55,6 +55,7 @@ func TestSaveLoadRateLimitState_Good_Roundtrip(t *testing.T) {
|
|||
// We need to work around this by using CORE_WORKSPACE for the load,
|
||||
// but save/load use DIR_HOME. Skip if not writable.
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -80,6 +81,7 @@ func TestReviewQueue_StoreReviewOutput_Good(t *testing.T) {
|
|||
// storeReviewOutput uses core.Env("DIR_HOME") so we can't fully control the path
|
||||
// but we can verify it doesn't panic
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -99,6 +101,7 @@ func TestReviewQueue_Good_NoCandidates(t *testing.T) {
|
|||
os.MkdirAll(coreDir, 0o755)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: root,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -135,6 +138,7 @@ func TestStatus_Good_FilteredByStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -226,6 +230,7 @@ func TestReviewQueue_LoadRateLimitState_Ugly(t *testing.T) {
|
|||
})
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -239,6 +244,7 @@ func TestReviewQueue_LoadRateLimitState_Ugly(t *testing.T) {
|
|||
func TestReviewQueue_BuildReviewCommand_Bad(t *testing.T) {
|
||||
// Empty reviewer string — defaults to coderabbit
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -248,7 +254,7 @@ func TestReviewQueue_BuildReviewCommand_Bad(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestReviewQueue_BuildReviewCommand_Ugly(t *testing.T) {
|
||||
s := &PrepSubsystem{backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
s := &PrepSubsystem{ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}), backoff: make(map[string]time.Time), failCount: make(map[string]int)}
|
||||
cmd, args := s.buildReviewCommand("/tmp/repo", "unknown-reviewer")
|
||||
assert.Equal(t, "coderabbit", cmd)
|
||||
assert.Contains(t, args, "--plain")
|
||||
|
|
@ -284,6 +290,7 @@ func TestReviewQueue_ParseRetryAfter_Ugly(t *testing.T) {
|
|||
func TestReviewQueue_StoreReviewOutput_Bad(t *testing.T) {
|
||||
// Empty output
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -295,6 +302,7 @@ func TestReviewQueue_StoreReviewOutput_Bad(t *testing.T) {
|
|||
func TestReviewQueue_StoreReviewOutput_Ugly(t *testing.T) {
|
||||
// Very large output
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -308,6 +316,7 @@ func TestReviewQueue_StoreReviewOutput_Ugly(t *testing.T) {
|
|||
|
||||
func TestReviewQueue_SaveRateLimitState_Good(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -325,6 +334,7 @@ func TestReviewQueue_SaveRateLimitState_Good(t *testing.T) {
|
|||
func TestReviewQueue_SaveRateLimitState_Bad(t *testing.T) {
|
||||
// Save nil info
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -336,6 +346,7 @@ func TestReviewQueue_SaveRateLimitState_Bad(t *testing.T) {
|
|||
func TestReviewQueue_SaveRateLimitState_Ugly(t *testing.T) {
|
||||
// Save with far-future RetryAt
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -354,6 +365,7 @@ func TestReviewQueue_SaveRateLimitState_Ugly(t *testing.T) {
|
|||
func TestReviewQueue_LoadRateLimitState_Good(t *testing.T) {
|
||||
// Write then load valid state
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -378,6 +390,7 @@ func TestReviewQueue_LoadRateLimitState_Good(t *testing.T) {
|
|||
func TestReviewQueue_LoadRateLimitState_Bad(t *testing.T) {
|
||||
// File doesn't exist — should return nil
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
|
|||
16
pkg/agentic/runner_example_test.go
Normal file
16
pkg/agentic/runner_example_test.go
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func ExamplePrepSubsystem_Poke() {
|
||||
s := newPrepWithProcess()
|
||||
s.pokeCh = make(chan struct{}, 1)
|
||||
|
||||
s.Poke()
|
||||
core.Println(len(s.pokeCh))
|
||||
// Output: 1
|
||||
}
|
||||
28
pkg/agentic/runner_test.go
Normal file
28
pkg/agentic/runner_test.go
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRunner_StartRunner_Good(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
assert.Nil(t, s.pokeCh)
|
||||
s.StartRunner()
|
||||
assert.NotNil(t, s.pokeCh)
|
||||
}
|
||||
|
||||
func TestRunner_StartRunner_Bad_AlreadyRunning(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
s.StartRunner()
|
||||
// Second call should not panic
|
||||
assert.NotPanics(t, func() { s.StartRunner() })
|
||||
}
|
||||
|
||||
func TestRunner_Poke_Ugly_NilChannel(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
assert.NotPanics(t, func() { s.Poke() })
|
||||
}
|
||||
17
pkg/agentic/sanitise_example_test.go
Normal file
17
pkg/agentic/sanitise_example_test.go
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func Example_sanitiseBranchSlug() {
|
||||
core.Println(sanitiseBranchSlug("Fix the broken tests!", 40))
|
||||
// Output: fix-the-broken-tests
|
||||
}
|
||||
|
||||
func Example_sanitiseBranchSlug_truncate() {
|
||||
core.Println(len(sanitiseBranchSlug("a very long task description that should be truncated to fit", 20)) <= 20)
|
||||
// Output: true
|
||||
}
|
||||
22
pkg/agentic/sanitise_test.go
Normal file
22
pkg/agentic/sanitise_test.go
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSanitise_SanitiseBranchSlug_Good_Basic(t *testing.T) {
|
||||
assert.Equal(t, "fix-broken-tests", sanitiseBranchSlug("Fix broken tests", 40))
|
||||
}
|
||||
|
||||
func TestSanitise_SanitiseBranchSlug_Bad_Empty(t *testing.T) {
|
||||
assert.Equal(t, "", sanitiseBranchSlug("", 40))
|
||||
}
|
||||
|
||||
func TestSanitise_SanitiseBranchSlug_Ugly_Truncate(t *testing.T) {
|
||||
result := sanitiseBranchSlug("a very long description that exceeds the limit", 10)
|
||||
assert.True(t, len(result) <= 10)
|
||||
}
|
||||
|
|
@ -4,8 +4,6 @@ package agentic
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
|
|
@ -120,22 +118,11 @@ func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label str
|
|||
u := core.Sprintf("%s/api/v1/repos/%s/%s/issues?state=open&limit=10&type=issues",
|
||||
s.forgeURL, org, repo)
|
||||
if label != "" {
|
||||
u += "&labels=" + core.Replace(core.Replace(label, " ", "%20"), "&", "%26")
|
||||
u = core.Concat(u, "&labels=", core.Replace(core.Replace(label, " ", "%20"), "&", "%26"))
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", u, nil)
|
||||
if err != nil {
|
||||
return nil, core.E("scan.listRepoIssues", "failed to create request", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, core.E("scan.listRepoIssues", "failed to list issues for "+repo, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, core.E("scan.listRepoIssues", core.Sprintf("HTTP %d listing issues for %s", resp.StatusCode, repo), nil)
|
||||
r := HTTPGet(ctx, u, s.forgeToken, "token")
|
||||
if !r.OK {
|
||||
return nil, core.E("scan.listRepoIssues", core.Concat("failed to list issues for ", repo), nil)
|
||||
}
|
||||
|
||||
var issues []struct {
|
||||
|
|
@ -149,7 +136,7 @@ func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label str
|
|||
} `json:"assignee"`
|
||||
HTMLURL string `json:"html_url"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&issues)
|
||||
core.JSONUnmarshalString(r.Value.(string), &issues)
|
||||
|
||||
var result []ScanIssue
|
||||
for _, issue := range issues {
|
||||
|
|
|
|||
11
pkg/agentic/scan_example_test.go
Normal file
11
pkg/agentic/scan_example_test.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExampleScanInput() {
|
||||
input := ScanInput{Org: "core", Limit: 10}
|
||||
core.Println(input.Org, input.Limit)
|
||||
// Output: core 10
|
||||
}
|
||||
|
|
@ -11,6 +11,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/forge"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
|
@ -77,6 +78,7 @@ func mockScanServer(t *testing.T) *httptest.Server {
|
|||
func TestScan_Scan_Good(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -100,6 +102,7 @@ func TestScan_Scan_Good(t *testing.T) {
|
|||
func TestScan_Good_AllRepos(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -117,6 +120,7 @@ func TestScan_Good_AllRepos(t *testing.T) {
|
|||
func TestScan_Good_WithLimit(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -134,6 +138,7 @@ func TestScan_Good_WithLimit(t *testing.T) {
|
|||
func TestScan_Good_DefaultLabels(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -151,6 +156,7 @@ func TestScan_Good_DefaultLabels(t *testing.T) {
|
|||
func TestScan_Good_CustomLabels(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -169,6 +175,7 @@ func TestScan_Good_CustomLabels(t *testing.T) {
|
|||
func TestScan_Good_Deduplicates(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -196,6 +203,7 @@ func TestScan_Good_Deduplicates(t *testing.T) {
|
|||
|
||||
func TestScan_Bad_NoToken(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeToken: "",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -211,6 +219,7 @@ func TestScan_Bad_NoToken(t *testing.T) {
|
|||
func TestScan_ListRepoIssues_Good_ReturnsIssues(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -229,6 +238,7 @@ func TestScan_ListRepoIssues_Good_ReturnsIssues(t *testing.T) {
|
|||
func TestScan_ListRepoIssues_Good_EmptyResult(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -244,6 +254,7 @@ func TestScan_ListRepoIssues_Good_EmptyResult(t *testing.T) {
|
|||
func TestScan_ListRepoIssues_Good_AssigneeExtracted(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -265,6 +276,7 @@ func TestScan_ListRepoIssues_Bad_ServerError(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -286,6 +298,7 @@ func TestScan_Scan_Bad(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -310,6 +323,7 @@ func TestScan_Scan_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -329,6 +343,7 @@ func TestScan_Scan_Ugly(t *testing.T) {
|
|||
func TestScan_ListOrgRepos_Good(t *testing.T) {
|
||||
srv := mockScanServer(t)
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -353,6 +368,7 @@ func TestScan_ListOrgRepos_Bad(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -373,6 +389,7 @@ func TestScan_ListOrgRepos_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -405,6 +422,7 @@ func TestScan_ListRepoIssues_Ugly(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -433,6 +451,7 @@ func TestScan_ListRepoIssues_Good_URLRewrite(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
|
|||
12
pkg/agentic/shutdown_example_test.go
Normal file
12
pkg/agentic/shutdown_example_test.go
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
func ExamplePrepSubsystem_Shutdown() {
|
||||
s := newPrepWithProcess()
|
||||
err := s.Shutdown(nil)
|
||||
core.Println(err == nil)
|
||||
// Output: true
|
||||
}
|
||||
29
pkg/agentic/shutdown_test.go
Normal file
29
pkg/agentic/shutdown_test.go
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestShutdown_Shutdown_Good(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
err := s.Shutdown(nil)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestShutdown_Shutdown_Bad_AlreadyFrozen(t *testing.T) {
|
||||
s := newPrepWithProcess()
|
||||
s.frozen = true
|
||||
err := s.Shutdown(nil)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestShutdown_Shutdown_Ugly_NilRuntime(t *testing.T) {
|
||||
s := &PrepSubsystem{}
|
||||
assert.NotPanics(t, func() {
|
||||
_ = s.Shutdown(nil)
|
||||
})
|
||||
}
|
||||
|
|
@ -4,7 +4,6 @@ package agentic
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
|
|
@ -48,11 +47,8 @@ type WorkspaceStatus struct {
|
|||
|
||||
func writeStatus(wsDir string, status *WorkspaceStatus) error {
|
||||
status.UpdatedAt = time.Now()
|
||||
data, err := json.MarshalIndent(status, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if r := fs.Write(core.JoinPath(wsDir, "status.json"), string(data)); !r.OK {
|
||||
statusPath := core.JoinPath(wsDir, "status.json")
|
||||
if r := fs.WriteAtomic(statusPath, core.JSONMarshalString(status)); !r.OK {
|
||||
err, _ := r.Value.(error)
|
||||
return core.E("writeStatus", "failed to write status", err)
|
||||
}
|
||||
|
|
@ -68,8 +64,9 @@ func ReadStatus(wsDir string) (*WorkspaceStatus, error) {
|
|||
return nil, core.E("ReadStatus", "status not found", nil)
|
||||
}
|
||||
var s WorkspaceStatus
|
||||
if err := json.Unmarshal([]byte(r.Value.(string)), &s); err != nil {
|
||||
return nil, err
|
||||
if ur := core.JSONUnmarshalString(r.Value.(string), &s); !ur.OK {
|
||||
err, _ := ur.Value.(error)
|
||||
return nil, core.E("ReadStatus", "invalid status json", err)
|
||||
}
|
||||
return &s, nil
|
||||
}
|
||||
|
|
|
|||
45
pkg/agentic/status_example_test.go
Normal file
45
pkg/agentic/status_example_test.go
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func Example_writeStatus() {
|
||||
dir := (&core.Fs{}).NewUnrestricted().TempDir("example-ws")
|
||||
defer (&core.Fs{}).NewUnrestricted().DeleteAll(dir)
|
||||
|
||||
st := &WorkspaceStatus{
|
||||
Status: "running",
|
||||
Agent: "codex",
|
||||
Repo: "go-io",
|
||||
Task: "Fix tests",
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
err := writeStatus(dir, st)
|
||||
core.Println(err == nil)
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExampleReadStatus() {
|
||||
dir := (&core.Fs{}).NewUnrestricted().TempDir("example-ws")
|
||||
defer (&core.Fs{}).NewUnrestricted().DeleteAll(dir)
|
||||
|
||||
writeStatus(dir, &WorkspaceStatus{
|
||||
Status: "completed",
|
||||
Agent: "claude",
|
||||
Repo: "go-io",
|
||||
})
|
||||
|
||||
st, err := ReadStatus(dir)
|
||||
core.Println(err == nil)
|
||||
core.Println(st.Status)
|
||||
core.Println(st.Agent)
|
||||
// Output:
|
||||
// true
|
||||
// completed
|
||||
// claude
|
||||
}
|
||||
|
|
@ -11,6 +11,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/forge"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
|
@ -24,6 +25,7 @@ func TestStatus_Good_EmptyWorkspace(t *testing.T) {
|
|||
require.True(t, fs.EnsureDir(filepath.Join(root, "workspace")).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -78,6 +80,7 @@ func TestStatus_Good_MixedWorkspaces(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -108,6 +111,7 @@ func TestStatus_Good_DeepLayout(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -128,6 +132,7 @@ func TestStatus_Good_CorruptStatusFile(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(ws, "status.json"), "invalid-json{{{").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -142,6 +147,7 @@ func TestStatus_Good_CorruptStatusFile(t *testing.T) {
|
|||
|
||||
func TestShutdown_DispatchStart_Good(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: true,
|
||||
pokeCh: make(chan struct{}, 1),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -160,6 +166,7 @@ func TestShutdown_ShutdownGraceful_Good(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: false,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -178,6 +185,7 @@ func TestShutdown_ShutdownNow_Good_EmptyWorkspace(t *testing.T) {
|
|||
require.True(t, fs.EnsureDir(filepath.Join(root, "workspace")).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: false,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -207,6 +215,7 @@ func TestShutdown_ShutdownNow_Good_ClearsQueued(t *testing.T) {
|
|||
}
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -242,6 +251,7 @@ func TestPrep_BrainRecall_Good_Success(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainURL: srv.URL,
|
||||
brainKey: "test-brain-key",
|
||||
client: srv.Client(),
|
||||
|
|
@ -264,6 +274,7 @@ func TestPrep_BrainRecall_Good_NoMemories(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainURL: srv.URL,
|
||||
brainKey: "test-brain-key",
|
||||
client: srv.Client(),
|
||||
|
|
@ -278,6 +289,7 @@ func TestPrep_BrainRecall_Good_NoMemories(t *testing.T) {
|
|||
|
||||
func TestPrep_BrainRecall_Bad_NoBrainKey(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainKey: "",
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -295,6 +307,7 @@ func TestPrep_BrainRecall_Bad_ServerError(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
brainURL: srv.URL,
|
||||
brainKey: "test-brain-key",
|
||||
client: srv.Client(),
|
||||
|
|
@ -311,6 +324,7 @@ func TestPrep_BrainRecall_Bad_ServerError(t *testing.T) {
|
|||
|
||||
func TestPrep_PrepWorkspace_Bad_NoRepo(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -325,6 +339,7 @@ func TestPrep_PrepWorkspace_Bad_NoIdentifier(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -342,6 +357,7 @@ func TestPrep_PrepWorkspace_Bad_InvalidRepoName(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
codePath: t.TempDir(),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -377,6 +393,7 @@ func TestPr_ListPRs_Good_SpecificRepo(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -401,6 +418,7 @@ func TestPr_ListPRs_Good_SpecificRepo(t *testing.T) {
|
|||
|
||||
func TestRunner_Poke_Good_SendsSignal(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
pokeCh: make(chan struct{}, 1),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -418,6 +436,7 @@ func TestRunner_Poke_Good_SendsSignal(t *testing.T) {
|
|||
|
||||
func TestRunner_Poke_Good_NonBlocking(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
pokeCh: make(chan struct{}, 1),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -434,6 +453,7 @@ func TestRunner_Poke_Good_NonBlocking(t *testing.T) {
|
|||
|
||||
func TestRunner_Poke_Bad_NilChannel(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
pokeCh: nil,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -506,13 +526,14 @@ func TestWriteReadStatus_Good_AllFields(t *testing.T) {
|
|||
|
||||
func TestPrep_OnShutdown_Good(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: false,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
||||
err := s.OnShutdown(context.Background())
|
||||
assert.NoError(t, err)
|
||||
r := s.OnShutdown(context.Background())
|
||||
assert.True(t, r.OK)
|
||||
assert.True(t, s.frozen)
|
||||
}
|
||||
|
||||
|
|
@ -523,6 +544,7 @@ func TestQueue_DrainQueue_Good_FrozenDoesNothing(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: true,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -552,6 +574,7 @@ func TestPrep_Shutdown_ShutdownNow_Ugly(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: false,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -574,6 +597,7 @@ func TestPrep_Shutdown_ShutdownNow_Ugly(t *testing.T) {
|
|||
|
||||
func TestShutdown_DispatchStart_Bad_NilPokeCh(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: true,
|
||||
pokeCh: nil,
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -589,6 +613,7 @@ func TestShutdown_DispatchStart_Bad_NilPokeCh(t *testing.T) {
|
|||
|
||||
func TestShutdown_DispatchStart_Ugly_AlreadyUnfrozen(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: false, // already unfrozen
|
||||
pokeCh: make(chan struct{}, 1),
|
||||
backoff: make(map[string]time.Time),
|
||||
|
|
@ -609,6 +634,7 @@ func TestShutdown_ShutdownGraceful_Bad_AlreadyFrozen(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", root)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: true, // already frozen
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -638,6 +664,7 @@ func TestShutdown_ShutdownGraceful_Ugly_WithWorkspaces(t *testing.T) {
|
|||
}
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: false,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
@ -670,6 +697,7 @@ func TestShutdown_ShutdownNow_Bad_NoRunningPIDs(t *testing.T) {
|
|||
}
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
frozen: false,
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -223,6 +224,7 @@ func TestStatus_Status_Ugly(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -339,6 +341,7 @@ func TestStatus_Status_Good_PopulatedWorkspaces(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -358,6 +361,7 @@ func TestStatus_Status_Bad_EmptyWorkspaceRoot(t *testing.T) {
|
|||
// Do NOT create the workspace/ subdirectory
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
|
|||
258
pkg/agentic/transport.go
Normal file
258
pkg/agentic/transport.go
Normal file
|
|
@ -0,0 +1,258 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// HTTP transport for Core API streams.
|
||||
// This is the ONE file in core/agent that imports net/http.
|
||||
// All other files use the exported helpers: HTTPGet, HTTPPost, HTTPCall.
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// defaultClient is the shared HTTP client for all transport calls.
|
||||
var defaultClient = &http.Client{Timeout: 30 * time.Second}
|
||||
|
||||
// httpStream implements core.Stream over HTTP request/response.
|
||||
type httpStream struct {
|
||||
client *http.Client
|
||||
url string
|
||||
token string
|
||||
method string
|
||||
response []byte
|
||||
}
|
||||
|
||||
func (s *httpStream) Send(data []byte) error {
|
||||
req, err := http.NewRequestWithContext(context.Background(), s.method, s.url, core.NewReader(string(data)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
if s.token != "" {
|
||||
req.Header.Set("Authorization", core.Concat("token ", s.token))
|
||||
}
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r := core.ReadAll(resp.Body)
|
||||
if !r.OK {
|
||||
return core.E("httpStream.Send", "failed to read response", nil)
|
||||
}
|
||||
s.response = []byte(r.Value.(string))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *httpStream) Receive() ([]byte, error) {
|
||||
return s.response, nil
|
||||
}
|
||||
|
||||
func (s *httpStream) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterHTTPTransport registers the HTTP/HTTPS protocol handler with Core API.
|
||||
//
|
||||
// agentic.RegisterHTTPTransport(c)
|
||||
func RegisterHTTPTransport(c *core.Core) {
|
||||
factory := func(handle *core.DriveHandle) (core.Stream, error) {
|
||||
token := handle.Options.String("token")
|
||||
return &httpStream{
|
||||
client: defaultClient,
|
||||
url: handle.Transport,
|
||||
token: token,
|
||||
method: "POST",
|
||||
}, nil
|
||||
}
|
||||
c.API().RegisterProtocol("http", factory)
|
||||
c.API().RegisterProtocol("https", factory)
|
||||
}
|
||||
|
||||
// --- REST helpers — all HTTP in core/agent routes through these ---
|
||||
|
||||
// HTTPGet performs a GET request. Returns Result{Value: string (response body), OK: bool}.
|
||||
// Auth is "token {token}" for Forge, "Bearer {token}" for Brain.
|
||||
//
|
||||
// r := agentic.HTTPGet(ctx, "https://forge.lthn.ai/api/v1/repos", "my-token", "token")
|
||||
func HTTPGet(ctx context.Context, url, token, authScheme string) core.Result {
|
||||
return httpDo(ctx, "GET", url, "", token, authScheme)
|
||||
}
|
||||
|
||||
// HTTPPost performs a POST request with a JSON body. Returns Result{Value: string, OK: bool}.
|
||||
//
|
||||
// r := agentic.HTTPPost(ctx, url, core.JSONMarshalString(payload), token, "token")
|
||||
func HTTPPost(ctx context.Context, url, body, token, authScheme string) core.Result {
|
||||
return httpDo(ctx, "POST", url, body, token, authScheme)
|
||||
}
|
||||
|
||||
// HTTPPatch performs a PATCH request with a JSON body.
|
||||
//
|
||||
// r := agentic.HTTPPatch(ctx, url, body, token, "token")
|
||||
func HTTPPatch(ctx context.Context, url, body, token, authScheme string) core.Result {
|
||||
return httpDo(ctx, "PATCH", url, body, token, authScheme)
|
||||
}
|
||||
|
||||
// HTTPDelete performs a DELETE request.
|
||||
//
|
||||
// r := agentic.HTTPDelete(ctx, url, body, token, "Bearer")
|
||||
func HTTPDelete(ctx context.Context, url, body, token, authScheme string) core.Result {
|
||||
return httpDo(ctx, "DELETE", url, body, token, authScheme)
|
||||
}
|
||||
|
||||
// HTTPDo performs an HTTP request with the specified method.
|
||||
//
|
||||
// r := agentic.HTTPDo(ctx, "PUT", url, body, token, "token")
|
||||
func HTTPDo(ctx context.Context, method, url, body, token, authScheme string) core.Result {
|
||||
return httpDo(ctx, method, url, body, token, authScheme)
|
||||
}
|
||||
|
||||
// httpDo is the single HTTP execution point. Every HTTP call in core/agent routes here.
|
||||
func httpDo(ctx context.Context, method, url, body, token, authScheme string) core.Result {
|
||||
var req *http.Request
|
||||
var err error
|
||||
|
||||
if body != "" {
|
||||
req, err = http.NewRequestWithContext(ctx, method, url, core.NewReader(body))
|
||||
} else {
|
||||
req, err = http.NewRequestWithContext(ctx, method, url, nil)
|
||||
}
|
||||
if err != nil {
|
||||
return core.Result{OK: false}
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
if token != "" {
|
||||
if authScheme == "" {
|
||||
authScheme = "token"
|
||||
}
|
||||
req.Header.Set("Authorization", core.Concat(authScheme, " ", token))
|
||||
}
|
||||
|
||||
resp, err := defaultClient.Do(req)
|
||||
if err != nil {
|
||||
return core.Result{OK: false}
|
||||
}
|
||||
|
||||
r := core.ReadAll(resp.Body)
|
||||
if !r.OK {
|
||||
return core.Result{OK: false}
|
||||
}
|
||||
|
||||
return core.Result{Value: r.Value.(string), OK: resp.StatusCode < 400}
|
||||
}
|
||||
|
||||
// --- MCP Streamable HTTP Transport ---
|
||||
|
||||
// mcpInitialize performs the MCP initialise handshake over Streamable HTTP.
|
||||
// Returns the session ID from the Mcp-Session-Id header.
|
||||
func mcpInitialize(ctx context.Context, url, token string) (string, error) {
|
||||
initReq := map[string]any{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"method": "initialize",
|
||||
"params": map[string]any{
|
||||
"protocolVersion": "2025-03-26",
|
||||
"capabilities": map[string]any{},
|
||||
"clientInfo": map[string]any{
|
||||
"name": "core-agent-remote",
|
||||
"version": "0.2.0",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
body := core.JSONMarshalString(initReq)
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, core.NewReader(body))
|
||||
if err != nil {
|
||||
return "", core.E("mcpInitialize", "create request", nil)
|
||||
}
|
||||
mcpHeaders(req, token, "")
|
||||
|
||||
resp, err := defaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", core.E("mcpInitialize", "request failed", nil)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return "", core.E("mcpInitialize", core.Sprintf("HTTP %d", resp.StatusCode), nil)
|
||||
}
|
||||
|
||||
sessionID := resp.Header.Get("Mcp-Session-Id")
|
||||
|
||||
// Drain SSE response
|
||||
drainSSE(resp)
|
||||
|
||||
// Send initialised notification
|
||||
notif := core.JSONMarshalString(map[string]any{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "notifications/initialized",
|
||||
})
|
||||
notifReq, _ := http.NewRequestWithContext(ctx, "POST", url, core.NewReader(notif))
|
||||
mcpHeaders(notifReq, token, sessionID)
|
||||
notifResp, err := defaultClient.Do(notifReq)
|
||||
if err == nil {
|
||||
notifResp.Body.Close()
|
||||
}
|
||||
|
||||
return sessionID, nil
|
||||
}
|
||||
|
||||
// mcpCall sends a JSON-RPC request and returns the parsed response.
|
||||
func mcpCall(ctx context.Context, url, token, sessionID string, body []byte) ([]byte, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, core.NewReader(string(body)))
|
||||
if err != nil {
|
||||
return nil, core.E("mcpCall", "create request", nil)
|
||||
}
|
||||
mcpHeaders(req, token, sessionID)
|
||||
|
||||
resp, err := defaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, core.E("mcpCall", "request failed", nil)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, core.E("mcpCall", core.Sprintf("HTTP %d", resp.StatusCode), nil)
|
||||
}
|
||||
|
||||
return readSSEData(resp)
|
||||
}
|
||||
|
||||
// readSSEData reads an SSE response and extracts JSON from data: lines.
|
||||
func readSSEData(resp *http.Response) ([]byte, error) {
|
||||
r := core.ReadAll(resp.Body)
|
||||
if !r.OK {
|
||||
return nil, core.E("readSSEData", "failed to read response", nil)
|
||||
}
|
||||
for _, line := range core.Split(r.Value.(string), "\n") {
|
||||
if core.HasPrefix(line, "data: ") {
|
||||
return []byte(core.TrimPrefix(line, "data: ")), nil
|
||||
}
|
||||
}
|
||||
return nil, core.E("readSSEData", "no data in SSE response", nil)
|
||||
}
|
||||
|
||||
// mcpHeaders applies standard MCP HTTP headers.
|
||||
func mcpHeaders(req *http.Request, token, sessionID string) {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json, text/event-stream")
|
||||
if token != "" {
|
||||
req.Header.Set("Authorization", core.Concat("Bearer ", token))
|
||||
}
|
||||
if sessionID != "" {
|
||||
req.Header.Set("Mcp-Session-Id", sessionID)
|
||||
}
|
||||
}
|
||||
|
||||
// drainSSE reads and discards an SSE response body.
|
||||
func drainSSE(resp *http.Response) {
|
||||
core.ReadAll(resp.Body)
|
||||
}
|
||||
|
|
@ -3,10 +3,7 @@
|
|||
package agentic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
|
|
@ -93,8 +90,8 @@ func (s *PrepSubsystem) attemptVerifyAndMerge(repoDir, org, repo, branch string,
|
|||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := s.forgeMergePR(ctx, org, repo, prNum); err != nil {
|
||||
comment := core.Sprintf("## Tests Passed — Merge Failed\n\n`%s` passed but merge failed: %v", testResult.testCmd, err)
|
||||
if r := s.forgeMergePR(ctx, org, repo, prNum); !r.OK {
|
||||
comment := core.Sprintf("## Tests Passed — Merge Failed\n\n`%s` passed but merge failed", testResult.testCmd)
|
||||
s.commentOnIssue(context.Background(), org, repo, prNum, comment)
|
||||
return mergeConflict
|
||||
}
|
||||
|
|
@ -107,14 +104,14 @@ func (s *PrepSubsystem) attemptVerifyAndMerge(repoDir, org, repo, branch string,
|
|||
// rebaseBranch rebases the current branch onto the default branch and force-pushes.
|
||||
func (s *PrepSubsystem) rebaseBranch(repoDir, branch string) bool {
|
||||
ctx := context.Background()
|
||||
base := DefaultBranch(repoDir)
|
||||
base := s.DefaultBranch(repoDir)
|
||||
|
||||
if !gitCmdOK(ctx, repoDir, "fetch", "origin", base) {
|
||||
if !s.gitCmdOK(ctx, repoDir, "fetch", "origin", base) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !gitCmdOK(ctx, repoDir, "rebase", "origin/"+base) {
|
||||
gitCmdOK(ctx, repoDir, "rebase", "--abort")
|
||||
if !s.gitCmdOK(ctx, repoDir, "rebase", "origin/"+base) {
|
||||
s.gitCmdOK(ctx, repoDir, "rebase", "--abort")
|
||||
return false
|
||||
}
|
||||
|
||||
|
|
@ -128,7 +125,7 @@ func (s *PrepSubsystem) rebaseBranch(repoDir, branch string) bool {
|
|||
repo = st.Repo
|
||||
}
|
||||
forgeRemote := core.Sprintf("ssh://git@forge.lthn.ai:2223/%s/%s.git", org, repo)
|
||||
return gitCmdOK(ctx, repoDir, "push", "--force-with-lease", forgeRemote, branch)
|
||||
return s.gitCmdOK(ctx, repoDir, "push", "--force-with-lease", forgeRemote, branch)
|
||||
}
|
||||
|
||||
// flagForReview adds the "needs-review" label to the PR via Forge API.
|
||||
|
|
@ -140,17 +137,11 @@ func (s *PrepSubsystem) flagForReview(org, repo string, prNum int, result mergeR
|
|||
s.ensureLabel(ctx, org, repo, "needs-review", "e11d48")
|
||||
|
||||
// Add label to PR
|
||||
payload, _ := json.Marshal(map[string]any{
|
||||
payload := core.JSONMarshalString(map[string]any{
|
||||
"labels": []int{s.getLabelID(ctx, org, repo, "needs-review")},
|
||||
})
|
||||
url := core.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/labels", s.forgeURL, org, repo, prNum)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
resp, err := s.client.Do(req)
|
||||
if err == nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
HTTPPost(ctx, url, payload, s.forgeToken, "token")
|
||||
|
||||
// Comment explaining the situation
|
||||
reason := "Tests failed after rebase"
|
||||
|
|
@ -163,36 +154,27 @@ func (s *PrepSubsystem) flagForReview(org, repo string, prNum int, result mergeR
|
|||
|
||||
// ensureLabel creates a label if it doesn't exist.
|
||||
func (s *PrepSubsystem) ensureLabel(ctx context.Context, org, repo, name, colour string) {
|
||||
payload, _ := json.Marshal(map[string]string{
|
||||
payload := core.JSONMarshalString(map[string]string{
|
||||
"name": name,
|
||||
"color": "#" + colour,
|
||||
"color": core.Concat("#", colour),
|
||||
})
|
||||
url := core.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
resp, err := s.client.Do(req)
|
||||
if err == nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
HTTPPost(ctx, url, payload, s.forgeToken, "token")
|
||||
}
|
||||
|
||||
// getLabelID fetches the ID of a label by name.
|
||||
func (s *PrepSubsystem) getLabelID(ctx context.Context, org, repo, name string) int {
|
||||
url := core.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
r := HTTPGet(ctx, url, s.forgeToken, "token")
|
||||
if !r.OK {
|
||||
return 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var labels []struct {
|
||||
ID int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&labels)
|
||||
core.JSONUnmarshalString(r.Value.(string), &labels)
|
||||
for _, l := range labels {
|
||||
if l.Name == name {
|
||||
return l.ID
|
||||
|
|
@ -225,27 +207,27 @@ func (s *PrepSubsystem) runVerification(repoDir string) verifyResult {
|
|||
|
||||
func (s *PrepSubsystem) runGoTests(repoDir string) verifyResult {
|
||||
ctx := context.Background()
|
||||
out, err := runCmdEnv(ctx, repoDir, []string{"GOWORK=off"}, "go", "test", "./...", "-count=1", "-timeout", "120s")
|
||||
passed := err == nil
|
||||
r := s.runCmdEnv(ctx, repoDir, []string{"GOWORK=off"}, "go", "test", "./...", "-count=1", "-timeout", "120s")
|
||||
out := r.Value.(string)
|
||||
exitCode := 0
|
||||
if err != nil {
|
||||
if !r.OK {
|
||||
exitCode = 1
|
||||
}
|
||||
return verifyResult{passed: passed, output: out, exitCode: exitCode, testCmd: "go test ./..."}
|
||||
return verifyResult{passed: r.OK, output: out, exitCode: exitCode, testCmd: "go test ./..."}
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) runPHPTests(repoDir string) verifyResult {
|
||||
ctx := context.Background()
|
||||
out, err := runCmd(ctx, repoDir, "composer", "test", "--no-interaction")
|
||||
if err != nil {
|
||||
r := s.runCmd(ctx, repoDir, "composer", "test", "--no-interaction")
|
||||
if !r.OK {
|
||||
// Try pest as fallback
|
||||
out2, err2 := runCmd(ctx, repoDir, "./vendor/bin/pest", "--no-interaction")
|
||||
if err2 != nil {
|
||||
r2 := s.runCmd(ctx, repoDir, "./vendor/bin/pest", "--no-interaction")
|
||||
if !r2.OK {
|
||||
return verifyResult{passed: false, testCmd: "none", output: "No PHP test runner found (composer test and vendor/bin/pest both unavailable)", exitCode: 1}
|
||||
}
|
||||
return verifyResult{passed: true, output: out2, exitCode: 0, testCmd: "vendor/bin/pest"}
|
||||
return verifyResult{passed: true, output: r2.Value.(string), exitCode: 0, testCmd: "vendor/bin/pest"}
|
||||
}
|
||||
return verifyResult{passed: true, output: out, exitCode: 0, testCmd: "composer test"}
|
||||
return verifyResult{passed: true, output: r.Value.(string), exitCode: 0, testCmd: "composer test"}
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) runNodeTests(repoDir string) verifyResult {
|
||||
|
|
@ -257,47 +239,30 @@ func (s *PrepSubsystem) runNodeTests(repoDir string) verifyResult {
|
|||
var pkg struct {
|
||||
Scripts map[string]string `json:"scripts"`
|
||||
}
|
||||
if json.Unmarshal([]byte(r.Value.(string)), &pkg) != nil || pkg.Scripts["test"] == "" {
|
||||
if ur := core.JSONUnmarshalString(r.Value.(string), &pkg); !ur.OK || pkg.Scripts["test"] == "" {
|
||||
return verifyResult{passed: true, testCmd: "none", output: "No test script in package.json"}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
out, err := runCmd(ctx, repoDir, "npm", "test")
|
||||
passed := err == nil
|
||||
r = s.runCmd(ctx, repoDir, "npm", "test")
|
||||
out := r.Value.(string)
|
||||
exitCode := 0
|
||||
if err != nil {
|
||||
if !r.OK {
|
||||
exitCode = 1
|
||||
}
|
||||
return verifyResult{passed: passed, output: out, exitCode: exitCode, testCmd: "npm test"}
|
||||
return verifyResult{passed: r.OK, output: out, exitCode: exitCode, testCmd: "npm test"}
|
||||
}
|
||||
|
||||
// forgeMergePR merges a PR via the Forge API.
|
||||
func (s *PrepSubsystem) forgeMergePR(ctx context.Context, org, repo string, prNum int) error {
|
||||
payload, _ := json.Marshal(map[string]any{
|
||||
func (s *PrepSubsystem) forgeMergePR(ctx context.Context, org, repo string, prNum int) core.Result {
|
||||
payload := core.JSONMarshalString(map[string]any{
|
||||
"Do": "merge",
|
||||
"merge_message_field": "Auto-merged by core-agent after verification\n\nCo-Authored-By: Virgil <virgil@lethean.io>",
|
||||
"delete_branch_after_merge": true,
|
||||
})
|
||||
|
||||
url := core.Sprintf("%s/api/v1/repos/%s/%s/pulls/%d/merge", s.forgeURL, org, repo, prNum)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return core.E("forgeMergePR", "request failed", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 && resp.StatusCode != 204 {
|
||||
var errBody map[string]any
|
||||
json.NewDecoder(resp.Body).Decode(&errBody)
|
||||
msg, _ := errBody["message"].(string)
|
||||
return core.E("forgeMergePR", core.Sprintf("HTTP %d: %s", resp.StatusCode, msg), nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
return HTTPPost(ctx, url, payload, s.forgeToken, "token")
|
||||
}
|
||||
|
||||
// extractPRNumber gets the PR number from a Forge PR URL.
|
||||
|
|
|
|||
20
pkg/agentic/verify_example_test.go
Normal file
20
pkg/agentic/verify_example_test.go
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func Example_fileExists() {
|
||||
dir := (&core.Fs{}).NewUnrestricted().TempDir("example")
|
||||
defer (&core.Fs{}).NewUnrestricted().DeleteAll(dir)
|
||||
|
||||
(&core.Fs{}).NewUnrestricted().Write(core.JoinPath(dir, "go.mod"), "module test")
|
||||
|
||||
core.Println(fileExists(core.JoinPath(dir, "go.mod")))
|
||||
core.Println(fileExists(core.JoinPath(dir, "missing.txt")))
|
||||
// Output:
|
||||
// true
|
||||
// false
|
||||
}
|
||||
|
|
@ -12,6 +12,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/forge"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
|
@ -33,6 +34,7 @@ func TestPr_CommentOnIssue_Good_PostsCommentOnPR(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -81,6 +83,7 @@ func TestVerify_AutoVerifyAndMerge_Good_FullPipeline(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(wsDir, "status.json"), data, 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -114,6 +117,7 @@ func TestVerify_AttemptVerifyAndMerge_Good_TestsPassMergeSucceeds(t *testing.T)
|
|||
dir := t.TempDir() // No project files = passes verification
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -140,6 +144,7 @@ func TestVerify_AttemptVerifyAndMerge_Bad_MergeFails(t *testing.T) {
|
|||
dir := t.TempDir()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/forge"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
|
@ -35,6 +36,7 @@ func TestVerify_ForgeMergePR_Good_Success(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-forge-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -42,8 +44,8 @@ func TestVerify_ForgeMergePR_Good_Success(t *testing.T) {
|
|||
failCount: make(map[string]int),
|
||||
}
|
||||
|
||||
err := s.forgeMergePR(context.Background(), "core", "test-repo", 42)
|
||||
assert.NoError(t, err)
|
||||
r := s.forgeMergePR(context.Background(), "core", "test-repo", 42)
|
||||
assert.True(t, r.OK)
|
||||
}
|
||||
|
||||
func TestVerify_ForgeMergePR_Good_204Response(t *testing.T) {
|
||||
|
|
@ -53,6 +55,7 @@ func TestVerify_ForgeMergePR_Good_204Response(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -60,8 +63,8 @@ func TestVerify_ForgeMergePR_Good_204Response(t *testing.T) {
|
|||
failCount: make(map[string]int),
|
||||
}
|
||||
|
||||
err := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
|
||||
assert.NoError(t, err)
|
||||
r := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
|
||||
assert.True(t, r.OK)
|
||||
}
|
||||
|
||||
func TestVerify_ForgeMergePR_Bad_ConflictResponse(t *testing.T) {
|
||||
|
|
@ -74,6 +77,7 @@ func TestVerify_ForgeMergePR_Bad_ConflictResponse(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -81,10 +85,9 @@ func TestVerify_ForgeMergePR_Bad_ConflictResponse(t *testing.T) {
|
|||
failCount: make(map[string]int),
|
||||
}
|
||||
|
||||
err := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "409")
|
||||
assert.Contains(t, err.Error(), "merge conflict")
|
||||
r := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
|
||||
assert.False(t, r.OK)
|
||||
assert.Contains(t, r.Value.(string), "merge conflict")
|
||||
}
|
||||
|
||||
func TestVerify_ForgeMergePR_Bad_ServerError(t *testing.T) {
|
||||
|
|
@ -97,6 +100,7 @@ func TestVerify_ForgeMergePR_Bad_ServerError(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -104,9 +108,9 @@ func TestVerify_ForgeMergePR_Bad_ServerError(t *testing.T) {
|
|||
failCount: make(map[string]int),
|
||||
}
|
||||
|
||||
err := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "500")
|
||||
r := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
|
||||
assert.False(t, r.OK)
|
||||
assert.Contains(t, r.Value.(string), "internal server error")
|
||||
}
|
||||
|
||||
func TestVerify_ForgeMergePR_Bad_NetworkError(t *testing.T) {
|
||||
|
|
@ -114,6 +118,7 @@ func TestVerify_ForgeMergePR_Bad_NetworkError(t *testing.T) {
|
|||
srv.Close() // close immediately to cause connection error
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: &http.Client{},
|
||||
|
|
@ -121,8 +126,8 @@ func TestVerify_ForgeMergePR_Bad_NetworkError(t *testing.T) {
|
|||
failCount: make(map[string]int),
|
||||
}
|
||||
|
||||
err := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
|
||||
assert.Error(t, err)
|
||||
r := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
|
||||
assert.False(t, r.OK)
|
||||
}
|
||||
|
||||
// --- extractPRNumber (additional _Ugly cases) ---
|
||||
|
|
@ -163,6 +168,7 @@ func TestVerify_EnsureLabel_Good_CreatesLabel(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -179,6 +185,7 @@ func TestVerify_EnsureLabel_Bad_NetworkError(t *testing.T) {
|
|||
srv.Close()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: &http.Client{},
|
||||
|
|
@ -204,6 +211,7 @@ func TestVerify_GetLabelID_Good_Found(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -224,6 +232,7 @@ func TestVerify_GetLabelID_Bad_NotFound(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -240,6 +249,7 @@ func TestVerify_GetLabelID_Bad_NetworkError(t *testing.T) {
|
|||
srv.Close()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: &http.Client{},
|
||||
|
|
@ -257,6 +267,7 @@ func TestVerify_RunVerification_Good_NoProjectFile(t *testing.T) {
|
|||
dir := t.TempDir() // No go.mod, composer.json, or package.json
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -271,6 +282,7 @@ func TestVerify_RunVerification_Good_GoProject(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "go.mod"), "module test").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -285,6 +297,7 @@ func TestVerify_RunVerification_Good_PHPProject(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "composer.json"), `{"require":{}}`).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -299,6 +312,7 @@ func TestVerify_RunVerification_Good_NodeProject(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "package.json"), `{"scripts":{"test":"echo ok"}}`).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -312,6 +326,7 @@ func TestVerify_RunVerification_Good_NodeNoTestScript(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "package.json"), `{"scripts":{}}`).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -345,6 +360,7 @@ func TestVerify_FileExists_Bad_IsDirectory(t *testing.T) {
|
|||
func TestVerify_AutoVerifyAndMerge_Bad_NoStatus(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -363,6 +379,7 @@ func TestVerify_AutoVerifyAndMerge_Bad_NoPRURL(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -381,6 +398,7 @@ func TestVerify_AutoVerifyAndMerge_Bad_EmptyRepo(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -400,6 +418,7 @@ func TestVerify_AutoVerifyAndMerge_Bad_InvalidPRURL(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -442,6 +461,7 @@ func TestVerify_FlagForReview_Good_AddsLabel(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -475,6 +495,7 @@ func TestVerify_FlagForReview_Good_MergeConflictMessage(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -523,6 +544,7 @@ func TestVerify_AutoVerifyAndMerge_Ugly(t *testing.T) {
|
|||
}))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -561,6 +583,7 @@ func TestVerify_AttemptVerifyAndMerge_Ugly(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "broken.go"), "package broken\n\nfunc Bad() { undeclared() }").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -598,6 +621,7 @@ func TestVerify_EnsureLabel_Ugly_AlreadyExists409(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -620,6 +644,7 @@ func TestVerify_GetLabelID_Ugly_EmptyArray(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -641,6 +666,7 @@ func TestVerify_ForgeMergePR_Ugly_EmptyBody200(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
client: srv.Client(),
|
||||
|
|
@ -648,8 +674,8 @@ func TestVerify_ForgeMergePR_Ugly_EmptyBody200(t *testing.T) {
|
|||
failCount: make(map[string]int),
|
||||
}
|
||||
|
||||
err := s.forgeMergePR(context.Background(), "core", "test-repo", 42)
|
||||
assert.NoError(t, err) // 200 is success even with empty body
|
||||
r := s.forgeMergePR(context.Background(), "core", "test-repo", 42)
|
||||
assert.True(t, r.OK) // 200 is success even with empty body
|
||||
}
|
||||
|
||||
// --- FileExists Ugly ---
|
||||
|
|
@ -673,6 +699,7 @@ func TestVerify_FlagForReview_Bad_AllAPICallsFail(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -700,6 +727,7 @@ func TestVerify_FlagForReview_Ugly_LabelNotFoundZeroID(t *testing.T) {
|
|||
t.Cleanup(srv.Close)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
forge: forge.NewForge(srv.URL, "test-token"),
|
||||
forgeURL: srv.URL,
|
||||
forgeToken: "test-token",
|
||||
|
|
@ -722,6 +750,7 @@ func TestVerify_RunVerification_Bad_GoModButNoGoFiles(t *testing.T) {
|
|||
// go.mod exists but no .go files — go test should fail
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -739,6 +768,7 @@ func TestVerify_RunVerification_Ugly_MultipleProjectFiles(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "package.json"), `{"scripts":{"test":"echo ok"}}`).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -756,6 +786,7 @@ func TestVerify_RunVerification_Ugly_GoAndPHPProjectFiles(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "composer.json"), `{"require":{}}`).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -783,6 +814,7 @@ func TestAdd(t *testing.T) {
|
|||
`).OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -800,6 +832,7 @@ func TestVerify_RunGoTests_Bad(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "broken.go"), "package broken\n\nfunc Bad() { undeclared() }\n").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -817,6 +850,7 @@ func TestVerify_RunGoTests_Ugly(t *testing.T) {
|
|||
require.True(t, fs.Write(filepath.Join(dir, "main.go"), "package empty\n").OK)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
|
|||
13
pkg/agentic/watch_example_test.go
Normal file
13
pkg/agentic/watch_example_test.go
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func ExampleWorkspaceRoot_watch() {
|
||||
root := WorkspaceRoot()
|
||||
core.Println(core.HasSuffix(root, "workspace"))
|
||||
// Output: true
|
||||
}
|
||||
|
|
@ -9,6 +9,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
|
@ -16,6 +17,7 @@ import (
|
|||
|
||||
func TestWatch_ResolveWorkspaceDir_Good_RelativeName(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -26,6 +28,7 @@ func TestWatch_ResolveWorkspaceDir_Good_RelativeName(t *testing.T) {
|
|||
|
||||
func TestWatch_ResolveWorkspaceDir_Good_AbsolutePath(t *testing.T) {
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -60,6 +63,7 @@ func TestWatch_FindActiveWorkspaces_Good_WithActive(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(ws3, "status.json"), st3, 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -77,6 +81,7 @@ func TestWatch_FindActiveWorkspaces_Good_Empty(t *testing.T) {
|
|||
os.MkdirAll(filepath.Join(root, "workspace"), 0o755)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -92,6 +97,7 @@ func TestWatch_FindActiveWorkspaces_Bad(t *testing.T) {
|
|||
t.Setenv("CORE_WORKSPACE", filepath.Join(root, "nonexistent"))
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -119,6 +125,7 @@ func TestWatch_FindActiveWorkspaces_Ugly(t *testing.T) {
|
|||
os.WriteFile(filepath.Join(ws2, "status.json"), st, 0o644)
|
||||
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -134,6 +141,7 @@ func TestWatch_FindActiveWorkspaces_Ugly(t *testing.T) {
|
|||
func TestWatch_ResolveWorkspaceDir_Bad(t *testing.T) {
|
||||
// Empty name
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
@ -145,6 +153,7 @@ func TestWatch_ResolveWorkspaceDir_Bad(t *testing.T) {
|
|||
func TestWatch_ResolveWorkspaceDir_Ugly(t *testing.T) {
|
||||
// Name with path traversal "../.."
|
||||
s := &PrepSubsystem{
|
||||
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
|
||||
backoff: make(map[string]time.Time),
|
||||
failCount: make(map[string]int),
|
||||
}
|
||||
|
|
|
|||
13
pkg/brain/brain_example_test.go
Normal file
13
pkg/brain/brain_example_test.go
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package brain
|
||||
|
||||
import (
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func ExampleRegister_services() {
|
||||
c := core.New(core.WithService(Register))
|
||||
core.Println(c.Services())
|
||||
// Output is non-deterministic (slice order), so no Output comment
|
||||
}
|
||||
|
|
@ -3,10 +3,7 @@
|
|||
package brain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"dappco.re/go/agent/pkg/agentic"
|
||||
|
|
@ -22,7 +19,6 @@ import (
|
|||
type DirectSubsystem struct {
|
||||
apiURL string
|
||||
apiKey string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
var _ coremcp.Subsystem = (*DirectSubsystem)(nil)
|
||||
|
|
@ -57,7 +53,6 @@ func NewDirect() *DirectSubsystem {
|
|||
return &DirectSubsystem{
|
||||
apiURL: apiURL,
|
||||
apiKey: apiKey,
|
||||
client: &http.Client{Timeout: 30 * time.Second},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -114,52 +109,21 @@ func (s *DirectSubsystem) apiCall(ctx context.Context, method, path string, body
|
|||
return nil, core.E("brain.apiCall", "no API key (set CORE_BRAIN_KEY or create ~/.claude/brain.key)", nil)
|
||||
}
|
||||
|
||||
var reqBody *bytes.Reader
|
||||
if body != nil {
|
||||
data, err := json.Marshal(body)
|
||||
if err != nil {
|
||||
core.Error("brain API request marshal failed", "method", method, "path", path, "err", err)
|
||||
return nil, core.E("brain.apiCall", "marshal request", err)
|
||||
}
|
||||
reqBody = bytes.NewReader(data)
|
||||
}
|
||||
|
||||
requestURL := core.Concat(s.apiURL, path)
|
||||
req, err := http.NewRequestWithContext(ctx, method, requestURL, nil)
|
||||
if reqBody != nil {
|
||||
req, err = http.NewRequestWithContext(ctx, method, requestURL, reqBody)
|
||||
var bodyStr string
|
||||
if body != nil {
|
||||
bodyStr = core.JSONMarshalString(body)
|
||||
}
|
||||
if err != nil {
|
||||
core.Error("brain API request creation failed", "method", method, "path", path, "err", err)
|
||||
return nil, core.E("brain.apiCall", "create request", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Authorization", core.Concat("Bearer ", s.apiKey))
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
core.Error("brain API call failed", "method", method, "path", path, "err", err)
|
||||
return nil, core.E("brain.apiCall", "API call failed", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respBuffer := bytes.NewBuffer(nil)
|
||||
if _, err := respBuffer.ReadFrom(resp.Body); err != nil {
|
||||
core.Error("brain API response read failed", "method", method, "path", path, "err", err)
|
||||
return nil, core.E("brain.apiCall", "read response", err)
|
||||
}
|
||||
respData := respBuffer.Bytes()
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
core.Warn("brain API returned error status", "method", method, "path", path, "status", resp.StatusCode)
|
||||
return nil, core.E("brain.apiCall", core.Sprintf("API returned %d: %s", resp.StatusCode, string(respData)), nil)
|
||||
r := agentic.HTTPDo(ctx, method, requestURL, bodyStr, s.apiKey, "Bearer")
|
||||
if !r.OK {
|
||||
core.Error("brain API call failed", "method", method, "path", path)
|
||||
return nil, core.E("brain.apiCall", "API call failed", nil)
|
||||
}
|
||||
|
||||
var result map[string]any
|
||||
if err := json.Unmarshal(respData, &result); err != nil {
|
||||
core.Error("brain API response parse failed", "method", method, "path", path, "err", err)
|
||||
return nil, core.E("brain.apiCall", "parse response", err)
|
||||
if ur := core.JSONUnmarshalString(r.Value.(string), &result); !ur.OK {
|
||||
core.Error("brain API response parse failed", "method", method, "path", path)
|
||||
return nil, core.E("brain.apiCall", "parse response", nil)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue