Compare commits

...

3 commits

Author SHA1 Message Date
b5873a8f31 Merge pull request 'fix(monitor): agent.completed events + nested concurrency config' (#16) from feat/monitor-notifications-and-concurrency into dev
Reviewed-on: #16
2026-03-24 14:44:02 +00:00
Snider
53acf4000d feat(concurrency): nested per-model limits under agent pools
Concurrency config now supports both flat and nested formats:

  claude: 1                    # flat — 1 total
  codex:                       # nested — 2 total, per-model caps
    total: 2
    gpt-5.4: 1
    gpt-5.3-codex-spark: 1

canDispatchAgent checks pool total first, then per-model limit.
countRunningByModel added for exact agent string matching.
ConcurrencyLimit custom YAML unmarshaler handles both int and map.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-24 13:05:41 +00:00
Snider
04e3d492e9 fix(monitor): emit agent.completed per task, verify PIDs for queue.drained
- Export ReadStatus (was readStatus) for cross-package use
- AgentCompleted now emits agent.completed with repo/agent/workspace/status
  for every finished task, not just failures
- queue.drained only fires when genuinely empty — verified by checking
  PIDs are alive via kill(0), not just trusting stale status files
- Fix Docker mount paths: /root/ → /home/dev/ for non-root container
- Update all callers and tests

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-24 13:02:41 +00:00
15 changed files with 203 additions and 83 deletions
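For orientation, this is roughly what the concurrency block of config/agents.yaml could look like once both formats from the commits above are in use. The flat claude/gemini values are the defaults from loadAgentsConfig and the nested codex pool mirrors the commit message; treat it as an illustrative sketch, not the repository's actual config file:

  version: 1
  concurrency:
    claude: 1            # flat — pool total only
    gemini: 3            # flat — pool total only
    codex:               # nested — 2 codex agents total, capped per model
      total: 2
      gpt-5.4: 1
      gpt-5.3-codex-spark: 1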

View file

@@ -13,7 +13,7 @@ import (
 // autoCreatePR pushes the agent's branch and creates a PR on Forge
 // if the agent made any commits beyond the initial clone.
 func (s *PrepSubsystem) autoCreatePR(wsDir string) {
-	st, err := readStatus(wsDir)
+	st, err := ReadStatus(wsDir)
 	if err != nil || st.Branch == "" || st.Repo == "" {
 		return
 	}
@@ -44,7 +44,7 @@ func (s *PrepSubsystem) autoCreatePR(wsDir string) {
 	pushCmd.Dir = repoDir
 	if pushErr := pushCmd.Run(); pushErr != nil {
 		// Push failed — update status with error but don't block
-		if st2, err := readStatus(wsDir); err == nil {
+		if st2, err := ReadStatus(wsDir); err == nil {
 			st2.Question = core.Sprintf("PR push failed: %v", pushErr)
 			writeStatus(wsDir, st2)
 		}
@@ -60,7 +60,7 @@ func (s *PrepSubsystem) autoCreatePR(wsDir string) {
 	prURL, _, err := s.forgeCreatePR(ctx, org, st.Repo, st.Branch, base, title, body)
 	if err != nil {
-		if st2, err := readStatus(wsDir); err == nil {
+		if st2, err := ReadStatus(wsDir); err == nil {
 			st2.Question = core.Sprintf("PR creation failed: %v", err)
 			writeStatus(wsDir, st2)
 		}
@@ -68,7 +68,7 @@ func (s *PrepSubsystem) autoCreatePR(wsDir string) {
 	}
 	// Update status with PR URL
-	if st2, err := readStatus(wsDir); err == nil {
+	if st2, err := ReadStatus(wsDir); err == nil {
 		st2.PRURL = prURL
 		writeStatus(wsDir, st2)
 	}

View file

@@ -156,7 +156,7 @@ func containerCommand(agentType, command string, args []string, repoDir, metaDir
 		"-v", metaDir + ":/workspace/.meta",
 		"-w", "/workspace",
 		// Auth: agent configs only — NO SSH keys, git push runs on host
-		"-v", core.JoinPath(home, ".codex") + ":/root/.codex:ro",
+		"-v", core.JoinPath(home, ".codex") + ":/home/dev/.codex:ro",
 		// API keys — passed by name, Docker resolves from host env
 		"-e", "OPENAI_API_KEY",
 		"-e", "ANTHROPIC_API_KEY",
@@ -175,14 +175,14 @@ func containerCommand(agentType, command string, args []string, repoDir, metaDir
 	// Mount Claude config if dispatching claude agent
 	if command == "claude" {
 		dockerArgs = append(dockerArgs,
-			"-v", core.JoinPath(home, ".claude")+":/root/.claude:ro",
+			"-v", core.JoinPath(home, ".claude")+":/home/dev/.claude:ro",
 		)
 	}
 	// Mount Gemini config if dispatching gemini agent
 	if command == "gemini" {
 		dockerArgs = append(dockerArgs,
-			"-v", core.JoinPath(home, ".gemini")+":/root/.gemini:ro",
+			"-v", core.JoinPath(home, ".gemini")+":/home/dev/.gemini:ro",
 		)
 	}
@@ -228,7 +228,7 @@ func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir string) (int, string, er
 	// Notify monitor directly — no filesystem polling
 	if s.onComplete != nil {
-		st, _ := readStatus(wsDir)
+		st, _ := ReadStatus(wsDir)
 		repo := ""
 		if st != nil {
 			repo = st.Repo
@@ -238,7 +238,7 @@ func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir string) (int, string, er
 	emitStartEvent(agent, core.PathBase(wsDir)) // audit log
 	// Start Forge stopwatch on the issue (time tracking)
-	if st, _ := readStatus(wsDir); st != nil && st.Issue > 0 {
+	if st, _ := ReadStatus(wsDir); st != nil && st.Issue > 0 {
 		org := st.Org
 		if org == "" {
 			org = "core"
@@ -281,7 +281,7 @@ func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir string) (int, string, er
 		}
 	}
-	if st, stErr := readStatus(wsDir); stErr == nil {
+	if st, stErr := ReadStatus(wsDir); stErr == nil {
 		st.Status = finalStatus
 		st.PID = 0
 		st.Question = question
@@ -293,7 +293,7 @@ func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir string) (int, string, er
 	// Rate-limit detection: if agent failed fast (<60s), track consecutive failures
 	pool := baseAgent(agent)
 	if finalStatus == "failed" {
-		if st, _ := readStatus(wsDir); st != nil {
+		if st, _ := ReadStatus(wsDir); st != nil {
 			elapsed := time.Since(st.StartedAt)
 			if elapsed < 60*time.Second {
 				s.failCount[pool]++
@@ -310,7 +310,7 @@ func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir string) (int, string, er
 	}
 	// Stop Forge stopwatch on the issue (time tracking)
-	if st, _ := readStatus(wsDir); st != nil && st.Issue > 0 {
+	if st, _ := ReadStatus(wsDir); st != nil && st.Issue > 0 {
 		org := st.Org
 		if org == "" {
 			org = "core"
@@ -320,7 +320,7 @@ func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir string) (int, string, er
 	// Push notification directly — no filesystem polling
 	if s.onComplete != nil {
-		stNow, _ := readStatus(wsDir)
+		stNow, _ := ReadStatus(wsDir)
 		repoName := ""
 		if stNow != nil {
 			repoName = stNow.Repo
@@ -333,7 +333,7 @@ func (s *PrepSubsystem) spawnAgent(agent, prompt, wsDir string) (int, string, er
 	if !s.runQA(wsDir) {
 		finalStatus = "failed"
 		question = "QA check failed — build or tests did not pass"
-		if st, stErr := readStatus(wsDir); stErr == nil {
+		if st, stErr := ReadStatus(wsDir); stErr == nil {
 			st.Status = finalStatus
 			st.Question = question
 			writeStatus(wsDir, st)

View file

@@ -82,7 +82,7 @@ func (s *PrepSubsystem) DispatchSync(ctx context.Context, input DispatchSyncInpu
 		case <-ticker.C:
 			if pid > 0 && syscall.Kill(pid, 0) != nil {
 				// Process exited — read final status
-				st, err := readStatus(wsDir)
+				st, err := ReadStatus(wsDir)
 				if err != nil {
 					return DispatchSyncResult{Error: "can't read final status"}
 				}

View file

@@ -13,7 +13,7 @@ import (
 // ingestFindings reads the agent output log and creates issues via the API
 // for scan/audit results. Only runs for conventions and security templates.
 func (s *PrepSubsystem) ingestFindings(wsDir string) {
-	st, err := readStatus(wsDir)
+	st, err := ReadStatus(wsDir)
 	if err != nil || st.Status != "completed" {
 		return
 	}

View file

@@ -61,7 +61,7 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in
 	}
 	// Read workspace status for repo, branch, issue context
-	st, err := readStatus(wsDir)
+	st, err := ReadStatus(wsDir)
 	if err != nil {
 		return nil, CreatePROutput{}, core.E("createPR", "no status.json", err)
 	}

View file

@@ -32,14 +32,49 @@ type RateConfig struct {
 	BurstDelay int `yaml:"burst_delay"` // Delay during burst window
 }

+// ConcurrencyLimit supports both flat (int) and nested (map with total + per-model) formats.
+//
+//	claude: 1 → Total=1, Models=nil
+//	codex:    → Total=2, Models={"gpt-5.4": 1, "gpt-5.3-codex-spark": 1}
+//	  total: 2
+//	  gpt-5.4: 1
+//	  gpt-5.3-codex-spark: 1
+type ConcurrencyLimit struct {
+	Total  int
+	Models map[string]int
+}
+
+// UnmarshalYAML handles both int and map forms.
+func (c *ConcurrencyLimit) UnmarshalYAML(value *yaml.Node) error {
+	// Try int first
+	var n int
+	if err := value.Decode(&n); err == nil {
+		c.Total = n
+		return nil
+	}
+	// Try map
+	var m map[string]int
+	if err := value.Decode(&m); err != nil {
+		return err
+	}
+	c.Total = m["total"]
+	c.Models = make(map[string]int)
+	for k, v := range m {
+		if k != "total" {
+			c.Models[k] = v
+		}
+	}
+	return nil
+}
+
 // AgentsConfig is the root of config/agents.yaml.
 //
 //	cfg := agentic.AgentsConfig{Version: 1, Dispatch: agentic.DispatchConfig{DefaultAgent: "claude"}}
 type AgentsConfig struct {
 	Version     int                         `yaml:"version"`
 	Dispatch    DispatchConfig              `yaml:"dispatch"`
-	Concurrency map[string]int              `yaml:"concurrency"`
+	Concurrency map[string]ConcurrencyLimit `yaml:"concurrency"`
 	Rates       map[string]RateConfig       `yaml:"rates"`
 }

 // loadAgentsConfig reads config/agents.yaml from the code path.
@@ -66,9 +101,9 @@ func (s *PrepSubsystem) loadAgentsConfig() *AgentsConfig {
 			DefaultAgent:    "claude",
 			DefaultTemplate: "coding",
 		},
-		Concurrency: map[string]int{
-			"claude": 1,
-			"gemini": 3,
+		Concurrency: map[string]ConcurrencyLimit{
+			"claude": {Total: 1},
+			"gemini": {Total: 3},
 		},
 	}
 }
@@ -126,7 +161,7 @@ func (s *PrepSubsystem) countRunningByAgent(agent string) int {
 	count := 0
 	for _, statusPath := range paths {
-		st, err := readStatus(core.PathDir(statusPath))
+		st, err := ReadStatus(core.PathDir(statusPath))
 		if err != nil || st.Status != "running" {
 			continue
 		}
@@ -142,6 +177,28 @@ func (s *PrepSubsystem) countRunningByAgent(agent string) int {
 	return count
 }

+// countRunningByModel counts running workspaces for a specific agent:model string.
+func (s *PrepSubsystem) countRunningByModel(agent string) int {
+	wsRoot := WorkspaceRoot()
+	old := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json"))
+	deep := core.PathGlob(core.JoinPath(wsRoot, "*", "*", "*", "status.json"))
+	count := 0
+	for _, statusPath := range append(old, deep...) {
+		st, err := ReadStatus(core.PathDir(statusPath))
+		if err != nil || st.Status != "running" {
+			continue
+		}
+		if st.Agent != agent {
+			continue
+		}
+		if st.PID > 0 && syscall.Kill(st.PID, 0) == nil {
+			count++
+		}
+	}
+	return count
+}
+
 // baseAgent strips the model variant (gemini:flash → gemini).
 func baseAgent(agent string) string {
 	// codex:gpt-5.3-codex-spark → codex-spark (separate pool)
@@ -151,15 +208,48 @@ func baseAgent(agent string) string {
 	return core.SplitN(agent, ":", 2)[0]
 }

-// canDispatchAgent checks if we're under the concurrency limit for a specific agent type.
+// canDispatchAgent checks both pool-level and per-model concurrency limits.
+//
+//	codex: {total: 2, models: {gpt-5.4: 1}} → max 2 codex total, max 1 gpt-5.4
 func (s *PrepSubsystem) canDispatchAgent(agent string) bool {
 	cfg := s.loadAgentsConfig()
 	base := baseAgent(agent)
 	limit, ok := cfg.Concurrency[base]
-	if !ok || limit <= 0 {
+	if !ok || limit.Total <= 0 {
 		return true
 	}
-	return s.countRunningByAgent(base) < limit
+
+	// Check pool total
+	if s.countRunningByAgent(base) >= limit.Total {
+		return false
+	}
+	// Check per-model limit if configured
+	if limit.Models != nil {
+		model := modelVariant(agent)
+		if model != "" {
+			if modelLimit, has := limit.Models[model]; has && modelLimit > 0 {
+				if s.countRunningByModel(agent) >= modelLimit {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// modelVariant extracts the model name from an agent string.
+//
+//	codex:gpt-5.4 → gpt-5.4
+//	codex:gpt-5.3-codex-spark → gpt-5.3-codex-spark
+//	claude → ""
+func modelVariant(agent string) string {
+	parts := core.SplitN(agent, ":", 2)
+	if len(parts) < 2 {
+		return ""
+	}
+	return parts[1]
 }

 // drainQueue fills all available concurrency slots from queued workspaces.
@@ -188,7 +278,7 @@ func (s *PrepSubsystem) drainOne() bool {
 	for _, statusPath := range statusFiles {
 		wsDir := core.PathDir(statusPath)
-		st, err := readStatus(wsDir)
+		st, err := ReadStatus(wsDir)
 		if err != nil || st.Status != "queued" {
 			continue
 		}

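To make the new two-level check concrete, here is an illustrative trace of canDispatchAgent using the functions above, assuming the nested codex pool from the commit message (total: 2, gpt-5.4: 1) and one codex:gpt-5.4 agent already running (the running counts are hypothetical):

  canDispatchAgent("codex:gpt-5.4")
    baseAgent("codex:gpt-5.4")               → "codex" (pool lookup key)
    countRunningByAgent("codex") = 1         → under Total 2, pool check passes
    modelVariant("codex:gpt-5.4")            → "gpt-5.4"
    countRunningByModel("codex:gpt-5.4") = 1 → at the per-model cap of 1, returns false

  canDispatchAgent("claude")                 → flat entry, only the pool total is checked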
View file

@@ -26,8 +26,8 @@ func TestDispatchConfig_Good_Defaults(t *testing.T) {
 	cfg := s.loadAgentsConfig()
 	assert.Equal(t, "claude", cfg.Dispatch.DefaultAgent)
 	assert.Equal(t, "coding", cfg.Dispatch.DefaultTemplate)
-	assert.Equal(t, 1, cfg.Concurrency["claude"])
-	assert.Equal(t, 3, cfg.Concurrency["gemini"])
+	assert.Equal(t, 1, cfg.Concurrency["claude"].Total)
+	assert.Equal(t, 3, cfg.Concurrency["gemini"].Total)
 }

 func TestCanDispatchAgent_Good_NoConfig(t *testing.T) {

View file

@@ -52,7 +52,7 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu
 	}
 	// Read current status
-	st, err := readStatus(wsDir)
+	st, err := ReadStatus(wsDir)
 	if err != nil {
 		return nil, ResumeOutput{}, core.E("resume", "no status.json in workspace", err)
 	}

View file

@@ -81,7 +81,7 @@ func (s *PrepSubsystem) shutdownNow(ctx context.Context, _ *mcp.CallToolRequest,
 	for _, statusPath := range statusFiles {
 		wsDir := core.PathDir(statusPath)
-		st, err := readStatus(wsDir)
+		st, err := ReadStatus(wsDir)
 		if err != nil {
 			continue
 		}

View file

@@ -27,7 +27,7 @@ import (
 // WorkspaceStatus represents the current state of an agent workspace.
 //
-//	st, err := readStatus(wsDir)
+//	st, err := ReadStatus(wsDir)
 //	if err == nil && st.Status == "completed" { autoCreatePR(wsDir) }
 type WorkspaceStatus struct {
 	Status string `json:"status"` // running, completed, blocked, failed
@@ -58,10 +58,13 @@ func writeStatus(wsDir string, status *WorkspaceStatus) error {
 	return nil
 }

-func readStatus(wsDir string) (*WorkspaceStatus, error) {
+// ReadStatus parses the status.json in a workspace directory.
+//
+//	st, err := agentic.ReadStatus("/path/to/workspace")
+func ReadStatus(wsDir string) (*WorkspaceStatus, error) {
 	r := fs.Read(core.JoinPath(wsDir, "status.json"))
 	if !r.OK {
-		return nil, core.E("readStatus", "status not found", nil)
+		return nil, core.E("ReadStatus", "status not found", nil)
 	}
 	var s WorkspaceStatus
 	if err := json.Unmarshal([]byte(r.Value.(string)), &s); err != nil {
@@ -125,7 +128,7 @@ func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, inpu
 		wsDir := core.PathDir(statusPath)
 		name := wsDir[len(wsRoot)+1:]
-		st, err := readStatus(wsDir)
+		st, err := ReadStatus(wsDir)
 		if err != nil {
 			out.Total++
 			out.Failed++

View file

@@ -77,7 +77,7 @@ func TestReadStatus_Good(t *testing.T) {
 	require.NoError(t, err)
 	require.True(t, fs.Write(filepath.Join(dir, "status.json"), string(data)).OK)
-	read, err := readStatus(dir)
+	read, err := ReadStatus(dir)
 	require.NoError(t, err)
 	assert.Equal(t, "completed", read.Status)
@@ -91,7 +91,7 @@ func TestReadStatus_Good(t *testing.T) {
 func TestReadStatus_Bad_NoFile(t *testing.T) {
 	dir := t.TempDir()
-	_, err := readStatus(dir)
+	_, err := ReadStatus(dir)
 	assert.Error(t, err)
 }
@@ -99,7 +99,7 @@ func TestReadStatus_Bad_InvalidJSON(t *testing.T) {
 	dir := t.TempDir()
 	require.True(t, fs.Write(filepath.Join(dir, "status.json"), "not json{").OK)
-	_, err := readStatus(dir)
+	_, err := ReadStatus(dir)
 	assert.Error(t, err)
 }
@@ -117,7 +117,7 @@ func TestReadStatus_Good_BlockedWithQuestion(t *testing.T) {
 	require.NoError(t, err)
 	require.True(t, fs.Write(filepath.Join(dir, "status.json"), string(data)).OK)
-	read, err := readStatus(dir)
+	read, err := ReadStatus(dir)
 	require.NoError(t, err)
 	assert.Equal(t, "blocked", read.Status)
@@ -143,7 +143,7 @@ func TestWriteReadStatus_Good_Roundtrip(t *testing.T) {
 	err := writeStatus(dir, original)
 	require.NoError(t, err)
-	read, err := readStatus(dir)
+	read, err := ReadStatus(dir)
 	require.NoError(t, err)
 	assert.Equal(t, original.Status, read.Status)
@@ -168,7 +168,7 @@ func TestWriteStatus_Good_OverwriteExisting(t *testing.T) {
 	err = writeStatus(dir, second)
 	require.NoError(t, err)
-	read, err := readStatus(dir)
+	read, err := ReadStatus(dir)
 	require.NoError(t, err)
 	assert.Equal(t, "completed", read.Status)
 }
@@ -177,6 +177,6 @@ func TestReadStatus_Ugly_EmptyFile(t *testing.T) {
 	dir := t.TempDir()
 	require.True(t, fs.Write(filepath.Join(dir, "status.json"), "").OK)
-	_, err := readStatus(dir)
+	_, err := ReadStatus(dir)
 	assert.Error(t, err)
 }

View file

@@ -22,7 +22,7 @@ import (
 //
 //	agentic_dispatch repo=go-crypt template=verify persona=engineering/engineering-security-engineer
 func (s *PrepSubsystem) autoVerifyAndMerge(wsDir string) {
-	st, err := readStatus(wsDir)
+	st, err := ReadStatus(wsDir)
 	if err != nil || st.PRURL == "" || st.Repo == "" {
 		return
 	}
@@ -40,7 +40,7 @@ func (s *PrepSubsystem) autoVerifyAndMerge(wsDir string) {
 	// markMerged is a helper to avoid repeating the status update.
 	markMerged := func() {
-		if st2, err := readStatus(wsDir); err == nil {
+		if st2, err := ReadStatus(wsDir); err == nil {
 			st2.Status = "merged"
 			writeStatus(wsDir, st2)
 		}
@@ -66,7 +66,7 @@ func (s *PrepSubsystem) autoVerifyAndMerge(wsDir string) {
 	// Both attempts failed — flag for human review
 	s.flagForReview(org, st.Repo, prNum, result)
-	if st2, err := readStatus(wsDir); err == nil {
+	if st2, err := ReadStatus(wsDir); err == nil {
 		st2.Question = "Flagged for review — auto-merge failed after retry"
 		writeStatus(wsDir, st2)
 	}
@@ -129,7 +129,7 @@ func (s *PrepSubsystem) rebaseBranch(repoDir, branch string) bool {
 	}
 	// Force-push the rebased branch to Forge (origin is local clone)
-	st, _ := readStatus(core.PathDir(repoDir))
+	st, _ := ReadStatus(core.PathDir(repoDir))
 	org := "core"
 	repo := ""
 	if st != nil {

View file

@@ -109,7 +109,7 @@ func (s *PrepSubsystem) watch(ctx context.Context, req *mcp.CallToolRequest, inp
 	for ws := range remaining {
 		wsDir := s.resolveWorkspaceDir(ws)
-		st, err := readStatus(wsDir)
+		st, err := ReadStatus(wsDir)
 		if err != nil {
 			continue
 		}
@@ -196,7 +196,7 @@ func (s *PrepSubsystem) findActiveWorkspaces() []string {
 	var active []string
 	for _, entry := range entries {
 		wsDir := core.PathDir(entry)
-		st, err := readStatus(wsDir)
+		st, err := ReadStatus(wsDir)
 		if err != nil {
 			continue
 		}

View file

@@ -17,6 +17,7 @@ import (
 	"os"
 	"path/filepath"
 	"sync"
+	"syscall"
 	"time"

 	"dappco.re/go/agent/pkg/agentic"
@@ -231,7 +232,7 @@ func (m *Subsystem) AgentStarted(agent, repo, workspace string) {
 }

 // AgentCompleted is called when an agent finishes.
-// Only sends notifications for failures. Sends "queue.drained" when all work is done.
+// Emits agent.completed for every finish, then checks if the queue is empty.
 //
 //	mon.AgentCompleted("codex", "go-io", "core/go-io/task-5", "completed")
 func (m *Subsystem) AgentCompleted(agent, repo, workspace, status string) {
@@ -240,53 +241,68 @@ func (m *Subsystem) AgentCompleted(agent, repo, workspace, status string) {
 	m.mu.Unlock()

 	if m.notifier != nil {
-		// Only notify on failures — those need attention
-		if status == "failed" || status == "blocked" {
-			m.notifier.ChannelSend(context.Background(), "agent.failed", map[string]any{
-				"repo":   repo,
-				"agent":  agent,
-				"status": status,
-			})
-		}
+		m.notifier.ChannelSend(context.Background(), "agent.completed", map[string]any{
+			"repo":      repo,
+			"agent":     agent,
+			"workspace": workspace,
+			"status":    status,
+		})
 	}

-	// Check if queue is drained (0 running + 0 queued)
 	m.Poke()
 	go m.checkIdleAfterDelay()
 }

-// checkIdleAfterDelay waits briefly then checks if the fleet is idle.
-// Sends a single "queue.drained" notification when all work stops.
+// checkIdleAfterDelay waits briefly then checks if the fleet is genuinely idle.
+// Only emits queue.drained when there are truly zero running or queued agents,
+// verified by checking PIDs are alive, not just trusting status files.
 func (m *Subsystem) checkIdleAfterDelay() {
-	time.Sleep(5 * time.Second) // wait for runner to fill slots
+	time.Sleep(5 * time.Second) // wait for queue drain to fill slots
 	if m.notifier == nil {
 		return
 	}
-	// Quick count — scan for running/queued
-	running := 0
-	queued := 0
+	running, queued := m.countLiveWorkspaces()
+	if running == 0 && queued == 0 {
+		m.notifier.ChannelSend(context.Background(), "queue.drained", map[string]any{
+			"running": running,
+			"queued":  queued,
+		})
+	}
+}
+
+// countLiveWorkspaces counts workspaces that are genuinely active.
+// For "running" status, verifies the PID is still alive.
+func (m *Subsystem) countLiveWorkspaces() (running, queued int) {
 	wsRoot := agentic.WorkspaceRoot()
 	old := core.PathGlob(core.JoinPath(wsRoot, "*", "status.json"))
 	deep := core.PathGlob(core.JoinPath(wsRoot, "*", "*", "*", "status.json"))
 	for _, path := range append(old, deep...) {
-		r := fs.Read(path)
-		if !r.OK {
+		wsDir := core.PathDir(path)
+		st, err := agentic.ReadStatus(wsDir)
+		if err != nil {
 			continue
 		}
-		s := r.Value.(string)
-		if core.Contains(s, `"status":"running"`) {
-			running++
-		} else if core.Contains(s, `"status":"queued"`) {
+		switch st.Status {
+		case "running":
+			if st.PID > 0 && pidAlive(st.PID) {
+				running++
+			}
+		case "queued":
 			queued++
 		}
 	}
+	return
+}

-	if running == 0 && queued == 0 {
-		m.notifier.ChannelSend(context.Background(), "queue.drained", map[string]any{
-			"message": "all work complete",
-		})
-	}
+// pidAlive checks whether a process is still running.
+func pidAlive(pid int) bool {
+	proc, err := os.FindProcess(pid)
+	if err != nil {
+		return false
+	}
+	err = proc.Signal(syscall.Signal(0))
+	return err == nil
 }

 func (m *Subsystem) loop(ctx context.Context) {
@@ -430,11 +446,20 @@ func (m *Subsystem) checkCompletions() string {
 		return ""
 	}

-	// Only notify on queue drain (0 running + 0 queued) — individual completions are noise
-	if m.notifier != nil && running == 0 && queued == 0 {
+	// Emit agent.completed for each newly finished task
+	if m.notifier != nil {
+		for _, desc := range newlyCompleted {
+			m.notifier.ChannelSend(context.Background(), "agent.completed", map[string]any{
+				"description": desc,
+			})
+		}
+	}
+
+	// Only emit queue.drained when genuinely empty — verified by live PID check
+	liveRunning, liveQueued := m.countLiveWorkspaces()
+	if m.notifier != nil && liveRunning == 0 && liveQueued == 0 {
 		m.notifier.ChannelSend(context.Background(), "queue.drained", map[string]any{
 			"completed": len(newlyCompleted),
-			"message":   "all work complete",
 		})
 	}

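For reference, these are the notification shapes the new monitor code sends. Channel and field names are taken from the ChannelSend calls above; the values are illustrative, borrowed from the AgentCompleted doc comment:

  agent.completed  {repo: "go-io", agent: "codex", workspace: "core/go-io/task-5", status: "completed"}  — from AgentCompleted
  agent.completed  {description: "..."}                          — one per newly finished task, from checkCompletions
  queue.drained    {running: 0, queued: 0}                       — from checkIdleAfterDelay
  queue.drained    {completed: 2}                                — from checkCompletions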
View file

@@ -146,10 +146,12 @@ func TestCheckCompletions_Good_NewCompletions(t *testing.T) {
 	assert.Contains(t, msg, "2 agent(s) completed")

 	events := notifier.Events()
-	require.Len(t, events, 1)
-	assert.Equal(t, "agent.complete", events[0].channel)
-	eventData := events[0].data.(map[string]any)
-	assert.Equal(t, 2, eventData["count"])
+	require.Len(t, events, 3) // 2 agent.completed + 1 queue.drained
+	assert.Equal(t, "agent.completed", events[0].channel)
+	assert.Equal(t, "agent.completed", events[1].channel)
+	assert.Equal(t, "queue.drained", events[2].channel)
+	drainData := events[2].data.(map[string]any)
+	assert.Equal(t, 2, drainData["completed"])
 }

 func TestCheckCompletions_Good_MixedStatuses(t *testing.T) {