refactor: move brain + agentic packages into core/agent, use core/cli
Brain and agentic subsystems now live in core/agent/pkg/ instead of core/mcp/pkg/mcp/. The core-agent binary now uses core/cli for a proper command framework. Usage: `core-agent mcp`. One repo, one clone, everything works. Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
84e2b62beb
commit
2ea50959f2
18 changed files with 3749 additions and 26 deletions
|
|
@ -1,37 +1,37 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"forge.lthn.ai/core/agent/pkg/agentic"
|
||||
"forge.lthn.ai/core/agent/pkg/brain"
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/mcp/pkg/mcp"
|
||||
"forge.lthn.ai/core/mcp/pkg/mcp/agentic"
|
||||
"forge.lthn.ai/core/mcp/pkg/mcp/brain"
|
||||
)
|
||||
|
||||
func main() {
|
||||
svc, err := mcp.New(
|
||||
mcp.WithSubsystem(brain.NewDirect()),
|
||||
mcp.WithSubsystem(agentic.NewPrep()),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create MCP service: %v", err)
|
||||
if err := cli.Init(cli.Options{
|
||||
AppName: "core-agent",
|
||||
Version: "0.1.0",
|
||||
}); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
mcpCmd := cli.NewCommand("mcp", "Start the MCP server on stdio", "", func(cmd *cli.Command, args []string) error {
|
||||
svc, err := mcp.New(
|
||||
mcp.WithSubsystem(brain.NewDirect()),
|
||||
mcp.WithSubsystem(agentic.NewPrep()),
|
||||
)
|
||||
if err != nil {
|
||||
return cli.Wrap(err, "create MCP service")
|
||||
}
|
||||
|
||||
sigCh := make(chan os.Signal, 1)
|
||||
signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
|
||||
go func() {
|
||||
<-sigCh
|
||||
cancel()
|
||||
}()
|
||||
return svc.Run(cmd.Context())
|
||||
})
|
||||
|
||||
if err := svc.Run(ctx); err != nil {
|
||||
log.Printf("MCP error: %v", err)
|
||||
cli.RootCmd().AddCommand(mcpCmd)
|
||||
|
||||
if err := cli.Execute(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
BIN
core-agent
Executable file
BIN
core-agent
Executable file
Binary file not shown.
8
go.mod
8
go.mod
|
|
@ -4,6 +4,7 @@ go 1.26.0
|
|||
|
||||
require (
|
||||
codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2 v2.2.0
|
||||
forge.lthn.ai/core/api v0.1.2
|
||||
forge.lthn.ai/core/cli v0.3.0
|
||||
forge.lthn.ai/core/config v0.1.0
|
||||
forge.lthn.ai/core/go v0.3.1
|
||||
|
|
@ -15,7 +16,10 @@ require (
|
|||
forge.lthn.ai/core/go-ratelimit v0.1.0
|
||||
forge.lthn.ai/core/go-scm v0.2.0
|
||||
forge.lthn.ai/core/go-store v0.1.3
|
||||
forge.lthn.ai/core/go-ws v0.2.0
|
||||
forge.lthn.ai/core/mcp v0.3.1
|
||||
github.com/gin-gonic/gin v1.12.0
|
||||
github.com/modelcontextprotocol/go-sdk v1.4.1
|
||||
github.com/redis/go-redis/v9 v9.18.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
|
|
@ -23,14 +27,12 @@ require (
|
|||
)
|
||||
|
||||
require (
|
||||
forge.lthn.ai/core/api v0.1.2 // indirect
|
||||
forge.lthn.ai/core/go-crypt v0.1.6 // indirect
|
||||
forge.lthn.ai/core/go-ml v0.1.8 // indirect
|
||||
forge.lthn.ai/core/go-mlx v0.1.0 // indirect
|
||||
forge.lthn.ai/core/go-process v0.2.2 // indirect
|
||||
forge.lthn.ai/core/go-rag v0.1.0 // indirect
|
||||
forge.lthn.ai/core/go-webview v0.1.0 // indirect
|
||||
forge.lthn.ai/core/go-ws v0.2.0 // indirect
|
||||
github.com/42wim/httpsig v1.2.3 // indirect
|
||||
github.com/99designs/gqlgen v0.17.88 // indirect
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
|
|
@ -79,7 +81,6 @@ require (
|
|||
github.com/gin-contrib/sse v1.1.0 // indirect
|
||||
github.com/gin-contrib/static v1.1.5 // indirect
|
||||
github.com/gin-contrib/timeout v1.1.0 // indirect
|
||||
github.com/gin-gonic/gin v1.12.0 // indirect
|
||||
github.com/go-fed/httpsig v1.1.0 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
|
|
@ -121,7 +122,6 @@ require (
|
|||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-localereader v0.0.1 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.21 // indirect
|
||||
github.com/modelcontextprotocol/go-sdk v1.4.1 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
|
||||
|
|
|
|||
231
pkg/agentic/dispatch.go
Normal file
231
pkg/agentic/dispatch.go
Normal file
|
|
@ -0,0 +1,231 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// DispatchInput is the input for agentic_dispatch.
|
||||
type DispatchInput struct {
|
||||
Repo string `json:"repo"` // Target repo (e.g. "go-io")
|
||||
Org string `json:"org,omitempty"` // Forge org (default "core")
|
||||
Task string `json:"task"` // What the agent should do
|
||||
Agent string `json:"agent,omitempty"` // "gemini" (default), "codex", "claude"
|
||||
Template string `json:"template,omitempty"` // "conventions", "security", "coding" (default)
|
||||
PlanTemplate string `json:"plan_template,omitempty"` // Plan template: bug-fix, code-review, new-feature, refactor, feature-port
|
||||
Variables map[string]string `json:"variables,omitempty"` // Template variable substitution
|
||||
Persona string `json:"persona,omitempty"` // Persona: engineering/backend-architect, testing/api-tester, etc.
|
||||
Issue int `json:"issue,omitempty"` // Forge issue to work from
|
||||
DryRun bool `json:"dry_run,omitempty"` // Preview without executing
|
||||
}
|
||||
|
||||
// DispatchOutput is the output for agentic_dispatch.
|
||||
type DispatchOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Agent string `json:"agent"`
|
||||
Repo string `json:"repo"`
|
||||
WorkspaceDir string `json:"workspace_dir"`
|
||||
Prompt string `json:"prompt,omitempty"`
|
||||
PID int `json:"pid,omitempty"`
|
||||
OutputFile string `json:"output_file,omitempty"`
|
||||
}
|
||||
|
||||
// registerDispatchTool wires the agentic_dispatch tool onto the MCP server.
// The handler is s.dispatch, which preps a sandboxed workspace and then
// spawns the requested subagent inside it.
func (s *PrepSubsystem) registerDispatchTool(server *mcp.Server) {
	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_dispatch",
		Description: "Dispatch a subagent (Gemini, Codex, or Claude) to work on a task. Preps a sandboxed workspace first, then spawns the agent inside it. Templates: conventions, security, coding.",
	}, s.dispatch)
}
|
||||
|
||||
// agentCommand resolves an agent spec to an executable name and argument
// list ready for exec. The spec is "name" or "name:model" — e.g. "gemini",
// "gemini:flash", "gemini:pro", "claude", "claude:haiku". The prompt is
// baked into the argument list. Unknown agent names yield an error.
func agentCommand(agent, prompt string) (string, []string, error) {
	base, model, _ := strings.Cut(agent, ":")

	switch base {
	case "gemini":
		args := []string{"-p", prompt, "--yolo", "--sandbox"}
		if model != "" {
			// Gemini model variants are addressed as gemini-2.5-<variant>.
			args = append(args, "-m", "gemini-2.5-"+model)
		}
		return "gemini", args, nil

	case "codex":
		return "codex", []string{"--approval-mode", "full-auto", "-q", prompt}, nil

	case "claude":
		args := []string{"-p", prompt, "--dangerously-skip-permissions"}
		if model != "" {
			args = append(args, "--model", model)
		}
		return "claude", args, nil

	case "local":
		// Local agent is a shell script shipped in the repo checkout under
		// the user's home directory.
		home, _ := os.UserHomeDir()
		script := filepath.Join(home, "Code", "core", "agent", "scripts", "local-agent.sh")
		return "bash", []string{script, prompt}, nil

	default:
		return "", nil, fmt.Errorf("unknown agent: %s", agent)
	}
}
|
||||
|
||||
func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest, input DispatchInput) (*mcp.CallToolResult, DispatchOutput, error) {
|
||||
if input.Repo == "" {
|
||||
return nil, DispatchOutput{}, fmt.Errorf("repo is required")
|
||||
}
|
||||
if input.Task == "" {
|
||||
return nil, DispatchOutput{}, fmt.Errorf("task is required")
|
||||
}
|
||||
if input.Org == "" {
|
||||
input.Org = "core"
|
||||
}
|
||||
if input.Agent == "" {
|
||||
input.Agent = "gemini"
|
||||
}
|
||||
if input.Template == "" {
|
||||
input.Template = "coding"
|
||||
}
|
||||
|
||||
// Step 1: Prep the sandboxed workspace
|
||||
prepInput := PrepInput{
|
||||
Repo: input.Repo,
|
||||
Org: input.Org,
|
||||
Issue: input.Issue,
|
||||
Task: input.Task,
|
||||
Template: input.Template,
|
||||
PlanTemplate: input.PlanTemplate,
|
||||
Variables: input.Variables,
|
||||
Persona: input.Persona,
|
||||
}
|
||||
_, prepOut, err := s.prepWorkspace(ctx, req, prepInput)
|
||||
if err != nil {
|
||||
return nil, DispatchOutput{}, fmt.Errorf("prep workspace failed: %w", err)
|
||||
}
|
||||
|
||||
wsDir := prepOut.WorkspaceDir
|
||||
srcDir := filepath.Join(wsDir, "src")
|
||||
|
||||
// The prompt is just: read PROMPT.md and do the work
|
||||
prompt := "Read PROMPT.md for instructions. All context files (CLAUDE.md, TODO.md, CONTEXT.md, CONSUMERS.md, RECENT.md) are in the parent directory. Work in this directory."
|
||||
|
||||
if input.DryRun {
|
||||
// Read PROMPT.md for the dry run output
|
||||
promptContent, _ := os.ReadFile(filepath.Join(wsDir, "PROMPT.md"))
|
||||
return nil, DispatchOutput{
|
||||
Success: true,
|
||||
Agent: input.Agent,
|
||||
Repo: input.Repo,
|
||||
WorkspaceDir: wsDir,
|
||||
Prompt: string(promptContent),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Step 2: Check per-agent concurrency limit
|
||||
if !s.canDispatchAgent(input.Agent) {
|
||||
// Queue the workspace — write status as "queued" and return
|
||||
writeStatus(wsDir, &WorkspaceStatus{
|
||||
Status: "queued",
|
||||
Agent: input.Agent,
|
||||
Repo: input.Repo,
|
||||
Org: input.Org,
|
||||
Task: input.Task,
|
||||
StartedAt: time.Now(),
|
||||
Runs: 0,
|
||||
})
|
||||
return nil, DispatchOutput{
|
||||
Success: true,
|
||||
Agent: input.Agent,
|
||||
Repo: input.Repo,
|
||||
WorkspaceDir: wsDir,
|
||||
OutputFile: "queued — waiting for a slot",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Step 3: Spawn agent as a detached process
|
||||
// Uses Setpgid so the agent survives parent (MCP server) death.
|
||||
// Output goes directly to log file (not buffered in memory).
|
||||
command, args, err := agentCommand(input.Agent, prompt)
|
||||
if err != nil {
|
||||
return nil, DispatchOutput{}, err
|
||||
}
|
||||
|
||||
outputFile := filepath.Join(wsDir, fmt.Sprintf("agent-%s.log", input.Agent))
|
||||
outFile, err := os.Create(outputFile)
|
||||
if err != nil {
|
||||
return nil, DispatchOutput{}, fmt.Errorf("failed to create log file: %w", err)
|
||||
}
|
||||
|
||||
// Fully detach from terminal:
|
||||
// - Setpgid: own process group
|
||||
// - Stdin from /dev/null
|
||||
// - TERM=dumb prevents terminal control sequences
|
||||
// - NO_COLOR=1 disables colour output
|
||||
devNull, _ := os.Open(os.DevNull)
|
||||
cmd := exec.Command(command, args...)
|
||||
cmd.Dir = srcDir
|
||||
cmd.Stdin = devNull
|
||||
cmd.Stdout = outFile
|
||||
cmd.Stderr = outFile
|
||||
cmd.Env = append(os.Environ(), "TERM=dumb", "NO_COLOR=1", "CI=true")
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
outFile.Close()
|
||||
return nil, DispatchOutput{}, fmt.Errorf("failed to spawn %s: %w", input.Agent, err)
|
||||
}
|
||||
|
||||
pid := cmd.Process.Pid
|
||||
|
||||
// Write initial status
|
||||
writeStatus(wsDir, &WorkspaceStatus{
|
||||
Status: "running",
|
||||
Agent: input.Agent,
|
||||
Repo: input.Repo,
|
||||
Org: input.Org,
|
||||
Task: input.Task,
|
||||
PID: pid,
|
||||
StartedAt: time.Now(),
|
||||
Runs: 1,
|
||||
})
|
||||
|
||||
// Background goroutine: close file handle when process exits,
|
||||
// update status, then drain queue if a slot opened up.
|
||||
go func() {
|
||||
cmd.Wait()
|
||||
outFile.Close()
|
||||
|
||||
// Update status to completed
|
||||
if st, err := readStatus(wsDir); err == nil {
|
||||
st.Status = "completed"
|
||||
st.PID = 0
|
||||
writeStatus(wsDir, st)
|
||||
}
|
||||
|
||||
// Ingest scan findings as issues
|
||||
s.ingestFindings(wsDir)
|
||||
|
||||
// Drain queue: pop next queued workspace and spawn it
|
||||
s.drainQueue()
|
||||
}()
|
||||
|
||||
return nil, DispatchOutput{
|
||||
Success: true,
|
||||
Agent: input.Agent,
|
||||
Repo: input.Repo,
|
||||
WorkspaceDir: wsDir,
|
||||
PID: pid,
|
||||
OutputFile: outputFile,
|
||||
}, nil
|
||||
}
|
||||
266
pkg/agentic/epic.go
Normal file
266
pkg/agentic/epic.go
Normal file
|
|
@ -0,0 +1,266 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// --- agentic_create_epic ---
|
||||
|
||||
// EpicInput is the input for agentic_create_epic.
|
||||
type EpicInput struct {
|
||||
Repo string `json:"repo"` // Target repo (e.g. "go-scm")
|
||||
Org string `json:"org,omitempty"` // Forge org (default "core")
|
||||
Title string `json:"title"` // Epic title
|
||||
Body string `json:"body,omitempty"` // Epic description (above checklist)
|
||||
Tasks []string `json:"tasks"` // Sub-task titles (become child issues)
|
||||
Labels []string `json:"labels,omitempty"` // Labels for epic + children (e.g. ["agentic"])
|
||||
Dispatch bool `json:"dispatch,omitempty"` // Auto-dispatch agents to each child
|
||||
Agent string `json:"agent,omitempty"` // Agent type for dispatch (default "claude")
|
||||
Template string `json:"template,omitempty"` // Prompt template for dispatch (default "coding")
|
||||
}
|
||||
|
||||
// EpicOutput is the output for agentic_create_epic.
|
||||
type EpicOutput struct {
|
||||
Success bool `json:"success"`
|
||||
EpicNumber int `json:"epic_number"`
|
||||
EpicURL string `json:"epic_url"`
|
||||
Children []ChildRef `json:"children"`
|
||||
Dispatched int `json:"dispatched,omitempty"`
|
||||
}
|
||||
|
||||
// ChildRef references a child issue.
|
||||
type ChildRef struct {
|
||||
Number int `json:"number"`
|
||||
Title string `json:"title"`
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
// registerEpicTool wires the agentic_create_epic tool onto the MCP server.
// The handler is s.createEpic.
func (s *PrepSubsystem) registerEpicTool(server *mcp.Server) {
	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_create_epic",
		Description: "Create an epic issue with child issues on Forge. Each task becomes a child issue linked via checklist. Optionally auto-dispatch agents to work each child.",
	}, s.createEpic)
}
|
||||
|
||||
// createEpic handles agentic_create_epic. It creates the child issues
// first (their numbers are needed for the epic's checklist), then the epic
// issue itself, and optionally dispatches an agent per child.
//
// Errors are returned only for validation failures or if the epic issue
// itself cannot be created; failed child-issue creations are silently
// skipped so the epic is still produced with whatever children succeeded.
func (s *PrepSubsystem) createEpic(ctx context.Context, req *mcp.CallToolRequest, input EpicInput) (*mcp.CallToolResult, EpicOutput, error) {
	if input.Title == "" {
		return nil, EpicOutput{}, fmt.Errorf("title is required")
	}
	if len(input.Tasks) == 0 {
		return nil, EpicOutput{}, fmt.Errorf("at least one task is required")
	}
	if s.forgeToken == "" {
		return nil, EpicOutput{}, fmt.Errorf("no Forge token configured")
	}
	if input.Org == "" {
		input.Org = "core"
	}
	if input.Agent == "" {
		input.Agent = "claude"
	}
	if input.Template == "" {
		input.Template = "coding"
	}

	// Ensure the "agentic" label is always part of the label set.
	labels := input.Labels
	hasAgentic := false
	for _, l := range labels {
		if l == "agentic" {
			hasAgentic = true
			break
		}
	}
	if !hasAgentic {
		labels = append(labels, "agentic")
	}

	// Resolve label names to Forge label IDs (creating missing labels).
	labelIDs := s.resolveLabelIDs(ctx, input.Org, input.Repo, labels)

	// Step 1: Create child issues first (we need their numbers for the checklist).
	var children []ChildRef
	for _, task := range input.Tasks {
		child, err := s.createIssue(ctx, input.Org, input.Repo, task, "", labelIDs)
		if err != nil {
			continue // Skip failed children, create what we can
		}
		children = append(children, child)
	}

	// Step 2: Build the epic body with a task checklist referencing each child.
	var body strings.Builder
	if input.Body != "" {
		body.WriteString(input.Body)
		body.WriteString("\n\n")
	}
	body.WriteString("## Tasks\n\n")
	for _, child := range children {
		body.WriteString(fmt.Sprintf("- [ ] #%d %s\n", child.Number, child.Title))
	}

	// Step 3: Create the epic issue with the extra "epic" label.
	// NOTE(review): append to labelIDs may write into its backing array;
	// harmless here since labelIDs is not used afterwards, but clone it if
	// that ever changes.
	epicLabels := append(labelIDs, s.resolveLabelIDs(ctx, input.Org, input.Repo, []string{"epic"})...)
	epic, err := s.createIssue(ctx, input.Org, input.Repo, input.Title, body.String(), epicLabels)
	if err != nil {
		return nil, EpicOutput{}, fmt.Errorf("failed to create epic: %w", err)
	}

	out := EpicOutput{
		Success:    true,
		EpicNumber: epic.Number,
		EpicURL:    epic.URL,
		Children:   children,
	}

	// Step 4: Optionally dispatch agents to each child. Dispatch failures
	// are ignored; only successes increment the counter.
	if input.Dispatch {
		for _, child := range children {
			_, _, err := s.dispatch(ctx, req, DispatchInput{
				Repo:     input.Repo,
				Org:      input.Org,
				Task:     child.Title,
				Agent:    input.Agent,
				Template: input.Template,
				Issue:    child.Number,
			})
			if err == nil {
				out.Dispatched++
			}
		}
	}

	return nil, out, nil
}
|
||||
|
||||
// createIssue creates a single issue on Forge and returns its reference.
|
||||
func (s *PrepSubsystem) createIssue(ctx context.Context, org, repo, title, body string, labelIDs []int64) (ChildRef, error) {
|
||||
payload := map[string]any{
|
||||
"title": title,
|
||||
}
|
||||
if body != "" {
|
||||
payload["body"] = body
|
||||
}
|
||||
if len(labelIDs) > 0 {
|
||||
payload["labels"] = labelIDs
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(payload)
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(data))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return ChildRef{}, fmt.Errorf("create issue request failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 201 {
|
||||
return ChildRef{}, fmt.Errorf("create issue returned %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Number int `json:"number"`
|
||||
HTMLURL string `json:"html_url"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&result)
|
||||
|
||||
return ChildRef{
|
||||
Number: result.Number,
|
||||
Title: title,
|
||||
URL: result.HTMLURL,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// resolveLabelIDs looks up label IDs by name, creating labels that don't exist.
|
||||
func (s *PrepSubsystem) resolveLabelIDs(ctx context.Context, org, repo string, names []string) []int64 {
|
||||
if len(names) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fetch existing labels
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/labels?limit=50", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil || resp.StatusCode != 200 {
|
||||
return nil
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var existing []struct {
|
||||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&existing)
|
||||
|
||||
nameToID := make(map[string]int64)
|
||||
for _, l := range existing {
|
||||
nameToID[l.Name] = l.ID
|
||||
}
|
||||
|
||||
var ids []int64
|
||||
for _, name := range names {
|
||||
if id, ok := nameToID[name]; ok {
|
||||
ids = append(ids, id)
|
||||
} else {
|
||||
// Create the label
|
||||
id := s.createLabel(ctx, org, repo, name)
|
||||
if id > 0 {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ids
|
||||
}
|
||||
|
||||
// createLabel creates a label on Forge and returns its ID.
|
||||
func (s *PrepSubsystem) createLabel(ctx context.Context, org, repo, name string) int64 {
|
||||
colours := map[string]string{
|
||||
"agentic": "#7c3aed",
|
||||
"epic": "#dc2626",
|
||||
"bug": "#ef4444",
|
||||
"help-wanted": "#22c55e",
|
||||
}
|
||||
colour := colours[name]
|
||||
if colour == "" {
|
||||
colour = "#6b7280"
|
||||
}
|
||||
|
||||
payload, _ := json.Marshal(map[string]string{
|
||||
"name": name,
|
||||
"color": colour,
|
||||
})
|
||||
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil || resp.StatusCode != 201 {
|
||||
return 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result struct {
|
||||
ID int64 `json:"id"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&result)
|
||||
return result.ID
|
||||
}
|
||||
|
||||
// listOrgRepos is defined in pr.go
|
||||
120
pkg/agentic/ingest.go
Normal file
120
pkg/agentic/ingest.go
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ingestFindings reads the agent output log and creates issues via the API
// for scan/audit results. Only workspaces whose status is "completed" are
// considered; everything else is skipped silently (best-effort).
func (s *PrepSubsystem) ingestFindings(wsDir string) {
	st, err := readStatus(wsDir)
	if err != nil || st.Status != "completed" {
		return
	}

	// Read the log file. Only the first agent-*.log match is inspected.
	logFiles, _ := filepath.Glob(filepath.Join(wsDir, "agent-*.log"))
	if len(logFiles) == 0 {
		return
	}

	// Logs under 100 bytes cannot contain meaningful findings.
	content, err := os.ReadFile(logFiles[0])
	if err != nil || len(content) < 100 {
		return
	}

	body := string(content)

	// Skip quota errors — these indicate the agent never ran properly.
	if strings.Contains(body, "QUOTA_EXHAUSTED") || strings.Contains(body, "QuotaError") {
		return
	}

	// Only ingest if there are actual findings (file:line references).
	findings := countFileRefs(body)
	if findings < 2 {
		return // No meaningful findings
	}

	// Determine issue type from the template used.
	// NOTE(review): this actually inspects the log *text*, not the template
	// name — any log that merely mentions "security" escalates the issue to
	// a high-priority bug. Confirm this heuristic is intended.
	issueType := "task"
	priority := "normal"
	if strings.Contains(body, "security") || strings.Contains(body, "Security") {
		issueType = "bug"
		priority = "high"
	}

	// Create a single issue per repo with all findings in the body.
	title := fmt.Sprintf("Scan findings for %s (%d items)", st.Repo, findings)

	// Truncate body to a reasonable size for an issue description.
	description := body
	if len(description) > 10000 {
		description = description[:10000] + "\n\n... (truncated, see full log in workspace)"
	}

	s.createIssueViaAPI(st.Repo, title, description, issueType, priority, "scan")
}
|
||||
|
||||
// countFileRefs counts backtick-delimited source references such as
// `file.go:123` in body — a proxy for "the agent produced real findings".
// A reference is a span of fewer than 100 characters between two backticks
// that contains ".go:" or ".php:".
//
// Fix: consumed spans are now skipped entirely. The previous version
// resumed scanning at the character after the *opening* backtick, so the
// closing backtick of one span was re-read as the opener of the next and
// plain text *between* two backtick spans could be counted as a finding.
func countFileRefs(body string) int {
	count := 0
	i := 0
	for i < len(body) {
		// Find the next opening backtick.
		open := strings.IndexByte(body[i:], '`')
		if open < 0 {
			break
		}
		start := i + open + 1

		// Find the matching closing backtick.
		rel := strings.IndexByte(body[start:], '`')
		if rel < 0 {
			break
		}
		end := start + rel

		ref := body[start:end]
		if len(ref) < 100 && (strings.Contains(ref, ".go:") || strings.Contains(ref, ".php:")) {
			count++
		}

		// Resume after the closing backtick so it is never reused.
		i = end + 1
	}
	return count
}
|
||||
|
||||
// createIssueViaAPI posts an issue to the lthn.sh API
// (POST {brainURL}/v1/issues). Best-effort: every failure path returns
// silently — callers never observe an error.
func (s *PrepSubsystem) createIssueViaAPI(repo, title, description, issueType, priority, source string) {
	// NOTE(review): s.brainKey is only used as an "API configured" gate;
	// the request below authenticates with ~/.claude/agent-api.key instead.
	// Confirm that is intended.
	if s.brainKey == "" {
		return
	}

	// Read the agent API key from file.
	home, _ := os.UserHomeDir()
	apiKeyData, err := os.ReadFile(filepath.Join(home, ".claude", "agent-api.key"))
	if err != nil {
		return
	}
	apiKey := strings.TrimSpace(string(apiKeyData))

	// NOTE(review): the repo and source parameters are not included in this
	// payload — verify the API derives them elsewhere, or add them.
	payload, _ := json.Marshal(map[string]string{
		"title":       title,
		"description": description,
		"type":        issueType,
		"priority":    priority,
		"reporter":    "cladius",
	})

	req, _ := http.NewRequest("POST", s.brainURL+"/v1/issues", bytes.NewReader(payload))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey)

	resp, err := s.client.Do(req)
	if err != nil {
		return
	}
	resp.Body.Close()
}
|
||||
386
pkg/agentic/plan.go
Normal file
386
pkg/agentic/plan.go
Normal file
|
|
@ -0,0 +1,386 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// Plan represents an implementation plan for agent work.
|
||||
type Plan struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"` // draft, ready, in_progress, needs_verification, verified, approved
|
||||
Repo string `json:"repo,omitempty"`
|
||||
Org string `json:"org,omitempty"`
|
||||
Objective string `json:"objective"`
|
||||
Phases []Phase `json:"phases,omitempty"`
|
||||
Notes string `json:"notes,omitempty"`
|
||||
Agent string `json:"agent,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
}
|
||||
|
||||
// Phase represents a phase within an implementation plan.
|
||||
type Phase struct {
|
||||
Number int `json:"number"`
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"` // pending, in_progress, done
|
||||
Criteria []string `json:"criteria,omitempty"`
|
||||
Tests int `json:"tests,omitempty"`
|
||||
Notes string `json:"notes,omitempty"`
|
||||
}
|
||||
|
||||
// --- Input/Output types ---
|
||||
|
||||
// PlanCreateInput is the input for agentic_plan_create.
|
||||
type PlanCreateInput struct {
|
||||
Title string `json:"title"`
|
||||
Objective string `json:"objective"`
|
||||
Repo string `json:"repo,omitempty"`
|
||||
Org string `json:"org,omitempty"`
|
||||
Phases []Phase `json:"phases,omitempty"`
|
||||
Notes string `json:"notes,omitempty"`
|
||||
}
|
||||
|
||||
// PlanCreateOutput is the output for agentic_plan_create.
|
||||
type PlanCreateOutput struct {
|
||||
Success bool `json:"success"`
|
||||
ID string `json:"id"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
// PlanReadInput is the input for agentic_plan_read.
|
||||
type PlanReadInput struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
// PlanReadOutput is the output for agentic_plan_read.
|
||||
type PlanReadOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Plan Plan `json:"plan"`
|
||||
}
|
||||
|
||||
// PlanUpdateInput is the input for agentic_plan_update.
|
||||
type PlanUpdateInput struct {
|
||||
ID string `json:"id"`
|
||||
Status string `json:"status,omitempty"`
|
||||
Title string `json:"title,omitempty"`
|
||||
Objective string `json:"objective,omitempty"`
|
||||
Phases []Phase `json:"phases,omitempty"`
|
||||
Notes string `json:"notes,omitempty"`
|
||||
Agent string `json:"agent,omitempty"`
|
||||
}
|
||||
|
||||
// PlanUpdateOutput is the output for agentic_plan_update.
|
||||
type PlanUpdateOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Plan Plan `json:"plan"`
|
||||
}
|
||||
|
||||
// PlanDeleteInput is the input for agentic_plan_delete.
|
||||
type PlanDeleteInput struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
// PlanDeleteOutput is the output for agentic_plan_delete.
|
||||
type PlanDeleteOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Deleted string `json:"deleted"`
|
||||
}
|
||||
|
||||
// PlanListInput is the input for agentic_plan_list.
|
||||
type PlanListInput struct {
|
||||
Status string `json:"status,omitempty"`
|
||||
Repo string `json:"repo,omitempty"`
|
||||
}
|
||||
|
||||
// PlanListOutput is the output for agentic_plan_list.
|
||||
type PlanListOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Count int `json:"count"`
|
||||
Plans []Plan `json:"plans"`
|
||||
}
|
||||
|
||||
// --- Registration ---
|
||||
|
||||
// registerPlanTools wires the five agentic_plan_* CRUD tools onto the MCP
// server: create, read, update, delete, and list.
func (s *PrepSubsystem) registerPlanTools(server *mcp.Server) {
	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_plan_create",
		Description: "Create an implementation plan. Plans track phased work with acceptance criteria, status lifecycle (draft → ready → in_progress → needs_verification → verified → approved), and per-phase progress.",
	}, s.planCreate)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_plan_read",
		Description: "Read an implementation plan by ID. Returns the full plan with all phases, criteria, and status.",
	}, s.planRead)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_plan_update",
		Description: "Update an implementation plan. Supports partial updates — only provided fields are changed. Use this to advance status, update phases, or add notes.",
	}, s.planUpdate)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_plan_delete",
		Description: "Delete an implementation plan by ID. Permanently removes the plan file.",
	}, s.planDelete)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "agentic_plan_list",
		Description: "List implementation plans. Supports filtering by status (draft, ready, in_progress, etc.) and repo.",
	}, s.planList)
}
|
||||
|
||||
// --- Handlers ---
|
||||
|
||||
func (s *PrepSubsystem) planCreate(_ context.Context, _ *mcp.CallToolRequest, input PlanCreateInput) (*mcp.CallToolResult, PlanCreateOutput, error) {
|
||||
if input.Title == "" {
|
||||
return nil, PlanCreateOutput{}, fmt.Errorf("title is required")
|
||||
}
|
||||
if input.Objective == "" {
|
||||
return nil, PlanCreateOutput{}, fmt.Errorf("objective is required")
|
||||
}
|
||||
|
||||
id := generatePlanID(input.Title)
|
||||
plan := Plan{
|
||||
ID: id,
|
||||
Title: input.Title,
|
||||
Status: "draft",
|
||||
Repo: input.Repo,
|
||||
Org: input.Org,
|
||||
Objective: input.Objective,
|
||||
Phases: input.Phases,
|
||||
Notes: input.Notes,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
|
||||
// Default phase status to pending
|
||||
for i := range plan.Phases {
|
||||
if plan.Phases[i].Status == "" {
|
||||
plan.Phases[i].Status = "pending"
|
||||
}
|
||||
if plan.Phases[i].Number == 0 {
|
||||
plan.Phases[i].Number = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
path, err := writePlan(s.plansDir(), &plan)
|
||||
if err != nil {
|
||||
return nil, PlanCreateOutput{}, fmt.Errorf("failed to write plan: %w", err)
|
||||
}
|
||||
|
||||
return nil, PlanCreateOutput{
|
||||
Success: true,
|
||||
ID: id,
|
||||
Path: path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) planRead(_ context.Context, _ *mcp.CallToolRequest, input PlanReadInput) (*mcp.CallToolResult, PlanReadOutput, error) {
|
||||
if input.ID == "" {
|
||||
return nil, PlanReadOutput{}, fmt.Errorf("id is required")
|
||||
}
|
||||
|
||||
plan, err := readPlan(s.plansDir(), input.ID)
|
||||
if err != nil {
|
||||
return nil, PlanReadOutput{}, err
|
||||
}
|
||||
|
||||
return nil, PlanReadOutput{
|
||||
Success: true,
|
||||
Plan: *plan,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) planUpdate(_ context.Context, _ *mcp.CallToolRequest, input PlanUpdateInput) (*mcp.CallToolResult, PlanUpdateOutput, error) {
|
||||
if input.ID == "" {
|
||||
return nil, PlanUpdateOutput{}, fmt.Errorf("id is required")
|
||||
}
|
||||
|
||||
plan, err := readPlan(s.plansDir(), input.ID)
|
||||
if err != nil {
|
||||
return nil, PlanUpdateOutput{}, err
|
||||
}
|
||||
|
||||
// Apply partial updates
|
||||
if input.Status != "" {
|
||||
if !validPlanStatus(input.Status) {
|
||||
return nil, PlanUpdateOutput{}, fmt.Errorf("invalid status: %s (valid: draft, ready, in_progress, needs_verification, verified, approved)", input.Status)
|
||||
}
|
||||
plan.Status = input.Status
|
||||
}
|
||||
if input.Title != "" {
|
||||
plan.Title = input.Title
|
||||
}
|
||||
if input.Objective != "" {
|
||||
plan.Objective = input.Objective
|
||||
}
|
||||
if input.Phases != nil {
|
||||
plan.Phases = input.Phases
|
||||
}
|
||||
if input.Notes != "" {
|
||||
plan.Notes = input.Notes
|
||||
}
|
||||
if input.Agent != "" {
|
||||
plan.Agent = input.Agent
|
||||
}
|
||||
|
||||
plan.UpdatedAt = time.Now()
|
||||
|
||||
if _, err := writePlan(s.plansDir(), plan); err != nil {
|
||||
return nil, PlanUpdateOutput{}, fmt.Errorf("failed to write plan: %w", err)
|
||||
}
|
||||
|
||||
return nil, PlanUpdateOutput{
|
||||
Success: true,
|
||||
Plan: *plan,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) planDelete(_ context.Context, _ *mcp.CallToolRequest, input PlanDeleteInput) (*mcp.CallToolResult, PlanDeleteOutput, error) {
|
||||
if input.ID == "" {
|
||||
return nil, PlanDeleteOutput{}, fmt.Errorf("id is required")
|
||||
}
|
||||
|
||||
path := planPath(s.plansDir(), input.ID)
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
return nil, PlanDeleteOutput{}, fmt.Errorf("plan not found: %s", input.ID)
|
||||
}
|
||||
|
||||
if err := os.Remove(path); err != nil {
|
||||
return nil, PlanDeleteOutput{}, fmt.Errorf("failed to delete plan: %w", err)
|
||||
}
|
||||
|
||||
return nil, PlanDeleteOutput{
|
||||
Success: true,
|
||||
Deleted: input.ID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) planList(_ context.Context, _ *mcp.CallToolRequest, input PlanListInput) (*mcp.CallToolResult, PlanListOutput, error) {
|
||||
dir := s.plansDir()
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return nil, PlanListOutput{}, fmt.Errorf("failed to access plans directory: %w", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, PlanListOutput{}, fmt.Errorf("failed to read plans directory: %w", err)
|
||||
}
|
||||
|
||||
var plans []Plan
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".json") {
|
||||
continue
|
||||
}
|
||||
|
||||
id := strings.TrimSuffix(entry.Name(), ".json")
|
||||
plan, err := readPlan(dir, id)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Apply filters
|
||||
if input.Status != "" && plan.Status != input.Status {
|
||||
continue
|
||||
}
|
||||
if input.Repo != "" && plan.Repo != input.Repo {
|
||||
continue
|
||||
}
|
||||
|
||||
plans = append(plans, *plan)
|
||||
}
|
||||
|
||||
return nil, PlanListOutput{
|
||||
Success: true,
|
||||
Count: len(plans),
|
||||
Plans: plans,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// --- Helpers ---
|
||||
|
||||
// plansDir returns the on-disk directory where plan JSON files live.
// NOTE(review): the path is hard-coded to the host-uk checkout layout
// and the UserHomeDir error is ignored (an empty home yields a
// relative path) — confirm this is intended for all deployments.
func (s *PrepSubsystem) plansDir() string {
	home, _ := os.UserHomeDir()
	return filepath.Join(home, "Code", "host-uk", "core", ".core", "plans")
}
|
||||
|
||||
// planPath builds the JSON file path for the plan with the given ID
// inside dir.
func planPath(dir, id string) string {
	filename := id + ".json"
	return filepath.Join(dir, filename)
}
|
||||
|
||||
// generatePlanID derives a filesystem-safe identifier from a plan
// title: a lowercased, dash-separated slug (capped at 30 characters)
// followed by a 6-hex-character random suffix for uniqueness.
func generatePlanID(title string) string {
	slug := strings.Map(func(r rune) rune {
		switch {
		case r >= 'a' && r <= 'z', r >= '0' && r <= '9', r == '-':
			return r
		case r >= 'A' && r <= 'Z':
			return r + 32 // lowercase ASCII letters
		case r == ' ':
			return '-'
		}
		return -1 // drop every other character
	}, title)

	// Collapse consecutive dashes, trim edges, and cap the length.
	for strings.Contains(slug, "--") {
		slug = strings.ReplaceAll(slug, "--", "-")
	}
	slug = strings.Trim(slug, "-")
	if len(slug) > 30 {
		slug = slug[:30]
	}
	slug = strings.TrimRight(slug, "-")

	// Append a short random suffix for uniqueness. The original code
	// ignored the rand.Read error; fall back to a time-derived suffix
	// so ID generation never silently produces the all-zero suffix.
	b := make([]byte, 3)
	if _, err := rand.Read(b); err != nil {
		return fmt.Sprintf("%s-%06x", slug, time.Now().UnixNano()&0xffffff)
	}
	return slug + "-" + hex.EncodeToString(b)
}
|
||||
|
||||
func readPlan(dir, id string) (*Plan, error) {
|
||||
data, err := os.ReadFile(planPath(dir, id))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("plan not found: %s", id)
|
||||
}
|
||||
|
||||
var plan Plan
|
||||
if err := json.Unmarshal(data, &plan); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse plan %s: %w", id, err)
|
||||
}
|
||||
return &plan, nil
|
||||
}
|
||||
|
||||
func writePlan(dir string, plan *Plan) (string, error) {
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return "", fmt.Errorf("failed to create plans directory: %w", err)
|
||||
}
|
||||
|
||||
path := planPath(dir, plan.ID)
|
||||
data, err := json.MarshalIndent(plan, "", " ")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return path, os.WriteFile(path, data, 0644)
|
||||
}
|
||||
|
||||
// validPlanStatus reports whether status is one of the recognised
// plan lifecycle states.
func validPlanStatus(status string) bool {
	for _, known := range []string{
		"draft", "ready", "in_progress", "needs_verification", "verified", "approved",
	} {
		if status == known {
			return true
		}
	}
	return false
}
|
||||
358
pkg/agentic/pr.go
Normal file
358
pkg/agentic/pr.go
Normal file
|
|
@ -0,0 +1,358 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// --- agentic_create_pr ---
|
||||
|
||||
// CreatePRInput is the input for agentic_create_pr.
// Workspace is required; all other fields have defaults applied by
// the handler.
type CreatePRInput struct {
	Workspace string `json:"workspace"`          // workspace name (e.g. "mcp-1773581873")
	Title     string `json:"title,omitempty"`    // PR title (default: task description)
	Body      string `json:"body,omitempty"`     // PR body (default: auto-generated)
	Base      string `json:"base,omitempty"`     // base branch (default: "main")
	DryRun    bool   `json:"dry_run,omitempty"`  // preview without pushing or creating
}
|
||||
|
||||
// CreatePROutput is the output for agentic_create_pr.
// On a dry run only Title, Branch and Repo are populated; PRURL and
// PRNum are filled in after the Forge API call succeeds.
type CreatePROutput struct {
	Success bool   `json:"success"`
	PRURL   string `json:"pr_url,omitempty"`
	PRNum   int    `json:"pr_number,omitempty"`
	Title   string `json:"title"`
	Branch  string `json:"branch"`
	Repo    string `json:"repo"`
	Pushed  bool   `json:"pushed"` // true once the branch was pushed to origin
}
|
||||
|
||||
func (s *PrepSubsystem) registerCreatePRTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
Name: "agentic_create_pr",
|
||||
Description: "Create a pull request from an agent workspace. Pushes the branch to Forge and opens a PR. Links to the source issue if one was tracked.",
|
||||
}, s.createPR)
|
||||
}
|
||||
|
||||
// createPR implements agentic_create_pr: it pushes the workspace's
// feature branch to origin and opens a pull request via the Forge API.
// Steps: validate input → locate workspace → read status.json →
// resolve branch/org/base/title/body defaults → (optional dry run) →
// git push → create PR → record PR URL → comment on the linked issue.
func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, input CreatePRInput) (*mcp.CallToolResult, CreatePROutput, error) {
	if input.Workspace == "" {
		return nil, CreatePROutput{}, fmt.Errorf("workspace is required")
	}
	if s.forgeToken == "" {
		return nil, CreatePROutput{}, fmt.Errorf("no Forge token configured")
	}

	// Workspace layout is fixed: .core/workspace/{name}/src holds the clone.
	home, _ := os.UserHomeDir()
	wsDir := filepath.Join(home, "Code", "host-uk", "core", ".core", "workspace", input.Workspace)
	srcDir := filepath.Join(wsDir, "src")

	if _, err := os.Stat(srcDir); err != nil {
		return nil, CreatePROutput{}, fmt.Errorf("workspace not found: %s", input.Workspace)
	}

	// Read workspace status for repo, branch, issue context
	st, err := readStatus(wsDir)
	if err != nil {
		return nil, CreatePROutput{}, fmt.Errorf("no status.json: %w", err)
	}

	if st.Branch == "" {
		// Detect branch from git
		branchCmd := exec.CommandContext(ctx, "git", "rev-parse", "--abbrev-ref", "HEAD")
		branchCmd.Dir = srcDir
		out, err := branchCmd.Output()
		if err != nil {
			return nil, CreatePROutput{}, fmt.Errorf("failed to detect branch: %w", err)
		}
		st.Branch = strings.TrimSpace(string(out))
	}

	// Default org and base branch when the status file does not say.
	org := st.Org
	if org == "" {
		org = "core"
	}
	base := input.Base
	if base == "" {
		base = "main"
	}

	// Build PR title: explicit input wins, then the tracked task, then
	// a branch-derived fallback.
	title := input.Title
	if title == "" {
		title = st.Task
	}
	if title == "" {
		title = fmt.Sprintf("Agent work on %s", st.Branch)
	}

	// Build PR body
	body := input.Body
	if body == "" {
		body = s.buildPRBody(st)
	}

	// Dry run: report what would be created without pushing anything.
	if input.DryRun {
		return nil, CreatePROutput{
			Success: true,
			Title:   title,
			Branch:  st.Branch,
			Repo:    st.Repo,
		}, nil
	}

	// Push branch to forge
	pushCmd := exec.CommandContext(ctx, "git", "push", "-u", "origin", st.Branch)
	pushCmd.Dir = srcDir
	pushOut, err := pushCmd.CombinedOutput()
	if err != nil {
		return nil, CreatePROutput{}, fmt.Errorf("git push failed: %s: %w", string(pushOut), err)
	}

	// Create PR via Forge API
	prURL, prNum, err := s.forgeCreatePR(ctx, org, st.Repo, st.Branch, base, title, body)
	if err != nil {
		return nil, CreatePROutput{}, fmt.Errorf("failed to create PR: %w", err)
	}

	// Update status with PR URL. NOTE(review): writeStatus's error is
	// ignored here — the PR already exists, so this is best-effort, but
	// a failed write leaves status.json without the PR link.
	st.PRURL = prURL
	writeStatus(wsDir, st)

	// Comment on issue if tracked (also best-effort; see commentOnIssue).
	if st.Issue > 0 {
		comment := fmt.Sprintf("Pull request created: %s", prURL)
		s.commentOnIssue(ctx, org, st.Repo, st.Issue, comment)
	}

	return nil, CreatePROutput{
		Success: true,
		PRURL:   prURL,
		PRNum:   prNum,
		Title:   title,
		Branch:  st.Branch,
		Repo:    st.Repo,
		Pushed:  true,
	}, nil
}
|
||||
|
||||
func (s *PrepSubsystem) buildPRBody(st *WorkspaceStatus) string {
|
||||
var b strings.Builder
|
||||
b.WriteString("## Summary\n\n")
|
||||
if st.Task != "" {
|
||||
b.WriteString(st.Task)
|
||||
b.WriteString("\n\n")
|
||||
}
|
||||
if st.Issue > 0 {
|
||||
b.WriteString(fmt.Sprintf("Closes #%d\n\n", st.Issue))
|
||||
}
|
||||
b.WriteString(fmt.Sprintf("**Agent:** %s\n", st.Agent))
|
||||
b.WriteString(fmt.Sprintf("**Runs:** %d\n", st.Runs))
|
||||
b.WriteString("\n---\n*Created by agentic dispatch*\n")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) forgeCreatePR(ctx context.Context, org, repo, head, base, title, body string) (string, int, error) {
|
||||
payload, _ := json.Marshal(map[string]any{
|
||||
"title": title,
|
||||
"body": body,
|
||||
"head": head,
|
||||
"base": base,
|
||||
})
|
||||
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return "", 0, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 201 {
|
||||
var errBody map[string]any
|
||||
json.NewDecoder(resp.Body).Decode(&errBody)
|
||||
msg, _ := errBody["message"].(string)
|
||||
return "", 0, fmt.Errorf("HTTP %d: %s", resp.StatusCode, msg)
|
||||
}
|
||||
|
||||
var pr struct {
|
||||
Number int `json:"number"`
|
||||
HTMLURL string `json:"html_url"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&pr)
|
||||
|
||||
return pr.HTMLURL, pr.Number, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) commentOnIssue(ctx context.Context, org, repo string, issue int, comment string) {
|
||||
payload, _ := json.Marshal(map[string]string{"body": comment})
|
||||
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/comments", s.forgeURL, org, repo, issue)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
// --- agentic_list_prs ---
|
||||
|
||||
// ListPRsInput is the input for agentic_list_prs.
// All fields are optional; defaults are applied by the handler.
type ListPRsInput struct {
	Org   string `json:"org,omitempty"`   // forge org (default "core")
	Repo  string `json:"repo,omitempty"`  // specific repo, or empty to scan all org repos
	State string `json:"state,omitempty"` // "open" (default), "closed", "all"
	Limit int    `json:"limit,omitempty"` // max results (default 20)
}
|
||||
|
||||
// ListPRsOutput is the output for agentic_list_prs.
// Count equals len(PRs) after the limit is applied.
type ListPRsOutput struct {
	Success bool     `json:"success"`
	Count   int      `json:"count"`
	PRs     []PRInfo `json:"prs"`
}
|
||||
|
||||
// PRInfo represents a pull request as returned by the Forge API,
// flattened for tool output (head/base refs and label names extracted).
type PRInfo struct {
	Repo      string   `json:"repo"`
	Number    int      `json:"number"`
	Title     string   `json:"title"`
	State     string   `json:"state"`
	Author    string   `json:"author"`  // login of the PR creator
	Branch    string   `json:"branch"`  // head branch (source of the change)
	Base      string   `json:"base"`    // base branch the PR targets
	Labels    []string `json:"labels,omitempty"`
	Mergeable bool     `json:"mergeable"`
	URL       string   `json:"url"` // HTML URL of the PR
}
|
||||
|
||||
func (s *PrepSubsystem) registerListPRsTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
Name: "agentic_list_prs",
|
||||
Description: "List pull requests across Forge repos. Filter by org, repo, and state (open/closed/all).",
|
||||
}, s.listPRs)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) listPRs(ctx context.Context, _ *mcp.CallToolRequest, input ListPRsInput) (*mcp.CallToolResult, ListPRsOutput, error) {
|
||||
if s.forgeToken == "" {
|
||||
return nil, ListPRsOutput{}, fmt.Errorf("no Forge token configured")
|
||||
}
|
||||
|
||||
if input.Org == "" {
|
||||
input.Org = "core"
|
||||
}
|
||||
if input.State == "" {
|
||||
input.State = "open"
|
||||
}
|
||||
if input.Limit == 0 {
|
||||
input.Limit = 20
|
||||
}
|
||||
|
||||
var repos []string
|
||||
if input.Repo != "" {
|
||||
repos = []string{input.Repo}
|
||||
} else {
|
||||
var err error
|
||||
repos, err = s.listOrgRepos(ctx, input.Org)
|
||||
if err != nil {
|
||||
return nil, ListPRsOutput{}, err
|
||||
}
|
||||
}
|
||||
|
||||
var allPRs []PRInfo
|
||||
|
||||
for _, repo := range repos {
|
||||
prs, err := s.listRepoPRs(ctx, input.Org, repo, input.State)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
allPRs = append(allPRs, prs...)
|
||||
|
||||
if len(allPRs) >= input.Limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(allPRs) > input.Limit {
|
||||
allPRs = allPRs[:input.Limit]
|
||||
}
|
||||
|
||||
return nil, ListPRsOutput{
|
||||
Success: true,
|
||||
Count: len(allPRs),
|
||||
PRs: allPRs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) listRepoPRs(ctx context.Context, org, repo, state string) ([]PRInfo, error) {
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls?state=%s&limit=10",
|
||||
s.forgeURL, org, repo, state)
|
||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil || resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("failed to list PRs for %s: %v", repo, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var prs []struct {
|
||||
Number int `json:"number"`
|
||||
Title string `json:"title"`
|
||||
State string `json:"state"`
|
||||
Mergeable bool `json:"mergeable"`
|
||||
HTMLURL string `json:"html_url"`
|
||||
Head struct {
|
||||
Ref string `json:"ref"`
|
||||
} `json:"head"`
|
||||
Base struct {
|
||||
Ref string `json:"ref"`
|
||||
} `json:"base"`
|
||||
User struct {
|
||||
Login string `json:"login"`
|
||||
} `json:"user"`
|
||||
Labels []struct {
|
||||
Name string `json:"name"`
|
||||
} `json:"labels"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&prs)
|
||||
|
||||
var result []PRInfo
|
||||
for _, pr := range prs {
|
||||
var labels []string
|
||||
for _, l := range pr.Labels {
|
||||
labels = append(labels, l.Name)
|
||||
}
|
||||
result = append(result, PRInfo{
|
||||
Repo: repo,
|
||||
Number: pr.Number,
|
||||
Title: pr.Title,
|
||||
State: pr.State,
|
||||
Author: pr.User.Login,
|
||||
Branch: pr.Head.Ref,
|
||||
Base: pr.Base.Ref,
|
||||
Labels: labels,
|
||||
Mergeable: pr.Mergeable,
|
||||
URL: pr.HTMLURL,
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
594
pkg/agentic/prep.go
Normal file
594
pkg/agentic/prep.go
Normal file
|
|
@ -0,0 +1,594 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
// Package agentic provides MCP tools for agent orchestration.
|
||||
// Prepares sandboxed workspaces and dispatches subagents.
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// PrepSubsystem provides agentic MCP tools: workspace preparation,
// subagent dispatch, PR creation/listing, and plan management.
// Construct with NewPrep; the zero value has no credentials or client
// and is not usable.
type PrepSubsystem struct {
	forgeURL   string       // base URL of the Forge instance
	forgeToken string       // API token (FORGE_TOKEN / GITEA_TOKEN)
	brainURL   string       // base URL of the OpenBrain API
	brainKey   string       // OpenBrain API key
	specsPath  string       // local directory holding spec files
	codePath   string       // root of local code checkouts
	client     *http.Client // shared HTTP client with a 30s timeout
}
|
||||
|
||||
// NewPrep creates an agentic subsystem.
|
||||
func NewPrep() *PrepSubsystem {
|
||||
home, _ := os.UserHomeDir()
|
||||
|
||||
forgeToken := os.Getenv("FORGE_TOKEN")
|
||||
if forgeToken == "" {
|
||||
forgeToken = os.Getenv("GITEA_TOKEN")
|
||||
}
|
||||
|
||||
brainKey := os.Getenv("CORE_BRAIN_KEY")
|
||||
if brainKey == "" {
|
||||
if data, err := os.ReadFile(filepath.Join(home, ".claude", "brain.key")); err == nil {
|
||||
brainKey = strings.TrimSpace(string(data))
|
||||
}
|
||||
}
|
||||
|
||||
return &PrepSubsystem{
|
||||
forgeURL: envOr("FORGE_URL", "https://forge.lthn.ai"),
|
||||
forgeToken: forgeToken,
|
||||
brainURL: envOr("CORE_BRAIN_URL", "https://api.lthn.sh"),
|
||||
brainKey: brainKey,
|
||||
specsPath: envOr("SPECS_PATH", filepath.Join(home, "Code", "host-uk", "specs")),
|
||||
codePath: envOr("CODE_PATH", filepath.Join(home, "Code")),
|
||||
client: &http.Client{Timeout: 30 * time.Second},
|
||||
}
|
||||
}
|
||||
|
||||
// envOr returns the value of the environment variable key, or
// fallback when the variable is unset or empty.
func envOr(key, fallback string) string {
	v := os.Getenv(key)
	if v == "" {
		return fallback
	}
	return v
}
|
||||
|
||||
// Name implements mcp.Subsystem. It returns the subsystem identifier
// used to namespace the agentic tools.
func (s *PrepSubsystem) Name() string { return "agentic" }
|
||||
|
||||
// RegisterTools implements mcp.Subsystem.
|
||||
func (s *PrepSubsystem) RegisterTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
Name: "agentic_prep_workspace",
|
||||
Description: "Prepare a sandboxed agent workspace with TODO.md, CLAUDE.md, CONTEXT.md, CONSUMERS.md, RECENT.md, and a git clone of the target repo in src/.",
|
||||
}, s.prepWorkspace)
|
||||
|
||||
s.registerDispatchTool(server)
|
||||
s.registerStatusTool(server)
|
||||
s.registerResumeTool(server)
|
||||
s.registerCreatePRTool(server)
|
||||
s.registerListPRsTool(server)
|
||||
s.registerEpicTool(server)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
Name: "agentic_scan",
|
||||
Description: "Scan Forge repos for open issues with actionable labels (agentic, help-wanted, bug).",
|
||||
}, s.scan)
|
||||
|
||||
s.registerPlanTools(server)
|
||||
}
|
||||
|
||||
// Shutdown implements mcp.SubsystemWithShutdown. The subsystem holds
// no background goroutines or resources, so shutdown is a no-op.
func (s *PrepSubsystem) Shutdown(_ context.Context) error { return nil }
|
||||
|
||||
// --- Input/Output types ---
|
||||
|
||||
// PrepInput is the input for agentic_prep_workspace.
// Repo is required. Provide either Issue (a Forge issue number) or
// Task (a free-form description); Issue takes precedence when both
// are set.
type PrepInput struct {
	Repo         string            `json:"repo"`                    // e.g. "go-io"
	Org          string            `json:"org,omitempty"`           // default "core"
	Issue        int               `json:"issue,omitempty"`         // Forge issue number
	Task         string            `json:"task,omitempty"`          // Task description (if no issue)
	Template     string            `json:"template,omitempty"`      // Prompt template: conventions, security, coding (default: coding)
	PlanTemplate string            `json:"plan_template,omitempty"` // Plan template slug: bug-fix, code-review, new-feature, refactor, feature-port
	Variables    map[string]string `json:"variables,omitempty"`     // Template variable substitution
	Persona      string            `json:"persona,omitempty"`       // Persona slug: engineering/backend-architect, testing/api-tester, etc.
}
|
||||
|
||||
// PrepOutput is the output for agentic_prep_workspace.
// The count fields report how much context material was gathered into
// the workspace (wiki pages, spec files, brain memories, downstream
// consumers, git log entries).
type PrepOutput struct {
	Success      bool   `json:"success"`
	WorkspaceDir string `json:"workspace_dir"`
	WikiPages    int    `json:"wiki_pages"`
	SpecFiles    int    `json:"spec_files"`
	Memories     int    `json:"memories"`
	Consumers    int    `json:"consumers"`
	ClaudeMd     bool   `json:"claude_md"` // true if the repo's CLAUDE.md was copied
	GitLog       int    `json:"git_log_entries"`
}
|
||||
|
||||
func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolRequest, input PrepInput) (*mcp.CallToolResult, PrepOutput, error) {
|
||||
if input.Repo == "" {
|
||||
return nil, PrepOutput{}, fmt.Errorf("repo is required")
|
||||
}
|
||||
if input.Org == "" {
|
||||
input.Org = "core"
|
||||
}
|
||||
if input.Template == "" {
|
||||
input.Template = "coding"
|
||||
}
|
||||
|
||||
// Workspace root: .core/workspace/{repo}-{timestamp}/
|
||||
home, _ := os.UserHomeDir()
|
||||
wsRoot := filepath.Join(home, "Code", "host-uk", "core", ".core", "workspace")
|
||||
wsName := fmt.Sprintf("%s-%d", input.Repo, time.Now().Unix())
|
||||
wsDir := filepath.Join(wsRoot, wsName)
|
||||
|
||||
// Create workspace structure
|
||||
// kb/ and specs/ will be created inside src/ after clone
|
||||
|
||||
out := PrepOutput{WorkspaceDir: wsDir}
|
||||
|
||||
// Source repo path
|
||||
repoPath := filepath.Join(s.codePath, "core", input.Repo)
|
||||
|
||||
// 1. Clone repo into src/ and create feature branch
|
||||
srcDir := filepath.Join(wsDir, "src")
|
||||
cloneCmd := exec.CommandContext(ctx, "git", "clone", repoPath, srcDir)
|
||||
cloneCmd.Run()
|
||||
|
||||
// Create feature branch
|
||||
taskSlug := strings.Map(func(r rune) rune {
|
||||
if r >= 'a' && r <= 'z' || r >= '0' && r <= '9' || r == '-' {
|
||||
return r
|
||||
}
|
||||
if r >= 'A' && r <= 'Z' {
|
||||
return r + 32 // lowercase
|
||||
}
|
||||
return '-'
|
||||
}, input.Task)
|
||||
if len(taskSlug) > 40 {
|
||||
taskSlug = taskSlug[:40]
|
||||
}
|
||||
taskSlug = strings.Trim(taskSlug, "-")
|
||||
branchName := fmt.Sprintf("agent/%s", taskSlug)
|
||||
|
||||
branchCmd := exec.CommandContext(ctx, "git", "checkout", "-b", branchName)
|
||||
branchCmd.Dir = srcDir
|
||||
branchCmd.Run()
|
||||
|
||||
// Create context dirs inside src/
|
||||
os.MkdirAll(filepath.Join(srcDir, "kb"), 0755)
|
||||
os.MkdirAll(filepath.Join(srcDir, "specs"), 0755)
|
||||
|
||||
// Remote stays as local clone origin — agent cannot push to forge.
|
||||
// Reviewer pulls changes from workspace and pushes after verification.
|
||||
|
||||
// 2. Copy CLAUDE.md and GEMINI.md to workspace
|
||||
claudeMdPath := filepath.Join(repoPath, "CLAUDE.md")
|
||||
if data, err := os.ReadFile(claudeMdPath); err == nil {
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "CLAUDE.md"), data, 0644)
|
||||
out.ClaudeMd = true
|
||||
}
|
||||
// Copy GEMINI.md from core/agent (ethics framework for all agents)
|
||||
agentGeminiMd := filepath.Join(s.codePath, "core", "agent", "GEMINI.md")
|
||||
if data, err := os.ReadFile(agentGeminiMd); err == nil {
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "GEMINI.md"), data, 0644)
|
||||
}
|
||||
|
||||
// Copy persona if specified
|
||||
if input.Persona != "" {
|
||||
personaPath := filepath.Join(s.codePath, "core", "agent", "prompts", "personas", input.Persona+".md")
|
||||
if data, err := os.ReadFile(personaPath); err == nil {
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "PERSONA.md"), data, 0644)
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Generate TODO.md
|
||||
if input.Issue > 0 {
|
||||
s.generateTodo(ctx, input.Org, input.Repo, input.Issue, wsDir)
|
||||
} else if input.Task != "" {
|
||||
todo := fmt.Sprintf("# TASK: %s\n\n**Repo:** %s/%s\n**Status:** ready\n\n## Objective\n\n%s\n",
|
||||
input.Task, input.Org, input.Repo, input.Task)
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "TODO.md"), []byte(todo), 0644)
|
||||
}
|
||||
|
||||
// 4. Generate CONTEXT.md from OpenBrain
|
||||
out.Memories = s.generateContext(ctx, input.Repo, wsDir)
|
||||
|
||||
// 5. Generate CONSUMERS.md
|
||||
out.Consumers = s.findConsumers(input.Repo, wsDir)
|
||||
|
||||
// 6. Generate RECENT.md
|
||||
out.GitLog = s.gitLog(repoPath, wsDir)
|
||||
|
||||
// 7. Pull wiki pages into kb/
|
||||
out.WikiPages = s.pullWiki(ctx, input.Org, input.Repo, wsDir)
|
||||
|
||||
// 8. Copy spec files into specs/
|
||||
out.SpecFiles = s.copySpecs(wsDir)
|
||||
|
||||
// 9. Write PLAN.md from template (if specified)
|
||||
if input.PlanTemplate != "" {
|
||||
s.writePlanFromTemplate(input.PlanTemplate, input.Variables, input.Task, wsDir)
|
||||
}
|
||||
|
||||
// 10. Write prompt template
|
||||
s.writePromptTemplate(input.Template, wsDir)
|
||||
|
||||
out.Success = true
|
||||
return nil, out, nil
|
||||
}
|
||||
|
||||
// --- Prompt templates ---
|
||||
|
||||
// writePromptTemplate writes PROMPT.md into the workspace src/
// directory, selecting one of the built-in prompt texts by template
// name ("conventions", "security", "coding"); any other value falls
// back to a minimal generic prompt. The WriteFile error is ignored —
// the prompt is advisory and prep continues without it.
func (s *PrepSubsystem) writePromptTemplate(template, wsDir string) {
	var prompt string

	switch template {
	case "conventions":
		prompt = `Read CLAUDE.md for project conventions.
Review all Go files in src/ for:
- Error handling: should use coreerr.E() from go-log, not fmt.Errorf or errors.New
- Compile-time interface checks: var _ Interface = (*Impl)(nil)
- Import aliasing: stdlib io aliased as goio
- UK English in comments (colour not color, initialise not initialize)
- No fmt.Print* debug statements (use go-log)
- Test coverage gaps

Report findings with file:line references. Do not fix — only report.
`
	case "security":
		prompt = `Read CLAUDE.md for project context.
Review all Go files in src/ for security issues:
- Path traversal vulnerabilities
- Unvalidated input
- SQL injection (if applicable)
- Hardcoded credentials or tokens
- Unsafe type assertions
- Missing error checks
- Race conditions (shared state without mutex)
- Unsafe use of os/exec

Report findings with severity (critical/high/medium/low) and file:line references.
`
	case "coding":
		prompt = `Read PERSONA.md if it exists — adopt that identity and approach.
Read CLAUDE.md for project conventions and context.
Read TODO.md for your task.
Read PLAN.md if it exists — work through each phase in order.
Read CONTEXT.md for relevant knowledge from previous sessions.
Read CONSUMERS.md to understand breaking change risk.
Read RECENT.md for recent changes.

Work in the src/ directory. Follow the conventions in CLAUDE.md.

## Workflow

If PLAN.md exists, you MUST work through it phase by phase:
1. Complete all tasks in the current phase
2. STOP and commit before moving on: type(scope): phase N - description
3. Only then start the next phase
4. If you are blocked or unsure, write BLOCKED.md explaining the question and stop
5. Do NOT skip phases or combine multiple phases into one commit

Each phase = one commit. This is not optional.

If no PLAN.md, complete TODO.md as a single unit of work.

## Commit Convention

Commit message format: type(scope): description
Co-Author: Co-Authored-By: Virgil <virgil@lethean.io>

Do NOT push. Commit only — a reviewer will verify and push.
`
	default:
		prompt = "Read TODO.md and complete the task. Work in src/.\n"
	}

	os.WriteFile(filepath.Join(wsDir, "src", "PROMPT.md"), []byte(prompt), 0644)
}
|
||||
|
||||
// --- Plan template rendering ---
|
||||
|
||||
// writePlanFromTemplate loads a YAML plan template, substitutes variables,
|
||||
// and writes PLAN.md into the workspace src/ directory.
|
||||
func (s *PrepSubsystem) writePlanFromTemplate(templateSlug string, variables map[string]string, task string, wsDir string) {
|
||||
// Look for template in core/agent/prompts/templates/
|
||||
templatePath := filepath.Join(s.codePath, "core", "agent", "prompts", "templates", templateSlug+".yaml")
|
||||
data, err := os.ReadFile(templatePath)
|
||||
if err != nil {
|
||||
// Try .yml extension
|
||||
templatePath = filepath.Join(s.codePath, "core", "agent", "prompts", "templates", templateSlug+".yml")
|
||||
data, err = os.ReadFile(templatePath)
|
||||
if err != nil {
|
||||
return // Template not found, skip silently
|
||||
}
|
||||
}
|
||||
|
||||
content := string(data)
|
||||
|
||||
// Substitute variables ({{variable_name}} → value)
|
||||
for key, value := range variables {
|
||||
content = strings.ReplaceAll(content, "{{"+key+"}}", value)
|
||||
content = strings.ReplaceAll(content, "{{ "+key+" }}", value)
|
||||
}
|
||||
|
||||
// Parse the YAML to render as markdown
|
||||
var tmpl struct {
|
||||
Name string `yaml:"name"`
|
||||
Description string `yaml:"description"`
|
||||
Guidelines []string `yaml:"guidelines"`
|
||||
Phases []struct {
|
||||
Name string `yaml:"name"`
|
||||
Description string `yaml:"description"`
|
||||
Tasks []any `yaml:"tasks"`
|
||||
} `yaml:"phases"`
|
||||
}
|
||||
|
||||
if err := yaml.Unmarshal([]byte(content), &tmpl); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Render as PLAN.md
|
||||
var plan strings.Builder
|
||||
plan.WriteString("# Plan: " + tmpl.Name + "\n\n")
|
||||
if task != "" {
|
||||
plan.WriteString("**Task:** " + task + "\n\n")
|
||||
}
|
||||
if tmpl.Description != "" {
|
||||
plan.WriteString(tmpl.Description + "\n\n")
|
||||
}
|
||||
|
||||
if len(tmpl.Guidelines) > 0 {
|
||||
plan.WriteString("## Guidelines\n\n")
|
||||
for _, g := range tmpl.Guidelines {
|
||||
plan.WriteString("- " + g + "\n")
|
||||
}
|
||||
plan.WriteString("\n")
|
||||
}
|
||||
|
||||
for i, phase := range tmpl.Phases {
|
||||
plan.WriteString(fmt.Sprintf("## Phase %d: %s\n\n", i+1, phase.Name))
|
||||
if phase.Description != "" {
|
||||
plan.WriteString(phase.Description + "\n\n")
|
||||
}
|
||||
for _, task := range phase.Tasks {
|
||||
switch t := task.(type) {
|
||||
case string:
|
||||
plan.WriteString("- [ ] " + t + "\n")
|
||||
case map[string]any:
|
||||
if name, ok := t["name"].(string); ok {
|
||||
plan.WriteString("- [ ] " + name + "\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
plan.WriteString("\n**Commit after completing this phase.**\n\n---\n\n")
|
||||
}
|
||||
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "PLAN.md"), []byte(plan.String()), 0644)
|
||||
}
|
||||
|
||||
// --- Helpers (unchanged) ---
|
||||
|
||||
func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) int {
|
||||
if s.forgeToken == "" {
|
||||
return 0
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/wiki/pages", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil || resp.StatusCode != 200 {
|
||||
return 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var pages []struct {
|
||||
Title string `json:"title"`
|
||||
SubURL string `json:"sub_url"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&pages)
|
||||
|
||||
count := 0
|
||||
for _, page := range pages {
|
||||
subURL := page.SubURL
|
||||
if subURL == "" {
|
||||
subURL = page.Title
|
||||
}
|
||||
|
||||
pageURL := fmt.Sprintf("%s/api/v1/repos/%s/%s/wiki/page/%s", s.forgeURL, org, repo, subURL)
|
||||
pageReq, _ := http.NewRequestWithContext(ctx, "GET", pageURL, nil)
|
||||
pageReq.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
pageResp, err := s.client.Do(pageReq)
|
||||
if err != nil || pageResp.StatusCode != 200 {
|
||||
continue
|
||||
}
|
||||
|
||||
var pageData struct {
|
||||
ContentBase64 string `json:"content_base64"`
|
||||
}
|
||||
json.NewDecoder(pageResp.Body).Decode(&pageData)
|
||||
pageResp.Body.Close()
|
||||
|
||||
if pageData.ContentBase64 == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
content, _ := base64.StdEncoding.DecodeString(pageData.ContentBase64)
|
||||
filename := strings.Map(func(r rune) rune {
|
||||
if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' {
|
||||
return r
|
||||
}
|
||||
return '-'
|
||||
}, page.Title) + ".md"
|
||||
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "kb", filename), content, 0644)
|
||||
count++
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) copySpecs(wsDir string) int {
|
||||
specFiles := []string{"AGENT_CONTEXT.md", "TASK_PROTOCOL.md"}
|
||||
count := 0
|
||||
|
||||
for _, file := range specFiles {
|
||||
src := filepath.Join(s.specsPath, file)
|
||||
if data, err := os.ReadFile(src); err == nil {
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "specs", file), data, 0644)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) generateContext(ctx context.Context, repo, wsDir string) int {
|
||||
if s.brainKey == "" {
|
||||
return 0
|
||||
}
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"query": "architecture conventions key interfaces for " + repo,
|
||||
"top_k": 10,
|
||||
"project": repo,
|
||||
"agent_id": "cladius",
|
||||
})
|
||||
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", s.brainURL+"/v1/brain/recall", strings.NewReader(string(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Authorization", "Bearer "+s.brainKey)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil || resp.StatusCode != 200 {
|
||||
return 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respData, _ := io.ReadAll(resp.Body)
|
||||
var result struct {
|
||||
Memories []map[string]any `json:"memories"`
|
||||
}
|
||||
json.Unmarshal(respData, &result)
|
||||
|
||||
var content strings.Builder
|
||||
content.WriteString("# Context — " + repo + "\n\n")
|
||||
content.WriteString("> Relevant knowledge from OpenBrain.\n\n")
|
||||
|
||||
for i, mem := range result.Memories {
|
||||
memType, _ := mem["type"].(string)
|
||||
memContent, _ := mem["content"].(string)
|
||||
memProject, _ := mem["project"].(string)
|
||||
score, _ := mem["score"].(float64)
|
||||
content.WriteString(fmt.Sprintf("### %d. %s [%s] (score: %.3f)\n\n%s\n\n", i+1, memProject, memType, score, memContent))
|
||||
}
|
||||
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "CONTEXT.md"), []byte(content.String()), 0644)
|
||||
return len(result.Memories)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) findConsumers(repo, wsDir string) int {
|
||||
goWorkPath := filepath.Join(s.codePath, "go.work")
|
||||
modulePath := "forge.lthn.ai/core/" + repo
|
||||
|
||||
workData, err := os.ReadFile(goWorkPath)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
var consumers []string
|
||||
for _, line := range strings.Split(string(workData), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if !strings.HasPrefix(line, "./") {
|
||||
continue
|
||||
}
|
||||
dir := filepath.Join(s.codePath, strings.TrimPrefix(line, "./"))
|
||||
goMod := filepath.Join(dir, "go.mod")
|
||||
modData, err := os.ReadFile(goMod)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(string(modData), modulePath) && !strings.HasPrefix(string(modData), "module "+modulePath) {
|
||||
consumers = append(consumers, filepath.Base(dir))
|
||||
}
|
||||
}
|
||||
|
||||
if len(consumers) > 0 {
|
||||
content := "# Consumers of " + repo + "\n\n"
|
||||
content += "These modules import `" + modulePath + "`:\n\n"
|
||||
for _, c := range consumers {
|
||||
content += "- " + c + "\n"
|
||||
}
|
||||
content += fmt.Sprintf("\n**Breaking change risk: %d consumers.**\n", len(consumers))
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "CONSUMERS.md"), []byte(content), 0644)
|
||||
}
|
||||
|
||||
return len(consumers)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) gitLog(repoPath, wsDir string) int {
|
||||
cmd := exec.Command("git", "log", "--oneline", "-20")
|
||||
cmd.Dir = repoPath
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
if len(lines) > 0 && lines[0] != "" {
|
||||
content := "# Recent Changes\n\n```\n" + string(output) + "```\n"
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "RECENT.md"), []byte(content), 0644)
|
||||
}
|
||||
|
||||
return len(lines)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) generateTodo(ctx context.Context, org, repo string, issue int, wsDir string) {
|
||||
if s.forgeToken == "" {
|
||||
return
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue)
|
||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil || resp.StatusCode != 200 {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var issueData struct {
|
||||
Title string `json:"title"`
|
||||
Body string `json:"body"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&issueData)
|
||||
|
||||
content := fmt.Sprintf("# TASK: %s\n\n", issueData.Title)
|
||||
content += fmt.Sprintf("**Status:** ready\n")
|
||||
content += fmt.Sprintf("**Source:** %s/%s/%s/issues/%d\n", s.forgeURL, org, repo, issue)
|
||||
content += fmt.Sprintf("**Repo:** %s/%s\n\n---\n\n", org, repo)
|
||||
content += "## Objective\n\n" + issueData.Body + "\n"
|
||||
|
||||
os.WriteFile(filepath.Join(wsDir, "src", "TODO.md"), []byte(content), 0644)
|
||||
}
|
||||
251
pkg/agentic/queue.go
Normal file
251
pkg/agentic/queue.go
Normal file
|
|
@ -0,0 +1,251 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// DispatchConfig controls agent dispatch behaviour.
type DispatchConfig struct {
	DefaultAgent    string `yaml:"default_agent"`    // agent used when none is specified (defaults to "claude")
	DefaultTemplate string `yaml:"default_template"` // prep template used when none is specified (defaults to "coding")
	WorkspaceRoot   string `yaml:"workspace_root"`   // workspace root directory — NOTE(review): not read by visible code; confirm usage
}

// RateConfig controls pacing between task dispatches.
type RateConfig struct {
	ResetUTC       string `yaml:"reset_utc"`       // Daily quota reset time (UTC), e.g. "06:00"
	DailyLimit     int    `yaml:"daily_limit"`     // Max requests per day (0 = unknown)
	MinDelay       int    `yaml:"min_delay"`       // Minimum seconds between task starts
	SustainedDelay int    `yaml:"sustained_delay"` // Delay when pacing for full-day use
	BurstWindow    int    `yaml:"burst_window"`    // Hours before reset where burst kicks in
	BurstDelay     int    `yaml:"burst_delay"`     // Delay during burst window
}

// AgentsConfig is the root of config/agents.yaml.
type AgentsConfig struct {
	Version     int                   `yaml:"version"`     // config schema version
	Dispatch    DispatchConfig        `yaml:"dispatch"`    // dispatch defaults
	Concurrency map[string]int        `yaml:"concurrency"` // max simultaneous runs per base agent type
	Rates       map[string]RateConfig `yaml:"rates"`       // per-agent pacing rules, keyed by agent name
}
|
||||
|
||||
// loadAgentsConfig reads config/agents.yaml from the code path.
|
||||
func (s *PrepSubsystem) loadAgentsConfig() *AgentsConfig {
|
||||
paths := []string{
|
||||
filepath.Join(s.codePath, "core", "agent", "config", "agents.yaml"),
|
||||
filepath.Join(s.codePath, "core", "agent", ".core", "agents.yaml"),
|
||||
filepath.Join(s.codePath, "host-uk", "core", ".core", "agents.yaml"),
|
||||
}
|
||||
|
||||
for _, path := range paths {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var cfg AgentsConfig
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
continue
|
||||
}
|
||||
return &cfg
|
||||
}
|
||||
|
||||
return &AgentsConfig{
|
||||
Dispatch: DispatchConfig{
|
||||
DefaultAgent: "claude",
|
||||
DefaultTemplate: "coding",
|
||||
},
|
||||
Concurrency: map[string]int{
|
||||
"claude": 1,
|
||||
"gemini": 3,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// delayForAgent calculates how long to wait before spawning the next task
|
||||
// for a given agent type, based on rate config and time of day.
|
||||
func (s *PrepSubsystem) delayForAgent(agent string) time.Duration {
|
||||
cfg := s.loadAgentsConfig()
|
||||
rate, ok := cfg.Rates[agent]
|
||||
if !ok || rate.SustainedDelay == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Parse reset time
|
||||
resetHour, resetMin := 6, 0
|
||||
fmt.Sscanf(rate.ResetUTC, "%d:%d", &resetHour, &resetMin)
|
||||
|
||||
now := time.Now().UTC()
|
||||
resetToday := time.Date(now.Year(), now.Month(), now.Day(), resetHour, resetMin, 0, 0, time.UTC)
|
||||
if now.Before(resetToday) {
|
||||
// Reset hasn't happened yet today — reset was yesterday
|
||||
resetToday = resetToday.AddDate(0, 0, -1)
|
||||
}
|
||||
nextReset := resetToday.AddDate(0, 0, 1)
|
||||
hoursUntilReset := nextReset.Sub(now).Hours()
|
||||
|
||||
// Burst mode: if within burst window of reset, use burst delay
|
||||
if rate.BurstWindow > 0 && hoursUntilReset <= float64(rate.BurstWindow) {
|
||||
return time.Duration(rate.BurstDelay) * time.Second
|
||||
}
|
||||
|
||||
// Sustained mode
|
||||
return time.Duration(rate.SustainedDelay) * time.Second
|
||||
}
|
||||
|
||||
// countRunningByAgent counts running workspaces for a specific agent type.
|
||||
func (s *PrepSubsystem) countRunningByAgent(agent string) int {
|
||||
home, _ := os.UserHomeDir()
|
||||
wsRoot := filepath.Join(home, "Code", "host-uk", "core", ".core", "workspace")
|
||||
|
||||
entries, err := os.ReadDir(wsRoot)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
count := 0
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
st, err := readStatus(filepath.Join(wsRoot, entry.Name()))
|
||||
if err != nil || st.Status != "running" {
|
||||
continue
|
||||
}
|
||||
// Match on base agent type (gemini:flash matches gemini)
|
||||
stBase := strings.SplitN(st.Agent, ":", 2)[0]
|
||||
if stBase != agent {
|
||||
continue
|
||||
}
|
||||
|
||||
if st.PID > 0 {
|
||||
proc, err := os.FindProcess(st.PID)
|
||||
if err == nil && proc.Signal(syscall.Signal(0)) == nil {
|
||||
count++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// baseAgent strips the model variant from an agent name: "gemini:flash"
// becomes "gemini"; names without a variant are returned unchanged.
func baseAgent(agent string) string {
	base, _, _ := strings.Cut(agent, ":")
	return base
}
|
||||
|
||||
// canDispatchAgent checks if we're under the concurrency limit for a specific agent type.
|
||||
func (s *PrepSubsystem) canDispatchAgent(agent string) bool {
|
||||
cfg := s.loadAgentsConfig()
|
||||
base := baseAgent(agent)
|
||||
limit, ok := cfg.Concurrency[base]
|
||||
if !ok || limit <= 0 {
|
||||
return true
|
||||
}
|
||||
return s.countRunningByAgent(base) < limit
|
||||
}
|
||||
|
||||
// canDispatch is kept for backwards compat.
// It always reports true; per-agent limits are enforced by canDispatchAgent.
func (s *PrepSubsystem) canDispatch() bool {
	return true
}
|
||||
|
||||
// drainQueue finds the oldest queued workspace and spawns it if a slot is available.
|
||||
// Applies rate-based delay between spawns.
|
||||
func (s *PrepSubsystem) drainQueue() {
|
||||
home, _ := os.UserHomeDir()
|
||||
wsRoot := filepath.Join(home, "Code", "host-uk", "core", ".core", "workspace")
|
||||
|
||||
entries, err := os.ReadDir(wsRoot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
wsDir := filepath.Join(wsRoot, entry.Name())
|
||||
st, err := readStatus(wsDir)
|
||||
if err != nil || st.Status != "queued" {
|
||||
continue
|
||||
}
|
||||
|
||||
if !s.canDispatchAgent(st.Agent) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Apply rate delay before spawning
|
||||
delay := s.delayForAgent(st.Agent)
|
||||
if delay > 0 {
|
||||
time.Sleep(delay)
|
||||
}
|
||||
|
||||
// Re-check concurrency after delay (another task may have started)
|
||||
if !s.canDispatchAgent(st.Agent) {
|
||||
continue
|
||||
}
|
||||
|
||||
srcDir := filepath.Join(wsDir, "src")
|
||||
prompt := "Read PROMPT.md for instructions. All context files (CLAUDE.md, TODO.md, CONTEXT.md, CONSUMERS.md, RECENT.md) are in the parent directory. Work in this directory."
|
||||
|
||||
command, args, err := agentCommand(st.Agent, prompt)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
outputFile := filepath.Join(wsDir, fmt.Sprintf("agent-%s.log", st.Agent))
|
||||
outFile, err := os.Create(outputFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
devNull, _ := os.Open(os.DevNull)
|
||||
cmd := exec.Command(command, args...)
|
||||
cmd.Dir = srcDir
|
||||
cmd.Stdin = devNull
|
||||
cmd.Stdout = outFile
|
||||
cmd.Stderr = outFile
|
||||
cmd.Env = append(os.Environ(), "TERM=dumb", "NO_COLOR=1", "CI=true")
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
outFile.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
st.Status = "running"
|
||||
st.PID = cmd.Process.Pid
|
||||
st.Runs++
|
||||
writeStatus(wsDir, st)
|
||||
|
||||
go func() {
|
||||
cmd.Wait()
|
||||
outFile.Close()
|
||||
|
||||
if st2, err := readStatus(wsDir); err == nil {
|
||||
st2.Status = "completed"
|
||||
st2.PID = 0
|
||||
writeStatus(wsDir, st2)
|
||||
}
|
||||
|
||||
// Ingest scan findings as issues
|
||||
s.ingestFindings(wsDir)
|
||||
|
||||
s.drainQueue()
|
||||
}()
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
138
pkg/agentic/resume.go
Normal file
138
pkg/agentic/resume.go
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// ResumeInput is the input for agentic_resume.
type ResumeInput struct {
	Workspace string `json:"workspace"`           // workspace name (e.g. "go-scm-1773581173"); required
	Answer    string `json:"answer,omitempty"`    // answer to the blocked question (written to ANSWER.md)
	Agent     string `json:"agent,omitempty"`     // override agent type (default: same as original)
	DryRun    bool   `json:"dry_run,omitempty"`   // preview the resume prompt without executing
}

// ResumeOutput is the output for agentic_resume.
type ResumeOutput struct {
	Success    bool   `json:"success"`               // true on successful (or dry-run) resume
	Workspace  string `json:"workspace"`             // echoed workspace name
	Agent      string `json:"agent"`                 // agent that was (or would be) launched
	PID        int    `json:"pid,omitempty"`         // spawned process ID (absent on dry run)
	OutputFile string `json:"output_file,omitempty"` // path to the agent's log file
	Prompt     string `json:"prompt,omitempty"`      // resume prompt (populated on dry run)
}
|
||||
|
||||
func (s *PrepSubsystem) registerResumeTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
Name: "agentic_resume",
|
||||
Description: "Resume a blocked agent workspace. Writes ANSWER.md if an answer is provided, then relaunches the agent with instructions to read it and continue.",
|
||||
}, s.resume)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, input ResumeInput) (*mcp.CallToolResult, ResumeOutput, error) {
|
||||
if input.Workspace == "" {
|
||||
return nil, ResumeOutput{}, fmt.Errorf("workspace is required")
|
||||
}
|
||||
|
||||
home, _ := os.UserHomeDir()
|
||||
wsDir := filepath.Join(home, "Code", "host-uk", "core", ".core", "workspace", input.Workspace)
|
||||
srcDir := filepath.Join(wsDir, "src")
|
||||
|
||||
// Verify workspace exists
|
||||
if _, err := os.Stat(srcDir); err != nil {
|
||||
return nil, ResumeOutput{}, fmt.Errorf("workspace not found: %s", input.Workspace)
|
||||
}
|
||||
|
||||
// Read current status
|
||||
st, err := readStatus(wsDir)
|
||||
if err != nil {
|
||||
return nil, ResumeOutput{}, fmt.Errorf("no status.json in workspace: %w", err)
|
||||
}
|
||||
|
||||
if st.Status != "blocked" && st.Status != "failed" && st.Status != "completed" {
|
||||
return nil, ResumeOutput{}, fmt.Errorf("workspace is %s, not resumable (must be blocked, failed, or completed)", st.Status)
|
||||
}
|
||||
|
||||
// Determine agent
|
||||
agent := st.Agent
|
||||
if input.Agent != "" {
|
||||
agent = input.Agent
|
||||
}
|
||||
|
||||
// Write ANSWER.md if answer provided
|
||||
if input.Answer != "" {
|
||||
answerPath := filepath.Join(srcDir, "ANSWER.md")
|
||||
content := fmt.Sprintf("# Answer\n\n%s\n", input.Answer)
|
||||
if err := os.WriteFile(answerPath, []byte(content), 0644); err != nil {
|
||||
return nil, ResumeOutput{}, fmt.Errorf("failed to write ANSWER.md: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Build resume prompt
|
||||
prompt := "You are resuming previous work in this workspace. "
|
||||
if input.Answer != "" {
|
||||
prompt += "Read ANSWER.md for the response to your question. "
|
||||
}
|
||||
prompt += "Read PROMPT.md for the original task. Read BLOCKED.md to see what you were stuck on. Continue working."
|
||||
|
||||
if input.DryRun {
|
||||
return nil, ResumeOutput{
|
||||
Success: true,
|
||||
Workspace: input.Workspace,
|
||||
Agent: agent,
|
||||
Prompt: prompt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Spawn agent as detached process (survives parent death)
|
||||
outputFile := filepath.Join(wsDir, fmt.Sprintf("agent-%s-run%d.log", agent, st.Runs+1))
|
||||
|
||||
command, args, err := agentCommand(agent, prompt)
|
||||
if err != nil {
|
||||
return nil, ResumeOutput{}, err
|
||||
}
|
||||
|
||||
devNull, _ := os.Open(os.DevNull)
|
||||
outFile, _ := os.Create(outputFile)
|
||||
cmd := exec.Command(command, args...)
|
||||
cmd.Dir = srcDir
|
||||
cmd.Stdin = devNull
|
||||
cmd.Stdout = outFile
|
||||
cmd.Stderr = outFile
|
||||
cmd.Env = append(os.Environ(), "TERM=dumb", "NO_COLOR=1", "CI=true")
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
outFile.Close()
|
||||
return nil, ResumeOutput{}, fmt.Errorf("failed to spawn %s: %w", agent, err)
|
||||
}
|
||||
|
||||
// Update status
|
||||
st.Status = "running"
|
||||
st.PID = cmd.Process.Pid
|
||||
st.Runs++
|
||||
st.Question = ""
|
||||
writeStatus(wsDir, st)
|
||||
|
||||
go func() {
|
||||
cmd.Wait()
|
||||
outFile.Close()
|
||||
}()
|
||||
|
||||
return nil, ResumeOutput{
|
||||
Success: true,
|
||||
Workspace: input.Workspace,
|
||||
Agent: agent,
|
||||
PID: cmd.Process.Pid,
|
||||
OutputFile: outputFile,
|
||||
}, nil
|
||||
}
|
||||
171
pkg/agentic/scan.go
Normal file
171
pkg/agentic/scan.go
Normal file
|
|
@ -0,0 +1,171 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// ScanInput is the input for agentic_scan.
type ScanInput struct {
	Org    string   `json:"org,omitempty"`    // default "core"
	Labels []string `json:"labels,omitempty"` // filter by labels (default: agentic, help-wanted, bug)
	Limit  int      `json:"limit,omitempty"`  // max issues to return (default 20)
}

// ScanOutput is the output for agentic_scan.
type ScanOutput struct {
	Success bool        `json:"success"` // true when the scan completed
	Count   int         `json:"count"`   // number of unique issues returned
	Issues  []ScanIssue `json:"issues"`  // deduplicated, limit-capped issue list
}

// ScanIssue is a single actionable issue.
type ScanIssue struct {
	Repo     string   `json:"repo"`               // repository name within the org
	Number   int      `json:"number"`             // issue number
	Title    string   `json:"title"`              // issue title
	Labels   []string `json:"labels"`             // label names on the issue
	Assignee string   `json:"assignee,omitempty"` // assignee login, if any
	URL      string   `json:"url"`                // issue URL, rewritten to the configured forge base
}
|
||||
|
||||
func (s *PrepSubsystem) scan(ctx context.Context, _ *mcp.CallToolRequest, input ScanInput) (*mcp.CallToolResult, ScanOutput, error) {
|
||||
if s.forgeToken == "" {
|
||||
return nil, ScanOutput{}, fmt.Errorf("no Forge token configured")
|
||||
}
|
||||
|
||||
if input.Org == "" {
|
||||
input.Org = "core"
|
||||
}
|
||||
if input.Limit == 0 {
|
||||
input.Limit = 20
|
||||
}
|
||||
if len(input.Labels) == 0 {
|
||||
input.Labels = []string{"agentic", "help-wanted", "bug"}
|
||||
}
|
||||
|
||||
var allIssues []ScanIssue
|
||||
|
||||
// Get repos for the org
|
||||
repos, err := s.listOrgRepos(ctx, input.Org)
|
||||
if err != nil {
|
||||
return nil, ScanOutput{}, err
|
||||
}
|
||||
|
||||
for _, repo := range repos {
|
||||
for _, label := range input.Labels {
|
||||
issues, err := s.listRepoIssues(ctx, input.Org, repo, label)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
allIssues = append(allIssues, issues...)
|
||||
|
||||
if len(allIssues) >= input.Limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(allIssues) >= input.Limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Deduplicate by repo+number
|
||||
seen := make(map[string]bool)
|
||||
var unique []ScanIssue
|
||||
for _, issue := range allIssues {
|
||||
key := fmt.Sprintf("%s#%d", issue.Repo, issue.Number)
|
||||
if !seen[key] {
|
||||
seen[key] = true
|
||||
unique = append(unique, issue)
|
||||
}
|
||||
}
|
||||
|
||||
if len(unique) > input.Limit {
|
||||
unique = unique[:input.Limit]
|
||||
}
|
||||
|
||||
return nil, ScanOutput{
|
||||
Success: true,
|
||||
Count: len(unique),
|
||||
Issues: unique,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) listOrgRepos(ctx context.Context, org string) ([]string, error) {
|
||||
url := fmt.Sprintf("%s/api/v1/orgs/%s/repos?limit=50", s.forgeURL, org)
|
||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil || resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("failed to list repos: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var repos []struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&repos)
|
||||
|
||||
var names []string
|
||||
for _, r := range repos {
|
||||
names = append(names, r.Name)
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label string) ([]ScanIssue, error) {
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues?state=open&labels=%s&limit=10&type=issues",
|
||||
s.forgeURL, org, repo, label)
|
||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil || resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("failed to list issues for %s: %v", repo, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var issues []struct {
|
||||
Number int `json:"number"`
|
||||
Title string `json:"title"`
|
||||
Labels []struct {
|
||||
Name string `json:"name"`
|
||||
} `json:"labels"`
|
||||
Assignee *struct {
|
||||
Login string `json:"login"`
|
||||
} `json:"assignee"`
|
||||
HTMLURL string `json:"html_url"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&issues)
|
||||
|
||||
var result []ScanIssue
|
||||
for _, issue := range issues {
|
||||
var labels []string
|
||||
for _, l := range issue.Labels {
|
||||
labels = append(labels, l.Name)
|
||||
}
|
||||
assignee := ""
|
||||
if issue.Assignee != nil {
|
||||
assignee = issue.Assignee.Login
|
||||
}
|
||||
|
||||
result = append(result, ScanIssue{
|
||||
Repo: repo,
|
||||
Number: issue.Number,
|
||||
Title: issue.Title,
|
||||
Labels: labels,
|
||||
Assignee: assignee,
|
||||
URL: strings.Replace(issue.HTMLURL, "https://forge.lthn.ai", s.forgeURL, 1),
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
177
pkg/agentic/status.go
Normal file
177
pkg/agentic/status.go
Normal file
|
|
@ -0,0 +1,177 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"syscall"
	"time"

	"github.com/modelcontextprotocol/go-sdk/mcp"
)
|
||||
|
||||
// Workspace status file convention:
//
//	{workspace}/status.json — current state of the workspace
//	{workspace}/BLOCKED.md  — question the agent needs answered (written by agent)
//	{workspace}/ANSWER.md   — response from human (written by reviewer)
//
// Status lifecycle:
//	running → completed  (normal finish)
//	running → blocked    (agent wrote BLOCKED.md and exited)
//	blocked → running    (resume after ANSWER.md provided)
//	running → failed     (agent crashed / non-zero exit)

// WorkspaceStatus represents the current state of an agent workspace,
// persisted as {workspace}/status.json.
type WorkspaceStatus struct {
	Status    string    `json:"status"`              // running, completed, blocked, failed
	Agent     string    `json:"agent"`               // gemini, claude, codex (may carry a ":variant" suffix)
	Repo      string    `json:"repo"`                // target repo
	Org       string    `json:"org,omitempty"`       // forge org (e.g. "core")
	Task      string    `json:"task"`                // task description
	Branch    string    `json:"branch,omitempty"`    // git branch name
	Issue     int       `json:"issue,omitempty"`     // forge issue number
	PID       int       `json:"pid,omitempty"`       // process ID (if running)
	StartedAt time.Time `json:"started_at"`          // when dispatch started
	UpdatedAt time.Time `json:"updated_at"`          // last status change (stamped by writeStatus)
	Question  string    `json:"question,omitempty"`  // from BLOCKED.md
	Runs      int       `json:"runs"`                // how many times dispatched/resumed
	PRURL     string    `json:"pr_url,omitempty"`    // pull request URL (after PR created)
}
|
||||
|
||||
func writeStatus(wsDir string, status *WorkspaceStatus) error {
|
||||
status.UpdatedAt = time.Now()
|
||||
data, err := json.MarshalIndent(status, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(filepath.Join(wsDir, "status.json"), data, 0644)
|
||||
}
|
||||
|
||||
func readStatus(wsDir string) (*WorkspaceStatus, error) {
|
||||
data, err := os.ReadFile(filepath.Join(wsDir, "status.json"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var s WorkspaceStatus
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &s, nil
|
||||
}
|
||||
|
||||
// --- agentic_status tool ---

// StatusInput is the input for agentic_status.
type StatusInput struct {
	Workspace string `json:"workspace,omitempty"` // specific workspace name, or empty for all
}

// StatusOutput is the output for agentic_status.
type StatusOutput struct {
	Workspaces []WorkspaceInfo `json:"workspaces"` // one entry per matching workspace
	Count      int             `json:"count"`      // len(Workspaces)
}

// WorkspaceInfo is the per-workspace summary returned by agentic_status.
type WorkspaceInfo struct {
	Name     string `json:"name"`               // workspace directory name
	Status   string `json:"status"`             // running, completed, blocked, failed, unknown
	Agent    string `json:"agent"`              // agent type from status.json
	Repo     string `json:"repo"`               // target repo
	Task     string `json:"task"`               // task description
	Age      string `json:"age"`                // time since start (minute resolution)
	Question string `json:"question,omitempty"` // blocked question, when status is blocked
	Runs     int    `json:"runs"`               // dispatch/resume count
}
|
||||
|
||||
func (s *PrepSubsystem) registerStatusTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
Name: "agentic_status",
|
||||
Description: "List agent workspaces and their status (running, completed, blocked, failed). Shows blocked agents with their questions.",
|
||||
}, s.status)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, input StatusInput) (*mcp.CallToolResult, StatusOutput, error) {
|
||||
home, _ := os.UserHomeDir()
|
||||
wsRoot := filepath.Join(home, "Code", "host-uk", "core", ".core", "workspace")
|
||||
|
||||
entries, err := os.ReadDir(wsRoot)
|
||||
if err != nil {
|
||||
return nil, StatusOutput{}, fmt.Errorf("no workspaces found: %w", err)
|
||||
}
|
||||
|
||||
var workspaces []WorkspaceInfo
|
||||
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
name := entry.Name()
|
||||
|
||||
// Filter by specific workspace if requested
|
||||
if input.Workspace != "" && name != input.Workspace {
|
||||
continue
|
||||
}
|
||||
|
||||
wsDir := filepath.Join(wsRoot, name)
|
||||
info := WorkspaceInfo{Name: name}
|
||||
|
||||
// Try reading status.json
|
||||
st, err := readStatus(wsDir)
|
||||
if err != nil {
|
||||
// Legacy workspace (no status.json) — check for log file
|
||||
logFiles, _ := filepath.Glob(filepath.Join(wsDir, "agent-*.log"))
|
||||
if len(logFiles) > 0 {
|
||||
info.Status = "completed"
|
||||
} else {
|
||||
info.Status = "unknown"
|
||||
}
|
||||
fi, _ := entry.Info()
|
||||
if fi != nil {
|
||||
info.Age = time.Since(fi.ModTime()).Truncate(time.Minute).String()
|
||||
}
|
||||
workspaces = append(workspaces, info)
|
||||
continue
|
||||
}
|
||||
|
||||
info.Status = st.Status
|
||||
info.Agent = st.Agent
|
||||
info.Repo = st.Repo
|
||||
info.Task = st.Task
|
||||
info.Runs = st.Runs
|
||||
info.Age = time.Since(st.StartedAt).Truncate(time.Minute).String()
|
||||
|
||||
// If status is "running", check if PID is still alive
|
||||
if st.Status == "running" && st.PID > 0 {
|
||||
proc, err := os.FindProcess(st.PID)
|
||||
if err != nil || proc.Signal(nil) != nil {
|
||||
// Process died — check for BLOCKED.md
|
||||
blockedPath := filepath.Join(wsDir, "src", "BLOCKED.md")
|
||||
if data, err := os.ReadFile(blockedPath); err == nil {
|
||||
info.Status = "blocked"
|
||||
info.Question = strings.TrimSpace(string(data))
|
||||
st.Status = "blocked"
|
||||
st.Question = info.Question
|
||||
} else {
|
||||
info.Status = "completed"
|
||||
st.Status = "completed"
|
||||
}
|
||||
writeStatus(wsDir, st)
|
||||
}
|
||||
}
|
||||
|
||||
if st.Status == "blocked" {
|
||||
info.Question = st.Question
|
||||
}
|
||||
|
||||
workspaces = append(workspaces, info)
|
||||
}
|
||||
|
||||
return nil, StatusOutput{
|
||||
Workspaces: workspaces,
|
||||
Count: len(workspaces),
|
||||
}, nil
|
||||
}
|
||||
42
pkg/brain/brain.go
Normal file
42
pkg/brain/brain.go
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2

// Package brain provides an MCP subsystem that proxies OpenBrain knowledge
// store operations to the Laravel php-agentic backend via the IDE bridge.
package brain

import (
	"context"
	"errors"

	"forge.lthn.ai/core/mcp/pkg/mcp/ide"
	"github.com/modelcontextprotocol/go-sdk/mcp"
)

// errBridgeNotAvailable is returned when a tool requires the Laravel bridge
// but it has not been initialised (headless mode).
var errBridgeNotAvailable = errors.New("brain: bridge not available")

// Subsystem implements mcp.Subsystem for OpenBrain knowledge store operations.
// It proxies brain_* tool calls to the Laravel backend via the shared IDE bridge.
type Subsystem struct {
	// bridge is the WebSocket bridge to the Laravel backend; nil in headless
	// mode, in which case every tool handler returns errBridgeNotAvailable.
	bridge *ide.Bridge
}

// New creates a brain subsystem that uses the given IDE bridge for Laravel communication.
// Pass nil if headless (tools will return errBridgeNotAvailable).
func New(bridge *ide.Bridge) *Subsystem {
	return &Subsystem{bridge: bridge}
}

// Name implements mcp.Subsystem.
func (s *Subsystem) Name() string { return "brain" }

// RegisterTools implements mcp.Subsystem.
// The actual tool definitions live in tools.go (registerBrainTools).
func (s *Subsystem) RegisterTools(server *mcp.Server) {
	s.registerBrainTools(server)
}

// Shutdown implements mcp.SubsystemWithShutdown.
// The subsystem holds no resources of its own (the bridge is owned by the
// caller), so this is a no-op.
func (s *Subsystem) Shutdown(_ context.Context) error {
	return nil
}
|
||||
229
pkg/brain/brain_test.go
Normal file
229
pkg/brain/brain_test.go
Normal file
|
|
@ -0,0 +1,229 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2

package brain

import (
	"context"
	"encoding/json"
	"testing"
	"time"
)

// roundTrip marshals src to JSON and decodes the bytes back into dst,
// failing the test on either error. Shared by the round-trip tests below.
func roundTrip(t *testing.T, src, dst any) {
	t.Helper()
	raw, err := json.Marshal(src)
	if err != nil {
		t.Fatalf("marshal failed: %v", err)
	}
	if err := json.Unmarshal(raw, dst); err != nil {
		t.Fatalf("unmarshal failed: %v", err)
	}
}

// --- Nil bridge tests (headless mode) ---

func TestBrainRemember_Bad_NilBridge(t *testing.T) {
	s := New(nil)
	if _, _, err := s.brainRemember(context.Background(), nil, RememberInput{
		Content: "test memory",
		Type:    "observation",
	}); err == nil {
		t.Error("expected error when bridge is nil")
	}
}

func TestBrainRecall_Bad_NilBridge(t *testing.T) {
	s := New(nil)
	if _, _, err := s.brainRecall(context.Background(), nil, RecallInput{
		Query: "how does scoring work?",
	}); err == nil {
		t.Error("expected error when bridge is nil")
	}
}

func TestBrainForget_Bad_NilBridge(t *testing.T) {
	s := New(nil)
	if _, _, err := s.brainForget(context.Background(), nil, ForgetInput{
		ID: "550e8400-e29b-41d4-a716-446655440000",
	}); err == nil {
		t.Error("expected error when bridge is nil")
	}
}

func TestBrainList_Bad_NilBridge(t *testing.T) {
	s := New(nil)
	if _, _, err := s.brainList(context.Background(), nil, ListInput{
		Project: "eaas",
	}); err == nil {
		t.Error("expected error when bridge is nil")
	}
}

// --- Subsystem interface tests ---

func TestSubsystem_Good_Name(t *testing.T) {
	s := New(nil)
	if got := s.Name(); got != "brain" {
		t.Errorf("expected Name() = 'brain', got %q", got)
	}
}

func TestSubsystem_Good_ShutdownNoop(t *testing.T) {
	s := New(nil)
	if err := s.Shutdown(context.Background()); err != nil {
		t.Errorf("Shutdown failed: %v", err)
	}
}

// --- Struct round-trip tests ---

func TestRememberInput_Good_RoundTrip(t *testing.T) {
	want := RememberInput{
		Content:    "LEM scoring was blind to negative emotions",
		Type:       "bug",
		Tags:       []string{"scoring", "lem"},
		Project:    "eaas",
		Confidence: 0.95,
		Supersedes: "550e8400-e29b-41d4-a716-446655440000",
		ExpiresIn:  24,
	}
	var got RememberInput
	roundTrip(t, want, &got)
	if got.Content != want.Content || got.Type != want.Type {
		t.Errorf("round-trip mismatch: content or type")
	}
	if len(got.Tags) != 2 || got.Tags[0] != "scoring" {
		t.Errorf("round-trip mismatch: tags")
	}
	if got.Confidence != 0.95 {
		t.Errorf("round-trip mismatch: confidence %f != 0.95", got.Confidence)
	}
}

func TestRememberOutput_Good_RoundTrip(t *testing.T) {
	want := RememberOutput{
		Success:   true,
		MemoryID:  "550e8400-e29b-41d4-a716-446655440000",
		Timestamp: time.Now().Truncate(time.Second),
	}
	var got RememberOutput
	roundTrip(t, want, &got)
	if !got.Success || got.MemoryID != want.MemoryID {
		t.Errorf("round-trip mismatch: %+v != %+v", got, want)
	}
}

func TestRecallInput_Good_RoundTrip(t *testing.T) {
	want := RecallInput{
		Query: "how does verdict classification work?",
		TopK:  5,
		Filter: RecallFilter{
			Project:       "eaas",
			MinConfidence: 0.5,
		},
	}
	var got RecallInput
	roundTrip(t, want, &got)
	if got.Query != want.Query || got.TopK != 5 {
		t.Errorf("round-trip mismatch: query or topK")
	}
	if got.Filter.Project != "eaas" || got.Filter.MinConfidence != 0.5 {
		t.Errorf("round-trip mismatch: filter")
	}
}

func TestMemory_Good_RoundTrip(t *testing.T) {
	want := Memory{
		ID:         "550e8400-e29b-41d4-a716-446655440000",
		AgentID:    "virgil",
		Type:       "decision",
		Content:    "Use Qdrant for vector search",
		Tags:       []string{"architecture", "openbrain"},
		Project:    "php-agentic",
		Confidence: 0.9,
		CreatedAt:  "2026-03-03T12:00:00+00:00",
		UpdatedAt:  "2026-03-03T12:00:00+00:00",
	}
	var got Memory
	roundTrip(t, want, &got)
	if got.ID != want.ID || got.AgentID != "virgil" || got.Type != "decision" {
		t.Errorf("round-trip mismatch: %+v", got)
	}
}

func TestForgetInput_Good_RoundTrip(t *testing.T) {
	want := ForgetInput{
		ID:     "550e8400-e29b-41d4-a716-446655440000",
		Reason: "Superseded by new approach",
	}
	var got ForgetInput
	roundTrip(t, want, &got)
	if got.ID != want.ID || got.Reason != want.Reason {
		t.Errorf("round-trip mismatch: %+v != %+v", got, want)
	}
}

func TestListInput_Good_RoundTrip(t *testing.T) {
	want := ListInput{
		Project: "eaas",
		Type:    "decision",
		AgentID: "charon",
		Limit:   20,
	}
	var got ListInput
	roundTrip(t, want, &got)
	if got.Project != "eaas" || got.Type != "decision" || got.AgentID != "charon" || got.Limit != 20 {
		t.Errorf("round-trip mismatch: %+v", got)
	}
}

func TestListOutput_Good_RoundTrip(t *testing.T) {
	want := ListOutput{
		Success: true,
		Count:   2,
		Memories: []Memory{
			{ID: "id-1", AgentID: "virgil", Type: "decision", Content: "memory 1", Confidence: 0.9, CreatedAt: "2026-03-03T12:00:00+00:00", UpdatedAt: "2026-03-03T12:00:00+00:00"},
			{ID: "id-2", AgentID: "charon", Type: "bug", Content: "memory 2", Confidence: 0.8, CreatedAt: "2026-03-03T13:00:00+00:00", UpdatedAt: "2026-03-03T13:00:00+00:00"},
		},
	}
	var got ListOutput
	roundTrip(t, want, &got)
	if !got.Success || got.Count != 2 || len(got.Memories) != 2 {
		t.Errorf("round-trip mismatch: %+v", got)
	}
}
|
||||
204
pkg/brain/direct.go
Normal file
204
pkg/brain/direct.go
Normal file
|
|
@ -0,0 +1,204 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2

package brain

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/modelcontextprotocol/go-sdk/mcp"
)

// DirectSubsystem implements mcp.Subsystem for OpenBrain via direct HTTP calls.
// Unlike Subsystem (which uses the IDE WebSocket bridge), this calls the
// Laravel API directly — suitable for standalone core-mcp usage.
type DirectSubsystem struct {
	apiURL string       // base URL of the OpenBrain API
	apiKey string       // bearer token; empty makes every call fail with a setup hint
	client *http.Client // shared HTTP client with a 30s timeout
}

// NewDirect creates a brain subsystem that calls the OpenBrain API directly.
// Reads CORE_BRAIN_URL and CORE_BRAIN_KEY from environment, or falls back
// to ~/.claude/brain.key for the API key.
func NewDirect() *DirectSubsystem {
	apiURL := os.Getenv("CORE_BRAIN_URL")
	if apiURL == "" {
		apiURL = "https://api.lthn.sh"
	}

	apiKey := os.Getenv("CORE_BRAIN_KEY")
	if apiKey == "" {
		// os.UserHomeDir resolves the home directory even when $HOME is
		// unset (e.g. Windows), unlike os.ExpandEnv("$HOME/..."), which
		// would have produced a relative path in that case.
		if home, err := os.UserHomeDir(); err == nil {
			if data, err := os.ReadFile(filepath.Join(home, ".claude", "brain.key")); err == nil {
				apiKey = strings.TrimSpace(string(data))
			}
		}
	}

	return &DirectSubsystem{
		apiURL: apiURL,
		apiKey: apiKey,
		client: &http.Client{Timeout: 30 * time.Second},
	}
}

// Name implements mcp.Subsystem.
func (s *DirectSubsystem) Name() string { return "brain" }

// RegisterTools implements mcp.Subsystem.
func (s *DirectSubsystem) RegisterTools(server *mcp.Server) {
	mcp.AddTool(server, &mcp.Tool{
		Name:        "brain_remember",
		Description: "Store a memory in OpenBrain. Types: fact, decision, observation, plan, convention, architecture, research, documentation, service, bug, pattern, context, procedure.",
	}, s.remember)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "brain_recall",
		Description: "Semantic search across OpenBrain memories. Returns memories ranked by similarity. Use agent_id 'cladius' for Cladius's memories.",
	}, s.recall)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "brain_forget",
		Description: "Remove a memory from OpenBrain by ID.",
	}, s.forget)
}

// Shutdown implements mcp.SubsystemWithShutdown. No-op: the HTTP client
// holds no resources that need explicit teardown.
func (s *DirectSubsystem) Shutdown(_ context.Context) error { return nil }

// apiCall performs one authenticated JSON request against the OpenBrain API
// and decodes the response body into a generic map. body may be nil for
// requests without a payload (e.g. DELETE).
func (s *DirectSubsystem) apiCall(ctx context.Context, method, path string, body any) (map[string]any, error) {
	if s.apiKey == "" {
		return nil, fmt.Errorf("brain: no API key (set CORE_BRAIN_KEY or create ~/.claude/brain.key)")
	}

	var reqBody io.Reader
	if body != nil {
		data, err := json.Marshal(body)
		if err != nil {
			return nil, fmt.Errorf("brain: marshal request: %w", err)
		}
		reqBody = bytes.NewReader(data)
	}

	req, err := http.NewRequestWithContext(ctx, method, s.apiURL+path, reqBody)
	if err != nil {
		return nil, fmt.Errorf("brain: create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Authorization", "Bearer "+s.apiKey)

	resp, err := s.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("brain: API call failed: %w", err)
	}
	defer resp.Body.Close()

	respData, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("brain: read response: %w", err)
	}

	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("brain: API returned %d: %s", resp.StatusCode, string(respData))
	}

	var result map[string]any
	if err := json.Unmarshal(respData, &result); err != nil {
		return nil, fmt.Errorf("brain: parse response: %w", err)
	}

	return result, nil
}

// stringOr renders an arbitrary JSON value as a string, returning "" for
// absent (nil) values instead of the misleading "<nil>" that a bare
// fmt.Sprintf("%v", nil) would produce.
func stringOr(v any) string {
	if v == nil {
		return ""
	}
	if s, ok := v.(string); ok {
		return s
	}
	return fmt.Sprintf("%v", v)
}

// remember stores a memory via POST /v1/brain/remember.
func (s *DirectSubsystem) remember(ctx context.Context, _ *mcp.CallToolRequest, input RememberInput) (*mcp.CallToolResult, RememberOutput, error) {
	body := map[string]any{
		"content":  input.Content,
		"type":     input.Type,
		"tags":     input.Tags,
		"project":  input.Project,
		"agent_id": "cladius",
	}
	// Forward the optional fields that the bridge-backed Subsystem already
	// sends (tools.go); omit zero values so the common-case payload is
	// unchanged from before.
	if input.Confidence != 0 {
		body["confidence"] = input.Confidence
	}
	if input.Supersedes != "" {
		body["supersedes"] = input.Supersedes
	}
	if input.ExpiresIn != 0 {
		body["expires_in"] = input.ExpiresIn
	}

	result, err := s.apiCall(ctx, "POST", "/v1/brain/remember", body)
	if err != nil {
		return nil, RememberOutput{}, err
	}

	id, _ := result["id"].(string)
	return nil, RememberOutput{
		Success:   true,
		MemoryID:  id,
		Timestamp: time.Now(),
	}, nil
}

// recall performs semantic search via POST /v1/brain/recall and maps the
// loosely-typed JSON response into []Memory.
func (s *DirectSubsystem) recall(ctx context.Context, _ *mcp.CallToolRequest, input RecallInput) (*mcp.CallToolResult, RecallOutput, error) {
	topK := input.TopK
	if topK == 0 {
		topK = 10 // default result count when the caller does not specify one
	}
	body := map[string]any{
		"query":    input.Query,
		"top_k":    topK,
		"agent_id": "cladius",
	}
	if input.Filter.Project != "" {
		body["project"] = input.Filter.Project
	}
	if input.Filter.Type != nil {
		body["type"] = input.Filter.Type
	}

	result, err := s.apiCall(ctx, "POST", "/v1/brain/recall", body)
	if err != nil {
		return nil, RecallOutput{}, err
	}

	var memories []Memory
	if mems, ok := result["memories"].([]any); ok {
		for _, m := range mems {
			mm, ok := m.(map[string]any)
			if !ok {
				continue
			}
			mem := Memory{
				Content:   stringOr(mm["content"]),
				Type:      stringOr(mm["type"]),
				Project:   stringOr(mm["project"]),
				AgentID:   stringOr(mm["agent_id"]),
				CreatedAt: stringOr(mm["created_at"]),
			}
			if id, ok := mm["id"].(string); ok {
				mem.ID = id
			}
			if score, ok := mm["score"].(float64); ok {
				// The API returns a similarity score; surfaced via the
				// Confidence field for lack of a dedicated slot.
				mem.Confidence = score
			}
			if source, ok := mm["source"].(string); ok {
				mem.Tags = append(mem.Tags, "source:"+source)
			}
			memories = append(memories, mem)
		}
	}

	return nil, RecallOutput{
		Success:  true,
		Count:    len(memories),
		Memories: memories,
	}, nil
}

// forget deletes a memory via DELETE /v1/brain/forget/{id}.
func (s *DirectSubsystem) forget(ctx context.Context, _ *mcp.CallToolRequest, input ForgetInput) (*mcp.CallToolResult, ForgetOutput, error) {
	_, err := s.apiCall(ctx, "DELETE", "/v1/brain/forget/"+input.ID, nil)
	if err != nil {
		return nil, ForgetOutput{}, err
	}

	return nil, ForgetOutput{
		Success:   true,
		Forgotten: input.ID,
		Timestamp: time.Now(),
	}, nil
}
|
||||
336
pkg/brain/provider.go
Normal file
336
pkg/brain/provider.go
Normal file
|
|
@ -0,0 +1,336 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2

package brain

import (
	"net/http"

	"forge.lthn.ai/core/api"
	"forge.lthn.ai/core/api/pkg/provider"
	"forge.lthn.ai/core/go-ws"
	"forge.lthn.ai/core/mcp/pkg/mcp/ide"
	"github.com/gin-gonic/gin"
)

// BrainProvider wraps the brain Subsystem as a service provider with REST
// endpoints. It delegates to the same IDE bridge that the MCP tools use.
type BrainProvider struct {
	// bridge is the WebSocket bridge to the Laravel backend; nil means
	// every handler responds 503 bridge_unavailable.
	bridge *ide.Bridge
	// hub emits brain.* events; nil disables event emission.
	hub *ws.Hub
}

// compile-time interface checks
var (
	_ provider.Provider    = (*BrainProvider)(nil)
	_ provider.Streamable  = (*BrainProvider)(nil)
	_ provider.Describable = (*BrainProvider)(nil)
	_ provider.Renderable  = (*BrainProvider)(nil)
)

// NewProvider creates a brain provider that proxies to Laravel via the IDE bridge.
// The WS hub is used to emit brain events. Pass nil for hub if not needed.
func NewProvider(bridge *ide.Bridge, hub *ws.Hub) *BrainProvider {
	return &BrainProvider{
		bridge: bridge,
		hub:    hub,
	}
}

// Name implements api.RouteGroup.
func (p *BrainProvider) Name() string { return "brain" }

// BasePath implements api.RouteGroup.
func (p *BrainProvider) BasePath() string { return "/api/brain" }

// Channels implements provider.Streamable. These are the WS channels that
// emitEvent publishes to after each successful bridge send.
func (p *BrainProvider) Channels() []string {
	return []string{
		"brain.remember.complete",
		"brain.recall.complete",
		"brain.forget.complete",
	}
}

// Element implements provider.Renderable.
func (p *BrainProvider) Element() provider.ElementSpec {
	return provider.ElementSpec{
		Tag:    "core-brain-panel",
		Source: "/assets/brain-panel.js",
	}
}

// RegisterRoutes implements api.RouteGroup.
func (p *BrainProvider) RegisterRoutes(rg *gin.RouterGroup) {
	rg.POST("/remember", p.remember)
	rg.POST("/recall", p.recall)
	rg.POST("/forget", p.forget)
	rg.GET("/list", p.list)
	rg.GET("/status", p.status)
}

// Describe implements api.DescribableGroup.
func (p *BrainProvider) Describe() []api.RouteDescription {
	return []api.RouteDescription{
		{
			Method:      "POST",
			Path:        "/remember",
			Summary:     "Store a memory",
			Description: "Store a memory in the shared OpenBrain knowledge store via the Laravel backend.",
			Tags:        []string{"brain"},
			RequestBody: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"content":    map[string]any{"type": "string"},
					"type":       map[string]any{"type": "string"},
					"tags":       map[string]any{"type": "array", "items": map[string]any{"type": "string"}},
					"project":    map[string]any{"type": "string"},
					"confidence": map[string]any{"type": "number"},
				},
				"required": []string{"content", "type"},
			},
			Response: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"success":   map[string]any{"type": "boolean"},
					"memoryId":  map[string]any{"type": "string"},
					"timestamp": map[string]any{"type": "string", "format": "date-time"},
				},
			},
		},
		{
			Method:      "POST",
			Path:        "/recall",
			Summary:     "Semantic search memories",
			Description: "Semantic search across the shared OpenBrain knowledge store.",
			Tags:        []string{"brain"},
			RequestBody: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"query": map[string]any{"type": "string"},
					"top_k": map[string]any{"type": "integer"},
					"filter": map[string]any{
						"type": "object",
						"properties": map[string]any{
							"project": map[string]any{"type": "string"},
							"type":    map[string]any{"type": "string"},
						},
					},
				},
				"required": []string{"query"},
			},
			Response: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"success":  map[string]any{"type": "boolean"},
					"count":    map[string]any{"type": "integer"},
					"memories": map[string]any{"type": "array"},
				},
			},
		},
		{
			Method:      "POST",
			Path:        "/forget",
			Summary:     "Remove a memory",
			Description: "Permanently delete a memory from the knowledge store.",
			Tags:        []string{"brain"},
			RequestBody: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"id":     map[string]any{"type": "string"},
					"reason": map[string]any{"type": "string"},
				},
				"required": []string{"id"},
			},
			Response: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"success":   map[string]any{"type": "boolean"},
					"forgotten": map[string]any{"type": "string"},
				},
			},
		},
		{
			Method:      "GET",
			Path:        "/list",
			Summary:     "List memories",
			Description: "List memories with optional filtering by project, type, and agent.",
			Tags:        []string{"brain"},
			Response: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"success":  map[string]any{"type": "boolean"},
					"count":    map[string]any{"type": "integer"},
					"memories": map[string]any{"type": "array"},
				},
			},
		},
		{
			Method:      "GET",
			Path:        "/status",
			Summary:     "Brain bridge status",
			Description: "Returns whether the Laravel bridge is connected.",
			Tags:        []string{"brain"},
			Response: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"connected": map[string]any{"type": "boolean"},
				},
			},
		},
	}
}

// -- Handlers -----------------------------------------------------------------
//
// NOTE(review): all mutating handlers forward the request over the bridge and
// respond immediately without waiting for the Laravel side to answer; recall
// and list return placeholder empty results — presumably the real payload is
// delivered asynchronously over WS. Confirm against the Laravel consumer.

// remember handles POST /remember: validates the body and forwards a
// brain_remember message over the bridge.
func (p *BrainProvider) remember(c *gin.Context) {
	if p.bridge == nil {
		c.JSON(http.StatusServiceUnavailable, api.Fail("bridge_unavailable", "brain bridge not available"))
		return
	}

	var input RememberInput
	if err := c.ShouldBindJSON(&input); err != nil {
		c.JSON(http.StatusBadRequest, api.Fail("invalid_input", err.Error()))
		return
	}

	err := p.bridge.Send(ide.BridgeMessage{
		Type: "brain_remember",
		Data: map[string]any{
			"content":    input.Content,
			"type":       input.Type,
			"tags":       input.Tags,
			"project":    input.Project,
			"confidence": input.Confidence,
			"supersedes": input.Supersedes,
			"expires_in": input.ExpiresIn,
		},
	})
	if err != nil {
		c.JSON(http.StatusInternalServerError, api.Fail("bridge_error", err.Error()))
		return
	}

	p.emitEvent("brain.remember.complete", map[string]any{
		"type":    input.Type,
		"project": input.Project,
	})

	c.JSON(http.StatusOK, api.OK(map[string]any{"success": true}))
}

// recall handles POST /recall: forwards a brain_recall message over the
// bridge and returns an empty (placeholder) memory list.
func (p *BrainProvider) recall(c *gin.Context) {
	if p.bridge == nil {
		c.JSON(http.StatusServiceUnavailable, api.Fail("bridge_unavailable", "brain bridge not available"))
		return
	}

	var input RecallInput
	if err := c.ShouldBindJSON(&input); err != nil {
		c.JSON(http.StatusBadRequest, api.Fail("invalid_input", err.Error()))
		return
	}

	err := p.bridge.Send(ide.BridgeMessage{
		Type: "brain_recall",
		Data: map[string]any{
			"query":  input.Query,
			"top_k":  input.TopK,
			"filter": input.Filter,
		},
	})
	if err != nil {
		c.JSON(http.StatusInternalServerError, api.Fail("bridge_error", err.Error()))
		return
	}

	p.emitEvent("brain.recall.complete", map[string]any{
		"query": input.Query,
	})

	c.JSON(http.StatusOK, api.OK(RecallOutput{
		Success:  true,
		Memories: []Memory{},
	}))
}

// forget handles POST /forget: forwards a brain_forget message over the bridge.
func (p *BrainProvider) forget(c *gin.Context) {
	if p.bridge == nil {
		c.JSON(http.StatusServiceUnavailable, api.Fail("bridge_unavailable", "brain bridge not available"))
		return
	}

	var input ForgetInput
	if err := c.ShouldBindJSON(&input); err != nil {
		c.JSON(http.StatusBadRequest, api.Fail("invalid_input", err.Error()))
		return
	}

	err := p.bridge.Send(ide.BridgeMessage{
		Type: "brain_forget",
		Data: map[string]any{
			"id":     input.ID,
			"reason": input.Reason,
		},
	})
	if err != nil {
		c.JSON(http.StatusInternalServerError, api.Fail("bridge_error", err.Error()))
		return
	}

	p.emitEvent("brain.forget.complete", map[string]any{
		"id": input.ID,
	})

	c.JSON(http.StatusOK, api.OK(map[string]any{
		"success":   true,
		"forgotten": input.ID,
	}))
}

// list handles GET /list: forwards a brain_list message built from query
// parameters and returns an empty (placeholder) memory list.
func (p *BrainProvider) list(c *gin.Context) {
	if p.bridge == nil {
		c.JSON(http.StatusServiceUnavailable, api.Fail("bridge_unavailable", "brain bridge not available"))
		return
	}

	err := p.bridge.Send(ide.BridgeMessage{
		Type: "brain_list",
		Data: map[string]any{
			"project":  c.Query("project"),
			"type":     c.Query("type"),
			"agent_id": c.Query("agent_id"),
			"limit":    c.Query("limit"),
		},
	})
	if err != nil {
		c.JSON(http.StatusInternalServerError, api.Fail("bridge_error", err.Error()))
		return
	}

	c.JSON(http.StatusOK, api.OK(ListOutput{
		Success:  true,
		Memories: []Memory{},
	}))
}

// status handles GET /status: reports whether the Laravel bridge is connected.
func (p *BrainProvider) status(c *gin.Context) {
	connected := false
	if p.bridge != nil {
		connected = p.bridge.Connected()
	}
	c.JSON(http.StatusOK, api.OK(map[string]any{
		"connected": connected,
	}))
}

// emitEvent sends a WS event if the hub is available.
func (p *BrainProvider) emitEvent(channel string, data any) {
	if p.hub == nil {
		return
	}
	// Delivery failures are intentionally ignored: events are best-effort.
	_ = p.hub.SendToChannel(channel, ws.Message{
		Type: ws.TypeEvent,
		Data: data,
	})
}
|
||||
220
pkg/brain/tools.go
Normal file
220
pkg/brain/tools.go
Normal file
|
|
@ -0,0 +1,220 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2

package brain

import (
	"context"
	"fmt"
	"time"

	"forge.lthn.ai/core/mcp/pkg/mcp/ide"
	"github.com/modelcontextprotocol/go-sdk/mcp"
)

// -- Input/Output types -------------------------------------------------------

// RememberInput carries the arguments of the brain_remember tool.
type RememberInput struct {
	Content    string   `json:"content"`
	Type       string   `json:"type"`
	Tags       []string `json:"tags,omitempty"`
	Project    string   `json:"project,omitempty"`
	Confidence float64  `json:"confidence,omitempty"`
	Supersedes string   `json:"supersedes,omitempty"`
	ExpiresIn  int      `json:"expires_in,omitempty"`
}

// RememberOutput is what brain_remember returns to the caller.
type RememberOutput struct {
	Success   bool      `json:"success"`
	MemoryID  string    `json:"memoryId,omitempty"`
	Timestamp time.Time `json:"timestamp"`
}

// RecallInput carries the arguments of the brain_recall tool.
type RecallInput struct {
	Query  string       `json:"query"`
	TopK   int          `json:"top_k,omitempty"`
	Filter RecallFilter `json:"filter,omitempty"`
}

// RecallFilter narrows a brain_recall query; all fields are optional.
type RecallFilter struct {
	Project       string  `json:"project,omitempty"`
	Type          any     `json:"type,omitempty"`
	AgentID       string  `json:"agent_id,omitempty"`
	MinConfidence float64 `json:"min_confidence,omitempty"`
}

// RecallOutput is what brain_recall returns to the caller.
type RecallOutput struct {
	Success  bool     `json:"success"`
	Count    int      `json:"count"`
	Memories []Memory `json:"memories"`
}

// Memory is one knowledge-store entry, as returned by recall or list.
type Memory struct {
	ID           string   `json:"id"`
	AgentID      string   `json:"agent_id"`
	Type         string   `json:"type"`
	Content      string   `json:"content"`
	Tags         []string `json:"tags,omitempty"`
	Project      string   `json:"project,omitempty"`
	Confidence   float64  `json:"confidence"`
	SupersedesID string   `json:"supersedes_id,omitempty"`
	ExpiresAt    string   `json:"expires_at,omitempty"`
	CreatedAt    string   `json:"created_at"`
	UpdatedAt    string   `json:"updated_at"`
}

// ForgetInput carries the arguments of the brain_forget tool.
type ForgetInput struct {
	ID     string `json:"id"`
	Reason string `json:"reason,omitempty"`
}

// ForgetOutput is what brain_forget returns to the caller.
type ForgetOutput struct {
	Success   bool      `json:"success"`
	Forgotten string    `json:"forgotten"`
	Timestamp time.Time `json:"timestamp"`
}

// ListInput carries the arguments of the brain_list tool.
type ListInput struct {
	Project string `json:"project,omitempty"`
	Type    string `json:"type,omitempty"`
	AgentID string `json:"agent_id,omitempty"`
	Limit   int    `json:"limit,omitempty"`
}

// ListOutput is what brain_list returns to the caller.
type ListOutput struct {
	Success  bool     `json:"success"`
	Count    int      `json:"count"`
	Memories []Memory `json:"memories"`
}

// -- Tool registration --------------------------------------------------------

// registerBrainTools wires the four brain_* tools onto the MCP server.
func (s *Subsystem) registerBrainTools(server *mcp.Server) {
	mcp.AddTool(server, &mcp.Tool{
		Name:        "brain_remember",
		Description: "Store a memory in the shared OpenBrain knowledge store. Persists decisions, observations, conventions, research, plans, bugs, or architecture knowledge for other agents.",
	}, s.brainRemember)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "brain_recall",
		Description: "Semantic search across the shared OpenBrain knowledge store. Returns memories ranked by similarity to your query, with optional filtering.",
	}, s.brainRecall)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "brain_forget",
		Description: "Remove a memory from the shared OpenBrain knowledge store. Permanently deletes from both database and vector index.",
	}, s.brainForget)

	mcp.AddTool(server, &mcp.Tool{
		Name:        "brain_list",
		Description: "List memories in the shared OpenBrain knowledge store. Supports filtering by project, type, and agent. No vector search -- use brain_recall for semantic queries.",
	}, s.brainList)
}

// -- Tool handlers ------------------------------------------------------------

// sendBridge forwards one typed message over the IDE bridge. It returns
// errBridgeNotAvailable when running headless (nil bridge), and wraps any
// send failure with the message type for context.
func (s *Subsystem) sendBridge(msgType string, data map[string]any) error {
	if s.bridge == nil {
		return errBridgeNotAvailable
	}
	if err := s.bridge.Send(ide.BridgeMessage{Type: msgType, Data: data}); err != nil {
		return fmt.Errorf("failed to send %s: %w", msgType, err)
	}
	return nil
}

// brainRemember forwards a brain_remember message to the Laravel backend.
// The bridge send is fire-and-forget: the returned output acknowledges the
// dispatch, not the backend's persistence result.
func (s *Subsystem) brainRemember(_ context.Context, _ *mcp.CallToolRequest, input RememberInput) (*mcp.CallToolResult, RememberOutput, error) {
	if err := s.sendBridge("brain_remember", map[string]any{
		"content":    input.Content,
		"type":       input.Type,
		"tags":       input.Tags,
		"project":    input.Project,
		"confidence": input.Confidence,
		"supersedes": input.Supersedes,
		"expires_in": input.ExpiresIn,
	}); err != nil {
		return nil, RememberOutput{}, err
	}

	return nil, RememberOutput{Success: true, Timestamp: time.Now()}, nil
}

// brainRecall forwards a brain_recall message to the Laravel backend and
// returns an empty memory list (results are not awaited over the bridge).
func (s *Subsystem) brainRecall(_ context.Context, _ *mcp.CallToolRequest, input RecallInput) (*mcp.CallToolResult, RecallOutput, error) {
	if err := s.sendBridge("brain_recall", map[string]any{
		"query":  input.Query,
		"top_k":  input.TopK,
		"filter": input.Filter,
	}); err != nil {
		return nil, RecallOutput{}, err
	}

	return nil, RecallOutput{Success: true, Memories: []Memory{}}, nil
}

// brainForget forwards a brain_forget message to the Laravel backend.
func (s *Subsystem) brainForget(_ context.Context, _ *mcp.CallToolRequest, input ForgetInput) (*mcp.CallToolResult, ForgetOutput, error) {
	if err := s.sendBridge("brain_forget", map[string]any{
		"id":     input.ID,
		"reason": input.Reason,
	}); err != nil {
		return nil, ForgetOutput{}, err
	}

	return nil, ForgetOutput{Success: true, Forgotten: input.ID, Timestamp: time.Now()}, nil
}
|
||||
|
||||
func (s *Subsystem) brainList(_ context.Context, _ *mcp.CallToolRequest, input ListInput) (*mcp.CallToolResult, ListOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, ListOutput{}, errBridgeNotAvailable
|
||||
}
|
||||
|
||||
err := s.bridge.Send(ide.BridgeMessage{
|
||||
Type: "brain_list",
|
||||
Data: map[string]any{
|
||||
"project": input.Project,
|
||||
"type": input.Type,
|
||||
"agent_id": input.AgentID,
|
||||
"limit": input.Limit,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, ListOutput{}, fmt.Errorf("failed to send brain_list: %w", err)
|
||||
}
|
||||
|
||||
return nil, ListOutput{
|
||||
Success: true,
|
||||
Memories: []Memory{},
|
||||
}, nil
|
||||
}
|
||||
Loading…
Add table
Reference in a new issue