Compare commits
84 commits
agent/upda
...
dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cd305904e5 | ||
|
|
0b63f9cd22 | ||
|
|
ab1aa0cad0 | ||
|
|
c1d3db1ad3 | ||
|
|
1b3d102684 | ||
|
|
23063599df | ||
|
|
476a699b96 | ||
|
|
0bd3f70e20 | ||
|
|
6e73fb6e8d | ||
|
|
cf0885389a | ||
|
|
6a5a177bec | ||
|
|
8a1efa8f12 | ||
|
|
583abea788 | ||
|
|
9f68a74491 | ||
|
|
954a5e1e98 | ||
|
|
1373a6d296 | ||
|
|
d4de2b4cd7 | ||
|
|
1c3fe69cbc | ||
|
|
c42e7ad050 | ||
|
|
4732e31b74 | ||
|
|
6e1a7d7d2a | ||
|
|
5edaa7ead1 | ||
|
|
a3c39ccae7 | ||
|
|
1873adb6ae | ||
|
|
da30f3144a | ||
|
|
a215428df8 | ||
|
|
b6aa33a8e0 | ||
|
|
c83df5f113 | ||
|
|
6b78f0c137 | ||
|
|
4ab909f391 | ||
|
|
ffcd05ea1f | ||
|
|
555f9ec614 | ||
|
|
94cf1c0ba7 | ||
|
|
fa9a5eed28 | ||
|
|
af3cf3c8e3 | ||
|
|
2a4e8b7ba3 | ||
|
|
aae824a4d0 | ||
|
|
072a36cb73 | ||
|
|
8b7e0c40a6 | ||
|
|
8bc44d83a4 | ||
|
|
d9d452b941 | ||
|
|
12346208cc | ||
|
|
cd60d9030c | ||
|
|
a0caa6918c | ||
|
|
d498b2981a | ||
|
|
41f83b52f6 | ||
|
|
8e77c5e58d | ||
|
|
e09f3518e0 | ||
|
|
ad6ccd09bb | ||
|
|
c7b317402b | ||
|
|
91e41615d1 | ||
|
|
981ad9f7da | ||
|
|
e138af6635 | ||
|
|
e40b05c900 | ||
|
|
f62c9c924d | ||
|
|
ca9d879b21 | ||
|
|
2df8866404 | ||
|
|
d57f9d4039 | ||
|
|
dcd3187aed | ||
|
|
dd33bfb691 | ||
|
|
d5a76bf2c7 | ||
|
|
ed4efcbd55 | ||
|
|
dd48cc16f8 | ||
|
|
dd01f366f2 | ||
|
|
e62f4ab654 | ||
|
|
45d439926f | ||
|
|
b96b05ab0b | ||
|
|
599d0b6298 | ||
|
|
b82d399349 | ||
|
|
0516e51db8 | ||
|
|
bf3ef9f595 | ||
|
|
a34115266c | ||
|
|
76d351d8a4 | ||
|
|
c3a449e678 | ||
|
|
a541c95dc8 | ||
|
|
116df41200 | ||
|
|
53fffbd96a | ||
|
|
faf9490e7f | ||
|
|
aa1146807e | ||
|
|
27107cd75e | ||
|
|
6adf61e593 | ||
|
|
7b22fd3141 | ||
|
|
bfb5bf84e2 | ||
|
|
20caaebc21 |
77 changed files with 7249 additions and 774 deletions
|
|
@ -10,13 +10,22 @@ import (
|
|||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"dappco.re/go/mcp/pkg/mcp"
|
||||
"dappco.re/go/mcp/pkg/mcp/agentic"
|
||||
"dappco.re/go/mcp/pkg/mcp/brain"
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
)
|
||||
|
||||
var workspaceFlag string
|
||||
var unrestrictedFlag bool
|
||||
|
||||
var newMCPService = mcp.New
|
||||
var runMCPService = func(svc *mcp.Service, ctx context.Context) error {
|
||||
return svc.Run(ctx)
|
||||
}
|
||||
var shutdownMCPService = func(svc *mcp.Service, ctx context.Context) error {
|
||||
return svc.Shutdown(ctx)
|
||||
}
|
||||
|
||||
var mcpCmd = &cli.Command{
|
||||
Use: "mcp",
|
||||
|
|
@ -27,13 +36,19 @@ var mcpCmd = &cli.Command{
|
|||
var serveCmd = &cli.Command{
|
||||
Use: "serve",
|
||||
Short: "Start the MCP server",
|
||||
Long: `Start the MCP server on stdio (default) or TCP.
|
||||
Long: `Start the MCP server on stdio (default), TCP, Unix socket, or HTTP.
|
||||
|
||||
The server provides file operations, RAG tools, and metrics tools for AI assistants.
|
||||
The server provides file operations plus the brain and agentic subsystems
|
||||
registered by this command.
|
||||
|
||||
Environment variables:
|
||||
MCP_ADDR TCP address to listen on (e.g., "localhost:9999")
|
||||
If not set, uses stdio transport.
|
||||
MCP_UNIX_SOCKET
|
||||
Unix socket path to listen on (e.g., "/tmp/core-mcp.sock")
|
||||
Selected after MCP_ADDR and before stdio.
|
||||
MCP_HTTP_ADDR
|
||||
HTTP address to listen on (e.g., "127.0.0.1:9101")
|
||||
Selected before MCP_ADDR and stdio.
|
||||
|
||||
Examples:
|
||||
# Start with stdio transport (for Claude Code integration)
|
||||
|
|
@ -42,6 +57,9 @@ Examples:
|
|||
# Start with workspace restriction
|
||||
core mcp serve --workspace /path/to/project
|
||||
|
||||
# Start unrestricted (explicit opt-in)
|
||||
core mcp serve --unrestricted
|
||||
|
||||
# Start TCP server
|
||||
MCP_ADDR=localhost:9999 core mcp serve`,
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
|
|
@ -50,7 +68,8 @@ Examples:
|
|||
}
|
||||
|
||||
func initFlags() {
|
||||
cli.StringFlag(serveCmd, &workspaceFlag, "workspace", "w", "", "Restrict file operations to this directory (empty = unrestricted)")
|
||||
cli.StringFlag(serveCmd, &workspaceFlag, "workspace", "w", "", "Restrict file operations to this directory")
|
||||
cli.BoolFlag(serveCmd, &unrestrictedFlag, "unrestricted", "", false, "Disable filesystem sandboxing entirely")
|
||||
}
|
||||
|
||||
// AddMCPCommands registers the 'mcp' command and all subcommands.
|
||||
|
|
@ -63,11 +82,10 @@ func AddMCPCommands(root *cli.Command) {
|
|||
func runServe() error {
|
||||
opts := mcp.Options{}
|
||||
|
||||
if workspaceFlag != "" {
|
||||
opts.WorkspaceRoot = workspaceFlag
|
||||
} else {
|
||||
// Explicitly unrestricted when no workspace specified
|
||||
if unrestrictedFlag {
|
||||
opts.Unrestricted = true
|
||||
} else if workspaceFlag != "" {
|
||||
opts.WorkspaceRoot = workspaceFlag
|
||||
}
|
||||
|
||||
// Register OpenBrain and agentic subsystems
|
||||
|
|
@ -77,10 +95,13 @@ func runServe() error {
|
|||
}
|
||||
|
||||
// Create the MCP service
|
||||
svc, err := mcp.New(opts)
|
||||
svc, err := newMCPService(opts)
|
||||
if err != nil {
|
||||
return cli.Wrap(err, "create MCP service")
|
||||
}
|
||||
defer func() {
|
||||
_ = shutdownMCPService(svc, context.Background())
|
||||
}()
|
||||
|
||||
// Set up signal handling for clean shutdown
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
|
@ -95,5 +116,5 @@ func runServe() error {
|
|||
}()
|
||||
|
||||
// Run the server (blocks until context cancelled or error)
|
||||
return svc.Run(ctx)
|
||||
return runMCPService(svc, ctx)
|
||||
}
|
||||
|
|
|
|||
52
cmd/mcpcmd/cmd_mcp_test.go
Normal file
52
cmd/mcpcmd/cmd_mcp_test.go
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
package mcpcmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"dappco.re/go/mcp/pkg/mcp"
|
||||
)
|
||||
|
||||
func TestRunServe_Good_ShutsDownService(t *testing.T) {
|
||||
oldNew := newMCPService
|
||||
oldRun := runMCPService
|
||||
oldShutdown := shutdownMCPService
|
||||
oldWorkspace := workspaceFlag
|
||||
oldUnrestricted := unrestrictedFlag
|
||||
|
||||
t.Cleanup(func() {
|
||||
newMCPService = oldNew
|
||||
runMCPService = oldRun
|
||||
shutdownMCPService = oldShutdown
|
||||
workspaceFlag = oldWorkspace
|
||||
unrestrictedFlag = oldUnrestricted
|
||||
})
|
||||
|
||||
workspaceFlag = ""
|
||||
unrestrictedFlag = false
|
||||
|
||||
var runCalled bool
|
||||
var shutdownCalled bool
|
||||
|
||||
newMCPService = func(opts mcp.Options) (*mcp.Service, error) {
|
||||
return mcp.New(mcp.Options{})
|
||||
}
|
||||
runMCPService = func(svc *mcp.Service, ctx context.Context) error {
|
||||
runCalled = true
|
||||
return nil
|
||||
}
|
||||
shutdownMCPService = func(svc *mcp.Service, ctx context.Context) error {
|
||||
shutdownCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := runServe(); err != nil {
|
||||
t.Fatalf("runServe() returned error: %v", err)
|
||||
}
|
||||
if !runCalled {
|
||||
t.Fatal("expected runMCPService to be called")
|
||||
}
|
||||
if !shutdownCalled {
|
||||
t.Fatal("expected shutdownMCPService to be called")
|
||||
}
|
||||
}
|
||||
|
|
@ -226,7 +226,7 @@ The `McpApiController` exposes five endpoints behind `mcp.auth` middleware:
|
|||
| `GET` | `/servers/{id}.json` | Server details with tool definitions |
|
||||
| `GET` | `/servers/{id}/tools` | List tools for a server |
|
||||
| `POST` | `/tools/call` | Execute a tool |
|
||||
| `GET` | `/resources/{uri}` | Read a resource (not yet implemented -- returns 501) |
|
||||
| `GET` | `/resources/{uri}` | Read a resource |
|
||||
|
||||
`POST /tools/call` accepts:
|
||||
|
||||
|
|
|
|||
24
go.mod
24
go.mod
|
|
@ -4,15 +4,15 @@ go 1.26.0
|
|||
|
||||
require (
|
||||
dappco.re/go/core v0.8.0-alpha.1
|
||||
forge.lthn.ai/core/api v0.1.5
|
||||
forge.lthn.ai/core/cli v0.3.7
|
||||
forge.lthn.ai/core/go-ai v0.1.12
|
||||
forge.lthn.ai/core/go-io v0.1.7
|
||||
forge.lthn.ai/core/go-log v0.0.4
|
||||
forge.lthn.ai/core/go-process v0.2.9
|
||||
forge.lthn.ai/core/go-rag v0.1.11
|
||||
forge.lthn.ai/core/go-webview v0.1.6
|
||||
forge.lthn.ai/core/go-ws v0.2.5
|
||||
dappco.re/go/core/api v0.1.5
|
||||
dappco.re/go/core/cli v0.3.7
|
||||
dappco.re/go/core/ai v0.1.12
|
||||
dappco.re/go/core/io v0.1.7
|
||||
dappco.re/go/core/log v0.0.4
|
||||
dappco.re/go/core/process v0.2.9
|
||||
dappco.re/go/core/rag v0.1.11
|
||||
dappco.re/go/core/webview v0.1.6
|
||||
dappco.re/go/core/ws v0.2.5
|
||||
github.com/gin-gonic/gin v1.12.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/modelcontextprotocol/go-sdk v1.4.1
|
||||
|
|
@ -21,9 +21,9 @@ require (
|
|||
)
|
||||
|
||||
require (
|
||||
forge.lthn.ai/core/go v0.3.3 // indirect
|
||||
forge.lthn.ai/core/go-i18n v0.1.7 // indirect
|
||||
forge.lthn.ai/core/go-inference v0.1.6 // indirect
|
||||
dappco.re/go/core v0.3.3 // indirect
|
||||
dappco.re/go/core/i18n v0.1.7 // indirect
|
||||
dappco.re/go/core/inference v0.1.6 // indirect
|
||||
github.com/99designs/gqlgen v0.17.88 // indirect
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.1 // indirect
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
|
|
@ -42,8 +43,9 @@ type DispatchOutput struct {
|
|||
OutputFile string `json:"output_file,omitempty"`
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) registerDispatchTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *PrepSubsystem) registerDispatchTool(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_dispatch",
|
||||
Description: "Dispatch a subagent (Gemini, Codex, or Claude) to work on a task. Preps a sandboxed workspace first, then spawns the agent inside it. Templates: conventions, security, coding.",
|
||||
}, s.dispatch)
|
||||
|
|
@ -137,12 +139,14 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
|||
// Step 2: Check per-agent concurrency limit
|
||||
if !s.canDispatchAgent(input.Agent) {
|
||||
// Queue the workspace — write status as "queued" and return
|
||||
writeStatus(wsDir, &WorkspaceStatus{
|
||||
s.saveStatus(wsDir, &WorkspaceStatus{
|
||||
Status: "queued",
|
||||
Agent: input.Agent,
|
||||
Repo: input.Repo,
|
||||
Org: input.Org,
|
||||
Task: input.Task,
|
||||
Issue: input.Issue,
|
||||
Branch: prepOut.Branch,
|
||||
StartedAt: time.Now(),
|
||||
Runs: 0,
|
||||
})
|
||||
|
|
@ -157,12 +161,14 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
|||
|
||||
// Step 3: Write status BEFORE spawning so concurrent dispatches
|
||||
// see this workspace as "running" during the concurrency check.
|
||||
writeStatus(wsDir, &WorkspaceStatus{
|
||||
s.saveStatus(wsDir, &WorkspaceStatus{
|
||||
Status: "running",
|
||||
Agent: input.Agent,
|
||||
Repo: input.Repo,
|
||||
Org: input.Org,
|
||||
Task: input.Task,
|
||||
Issue: input.Issue,
|
||||
Branch: prepOut.Branch,
|
||||
StartedAt: time.Now(),
|
||||
Runs: 1,
|
||||
})
|
||||
|
|
@ -204,11 +210,13 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
|||
if err := cmd.Start(); err != nil {
|
||||
outFile.Close()
|
||||
// Revert status so the slot is freed
|
||||
writeStatus(wsDir, &WorkspaceStatus{
|
||||
s.saveStatus(wsDir, &WorkspaceStatus{
|
||||
Status: "failed",
|
||||
Agent: input.Agent,
|
||||
Repo: input.Repo,
|
||||
Task: input.Task,
|
||||
Issue: input.Issue,
|
||||
Branch: prepOut.Branch,
|
||||
})
|
||||
return nil, DispatchOutput{}, coreerr.E("dispatch", "failed to spawn "+input.Agent, err)
|
||||
}
|
||||
|
|
@ -216,12 +224,14 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
|||
pid := cmd.Process.Pid
|
||||
|
||||
// Update status with PID now that agent is running
|
||||
writeStatus(wsDir, &WorkspaceStatus{
|
||||
s.saveStatus(wsDir, &WorkspaceStatus{
|
||||
Status: "running",
|
||||
Agent: input.Agent,
|
||||
Repo: input.Repo,
|
||||
Org: input.Org,
|
||||
Task: input.Task,
|
||||
Issue: input.Issue,
|
||||
Branch: prepOut.Branch,
|
||||
PID: pid,
|
||||
StartedAt: time.Now(),
|
||||
Runs: 1,
|
||||
|
|
@ -233,13 +243,38 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
|||
cmd.Wait()
|
||||
outFile.Close()
|
||||
|
||||
// Update status to completed
|
||||
if st, err := readStatus(wsDir); err == nil {
|
||||
st.Status = "completed"
|
||||
st.PID = 0
|
||||
writeStatus(wsDir, st)
|
||||
postCtx := context.WithoutCancel(ctx)
|
||||
status := "completed"
|
||||
channel := coremcp.ChannelAgentComplete
|
||||
payload := map[string]any{
|
||||
"workspace": filepath.Base(wsDir),
|
||||
"repo": input.Repo,
|
||||
"org": input.Org,
|
||||
"agent": input.Agent,
|
||||
"branch": prepOut.Branch,
|
||||
}
|
||||
|
||||
// Update status to completed or blocked.
|
||||
if st, err := readStatus(wsDir); err == nil {
|
||||
st.PID = 0
|
||||
if data, err := coreio.Local.Read(filepath.Join(wsDir, "src", "BLOCKED.md")); err == nil {
|
||||
status = "blocked"
|
||||
channel = coremcp.ChannelAgentBlocked
|
||||
st.Status = status
|
||||
st.Question = strings.TrimSpace(data)
|
||||
if st.Question != "" {
|
||||
payload["question"] = st.Question
|
||||
}
|
||||
} else {
|
||||
st.Status = status
|
||||
}
|
||||
s.saveStatus(wsDir, st)
|
||||
}
|
||||
|
||||
payload["status"] = status
|
||||
s.emitChannel(postCtx, channel, payload)
|
||||
s.emitChannel(postCtx, coremcp.ChannelAgentStatus, payload)
|
||||
|
||||
// Ingest scan findings as issues
|
||||
s.ingestFindings(wsDir)
|
||||
|
||||
|
|
@ -256,4 +291,3 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
|||
OutputFile: outputFile,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ import (
|
|||
"net/http"
|
||||
"strings"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
|
@ -19,23 +20,23 @@ import (
|
|||
// EpicInput is the input for agentic_create_epic.
|
||||
type EpicInput struct {
|
||||
Repo string `json:"repo"` // Target repo (e.g. "go-scm")
|
||||
Org string `json:"org,omitempty"` // Forge org (default "core")
|
||||
Title string `json:"title"` // Epic title
|
||||
Body string `json:"body,omitempty"` // Epic description (above checklist)
|
||||
Tasks []string `json:"tasks"` // Sub-task titles (become child issues)
|
||||
Labels []string `json:"labels,omitempty"` // Labels for epic + children (e.g. ["agentic"])
|
||||
Dispatch bool `json:"dispatch,omitempty"` // Auto-dispatch agents to each child
|
||||
Agent string `json:"agent,omitempty"` // Agent type for dispatch (default "claude")
|
||||
Template string `json:"template,omitempty"` // Prompt template for dispatch (default "coding")
|
||||
Org string `json:"org,omitempty"` // Forge org (default "core")
|
||||
Title string `json:"title"` // Epic title
|
||||
Body string `json:"body,omitempty"` // Epic description (above checklist)
|
||||
Tasks []string `json:"tasks"` // Sub-task titles (become child issues)
|
||||
Labels []string `json:"labels,omitempty"` // Labels for epic + children (e.g. ["agentic"])
|
||||
Dispatch bool `json:"dispatch,omitempty"` // Auto-dispatch agents to each child
|
||||
Agent string `json:"agent,omitempty"` // Agent type for dispatch (default "claude")
|
||||
Template string `json:"template,omitempty"` // Prompt template for dispatch (default "coding")
|
||||
}
|
||||
|
||||
// EpicOutput is the output for agentic_create_epic.
|
||||
type EpicOutput struct {
|
||||
Success bool `json:"success"`
|
||||
EpicNumber int `json:"epic_number"`
|
||||
EpicURL string `json:"epic_url"`
|
||||
Children []ChildRef `json:"children"`
|
||||
Dispatched int `json:"dispatched,omitempty"`
|
||||
Success bool `json:"success"`
|
||||
EpicNumber int `json:"epic_number"`
|
||||
EpicURL string `json:"epic_url"`
|
||||
Children []ChildRef `json:"children"`
|
||||
Dispatched int `json:"dispatched,omitempty"`
|
||||
}
|
||||
|
||||
// ChildRef references a child issue.
|
||||
|
|
@ -45,8 +46,9 @@ type ChildRef struct {
|
|||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) registerEpicTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *PrepSubsystem) registerEpicTool(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_create_epic",
|
||||
Description: "Create an epic issue with child issues on Forge. Each task becomes a child issue linked via checklist. Optionally auto-dispatch agents to work each child.",
|
||||
}, s.createEpic)
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ package agentic
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
|
@ -11,6 +12,7 @@ import (
|
|||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
|
|
@ -45,7 +47,9 @@ func (s *PrepSubsystem) ingestFindings(wsDir string) {
|
|||
|
||||
// Only ingest if there are actual findings (file:line references)
|
||||
findings := countFileRefs(body)
|
||||
issueCreated := false
|
||||
if findings < 2 {
|
||||
s.emitHarvestComplete(context.Background(), wsDir, st.Repo, findings, issueCreated)
|
||||
return // No meaningful findings
|
||||
}
|
||||
|
||||
|
|
@ -66,7 +70,8 @@ func (s *PrepSubsystem) ingestFindings(wsDir string) {
|
|||
description = description[:10000] + "\n\n... (truncated, see full log in workspace)"
|
||||
}
|
||||
|
||||
s.createIssueViaAPI(st.Repo, title, description, issueType, priority, "scan")
|
||||
issueCreated = s.createIssueViaAPI(st.Repo, title, description, issueType, priority, "scan")
|
||||
s.emitHarvestComplete(context.Background(), wsDir, st.Repo, findings, issueCreated)
|
||||
}
|
||||
|
||||
// countFileRefs counts file:line references in the output (indicates real findings)
|
||||
|
|
@ -91,35 +96,55 @@ func countFileRefs(body string) int {
|
|||
}
|
||||
|
||||
// createIssueViaAPI posts an issue to the lthn.sh API
|
||||
func (s *PrepSubsystem) createIssueViaAPI(repo, title, description, issueType, priority, source string) {
|
||||
func (s *PrepSubsystem) createIssueViaAPI(repo, title, description, issueType, priority, source string) bool {
|
||||
if s.brainKey == "" {
|
||||
return
|
||||
return false
|
||||
}
|
||||
|
||||
// Read the agent API key from file
|
||||
home, _ := os.UserHomeDir()
|
||||
apiKeyData, err := coreio.Local.Read(filepath.Join(home, ".claude", "agent-api.key"))
|
||||
if err != nil {
|
||||
return
|
||||
return false
|
||||
}
|
||||
apiKey := strings.TrimSpace(apiKeyData)
|
||||
|
||||
payload, _ := json.Marshal(map[string]string{
|
||||
payload, err := json.Marshal(map[string]string{
|
||||
"title": title,
|
||||
"description": description,
|
||||
"type": issueType,
|
||||
"priority": priority,
|
||||
"reporter": "cladius",
|
||||
})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
req, _ := http.NewRequest("POST", s.brainURL+"/v1/issues", bytes.NewReader(payload))
|
||||
req, err := http.NewRequest("POST", s.brainURL+"/v1/issues", bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
return false
|
||||
}
|
||||
resp.Body.Close()
|
||||
return resp.StatusCode < 400
|
||||
}
|
||||
|
||||
// emitHarvestComplete announces that finding ingestion finished for a workspace.
|
||||
//
|
||||
// ctx := context.Background()
|
||||
// s.emitHarvestComplete(ctx, "go-io-123", "go-io", 4, true)
|
||||
func (s *PrepSubsystem) emitHarvestComplete(ctx context.Context, workspace, repo string, findings int, issueCreated bool) {
|
||||
s.emitChannel(ctx, coremcp.ChannelHarvestComplete, map[string]any{
|
||||
"workspace": workspace,
|
||||
"repo": repo,
|
||||
"findings": findings,
|
||||
"issue_created": issueCreated,
|
||||
})
|
||||
}
|
||||
|
|
|
|||
216
pkg/mcp/agentic/issue.go
Normal file
216
pkg/mcp/agentic/issue.go
Normal file
|
|
@ -0,0 +1,216 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// IssueDispatchInput is the input for agentic_dispatch_issue.
|
||||
//
|
||||
// input := IssueDispatchInput{
|
||||
// Repo: "go-io",
|
||||
// Issue: 123,
|
||||
// Agent: "claude",
|
||||
// }
|
||||
type IssueDispatchInput struct {
|
||||
Repo string `json:"repo"` // Target repo (e.g. "go-io")
|
||||
Org string `json:"org,omitempty"` // Forge org (default "core")
|
||||
Issue int `json:"issue"` // Forge issue number
|
||||
Agent string `json:"agent,omitempty"` // "claude" (default), "codex", "gemini"
|
||||
Template string `json:"template,omitempty"` // "conventions", "security", "coding" (default)
|
||||
DryRun bool `json:"dry_run,omitempty"` // Preview without executing
|
||||
}
|
||||
|
||||
type forgeIssue struct {
|
||||
Title string `json:"title"`
|
||||
Body string `json:"body"`
|
||||
State string `json:"state"`
|
||||
Labels []struct {
|
||||
Name string `json:"name"`
|
||||
} `json:"labels"`
|
||||
Assignee *struct {
|
||||
Login string `json:"login"`
|
||||
} `json:"assignee"`
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) registerIssueTools(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_dispatch_issue",
|
||||
Description: "Dispatch an agent to work on a Forge issue. Assigns the issue as a lock, prepends the issue body to TODO.md, creates an issue-specific branch, and spawns the agent.",
|
||||
}, s.dispatchIssue)
|
||||
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_pr",
|
||||
Description: "Create a pull request from an agent workspace. Pushes the branch and creates a Forge PR linked to the tracked issue, if any.",
|
||||
}, s.createPR)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) dispatchIssue(ctx context.Context, req *mcp.CallToolRequest, input IssueDispatchInput) (*mcp.CallToolResult, DispatchOutput, error) {
|
||||
if input.Repo == "" {
|
||||
return nil, DispatchOutput{}, coreerr.E("dispatchIssue", "repo is required", nil)
|
||||
}
|
||||
if input.Issue == 0 {
|
||||
return nil, DispatchOutput{}, coreerr.E("dispatchIssue", "issue is required", nil)
|
||||
}
|
||||
if input.Org == "" {
|
||||
input.Org = "core"
|
||||
}
|
||||
if input.Agent == "" {
|
||||
input.Agent = "claude"
|
||||
}
|
||||
if input.Template == "" {
|
||||
input.Template = "coding"
|
||||
}
|
||||
|
||||
issue, err := s.fetchIssue(ctx, input.Org, input.Repo, input.Issue)
|
||||
if err != nil {
|
||||
return nil, DispatchOutput{}, err
|
||||
}
|
||||
if issue.State != "open" {
|
||||
return nil, DispatchOutput{}, coreerr.E("dispatchIssue", fmt.Sprintf("issue %d is %s, not open", input.Issue, issue.State), nil)
|
||||
}
|
||||
if issue.Assignee != nil && issue.Assignee.Login != "" {
|
||||
return nil, DispatchOutput{}, coreerr.E("dispatchIssue", fmt.Sprintf("issue %d is already assigned to %s", input.Issue, issue.Assignee.Login), nil)
|
||||
}
|
||||
|
||||
if !input.DryRun {
|
||||
if err := s.lockIssue(ctx, input.Org, input.Repo, input.Issue, input.Agent); err != nil {
|
||||
return nil, DispatchOutput{}, err
|
||||
}
|
||||
|
||||
var dispatchErr error
|
||||
defer func() {
|
||||
if dispatchErr != nil {
|
||||
_ = s.unlockIssue(ctx, input.Org, input.Repo, input.Issue, issue.Labels)
|
||||
}
|
||||
}()
|
||||
|
||||
result, out, dispatchErr := s.dispatch(ctx, req, DispatchInput{
|
||||
Repo: input.Repo,
|
||||
Org: input.Org,
|
||||
Issue: input.Issue,
|
||||
Task: issue.Title,
|
||||
Agent: input.Agent,
|
||||
Template: input.Template,
|
||||
DryRun: input.DryRun,
|
||||
})
|
||||
if dispatchErr != nil {
|
||||
return nil, DispatchOutput{}, dispatchErr
|
||||
}
|
||||
return result, out, nil
|
||||
}
|
||||
|
||||
return s.dispatch(ctx, req, DispatchInput{
|
||||
Repo: input.Repo,
|
||||
Org: input.Org,
|
||||
Issue: input.Issue,
|
||||
Task: issue.Title,
|
||||
Agent: input.Agent,
|
||||
Template: input.Template,
|
||||
DryRun: input.DryRun,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) unlockIssue(ctx context.Context, org, repo string, issue int, labels []struct {
|
||||
Name string `json:"name"`
|
||||
}) error {
|
||||
updateURL := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue)
|
||||
issueLabels := make([]string, 0, len(labels))
|
||||
for _, label := range labels {
|
||||
if label.Name == "in-progress" {
|
||||
continue
|
||||
}
|
||||
issueLabels = append(issueLabels, label.Name)
|
||||
}
|
||||
if issueLabels == nil {
|
||||
issueLabels = []string{}
|
||||
}
|
||||
payload, err := json.Marshal(map[string]any{
|
||||
"assignees": []string{},
|
||||
"labels": issueLabels,
|
||||
})
|
||||
if err != nil {
|
||||
return coreerr.E("unlockIssue", "failed to encode issue unlock", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPatch, updateURL, bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return coreerr.E("unlockIssue", "failed to build unlock request", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return coreerr.E("unlockIssue", "failed to update issue", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode >= http.StatusBadRequest {
|
||||
return coreerr.E("unlockIssue", fmt.Sprintf("issue unlock returned %d", resp.StatusCode), nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) fetchIssue(ctx context.Context, org, repo string, issue int) (*forgeIssue, error) {
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("fetchIssue", "failed to build request", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("fetchIssue", "failed to fetch issue", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, coreerr.E("fetchIssue", fmt.Sprintf("issue %d not found in %s/%s", issue, org, repo), nil)
|
||||
}
|
||||
|
||||
var out forgeIssue
|
||||
if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
|
||||
return nil, coreerr.E("fetchIssue", "failed to decode issue", err)
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) lockIssue(ctx context.Context, org, repo string, issue int, assignee string) error {
|
||||
updateURL := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue)
|
||||
payload, err := json.Marshal(map[string]any{
|
||||
"assignees": []string{assignee},
|
||||
"labels": []string{"in-progress"},
|
||||
})
|
||||
if err != nil {
|
||||
return coreerr.E("lockIssue", "failed to encode issue update", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPatch, updateURL, bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return coreerr.E("lockIssue", "failed to build update request", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
return coreerr.E("lockIssue", "failed to update issue", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode >= http.StatusBadRequest {
|
||||
return coreerr.E("lockIssue", fmt.Sprintf("issue update returned %d", resp.StatusCode), nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
227
pkg/mcp/agentic/issue_test.go
Normal file
227
pkg/mcp/agentic/issue_test.go
Normal file
|
|
@ -0,0 +1,227 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBranchSlug_Good(t *testing.T) {
|
||||
got := branchSlug("Fix login crash in API v2")
|
||||
want := "fix-login-crash-in-api-v2"
|
||||
if got != want {
|
||||
t.Fatalf("expected %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrepWorkspace_Good_IssueBranchName(t *testing.T) {
|
||||
codePath := t.TempDir()
|
||||
repoDir := initTestRepo(t, codePath, "demo")
|
||||
_ = repoDir
|
||||
|
||||
s := &PrepSubsystem{codePath: codePath}
|
||||
_, out, err := s.prepWorkspace(context.Background(), nil, PrepInput{
|
||||
Repo: "demo",
|
||||
Issue: 42,
|
||||
Task: "Fix login crash",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("prepWorkspace failed: %v", err)
|
||||
}
|
||||
|
||||
want := "agent/issue-42-fix-login-crash"
|
||||
if out.Branch != want {
|
||||
t.Fatalf("expected branch %q, got %q", want, out.Branch)
|
||||
}
|
||||
|
||||
srcDir := filepath.Join(out.WorkspaceDir, "src")
|
||||
cmd := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD")
|
||||
cmd.Dir = srcDir
|
||||
data, err := cmd.Output()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read branch: %v", err)
|
||||
}
|
||||
if got := strings.TrimSpace(string(data)); got != want {
|
||||
t.Fatalf("expected git branch %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDispatchIssue_Bad_AssignedIssue(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"title": "Fix login crash",
|
||||
"body": "details",
|
||||
"state": "open",
|
||||
"assignee": map[string]any{
|
||||
"login": "someone-else",
|
||||
},
|
||||
})
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
forgeURL: srv.URL,
|
||||
client: srv.Client(),
|
||||
}
|
||||
|
||||
_, _, err := s.dispatchIssue(context.Background(), nil, IssueDispatchInput{
|
||||
Repo: "demo",
|
||||
Org: "core",
|
||||
Issue: 42,
|
||||
DryRun: true,
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatal("expected assigned issue to fail")
|
||||
}
|
||||
}
|
||||
|
||||
// TestDispatchIssue_Good_UnlocksOnPrepFailure verifies that when workspace
// prep fails after an issue has been locked, dispatch rolls the lock back:
// the expected request sequence is GET (fetch issue), PATCH (lock/assign),
// PATCH (unlock/clear assignees and restore the original labels).
func TestDispatchIssue_Good_UnlocksOnPrepFailure(t *testing.T) {
	// Record every request so the lock/unlock sequence can be asserted.
	var methods []string
	var bodies []string

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		methods = append(methods, r.Method)
		bodies = append(bodies, string(body))

		switch r.Method {
		case http.MethodGet:
			// Issue fetch: open, unassigned, labelled "bug".
			_ = json.NewEncoder(w).Encode(map[string]any{
				"title": "Fix login crash",
				"body":  "details",
				"state": "open",
				"labels": []map[string]any{
					{"name": "bug"},
				},
			})
		case http.MethodPatch:
			w.WriteHeader(http.StatusOK)
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
		}
	}))
	defer srv.Close()

	// codePath is an empty temp dir, so the repo clone step must fail
	// after the issue has already been locked.
	s := &PrepSubsystem{
		forgeURL:   srv.URL,
		forgeToken: "token",
		client:     srv.Client(),
		codePath:   t.TempDir(),
	}

	_, _, err := s.dispatchIssue(context.Background(), nil, IssueDispatchInput{
		Repo:  "demo",
		Org:   "core",
		Issue: 42,
	})
	if err == nil {
		t.Fatal("expected dispatch to fail when the repo clone is missing")
	}

	// Exactly three requests: fetch, lock, unlock.
	if got, want := len(methods), 3; got != want {
		t.Fatalf("expected %d requests, got %d (%v)", want, got, methods)
	}
	if methods[0] != http.MethodGet {
		t.Fatalf("expected first request to fetch issue, got %s", methods[0])
	}
	if methods[1] != http.MethodPatch {
		t.Fatalf("expected second request to lock issue, got %s", methods[1])
	}
	if methods[2] != http.MethodPatch {
		t.Fatalf("expected third request to unlock issue, got %s", methods[2])
	}
	// The lock must assign "claude"; the unlock must clear assignees and
	// restore the labels the issue had before dispatch.
	if !strings.Contains(bodies[1], `"assignees":["claude"]`) {
		t.Fatalf("expected lock request to assign claude, got %s", bodies[1])
	}
	if !strings.Contains(bodies[2], `"assignees":[]`) {
		t.Fatalf("expected unlock request to clear assignees, got %s", bodies[2])
	}
	if !strings.Contains(bodies[2], `"labels":["bug"]`) {
		t.Fatalf("expected unlock request to preserve original labels, got %s", bodies[2])
	}
}
|
||||
|
||||
func TestLockIssue_Good_RequestBody(t *testing.T) {
|
||||
var gotMethod string
|
||||
var gotPath string
|
||||
var gotBody []byte
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gotMethod = r.Method
|
||||
gotPath = r.URL.Path
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
gotBody = append([]byte(nil), body...)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
forgeURL: srv.URL,
|
||||
client: srv.Client(),
|
||||
}
|
||||
|
||||
if err := s.lockIssue(context.Background(), "core", "demo", 42, "claude"); err != nil {
|
||||
t.Fatalf("lockIssue failed: %v", err)
|
||||
}
|
||||
|
||||
if gotMethod != http.MethodPatch {
|
||||
t.Fatalf("expected PATCH, got %s", gotMethod)
|
||||
}
|
||||
if gotPath != "/api/v1/repos/core/demo/issues/42" {
|
||||
t.Fatalf("unexpected path %q", gotPath)
|
||||
}
|
||||
if !bytes.Contains(gotBody, []byte(`"assignees":["claude"]`)) {
|
||||
t.Fatalf("expected assignee in body, got %s", string(gotBody))
|
||||
}
|
||||
if !bytes.Contains(gotBody, []byte(`"in-progress"`)) {
|
||||
t.Fatalf("expected in-progress label in body, got %s", string(gotBody))
|
||||
}
|
||||
}
|
||||
|
||||
// initTestRepo creates codePath/core/<repo> as a git repository on branch
// "main" with one initial commit (README.md) and returns the repo directory.
func initTestRepo(t *testing.T, codePath, repo string) string {
	t.Helper()

	repoDir := filepath.Join(codePath, "core", repo)
	if err := os.MkdirAll(repoDir, 0o755); err != nil {
		t.Fatalf("mkdir repo dir: %v", err)
	}

	// Fixed identity so commits succeed regardless of host git config.
	gitEnv := append(os.Environ(),
		"GIT_AUTHOR_NAME=Test User",
		"GIT_AUTHOR_EMAIL=test@example.com",
		"GIT_COMMITTER_NAME=Test User",
		"GIT_COMMITTER_EMAIL=test@example.com",
	)
	git := func(args ...string) {
		t.Helper()
		cmd := exec.Command("git", args...)
		cmd.Dir = repoDir
		cmd.Env = gitEnv
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("git %v failed: %v\n%s", args, err, string(out))
		}
	}

	git("init", "-b", "main")
	readme := filepath.Join(repoDir, "README.md")
	if err := os.WriteFile(readme, []byte("# demo\n"), 0o644); err != nil {
		t.Fatalf("write file: %v", err)
	}
	git("add", "README.md")
	git("commit", "-m", "initial commit")

	return repoDir
}
|
||||
125
pkg/mcp/agentic/mirror.go
Normal file
125
pkg/mcp/agentic/mirror.go
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// MirrorInput controls Forge to GitHub mirror sync.
type MirrorInput struct {
	Repo     string `json:"repo,omitempty"`      // single repo to sync; empty syncs every local repo
	DryRun   bool   `json:"dry_run,omitempty"`   // report what would sync without pushing or opening PRs
	MaxFiles int    `json:"max_files,omitempty"` // per-repo changed-file cap; <= 0 falls back to the default of 50
}
|
||||
|
||||
// MirrorOutput reports mirror sync results.
type MirrorOutput struct {
	Success bool         `json:"success"`           // true when the tool ran to completion
	Synced  []MirrorSync `json:"synced"`            // per-repo results for repos with commits ahead of the mirror
	Skipped []string     `json:"skipped,omitempty"` // repos skipped outright (e.g. "repo: no github remote")
	Count   int          `json:"count"`             // number of entries in Synced
}
|
||||
|
||||
// MirrorSync records one repo sync attempt.
type MirrorSync struct {
	Repo         string `json:"repo"`
	CommitsAhead int    `json:"commits_ahead"`     // commits on HEAD not yet on github/main
	FilesChanged int    `json:"files_changed"`     // files differing between github/main and HEAD
	PRURL        string `json:"pr_url,omitempty"`  // GitHub PR opened for this sync, if any
	Pushed       bool   `json:"pushed"`            // set once the branch was pushed to the mirror
	Skipped      string `json:"skipped,omitempty"` // reason the sync stopped early (file limit, dry run, error)
}
|
||||
|
||||
// registerMirrorTool registers the agentic_mirror tool on the shared MCP
// service, recording it under the "agentic" tool group.
func (s *PrepSubsystem) registerMirrorTool(svc *coremcp.Service) {
	server := svc.Server()
	coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
		Name:        "agentic_mirror",
		Description: "Mirror Forge repositories to GitHub and open a GitHub PR when there are commits ahead of the remote mirror.",
	}, s.mirror)
}
|
||||
|
||||
func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, input MirrorInput) (*mcp.CallToolResult, MirrorOutput, error) {
|
||||
maxFiles := input.MaxFiles
|
||||
if maxFiles <= 0 {
|
||||
maxFiles = 50
|
||||
}
|
||||
|
||||
basePath := repoRootFromCodePath(s.codePath)
|
||||
repos := []string{}
|
||||
if input.Repo != "" {
|
||||
repos = []string{input.Repo}
|
||||
} else {
|
||||
repos = listLocalRepos(basePath)
|
||||
}
|
||||
|
||||
synced := make([]MirrorSync, 0, len(repos))
|
||||
skipped := make([]string, 0)
|
||||
|
||||
for _, repo := range repos {
|
||||
repoDir := filepath.Join(basePath, repo)
|
||||
if !hasRemote(repoDir, "github") {
|
||||
skipped = append(skipped, repo+": no github remote")
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := exec.LookPath("git"); err != nil {
|
||||
return nil, MirrorOutput{}, coreerr.E("mirror", "git CLI is not available", err)
|
||||
}
|
||||
|
||||
_, _ = gitOutput(repoDir, "fetch", "github")
|
||||
ahead := commitsAhead(repoDir, "github/main", "HEAD")
|
||||
if ahead <= 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
files := filesChanged(repoDir, "github/main", "HEAD")
|
||||
sync := MirrorSync{
|
||||
Repo: repo,
|
||||
CommitsAhead: ahead,
|
||||
FilesChanged: files,
|
||||
}
|
||||
|
||||
if files > maxFiles {
|
||||
sync.Skipped = fmt.Sprintf("%d files exceeds limit of %d", files, maxFiles)
|
||||
synced = append(synced, sync)
|
||||
continue
|
||||
}
|
||||
|
||||
if input.DryRun {
|
||||
sync.Skipped = "dry run"
|
||||
synced = append(synced, sync)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := ensureDevBranch(repoDir); err != nil {
|
||||
sync.Skipped = err.Error()
|
||||
synced = append(synced, sync)
|
||||
continue
|
||||
}
|
||||
sync.Pushed = true
|
||||
|
||||
prURL, err := createGitHubPR(ctx, repoDir, repo, ahead, files)
|
||||
if err != nil {
|
||||
sync.Skipped = err.Error()
|
||||
} else {
|
||||
sync.PRURL = prURL
|
||||
}
|
||||
|
||||
synced = append(synced, sync)
|
||||
}
|
||||
|
||||
return nil, MirrorOutput{
|
||||
Success: true,
|
||||
Synced: synced,
|
||||
Skipped: skipped,
|
||||
Count: len(synced),
|
||||
}, nil
|
||||
}
|
||||
|
|
@ -11,16 +11,22 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// Plan represents an implementation plan for agent work.
|
||||
//
|
||||
// plan := Plan{
|
||||
// Title: "Add notifications",
|
||||
// Status: "draft",
|
||||
// }
|
||||
type Plan struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"` // draft, ready, in_progress, needs_verification, verified, approved
|
||||
Status string `json:"status"` // draft, ready, in_progress, needs_verification, verified, approved
|
||||
Repo string `json:"repo,omitempty"`
|
||||
Org string `json:"org,omitempty"`
|
||||
Objective string `json:"objective"`
|
||||
|
|
@ -32,18 +38,32 @@ type Plan struct {
|
|||
}
|
||||
|
||||
// Phase represents a phase within an implementation plan.
|
||||
//
|
||||
// phase := Phase{Name: "Implementation", Status: "pending"}
|
||||
type Phase struct {
|
||||
Number int `json:"number"`
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"` // pending, in_progress, done
|
||||
Criteria []string `json:"criteria,omitempty"`
|
||||
Tests int `json:"tests,omitempty"`
|
||||
Notes string `json:"notes,omitempty"`
|
||||
Number int `json:"number"`
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"` // pending, in_progress, done
|
||||
Criteria []string `json:"criteria,omitempty"`
|
||||
Tests int `json:"tests,omitempty"`
|
||||
Notes string `json:"notes,omitempty"`
|
||||
Checkpoints []Checkpoint `json:"checkpoints,omitempty"`
|
||||
}
|
||||
|
||||
// Checkpoint records phase progress or completion details.
//
// cp := Checkpoint{Notes: "Implemented transport hooks", Done: true}
type Checkpoint struct {
	Notes     string    `json:"notes,omitempty"` // free-form progress note
	Done      bool      `json:"done,omitempty"`  // when set, the owning phase is marked done
	CreatedAt time.Time `json:"created_at"`      // set at append time by planCheckpoint
}
|
||||
|
||||
// --- Input/Output types ---
|
||||
|
||||
// PlanCreateInput is the input for agentic_plan_create.
|
||||
//
|
||||
// input := PlanCreateInput{Title: "Add notifications", Objective: "Broadcast MCP events"}
|
||||
type PlanCreateInput struct {
|
||||
Title string `json:"title"`
|
||||
Objective string `json:"objective"`
|
||||
|
|
@ -54,6 +74,8 @@ type PlanCreateInput struct {
|
|||
}
|
||||
|
||||
// PlanCreateOutput is the output for agentic_plan_create.
|
||||
//
|
||||
// // out.Success == true, out.ID != ""
|
||||
type PlanCreateOutput struct {
|
||||
Success bool `json:"success"`
|
||||
ID string `json:"id"`
|
||||
|
|
@ -61,17 +83,23 @@ type PlanCreateOutput struct {
|
|||
}
|
||||
|
||||
// PlanReadInput is the input for agentic_plan_read.
|
||||
//
|
||||
// input := PlanReadInput{ID: "add-notifications"}
|
||||
type PlanReadInput struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
// PlanReadOutput is the output for agentic_plan_read.
|
||||
//
|
||||
// // out.Plan.Title == "Add notifications"
|
||||
type PlanReadOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Plan Plan `json:"plan"`
|
||||
}
|
||||
|
||||
// PlanUpdateInput is the input for agentic_plan_update.
|
||||
//
|
||||
// input := PlanUpdateInput{ID: "add-notifications", Status: "ready"}
|
||||
type PlanUpdateInput struct {
|
||||
ID string `json:"id"`
|
||||
Status string `json:"status,omitempty"`
|
||||
|
|
@ -83,62 +111,102 @@ type PlanUpdateInput struct {
|
|||
}
|
||||
|
||||
// PlanUpdateOutput is the output for agentic_plan_update.
|
||||
//
|
||||
// // out.Plan.Status == "ready"
|
||||
type PlanUpdateOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Plan Plan `json:"plan"`
|
||||
}
|
||||
|
||||
// PlanDeleteInput is the input for agentic_plan_delete.
|
||||
//
|
||||
// input := PlanDeleteInput{ID: "add-notifications"}
|
||||
type PlanDeleteInput struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
// PlanDeleteOutput is the output for agentic_plan_delete.
|
||||
//
|
||||
// // out.Deleted == "add-notifications"
|
||||
type PlanDeleteOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Deleted string `json:"deleted"`
|
||||
}
|
||||
|
||||
// PlanListInput is the input for agentic_plan_list.
|
||||
//
|
||||
// input := PlanListInput{Status: "draft"}
|
||||
type PlanListInput struct {
|
||||
Status string `json:"status,omitempty"`
|
||||
Repo string `json:"repo,omitempty"`
|
||||
}
|
||||
|
||||
// PlanListOutput is the output for agentic_plan_list.
|
||||
//
|
||||
// // len(out.Plans) >= 1
|
||||
type PlanListOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Count int `json:"count"`
|
||||
Plans []Plan `json:"plans"`
|
||||
}
|
||||
|
||||
// PlanCheckpointInput is the input for agentic_plan_checkpoint.
|
||||
//
|
||||
// input := PlanCheckpointInput{ID: "add-notifications", Phase: 1, Done: true}
|
||||
type PlanCheckpointInput struct {
|
||||
ID string `json:"id"`
|
||||
Phase int `json:"phase"`
|
||||
Notes string `json:"notes,omitempty"`
|
||||
Done bool `json:"done,omitempty"`
|
||||
}
|
||||
|
||||
// PlanCheckpointOutput is the output for agentic_plan_checkpoint.
|
||||
//
|
||||
// // out.Plan.Phases[0].Status == "done"
|
||||
type PlanCheckpointOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Plan Plan `json:"plan"`
|
||||
}
|
||||
|
||||
// --- Registration ---
|
||||
|
||||
func (s *PrepSubsystem) registerPlanTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *PrepSubsystem) registerPlanTools(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_plan_create",
|
||||
Description: "Create an implementation plan. Plans track phased work with acceptance criteria, status lifecycle (draft → ready → in_progress → needs_verification → verified → approved), and per-phase progress.",
|
||||
}, s.planCreate)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_plan_read",
|
||||
Description: "Read an implementation plan by ID. Returns the full plan with all phases, criteria, and status.",
|
||||
}, s.planRead)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
// agentic_plan_status is kept as a user-facing alias for the read tool.
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_plan_status",
|
||||
Description: "Get the current status of an implementation plan by ID. Returns the full plan with all phases, criteria, and status.",
|
||||
}, s.planRead)
|
||||
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_plan_update",
|
||||
Description: "Update an implementation plan. Supports partial updates — only provided fields are changed. Use this to advance status, update phases, or add notes.",
|
||||
}, s.planUpdate)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_plan_delete",
|
||||
Description: "Delete an implementation plan by ID. Permanently removes the plan file.",
|
||||
}, s.planDelete)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_plan_list",
|
||||
Description: "List implementation plans. Supports filtering by status (draft, ready, in_progress, etc.) and repo.",
|
||||
}, s.planList)
|
||||
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_plan_checkpoint",
|
||||
Description: "Record a checkpoint for a plan phase and optionally mark the phase done.",
|
||||
}, s.planCheckpoint)
|
||||
}
|
||||
|
||||
// --- Handlers ---
|
||||
|
|
@ -309,6 +377,48 @@ func (s *PrepSubsystem) planList(_ context.Context, _ *mcp.CallToolRequest, inpu
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) planCheckpoint(_ context.Context, _ *mcp.CallToolRequest, input PlanCheckpointInput) (*mcp.CallToolResult, PlanCheckpointOutput, error) {
|
||||
if input.ID == "" {
|
||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "id is required", nil)
|
||||
}
|
||||
if input.Phase <= 0 {
|
||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "phase must be greater than zero", nil)
|
||||
}
|
||||
if input.Notes == "" && !input.Done {
|
||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "notes or done is required", nil)
|
||||
}
|
||||
|
||||
plan, err := readPlan(s.plansDir(), input.ID)
|
||||
if err != nil {
|
||||
return nil, PlanCheckpointOutput{}, err
|
||||
}
|
||||
|
||||
phaseIndex := input.Phase - 1
|
||||
if phaseIndex >= len(plan.Phases) {
|
||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "phase not found", nil)
|
||||
}
|
||||
|
||||
phase := &plan.Phases[phaseIndex]
|
||||
phase.Checkpoints = append(phase.Checkpoints, Checkpoint{
|
||||
Notes: input.Notes,
|
||||
Done: input.Done,
|
||||
CreatedAt: time.Now(),
|
||||
})
|
||||
if input.Done {
|
||||
phase.Status = "done"
|
||||
}
|
||||
|
||||
plan.UpdatedAt = time.Now()
|
||||
if _, err := writePlan(s.plansDir(), plan); err != nil {
|
||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "failed to write plan", err)
|
||||
}
|
||||
|
||||
return nil, PlanCheckpointOutput{
|
||||
Success: true,
|
||||
Plan: *plan,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// --- Helpers ---
|
||||
|
||||
func (s *PrepSubsystem) plansDir() string {
|
||||
|
|
@ -373,7 +483,7 @@ func writePlan(dir string, plan *Plan) (string, error) {
|
|||
return "", err
|
||||
}
|
||||
|
||||
return path, coreio.Local.Write(path, string(data))
|
||||
return path, writeAtomic(path, string(data))
|
||||
}
|
||||
|
||||
func validPlanStatus(status string) bool {
|
||||
|
|
|
|||
62
pkg/mcp/agentic/plan_test.go
Normal file
62
pkg/mcp/agentic/plan_test.go
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestPlanCheckpoint_Good_AppendsCheckpointAndMarksPhaseDone verifies the
// happy path of planCheckpoint: an existing phase gains one checkpoint with
// the supplied notes and a non-zero timestamp, and Done: true flips the
// phase status to "done".
func TestPlanCheckpoint_Good_AppendsCheckpointAndMarksPhaseDone(t *testing.T) {
	root := t.TempDir()
	sub := &PrepSubsystem{codePath: root}

	// Seed a one-phase plan on disk for the handler to load.
	plan := &Plan{
		ID:        "plan-1",
		Title:     "Test plan",
		Status:    "in_progress",
		Objective: "Verify checkpoints",
		Phases: []Phase{
			{
				Number: 1,
				Name:   "Phase 1",
				Status: "in_progress",
			},
		},
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}

	if _, err := writePlan(sub.plansDir(), plan); err != nil {
		t.Fatalf("writePlan failed: %v", err)
	}

	_, out, err := sub.planCheckpoint(context.Background(), nil, PlanCheckpointInput{
		ID:    plan.ID,
		Phase: 1,
		Notes: "Implementation verified",
		Done:  true,
	})
	if err != nil {
		t.Fatalf("planCheckpoint failed: %v", err)
	}
	if !out.Success {
		t.Fatal("expected checkpoint output success")
	}
	if out.Plan.Phases[0].Status != "done" {
		t.Fatalf("expected phase status done, got %q", out.Plan.Phases[0].Status)
	}
	if len(out.Plan.Phases[0].Checkpoints) != 1 {
		t.Fatalf("expected 1 checkpoint, got %d", len(out.Plan.Phases[0].Checkpoints))
	}
	if out.Plan.Phases[0].Checkpoints[0].Notes != "Implementation verified" {
		t.Fatalf("unexpected checkpoint notes: %q", out.Plan.Phases[0].Checkpoints[0].Notes)
	}
	if !out.Plan.Phases[0].Checkpoints[0].Done {
		t.Fatal("expected checkpoint to be marked done")
	}
	if out.Plan.Phases[0].Checkpoints[0].CreatedAt.IsZero() {
		t.Fatal("expected checkpoint timestamp")
	}
}
|
||||
|
|
@ -12,6 +12,7 @@ import (
|
|||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
|
|
@ -19,16 +20,26 @@ import (
|
|||
|
||||
// --- agentic_create_pr ---
|
||||
|
||||
// PRInput is the input for agentic_create_pr and agentic_pr.
//
// input := PRInput{
//	Workspace: "mcp-1773581873",
//	Base:      "main",
// }
type PRInput struct {
	Workspace string `json:"workspace"`         // workspace name (e.g. "mcp-1773581873")
	Title     string `json:"title,omitempty"`   // PR title (default: task description)
	Body      string `json:"body,omitempty"`    // PR body (default: auto-generated)
	Base      string `json:"base,omitempty"`    // base branch (default: "main")
	DryRun    bool   `json:"dry_run,omitempty"` // preview without creating
}

// CreatePRInput is kept as a compatibility alias for older callers.
type CreatePRInput = PRInput
|
||||
|
||||
// CreatePROutput is the output for agentic_create_pr.
|
||||
//
|
||||
// // out.Success == true, out.Branch == "agent/issue-123-fix", out.Pushed == true
|
||||
type CreatePROutput struct {
|
||||
Success bool `json:"success"`
|
||||
PRURL string `json:"pr_url,omitempty"`
|
||||
|
|
@ -39,14 +50,15 @@ type CreatePROutput struct {
|
|||
Pushed bool `json:"pushed"`
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) registerCreatePRTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *PrepSubsystem) registerCreatePRTool(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_create_pr",
|
||||
Description: "Create a pull request from an agent workspace. Pushes the branch to Forge and opens a PR. Links to the source issue if one was tracked.",
|
||||
}, s.createPR)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, input CreatePRInput) (*mcp.CallToolResult, CreatePROutput, error) {
|
||||
func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, input PRInput) (*mcp.CallToolResult, CreatePROutput, error) {
|
||||
if input.Workspace == "" {
|
||||
return nil, CreatePROutput{}, coreerr.E("createPR", "workspace is required", nil)
|
||||
}
|
||||
|
|
@ -127,7 +139,7 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in
|
|||
|
||||
// Update status with PR URL
|
||||
st.PRURL = prURL
|
||||
writeStatus(wsDir, st)
|
||||
s.saveStatus(wsDir, st)
|
||||
|
||||
// Comment on issue if tracked
|
||||
if st.Issue > 0 {
|
||||
|
|
@ -163,15 +175,21 @@ func (s *PrepSubsystem) buildPRBody(st *WorkspaceStatus) string {
|
|||
}
|
||||
|
||||
func (s *PrepSubsystem) forgeCreatePR(ctx context.Context, org, repo, head, base, title, body string) (string, int, error) {
|
||||
payload, _ := json.Marshal(map[string]any{
|
||||
payload, err := json.Marshal(map[string]any{
|
||||
"title": title,
|
||||
"body": body,
|
||||
"head": head,
|
||||
"base": base,
|
||||
})
|
||||
if err != nil {
|
||||
return "", 0, coreerr.E("forgeCreatePR", "failed to marshal PR payload", err)
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return "", 0, coreerr.E("forgeCreatePR", "failed to build PR request", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
|
|
@ -183,7 +201,9 @@ func (s *PrepSubsystem) forgeCreatePR(ctx context.Context, org, repo, head, base
|
|||
|
||||
if resp.StatusCode != 201 {
|
||||
var errBody map[string]any
|
||||
json.NewDecoder(resp.Body).Decode(&errBody)
|
||||
if err := json.NewDecoder(resp.Body).Decode(&errBody); err != nil {
|
||||
return "", 0, coreerr.E("forgeCreatePR", fmt.Sprintf("HTTP %d with unreadable error body", resp.StatusCode), err)
|
||||
}
|
||||
msg, _ := errBody["message"].(string)
|
||||
return "", 0, coreerr.E("forgeCreatePR", fmt.Sprintf("HTTP %d: %s", resp.StatusCode, msg), nil)
|
||||
}
|
||||
|
|
@ -192,16 +212,24 @@ func (s *PrepSubsystem) forgeCreatePR(ctx context.Context, org, repo, head, base
|
|||
Number int `json:"number"`
|
||||
HTMLURL string `json:"html_url"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&pr)
|
||||
if err := json.NewDecoder(resp.Body).Decode(&pr); err != nil {
|
||||
return "", 0, coreerr.E("forgeCreatePR", "failed to decode PR response", err)
|
||||
}
|
||||
|
||||
return pr.HTMLURL, pr.Number, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) commentOnIssue(ctx context.Context, org, repo string, issue int, comment string) {
|
||||
payload, _ := json.Marshal(map[string]string{"body": comment})
|
||||
payload, err := json.Marshal(map[string]string{"body": comment})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/comments", s.forgeURL, org, repo, issue)
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
|
|
@ -215,14 +243,18 @@ func (s *PrepSubsystem) commentOnIssue(ctx context.Context, org, repo string, is
|
|||
// --- agentic_list_prs ---
|
||||
|
||||
// ListPRsInput is the input for agentic_list_prs.
//
// input := ListPRsInput{Org: "core", Repo: "go-io", State: "open", Limit: 20}
type ListPRsInput struct {
	Org   string `json:"org,omitempty"`   // forge org (default "core")
	Repo  string `json:"repo,omitempty"`  // specific repo, or empty for all
	State string `json:"state,omitempty"` // "open" (default), "closed", "all"
	Limit int    `json:"limit,omitempty"` // max results (default 20)
}
|
||||
|
||||
// ListPRsOutput is the output for agentic_list_prs.
|
||||
//
|
||||
// // out.Success == true, len(out.PRs) <= 20
|
||||
type ListPRsOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Count int `json:"count"`
|
||||
|
|
@ -230,6 +262,8 @@ type ListPRsOutput struct {
|
|||
}
|
||||
|
||||
// PRInfo represents a pull request.
|
||||
//
|
||||
// // pr.Number == 42, pr.Branch == "agent/issue-42-fix"
|
||||
type PRInfo struct {
|
||||
Repo string `json:"repo"`
|
||||
Number int `json:"number"`
|
||||
|
|
@ -243,8 +277,9 @@ type PRInfo struct {
|
|||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) registerListPRsTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *PrepSubsystem) registerListPRsTool(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_list_prs",
|
||||
Description: "List pull requests across Forge repos. Filter by org, repo, and state (open/closed/all).",
|
||||
}, s.listPRs)
|
||||
|
|
|
|||
28
pkg/mcp/agentic/pr_test.go
Normal file
28
pkg/mcp/agentic/pr_test.go
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestForgeCreatePR_Bad_InvalidJSONResponse(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_, _ = w.Write([]byte("{not-json"))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
s := &PrepSubsystem{
|
||||
forgeURL: srv.URL,
|
||||
client: srv.Client(),
|
||||
}
|
||||
|
||||
_, _, err := s.forgeCreatePR(context.Background(), "core", "demo", "agent/test", "main", "Fix bug", "body")
|
||||
if err == nil {
|
||||
t.Fatal("expected malformed PR response to fail")
|
||||
}
|
||||
}
|
||||
|
|
@ -17,6 +17,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
|
|
@ -32,9 +33,18 @@ type PrepSubsystem struct {
|
|||
specsPath string
|
||||
codePath string
|
||||
client *http.Client
|
||||
notifier coremcp.Notifier
|
||||
}
|
||||
|
||||
// Compile-time checks that PrepSubsystem satisfies the MCP subsystem contracts.
var (
	_ coremcp.Subsystem             = (*PrepSubsystem)(nil)
	_ coremcp.SubsystemWithShutdown = (*PrepSubsystem)(nil)
	_ coremcp.SubsystemWithNotifier = (*PrepSubsystem)(nil)
)
|
||||
|
||||
// NewPrep creates an agentic subsystem.
|
||||
//
|
||||
// prep := NewPrep()
|
||||
func NewPrep() *PrepSubsystem {
|
||||
home, _ := os.UserHomeDir()
|
||||
|
||||
|
|
@ -61,6 +71,18 @@ func NewPrep() *PrepSubsystem {
|
|||
}
|
||||
}
|
||||
|
||||
// SetNotifier wires the shared MCP notifier into the agentic subsystem.
// Implements coremcp.SubsystemWithNotifier; until it is called, emitChannel
// is a no-op.
func (s *PrepSubsystem) SetNotifier(n coremcp.Notifier) {
	s.notifier = n
}
|
||||
|
||||
// emitChannel pushes an agentic event through the shared notifier.
|
||||
func (s *PrepSubsystem) emitChannel(ctx context.Context, channel string, data any) {
|
||||
if s.notifier != nil {
|
||||
s.notifier.ChannelSend(ctx, channel, data)
|
||||
}
|
||||
}
|
||||
|
||||
func envOr(key, fallback string) string {
|
||||
if v := os.Getenv(key); v != "" {
|
||||
return v
|
||||
|
|
@ -108,25 +130,30 @@ func sanitizeRepoPathSegment(value, field string, allowSubdirs bool) (string, er
|
|||
// Name implements mcp.Subsystem; this subsystem registers as "agentic".
func (s *PrepSubsystem) Name() string { return "agentic" }
|
||||
// RegisterTools implements mcp.Subsystem.
|
||||
func (s *PrepSubsystem) RegisterTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *PrepSubsystem) RegisterTools(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_prep_workspace",
|
||||
Description: "Prepare a sandboxed agent workspace with TODO.md, CLAUDE.md, CONTEXT.md, CONSUMERS.md, RECENT.md, and a git clone of the target repo in src/.",
|
||||
}, s.prepWorkspace)
|
||||
|
||||
s.registerDispatchTool(server)
|
||||
s.registerStatusTool(server)
|
||||
s.registerResumeTool(server)
|
||||
s.registerCreatePRTool(server)
|
||||
s.registerListPRsTool(server)
|
||||
s.registerEpicTool(server)
|
||||
s.registerDispatchTool(svc)
|
||||
s.registerIssueTools(svc)
|
||||
s.registerStatusTool(svc)
|
||||
s.registerResumeTool(svc)
|
||||
s.registerCreatePRTool(svc)
|
||||
s.registerListPRsTool(svc)
|
||||
s.registerEpicTool(svc)
|
||||
s.registerWatchTool(svc)
|
||||
s.registerReviewQueueTool(svc)
|
||||
s.registerMirrorTool(svc)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_scan",
|
||||
Description: "Scan Forge repos for open issues with actionable labels (agentic, help-wanted, bug).",
|
||||
}, s.scan)
|
||||
|
||||
s.registerPlanTools(server)
|
||||
s.registerPlanTools(svc)
|
||||
}
|
||||
|
||||
// Shutdown implements mcp.SubsystemWithShutdown.
|
||||
|
|
@ -145,6 +172,7 @@ type PrepInput struct {
|
|||
Org string `json:"org,omitempty"` // default "core"
|
||||
Issue int `json:"issue,omitempty"` // Forge issue number
|
||||
Task string `json:"task,omitempty"` // Task description (if no issue)
|
||||
Branch string `json:"branch,omitempty"` // Override branch name
|
||||
Template string `json:"template,omitempty"` // Prompt template: conventions, security, coding (default: coding)
|
||||
PlanTemplate string `json:"plan_template,omitempty"` // Plan template slug: bug-fix, code-review, new-feature, refactor, feature-port
|
||||
Variables map[string]string `json:"variables,omitempty"` // Template variable substitution
|
||||
|
|
@ -155,6 +183,7 @@ type PrepInput struct {
|
|||
type PrepOutput struct {
|
||||
Success bool `json:"success"`
|
||||
WorkspaceDir string `json:"workspace_dir"`
|
||||
Branch string `json:"branch,omitempty"`
|
||||
WikiPages int `json:"wiki_pages"`
|
||||
SpecFiles int `json:"spec_files"`
|
||||
Memories int `json:"memories"`
|
||||
|
|
@ -197,6 +226,7 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
|||
|
||||
// Workspace root: .core/workspace/{repo}-{timestamp}/
|
||||
wsRoot := s.workspaceRoot()
|
||||
coreio.Local.EnsureDir(wsRoot)
|
||||
wsName := fmt.Sprintf("%s-%d", input.Repo, time.Now().Unix())
|
||||
wsDir := filepath.Join(wsRoot, wsName)
|
||||
|
||||
|
|
@ -215,27 +245,27 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
|||
return nil, PrepOutput{}, coreerr.E("prepWorkspace", "failed to clone repository", err)
|
||||
}
|
||||
|
||||
// Create feature branch
|
||||
taskSlug := strings.Map(func(r rune) rune {
|
||||
if r >= 'a' && r <= 'z' || r >= '0' && r <= '9' || r == '-' {
|
||||
return r
|
||||
// Create feature branch.
|
||||
branchName := input.Branch
|
||||
if branchName == "" {
|
||||
taskSlug := branchSlug(input.Task)
|
||||
if input.Issue > 0 {
|
||||
issueSlug := branchSlug(input.Task)
|
||||
branchName = fmt.Sprintf("agent/issue-%d", input.Issue)
|
||||
if issueSlug != "" {
|
||||
branchName += "-" + issueSlug
|
||||
}
|
||||
} else if taskSlug != "" {
|
||||
branchName = fmt.Sprintf("agent/%s", taskSlug)
|
||||
}
|
||||
if r >= 'A' && r <= 'Z' {
|
||||
return r + 32 // lowercase
|
||||
}
|
||||
return '-'
|
||||
}, input.Task)
|
||||
if len(taskSlug) > 40 {
|
||||
taskSlug = taskSlug[:40]
|
||||
}
|
||||
taskSlug = strings.Trim(taskSlug, "-")
|
||||
if taskSlug != "" {
|
||||
branchName := fmt.Sprintf("agent/%s", taskSlug)
|
||||
if branchName != "" {
|
||||
branchCmd := exec.CommandContext(ctx, "git", "checkout", "-b", branchName)
|
||||
branchCmd.Dir = srcDir
|
||||
if err := branchCmd.Run(); err != nil {
|
||||
return nil, PrepOutput{}, coreerr.E("prepWorkspace", "failed to create branch", err)
|
||||
}
|
||||
out.Branch = branchName
|
||||
}
|
||||
|
||||
// Create context dirs inside src/
|
||||
|
|
@ -248,20 +278,20 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
|||
// 2. Copy CLAUDE.md and GEMINI.md to workspace
|
||||
claudeMdPath := filepath.Join(repoPath, "CLAUDE.md")
|
||||
if data, err := coreio.Local.Read(claudeMdPath); err == nil {
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "CLAUDE.md"), data)
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "CLAUDE.md"), data)
|
||||
out.ClaudeMd = true
|
||||
}
|
||||
// Copy GEMINI.md from core/agent (ethics framework for all agents)
|
||||
agentGeminiMd := filepath.Join(s.codePath, "core", "agent", "GEMINI.md")
|
||||
if data, err := coreio.Local.Read(agentGeminiMd); err == nil {
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "GEMINI.md"), data)
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "GEMINI.md"), data)
|
||||
}
|
||||
|
||||
// Copy persona if specified
|
||||
if persona != "" {
|
||||
personaPath := filepath.Join(s.codePath, "core", "agent", "prompts", "personas", persona+".md")
|
||||
if data, err := coreio.Local.Read(personaPath); err == nil {
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "PERSONA.md"), data)
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "PERSONA.md"), data)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -271,7 +301,7 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
|||
} else if input.Task != "" {
|
||||
todo := fmt.Sprintf("# TASK: %s\n\n**Repo:** %s/%s\n**Status:** ready\n\n## Objective\n\n%s\n",
|
||||
input.Task, input.Org, input.Repo, input.Task)
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "TODO.md"), todo)
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "TODO.md"), todo)
|
||||
}
|
||||
|
||||
// 4. Generate CONTEXT.md from OpenBrain
|
||||
|
|
@ -301,6 +331,42 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
|||
return nil, out, nil
|
||||
}
|
||||
|
||||
// branchSlug converts a free-form string into a git-friendly branch suffix.
|
||||
func branchSlug(value string) string {
|
||||
value = strings.ToLower(strings.TrimSpace(value))
|
||||
if value == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
var b strings.Builder
|
||||
b.Grow(len(value))
|
||||
lastDash := false
|
||||
for _, r := range value {
|
||||
switch {
|
||||
case r >= 'a' && r <= 'z', r >= '0' && r <= '9':
|
||||
b.WriteRune(r)
|
||||
lastDash = false
|
||||
case r == '-' || r == '_' || r == '.' || r == ' ':
|
||||
if !lastDash {
|
||||
b.WriteByte('-')
|
||||
lastDash = true
|
||||
}
|
||||
default:
|
||||
if !lastDash {
|
||||
b.WriteByte('-')
|
||||
lastDash = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
slug := strings.Trim(b.String(), "-")
|
||||
if len(slug) > 40 {
|
||||
slug = slug[:40]
|
||||
slug = strings.Trim(slug, "-")
|
||||
}
|
||||
return slug
|
||||
}
|
||||
|
||||
// --- Prompt templates ---
|
||||
|
||||
func (s *PrepSubsystem) writePromptTemplate(template, wsDir string) {
|
||||
|
|
@ -368,7 +434,7 @@ Do NOT push. Commit only — a reviewer will verify and push.
|
|||
prompt = "Read TODO.md and complete the task. Work in src/.\n"
|
||||
}
|
||||
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "PROMPT.md"), prompt)
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "PROMPT.md"), prompt)
|
||||
}
|
||||
|
||||
// --- Plan template rendering ---
|
||||
|
|
@ -446,7 +512,7 @@ func (s *PrepSubsystem) writePlanFromTemplate(templateSlug string, variables map
|
|||
plan.WriteString("\n**Commit after completing this phase.**\n\n---\n\n")
|
||||
}
|
||||
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "PLAN.md"), plan.String())
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "PLAN.md"), plan.String())
|
||||
}
|
||||
|
||||
// --- Helpers (unchanged) ---
|
||||
|
|
@ -457,7 +523,10 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
|||
}
|
||||
|
||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/wiki/pages", s.forgeURL, org, repo)
|
||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
|
|
@ -473,7 +542,9 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
|||
Title string `json:"title"`
|
||||
SubURL string `json:"sub_url"`
|
||||
}
|
||||
json.NewDecoder(resp.Body).Decode(&pages)
|
||||
if err := json.NewDecoder(resp.Body).Decode(&pages); err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
count := 0
|
||||
for _, page := range pages {
|
||||
|
|
@ -483,7 +554,10 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
|||
}
|
||||
|
||||
pageURL := fmt.Sprintf("%s/api/v1/repos/%s/%s/wiki/page/%s", s.forgeURL, org, repo, subURL)
|
||||
pageReq, _ := http.NewRequestWithContext(ctx, "GET", pageURL, nil)
|
||||
pageReq, err := http.NewRequestWithContext(ctx, "GET", pageURL, nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
pageReq.Header.Set("Authorization", "token "+s.forgeToken)
|
||||
|
||||
pageResp, err := s.client.Do(pageReq)
|
||||
|
|
@ -498,14 +572,19 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
|||
var pageData struct {
|
||||
ContentBase64 string `json:"content_base64"`
|
||||
}
|
||||
json.NewDecoder(pageResp.Body).Decode(&pageData)
|
||||
if err := json.NewDecoder(pageResp.Body).Decode(&pageData); err != nil {
|
||||
continue
|
||||
}
|
||||
pageResp.Body.Close()
|
||||
|
||||
if pageData.ContentBase64 == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
content, _ := base64.StdEncoding.DecodeString(pageData.ContentBase64)
|
||||
content, err := base64.StdEncoding.DecodeString(pageData.ContentBase64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
filename := strings.Map(func(r rune) rune {
|
||||
if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' {
|
||||
return r
|
||||
|
|
@ -513,7 +592,7 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
|||
return '-'
|
||||
}, page.Title) + ".md"
|
||||
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "kb", filename), string(content))
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "kb", filename), string(content))
|
||||
count++
|
||||
}
|
||||
|
||||
|
|
@ -527,7 +606,7 @@ func (s *PrepSubsystem) copySpecs(wsDir string) int {
|
|||
for _, file := range specFiles {
|
||||
src := filepath.Join(s.specsPath, file)
|
||||
if data, err := coreio.Local.Read(src); err == nil {
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "specs", file), data)
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "specs", file), data)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
|
@ -540,14 +619,20 @@ func (s *PrepSubsystem) generateContext(ctx context.Context, repo, wsDir string)
|
|||
return 0
|
||||
}
|
||||
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
body, err := json.Marshal(map[string]any{
|
||||
"query": "architecture conventions key interfaces for " + repo,
|
||||
"top_k": 10,
|
||||
"project": repo,
|
||||
"agent_id": "cladius",
|
||||
})
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", s.brainURL+"/v1/brain/recall", strings.NewReader(string(body)))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", s.brainURL+"/v1/brain/recall", strings.NewReader(string(body)))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Authorization", "Bearer "+s.brainKey)
|
||||
|
|
@ -561,11 +646,16 @@ func (s *PrepSubsystem) generateContext(ctx context.Context, repo, wsDir string)
|
|||
return 0
|
||||
}
|
||||
|
||||
respData, _ := goio.ReadAll(resp.Body)
|
||||
respData, err := goio.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
var result struct {
|
||||
Memories []map[string]any `json:"memories"`
|
||||
}
|
||||
json.Unmarshal(respData, &result)
|
||||
if err := json.Unmarshal(respData, &result); err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
var content strings.Builder
|
||||
content.WriteString("# Context — " + repo + "\n\n")
|
||||
|
|
@ -579,7 +669,7 @@ func (s *PrepSubsystem) generateContext(ctx context.Context, repo, wsDir string)
|
|||
content.WriteString(fmt.Sprintf("### %d. %s [%s] (score: %.3f)\n\n%s\n\n", i+1, memProject, memType, score, memContent))
|
||||
}
|
||||
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "CONTEXT.md"), content.String())
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "CONTEXT.md"), content.String())
|
||||
return len(result.Memories)
|
||||
}
|
||||
|
||||
|
|
@ -616,7 +706,7 @@ func (s *PrepSubsystem) findConsumers(repo, wsDir string) int {
|
|||
content += "- " + c + "\n"
|
||||
}
|
||||
content += fmt.Sprintf("\n**Breaking change risk: %d consumers.**\n", len(consumers))
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "CONSUMERS.md"), content)
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "CONSUMERS.md"), content)
|
||||
}
|
||||
|
||||
return len(consumers)
|
||||
|
|
@ -633,7 +723,7 @@ func (s *PrepSubsystem) gitLog(repoPath, wsDir string) int {
|
|||
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
if len(lines) > 0 && lines[0] != "" {
|
||||
content := "# Recent Changes\n\n```\n" + string(output) + "```\n"
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "RECENT.md"), content)
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "RECENT.md"), content)
|
||||
}
|
||||
|
||||
return len(lines)
|
||||
|
|
@ -669,5 +759,5 @@ func (s *PrepSubsystem) generateTodo(ctx context.Context, org, repo string, issu
|
|||
content += fmt.Sprintf("**Repo:** %s/%s\n\n---\n\n", org, repo)
|
||||
content += "## Objective\n\n" + issueData.Body + "\n"
|
||||
|
||||
coreio.Local.Write(filepath.Join(wsDir, "src", "TODO.md"), content)
|
||||
_ = writeAtomic(filepath.Join(wsDir, "src", "TODO.md"), content)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,8 +6,20 @@ import (
|
|||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
)
|
||||
|
||||
type recordingNotifier struct {
|
||||
channel string
|
||||
data any
|
||||
}
|
||||
|
||||
func (r *recordingNotifier) ChannelSend(_ context.Context, channel string, data any) {
|
||||
r.channel = channel
|
||||
r.data = data
|
||||
}
|
||||
|
||||
func TestSanitizeRepoPathSegment_Good(t *testing.T) {
|
||||
t.Run("repo", func(t *testing.T) {
|
||||
value, err := sanitizeRepoPathSegment("go-io", "repo", false)
|
||||
|
|
@ -94,3 +106,46 @@ func TestPrepWorkspace_Bad_BadPlanTemplateTraversal(t *testing.T) {
|
|||
t.Fatalf("expected plan template error, got %q", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetNotifier_Good_EmitsChannelEvents(t *testing.T) {
|
||||
s := NewPrep()
|
||||
notifier := &recordingNotifier{}
|
||||
s.SetNotifier(notifier)
|
||||
|
||||
s.emitChannel(context.Background(), coremcp.ChannelAgentStatus, map[string]any{"status": "running"})
|
||||
|
||||
if notifier.channel != coremcp.ChannelAgentStatus {
|
||||
t.Fatalf("expected %s channel, got %q", coremcp.ChannelAgentStatus, notifier.channel)
|
||||
}
|
||||
if payload, ok := notifier.data.(map[string]any); !ok || payload["status"] != "running" {
|
||||
t.Fatalf("expected payload to include running status, got %#v", notifier.data)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmitHarvestComplete_Good_EmitsChannelEvents(t *testing.T) {
|
||||
s := NewPrep()
|
||||
notifier := &recordingNotifier{}
|
||||
s.SetNotifier(notifier)
|
||||
|
||||
s.emitHarvestComplete(context.Background(), "go-io-123", "go-io", 4, true)
|
||||
|
||||
if notifier.channel != coremcp.ChannelHarvestComplete {
|
||||
t.Fatalf("expected %s channel, got %q", coremcp.ChannelHarvestComplete, notifier.channel)
|
||||
}
|
||||
payload, ok := notifier.data.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected payload object, got %#v", notifier.data)
|
||||
}
|
||||
if payload["workspace"] != "go-io-123" {
|
||||
t.Fatalf("expected workspace go-io-123, got %#v", payload["workspace"])
|
||||
}
|
||||
if payload["repo"] != "go-io" {
|
||||
t.Fatalf("expected repo go-io, got %#v", payload["repo"])
|
||||
}
|
||||
if payload["findings"] != 4 {
|
||||
t.Fatalf("expected findings 4, got %#v", payload["findings"])
|
||||
}
|
||||
if payload["issue_created"] != true {
|
||||
t.Fatalf("expected issue_created true, got %#v", payload["issue_created"])
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,18 +25,18 @@ type DispatchConfig struct {
|
|||
// RateConfig controls pacing between task dispatches.
|
||||
type RateConfig struct {
|
||||
ResetUTC string `yaml:"reset_utc"` // Daily quota reset time (UTC), e.g. "06:00"
|
||||
DailyLimit int `yaml:"daily_limit"` // Max requests per day (0 = unknown)
|
||||
MinDelay int `yaml:"min_delay"` // Minimum seconds between task starts
|
||||
SustainedDelay int `yaml:"sustained_delay"` // Delay when pacing for full-day use
|
||||
BurstWindow int `yaml:"burst_window"` // Hours before reset where burst kicks in
|
||||
BurstDelay int `yaml:"burst_delay"` // Delay during burst window
|
||||
DailyLimit int `yaml:"daily_limit"` // Max requests per day (0 = unknown)
|
||||
MinDelay int `yaml:"min_delay"` // Minimum seconds between task starts
|
||||
SustainedDelay int `yaml:"sustained_delay"` // Delay when pacing for full-day use
|
||||
BurstWindow int `yaml:"burst_window"` // Hours before reset where burst kicks in
|
||||
BurstDelay int `yaml:"burst_delay"` // Delay during burst window
|
||||
}
|
||||
|
||||
// AgentsConfig is the root of config/agents.yaml.
|
||||
type AgentsConfig struct {
|
||||
Version int `yaml:"version"`
|
||||
Dispatch DispatchConfig `yaml:"dispatch"`
|
||||
Concurrency map[string]int `yaml:"concurrency"`
|
||||
Version int `yaml:"version"`
|
||||
Dispatch DispatchConfig `yaml:"dispatch"`
|
||||
Concurrency map[string]int `yaml:"concurrency"`
|
||||
Rates map[string]RateConfig `yaml:"rates"`
|
||||
}
|
||||
|
||||
|
|
@ -243,7 +243,7 @@ func (s *PrepSubsystem) drainQueue() {
|
|||
st.Status = "running"
|
||||
st.PID = cmd.Process.Pid
|
||||
st.Runs++
|
||||
writeStatus(wsDir, st)
|
||||
s.saveStatus(wsDir, st)
|
||||
|
||||
go func() {
|
||||
cmd.Wait()
|
||||
|
|
@ -252,7 +252,7 @@ func (s *PrepSubsystem) drainQueue() {
|
|||
if st2, err := readStatus(wsDir); err == nil {
|
||||
st2.Status = "completed"
|
||||
st2.PID = 0
|
||||
writeStatus(wsDir, st2)
|
||||
s.saveStatus(wsDir, st2)
|
||||
}
|
||||
|
||||
// Ingest scan findings as issues
|
||||
|
|
|
|||
209
pkg/mcp/agentic/repo_helpers.go
Normal file
209
pkg/mcp/agentic/repo_helpers.go
Normal file
|
|
@ -0,0 +1,209 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
)
|
||||
|
||||
// listLocalRepos returns the names of all directories directly under
// basePath. It returns nil when the directory cannot be read, and an empty
// (non-nil) slice when it is readable but contains no subdirectories.
func listLocalRepos(basePath string) []string {
	entries, err := os.ReadDir(basePath)
	if err != nil {
		return nil
	}

	repos := make([]string, 0, len(entries))
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		repos = append(repos, e.Name())
	}
	return repos
}
|
||||
|
||||
// hasRemote reports whether the git repository at repoDir has a remote named
// remote with a non-empty URL. Any git failure (missing binary, not a repo,
// unknown remote) is treated as "no remote".
func hasRemote(repoDir, remote string) bool {
	cmd := exec.Command("git", "remote", "get-url", remote)
	cmd.Dir = repoDir
	out, err := cmd.Output()
	if err != nil {
		return false
	}
	return strings.TrimSpace(string(out)) != ""
}
|
||||
|
||||
func commitsAhead(repoDir, baseRef, headRef string) int {
|
||||
cmd := exec.Command("git", "rev-list", "--count", baseRef+".."+headRef)
|
||||
cmd.Dir = repoDir
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
count, err := parsePositiveInt(strings.TrimSpace(string(out)))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// filesChanged returns the number of files that differ between baseRef and
// headRef in repoDir, via `git diff --name-only`. Any git error yields 0.
func filesChanged(repoDir, baseRef, headRef string) int {
	cmd := exec.Command("git", "diff", "--name-only", baseRef+".."+headRef)
	cmd.Dir = repoDir
	raw, err := cmd.Output()
	if err != nil {
		return 0
	}

	// Count non-blank lines; an empty diff produces a single empty line
	// after the split, which is correctly skipped here.
	count := 0
	for _, name := range strings.Split(strings.TrimSpace(string(raw)), "\n") {
		if strings.TrimSpace(name) != "" {
			count++
		}
	}
	return count
}
|
||||
|
||||
func gitOutput(repoDir string, args ...string) (string, error) {
|
||||
cmd := exec.Command("git", args...)
|
||||
cmd.Dir = repoDir
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return "", coreerr.E("gitOutput", string(out), err)
|
||||
}
|
||||
return strings.TrimSpace(string(out)), nil
|
||||
}
|
||||
|
||||
func parsePositiveInt(value string) (int, error) {
|
||||
value = strings.TrimSpace(value)
|
||||
if value == "" {
|
||||
return 0, coreerr.E("parsePositiveInt", "empty value", nil)
|
||||
}
|
||||
n := 0
|
||||
for _, r := range value {
|
||||
if r < '0' || r > '9' {
|
||||
return 0, coreerr.E("parsePositiveInt", "value contains non-numeric characters", nil)
|
||||
}
|
||||
n = n*10 + int(r-'0')
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// readGitHubPRURL returns the URL of the open GitHub pull request whose head
// is the dev branch, or "" when none exists. It shells out to the gh CLI and
// propagates any execution or JSON-decoding error.
func readGitHubPRURL(repoDir string) (string, error) {
	cmd := exec.Command("gh", "pr", "list", "--head", "dev", "--state", "open", "--json", "url", "--limit", "1")
	cmd.Dir = repoDir
	raw, err := cmd.Output()
	if err != nil {
		return "", err
	}

	var prs []struct {
		URL string `json:"url"`
	}
	if err := json.Unmarshal(raw, &prs); err != nil {
		return "", err
	}
	if len(prs) > 0 {
		return prs[0].URL, nil
	}
	return "", nil
}
|
||||
|
||||
func createGitHubPR(ctx context.Context, repoDir, repo string, commits, files int) (string, error) {
|
||||
if _, err := exec.LookPath("gh"); err != nil {
|
||||
return "", coreerr.E("createGitHubPR", "gh CLI is not available", err)
|
||||
}
|
||||
|
||||
if url, err := readGitHubPRURL(repoDir); err == nil && url != "" {
|
||||
return url, nil
|
||||
}
|
||||
|
||||
body := "## Forge -> GitHub Sync\n\n"
|
||||
body += "**Commits:** " + itoa(commits) + "\n"
|
||||
body += "**Files changed:** " + itoa(files) + "\n\n"
|
||||
body += "Automated sync from Forge (forge.lthn.ai) to GitHub mirror.\n"
|
||||
body += "Review with CodeRabbit before merging.\n\n"
|
||||
body += "---\n"
|
||||
body += "Co-Authored-By: Virgil <virgil@lethean.io>"
|
||||
|
||||
title := "[sync] " + repo + ": " + itoa(commits) + " commits, " + itoa(files) + " files"
|
||||
|
||||
cmd := exec.CommandContext(ctx, "gh", "pr", "create",
|
||||
"--head", "dev",
|
||||
"--base", "main",
|
||||
"--title", title,
|
||||
"--body", body,
|
||||
)
|
||||
cmd.Dir = repoDir
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return "", coreerr.E("createGitHubPR", string(out), err)
|
||||
}
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(string(out)), "\n")
|
||||
if len(lines) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return strings.TrimSpace(lines[len(lines)-1]), nil
|
||||
}
|
||||
|
||||
func ensureDevBranch(repoDir string) error {
|
||||
cmd := exec.Command("git", "push", "github", "HEAD:refs/heads/dev", "--force")
|
||||
cmd.Dir = repoDir
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return coreerr.E("ensureDevBranch", string(out), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func reviewerCommand(ctx context.Context, repoDir, reviewer string) *exec.Cmd {
|
||||
switch reviewer {
|
||||
case "coderabbit":
|
||||
return exec.CommandContext(ctx, "coderabbit", "review")
|
||||
case "codex":
|
||||
return exec.CommandContext(ctx, "codex", "review")
|
||||
case "both":
|
||||
return exec.CommandContext(ctx, "coderabbit", "review")
|
||||
default:
|
||||
return exec.CommandContext(ctx, reviewer)
|
||||
}
|
||||
}
|
||||
|
||||
// itoa converts value to its base-10 string form; a thin strconv wrapper
// kept for call-site brevity in PR body/title construction.
func itoa(value int) string {
	return strconv.FormatInt(int64(value), 10)
}
|
||||
|
||||
func parseRetryAfter(detail string) time.Duration {
|
||||
re := regexp.MustCompile(`(?i)(\d+)\s*(minute|minutes|hour|hours|second|seconds)`)
|
||||
match := re.FindStringSubmatch(detail)
|
||||
if len(match) != 3 {
|
||||
return 5 * time.Minute
|
||||
}
|
||||
|
||||
n, err := strconv.Atoi(match[1])
|
||||
if err != nil || n <= 0 {
|
||||
return 5 * time.Minute
|
||||
}
|
||||
|
||||
switch strings.ToLower(match[2]) {
|
||||
case "hour", "hours":
|
||||
return time.Duration(n) * time.Hour
|
||||
case "second", "seconds":
|
||||
return time.Duration(n) * time.Second
|
||||
default:
|
||||
return time.Duration(n) * time.Minute
|
||||
}
|
||||
}
|
||||
|
||||
func repoRootFromCodePath(codePath string) string {
|
||||
return filepath.Join(codePath, "core")
|
||||
}
|
||||
|
|
@ -8,33 +8,40 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// ResumeInput is the input for agentic_resume.
|
||||
//
|
||||
// input := ResumeInput{Workspace: "go-mcp-1700000000", Answer: "Use the shared notifier"}
|
||||
type ResumeInput struct {
|
||||
Workspace string `json:"workspace"` // workspace name (e.g. "go-scm-1773581173")
|
||||
Answer string `json:"answer,omitempty"` // answer to the blocked question (written to ANSWER.md)
|
||||
Agent string `json:"agent,omitempty"` // override agent type (default: same as original)
|
||||
DryRun bool `json:"dry_run,omitempty"` // preview without executing
|
||||
Workspace string `json:"workspace"` // workspace name (e.g. "go-scm-1773581173")
|
||||
Answer string `json:"answer,omitempty"` // answer to the blocked question (written to ANSWER.md)
|
||||
Agent string `json:"agent,omitempty"` // override agent type (default: same as original)
|
||||
DryRun bool `json:"dry_run,omitempty"` // preview without executing
|
||||
}
|
||||
|
||||
// ResumeOutput is the output for agentic_resume.
|
||||
//
|
||||
// // out.Success == true, out.PID > 0
|
||||
type ResumeOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Workspace string `json:"workspace"`
|
||||
Agent string `json:"agent"`
|
||||
PID int `json:"pid,omitempty"`
|
||||
OutputFile string `json:"output_file,omitempty"`
|
||||
Prompt string `json:"prompt,omitempty"`
|
||||
Success bool `json:"success"`
|
||||
Workspace string `json:"workspace"`
|
||||
Agent string `json:"agent"`
|
||||
PID int `json:"pid,omitempty"`
|
||||
OutputFile string `json:"output_file,omitempty"`
|
||||
Prompt string `json:"prompt,omitempty"`
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) registerResumeTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *PrepSubsystem) registerResumeTool(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_resume",
|
||||
Description: "Resume a blocked agent workspace. Writes ANSWER.md if an answer is provided, then relaunches the agent with instructions to read it and continue.",
|
||||
}, s.resume)
|
||||
|
|
@ -73,7 +80,7 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
|||
if input.Answer != "" {
|
||||
answerPath := filepath.Join(srcDir, "ANSWER.md")
|
||||
content := fmt.Sprintf("# Answer\n\n%s\n", input.Answer)
|
||||
if err := coreio.Local.Write(answerPath, content); err != nil {
|
||||
if err := writeAtomic(answerPath, content); err != nil {
|
||||
return nil, ResumeOutput{}, coreerr.E("resume", "failed to write ANSWER.md", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -131,11 +138,38 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
|||
st.PID = cmd.Process.Pid
|
||||
st.Runs++
|
||||
st.Question = ""
|
||||
writeStatus(wsDir, st)
|
||||
s.saveStatus(wsDir, st)
|
||||
|
||||
go func() {
|
||||
cmd.Wait()
|
||||
outFile.Close()
|
||||
|
||||
postCtx := context.WithoutCancel(ctx)
|
||||
status := "completed"
|
||||
channel := coremcp.ChannelAgentComplete
|
||||
payload := map[string]any{
|
||||
"workspace": input.Workspace,
|
||||
"agent": agent,
|
||||
"repo": st.Repo,
|
||||
"branch": st.Branch,
|
||||
}
|
||||
|
||||
if data, err := coreio.Local.Read(filepath.Join(srcDir, "BLOCKED.md")); err == nil {
|
||||
status = "blocked"
|
||||
channel = coremcp.ChannelAgentBlocked
|
||||
st.Question = strings.TrimSpace(data)
|
||||
if st.Question != "" {
|
||||
payload["question"] = st.Question
|
||||
}
|
||||
}
|
||||
|
||||
st.Status = status
|
||||
st.PID = 0
|
||||
s.saveStatus(wsDir, st)
|
||||
|
||||
payload["status"] = status
|
||||
s.emitChannel(postCtx, channel, payload)
|
||||
s.emitChannel(postCtx, coremcp.ChannelAgentStatus, payload)
|
||||
}()
|
||||
|
||||
return nil, ResumeOutput{
|
||||
|
|
|
|||
273
pkg/mcp/agentic/review_queue.go
Normal file
273
pkg/mcp/agentic/review_queue.go
Normal file
|
|
@ -0,0 +1,273 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// ReviewQueueInput controls the review queue runner.
type ReviewQueueInput struct {
	Limit     int    `json:"limit,omitempty"`      // max repos to process per run; <= 0 defaults to 4
	Reviewer  string `json:"reviewer,omitempty"`   // reviewer slug passed to reviewRepo; empty defaults to "coderabbit"
	DryRun    bool   `json:"dry_run,omitempty"`    // forwarded to reviewRepo — presumably previews only; confirm in reviewRepo
	LocalOnly bool   `json:"local_only,omitempty"` // forwarded to reviewRepo — semantics not visible here; confirm in reviewRepo
}

// ReviewQueueOutput reports what happened during one review-queue run.
type ReviewQueueOutput struct {
	Success   bool           `json:"success"`
	Processed []ReviewResult `json:"processed"`            // one entry per repo actually reviewed
	Skipped   []string       `json:"skipped,omitempty"`    // repos skipped, each suffixed with the reason, e.g. "(limit reached)"
	RateLimit *RateLimitInfo `json:"rate_limit,omitempty"` // non-nil when a reviewer rate limit was encountered
}

// ReviewResult is the outcome of reviewing one repo.
type ReviewResult struct {
	Repo     string `json:"repo"`
	Verdict  string `json:"verdict"` // e.g. "clean", "error", "rate_limited"
	Findings int    `json:"findings"` // derived from countFindingHints over the reviewer output
	Action   string `json:"action"`
	Detail   string `json:"detail,omitempty"` // raw reviewer output or error text
}

// RateLimitInfo tracks review rate limit state so later runs can back off.
type RateLimitInfo struct {
	Limited bool      `json:"limited"`
	RetryAt time.Time `json:"retry_at,omitempty"` // earliest time another review should be attempted
	Message string    `json:"message,omitempty"`  // the rate-limit detail reported by the reviewer
}
|
||||
|
||||
// reviewQueueHomeDir resolves the home directory used for review-queue
// state: the DIR_HOME override wins, otherwise the OS user home. An empty
// string is returned if neither is available (best-effort; the lookup error
// is deliberately ignored).
func reviewQueueHomeDir() string {
	if override := os.Getenv("DIR_HOME"); override != "" {
		return override
	}
	dir, _ := os.UserHomeDir()
	return dir
}
|
||||
|
||||
func (s *PrepSubsystem) registerReviewQueueTool(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_review_queue",
|
||||
Description: "Process repositories that are ahead of the GitHub mirror and summarise review findings.",
|
||||
}, s.reviewQueue)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) reviewQueue(ctx context.Context, _ *mcp.CallToolRequest, input ReviewQueueInput) (*mcp.CallToolResult, ReviewQueueOutput, error) {
|
||||
limit := input.Limit
|
||||
if limit <= 0 {
|
||||
limit = 4
|
||||
}
|
||||
|
||||
basePath := repoRootFromCodePath(s.codePath)
|
||||
candidates := s.findReviewCandidates(basePath)
|
||||
if len(candidates) == 0 {
|
||||
return nil, ReviewQueueOutput{Success: true, Processed: []ReviewResult{}}, nil
|
||||
}
|
||||
|
||||
processed := make([]ReviewResult, 0, len(candidates))
|
||||
skipped := make([]string, 0)
|
||||
var rateInfo *RateLimitInfo
|
||||
|
||||
for _, repo := range candidates {
|
||||
if len(processed) >= limit {
|
||||
skipped = append(skipped, repo+" (limit reached)")
|
||||
continue
|
||||
}
|
||||
|
||||
if rateInfo != nil && rateInfo.Limited && time.Now().Before(rateInfo.RetryAt) {
|
||||
skipped = append(skipped, repo+" (rate limited)")
|
||||
continue
|
||||
}
|
||||
|
||||
repoDir := filepath.Join(basePath, repo)
|
||||
reviewer := input.Reviewer
|
||||
if reviewer == "" {
|
||||
reviewer = "coderabbit"
|
||||
}
|
||||
|
||||
result := s.reviewRepo(ctx, repoDir, repo, reviewer, input.DryRun, input.LocalOnly)
|
||||
if result.Verdict == "rate_limited" {
|
||||
retryAfter := parseRetryAfter(result.Detail)
|
||||
rateInfo = &RateLimitInfo{
|
||||
Limited: true,
|
||||
RetryAt: time.Now().Add(retryAfter),
|
||||
Message: result.Detail,
|
||||
}
|
||||
skipped = append(skipped, repo+" (rate limited)")
|
||||
continue
|
||||
}
|
||||
|
||||
processed = append(processed, result)
|
||||
}
|
||||
|
||||
if rateInfo != nil {
|
||||
s.saveRateLimitState(rateInfo)
|
||||
}
|
||||
|
||||
return nil, ReviewQueueOutput{
|
||||
Success: true,
|
||||
Processed: processed,
|
||||
Skipped: skipped,
|
||||
RateLimit: rateInfo,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) findReviewCandidates(basePath string) []string {
|
||||
entries, err := os.ReadDir(basePath)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
candidates := make([]string, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
repoDir := filepath.Join(basePath, entry.Name())
|
||||
if !hasRemote(repoDir, "github") {
|
||||
continue
|
||||
}
|
||||
if commitsAhead(repoDir, "github/main", "HEAD") <= 0 {
|
||||
continue
|
||||
}
|
||||
candidates = append(candidates, entry.Name())
|
||||
}
|
||||
return candidates
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer string, dryRun, localOnly bool) ReviewResult {
|
||||
result := ReviewResult{Repo: repo}
|
||||
|
||||
if rl := s.loadRateLimitState(); rl != nil && rl.Limited && time.Now().Before(rl.RetryAt) {
|
||||
result.Verdict = "rate_limited"
|
||||
result.Detail = fmt.Sprintf("retry after %s", rl.RetryAt.Format(time.RFC3339))
|
||||
return result
|
||||
}
|
||||
|
||||
cmd := reviewerCommand(ctx, repoDir, reviewer)
|
||||
cmd.Dir = repoDir
|
||||
out, err := cmd.CombinedOutput()
|
||||
output := strings.TrimSpace(string(out))
|
||||
|
||||
if strings.Contains(strings.ToLower(output), "rate limit") {
|
||||
result.Verdict = "rate_limited"
|
||||
result.Detail = output
|
||||
return result
|
||||
}
|
||||
|
||||
if err != nil && !strings.Contains(output, "No findings") && !strings.Contains(output, "no issues") {
|
||||
result.Verdict = "error"
|
||||
if output != "" {
|
||||
result.Detail = output
|
||||
} else {
|
||||
result.Detail = err.Error()
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
s.storeReviewOutput(repoDir, repo, reviewer, output)
|
||||
result.Findings = countFindingHints(output)
|
||||
|
||||
if strings.Contains(output, "No findings") || strings.Contains(output, "no issues") || strings.Contains(output, "LGTM") {
|
||||
result.Verdict = "clean"
|
||||
if dryRun {
|
||||
result.Action = "skipped (dry run)"
|
||||
return result
|
||||
}
|
||||
if localOnly {
|
||||
result.Action = "local only"
|
||||
return result
|
||||
}
|
||||
|
||||
if url, err := readGitHubPRURL(repoDir); err == nil && url != "" {
|
||||
mergeCmd := exec.CommandContext(ctx, "gh", "pr", "merge", "--auto", "--squash", "--delete-branch")
|
||||
mergeCmd.Dir = repoDir
|
||||
if mergeOut, err := mergeCmd.CombinedOutput(); err == nil {
|
||||
result.Action = "merged"
|
||||
result.Detail = strings.TrimSpace(string(mergeOut))
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
result.Action = "waiting"
|
||||
return result
|
||||
}
|
||||
|
||||
result.Verdict = "findings"
|
||||
if dryRun {
|
||||
result.Action = "skipped (dry run)"
|
||||
return result
|
||||
}
|
||||
|
||||
result.Action = "waiting"
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) storeReviewOutput(repoDir, repo, reviewer, output string) {
|
||||
home := reviewQueueHomeDir()
|
||||
dataDir := filepath.Join(home, ".core", "training", "reviews")
|
||||
if err := coreio.Local.EnsureDir(dataDir); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
payload := map[string]string{
|
||||
"repo": repo,
|
||||
"reviewer": reviewer,
|
||||
"output": output,
|
||||
"source": repoDir,
|
||||
}
|
||||
data, err := json.MarshalIndent(payload, "", " ")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
name := fmt.Sprintf("%s-%s-%d.json", repo, reviewer, time.Now().Unix())
|
||||
_ = writeAtomic(filepath.Join(dataDir, name), string(data))
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) saveRateLimitState(info *RateLimitInfo) {
|
||||
home := reviewQueueHomeDir()
|
||||
path := filepath.Join(home, ".core", "coderabbit-ratelimit.json")
|
||||
data, err := json.Marshal(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = writeAtomic(path, string(data))
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) loadRateLimitState() *RateLimitInfo {
|
||||
home := reviewQueueHomeDir()
|
||||
path := filepath.Join(home, ".core", "coderabbit-ratelimit.json")
|
||||
data, err := coreio.Local.Read(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var info RateLimitInfo
|
||||
if err := json.Unmarshal([]byte(data), &info); err != nil {
|
||||
return nil
|
||||
}
|
||||
if !info.Limited {
|
||||
return nil
|
||||
}
|
||||
return &info
|
||||
}
|
||||
|
||||
// findingHintRe matches "path/to/file.<ext>:<line>" references that review
// tools emit per finding. Compiled once at package scope so repeated calls
// do not pay regexp compilation on every invocation (the original compiled
// it inside the function on each call).
var findingHintRe = regexp.MustCompile(`(?m)[^ \t\n\r]+\.(?:go|php|ts|tsx|js|jsx|py|rb|java|cs|cpp|cxx|cc|md):\d+`)

// countFindingHints counts file:line references in reviewer output as a
// rough proxy for the number of findings.
func countFindingHints(output string) int {
	return len(findingHintRe.FindAllString(output, -1))
}
|
||||
|
|
@ -10,6 +10,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
|
|
@ -28,20 +29,26 @@ import (
|
|||
// running → failed (agent crashed / non-zero exit)
|
||||
|
||||
// WorkspaceStatus represents the current state of an agent workspace.
|
||||
//
|
||||
// status := WorkspaceStatus{
|
||||
// Status: "blocked",
|
||||
// Agent: "claude",
|
||||
// Repo: "go-mcp",
|
||||
// }
|
||||
type WorkspaceStatus struct {
|
||||
Status string `json:"status"` // running, completed, blocked, failed
|
||||
Agent string `json:"agent"` // gemini, claude, codex
|
||||
Repo string `json:"repo"` // target repo
|
||||
Org string `json:"org,omitempty"` // forge org (e.g. "core")
|
||||
Task string `json:"task"` // task description
|
||||
Branch string `json:"branch,omitempty"` // git branch name
|
||||
Issue int `json:"issue,omitempty"` // forge issue number
|
||||
PID int `json:"pid,omitempty"` // process ID (if running)
|
||||
StartedAt time.Time `json:"started_at"` // when dispatch started
|
||||
UpdatedAt time.Time `json:"updated_at"` // last status change
|
||||
Question string `json:"question,omitempty"` // from BLOCKED.md
|
||||
Runs int `json:"runs"` // how many times dispatched/resumed
|
||||
PRURL string `json:"pr_url,omitempty"` // pull request URL (after PR created)
|
||||
Status string `json:"status"` // running, completed, blocked, failed
|
||||
Agent string `json:"agent"` // gemini, claude, codex
|
||||
Repo string `json:"repo"` // target repo
|
||||
Org string `json:"org,omitempty"` // forge org (e.g. "core")
|
||||
Task string `json:"task"` // task description
|
||||
Branch string `json:"branch,omitempty"` // git branch name
|
||||
Issue int `json:"issue,omitempty"` // forge issue number
|
||||
PID int `json:"pid,omitempty"` // process ID (if running)
|
||||
StartedAt time.Time `json:"started_at"` // when dispatch started
|
||||
UpdatedAt time.Time `json:"updated_at"` // last status change
|
||||
Question string `json:"question,omitempty"` // from BLOCKED.md
|
||||
Runs int `json:"runs"` // how many times dispatched/resumed
|
||||
PRURL string `json:"pr_url,omitempty"` // pull request URL (after PR created)
|
||||
}
|
||||
|
||||
func writeStatus(wsDir string, status *WorkspaceStatus) error {
|
||||
|
|
@ -50,7 +57,13 @@ func writeStatus(wsDir string, status *WorkspaceStatus) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return coreio.Local.Write(filepath.Join(wsDir, "status.json"), string(data))
|
||||
return writeAtomic(filepath.Join(wsDir, "status.json"), string(data))
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) saveStatus(wsDir string, status *WorkspaceStatus) {
|
||||
if err := writeStatus(wsDir, status); err != nil {
|
||||
coreerr.Warn("failed to write workspace status", "workspace", filepath.Base(wsDir), "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func readStatus(wsDir string) (*WorkspaceStatus, error) {
|
||||
|
|
@ -67,28 +80,41 @@ func readStatus(wsDir string) (*WorkspaceStatus, error) {
|
|||
|
||||
// --- agentic_status tool ---
|
||||
|
||||
// StatusInput is the input for agentic_status.
|
||||
//
|
||||
// input := StatusInput{Workspace: "go-mcp-1700000000"}
|
||||
type StatusInput struct {
|
||||
Workspace string `json:"workspace,omitempty"` // specific workspace name, or empty for all
|
||||
}
|
||||
|
||||
// StatusOutput is the output for agentic_status.
|
||||
//
|
||||
// // out.Count == 2, len(out.Workspaces) == 2
|
||||
type StatusOutput struct {
|
||||
Workspaces []WorkspaceInfo `json:"workspaces"`
|
||||
Count int `json:"count"`
|
||||
}
|
||||
|
||||
// WorkspaceInfo summarizes a tracked workspace.
|
||||
//
|
||||
// // ws.Name == "go-mcp-1700000000", ws.Status == "running"
|
||||
type WorkspaceInfo struct {
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"`
|
||||
Agent string `json:"agent"`
|
||||
Repo string `json:"repo"`
|
||||
Task string `json:"task"`
|
||||
Age string `json:"age"`
|
||||
Question string `json:"question,omitempty"`
|
||||
Runs int `json:"runs"`
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"`
|
||||
Agent string `json:"agent"`
|
||||
Repo string `json:"repo"`
|
||||
Branch string `json:"branch,omitempty"`
|
||||
Issue int `json:"issue,omitempty"`
|
||||
PRURL string `json:"pr_url,omitempty"`
|
||||
Task string `json:"task"`
|
||||
Age string `json:"age"`
|
||||
Question string `json:"question,omitempty"`
|
||||
Runs int `json:"runs"`
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) registerStatusTool(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *PrepSubsystem) registerStatusTool(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_status",
|
||||
Description: "List agent workspaces and their status (running, completed, blocked, failed). Shows blocked agents with their questions.",
|
||||
}, s.status)
|
||||
|
|
@ -96,9 +122,6 @@ func (s *PrepSubsystem) registerStatusTool(server *mcp.Server) {
|
|||
|
||||
func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, input StatusInput) (*mcp.CallToolResult, StatusOutput, error) {
|
||||
wsDirs := s.listWorkspaceDirs()
|
||||
if len(wsDirs) == 0 {
|
||||
return nil, StatusOutput{}, coreerr.E("status", "no workspaces found", nil)
|
||||
}
|
||||
|
||||
var workspaces []WorkspaceInfo
|
||||
|
||||
|
|
@ -132,6 +155,9 @@ func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
|||
info.Status = st.Status
|
||||
info.Agent = st.Agent
|
||||
info.Repo = st.Repo
|
||||
info.Branch = st.Branch
|
||||
info.Issue = st.Issue
|
||||
info.PRURL = st.PRURL
|
||||
info.Task = st.Task
|
||||
info.Runs = st.Runs
|
||||
info.Age = time.Since(st.StartedAt).Truncate(time.Minute).String()
|
||||
|
|
@ -140,6 +166,16 @@ func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
|||
if st.Status == "running" && st.PID > 0 {
|
||||
proc, err := os.FindProcess(st.PID)
|
||||
if err != nil || proc.Signal(nil) != nil {
|
||||
prevStatus := st.Status
|
||||
status := "completed"
|
||||
channel := coremcp.ChannelAgentComplete
|
||||
payload := map[string]any{
|
||||
"workspace": name,
|
||||
"agent": st.Agent,
|
||||
"repo": st.Repo,
|
||||
"branch": st.Branch,
|
||||
}
|
||||
|
||||
// Process died — check for BLOCKED.md
|
||||
blockedPath := filepath.Join(wsDir, "src", "BLOCKED.md")
|
||||
if data, err := coreio.Local.Read(blockedPath); err == nil {
|
||||
|
|
@ -147,11 +183,22 @@ func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
|||
info.Question = strings.TrimSpace(data)
|
||||
st.Status = "blocked"
|
||||
st.Question = info.Question
|
||||
status = "blocked"
|
||||
channel = coremcp.ChannelAgentBlocked
|
||||
if st.Question != "" {
|
||||
payload["question"] = st.Question
|
||||
}
|
||||
} else {
|
||||
info.Status = "completed"
|
||||
st.Status = "completed"
|
||||
}
|
||||
writeStatus(wsDir, st)
|
||||
s.saveStatus(wsDir, st)
|
||||
|
||||
if prevStatus != status {
|
||||
payload["status"] = status
|
||||
s.emitChannel(ctx, channel, payload)
|
||||
s.emitChannel(ctx, coremcp.ChannelAgentStatus, payload)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
94
pkg/mcp/agentic/status_test.go
Normal file
94
pkg/mcp/agentic/status_test.go
Normal file
|
|
@ -0,0 +1,94 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestStatus_Good_EmptyWorkspaceSet(t *testing.T) {
|
||||
sub := &PrepSubsystem{codePath: t.TempDir()}
|
||||
|
||||
_, out, err := sub.status(context.Background(), nil, StatusInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("status failed: %v", err)
|
||||
}
|
||||
if out.Count != 0 {
|
||||
t.Fatalf("expected count 0, got %d", out.Count)
|
||||
}
|
||||
if len(out.Workspaces) != 0 {
|
||||
t.Fatalf("expected empty workspace list, got %d entries", len(out.Workspaces))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPlanRead_Good_ReturnsWrittenPlan(t *testing.T) {
|
||||
sub := &PrepSubsystem{codePath: t.TempDir()}
|
||||
|
||||
plan := &Plan{
|
||||
ID: "plan-1",
|
||||
Title: "Read me",
|
||||
Status: "ready",
|
||||
Objective: "Verify plan reads",
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
if _, err := writePlan(sub.plansDir(), plan); err != nil {
|
||||
t.Fatalf("writePlan failed: %v", err)
|
||||
}
|
||||
|
||||
_, out, err := sub.planRead(context.Background(), nil, PlanReadInput{ID: plan.ID})
|
||||
if err != nil {
|
||||
t.Fatalf("planRead failed: %v", err)
|
||||
}
|
||||
if !out.Success {
|
||||
t.Fatal("expected success output")
|
||||
}
|
||||
if out.Plan.ID != plan.ID {
|
||||
t.Fatalf("expected plan %q, got %q", plan.ID, out.Plan.ID)
|
||||
}
|
||||
if out.Plan.Title != plan.Title {
|
||||
t.Fatalf("expected title %q, got %q", plan.Title, out.Plan.Title)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatus_Good_ExposesWorkspaceMetadata(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
sub := &PrepSubsystem{codePath: root}
|
||||
|
||||
wsDir := filepath.Join(root, ".core", "workspace", "repo-123")
|
||||
plan := &WorkspaceStatus{
|
||||
Status: "completed",
|
||||
Agent: "claude",
|
||||
Repo: "go-mcp",
|
||||
Branch: "agent/issue-42-fix-status",
|
||||
Issue: 42,
|
||||
PRURL: "https://forge.example/pr/42",
|
||||
Task: "Fix status output",
|
||||
Runs: 2,
|
||||
}
|
||||
if err := writeStatus(wsDir, plan); err != nil {
|
||||
t.Fatalf("writeStatus failed: %v", err)
|
||||
}
|
||||
|
||||
_, out, err := sub.status(context.Background(), nil, StatusInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("status failed: %v", err)
|
||||
}
|
||||
if out.Count != 1 {
|
||||
t.Fatalf("expected count 1, got %d", out.Count)
|
||||
}
|
||||
|
||||
info := out.Workspaces[0]
|
||||
if info.Branch != plan.Branch {
|
||||
t.Fatalf("expected branch %q, got %q", plan.Branch, info.Branch)
|
||||
}
|
||||
if info.Issue != plan.Issue {
|
||||
t.Fatalf("expected issue %d, got %d", plan.Issue, info.Issue)
|
||||
}
|
||||
if info.PRURL != plan.PRURL {
|
||||
t.Fatalf("expected PR URL %q, got %q", plan.PRURL, info.PRURL)
|
||||
}
|
||||
}
|
||||
167
pkg/mcp/agentic/watch.go
Normal file
167
pkg/mcp/agentic/watch.go
Normal file
|
|
@ -0,0 +1,167 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// WatchInput is the input for agentic_watch.
//
// Workspaces limits the watch to specific workspace names; when empty, all
// currently active workspaces are watched. PollInterval and Timeout are in
// seconds; non-positive values fall back to the defaults applied in watch
// (5s poll, 10m timeout).
type WatchInput struct {
	Workspaces   []string `json:"workspaces,omitempty"`
	PollInterval int      `json:"poll_interval,omitempty"`
	Timeout      int      `json:"timeout,omitempty"`
}
|
||||
|
||||
// WatchOutput is the result of watching one or more workspaces.
//
// Success is true only when no workspace ended in the Failed list (failed,
// blocked, or timed out). Duration is the total wall-clock watch time,
// rounded to seconds.
type WatchOutput struct {
	Success   bool          `json:"success"`
	Completed []WatchResult `json:"completed"`
	Failed    []WatchResult `json:"failed,omitempty"`
	Duration  string        `json:"duration"`
}
|
||||
|
||||
// WatchResult describes one workspace result.
//
// Status mirrors the workspace status at the time the watch resolved it
// (e.g. "completed", "failed", "blocked"), or "timeout" when the watch
// deadline expired before the workspace finished.
type WatchResult struct {
	Workspace string `json:"workspace"`
	Agent     string `json:"agent"`
	Repo      string `json:"repo"`
	Status    string `json:"status"`
	Branch    string `json:"branch,omitempty"`
	Issue     int    `json:"issue,omitempty"`
	PRURL     string `json:"pr_url,omitempty"`
}
|
||||
|
||||
func (s *PrepSubsystem) registerWatchTool(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
||||
Name: "agentic_watch",
|
||||
Description: "Watch running or queued agent workspaces until they finish and return a completion summary.",
|
||||
}, s.watch)
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) watch(ctx context.Context, req *mcp.CallToolRequest, input WatchInput) (*mcp.CallToolResult, WatchOutput, error) {
|
||||
pollInterval := time.Duration(input.PollInterval) * time.Second
|
||||
if pollInterval <= 0 {
|
||||
pollInterval = 5 * time.Second
|
||||
}
|
||||
|
||||
timeout := time.Duration(input.Timeout) * time.Second
|
||||
if timeout <= 0 {
|
||||
timeout = 10 * time.Minute
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
deadline := start.Add(timeout)
|
||||
|
||||
targets := input.Workspaces
|
||||
if len(targets) == 0 {
|
||||
targets = s.findActiveWorkspaces()
|
||||
}
|
||||
|
||||
if len(targets) == 0 {
|
||||
return nil, WatchOutput{Success: true, Duration: "0s"}, nil
|
||||
}
|
||||
|
||||
remaining := make(map[string]struct{}, len(targets))
|
||||
for _, workspace := range targets {
|
||||
remaining[workspace] = struct{}{}
|
||||
}
|
||||
|
||||
completed := make([]WatchResult, 0, len(targets))
|
||||
failed := make([]WatchResult, 0)
|
||||
|
||||
for len(remaining) > 0 {
|
||||
if time.Now().After(deadline) {
|
||||
for workspace := range remaining {
|
||||
failed = append(failed, WatchResult{
|
||||
Workspace: workspace,
|
||||
Status: "timeout",
|
||||
})
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, WatchOutput{}, coreerr.E("watch", "cancelled", ctx.Err())
|
||||
case <-time.After(pollInterval):
|
||||
}
|
||||
|
||||
_, statusOut, err := s.status(ctx, req, StatusInput{})
|
||||
if err != nil {
|
||||
return nil, WatchOutput{}, coreerr.E("watch", "failed to refresh status", err)
|
||||
}
|
||||
|
||||
for _, info := range statusOut.Workspaces {
|
||||
if _, ok := remaining[info.Name]; !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
switch info.Status {
|
||||
case "completed", "merged", "ready-for-review":
|
||||
completed = append(completed, WatchResult{
|
||||
Workspace: info.Name,
|
||||
Agent: info.Agent,
|
||||
Repo: info.Repo,
|
||||
Status: info.Status,
|
||||
Branch: info.Branch,
|
||||
Issue: info.Issue,
|
||||
PRURL: info.PRURL,
|
||||
})
|
||||
delete(remaining, info.Name)
|
||||
case "failed", "blocked":
|
||||
failed = append(failed, WatchResult{
|
||||
Workspace: info.Name,
|
||||
Agent: info.Agent,
|
||||
Repo: info.Repo,
|
||||
Status: info.Status,
|
||||
Branch: info.Branch,
|
||||
Issue: info.Issue,
|
||||
PRURL: info.PRURL,
|
||||
})
|
||||
delete(remaining, info.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, WatchOutput{
|
||||
Success: len(failed) == 0,
|
||||
Completed: completed,
|
||||
Failed: failed,
|
||||
Duration: time.Since(start).Round(time.Second).String(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) findActiveWorkspaces() []string {
|
||||
wsDirs := s.listWorkspaceDirs()
|
||||
if len(wsDirs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
active := make([]string, 0, len(wsDirs))
|
||||
for _, wsDir := range wsDirs {
|
||||
st, err := readStatus(wsDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
switch st.Status {
|
||||
case "running", "queued":
|
||||
active = append(active, filepath.Base(wsDir))
|
||||
}
|
||||
}
|
||||
return active
|
||||
}
|
||||
|
||||
func (s *PrepSubsystem) resolveWorkspaceDir(name string) string {
|
||||
if filepath.IsAbs(name) {
|
||||
return name
|
||||
}
|
||||
return filepath.Join(s.workspaceRoot(), name)
|
||||
}
|
||||
51
pkg/mcp/agentic/write_atomic.go
Normal file
51
pkg/mcp/agentic/write_atomic.go
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package agentic
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// writeAtomic writes content to path by staging it in a temporary file and
|
||||
// renaming it into place.
|
||||
//
|
||||
// This avoids exposing partially written workspace files to agents that may
|
||||
// read status, prompt, or plan documents while they are being updated.
|
||||
func writeAtomic(path, content string) error {
|
||||
dir := filepath.Dir(path)
|
||||
if err := coreio.Local.EnsureDir(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmp, err := os.CreateTemp(dir, "."+filepath.Base(path)+".*.tmp")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmpPath := tmp.Name()
|
||||
|
||||
cleanup := func() {
|
||||
_ = tmp.Close()
|
||||
_ = os.Remove(tmpPath)
|
||||
}
|
||||
|
||||
if _, err := tmp.WriteString(content); err != nil {
|
||||
cleanup()
|
||||
return err
|
||||
}
|
||||
if err := tmp.Sync(); err != nil {
|
||||
cleanup()
|
||||
return err
|
||||
}
|
||||
if err := tmp.Close(); err != nil {
|
||||
_ = os.Remove(tmpPath)
|
||||
return err
|
||||
}
|
||||
if err := os.Rename(tmpPath, path); err != nil {
|
||||
_ = os.Remove(tmpPath)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -7,9 +7,9 @@ package brain
|
|||
import (
|
||||
"context"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// errBridgeNotAvailable is returned when a tool requires the Laravel bridge
|
||||
|
|
@ -20,31 +20,56 @@ var errBridgeNotAvailable = coreerr.E("brain", "bridge not available", nil)
|
|||
// It proxies brain_* tool calls to the Laravel backend via the shared IDE bridge.
|
||||
type Subsystem struct {
|
||||
bridge *ide.Bridge
|
||||
notifier Notifier
|
||||
notifier coremcp.Notifier
|
||||
}
|
||||
|
||||
var (
|
||||
_ coremcp.Subsystem = (*Subsystem)(nil)
|
||||
_ coremcp.SubsystemWithShutdown = (*Subsystem)(nil)
|
||||
_ coremcp.SubsystemWithNotifier = (*Subsystem)(nil)
|
||||
)
|
||||
|
||||
// New creates a brain subsystem that uses the given IDE bridge for Laravel communication.
|
||||
//
|
||||
// brain := New(ideBridge)
|
||||
//
|
||||
// Pass nil if headless (tools will return errBridgeNotAvailable).
|
||||
func New(bridge *ide.Bridge) *Subsystem {
|
||||
return &Subsystem{bridge: bridge}
|
||||
s := &Subsystem{bridge: bridge}
|
||||
if bridge != nil {
|
||||
bridge.AddObserver(func(msg ide.BridgeMessage) {
|
||||
s.handleBridgeMessage(msg)
|
||||
})
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Name implements mcp.Subsystem.
|
||||
func (s *Subsystem) Name() string { return "brain" }
|
||||
|
||||
// Notifier pushes events to MCP sessions (matches pkg/mcp.Notifier).
|
||||
type Notifier interface {
|
||||
ChannelSend(ctx context.Context, channel string, data any)
|
||||
}
|
||||
|
||||
// SetNotifier stores the shared notifier so this subsystem can emit channel events.
|
||||
func (s *Subsystem) SetNotifier(n Notifier) {
|
||||
func (s *Subsystem) SetNotifier(n coremcp.Notifier) {
|
||||
s.notifier = n
|
||||
}
|
||||
|
||||
// RegisterTools implements mcp.Subsystem.
|
||||
func (s *Subsystem) RegisterTools(server *mcp.Server) {
|
||||
s.registerBrainTools(server)
|
||||
func (s *Subsystem) RegisterTools(svc *coremcp.Service) {
|
||||
s.registerBrainTools(svc)
|
||||
}
|
||||
|
||||
func (s *Subsystem) handleBridgeMessage(msg ide.BridgeMessage) {
|
||||
switch msg.Type {
|
||||
case "brain_remember":
|
||||
emitBridgeChannel(context.Background(), s.notifier, coremcp.ChannelBrainRememberDone, bridgePayload(msg.Data, "type", "project"))
|
||||
case "brain_recall":
|
||||
payload := bridgePayload(msg.Data, "query", "project", "type", "agent_id")
|
||||
payload["count"] = bridgeCount(msg.Data)
|
||||
emitBridgeChannel(context.Background(), s.notifier, coremcp.ChannelBrainRecallDone, payload)
|
||||
case "brain_forget":
|
||||
emitBridgeChannel(context.Background(), s.notifier, coremcp.ChannelBrainForgetDone, bridgePayload(msg.Data, "id", "reason"))
|
||||
case "brain_list":
|
||||
emitBridgeChannel(context.Background(), s.notifier, coremcp.ChannelBrainListDone, bridgePayload(msg.Data, "project", "type", "agent_id", "limit"))
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown implements mcp.SubsystemWithShutdown.
|
||||
|
|
|
|||
|
|
@ -7,8 +7,20 @@ import (
|
|||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||
)
|
||||
|
||||
// recordingNotifier captures the last channel event sent through it so tests
// can assert on the channel name and payload.
type recordingNotifier struct {
	channel string // last channel name passed to ChannelSend
	data    any    // last payload passed to ChannelSend
}

// ChannelSend records the channel and payload; it performs no delivery.
func (r *recordingNotifier) ChannelSend(_ context.Context, channel string, data any) {
	r.channel = channel
	r.data = data
}
|
||||
|
||||
// --- Nil bridge tests (headless mode) ---
|
||||
|
||||
func TestBrainRemember_Bad_NilBridge(t *testing.T) {
|
||||
|
|
@ -68,6 +80,38 @@ func TestSubsystem_Good_ShutdownNoop(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSubsystem_Good_BridgeRecallNotification(t *testing.T) {
|
||||
sub := New(nil)
|
||||
notifier := &recordingNotifier{}
|
||||
sub.notifier = notifier
|
||||
|
||||
sub.handleBridgeMessage(ide.BridgeMessage{
|
||||
Type: "brain_recall",
|
||||
Data: map[string]any{
|
||||
"query": "how does scoring work?",
|
||||
"memories": []any{
|
||||
map[string]any{"id": "m1"},
|
||||
map[string]any{"id": "m2"},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if notifier.channel != "brain.recall.complete" {
|
||||
t.Fatalf("expected brain.recall.complete, got %q", notifier.channel)
|
||||
}
|
||||
|
||||
payload, ok := notifier.data.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected payload map, got %T", notifier.data)
|
||||
}
|
||||
if payload["count"] != 2 {
|
||||
t.Fatalf("expected count 2, got %v", payload["count"])
|
||||
}
|
||||
if payload["query"] != "how does scoring work?" {
|
||||
t.Fatalf("expected query to be forwarded, got %v", payload["query"])
|
||||
}
|
||||
}
|
||||
|
||||
// --- Struct round-trip tests ---
|
||||
|
||||
func TestRememberInput_Good_RoundTrip(t *testing.T) {
|
||||
|
|
|
|||
59
pkg/mcp/brain/bridge_events.go
Normal file
59
pkg/mcp/brain/bridge_events.go
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package brain
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
)
|
||||
|
||||
// bridgePayload extracts the listed keys from a bridge message body, which
// is expected to be a map[string]any. Non-map data and missing keys yield
// an empty (but non-nil) map.
func bridgePayload(data any, keys ...string) map[string]any {
	out := make(map[string]any)

	src, ok := data.(map[string]any)
	if !ok {
		return out
	}

	for _, k := range keys {
		if v, present := src[k]; present {
			out[k] = v
		}
	}
	return out
}
|
||||
|
||||
// bridgeCount derives a memory count from a bridge message body: an
// explicit numeric "count" field wins; otherwise the length of a
// "memories" list is used; anything else counts as zero.
func bridgeCount(data any) int {
	src, ok := data.(map[string]any)
	if !ok {
		return 0
	}

	// A missing or non-numeric "count" falls through to the memories list.
	switch v := src["count"].(type) {
	case int:
		return v
	case int32:
		return int(v)
	case int64:
		return int(v)
	case float64:
		return int(v)
	}

	if memories, ok := src["memories"].([]any); ok {
		return len(memories)
	}
	return 0
}
|
||||
|
||||
func emitBridgeChannel(ctx context.Context, notifier coremcp.Notifier, channel string, data any) {
|
||||
if notifier == nil {
|
||||
return
|
||||
}
|
||||
notifier.ChannelSend(ctx, channel, data)
|
||||
}
|
||||
|
|
@ -9,16 +9,20 @@ import (
|
|||
"fmt"
|
||||
goio "io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// channelSender is the callback for pushing channel events.
|
||||
//
|
||||
// fn := func(ctx context.Context, channel string, data any) { ... }
|
||||
type channelSender func(ctx context.Context, channel string, data any)
|
||||
|
||||
// DirectSubsystem implements mcp.Subsystem for OpenBrain via direct HTTP calls.
|
||||
|
|
@ -31,6 +35,12 @@ type DirectSubsystem struct {
|
|||
onChannel channelSender
|
||||
}
|
||||
|
||||
var (
|
||||
_ coremcp.Subsystem = (*DirectSubsystem)(nil)
|
||||
_ coremcp.SubsystemWithShutdown = (*DirectSubsystem)(nil)
|
||||
_ coremcp.SubsystemWithChannelCallback = (*DirectSubsystem)(nil)
|
||||
)
|
||||
|
||||
// OnChannel sets a callback for channel event broadcasting.
|
||||
// Called by the MCP service after creation to wire up notifications.
|
||||
//
|
||||
|
|
@ -42,6 +52,9 @@ func (s *DirectSubsystem) OnChannel(fn func(ctx context.Context, channel string,
|
|||
}
|
||||
|
||||
// NewDirect creates a brain subsystem that calls the OpenBrain API directly.
|
||||
//
|
||||
// brain := NewDirect()
|
||||
//
|
||||
// Reads CORE_BRAIN_URL and CORE_BRAIN_KEY from environment, or falls back
|
||||
// to ~/.claude/brain.key for the API key.
|
||||
func NewDirect() *DirectSubsystem {
|
||||
|
|
@ -68,21 +81,27 @@ func NewDirect() *DirectSubsystem {
|
|||
func (s *DirectSubsystem) Name() string { return "brain" }
|
||||
|
||||
// RegisterTools implements mcp.Subsystem.
|
||||
func (s *DirectSubsystem) RegisterTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *DirectSubsystem) RegisterTools(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
||||
Name: "brain_remember",
|
||||
Description: "Store a memory in OpenBrain. Types: fact, decision, observation, plan, convention, architecture, research, documentation, service, bug, pattern, context, procedure.",
|
||||
}, s.remember)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
||||
Name: "brain_recall",
|
||||
Description: "Semantic search across OpenBrain memories. Returns memories ranked by similarity. Use agent_id 'cladius' for Cladius's memories.",
|
||||
}, s.recall)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
||||
Name: "brain_forget",
|
||||
Description: "Remove a memory from OpenBrain by ID.",
|
||||
}, s.forget)
|
||||
|
||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
||||
Name: "brain_list",
|
||||
Description: "List memories in OpenBrain with optional filtering by project, type, and agent.",
|
||||
}, s.list)
|
||||
}
|
||||
|
||||
// Shutdown implements mcp.SubsystemWithShutdown.
|
||||
|
|
@ -147,7 +166,7 @@ func (s *DirectSubsystem) remember(ctx context.Context, _ *mcp.CallToolRequest,
|
|||
|
||||
id, _ := result["id"].(string)
|
||||
if s.onChannel != nil {
|
||||
s.onChannel(ctx, "brain.remember.complete", map[string]any{
|
||||
s.onChannel(ctx, coremcp.ChannelBrainRememberDone, map[string]any{
|
||||
"id": id,
|
||||
"type": input.Type,
|
||||
"project": input.Project,
|
||||
|
|
@ -207,7 +226,7 @@ func (s *DirectSubsystem) recall(ctx context.Context, _ *mcp.CallToolRequest, in
|
|||
}
|
||||
|
||||
if s.onChannel != nil {
|
||||
s.onChannel(ctx, "brain.recall.complete", map[string]any{
|
||||
s.onChannel(ctx, coremcp.ChannelBrainRecallDone, map[string]any{
|
||||
"query": input.Query,
|
||||
"count": len(memories),
|
||||
})
|
||||
|
|
@ -225,9 +244,80 @@ func (s *DirectSubsystem) forget(ctx context.Context, _ *mcp.CallToolRequest, in
|
|||
return nil, ForgetOutput{}, err
|
||||
}
|
||||
|
||||
if s.onChannel != nil {
|
||||
s.onChannel(ctx, coremcp.ChannelBrainForgetDone, map[string]any{
|
||||
"id": input.ID,
|
||||
"reason": input.Reason,
|
||||
})
|
||||
}
|
||||
|
||||
return nil, ForgetOutput{
|
||||
Success: true,
|
||||
Forgotten: input.ID,
|
||||
Timestamp: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *DirectSubsystem) list(ctx context.Context, _ *mcp.CallToolRequest, input ListInput) (*mcp.CallToolResult, ListOutput, error) {
|
||||
limit := input.Limit
|
||||
if limit == 0 {
|
||||
limit = 50
|
||||
}
|
||||
|
||||
values := url.Values{}
|
||||
if input.Project != "" {
|
||||
values.Set("project", input.Project)
|
||||
}
|
||||
if input.Type != "" {
|
||||
values.Set("type", input.Type)
|
||||
}
|
||||
if input.AgentID != "" {
|
||||
values.Set("agent_id", input.AgentID)
|
||||
}
|
||||
values.Set("limit", fmt.Sprintf("%d", limit))
|
||||
|
||||
result, err := s.apiCall(ctx, http.MethodGet, "/v1/brain/list?"+values.Encode(), nil)
|
||||
if err != nil {
|
||||
return nil, ListOutput{}, err
|
||||
}
|
||||
|
||||
var memories []Memory
|
||||
if mems, ok := result["memories"].([]any); ok {
|
||||
for _, m := range mems {
|
||||
if mm, ok := m.(map[string]any); ok {
|
||||
mem := Memory{
|
||||
Content: fmt.Sprintf("%v", mm["content"]),
|
||||
Type: fmt.Sprintf("%v", mm["type"]),
|
||||
Project: fmt.Sprintf("%v", mm["project"]),
|
||||
AgentID: fmt.Sprintf("%v", mm["agent_id"]),
|
||||
CreatedAt: fmt.Sprintf("%v", mm["created_at"]),
|
||||
}
|
||||
if id, ok := mm["id"].(string); ok {
|
||||
mem.ID = id
|
||||
}
|
||||
if score, ok := mm["score"].(float64); ok {
|
||||
mem.Confidence = score
|
||||
}
|
||||
if source, ok := mm["source"].(string); ok {
|
||||
mem.Tags = append(mem.Tags, "source:"+source)
|
||||
}
|
||||
memories = append(memories, mem)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if s.onChannel != nil {
|
||||
s.onChannel(ctx, coremcp.ChannelBrainListDone, map[string]any{
|
||||
"project": input.Project,
|
||||
"type": input.Type,
|
||||
"agent_id": input.AgentID,
|
||||
"limit": limit,
|
||||
})
|
||||
}
|
||||
|
||||
return nil, ListOutput{
|
||||
Success: true,
|
||||
Count: len(memories),
|
||||
Memories: memories,
|
||||
}, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -207,8 +207,8 @@ func TestDirectRecall_Good(t *testing.T) {
|
|||
|
||||
s := newTestDirect(srv.URL)
|
||||
_, out, err := s.recall(context.Background(), nil, RecallInput{
|
||||
Query: "scoring algorithm",
|
||||
TopK: 5,
|
||||
Query: "scoring algorithm",
|
||||
TopK: 5,
|
||||
Filter: RecallFilter{Project: "eaas"},
|
||||
})
|
||||
if err != nil {
|
||||
|
|
@ -290,6 +290,48 @@ func TestDirectForget_Good(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDirectForget_Good_EmitsChannel(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
json.NewEncoder(w).Encode(map[string]any{"success": true})
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
var gotChannel string
|
||||
var gotPayload map[string]any
|
||||
|
||||
s := newTestDirect(srv.URL)
|
||||
s.onChannel = func(_ context.Context, channel string, data any) {
|
||||
gotChannel = channel
|
||||
if payload, ok := data.(map[string]any); ok {
|
||||
gotPayload = payload
|
||||
}
|
||||
}
|
||||
|
||||
_, out, err := s.forget(context.Background(), nil, ForgetInput{
|
||||
ID: "mem-789",
|
||||
Reason: "outdated",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("forget failed: %v", err)
|
||||
}
|
||||
if !out.Success {
|
||||
t.Fatal("expected success=true")
|
||||
}
|
||||
if gotChannel != "brain.forget.complete" {
|
||||
t.Fatalf("expected brain.forget.complete, got %q", gotChannel)
|
||||
}
|
||||
if gotPayload == nil {
|
||||
t.Fatal("expected channel payload")
|
||||
}
|
||||
if gotPayload["id"] != "mem-789" {
|
||||
t.Fatalf("expected id=mem-789, got %v", gotPayload["id"])
|
||||
}
|
||||
if gotPayload["reason"] != "outdated" {
|
||||
t.Fatalf("expected reason=outdated, got %v", gotPayload["reason"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirectForget_Bad_ApiError(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(404)
|
||||
|
|
@ -303,3 +345,124 @@ func TestDirectForget_Bad_ApiError(t *testing.T) {
|
|||
t.Error("expected error on 404")
|
||||
}
|
||||
}
|
||||
|
||||
// --- list tool tests ---
|
||||
|
||||
func TestDirectList_Good(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
t.Errorf("expected GET, got %s", r.Method)
|
||||
}
|
||||
if got := r.URL.Query().Get("project"); got != "eaas" {
|
||||
t.Errorf("expected project=eaas, got %q", got)
|
||||
}
|
||||
if got := r.URL.Query().Get("type"); got != "decision" {
|
||||
t.Errorf("expected type=decision, got %q", got)
|
||||
}
|
||||
if got := r.URL.Query().Get("agent_id"); got != "virgil" {
|
||||
t.Errorf("expected agent_id=virgil, got %q", got)
|
||||
}
|
||||
if got := r.URL.Query().Get("limit"); got != "20" {
|
||||
t.Errorf("expected limit=20, got %q", got)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
json.NewEncoder(w).Encode(map[string]any{
|
||||
"memories": []any{
|
||||
map[string]any{
|
||||
"id": "mem-1",
|
||||
"content": "use qdrant",
|
||||
"type": "decision",
|
||||
"project": "eaas",
|
||||
"agent_id": "virgil",
|
||||
"score": 0.88,
|
||||
"created_at": "2026-03-01T00:00:00Z",
|
||||
},
|
||||
},
|
||||
})
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
s := newTestDirect(srv.URL)
|
||||
_, out, err := s.list(context.Background(), nil, ListInput{
|
||||
Project: "eaas",
|
||||
Type: "decision",
|
||||
AgentID: "virgil",
|
||||
Limit: 20,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("list failed: %v", err)
|
||||
}
|
||||
if !out.Success || out.Count != 1 {
|
||||
t.Fatalf("expected 1 memory, got %+v", out)
|
||||
}
|
||||
if out.Memories[0].ID != "mem-1" {
|
||||
t.Errorf("expected id=mem-1, got %q", out.Memories[0].ID)
|
||||
}
|
||||
if out.Memories[0].Confidence != 0.88 {
|
||||
t.Errorf("expected score=0.88, got %f", out.Memories[0].Confidence)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirectList_Good_EmitsAgentIDChannelPayload(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
json.NewEncoder(w).Encode(map[string]any{"memories": []any{}})
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
var gotChannel string
|
||||
var gotPayload map[string]any
|
||||
|
||||
s := newTestDirect(srv.URL)
|
||||
s.onChannel = func(_ context.Context, channel string, data any) {
|
||||
gotChannel = channel
|
||||
if payload, ok := data.(map[string]any); ok {
|
||||
gotPayload = payload
|
||||
}
|
||||
}
|
||||
|
||||
_, out, err := s.list(context.Background(), nil, ListInput{
|
||||
Project: "eaas",
|
||||
Type: "decision",
|
||||
AgentID: "virgil",
|
||||
Limit: 20,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("list failed: %v", err)
|
||||
}
|
||||
if !out.Success {
|
||||
t.Fatal("expected list success")
|
||||
}
|
||||
if gotChannel != "brain.list.complete" {
|
||||
t.Fatalf("expected brain.list.complete, got %q", gotChannel)
|
||||
}
|
||||
if gotPayload == nil {
|
||||
t.Fatal("expected channel payload")
|
||||
}
|
||||
if gotPayload["agent_id"] != "virgil" {
|
||||
t.Fatalf("expected agent_id=virgil, got %v", gotPayload["agent_id"])
|
||||
}
|
||||
if gotPayload["project"] != "eaas" {
|
||||
t.Fatalf("expected project=eaas, got %v", gotPayload["project"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirectList_Good_DefaultLimit(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if got := r.URL.Query().Get("limit"); got != "50" {
|
||||
t.Errorf("expected limit=50, got %q", got)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
json.NewEncoder(w).Encode(map[string]any{"memories": []any{}})
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
s := newTestDirect(srv.URL)
|
||||
_, out, err := s.list(context.Background(), nil, ListInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("list failed: %v", err)
|
||||
}
|
||||
if !out.Success || out.Count != 0 {
|
||||
t.Fatalf("expected empty list, got %+v", out)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,10 +5,11 @@ package brain
|
|||
import (
|
||||
"net/http"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||
"forge.lthn.ai/core/api"
|
||||
"forge.lthn.ai/core/api/pkg/provider"
|
||||
"forge.lthn.ai/core/go-ws"
|
||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
|
|
@ -30,10 +31,16 @@ var (
|
|||
// NewProvider creates a brain provider that proxies to Laravel via the IDE bridge.
|
||||
// The WS hub is used to emit brain events. Pass nil for hub if not needed.
|
||||
func NewProvider(bridge *ide.Bridge, hub *ws.Hub) *BrainProvider {
|
||||
return &BrainProvider{
|
||||
p := &BrainProvider{
|
||||
bridge: bridge,
|
||||
hub: hub,
|
||||
}
|
||||
if bridge != nil {
|
||||
bridge.AddObserver(func(msg ide.BridgeMessage) {
|
||||
p.handleBridgeMessage(msg)
|
||||
})
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Name implements api.RouteGroup.
|
||||
|
|
@ -45,10 +52,10 @@ func (p *BrainProvider) BasePath() string { return "/api/brain" }
|
|||
// Channels implements provider.Streamable.
|
||||
func (p *BrainProvider) Channels() []string {
|
||||
return []string{
|
||||
"brain.remember.complete",
|
||||
"brain.recall.complete",
|
||||
"brain.forget.complete",
|
||||
"brain.list.complete",
|
||||
coremcp.ChannelBrainRememberDone,
|
||||
coremcp.ChannelBrainRecallDone,
|
||||
coremcp.ChannelBrainForgetDone,
|
||||
coremcp.ChannelBrainListDone,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -212,7 +219,7 @@ func (p *BrainProvider) remember(c *gin.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
p.emitEvent("brain.remember.complete", map[string]any{
|
||||
p.emitEvent(coremcp.ChannelBrainRememberDone, map[string]any{
|
||||
"type": input.Type,
|
||||
"project": input.Project,
|
||||
})
|
||||
|
|
@ -245,10 +252,6 @@ func (p *BrainProvider) recall(c *gin.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
p.emitEvent("brain.recall.complete", map[string]any{
|
||||
"query": input.Query,
|
||||
})
|
||||
|
||||
c.JSON(http.StatusOK, api.OK(RecallOutput{
|
||||
Success: true,
|
||||
Memories: []Memory{},
|
||||
|
|
@ -279,7 +282,7 @@ func (p *BrainProvider) forget(c *gin.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
p.emitEvent("brain.forget.complete", map[string]any{
|
||||
p.emitEvent(coremcp.ChannelBrainForgetDone, map[string]any{
|
||||
"id": input.ID,
|
||||
})
|
||||
|
||||
|
|
@ -314,11 +317,11 @@ func (p *BrainProvider) list(c *gin.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
p.emitEvent("brain.list.complete", map[string]any{
|
||||
"project": project,
|
||||
"type": typ,
|
||||
"agent": agentID,
|
||||
"limit": limit,
|
||||
p.emitEvent(coremcp.ChannelBrainListDone, map[string]any{
|
||||
"project": project,
|
||||
"type": typ,
|
||||
"agent_id": agentID,
|
||||
"limit": limit,
|
||||
})
|
||||
|
||||
c.JSON(http.StatusOK, api.OK(ListOutput{
|
||||
|
|
@ -347,3 +350,18 @@ func (p *BrainProvider) emitEvent(channel string, data any) {
|
|||
Data: data,
|
||||
})
|
||||
}
|
||||
|
||||
func (p *BrainProvider) handleBridgeMessage(msg ide.BridgeMessage) {
|
||||
switch msg.Type {
|
||||
case "brain_remember":
|
||||
p.emitEvent(coremcp.ChannelBrainRememberDone, bridgePayload(msg.Data, "type", "project"))
|
||||
case "brain_recall":
|
||||
payload := bridgePayload(msg.Data, "query", "project", "type", "agent_id")
|
||||
payload["count"] = bridgeCount(msg.Data)
|
||||
p.emitEvent(coremcp.ChannelBrainRecallDone, payload)
|
||||
case "brain_forget":
|
||||
p.emitEvent(coremcp.ChannelBrainForgetDone, bridgePayload(msg.Data, "id", "reason"))
|
||||
case "brain_list":
|
||||
p.emitEvent(coremcp.ChannelBrainListDone, bridgePayload(msg.Data, "project", "type", "agent_id", "limit"))
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,7 +2,11 @@
|
|||
|
||||
package brain
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||
)
|
||||
|
||||
func TestBrainProviderChannels_Good_IncludesListComplete(t *testing.T) {
|
||||
p := NewProvider(nil, nil)
|
||||
|
|
@ -20,3 +24,15 @@ func TestBrainProviderChannels_Good_IncludesListComplete(t *testing.T) {
|
|||
t.Fatalf("expected brain.list.complete in provider channels: %#v", channels)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBrainProviderHandleBridgeMessage_Good_SupportsBrainEvents(t *testing.T) {
|
||||
p := NewProvider(nil, nil)
|
||||
for _, msg := range []ide.BridgeMessage{
|
||||
{Type: "brain_remember", Data: map[string]any{"type": "bug", "project": "core/mcp"}},
|
||||
{Type: "brain_recall", Data: map[string]any{"query": "test", "memories": []any{map[string]any{"id": "m1"}}}},
|
||||
{Type: "brain_forget", Data: map[string]any{"id": "mem-123", "reason": "outdated"}},
|
||||
{Type: "brain_list", Data: map[string]any{"project": "core/mcp", "limit": 10}},
|
||||
} {
|
||||
p.handleBridgeMessage(msg)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ import (
|
|||
"context"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
|
|
@ -21,6 +22,8 @@ func (s *Subsystem) emitChannel(ctx context.Context, channel string, data any) {
|
|||
// -- Input/Output types -------------------------------------------------------
|
||||
|
||||
// RememberInput is the input for brain_remember.
|
||||
//
|
||||
// input := RememberInput{Content: "Use Qdrant for vector search", Type: "decision"}
|
||||
type RememberInput struct {
|
||||
Content string `json:"content"`
|
||||
Type string `json:"type"`
|
||||
|
|
@ -32,6 +35,8 @@ type RememberInput struct {
|
|||
}
|
||||
|
||||
// RememberOutput is the output for brain_remember.
|
||||
//
|
||||
// // out.Success == true
|
||||
type RememberOutput struct {
|
||||
Success bool `json:"success"`
|
||||
MemoryID string `json:"memoryId,omitempty"`
|
||||
|
|
@ -39,6 +44,8 @@ type RememberOutput struct {
|
|||
}
|
||||
|
||||
// RecallInput is the input for brain_recall.
|
||||
//
|
||||
// input := RecallInput{Query: "vector search", TopK: 5}
|
||||
type RecallInput struct {
|
||||
Query string `json:"query"`
|
||||
TopK int `json:"top_k,omitempty"`
|
||||
|
|
@ -46,6 +53,8 @@ type RecallInput struct {
|
|||
}
|
||||
|
||||
// RecallFilter holds optional filter criteria for brain_recall.
|
||||
//
|
||||
// filter := RecallFilter{Project: "core/mcp", MinConfidence: 0.5}
|
||||
type RecallFilter struct {
|
||||
Project string `json:"project,omitempty"`
|
||||
Type any `json:"type,omitempty"`
|
||||
|
|
@ -54,6 +63,8 @@ type RecallFilter struct {
|
|||
}
|
||||
|
||||
// RecallOutput is the output for brain_recall.
|
||||
//
|
||||
// // out.Memories contains ranked matches
|
||||
type RecallOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Count int `json:"count"`
|
||||
|
|
@ -61,6 +72,8 @@ type RecallOutput struct {
|
|||
}
|
||||
|
||||
// Memory is a single memory entry returned by recall or list.
|
||||
//
|
||||
// mem := Memory{ID: "m1", Type: "bug", Content: "Fix timeout handling"}
|
||||
type Memory struct {
|
||||
ID string `json:"id"`
|
||||
AgentID string `json:"agent_id"`
|
||||
|
|
@ -76,12 +89,16 @@ type Memory struct {
|
|||
}
|
||||
|
||||
// ForgetInput is the input for brain_forget.
|
||||
//
|
||||
// input := ForgetInput{ID: "m1"}
|
||||
type ForgetInput struct {
|
||||
ID string `json:"id"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
}
|
||||
|
||||
// ForgetOutput is the output for brain_forget.
|
||||
//
|
||||
// // out.Forgotten contains the deleted memory ID
|
||||
type ForgetOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Forgotten string `json:"forgotten"`
|
||||
|
|
@ -89,6 +106,8 @@ type ForgetOutput struct {
|
|||
}
|
||||
|
||||
// ListInput is the input for brain_list.
|
||||
//
|
||||
// input := ListInput{Project: "core/mcp", Limit: 50}
|
||||
type ListInput struct {
|
||||
Project string `json:"project,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
|
|
@ -97,6 +116,8 @@ type ListInput struct {
|
|||
}
|
||||
|
||||
// ListOutput is the output for brain_list.
|
||||
//
|
||||
// // out.Count reports how many memories were returned
|
||||
type ListOutput struct {
|
||||
Success bool `json:"success"`
|
||||
Count int `json:"count"`
|
||||
|
|
@ -105,23 +126,24 @@ type ListOutput struct {
|
|||
|
||||
// -- Tool registration --------------------------------------------------------
|
||||
|
||||
func (s *Subsystem) registerBrainTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *Subsystem) registerBrainTools(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
||||
Name: "brain_remember",
|
||||
Description: "Store a memory in the shared OpenBrain knowledge store. Persists decisions, observations, conventions, research, plans, bugs, or architecture knowledge for other agents.",
|
||||
}, s.brainRemember)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
||||
Name: "brain_recall",
|
||||
Description: "Semantic search across the shared OpenBrain knowledge store. Returns memories ranked by similarity to your query, with optional filtering.",
|
||||
}, s.brainRecall)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
||||
Name: "brain_forget",
|
||||
Description: "Remove a memory from the shared OpenBrain knowledge store. Permanently deletes from both database and vector index.",
|
||||
}, s.brainForget)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
||||
Name: "brain_list",
|
||||
Description: "List memories in the shared OpenBrain knowledge store. Supports filtering by project, type, and agent. No vector search -- use brain_recall for semantic queries.",
|
||||
}, s.brainList)
|
||||
|
|
@ -150,7 +172,7 @@ func (s *Subsystem) brainRemember(ctx context.Context, _ *mcp.CallToolRequest, i
|
|||
return nil, RememberOutput{}, coreerr.E("brain.remember", "failed to send brain_remember", err)
|
||||
}
|
||||
|
||||
s.emitChannel(ctx, "brain.remember.complete", map[string]any{
|
||||
s.emitChannel(ctx, coremcp.ChannelBrainRememberDone, map[string]any{
|
||||
"type": input.Type,
|
||||
"project": input.Project,
|
||||
})
|
||||
|
|
@ -178,11 +200,6 @@ func (s *Subsystem) brainRecall(ctx context.Context, _ *mcp.CallToolRequest, inp
|
|||
return nil, RecallOutput{}, coreerr.E("brain.recall", "failed to send brain_recall", err)
|
||||
}
|
||||
|
||||
s.emitChannel(ctx, "brain.recall.complete", map[string]any{
|
||||
"query": input.Query,
|
||||
"count": 0,
|
||||
})
|
||||
|
||||
return nil, RecallOutput{
|
||||
Success: true,
|
||||
Memories: []Memory{},
|
||||
|
|
@ -205,7 +222,7 @@ func (s *Subsystem) brainForget(ctx context.Context, _ *mcp.CallToolRequest, inp
|
|||
return nil, ForgetOutput{}, coreerr.E("brain.forget", "failed to send brain_forget", err)
|
||||
}
|
||||
|
||||
s.emitChannel(ctx, "brain.forget.complete", map[string]any{
|
||||
s.emitChannel(ctx, coremcp.ChannelBrainForgetDone, map[string]any{
|
||||
"id": input.ID,
|
||||
})
|
||||
|
||||
|
|
@ -238,11 +255,11 @@ func (s *Subsystem) brainList(ctx context.Context, _ *mcp.CallToolRequest, input
|
|||
return nil, ListOutput{}, coreerr.E("brain.list", "failed to send brain_list", err)
|
||||
}
|
||||
|
||||
s.emitChannel(ctx, "brain.list.complete", map[string]any{
|
||||
"project": input.Project,
|
||||
"type": input.Type,
|
||||
"agent": input.AgentID,
|
||||
"limit": limit,
|
||||
s.emitChannel(ctx, coremcp.ChannelBrainListDone, map[string]any{
|
||||
"project": input.Project,
|
||||
"type": input.Type,
|
||||
"agent_id": input.AgentID,
|
||||
"limit": limit,
|
||||
})
|
||||
|
||||
return nil, ListOutput{
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@
|
|||
package mcp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
|
|
@ -23,6 +24,10 @@ const maxBodySize = 10 << 20 // 10 MB
|
|||
// mcp.BridgeToAPI(svc, bridge)
|
||||
// bridge.Mount(router, "/v1/tools")
|
||||
func BridgeToAPI(svc *Service, bridge *api.ToolBridge) {
|
||||
if svc == nil || bridge == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for rec := range svc.ToolsSeq() {
|
||||
desc := api.ToolDescriptor{
|
||||
Name: rec.Name,
|
||||
|
|
@ -38,8 +43,16 @@ func BridgeToAPI(svc *Service, bridge *api.ToolBridge) {
|
|||
bridge.Add(desc, func(c *gin.Context) {
|
||||
var body []byte
|
||||
if c.Request.Body != nil {
|
||||
c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, maxBodySize)
|
||||
r := core.ReadAll(c.Request.Body)
|
||||
if !r.OK {
|
||||
if err, ok := r.Value.(error); ok {
|
||||
var maxBytesErr *http.MaxBytesError
|
||||
if errors.As(err, &maxBytesErr) || core.Contains(err.Error(), "request body too large") {
|
||||
c.JSON(http.StatusRequestEntityTooLarge, api.Fail("request_too_large", "Request body exceeds 10 MB limit"))
|
||||
return
|
||||
}
|
||||
}
|
||||
c.JSON(http.StatusBadRequest, api.Fail("invalid_request", "Failed to read request body"))
|
||||
return
|
||||
}
|
||||
|
|
@ -50,7 +63,7 @@ func BridgeToAPI(svc *Service, bridge *api.ToolBridge) {
|
|||
if err != nil {
|
||||
// Body present + error = likely bad input (malformed JSON).
|
||||
// No body + error = tool execution failure.
|
||||
if len(body) > 0 && core.Contains(err.Error(), "unmarshal") {
|
||||
if errors.Is(err, errInvalidRESTInput) {
|
||||
c.JSON(http.StatusBadRequest, api.Fail("invalid_input", "Malformed JSON in request body"))
|
||||
return
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
package mcp_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
|
@ -13,6 +13,10 @@ import (
|
|||
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
mcp "dappco.re/go/mcp/pkg/mcp"
|
||||
"dappco.re/go/mcp/pkg/mcp/agentic"
|
||||
"dappco.re/go/mcp/pkg/mcp/brain"
|
||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||
api "forge.lthn.ai/core/api"
|
||||
)
|
||||
|
||||
|
|
@ -21,13 +25,20 @@ func init() {
|
|||
}
|
||||
|
||||
func TestBridgeToAPI_Good_AllTools(t *testing.T) {
|
||||
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
||||
svc, err := mcp.New(mcp.Options{
|
||||
WorkspaceRoot: t.TempDir(),
|
||||
Subsystems: []mcp.Subsystem{
|
||||
brain.New(nil),
|
||||
agentic.NewPrep(),
|
||||
ide.New(nil, ide.Config{}),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bridge := api.NewToolBridge("/tools")
|
||||
BridgeToAPI(svc, bridge)
|
||||
mcp.BridgeToAPI(svc, bridge)
|
||||
|
||||
svcCount := len(svc.Tools())
|
||||
bridgeCount := len(bridge.Tools())
|
||||
|
|
@ -49,16 +60,22 @@ func TestBridgeToAPI_Good_AllTools(t *testing.T) {
|
|||
t.Errorf("bridge has tool %q not found in service", td.Name)
|
||||
}
|
||||
}
|
||||
|
||||
for _, want := range []string{"brain_list", "agentic_plan_create", "ide_dashboard_overview"} {
|
||||
if !svcNames[want] {
|
||||
t.Fatalf("expected recorded tool %q to be present", want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBridgeToAPI_Good_DescribableGroup(t *testing.T) {
|
||||
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: t.TempDir()})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bridge := api.NewToolBridge("/tools")
|
||||
BridgeToAPI(svc, bridge)
|
||||
mcp.BridgeToAPI(svc, bridge)
|
||||
|
||||
// ToolBridge implements DescribableGroup.
|
||||
var dg api.DescribableGroup = bridge
|
||||
|
|
@ -90,13 +107,13 @@ func TestBridgeToAPI_Good_FileRead(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
svc, err := New(Options{WorkspaceRoot: tmpDir})
|
||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: tmpDir})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bridge := api.NewToolBridge("/tools")
|
||||
BridgeToAPI(svc, bridge)
|
||||
mcp.BridgeToAPI(svc, bridge)
|
||||
|
||||
// Register with a Gin engine and make a request.
|
||||
engine := gin.New()
|
||||
|
|
@ -114,7 +131,7 @@ func TestBridgeToAPI_Good_FileRead(t *testing.T) {
|
|||
}
|
||||
|
||||
// Parse the response envelope.
|
||||
var resp api.Response[ReadFileOutput]
|
||||
var resp api.Response[mcp.ReadFileOutput]
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("unmarshal error: %v", err)
|
||||
}
|
||||
|
|
@ -130,13 +147,13 @@ func TestBridgeToAPI_Good_FileRead(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBridgeToAPI_Bad_InvalidJSON(t *testing.T) {
|
||||
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: t.TempDir()})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bridge := api.NewToolBridge("/tools")
|
||||
BridgeToAPI(svc, bridge)
|
||||
mcp.BridgeToAPI(svc, bridge)
|
||||
|
||||
engine := gin.New()
|
||||
rg := engine.Group(bridge.BasePath())
|
||||
|
|
@ -148,13 +165,8 @@ func TestBridgeToAPI_Bad_InvalidJSON(t *testing.T) {
|
|||
req.Header.Set("Content-Type", "application/json")
|
||||
engine.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusInternalServerError {
|
||||
// The handler unmarshals via RESTHandler which returns an error,
|
||||
// but since it's a JSON parse error it ends up as tool_error.
|
||||
// Check we get a non-200 with an error envelope.
|
||||
if w.Code == http.StatusOK {
|
||||
t.Fatalf("expected non-200 for invalid JSON, got 200")
|
||||
}
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Fatalf("expected 400 for invalid JSON, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp api.Response[any]
|
||||
|
|
@ -169,14 +181,49 @@ func TestBridgeToAPI_Bad_InvalidJSON(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestBridgeToAPI_Good_EndToEnd(t *testing.T) {
|
||||
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
||||
func TestBridgeToAPI_Bad_OversizedBody(t *testing.T) {
|
||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: t.TempDir()})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bridge := api.NewToolBridge("/tools")
|
||||
BridgeToAPI(svc, bridge)
|
||||
mcp.BridgeToAPI(svc, bridge)
|
||||
|
||||
engine := gin.New()
|
||||
rg := engine.Group(bridge.BasePath())
|
||||
bridge.RegisterRoutes(rg)
|
||||
|
||||
body := strings.Repeat("a", 10<<20+1)
|
||||
w := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodPost, "/tools/file_read", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
engine.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusRequestEntityTooLarge {
|
||||
t.Fatalf("expected 413 for oversized body, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp api.Response[any]
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("unmarshal error: %v", err)
|
||||
}
|
||||
if resp.Success {
|
||||
t.Fatal("expected Success=false for oversized body")
|
||||
}
|
||||
if resp.Error == nil {
|
||||
t.Fatal("expected error in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBridgeToAPI_Good_EndToEnd(t *testing.T) {
|
||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: t.TempDir()})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bridge := api.NewToolBridge("/tools")
|
||||
mcp.BridgeToAPI(svc, bridge)
|
||||
|
||||
// Create an api.Engine with the bridge registered and Swagger enabled.
|
||||
e, err := api.New(
|
||||
|
|
@ -212,7 +259,7 @@ func TestBridgeToAPI_Good_EndToEnd(t *testing.T) {
|
|||
t.Fatalf("expected 200 for /tools/lang_list, got %d", resp2.StatusCode)
|
||||
}
|
||||
|
||||
var langResp api.Response[GetSupportedLanguagesOutput]
|
||||
var langResp api.Response[mcp.GetSupportedLanguagesOutput]
|
||||
if err := json.NewDecoder(resp2.Body).Decode(&langResp); err != nil {
|
||||
t.Fatalf("unmarshal error: %v", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package ide
|
||||
|
||||
import (
|
||||
|
|
@ -12,7 +14,13 @@ import (
|
|||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
// BridgeMessage is the wire format between the IDE and Laravel.
|
||||
// BridgeMessage is the wire format between the IDE bridge and Laravel.
|
||||
//
|
||||
// msg := BridgeMessage{
|
||||
// Type: "chat_send",
|
||||
// SessionID: "sess-42",
|
||||
// Data: "hello",
|
||||
// }
|
||||
type BridgeMessage struct {
|
||||
Type string `json:"type"`
|
||||
Channel string `json:"channel,omitempty"`
|
||||
|
|
@ -23,6 +31,8 @@ type BridgeMessage struct {
|
|||
|
||||
// Bridge maintains a WebSocket connection to the Laravel core-agentic
|
||||
// backend and forwards responses to a local ws.Hub.
|
||||
//
|
||||
// bridge := NewBridge(hub, cfg)
|
||||
type Bridge struct {
|
||||
cfg Config
|
||||
hub *ws.Hub
|
||||
|
|
@ -31,22 +41,57 @@ type Bridge struct {
|
|||
mu sync.Mutex
|
||||
connected bool
|
||||
cancel context.CancelFunc
|
||||
observers []func(BridgeMessage)
|
||||
}
|
||||
|
||||
// NewBridge creates a bridge that will connect to the Laravel backend and
|
||||
// forward incoming messages to the provided ws.Hub channels.
|
||||
//
|
||||
// bridge := NewBridge(hub, cfg)
|
||||
func NewBridge(hub *ws.Hub, cfg Config) *Bridge {
|
||||
return &Bridge{cfg: cfg, hub: hub}
|
||||
}
|
||||
|
||||
// SetObserver registers a callback for inbound bridge messages.
|
||||
//
|
||||
// bridge.SetObserver(func(msg BridgeMessage) {
|
||||
// fmt.Println(msg.Type)
|
||||
// })
|
||||
func (b *Bridge) SetObserver(fn func(BridgeMessage)) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if fn == nil {
|
||||
b.observers = nil
|
||||
return
|
||||
}
|
||||
b.observers = []func(BridgeMessage){fn}
|
||||
}
|
||||
|
||||
// AddObserver registers an additional bridge observer.
|
||||
// Observers are invoked in registration order after each inbound message.
|
||||
//
|
||||
// bridge.AddObserver(func(msg BridgeMessage) { log.Println(msg.Type) })
|
||||
func (b *Bridge) AddObserver(fn func(BridgeMessage)) {
|
||||
if fn == nil {
|
||||
return
|
||||
}
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.observers = append(b.observers, fn)
|
||||
}
|
||||
|
||||
// Start begins the connection loop in a background goroutine.
|
||||
// Call Shutdown to stop it.
|
||||
//
|
||||
// bridge.Start(ctx)
|
||||
func (b *Bridge) Start(ctx context.Context) {
|
||||
ctx, b.cancel = context.WithCancel(ctx)
|
||||
go b.connectLoop(ctx)
|
||||
}
|
||||
|
||||
// Shutdown cleanly closes the bridge.
|
||||
//
|
||||
// bridge.Shutdown()
|
||||
func (b *Bridge) Shutdown() {
|
||||
if b.cancel != nil {
|
||||
b.cancel()
|
||||
|
|
@ -61,6 +106,10 @@ func (b *Bridge) Shutdown() {
|
|||
}
|
||||
|
||||
// Connected reports whether the bridge has an active connection.
|
||||
//
|
||||
// if bridge.Connected() {
|
||||
// fmt.Println("online")
|
||||
// }
|
||||
func (b *Bridge) Connected() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
|
@ -68,6 +117,8 @@ func (b *Bridge) Connected() bool {
|
|||
}
|
||||
|
||||
// Send sends a message to the Laravel backend.
|
||||
//
|
||||
// err := bridge.Send(BridgeMessage{Type: "dashboard_overview"})
|
||||
func (b *Bridge) Send(msg BridgeMessage) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
|
@ -161,9 +212,24 @@ func (b *Bridge) readLoop(ctx context.Context) {
|
|||
}
|
||||
|
||||
b.dispatch(msg)
|
||||
for _, observer := range b.snapshotObservers() {
|
||||
observer(msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Bridge) snapshotObservers() []func(BridgeMessage) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if len(b.observers) == 0 {
|
||||
return nil
|
||||
}
|
||||
observers := make([]func(BridgeMessage), len(b.observers))
|
||||
copy(observers, b.observers)
|
||||
return observers
|
||||
}
|
||||
|
||||
// dispatch routes an incoming message to the appropriate ws.Hub channel.
|
||||
func (b *Bridge) dispatch(msg BridgeMessage) {
|
||||
if b.hub == nil {
|
||||
|
|
|
|||
|
|
@ -164,6 +164,71 @@ func TestBridge_Good_MessageDispatch(t *testing.T) {
|
|||
// This confirms the dispatch path ran without error.
|
||||
}
|
||||
|
||||
func TestBridge_Good_MultipleObservers(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
conn, err := testUpgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
msg := BridgeMessage{
|
||||
Type: "brain_recall",
|
||||
Data: map[string]any{
|
||||
"query": "test query",
|
||||
"count": 3,
|
||||
},
|
||||
}
|
||||
data, _ := json.Marshal(msg)
|
||||
_ = conn.WriteMessage(websocket.TextMessage, data)
|
||||
|
||||
for {
|
||||
if _, _, err := conn.ReadMessage(); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
hub := ws.NewHub()
|
||||
ctx := t.Context()
|
||||
go hub.Run(ctx)
|
||||
|
||||
cfg := DefaultConfig()
|
||||
cfg.LaravelWSURL = wsURL(ts)
|
||||
cfg.ReconnectInterval = 100 * time.Millisecond
|
||||
|
||||
bridge := NewBridge(hub, cfg)
|
||||
|
||||
first := make(chan struct{}, 1)
|
||||
second := make(chan struct{}, 1)
|
||||
bridge.AddObserver(func(msg BridgeMessage) {
|
||||
if msg.Type == "brain_recall" {
|
||||
first <- struct{}{}
|
||||
}
|
||||
})
|
||||
bridge.AddObserver(func(msg BridgeMessage) {
|
||||
if msg.Type == "brain_recall" {
|
||||
second <- struct{}{}
|
||||
}
|
||||
})
|
||||
|
||||
bridge.Start(ctx)
|
||||
waitConnected(t, bridge, 2*time.Second)
|
||||
|
||||
select {
|
||||
case <-first:
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Fatal("timed out waiting for first observer")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-second:
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Fatal("timed out waiting for second observer")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBridge_Good_Reconnect(t *testing.T) {
|
||||
// Use atomic counter to avoid data race between HTTP handler goroutine
|
||||
// and the test goroutine.
|
||||
|
|
|
|||
|
|
@ -1,10 +1,17 @@
|
|||
// Package ide provides an MCP subsystem that bridges the desktop IDE to
|
||||
// a Laravel core-agentic backend over WebSocket.
|
||||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package ide
|
||||
|
||||
import "time"
|
||||
|
||||
// Config holds connection and workspace settings for the IDE subsystem.
|
||||
//
|
||||
// cfg := Config{
|
||||
// LaravelWSURL: "ws://localhost:9876/ws",
|
||||
// WorkspaceRoot: "/workspace",
|
||||
// }
|
||||
type Config struct {
|
||||
// LaravelWSURL is the WebSocket endpoint for the Laravel core-agentic backend.
|
||||
LaravelWSURL string
|
||||
|
|
@ -24,11 +31,15 @@ type Config struct {
|
|||
}
|
||||
|
||||
// DefaultConfig returns sensible defaults for local development.
|
||||
//
|
||||
// cfg := DefaultConfig()
|
||||
func DefaultConfig() Config {
|
||||
return Config{}.WithDefaults()
|
||||
}
|
||||
|
||||
// WithDefaults fills unset fields with the default development values.
|
||||
//
|
||||
// cfg := Config{WorkspaceRoot: "/workspace"}.WithDefaults()
|
||||
func (c Config) WithDefaults() Config {
|
||||
if c.LaravelWSURL == "" {
|
||||
c.LaravelWSURL = "ws://localhost:9876/ws"
|
||||
|
|
|
|||
|
|
@ -1,11 +1,17 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package ide
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-ws"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// errBridgeNotAvailable is returned when a tool requires the Laravel bridge
|
||||
|
|
@ -14,32 +20,62 @@ var errBridgeNotAvailable = coreerr.E("ide", "bridge not available", nil)
|
|||
|
||||
// Subsystem implements mcp.Subsystem and mcp.SubsystemWithShutdown for the IDE.
|
||||
type Subsystem struct {
|
||||
cfg Config
|
||||
bridge *Bridge
|
||||
hub *ws.Hub
|
||||
cfg Config
|
||||
bridge *Bridge
|
||||
hub *ws.Hub
|
||||
notifier coremcp.Notifier
|
||||
|
||||
stateMu sync.Mutex
|
||||
sessionOrder []string
|
||||
sessions map[string]Session
|
||||
chats map[string][]ChatMessage
|
||||
buildOrder []string
|
||||
builds map[string]BuildInfo
|
||||
buildLogMap map[string][]string
|
||||
activity []ActivityEvent
|
||||
}
|
||||
|
||||
var (
|
||||
_ coremcp.Subsystem = (*Subsystem)(nil)
|
||||
_ coremcp.SubsystemWithShutdown = (*Subsystem)(nil)
|
||||
_ coremcp.SubsystemWithNotifier = (*Subsystem)(nil)
|
||||
)
|
||||
|
||||
// New creates an IDE subsystem from a Config DTO.
|
||||
//
|
||||
// cfg := DefaultConfig()
|
||||
// ide := New(hub, cfg)
|
||||
//
|
||||
// The ws.Hub is used for real-time forwarding; pass nil if headless
|
||||
// (tools still work but real-time streaming is disabled).
|
||||
func New(hub *ws.Hub, cfg Config) *Subsystem {
|
||||
cfg = cfg.WithDefaults()
|
||||
var bridge *Bridge
|
||||
if hub != nil {
|
||||
bridge = NewBridge(hub, cfg)
|
||||
s := &Subsystem{
|
||||
cfg: cfg,
|
||||
bridge: nil,
|
||||
hub: hub,
|
||||
sessions: make(map[string]Session),
|
||||
chats: make(map[string][]ChatMessage),
|
||||
builds: make(map[string]BuildInfo),
|
||||
buildLogMap: make(map[string][]string),
|
||||
}
|
||||
return &Subsystem{cfg: cfg, bridge: bridge, hub: hub}
|
||||
if hub != nil {
|
||||
s.bridge = NewBridge(hub, cfg)
|
||||
s.bridge.AddObserver(func(msg BridgeMessage) {
|
||||
s.handleBridgeMessage(msg)
|
||||
})
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Name implements mcp.Subsystem.
|
||||
func (s *Subsystem) Name() string { return "ide" }
|
||||
|
||||
// RegisterTools implements mcp.Subsystem.
|
||||
func (s *Subsystem) RegisterTools(server *mcp.Server) {
|
||||
s.registerChatTools(server)
|
||||
s.registerBuildTools(server)
|
||||
s.registerDashboardTools(server)
|
||||
func (s *Subsystem) RegisterTools(svc *coremcp.Service) {
|
||||
s.registerChatTools(svc)
|
||||
s.registerBuildTools(svc)
|
||||
s.registerDashboardTools(svc)
|
||||
}
|
||||
|
||||
// Shutdown implements mcp.SubsystemWithShutdown.
|
||||
|
|
@ -50,6 +86,11 @@ func (s *Subsystem) Shutdown(_ context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// SetNotifier wires the shared MCP notifier into the IDE subsystem.
|
||||
func (s *Subsystem) SetNotifier(n coremcp.Notifier) {
|
||||
s.notifier = n
|
||||
}
|
||||
|
||||
// Bridge returns the Laravel WebSocket bridge (may be nil in headless mode).
|
||||
func (s *Subsystem) Bridge() *Bridge { return s.bridge }
|
||||
|
||||
|
|
@ -59,3 +100,469 @@ func (s *Subsystem) StartBridge(ctx context.Context) {
|
|||
s.bridge.Start(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Subsystem) addSession(session Session) {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
if s.sessions == nil {
|
||||
s.sessions = make(map[string]Session)
|
||||
}
|
||||
if s.chats == nil {
|
||||
s.chats = make(map[string][]ChatMessage)
|
||||
}
|
||||
if _, exists := s.sessions[session.ID]; !exists {
|
||||
s.sessionOrder = append(s.sessionOrder, session.ID)
|
||||
}
|
||||
s.sessions[session.ID] = session
|
||||
}
|
||||
|
||||
func (s *Subsystem) addBuild(build BuildInfo) {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
if s.builds == nil {
|
||||
s.builds = make(map[string]BuildInfo)
|
||||
}
|
||||
if s.buildLogMap == nil {
|
||||
s.buildLogMap = make(map[string][]string)
|
||||
}
|
||||
if _, exists := s.builds[build.ID]; !exists {
|
||||
s.buildOrder = append(s.buildOrder, build.ID)
|
||||
}
|
||||
if build.StartedAt.IsZero() {
|
||||
build.StartedAt = time.Now()
|
||||
}
|
||||
s.builds[build.ID] = build
|
||||
}
|
||||
|
||||
func (s *Subsystem) listBuilds(repo string, limit int) []BuildInfo {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
if len(s.buildOrder) == 0 {
|
||||
return []BuildInfo{}
|
||||
}
|
||||
|
||||
if limit <= 0 {
|
||||
limit = len(s.buildOrder)
|
||||
}
|
||||
|
||||
builds := make([]BuildInfo, 0, limit)
|
||||
for i := len(s.buildOrder) - 1; i >= 0; i-- {
|
||||
id := s.buildOrder[i]
|
||||
build, ok := s.builds[id]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if repo != "" && build.Repo != repo {
|
||||
continue
|
||||
}
|
||||
builds = append(builds, build)
|
||||
if len(builds) >= limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
return builds
|
||||
}
|
||||
|
||||
func (s *Subsystem) appendBuildLog(buildID, line string) {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
if s.buildLogMap == nil {
|
||||
s.buildLogMap = make(map[string][]string)
|
||||
}
|
||||
s.buildLogMap[buildID] = append(s.buildLogMap[buildID], line)
|
||||
}
|
||||
|
||||
func (s *Subsystem) setBuildLogs(buildID string, lines []string) {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
if s.buildLogMap == nil {
|
||||
s.buildLogMap = make(map[string][]string)
|
||||
}
|
||||
if len(lines) == 0 {
|
||||
s.buildLogMap[buildID] = []string{}
|
||||
return
|
||||
}
|
||||
out := make([]string, len(lines))
|
||||
copy(out, lines)
|
||||
s.buildLogMap[buildID] = out
|
||||
}
|
||||
|
||||
func (s *Subsystem) buildLogTail(buildID string, tail int) []string {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
lines := s.buildLogMap[buildID]
|
||||
if len(lines) == 0 {
|
||||
return []string{}
|
||||
}
|
||||
if tail <= 0 || tail > len(lines) {
|
||||
tail = len(lines)
|
||||
}
|
||||
start := len(lines) - tail
|
||||
out := make([]string, tail)
|
||||
copy(out, lines[start:])
|
||||
return out
|
||||
}
|
||||
|
||||
func (s *Subsystem) buildSnapshot(buildID string) (BuildInfo, bool) {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
build, ok := s.builds[buildID]
|
||||
return build, ok
|
||||
}
|
||||
|
||||
func (s *Subsystem) buildRepoCount() int {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
repos := make(map[string]struct{})
|
||||
for _, build := range s.builds {
|
||||
if build.Repo != "" {
|
||||
repos[build.Repo] = struct{}{}
|
||||
}
|
||||
}
|
||||
return len(repos)
|
||||
}
|
||||
|
||||
func (s *Subsystem) listSessions() []Session {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
if len(s.sessionOrder) == 0 {
|
||||
return []Session{}
|
||||
}
|
||||
|
||||
result := make([]Session, 0, len(s.sessionOrder))
|
||||
for _, id := range s.sessionOrder {
|
||||
if session, ok := s.sessions[id]; ok {
|
||||
result = append(result, session)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *Subsystem) appendChatMessage(sessionID, role, content string) {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
if s.chats == nil {
|
||||
s.chats = make(map[string][]ChatMessage)
|
||||
}
|
||||
s.chats[sessionID] = append(s.chats[sessionID], ChatMessage{
|
||||
Role: role,
|
||||
Content: content,
|
||||
Timestamp: time.Now(),
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Subsystem) chatMessages(sessionID string) []ChatMessage {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
history := s.chats[sessionID]
|
||||
if len(history) == 0 {
|
||||
return []ChatMessage{}
|
||||
}
|
||||
out := make([]ChatMessage, len(history))
|
||||
copy(out, history)
|
||||
return out
|
||||
}
|
||||
|
||||
func (s *Subsystem) recordActivity(typ, msg string) {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
s.activity = append(s.activity, ActivityEvent{
|
||||
Type: typ,
|
||||
Message: msg,
|
||||
Timestamp: time.Now(),
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Subsystem) activityFeed(limit int) []ActivityEvent {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
|
||||
if limit <= 0 || limit > len(s.activity) {
|
||||
limit = len(s.activity)
|
||||
}
|
||||
if limit == 0 {
|
||||
return []ActivityEvent{}
|
||||
}
|
||||
|
||||
start := len(s.activity) - limit
|
||||
out := make([]ActivityEvent, limit)
|
||||
copy(out, s.activity[start:])
|
||||
return out
|
||||
}
|
||||
|
||||
func (s *Subsystem) handleBridgeMessage(msg BridgeMessage) {
|
||||
switch msg.Type {
|
||||
case "build_status":
|
||||
if build, ok := buildInfoFromData(msg.Data); ok {
|
||||
s.addBuild(build)
|
||||
s.emitBuildLifecycle(build)
|
||||
if lines := buildLinesFromData(msg.Data); len(lines) > 0 {
|
||||
s.setBuildLogs(build.ID, lines)
|
||||
}
|
||||
}
|
||||
case "build_list":
|
||||
for _, build := range buildInfosFromData(msg.Data) {
|
||||
s.addBuild(build)
|
||||
}
|
||||
case "build_logs":
|
||||
buildID, lines := buildLogsFromData(msg.Data)
|
||||
if buildID != "" {
|
||||
s.setBuildLogs(buildID, lines)
|
||||
}
|
||||
case "session_list":
|
||||
for _, session := range sessionsFromData(msg.Data) {
|
||||
s.addSession(session)
|
||||
}
|
||||
case "session_create":
|
||||
if session, ok := sessionFromData(msg.Data); ok {
|
||||
s.addSession(session)
|
||||
}
|
||||
case "chat_history":
|
||||
if sessionID, messages := chatHistoryFromData(msg.Data); sessionID != "" {
|
||||
for _, message := range messages {
|
||||
s.appendChatMessage(sessionID, message.Role, message.Content)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Subsystem) emitBuildLifecycle(build BuildInfo) {
|
||||
if s.notifier == nil {
|
||||
return
|
||||
}
|
||||
|
||||
channel := ""
|
||||
switch build.Status {
|
||||
case "running", "in_progress", "started":
|
||||
channel = coremcp.ChannelBuildStart
|
||||
case "success", "succeeded", "completed", "passed":
|
||||
channel = coremcp.ChannelBuildComplete
|
||||
case "failed", "error":
|
||||
channel = coremcp.ChannelBuildFailed
|
||||
default:
|
||||
return
|
||||
}
|
||||
|
||||
payload := map[string]any{
|
||||
"id": build.ID,
|
||||
"repo": build.Repo,
|
||||
"branch": build.Branch,
|
||||
"status": build.Status,
|
||||
"startedAt": build.StartedAt,
|
||||
}
|
||||
if build.Duration != "" {
|
||||
payload["duration"] = build.Duration
|
||||
}
|
||||
s.notifier.ChannelSend(context.Background(), channel, payload)
|
||||
}
|
||||
|
||||
func buildInfoFromData(data any) (BuildInfo, bool) {
|
||||
m, ok := data.(map[string]any)
|
||||
if !ok {
|
||||
return BuildInfo{}, false
|
||||
}
|
||||
|
||||
id, _ := m["buildId"].(string)
|
||||
if id == "" {
|
||||
id, _ = m["id"].(string)
|
||||
}
|
||||
if id == "" {
|
||||
return BuildInfo{}, false
|
||||
}
|
||||
|
||||
build := BuildInfo{
|
||||
ID: id,
|
||||
Repo: stringFromAny(m["repo"]),
|
||||
Branch: stringFromAny(m["branch"]),
|
||||
Status: stringFromAny(m["status"]),
|
||||
}
|
||||
if build.Status == "" {
|
||||
build.Status = "unknown"
|
||||
}
|
||||
if startedAt, ok := m["startedAt"].(time.Time); ok {
|
||||
build.StartedAt = startedAt
|
||||
}
|
||||
if duration := stringFromAny(m["duration"]); duration != "" {
|
||||
build.Duration = duration
|
||||
}
|
||||
return build, true
|
||||
}
|
||||
|
||||
func buildInfosFromData(data any) []BuildInfo {
|
||||
m, ok := data.(map[string]any)
|
||||
if !ok {
|
||||
return []BuildInfo{}
|
||||
}
|
||||
|
||||
raw, ok := m["builds"].([]any)
|
||||
if !ok {
|
||||
return []BuildInfo{}
|
||||
}
|
||||
|
||||
builds := make([]BuildInfo, 0, len(raw))
|
||||
for _, item := range raw {
|
||||
build, ok := buildInfoFromData(item)
|
||||
if ok {
|
||||
builds = append(builds, build)
|
||||
}
|
||||
}
|
||||
return builds
|
||||
}
|
||||
|
||||
func buildLinesFromData(data any) []string {
|
||||
_, lines := buildLogsFromData(data)
|
||||
return lines
|
||||
}
|
||||
|
||||
func buildLogsFromData(data any) (string, []string) {
|
||||
m, ok := data.(map[string]any)
|
||||
if !ok {
|
||||
return "", []string{}
|
||||
}
|
||||
|
||||
buildID, _ := m["buildId"].(string)
|
||||
if buildID == "" {
|
||||
buildID, _ = m["id"].(string)
|
||||
}
|
||||
|
||||
switch raw := m["lines"].(type) {
|
||||
case []any:
|
||||
lines := make([]string, 0, len(raw))
|
||||
for _, item := range raw {
|
||||
lines = append(lines, stringFromAny(item))
|
||||
}
|
||||
return buildID, lines
|
||||
case []string:
|
||||
lines := make([]string, len(raw))
|
||||
copy(lines, raw)
|
||||
return buildID, lines
|
||||
}
|
||||
|
||||
if output := stringFromAny(m["output"]); output != "" {
|
||||
return buildID, []string{output}
|
||||
}
|
||||
|
||||
return buildID, []string{}
|
||||
}
|
||||
|
||||
func sessionsFromData(data any) []Session {
|
||||
m, ok := data.(map[string]any)
|
||||
if !ok {
|
||||
return []Session{}
|
||||
}
|
||||
|
||||
raw, ok := m["sessions"].([]any)
|
||||
if !ok {
|
||||
return []Session{}
|
||||
}
|
||||
|
||||
sessions := make([]Session, 0, len(raw))
|
||||
for _, item := range raw {
|
||||
session, ok := sessionFromData(item)
|
||||
if ok {
|
||||
sessions = append(sessions, session)
|
||||
}
|
||||
}
|
||||
return sessions
|
||||
}
|
||||
|
||||
func sessionFromData(data any) (Session, bool) {
|
||||
m, ok := data.(map[string]any)
|
||||
if !ok {
|
||||
return Session{}, false
|
||||
}
|
||||
|
||||
id, _ := m["id"].(string)
|
||||
if id == "" {
|
||||
return Session{}, false
|
||||
}
|
||||
|
||||
session := Session{
|
||||
ID: id,
|
||||
Name: stringFromAny(m["name"]),
|
||||
Status: stringFromAny(m["status"]),
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
if createdAt, ok := m["createdAt"].(time.Time); ok {
|
||||
session.CreatedAt = createdAt
|
||||
}
|
||||
if session.Status == "" {
|
||||
session.Status = "unknown"
|
||||
}
|
||||
return session, true
|
||||
}
|
||||
|
||||
func chatHistoryFromData(data any) (string, []ChatMessage) {
|
||||
m, ok := data.(map[string]any)
|
||||
if !ok {
|
||||
return "", []ChatMessage{}
|
||||
}
|
||||
|
||||
sessionID, _ := m["sessionId"].(string)
|
||||
if sessionID == "" {
|
||||
sessionID, _ = m["session_id"].(string)
|
||||
}
|
||||
|
||||
raw, ok := m["messages"].([]any)
|
||||
if !ok {
|
||||
return sessionID, []ChatMessage{}
|
||||
}
|
||||
|
||||
messages := make([]ChatMessage, 0, len(raw))
|
||||
for _, item := range raw {
|
||||
if msg, ok := chatMessageFromData(item); ok {
|
||||
messages = append(messages, msg)
|
||||
}
|
||||
}
|
||||
return sessionID, messages
|
||||
}
|
||||
|
||||
func chatMessageFromData(data any) (ChatMessage, bool) {
|
||||
m, ok := data.(map[string]any)
|
||||
if !ok {
|
||||
return ChatMessage{}, false
|
||||
}
|
||||
|
||||
role := stringFromAny(m["role"])
|
||||
content := stringFromAny(m["content"])
|
||||
if role == "" && content == "" {
|
||||
return ChatMessage{}, false
|
||||
}
|
||||
|
||||
msg := ChatMessage{
|
||||
Role: role,
|
||||
Content: content,
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
if ts, ok := m["timestamp"].(time.Time); ok {
|
||||
msg.Timestamp = ts
|
||||
}
|
||||
return msg, true
|
||||
}
|
||||
|
||||
func stringFromAny(v any) string {
|
||||
switch value := v.(type) {
|
||||
case string:
|
||||
return value
|
||||
case fmt.Stringer:
|
||||
return value.String()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func newSessionID() string {
|
||||
return core.ID()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,20 +1,27 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package ide
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// Build tool input/output types.
|
||||
|
||||
// BuildStatusInput is the input for ide_build_status.
|
||||
//
|
||||
// input := BuildStatusInput{BuildID: "build-123"}
|
||||
type BuildStatusInput struct {
|
||||
BuildID string `json:"buildId"`
|
||||
}
|
||||
|
||||
// BuildInfo represents a single build.
|
||||
//
|
||||
// info := BuildInfo{ID: "build-123", Repo: "go-io", Status: "running"}
|
||||
type BuildInfo struct {
|
||||
ID string `json:"id"`
|
||||
Repo string `json:"repo"`
|
||||
|
|
@ -25,90 +32,102 @@ type BuildInfo struct {
|
|||
}
|
||||
|
||||
// BuildStatusOutput is the output for ide_build_status.
|
||||
//
|
||||
// // out.Build.Status == "running"
|
||||
type BuildStatusOutput struct {
|
||||
Build BuildInfo `json:"build"`
|
||||
}
|
||||
|
||||
// BuildListInput is the input for ide_build_list.
|
||||
//
|
||||
// input := BuildListInput{Repo: "go-io", Limit: 20}
|
||||
type BuildListInput struct {
|
||||
Repo string `json:"repo,omitempty"`
|
||||
Limit int `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
// BuildListOutput is the output for ide_build_list.
|
||||
//
|
||||
// // out.Builds holds the local build snapshot
|
||||
type BuildListOutput struct {
|
||||
Builds []BuildInfo `json:"builds"`
|
||||
}
|
||||
|
||||
// BuildLogsInput is the input for ide_build_logs.
|
||||
//
|
||||
// input := BuildLogsInput{BuildID: "build-123", Tail: 200}
|
||||
type BuildLogsInput struct {
|
||||
BuildID string `json:"buildId"`
|
||||
Tail int `json:"tail,omitempty"`
|
||||
}
|
||||
|
||||
// BuildLogsOutput is the output for ide_build_logs.
|
||||
//
|
||||
// // out.Lines contains the captured build log lines
|
||||
type BuildLogsOutput struct {
|
||||
BuildID string `json:"buildId"`
|
||||
Lines []string `json:"lines"`
|
||||
}
|
||||
|
||||
func (s *Subsystem) registerBuildTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *Subsystem) registerBuildTools(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_build_status",
|
||||
Description: "Get the status of a specific build",
|
||||
}, s.buildStatus)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_build_list",
|
||||
Description: "List recent builds, optionally filtered by repository",
|
||||
}, s.buildList)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_build_logs",
|
||||
Description: "Retrieve log output for a build",
|
||||
}, s.buildLogs)
|
||||
}
|
||||
|
||||
// buildStatus requests build status from the Laravel backend.
|
||||
// Stub implementation: sends request via bridge, returns "unknown" status. Awaiting Laravel backend.
|
||||
// buildStatus returns a local best-effort build status and refreshes the
|
||||
// Laravel backend when the bridge is available.
|
||||
func (s *Subsystem) buildStatus(_ context.Context, _ *mcp.CallToolRequest, input BuildStatusInput) (*mcp.CallToolResult, BuildStatusOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, BuildStatusOutput{}, errBridgeNotAvailable
|
||||
if s.bridge != nil {
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "build_status",
|
||||
Data: map[string]any{"buildId": input.BuildID},
|
||||
})
|
||||
}
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "build_status",
|
||||
Data: map[string]any{"buildId": input.BuildID},
|
||||
})
|
||||
return nil, BuildStatusOutput{
|
||||
Build: BuildInfo{ID: input.BuildID, Status: "unknown"},
|
||||
}, nil
|
||||
|
||||
build := BuildInfo{ID: input.BuildID, Status: "unknown"}
|
||||
if cached, ok := s.buildSnapshot(input.BuildID); ok {
|
||||
build = cached
|
||||
}
|
||||
|
||||
return nil, BuildStatusOutput{Build: build}, nil
|
||||
}
|
||||
|
||||
// buildList requests a list of builds from the Laravel backend.
|
||||
// Stub implementation: sends request via bridge, returns empty list. Awaiting Laravel backend.
|
||||
// buildList returns the local build list snapshot and refreshes the Laravel
|
||||
// backend when the bridge is available.
|
||||
func (s *Subsystem) buildList(_ context.Context, _ *mcp.CallToolRequest, input BuildListInput) (*mcp.CallToolResult, BuildListOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, BuildListOutput{}, errBridgeNotAvailable
|
||||
if s.bridge != nil {
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "build_list",
|
||||
Data: map[string]any{"repo": input.Repo, "limit": input.Limit},
|
||||
})
|
||||
}
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "build_list",
|
||||
Data: map[string]any{"repo": input.Repo, "limit": input.Limit},
|
||||
})
|
||||
return nil, BuildListOutput{Builds: []BuildInfo{}}, nil
|
||||
return nil, BuildListOutput{Builds: s.listBuilds(input.Repo, input.Limit)}, nil
|
||||
}
|
||||
|
||||
// buildLogs requests build log output from the Laravel backend.
|
||||
// Stub implementation: sends request via bridge, returns empty lines. Awaiting Laravel backend.
|
||||
// buildLogs returns the local build log snapshot and refreshes the Laravel
|
||||
// backend when the bridge is available.
|
||||
func (s *Subsystem) buildLogs(_ context.Context, _ *mcp.CallToolRequest, input BuildLogsInput) (*mcp.CallToolResult, BuildLogsOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, BuildLogsOutput{}, errBridgeNotAvailable
|
||||
if s.bridge != nil {
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "build_logs",
|
||||
Data: map[string]any{"buildId": input.BuildID, "tail": input.Tail},
|
||||
})
|
||||
}
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "build_logs",
|
||||
Data: map[string]any{"buildId": input.BuildID, "tail": input.Tail},
|
||||
})
|
||||
return nil, BuildLogsOutput{
|
||||
BuildID: input.BuildID,
|
||||
Lines: []string{},
|
||||
Lines: s.buildLogTail(input.BuildID, input.Tail),
|
||||
}, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,9 +1,12 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package ide
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
coreerr "forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
|
@ -11,12 +14,16 @@ import (
|
|||
// Chat tool input/output types.
|
||||
|
||||
// ChatSendInput is the input for ide_chat_send.
|
||||
//
|
||||
// input := ChatSendInput{SessionID: "sess-42", Message: "hello"}
|
||||
type ChatSendInput struct {
|
||||
SessionID string `json:"sessionId"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// ChatSendOutput is the output for ide_chat_send.
|
||||
//
|
||||
// // out.Sent == true, out.SessionID == "sess-42"
|
||||
type ChatSendOutput struct {
|
||||
Sent bool `json:"sent"`
|
||||
SessionID string `json:"sessionId"`
|
||||
|
|
@ -24,12 +31,16 @@ type ChatSendOutput struct {
|
|||
}
|
||||
|
||||
// ChatHistoryInput is the input for ide_chat_history.
|
||||
//
|
||||
// input := ChatHistoryInput{SessionID: "sess-42", Limit: 50}
|
||||
type ChatHistoryInput struct {
|
||||
SessionID string `json:"sessionId"`
|
||||
Limit int `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
// ChatMessage represents a single message in history.
|
||||
//
|
||||
// msg := ChatMessage{Role: "user", Content: "hello"}
|
||||
type ChatMessage struct {
|
||||
Role string `json:"role"`
|
||||
Content string `json:"content"`
|
||||
|
|
@ -37,15 +48,21 @@ type ChatMessage struct {
|
|||
}
|
||||
|
||||
// ChatHistoryOutput is the output for ide_chat_history.
|
||||
//
|
||||
// // out.Messages contains the stored chat transcript
|
||||
type ChatHistoryOutput struct {
|
||||
SessionID string `json:"sessionId"`
|
||||
Messages []ChatMessage `json:"messages"`
|
||||
}
|
||||
|
||||
// SessionListInput is the input for ide_session_list.
|
||||
//
|
||||
// input := SessionListInput{}
|
||||
type SessionListInput struct{}
|
||||
|
||||
// Session represents an agent session.
|
||||
//
|
||||
// session := Session{ID: "sess-42", Name: "draft", Status: "running"}
|
||||
type Session struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
|
|
@ -54,67 +71,81 @@ type Session struct {
|
|||
}
|
||||
|
||||
// SessionListOutput is the output for ide_session_list.
|
||||
//
|
||||
// // out.Sessions contains every locally tracked session
|
||||
type SessionListOutput struct {
|
||||
Sessions []Session `json:"sessions"`
|
||||
}
|
||||
|
||||
// SessionCreateInput is the input for ide_session_create.
|
||||
//
|
||||
// input := SessionCreateInput{Name: "draft"}
|
||||
type SessionCreateInput struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// SessionCreateOutput is the output for ide_session_create.
|
||||
//
|
||||
// // out.Session.ID is assigned by the backend or local store
|
||||
type SessionCreateOutput struct {
|
||||
Session Session `json:"session"`
|
||||
}
|
||||
|
||||
// PlanStatusInput is the input for ide_plan_status.
|
||||
//
|
||||
// input := PlanStatusInput{SessionID: "sess-42"}
|
||||
type PlanStatusInput struct {
|
||||
SessionID string `json:"sessionId"`
|
||||
}
|
||||
|
||||
// PlanStep is a single step in an agent plan.
|
||||
//
|
||||
// step := PlanStep{Name: "prep", Status: "done"}
|
||||
type PlanStep struct {
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// PlanStatusOutput is the output for ide_plan_status.
|
||||
//
|
||||
// // out.Steps contains the current plan breakdown
|
||||
type PlanStatusOutput struct {
|
||||
SessionID string `json:"sessionId"`
|
||||
Status string `json:"status"`
|
||||
Steps []PlanStep `json:"steps"`
|
||||
}
|
||||
|
||||
func (s *Subsystem) registerChatTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *Subsystem) registerChatTools(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_chat_send",
|
||||
Description: "Send a message to an agent chat session",
|
||||
}, s.chatSend)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_chat_history",
|
||||
Description: "Retrieve message history for a chat session",
|
||||
}, s.chatHistory)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_session_list",
|
||||
Description: "List active agent sessions",
|
||||
}, s.sessionList)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_session_create",
|
||||
Description: "Create a new agent session",
|
||||
}, s.sessionCreate)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_plan_status",
|
||||
Description: "Get the current plan status for a session",
|
||||
}, s.planStatus)
|
||||
}
|
||||
|
||||
// chatSend forwards a chat message to the Laravel backend via bridge.
|
||||
// Stub implementation: delegates to bridge, real response arrives via WebSocket subscription.
|
||||
// The subsystem also stores the message locally so history lookups can
|
||||
// return something useful before the backend answers.
|
||||
func (s *Subsystem) chatSend(_ context.Context, _ *mcp.CallToolRequest, input ChatSendInput) (*mcp.CallToolResult, ChatSendOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, ChatSendOutput{}, errBridgeNotAvailable
|
||||
|
|
@ -128,6 +159,10 @@ func (s *Subsystem) chatSend(_ context.Context, _ *mcp.CallToolRequest, input Ch
|
|||
if err != nil {
|
||||
return nil, ChatSendOutput{}, coreerr.E("ide.chatSend", "failed to send message", err)
|
||||
}
|
||||
|
||||
s.appendChatMessage(input.SessionID, "user", input.Message)
|
||||
s.recordActivity("chat_send", "forwarded chat message for session "+input.SessionID)
|
||||
|
||||
return nil, ChatSendOutput{
|
||||
Sent: true,
|
||||
SessionID: input.SessionID,
|
||||
|
|
@ -135,67 +170,77 @@ func (s *Subsystem) chatSend(_ context.Context, _ *mcp.CallToolRequest, input Ch
|
|||
}, nil
|
||||
}
|
||||
|
||||
// chatHistory requests message history from the Laravel backend.
|
||||
// Stub implementation: sends request via bridge, returns empty messages. Real data arrives via WebSocket.
|
||||
// chatHistory returns the local message history for a session and refreshes
|
||||
// the Laravel backend when the bridge is available.
|
||||
func (s *Subsystem) chatHistory(_ context.Context, _ *mcp.CallToolRequest, input ChatHistoryInput) (*mcp.CallToolResult, ChatHistoryOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, ChatHistoryOutput{}, errBridgeNotAvailable
|
||||
if s.bridge != nil {
|
||||
// Request history via bridge when available; the local cache still
|
||||
// provides an immediate response in headless mode.
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "chat_history",
|
||||
SessionID: input.SessionID,
|
||||
Data: map[string]any{"limit": input.Limit},
|
||||
})
|
||||
}
|
||||
// Request history via bridge; for now return placeholder indicating the
|
||||
// request was forwarded. Real data arrives via WebSocket subscription.
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "chat_history",
|
||||
SessionID: input.SessionID,
|
||||
Data: map[string]any{"limit": input.Limit},
|
||||
})
|
||||
return nil, ChatHistoryOutput{
|
||||
SessionID: input.SessionID,
|
||||
Messages: []ChatMessage{},
|
||||
Messages: s.chatMessages(input.SessionID),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// sessionList requests the session list from the Laravel backend.
|
||||
// Stub implementation: sends request via bridge, returns empty sessions. Awaiting Laravel backend.
|
||||
// sessionList returns the local session cache and refreshes the Laravel
|
||||
// backend when the bridge is available.
|
||||
func (s *Subsystem) sessionList(_ context.Context, _ *mcp.CallToolRequest, _ SessionListInput) (*mcp.CallToolResult, SessionListOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, SessionListOutput{}, errBridgeNotAvailable
|
||||
if s.bridge != nil {
|
||||
_ = s.bridge.Send(BridgeMessage{Type: "session_list"})
|
||||
}
|
||||
_ = s.bridge.Send(BridgeMessage{Type: "session_list"})
|
||||
return nil, SessionListOutput{Sessions: []Session{}}, nil
|
||||
return nil, SessionListOutput{Sessions: s.listSessions()}, nil
|
||||
}
|
||||
|
||||
// sessionCreate requests a new session from the Laravel backend.
|
||||
// Stub implementation: sends request via bridge, returns placeholder session. Awaiting Laravel backend.
|
||||
// sessionCreate creates a local session record immediately and forwards the
|
||||
// request to the Laravel backend when the bridge is available.
|
||||
func (s *Subsystem) sessionCreate(_ context.Context, _ *mcp.CallToolRequest, input SessionCreateInput) (*mcp.CallToolResult, SessionCreateOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, SessionCreateOutput{}, errBridgeNotAvailable
|
||||
if s.bridge != nil {
|
||||
if err := s.bridge.Send(BridgeMessage{
|
||||
Type: "session_create",
|
||||
Data: map[string]any{"name": input.Name},
|
||||
}); err != nil {
|
||||
return nil, SessionCreateOutput{}, err
|
||||
}
|
||||
}
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "session_create",
|
||||
Data: map[string]any{"name": input.Name},
|
||||
})
|
||||
session := Session{
|
||||
ID: newSessionID(),
|
||||
Name: input.Name,
|
||||
Status: "creating",
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
s.addSession(session)
|
||||
s.recordActivity("session_create", "created session "+session.ID)
|
||||
return nil, SessionCreateOutput{
|
||||
Session: Session{
|
||||
Name: input.Name,
|
||||
Status: "creating",
|
||||
CreatedAt: time.Now(),
|
||||
},
|
||||
Session: session,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// planStatus requests plan status from the Laravel backend.
|
||||
// Stub implementation: sends request via bridge, returns "unknown" status. Awaiting Laravel backend.
|
||||
// planStatus returns the local best-effort session status and refreshes the
|
||||
// Laravel backend when the bridge is available.
|
||||
func (s *Subsystem) planStatus(_ context.Context, _ *mcp.CallToolRequest, input PlanStatusInput) (*mcp.CallToolResult, PlanStatusOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, PlanStatusOutput{}, errBridgeNotAvailable
|
||||
if s.bridge != nil {
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "plan_status",
|
||||
SessionID: input.SessionID,
|
||||
})
|
||||
}
|
||||
s.stateMu.Lock()
|
||||
session, ok := s.sessions[input.SessionID]
|
||||
s.stateMu.Unlock()
|
||||
|
||||
status := "unknown"
|
||||
if ok && session.Status != "" {
|
||||
status = session.Status
|
||||
}
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "plan_status",
|
||||
SessionID: input.SessionID,
|
||||
})
|
||||
return nil, PlanStatusOutput{
|
||||
SessionID: input.SessionID,
|
||||
Status: "unknown",
|
||||
Status: status,
|
||||
Steps: []PlanStep{},
|
||||
}, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,18 +1,25 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package ide
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// Dashboard tool input/output types.
|
||||
|
||||
// DashboardOverviewInput is the input for ide_dashboard_overview.
|
||||
//
|
||||
// input := DashboardOverviewInput{}
|
||||
type DashboardOverviewInput struct{}
|
||||
|
||||
// DashboardOverview contains high-level platform stats.
|
||||
//
|
||||
// overview := DashboardOverview{Repos: 12, ActiveSessions: 3}
|
||||
type DashboardOverview struct {
|
||||
Repos int `json:"repos"`
|
||||
Services int `json:"services"`
|
||||
|
|
@ -22,16 +29,22 @@ type DashboardOverview struct {
|
|||
}
|
||||
|
||||
// DashboardOverviewOutput is the output for ide_dashboard_overview.
|
||||
//
|
||||
// // out.Overview.BridgeOnline reports bridge connectivity
|
||||
type DashboardOverviewOutput struct {
|
||||
Overview DashboardOverview `json:"overview"`
|
||||
}
|
||||
|
||||
// DashboardActivityInput is the input for ide_dashboard_activity.
|
||||
//
|
||||
// input := DashboardActivityInput{Limit: 25}
|
||||
type DashboardActivityInput struct {
|
||||
Limit int `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
// ActivityEvent represents a single activity feed item.
|
||||
//
|
||||
// event := ActivityEvent{Type: "build", Message: "build finished"}
|
||||
type ActivityEvent struct {
|
||||
Type string `json:"type"`
|
||||
Message string `json:"message"`
|
||||
|
|
@ -39,16 +52,22 @@ type ActivityEvent struct {
|
|||
}
|
||||
|
||||
// DashboardActivityOutput is the output for ide_dashboard_activity.
|
||||
//
|
||||
// // out.Events contains the recent activity feed
|
||||
type DashboardActivityOutput struct {
|
||||
Events []ActivityEvent `json:"events"`
|
||||
}
|
||||
|
||||
// DashboardMetricsInput is the input for ide_dashboard_metrics.
|
||||
//
|
||||
// input := DashboardMetricsInput{Period: "24h"}
|
||||
type DashboardMetricsInput struct {
|
||||
Period string `json:"period,omitempty"` // "1h", "24h", "7d"
|
||||
}
|
||||
|
||||
// DashboardMetrics contains aggregate metrics.
|
||||
//
|
||||
// metrics := DashboardMetrics{BuildsTotal: 42, SuccessRate: 0.95}
|
||||
type DashboardMetrics struct {
|
||||
BuildsTotal int `json:"buildsTotal"`
|
||||
BuildsSuccess int `json:"buildsSuccess"`
|
||||
|
|
@ -60,32 +79,38 @@ type DashboardMetrics struct {
|
|||
}
|
||||
|
||||
// DashboardMetricsOutput is the output for ide_dashboard_metrics.
|
||||
//
|
||||
// // out.Metrics summarises the selected time window
|
||||
type DashboardMetricsOutput struct {
|
||||
Period string `json:"period"`
|
||||
Metrics DashboardMetrics `json:"metrics"`
|
||||
}
|
||||
|
||||
func (s *Subsystem) registerDashboardTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
func (s *Subsystem) registerDashboardTools(svc *coremcp.Service) {
|
||||
server := svc.Server()
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_dashboard_overview",
|
||||
Description: "Get a high-level overview of the platform (repos, services, sessions, builds)",
|
||||
}, s.dashboardOverview)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_dashboard_activity",
|
||||
Description: "Get the recent activity feed",
|
||||
}, s.dashboardActivity)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
||||
Name: "ide_dashboard_metrics",
|
||||
Description: "Get aggregate build and agent metrics for a time period",
|
||||
}, s.dashboardMetrics)
|
||||
}
|
||||
|
||||
// dashboardOverview returns a platform overview with bridge status.
|
||||
// Stub implementation: only BridgeOnline is live; other fields return zero values. Awaiting Laravel backend.
|
||||
// dashboardOverview returns a platform overview with bridge status and
|
||||
// locally tracked session counts.
|
||||
func (s *Subsystem) dashboardOverview(_ context.Context, _ *mcp.CallToolRequest, _ DashboardOverviewInput) (*mcp.CallToolResult, DashboardOverviewOutput, error) {
|
||||
connected := s.bridge != nil && s.bridge.Connected()
|
||||
activeSessions := len(s.listSessions())
|
||||
builds := s.listBuilds("", 0)
|
||||
repos := s.buildRepoCount()
|
||||
|
||||
if s.bridge != nil {
|
||||
_ = s.bridge.Send(BridgeMessage{Type: "dashboard_overview"})
|
||||
|
|
@ -93,40 +118,96 @@ func (s *Subsystem) dashboardOverview(_ context.Context, _ *mcp.CallToolRequest,
|
|||
|
||||
return nil, DashboardOverviewOutput{
|
||||
Overview: DashboardOverview{
|
||||
BridgeOnline: connected,
|
||||
Repos: repos,
|
||||
Services: len(builds),
|
||||
ActiveSessions: activeSessions,
|
||||
RecentBuilds: len(builds),
|
||||
BridgeOnline: connected,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// dashboardActivity requests the activity feed from the Laravel backend.
|
||||
// Stub implementation: sends request via bridge, returns empty events. Awaiting Laravel backend.
|
||||
// dashboardActivity returns the local activity feed and refreshes the Laravel
|
||||
// backend when the bridge is available.
|
||||
func (s *Subsystem) dashboardActivity(_ context.Context, _ *mcp.CallToolRequest, input DashboardActivityInput) (*mcp.CallToolResult, DashboardActivityOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, DashboardActivityOutput{}, errBridgeNotAvailable
|
||||
if s.bridge != nil {
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "dashboard_activity",
|
||||
Data: map[string]any{"limit": input.Limit},
|
||||
})
|
||||
}
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "dashboard_activity",
|
||||
Data: map[string]any{"limit": input.Limit},
|
||||
})
|
||||
return nil, DashboardActivityOutput{Events: []ActivityEvent{}}, nil
|
||||
return nil, DashboardActivityOutput{Events: s.activityFeed(input.Limit)}, nil
|
||||
}
|
||||
|
||||
// dashboardMetrics requests aggregate metrics from the Laravel backend.
|
||||
// Stub implementation: sends request via bridge, returns zero metrics. Awaiting Laravel backend.
|
||||
// dashboardMetrics returns local session and message counts and refreshes the
|
||||
// Laravel backend when the bridge is available.
|
||||
func (s *Subsystem) dashboardMetrics(_ context.Context, _ *mcp.CallToolRequest, input DashboardMetricsInput) (*mcp.CallToolResult, DashboardMetricsOutput, error) {
|
||||
if s.bridge == nil {
|
||||
return nil, DashboardMetricsOutput{}, errBridgeNotAvailable
|
||||
}
|
||||
period := input.Period
|
||||
if period == "" {
|
||||
period = "24h"
|
||||
}
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "dashboard_metrics",
|
||||
Data: map[string]any{"period": period},
|
||||
})
|
||||
if s.bridge != nil {
|
||||
_ = s.bridge.Send(BridgeMessage{
|
||||
Type: "dashboard_metrics",
|
||||
Data: map[string]any{"period": period},
|
||||
})
|
||||
}
|
||||
|
||||
s.stateMu.Lock()
|
||||
sessions := len(s.sessions)
|
||||
messages := 0
|
||||
builds := make([]BuildInfo, 0, len(s.buildOrder))
|
||||
for _, id := range s.buildOrder {
|
||||
if build, ok := s.builds[id]; ok {
|
||||
builds = append(builds, build)
|
||||
}
|
||||
}
|
||||
for _, history := range s.chats {
|
||||
messages += len(history)
|
||||
}
|
||||
s.stateMu.Unlock()
|
||||
|
||||
total := len(builds)
|
||||
success := 0
|
||||
failed := 0
|
||||
var durationTotal time.Duration
|
||||
var durationCount int
|
||||
for _, build := range builds {
|
||||
switch build.Status {
|
||||
case "success", "succeeded", "completed", "passed":
|
||||
success++
|
||||
case "failed", "error":
|
||||
failed++
|
||||
}
|
||||
if build.Duration == "" {
|
||||
continue
|
||||
}
|
||||
if d, err := time.ParseDuration(build.Duration); err == nil {
|
||||
durationTotal += d
|
||||
durationCount++
|
||||
}
|
||||
}
|
||||
|
||||
avgBuildTime := ""
|
||||
if durationCount > 0 {
|
||||
avgBuildTime = (durationTotal / time.Duration(durationCount)).String()
|
||||
}
|
||||
|
||||
successRate := 0.0
|
||||
if total > 0 {
|
||||
successRate = float64(success) / float64(total)
|
||||
}
|
||||
|
||||
return nil, DashboardMetricsOutput{
|
||||
Period: period,
|
||||
Metrics: DashboardMetrics{},
|
||||
Period: period,
|
||||
Metrics: DashboardMetrics{
|
||||
BuildsTotal: total,
|
||||
BuildsSuccess: success,
|
||||
BuildsFailed: failed,
|
||||
AvgBuildTime: avgBuildTime,
|
||||
AgentSessions: sessions,
|
||||
MessagesTotal: messages,
|
||||
SuccessRate: successRate,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
||||
"forge.lthn.ai/core/go-ws"
|
||||
)
|
||||
|
||||
|
|
@ -18,6 +19,16 @@ func newNilBridgeSubsystem() *Subsystem {
|
|||
return New(nil, Config{})
|
||||
}
|
||||
|
||||
type recordingNotifier struct {
|
||||
channel string
|
||||
data any
|
||||
}
|
||||
|
||||
func (r *recordingNotifier) ChannelSend(_ context.Context, channel string, data any) {
|
||||
r.channel = channel
|
||||
r.data = data
|
||||
}
|
||||
|
||||
// newConnectedSubsystem returns a Subsystem with a connected bridge and a
|
||||
// running echo WS server. Caller must cancel ctx and close server when done.
|
||||
func newConnectedSubsystem(t *testing.T) (*Subsystem, context.CancelFunc, *httptest.Server) {
|
||||
|
|
@ -90,56 +101,90 @@ func TestChatSend_Good_Connected(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestChatHistory_Bad_NilBridge verifies chatHistory returns error without a bridge.
|
||||
func TestChatHistory_Bad_NilBridge(t *testing.T) {
|
||||
// TestChatHistory_Good_NilBridge verifies chatHistory returns local cache without a bridge.
|
||||
func TestChatHistory_Good_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, _, err := sub.chatHistory(context.Background(), nil, ChatHistoryInput{
|
||||
_, out, err := sub.chatHistory(context.Background(), nil, ChatHistoryInput{
|
||||
SessionID: "s1",
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error when bridge is nil")
|
||||
if err != nil {
|
||||
t.Fatalf("chatHistory failed: %v", err)
|
||||
}
|
||||
if out.SessionID != "s1" {
|
||||
t.Errorf("expected sessionId 's1', got %q", out.SessionID)
|
||||
}
|
||||
if out.Messages == nil {
|
||||
t.Error("expected non-nil messages slice")
|
||||
}
|
||||
}
|
||||
|
||||
// TestChatHistory_Good_Connected verifies chatHistory succeeds and returns empty messages.
|
||||
// TestChatHistory_Good_Connected verifies chatHistory succeeds and returns stored messages.
|
||||
func TestChatHistory_Good_Connected(t *testing.T) {
|
||||
sub, cancel, ts := newConnectedSubsystem(t)
|
||||
defer cancel()
|
||||
defer ts.Close()
|
||||
|
||||
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
||||
Name: "history-test",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("sessionCreate failed: %v", err)
|
||||
}
|
||||
|
||||
_, _, err = sub.chatSend(context.Background(), nil, ChatSendInput{
|
||||
SessionID: sub.listSessions()[0].ID,
|
||||
Message: "hello history",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("chatSend failed: %v", err)
|
||||
}
|
||||
|
||||
_, out, err := sub.chatHistory(context.Background(), nil, ChatHistoryInput{
|
||||
SessionID: "sess-1",
|
||||
SessionID: sub.listSessions()[0].ID,
|
||||
Limit: 50,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("chatHistory failed: %v", err)
|
||||
}
|
||||
if out.SessionID != "sess-1" {
|
||||
t.Errorf("expected sessionId 'sess-1', got %q", out.SessionID)
|
||||
if out.SessionID != sub.listSessions()[0].ID {
|
||||
t.Errorf("expected sessionId %q, got %q", sub.listSessions()[0].ID, out.SessionID)
|
||||
}
|
||||
if out.Messages == nil {
|
||||
t.Error("expected non-nil messages slice")
|
||||
}
|
||||
if len(out.Messages) != 0 {
|
||||
t.Errorf("expected 0 messages (stub), got %d", len(out.Messages))
|
||||
if len(out.Messages) != 1 {
|
||||
t.Errorf("expected 1 stored message, got %d", len(out.Messages))
|
||||
}
|
||||
if out.Messages[0].Content != "hello history" {
|
||||
t.Errorf("expected stored message content %q, got %q", "hello history", out.Messages[0].Content)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionList_Bad_NilBridge verifies sessionList returns error without a bridge.
|
||||
func TestSessionList_Bad_NilBridge(t *testing.T) {
|
||||
// TestSessionList_Good_NilBridge verifies sessionList returns local sessions without a bridge.
|
||||
func TestSessionList_Good_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, _, err := sub.sessionList(context.Background(), nil, SessionListInput{})
|
||||
if err == nil {
|
||||
t.Error("expected error when bridge is nil")
|
||||
_, out, err := sub.sessionList(context.Background(), nil, SessionListInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("sessionList failed: %v", err)
|
||||
}
|
||||
if out.Sessions == nil {
|
||||
t.Error("expected non-nil sessions slice")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionList_Good_Connected verifies sessionList returns empty sessions.
|
||||
// TestSessionList_Good_Connected verifies sessionList returns stored sessions.
|
||||
func TestSessionList_Good_Connected(t *testing.T) {
|
||||
sub, cancel, ts := newConnectedSubsystem(t)
|
||||
defer cancel()
|
||||
defer ts.Close()
|
||||
|
||||
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
||||
Name: "session-list-test",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("sessionCreate failed: %v", err)
|
||||
}
|
||||
|
||||
_, out, err := sub.sessionList(context.Background(), nil, SessionListInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("sessionList failed: %v", err)
|
||||
|
|
@ -147,23 +192,32 @@ func TestSessionList_Good_Connected(t *testing.T) {
|
|||
if out.Sessions == nil {
|
||||
t.Error("expected non-nil sessions slice")
|
||||
}
|
||||
if len(out.Sessions) != 0 {
|
||||
t.Errorf("expected 0 sessions (stub), got %d", len(out.Sessions))
|
||||
if len(out.Sessions) != 1 {
|
||||
t.Errorf("expected 1 stored session, got %d", len(out.Sessions))
|
||||
}
|
||||
if out.Sessions[0].ID == "" {
|
||||
t.Error("expected stored session to have an ID")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionCreate_Bad_NilBridge verifies sessionCreate returns error without a bridge.
|
||||
func TestSessionCreate_Bad_NilBridge(t *testing.T) {
|
||||
// TestSessionCreate_Good_NilBridge verifies sessionCreate stores a local session without a bridge.
|
||||
func TestSessionCreate_Good_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
||||
_, out, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
||||
Name: "test",
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error when bridge is nil")
|
||||
if err != nil {
|
||||
t.Fatalf("sessionCreate failed: %v", err)
|
||||
}
|
||||
if out.Session.Name != "test" {
|
||||
t.Errorf("expected session name 'test', got %q", out.Session.Name)
|
||||
}
|
||||
if out.Session.ID == "" {
|
||||
t.Error("expected non-empty session ID")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionCreate_Good_Connected verifies sessionCreate returns a session stub.
|
||||
// TestSessionCreate_Good_Connected verifies sessionCreate returns a stored session.
|
||||
func TestSessionCreate_Good_Connected(t *testing.T) {
|
||||
sub, cancel, ts := newConnectedSubsystem(t)
|
||||
defer cancel()
|
||||
|
|
@ -184,36 +238,52 @@ func TestSessionCreate_Good_Connected(t *testing.T) {
|
|||
if out.Session.CreatedAt.IsZero() {
|
||||
t.Error("expected non-zero CreatedAt")
|
||||
}
|
||||
}
|
||||
|
||||
// TestPlanStatus_Bad_NilBridge verifies planStatus returns error without a bridge.
|
||||
func TestPlanStatus_Bad_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, _, err := sub.planStatus(context.Background(), nil, PlanStatusInput{
|
||||
SessionID: "s1",
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error when bridge is nil")
|
||||
if out.Session.ID == "" {
|
||||
t.Error("expected non-empty session ID")
|
||||
}
|
||||
}
|
||||
|
||||
// TestPlanStatus_Good_Connected verifies planStatus returns a stub status.
|
||||
// TestPlanStatus_Good_NilBridge verifies planStatus returns local status without a bridge.
|
||||
func TestPlanStatus_Good_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, out, err := sub.planStatus(context.Background(), nil, PlanStatusInput{
|
||||
SessionID: "s1",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("planStatus failed: %v", err)
|
||||
}
|
||||
if out.SessionID != "s1" {
|
||||
t.Errorf("expected sessionId 's1', got %q", out.SessionID)
|
||||
}
|
||||
if out.Status != "unknown" {
|
||||
t.Errorf("expected status 'unknown', got %q", out.Status)
|
||||
}
|
||||
}
|
||||
|
||||
// TestPlanStatus_Good_Connected verifies planStatus returns a status for a known session.
|
||||
func TestPlanStatus_Good_Connected(t *testing.T) {
|
||||
sub, cancel, ts := newConnectedSubsystem(t)
|
||||
defer cancel()
|
||||
defer ts.Close()
|
||||
|
||||
_, createOut, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
||||
Name: "plan-status-test",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("sessionCreate failed: %v", err)
|
||||
}
|
||||
|
||||
_, out, err := sub.planStatus(context.Background(), nil, PlanStatusInput{
|
||||
SessionID: "sess-7",
|
||||
SessionID: createOut.Session.ID,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("planStatus failed: %v", err)
|
||||
}
|
||||
if out.SessionID != "sess-7" {
|
||||
t.Errorf("expected sessionId 'sess-7', got %q", out.SessionID)
|
||||
if out.SessionID != createOut.Session.ID {
|
||||
t.Errorf("expected sessionId %q, got %q", createOut.Session.ID, out.SessionID)
|
||||
}
|
||||
if out.Status != "unknown" {
|
||||
t.Errorf("expected status 'unknown', got %q", out.Status)
|
||||
if out.Status != "creating" {
|
||||
t.Errorf("expected status 'creating', got %q", out.Status)
|
||||
}
|
||||
if out.Steps == nil {
|
||||
t.Error("expected non-nil steps slice")
|
||||
|
|
@ -222,14 +292,20 @@ func TestPlanStatus_Good_Connected(t *testing.T) {
|
|||
|
||||
// --- 4.3: Build tool tests ---
|
||||
|
||||
// TestBuildStatus_Bad_NilBridge verifies buildStatus returns error without a bridge.
|
||||
func TestBuildStatus_Bad_NilBridge(t *testing.T) {
|
||||
// TestBuildStatus_Good_NilBridge verifies buildStatus returns a local stub without a bridge.
|
||||
func TestBuildStatus_Good_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, _, err := sub.buildStatus(context.Background(), nil, BuildStatusInput{
|
||||
_, out, err := sub.buildStatus(context.Background(), nil, BuildStatusInput{
|
||||
BuildID: "b1",
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error when bridge is nil")
|
||||
if err != nil {
|
||||
t.Fatalf("buildStatus failed: %v", err)
|
||||
}
|
||||
if out.Build.ID != "b1" {
|
||||
t.Errorf("expected build ID 'b1', got %q", out.Build.ID)
|
||||
}
|
||||
if out.Build.Status != "unknown" {
|
||||
t.Errorf("expected status 'unknown', got %q", out.Build.Status)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -253,15 +329,74 @@ func TestBuildStatus_Good_Connected(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestBuildList_Bad_NilBridge verifies buildList returns error without a bridge.
|
||||
func TestBuildList_Bad_NilBridge(t *testing.T) {
|
||||
// TestBuildStatus_Good_EmitsLifecycle verifies bridge updates broadcast build lifecycle events.
|
||||
func TestBuildStatus_Good_EmitsLifecycle(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, _, err := sub.buildList(context.Background(), nil, BuildListInput{
|
||||
notifier := &recordingNotifier{}
|
||||
sub.SetNotifier(notifier)
|
||||
|
||||
sub.handleBridgeMessage(BridgeMessage{
|
||||
Type: "build_status",
|
||||
Data: map[string]any{
|
||||
"buildId": "build-1",
|
||||
"repo": "core-php",
|
||||
"branch": "main",
|
||||
"status": "success",
|
||||
},
|
||||
})
|
||||
|
||||
if notifier.channel != coremcp.ChannelBuildComplete {
|
||||
t.Fatalf("expected %s channel, got %q", coremcp.ChannelBuildComplete, notifier.channel)
|
||||
}
|
||||
payload, ok := notifier.data.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected payload map, got %T", notifier.data)
|
||||
}
|
||||
if payload["id"] != "build-1" {
|
||||
t.Fatalf("expected build id build-1, got %v", payload["id"])
|
||||
}
|
||||
}
|
||||
|
||||
// TestBuildStatus_Good_EmitsStartLifecycle verifies running builds broadcast a start event.
|
||||
func TestBuildStatus_Good_EmitsStartLifecycle(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
notifier := &recordingNotifier{}
|
||||
sub.SetNotifier(notifier)
|
||||
|
||||
sub.handleBridgeMessage(BridgeMessage{
|
||||
Type: "build_status",
|
||||
Data: map[string]any{
|
||||
"buildId": "build-2",
|
||||
"repo": "core-php",
|
||||
"branch": "main",
|
||||
"status": "running",
|
||||
},
|
||||
})
|
||||
|
||||
if notifier.channel != coremcp.ChannelBuildStart {
|
||||
t.Fatalf("expected %s channel, got %q", coremcp.ChannelBuildStart, notifier.channel)
|
||||
}
|
||||
payload, ok := notifier.data.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected payload map, got %T", notifier.data)
|
||||
}
|
||||
if payload["id"] != "build-2" {
|
||||
t.Fatalf("expected build id build-2, got %v", payload["id"])
|
||||
}
|
||||
}
|
||||
|
||||
// TestBuildList_Good_NilBridge verifies buildList returns an empty list without a bridge.
|
||||
func TestBuildList_Good_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, out, err := sub.buildList(context.Background(), nil, BuildListInput{
|
||||
Repo: "core-php",
|
||||
Limit: 10,
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error when bridge is nil")
|
||||
if err != nil {
|
||||
t.Fatalf("buildList failed: %v", err)
|
||||
}
|
||||
if out.Builds == nil {
|
||||
t.Error("expected non-nil builds slice")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -286,15 +421,21 @@ func TestBuildList_Good_Connected(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestBuildLogs_Bad_NilBridge verifies buildLogs returns error without a bridge.
|
||||
func TestBuildLogs_Bad_NilBridge(t *testing.T) {
|
||||
// TestBuildLogs_Good_NilBridge verifies buildLogs returns empty lines without a bridge.
|
||||
func TestBuildLogs_Good_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, _, err := sub.buildLogs(context.Background(), nil, BuildLogsInput{
|
||||
_, out, err := sub.buildLogs(context.Background(), nil, BuildLogsInput{
|
||||
BuildID: "b1",
|
||||
Tail: 100,
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error when bridge is nil")
|
||||
if err != nil {
|
||||
t.Fatalf("buildLogs failed: %v", err)
|
||||
}
|
||||
if out.BuildID != "b1" {
|
||||
t.Errorf("expected buildId 'b1', got %q", out.BuildID)
|
||||
}
|
||||
if out.Lines == nil {
|
||||
t.Error("expected non-nil lines slice")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -337,12 +478,19 @@ func TestDashboardOverview_Good_NilBridge(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestDashboardOverview_Good_Connected verifies dashboardOverview reports bridge online.
|
||||
// TestDashboardOverview_Good_Connected verifies dashboardOverview reports bridge online and local sessions.
|
||||
func TestDashboardOverview_Good_Connected(t *testing.T) {
|
||||
sub, cancel, ts := newConnectedSubsystem(t)
|
||||
defer cancel()
|
||||
defer ts.Close()
|
||||
|
||||
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
||||
Name: "dashboard-test",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("sessionCreate failed: %v", err)
|
||||
}
|
||||
|
||||
_, out, err := sub.dashboardOverview(context.Background(), nil, DashboardOverviewInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("dashboardOverview failed: %v", err)
|
||||
|
|
@ -350,25 +498,38 @@ func TestDashboardOverview_Good_Connected(t *testing.T) {
|
|||
if !out.Overview.BridgeOnline {
|
||||
t.Error("expected BridgeOnline=true when bridge is connected")
|
||||
}
|
||||
}
|
||||
|
||||
// TestDashboardActivity_Bad_NilBridge verifies dashboardActivity returns error without bridge.
|
||||
func TestDashboardActivity_Bad_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, _, err := sub.dashboardActivity(context.Background(), nil, DashboardActivityInput{
|
||||
Limit: 10,
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error when bridge is nil")
|
||||
if out.Overview.ActiveSessions != 1 {
|
||||
t.Errorf("expected 1 active session, got %d", out.Overview.ActiveSessions)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDashboardActivity_Good_Connected verifies dashboardActivity returns empty events.
|
||||
// TestDashboardActivity_Good_NilBridge verifies dashboardActivity returns local activity without bridge.
|
||||
func TestDashboardActivity_Good_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, out, err := sub.dashboardActivity(context.Background(), nil, DashboardActivityInput{
|
||||
Limit: 10,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("dashboardActivity failed: %v", err)
|
||||
}
|
||||
if out.Events == nil {
|
||||
t.Error("expected non-nil events slice")
|
||||
}
|
||||
}
|
||||
|
||||
// TestDashboardActivity_Good_Connected verifies dashboardActivity returns stored events.
|
||||
func TestDashboardActivity_Good_Connected(t *testing.T) {
|
||||
sub, cancel, ts := newConnectedSubsystem(t)
|
||||
defer cancel()
|
||||
defer ts.Close()
|
||||
|
||||
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
||||
Name: "activity-test",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("sessionCreate failed: %v", err)
|
||||
}
|
||||
|
||||
_, out, err := sub.dashboardActivity(context.Background(), nil, DashboardActivityInput{
|
||||
Limit: 20,
|
||||
})
|
||||
|
|
@ -378,19 +539,25 @@ func TestDashboardActivity_Good_Connected(t *testing.T) {
|
|||
if out.Events == nil {
|
||||
t.Error("expected non-nil events slice")
|
||||
}
|
||||
if len(out.Events) != 0 {
|
||||
t.Errorf("expected 0 events (stub), got %d", len(out.Events))
|
||||
if len(out.Events) != 1 {
|
||||
t.Errorf("expected 1 stored event, got %d", len(out.Events))
|
||||
}
|
||||
if len(out.Events) > 0 && out.Events[0].Type != "session_create" {
|
||||
t.Errorf("expected first event type 'session_create', got %q", out.Events[0].Type)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDashboardMetrics_Bad_NilBridge verifies dashboardMetrics returns error without bridge.
|
||||
func TestDashboardMetrics_Bad_NilBridge(t *testing.T) {
|
||||
// TestDashboardMetrics_Good_NilBridge verifies dashboardMetrics returns local metrics without bridge.
|
||||
func TestDashboardMetrics_Good_NilBridge(t *testing.T) {
|
||||
sub := newNilBridgeSubsystem()
|
||||
_, _, err := sub.dashboardMetrics(context.Background(), nil, DashboardMetricsInput{
|
||||
_, out, err := sub.dashboardMetrics(context.Background(), nil, DashboardMetricsInput{
|
||||
Period: "1h",
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error when bridge is nil")
|
||||
if err != nil {
|
||||
t.Fatalf("dashboardMetrics failed: %v", err)
|
||||
}
|
||||
if out.Period != "1h" {
|
||||
t.Errorf("expected period '1h', got %q", out.Period)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -29,9 +29,9 @@ func TestService_Iterators(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestRegistry_SplitTagSeq(t *testing.T) {
|
||||
func TestRegistry_SplitTag(t *testing.T) {
|
||||
tag := "name,omitempty,json"
|
||||
parts := slices.Collect(splitTagSeq(tag))
|
||||
parts := splitTag(tag)
|
||||
expected := []string{"name", "omitempty", "json"}
|
||||
|
||||
if !slices.Equal(parts, expected) {
|
||||
|
|
|
|||
333
pkg/mcp/mcp.go
333
pkg/mcp/mcp.go
|
|
@ -6,11 +6,14 @@ package mcp
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"iter"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
|
|
@ -21,16 +24,17 @@ import (
|
|||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// Service provides a lightweight MCP server with file operations only.
|
||||
// Service provides a lightweight MCP server with file operations and
|
||||
// optional subsystems.
|
||||
// For full GUI features, use the core-gui package.
|
||||
//
|
||||
// svc, err := mcp.New(mcp.Options{WorkspaceRoot: "/home/user/project"})
|
||||
// defer svc.Shutdown(ctx)
|
||||
type Service struct {
|
||||
*core.ServiceRuntime[McpOptions] // Core access via s.Core()
|
||||
*core.ServiceRuntime[struct{}] // Core access via s.Core()
|
||||
|
||||
server *mcp.Server
|
||||
workspaceRoot string // Root directory for file operations (empty = unrestricted)
|
||||
workspaceRoot string // Root directory for file operations (empty = cwd unless Unrestricted)
|
||||
medium io.Medium // Filesystem medium for sandboxed operations
|
||||
subsystems []Subsystem // Additional subsystems registered via Options.Subsystems
|
||||
logger *log.Logger // Logger for tool execution auditing
|
||||
|
|
@ -39,12 +43,11 @@ type Service struct {
|
|||
wsServer *http.Server // WebSocket HTTP server (optional)
|
||||
wsAddr string // WebSocket server address
|
||||
wsMu sync.Mutex // Protects wsServer and wsAddr
|
||||
tools []ToolRecord // Parallel tool registry for REST bridge
|
||||
processMu sync.Mutex // Protects processMeta
|
||||
processMeta map[string]processRuntime
|
||||
tools []ToolRecord // Parallel tool registry for REST bridge
|
||||
}
|
||||
|
||||
// McpOptions configures the MCP service runtime.
|
||||
type McpOptions struct{}
|
||||
|
||||
// Options configures a Service.
|
||||
//
|
||||
// svc, err := mcp.New(mcp.Options{
|
||||
|
|
@ -60,7 +63,7 @@ type Options struct {
|
|||
Subsystems []Subsystem // Additional tool groups registered at startup
|
||||
}
|
||||
|
||||
// New creates a new MCP service with file operations.
|
||||
// New creates a new MCP service with file operations and optional subsystems.
|
||||
//
|
||||
// svc, err := mcp.New(mcp.Options{WorkspaceRoot: "."})
|
||||
func New(opts Options) (*Service, error) {
|
||||
|
|
@ -81,8 +84,8 @@ func New(opts Options) (*Service, error) {
|
|||
server: server,
|
||||
processService: opts.ProcessService,
|
||||
wsHub: opts.WSHub,
|
||||
subsystems: opts.Subsystems,
|
||||
logger: log.Default(),
|
||||
processMeta: make(map[string]processRuntime),
|
||||
}
|
||||
|
||||
// Workspace root: unrestricted, explicit root, or default to cwd
|
||||
|
|
@ -112,21 +115,23 @@ func New(opts Options) (*Service, error) {
|
|||
|
||||
s.registerTools(s.server)
|
||||
|
||||
for _, sub := range s.subsystems {
|
||||
sub.RegisterTools(s.server)
|
||||
s.subsystems = make([]Subsystem, 0, len(opts.Subsystems))
|
||||
for _, sub := range opts.Subsystems {
|
||||
if sub == nil {
|
||||
continue
|
||||
}
|
||||
s.subsystems = append(s.subsystems, sub)
|
||||
if sn, ok := sub.(SubsystemWithNotifier); ok {
|
||||
sn.SetNotifier(s)
|
||||
}
|
||||
// Wire channel callback for subsystems that use func-based notification
|
||||
type channelWirer interface {
|
||||
OnChannel(func(ctx context.Context, channel string, data any))
|
||||
}
|
||||
if cw, ok := sub.(channelWirer); ok {
|
||||
// Wire channel callback for subsystems that use func-based notification.
|
||||
if cw, ok := sub.(SubsystemWithChannelCallback); ok {
|
||||
svc := s // capture for closure
|
||||
cw.OnChannel(func(ctx context.Context, channel string, data any) {
|
||||
svc.ChannelSend(ctx, channel, data)
|
||||
})
|
||||
}
|
||||
sub.RegisterTools(s)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
|
|
@ -138,7 +143,7 @@ func New(opts Options) (*Service, error) {
|
|||
// fmt.Println(sub.Name())
|
||||
// }
|
||||
func (s *Service) Subsystems() []Subsystem {
|
||||
return s.subsystems
|
||||
return slices.Clone(s.subsystems)
|
||||
}
|
||||
|
||||
// SubsystemsSeq returns an iterator over the registered subsystems.
|
||||
|
|
@ -147,7 +152,7 @@ func (s *Service) Subsystems() []Subsystem {
|
|||
// fmt.Println(sub.Name())
|
||||
// }
|
||||
func (s *Service) SubsystemsSeq() iter.Seq[Subsystem] {
|
||||
return slices.Values(s.subsystems)
|
||||
return slices.Values(slices.Clone(s.subsystems))
|
||||
}
|
||||
|
||||
// Tools returns all recorded tool metadata.
|
||||
|
|
@ -156,7 +161,7 @@ func (s *Service) SubsystemsSeq() iter.Seq[Subsystem] {
|
|||
// fmt.Printf("%s (%s): %s\n", t.Name, t.Group, t.Description)
|
||||
// }
|
||||
func (s *Service) Tools() []ToolRecord {
|
||||
return s.tools
|
||||
return slices.Clone(s.tools)
|
||||
}
|
||||
|
||||
// ToolsSeq returns an iterator over all recorded tool metadata.
|
||||
|
|
@ -165,7 +170,7 @@ func (s *Service) Tools() []ToolRecord {
|
|||
// fmt.Println(rec.Name)
|
||||
// }
|
||||
func (s *Service) ToolsSeq() iter.Seq[ToolRecord] {
|
||||
return slices.Values(s.tools)
|
||||
return slices.Values(slices.Clone(s.tools))
|
||||
}
|
||||
|
||||
// Shutdown gracefully shuts down all subsystems that support it.
|
||||
|
|
@ -174,14 +179,40 @@ func (s *Service) ToolsSeq() iter.Seq[ToolRecord] {
|
|||
// defer cancel()
|
||||
// if err := svc.Shutdown(ctx); err != nil { log.Fatal(err) }
|
||||
func (s *Service) Shutdown(ctx context.Context) error {
|
||||
var shutdownErr error
|
||||
|
||||
for _, sub := range s.subsystems {
|
||||
if sh, ok := sub.(SubsystemWithShutdown); ok {
|
||||
if err := sh.Shutdown(ctx); err != nil {
|
||||
return log.E("mcp.Shutdown", "shutdown "+sub.Name(), err)
|
||||
if shutdownErr == nil {
|
||||
shutdownErr = log.E("mcp.Shutdown", "shutdown "+sub.Name(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
if s.wsServer != nil {
|
||||
s.wsMu.Lock()
|
||||
server := s.wsServer
|
||||
s.wsMu.Unlock()
|
||||
|
||||
if err := server.Shutdown(ctx); err != nil && shutdownErr == nil {
|
||||
shutdownErr = log.E("mcp.Shutdown", "shutdown websocket server", err)
|
||||
}
|
||||
|
||||
s.wsMu.Lock()
|
||||
if s.wsServer == server {
|
||||
s.wsServer = nil
|
||||
s.wsAddr = ""
|
||||
}
|
||||
s.wsMu.Unlock()
|
||||
}
|
||||
|
||||
if err := closeWebviewConnection(); err != nil && shutdownErr == nil {
|
||||
shutdownErr = log.E("mcp.Shutdown", "close webview connection", err)
|
||||
}
|
||||
|
||||
return shutdownErr
|
||||
}
|
||||
|
||||
// WSHub returns the WebSocket hub, or nil if not configured.
|
||||
|
|
@ -202,7 +233,30 @@ func (s *Service) ProcessService() *process.Service {
|
|||
return s.processService
|
||||
}
|
||||
|
||||
// registerTools adds file operation tools to the MCP server.
|
||||
// resolveWorkspacePath converts a tool path into the filesystem path the
|
||||
// service actually operates on.
|
||||
//
|
||||
// Sandboxed services keep paths anchored under workspaceRoot. Unrestricted
|
||||
// services preserve absolute paths and clean relative ones against the current
|
||||
// working directory.
|
||||
func (s *Service) resolveWorkspacePath(path string) string {
|
||||
if path == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
if s.workspaceRoot == "" {
|
||||
return filepath.Clean(path)
|
||||
}
|
||||
|
||||
clean := filepath.Clean(string(filepath.Separator) + path)
|
||||
clean = strings.TrimPrefix(clean, string(filepath.Separator))
|
||||
if clean == "." || clean == "" {
|
||||
return s.workspaceRoot
|
||||
}
|
||||
return filepath.Join(s.workspaceRoot, clean)
|
||||
}
|
||||
|
||||
// registerTools adds the built-in tool groups to the MCP server.
|
||||
func (s *Service) registerTools(server *mcp.Server) {
|
||||
// File operations
|
||||
addToolRecorded(s, server, "files", &mcp.Tool{
|
||||
|
|
@ -256,6 +310,13 @@ func (s *Service) registerTools(server *mcp.Server) {
|
|||
Name: "lang_list",
|
||||
Description: "Get list of supported programming languages",
|
||||
}, s.getSupportedLanguages)
|
||||
|
||||
// Additional built-in tool groups.
|
||||
s.registerMetricsTools(server)
|
||||
s.registerRAGTools(server)
|
||||
s.registerProcessTools(server)
|
||||
s.registerWebviewTools(server)
|
||||
s.registerWSTools(server)
|
||||
}
|
||||
|
||||
// Tool input/output types for MCP file operations.
|
||||
|
|
@ -405,7 +466,7 @@ type GetSupportedLanguagesInput struct{}
|
|||
|
||||
// GetSupportedLanguagesOutput contains the list of supported languages.
|
||||
//
|
||||
// // len(out.Languages) == 15
|
||||
// // len(out.Languages) == 23
|
||||
// // out.Languages[0].ID == "typescript"
|
||||
type GetSupportedLanguagesOutput struct {
|
||||
Languages []LanguageInfo `json:"languages"` // all recognised languages
|
||||
|
|
@ -446,6 +507,10 @@ type EditDiffOutput struct {
|
|||
// Tool handlers
|
||||
|
||||
func (s *Service) readFile(ctx context.Context, req *mcp.CallToolRequest, input ReadFileInput) (*mcp.CallToolResult, ReadFileOutput, error) {
|
||||
if s.medium == nil {
|
||||
return nil, ReadFileOutput{}, log.E("mcp.readFile", "workspace medium unavailable", nil)
|
||||
}
|
||||
|
||||
content, err := s.medium.Read(input.Path)
|
||||
if err != nil {
|
||||
return nil, ReadFileOutput{}, log.E("mcp.readFile", "failed to read file", err)
|
||||
|
|
@ -458,6 +523,10 @@ func (s *Service) readFile(ctx context.Context, req *mcp.CallToolRequest, input
|
|||
}
|
||||
|
||||
func (s *Service) writeFile(ctx context.Context, req *mcp.CallToolRequest, input WriteFileInput) (*mcp.CallToolResult, WriteFileOutput, error) {
|
||||
if s.medium == nil {
|
||||
return nil, WriteFileOutput{}, log.E("mcp.writeFile", "workspace medium unavailable", nil)
|
||||
}
|
||||
|
||||
// Medium.Write creates parent directories automatically
|
||||
if err := s.medium.Write(input.Path, input.Content); err != nil {
|
||||
return nil, WriteFileOutput{}, log.E("mcp.writeFile", "failed to write file", err)
|
||||
|
|
@ -466,10 +535,17 @@ func (s *Service) writeFile(ctx context.Context, req *mcp.CallToolRequest, input
|
|||
}
|
||||
|
||||
func (s *Service) listDirectory(ctx context.Context, req *mcp.CallToolRequest, input ListDirectoryInput) (*mcp.CallToolResult, ListDirectoryOutput, error) {
|
||||
if s.medium == nil {
|
||||
return nil, ListDirectoryOutput{}, log.E("mcp.listDirectory", "workspace medium unavailable", nil)
|
||||
}
|
||||
|
||||
entries, err := s.medium.List(input.Path)
|
||||
if err != nil {
|
||||
return nil, ListDirectoryOutput{}, log.E("mcp.listDirectory", "failed to list directory", err)
|
||||
}
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
return entries[i].Name() < entries[j].Name()
|
||||
})
|
||||
result := make([]DirectoryEntry, 0, len(entries))
|
||||
for _, e := range entries {
|
||||
info, _ := e.Info()
|
||||
|
|
@ -478,11 +554,8 @@ func (s *Service) listDirectory(ctx context.Context, req *mcp.CallToolRequest, i
|
|||
size = info.Size()
|
||||
}
|
||||
result = append(result, DirectoryEntry{
|
||||
Name: e.Name(),
|
||||
Path: core.JoinPath(input.Path, e.Name()), // Note: This might be relative path, client might expect absolute?
|
||||
// Issue 103 says "Replace ... with local.Medium sandboxing".
|
||||
// Previous code returned `core.JoinPath(input.Path, e.Name())`.
|
||||
// If input.Path is relative, this preserves it.
|
||||
Name: e.Name(),
|
||||
Path: directoryEntryPath(input.Path, e.Name()),
|
||||
IsDir: e.IsDir(),
|
||||
Size: size,
|
||||
})
|
||||
|
|
@ -490,7 +563,23 @@ func (s *Service) listDirectory(ctx context.Context, req *mcp.CallToolRequest, i
|
|||
return nil, ListDirectoryOutput{Entries: result, Path: input.Path}, nil
|
||||
}
|
||||
|
||||
// directoryEntryPath returns the documented display path for a directory entry.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// directoryEntryPath("src", "main.go") == "src/main.go"
|
||||
func directoryEntryPath(dir, name string) string {
|
||||
if dir == "" {
|
||||
return name
|
||||
}
|
||||
return core.JoinPath(dir, name)
|
||||
}
|
||||
|
||||
func (s *Service) createDirectory(ctx context.Context, req *mcp.CallToolRequest, input CreateDirectoryInput) (*mcp.CallToolResult, CreateDirectoryOutput, error) {
|
||||
if s.medium == nil {
|
||||
return nil, CreateDirectoryOutput{}, log.E("mcp.createDirectory", "workspace medium unavailable", nil)
|
||||
}
|
||||
|
||||
if err := s.medium.EnsureDir(input.Path); err != nil {
|
||||
return nil, CreateDirectoryOutput{}, log.E("mcp.createDirectory", "failed to create directory", err)
|
||||
}
|
||||
|
|
@ -498,6 +587,10 @@ func (s *Service) createDirectory(ctx context.Context, req *mcp.CallToolRequest,
|
|||
}
|
||||
|
||||
func (s *Service) deleteFile(ctx context.Context, req *mcp.CallToolRequest, input DeleteFileInput) (*mcp.CallToolResult, DeleteFileOutput, error) {
|
||||
if s.medium == nil {
|
||||
return nil, DeleteFileOutput{}, log.E("mcp.deleteFile", "workspace medium unavailable", nil)
|
||||
}
|
||||
|
||||
if err := s.medium.Delete(input.Path); err != nil {
|
||||
return nil, DeleteFileOutput{}, log.E("mcp.deleteFile", "failed to delete file", err)
|
||||
}
|
||||
|
|
@ -505,6 +598,10 @@ func (s *Service) deleteFile(ctx context.Context, req *mcp.CallToolRequest, inpu
|
|||
}
|
||||
|
||||
func (s *Service) renameFile(ctx context.Context, req *mcp.CallToolRequest, input RenameFileInput) (*mcp.CallToolResult, RenameFileOutput, error) {
|
||||
if s.medium == nil {
|
||||
return nil, RenameFileOutput{}, log.E("mcp.renameFile", "workspace medium unavailable", nil)
|
||||
}
|
||||
|
||||
if err := s.medium.Rename(input.OldPath, input.NewPath); err != nil {
|
||||
return nil, RenameFileOutput{}, log.E("mcp.renameFile", "failed to rename file", err)
|
||||
}
|
||||
|
|
@ -512,21 +609,22 @@ func (s *Service) renameFile(ctx context.Context, req *mcp.CallToolRequest, inpu
|
|||
}
|
||||
|
||||
func (s *Service) fileExists(ctx context.Context, req *mcp.CallToolRequest, input FileExistsInput) (*mcp.CallToolResult, FileExistsOutput, error) {
|
||||
exists := s.medium.IsFile(input.Path)
|
||||
if exists {
|
||||
return nil, FileExistsOutput{Exists: true, IsDir: false, Path: input.Path}, nil
|
||||
if s.medium == nil {
|
||||
return nil, FileExistsOutput{}, log.E("mcp.fileExists", "workspace medium unavailable", nil)
|
||||
}
|
||||
// Check if it's a directory by attempting to list it
|
||||
// List might fail if it's a file too (but we checked IsFile) or if doesn't exist.
|
||||
_, err := s.medium.List(input.Path)
|
||||
isDir := err == nil
|
||||
|
||||
// If List failed, it might mean it doesn't exist OR it's a special file or permissions.
|
||||
// Assuming if List works, it's a directory.
|
||||
|
||||
// Refinement: If it doesn't exist, List returns error.
|
||||
|
||||
return nil, FileExistsOutput{Exists: isDir, IsDir: isDir, Path: input.Path}, nil
|
||||
info, err := s.medium.Stat(input.Path)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil, FileExistsOutput{Exists: false, IsDir: false, Path: input.Path}, nil
|
||||
}
|
||||
return nil, FileExistsOutput{}, log.E("mcp.fileExists", "failed to stat path", err)
|
||||
}
|
||||
return nil, FileExistsOutput{
|
||||
Exists: true,
|
||||
IsDir: info.IsDir(),
|
||||
Path: input.Path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Service) detectLanguage(ctx context.Context, req *mcp.CallToolRequest, input DetectLanguageInput) (*mcp.CallToolResult, DetectLanguageOutput, error) {
|
||||
|
|
@ -535,27 +633,14 @@ func (s *Service) detectLanguage(ctx context.Context, req *mcp.CallToolRequest,
|
|||
}
|
||||
|
||||
func (s *Service) getSupportedLanguages(ctx context.Context, req *mcp.CallToolRequest, input GetSupportedLanguagesInput) (*mcp.CallToolResult, GetSupportedLanguagesOutput, error) {
|
||||
languages := []LanguageInfo{
|
||||
{ID: "typescript", Name: "TypeScript", Extensions: []string{".ts", ".tsx"}},
|
||||
{ID: "javascript", Name: "JavaScript", Extensions: []string{".js", ".jsx"}},
|
||||
{ID: "go", Name: "Go", Extensions: []string{".go"}},
|
||||
{ID: "python", Name: "Python", Extensions: []string{".py"}},
|
||||
{ID: "rust", Name: "Rust", Extensions: []string{".rs"}},
|
||||
{ID: "java", Name: "Java", Extensions: []string{".java"}},
|
||||
{ID: "php", Name: "PHP", Extensions: []string{".php"}},
|
||||
{ID: "ruby", Name: "Ruby", Extensions: []string{".rb"}},
|
||||
{ID: "html", Name: "HTML", Extensions: []string{".html", ".htm"}},
|
||||
{ID: "css", Name: "CSS", Extensions: []string{".css"}},
|
||||
{ID: "json", Name: "JSON", Extensions: []string{".json"}},
|
||||
{ID: "yaml", Name: "YAML", Extensions: []string{".yaml", ".yml"}},
|
||||
{ID: "markdown", Name: "Markdown", Extensions: []string{".md", ".markdown"}},
|
||||
{ID: "sql", Name: "SQL", Extensions: []string{".sql"}},
|
||||
{ID: "shell", Name: "Shell", Extensions: []string{".sh", ".bash"}},
|
||||
}
|
||||
return nil, GetSupportedLanguagesOutput{Languages: languages}, nil
|
||||
return nil, GetSupportedLanguagesOutput{Languages: supportedLanguages()}, nil
|
||||
}
|
||||
|
||||
func (s *Service) editDiff(ctx context.Context, req *mcp.CallToolRequest, input EditDiffInput) (*mcp.CallToolResult, EditDiffOutput, error) {
|
||||
if s.medium == nil {
|
||||
return nil, EditDiffOutput{}, log.E("mcp.editDiff", "workspace medium unavailable", nil)
|
||||
}
|
||||
|
||||
if input.OldString == "" {
|
||||
return nil, EditDiffOutput{}, log.E("mcp.editDiff", "old_string cannot be empty", nil)
|
||||
}
|
||||
|
|
@ -594,57 +679,78 @@ func (s *Service) editDiff(ctx context.Context, req *mcp.CallToolRequest, input
|
|||
|
||||
// detectLanguageFromPath maps file extensions to language IDs.
|
||||
func detectLanguageFromPath(path string) string {
|
||||
if core.PathBase(path) == "Dockerfile" {
|
||||
return "dockerfile"
|
||||
}
|
||||
|
||||
ext := core.PathExt(path)
|
||||
switch ext {
|
||||
case ".ts", ".tsx":
|
||||
return "typescript"
|
||||
case ".js", ".jsx":
|
||||
return "javascript"
|
||||
case ".go":
|
||||
return "go"
|
||||
case ".py":
|
||||
return "python"
|
||||
case ".rs":
|
||||
return "rust"
|
||||
case ".rb":
|
||||
return "ruby"
|
||||
case ".java":
|
||||
return "java"
|
||||
case ".php":
|
||||
return "php"
|
||||
case ".c", ".h":
|
||||
return "c"
|
||||
case ".cpp", ".hpp", ".cc", ".cxx":
|
||||
return "cpp"
|
||||
case ".cs":
|
||||
return "csharp"
|
||||
case ".html", ".htm":
|
||||
return "html"
|
||||
case ".css":
|
||||
return "css"
|
||||
case ".scss":
|
||||
return "scss"
|
||||
case ".json":
|
||||
return "json"
|
||||
case ".yaml", ".yml":
|
||||
return "yaml"
|
||||
case ".xml":
|
||||
return "xml"
|
||||
case ".md", ".markdown":
|
||||
return "markdown"
|
||||
case ".sql":
|
||||
return "sql"
|
||||
case ".sh", ".bash":
|
||||
return "shell"
|
||||
case ".swift":
|
||||
return "swift"
|
||||
case ".kt", ".kts":
|
||||
return "kotlin"
|
||||
default:
|
||||
if core.PathBase(path) == "Dockerfile" {
|
||||
return "dockerfile"
|
||||
}
|
||||
return "plaintext"
|
||||
if lang, ok := languageByExtension[ext]; ok {
|
||||
return lang
|
||||
}
|
||||
return "plaintext"
|
||||
}
|
||||
|
||||
var languageByExtension = map[string]string{
|
||||
".ts": "typescript",
|
||||
".tsx": "typescript",
|
||||
".js": "javascript",
|
||||
".jsx": "javascript",
|
||||
".go": "go",
|
||||
".py": "python",
|
||||
".rs": "rust",
|
||||
".rb": "ruby",
|
||||
".java": "java",
|
||||
".php": "php",
|
||||
".c": "c",
|
||||
".h": "c",
|
||||
".cpp": "cpp",
|
||||
".hpp": "cpp",
|
||||
".cc": "cpp",
|
||||
".cxx": "cpp",
|
||||
".cs": "csharp",
|
||||
".html": "html",
|
||||
".htm": "html",
|
||||
".css": "css",
|
||||
".scss": "scss",
|
||||
".json": "json",
|
||||
".yaml": "yaml",
|
||||
".yml": "yaml",
|
||||
".xml": "xml",
|
||||
".md": "markdown",
|
||||
".markdown": "markdown",
|
||||
".sql": "sql",
|
||||
".sh": "shell",
|
||||
".bash": "shell",
|
||||
".swift": "swift",
|
||||
".kt": "kotlin",
|
||||
".kts": "kotlin",
|
||||
}
|
||||
|
||||
func supportedLanguages() []LanguageInfo {
|
||||
return []LanguageInfo{
|
||||
{ID: "typescript", Name: "TypeScript", Extensions: []string{".ts", ".tsx"}},
|
||||
{ID: "javascript", Name: "JavaScript", Extensions: []string{".js", ".jsx"}},
|
||||
{ID: "go", Name: "Go", Extensions: []string{".go"}},
|
||||
{ID: "python", Name: "Python", Extensions: []string{".py"}},
|
||||
{ID: "rust", Name: "Rust", Extensions: []string{".rs"}},
|
||||
{ID: "ruby", Name: "Ruby", Extensions: []string{".rb"}},
|
||||
{ID: "java", Name: "Java", Extensions: []string{".java"}},
|
||||
{ID: "php", Name: "PHP", Extensions: []string{".php"}},
|
||||
{ID: "c", Name: "C", Extensions: []string{".c", ".h"}},
|
||||
{ID: "cpp", Name: "C++", Extensions: []string{".cpp", ".hpp", ".cc", ".cxx"}},
|
||||
{ID: "csharp", Name: "C#", Extensions: []string{".cs"}},
|
||||
{ID: "html", Name: "HTML", Extensions: []string{".html", ".htm"}},
|
||||
{ID: "css", Name: "CSS", Extensions: []string{".css"}},
|
||||
{ID: "scss", Name: "SCSS", Extensions: []string{".scss"}},
|
||||
{ID: "json", Name: "JSON", Extensions: []string{".json"}},
|
||||
{ID: "yaml", Name: "YAML", Extensions: []string{".yaml", ".yml"}},
|
||||
{ID: "xml", Name: "XML", Extensions: []string{".xml"}},
|
||||
{ID: "markdown", Name: "Markdown", Extensions: []string{".md", ".markdown"}},
|
||||
{ID: "sql", Name: "SQL", Extensions: []string{".sql"}},
|
||||
{ID: "shell", Name: "Shell", Extensions: []string{".sh", ".bash"}},
|
||||
{ID: "swift", Name: "Swift", Extensions: []string{".swift"}},
|
||||
{ID: "kotlin", Name: "Kotlin", Extensions: []string{".kt", ".kts"}},
|
||||
{ID: "dockerfile", Name: "Dockerfile", Extensions: []string{}},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -657,6 +763,10 @@ func detectLanguageFromPath(path string) string {
|
|||
// os.Setenv("MCP_ADDR", "127.0.0.1:9100")
|
||||
// svc.Run(ctx)
|
||||
//
|
||||
// // Unix socket (set MCP_UNIX_SOCKET):
|
||||
// os.Setenv("MCP_UNIX_SOCKET", "/tmp/core-mcp.sock")
|
||||
// svc.Run(ctx)
|
||||
//
|
||||
// // HTTP (set MCP_HTTP_ADDR):
|
||||
// os.Setenv("MCP_HTTP_ADDR", "127.0.0.1:9101")
|
||||
// svc.Run(ctx)
|
||||
|
|
@ -667,6 +777,9 @@ func (s *Service) Run(ctx context.Context) error {
|
|||
if addr := core.Env("MCP_ADDR"); addr != "" {
|
||||
return s.ServeTCP(ctx, addr)
|
||||
}
|
||||
if socketPath := core.Env("MCP_UNIX_SOCKET"); socketPath != "" {
|
||||
return s.ServeUnix(ctx, socketPath)
|
||||
}
|
||||
return s.ServeStdio(ctx)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -55,6 +55,114 @@ func TestNew_Good_NoRestriction(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNew_Good_RegistersBuiltInTools(t *testing.T) {
|
||||
s, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create service: %v", err)
|
||||
}
|
||||
|
||||
tools := map[string]bool{}
|
||||
for _, rec := range s.Tools() {
|
||||
tools[rec.Name] = true
|
||||
}
|
||||
|
||||
for _, name := range []string{
|
||||
"metrics_record",
|
||||
"metrics_query",
|
||||
"rag_query",
|
||||
"rag_ingest",
|
||||
"rag_collections",
|
||||
"webview_connect",
|
||||
"webview_disconnect",
|
||||
"webview_navigate",
|
||||
"webview_click",
|
||||
"webview_type",
|
||||
"webview_query",
|
||||
"webview_console",
|
||||
"webview_eval",
|
||||
"webview_screenshot",
|
||||
"webview_wait",
|
||||
} {
|
||||
if !tools[name] {
|
||||
t.Fatalf("expected tool %q to be registered", name)
|
||||
}
|
||||
}
|
||||
|
||||
for _, name := range []string{"process_start", "ws_start"} {
|
||||
if tools[name] {
|
||||
t.Fatalf("did not expect tool %q to be registered without dependencies", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSupportedLanguages_Good_IncludesAllDetectedLanguages(t *testing.T) {
|
||||
s, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create service: %v", err)
|
||||
}
|
||||
|
||||
_, out, err := s.getSupportedLanguages(nil, nil, GetSupportedLanguagesInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("getSupportedLanguages failed: %v", err)
|
||||
}
|
||||
|
||||
if got, want := len(out.Languages), 23; got != want {
|
||||
t.Fatalf("expected %d supported languages, got %d", want, got)
|
||||
}
|
||||
|
||||
got := map[string]bool{}
|
||||
for _, lang := range out.Languages {
|
||||
got[lang.ID] = true
|
||||
}
|
||||
|
||||
for _, want := range []string{
|
||||
"typescript",
|
||||
"javascript",
|
||||
"go",
|
||||
"python",
|
||||
"rust",
|
||||
"ruby",
|
||||
"java",
|
||||
"php",
|
||||
"c",
|
||||
"cpp",
|
||||
"csharp",
|
||||
"html",
|
||||
"css",
|
||||
"scss",
|
||||
"json",
|
||||
"yaml",
|
||||
"xml",
|
||||
"markdown",
|
||||
"sql",
|
||||
"shell",
|
||||
"swift",
|
||||
"kotlin",
|
||||
"dockerfile",
|
||||
} {
|
||||
if !got[want] {
|
||||
t.Fatalf("expected language %q to be listed", want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectLanguageFromPath_Good_KnownExtensions(t *testing.T) {
|
||||
cases := map[string]string{
|
||||
"main.go": "go",
|
||||
"index.tsx": "typescript",
|
||||
"style.scss": "scss",
|
||||
"Program.cs": "csharp",
|
||||
"module.kt": "kotlin",
|
||||
"docker/Dockerfile": "dockerfile",
|
||||
}
|
||||
|
||||
for path, want := range cases {
|
||||
if got := detectLanguageFromPath(path); got != want {
|
||||
t.Fatalf("detectLanguageFromPath(%q) = %q, want %q", path, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMedium_Good_ReadWrite(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
||||
|
|
@ -108,6 +216,71 @@ func TestMedium_Good_EnsureDir(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFileExists_Good_FileAndDirectory(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create service: %v", err)
|
||||
}
|
||||
|
||||
if err := s.medium.EnsureDir("nested"); err != nil {
|
||||
t.Fatalf("Failed to create directory: %v", err)
|
||||
}
|
||||
if err := s.medium.Write("nested/file.txt", "content"); err != nil {
|
||||
t.Fatalf("Failed to write file: %v", err)
|
||||
}
|
||||
|
||||
_, fileOut, err := s.fileExists(nil, nil, FileExistsInput{Path: "nested/file.txt"})
|
||||
if err != nil {
|
||||
t.Fatalf("fileExists(file) failed: %v", err)
|
||||
}
|
||||
if !fileOut.Exists {
|
||||
t.Fatal("expected file to exist")
|
||||
}
|
||||
if fileOut.IsDir {
|
||||
t.Fatal("expected file to not be reported as a directory")
|
||||
}
|
||||
|
||||
_, dirOut, err := s.fileExists(nil, nil, FileExistsInput{Path: "nested"})
|
||||
if err != nil {
|
||||
t.Fatalf("fileExists(dir) failed: %v", err)
|
||||
}
|
||||
if !dirOut.Exists {
|
||||
t.Fatal("expected directory to exist")
|
||||
}
|
||||
if !dirOut.IsDir {
|
||||
t.Fatal("expected directory to be reported as a directory")
|
||||
}
|
||||
}
|
||||
|
||||
func TestListDirectory_Good_ReturnsDocumentedEntryPaths(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create service: %v", err)
|
||||
}
|
||||
|
||||
if err := s.medium.EnsureDir("nested"); err != nil {
|
||||
t.Fatalf("Failed to create directory: %v", err)
|
||||
}
|
||||
if err := s.medium.Write("nested/file.txt", "content"); err != nil {
|
||||
t.Fatalf("Failed to write file: %v", err)
|
||||
}
|
||||
|
||||
_, out, err := s.listDirectory(nil, nil, ListDirectoryInput{Path: "nested"})
|
||||
if err != nil {
|
||||
t.Fatalf("listDirectory failed: %v", err)
|
||||
}
|
||||
if len(out.Entries) != 1 {
|
||||
t.Fatalf("expected one entry, got %d", len(out.Entries))
|
||||
}
|
||||
|
||||
want := filepath.Join("nested", "file.txt")
|
||||
if out.Entries[0].Path != want {
|
||||
t.Fatalf("expected entry path %q, got %q", want, out.Entries[0].Path)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMedium_Good_IsFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
||||
|
|
@ -129,6 +302,40 @@ func TestMedium_Good_IsFile(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestResolveWorkspacePath_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create service: %v", err)
|
||||
}
|
||||
|
||||
cases := map[string]string{
|
||||
"docs/readme.md": filepath.Join(tmpDir, "docs", "readme.md"),
|
||||
"/docs/readme.md": filepath.Join(tmpDir, "docs", "readme.md"),
|
||||
"../escape/notes.md": filepath.Join(tmpDir, "escape", "notes.md"),
|
||||
"": "",
|
||||
}
|
||||
for input, want := range cases {
|
||||
if got := s.resolveWorkspacePath(input); got != want {
|
||||
t.Fatalf("resolveWorkspacePath(%q) = %q, want %q", input, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveWorkspacePath_Good_Unrestricted(t *testing.T) {
|
||||
s, err := New(Options{Unrestricted: true})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create service: %v", err)
|
||||
}
|
||||
|
||||
if got, want := s.resolveWorkspacePath("docs/readme.md"), filepath.Clean("docs/readme.md"); got != want {
|
||||
t.Fatalf("resolveWorkspacePath(relative) = %q, want %q", got, want)
|
||||
}
|
||||
if got, want := s.resolveWorkspacePath("/tmp/readme.md"), filepath.Clean("/tmp/readme.md"); got != want {
|
||||
t.Fatalf("resolveWorkspacePath(absolute) = %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSandboxing_Traversal_Sanitized(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
||||
|
|
|
|||
|
|
@ -11,11 +11,23 @@ import (
|
|||
"io"
|
||||
"iter"
|
||||
"os"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
func normalizeNotificationContext(ctx context.Context) context.Context {
|
||||
if ctx == nil {
|
||||
return context.Background()
|
||||
}
|
||||
return ctx
|
||||
}
|
||||
|
||||
// lockedWriter wraps an io.Writer with a mutex.
|
||||
// Both the SDK's transport and ChannelSend use this writer,
|
||||
// ensuring channel notifications don't interleave with SDK messages.
|
||||
|
|
@ -36,20 +48,149 @@ func (lw *lockedWriter) Close() error { return nil }
|
|||
// Created once when the MCP service enters stdio mode.
|
||||
var sharedStdout = &lockedWriter{w: os.Stdout}
|
||||
|
||||
// ChannelNotificationMethod is the JSON-RPC method used for named channel
|
||||
// events sent through claude/channel.
|
||||
const ChannelNotificationMethod = "notifications/claude/channel"
|
||||
|
||||
// LoggingNotificationMethod is the JSON-RPC method used for log messages sent
|
||||
// to connected MCP clients.
|
||||
const LoggingNotificationMethod = "notifications/message"
|
||||
|
||||
// ClaudeChannelCapabilityName is the experimental capability key advertised
|
||||
// by the MCP server for channel-based client notifications.
|
||||
const ClaudeChannelCapabilityName = "claude/channel"
|
||||
|
||||
// Shared channel names. Keeping them central avoids drift between emitters
|
||||
// and the advertised claude/channel capability.
|
||||
//
|
||||
// Use these names when emitting structured events from subsystems:
|
||||
//
|
||||
// s.ChannelSend(ctx, ChannelProcessStart, map[string]any{"id": "proc-1"})
|
||||
const (
|
||||
ChannelBuildStart = "build.start"
|
||||
ChannelBuildComplete = "build.complete"
|
||||
ChannelBuildFailed = "build.failed"
|
||||
ChannelAgentComplete = "agent.complete"
|
||||
ChannelAgentBlocked = "agent.blocked"
|
||||
ChannelAgentStatus = "agent.status"
|
||||
ChannelBrainForgetDone = "brain.forget.complete"
|
||||
ChannelBrainListDone = "brain.list.complete"
|
||||
ChannelBrainRecallDone = "brain.recall.complete"
|
||||
ChannelBrainRememberDone = "brain.remember.complete"
|
||||
ChannelHarvestComplete = "harvest.complete"
|
||||
ChannelInboxMessage = "inbox.message"
|
||||
ChannelProcessExit = "process.exit"
|
||||
ChannelProcessStart = "process.start"
|
||||
ChannelProcessOutput = "process.output"
|
||||
ChannelTestResult = "test.result"
|
||||
)
|
||||
|
||||
var channelCapabilityList = []string{
|
||||
ChannelBuildStart,
|
||||
ChannelAgentComplete,
|
||||
ChannelAgentBlocked,
|
||||
ChannelAgentStatus,
|
||||
ChannelBuildComplete,
|
||||
ChannelBuildFailed,
|
||||
ChannelBrainForgetDone,
|
||||
ChannelBrainListDone,
|
||||
ChannelBrainRecallDone,
|
||||
ChannelBrainRememberDone,
|
||||
ChannelHarvestComplete,
|
||||
ChannelInboxMessage,
|
||||
ChannelProcessExit,
|
||||
ChannelProcessStart,
|
||||
ChannelProcessOutput,
|
||||
ChannelTestResult,
|
||||
}
|
||||
|
||||
// ChannelCapabilitySpec describes the experimental claude/channel capability.
|
||||
//
|
||||
// spec := ChannelCapabilitySpec{
|
||||
// Version: "1",
|
||||
// Description: "Push events into client sessions via named channels",
|
||||
// Channels: ChannelCapabilityChannels(),
|
||||
// }
|
||||
type ChannelCapabilitySpec struct {
|
||||
Version string `json:"version"` // e.g. "1"
|
||||
Description string `json:"description"` // capability summary shown to clients
|
||||
Channels []string `json:"channels"` // e.g. []string{"build.complete", "agent.status"}
|
||||
}
|
||||
|
||||
// Map converts the typed capability into the wire-format map expected by the SDK.
|
||||
//
|
||||
// caps := ChannelCapabilitySpec{
|
||||
// Version: "1",
|
||||
// Description: "Push events into client sessions via named channels",
|
||||
// Channels: ChannelCapabilityChannels(),
|
||||
// }.Map()
|
||||
func (c ChannelCapabilitySpec) Map() map[string]any {
|
||||
return map[string]any{
|
||||
"version": c.Version,
|
||||
"description": c.Description,
|
||||
"channels": slices.Clone(c.Channels),
|
||||
}
|
||||
}
|
||||
|
||||
// ChannelNotification is the payload sent through the experimental channel
|
||||
// notification method.
|
||||
//
|
||||
// n := ChannelNotification{
|
||||
// Channel: ChannelBuildComplete,
|
||||
// Data: map[string]any{"repo": "core/mcp"},
|
||||
// }
|
||||
type ChannelNotification struct {
|
||||
Channel string `json:"channel"` // e.g. "build.complete"
|
||||
Data any `json:"data"` // arbitrary payload for the named channel
|
||||
}
|
||||
|
||||
// SendNotificationToAllClients broadcasts a log-level notification to every
|
||||
// connected MCP session (stdio, HTTP, TCP, and Unix).
|
||||
// Errors on individual sessions are logged but do not stop the broadcast.
|
||||
//
|
||||
// s.SendNotificationToAllClients(ctx, "info", "monitor", map[string]any{"event": "build complete"})
|
||||
func (s *Service) SendNotificationToAllClients(ctx context.Context, level mcp.LoggingLevel, logger string, data any) {
|
||||
for session := range s.server.Sessions() {
|
||||
if err := session.Log(ctx, &mcp.LoggingMessageParams{
|
||||
Level: level,
|
||||
Logger: logger,
|
||||
Data: data,
|
||||
}); err != nil {
|
||||
s.logger.Debug("notify: failed to send to session", "session", session.ID(), "error", err)
|
||||
}
|
||||
if s == nil || s.server == nil {
|
||||
return
|
||||
}
|
||||
ctx = normalizeNotificationContext(ctx)
|
||||
s.broadcastToSessions(func(session *mcp.ServerSession) {
|
||||
s.sendLoggingNotificationToSession(ctx, session, level, logger, data)
|
||||
})
|
||||
}
|
||||
|
||||
// SendNotificationToSession sends a log-level notification to one connected
|
||||
// MCP session.
|
||||
//
|
||||
// s.SendNotificationToSession(ctx, session, "info", "monitor", data)
|
||||
func (s *Service) SendNotificationToSession(ctx context.Context, session *mcp.ServerSession, level mcp.LoggingLevel, logger string, data any) {
|
||||
if s == nil || s.server == nil {
|
||||
return
|
||||
}
|
||||
ctx = normalizeNotificationContext(ctx)
|
||||
s.sendLoggingNotificationToSession(ctx, session, level, logger, data)
|
||||
}
|
||||
|
||||
// SendNotificationToClient sends a log-level notification to one connected
|
||||
// MCP client.
|
||||
//
|
||||
// s.SendNotificationToClient(ctx, client, "info", "monitor", data)
|
||||
func (s *Service) SendNotificationToClient(ctx context.Context, client *mcp.ServerSession, level mcp.LoggingLevel, logger string, data any) {
|
||||
s.SendNotificationToSession(ctx, client, level, logger, data)
|
||||
}
|
||||
|
||||
func (s *Service) sendLoggingNotificationToSession(ctx context.Context, session *mcp.ServerSession, level mcp.LoggingLevel, logger string, data any) {
|
||||
if s == nil || s.server == nil || session == nil {
|
||||
return
|
||||
}
|
||||
ctx = normalizeNotificationContext(ctx)
|
||||
|
||||
if err := sendSessionNotification(ctx, session, LoggingNotificationMethod, &mcp.LoggingMessageParams{
|
||||
Level: level,
|
||||
Logger: logger,
|
||||
Data: data,
|
||||
}); err != nil {
|
||||
s.debugNotify("notify: failed to send to session", "session", session.ID(), "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -59,32 +200,39 @@ func (s *Service) SendNotificationToAllClients(ctx context.Context, level mcp.Lo
|
|||
// s.ChannelSend(ctx, "agent.complete", map[string]any{"repo": "go-io", "workspace": "go-io-123"})
|
||||
// s.ChannelSend(ctx, "build.failed", map[string]any{"repo": "core", "error": "test timeout"})
|
||||
func (s *Service) ChannelSend(ctx context.Context, channel string, data any) {
|
||||
payload := map[string]any{
|
||||
"channel": channel,
|
||||
"data": data,
|
||||
if s == nil || s.server == nil {
|
||||
return
|
||||
}
|
||||
s.SendNotificationToAllClients(ctx, mcp.LoggingLevel("info"), "channel", payload)
|
||||
if strings.TrimSpace(channel) == "" {
|
||||
return
|
||||
}
|
||||
ctx = normalizeNotificationContext(ctx)
|
||||
payload := ChannelNotification{Channel: channel, Data: data}
|
||||
s.sendChannelNotificationToAllClients(ctx, payload)
|
||||
}
|
||||
|
||||
// ChannelSendToSession pushes a channel event to a specific session.
|
||||
//
|
||||
// s.ChannelSendToSession(ctx, session, "agent.progress", progressData)
|
||||
func (s *Service) ChannelSendToSession(ctx context.Context, session *mcp.ServerSession, channel string, data any) {
|
||||
if session == nil {
|
||||
if s == nil || s.server == nil || session == nil {
|
||||
return
|
||||
}
|
||||
if strings.TrimSpace(channel) == "" {
|
||||
return
|
||||
}
|
||||
ctx = normalizeNotificationContext(ctx)
|
||||
payload := ChannelNotification{Channel: channel, Data: data}
|
||||
if err := sendSessionNotification(ctx, session, ChannelNotificationMethod, payload); err != nil {
|
||||
s.debugNotify("channel: failed to send to session", "session", session.ID(), "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
payload := map[string]any{
|
||||
"channel": channel,
|
||||
"data": data,
|
||||
}
|
||||
if err := session.Log(ctx, &mcp.LoggingMessageParams{
|
||||
Level: mcp.LoggingLevel("info"),
|
||||
Logger: "channel",
|
||||
Data: payload,
|
||||
}); err != nil {
|
||||
s.logger.Debug("channel: failed to send to session", "session", session.ID(), "error", err)
|
||||
}
|
||||
// ChannelSendToClient pushes a channel event to one connected MCP client.
|
||||
//
|
||||
// s.ChannelSendToClient(ctx, client, "agent.progress", progressData)
|
||||
func (s *Service) ChannelSendToClient(ctx context.Context, client *mcp.ServerSession, channel string, data any) {
|
||||
s.ChannelSendToSession(ctx, client, channel, data)
|
||||
}
|
||||
|
||||
// Sessions returns an iterator over all connected MCP sessions.
|
||||
|
|
@ -93,32 +241,171 @@ func (s *Service) ChannelSendToSession(ctx context.Context, session *mcp.ServerS
|
|||
// s.ChannelSendToSession(ctx, session, "status", data)
|
||||
// }
|
||||
func (s *Service) Sessions() iter.Seq[*mcp.ServerSession] {
|
||||
return s.server.Sessions()
|
||||
if s == nil || s.server == nil {
|
||||
return func(yield func(*mcp.ServerSession) bool) {}
|
||||
}
|
||||
return slices.Values(snapshotSessions(s.server))
|
||||
}
|
||||
|
||||
func (s *Service) sendChannelNotificationToAllClients(ctx context.Context, payload ChannelNotification) {
|
||||
if s == nil || s.server == nil {
|
||||
return
|
||||
}
|
||||
ctx = normalizeNotificationContext(ctx)
|
||||
s.broadcastToSessions(func(session *mcp.ServerSession) {
|
||||
if err := sendSessionNotification(ctx, session, ChannelNotificationMethod, payload); err != nil {
|
||||
s.debugNotify("channel: failed to send to session", "session", session.ID(), "error", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Service) broadcastToSessions(fn func(*mcp.ServerSession)) {
|
||||
if s == nil || s.server == nil || fn == nil {
|
||||
return
|
||||
}
|
||||
for _, session := range snapshotSessions(s.server) {
|
||||
fn(session)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) debugNotify(msg string, args ...any) {
|
||||
if s == nil || s.logger == nil {
|
||||
return
|
||||
}
|
||||
s.logger.Debug(msg, args...)
|
||||
}
|
||||
|
||||
func sendSessionNotification(ctx context.Context, session *mcp.ServerSession, method string, payload any) error {
|
||||
if session == nil {
|
||||
return nil
|
||||
}
|
||||
ctx = normalizeNotificationContext(ctx)
|
||||
|
||||
if conn, err := sessionMCPConnection(session); err == nil {
|
||||
if notifier, ok := conn.(interface {
|
||||
Notify(context.Context, string, any) error
|
||||
}); ok {
|
||||
if err := notifier.Notify(ctx, method, payload); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
conn, err := sessionJSONRPCConnection(session)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
notifier, ok := conn.(interface {
|
||||
Notify(context.Context, string, any) error
|
||||
})
|
||||
if !ok {
|
||||
return coreNotifyError("connection Notify method unavailable")
|
||||
}
|
||||
|
||||
if err := notifier.Notify(ctx, method, payload); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func sessionMCPConnection(session *mcp.ServerSession) (any, error) {
|
||||
value := reflect.ValueOf(session)
|
||||
if value.Kind() != reflect.Ptr || value.IsNil() {
|
||||
return nil, coreNotifyError("invalid session")
|
||||
}
|
||||
|
||||
field := value.Elem().FieldByName("mcpConn")
|
||||
if !field.IsValid() {
|
||||
return nil, coreNotifyError("session mcp connection field unavailable")
|
||||
}
|
||||
|
||||
return reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem().Interface(), nil
|
||||
}
|
||||
|
||||
func sessionJSONRPCConnection(session *mcp.ServerSession) (any, error) {
|
||||
value := reflect.ValueOf(session)
|
||||
if value.Kind() != reflect.Ptr || value.IsNil() {
|
||||
return nil, coreNotifyError("invalid session")
|
||||
}
|
||||
|
||||
field := value.Elem().FieldByName("conn")
|
||||
if !field.IsValid() {
|
||||
return nil, coreNotifyError("session connection field unavailable")
|
||||
}
|
||||
|
||||
return reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem().Interface(), nil
|
||||
}
|
||||
|
||||
func coreNotifyError(message string) error {
|
||||
return ¬ificationError{message: message}
|
||||
}
|
||||
|
||||
func snapshotSessions(server *mcp.Server) []*mcp.ServerSession {
|
||||
if server == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
sessions := make([]*mcp.ServerSession, 0)
|
||||
for session := range server.Sessions() {
|
||||
if session != nil {
|
||||
sessions = append(sessions, session)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(sessions, func(i, j int) bool {
|
||||
return sessions[i].ID() < sessions[j].ID()
|
||||
})
|
||||
|
||||
return sessions
|
||||
}
|
||||
|
||||
type notificationError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e *notificationError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// channelCapability returns the experimental capability descriptor
|
||||
// for claude/channel, registered during New().
|
||||
func channelCapability() map[string]any {
|
||||
return map[string]any{
|
||||
"claude/channel": map[string]any{
|
||||
"version": "1",
|
||||
"description": "Push events into client sessions via named channels",
|
||||
"channels": []string{
|
||||
"agent.complete",
|
||||
"agent.blocked",
|
||||
"agent.status",
|
||||
"build.complete",
|
||||
"build.failed",
|
||||
"brain.list.complete",
|
||||
"brain.forget.complete",
|
||||
"brain.remember.complete",
|
||||
"brain.recall.complete",
|
||||
"inbox.message",
|
||||
"process.start",
|
||||
"process.exit",
|
||||
"harvest.complete",
|
||||
"test.result",
|
||||
},
|
||||
},
|
||||
ClaudeChannelCapabilityName: ClaudeChannelCapability().Map(),
|
||||
}
|
||||
}
|
||||
|
||||
// ClaudeChannelCapability returns the typed experimental capability descriptor.
|
||||
//
|
||||
// cap := ClaudeChannelCapability()
|
||||
// caps := cap.Map()
|
||||
func ClaudeChannelCapability() ChannelCapabilitySpec {
|
||||
return ChannelCapabilitySpec{
|
||||
Version: "1",
|
||||
Description: "Push events into client sessions via named channels",
|
||||
Channels: channelCapabilityChannels(),
|
||||
}
|
||||
}
|
||||
|
||||
// ChannelCapability returns the experimental capability descriptor registered
|
||||
// during New(). Callers can reuse it when exposing server metadata.
|
||||
//
|
||||
// caps := ChannelCapability()
|
||||
func ChannelCapability() map[string]any {
|
||||
return channelCapability()
|
||||
}
|
||||
|
||||
// channelCapabilityChannels lists the named channel events advertised by the
|
||||
// experimental capability.
|
||||
func channelCapabilityChannels() []string {
|
||||
return slices.Clone(channelCapabilityList)
|
||||
}
|
||||
|
||||
// ChannelCapabilityChannels returns the named channel events advertised by the
|
||||
// experimental capability.
|
||||
//
|
||||
// channels := ChannelCapabilityChannels()
|
||||
func ChannelCapabilityChannels() []string {
|
||||
return channelCapabilityChannels()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,10 +1,94 @@
|
|||
package mcp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net"
|
||||
"reflect"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
type notificationReadResult struct {
|
||||
msg map[string]any
|
||||
err error
|
||||
}
|
||||
|
||||
func connectNotificationSession(t *testing.T, svc *Service) (context.CancelFunc, *mcp.ServerSession, net.Conn) {
|
||||
t.Helper()
|
||||
|
||||
serverConn, clientConn := net.Pipe()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
||||
if err != nil {
|
||||
cancel()
|
||||
clientConn.Close()
|
||||
t.Fatalf("Connect() failed: %v", err)
|
||||
}
|
||||
|
||||
return cancel, session, clientConn
|
||||
}
|
||||
|
||||
func readNotificationMessage(t *testing.T, conn net.Conn) <-chan notificationReadResult {
|
||||
t.Helper()
|
||||
|
||||
resultCh := make(chan notificationReadResult, 1)
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(conn)
|
||||
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
|
||||
|
||||
if !scanner.Scan() {
|
||||
resultCh <- notificationReadResult{err: scanner.Err()}
|
||||
return
|
||||
}
|
||||
|
||||
var msg map[string]any
|
||||
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
|
||||
resultCh <- notificationReadResult{err: err}
|
||||
return
|
||||
}
|
||||
|
||||
resultCh <- notificationReadResult{msg: msg}
|
||||
}()
|
||||
|
||||
return resultCh
|
||||
}
|
||||
|
||||
func readNotificationMessageUntil(t *testing.T, conn net.Conn, match func(map[string]any) bool) <-chan notificationReadResult {
|
||||
t.Helper()
|
||||
|
||||
resultCh := make(chan notificationReadResult, 1)
|
||||
scanner := bufio.NewScanner(conn)
|
||||
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
|
||||
|
||||
go func() {
|
||||
for scanner.Scan() {
|
||||
var msg map[string]any
|
||||
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
|
||||
resultCh <- notificationReadResult{err: err}
|
||||
return
|
||||
}
|
||||
if match(msg) {
|
||||
resultCh <- notificationReadResult{msg: msg}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
resultCh <- notificationReadResult{err: err}
|
||||
return
|
||||
}
|
||||
resultCh <- notificationReadResult{err: context.DeadlineExceeded}
|
||||
}()
|
||||
|
||||
return resultCh
|
||||
}
|
||||
|
||||
func TestSendNotificationToAllClients_Good(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
|
|
@ -13,10 +97,141 @@ func TestSendNotificationToAllClients_Good(t *testing.T) {
|
|||
|
||||
ctx := context.Background()
|
||||
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{
|
||||
"event": "build.complete",
|
||||
"event": ChannelBuildComplete,
|
||||
})
|
||||
}
|
||||
|
||||
func TestNotificationMethods_Good_NilService(t *testing.T) {
|
||||
var svc *Service
|
||||
|
||||
ctx := context.Background()
|
||||
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{"ok": true})
|
||||
svc.SendNotificationToSession(ctx, nil, "info", "test", map[string]any{"ok": true})
|
||||
svc.ChannelSend(ctx, ChannelBuildComplete, map[string]any{"ok": true})
|
||||
svc.ChannelSendToSession(ctx, nil, ChannelBuildComplete, map[string]any{"ok": true})
|
||||
|
||||
for range svc.Sessions() {
|
||||
t.Fatal("expected no sessions from nil service")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotificationMethods_Good_NilServer(t *testing.T) {
|
||||
svc := &Service{}
|
||||
|
||||
ctx := context.Background()
|
||||
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{"ok": true})
|
||||
svc.SendNotificationToSession(ctx, nil, "info", "test", map[string]any{"ok": true})
|
||||
svc.ChannelSend(ctx, ChannelBuildComplete, map[string]any{"ok": true})
|
||||
svc.ChannelSendToSession(ctx, nil, ChannelBuildComplete, map[string]any{"ok": true})
|
||||
|
||||
for range svc.Sessions() {
|
||||
t.Fatal("expected no sessions from service without a server")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSessions_Good_ReturnsSnapshot(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
cancel, session, _ := connectNotificationSession(t, svc)
|
||||
snapshot := svc.Sessions()
|
||||
|
||||
cancel()
|
||||
session.Close()
|
||||
|
||||
var sessions []*mcp.ServerSession
|
||||
for session := range snapshot {
|
||||
sessions = append(sessions, session)
|
||||
}
|
||||
|
||||
if len(sessions) != 1 {
|
||||
t.Fatalf("expected snapshot to retain one session, got %d", len(sessions))
|
||||
}
|
||||
if sessions[0] == nil {
|
||||
t.Fatal("expected snapshot session to be non-nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotificationMethods_Good_NilContext(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
svc.SendNotificationToAllClients(nil, "info", "test", map[string]any{"ok": true})
|
||||
svc.SendNotificationToSession(nil, nil, "info", "test", map[string]any{"ok": true})
|
||||
svc.ChannelSend(nil, ChannelBuildComplete, map[string]any{"ok": true})
|
||||
svc.ChannelSendToSession(nil, nil, ChannelBuildComplete, map[string]any{"ok": true})
|
||||
}
|
||||
|
||||
func TestSendNotificationToAllClients_Good_CustomNotification(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
serverConn, clientConn := net.Pipe()
|
||||
defer clientConn.Close()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Connect() failed: %v", err)
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
||||
|
||||
read := readNotificationMessageUntil(t, clientConn, func(msg map[string]any) bool {
|
||||
return msg["method"] == LoggingNotificationMethod
|
||||
})
|
||||
|
||||
sent := make(chan struct{})
|
||||
go func() {
|
||||
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{
|
||||
"event": ChannelBuildComplete,
|
||||
})
|
||||
close(sent)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-sent:
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("timed out waiting for notification send to complete")
|
||||
}
|
||||
|
||||
res := <-read
|
||||
if res.err != nil {
|
||||
t.Fatalf("failed to read notification: %v", res.err)
|
||||
}
|
||||
msg := res.msg
|
||||
if msg["method"] != LoggingNotificationMethod {
|
||||
t.Fatalf("expected method %q, got %v", LoggingNotificationMethod, msg["method"])
|
||||
}
|
||||
|
||||
params, ok := msg["params"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected params object, got %T", msg["params"])
|
||||
}
|
||||
if params["logger"] != "test" {
|
||||
t.Fatalf("expected logger test, got %v", params["logger"])
|
||||
}
|
||||
if params["level"] != "info" {
|
||||
t.Fatalf("expected level info, got %v", params["level"])
|
||||
}
|
||||
data, ok := params["data"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected data object, got %T", params["data"])
|
||||
}
|
||||
if data["event"] != ChannelBuildComplete {
|
||||
t.Fatalf("expected event %s, got %v", ChannelBuildComplete, data["event"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestChannelSend_Good(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
|
|
@ -24,7 +239,7 @@ func TestChannelSend_Good(t *testing.T) {
|
|||
}
|
||||
|
||||
ctx := context.Background()
|
||||
svc.ChannelSend(ctx, "build.complete", map[string]any{
|
||||
svc.ChannelSend(ctx, ChannelBuildComplete, map[string]any{
|
||||
"repo": "go-io",
|
||||
})
|
||||
}
|
||||
|
|
@ -36,14 +251,185 @@ func TestChannelSendToSession_Good_GuardNilSession(t *testing.T) {
|
|||
}
|
||||
|
||||
ctx := context.Background()
|
||||
svc.ChannelSendToSession(ctx, nil, "agent.status", map[string]any{
|
||||
svc.ChannelSendToSession(ctx, nil, ChannelAgentStatus, map[string]any{
|
||||
"ok": true,
|
||||
})
|
||||
}
|
||||
|
||||
func TestSendNotificationToSession_Good_GuardNilSession(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
svc.SendNotificationToSession(ctx, nil, "info", "test", map[string]any{
|
||||
"ok": true,
|
||||
})
|
||||
}
|
||||
|
||||
func TestChannelSendToSession_Good_CustomNotification(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
serverConn, clientConn := net.Pipe()
|
||||
defer clientConn.Close()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Connect() failed: %v", err)
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
||||
|
||||
read := readNotificationMessageUntil(t, clientConn, func(msg map[string]any) bool {
|
||||
return msg["method"] == ChannelNotificationMethod
|
||||
})
|
||||
|
||||
sent := make(chan struct{})
|
||||
go func() {
|
||||
svc.ChannelSendToSession(ctx, session, ChannelBuildComplete, map[string]any{
|
||||
"repo": "go-io",
|
||||
})
|
||||
close(sent)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-sent:
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("timed out waiting for notification send to complete")
|
||||
}
|
||||
|
||||
res := <-read
|
||||
if res.err != nil {
|
||||
t.Fatalf("failed to read custom notification: %v", res.err)
|
||||
}
|
||||
msg := res.msg
|
||||
if msg["method"] != ChannelNotificationMethod {
|
||||
t.Fatalf("expected method %q, got %v", ChannelNotificationMethod, msg["method"])
|
||||
}
|
||||
|
||||
params, ok := msg["params"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected params object, got %T", msg["params"])
|
||||
}
|
||||
if params["channel"] != ChannelBuildComplete {
|
||||
t.Fatalf("expected channel %s, got %v", ChannelBuildComplete, params["channel"])
|
||||
}
|
||||
payload, ok := params["data"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected data object, got %T", params["data"])
|
||||
}
|
||||
if payload["repo"] != "go-io" {
|
||||
t.Fatalf("expected repo go-io, got %v", payload["repo"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestChannelSendToClient_Good_CustomNotification(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
serverConn, clientConn := net.Pipe()
|
||||
defer clientConn.Close()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Connect() failed: %v", err)
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
||||
|
||||
read := readNotificationMessageUntil(t, clientConn, func(msg map[string]any) bool {
|
||||
return msg["method"] == ChannelNotificationMethod
|
||||
})
|
||||
|
||||
sent := make(chan struct{})
|
||||
go func() {
|
||||
svc.ChannelSendToClient(ctx, session, ChannelBuildComplete, map[string]any{
|
||||
"repo": "go-io",
|
||||
})
|
||||
close(sent)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-sent:
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("timed out waiting for notification send to complete")
|
||||
}
|
||||
|
||||
res := <-read
|
||||
if res.err != nil {
|
||||
t.Fatalf("failed to read custom notification: %v", res.err)
|
||||
}
|
||||
msg := res.msg
|
||||
if msg["method"] != ChannelNotificationMethod {
|
||||
t.Fatalf("expected method %q, got %v", ChannelNotificationMethod, msg["method"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestSendNotificationToClient_Good_CustomNotification(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
serverConn, clientConn := net.Pipe()
|
||||
defer clientConn.Close()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Connect() failed: %v", err)
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
||||
|
||||
read := readNotificationMessageUntil(t, clientConn, func(msg map[string]any) bool {
|
||||
return msg["method"] == LoggingNotificationMethod
|
||||
})
|
||||
|
||||
sent := make(chan struct{})
|
||||
go func() {
|
||||
svc.SendNotificationToClient(ctx, session, "info", "test", map[string]any{
|
||||
"event": ChannelBuildComplete,
|
||||
})
|
||||
close(sent)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-sent:
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("timed out waiting for notification send to complete")
|
||||
}
|
||||
|
||||
res := <-read
|
||||
if res.err != nil {
|
||||
t.Fatalf("failed to read notification: %v", res.err)
|
||||
}
|
||||
msg := res.msg
|
||||
if msg["method"] != LoggingNotificationMethod {
|
||||
t.Fatalf("expected method %q, got %v", LoggingNotificationMethod, msg["method"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestChannelCapability_Good(t *testing.T) {
|
||||
caps := channelCapability()
|
||||
raw, ok := caps["claude/channel"]
|
||||
raw, ok := caps[ClaudeChannelCapabilityName]
|
||||
if !ok {
|
||||
t.Fatal("expected claude/channel capability entry")
|
||||
}
|
||||
|
|
@ -65,14 +451,120 @@ func TestChannelCapability_Good(t *testing.T) {
|
|||
t.Fatal("expected at least one channel in capability definition")
|
||||
}
|
||||
|
||||
foundProcessStart := false
|
||||
for _, channel := range channels {
|
||||
if channel == "process.start" {
|
||||
foundProcessStart = true
|
||||
break
|
||||
want := channelCapabilityChannels()
|
||||
if got, wantLen := len(channels), len(want); got != wantLen {
|
||||
t.Fatalf("expected %d channels, got %d", wantLen, got)
|
||||
}
|
||||
|
||||
for _, channel := range want {
|
||||
if !slices.Contains(channels, channel) {
|
||||
t.Fatalf("expected channel %q to be advertised in capability definition", channel)
|
||||
}
|
||||
}
|
||||
if !foundProcessStart {
|
||||
t.Fatal("expected process.start to be advertised in capability definition")
|
||||
}
|
||||
|
||||
func TestChannelCapability_Good_PublicHelpers(t *testing.T) {
|
||||
got := ChannelCapability()
|
||||
want := channelCapability()
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("expected public capability helper to match internal definition")
|
||||
}
|
||||
|
||||
spec := ClaudeChannelCapability()
|
||||
if spec.Version != "1" {
|
||||
t.Fatalf("expected typed capability version 1, got %q", spec.Version)
|
||||
}
|
||||
if spec.Description == "" {
|
||||
t.Fatal("expected typed capability description to be populated")
|
||||
}
|
||||
if !slices.Equal(spec.Channels, channelCapabilityChannels()) {
|
||||
t.Fatalf("expected typed capability channels to match: got %v want %v", spec.Channels, channelCapabilityChannels())
|
||||
}
|
||||
if !reflect.DeepEqual(spec.Map(), want[ClaudeChannelCapabilityName].(map[string]any)) {
|
||||
t.Fatal("expected typed capability map to match wire-format descriptor")
|
||||
}
|
||||
|
||||
gotChannels := ChannelCapabilityChannels()
|
||||
wantChannels := channelCapabilityChannels()
|
||||
if !slices.Equal(gotChannels, wantChannels) {
|
||||
t.Fatalf("expected public channel list to match internal definition: got %v want %v", gotChannels, wantChannels)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChannelCapabilitySpec_Map_Good_ClonesChannels(t *testing.T) {
|
||||
spec := ClaudeChannelCapability()
|
||||
mapped := spec.Map()
|
||||
|
||||
channels, ok := mapped["channels"].([]string)
|
||||
if !ok {
|
||||
t.Fatalf("expected channels to be []string, got %T", mapped["channels"])
|
||||
}
|
||||
if len(channels) == 0 {
|
||||
t.Fatal("expected non-empty channels slice")
|
||||
}
|
||||
|
||||
spec.Channels[0] = "mutated.channel"
|
||||
if channels[0] == "mutated.channel" {
|
||||
t.Fatal("expected Map() to clone the channels slice")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSendNotificationToAllClients_Good_BroadcastsToMultipleSessions connects
// two MCP sessions and checks that a single SendNotificationToAllClients call
// delivers a logging notification, with the expected method and logger name,
// to both of them.
func TestSendNotificationToAllClients_Good_BroadcastsToMultipleSessions(t *testing.T) {
	svc, err := New(Options{})
	if err != nil {
		t.Fatalf("New() failed: %v", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Two independent client sessions; each helper returns its own cancel,
	// session, and raw client connection.
	cancel1, session1, clientConn1 := connectNotificationSession(t, svc)
	defer cancel1()
	defer session1.Close()
	defer clientConn1.Close()

	cancel2, session2, clientConn2 := connectNotificationSession(t, svc)
	defer cancel2()
	defer session2.Close()
	defer clientConn2.Close()

	// Start readers before broadcasting so no message can be missed.
	read1 := readNotificationMessage(t, clientConn1)
	read2 := readNotificationMessage(t, clientConn2)

	// Broadcast from a goroutine: delivery may block until readers drain.
	sent := make(chan struct{})
	go func() {
		svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{
			"event": ChannelBuildComplete,
		})
		close(sent)
	}()

	select {
	case <-sent:
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for broadcast to complete")
	}

	res1 := <-read1
	if res1.err != nil {
		t.Fatalf("failed to read notification from session 1: %v", res1.err)
	}
	res2 := <-read2
	if res2.err != nil {
		t.Fatalf("failed to read notification from session 2: %v", res2.err)
	}

	// Both sessions must have received the same logging notification shape.
	for idx, res := range []notificationReadResult{res1, res2} {
		if res.msg["method"] != LoggingNotificationMethod {
			t.Fatalf("session %d: expected method %q, got %v", idx+1, LoggingNotificationMethod, res.msg["method"])
		}

		params, ok := res.msg["params"].(map[string]any)
		if !ok {
			t.Fatalf("session %d: expected params object, got %T", idx+1, res.msg["params"])
		}
		if params["logger"] != "test" {
			t.Fatalf("session %d: expected logger test, got %v", idx+1, params["logger"])
		}
	}
}
|
||||
|
|
|
|||
123
pkg/mcp/process_notifications.go
Normal file
123
pkg/mcp/process_notifications.go
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// processRuntime captures the launch metadata of a process managed by the
// service, keyed by process ID in Service.processMeta. It is used to enrich
// process exit/kill notifications and to classify test runs.
type processRuntime struct {
	Command   string    // executable that was launched
	Args      []string  // arguments the command was started with
	Dir       string    // working directory; empty when not set
	StartedAt time.Time // launch wall-clock time; zero when unknown
}
|
||||
|
||||
func (s *Service) recordProcessRuntime(id string, meta processRuntime) {
|
||||
if id == "" {
|
||||
return
|
||||
}
|
||||
|
||||
s.processMu.Lock()
|
||||
defer s.processMu.Unlock()
|
||||
|
||||
if s.processMeta == nil {
|
||||
s.processMeta = make(map[string]processRuntime)
|
||||
}
|
||||
s.processMeta[id] = meta
|
||||
}
|
||||
|
||||
func (s *Service) processRuntimeFor(id string) (processRuntime, bool) {
|
||||
s.processMu.Lock()
|
||||
defer s.processMu.Unlock()
|
||||
|
||||
meta, ok := s.processMeta[id]
|
||||
return meta, ok
|
||||
}
|
||||
|
||||
func (s *Service) forgetProcessRuntime(id string) {
|
||||
if id == "" {
|
||||
return
|
||||
}
|
||||
|
||||
s.processMu.Lock()
|
||||
defer s.processMu.Unlock()
|
||||
|
||||
delete(s.processMeta, id)
|
||||
}
|
||||
|
||||
// isTestProcess reports whether a command line looks like a test run, so the
// service can emit a test.result event when the process finishes.
//
// Recognized forms:
//   - go test …, cargo test … (a "test" subcommand as the first argument)
//   - npm/pnpm/yarn/bun invoked with a "test" or "test:*" script argument
//   - well-known test runners invoked directly (pytest, jest, vitest, …)
//
// Matching is case-insensitive on both the executable base name and the
// relevant arguments. Anything else is not a test run.
func isTestProcess(command string, args []string) bool {
	// filepath.Base never returns "" (it returns "." for an empty path), so
	// no empty-string guard is needed; "." simply matches no case below.
	base := strings.ToLower(filepath.Base(command))

	switch base {
	case "go", "cargo":
		// Both toolchains expose tests as a "test" subcommand.
		return len(args) > 0 && strings.EqualFold(args[0], "test")
	case "npm", "pnpm", "yarn", "bun":
		// Package managers run tests via a script: `npm test`, `yarn test:unit`.
		for _, arg := range args {
			if strings.EqualFold(arg, "test") || strings.HasPrefix(strings.ToLower(arg), "test:") {
				return true
			}
		}
		return false
	case "pytest", "phpunit", "jest", "vitest", "rspec", "go-test":
		// Dedicated test runners: any invocation is a test run.
		return true
	}

	return false
}
|
||||
|
||||
// emitTestResult publishes a test.result event on ChannelTestResult for a
// finished process whose recorded command line looks like a test run (see
// isTestProcess). Callers pass signal != "" for a killed process and a
// non-empty errText for runner-level failures. The recorded runtime metadata
// for processID is always dropped, even when no event is emitted.
func (s *Service) emitTestResult(ctx context.Context, processID string, exitCode int, duration time.Duration, signal string, errText string) {
	// Exit is terminal, so the cached metadata is no longer needed afterwards.
	defer s.forgetProcessRuntime(processID)

	meta, ok := s.processRuntimeFor(processID)
	if !ok || !isTestProcess(meta.Command, meta.Args) {
		return
	}

	// If the caller did not measure a duration, approximate it from the
	// recorded start time.
	if duration <= 0 && !meta.StartedAt.IsZero() {
		duration = time.Since(meta.StartedAt)
	}

	// Precedence: aborted (killed by signal) > passed (clean exit) > failed.
	status := "failed"
	if signal != "" {
		status = "aborted"
	} else if exitCode == 0 {
		status = "passed"
	}

	payload := map[string]any{
		"id":      processID,
		"command": meta.Command,
		"args":    meta.Args,
		"status":  status,
		"passed":  status == "passed",
	}
	// Optional fields are attached only when they carry information.
	if meta.Dir != "" {
		payload["dir"] = meta.Dir
	}
	if !meta.StartedAt.IsZero() {
		payload["startedAt"] = meta.StartedAt
	}
	if duration > 0 {
		payload["duration"] = duration
	}
	// exitCode is suppressed only for a pure signal-kill (signal set and exit
	// code zero), where it would carry no information.
	if signal == "" || exitCode != 0 {
		payload["exitCode"] = exitCode
	}
	if signal != "" {
		payload["signal"] = signal
	}
	if errText != "" {
		payload["error"] = errText
	}

	s.ChannelSend(ctx, ChannelTestResult, payload)
}
|
||||
|
|
@ -4,13 +4,17 @@ package mcp
|
|||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"forge.lthn.ai/core/go-process"
|
||||
"forge.lthn.ai/core/go-ws"
|
||||
)
|
||||
|
||||
// Register is the service factory for core.WithService.
|
||||
// Creates the MCP service, discovers subsystems from other Core services,
|
||||
// and wires notifiers.
|
||||
// and wires optional process and WebSocket dependencies when they are
|
||||
// already registered in Core.
|
||||
//
|
||||
// core.New(
|
||||
// core.WithService(agentic.Register),
|
||||
|
|
@ -21,6 +25,8 @@ import (
|
|||
func Register(c *core.Core) core.Result {
|
||||
// Collect subsystems from registered services
|
||||
var subsystems []Subsystem
|
||||
var processService *process.Service
|
||||
var wsHub *ws.Hub
|
||||
for _, name := range c.Services() {
|
||||
r := c.Service(name)
|
||||
if !r.OK {
|
||||
|
|
@ -28,23 +34,34 @@ func Register(c *core.Core) core.Result {
|
|||
}
|
||||
if sub, ok := r.Value.(Subsystem); ok {
|
||||
subsystems = append(subsystems, sub)
|
||||
continue
|
||||
}
|
||||
switch v := r.Value.(type) {
|
||||
case *process.Service:
|
||||
processService = v
|
||||
case *ws.Hub:
|
||||
wsHub = v
|
||||
}
|
||||
}
|
||||
|
||||
svc, err := New(Options{
|
||||
Subsystems: subsystems,
|
||||
ProcessService: processService,
|
||||
WSHub: wsHub,
|
||||
Subsystems: subsystems,
|
||||
})
|
||||
if err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
|
||||
svc.ServiceRuntime = core.NewServiceRuntime(c, McpOptions{})
|
||||
svc.ServiceRuntime = core.NewServiceRuntime(c, struct{}{})
|
||||
|
||||
return core.Result{Value: svc, OK: true}
|
||||
}
|
||||
|
||||
// OnStartup implements core.Startable — registers MCP transport commands.
|
||||
//
|
||||
// svc.OnStartup(context.Background())
|
||||
//
|
||||
// core-agent mcp — start MCP server on stdio
|
||||
// core-agent serve — start MCP server on HTTP
|
||||
func (s *Service) OnStartup(ctx context.Context) core.Result {
|
||||
|
|
@ -65,9 +82,9 @@ func (s *Service) OnStartup(ctx context.Context) core.Result {
|
|||
})
|
||||
|
||||
c.Command("serve", core.Command{
|
||||
Description: "Start as a persistent HTTP daemon",
|
||||
Description: "Start the MCP server with auto-selected transport",
|
||||
Action: func(opts core.Options) core.Result {
|
||||
s.logger.Info("MCP HTTP server starting")
|
||||
s.logger.Info("MCP server starting")
|
||||
if err := s.Run(ctx); err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
}
|
||||
|
|
@ -79,18 +96,89 @@ func (s *Service) OnStartup(ctx context.Context) core.Result {
|
|||
}
|
||||
|
||||
// HandleIPCEvents implements Core's IPC handler interface.
|
||||
// Catches ChannelPush messages from other services and pushes them to Claude Code sessions.
|
||||
//
|
||||
// c.ACTION(mcp.ChannelPush{Channel: "agent.status", Data: statusMap})
|
||||
// Catches ChannelPush messages from other services and pushes them to Claude Code sessions.
|
||||
func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result {
|
||||
ctx := context.Background()
|
||||
if c != nil {
|
||||
if coreCtx := c.Context(); coreCtx != nil {
|
||||
ctx = coreCtx
|
||||
}
|
||||
}
|
||||
|
||||
switch ev := msg.(type) {
|
||||
case ChannelPush:
|
||||
s.ChannelSend(context.Background(), ev.Channel, ev.Data)
|
||||
s.ChannelSend(ctx, ev.Channel, ev.Data)
|
||||
case process.ActionProcessStarted:
|
||||
startedAt := time.Now()
|
||||
s.recordProcessRuntime(ev.ID, processRuntime{
|
||||
Command: ev.Command,
|
||||
Args: ev.Args,
|
||||
Dir: ev.Dir,
|
||||
StartedAt: startedAt,
|
||||
})
|
||||
s.ChannelSend(ctx, ChannelProcessStart, map[string]any{
|
||||
"id": ev.ID,
|
||||
"command": ev.Command,
|
||||
"args": ev.Args,
|
||||
"dir": ev.Dir,
|
||||
"pid": ev.PID,
|
||||
"startedAt": startedAt,
|
||||
})
|
||||
case process.ActionProcessOutput:
|
||||
s.ChannelSend(ctx, ChannelProcessOutput, map[string]any{
|
||||
"id": ev.ID,
|
||||
"line": ev.Line,
|
||||
"stream": ev.Stream,
|
||||
})
|
||||
case process.ActionProcessExited:
|
||||
meta, ok := s.processRuntimeFor(ev.ID)
|
||||
payload := map[string]any{
|
||||
"id": ev.ID,
|
||||
"exitCode": ev.ExitCode,
|
||||
"duration": ev.Duration,
|
||||
}
|
||||
if ok {
|
||||
payload["command"] = meta.Command
|
||||
payload["args"] = meta.Args
|
||||
payload["dir"] = meta.Dir
|
||||
if !meta.StartedAt.IsZero() {
|
||||
payload["startedAt"] = meta.StartedAt
|
||||
}
|
||||
}
|
||||
if ev.Error != nil {
|
||||
payload["error"] = ev.Error.Error()
|
||||
}
|
||||
s.ChannelSend(ctx, ChannelProcessExit, payload)
|
||||
errText := ""
|
||||
if ev.Error != nil {
|
||||
errText = ev.Error.Error()
|
||||
}
|
||||
s.emitTestResult(ctx, ev.ID, ev.ExitCode, ev.Duration, "", errText)
|
||||
case process.ActionProcessKilled:
|
||||
meta, ok := s.processRuntimeFor(ev.ID)
|
||||
payload := map[string]any{
|
||||
"id": ev.ID,
|
||||
"signal": ev.Signal,
|
||||
}
|
||||
if ok {
|
||||
payload["command"] = meta.Command
|
||||
payload["args"] = meta.Args
|
||||
payload["dir"] = meta.Dir
|
||||
if !meta.StartedAt.IsZero() {
|
||||
payload["startedAt"] = meta.StartedAt
|
||||
}
|
||||
}
|
||||
s.ChannelSend(ctx, ChannelProcessExit, payload)
|
||||
s.emitTestResult(ctx, ev.ID, 0, 0, ev.Signal, "")
|
||||
}
|
||||
return core.Result{OK: true}
|
||||
}
|
||||
|
||||
// OnShutdown implements core.Stoppable — stops the MCP transport.
|
||||
//
|
||||
// svc.OnShutdown(context.Background())
|
||||
func (s *Service) OnShutdown(ctx context.Context) core.Result {
|
||||
if err := s.Shutdown(ctx); err != nil {
|
||||
return core.Result{Value: err, OK: false}
|
||||
|
|
|
|||
334
pkg/mcp/register_test.go
Normal file
334
pkg/mcp/register_test.go
Normal file
|
|
@ -0,0 +1,334 @@
|
|||
package mcp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"dappco.re/go/core"
|
||||
"forge.lthn.ai/core/go-process"
|
||||
"forge.lthn.ai/core/go-ws"
|
||||
)
|
||||
|
||||
// TestRegister_Good_WiresOptionalServices verifies that Register discovers an
// already-registered process service and WebSocket hub from Core, wires those
// exact instances into the MCP service, and exposes their dependency-gated
// tool groups.
func TestRegister_Good_WiresOptionalServices(t *testing.T) {
	c := core.New()

	ps := &process.Service{}
	hub := ws.NewHub()

	if r := c.RegisterService("process", ps); !r.OK {
		t.Fatalf("failed to register process service: %v", r.Value)
	}
	if r := c.RegisterService("ws", hub); !r.OK {
		t.Fatalf("failed to register ws hub: %v", r.Value)
	}

	result := Register(c)
	if !result.OK {
		t.Fatalf("Register() failed: %v", result.Value)
	}

	svc, ok := result.Value.(*Service)
	if !ok {
		t.Fatalf("expected *Service, got %T", result.Value)
	}

	// Identity check: the instances registered above, not fresh ones.
	if svc.ProcessService() != ps {
		t.Fatalf("expected process service to be wired")
	}
	if svc.WSHub() != hub {
		t.Fatalf("expected ws hub to be wired")
	}

	// Tool groups appear only when their backing dependency is present.
	tools := map[string]bool{}
	for _, rec := range svc.Tools() {
		tools[rec.Name] = true
	}
	if !tools["process_start"] {
		t.Fatal("expected process tools to be registered when process service is available")
	}
	if !tools["ws_start"] {
		t.Fatal("expected ws tools to be registered when ws hub is available")
	}
}
|
||||
|
||||
// TestHandleIPCEvents_Good_ForwardsProcessActions verifies that a
// process.ActionProcessStarted IPC event is re-published to a connected MCP
// session as a process.start channel notification carrying the ID, command,
// args, dir, pid, and a start timestamp.
func TestHandleIPCEvents_Good_ForwardsProcessActions(t *testing.T) {
	svc, err := New(Options{})
	if err != nil {
		t.Fatalf("New() failed: %v", err)
	}

	// In-memory transport: the "client" side reads raw JSON-RPC lines.
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
	if err != nil {
		t.Fatalf("Connect() failed: %v", err)
	}
	defer session.Close()

	// Background reader: decode each newline-delimited JSON message.
	clientConn.SetDeadline(time.Now().Add(5 * time.Second))
	scanner := bufio.NewScanner(clientConn)
	scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
	received := make(chan map[string]any, 8)
	errCh := make(chan error, 1)
	go func() {
		for scanner.Scan() {
			var msg map[string]any
			if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
				errCh <- err
				return
			}
			received <- msg
		}
		if err := scanner.Err(); err != nil {
			errCh <- err
			return
		}
		close(received)
	}()

	// nil Core exercises the handler's fallback context path.
	result := svc.HandleIPCEvents(nil, process.ActionProcessStarted{
		ID:      "proc-1",
		Command: "go",
		Args:    []string{"test", "./..."},
		Dir:     "/workspace",
		PID:     1234,
	})
	if !result.OK {
		t.Fatalf("HandleIPCEvents() returned non-OK result: %#v", result.Value)
	}

	deadline := time.NewTimer(5 * time.Second)
	defer deadline.Stop()

	// Skip unrelated notifications until the process.start one arrives.
	for {
		select {
		case err := <-errCh:
			t.Fatalf("failed to read notification: %v", err)
		case msg, ok := <-received:
			if !ok {
				t.Fatal("notification stream closed before expected message arrived")
			}
			if msg["method"] != ChannelNotificationMethod {
				continue
			}

			params, ok := msg["params"].(map[string]any)
			if !ok {
				t.Fatalf("expected params object, got %T", msg["params"])
			}
			if params["channel"] != ChannelProcessStart {
				continue
			}

			payload, ok := params["data"].(map[string]any)
			if !ok {
				t.Fatalf("expected data object, got %T", params["data"])
			}
			if payload["id"] != "proc-1" || payload["command"] != "go" {
				t.Fatalf("unexpected payload: %#v", payload)
			}
			if payload["dir"] != "/workspace" {
				t.Fatalf("expected dir /workspace, got %#v", payload["dir"])
			}
			// JSON numbers decode to float64.
			if payload["pid"] != float64(1234) {
				t.Fatalf("expected pid 1234, got %#v", payload["pid"])
			}
			if payload["args"] == nil {
				t.Fatalf("expected args in payload, got %#v", payload)
			}
			return
		case <-deadline.C:
			t.Fatal("timed out waiting for process start notification")
		}
	}
}
|
||||
|
||||
func TestHandleIPCEvents_Good_ForwardsProcessOutput(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
serverConn, clientConn := net.Pipe()
|
||||
defer clientConn.Close()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Connect() failed: %v", err)
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
||||
scanner := bufio.NewScanner(clientConn)
|
||||
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
|
||||
received := make(chan map[string]any, 8)
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
for scanner.Scan() {
|
||||
var msg map[string]any
|
||||
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
received <- msg
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
close(received)
|
||||
}()
|
||||
|
||||
result := svc.HandleIPCEvents(nil, process.ActionProcessOutput{
|
||||
ID: "proc-1",
|
||||
Line: "hello world",
|
||||
Stream: process.StreamStdout,
|
||||
})
|
||||
if !result.OK {
|
||||
t.Fatalf("HandleIPCEvents() returned non-OK result: %#v", result.Value)
|
||||
}
|
||||
|
||||
deadline := time.NewTimer(5 * time.Second)
|
||||
defer deadline.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case err := <-errCh:
|
||||
t.Fatalf("failed to read notification: %v", err)
|
||||
case msg, ok := <-received:
|
||||
if !ok {
|
||||
t.Fatal("notification stream closed before expected message arrived")
|
||||
}
|
||||
if msg["method"] != ChannelNotificationMethod {
|
||||
continue
|
||||
}
|
||||
|
||||
params, ok := msg["params"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected params object, got %T", msg["params"])
|
||||
}
|
||||
if params["channel"] != ChannelProcessOutput {
|
||||
continue
|
||||
}
|
||||
|
||||
payload, ok := params["data"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected data object, got %T", msg["params"])
|
||||
}
|
||||
if payload["id"] != "proc-1" || payload["line"] != "hello world" || payload["stream"] != string(process.StreamStdout) {
|
||||
t.Fatalf("unexpected payload: %#v", payload)
|
||||
}
|
||||
return
|
||||
case <-deadline.C:
|
||||
t.Fatal("timed out waiting for process output notification")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleIPCEvents_Good_ForwardsTestResult(t *testing.T) {
|
||||
svc, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
serverConn, clientConn := net.Pipe()
|
||||
defer clientConn.Close()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Connect() failed: %v", err)
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
svc.recordProcessRuntime("proc-test", processRuntime{
|
||||
Command: "go",
|
||||
Args: []string{"test", "./..."},
|
||||
StartedAt: time.Now().Add(-2 * time.Second),
|
||||
})
|
||||
|
||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
||||
scanner := bufio.NewScanner(clientConn)
|
||||
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
|
||||
received := make(chan map[string]any, 8)
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
for scanner.Scan() {
|
||||
var msg map[string]any
|
||||
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
received <- msg
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
close(received)
|
||||
}()
|
||||
|
||||
result := svc.HandleIPCEvents(nil, process.ActionProcessExited{
|
||||
ID: "proc-test",
|
||||
ExitCode: 0,
|
||||
Duration: 2 * time.Second,
|
||||
})
|
||||
if !result.OK {
|
||||
t.Fatalf("HandleIPCEvents() returned non-OK result: %#v", result.Value)
|
||||
}
|
||||
|
||||
deadline := time.NewTimer(5 * time.Second)
|
||||
defer deadline.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case err := <-errCh:
|
||||
t.Fatalf("failed to read notification: %v", err)
|
||||
case msg, ok := <-received:
|
||||
if !ok {
|
||||
t.Fatal("notification stream closed before expected message arrived")
|
||||
}
|
||||
if msg["method"] != ChannelNotificationMethod {
|
||||
continue
|
||||
}
|
||||
|
||||
params, ok := msg["params"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected params object, got %T", msg["params"])
|
||||
}
|
||||
if params["channel"] != ChannelTestResult {
|
||||
continue
|
||||
}
|
||||
|
||||
payload, ok := params["data"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected data object, got %T", msg["params"])
|
||||
}
|
||||
if payload["id"] != "proc-test" || payload["command"] != "go" {
|
||||
t.Fatalf("unexpected payload: %#v", payload)
|
||||
}
|
||||
if payload["dir"] != nil {
|
||||
t.Fatalf("expected dir to be absent when not recorded, got %#v", payload["dir"])
|
||||
}
|
||||
if payload["status"] != "passed" || payload["passed"] != true {
|
||||
t.Fatalf("expected passed test result, got %#v", payload)
|
||||
}
|
||||
return
|
||||
case <-deadline.C:
|
||||
t.Fatal("timed out waiting for test result notification")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -4,8 +4,8 @@ package mcp
|
|||
|
||||
import (
|
||||
"context"
|
||||
"iter"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
|
|
@ -21,6 +21,38 @@ import (
|
|||
// }
|
||||
type RESTHandler func(ctx context.Context, body []byte) (any, error)
|
||||
|
||||
// errInvalidRESTInput is the sentinel matched via errors.Is to detect
// malformed JSON bodies passed to the REST bridge.
var errInvalidRESTInput = &restInputError{}

// restInputError marks invalid REST input and optionally carries the
// underlying decode error. Identity is matched by type (see Is), so any
// *restInputError satisfies errors.Is(err, errInvalidRESTInput) without
// stdlib error constructors.
type restInputError struct {
	cause error
}

// Error renders "invalid REST input", appending the cause when present.
// It tolerates nil receivers and nil causes.
func (e *restInputError) Error() string {
	const msg = "invalid REST input"
	if e != nil && e.cause != nil {
		return msg + ": " + e.cause.Error()
	}
	return msg
}

// Unwrap exposes the underlying cause to errors.Is / errors.As chains.
func (e *restInputError) Unwrap() error {
	if e == nil {
		return nil
	}
	return e.cause
}

// Is reports a match for any *restInputError target, regardless of cause.
func (e *restInputError) Is(target error) bool {
	_, ok := target.(*restInputError)
	return ok
}

// invalidRESTInputError wraps cause (which may be nil) as invalid REST input.
func invalidRESTInputError(cause error) error {
	return &restInputError{cause: cause}
}
|
||||
|
||||
// ToolRecord captures metadata about a registered MCP tool.
|
||||
//
|
||||
// for _, rec := range svc.Tools() {
|
||||
|
|
@ -35,11 +67,17 @@ type ToolRecord struct {
|
|||
RESTHandler RESTHandler // REST-callable handler created at registration time
|
||||
}
|
||||
|
||||
// addToolRecorded registers a tool with the MCP server AND records its metadata.
|
||||
// AddToolRecorded registers a tool with the MCP server and records its metadata.
|
||||
// This is a generic function that captures the In/Out types for schema extraction.
|
||||
// It also creates a RESTHandler closure that can unmarshal JSON to the correct
|
||||
// input type and call the handler directly, enabling the MCP-to-REST bridge.
|
||||
func addToolRecorded[In, Out any](s *Service, server *mcp.Server, group string, t *mcp.Tool, h mcp.ToolHandlerFor[In, Out]) {
|
||||
//
|
||||
// svc, _ := mcp.New(mcp.Options{})
|
||||
// mcp.AddToolRecorded(svc, svc.Server(), "files", &mcp.Tool{Name: "file_read"},
|
||||
// func(context.Context, *mcp.CallToolRequest, ReadFileInput) (*mcp.CallToolResult, ReadFileOutput, error) {
|
||||
// return nil, ReadFileOutput{Path: "src/main.go"}, nil
|
||||
// })
|
||||
func AddToolRecorded[In, Out any](s *Service, server *mcp.Server, group string, t *mcp.Tool, h mcp.ToolHandlerFor[In, Out]) {
|
||||
mcp.AddTool(server, t, h)
|
||||
|
||||
restHandler := func(ctx context.Context, body []byte) (any, error) {
|
||||
|
|
@ -47,9 +85,9 @@ func addToolRecorded[In, Out any](s *Service, server *mcp.Server, group string,
|
|||
if len(body) > 0 {
|
||||
if r := core.JSONUnmarshal(body, &input); !r.OK {
|
||||
if err, ok := r.Value.(error); ok {
|
||||
return nil, err
|
||||
return nil, invalidRESTInputError(err)
|
||||
}
|
||||
return nil, core.E("registry.RESTHandler", "failed to unmarshal input", nil)
|
||||
return nil, invalidRESTInputError(nil)
|
||||
}
|
||||
}
|
||||
// nil: REST callers have no MCP request context.
|
||||
|
|
@ -68,6 +106,10 @@ func addToolRecorded[In, Out any](s *Service, server *mcp.Server, group string,
|
|||
})
|
||||
}
|
||||
|
||||
// addToolRecorded is the unexported alias of AddToolRecorded, kept so
// existing in-package call sites continue to compile after the helper was
// exported. It simply forwards all arguments.
func addToolRecorded[In, Out any](s *Service, server *mcp.Server, group string, t *mcp.Tool, h mcp.ToolHandlerFor[In, Out]) {
	AddToolRecorded(s, server, group, t, h)
}
|
||||
|
||||
// structSchema builds a simple JSON Schema from a struct's json tags via reflection.
|
||||
// Returns nil for non-struct types or empty structs.
|
||||
func structSchema(v any) map[string]any {
|
||||
|
|
@ -81,52 +123,7 @@ func structSchema(v any) map[string]any {
|
|||
if t.Kind() != reflect.Struct {
|
||||
return nil
|
||||
}
|
||||
if t.NumField() == 0 {
|
||||
return map[string]any{"type": "object", "properties": map[string]any{}}
|
||||
}
|
||||
|
||||
properties := make(map[string]any)
|
||||
required := make([]string, 0)
|
||||
|
||||
for f := range t.Fields() {
|
||||
f := f
|
||||
if !f.IsExported() {
|
||||
continue
|
||||
}
|
||||
jsonTag := f.Tag.Get("json")
|
||||
if jsonTag == "-" {
|
||||
continue
|
||||
}
|
||||
name := f.Name
|
||||
isOptional := false
|
||||
if jsonTag != "" {
|
||||
parts := splitTag(jsonTag)
|
||||
name = parts[0]
|
||||
for _, p := range parts[1:] {
|
||||
if p == "omitempty" {
|
||||
isOptional = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prop := map[string]any{
|
||||
"type": goTypeToJSONType(f.Type),
|
||||
}
|
||||
properties[name] = prop
|
||||
|
||||
if !isOptional {
|
||||
required = append(required, name)
|
||||
}
|
||||
}
|
||||
|
||||
schema := map[string]any{
|
||||
"type": "object",
|
||||
"properties": properties,
|
||||
}
|
||||
if len(required) > 0 {
|
||||
schema["required"] = required
|
||||
}
|
||||
return schema
|
||||
return schemaForType(t, map[reflect.Type]bool{})
|
||||
}
|
||||
|
||||
// splitTag splits a struct tag value by commas.
|
||||
|
|
@ -134,19 +131,6 @@ func splitTag(tag string) []string {
|
|||
return core.Split(tag, ",")
|
||||
}
|
||||
|
||||
// splitTagSeq returns an iterator over the tag parts.
|
||||
func splitTagSeq(tag string) iter.Seq[string] {
|
||||
// core.Split returns []string; wrap as iterator
|
||||
parts := core.Split(tag, ",")
|
||||
return func(yield func(string) bool) {
|
||||
for _, p := range parts {
|
||||
if !yield(p) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// goTypeToJSONType maps Go types to JSON Schema types.
|
||||
func goTypeToJSONType(t reflect.Type) string {
|
||||
switch t.Kind() {
|
||||
|
|
@ -167,3 +151,120 @@ func goTypeToJSONType(t reflect.Type) string {
|
|||
return "string"
|
||||
}
|
||||
}
|
||||
|
||||
func schemaForType(t reflect.Type, seen map[reflect.Type]bool) map[string]any {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if isTimeType(t) {
|
||||
return map[string]any{
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
}
|
||||
}
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Interface:
|
||||
return map[string]any{}
|
||||
|
||||
case reflect.Struct:
|
||||
if seen[t] {
|
||||
return map[string]any{"type": "object"}
|
||||
}
|
||||
seen[t] = true
|
||||
|
||||
properties := make(map[string]any)
|
||||
required := make([]string, 0, t.NumField())
|
||||
|
||||
for f := range t.Fields() {
|
||||
f := f
|
||||
if !f.IsExported() {
|
||||
continue
|
||||
}
|
||||
|
||||
jsonTag := f.Tag.Get("json")
|
||||
if jsonTag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
name := f.Name
|
||||
isOptional := false
|
||||
if jsonTag != "" {
|
||||
parts := splitTag(jsonTag)
|
||||
name = parts[0]
|
||||
for _, p := range parts[1:] {
|
||||
if p == "omitempty" {
|
||||
isOptional = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prop := schemaForType(f.Type, cloneSeenSet(seen))
|
||||
if prop == nil {
|
||||
prop = map[string]any{"type": goTypeToJSONType(f.Type)}
|
||||
}
|
||||
properties[name] = prop
|
||||
|
||||
if !isOptional {
|
||||
required = append(required, name)
|
||||
}
|
||||
}
|
||||
|
||||
schema := map[string]any{
|
||||
"type": "object",
|
||||
"properties": properties,
|
||||
}
|
||||
if len(required) > 0 {
|
||||
schema["required"] = required
|
||||
}
|
||||
return schema
|
||||
|
||||
case reflect.Slice, reflect.Array:
|
||||
schema := map[string]any{
|
||||
"type": "array",
|
||||
"items": schemaForType(t.Elem(), cloneSeenSet(seen)),
|
||||
}
|
||||
return schema
|
||||
|
||||
case reflect.Map:
|
||||
schema := map[string]any{
|
||||
"type": "object",
|
||||
}
|
||||
if t.Key().Kind() == reflect.String {
|
||||
if valueSchema := schemaForType(t.Elem(), cloneSeenSet(seen)); valueSchema != nil {
|
||||
schema["additionalProperties"] = valueSchema
|
||||
}
|
||||
}
|
||||
return schema
|
||||
|
||||
default:
|
||||
if typeName := goTypeToJSONType(t); typeName != "" {
|
||||
return map[string]any{"type": typeName}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func cloneSeenSet(seen map[reflect.Type]bool) map[reflect.Type]bool {
|
||||
if len(seen) == 0 {
|
||||
return map[reflect.Type]bool{}
|
||||
}
|
||||
clone := make(map[reflect.Type]bool, len(seen))
|
||||
for t := range seen {
|
||||
clone[t] = true
|
||||
}
|
||||
return clone
|
||||
}
|
||||
|
||||
func isTimeType(t reflect.Type) bool {
|
||||
return t == reflect.TypeOf(time.Time{})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,11 @@
|
|||
package mcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-process"
|
||||
)
|
||||
|
||||
func TestToolRegistry_Good_RecordsTools(t *testing.T) {
|
||||
|
|
@ -68,8 +72,12 @@ func TestToolRegistry_Good_ToolCount(t *testing.T) {
|
|||
|
||||
tools := svc.Tools()
|
||||
// Built-in tools: file_read, file_write, file_delete, file_rename,
|
||||
// file_exists, file_edit, dir_list, dir_create, lang_detect, lang_list
|
||||
const expectedCount = 10
|
||||
// file_exists, file_edit, dir_list, dir_create, lang_detect, lang_list,
|
||||
// metrics_record, metrics_query, rag_query, rag_ingest, rag_collections,
|
||||
// webview_connect, webview_disconnect, webview_navigate, webview_click,
|
||||
// webview_type, webview_query, webview_console, webview_eval,
|
||||
// webview_screenshot, webview_wait
|
||||
const expectedCount = 25
|
||||
if len(tools) != expectedCount {
|
||||
t.Errorf("expected %d tools, got %d", expectedCount, len(tools))
|
||||
for _, tr := range tools {
|
||||
|
|
@ -86,6 +94,9 @@ func TestToolRegistry_Good_GroupAssignment(t *testing.T) {
|
|||
|
||||
fileTools := []string{"file_read", "file_write", "file_delete", "file_rename", "file_exists", "file_edit", "dir_list", "dir_create"}
|
||||
langTools := []string{"lang_detect", "lang_list"}
|
||||
metricsTools := []string{"metrics_record", "metrics_query"}
|
||||
ragTools := []string{"rag_query", "rag_ingest", "rag_collections"}
|
||||
webviewTools := []string{"webview_connect", "webview_disconnect", "webview_navigate", "webview_click", "webview_type", "webview_query", "webview_console", "webview_eval", "webview_screenshot", "webview_wait"}
|
||||
|
||||
byName := make(map[string]ToolRecord)
|
||||
for _, tr := range svc.Tools() {
|
||||
|
|
@ -113,6 +124,39 @@ func TestToolRegistry_Good_GroupAssignment(t *testing.T) {
|
|||
t.Errorf("tool %s: expected group 'language', got %q", name, tr.Group)
|
||||
}
|
||||
}
|
||||
|
||||
for _, name := range metricsTools {
|
||||
tr, ok := byName[name]
|
||||
if !ok {
|
||||
t.Errorf("tool %s not found in registry", name)
|
||||
continue
|
||||
}
|
||||
if tr.Group != "metrics" {
|
||||
t.Errorf("tool %s: expected group 'metrics', got %q", name, tr.Group)
|
||||
}
|
||||
}
|
||||
|
||||
for _, name := range ragTools {
|
||||
tr, ok := byName[name]
|
||||
if !ok {
|
||||
t.Errorf("tool %s not found in registry", name)
|
||||
continue
|
||||
}
|
||||
if tr.Group != "rag" {
|
||||
t.Errorf("tool %s: expected group 'rag', got %q", name, tr.Group)
|
||||
}
|
||||
}
|
||||
|
||||
for _, name := range webviewTools {
|
||||
tr, ok := byName[name]
|
||||
if !ok {
|
||||
t.Errorf("tool %s not found in registry", name)
|
||||
continue
|
||||
}
|
||||
if tr.Group != "webview" {
|
||||
t.Errorf("tool %s: expected group 'webview', got %q", name, tr.Group)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestToolRegistry_Good_ToolRecordFields(t *testing.T) {
|
||||
|
|
@ -148,3 +192,93 @@ func TestToolRegistry_Good_ToolRecordFields(t *testing.T) {
|
|||
t.Error("expected non-nil OutputSchema")
|
||||
}
|
||||
}
|
||||
|
||||
func TestToolRegistry_Good_TimeSchemas(t *testing.T) {
|
||||
svc, err := New(Options{
|
||||
WorkspaceRoot: t.TempDir(),
|
||||
ProcessService: &process.Service{},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
byName := make(map[string]ToolRecord)
|
||||
for _, tr := range svc.Tools() {
|
||||
byName[tr.Name] = tr
|
||||
}
|
||||
|
||||
metrics, ok := byName["metrics_record"]
|
||||
if !ok {
|
||||
t.Fatal("metrics_record not found in registry")
|
||||
}
|
||||
inputProps, ok := metrics.InputSchema["properties"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("expected metrics_record input properties map")
|
||||
}
|
||||
dataSchema, ok := inputProps["data"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("expected data schema for metrics_record input")
|
||||
}
|
||||
if got := dataSchema["type"]; got != "object" {
|
||||
t.Fatalf("expected metrics_record data type object, got %#v", got)
|
||||
}
|
||||
props, ok := metrics.OutputSchema["properties"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("expected metrics_record output properties map")
|
||||
}
|
||||
timestamp, ok := props["timestamp"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("expected timestamp schema for metrics_record output")
|
||||
}
|
||||
if got := timestamp["type"]; got != "string" {
|
||||
t.Fatalf("expected metrics_record timestamp type string, got %#v", got)
|
||||
}
|
||||
if got := timestamp["format"]; got != "date-time" {
|
||||
t.Fatalf("expected metrics_record timestamp format date-time, got %#v", got)
|
||||
}
|
||||
|
||||
processStart, ok := byName["process_start"]
|
||||
if !ok {
|
||||
t.Fatal("process_start not found in registry")
|
||||
}
|
||||
props, ok = processStart.OutputSchema["properties"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("expected process_start output properties map")
|
||||
}
|
||||
startedAt, ok := props["startedAt"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("expected startedAt schema for process_start output")
|
||||
}
|
||||
if got := startedAt["type"]; got != "string" {
|
||||
t.Fatalf("expected process_start startedAt type string, got %#v", got)
|
||||
}
|
||||
if got := startedAt["format"]; got != "date-time" {
|
||||
t.Fatalf("expected process_start startedAt format date-time, got %#v", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToolRegistry_Bad_InvalidRESTInputIsClassified(t *testing.T) {
|
||||
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var record ToolRecord
|
||||
for _, tr := range svc.Tools() {
|
||||
if tr.Name == "file_read" {
|
||||
record = tr
|
||||
break
|
||||
}
|
||||
}
|
||||
if record.Name == "" {
|
||||
t.Fatal("file_read not found in registry")
|
||||
}
|
||||
|
||||
_, err = record.RESTHandler(context.Background(), []byte("{bad json"))
|
||||
if err == nil {
|
||||
t.Fatal("expected REST handler error for malformed JSON")
|
||||
}
|
||||
if !errors.Is(err, errInvalidRESTInput) {
|
||||
t.Fatalf("expected invalid REST input error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,8 +4,6 @@ package mcp
|
|||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// Subsystem registers additional MCP tools at startup.
|
||||
|
|
@ -13,10 +11,10 @@ import (
|
|||
//
|
||||
// type BrainSubsystem struct{}
|
||||
// func (b *BrainSubsystem) Name() string { return "brain" }
|
||||
// func (b *BrainSubsystem) RegisterTools(server *mcp.Server) { ... }
|
||||
// func (b *BrainSubsystem) RegisterTools(svc *Service) { ... }
|
||||
type Subsystem interface {
|
||||
Name() string
|
||||
RegisterTools(server *mcp.Server)
|
||||
RegisterTools(svc *Service)
|
||||
}
|
||||
|
||||
// SubsystemWithShutdown extends Subsystem with graceful cleanup.
|
||||
|
|
@ -44,7 +42,10 @@ var _ Notifier = (*Service)(nil)
|
|||
// a channel event to connected Claude Code sessions.
|
||||
// The MCP service catches this in HandleIPCEvents and calls ChannelSend.
|
||||
//
|
||||
// c.ACTION(mcp.ChannelPush{Channel: "agent.status", Data: map[string]any{"repo": "go-io"}})
|
||||
// c.ACTION(mcp.ChannelPush{
|
||||
// Channel: "agent.status",
|
||||
// Data: map[string]any{"repo": "go-io"},
|
||||
// })
|
||||
type ChannelPush struct {
|
||||
Channel string
|
||||
Data any
|
||||
|
|
@ -60,3 +61,14 @@ type SubsystemWithNotifier interface {
|
|||
Subsystem
|
||||
SetNotifier(n Notifier)
|
||||
}
|
||||
|
||||
// SubsystemWithChannelCallback extends Subsystem for implementations that
|
||||
// expose an OnChannel callback instead of a Notifier interface.
|
||||
//
|
||||
// brain.OnChannel(func(ctx context.Context, channel string, data any) {
|
||||
// mcpService.ChannelSend(ctx, channel, data)
|
||||
// })
|
||||
type SubsystemWithChannelCallback interface {
|
||||
Subsystem
|
||||
OnChannel(func(ctx context.Context, channel string, data any))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,8 +3,6 @@ package mcp
|
|||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
||||
// stubSubsystem is a minimal Subsystem for testing.
|
||||
|
|
@ -15,7 +13,23 @@ type stubSubsystem struct {
|
|||
|
||||
func (s *stubSubsystem) Name() string { return s.name }
|
||||
|
||||
func (s *stubSubsystem) RegisterTools(server *mcp.Server) {
|
||||
func (s *stubSubsystem) RegisterTools(svc *Service) {
|
||||
s.toolsRegistered = true
|
||||
}
|
||||
|
||||
// notifierSubsystem verifies notifier wiring happens before tool registration.
|
||||
type notifierSubsystem struct {
|
||||
stubSubsystem
|
||||
notifierSet bool
|
||||
sawNotifierAtRegistration bool
|
||||
}
|
||||
|
||||
func (s *notifierSubsystem) SetNotifier(n Notifier) {
|
||||
s.notifierSet = n != nil
|
||||
}
|
||||
|
||||
func (s *notifierSubsystem) RegisterTools(svc *Service) {
|
||||
s.sawNotifierAtRegistration = s.notifierSet
|
||||
s.toolsRegistered = true
|
||||
}
|
||||
|
||||
|
|
@ -72,6 +86,41 @@ func TestSubsystem_Good_MultipleSubsystems(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSubsystem_Good_NilEntriesIgnoredAndSnapshots(t *testing.T) {
|
||||
sub := &stubSubsystem{name: "snap-sub"}
|
||||
svc, err := New(Options{Subsystems: []Subsystem{nil, sub}})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
|
||||
subs := svc.Subsystems()
|
||||
if len(subs) != 1 {
|
||||
t.Fatalf("expected 1 subsystem after filtering nil entries, got %d", len(subs))
|
||||
}
|
||||
if subs[0].Name() != "snap-sub" {
|
||||
t.Fatalf("expected snap-sub, got %q", subs[0].Name())
|
||||
}
|
||||
|
||||
subs[0] = nil
|
||||
if svc.Subsystems()[0] == nil {
|
||||
t.Fatal("expected Subsystems() to return a snapshot, not the live slice")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubsystem_Good_NotifierSetBeforeRegistration(t *testing.T) {
|
||||
sub := ¬ifierSubsystem{stubSubsystem: stubSubsystem{name: "notifier-sub"}}
|
||||
_, err := New(Options{Subsystems: []Subsystem{sub}})
|
||||
if err != nil {
|
||||
t.Fatalf("New() failed: %v", err)
|
||||
}
|
||||
if !sub.notifierSet {
|
||||
t.Fatal("expected notifier to be set")
|
||||
}
|
||||
if !sub.sawNotifierAtRegistration {
|
||||
t.Fatal("expected notifier to be available before RegisterTools ran")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubsystemShutdown_Good(t *testing.T) {
|
||||
sub := &shutdownSubsystem{stubSubsystem: stubSubsystem{name: "shutdown-sub"}}
|
||||
svc, err := New(Options{Subsystems: []Subsystem{sub}})
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
|
||||
import (
|
||||
|
|
@ -5,8 +7,8 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-ai/ai"
|
||||
core "dappco.re/go/core"
|
||||
"forge.lthn.ai/core/go-ai/ai"
|
||||
"forge.lthn.ai/core/go-log"
|
||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||
)
|
||||
|
|
@ -71,19 +73,19 @@ type MetricCount struct {
|
|||
// // ev.Type == "dispatch.complete", ev.AgentID == "cladius", ev.Repo == "core-php"
|
||||
type MetricEventBrief struct {
|
||||
Type string `json:"type"` // e.g. "dispatch.complete"
|
||||
Timestamp time.Time `json:"timestamp"` // when the event occurred
|
||||
Timestamp time.Time `json:"timestamp"` // when the event occurred
|
||||
AgentID string `json:"agent_id,omitempty"` // e.g. "cladius"
|
||||
Repo string `json:"repo,omitempty"` // e.g. "core-php"
|
||||
}
|
||||
|
||||
// registerMetricsTools adds metrics tools to the MCP server.
|
||||
func (s *Service) registerMetricsTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "metrics", &mcp.Tool{
|
||||
Name: "metrics_record",
|
||||
Description: "Record a metrics event for AI/security tracking. Events are stored in daily JSONL files.",
|
||||
}, s.metricsRecord)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "metrics", &mcp.Tool{
|
||||
Name: "metrics_query",
|
||||
Description: "Query metrics events and get aggregated statistics by type, repo, and agent.",
|
||||
}, s.metricsQuery)
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
|
||||
import (
|
||||
|
|
@ -139,32 +141,32 @@ func (s *Service) registerProcessTools(server *mcp.Server) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
||||
Name: "process_start",
|
||||
Description: "Start a new external process. Returns process ID for tracking.",
|
||||
}, s.processStart)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
||||
Name: "process_stop",
|
||||
Description: "Gracefully stop a running process by ID.",
|
||||
}, s.processStop)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
||||
Name: "process_kill",
|
||||
Description: "Force kill a process by ID. Use when process_stop doesn't work.",
|
||||
}, s.processKill)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
||||
Name: "process_list",
|
||||
Description: "List all managed processes. Use running_only=true for only active processes.",
|
||||
}, s.processList)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
||||
Name: "process_output",
|
||||
Description: "Get the captured output of a process by ID.",
|
||||
}, s.processOutput)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
||||
Name: "process_input",
|
||||
Description: "Send input to a running process stdin.",
|
||||
}, s.processInput)
|
||||
|
|
@ -174,6 +176,10 @@ func (s *Service) registerProcessTools(server *mcp.Server) bool {
|
|||
|
||||
// processStart handles the process_start tool call.
|
||||
func (s *Service) processStart(ctx context.Context, req *mcp.CallToolRequest, input ProcessStartInput) (*mcp.CallToolResult, ProcessStartOutput, error) {
|
||||
if s.processService == nil {
|
||||
return nil, ProcessStartOutput{}, log.E("processStart", "process service unavailable", nil)
|
||||
}
|
||||
|
||||
s.logger.Security("MCP tool execution", "tool", "process_start", "command", input.Command, "args", input.Args, "dir", input.Dir, "user", log.Username())
|
||||
|
||||
if input.Command == "" {
|
||||
|
|
@ -183,7 +189,7 @@ func (s *Service) processStart(ctx context.Context, req *mcp.CallToolRequest, in
|
|||
opts := process.RunOptions{
|
||||
Command: input.Command,
|
||||
Args: input.Args,
|
||||
Dir: input.Dir,
|
||||
Dir: s.resolveWorkspacePath(input.Dir),
|
||||
Env: input.Env,
|
||||
}
|
||||
|
||||
|
|
@ -201,14 +207,29 @@ func (s *Service) processStart(ctx context.Context, req *mcp.CallToolRequest, in
|
|||
Args: proc.Args,
|
||||
StartedAt: proc.StartedAt,
|
||||
}
|
||||
s.ChannelSend(ctx, "process.start", map[string]any{
|
||||
"id": output.ID, "pid": output.PID, "command": output.Command,
|
||||
s.recordProcessRuntime(output.ID, processRuntime{
|
||||
Command: output.Command,
|
||||
Args: output.Args,
|
||||
Dir: info.Dir,
|
||||
StartedAt: output.StartedAt,
|
||||
})
|
||||
s.ChannelSend(ctx, ChannelProcessStart, map[string]any{
|
||||
"id": output.ID,
|
||||
"pid": output.PID,
|
||||
"command": output.Command,
|
||||
"args": output.Args,
|
||||
"dir": info.Dir,
|
||||
"startedAt": output.StartedAt,
|
||||
})
|
||||
return nil, output, nil
|
||||
}
|
||||
|
||||
// processStop handles the process_stop tool call.
|
||||
func (s *Service) processStop(ctx context.Context, req *mcp.CallToolRequest, input ProcessStopInput) (*mcp.CallToolResult, ProcessStopOutput, error) {
|
||||
if s.processService == nil {
|
||||
return nil, ProcessStopOutput{}, log.E("processStop", "process service unavailable", nil)
|
||||
}
|
||||
|
||||
s.logger.Security("MCP tool execution", "tool", "process_stop", "id", input.ID, "user", log.Username())
|
||||
|
||||
if input.ID == "" {
|
||||
|
|
@ -221,14 +242,23 @@ func (s *Service) processStop(ctx context.Context, req *mcp.CallToolRequest, inp
|
|||
return nil, ProcessStopOutput{}, log.E("processStop", "process not found", err)
|
||||
}
|
||||
|
||||
// For graceful stop, we use Kill() which sends SIGKILL
|
||||
// A more sophisticated implementation could use SIGTERM first
|
||||
if err := proc.Kill(); err != nil {
|
||||
log.Error("mcp: process stop kill failed", "id", input.ID, "err", err)
|
||||
// Use the process service's graceful shutdown path first so callers get
|
||||
// a real stop signal before we fall back to a hard kill internally.
|
||||
if err := proc.Shutdown(); err != nil {
|
||||
log.Error("mcp: process stop failed", "id", input.ID, "err", err)
|
||||
return nil, ProcessStopOutput{}, log.E("processStop", "failed to stop process", err)
|
||||
}
|
||||
|
||||
s.ChannelSend(ctx, "process.exit", map[string]any{"id": input.ID, "signal": "stop"})
|
||||
info := proc.Info()
|
||||
s.ChannelSend(ctx, ChannelProcessExit, map[string]any{
|
||||
"id": input.ID,
|
||||
"signal": "stop",
|
||||
"command": info.Command,
|
||||
"args": info.Args,
|
||||
"dir": info.Dir,
|
||||
"startedAt": info.StartedAt,
|
||||
})
|
||||
s.emitTestResult(ctx, input.ID, 0, 0, "stop", "")
|
||||
return nil, ProcessStopOutput{
|
||||
ID: input.ID,
|
||||
Success: true,
|
||||
|
|
@ -238,18 +268,37 @@ func (s *Service) processStop(ctx context.Context, req *mcp.CallToolRequest, inp
|
|||
|
||||
// processKill handles the process_kill tool call.
|
||||
func (s *Service) processKill(ctx context.Context, req *mcp.CallToolRequest, input ProcessKillInput) (*mcp.CallToolResult, ProcessKillOutput, error) {
|
||||
if s.processService == nil {
|
||||
return nil, ProcessKillOutput{}, log.E("processKill", "process service unavailable", nil)
|
||||
}
|
||||
|
||||
s.logger.Security("MCP tool execution", "tool", "process_kill", "id", input.ID, "user", log.Username())
|
||||
|
||||
if input.ID == "" {
|
||||
return nil, ProcessKillOutput{}, errIDEmpty
|
||||
}
|
||||
|
||||
proc, err := s.processService.Get(input.ID)
|
||||
if err != nil {
|
||||
log.Error("mcp: process kill failed", "id", input.ID, "err", err)
|
||||
return nil, ProcessKillOutput{}, log.E("processKill", "process not found", err)
|
||||
}
|
||||
|
||||
if err := s.processService.Kill(input.ID); err != nil {
|
||||
log.Error("mcp: process kill failed", "id", input.ID, "err", err)
|
||||
return nil, ProcessKillOutput{}, log.E("processKill", "failed to kill process", err)
|
||||
}
|
||||
|
||||
s.ChannelSend(ctx, "process.exit", map[string]any{"id": input.ID, "signal": "kill"})
|
||||
info := proc.Info()
|
||||
s.ChannelSend(ctx, ChannelProcessExit, map[string]any{
|
||||
"id": input.ID,
|
||||
"signal": "kill",
|
||||
"command": info.Command,
|
||||
"args": info.Args,
|
||||
"dir": info.Dir,
|
||||
"startedAt": info.StartedAt,
|
||||
})
|
||||
s.emitTestResult(ctx, input.ID, 0, 0, "kill", "")
|
||||
return nil, ProcessKillOutput{
|
||||
ID: input.ID,
|
||||
Success: true,
|
||||
|
|
@ -259,6 +308,10 @@ func (s *Service) processKill(ctx context.Context, req *mcp.CallToolRequest, inp
|
|||
|
||||
// processList handles the process_list tool call.
|
||||
func (s *Service) processList(ctx context.Context, req *mcp.CallToolRequest, input ProcessListInput) (*mcp.CallToolResult, ProcessListOutput, error) {
|
||||
if s.processService == nil {
|
||||
return nil, ProcessListOutput{}, log.E("processList", "process service unavailable", nil)
|
||||
}
|
||||
|
||||
s.logger.Info("MCP tool execution", "tool", "process_list", "running_only", input.RunningOnly, "user", log.Username())
|
||||
|
||||
var procs []*process.Process
|
||||
|
|
@ -292,6 +345,10 @@ func (s *Service) processList(ctx context.Context, req *mcp.CallToolRequest, inp
|
|||
|
||||
// processOutput handles the process_output tool call.
|
||||
func (s *Service) processOutput(ctx context.Context, req *mcp.CallToolRequest, input ProcessOutputInput) (*mcp.CallToolResult, ProcessOutputOutput, error) {
|
||||
if s.processService == nil {
|
||||
return nil, ProcessOutputOutput{}, log.E("processOutput", "process service unavailable", nil)
|
||||
}
|
||||
|
||||
s.logger.Info("MCP tool execution", "tool", "process_output", "id", input.ID, "user", log.Username())
|
||||
|
||||
if input.ID == "" {
|
||||
|
|
@ -312,6 +369,10 @@ func (s *Service) processOutput(ctx context.Context, req *mcp.CallToolRequest, i
|
|||
|
||||
// processInput handles the process_input tool call.
|
||||
func (s *Service) processInput(ctx context.Context, req *mcp.CallToolRequest, input ProcessInputInput) (*mcp.CallToolResult, ProcessInputOutput, error) {
|
||||
if s.processService == nil {
|
||||
return nil, ProcessInputOutput{}, log.E("processInput", "process service unavailable", nil)
|
||||
}
|
||||
|
||||
s.logger.Security("MCP tool execution", "tool", "process_input", "id", input.ID, "user", log.Username())
|
||||
|
||||
if input.ID == "" {
|
||||
|
|
|
|||
|
|
@ -275,7 +275,7 @@ func TestProcessInfo_Good(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestWithProcessService_Good verifies the WithProcessService option.
|
||||
// TestWithProcessService_Good verifies Options{ProcessService: ...}.
|
||||
func TestWithProcessService_Good(t *testing.T) {
|
||||
// Note: We can't easily create a real process.Service here without Core,
|
||||
// so we just verify the option doesn't panic with nil.
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
|
||||
import (
|
||||
|
|
@ -99,17 +101,17 @@ type RAGCollectionsOutput struct {
|
|||
|
||||
// registerRAGTools adds RAG tools to the MCP server.
|
||||
func (s *Service) registerRAGTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "rag", &mcp.Tool{
|
||||
Name: "rag_query",
|
||||
Description: "Query the RAG vector database for relevant documentation. Returns semantically similar content based on the query.",
|
||||
}, s.ragQuery)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "rag", &mcp.Tool{
|
||||
Name: "rag_ingest",
|
||||
Description: "Ingest documents into the RAG vector database. Supports both single files and directories.",
|
||||
}, s.ragIngest)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "rag", &mcp.Tool{
|
||||
Name: "rag_collections",
|
||||
Description: "List all available collections in the RAG vector database.",
|
||||
}, s.ragCollections)
|
||||
|
|
@ -183,12 +185,13 @@ func (s *Service) ragIngest(ctx context.Context, req *mcp.CallToolRequest, input
|
|||
log.Error("mcp: rag ingest stat failed", "path", input.Path, "err", err)
|
||||
return nil, RAGIngestOutput{}, log.E("ragIngest", "failed to access path", err)
|
||||
}
|
||||
resolvedPath := s.resolveWorkspacePath(input.Path)
|
||||
|
||||
var message string
|
||||
var chunks int
|
||||
if info.IsDir() {
|
||||
// Ingest directory
|
||||
err = rag.IngestDirectory(ctx, input.Path, collection, input.Recreate)
|
||||
err = rag.IngestDirectory(ctx, resolvedPath, collection, input.Recreate)
|
||||
if err != nil {
|
||||
log.Error("mcp: rag ingest directory failed", "path", input.Path, "collection", collection, "err", err)
|
||||
return nil, RAGIngestOutput{}, log.E("ragIngest", "failed to ingest directory", err)
|
||||
|
|
@ -196,7 +199,7 @@ func (s *Service) ragIngest(ctx context.Context, req *mcp.CallToolRequest, input
|
|||
message = core.Sprintf("Successfully ingested directory %s into collection %s", input.Path, collection)
|
||||
} else {
|
||||
// Ingest single file
|
||||
chunks, err = rag.IngestSingleFile(ctx, input.Path, collection)
|
||||
chunks, err = rag.IngestSingleFile(ctx, resolvedPath, collection)
|
||||
if err != nil {
|
||||
log.Error("mcp: rag ingest file failed", "path", input.Path, "collection", collection, "err", err)
|
||||
return nil, RAGIngestOutput{}, log.E("ragIngest", "failed to ingest file", err)
|
||||
|
|
|
|||
|
|
@ -1,8 +1,15 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"image"
|
||||
"image/jpeg"
|
||||
_ "image/png"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
|
@ -25,6 +32,20 @@ var (
|
|||
errSelectorRequired = log.E("webview", "selector is required", nil)
|
||||
)
|
||||
|
||||
// closeWebviewConnection closes and clears the shared browser connection.
|
||||
func closeWebviewConnection() error {
|
||||
webviewMu.Lock()
|
||||
defer webviewMu.Unlock()
|
||||
|
||||
if webviewInstance == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := webviewInstance.Close()
|
||||
webviewInstance = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// WebviewConnectInput contains parameters for connecting to Chrome DevTools.
|
||||
//
|
||||
// input := WebviewConnectInput{DebugURL: "http://localhost:9222", Timeout: 10}
|
||||
|
|
@ -201,52 +222,52 @@ type WebviewDisconnectOutput struct {
|
|||
|
||||
// registerWebviewTools adds webview tools to the MCP server.
|
||||
func (s *Service) registerWebviewTools(server *mcp.Server) {
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_connect",
|
||||
Description: "Connect to Chrome DevTools Protocol. Start Chrome with --remote-debugging-port=9222 first.",
|
||||
}, s.webviewConnect)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_disconnect",
|
||||
Description: "Disconnect from Chrome DevTools.",
|
||||
}, s.webviewDisconnect)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_navigate",
|
||||
Description: "Navigate the browser to a URL.",
|
||||
}, s.webviewNavigate)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_click",
|
||||
Description: "Click on an element by CSS selector.",
|
||||
}, s.webviewClick)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_type",
|
||||
Description: "Type text into an element by CSS selector.",
|
||||
}, s.webviewType)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_query",
|
||||
Description: "Query DOM elements by CSS selector.",
|
||||
}, s.webviewQuery)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_console",
|
||||
Description: "Get browser console output.",
|
||||
}, s.webviewConsole)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_eval",
|
||||
Description: "Evaluate JavaScript in the browser context.",
|
||||
}, s.webviewEval)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_screenshot",
|
||||
Description: "Capture a screenshot of the browser window.",
|
||||
}, s.webviewScreenshot)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
||||
Name: "webview_wait",
|
||||
Description: "Wait for an element to appear by CSS selector.",
|
||||
}, s.webviewWait)
|
||||
|
|
@ -533,6 +554,7 @@ func (s *Service) webviewScreenshot(ctx context.Context, req *mcp.CallToolReques
|
|||
if format == "" {
|
||||
format = "png"
|
||||
}
|
||||
format = strings.ToLower(format)
|
||||
|
||||
data, err := webviewInstance.Screenshot()
|
||||
if err != nil {
|
||||
|
|
@ -540,13 +562,40 @@ func (s *Service) webviewScreenshot(ctx context.Context, req *mcp.CallToolReques
|
|||
return nil, WebviewScreenshotOutput{}, log.E("webviewScreenshot", "failed to capture screenshot", err)
|
||||
}
|
||||
|
||||
encoded, outputFormat, err := normalizeScreenshotData(data, format)
|
||||
if err != nil {
|
||||
return nil, WebviewScreenshotOutput{}, log.E("webviewScreenshot", "failed to encode screenshot", err)
|
||||
}
|
||||
|
||||
return nil, WebviewScreenshotOutput{
|
||||
Success: true,
|
||||
Data: base64.StdEncoding.EncodeToString(data),
|
||||
Format: format,
|
||||
Data: base64.StdEncoding.EncodeToString(encoded),
|
||||
Format: outputFormat,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// normalizeScreenshotData converts screenshot bytes into the requested format.
|
||||
// PNG is preserved as-is. JPEG requests are re-encoded so the output matches
|
||||
// the declared format in WebviewScreenshotOutput.
|
||||
func normalizeScreenshotData(data []byte, format string) ([]byte, string, error) {
|
||||
switch format {
|
||||
case "", "png":
|
||||
return data, "png", nil
|
||||
case "jpeg", "jpg":
|
||||
img, _, err := image.Decode(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := jpeg.Encode(&buf, img, &jpeg.Options{Quality: 90}); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return buf.Bytes(), "jpeg", nil
|
||||
default:
|
||||
return nil, "", log.E("webviewScreenshot", "unsupported screenshot format: "+format, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// webviewWait handles the webview_wait tool call.
|
||||
func (s *Service) webviewWait(ctx context.Context, req *mcp.CallToolRequest, input WebviewWaitInput) (*mcp.CallToolResult, WebviewWaitOutput, error) {
|
||||
webviewMu.Lock()
|
||||
|
|
@ -562,7 +611,15 @@ func (s *Service) webviewWait(ctx context.Context, req *mcp.CallToolRequest, inp
|
|||
return nil, WebviewWaitOutput{}, errSelectorRequired
|
||||
}
|
||||
|
||||
if err := webviewInstance.WaitForSelector(input.Selector); err != nil {
|
||||
timeout := time.Duration(input.Timeout) * time.Second
|
||||
if timeout <= 0 {
|
||||
timeout = 30 * time.Second
|
||||
}
|
||||
|
||||
if err := waitForSelector(ctx, timeout, input.Selector, func(selector string) error {
|
||||
_, err := webviewInstance.QuerySelector(selector)
|
||||
return err
|
||||
}); err != nil {
|
||||
log.Error("mcp: webview wait failed", "selector", input.Selector, "err", err)
|
||||
return nil, WebviewWaitOutput{}, log.E("webviewWait", "failed to wait for selector", err)
|
||||
}
|
||||
|
|
@ -572,3 +629,34 @@ func (s *Service) webviewWait(ctx context.Context, req *mcp.CallToolRequest, inp
|
|||
Message: core.Sprintf("Element found: %s", input.Selector),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// waitForSelector polls until the selector exists or the timeout elapses.
|
||||
// Query helpers in go-webview report "element not found" as an error, so we
|
||||
// keep retrying until we see the element or hit the deadline.
|
||||
func waitForSelector(ctx context.Context, timeout time.Duration, selector string, query func(string) error) error {
|
||||
if timeout <= 0 {
|
||||
timeout = 30 * time.Second
|
||||
}
|
||||
|
||||
waitCtx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
ticker := time.NewTicker(10 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
err := query(selector)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if !strings.Contains(err.Error(), "element not found") {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-waitCtx.Done():
|
||||
return log.E("webviewWait", "timed out waiting for selector", waitCtx.Err())
|
||||
case <-ticker.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,13 @@
|
|||
package mcp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"image"
|
||||
"image/color"
|
||||
"image/jpeg"
|
||||
"image/png"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
|
@ -215,6 +222,41 @@ func TestWebviewWaitInput_Good(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWaitForSelector_Good(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
attempts := 0
|
||||
err := waitForSelector(ctx, 200*time.Millisecond, "#ready", func(selector string) error {
|
||||
attempts++
|
||||
if attempts < 3 {
|
||||
return errors.New("element not found: " + selector)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("waitForSelector failed: %v", err)
|
||||
}
|
||||
if attempts != 3 {
|
||||
t.Fatalf("expected 3 attempts, got %d", attempts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWaitForSelector_Bad_Timeout(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
start := time.Now()
|
||||
err := waitForSelector(ctx, 50*time.Millisecond, "#missing", func(selector string) error {
|
||||
return errors.New("element not found: " + selector)
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatal("expected waitForSelector to time out")
|
||||
}
|
||||
if time.Since(start) < 50*time.Millisecond {
|
||||
t.Fatal("expected waitForSelector to honor timeout")
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebviewConnectOutput_Good verifies the WebviewConnectOutput struct has expected fields.
|
||||
func TestWebviewConnectOutput_Good(t *testing.T) {
|
||||
output := WebviewConnectOutput{
|
||||
|
|
@ -358,6 +400,61 @@ func TestWebviewScreenshotOutput_Good(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNormalizeScreenshotData_Good_Png(t *testing.T) {
|
||||
src := mustEncodeTestPNG(t)
|
||||
|
||||
out, format, err := normalizeScreenshotData(src, "png")
|
||||
if err != nil {
|
||||
t.Fatalf("normalizeScreenshotData failed: %v", err)
|
||||
}
|
||||
if format != "png" {
|
||||
t.Fatalf("expected png format, got %q", format)
|
||||
}
|
||||
if !bytes.Equal(out, src) {
|
||||
t.Fatal("expected png output to preserve the original bytes")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeScreenshotData_Good_Jpeg(t *testing.T) {
|
||||
src := mustEncodeTestPNG(t)
|
||||
|
||||
out, format, err := normalizeScreenshotData(src, "jpeg")
|
||||
if err != nil {
|
||||
t.Fatalf("normalizeScreenshotData failed: %v", err)
|
||||
}
|
||||
if format != "jpeg" {
|
||||
t.Fatalf("expected jpeg format, got %q", format)
|
||||
}
|
||||
if bytes.Equal(out, src) {
|
||||
t.Fatal("expected jpeg output to differ from png input")
|
||||
}
|
||||
|
||||
if _, err := jpeg.Decode(bytes.NewReader(out)); err != nil {
|
||||
t.Fatalf("expected output to decode as an image: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeScreenshotData_Bad_UnsupportedFormat(t *testing.T) {
|
||||
src := mustEncodeTestPNG(t)
|
||||
|
||||
if _, _, err := normalizeScreenshotData(src, "gif"); err == nil {
|
||||
t.Fatal("expected unsupported format error")
|
||||
}
|
||||
}
|
||||
|
||||
func mustEncodeTestPNG(t *testing.T) []byte {
|
||||
t.Helper()
|
||||
|
||||
img := image.NewRGBA(image.Rect(0, 0, 1, 1))
|
||||
img.Set(0, 0, color.RGBA{R: 200, G: 80, B: 40, A: 255})
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := png.Encode(&buf, img); err != nil {
|
||||
t.Fatalf("png encode failed: %v", err)
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// TestWebviewElementInfo_Good verifies the WebviewElementInfo struct has expected fields.
|
||||
func TestWebviewElementInfo_Good(t *testing.T) {
|
||||
elem := WebviewElementInfo{
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
|
||||
import (
|
||||
|
|
@ -47,12 +49,12 @@ func (s *Service) registerWSTools(server *mcp.Server) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "ws", &mcp.Tool{
|
||||
Name: "ws_start",
|
||||
Description: "Start the WebSocket server for real-time process output streaming.",
|
||||
}, s.wsStart)
|
||||
|
||||
mcp.AddTool(server, &mcp.Tool{
|
||||
addToolRecorded(s, server, "ws", &mcp.Tool{
|
||||
Name: "ws_info",
|
||||
Description: "Get WebSocket hub statistics (connected clients and active channels).",
|
||||
}, s.wsInfo)
|
||||
|
|
@ -62,6 +64,10 @@ func (s *Service) registerWSTools(server *mcp.Server) bool {
|
|||
|
||||
// wsStart handles the ws_start tool call.
|
||||
func (s *Service) wsStart(ctx context.Context, req *mcp.CallToolRequest, input WSStartInput) (*mcp.CallToolResult, WSStartOutput, error) {
|
||||
if s.wsHub == nil {
|
||||
return nil, WSStartOutput{}, log.E("wsStart", "websocket hub unavailable", nil)
|
||||
}
|
||||
|
||||
addr := input.Addr
|
||||
if addr == "" {
|
||||
addr = ":8080"
|
||||
|
|
@ -117,6 +123,10 @@ func (s *Service) wsStart(ctx context.Context, req *mcp.CallToolRequest, input W
|
|||
|
||||
// wsInfo handles the ws_info tool call.
|
||||
func (s *Service) wsInfo(ctx context.Context, req *mcp.CallToolRequest, input WSInfoInput) (*mcp.CallToolResult, WSInfoOutput, error) {
|
||||
if s.wsHub == nil {
|
||||
return nil, WSInfoOutput{}, log.E("wsInfo", "websocket hub unavailable", nil)
|
||||
}
|
||||
|
||||
s.logger.Info("MCP tool execution", "tool", "ws_info", "user", log.Username())
|
||||
|
||||
stats := s.wsHub.Stats()
|
||||
|
|
|
|||
|
|
@ -83,7 +83,7 @@ func TestWSInfoOutput_Good(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestWithWSHub_Good verifies the WithWSHub option.
|
||||
// TestWithWSHub_Good verifies Options{WSHub: ...}.
|
||||
func TestWithWSHub_Good(t *testing.T) {
|
||||
hub := ws.NewHub()
|
||||
|
||||
|
|
@ -97,7 +97,7 @@ func TestWithWSHub_Good(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestWithWSHub_Nil verifies the WithWSHub option with nil.
|
||||
// TestWithWSHub_Nil verifies Options{WSHub: nil}.
|
||||
func TestWithWSHub_Nil(t *testing.T) {
|
||||
s, err := New(Options{WSHub: nil})
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -82,12 +82,11 @@ func (s *Service) ServeHTTP(ctx context.Context, addr string) error {
|
|||
}
|
||||
|
||||
// withAuth wraps an http.Handler with Bearer token authentication.
|
||||
// If token is empty, requests are rejected.
|
||||
// If token is empty, authentication is disabled for local development.
|
||||
func withAuth(token string, next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if strings.TrimSpace(token) == "" {
|
||||
w.Header().Set("WWW-Authenticate", `Bearer`)
|
||||
http.Error(w, `{"error":"authentication not configured"}`, http.StatusUnauthorized)
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -107,6 +107,44 @@ func TestServeHTTP_Good_AuthRequired(t *testing.T) {
|
|||
<-errCh
|
||||
}
|
||||
|
||||
func TestServeHTTP_Good_NoAuthConfigured(t *testing.T) {
|
||||
os.Unsetenv("MCP_AUTH_TOKEN")
|
||||
|
||||
s, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create service: %v", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to find free port: %v", err)
|
||||
}
|
||||
addr := listener.Addr().String()
|
||||
listener.Close()
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
errCh <- s.ServeHTTP(ctx, addr)
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
resp, err := http.Get(fmt.Sprintf("http://%s/mcp", addr))
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode == 401 {
|
||||
t.Fatalf("expected /mcp to be open without MCP_AUTH_TOKEN, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
cancel()
|
||||
<-errCh
|
||||
}
|
||||
|
||||
func TestWithAuth_Good_ValidToken(t *testing.T) {
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
|
|
@ -157,19 +195,18 @@ func TestWithAuth_Bad_MissingToken(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWithAuth_Bad_EmptyConfiguredToken(t *testing.T) {
|
||||
func TestWithAuth_Good_EmptyConfiguredToken_DisablesAuth(t *testing.T) {
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
})
|
||||
|
||||
// Empty token now requires explicit configuration
|
||||
wrapped := withAuth("", handler)
|
||||
|
||||
req, _ := http.NewRequest("GET", "/", nil)
|
||||
rr := &fakeResponseWriter{code: 200}
|
||||
wrapped.ServeHTTP(rr, req)
|
||||
if rr.code != 401 {
|
||||
t.Errorf("expected 401 with empty configured token, got %d", rr.code)
|
||||
if rr.code != 200 {
|
||||
t.Errorf("expected 200 with empty configured token, got %d", rr.code)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
|
||||
import (
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
|
||||
import (
|
||||
|
|
@ -55,11 +57,14 @@ type TCPTransport struct {
|
|||
|
||||
// NewTCPTransport creates a new TCP transport listener.
|
||||
// Defaults to 127.0.0.1 when the host component is empty (e.g. ":9100").
|
||||
// Defaults to DefaultTCPAddr when addr is empty.
|
||||
// Emits a security warning when explicitly binding to 0.0.0.0 (all interfaces).
|
||||
//
|
||||
// t, err := NewTCPTransport("127.0.0.1:9100")
|
||||
// t, err := NewTCPTransport(":9100") // defaults to 127.0.0.1:9100
|
||||
func NewTCPTransport(addr string) (*TCPTransport, error) {
|
||||
addr = normalizeTCPAddr(addr)
|
||||
|
||||
host, port, _ := net.SplitHostPort(addr)
|
||||
if host == "" {
|
||||
addr = net.JoinHostPort("127.0.0.1", port)
|
||||
|
|
@ -73,6 +78,23 @@ func NewTCPTransport(addr string) (*TCPTransport, error) {
|
|||
return &TCPTransport{addr: addr, listener: listener}, nil
|
||||
}
|
||||
|
||||
func normalizeTCPAddr(addr string) string {
|
||||
if addr == "" {
|
||||
return DefaultTCPAddr
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return addr
|
||||
}
|
||||
|
||||
if host == "" {
|
||||
return net.JoinHostPort("127.0.0.1", port)
|
||||
}
|
||||
|
||||
return addr
|
||||
}
|
||||
|
||||
// ServeTCP starts a TCP server for the MCP service.
|
||||
// It accepts connections and spawns a new MCP server session for each connection.
|
||||
//
|
||||
|
|
@ -91,11 +113,7 @@ func (s *Service) ServeTCP(ctx context.Context, addr string) error {
|
|||
<-ctx.Done()
|
||||
_ = t.listener.Close()
|
||||
}()
|
||||
|
||||
if addr == "" {
|
||||
addr = t.listener.Addr().String()
|
||||
}
|
||||
diagPrintf("MCP TCP server listening on %s\n", addr)
|
||||
diagPrintf("MCP TCP server listening on %s\n", t.listener.Addr().String())
|
||||
|
||||
for {
|
||||
conn, err := t.listener.Accept()
|
||||
|
|
@ -123,6 +141,7 @@ func (s *Service) handleConnection(ctx context.Context, conn net.Conn) {
|
|||
conn.Close()
|
||||
return
|
||||
}
|
||||
defer session.Close()
|
||||
// Block until the session ends
|
||||
if err := session.Wait(); err != nil {
|
||||
diagPrintf("Session ended: %v\n", err)
|
||||
|
|
|
|||
|
|
@ -31,6 +31,26 @@ func TestNewTCPTransport_Defaults(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNormalizeTCPAddr_Good_Defaults(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
want string
|
||||
}{
|
||||
{name: "empty", in: "", want: DefaultTCPAddr},
|
||||
{name: "missing host", in: ":9100", want: "127.0.0.1:9100"},
|
||||
{name: "explicit host", in: "127.0.0.1:9100", want: "127.0.0.1:9100"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := normalizeTCPAddr(tt.in); got != tt.want {
|
||||
t.Fatalf("normalizeTCPAddr(%q) = %q, want %q", tt.in, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTCPTransport_Warning(t *testing.T) {
|
||||
// Capture warning output via setDiagWriter (mutex-protected, no race).
|
||||
var buf bytes.Buffer
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package mcp
|
||||
|
||||
import (
|
||||
|
|
|
|||
47
pkg/mcp/transport_unix_test.go
Normal file
47
pkg/mcp/transport_unix_test.go
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
package mcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRun_Good_UnixTrigger(t *testing.T) {
|
||||
s, err := New(Options{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create service: %v", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
socketPath := shortSocketPath(t, "run")
|
||||
t.Setenv("MCP_UNIX_SOCKET", socketPath)
|
||||
t.Setenv("MCP_HTTP_ADDR", "")
|
||||
t.Setenv("MCP_ADDR", "")
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
errCh <- s.Run(ctx)
|
||||
}()
|
||||
|
||||
var conn net.Conn
|
||||
deadline := time.Now().Add(2 * time.Second)
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err = net.DialTimeout("unix", socketPath, 200*time.Millisecond)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to connect to Unix socket at %s: %v", socketPath, err)
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
cancel()
|
||||
if err := <-errCh; err != nil {
|
||||
t.Fatalf("Run failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -140,6 +140,75 @@ List all database tables in the application.
|
|||
|
||||
---
|
||||
|
||||
### describe_table
|
||||
|
||||
Describe a database table, including its columns and indexes.
|
||||
|
||||
**Description:** Describe a database table, including columns and indexes
|
||||
|
||||
**Parameters:**
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|------|------|----------|-------------|
|
||||
| `table` | string | Yes | Database table name to inspect |
|
||||
|
||||
**Example Request:**
|
||||
|
||||
```json
|
||||
{
|
||||
"tool": "describe_table",
|
||||
"arguments": {
|
||||
"table": "users"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Success Response:**
|
||||
|
||||
```json
|
||||
{
|
||||
"table": "users",
|
||||
"columns": [
|
||||
{
|
||||
"field": "id",
|
||||
"type": "bigint unsigned",
|
||||
"collation": null,
|
||||
"null": "NO",
|
||||
"key": "PRI",
|
||||
"default": null,
|
||||
"extra": "auto_increment",
|
||||
"privileges": "select,insert,update,references",
|
||||
"comment": "Primary key"
|
||||
}
|
||||
],
|
||||
"indexes": [
|
||||
{
|
||||
"name": "PRIMARY",
|
||||
"unique": true,
|
||||
"type": "BTREE",
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"order": 1,
|
||||
"collation": "A",
|
||||
"cardinality": 1,
|
||||
"sub_part": null,
|
||||
"nullable": "",
|
||||
"comment": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Security Notes:**
|
||||
- Table names are validated to allow only letters, numbers, and underscores
|
||||
- System tables are blocked
|
||||
- Table access may be filtered based on configuration
|
||||
|
||||
---
|
||||
|
||||
## Commerce Tools
|
||||
|
||||
### get_billing_status
|
||||
|
|
@ -690,6 +759,7 @@ curl -X POST https://api.example.com/mcp/tools/call \
|
|||
### Query Tools
|
||||
- `query_database` - Execute SQL queries
|
||||
- `list_tables` - List database tables
|
||||
- `describe_table` - Inspect table columns and indexes
|
||||
|
||||
### Commerce Tools
|
||||
- `get_billing_status` - Get subscription status
|
||||
|
|
|
|||
|
|
@ -113,6 +113,8 @@ class Boot extends ServiceProvider
|
|||
->where('id', '[a-z0-9-]+');
|
||||
Route::get('servers/{id}/tools', [Controllers\McpApiController::class, 'tools'])->name('servers.tools')
|
||||
->where('id', '[a-z0-9-]+');
|
||||
Route::get('servers/{id}/resources', [Controllers\McpApiController::class, 'resources'])->name('servers.resources')
|
||||
->where('id', '[a-z0-9-]+');
|
||||
})
|
||||
);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,6 +6,9 @@ namespace Core\Mcp\Controllers;
|
|||
|
||||
use Core\Front\Controller;
|
||||
use Core\Mcp\Services\McpQuotaService;
|
||||
use Core\Mod\Agentic\Models\AgentPlan;
|
||||
use Core\Mod\Agentic\Models\AgentSession;
|
||||
use Core\Mod\Content\Models\ContentItem;
|
||||
use Illuminate\Http\JsonResponse;
|
||||
use Illuminate\Http\Request;
|
||||
use Illuminate\Support\Facades\Cache;
|
||||
|
|
@ -13,6 +16,7 @@ use Core\Api\Models\ApiKey;
|
|||
use Core\Mcp\Models\McpApiRequest;
|
||||
use Core\Mcp\Models\McpToolCall;
|
||||
use Core\Mcp\Services\McpWebhookDispatcher;
|
||||
use Core\Tenant\Models\Workspace;
|
||||
use Symfony\Component\Yaml\Yaml;
|
||||
|
||||
/**
|
||||
|
|
@ -78,6 +82,26 @@ class McpApiController extends Controller
|
|||
]);
|
||||
}
|
||||
|
||||
/**
|
||||
* List resources for a specific server.
|
||||
*
|
||||
* GET /api/v1/mcp/servers/{id}/resources
|
||||
*/
|
||||
public function resources(Request $request, string $id): JsonResponse
|
||||
{
|
||||
$server = $this->loadServerFull($id);
|
||||
|
||||
if (! $server) {
|
||||
return response()->json(['error' => 'Server not found'], 404);
|
||||
}
|
||||
|
||||
return response()->json([
|
||||
'server' => $id,
|
||||
'resources' => array_values($server['resources'] ?? []),
|
||||
'count' => count($server['resources'] ?? []),
|
||||
]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a tool on an MCP server.
|
||||
*
|
||||
|
|
@ -175,8 +199,6 @@ class McpApiController extends Controller
|
|||
* Read a resource from an MCP server.
|
||||
*
|
||||
* GET /api/v1/mcp/resources/{uri}
|
||||
*
|
||||
* NOTE: Resource reading is not yet implemented. Returns 501 Not Implemented.
|
||||
*/
|
||||
public function resource(Request $request, string $uri): JsonResponse
|
||||
{
|
||||
|
|
@ -185,19 +207,289 @@ class McpApiController extends Controller
|
|||
return response()->json(['error' => 'Invalid resource URI format'], 400);
|
||||
}
|
||||
|
||||
$serverId = $matches[1];
|
||||
|
||||
$server = $this->loadServerFull($serverId);
|
||||
if (! $server) {
|
||||
return response()->json(['error' => 'Server not found'], 404);
|
||||
$scheme = $matches[1];
|
||||
$content = $this->readResourceContent($scheme, $uri);
|
||||
if ($content === null) {
|
||||
return response()->json([
|
||||
'error' => 'not_found',
|
||||
'message' => 'Resource not found',
|
||||
'uri' => $uri,
|
||||
], 404);
|
||||
}
|
||||
|
||||
// Resource reading not yet implemented
|
||||
return response()->json([
|
||||
'error' => 'not_implemented',
|
||||
'message' => 'MCP resource reading is not yet implemented. Use tool calls instead.',
|
||||
'uri' => $uri,
|
||||
], 501);
|
||||
'content' => $content,
|
||||
]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a supported MCP resource URI into response content.
|
||||
*/
|
||||
protected function readResourceContent(string $scheme, string $uri): ?array
|
||||
{
|
||||
if (str_starts_with($uri, 'plans://')) {
|
||||
return [
|
||||
'mimeType' => 'text/markdown',
|
||||
'text' => $this->resourcePlanContent($uri),
|
||||
];
|
||||
}
|
||||
|
||||
if (str_starts_with($uri, 'sessions://')) {
|
||||
return [
|
||||
'mimeType' => 'text/markdown',
|
||||
'text' => $this->resourceSessionContent($uri),
|
||||
];
|
||||
}
|
||||
|
||||
if (str_starts_with($uri, 'content://')) {
|
||||
return [
|
||||
'mimeType' => 'text/markdown',
|
||||
'text' => $this->resourceContentItem($uri),
|
||||
];
|
||||
}
|
||||
|
||||
return $this->resourceServerContent($scheme, $uri);
|
||||
}
|
||||
|
||||
/**
|
||||
* Render plan resources.
|
||||
*/
|
||||
protected function resourcePlanContent(string $uri): string
|
||||
{
|
||||
if ($uri === 'plans://all') {
|
||||
$plans = AgentPlan::with('agentPhases')->notArchived()->orderBy('updated_at', 'desc')->get();
|
||||
|
||||
$md = "# Work Plans\n\n";
|
||||
$md .= '**Total:** '.$plans->count()." plan(s)\n\n";
|
||||
|
||||
foreach ($plans->groupBy('status') as $status => $group) {
|
||||
$md .= '## '.ucfirst($status).' ('.$group->count().")\n\n";
|
||||
|
||||
foreach ($group as $plan) {
|
||||
$progress = $plan->getProgress();
|
||||
$md .= "- **[{$plan->slug}]** {$plan->title} - {$progress['percentage']}%\n";
|
||||
}
|
||||
$md .= "\n";
|
||||
}
|
||||
|
||||
return $md;
|
||||
}
|
||||
|
||||
$path = substr($uri, 9); // Remove "plans://"
|
||||
$parts = explode('/', $path);
|
||||
$slug = $parts[0];
|
||||
|
||||
$plan = AgentPlan::with('agentPhases')->where('slug', $slug)->first();
|
||||
if (! $plan) {
|
||||
return "Plan not found: {$slug}";
|
||||
}
|
||||
|
||||
if (count($parts) === 3 && $parts[1] === 'phases') {
|
||||
$phase = $plan->agentPhases()->where('order', (int) $parts[2])->first();
|
||||
if (! $phase) {
|
||||
return "Phase not found: {$parts[2]}";
|
||||
}
|
||||
|
||||
$md = "# Phase {$phase->order}: {$phase->name}\n\n";
|
||||
$md .= "**Status:** {$phase->getStatusIcon()} {$phase->status}\n\n";
|
||||
|
||||
if ($phase->description) {
|
||||
$md .= "{$phase->description}\n\n";
|
||||
}
|
||||
|
||||
$md .= "## Tasks\n\n";
|
||||
|
||||
foreach ($phase->tasks ?? [] as $task) {
|
||||
$status = is_string($task) ? 'pending' : ($task['status'] ?? 'pending');
|
||||
$name = is_string($task) ? $task : ($task['name'] ?? 'Unknown');
|
||||
$icon = $status === 'completed' ? '✅' : '⬜';
|
||||
$md .= "- {$icon} {$name}\n";
|
||||
}
|
||||
|
||||
return $md;
|
||||
}
|
||||
|
||||
if (count($parts) === 3 && $parts[1] === 'state') {
|
||||
$state = $plan->states()->where('key', $parts[2])->first();
|
||||
if (! $state) {
|
||||
return "State key not found: {$parts[2]}";
|
||||
}
|
||||
|
||||
return $state->getFormattedValue();
|
||||
}
|
||||
|
||||
return $plan->toMarkdown();
|
||||
}
|
||||
|
||||
/**
|
||||
* Render session resources.
|
||||
*/
|
||||
protected function resourceSessionContent(string $uri): string
|
||||
{
|
||||
$path = substr($uri, 11); // Remove "sessions://"
|
||||
$parts = explode('/', $path);
|
||||
|
||||
if (count($parts) !== 2 || $parts[1] !== 'context') {
|
||||
return "Resource not found: {$uri}";
|
||||
}
|
||||
|
||||
$session = AgentSession::where('session_id', $parts[0])->first();
|
||||
if (! $session) {
|
||||
return "Session not found: {$parts[0]}";
|
||||
}
|
||||
|
||||
$md = "# Session: {$session->session_id}\n\n";
|
||||
$md .= "**Agent:** {$session->agent_type}\n";
|
||||
$md .= "**Status:** {$session->status}\n";
|
||||
$md .= "**Duration:** {$session->getDurationFormatted()}\n\n";
|
||||
|
||||
if ($session->plan) {
|
||||
$md .= "## Plan\n\n";
|
||||
$md .= "**{$session->plan->title}** ({$session->plan->slug})\n\n";
|
||||
}
|
||||
|
||||
$context = $session->getHandoffContext();
|
||||
if (! empty($context['summary'])) {
|
||||
$md .= "## Summary\n\n{$context['summary']}\n\n";
|
||||
}
|
||||
if (! empty($context['next_steps'])) {
|
||||
$md .= "## Next Steps\n\n";
|
||||
foreach ((array) $context['next_steps'] as $step) {
|
||||
$md .= "- {$step}\n";
|
||||
}
|
||||
$md .= "\n";
|
||||
}
|
||||
if (! empty($context['blockers'])) {
|
||||
$md .= "## Blockers\n\n";
|
||||
foreach ((array) $context['blockers'] as $blocker) {
|
||||
$md .= "- {$blocker}\n";
|
||||
}
|
||||
$md .= "\n";
|
||||
}
|
||||
|
||||
return $md;
|
||||
}
|
||||
|
||||
/**
|
||||
* Render content resources.
|
||||
*/
|
||||
protected function resourceContentItem(string $uri): string
|
||||
{
|
||||
if (! str_starts_with($uri, 'content://')) {
|
||||
return "Resource not found: {$uri}";
|
||||
}
|
||||
|
||||
$path = substr($uri, 10); // Remove "content://"
|
||||
$parts = explode('/', $path, 2);
|
||||
if (count($parts) < 2) {
|
||||
return "Invalid URI format. Expected: content://{workspace}/{slug}";
|
||||
}
|
||||
|
||||
[$workspaceSlug, $contentSlug] = $parts;
|
||||
|
||||
$workspace = Workspace::where('slug', $workspaceSlug)
|
||||
->orWhere('id', $workspaceSlug)
|
||||
->first();
|
||||
|
||||
if (! $workspace) {
|
||||
return "Workspace not found: {$workspaceSlug}";
|
||||
}
|
||||
|
||||
$item = ContentItem::forWorkspace($workspace->id)
|
||||
->native()
|
||||
->where('slug', $contentSlug)
|
||||
->first();
|
||||
|
||||
if (! $item && is_numeric($contentSlug)) {
|
||||
$item = ContentItem::forWorkspace($workspace->id)
|
||||
->native()
|
||||
->find($contentSlug);
|
||||
}
|
||||
|
||||
if (! $item) {
|
||||
return "Content not found: {$contentSlug}";
|
||||
}
|
||||
|
||||
$item->load(['author', 'taxonomies']);
|
||||
|
||||
$md = "---\n";
|
||||
$md .= "title: \"{$item->title}\"\n";
|
||||
$md .= "slug: {$item->slug}\n";
|
||||
$md .= "workspace: {$workspace->slug}\n";
|
||||
$md .= "type: {$item->type}\n";
|
||||
$md .= "status: {$item->status}\n";
|
||||
|
||||
if ($item->author) {
|
||||
$md .= "author: {$item->author->name}\n";
|
||||
}
|
||||
|
||||
$categories = $item->categories->pluck('name')->all();
|
||||
if (! empty($categories)) {
|
||||
$md .= 'categories: ['.implode(', ', $categories)."]\n";
|
||||
}
|
||||
|
||||
$tags = $item->tags->pluck('name')->all();
|
||||
if (! empty($tags)) {
|
||||
$md .= 'tags: ['.implode(', ', $tags)."]\n";
|
||||
}
|
||||
|
||||
if ($item->publish_at) {
|
||||
$md .= 'publish_at: '.$item->publish_at->toIso8601String()."\n";
|
||||
}
|
||||
|
||||
$md .= 'created_at: '.$item->created_at->toIso8601String()."\n";
|
||||
$md .= 'updated_at: '.$item->updated_at->toIso8601String()."\n";
|
||||
|
||||
if ($item->seo_meta) {
|
||||
if (isset($item->seo_meta['title'])) {
|
||||
$md .= "seo_title: \"{$item->seo_meta['title']}\"\n";
|
||||
}
|
||||
if (isset($item->seo_meta['description'])) {
|
||||
$md .= "seo_description: \"{$item->seo_meta['description']}\"\n";
|
||||
}
|
||||
}
|
||||
|
||||
$md .= "---\n\n";
|
||||
|
||||
if ($item->excerpt) {
|
||||
$md .= "> {$item->excerpt}\n\n";
|
||||
}
|
||||
|
||||
$content = $item->content_markdown
|
||||
?? strip_tags($item->content_html_clean ?? $item->content_html_original ?? '');
|
||||
$md .= $content;
|
||||
|
||||
return $md;
|
||||
}
|
||||
|
||||
/**
|
||||
* Render server-defined static resources when available.
|
||||
*/
|
||||
protected function resourceServerContent(string $scheme, string $uri): ?array
|
||||
{
|
||||
$server = $this->loadServerFull($scheme);
|
||||
if (! $server) {
|
||||
return null;
|
||||
}
|
||||
|
||||
foreach ($server['resources'] ?? [] as $resource) {
|
||||
if (($resource['uri'] ?? null) !== $uri) {
|
||||
continue;
|
||||
}
|
||||
|
||||
$text = $resource['content']['text'] ?? $resource['text'] ?? null;
|
||||
if ($text === null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return [
|
||||
'mimeType' => $resource['mimeType'] ?? 'text/plain',
|
||||
'text' => $text,
|
||||
];
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -197,6 +197,35 @@ class OpenApiGenerator
|
|||
],
|
||||
];
|
||||
|
||||
$paths['/servers/{serverId}/resources'] = [
|
||||
'get' => [
|
||||
'tags' => ['Discovery'],
|
||||
'summary' => 'List resources for a server',
|
||||
'operationId' => 'listServerResources',
|
||||
'security' => [['bearerAuth' => []], ['apiKeyAuth' => []]],
|
||||
'parameters' => [
|
||||
[
|
||||
'name' => 'serverId',
|
||||
'in' => 'path',
|
||||
'required' => true,
|
||||
'schema' => ['type' => 'string'],
|
||||
],
|
||||
],
|
||||
'responses' => [
|
||||
'200' => [
|
||||
'description' => 'List of resources',
|
||||
'content' => [
|
||||
'application/json' => [
|
||||
'schema' => [
|
||||
'$ref' => '#/components/schemas/ResourceList',
|
||||
],
|
||||
],
|
||||
],
|
||||
],
|
||||
],
|
||||
],
|
||||
];
|
||||
|
||||
// Execution endpoint
|
||||
$paths['/tools/call'] = [
|
||||
'post' => [
|
||||
|
|
@ -402,6 +431,17 @@ class OpenApiGenerator
|
|||
],
|
||||
],
|
||||
],
|
||||
'ResourceList' => [
|
||||
'type' => 'object',
|
||||
'properties' => [
|
||||
'server' => ['type' => 'string'],
|
||||
'resources' => [
|
||||
'type' => 'array',
|
||||
'items' => ['$ref' => '#/components/schemas/Resource'],
|
||||
],
|
||||
'count' => ['type' => 'integer'],
|
||||
],
|
||||
],
|
||||
];
|
||||
|
||||
return $schemas;
|
||||
|
|
|
|||
|
|
@ -33,6 +33,9 @@ class ToolRegistry
|
|||
'query' => 'SELECT id, name FROM users LIMIT 10',
|
||||
],
|
||||
'list_tables' => [],
|
||||
'describe_table' => [
|
||||
'table' => 'users',
|
||||
],
|
||||
'list_routes' => [],
|
||||
'list_sites' => [],
|
||||
'get_stats' => [],
|
||||
|
|
|
|||
151
src/php/src/Mcp/Tools/DescribeTable.php
Normal file
151
src/php/src/Mcp/Tools/DescribeTable.php
Normal file
|
|
@ -0,0 +1,151 @@
|
|||
<?php
|
||||
|
||||
declare(strict_types=1);
|
||||
|
||||
namespace Core\Mcp\Tools;
|
||||
|
||||
use Illuminate\Contracts\JsonSchema\JsonSchema;
|
||||
use Illuminate\Support\Facades\Config;
|
||||
use Illuminate\Support\Facades\DB;
|
||||
use Laravel\Mcp\Request;
|
||||
use Laravel\Mcp\Response;
|
||||
use Laravel\Mcp\Server\Tool;
|
||||
|
||||
class DescribeTable extends Tool
|
||||
{
|
||||
protected string $description = 'Describe a database table, including columns and indexes';
|
||||
|
||||
public function handle(Request $request): Response
|
||||
{
|
||||
$table = trim((string) $request->input('table', ''));
|
||||
|
||||
if ($table === '') {
|
||||
return $this->errorResponse('Table name is required');
|
||||
}
|
||||
|
||||
if (! $this->isValidTableName($table)) {
|
||||
return $this->errorResponse('Invalid table name. Use only letters, numbers, and underscores.');
|
||||
}
|
||||
|
||||
if ($this->isBlockedTable($table)) {
|
||||
return $this->errorResponse(sprintf("Access to table '%s' is not permitted", $table));
|
||||
}
|
||||
|
||||
try {
|
||||
$columns = DB::select("SHOW FULL COLUMNS FROM `{$table}`");
|
||||
$indexes = DB::select("SHOW INDEX FROM `{$table}`");
|
||||
} catch (\Throwable $e) {
|
||||
report($e);
|
||||
|
||||
return $this->errorResponse(sprintf('Unable to describe table "%s"', $table));
|
||||
}
|
||||
|
||||
$result = [
|
||||
'table' => $table,
|
||||
'columns' => array_map(
|
||||
fn (object $column): array => $this->normaliseColumn((array) $column),
|
||||
$columns
|
||||
),
|
||||
'indexes' => $this->normaliseIndexes($indexes),
|
||||
];
|
||||
|
||||
return Response::text(json_encode($result, JSON_PRETTY_PRINT));
|
||||
}
|
||||
|
||||
public function schema(JsonSchema $schema): array
|
||||
{
|
||||
return [
|
||||
'table' => $schema->string('Database table name to inspect'),
|
||||
];
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate the table name before interpolating it into SQL.
|
||||
*/
|
||||
private function isValidTableName(string $table): bool
|
||||
{
|
||||
return (bool) preg_match('/^[A-Za-z0-9_]+$/', $table);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether the table is blocked by configuration or is a system table.
|
||||
*/
|
||||
private function isBlockedTable(string $table): bool
|
||||
{
|
||||
$blockedTables = Config::get('mcp.database.blocked_tables', []);
|
||||
|
||||
if (in_array($table, $blockedTables, true)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
$systemTables = ['information_schema', 'mysql', 'performance_schema', 'sys'];
|
||||
|
||||
return in_array(strtolower($table), $systemTables, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalise a SHOW FULL COLUMNS row into a predictable array shape.
|
||||
*
|
||||
* @param array<string, mixed> $column
|
||||
* @return array<string, mixed>
|
||||
*/
|
||||
private function normaliseColumn(array $column): array
|
||||
{
|
||||
return [
|
||||
'field' => $column['Field'] ?? null,
|
||||
'type' => $column['Type'] ?? null,
|
||||
'collation' => $column['Collation'] ?? null,
|
||||
'null' => $column['Null'] ?? null,
|
||||
'key' => $column['Key'] ?? null,
|
||||
'default' => $column['Default'] ?? null,
|
||||
'extra' => $column['Extra'] ?? null,
|
||||
'privileges' => $column['Privileges'] ?? null,
|
||||
'comment' => $column['Comment'] ?? null,
|
||||
];
|
||||
}
|
||||
|
||||
/**
|
||||
* Group SHOW INDEX rows by index name.
|
||||
*
|
||||
* @param array<int, object> $indexes
|
||||
* @return array<int, array<string, mixed>>
|
||||
*/
|
||||
private function normaliseIndexes(array $indexes): array
|
||||
{
|
||||
$grouped = [];
|
||||
|
||||
foreach ($indexes as $index) {
|
||||
$row = (array) $index;
|
||||
$name = (string) ($row['Key_name'] ?? 'unknown');
|
||||
|
||||
if (! isset($grouped[$name])) {
|
||||
$grouped[$name] = [
|
||||
'name' => $name,
|
||||
'unique' => ! (bool) ($row['Non_unique'] ?? 1),
|
||||
'type' => $row['Index_type'] ?? null,
|
||||
'columns' => [],
|
||||
];
|
||||
}
|
||||
|
||||
$grouped[$name]['columns'][] = [
|
||||
'name' => $row['Column_name'] ?? null,
|
||||
'order' => $row['Seq_in_index'] ?? null,
|
||||
'collation' => $row['Collation'] ?? null,
|
||||
'cardinality' => $row['Cardinality'] ?? null,
|
||||
'sub_part' => $row['Sub_part'] ?? null,
|
||||
'nullable' => $row['Null'] ?? null,
|
||||
'comment' => $row['Comment'] ?? null,
|
||||
];
|
||||
}
|
||||
|
||||
return array_values($grouped);
|
||||
}
|
||||
|
||||
private function errorResponse(string $message): Response
|
||||
{
|
||||
return Response::text(json_encode([
|
||||
'error' => $message,
|
||||
'code' => 'VALIDATION_ERROR',
|
||||
]));
|
||||
}
|
||||
}
|
||||
113
src/php/tests/Unit/DescribeTableTest.php
Normal file
113
src/php/tests/Unit/DescribeTableTest.php
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
<?php
|
||||
|
||||
declare(strict_types=1);
|
||||
|
||||
namespace Core\Mcp\Tests\Unit;
|
||||
|
||||
use Core\Mcp\Tools\DescribeTable;
|
||||
use Illuminate\Support\Facades\Config;
|
||||
use Illuminate\Support\Facades\DB;
|
||||
use Laravel\Mcp\Request;
|
||||
use Mockery;
|
||||
use Tests\TestCase;
|
||||
|
||||
class DescribeTableTest extends TestCase
{
    protected function tearDown(): void
    {
        Mockery::close();
        parent::tearDown();
    }

    public function test_handle_returns_columns_and_indexes_for_a_table(): void
    {
        DB::shouldReceive('select')
            ->once()
            ->with('SHOW FULL COLUMNS FROM `users`')
            ->andReturn($this->columnFixtures());

        DB::shouldReceive('select')
            ->once()
            ->with('SHOW INDEX FROM `users`')
            ->andReturn($this->indexFixtures());

        $result = $this->describe('users');

        $this->assertSame('users', $result['table']);
        $this->assertCount(2, $result['columns']);
        $this->assertSame('id', $result['columns'][0]['field']);
        $this->assertSame('bigint unsigned', $result['columns'][0]['type']);
        $this->assertSame('PRIMARY', $result['indexes'][0]['name']);
        $this->assertSame(['id'], array_column($result['indexes'][0]['columns'], 'name'));
    }

    public function test_handle_rejects_invalid_table_names(): void
    {
        $result = $this->describe('users; DROP TABLE users');

        $this->assertSame('VALIDATION_ERROR', $result['code']);
        $this->assertStringContainsString('Invalid table name', $result['error']);
    }

    public function test_handle_blocks_system_tables(): void
    {
        // Even with the configurable block list emptied, built-in system
        // tables must stay off limits.
        Config::set('mcp.database.blocked_tables', []);

        $result = $this->describe('information_schema');

        $this->assertSame('VALIDATION_ERROR', $result['code']);
        $this->assertStringContainsString('not permitted', $result['error']);
    }

    /**
     * Run the tool against a table name and decode its JSON payload.
     */
    private function describe(string $table): array
    {
        $response = (new DescribeTable())->handle(new Request(['table' => $table]));

        return json_decode($response->getContent(), true, flags: JSON_THROW_ON_ERROR);
    }

    /**
     * SHOW FULL COLUMNS rows for the fake `users` table.
     */
    private function columnFixtures(): array
    {
        return [
            (object) [
                'Field' => 'id',
                'Type' => 'bigint unsigned',
                'Null' => 'NO',
                'Key' => 'PRI',
                'Default' => null,
                'Extra' => 'auto_increment',
                'Privileges' => 'select,insert,update,references',
                'Comment' => 'Primary key',
            ],
            (object) [
                'Field' => 'email',
                'Type' => 'varchar(255)',
                'Null' => 'NO',
                'Key' => 'UNI',
                'Default' => null,
                'Extra' => '',
                'Privileges' => 'select,insert,update,references',
                'Comment' => '',
            ],
        ];
    }

    /**
     * SHOW INDEX rows for the fake `users` table.
     */
    private function indexFixtures(): array
    {
        return [
            (object) [
                'Key_name' => 'PRIMARY',
                'Non_unique' => 0,
                'Index_type' => 'BTREE',
                'Column_name' => 'id',
                'Seq_in_index' => 1,
                'Collation' => 'A',
                'Cardinality' => 1,
                'Sub_part' => null,
                'Null' => '',
                'Comment' => '',
            ],
            (object) [
                'Key_name' => 'users_email_unique',
                'Non_unique' => 0,
                'Index_type' => 'BTREE',
                'Column_name' => 'email',
                'Seq_in_index' => 1,
                'Collation' => 'A',
                'Cardinality' => 1,
                'Sub_part' => null,
                'Null' => '',
                'Comment' => '',
            ],
        ];
    }
}
|
||||
67
src/php/tests/Unit/McpResourceListTest.php
Normal file
67
src/php/tests/Unit/McpResourceListTest.php
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
<?php
|
||||
|
||||
declare(strict_types=1);
|
||||
|
||||
namespace Core\Mcp\Tests\Unit;
|
||||
|
||||
use Core\Mcp\Controllers\McpApiController;
|
||||
use Core\Mcp\Services\OpenApiGenerator;
|
||||
use Illuminate\Http\JsonResponse;
|
||||
use Illuminate\Http\Request;
|
||||
use Tests\TestCase;
|
||||
|
||||
class McpResourceListTest extends TestCase
{
    public function test_resources_endpoint_returns_server_resources(): void
    {
        $controller = $this->controllerWithDemoServer();

        $request = Request::create('/api/v1/mcp/servers/demo-server/resources', 'GET');
        $response = $controller->resources($request, 'demo-server');

        $this->assertInstanceOf(JsonResponse::class, $response);
        $this->assertSame(200, $response->getStatusCode());

        $payload = $response->getData(true);
        $this->assertSame('demo-server', $payload['server']);
        $this->assertSame(2, $payload['count']);
        $this->assertSame('content://workspace/article', $payload['resources'][0]['uri']);
        $this->assertSame('plans://all', $payload['resources'][1]['uri']);
    }

    public function test_openapi_includes_resource_list_endpoint(): void
    {
        $schema = (new OpenApiGenerator)->generate();

        $this->assertArrayHasKey('/servers/{serverId}/resources', $schema['paths']);
        $this->assertArrayHasKey('ResourceList', $schema['components']['schemas']);
        $this->assertSame(
            '#/components/schemas/ResourceList',
            $schema['paths']['/servers/{serverId}/resources']['get']['responses']['200']['content']['application/json']['schema']['$ref']
        );
    }

    /**
     * Anonymous controller subclass whose only known server is demo-server,
     * carrying two stub resources.
     */
    private function controllerWithDemoServer(): McpApiController
    {
        return new class extends McpApiController {
            protected function loadServerFull(string $id): ?array
            {
                if ($id !== 'demo-server') {
                    return null;
                }

                return [
                    'id' => 'demo-server',
                    'resources' => [
                        [
                            'uri' => 'content://workspace/article',
                            'name' => 'Article',
                            'description' => 'Published article',
                            'mimeType' => 'text/markdown',
                        ],
                        [
                            'uri' => 'plans://all',
                            'name' => 'Plans',
                            'description' => 'Work plan index',
                            'mimeType' => 'text/markdown',
                        ],
                    ],
                ];
            }
        };
    }
}
||||
}
|
||||
Loading…
Add table
Reference in a new issue