Compare commits
1 commit
dev
...
ax/review-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
899a532d76 |
80 changed files with 1491 additions and 7844 deletions
|
|
@ -16,17 +16,15 @@ package main
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
|
||||||
goio "io"
|
goio "io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath" // needed for WalkDir (no core equivalent)
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
)
|
)
|
||||||
|
|
@ -57,53 +55,47 @@ var httpClient = &http.Client{
|
||||||
func main() {
|
func main() {
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
fmt.Println("OpenBrain Seed — MCP API Client")
|
os.Stdout.Write([]byte("OpenBrain Seed — MCP API Client\n"))
|
||||||
fmt.Println(strings.Repeat("=", 55))
|
os.Stdout.Write([]byte(core.Sprintf("%s\n", repeatChar('=', 55))))
|
||||||
|
|
||||||
if *apiKey == "" && !*dryRun {
|
if *apiKey == "" && !*dryRun {
|
||||||
fmt.Println("ERROR: -api-key is required (or use -dry-run)")
|
os.Stdout.Write([]byte("ERROR: -api-key is required (or use -dry-run)\n"))
|
||||||
fmt.Println(" Generate one at: https://lthn.sh/admin/mcp/api-keys")
|
os.Stdout.Write([]byte(" Generate one at: https://lthn.sh/admin/mcp/api-keys\n"))
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
if *dryRun {
|
if *dryRun {
|
||||||
fmt.Println("[DRY RUN] — no data will be stored")
|
os.Stdout.Write([]byte("[DRY RUN] — no data will be stored\n"))
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("API: %s\n", *apiURL)
|
os.Stdout.Write([]byte(core.Sprintf("API: %s\n", *apiURL)))
|
||||||
fmt.Printf("Server: %s | Agent: %s\n", *server, *agent)
|
os.Stdout.Write([]byte(core.Sprintf("Server: %s | Agent: %s\n", *server, *agent)))
|
||||||
|
|
||||||
// Discover memory files
|
// Discover memory files
|
||||||
memPath := *memoryPath
|
memPath := *memoryPath
|
||||||
if memPath == "" {
|
if memPath == "" {
|
||||||
home, _ := os.UserHomeDir()
|
memPath = core.JoinPath(core.Env("HOME"), ".claude", "projects", "*", "memory")
|
||||||
memPath = filepath.Join(home, ".claude", "projects", "*", "memory")
|
|
||||||
}
|
}
|
||||||
memFiles, _ := filepath.Glob(filepath.Join(memPath, "*.md"))
|
memFiles := core.PathGlob(core.JoinPath(memPath, "*.md"))
|
||||||
fmt.Printf("\nFound %d memory files\n", len(memFiles))
|
os.Stdout.Write([]byte(core.Sprintf("\nFound %d memory files\n", len(memFiles))))
|
||||||
|
|
||||||
// Discover plan files
|
// Discover plan files
|
||||||
var planFiles []string
|
var planFiles []string
|
||||||
if *plans {
|
if *plans {
|
||||||
pPath := *planPath
|
pPath := *planPath
|
||||||
if pPath == "" {
|
if pPath == "" {
|
||||||
home, _ := os.UserHomeDir()
|
pPath = core.JoinPath(core.Env("HOME"), "Code", "*", "docs", "plans")
|
||||||
pPath = filepath.Join(home, "Code", "*", "docs", "plans")
|
|
||||||
}
|
}
|
||||||
planFiles, _ = filepath.Glob(filepath.Join(pPath, "*.md"))
|
planFiles = append(planFiles, core.PathGlob(core.JoinPath(pPath, "*.md"))...)
|
||||||
// Also check nested dirs (completed/, etc.)
|
// Also check nested dirs (completed/, etc.)
|
||||||
nested, _ := filepath.Glob(filepath.Join(pPath, "*", "*.md"))
|
planFiles = append(planFiles, core.PathGlob(core.JoinPath(pPath, "*", "*.md"))...)
|
||||||
planFiles = append(planFiles, nested...)
|
|
||||||
|
|
||||||
// Also check host-uk nested repos
|
// Also check host-uk nested repos
|
||||||
home, _ := os.UserHomeDir()
|
hostUkPath := core.JoinPath(core.Env("HOME"), "Code", "host-uk", "*", "docs", "plans")
|
||||||
hostUkPath := filepath.Join(home, "Code", "host-uk", "*", "docs", "plans")
|
planFiles = append(planFiles, core.PathGlob(core.JoinPath(hostUkPath, "*.md"))...)
|
||||||
hostUkFiles, _ := filepath.Glob(filepath.Join(hostUkPath, "*.md"))
|
planFiles = append(planFiles, core.PathGlob(core.JoinPath(hostUkPath, "*", "*.md"))...)
|
||||||
planFiles = append(planFiles, hostUkFiles...)
|
|
||||||
hostUkNested, _ := filepath.Glob(filepath.Join(hostUkPath, "*", "*.md"))
|
|
||||||
planFiles = append(planFiles, hostUkNested...)
|
|
||||||
|
|
||||||
fmt.Printf("Found %d plan files\n", len(planFiles))
|
os.Stdout.Write([]byte(core.Sprintf("Found %d plan files\n", len(planFiles))))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Discover CLAUDE.md files
|
// Discover CLAUDE.md files
|
||||||
|
|
@ -111,11 +103,10 @@ func main() {
|
||||||
if *claudeMd {
|
if *claudeMd {
|
||||||
cPath := *codePath
|
cPath := *codePath
|
||||||
if cPath == "" {
|
if cPath == "" {
|
||||||
home, _ := os.UserHomeDir()
|
cPath = core.JoinPath(core.Env("HOME"), "Code")
|
||||||
cPath = filepath.Join(home, "Code")
|
|
||||||
}
|
}
|
||||||
claudeFiles = discoverClaudeMdFiles(cPath)
|
claudeFiles = discoverClaudeMdFiles(cPath)
|
||||||
fmt.Printf("Found %d CLAUDE.md files\n", len(claudeFiles))
|
os.Stdout.Write([]byte(core.Sprintf("Found %d CLAUDE.md files\n", len(claudeFiles))))
|
||||||
}
|
}
|
||||||
|
|
||||||
imported := 0
|
imported := 0
|
||||||
|
|
@ -123,11 +114,11 @@ func main() {
|
||||||
errors := 0
|
errors := 0
|
||||||
|
|
||||||
// Process memory files
|
// Process memory files
|
||||||
fmt.Println("\n--- Memory Files ---")
|
os.Stdout.Write([]byte("\n--- Memory Files ---\n"))
|
||||||
for _, f := range memFiles {
|
for _, f := range memFiles {
|
||||||
project := extractProject(f)
|
project := extractProject(f)
|
||||||
sections := parseMarkdownSections(f)
|
sections := parseMarkdownSections(f)
|
||||||
filename := strings.TrimSuffix(filepath.Base(f), ".md")
|
filename := core.TrimSuffix(core.PathBase(f), ".md")
|
||||||
|
|
||||||
if len(sections) == 0 {
|
if len(sections) == 0 {
|
||||||
coreerr.Warn("brain-seed: skip file (no sections)", "project", project, "file", filename)
|
coreerr.Warn("brain-seed: skip file (no sections)", "project", project, "file", filename)
|
||||||
|
|
@ -137,7 +128,7 @@ func main() {
|
||||||
|
|
||||||
for _, sec := range sections {
|
for _, sec := range sections {
|
||||||
content := sec.heading + "\n\n" + sec.content
|
content := sec.heading + "\n\n" + sec.content
|
||||||
if strings.TrimSpace(sec.content) == "" {
|
if core.Trim(sec.content) == "" {
|
||||||
skipped++
|
skipped++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -150,8 +141,8 @@ func main() {
|
||||||
content = truncate(content, *maxChars)
|
content = truncate(content, *maxChars)
|
||||||
|
|
||||||
if *dryRun {
|
if *dryRun {
|
||||||
fmt.Printf(" [DRY] %s/%s :: %s (%s) — %d chars\n",
|
os.Stdout.Write([]byte(core.Sprintf(" [DRY] %s/%s :: %s (%s) — %d chars\n",
|
||||||
project, filename, sec.heading, memType, len(content))
|
project, filename, sec.heading, memType, len(content))))
|
||||||
imported++
|
imported++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -161,18 +152,18 @@ func main() {
|
||||||
errors++
|
errors++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fmt.Printf(" ok %s/%s :: %s (%s)\n", project, filename, sec.heading, memType)
|
os.Stdout.Write([]byte(core.Sprintf(" ok %s/%s :: %s (%s)\n", project, filename, sec.heading, memType)))
|
||||||
imported++
|
imported++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process plan files
|
// Process plan files
|
||||||
if *plans && len(planFiles) > 0 {
|
if *plans && len(planFiles) > 0 {
|
||||||
fmt.Println("\n--- Plan Documents ---")
|
os.Stdout.Write([]byte("\n--- Plan Documents ---\n"))
|
||||||
for _, f := range planFiles {
|
for _, f := range planFiles {
|
||||||
project := extractProjectFromPlan(f)
|
project := extractProjectFromPlan(f)
|
||||||
sections := parseMarkdownSections(f)
|
sections := parseMarkdownSections(f)
|
||||||
filename := strings.TrimSuffix(filepath.Base(f), ".md")
|
filename := core.TrimSuffix(core.PathBase(f), ".md")
|
||||||
|
|
||||||
if len(sections) == 0 {
|
if len(sections) == 0 {
|
||||||
skipped++
|
skipped++
|
||||||
|
|
@ -181,7 +172,7 @@ func main() {
|
||||||
|
|
||||||
for _, sec := range sections {
|
for _, sec := range sections {
|
||||||
content := sec.heading + "\n\n" + sec.content
|
content := sec.heading + "\n\n" + sec.content
|
||||||
if strings.TrimSpace(sec.content) == "" {
|
if core.Trim(sec.content) == "" {
|
||||||
skipped++
|
skipped++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -190,8 +181,8 @@ func main() {
|
||||||
content = truncate(content, *maxChars)
|
content = truncate(content, *maxChars)
|
||||||
|
|
||||||
if *dryRun {
|
if *dryRun {
|
||||||
fmt.Printf(" [DRY] %s :: %s / %s (plan) — %d chars\n",
|
os.Stdout.Write([]byte(core.Sprintf(" [DRY] %s :: %s / %s (plan) — %d chars\n",
|
||||||
project, filename, sec.heading, len(content))
|
project, filename, sec.heading, len(content))))
|
||||||
imported++
|
imported++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -201,7 +192,7 @@ func main() {
|
||||||
errors++
|
errors++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fmt.Printf(" ok %s :: %s / %s (plan)\n", project, filename, sec.heading)
|
os.Stdout.Write([]byte(core.Sprintf(" ok %s :: %s / %s (plan)\n", project, filename, sec.heading)))
|
||||||
imported++
|
imported++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -209,7 +200,7 @@ func main() {
|
||||||
|
|
||||||
// Process CLAUDE.md files
|
// Process CLAUDE.md files
|
||||||
if *claudeMd && len(claudeFiles) > 0 {
|
if *claudeMd && len(claudeFiles) > 0 {
|
||||||
fmt.Println("\n--- CLAUDE.md Files ---")
|
os.Stdout.Write([]byte("\n--- CLAUDE.md Files ---\n"))
|
||||||
for _, f := range claudeFiles {
|
for _, f := range claudeFiles {
|
||||||
project := extractProjectFromClaudeMd(f)
|
project := extractProjectFromClaudeMd(f)
|
||||||
sections := parseMarkdownSections(f)
|
sections := parseMarkdownSections(f)
|
||||||
|
|
@ -221,7 +212,7 @@ func main() {
|
||||||
|
|
||||||
for _, sec := range sections {
|
for _, sec := range sections {
|
||||||
content := sec.heading + "\n\n" + sec.content
|
content := sec.heading + "\n\n" + sec.content
|
||||||
if strings.TrimSpace(sec.content) == "" {
|
if core.Trim(sec.content) == "" {
|
||||||
skipped++
|
skipped++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -230,8 +221,8 @@ func main() {
|
||||||
content = truncate(content, *maxChars)
|
content = truncate(content, *maxChars)
|
||||||
|
|
||||||
if *dryRun {
|
if *dryRun {
|
||||||
fmt.Printf(" [DRY] %s :: CLAUDE.md / %s (convention) — %d chars\n",
|
os.Stdout.Write([]byte(core.Sprintf(" [DRY] %s :: CLAUDE.md / %s (convention) — %d chars\n",
|
||||||
project, sec.heading, len(content))
|
project, sec.heading, len(content))))
|
||||||
imported++
|
imported++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -241,18 +232,18 @@ func main() {
|
||||||
errors++
|
errors++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fmt.Printf(" ok %s :: CLAUDE.md / %s (convention)\n", project, sec.heading)
|
os.Stdout.Write([]byte(core.Sprintf(" ok %s :: CLAUDE.md / %s (convention)\n", project, sec.heading)))
|
||||||
imported++
|
imported++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("\n%s\n", strings.Repeat("=", 55))
|
os.Stdout.Write([]byte(core.Sprintf("\n%s\n", repeatChar('=', 55))))
|
||||||
prefix := ""
|
prefix := ""
|
||||||
if *dryRun {
|
if *dryRun {
|
||||||
prefix = "[DRY RUN] "
|
prefix = "[DRY RUN] "
|
||||||
}
|
}
|
||||||
fmt.Printf("%sImported: %d | Skipped: %d | Errors: %d\n", prefix, imported, skipped, errors)
|
os.Stdout.Write([]byte(core.Sprintf("%sImported: %d | Skipped: %d | Errors: %d\n", prefix, imported, skipped, errors)))
|
||||||
}
|
}
|
||||||
|
|
||||||
// callBrainRemember sends a memory to the MCP API via brain_remember tool.
|
// callBrainRemember sends a memory to the MCP API via brain_remember tool.
|
||||||
|
|
@ -273,10 +264,7 @@ func callBrainRemember(content, memType string, tags []string, project string, c
|
||||||
"arguments": args,
|
"arguments": args,
|
||||||
}
|
}
|
||||||
|
|
||||||
body, err := json.Marshal(payload)
|
body := []byte(core.JSONMarshalString(payload))
|
||||||
if err != nil {
|
|
||||||
return coreerr.E("callBrainRemember", "marshal", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("POST", *apiURL+"/tools/call", bytes.NewReader(body))
|
req, err := http.NewRequest("POST", *apiURL+"/tools/call", bytes.NewReader(body))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -301,8 +289,9 @@ func callBrainRemember(content, memType string, tags []string, project string, c
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Error string `json:"error"`
|
Error string `json:"error"`
|
||||||
}
|
}
|
||||||
if err := json.Unmarshal(respBody, &result); err != nil {
|
r := core.JSONUnmarshalString(string(respBody), &result)
|
||||||
return coreerr.E("callBrainRemember", "decode", err)
|
if !r.OK {
|
||||||
|
return coreerr.E("callBrainRemember", "decode", nil)
|
||||||
}
|
}
|
||||||
if !result.Success {
|
if !result.Success {
|
||||||
return coreerr.E("callBrainRemember", "API: "+result.Error, nil)
|
return coreerr.E("callBrainRemember", "API: "+result.Error, nil)
|
||||||
|
|
@ -312,19 +301,26 @@ func callBrainRemember(content, memType string, tags []string, project string, c
|
||||||
}
|
}
|
||||||
|
|
||||||
// truncate caps content to maxLen chars, appending an ellipsis if truncated.
|
// truncate caps content to maxLen chars, appending an ellipsis if truncated.
|
||||||
|
//
|
||||||
|
// truncate("hello world", 5) // "hello…"
|
||||||
func truncate(s string, maxLen int) string {
|
func truncate(s string, maxLen int) string {
|
||||||
if len(s) <= maxLen {
|
if len(s) <= maxLen {
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
// Find last space before limit to avoid splitting mid-word
|
// Find last space before limit to avoid splitting mid-word
|
||||||
cut := maxLen
|
cut := maxLen
|
||||||
if idx := strings.LastIndex(s[:maxLen], " "); idx > maxLen-200 {
|
for i := maxLen - 1; i > maxLen-200 && i >= 0; i-- {
|
||||||
cut = idx
|
if s[i] == ' ' {
|
||||||
|
cut = i
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return s[:cut] + "…"
|
return s[:cut] + "…"
|
||||||
}
|
}
|
||||||
|
|
||||||
// discoverClaudeMdFiles finds CLAUDE.md files across a code directory.
|
// discoverClaudeMdFiles finds CLAUDE.md files across a code directory.
|
||||||
|
//
|
||||||
|
// files := discoverClaudeMdFiles(core.JoinPath(core.Env("HOME"), "Code"))
|
||||||
func discoverClaudeMdFiles(codePath string) []string {
|
func discoverClaudeMdFiles(codePath string) []string {
|
||||||
var files []string
|
var files []string
|
||||||
|
|
||||||
|
|
@ -338,9 +334,15 @@ func discoverClaudeMdFiles(codePath string) []string {
|
||||||
if name == "node_modules" || name == "vendor" || name == ".claude" {
|
if name == "node_modules" || name == "vendor" || name == ".claude" {
|
||||||
return filepath.SkipDir
|
return filepath.SkipDir
|
||||||
}
|
}
|
||||||
// Limit depth
|
// Limit depth by counting separators manually
|
||||||
rel, _ := filepath.Rel(codePath, path)
|
rel, _ := filepath.Rel(codePath, path)
|
||||||
if strings.Count(rel, string(os.PathSeparator)) > 3 {
|
depth := 0
|
||||||
|
for _, ch := range rel {
|
||||||
|
if ch == os.PathSeparator {
|
||||||
|
depth++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if depth > 3 {
|
||||||
return filepath.SkipDir
|
return filepath.SkipDir
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
@ -363,6 +365,8 @@ type section struct {
|
||||||
var headingRe = regexp.MustCompile(`^#{1,3}\s+(.+)$`)
|
var headingRe = regexp.MustCompile(`^#{1,3}\s+(.+)$`)
|
||||||
|
|
||||||
// parseMarkdownSections splits a markdown file by headings.
|
// parseMarkdownSections splits a markdown file by headings.
|
||||||
|
//
|
||||||
|
// sections := parseMarkdownSections("/path/to/MEMORY.md")
|
||||||
func parseMarkdownSections(path string) []section {
|
func parseMarkdownSections(path string) []section {
|
||||||
data, err := coreio.Local.Read(path)
|
data, err := coreio.Local.Read(path)
|
||||||
if err != nil || len(data) == 0 {
|
if err != nil || len(data) == 0 {
|
||||||
|
|
@ -370,19 +374,19 @@ func parseMarkdownSections(path string) []section {
|
||||||
}
|
}
|
||||||
|
|
||||||
var sections []section
|
var sections []section
|
||||||
lines := strings.Split(data, "\n")
|
lines := core.Split(data, "\n")
|
||||||
var curHeading string
|
var curHeading string
|
||||||
var curContent []string
|
var curContent []string
|
||||||
|
|
||||||
for _, line := range lines {
|
for _, line := range lines {
|
||||||
if m := headingRe.FindStringSubmatch(line); m != nil {
|
if m := headingRe.FindStringSubmatch(line); m != nil {
|
||||||
if curHeading != "" && len(curContent) > 0 {
|
if curHeading != "" && len(curContent) > 0 {
|
||||||
text := strings.TrimSpace(strings.Join(curContent, "\n"))
|
text := core.Trim(joinLines(curContent))
|
||||||
if text != "" {
|
if text != "" {
|
||||||
sections = append(sections, section{curHeading, text})
|
sections = append(sections, section{curHeading, text})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
curHeading = strings.TrimSpace(m[1])
|
curHeading = core.Trim(m[1])
|
||||||
curContent = nil
|
curContent = nil
|
||||||
} else {
|
} else {
|
||||||
curContent = append(curContent, line)
|
curContent = append(curContent, line)
|
||||||
|
|
@ -391,17 +395,17 @@ func parseMarkdownSections(path string) []section {
|
||||||
|
|
||||||
// Flush last section
|
// Flush last section
|
||||||
if curHeading != "" && len(curContent) > 0 {
|
if curHeading != "" && len(curContent) > 0 {
|
||||||
text := strings.TrimSpace(strings.Join(curContent, "\n"))
|
text := core.Trim(joinLines(curContent))
|
||||||
if text != "" {
|
if text != "" {
|
||||||
sections = append(sections, section{curHeading, text})
|
sections = append(sections, section{curHeading, text})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no headings found, treat entire file as one section
|
// If no headings found, treat entire file as one section
|
||||||
if len(sections) == 0 && strings.TrimSpace(data) != "" {
|
if len(sections) == 0 && core.Trim(data) != "" {
|
||||||
sections = append(sections, section{
|
sections = append(sections, section{
|
||||||
heading: strings.TrimSuffix(filepath.Base(path), ".md"),
|
heading: core.TrimSuffix(core.PathBase(path), ".md"),
|
||||||
content: strings.TrimSpace(data),
|
content: core.Trim(data),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -459,7 +463,7 @@ func inferType(heading, content, source string) string {
|
||||||
return "convention"
|
return "convention"
|
||||||
}
|
}
|
||||||
|
|
||||||
lower := strings.ToLower(heading + " " + content)
|
lower := toLower(heading + " " + content)
|
||||||
patterns := map[string][]string{
|
patterns := map[string][]string{
|
||||||
"architecture": {"architecture", "stack", "infrastructure", "layer", "service mesh"},
|
"architecture": {"architecture", "stack", "infrastructure", "layer", "service mesh"},
|
||||||
"convention": {"convention", "standard", "naming", "pattern", "rule", "coding"},
|
"convention": {"convention", "standard", "naming", "pattern", "rule", "coding"},
|
||||||
|
|
@ -468,10 +472,10 @@ func inferType(heading, content, source string) string {
|
||||||
"plan": {"plan", "todo", "roadmap", "milestone", "phase", "task"},
|
"plan": {"plan", "todo", "roadmap", "milestone", "phase", "task"},
|
||||||
"research": {"research", "finding", "discovery", "analysis", "rfc"},
|
"research": {"research", "finding", "discovery", "analysis", "rfc"},
|
||||||
}
|
}
|
||||||
for t, keywords := range patterns {
|
for memoryType, keywords := range patterns {
|
||||||
for _, kw := range keywords {
|
for _, kw := range keywords {
|
||||||
if strings.Contains(lower, kw) {
|
if core.Contains(lower, kw) {
|
||||||
return t
|
return memoryType
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -485,7 +489,7 @@ func buildTags(filename, source, project string) []string {
|
||||||
tags = append(tags, "project:"+project)
|
tags = append(tags, "project:"+project)
|
||||||
}
|
}
|
||||||
if filename != "MEMORY" && filename != "CLAUDE" {
|
if filename != "MEMORY" && filename != "CLAUDE" {
|
||||||
tags = append(tags, strings.ReplaceAll(strings.ReplaceAll(filename, "-", " "), "_", " "))
|
tags = append(tags, core.Replace(core.Replace(filename, "-", " "), "_", " "))
|
||||||
}
|
}
|
||||||
return tags
|
return tags
|
||||||
}
|
}
|
||||||
|
|
@ -503,3 +507,43 @@ func confidenceForSource(source string) float64 {
|
||||||
return 0.5
|
return 0.5
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// repeatChar returns a string of n repetitions of ch.
|
||||||
|
//
|
||||||
|
// repeatChar('=', 3) // "==="
|
||||||
|
func repeatChar(ch byte, n int) string {
|
||||||
|
b := core.NewBuilder()
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
b.WriteByte(ch)
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// joinLines joins a slice of lines with newline separators.
|
||||||
|
//
|
||||||
|
// joinLines([]string{"a", "b"}) // "a\nb"
|
||||||
|
func joinLines(lines []string) string {
|
||||||
|
b := core.NewBuilder()
|
||||||
|
for i, line := range lines {
|
||||||
|
if i > 0 {
|
||||||
|
b.WriteByte('\n')
|
||||||
|
}
|
||||||
|
b.WriteString(line)
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// toLower converts a string to lowercase ASCII.
|
||||||
|
//
|
||||||
|
// toLower("Hello World") // "hello world"
|
||||||
|
func toLower(s string) string {
|
||||||
|
b := core.NewBuilder()
|
||||||
|
for _, ch := range s {
|
||||||
|
if ch >= 'A' && ch <= 'Z' {
|
||||||
|
b.WriteRune(ch + 32)
|
||||||
|
} else {
|
||||||
|
b.WriteRune(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -10,22 +10,13 @@ import (
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
|
"forge.lthn.ai/core/cli/pkg/cli"
|
||||||
"dappco.re/go/mcp/pkg/mcp"
|
"dappco.re/go/mcp/pkg/mcp"
|
||||||
"dappco.re/go/mcp/pkg/mcp/agentic"
|
"dappco.re/go/mcp/pkg/mcp/agentic"
|
||||||
"dappco.re/go/mcp/pkg/mcp/brain"
|
"dappco.re/go/mcp/pkg/mcp/brain"
|
||||||
"forge.lthn.ai/core/cli/pkg/cli"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var workspaceFlag string
|
var workspaceFlag string
|
||||||
var unrestrictedFlag bool
|
|
||||||
|
|
||||||
var newMCPService = mcp.New
|
|
||||||
var runMCPService = func(svc *mcp.Service, ctx context.Context) error {
|
|
||||||
return svc.Run(ctx)
|
|
||||||
}
|
|
||||||
var shutdownMCPService = func(svc *mcp.Service, ctx context.Context) error {
|
|
||||||
return svc.Shutdown(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
var mcpCmd = &cli.Command{
|
var mcpCmd = &cli.Command{
|
||||||
Use: "mcp",
|
Use: "mcp",
|
||||||
|
|
@ -36,19 +27,13 @@ var mcpCmd = &cli.Command{
|
||||||
var serveCmd = &cli.Command{
|
var serveCmd = &cli.Command{
|
||||||
Use: "serve",
|
Use: "serve",
|
||||||
Short: "Start the MCP server",
|
Short: "Start the MCP server",
|
||||||
Long: `Start the MCP server on stdio (default), TCP, Unix socket, or HTTP.
|
Long: `Start the MCP server on stdio (default) or TCP.
|
||||||
|
|
||||||
The server provides file operations plus the brain and agentic subsystems
|
The server provides file operations, RAG tools, and metrics tools for AI assistants.
|
||||||
registered by this command.
|
|
||||||
|
|
||||||
Environment variables:
|
Environment variables:
|
||||||
MCP_ADDR TCP address to listen on (e.g., "localhost:9999")
|
MCP_ADDR TCP address to listen on (e.g., "localhost:9999")
|
||||||
MCP_UNIX_SOCKET
|
If not set, uses stdio transport.
|
||||||
Unix socket path to listen on (e.g., "/tmp/core-mcp.sock")
|
|
||||||
Selected after MCP_ADDR and before stdio.
|
|
||||||
MCP_HTTP_ADDR
|
|
||||||
HTTP address to listen on (e.g., "127.0.0.1:9101")
|
|
||||||
Selected before MCP_ADDR and stdio.
|
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
# Start with stdio transport (for Claude Code integration)
|
# Start with stdio transport (for Claude Code integration)
|
||||||
|
|
@ -57,9 +42,6 @@ Examples:
|
||||||
# Start with workspace restriction
|
# Start with workspace restriction
|
||||||
core mcp serve --workspace /path/to/project
|
core mcp serve --workspace /path/to/project
|
||||||
|
|
||||||
# Start unrestricted (explicit opt-in)
|
|
||||||
core mcp serve --unrestricted
|
|
||||||
|
|
||||||
# Start TCP server
|
# Start TCP server
|
||||||
MCP_ADDR=localhost:9999 core mcp serve`,
|
MCP_ADDR=localhost:9999 core mcp serve`,
|
||||||
RunE: func(cmd *cli.Command, args []string) error {
|
RunE: func(cmd *cli.Command, args []string) error {
|
||||||
|
|
@ -68,8 +50,7 @@ Examples:
|
||||||
}
|
}
|
||||||
|
|
||||||
func initFlags() {
|
func initFlags() {
|
||||||
cli.StringFlag(serveCmd, &workspaceFlag, "workspace", "w", "", "Restrict file operations to this directory")
|
cli.StringFlag(serveCmd, &workspaceFlag, "workspace", "w", "", "Restrict file operations to this directory (empty = unrestricted)")
|
||||||
cli.BoolFlag(serveCmd, &unrestrictedFlag, "unrestricted", "", false, "Disable filesystem sandboxing entirely")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddMCPCommands registers the 'mcp' command and all subcommands.
|
// AddMCPCommands registers the 'mcp' command and all subcommands.
|
||||||
|
|
@ -82,10 +63,11 @@ func AddMCPCommands(root *cli.Command) {
|
||||||
func runServe() error {
|
func runServe() error {
|
||||||
opts := mcp.Options{}
|
opts := mcp.Options{}
|
||||||
|
|
||||||
if unrestrictedFlag {
|
if workspaceFlag != "" {
|
||||||
opts.Unrestricted = true
|
|
||||||
} else if workspaceFlag != "" {
|
|
||||||
opts.WorkspaceRoot = workspaceFlag
|
opts.WorkspaceRoot = workspaceFlag
|
||||||
|
} else {
|
||||||
|
// Explicitly unrestricted when no workspace specified
|
||||||
|
opts.Unrestricted = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register OpenBrain and agentic subsystems
|
// Register OpenBrain and agentic subsystems
|
||||||
|
|
@ -95,13 +77,10 @@ func runServe() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create the MCP service
|
// Create the MCP service
|
||||||
svc, err := newMCPService(opts)
|
svc, err := mcp.New(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cli.Wrap(err, "create MCP service")
|
return cli.Wrap(err, "create MCP service")
|
||||||
}
|
}
|
||||||
defer func() {
|
|
||||||
_ = shutdownMCPService(svc, context.Background())
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Set up signal handling for clean shutdown
|
// Set up signal handling for clean shutdown
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
@ -116,5 +95,5 @@ func runServe() error {
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Run the server (blocks until context cancelled or error)
|
// Run the server (blocks until context cancelled or error)
|
||||||
return runMCPService(svc, ctx)
|
return svc.Run(ctx)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,52 +0,0 @@
|
||||||
package mcpcmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"dappco.re/go/mcp/pkg/mcp"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestRunServe_Good_ShutsDownService(t *testing.T) {
|
|
||||||
oldNew := newMCPService
|
|
||||||
oldRun := runMCPService
|
|
||||||
oldShutdown := shutdownMCPService
|
|
||||||
oldWorkspace := workspaceFlag
|
|
||||||
oldUnrestricted := unrestrictedFlag
|
|
||||||
|
|
||||||
t.Cleanup(func() {
|
|
||||||
newMCPService = oldNew
|
|
||||||
runMCPService = oldRun
|
|
||||||
shutdownMCPService = oldShutdown
|
|
||||||
workspaceFlag = oldWorkspace
|
|
||||||
unrestrictedFlag = oldUnrestricted
|
|
||||||
})
|
|
||||||
|
|
||||||
workspaceFlag = ""
|
|
||||||
unrestrictedFlag = false
|
|
||||||
|
|
||||||
var runCalled bool
|
|
||||||
var shutdownCalled bool
|
|
||||||
|
|
||||||
newMCPService = func(opts mcp.Options) (*mcp.Service, error) {
|
|
||||||
return mcp.New(mcp.Options{})
|
|
||||||
}
|
|
||||||
runMCPService = func(svc *mcp.Service, ctx context.Context) error {
|
|
||||||
runCalled = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
shutdownMCPService = func(svc *mcp.Service, ctx context.Context) error {
|
|
||||||
shutdownCalled = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := runServe(); err != nil {
|
|
||||||
t.Fatalf("runServe() returned error: %v", err)
|
|
||||||
}
|
|
||||||
if !runCalled {
|
|
||||||
t.Fatal("expected runMCPService to be called")
|
|
||||||
}
|
|
||||||
if !shutdownCalled {
|
|
||||||
t.Fatal("expected shutdownMCPService to be called")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -226,7 +226,7 @@ The `McpApiController` exposes five endpoints behind `mcp.auth` middleware:
|
||||||
| `GET` | `/servers/{id}.json` | Server details with tool definitions |
|
| `GET` | `/servers/{id}.json` | Server details with tool definitions |
|
||||||
| `GET` | `/servers/{id}/tools` | List tools for a server |
|
| `GET` | `/servers/{id}/tools` | List tools for a server |
|
||||||
| `POST` | `/tools/call` | Execute a tool |
|
| `POST` | `/tools/call` | Execute a tool |
|
||||||
| `GET` | `/resources/{uri}` | Read a resource |
|
| `GET` | `/resources/{uri}` | Read a resource (not yet implemented -- returns 501) |
|
||||||
|
|
||||||
`POST /tools/call` accepts:
|
`POST /tools/call` accepts:
|
||||||
|
|
||||||
|
|
|
||||||
24
go.mod
24
go.mod
|
|
@ -4,15 +4,15 @@ go 1.26.0
|
||||||
|
|
||||||
require (
|
require (
|
||||||
dappco.re/go/core v0.8.0-alpha.1
|
dappco.re/go/core v0.8.0-alpha.1
|
||||||
dappco.re/go/core/api v0.1.5
|
forge.lthn.ai/core/api v0.1.5
|
||||||
dappco.re/go/core/cli v0.3.7
|
forge.lthn.ai/core/cli v0.3.7
|
||||||
dappco.re/go/core/ai v0.1.12
|
forge.lthn.ai/core/go-ai v0.1.12
|
||||||
dappco.re/go/core/io v0.1.7
|
forge.lthn.ai/core/go-io v0.1.7
|
||||||
dappco.re/go/core/log v0.0.4
|
forge.lthn.ai/core/go-log v0.0.4
|
||||||
dappco.re/go/core/process v0.2.9
|
forge.lthn.ai/core/go-process v0.2.9
|
||||||
dappco.re/go/core/rag v0.1.11
|
forge.lthn.ai/core/go-rag v0.1.11
|
||||||
dappco.re/go/core/webview v0.1.6
|
forge.lthn.ai/core/go-webview v0.1.6
|
||||||
dappco.re/go/core/ws v0.2.5
|
forge.lthn.ai/core/go-ws v0.2.5
|
||||||
github.com/gin-gonic/gin v1.12.0
|
github.com/gin-gonic/gin v1.12.0
|
||||||
github.com/gorilla/websocket v1.5.3
|
github.com/gorilla/websocket v1.5.3
|
||||||
github.com/modelcontextprotocol/go-sdk v1.4.1
|
github.com/modelcontextprotocol/go-sdk v1.4.1
|
||||||
|
|
@ -21,9 +21,9 @@ require (
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
dappco.re/go/core v0.3.3 // indirect
|
forge.lthn.ai/core/go v0.3.3 // indirect
|
||||||
dappco.re/go/core/i18n v0.1.7 // indirect
|
forge.lthn.ai/core/go-i18n v0.1.7 // indirect
|
||||||
dappco.re/go/core/inference v0.1.6 // indirect
|
forge.lthn.ai/core/go-inference v0.1.6 // indirect
|
||||||
github.com/99designs/gqlgen v0.17.88 // indirect
|
github.com/99designs/gqlgen v0.17.88 // indirect
|
||||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||||
github.com/agnivade/levenshtein v1.2.1 // indirect
|
github.com/agnivade/levenshtein v1.2.1 // indirect
|
||||||
|
|
|
||||||
2
go.sum
2
go.sum
|
|
@ -1,3 +1,5 @@
|
||||||
|
dappco.re/go/core v0.4.7 h1:KmIA/2lo6rl1NMtLrKqCWfMlUqpDZYH3q0/d10dTtGA=
|
||||||
|
dappco.re/go/core v0.4.7/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
|
||||||
dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk=
|
dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk=
|
||||||
dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
|
dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
|
||||||
forge.lthn.ai/core/api v0.1.5 h1:NwZrcOyBjaiz5/cn0n0tnlMUodi8Or6FHMx59C7Kv2o=
|
forge.lthn.ai/core/api v0.1.5 h1:NwZrcOyBjaiz5/cn0n0tnlMUodi8Or6FHMx59C7Kv2o=
|
||||||
|
|
|
||||||
|
|
@ -4,15 +4,12 @@ package agentic
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
|
|
@ -43,9 +40,8 @@ type DispatchOutput struct {
|
||||||
OutputFile string `json:"output_file,omitempty"`
|
OutputFile string `json:"output_file,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerDispatchTool(svc *coremcp.Service) {
|
func (s *PrepSubsystem) registerDispatchTool(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_dispatch",
|
Name: "agentic_dispatch",
|
||||||
Description: "Dispatch a subagent (Gemini, Codex, or Claude) to work on a task. Preps a sandboxed workspace first, then spawns the agent inside it. Templates: conventions, security, coding.",
|
Description: "Dispatch a subagent (Gemini, Codex, or Claude) to work on a task. Preps a sandboxed workspace first, then spawns the agent inside it. Templates: conventions, security, coding.",
|
||||||
}, s.dispatch)
|
}, s.dispatch)
|
||||||
|
|
@ -54,31 +50,31 @@ func (s *PrepSubsystem) registerDispatchTool(svc *coremcp.Service) {
|
||||||
// agentCommand returns the command and args for a given agent type.
|
// agentCommand returns the command and args for a given agent type.
|
||||||
// Supports model variants: "gemini", "gemini:flash", "gemini:pro", "claude", "claude:haiku".
|
// Supports model variants: "gemini", "gemini:flash", "gemini:pro", "claude", "claude:haiku".
|
||||||
func agentCommand(agent, prompt string) (string, []string, error) {
|
func agentCommand(agent, prompt string) (string, []string, error) {
|
||||||
parts := strings.SplitN(agent, ":", 2)
|
parts := core.SplitN(agent, ":", 2)
|
||||||
base := parts[0]
|
agentBase := parts[0]
|
||||||
model := ""
|
agentModel := ""
|
||||||
if len(parts) > 1 {
|
if len(parts) > 1 {
|
||||||
model = parts[1]
|
agentModel = parts[1]
|
||||||
}
|
}
|
||||||
|
|
||||||
switch base {
|
switch agentBase {
|
||||||
case "gemini":
|
case "gemini":
|
||||||
args := []string{"-p", prompt, "--yolo", "--sandbox"}
|
args := []string{"-p", prompt, "--yolo", "--sandbox"}
|
||||||
if model != "" {
|
if agentModel != "" {
|
||||||
args = append(args, "-m", "gemini-2.5-"+model)
|
args = append(args, "-m", "gemini-2.5-"+agentModel)
|
||||||
}
|
}
|
||||||
return "gemini", args, nil
|
return "gemini", args, nil
|
||||||
case "codex":
|
case "codex":
|
||||||
return "codex", []string{"--approval-mode", "full-auto", "-q", prompt}, nil
|
return "codex", []string{"--approval-mode", "full-auto", "-q", prompt}, nil
|
||||||
case "claude":
|
case "claude":
|
||||||
args := []string{"-p", prompt, "--dangerously-skip-permissions"}
|
args := []string{"-p", prompt, "--dangerously-skip-permissions"}
|
||||||
if model != "" {
|
if agentModel != "" {
|
||||||
args = append(args, "--model", model)
|
args = append(args, "--model", agentModel)
|
||||||
}
|
}
|
||||||
return "claude", args, nil
|
return "claude", args, nil
|
||||||
case "local":
|
case "local":
|
||||||
home, _ := os.UserHomeDir()
|
home := core.Env("HOME")
|
||||||
script := filepath.Join(home, "Code", "core", "agent", "scripts", "local-agent.sh")
|
script := core.JoinPath(home, "Code", "core", "agent", "scripts", "local-agent.sh")
|
||||||
return "bash", []string{script, prompt}, nil
|
return "bash", []string{script, prompt}, nil
|
||||||
default:
|
default:
|
||||||
return "", nil, coreerr.E("agentCommand", "unknown agent: "+agent, nil)
|
return "", nil, coreerr.E("agentCommand", "unknown agent: "+agent, nil)
|
||||||
|
|
@ -119,14 +115,14 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
}
|
}
|
||||||
|
|
||||||
wsDir := prepOut.WorkspaceDir
|
wsDir := prepOut.WorkspaceDir
|
||||||
srcDir := filepath.Join(wsDir, "src")
|
srcDir := core.JoinPath(wsDir, "src")
|
||||||
|
|
||||||
// The prompt is just: read PROMPT.md and do the work
|
// The prompt is just: read PROMPT.md and do the work
|
||||||
prompt := "Read PROMPT.md for instructions. All context files (CLAUDE.md, TODO.md, CONTEXT.md, CONSUMERS.md, RECENT.md) are in the parent directory. Work in this directory."
|
prompt := "Read PROMPT.md for instructions. All context files (CLAUDE.md, TODO.md, CONTEXT.md, CONSUMERS.md, RECENT.md) are in the parent directory. Work in this directory."
|
||||||
|
|
||||||
if input.DryRun {
|
if input.DryRun {
|
||||||
// Read PROMPT.md for the dry run output
|
// Read PROMPT.md for the dry run output
|
||||||
promptRaw, _ := coreio.Local.Read(filepath.Join(wsDir, "PROMPT.md"))
|
promptRaw, _ := coreio.Local.Read(core.JoinPath(wsDir, "PROMPT.md"))
|
||||||
return nil, DispatchOutput{
|
return nil, DispatchOutput{
|
||||||
Success: true,
|
Success: true,
|
||||||
Agent: input.Agent,
|
Agent: input.Agent,
|
||||||
|
|
@ -139,14 +135,12 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
// Step 2: Check per-agent concurrency limit
|
// Step 2: Check per-agent concurrency limit
|
||||||
if !s.canDispatchAgent(input.Agent) {
|
if !s.canDispatchAgent(input.Agent) {
|
||||||
// Queue the workspace — write status as "queued" and return
|
// Queue the workspace — write status as "queued" and return
|
||||||
s.saveStatus(wsDir, &WorkspaceStatus{
|
writeStatus(wsDir, &WorkspaceStatus{
|
||||||
Status: "queued",
|
Status: "queued",
|
||||||
Agent: input.Agent,
|
Agent: input.Agent,
|
||||||
Repo: input.Repo,
|
Repo: input.Repo,
|
||||||
Org: input.Org,
|
Org: input.Org,
|
||||||
Task: input.Task,
|
Task: input.Task,
|
||||||
Issue: input.Issue,
|
|
||||||
Branch: prepOut.Branch,
|
|
||||||
StartedAt: time.Now(),
|
StartedAt: time.Now(),
|
||||||
Runs: 0,
|
Runs: 0,
|
||||||
})
|
})
|
||||||
|
|
@ -161,14 +155,12 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
|
|
||||||
// Step 3: Write status BEFORE spawning so concurrent dispatches
|
// Step 3: Write status BEFORE spawning so concurrent dispatches
|
||||||
// see this workspace as "running" during the concurrency check.
|
// see this workspace as "running" during the concurrency check.
|
||||||
s.saveStatus(wsDir, &WorkspaceStatus{
|
writeStatus(wsDir, &WorkspaceStatus{
|
||||||
Status: "running",
|
Status: "running",
|
||||||
Agent: input.Agent,
|
Agent: input.Agent,
|
||||||
Repo: input.Repo,
|
Repo: input.Repo,
|
||||||
Org: input.Org,
|
Org: input.Org,
|
||||||
Task: input.Task,
|
Task: input.Task,
|
||||||
Issue: input.Issue,
|
|
||||||
Branch: prepOut.Branch,
|
|
||||||
StartedAt: time.Now(),
|
StartedAt: time.Now(),
|
||||||
Runs: 1,
|
Runs: 1,
|
||||||
})
|
})
|
||||||
|
|
@ -181,7 +173,7 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
return nil, DispatchOutput{}, err
|
return nil, DispatchOutput{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
outputFile := filepath.Join(wsDir, fmt.Sprintf("agent-%s.log", input.Agent))
|
outputFile := core.JoinPath(wsDir, core.Sprintf("agent-%s.log", input.Agent))
|
||||||
outFile, err := os.Create(outputFile)
|
outFile, err := os.Create(outputFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, DispatchOutput{}, coreerr.E("dispatch", "failed to create log file", err)
|
return nil, DispatchOutput{}, coreerr.E("dispatch", "failed to create log file", err)
|
||||||
|
|
@ -210,13 +202,11 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
if err := cmd.Start(); err != nil {
|
if err := cmd.Start(); err != nil {
|
||||||
outFile.Close()
|
outFile.Close()
|
||||||
// Revert status so the slot is freed
|
// Revert status so the slot is freed
|
||||||
s.saveStatus(wsDir, &WorkspaceStatus{
|
writeStatus(wsDir, &WorkspaceStatus{
|
||||||
Status: "failed",
|
Status: "failed",
|
||||||
Agent: input.Agent,
|
Agent: input.Agent,
|
||||||
Repo: input.Repo,
|
Repo: input.Repo,
|
||||||
Task: input.Task,
|
Task: input.Task,
|
||||||
Issue: input.Issue,
|
|
||||||
Branch: prepOut.Branch,
|
|
||||||
})
|
})
|
||||||
return nil, DispatchOutput{}, coreerr.E("dispatch", "failed to spawn "+input.Agent, err)
|
return nil, DispatchOutput{}, coreerr.E("dispatch", "failed to spawn "+input.Agent, err)
|
||||||
}
|
}
|
||||||
|
|
@ -224,14 +214,12 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
pid := cmd.Process.Pid
|
pid := cmd.Process.Pid
|
||||||
|
|
||||||
// Update status with PID now that agent is running
|
// Update status with PID now that agent is running
|
||||||
s.saveStatus(wsDir, &WorkspaceStatus{
|
writeStatus(wsDir, &WorkspaceStatus{
|
||||||
Status: "running",
|
Status: "running",
|
||||||
Agent: input.Agent,
|
Agent: input.Agent,
|
||||||
Repo: input.Repo,
|
Repo: input.Repo,
|
||||||
Org: input.Org,
|
Org: input.Org,
|
||||||
Task: input.Task,
|
Task: input.Task,
|
||||||
Issue: input.Issue,
|
|
||||||
Branch: prepOut.Branch,
|
|
||||||
PID: pid,
|
PID: pid,
|
||||||
StartedAt: time.Now(),
|
StartedAt: time.Now(),
|
||||||
Runs: 1,
|
Runs: 1,
|
||||||
|
|
@ -243,38 +231,13 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
cmd.Wait()
|
cmd.Wait()
|
||||||
outFile.Close()
|
outFile.Close()
|
||||||
|
|
||||||
postCtx := context.WithoutCancel(ctx)
|
// Update status to completed
|
||||||
status := "completed"
|
|
||||||
channel := coremcp.ChannelAgentComplete
|
|
||||||
payload := map[string]any{
|
|
||||||
"workspace": filepath.Base(wsDir),
|
|
||||||
"repo": input.Repo,
|
|
||||||
"org": input.Org,
|
|
||||||
"agent": input.Agent,
|
|
||||||
"branch": prepOut.Branch,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update status to completed or blocked.
|
|
||||||
if st, err := readStatus(wsDir); err == nil {
|
if st, err := readStatus(wsDir); err == nil {
|
||||||
|
st.Status = "completed"
|
||||||
st.PID = 0
|
st.PID = 0
|
||||||
if data, err := coreio.Local.Read(filepath.Join(wsDir, "src", "BLOCKED.md")); err == nil {
|
writeStatus(wsDir, st)
|
||||||
status = "blocked"
|
|
||||||
channel = coremcp.ChannelAgentBlocked
|
|
||||||
st.Status = status
|
|
||||||
st.Question = strings.TrimSpace(data)
|
|
||||||
if st.Question != "" {
|
|
||||||
payload["question"] = st.Question
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
st.Status = status
|
|
||||||
}
|
|
||||||
s.saveStatus(wsDir, st)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
payload["status"] = status
|
|
||||||
s.emitChannel(postCtx, channel, payload)
|
|
||||||
s.emitChannel(postCtx, coremcp.ChannelAgentStatus, payload)
|
|
||||||
|
|
||||||
// Ingest scan findings as issues
|
// Ingest scan findings as issues
|
||||||
s.ingestFindings(wsDir)
|
s.ingestFindings(wsDir)
|
||||||
|
|
||||||
|
|
@ -291,3 +254,4 @@ func (s *PrepSubsystem) dispatch(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
OutputFile: outputFile,
|
OutputFile: outputFile,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -5,12 +5,9 @@ package agentic
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
core "dappco.re/go/core"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
@ -20,23 +17,23 @@ import (
|
||||||
// EpicInput is the input for agentic_create_epic.
|
// EpicInput is the input for agentic_create_epic.
|
||||||
type EpicInput struct {
|
type EpicInput struct {
|
||||||
Repo string `json:"repo"` // Target repo (e.g. "go-scm")
|
Repo string `json:"repo"` // Target repo (e.g. "go-scm")
|
||||||
Org string `json:"org,omitempty"` // Forge org (default "core")
|
Org string `json:"org,omitempty"` // Forge org (default "core")
|
||||||
Title string `json:"title"` // Epic title
|
Title string `json:"title"` // Epic title
|
||||||
Body string `json:"body,omitempty"` // Epic description (above checklist)
|
Body string `json:"body,omitempty"` // Epic description (above checklist)
|
||||||
Tasks []string `json:"tasks"` // Sub-task titles (become child issues)
|
Tasks []string `json:"tasks"` // Sub-task titles (become child issues)
|
||||||
Labels []string `json:"labels,omitempty"` // Labels for epic + children (e.g. ["agentic"])
|
Labels []string `json:"labels,omitempty"` // Labels for epic + children (e.g. ["agentic"])
|
||||||
Dispatch bool `json:"dispatch,omitempty"` // Auto-dispatch agents to each child
|
Dispatch bool `json:"dispatch,omitempty"` // Auto-dispatch agents to each child
|
||||||
Agent string `json:"agent,omitempty"` // Agent type for dispatch (default "claude")
|
Agent string `json:"agent,omitempty"` // Agent type for dispatch (default "claude")
|
||||||
Template string `json:"template,omitempty"` // Prompt template for dispatch (default "coding")
|
Template string `json:"template,omitempty"` // Prompt template for dispatch (default "coding")
|
||||||
}
|
}
|
||||||
|
|
||||||
// EpicOutput is the output for agentic_create_epic.
|
// EpicOutput is the output for agentic_create_epic.
|
||||||
type EpicOutput struct {
|
type EpicOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
EpicNumber int `json:"epic_number"`
|
EpicNumber int `json:"epic_number"`
|
||||||
EpicURL string `json:"epic_url"`
|
EpicURL string `json:"epic_url"`
|
||||||
Children []ChildRef `json:"children"`
|
Children []ChildRef `json:"children"`
|
||||||
Dispatched int `json:"dispatched,omitempty"`
|
Dispatched int `json:"dispatched,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChildRef references a child issue.
|
// ChildRef references a child issue.
|
||||||
|
|
@ -46,9 +43,8 @@ type ChildRef struct {
|
||||||
URL string `json:"url"`
|
URL string `json:"url"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerEpicTool(svc *coremcp.Service) {
|
func (s *PrepSubsystem) registerEpicTool(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_create_epic",
|
Name: "agentic_create_epic",
|
||||||
Description: "Create an epic issue with child issues on Forge. Each task becomes a child issue linked via checklist. Optionally auto-dispatch agents to work each child.",
|
Description: "Create an epic issue with child issues on Forge. Each task becomes a child issue linked via checklist. Optionally auto-dispatch agents to work each child.",
|
||||||
}, s.createEpic)
|
}, s.createEpic)
|
||||||
|
|
@ -101,19 +97,19 @@ func (s *PrepSubsystem) createEpic(ctx context.Context, req *mcp.CallToolRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
// Step 2: Build epic body with checklist
|
// Step 2: Build epic body with checklist
|
||||||
var body strings.Builder
|
epicBody := core.NewBuilder()
|
||||||
if input.Body != "" {
|
if input.Body != "" {
|
||||||
body.WriteString(input.Body)
|
epicBody.WriteString(input.Body)
|
||||||
body.WriteString("\n\n")
|
epicBody.WriteString("\n\n")
|
||||||
}
|
}
|
||||||
body.WriteString("## Tasks\n\n")
|
epicBody.WriteString("## Tasks\n\n")
|
||||||
for _, child := range children {
|
for _, child := range children {
|
||||||
body.WriteString(fmt.Sprintf("- [ ] #%d %s\n", child.Number, child.Title))
|
epicBody.WriteString(core.Sprintf("- [ ] #%d %s\n", child.Number, child.Title))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Step 3: Create epic issue
|
// Step 3: Create epic issue
|
||||||
epicLabels := append(labelIDs, s.resolveLabelIDs(ctx, input.Org, input.Repo, []string{"epic"})...)
|
epicLabels := append(labelIDs, s.resolveLabelIDs(ctx, input.Org, input.Repo, []string{"epic"})...)
|
||||||
epic, err := s.createIssue(ctx, input.Org, input.Repo, input.Title, body.String(), epicLabels)
|
epic, err := s.createIssue(ctx, input.Org, input.Repo, input.Title, epicBody.String(), epicLabels)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, EpicOutput{}, coreerr.E("createEpic", "failed to create epic", err)
|
return nil, EpicOutput{}, coreerr.E("createEpic", "failed to create epic", err)
|
||||||
}
|
}
|
||||||
|
|
@ -157,9 +153,9 @@ func (s *PrepSubsystem) createIssue(ctx context.Context, org, repo, title, body
|
||||||
payload["labels"] = labelIDs
|
payload["labels"] = labelIDs
|
||||||
}
|
}
|
||||||
|
|
||||||
data, _ := json.Marshal(payload)
|
data := []byte(core.JSONMarshalString(payload))
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues", s.forgeURL, org, repo)
|
issueURL := core.Sprintf("%s/api/v1/repos/%s/%s/issues", s.forgeURL, org, repo)
|
||||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(data))
|
req, _ := http.NewRequestWithContext(ctx, "POST", issueURL, bytes.NewReader(data))
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
|
|
@ -167,17 +163,17 @@ func (s *PrepSubsystem) createIssue(ctx context.Context, org, repo, title, body
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ChildRef{}, coreerr.E("createIssue", "request failed", err)
|
return ChildRef{}, coreerr.E("createIssue", "request failed", err)
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode != 201 {
|
if resp.StatusCode != 201 {
|
||||||
return ChildRef{}, coreerr.E("createIssue", fmt.Sprintf("returned %d", resp.StatusCode), nil)
|
resp.Body.Close()
|
||||||
|
return ChildRef{}, coreerr.E("createIssue", core.Sprintf("returned %d", resp.StatusCode), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
var result struct {
|
var result struct {
|
||||||
Number int `json:"number"`
|
Number int `json:"number"`
|
||||||
HTMLURL string `json:"html_url"`
|
HTMLURL string `json:"html_url"`
|
||||||
}
|
}
|
||||||
json.NewDecoder(resp.Body).Decode(&result)
|
core.JSONUnmarshalString(readBody(resp.Body), &result)
|
||||||
|
|
||||||
return ChildRef{
|
return ChildRef{
|
||||||
Number: result.Number,
|
Number: result.Number,
|
||||||
|
|
@ -193,16 +189,16 @@ func (s *PrepSubsystem) resolveLabelIDs(ctx context.Context, org, repo string, n
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fetch existing labels
|
// Fetch existing labels
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/labels?limit=50", s.forgeURL, org, repo)
|
labelsURL := core.Sprintf("%s/api/v1/repos/%s/%s/labels?limit=50", s.forgeURL, org, repo)
|
||||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, _ := http.NewRequestWithContext(ctx, "GET", labelsURL, nil)
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
resp, err := s.client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode != 200 {
|
if resp.StatusCode != 200 {
|
||||||
|
resp.Body.Close()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -210,7 +206,7 @@ func (s *PrepSubsystem) resolveLabelIDs(ctx context.Context, org, repo string, n
|
||||||
ID int64 `json:"id"`
|
ID int64 `json:"id"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
}
|
}
|
||||||
json.NewDecoder(resp.Body).Decode(&existing)
|
core.JSONUnmarshalString(readBody(resp.Body), &existing)
|
||||||
|
|
||||||
nameToID := make(map[string]int64)
|
nameToID := make(map[string]int64)
|
||||||
for _, l := range existing {
|
for _, l := range existing {
|
||||||
|
|
@ -246,13 +242,13 @@ func (s *PrepSubsystem) createLabel(ctx context.Context, org, repo, name string)
|
||||||
colour = "#6b7280"
|
colour = "#6b7280"
|
||||||
}
|
}
|
||||||
|
|
||||||
payload, _ := json.Marshal(map[string]string{
|
payload := []byte(core.JSONMarshalString(map[string]string{
|
||||||
"name": name,
|
"name": name,
|
||||||
"color": colour,
|
"color": colour,
|
||||||
})
|
}))
|
||||||
|
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo)
|
createLabelURL := core.Sprintf("%s/api/v1/repos/%s/%s/labels", s.forgeURL, org, repo)
|
||||||
req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
req, _ := http.NewRequestWithContext(ctx, "POST", createLabelURL, bytes.NewReader(payload))
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
|
|
@ -260,15 +256,15 @@ func (s *PrepSubsystem) createLabel(ctx context.Context, org, repo, name string)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode != 201 {
|
if resp.StatusCode != 201 {
|
||||||
|
resp.Body.Close()
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
var result struct {
|
var result struct {
|
||||||
ID int64 `json:"id"`
|
ID int64 `json:"id"`
|
||||||
}
|
}
|
||||||
json.NewDecoder(resp.Body).Decode(&result)
|
core.JSONUnmarshalString(readBody(resp.Body), &result)
|
||||||
return result.ID
|
return result.ID
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -4,15 +4,9 @@ package agentic
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -25,10 +19,7 @@ func (s *PrepSubsystem) ingestFindings(wsDir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read the log file
|
// Read the log file
|
||||||
logFiles, err := filepath.Glob(filepath.Join(wsDir, "agent-*.log"))
|
logFiles := core.PathGlob(core.JoinPath(wsDir, "agent-*.log"))
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(logFiles) == 0 {
|
if len(logFiles) == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -41,28 +32,26 @@ func (s *PrepSubsystem) ingestFindings(wsDir string) {
|
||||||
body := contentStr
|
body := contentStr
|
||||||
|
|
||||||
// Skip quota errors
|
// Skip quota errors
|
||||||
if strings.Contains(body, "QUOTA_EXHAUSTED") || strings.Contains(body, "QuotaError") {
|
if core.Contains(body, "QUOTA_EXHAUSTED") || core.Contains(body, "QuotaError") {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Only ingest if there are actual findings (file:line references)
|
// Only ingest if there are actual findings (file:line references)
|
||||||
findings := countFileRefs(body)
|
findings := countFileRefs(body)
|
||||||
issueCreated := false
|
|
||||||
if findings < 2 {
|
if findings < 2 {
|
||||||
s.emitHarvestComplete(context.Background(), wsDir, st.Repo, findings, issueCreated)
|
|
||||||
return // No meaningful findings
|
return // No meaningful findings
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine issue type from the template used
|
// Determine issue type from the template used
|
||||||
issueType := "task"
|
issueType := "task"
|
||||||
priority := "normal"
|
priority := "normal"
|
||||||
if strings.Contains(body, "security") || strings.Contains(body, "Security") {
|
if core.Contains(body, "security") || core.Contains(body, "Security") {
|
||||||
issueType = "bug"
|
issueType = "bug"
|
||||||
priority = "high"
|
priority = "high"
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a single issue per repo with all findings in the body
|
// Create a single issue per repo with all findings in the body
|
||||||
title := fmt.Sprintf("Scan findings for %s (%d items)", st.Repo, findings)
|
title := core.Sprintf("Scan findings for %s (%d items)", st.Repo, findings)
|
||||||
|
|
||||||
// Truncate body to reasonable size for issue description
|
// Truncate body to reasonable size for issue description
|
||||||
description := body
|
description := body
|
||||||
|
|
@ -70,8 +59,7 @@ func (s *PrepSubsystem) ingestFindings(wsDir string) {
|
||||||
description = description[:10000] + "\n\n... (truncated, see full log in workspace)"
|
description = description[:10000] + "\n\n... (truncated, see full log in workspace)"
|
||||||
}
|
}
|
||||||
|
|
||||||
issueCreated = s.createIssueViaAPI(st.Repo, title, description, issueType, priority, "scan")
|
s.createIssueViaAPI(st.Repo, title, description, issueType, priority, "scan")
|
||||||
s.emitHarvestComplete(context.Background(), wsDir, st.Repo, findings, issueCreated)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// countFileRefs counts file:line references in the output (indicates real findings)
|
// countFileRefs counts file:line references in the output (indicates real findings)
|
||||||
|
|
@ -86,7 +74,7 @@ func countFileRefs(body string) int {
|
||||||
}
|
}
|
||||||
if j < len(body) && body[j] == '`' {
|
if j < len(body) && body[j] == '`' {
|
||||||
ref := body[i+1 : j]
|
ref := body[i+1 : j]
|
||||||
if strings.Contains(ref, ".go:") || strings.Contains(ref, ".php:") {
|
if core.Contains(ref, ".go:") || core.Contains(ref, ".php:") {
|
||||||
count++
|
count++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -96,55 +84,35 @@ func countFileRefs(body string) int {
|
||||||
}
|
}
|
||||||
|
|
||||||
// createIssueViaAPI posts an issue to the lthn.sh API
|
// createIssueViaAPI posts an issue to the lthn.sh API
|
||||||
func (s *PrepSubsystem) createIssueViaAPI(repo, title, description, issueType, priority, source string) bool {
|
func (s *PrepSubsystem) createIssueViaAPI(repo, title, description, issueType, priority, source string) {
|
||||||
if s.brainKey == "" {
|
if s.brainKey == "" {
|
||||||
return false
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read the agent API key from file
|
// Read the agent API key from file
|
||||||
home, _ := os.UserHomeDir()
|
home := core.Env("HOME")
|
||||||
apiKeyData, err := coreio.Local.Read(filepath.Join(home, ".claude", "agent-api.key"))
|
apiKeyData, err := coreio.Local.Read(core.JoinPath(home, ".claude", "agent-api.key"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return
|
||||||
}
|
}
|
||||||
apiKey := strings.TrimSpace(apiKeyData)
|
apiKey := core.Trim(apiKeyData)
|
||||||
|
|
||||||
payload, err := json.Marshal(map[string]string{
|
payload := []byte(core.JSONMarshalString(map[string]string{
|
||||||
"title": title,
|
"title": title,
|
||||||
"description": description,
|
"description": description,
|
||||||
"type": issueType,
|
"type": issueType,
|
||||||
"priority": priority,
|
"priority": priority,
|
||||||
"reporter": "cladius",
|
"reporter": "cladius",
|
||||||
})
|
}))
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("POST", s.brainURL+"/v1/issues", bytes.NewReader(payload))
|
req, _ := http.NewRequest("POST", s.brainURL+"/v1/issues", bytes.NewReader(payload))
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
req.Header.Set("Accept", "application/json")
|
req.Header.Set("Accept", "application/json")
|
||||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
resp, err := s.client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return
|
||||||
}
|
}
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
return resp.StatusCode < 400
|
|
||||||
}
|
|
||||||
|
|
||||||
// emitHarvestComplete announces that finding ingestion finished for a workspace.
|
|
||||||
//
|
|
||||||
// ctx := context.Background()
|
|
||||||
// s.emitHarvestComplete(ctx, "go-io-123", "go-io", 4, true)
|
|
||||||
func (s *PrepSubsystem) emitHarvestComplete(ctx context.Context, workspace, repo string, findings int, issueCreated bool) {
|
|
||||||
s.emitChannel(ctx, coremcp.ChannelHarvestComplete, map[string]any{
|
|
||||||
"workspace": workspace,
|
|
||||||
"repo": repo,
|
|
||||||
"findings": findings,
|
|
||||||
"issue_created": issueCreated,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,216 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IssueDispatchInput is the input for agentic_dispatch_issue.
|
|
||||||
//
|
|
||||||
// input := IssueDispatchInput{
|
|
||||||
// Repo: "go-io",
|
|
||||||
// Issue: 123,
|
|
||||||
// Agent: "claude",
|
|
||||||
// }
|
|
||||||
type IssueDispatchInput struct {
|
|
||||||
Repo string `json:"repo"` // Target repo (e.g. "go-io")
|
|
||||||
Org string `json:"org,omitempty"` // Forge org (default "core")
|
|
||||||
Issue int `json:"issue"` // Forge issue number
|
|
||||||
Agent string `json:"agent,omitempty"` // "claude" (default), "codex", "gemini"
|
|
||||||
Template string `json:"template,omitempty"` // "conventions", "security", "coding" (default)
|
|
||||||
DryRun bool `json:"dry_run,omitempty"` // Preview without executing
|
|
||||||
}
|
|
||||||
|
|
||||||
type forgeIssue struct {
|
|
||||||
Title string `json:"title"`
|
|
||||||
Body string `json:"body"`
|
|
||||||
State string `json:"state"`
|
|
||||||
Labels []struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
} `json:"labels"`
|
|
||||||
Assignee *struct {
|
|
||||||
Login string `json:"login"`
|
|
||||||
} `json:"assignee"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerIssueTools(svc *coremcp.Service) {
|
|
||||||
server := svc.Server()
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_dispatch_issue",
|
|
||||||
Description: "Dispatch an agent to work on a Forge issue. Assigns the issue as a lock, prepends the issue body to TODO.md, creates an issue-specific branch, and spawns the agent.",
|
|
||||||
}, s.dispatchIssue)
|
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_pr",
|
|
||||||
Description: "Create a pull request from an agent workspace. Pushes the branch and creates a Forge PR linked to the tracked issue, if any.",
|
|
||||||
}, s.createPR)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) dispatchIssue(ctx context.Context, req *mcp.CallToolRequest, input IssueDispatchInput) (*mcp.CallToolResult, DispatchOutput, error) {
|
|
||||||
if input.Repo == "" {
|
|
||||||
return nil, DispatchOutput{}, coreerr.E("dispatchIssue", "repo is required", nil)
|
|
||||||
}
|
|
||||||
if input.Issue == 0 {
|
|
||||||
return nil, DispatchOutput{}, coreerr.E("dispatchIssue", "issue is required", nil)
|
|
||||||
}
|
|
||||||
if input.Org == "" {
|
|
||||||
input.Org = "core"
|
|
||||||
}
|
|
||||||
if input.Agent == "" {
|
|
||||||
input.Agent = "claude"
|
|
||||||
}
|
|
||||||
if input.Template == "" {
|
|
||||||
input.Template = "coding"
|
|
||||||
}
|
|
||||||
|
|
||||||
issue, err := s.fetchIssue(ctx, input.Org, input.Repo, input.Issue)
|
|
||||||
if err != nil {
|
|
||||||
return nil, DispatchOutput{}, err
|
|
||||||
}
|
|
||||||
if issue.State != "open" {
|
|
||||||
return nil, DispatchOutput{}, coreerr.E("dispatchIssue", fmt.Sprintf("issue %d is %s, not open", input.Issue, issue.State), nil)
|
|
||||||
}
|
|
||||||
if issue.Assignee != nil && issue.Assignee.Login != "" {
|
|
||||||
return nil, DispatchOutput{}, coreerr.E("dispatchIssue", fmt.Sprintf("issue %d is already assigned to %s", input.Issue, issue.Assignee.Login), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !input.DryRun {
|
|
||||||
if err := s.lockIssue(ctx, input.Org, input.Repo, input.Issue, input.Agent); err != nil {
|
|
||||||
return nil, DispatchOutput{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var dispatchErr error
|
|
||||||
defer func() {
|
|
||||||
if dispatchErr != nil {
|
|
||||||
_ = s.unlockIssue(ctx, input.Org, input.Repo, input.Issue, issue.Labels)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
result, out, dispatchErr := s.dispatch(ctx, req, DispatchInput{
|
|
||||||
Repo: input.Repo,
|
|
||||||
Org: input.Org,
|
|
||||||
Issue: input.Issue,
|
|
||||||
Task: issue.Title,
|
|
||||||
Agent: input.Agent,
|
|
||||||
Template: input.Template,
|
|
||||||
DryRun: input.DryRun,
|
|
||||||
})
|
|
||||||
if dispatchErr != nil {
|
|
||||||
return nil, DispatchOutput{}, dispatchErr
|
|
||||||
}
|
|
||||||
return result, out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.dispatch(ctx, req, DispatchInput{
|
|
||||||
Repo: input.Repo,
|
|
||||||
Org: input.Org,
|
|
||||||
Issue: input.Issue,
|
|
||||||
Task: issue.Title,
|
|
||||||
Agent: input.Agent,
|
|
||||||
Template: input.Template,
|
|
||||||
DryRun: input.DryRun,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) unlockIssue(ctx context.Context, org, repo string, issue int, labels []struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
}) error {
|
|
||||||
updateURL := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue)
|
|
||||||
issueLabels := make([]string, 0, len(labels))
|
|
||||||
for _, label := range labels {
|
|
||||||
if label.Name == "in-progress" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
issueLabels = append(issueLabels, label.Name)
|
|
||||||
}
|
|
||||||
if issueLabels == nil {
|
|
||||||
issueLabels = []string{}
|
|
||||||
}
|
|
||||||
payload, err := json.Marshal(map[string]any{
|
|
||||||
"assignees": []string{},
|
|
||||||
"labels": issueLabels,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return coreerr.E("unlockIssue", "failed to encode issue unlock", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPatch, updateURL, bytes.NewReader(payload))
|
|
||||||
if err != nil {
|
|
||||||
return coreerr.E("unlockIssue", "failed to build unlock request", err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return coreerr.E("unlockIssue", "failed to update issue", err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode >= http.StatusBadRequest {
|
|
||||||
return coreerr.E("unlockIssue", fmt.Sprintf("issue unlock returned %d", resp.StatusCode), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) fetchIssue(ctx context.Context, org, repo string, issue int) (*forgeIssue, error) {
|
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue)
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E("fetchIssue", "failed to build request", err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E("fetchIssue", "failed to fetch issue", err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, coreerr.E("fetchIssue", fmt.Sprintf("issue %d not found in %s/%s", issue, org, repo), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
var out forgeIssue
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
|
|
||||||
return nil, coreerr.E("fetchIssue", "failed to decode issue", err)
|
|
||||||
}
|
|
||||||
return &out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) lockIssue(ctx context.Context, org, repo string, issue int, assignee string) error {
|
|
||||||
updateURL := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue)
|
|
||||||
payload, err := json.Marshal(map[string]any{
|
|
||||||
"assignees": []string{assignee},
|
|
||||||
"labels": []string{"in-progress"},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return coreerr.E("lockIssue", "failed to encode issue update", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPatch, updateURL, bytes.NewReader(payload))
|
|
||||||
if err != nil {
|
|
||||||
return coreerr.E("lockIssue", "failed to build update request", err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return coreerr.E("lockIssue", "failed to update issue", err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode >= http.StatusBadRequest {
|
|
||||||
return coreerr.E("lockIssue", fmt.Sprintf("issue update returned %d", resp.StatusCode), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,227 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBranchSlug_Good(t *testing.T) {
|
|
||||||
got := branchSlug("Fix login crash in API v2")
|
|
||||||
want := "fix-login-crash-in-api-v2"
|
|
||||||
if got != want {
|
|
||||||
t.Fatalf("expected %q, got %q", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPrepWorkspace_Good_IssueBranchName(t *testing.T) {
|
|
||||||
codePath := t.TempDir()
|
|
||||||
repoDir := initTestRepo(t, codePath, "demo")
|
|
||||||
_ = repoDir
|
|
||||||
|
|
||||||
s := &PrepSubsystem{codePath: codePath}
|
|
||||||
_, out, err := s.prepWorkspace(context.Background(), nil, PrepInput{
|
|
||||||
Repo: "demo",
|
|
||||||
Issue: 42,
|
|
||||||
Task: "Fix login crash",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("prepWorkspace failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
want := "agent/issue-42-fix-login-crash"
|
|
||||||
if out.Branch != want {
|
|
||||||
t.Fatalf("expected branch %q, got %q", want, out.Branch)
|
|
||||||
}
|
|
||||||
|
|
||||||
srcDir := filepath.Join(out.WorkspaceDir, "src")
|
|
||||||
cmd := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD")
|
|
||||||
cmd.Dir = srcDir
|
|
||||||
data, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to read branch: %v", err)
|
|
||||||
}
|
|
||||||
if got := strings.TrimSpace(string(data)); got != want {
|
|
||||||
t.Fatalf("expected git branch %q, got %q", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDispatchIssue_Bad_AssignedIssue(t *testing.T) {
|
|
||||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
switch r.Method {
|
|
||||||
case http.MethodGet:
|
|
||||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
|
||||||
"title": "Fix login crash",
|
|
||||||
"body": "details",
|
|
||||||
"state": "open",
|
|
||||||
"assignee": map[string]any{
|
|
||||||
"login": "someone-else",
|
|
||||||
},
|
|
||||||
})
|
|
||||||
default:
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
}))
|
|
||||||
defer srv.Close()
|
|
||||||
|
|
||||||
s := &PrepSubsystem{
|
|
||||||
forgeURL: srv.URL,
|
|
||||||
client: srv.Client(),
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, err := s.dispatchIssue(context.Background(), nil, IssueDispatchInput{
|
|
||||||
Repo: "demo",
|
|
||||||
Org: "core",
|
|
||||||
Issue: 42,
|
|
||||||
DryRun: true,
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected assigned issue to fail")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDispatchIssue_Good_UnlocksOnPrepFailure drives dispatchIssue against a
// fake Forge server with no local repo clone, so the dispatch step fails
// after the issue has been locked. It then asserts the exact request
// sequence: GET (fetch issue), PATCH (lock: assign claude + "in-progress"),
// PATCH (unlock: clear assignees, restore the original "bug" label).
//
// NOTE(review): methods/bodies are appended from the httptest handler
// without a mutex; requests here are strictly sequential so this is safe in
// practice, but a lock would make it race-detector-proof — confirm.
func TestDispatchIssue_Good_UnlocksOnPrepFailure(t *testing.T) {
	// Record every request so the lock/unlock ordering can be asserted.
	var methods []string
	var bodies []string

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		methods = append(methods, r.Method)
		bodies = append(bodies, string(body))

		switch r.Method {
		case http.MethodGet:
			// Open, unassigned issue with one pre-existing label.
			_ = json.NewEncoder(w).Encode(map[string]any{
				"title": "Fix login crash",
				"body":  "details",
				"state": "open",
				"labels": []map[string]any{
					{"name": "bug"},
				},
			})
		case http.MethodPatch:
			// Both lock and unlock PATCHes succeed.
			w.WriteHeader(http.StatusOK)
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
		}
	}))
	defer srv.Close()

	// codePath is an empty temp dir: there is no "demo" clone, so the
	// dispatch/prep step is guaranteed to fail after the lock succeeds.
	s := &PrepSubsystem{
		forgeURL:   srv.URL,
		forgeToken: "token",
		client:     srv.Client(),
		codePath:   t.TempDir(),
	}

	_, _, err := s.dispatchIssue(context.Background(), nil, IssueDispatchInput{
		Repo:  "demo",
		Org:   "core",
		Issue: 42,
	})
	if err == nil {
		t.Fatal("expected dispatch to fail when the repo clone is missing")
	}

	// Exactly three requests: fetch, lock, rollback-unlock.
	if got, want := len(methods), 3; got != want {
		t.Fatalf("expected %d requests, got %d (%v)", want, got, methods)
	}
	if methods[0] != http.MethodGet {
		t.Fatalf("expected first request to fetch issue, got %s", methods[0])
	}
	if methods[1] != http.MethodPatch {
		t.Fatalf("expected second request to lock issue, got %s", methods[1])
	}
	if methods[2] != http.MethodPatch {
		t.Fatalf("expected third request to unlock issue, got %s", methods[2])
	}
	// Lock request assigns the default agent ("claude").
	if !strings.Contains(bodies[1], `"assignees":["claude"]`) {
		t.Fatalf("expected lock request to assign claude, got %s", bodies[1])
	}
	// Unlock request clears assignees and restores the original label set.
	if !strings.Contains(bodies[2], `"assignees":[]`) {
		t.Fatalf("expected unlock request to clear assignees, got %s", bodies[2])
	}
	if !strings.Contains(bodies[2], `"labels":["bug"]`) {
		t.Fatalf("expected unlock request to preserve original labels, got %s", bodies[2])
	}
}
|
|
||||||
|
|
||||||
func TestLockIssue_Good_RequestBody(t *testing.T) {
|
|
||||||
var gotMethod string
|
|
||||||
var gotPath string
|
|
||||||
var gotBody []byte
|
|
||||||
|
|
||||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
gotMethod = r.Method
|
|
||||||
gotPath = r.URL.Path
|
|
||||||
body, _ := io.ReadAll(r.Body)
|
|
||||||
gotBody = append([]byte(nil), body...)
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}))
|
|
||||||
defer srv.Close()
|
|
||||||
|
|
||||||
s := &PrepSubsystem{
|
|
||||||
forgeURL: srv.URL,
|
|
||||||
client: srv.Client(),
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.lockIssue(context.Background(), "core", "demo", 42, "claude"); err != nil {
|
|
||||||
t.Fatalf("lockIssue failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if gotMethod != http.MethodPatch {
|
|
||||||
t.Fatalf("expected PATCH, got %s", gotMethod)
|
|
||||||
}
|
|
||||||
if gotPath != "/api/v1/repos/core/demo/issues/42" {
|
|
||||||
t.Fatalf("unexpected path %q", gotPath)
|
|
||||||
}
|
|
||||||
if !bytes.Contains(gotBody, []byte(`"assignees":["claude"]`)) {
|
|
||||||
t.Fatalf("expected assignee in body, got %s", string(gotBody))
|
|
||||||
}
|
|
||||||
if !bytes.Contains(gotBody, []byte(`"in-progress"`)) {
|
|
||||||
t.Fatalf("expected in-progress label in body, got %s", string(gotBody))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// initTestRepo creates <codePath>/core/<repo> as a git repository with a
// single commit on a "main" branch and returns the repository directory.
// Any git failure fails the test immediately.
func initTestRepo(t *testing.T, codePath, repo string) string {
	t.Helper()

	repoDir := filepath.Join(codePath, "core", repo)
	if err := os.MkdirAll(repoDir, 0o755); err != nil {
		t.Fatalf("mkdir repo dir: %v", err)
	}

	// Fixed commit identity, independent of the host's git configuration.
	env := append(os.Environ(),
		"GIT_AUTHOR_NAME=Test User",
		"GIT_AUTHOR_EMAIL=test@example.com",
		"GIT_COMMITTER_NAME=Test User",
		"GIT_COMMITTER_EMAIL=test@example.com",
	)

	git := func(args ...string) {
		t.Helper()
		cmd := exec.Command("git", args...)
		cmd.Dir = repoDir
		cmd.Env = env
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("git %v failed: %v\n%s", args, err, string(out))
		}
	}

	git("init", "-b", "main")
	if err := os.WriteFile(filepath.Join(repoDir, "README.md"), []byte("# demo\n"), 0o644); err != nil {
		t.Fatalf("write file: %v", err)
	}
	git("add", "README.md")
	git("commit", "-m", "initial commit")

	return repoDir
}
|
|
||||||
|
|
@ -1,125 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MirrorInput controls Forge to GitHub mirror sync.
type MirrorInput struct {
	Repo     string `json:"repo,omitempty"`      // single repo to sync; empty means every local repo
	DryRun   bool   `json:"dry_run,omitempty"`   // report what would sync without pushing or opening PRs
	MaxFiles int    `json:"max_files,omitempty"` // skip syncs touching more files than this (<=0 defaults to 50)
}
|
|
||||||
|
|
||||||
// MirrorOutput reports mirror sync results.
type MirrorOutput struct {
	Success bool         `json:"success"`           // true whenever the tool completes without a hard error
	Synced  []MirrorSync `json:"synced"`            // per-repo results for repos with commits ahead of the mirror
	Skipped []string     `json:"skipped,omitempty"` // repos skipped outright (e.g. "<repo>: no github remote")
	Count   int          `json:"count"`             // len(Synced)
}
|
|
||||||
|
|
||||||
// MirrorSync records one repo sync attempt.
type MirrorSync struct {
	Repo         string `json:"repo"`
	CommitsAhead int    `json:"commits_ahead"`     // commits on HEAD not on github/main
	FilesChanged int    `json:"files_changed"`     // files differing between github/main and HEAD
	PRURL        string `json:"pr_url,omitempty"`  // URL of the created GitHub PR, when one was opened
	Pushed       bool   `json:"pushed"`            // set once ensureDevBranch succeeds — presumably the push step; confirm
	Skipped      string `json:"skipped,omitempty"` // reason the sync stopped short (file limit, dry run, or an error message)
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerMirrorTool(svc *coremcp.Service) {
|
|
||||||
server := svc.Server()
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_mirror",
|
|
||||||
Description: "Mirror Forge repositories to GitHub and open a GitHub PR when there are commits ahead of the remote mirror.",
|
|
||||||
}, s.mirror)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) mirror(ctx context.Context, _ *mcp.CallToolRequest, input MirrorInput) (*mcp.CallToolResult, MirrorOutput, error) {
|
|
||||||
maxFiles := input.MaxFiles
|
|
||||||
if maxFiles <= 0 {
|
|
||||||
maxFiles = 50
|
|
||||||
}
|
|
||||||
|
|
||||||
basePath := repoRootFromCodePath(s.codePath)
|
|
||||||
repos := []string{}
|
|
||||||
if input.Repo != "" {
|
|
||||||
repos = []string{input.Repo}
|
|
||||||
} else {
|
|
||||||
repos = listLocalRepos(basePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
synced := make([]MirrorSync, 0, len(repos))
|
|
||||||
skipped := make([]string, 0)
|
|
||||||
|
|
||||||
for _, repo := range repos {
|
|
||||||
repoDir := filepath.Join(basePath, repo)
|
|
||||||
if !hasRemote(repoDir, "github") {
|
|
||||||
skipped = append(skipped, repo+": no github remote")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := exec.LookPath("git"); err != nil {
|
|
||||||
return nil, MirrorOutput{}, coreerr.E("mirror", "git CLI is not available", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _ = gitOutput(repoDir, "fetch", "github")
|
|
||||||
ahead := commitsAhead(repoDir, "github/main", "HEAD")
|
|
||||||
if ahead <= 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
files := filesChanged(repoDir, "github/main", "HEAD")
|
|
||||||
sync := MirrorSync{
|
|
||||||
Repo: repo,
|
|
||||||
CommitsAhead: ahead,
|
|
||||||
FilesChanged: files,
|
|
||||||
}
|
|
||||||
|
|
||||||
if files > maxFiles {
|
|
||||||
sync.Skipped = fmt.Sprintf("%d files exceeds limit of %d", files, maxFiles)
|
|
||||||
synced = append(synced, sync)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.DryRun {
|
|
||||||
sync.Skipped = "dry run"
|
|
||||||
synced = append(synced, sync)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ensureDevBranch(repoDir); err != nil {
|
|
||||||
sync.Skipped = err.Error()
|
|
||||||
synced = append(synced, sync)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
sync.Pushed = true
|
|
||||||
|
|
||||||
prURL, err := createGitHubPR(ctx, repoDir, repo, ahead, files)
|
|
||||||
if err != nil {
|
|
||||||
sync.Skipped = err.Error()
|
|
||||||
} else {
|
|
||||||
sync.PRURL = prURL
|
|
||||||
}
|
|
||||||
|
|
||||||
synced = append(synced, sync)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, MirrorOutput{
|
|
||||||
Success: true,
|
|
||||||
Synced: synced,
|
|
||||||
Skipped: skipped,
|
|
||||||
Count: len(synced),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
@ -6,27 +6,19 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Plan represents an implementation plan for agent work.
|
// Plan represents an implementation plan for agent work.
|
||||||
//
|
|
||||||
// plan := Plan{
|
|
||||||
// Title: "Add notifications",
|
|
||||||
// Status: "draft",
|
|
||||||
// }
|
|
||||||
type Plan struct {
|
type Plan struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
Title string `json:"title"`
|
Title string `json:"title"`
|
||||||
Status string `json:"status"` // draft, ready, in_progress, needs_verification, verified, approved
|
Status string `json:"status"` // draft, ready, in_progress, needs_verification, verified, approved
|
||||||
Repo string `json:"repo,omitempty"`
|
Repo string `json:"repo,omitempty"`
|
||||||
Org string `json:"org,omitempty"`
|
Org string `json:"org,omitempty"`
|
||||||
Objective string `json:"objective"`
|
Objective string `json:"objective"`
|
||||||
|
|
@ -38,32 +30,18 @@ type Plan struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Phase represents a phase within an implementation plan.
|
// Phase represents a phase within an implementation plan.
|
||||||
//
|
|
||||||
// phase := Phase{Name: "Implementation", Status: "pending"}
|
|
||||||
type Phase struct {
|
type Phase struct {
|
||||||
Number int `json:"number"`
|
Number int `json:"number"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Status string `json:"status"` // pending, in_progress, done
|
Status string `json:"status"` // pending, in_progress, done
|
||||||
Criteria []string `json:"criteria,omitempty"`
|
Criteria []string `json:"criteria,omitempty"`
|
||||||
Tests int `json:"tests,omitempty"`
|
Tests int `json:"tests,omitempty"`
|
||||||
Notes string `json:"notes,omitempty"`
|
Notes string `json:"notes,omitempty"`
|
||||||
Checkpoints []Checkpoint `json:"checkpoints,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Checkpoint records phase progress or completion details.
//
//	cp := Checkpoint{Notes: "Implemented transport hooks", Done: true}
type Checkpoint struct {
	Notes     string    `json:"notes,omitempty"` // free-form progress note
	Done      bool      `json:"done,omitempty"`  // when true, the owning phase is marked done
	CreatedAt time.Time `json:"created_at"`      // set to time.Now() when the checkpoint is recorded
}
|
}
|
||||||
|
|
||||||
// --- Input/Output types ---
|
// --- Input/Output types ---
|
||||||
|
|
||||||
// PlanCreateInput is the input for agentic_plan_create.
|
// PlanCreateInput is the input for agentic_plan_create.
|
||||||
//
|
|
||||||
// input := PlanCreateInput{Title: "Add notifications", Objective: "Broadcast MCP events"}
|
|
||||||
type PlanCreateInput struct {
|
type PlanCreateInput struct {
|
||||||
Title string `json:"title"`
|
Title string `json:"title"`
|
||||||
Objective string `json:"objective"`
|
Objective string `json:"objective"`
|
||||||
|
|
@ -74,8 +52,6 @@ type PlanCreateInput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanCreateOutput is the output for agentic_plan_create.
|
// PlanCreateOutput is the output for agentic_plan_create.
|
||||||
//
|
|
||||||
// // out.Success == true, out.ID != ""
|
|
||||||
type PlanCreateOutput struct {
|
type PlanCreateOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
|
|
@ -83,23 +59,17 @@ type PlanCreateOutput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanReadInput is the input for agentic_plan_read.
|
// PlanReadInput is the input for agentic_plan_read.
|
||||||
//
|
|
||||||
// input := PlanReadInput{ID: "add-notifications"}
|
|
||||||
type PlanReadInput struct {
|
type PlanReadInput struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanReadOutput is the output for agentic_plan_read.
|
// PlanReadOutput is the output for agentic_plan_read.
|
||||||
//
|
|
||||||
// // out.Plan.Title == "Add notifications"
|
|
||||||
type PlanReadOutput struct {
|
type PlanReadOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Plan Plan `json:"plan"`
|
Plan Plan `json:"plan"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanUpdateInput is the input for agentic_plan_update.
|
// PlanUpdateInput is the input for agentic_plan_update.
|
||||||
//
|
|
||||||
// input := PlanUpdateInput{ID: "add-notifications", Status: "ready"}
|
|
||||||
type PlanUpdateInput struct {
|
type PlanUpdateInput struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
Status string `json:"status,omitempty"`
|
Status string `json:"status,omitempty"`
|
||||||
|
|
@ -111,102 +81,62 @@ type PlanUpdateInput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanUpdateOutput is the output for agentic_plan_update.
|
// PlanUpdateOutput is the output for agentic_plan_update.
|
||||||
//
|
|
||||||
// // out.Plan.Status == "ready"
|
|
||||||
type PlanUpdateOutput struct {
|
type PlanUpdateOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Plan Plan `json:"plan"`
|
Plan Plan `json:"plan"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanDeleteInput is the input for agentic_plan_delete.
|
// PlanDeleteInput is the input for agentic_plan_delete.
|
||||||
//
|
|
||||||
// input := PlanDeleteInput{ID: "add-notifications"}
|
|
||||||
type PlanDeleteInput struct {
|
type PlanDeleteInput struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanDeleteOutput is the output for agentic_plan_delete.
|
// PlanDeleteOutput is the output for agentic_plan_delete.
|
||||||
//
|
|
||||||
// // out.Deleted == "add-notifications"
|
|
||||||
type PlanDeleteOutput struct {
|
type PlanDeleteOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Deleted string `json:"deleted"`
|
Deleted string `json:"deleted"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanListInput is the input for agentic_plan_list.
|
// PlanListInput is the input for agentic_plan_list.
|
||||||
//
|
|
||||||
// input := PlanListInput{Status: "draft"}
|
|
||||||
type PlanListInput struct {
|
type PlanListInput struct {
|
||||||
Status string `json:"status,omitempty"`
|
Status string `json:"status,omitempty"`
|
||||||
Repo string `json:"repo,omitempty"`
|
Repo string `json:"repo,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanListOutput is the output for agentic_plan_list.
|
// PlanListOutput is the output for agentic_plan_list.
|
||||||
//
|
|
||||||
// // len(out.Plans) >= 1
|
|
||||||
type PlanListOutput struct {
|
type PlanListOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Count int `json:"count"`
|
Count int `json:"count"`
|
||||||
Plans []Plan `json:"plans"`
|
Plans []Plan `json:"plans"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanCheckpointInput is the input for agentic_plan_checkpoint.
//
// Phase is 1-based. At least one of Notes or Done must be provided.
//
//	input := PlanCheckpointInput{ID: "add-notifications", Phase: 1, Done: true}
type PlanCheckpointInput struct {
	ID    string `json:"id"`              // plan identifier; required
	Phase int    `json:"phase"`           // 1-based phase number; required, must be > 0
	Notes string `json:"notes,omitempty"` // progress note to attach to the phase
	Done  bool   `json:"done,omitempty"`  // mark the phase as done
}
|
|
||||||
|
|
||||||
// PlanCheckpointOutput is the output for agentic_plan_checkpoint.
//
//	// out.Plan.Phases[0].Status == "done"
type PlanCheckpointOutput struct {
	Success bool `json:"success"` // true when the checkpoint was recorded and the plan was persisted
	Plan    Plan `json:"plan"`    // the full updated plan
}
|
|
||||||
|
|
||||||
// --- Registration ---
|
// --- Registration ---
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerPlanTools(svc *coremcp.Service) {
|
func (s *PrepSubsystem) registerPlanTools(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_plan_create",
|
Name: "agentic_plan_create",
|
||||||
Description: "Create an implementation plan. Plans track phased work with acceptance criteria, status lifecycle (draft → ready → in_progress → needs_verification → verified → approved), and per-phase progress.",
|
Description: "Create an implementation plan. Plans track phased work with acceptance criteria, status lifecycle (draft → ready → in_progress → needs_verification → verified → approved), and per-phase progress.",
|
||||||
}, s.planCreate)
|
}, s.planCreate)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "agentic_plan_read",
|
Name: "agentic_plan_read",
|
||||||
Description: "Read an implementation plan by ID. Returns the full plan with all phases, criteria, and status.",
|
Description: "Read an implementation plan by ID. Returns the full plan with all phases, criteria, and status.",
|
||||||
}, s.planRead)
|
}, s.planRead)
|
||||||
|
|
||||||
// agentic_plan_status is kept as a user-facing alias for the read tool.
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_plan_status",
|
|
||||||
Description: "Get the current status of an implementation plan by ID. Returns the full plan with all phases, criteria, and status.",
|
|
||||||
}, s.planRead)
|
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_plan_update",
|
Name: "agentic_plan_update",
|
||||||
Description: "Update an implementation plan. Supports partial updates — only provided fields are changed. Use this to advance status, update phases, or add notes.",
|
Description: "Update an implementation plan. Supports partial updates — only provided fields are changed. Use this to advance status, update phases, or add notes.",
|
||||||
}, s.planUpdate)
|
}, s.planUpdate)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "agentic_plan_delete",
|
Name: "agentic_plan_delete",
|
||||||
Description: "Delete an implementation plan by ID. Permanently removes the plan file.",
|
Description: "Delete an implementation plan by ID. Permanently removes the plan file.",
|
||||||
}, s.planDelete)
|
}, s.planDelete)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "agentic_plan_list",
|
Name: "agentic_plan_list",
|
||||||
Description: "List implementation plans. Supports filtering by status (draft, ready, in_progress, etc.) and repo.",
|
Description: "List implementation plans. Supports filtering by status (draft, ready, in_progress, etc.) and repo.",
|
||||||
}, s.planList)
|
}, s.planList)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_plan_checkpoint",
|
|
||||||
Description: "Record a checkpoint for a plan phase and optionally mark the phase done.",
|
|
||||||
}, s.planCheckpoint)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Handlers ---
|
// --- Handlers ---
|
||||||
|
|
@ -349,11 +279,11 @@ func (s *PrepSubsystem) planList(_ context.Context, _ *mcp.CallToolRequest, inpu
|
||||||
|
|
||||||
var plans []Plan
|
var plans []Plan
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".json") {
|
if entry.IsDir() || !core.HasSuffix(entry.Name(), ".json") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
id := strings.TrimSuffix(entry.Name(), ".json")
|
id := core.TrimSuffix(entry.Name(), ".json")
|
||||||
plan, err := readPlan(dir, id)
|
plan, err := readPlan(dir, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
|
|
@ -377,81 +307,46 @@ func (s *PrepSubsystem) planList(_ context.Context, _ *mcp.CallToolRequest, inpu
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) planCheckpoint(_ context.Context, _ *mcp.CallToolRequest, input PlanCheckpointInput) (*mcp.CallToolResult, PlanCheckpointOutput, error) {
|
|
||||||
if input.ID == "" {
|
|
||||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "id is required", nil)
|
|
||||||
}
|
|
||||||
if input.Phase <= 0 {
|
|
||||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "phase must be greater than zero", nil)
|
|
||||||
}
|
|
||||||
if input.Notes == "" && !input.Done {
|
|
||||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "notes or done is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
plan, err := readPlan(s.plansDir(), input.ID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, PlanCheckpointOutput{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
phaseIndex := input.Phase - 1
|
|
||||||
if phaseIndex >= len(plan.Phases) {
|
|
||||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "phase not found", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
phase := &plan.Phases[phaseIndex]
|
|
||||||
phase.Checkpoints = append(phase.Checkpoints, Checkpoint{
|
|
||||||
Notes: input.Notes,
|
|
||||||
Done: input.Done,
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
})
|
|
||||||
if input.Done {
|
|
||||||
phase.Status = "done"
|
|
||||||
}
|
|
||||||
|
|
||||||
plan.UpdatedAt = time.Now()
|
|
||||||
if _, err := writePlan(s.plansDir(), plan); err != nil {
|
|
||||||
return nil, PlanCheckpointOutput{}, coreerr.E("planCheckpoint", "failed to write plan", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, PlanCheckpointOutput{
|
|
||||||
Success: true,
|
|
||||||
Plan: *plan,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Helpers ---
|
// --- Helpers ---
|
||||||
|
|
||||||
func (s *PrepSubsystem) plansDir() string {
|
func (s *PrepSubsystem) plansDir() string {
|
||||||
return filepath.Join(s.codePath, ".core", "plans")
|
return core.JoinPath(s.codePath, ".core", "plans")
|
||||||
}
|
}
|
||||||
|
|
||||||
func planPath(dir, id string) string {
|
func planPath(dir, id string) string {
|
||||||
return filepath.Join(dir, id+".json")
|
return core.JoinPath(dir, id+".json")
|
||||||
}
|
}
|
||||||
|
|
||||||
func generatePlanID(title string) string {
|
func generatePlanID(title string) string {
|
||||||
slug := strings.Map(func(r rune) rune {
|
// Build slug: lowercase, letters/digits/-
|
||||||
if r >= 'a' && r <= 'z' || r >= '0' && r <= '9' || r == '-' {
|
builder := core.NewBuilder()
|
||||||
return r
|
for _, r := range title {
|
||||||
|
switch {
|
||||||
|
case r >= 'a' && r <= 'z' || r >= '0' && r <= '9' || r == '-':
|
||||||
|
builder.WriteRune(r)
|
||||||
|
case r >= 'A' && r <= 'Z':
|
||||||
|
builder.WriteRune(r + 32)
|
||||||
|
case r == ' ':
|
||||||
|
builder.WriteRune('-')
|
||||||
}
|
}
|
||||||
if r >= 'A' && r <= 'Z' {
|
}
|
||||||
return r + 32
|
slug := builder.String()
|
||||||
}
|
|
||||||
if r == ' ' {
|
|
||||||
return '-'
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}, title)
|
|
||||||
|
|
||||||
// Trim consecutive dashes and cap length
|
// Trim consecutive dashes and cap length
|
||||||
for strings.Contains(slug, "--") {
|
for core.Contains(slug, "--") {
|
||||||
slug = strings.ReplaceAll(slug, "--", "-")
|
slug = core.Replace(slug, "--", "-")
|
||||||
|
}
|
||||||
|
slug = core.Trim(slug)
|
||||||
|
// Trim leading/trailing dashes manually (core.Trim does whitespace only)
|
||||||
|
for len(slug) > 0 && slug[0] == '-' {
|
||||||
|
slug = slug[1:]
|
||||||
}
|
}
|
||||||
slug = strings.Trim(slug, "-")
|
|
||||||
if len(slug) > 30 {
|
if len(slug) > 30 {
|
||||||
slug = slug[:30]
|
slug = slug[:30]
|
||||||
}
|
}
|
||||||
slug = strings.TrimRight(slug, "-")
|
for len(slug) > 0 && slug[len(slug)-1] == '-' {
|
||||||
|
slug = slug[:len(slug)-1]
|
||||||
|
}
|
||||||
|
|
||||||
// Append short random suffix for uniqueness
|
// Append short random suffix for uniqueness
|
||||||
b := make([]byte, 3)
|
b := make([]byte, 3)
|
||||||
|
|
@ -466,8 +361,9 @@ func readPlan(dir, id string) (*Plan, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
var plan Plan
|
var plan Plan
|
||||||
if err := json.Unmarshal([]byte(data), &plan); err != nil {
|
result := core.JSONUnmarshalString(data, &plan)
|
||||||
return nil, coreerr.E("readPlan", "failed to parse plan "+id, err)
|
if !result.OK {
|
||||||
|
return nil, coreerr.E("readPlan", "failed to parse plan "+id, nil)
|
||||||
}
|
}
|
||||||
return &plan, nil
|
return &plan, nil
|
||||||
}
|
}
|
||||||
|
|
@ -478,12 +374,9 @@ func writePlan(dir string, plan *Plan) (string, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
path := planPath(dir, plan.ID)
|
path := planPath(dir, plan.ID)
|
||||||
data, err := json.MarshalIndent(plan, "", " ")
|
encoded := core.JSONMarshalString(plan)
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return path, writeAtomic(path, string(data))
|
return path, coreio.Local.Write(path, encoded)
|
||||||
}
|
}
|
||||||
|
|
||||||
func validPlanStatus(status string) bool {
|
func validPlanStatus(status string) bool {
|
||||||
|
|
|
||||||
|
|
@ -1,62 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestPlanCheckpoint_Good_AppendsCheckpointAndMarksPhaseDone(t *testing.T) {
|
|
||||||
root := t.TempDir()
|
|
||||||
sub := &PrepSubsystem{codePath: root}
|
|
||||||
|
|
||||||
plan := &Plan{
|
|
||||||
ID: "plan-1",
|
|
||||||
Title: "Test plan",
|
|
||||||
Status: "in_progress",
|
|
||||||
Objective: "Verify checkpoints",
|
|
||||||
Phases: []Phase{
|
|
||||||
{
|
|
||||||
Number: 1,
|
|
||||||
Name: "Phase 1",
|
|
||||||
Status: "in_progress",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
UpdatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := writePlan(sub.plansDir(), plan); err != nil {
|
|
||||||
t.Fatalf("writePlan failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := sub.planCheckpoint(context.Background(), nil, PlanCheckpointInput{
|
|
||||||
ID: plan.ID,
|
|
||||||
Phase: 1,
|
|
||||||
Notes: "Implementation verified",
|
|
||||||
Done: true,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("planCheckpoint failed: %v", err)
|
|
||||||
}
|
|
||||||
if !out.Success {
|
|
||||||
t.Fatal("expected checkpoint output success")
|
|
||||||
}
|
|
||||||
if out.Plan.Phases[0].Status != "done" {
|
|
||||||
t.Fatalf("expected phase status done, got %q", out.Plan.Phases[0].Status)
|
|
||||||
}
|
|
||||||
if len(out.Plan.Phases[0].Checkpoints) != 1 {
|
|
||||||
t.Fatalf("expected 1 checkpoint, got %d", len(out.Plan.Phases[0].Checkpoints))
|
|
||||||
}
|
|
||||||
if out.Plan.Phases[0].Checkpoints[0].Notes != "Implementation verified" {
|
|
||||||
t.Fatalf("unexpected checkpoint notes: %q", out.Plan.Phases[0].Checkpoints[0].Notes)
|
|
||||||
}
|
|
||||||
if !out.Plan.Phases[0].Checkpoints[0].Done {
|
|
||||||
t.Fatal("expected checkpoint to be marked done")
|
|
||||||
}
|
|
||||||
if out.Plan.Phases[0].Checkpoints[0].CreatedAt.IsZero() {
|
|
||||||
t.Fatal("expected checkpoint timestamp")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -5,14 +5,10 @@ package agentic
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
|
|
@ -20,26 +16,16 @@ import (
|
||||||
|
|
||||||
// --- agentic_create_pr ---
|
// --- agentic_create_pr ---
|
||||||
|
|
||||||
// PRInput is the input for agentic_create_pr and agentic_pr.
|
// CreatePRInput is the input for agentic_create_pr.
|
||||||
//
|
type CreatePRInput struct {
|
||||||
// input := PRInput{
|
Workspace string `json:"workspace"` // workspace name (e.g. "mcp-1773581873")
|
||||||
// Workspace: "mcp-1773581873",
|
Title string `json:"title,omitempty"` // PR title (default: task description)
|
||||||
// Base: "main",
|
Body string `json:"body,omitempty"` // PR body (default: auto-generated)
|
||||||
// }
|
Base string `json:"base,omitempty"` // base branch (default: "main")
|
||||||
type PRInput struct {
|
DryRun bool `json:"dry_run,omitempty"` // preview without creating
|
||||||
Workspace string `json:"workspace"` // workspace name (e.g. "mcp-1773581873")
|
|
||||||
Title string `json:"title,omitempty"` // PR title (default: task description)
|
|
||||||
Body string `json:"body,omitempty"` // PR body (default: auto-generated)
|
|
||||||
Base string `json:"base,omitempty"` // base branch (default: "main")
|
|
||||||
DryRun bool `json:"dry_run,omitempty"` // preview without creating
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreatePRInput is kept as a compatibility alias for older callers.
|
|
||||||
type CreatePRInput = PRInput
|
|
||||||
|
|
||||||
// CreatePROutput is the output for agentic_create_pr.
|
// CreatePROutput is the output for agentic_create_pr.
|
||||||
//
|
|
||||||
// // out.Success == true, out.Branch == "agent/issue-123-fix", out.Pushed == true
|
|
||||||
type CreatePROutput struct {
|
type CreatePROutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
PRURL string `json:"pr_url,omitempty"`
|
PRURL string `json:"pr_url,omitempty"`
|
||||||
|
|
@ -50,15 +36,14 @@ type CreatePROutput struct {
|
||||||
Pushed bool `json:"pushed"`
|
Pushed bool `json:"pushed"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerCreatePRTool(svc *coremcp.Service) {
|
func (s *PrepSubsystem) registerCreatePRTool(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_create_pr",
|
Name: "agentic_create_pr",
|
||||||
Description: "Create a pull request from an agent workspace. Pushes the branch to Forge and opens a PR. Links to the source issue if one was tracked.",
|
Description: "Create a pull request from an agent workspace. Pushes the branch to Forge and opens a PR. Links to the source issue if one was tracked.",
|
||||||
}, s.createPR)
|
}, s.createPR)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, input PRInput) (*mcp.CallToolResult, CreatePROutput, error) {
|
func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, input CreatePRInput) (*mcp.CallToolResult, CreatePROutput, error) {
|
||||||
if input.Workspace == "" {
|
if input.Workspace == "" {
|
||||||
return nil, CreatePROutput{}, coreerr.E("createPR", "workspace is required", nil)
|
return nil, CreatePROutput{}, coreerr.E("createPR", "workspace is required", nil)
|
||||||
}
|
}
|
||||||
|
|
@ -66,8 +51,8 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in
|
||||||
return nil, CreatePROutput{}, coreerr.E("createPR", "no Forge token configured", nil)
|
return nil, CreatePROutput{}, coreerr.E("createPR", "no Forge token configured", nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
wsDir := filepath.Join(s.workspaceRoot(), input.Workspace)
|
wsDir := core.JoinPath(s.workspaceRoot(), input.Workspace)
|
||||||
srcDir := filepath.Join(wsDir, "src")
|
srcDir := core.JoinPath(wsDir, "src")
|
||||||
|
|
||||||
if _, err := coreio.Local.List(srcDir); err != nil {
|
if _, err := coreio.Local.List(srcDir); err != nil {
|
||||||
return nil, CreatePROutput{}, coreerr.E("createPR", "workspace not found: "+input.Workspace, nil)
|
return nil, CreatePROutput{}, coreerr.E("createPR", "workspace not found: "+input.Workspace, nil)
|
||||||
|
|
@ -87,7 +72,7 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, CreatePROutput{}, coreerr.E("createPR", "failed to detect branch", err)
|
return nil, CreatePROutput{}, coreerr.E("createPR", "failed to detect branch", err)
|
||||||
}
|
}
|
||||||
st.Branch = strings.TrimSpace(string(out))
|
st.Branch = core.Trim(string(out))
|
||||||
}
|
}
|
||||||
|
|
||||||
org := st.Org
|
org := st.Org
|
||||||
|
|
@ -105,7 +90,7 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in
|
||||||
title = st.Task
|
title = st.Task
|
||||||
}
|
}
|
||||||
if title == "" {
|
if title == "" {
|
||||||
title = fmt.Sprintf("Agent work on %s", st.Branch)
|
title = core.Sprintf("Agent work on %s", st.Branch)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build PR body
|
// Build PR body
|
||||||
|
|
@ -139,11 +124,11 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in
|
||||||
|
|
||||||
// Update status with PR URL
|
// Update status with PR URL
|
||||||
st.PRURL = prURL
|
st.PRURL = prURL
|
||||||
s.saveStatus(wsDir, st)
|
writeStatus(wsDir, st)
|
||||||
|
|
||||||
// Comment on issue if tracked
|
// Comment on issue if tracked
|
||||||
if st.Issue > 0 {
|
if st.Issue > 0 {
|
||||||
comment := fmt.Sprintf("Pull request created: %s", prURL)
|
comment := core.Sprintf("Pull request created: %s", prURL)
|
||||||
s.commentOnIssue(ctx, org, st.Repo, st.Issue, comment)
|
s.commentOnIssue(ctx, org, st.Repo, st.Issue, comment)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -159,37 +144,31 @@ func (s *PrepSubsystem) createPR(ctx context.Context, _ *mcp.CallToolRequest, in
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) buildPRBody(st *WorkspaceStatus) string {
|
func (s *PrepSubsystem) buildPRBody(st *WorkspaceStatus) string {
|
||||||
var b strings.Builder
|
builder := core.NewBuilder()
|
||||||
b.WriteString("## Summary\n\n")
|
builder.WriteString("## Summary\n\n")
|
||||||
if st.Task != "" {
|
if st.Task != "" {
|
||||||
b.WriteString(st.Task)
|
builder.WriteString(st.Task)
|
||||||
b.WriteString("\n\n")
|
builder.WriteString("\n\n")
|
||||||
}
|
}
|
||||||
if st.Issue > 0 {
|
if st.Issue > 0 {
|
||||||
b.WriteString(fmt.Sprintf("Closes #%d\n\n", st.Issue))
|
builder.WriteString(core.Sprintf("Closes #%d\n\n", st.Issue))
|
||||||
}
|
}
|
||||||
b.WriteString(fmt.Sprintf("**Agent:** %s\n", st.Agent))
|
builder.WriteString(core.Sprintf("**Agent:** %s\n", st.Agent))
|
||||||
b.WriteString(fmt.Sprintf("**Runs:** %d\n", st.Runs))
|
builder.WriteString(core.Sprintf("**Runs:** %d\n", st.Runs))
|
||||||
b.WriteString("\n---\n*Created by agentic dispatch*\n")
|
builder.WriteString("\n---\n*Created by agentic dispatch*\n")
|
||||||
return b.String()
|
return builder.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) forgeCreatePR(ctx context.Context, org, repo, head, base, title, body string) (string, int, error) {
|
func (s *PrepSubsystem) forgeCreatePR(ctx context.Context, org, repo, head, base, title, body string) (string, int, error) {
|
||||||
payload, err := json.Marshal(map[string]any{
|
payload := []byte(core.JSONMarshalString(map[string]any{
|
||||||
"title": title,
|
"title": title,
|
||||||
"body": body,
|
"body": body,
|
||||||
"head": head,
|
"head": head,
|
||||||
"base": base,
|
"base": base,
|
||||||
})
|
}))
|
||||||
if err != nil {
|
|
||||||
return "", 0, coreerr.E("forgeCreatePR", "failed to marshal PR payload", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls", s.forgeURL, org, repo)
|
pullsURL := core.Sprintf("%s/api/v1/repos/%s/%s/pulls", s.forgeURL, org, repo)
|
||||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
req, _ := http.NewRequestWithContext(ctx, "POST", pullsURL, bytes.NewReader(payload))
|
||||||
if err != nil {
|
|
||||||
return "", 0, coreerr.E("forgeCreatePR", "failed to build PR request", err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
|
|
@ -201,35 +180,25 @@ func (s *PrepSubsystem) forgeCreatePR(ctx context.Context, org, repo, head, base
|
||||||
|
|
||||||
if resp.StatusCode != 201 {
|
if resp.StatusCode != 201 {
|
||||||
var errBody map[string]any
|
var errBody map[string]any
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&errBody); err != nil {
|
core.JSONUnmarshalString(readBody(resp.Body), &errBody)
|
||||||
return "", 0, coreerr.E("forgeCreatePR", fmt.Sprintf("HTTP %d with unreadable error body", resp.StatusCode), err)
|
|
||||||
}
|
|
||||||
msg, _ := errBody["message"].(string)
|
msg, _ := errBody["message"].(string)
|
||||||
return "", 0, coreerr.E("forgeCreatePR", fmt.Sprintf("HTTP %d: %s", resp.StatusCode, msg), nil)
|
return "", 0, coreerr.E("forgeCreatePR", core.Sprintf("HTTP %d: %s", resp.StatusCode, msg), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
var pr struct {
|
var pr struct {
|
||||||
Number int `json:"number"`
|
Number int `json:"number"`
|
||||||
HTMLURL string `json:"html_url"`
|
HTMLURL string `json:"html_url"`
|
||||||
}
|
}
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&pr); err != nil {
|
core.JSONUnmarshalString(readBody(resp.Body), &pr)
|
||||||
return "", 0, coreerr.E("forgeCreatePR", "failed to decode PR response", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return pr.HTMLURL, pr.Number, nil
|
return pr.HTMLURL, pr.Number, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) commentOnIssue(ctx context.Context, org, repo string, issue int, comment string) {
|
func (s *PrepSubsystem) commentOnIssue(ctx context.Context, org, repo string, issue int, comment string) {
|
||||||
payload, err := json.Marshal(map[string]string{"body": comment})
|
payload := []byte(core.JSONMarshalString(map[string]string{"body": comment}))
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/comments", s.forgeURL, org, repo, issue)
|
commentURL := core.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/comments", s.forgeURL, org, repo, issue)
|
||||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
|
req, _ := http.NewRequestWithContext(ctx, "POST", commentURL, bytes.NewReader(payload))
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
|
|
@ -243,18 +212,14 @@ func (s *PrepSubsystem) commentOnIssue(ctx context.Context, org, repo string, is
|
||||||
// --- agentic_list_prs ---
|
// --- agentic_list_prs ---
|
||||||
|
|
||||||
// ListPRsInput is the input for agentic_list_prs.
|
// ListPRsInput is the input for agentic_list_prs.
|
||||||
//
|
|
||||||
// input := ListPRsInput{Org: "core", Repo: "go-io", State: "open", Limit: 20}
|
|
||||||
type ListPRsInput struct {
|
type ListPRsInput struct {
|
||||||
Org string `json:"org,omitempty"` // forge org (default "core")
|
Org string `json:"org,omitempty"` // forge org (default "core")
|
||||||
Repo string `json:"repo,omitempty"` // specific repo, or empty for all
|
Repo string `json:"repo,omitempty"` // specific repo, or empty for all
|
||||||
State string `json:"state,omitempty"` // "open" (default), "closed", "all"
|
State string `json:"state,omitempty"` // "open" (default), "closed", "all"
|
||||||
Limit int `json:"limit,omitempty"` // max results (default 20)
|
Limit int `json:"limit,omitempty"` // max results (default 20)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListPRsOutput is the output for agentic_list_prs.
|
// ListPRsOutput is the output for agentic_list_prs.
|
||||||
//
|
|
||||||
// // out.Success == true, len(out.PRs) <= 20
|
|
||||||
type ListPRsOutput struct {
|
type ListPRsOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Count int `json:"count"`
|
Count int `json:"count"`
|
||||||
|
|
@ -262,8 +227,6 @@ type ListPRsOutput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// PRInfo represents a pull request.
|
// PRInfo represents a pull request.
|
||||||
//
|
|
||||||
// // pr.Number == 42, pr.Branch == "agent/issue-42-fix"
|
|
||||||
type PRInfo struct {
|
type PRInfo struct {
|
||||||
Repo string `json:"repo"`
|
Repo string `json:"repo"`
|
||||||
Number int `json:"number"`
|
Number int `json:"number"`
|
||||||
|
|
@ -277,9 +240,8 @@ type PRInfo struct {
|
||||||
URL string `json:"url"`
|
URL string `json:"url"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerListPRsTool(svc *coremcp.Service) {
|
func (s *PrepSubsystem) registerListPRsTool(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_list_prs",
|
Name: "agentic_list_prs",
|
||||||
Description: "List pull requests across Forge repos. Filter by org, repo, and state (open/closed/all).",
|
Description: "List pull requests across Forge repos. Filter by org, repo, and state (open/closed/all).",
|
||||||
}, s.listPRs)
|
}, s.listPRs)
|
||||||
|
|
@ -337,18 +299,18 @@ func (s *PrepSubsystem) listPRs(ctx context.Context, _ *mcp.CallToolRequest, inp
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) listRepoPRs(ctx context.Context, org, repo, state string) ([]PRInfo, error) {
|
func (s *PrepSubsystem) listRepoPRs(ctx context.Context, org, repo, state string) ([]PRInfo, error) {
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls?state=%s&limit=10",
|
repoPullsURL := core.Sprintf("%s/api/v1/repos/%s/%s/pulls?state=%s&limit=10",
|
||||||
s.forgeURL, org, repo, state)
|
s.forgeURL, org, repo, state)
|
||||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, _ := http.NewRequestWithContext(ctx, "GET", repoPullsURL, nil)
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
resp, err := s.client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, coreerr.E("listRepoPRs", "failed to list PRs for "+repo, err)
|
return nil, coreerr.E("listRepoPRs", "failed to list PRs for "+repo, err)
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode != 200 {
|
if resp.StatusCode != 200 {
|
||||||
return nil, coreerr.E("listRepoPRs", fmt.Sprintf("HTTP %d for "+repo, resp.StatusCode), nil)
|
resp.Body.Close()
|
||||||
|
return nil, coreerr.E("listRepoPRs", core.Sprintf("HTTP %d for "+repo, resp.StatusCode), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
var prs []struct {
|
var prs []struct {
|
||||||
|
|
@ -370,7 +332,7 @@ func (s *PrepSubsystem) listRepoPRs(ctx context.Context, org, repo, state string
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
} `json:"labels"`
|
} `json:"labels"`
|
||||||
}
|
}
|
||||||
json.NewDecoder(resp.Body).Decode(&prs)
|
core.JSONUnmarshalString(readBody(resp.Body), &prs)
|
||||||
|
|
||||||
var result []PRInfo
|
var result []PRInfo
|
||||||
for _, pr := range prs {
|
for _, pr := range prs {
|
||||||
|
|
|
||||||
|
|
@ -1,28 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestForgeCreatePR_Bad_InvalidJSONResponse(t *testing.T) {
|
|
||||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusCreated)
|
|
||||||
_, _ = w.Write([]byte("{not-json"))
|
|
||||||
}))
|
|
||||||
defer srv.Close()
|
|
||||||
|
|
||||||
s := &PrepSubsystem{
|
|
||||||
forgeURL: srv.URL,
|
|
||||||
client: srv.Client(),
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, err := s.forgeCreatePR(context.Background(), "core", "demo", "agent/test", "main", "Fix bug", "body")
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected malformed PR response to fail")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -7,17 +7,12 @@ package agentic
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
goio "io"
|
goio "io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
|
|
@ -33,30 +28,24 @@ type PrepSubsystem struct {
|
||||||
specsPath string
|
specsPath string
|
||||||
codePath string
|
codePath string
|
||||||
client *http.Client
|
client *http.Client
|
||||||
notifier coremcp.Notifier
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
_ coremcp.Subsystem = (*PrepSubsystem)(nil)
|
|
||||||
_ coremcp.SubsystemWithShutdown = (*PrepSubsystem)(nil)
|
|
||||||
_ coremcp.SubsystemWithNotifier = (*PrepSubsystem)(nil)
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewPrep creates an agentic subsystem.
|
// NewPrep creates an agentic subsystem.
|
||||||
//
|
//
|
||||||
// prep := NewPrep()
|
// sub := agentic.NewPrep()
|
||||||
|
// svc, _ := mcp.New(mcp.Options{Subsystems: []mcp.Subsystem{sub}})
|
||||||
func NewPrep() *PrepSubsystem {
|
func NewPrep() *PrepSubsystem {
|
||||||
home, _ := os.UserHomeDir()
|
home := core.Env("HOME")
|
||||||
|
|
||||||
forgeToken := os.Getenv("FORGE_TOKEN")
|
forgeToken := core.Env("FORGE_TOKEN")
|
||||||
if forgeToken == "" {
|
if forgeToken == "" {
|
||||||
forgeToken = os.Getenv("GITEA_TOKEN")
|
forgeToken = core.Env("GITEA_TOKEN")
|
||||||
}
|
}
|
||||||
|
|
||||||
brainKey := os.Getenv("CORE_BRAIN_KEY")
|
brainKey := core.Env("CORE_BRAIN_KEY")
|
||||||
if brainKey == "" {
|
if brainKey == "" {
|
||||||
if data, err := coreio.Local.Read(filepath.Join(home, ".claude", "brain.key")); err == nil {
|
if data, err := coreio.Local.Read(core.JoinPath(home, ".claude", "brain.key")); err == nil {
|
||||||
brainKey = strings.TrimSpace(data)
|
brainKey = core.Trim(data)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -65,43 +54,31 @@ func NewPrep() *PrepSubsystem {
|
||||||
forgeToken: forgeToken,
|
forgeToken: forgeToken,
|
||||||
brainURL: envOr("CORE_BRAIN_URL", "https://api.lthn.sh"),
|
brainURL: envOr("CORE_BRAIN_URL", "https://api.lthn.sh"),
|
||||||
brainKey: brainKey,
|
brainKey: brainKey,
|
||||||
specsPath: envOr("SPECS_PATH", filepath.Join(home, "Code", "host-uk", "specs")),
|
specsPath: envOr("SPECS_PATH", core.JoinPath(home, "Code", "host-uk", "specs")),
|
||||||
codePath: envOr("CODE_PATH", filepath.Join(home, "Code")),
|
codePath: envOr("CODE_PATH", core.JoinPath(home, "Code")),
|
||||||
client: &http.Client{Timeout: 30 * time.Second},
|
client: &http.Client{Timeout: 30 * time.Second},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNotifier wires the shared MCP notifier into the agentic subsystem.
|
|
||||||
func (s *PrepSubsystem) SetNotifier(n coremcp.Notifier) {
|
|
||||||
s.notifier = n
|
|
||||||
}
|
|
||||||
|
|
||||||
// emitChannel pushes an agentic event through the shared notifier.
|
|
||||||
func (s *PrepSubsystem) emitChannel(ctx context.Context, channel string, data any) {
|
|
||||||
if s.notifier != nil {
|
|
||||||
s.notifier.ChannelSend(ctx, channel, data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func envOr(key, fallback string) string {
|
func envOr(key, fallback string) string {
|
||||||
if v := os.Getenv(key); v != "" {
|
if v := core.Env(key); v != "" {
|
||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
return fallback
|
return fallback
|
||||||
}
|
}
|
||||||
|
|
||||||
func sanitizeRepoPathSegment(value, field string, allowSubdirs bool) (string, error) {
|
func sanitizeRepoPathSegment(value, field string, allowSubdirs bool) (string, error) {
|
||||||
if strings.TrimSpace(value) != value {
|
if core.Trim(value) != value {
|
||||||
return "", coreerr.E("prepWorkspace", field+" contains whitespace", nil)
|
return "", coreerr.E("prepWorkspace", field+" contains whitespace", nil)
|
||||||
}
|
}
|
||||||
if value == "" {
|
if value == "" {
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
if strings.Contains(value, "\\") {
|
if core.Contains(value, "\\") {
|
||||||
return "", coreerr.E("prepWorkspace", field+" contains invalid path separator", nil)
|
return "", coreerr.E("prepWorkspace", field+" contains invalid path separator", nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
parts := strings.Split(value, "/")
|
parts := core.Split(value, "/")
|
||||||
if !allowSubdirs && len(parts) != 1 {
|
if !allowSubdirs && len(parts) != 1 {
|
||||||
return "", coreerr.E("prepWorkspace", field+" may not contain subdirectories", nil)
|
return "", coreerr.E("prepWorkspace", field+" may not contain subdirectories", nil)
|
||||||
}
|
}
|
||||||
|
|
@ -130,30 +107,25 @@ func sanitizeRepoPathSegment(value, field string, allowSubdirs bool) (string, er
|
||||||
func (s *PrepSubsystem) Name() string { return "agentic" }
|
func (s *PrepSubsystem) Name() string { return "agentic" }
|
||||||
|
|
||||||
// RegisterTools implements mcp.Subsystem.
|
// RegisterTools implements mcp.Subsystem.
|
||||||
func (s *PrepSubsystem) RegisterTools(svc *coremcp.Service) {
|
func (s *PrepSubsystem) RegisterTools(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_prep_workspace",
|
Name: "agentic_prep_workspace",
|
||||||
Description: "Prepare a sandboxed agent workspace with TODO.md, CLAUDE.md, CONTEXT.md, CONSUMERS.md, RECENT.md, and a git clone of the target repo in src/.",
|
Description: "Prepare a sandboxed agent workspace with TODO.md, CLAUDE.md, CONTEXT.md, CONSUMERS.md, RECENT.md, and a git clone of the target repo in src/.",
|
||||||
}, s.prepWorkspace)
|
}, s.prepWorkspace)
|
||||||
|
|
||||||
s.registerDispatchTool(svc)
|
s.registerDispatchTool(server)
|
||||||
s.registerIssueTools(svc)
|
s.registerStatusTool(server)
|
||||||
s.registerStatusTool(svc)
|
s.registerResumeTool(server)
|
||||||
s.registerResumeTool(svc)
|
s.registerCreatePRTool(server)
|
||||||
s.registerCreatePRTool(svc)
|
s.registerListPRsTool(server)
|
||||||
s.registerListPRsTool(svc)
|
s.registerEpicTool(server)
|
||||||
s.registerEpicTool(svc)
|
|
||||||
s.registerWatchTool(svc)
|
|
||||||
s.registerReviewQueueTool(svc)
|
|
||||||
s.registerMirrorTool(svc)
|
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "agentic_scan",
|
Name: "agentic_scan",
|
||||||
Description: "Scan Forge repos for open issues with actionable labels (agentic, help-wanted, bug).",
|
Description: "Scan Forge repos for open issues with actionable labels (agentic, help-wanted, bug).",
|
||||||
}, s.scan)
|
}, s.scan)
|
||||||
|
|
||||||
s.registerPlanTools(svc)
|
s.registerPlanTools(server)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown implements mcp.SubsystemWithShutdown.
|
// Shutdown implements mcp.SubsystemWithShutdown.
|
||||||
|
|
@ -161,7 +133,7 @@ func (s *PrepSubsystem) Shutdown(_ context.Context) error { return nil }
|
||||||
|
|
||||||
// workspaceRoot returns the base directory for agent workspaces.
|
// workspaceRoot returns the base directory for agent workspaces.
|
||||||
func (s *PrepSubsystem) workspaceRoot() string {
|
func (s *PrepSubsystem) workspaceRoot() string {
|
||||||
return filepath.Join(s.codePath, ".core", "workspace")
|
return core.JoinPath(s.codePath, ".core", "workspace")
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Input/Output types ---
|
// --- Input/Output types ---
|
||||||
|
|
@ -172,7 +144,6 @@ type PrepInput struct {
|
||||||
Org string `json:"org,omitempty"` // default "core"
|
Org string `json:"org,omitempty"` // default "core"
|
||||||
Issue int `json:"issue,omitempty"` // Forge issue number
|
Issue int `json:"issue,omitempty"` // Forge issue number
|
||||||
Task string `json:"task,omitempty"` // Task description (if no issue)
|
Task string `json:"task,omitempty"` // Task description (if no issue)
|
||||||
Branch string `json:"branch,omitempty"` // Override branch name
|
|
||||||
Template string `json:"template,omitempty"` // Prompt template: conventions, security, coding (default: coding)
|
Template string `json:"template,omitempty"` // Prompt template: conventions, security, coding (default: coding)
|
||||||
PlanTemplate string `json:"plan_template,omitempty"` // Plan template slug: bug-fix, code-review, new-feature, refactor, feature-port
|
PlanTemplate string `json:"plan_template,omitempty"` // Plan template slug: bug-fix, code-review, new-feature, refactor, feature-port
|
||||||
Variables map[string]string `json:"variables,omitempty"` // Template variable substitution
|
Variables map[string]string `json:"variables,omitempty"` // Template variable substitution
|
||||||
|
|
@ -183,7 +154,6 @@ type PrepInput struct {
|
||||||
type PrepOutput struct {
|
type PrepOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
WorkspaceDir string `json:"workspace_dir"`
|
WorkspaceDir string `json:"workspace_dir"`
|
||||||
Branch string `json:"branch,omitempty"`
|
|
||||||
WikiPages int `json:"wiki_pages"`
|
WikiPages int `json:"wiki_pages"`
|
||||||
SpecFiles int `json:"spec_files"`
|
SpecFiles int `json:"spec_files"`
|
||||||
Memories int `json:"memories"`
|
Memories int `json:"memories"`
|
||||||
|
|
@ -226,9 +196,8 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
||||||
|
|
||||||
// Workspace root: .core/workspace/{repo}-{timestamp}/
|
// Workspace root: .core/workspace/{repo}-{timestamp}/
|
||||||
wsRoot := s.workspaceRoot()
|
wsRoot := s.workspaceRoot()
|
||||||
coreio.Local.EnsureDir(wsRoot)
|
wsName := core.Sprintf("%s-%d", input.Repo, time.Now().Unix())
|
||||||
wsName := fmt.Sprintf("%s-%d", input.Repo, time.Now().Unix())
|
wsDir := core.JoinPath(wsRoot, wsName)
|
||||||
wsDir := filepath.Join(wsRoot, wsName)
|
|
||||||
|
|
||||||
// Create workspace structure
|
// Create workspace structure
|
||||||
// kb/ and specs/ will be created inside src/ after clone
|
// kb/ and specs/ will be created inside src/ after clone
|
||||||
|
|
@ -236,62 +205,72 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
||||||
out := PrepOutput{WorkspaceDir: wsDir}
|
out := PrepOutput{WorkspaceDir: wsDir}
|
||||||
|
|
||||||
// Source repo path
|
// Source repo path
|
||||||
repoPath := filepath.Join(s.codePath, "core", input.Repo)
|
repoPath := core.JoinPath(s.codePath, "core", input.Repo)
|
||||||
|
|
||||||
// 1. Clone repo into src/ and create feature branch
|
// 1. Clone repo into src/ and create feature branch
|
||||||
srcDir := filepath.Join(wsDir, "src")
|
srcDir := core.JoinPath(wsDir, "src")
|
||||||
cloneCmd := exec.CommandContext(ctx, "git", "clone", repoPath, srcDir)
|
cloneCmd := exec.CommandContext(ctx, "git", "clone", repoPath, srcDir)
|
||||||
if err := cloneCmd.Run(); err != nil {
|
if err := cloneCmd.Run(); err != nil {
|
||||||
return nil, PrepOutput{}, coreerr.E("prepWorkspace", "failed to clone repository", err)
|
return nil, PrepOutput{}, coreerr.E("prepWorkspace", "failed to clone repository", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create feature branch.
|
// Create feature branch
|
||||||
branchName := input.Branch
|
taskBuilder := core.NewBuilder()
|
||||||
if branchName == "" {
|
for _, r := range input.Task {
|
||||||
taskSlug := branchSlug(input.Task)
|
switch {
|
||||||
if input.Issue > 0 {
|
case r >= 'a' && r <= 'z' || r >= '0' && r <= '9' || r == '-':
|
||||||
issueSlug := branchSlug(input.Task)
|
taskBuilder.WriteRune(r)
|
||||||
branchName = fmt.Sprintf("agent/issue-%d", input.Issue)
|
case r >= 'A' && r <= 'Z':
|
||||||
if issueSlug != "" {
|
taskBuilder.WriteRune(r + 32)
|
||||||
branchName += "-" + issueSlug
|
default:
|
||||||
}
|
taskBuilder.WriteRune('-')
|
||||||
} else if taskSlug != "" {
|
|
||||||
branchName = fmt.Sprintf("agent/%s", taskSlug)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if branchName != "" {
|
taskSlug := taskBuilder.String()
|
||||||
|
if len(taskSlug) > 40 {
|
||||||
|
taskSlug = taskSlug[:40]
|
||||||
|
}
|
||||||
|
taskSlug = core.Trim(taskSlug)
|
||||||
|
// Trim leading/trailing dashes
|
||||||
|
for len(taskSlug) > 0 && taskSlug[0] == '-' {
|
||||||
|
taskSlug = taskSlug[1:]
|
||||||
|
}
|
||||||
|
for len(taskSlug) > 0 && taskSlug[len(taskSlug)-1] == '-' {
|
||||||
|
taskSlug = taskSlug[:len(taskSlug)-1]
|
||||||
|
}
|
||||||
|
if taskSlug != "" {
|
||||||
|
branchName := core.Sprintf("agent/%s", taskSlug)
|
||||||
branchCmd := exec.CommandContext(ctx, "git", "checkout", "-b", branchName)
|
branchCmd := exec.CommandContext(ctx, "git", "checkout", "-b", branchName)
|
||||||
branchCmd.Dir = srcDir
|
branchCmd.Dir = srcDir
|
||||||
if err := branchCmd.Run(); err != nil {
|
if err := branchCmd.Run(); err != nil {
|
||||||
return nil, PrepOutput{}, coreerr.E("prepWorkspace", "failed to create branch", err)
|
return nil, PrepOutput{}, coreerr.E("prepWorkspace", "failed to create branch", err)
|
||||||
}
|
}
|
||||||
out.Branch = branchName
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create context dirs inside src/
|
// Create context dirs inside src/
|
||||||
coreio.Local.EnsureDir(filepath.Join(srcDir, "kb"))
|
coreio.Local.EnsureDir(core.JoinPath(srcDir, "kb"))
|
||||||
coreio.Local.EnsureDir(filepath.Join(srcDir, "specs"))
|
coreio.Local.EnsureDir(core.JoinPath(srcDir, "specs"))
|
||||||
|
|
||||||
// Remote stays as local clone origin — agent cannot push to forge.
|
// Remote stays as local clone origin — agent cannot push to forge.
|
||||||
// Reviewer pulls changes from workspace and pushes after verification.
|
// Reviewer pulls changes from workspace and pushes after verification.
|
||||||
|
|
||||||
// 2. Copy CLAUDE.md and GEMINI.md to workspace
|
// 2. Copy CLAUDE.md and GEMINI.md to workspace
|
||||||
claudeMdPath := filepath.Join(repoPath, "CLAUDE.md")
|
claudeMdPath := core.JoinPath(repoPath, "CLAUDE.md")
|
||||||
if data, err := coreio.Local.Read(claudeMdPath); err == nil {
|
if data, err := coreio.Local.Read(claudeMdPath); err == nil {
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "CLAUDE.md"), data)
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "CLAUDE.md"), data)
|
||||||
out.ClaudeMd = true
|
out.ClaudeMd = true
|
||||||
}
|
}
|
||||||
// Copy GEMINI.md from core/agent (ethics framework for all agents)
|
// Copy GEMINI.md from core/agent (ethics framework for all agents)
|
||||||
agentGeminiMd := filepath.Join(s.codePath, "core", "agent", "GEMINI.md")
|
agentGeminiMd := core.JoinPath(s.codePath, "core", "agent", "GEMINI.md")
|
||||||
if data, err := coreio.Local.Read(agentGeminiMd); err == nil {
|
if data, err := coreio.Local.Read(agentGeminiMd); err == nil {
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "GEMINI.md"), data)
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "GEMINI.md"), data)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy persona if specified
|
// Copy persona if specified
|
||||||
if persona != "" {
|
if persona != "" {
|
||||||
personaPath := filepath.Join(s.codePath, "core", "agent", "prompts", "personas", persona+".md")
|
personaPath := core.JoinPath(s.codePath, "core", "agent", "prompts", "personas", persona+".md")
|
||||||
if data, err := coreio.Local.Read(personaPath); err == nil {
|
if data, err := coreio.Local.Read(personaPath); err == nil {
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "PERSONA.md"), data)
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "PERSONA.md"), data)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -299,9 +278,9 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
||||||
if input.Issue > 0 {
|
if input.Issue > 0 {
|
||||||
s.generateTodo(ctx, input.Org, input.Repo, input.Issue, wsDir)
|
s.generateTodo(ctx, input.Org, input.Repo, input.Issue, wsDir)
|
||||||
} else if input.Task != "" {
|
} else if input.Task != "" {
|
||||||
todo := fmt.Sprintf("# TASK: %s\n\n**Repo:** %s/%s\n**Status:** ready\n\n## Objective\n\n%s\n",
|
todo := core.Sprintf("# TASK: %s\n\n**Repo:** %s/%s\n**Status:** ready\n\n## Objective\n\n%s\n",
|
||||||
input.Task, input.Org, input.Repo, input.Task)
|
input.Task, input.Org, input.Repo, input.Task)
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "TODO.md"), todo)
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "TODO.md"), todo)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 4. Generate CONTEXT.md from OpenBrain
|
// 4. Generate CONTEXT.md from OpenBrain
|
||||||
|
|
@ -331,42 +310,6 @@ func (s *PrepSubsystem) prepWorkspace(ctx context.Context, _ *mcp.CallToolReques
|
||||||
return nil, out, nil
|
return nil, out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// branchSlug converts a free-form string into a git-friendly branch suffix.
|
|
||||||
func branchSlug(value string) string {
|
|
||||||
value = strings.ToLower(strings.TrimSpace(value))
|
|
||||||
if value == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
var b strings.Builder
|
|
||||||
b.Grow(len(value))
|
|
||||||
lastDash := false
|
|
||||||
for _, r := range value {
|
|
||||||
switch {
|
|
||||||
case r >= 'a' && r <= 'z', r >= '0' && r <= '9':
|
|
||||||
b.WriteRune(r)
|
|
||||||
lastDash = false
|
|
||||||
case r == '-' || r == '_' || r == '.' || r == ' ':
|
|
||||||
if !lastDash {
|
|
||||||
b.WriteByte('-')
|
|
||||||
lastDash = true
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
if !lastDash {
|
|
||||||
b.WriteByte('-')
|
|
||||||
lastDash = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
slug := strings.Trim(b.String(), "-")
|
|
||||||
if len(slug) > 40 {
|
|
||||||
slug = slug[:40]
|
|
||||||
slug = strings.Trim(slug, "-")
|
|
||||||
}
|
|
||||||
return slug
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Prompt templates ---
|
// --- Prompt templates ---
|
||||||
|
|
||||||
func (s *PrepSubsystem) writePromptTemplate(template, wsDir string) {
|
func (s *PrepSubsystem) writePromptTemplate(template, wsDir string) {
|
||||||
|
|
@ -434,7 +377,7 @@ Do NOT push. Commit only — a reviewer will verify and push.
|
||||||
prompt = "Read TODO.md and complete the task. Work in src/.\n"
|
prompt = "Read TODO.md and complete the task. Work in src/.\n"
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "PROMPT.md"), prompt)
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "PROMPT.md"), prompt)
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Plan template rendering ---
|
// --- Plan template rendering ---
|
||||||
|
|
@ -443,11 +386,11 @@ Do NOT push. Commit only — a reviewer will verify and push.
|
||||||
// and writes PLAN.md into the workspace src/ directory.
|
// and writes PLAN.md into the workspace src/ directory.
|
||||||
func (s *PrepSubsystem) writePlanFromTemplate(templateSlug string, variables map[string]string, task string, wsDir string) {
|
func (s *PrepSubsystem) writePlanFromTemplate(templateSlug string, variables map[string]string, task string, wsDir string) {
|
||||||
// Look for template in core/agent/prompts/templates/
|
// Look for template in core/agent/prompts/templates/
|
||||||
templatePath := filepath.Join(s.codePath, "core", "agent", "prompts", "templates", templateSlug+".yaml")
|
templatePath := core.JoinPath(s.codePath, "core", "agent", "prompts", "templates", templateSlug+".yaml")
|
||||||
content, err := coreio.Local.Read(templatePath)
|
content, err := coreio.Local.Read(templatePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Try .yml extension
|
// Try .yml extension
|
||||||
templatePath = filepath.Join(s.codePath, "core", "agent", "prompts", "templates", templateSlug+".yml")
|
templatePath = core.JoinPath(s.codePath, "core", "agent", "prompts", "templates", templateSlug+".yml")
|
||||||
content, err = coreio.Local.Read(templatePath)
|
content, err = coreio.Local.Read(templatePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return // Template not found, skip silently
|
return // Template not found, skip silently
|
||||||
|
|
@ -456,8 +399,8 @@ func (s *PrepSubsystem) writePlanFromTemplate(templateSlug string, variables map
|
||||||
|
|
||||||
// Substitute variables ({{variable_name}} → value)
|
// Substitute variables ({{variable_name}} → value)
|
||||||
for key, value := range variables {
|
for key, value := range variables {
|
||||||
content = strings.ReplaceAll(content, "{{"+key+"}}", value)
|
content = core.Replace(content, "{{"+key+"}}", value)
|
||||||
content = strings.ReplaceAll(content, "{{ "+key+" }}", value)
|
content = core.Replace(content, "{{ "+key+" }}", value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse the YAML to render as markdown
|
// Parse the YAML to render as markdown
|
||||||
|
|
@ -477,42 +420,42 @@ func (s *PrepSubsystem) writePlanFromTemplate(templateSlug string, variables map
|
||||||
}
|
}
|
||||||
|
|
||||||
// Render as PLAN.md
|
// Render as PLAN.md
|
||||||
var plan strings.Builder
|
planBuilder := core.NewBuilder()
|
||||||
plan.WriteString("# Plan: " + tmpl.Name + "\n\n")
|
planBuilder.WriteString("# Plan: " + tmpl.Name + "\n\n")
|
||||||
if task != "" {
|
if task != "" {
|
||||||
plan.WriteString("**Task:** " + task + "\n\n")
|
planBuilder.WriteString("**Task:** " + task + "\n\n")
|
||||||
}
|
}
|
||||||
if tmpl.Description != "" {
|
if tmpl.Description != "" {
|
||||||
plan.WriteString(tmpl.Description + "\n\n")
|
planBuilder.WriteString(tmpl.Description + "\n\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(tmpl.Guidelines) > 0 {
|
if len(tmpl.Guidelines) > 0 {
|
||||||
plan.WriteString("## Guidelines\n\n")
|
planBuilder.WriteString("## Guidelines\n\n")
|
||||||
for _, g := range tmpl.Guidelines {
|
for _, guideline := range tmpl.Guidelines {
|
||||||
plan.WriteString("- " + g + "\n")
|
planBuilder.WriteString("- " + guideline + "\n")
|
||||||
}
|
}
|
||||||
plan.WriteString("\n")
|
planBuilder.WriteString("\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, phase := range tmpl.Phases {
|
for phaseIndex, phase := range tmpl.Phases {
|
||||||
plan.WriteString(fmt.Sprintf("## Phase %d: %s\n\n", i+1, phase.Name))
|
planBuilder.WriteString(core.Sprintf("## Phase %d: %s\n\n", phaseIndex+1, phase.Name))
|
||||||
if phase.Description != "" {
|
if phase.Description != "" {
|
||||||
plan.WriteString(phase.Description + "\n\n")
|
planBuilder.WriteString(phase.Description + "\n\n")
|
||||||
}
|
}
|
||||||
for _, task := range phase.Tasks {
|
for _, phaseTask := range phase.Tasks {
|
||||||
switch t := task.(type) {
|
switch taskValue := phaseTask.(type) {
|
||||||
case string:
|
case string:
|
||||||
plan.WriteString("- [ ] " + t + "\n")
|
planBuilder.WriteString("- [ ] " + taskValue + "\n")
|
||||||
case map[string]any:
|
case map[string]any:
|
||||||
if name, ok := t["name"].(string); ok {
|
if name, ok := taskValue["name"].(string); ok {
|
||||||
plan.WriteString("- [ ] " + name + "\n")
|
planBuilder.WriteString("- [ ] " + name + "\n")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
plan.WriteString("\n**Commit after completing this phase.**\n\n---\n\n")
|
planBuilder.WriteString("\n**Commit after completing this phase.**\n\n---\n\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "PLAN.md"), plan.String())
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "PLAN.md"), planBuilder.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Helpers (unchanged) ---
|
// --- Helpers (unchanged) ---
|
||||||
|
|
@ -522,11 +465,8 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/wiki/pages", s.forgeURL, org, repo)
|
wikiURL := core.Sprintf("%s/api/v1/repos/%s/%s/wiki/pages", s.forgeURL, org, repo)
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, _ := http.NewRequestWithContext(ctx, "GET", wikiURL, nil)
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
resp, err := s.client.Do(req)
|
||||||
|
|
@ -542,9 +482,7 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
||||||
Title string `json:"title"`
|
Title string `json:"title"`
|
||||||
SubURL string `json:"sub_url"`
|
SubURL string `json:"sub_url"`
|
||||||
}
|
}
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&pages); err != nil {
|
core.JSONUnmarshalString(readBody(resp.Body), &pages)
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
count := 0
|
count := 0
|
||||||
for _, page := range pages {
|
for _, page := range pages {
|
||||||
|
|
@ -553,11 +491,8 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
||||||
subURL = page.Title
|
subURL = page.Title
|
||||||
}
|
}
|
||||||
|
|
||||||
pageURL := fmt.Sprintf("%s/api/v1/repos/%s/%s/wiki/page/%s", s.forgeURL, org, repo, subURL)
|
pageURL := core.Sprintf("%s/api/v1/repos/%s/%s/wiki/page/%s", s.forgeURL, org, repo, subURL)
|
||||||
pageReq, err := http.NewRequestWithContext(ctx, "GET", pageURL, nil)
|
pageReq, _ := http.NewRequestWithContext(ctx, "GET", pageURL, nil)
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
pageReq.Header.Set("Authorization", "token "+s.forgeToken)
|
pageReq.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
pageResp, err := s.client.Do(pageReq)
|
pageResp, err := s.client.Do(pageReq)
|
||||||
|
|
@ -572,27 +507,25 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
||||||
var pageData struct {
|
var pageData struct {
|
||||||
ContentBase64 string `json:"content_base64"`
|
ContentBase64 string `json:"content_base64"`
|
||||||
}
|
}
|
||||||
if err := json.NewDecoder(pageResp.Body).Decode(&pageData); err != nil {
|
core.JSONUnmarshalString(readBody(pageResp.Body), &pageData)
|
||||||
continue
|
|
||||||
}
|
|
||||||
pageResp.Body.Close()
|
pageResp.Body.Close()
|
||||||
|
|
||||||
if pageData.ContentBase64 == "" {
|
if pageData.ContentBase64 == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
content, err := base64.StdEncoding.DecodeString(pageData.ContentBase64)
|
content, _ := base64.StdEncoding.DecodeString(pageData.ContentBase64)
|
||||||
if err != nil {
|
fileBuilder := core.NewBuilder()
|
||||||
continue
|
for _, r := range page.Title {
|
||||||
}
|
|
||||||
filename := strings.Map(func(r rune) rune {
|
|
||||||
if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' {
|
if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' {
|
||||||
return r
|
fileBuilder.WriteRune(r)
|
||||||
|
} else {
|
||||||
|
fileBuilder.WriteRune('-')
|
||||||
}
|
}
|
||||||
return '-'
|
}
|
||||||
}, page.Title) + ".md"
|
filename := fileBuilder.String() + ".md"
|
||||||
|
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "kb", filename), string(content))
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "kb", filename), string(content))
|
||||||
count++
|
count++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -601,17 +534,17 @@ func (s *PrepSubsystem) pullWiki(ctx context.Context, org, repo, wsDir string) i
|
||||||
|
|
||||||
func (s *PrepSubsystem) copySpecs(wsDir string) int {
|
func (s *PrepSubsystem) copySpecs(wsDir string) int {
|
||||||
specFiles := []string{"AGENT_CONTEXT.md", "TASK_PROTOCOL.md"}
|
specFiles := []string{"AGENT_CONTEXT.md", "TASK_PROTOCOL.md"}
|
||||||
count := 0
|
specCount := 0
|
||||||
|
|
||||||
for _, file := range specFiles {
|
for _, file := range specFiles {
|
||||||
src := filepath.Join(s.specsPath, file)
|
sourcePath := core.JoinPath(s.specsPath, file)
|
||||||
if data, err := coreio.Local.Read(src); err == nil {
|
if data, err := coreio.Local.Read(sourcePath); err == nil {
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "specs", file), data)
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "specs", file), data)
|
||||||
count++
|
specCount++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return count
|
return specCount
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) generateContext(ctx context.Context, repo, wsDir string) int {
|
func (s *PrepSubsystem) generateContext(ctx context.Context, repo, wsDir string) int {
|
||||||
|
|
@ -619,20 +552,14 @@ func (s *PrepSubsystem) generateContext(ctx context.Context, repo, wsDir string)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
body, err := json.Marshal(map[string]any{
|
body := core.JSONMarshalString(map[string]any{
|
||||||
"query": "architecture conventions key interfaces for " + repo,
|
"query": "architecture conventions key interfaces for " + repo,
|
||||||
"top_k": 10,
|
"top_k": 10,
|
||||||
"project": repo,
|
"project": repo,
|
||||||
"agent_id": "cladius",
|
"agent_id": "cladius",
|
||||||
})
|
})
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "POST", s.brainURL+"/v1/brain/recall", strings.NewReader(string(body)))
|
req, _ := http.NewRequestWithContext(ctx, "POST", s.brainURL+"/v1/brain/recall", core.NewReader(body))
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
req.Header.Set("Accept", "application/json")
|
req.Header.Set("Accept", "application/json")
|
||||||
req.Header.Set("Authorization", "Bearer "+s.brainKey)
|
req.Header.Set("Authorization", "Bearer "+s.brainKey)
|
||||||
|
|
@ -646,35 +573,30 @@ func (s *PrepSubsystem) generateContext(ctx context.Context, repo, wsDir string)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
respData, err := goio.ReadAll(resp.Body)
|
respData, _ := goio.ReadAll(resp.Body)
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
var result struct {
|
var result struct {
|
||||||
Memories []map[string]any `json:"memories"`
|
Memories []map[string]any `json:"memories"`
|
||||||
}
|
}
|
||||||
if err := json.Unmarshal(respData, &result); err != nil {
|
core.JSONUnmarshalString(string(respData), &result)
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
var content strings.Builder
|
contextBuilder := core.NewBuilder()
|
||||||
content.WriteString("# Context — " + repo + "\n\n")
|
contextBuilder.WriteString("# Context — " + repo + "\n\n")
|
||||||
content.WriteString("> Relevant knowledge from OpenBrain.\n\n")
|
contextBuilder.WriteString("> Relevant knowledge from OpenBrain.\n\n")
|
||||||
|
|
||||||
for i, mem := range result.Memories {
|
for memIndex, mem := range result.Memories {
|
||||||
memType, _ := mem["type"].(string)
|
memType, _ := mem["type"].(string)
|
||||||
memContent, _ := mem["content"].(string)
|
memContent, _ := mem["content"].(string)
|
||||||
memProject, _ := mem["project"].(string)
|
memProject, _ := mem["project"].(string)
|
||||||
score, _ := mem["score"].(float64)
|
memScore, _ := mem["score"].(float64)
|
||||||
content.WriteString(fmt.Sprintf("### %d. %s [%s] (score: %.3f)\n\n%s\n\n", i+1, memProject, memType, score, memContent))
|
contextBuilder.WriteString(core.Sprintf("### %d. %s [%s] (score: %.3f)\n\n%s\n\n", memIndex+1, memProject, memType, memScore, memContent))
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "CONTEXT.md"), content.String())
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "CONTEXT.md"), contextBuilder.String())
|
||||||
return len(result.Memories)
|
return len(result.Memories)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) findConsumers(repo, wsDir string) int {
|
func (s *PrepSubsystem) findConsumers(repo, wsDir string) int {
|
||||||
goWorkPath := filepath.Join(s.codePath, "go.work")
|
goWorkPath := core.JoinPath(s.codePath, "go.work")
|
||||||
modulePath := "forge.lthn.ai/core/" + repo
|
modulePath := "forge.lthn.ai/core/" + repo
|
||||||
|
|
||||||
workData, err := coreio.Local.Read(goWorkPath)
|
workData, err := coreio.Local.Read(goWorkPath)
|
||||||
|
|
@ -683,47 +605,47 @@ func (s *PrepSubsystem) findConsumers(repo, wsDir string) int {
|
||||||
}
|
}
|
||||||
|
|
||||||
var consumers []string
|
var consumers []string
|
||||||
for _, line := range strings.Split(workData, "\n") {
|
for _, line := range core.Split(workData, "\n") {
|
||||||
line = strings.TrimSpace(line)
|
line = core.Trim(line)
|
||||||
if !strings.HasPrefix(line, "./") {
|
if !core.HasPrefix(line, "./") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
dir := filepath.Join(s.codePath, strings.TrimPrefix(line, "./"))
|
dir := core.JoinPath(s.codePath, core.TrimPrefix(line, "./"))
|
||||||
goMod := filepath.Join(dir, "go.mod")
|
goMod := core.JoinPath(dir, "go.mod")
|
||||||
modData, err := coreio.Local.Read(goMod)
|
modData, err := coreio.Local.Read(goMod)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if strings.Contains(modData, modulePath) && !strings.HasPrefix(modData, "module "+modulePath) {
|
if core.Contains(modData, modulePath) && !core.HasPrefix(modData, "module "+modulePath) {
|
||||||
consumers = append(consumers, filepath.Base(dir))
|
consumers = append(consumers, core.PathBase(dir))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(consumers) > 0 {
|
if len(consumers) > 0 {
|
||||||
content := "# Consumers of " + repo + "\n\n"
|
consumersContent := "# Consumers of " + repo + "\n\n"
|
||||||
content += "These modules import `" + modulePath + "`:\n\n"
|
consumersContent += "These modules import `" + modulePath + "`:\n\n"
|
||||||
for _, c := range consumers {
|
for _, consumer := range consumers {
|
||||||
content += "- " + c + "\n"
|
consumersContent += "- " + consumer + "\n"
|
||||||
}
|
}
|
||||||
content += fmt.Sprintf("\n**Breaking change risk: %d consumers.**\n", len(consumers))
|
consumersContent += core.Sprintf("\n**Breaking change risk: %d consumers.**\n", len(consumers))
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "CONSUMERS.md"), content)
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "CONSUMERS.md"), consumersContent)
|
||||||
}
|
}
|
||||||
|
|
||||||
return len(consumers)
|
return len(consumers)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) gitLog(repoPath, wsDir string) int {
|
func (s *PrepSubsystem) gitLog(repoPath, wsDir string) int {
|
||||||
cmd := exec.Command("git", "log", "--oneline", "-20")
|
gitCmd := exec.Command("git", "log", "--oneline", "-20")
|
||||||
cmd.Dir = repoPath
|
gitCmd.Dir = repoPath
|
||||||
output, err := cmd.Output()
|
output, err := gitCmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
|
lines := core.Split(core.Trim(string(output)), "\n")
|
||||||
if len(lines) > 0 && lines[0] != "" {
|
if len(lines) > 0 && lines[0] != "" {
|
||||||
content := "# Recent Changes\n\n```\n" + string(output) + "```\n"
|
content := "# Recent Changes\n\n```\n" + string(output) + "```\n"
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "RECENT.md"), content)
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "RECENT.md"), content)
|
||||||
}
|
}
|
||||||
|
|
||||||
return len(lines)
|
return len(lines)
|
||||||
|
|
@ -734,8 +656,8 @@ func (s *PrepSubsystem) generateTodo(ctx context.Context, org, repo string, issu
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue)
|
issueURL := core.Sprintf("%s/api/v1/repos/%s/%s/issues/%d", s.forgeURL, org, repo, issue)
|
||||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, _ := http.NewRequestWithContext(ctx, "GET", issueURL, nil)
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
resp, err := s.client.Do(req)
|
||||||
|
|
@ -751,13 +673,23 @@ func (s *PrepSubsystem) generateTodo(ctx context.Context, org, repo string, issu
|
||||||
Title string `json:"title"`
|
Title string `json:"title"`
|
||||||
Body string `json:"body"`
|
Body string `json:"body"`
|
||||||
}
|
}
|
||||||
json.NewDecoder(resp.Body).Decode(&issueData)
|
core.JSONUnmarshalString(readBody(resp.Body), &issueData)
|
||||||
|
|
||||||
content := fmt.Sprintf("# TASK: %s\n\n", issueData.Title)
|
todoContent := core.Sprintf("# TASK: %s\n\n", issueData.Title)
|
||||||
content += fmt.Sprintf("**Status:** ready\n")
|
todoContent += "**Status:** ready\n"
|
||||||
content += fmt.Sprintf("**Source:** %s/%s/%s/issues/%d\n", s.forgeURL, org, repo, issue)
|
todoContent += core.Sprintf("**Source:** %s/%s/%s/issues/%d\n", s.forgeURL, org, repo, issue)
|
||||||
content += fmt.Sprintf("**Repo:** %s/%s\n\n---\n\n", org, repo)
|
todoContent += core.Sprintf("**Repo:** %s/%s\n\n---\n\n", org, repo)
|
||||||
content += "## Objective\n\n" + issueData.Body + "\n"
|
todoContent += "## Objective\n\n" + issueData.Body + "\n"
|
||||||
|
|
||||||
_ = writeAtomic(filepath.Join(wsDir, "src", "TODO.md"), content)
|
coreio.Local.Write(core.JoinPath(wsDir, "src", "TODO.md"), todoContent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// readBody reads an HTTP response body to a string and closes it.
|
||||||
|
//
|
||||||
|
// body := readBody(resp.Body)
|
||||||
|
// core.JSONUnmarshalString(body, &result)
|
||||||
|
func readBody(body goio.ReadCloser) string {
|
||||||
|
data, _ := goio.ReadAll(body)
|
||||||
|
body.Close()
|
||||||
|
return string(data)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -6,20 +6,8 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type recordingNotifier struct {
|
|
||||||
channel string
|
|
||||||
data any
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *recordingNotifier) ChannelSend(_ context.Context, channel string, data any) {
|
|
||||||
r.channel = channel
|
|
||||||
r.data = data
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSanitizeRepoPathSegment_Good(t *testing.T) {
|
func TestSanitizeRepoPathSegment_Good(t *testing.T) {
|
||||||
t.Run("repo", func(t *testing.T) {
|
t.Run("repo", func(t *testing.T) {
|
||||||
value, err := sanitizeRepoPathSegment("go-io", "repo", false)
|
value, err := sanitizeRepoPathSegment("go-io", "repo", false)
|
||||||
|
|
@ -107,45 +95,35 @@ func TestPrepWorkspace_Bad_BadPlanTemplateTraversal(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSetNotifier_Good_EmitsChannelEvents(t *testing.T) {
|
func TestSanitizeRepoPathSegment_Ugly(t *testing.T) {
|
||||||
s := NewPrep()
|
// Empty value is allowed (returns "", nil) — callers validate presence separately
|
||||||
notifier := &recordingNotifier{}
|
value, err := sanitizeRepoPathSegment("", "repo", false)
|
||||||
s.SetNotifier(notifier)
|
if err != nil {
|
||||||
|
t.Errorf("expected nil error for empty value, got %v", err)
|
||||||
s.emitChannel(context.Background(), coremcp.ChannelAgentStatus, map[string]any{"status": "running"})
|
|
||||||
|
|
||||||
if notifier.channel != coremcp.ChannelAgentStatus {
|
|
||||||
t.Fatalf("expected %s channel, got %q", coremcp.ChannelAgentStatus, notifier.channel)
|
|
||||||
}
|
}
|
||||||
if payload, ok := notifier.data.(map[string]any); !ok || payload["status"] != "running" {
|
if value != "" {
|
||||||
t.Fatalf("expected payload to include running status, got %#v", notifier.data)
|
t.Errorf("expected empty string, got %q", value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Null bytes are rejected as invalid characters
|
||||||
|
_, err = sanitizeRepoPathSegment("repo\x00name", "repo", false)
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expected error for null byte in value, got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Leading whitespace is rejected
|
||||||
|
_, err = sanitizeRepoPathSegment(" repo", "repo", false)
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expected error for leading whitespace, got nil")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEmitHarvestComplete_Good_EmitsChannelEvents(t *testing.T) {
|
func TestPrepWorkspace_Ugly(t *testing.T) {
|
||||||
s := NewPrep()
|
// Empty codePath still validates inputs before hitting the filesystem
|
||||||
notifier := &recordingNotifier{}
|
s := &PrepSubsystem{codePath: ""}
|
||||||
s.SetNotifier(notifier)
|
|
||||||
|
|
||||||
s.emitHarvestComplete(context.Background(), "go-io-123", "go-io", 4, true)
|
_, _, err := s.prepWorkspace(context.Background(), nil, PrepInput{Repo: ""})
|
||||||
|
if err == nil {
|
||||||
if notifier.channel != coremcp.ChannelHarvestComplete {
|
t.Error("expected error for empty repo with empty codePath, got nil")
|
||||||
t.Fatalf("expected %s channel, got %q", coremcp.ChannelHarvestComplete, notifier.channel)
|
|
||||||
}
|
|
||||||
payload, ok := notifier.data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected payload object, got %#v", notifier.data)
|
|
||||||
}
|
|
||||||
if payload["workspace"] != "go-io-123" {
|
|
||||||
t.Fatalf("expected workspace go-io-123, got %#v", payload["workspace"])
|
|
||||||
}
|
|
||||||
if payload["repo"] != "go-io" {
|
|
||||||
t.Fatalf("expected repo go-io, got %#v", payload["repo"])
|
|
||||||
}
|
|
||||||
if payload["findings"] != 4 {
|
|
||||||
t.Fatalf("expected findings 4, got %#v", payload["findings"])
|
|
||||||
}
|
|
||||||
if payload["issue_created"] != true {
|
|
||||||
t.Fatalf("expected issue_created true, got %#v", payload["issue_created"])
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -3,14 +3,12 @@
|
||||||
package agentic
|
package agentic
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
"gopkg.in/yaml.v3"
|
"gopkg.in/yaml.v3"
|
||||||
)
|
)
|
||||||
|
|
@ -25,37 +23,40 @@ type DispatchConfig struct {
|
||||||
// RateConfig controls pacing between task dispatches.
|
// RateConfig controls pacing between task dispatches.
|
||||||
type RateConfig struct {
|
type RateConfig struct {
|
||||||
ResetUTC string `yaml:"reset_utc"` // Daily quota reset time (UTC), e.g. "06:00"
|
ResetUTC string `yaml:"reset_utc"` // Daily quota reset time (UTC), e.g. "06:00"
|
||||||
DailyLimit int `yaml:"daily_limit"` // Max requests per day (0 = unknown)
|
DailyLimit int `yaml:"daily_limit"` // Max requests per day (0 = unknown)
|
||||||
MinDelay int `yaml:"min_delay"` // Minimum seconds between task starts
|
MinDelay int `yaml:"min_delay"` // Minimum seconds between task starts
|
||||||
SustainedDelay int `yaml:"sustained_delay"` // Delay when pacing for full-day use
|
SustainedDelay int `yaml:"sustained_delay"` // Delay when pacing for full-day use
|
||||||
BurstWindow int `yaml:"burst_window"` // Hours before reset where burst kicks in
|
BurstWindow int `yaml:"burst_window"` // Hours before reset where burst kicks in
|
||||||
BurstDelay int `yaml:"burst_delay"` // Delay during burst window
|
BurstDelay int `yaml:"burst_delay"` // Delay during burst window
|
||||||
}
|
}
|
||||||
|
|
||||||
// AgentsConfig is the root of config/agents.yaml.
|
// AgentsConfig is the root of config/agents.yaml.
|
||||||
type AgentsConfig struct {
|
type AgentsConfig struct {
|
||||||
Version int `yaml:"version"`
|
Version int `yaml:"version"`
|
||||||
Dispatch DispatchConfig `yaml:"dispatch"`
|
Dispatch DispatchConfig `yaml:"dispatch"`
|
||||||
Concurrency map[string]int `yaml:"concurrency"`
|
Concurrency map[string]int `yaml:"concurrency"`
|
||||||
Rates map[string]RateConfig `yaml:"rates"`
|
Rates map[string]RateConfig `yaml:"rates"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// loadAgentsConfig reads config/agents.yaml from the code path.
|
// loadAgentsConfig reads config/agents.yaml from the code path.
|
||||||
|
//
|
||||||
|
// agentsConfig := s.loadAgentsConfig()
|
||||||
|
// limit := agentsConfig.Concurrency["claude"] // 1
|
||||||
func (s *PrepSubsystem) loadAgentsConfig() *AgentsConfig {
|
func (s *PrepSubsystem) loadAgentsConfig() *AgentsConfig {
|
||||||
paths := []string{
|
paths := []string{
|
||||||
filepath.Join(s.codePath, ".core", "agents.yaml"),
|
core.JoinPath(s.codePath, ".core", "agents.yaml"),
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, path := range paths {
|
for _, configPath := range paths {
|
||||||
data, err := coreio.Local.Read(path)
|
data, err := coreio.Local.Read(configPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
var cfg AgentsConfig
|
var configuration AgentsConfig
|
||||||
if err := yaml.Unmarshal([]byte(data), &cfg); err != nil {
|
if err := yaml.Unmarshal([]byte(data), &configuration); err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return &cfg
|
return &configuration
|
||||||
}
|
}
|
||||||
|
|
||||||
return &AgentsConfig{
|
return &AgentsConfig{
|
||||||
|
|
@ -73,15 +74,15 @@ func (s *PrepSubsystem) loadAgentsConfig() *AgentsConfig {
|
||||||
// delayForAgent calculates how long to wait before spawning the next task
|
// delayForAgent calculates how long to wait before spawning the next task
|
||||||
// for a given agent type, based on rate config and time of day.
|
// for a given agent type, based on rate config and time of day.
|
||||||
func (s *PrepSubsystem) delayForAgent(agent string) time.Duration {
|
func (s *PrepSubsystem) delayForAgent(agent string) time.Duration {
|
||||||
cfg := s.loadAgentsConfig()
|
agentsConfig := s.loadAgentsConfig()
|
||||||
rate, ok := cfg.Rates[agent]
|
rate, ok := agentsConfig.Rates[agent]
|
||||||
if !ok || rate.SustainedDelay == 0 {
|
if !ok || rate.SustainedDelay == 0 {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse reset time
|
// Parse reset time (e.g. "06:00")
|
||||||
resetHour, resetMin := 6, 0
|
resetHour, resetMin := 6, 0
|
||||||
fmt.Sscanf(rate.ResetUTC, "%d:%d", &resetHour, &resetMin)
|
parseResetTime(rate.ResetUTC, &resetHour, &resetMin)
|
||||||
|
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
resetToday := time.Date(now.Year(), now.Month(), now.Day(), resetHour, resetMin, 0, 0, time.UTC)
|
resetToday := time.Date(now.Year(), now.Month(), now.Day(), resetHour, resetMin, 0, 0, time.UTC)
|
||||||
|
|
@ -103,6 +104,9 @@ func (s *PrepSubsystem) delayForAgent(agent string) time.Duration {
|
||||||
|
|
||||||
// listWorkspaceDirs returns all workspace directories, including those
|
// listWorkspaceDirs returns all workspace directories, including those
|
||||||
// nested one level deep (e.g. workspace/core/go-io-123/).
|
// nested one level deep (e.g. workspace/core/go-io-123/).
|
||||||
|
//
|
||||||
|
// dirs := s.listWorkspaceDirs()
|
||||||
|
// // dirs == ["/home/user/.core/workspace/go-io-123", ...]
|
||||||
func (s *PrepSubsystem) listWorkspaceDirs() []string {
|
func (s *PrepSubsystem) listWorkspaceDirs() []string {
|
||||||
wsRoot := s.workspaceRoot()
|
wsRoot := s.workspaceRoot()
|
||||||
entries, err := coreio.Local.List(wsRoot)
|
entries, err := coreio.Local.List(wsRoot)
|
||||||
|
|
@ -115,21 +119,21 @@ func (s *PrepSubsystem) listWorkspaceDirs() []string {
|
||||||
if !entry.IsDir() {
|
if !entry.IsDir() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
path := filepath.Join(wsRoot, entry.Name())
|
entryPath := core.JoinPath(wsRoot, entry.Name())
|
||||||
// Check if this dir has a status.json (it's a workspace)
|
// Check if this dir has a status.json (it's a workspace)
|
||||||
if coreio.Local.IsFile(filepath.Join(path, "status.json")) {
|
if coreio.Local.IsFile(core.JoinPath(entryPath, "status.json")) {
|
||||||
dirs = append(dirs, path)
|
dirs = append(dirs, entryPath)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// Otherwise check one level deeper (org subdirectory)
|
// Otherwise check one level deeper (org subdirectory)
|
||||||
subEntries, err := coreio.Local.List(path)
|
subEntries, err := coreio.Local.List(entryPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for _, sub := range subEntries {
|
for _, sub := range subEntries {
|
||||||
if sub.IsDir() {
|
if sub.IsDir() {
|
||||||
subPath := filepath.Join(path, sub.Name())
|
subPath := core.JoinPath(entryPath, sub.Name())
|
||||||
if coreio.Local.IsFile(filepath.Join(subPath, "status.json")) {
|
if coreio.Local.IsFile(core.JoinPath(subPath, "status.json")) {
|
||||||
dirs = append(dirs, subPath)
|
dirs = append(dirs, subPath)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -146,7 +150,7 @@ func (s *PrepSubsystem) countRunningByAgent(agent string) int {
|
||||||
if err != nil || st.Status != "running" {
|
if err != nil || st.Status != "running" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
stBase := strings.SplitN(st.Agent, ":", 2)[0]
|
stBase := core.SplitN(st.Agent, ":", 2)[0]
|
||||||
if stBase != agent {
|
if stBase != agent {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -161,15 +165,18 @@ func (s *PrepSubsystem) countRunningByAgent(agent string) int {
|
||||||
}
|
}
|
||||||
|
|
||||||
// baseAgent strips the model variant (gemini:flash → gemini).
|
// baseAgent strips the model variant (gemini:flash → gemini).
|
||||||
|
//
|
||||||
|
// baseAgent("gemini:flash") == "gemini"
|
||||||
|
// baseAgent("claude") == "claude"
|
||||||
func baseAgent(agent string) string {
|
func baseAgent(agent string) string {
|
||||||
return strings.SplitN(agent, ":", 2)[0]
|
return core.SplitN(agent, ":", 2)[0]
|
||||||
}
|
}
|
||||||
|
|
||||||
// canDispatchAgent checks if we're under the concurrency limit for a specific agent type.
|
// canDispatchAgent checks if we're under the concurrency limit for a specific agent type.
|
||||||
func (s *PrepSubsystem) canDispatchAgent(agent string) bool {
|
func (s *PrepSubsystem) canDispatchAgent(agent string) bool {
|
||||||
cfg := s.loadAgentsConfig()
|
agentsConfig := s.loadAgentsConfig()
|
||||||
base := baseAgent(agent)
|
base := baseAgent(agent)
|
||||||
limit, ok := cfg.Concurrency[base]
|
limit, ok := agentsConfig.Concurrency[base]
|
||||||
if !ok || limit <= 0 {
|
if !ok || limit <= 0 {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
@ -205,7 +212,7 @@ func (s *PrepSubsystem) drainQueue() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
srcDir := filepath.Join(wsDir, "src")
|
srcDir := core.JoinPath(wsDir, "src")
|
||||||
prompt := "Read PROMPT.md for instructions. All context files (CLAUDE.md, TODO.md, CONTEXT.md, CONSUMERS.md, RECENT.md) are in the parent directory. Work in this directory."
|
prompt := "Read PROMPT.md for instructions. All context files (CLAUDE.md, TODO.md, CONTEXT.md, CONSUMERS.md, RECENT.md) are in the parent directory. Work in this directory."
|
||||||
|
|
||||||
command, args, err := agentCommand(st.Agent, prompt)
|
command, args, err := agentCommand(st.Agent, prompt)
|
||||||
|
|
@ -213,7 +220,7 @@ func (s *PrepSubsystem) drainQueue() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
outputFile := filepath.Join(wsDir, fmt.Sprintf("agent-%s.log", st.Agent))
|
outputFile := core.JoinPath(wsDir, core.Sprintf("agent-%s.log", st.Agent))
|
||||||
outFile, err := os.Create(outputFile)
|
outFile, err := os.Create(outputFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
|
|
@ -243,7 +250,7 @@ func (s *PrepSubsystem) drainQueue() {
|
||||||
st.Status = "running"
|
st.Status = "running"
|
||||||
st.PID = cmd.Process.Pid
|
st.PID = cmd.Process.Pid
|
||||||
st.Runs++
|
st.Runs++
|
||||||
s.saveStatus(wsDir, st)
|
writeStatus(wsDir, st)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
cmd.Wait()
|
cmd.Wait()
|
||||||
|
|
@ -252,7 +259,7 @@ func (s *PrepSubsystem) drainQueue() {
|
||||||
if st2, err := readStatus(wsDir); err == nil {
|
if st2, err := readStatus(wsDir); err == nil {
|
||||||
st2.Status = "completed"
|
st2.Status = "completed"
|
||||||
st2.PID = 0
|
st2.PID = 0
|
||||||
s.saveStatus(wsDir, st2)
|
writeStatus(wsDir, st2)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ingest scan findings as issues
|
// Ingest scan findings as issues
|
||||||
|
|
@ -264,3 +271,28 @@ func (s *PrepSubsystem) drainQueue() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parseResetTime parses "HH:MM" into hour and minute integers.
|
||||||
|
// On invalid input the defaults are unchanged.
|
||||||
|
//
|
||||||
|
// parseResetTime("06:30", &h, &m) // h=6, m=30
|
||||||
|
func parseResetTime(value string, hour, minute *int) {
|
||||||
|
parts := core.SplitN(value, ":", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type hm struct {
|
||||||
|
H int `json:"h"`
|
||||||
|
M int `json:"m"`
|
||||||
|
}
|
||||||
|
var target hm
|
||||||
|
result := core.JSONUnmarshalString(
|
||||||
|
core.Sprintf(`{"h":%s,"m":%s}`, core.Trim(parts[0]), core.Trim(parts[1])),
|
||||||
|
&target,
|
||||||
|
)
|
||||||
|
if result.OK {
|
||||||
|
*hour = target.H
|
||||||
|
*minute = target.M
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,209 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
|
||||||
)
|
|
||||||
|
|
||||||
func listLocalRepos(basePath string) []string {
|
|
||||||
entries, err := os.ReadDir(basePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
repos := make([]string, 0, len(entries))
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.IsDir() {
|
|
||||||
repos = append(repos, entry.Name())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return repos
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasRemote(repoDir, remote string) bool {
|
|
||||||
cmd := exec.Command("git", "remote", "get-url", remote)
|
|
||||||
cmd.Dir = repoDir
|
|
||||||
if out, err := cmd.Output(); err == nil {
|
|
||||||
return strings.TrimSpace(string(out)) != ""
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func commitsAhead(repoDir, baseRef, headRef string) int {
|
|
||||||
cmd := exec.Command("git", "rev-list", "--count", baseRef+".."+headRef)
|
|
||||||
cmd.Dir = repoDir
|
|
||||||
out, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
count, err := parsePositiveInt(strings.TrimSpace(string(out)))
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return count
|
|
||||||
}
|
|
||||||
|
|
||||||
func filesChanged(repoDir, baseRef, headRef string) int {
|
|
||||||
cmd := exec.Command("git", "diff", "--name-only", baseRef+".."+headRef)
|
|
||||||
cmd.Dir = repoDir
|
|
||||||
out, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
count := 0
|
|
||||||
for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
|
|
||||||
if strings.TrimSpace(line) != "" {
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return count
|
|
||||||
}
|
|
||||||
|
|
||||||
func gitOutput(repoDir string, args ...string) (string, error) {
|
|
||||||
cmd := exec.Command("git", args...)
|
|
||||||
cmd.Dir = repoDir
|
|
||||||
out, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return "", coreerr.E("gitOutput", string(out), err)
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(string(out)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parsePositiveInt(value string) (int, error) {
|
|
||||||
value = strings.TrimSpace(value)
|
|
||||||
if value == "" {
|
|
||||||
return 0, coreerr.E("parsePositiveInt", "empty value", nil)
|
|
||||||
}
|
|
||||||
n := 0
|
|
||||||
for _, r := range value {
|
|
||||||
if r < '0' || r > '9' {
|
|
||||||
return 0, coreerr.E("parsePositiveInt", "value contains non-numeric characters", nil)
|
|
||||||
}
|
|
||||||
n = n*10 + int(r-'0')
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readGitHubPRURL(repoDir string) (string, error) {
|
|
||||||
cmd := exec.Command("gh", "pr", "list", "--head", "dev", "--state", "open", "--json", "url", "--limit", "1")
|
|
||||||
cmd.Dir = repoDir
|
|
||||||
out, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
var rows []struct {
|
|
||||||
URL string `json:"url"`
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(out, &rows); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if len(rows) == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
return rows[0].URL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func createGitHubPR(ctx context.Context, repoDir, repo string, commits, files int) (string, error) {
|
|
||||||
if _, err := exec.LookPath("gh"); err != nil {
|
|
||||||
return "", coreerr.E("createGitHubPR", "gh CLI is not available", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if url, err := readGitHubPRURL(repoDir); err == nil && url != "" {
|
|
||||||
return url, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
body := "## Forge -> GitHub Sync\n\n"
|
|
||||||
body += "**Commits:** " + itoa(commits) + "\n"
|
|
||||||
body += "**Files changed:** " + itoa(files) + "\n\n"
|
|
||||||
body += "Automated sync from Forge (forge.lthn.ai) to GitHub mirror.\n"
|
|
||||||
body += "Review with CodeRabbit before merging.\n\n"
|
|
||||||
body += "---\n"
|
|
||||||
body += "Co-Authored-By: Virgil <virgil@lethean.io>"
|
|
||||||
|
|
||||||
title := "[sync] " + repo + ": " + itoa(commits) + " commits, " + itoa(files) + " files"
|
|
||||||
|
|
||||||
cmd := exec.CommandContext(ctx, "gh", "pr", "create",
|
|
||||||
"--head", "dev",
|
|
||||||
"--base", "main",
|
|
||||||
"--title", title,
|
|
||||||
"--body", body,
|
|
||||||
)
|
|
||||||
cmd.Dir = repoDir
|
|
||||||
out, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return "", coreerr.E("createGitHubPR", string(out), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
lines := strings.Split(strings.TrimSpace(string(out)), "\n")
|
|
||||||
if len(lines) == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(lines[len(lines)-1]), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ensureDevBranch(repoDir string) error {
|
|
||||||
cmd := exec.Command("git", "push", "github", "HEAD:refs/heads/dev", "--force")
|
|
||||||
cmd.Dir = repoDir
|
|
||||||
out, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return coreerr.E("ensureDevBranch", string(out), err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func reviewerCommand(ctx context.Context, repoDir, reviewer string) *exec.Cmd {
|
|
||||||
switch reviewer {
|
|
||||||
case "coderabbit":
|
|
||||||
return exec.CommandContext(ctx, "coderabbit", "review")
|
|
||||||
case "codex":
|
|
||||||
return exec.CommandContext(ctx, "codex", "review")
|
|
||||||
case "both":
|
|
||||||
return exec.CommandContext(ctx, "coderabbit", "review")
|
|
||||||
default:
|
|
||||||
return exec.CommandContext(ctx, reviewer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func itoa(value int) string {
|
|
||||||
return strconv.Itoa(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseRetryAfter(detail string) time.Duration {
|
|
||||||
re := regexp.MustCompile(`(?i)(\d+)\s*(minute|minutes|hour|hours|second|seconds)`)
|
|
||||||
match := re.FindStringSubmatch(detail)
|
|
||||||
if len(match) != 3 {
|
|
||||||
return 5 * time.Minute
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := strconv.Atoi(match[1])
|
|
||||||
if err != nil || n <= 0 {
|
|
||||||
return 5 * time.Minute
|
|
||||||
}
|
|
||||||
|
|
||||||
switch strings.ToLower(match[2]) {
|
|
||||||
case "hour", "hours":
|
|
||||||
return time.Duration(n) * time.Hour
|
|
||||||
case "second", "seconds":
|
|
||||||
return time.Duration(n) * time.Second
|
|
||||||
default:
|
|
||||||
return time.Duration(n) * time.Minute
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func repoRootFromCodePath(codePath string) string {
|
|
||||||
return filepath.Join(codePath, "core")
|
|
||||||
}
|
|
||||||
|
|
@ -4,44 +4,36 @@ package agentic
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ResumeInput is the input for agentic_resume.
|
// ResumeInput is the input for agentic_resume.
|
||||||
//
|
|
||||||
// input := ResumeInput{Workspace: "go-mcp-1700000000", Answer: "Use the shared notifier"}
|
|
||||||
type ResumeInput struct {
|
type ResumeInput struct {
|
||||||
Workspace string `json:"workspace"` // workspace name (e.g. "go-scm-1773581173")
|
Workspace string `json:"workspace"` // workspace name (e.g. "go-scm-1773581173")
|
||||||
Answer string `json:"answer,omitempty"` // answer to the blocked question (written to ANSWER.md)
|
Answer string `json:"answer,omitempty"` // answer to the blocked question (written to ANSWER.md)
|
||||||
Agent string `json:"agent,omitempty"` // override agent type (default: same as original)
|
Agent string `json:"agent,omitempty"` // override agent type (default: same as original)
|
||||||
DryRun bool `json:"dry_run,omitempty"` // preview without executing
|
DryRun bool `json:"dry_run,omitempty"` // preview without executing
|
||||||
}
|
}
|
||||||
|
|
||||||
// ResumeOutput is the output for agentic_resume.
|
// ResumeOutput is the output for agentic_resume.
|
||||||
//
|
|
||||||
// // out.Success == true, out.PID > 0
|
|
||||||
type ResumeOutput struct {
|
type ResumeOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Workspace string `json:"workspace"`
|
Workspace string `json:"workspace"`
|
||||||
Agent string `json:"agent"`
|
Agent string `json:"agent"`
|
||||||
PID int `json:"pid,omitempty"`
|
PID int `json:"pid,omitempty"`
|
||||||
OutputFile string `json:"output_file,omitempty"`
|
OutputFile string `json:"output_file,omitempty"`
|
||||||
Prompt string `json:"prompt,omitempty"`
|
Prompt string `json:"prompt,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerResumeTool(svc *coremcp.Service) {
|
func (s *PrepSubsystem) registerResumeTool(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_resume",
|
Name: "agentic_resume",
|
||||||
Description: "Resume a blocked agent workspace. Writes ANSWER.md if an answer is provided, then relaunches the agent with instructions to read it and continue.",
|
Description: "Resume a blocked agent workspace. Writes ANSWER.md if an answer is provided, then relaunches the agent with instructions to read it and continue.",
|
||||||
}, s.resume)
|
}, s.resume)
|
||||||
|
|
@ -52,8 +44,8 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
||||||
return nil, ResumeOutput{}, coreerr.E("resume", "workspace is required", nil)
|
return nil, ResumeOutput{}, coreerr.E("resume", "workspace is required", nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
wsDir := filepath.Join(s.workspaceRoot(), input.Workspace)
|
wsDir := core.JoinPath(s.workspaceRoot(), input.Workspace)
|
||||||
srcDir := filepath.Join(wsDir, "src")
|
srcDir := core.JoinPath(wsDir, "src")
|
||||||
|
|
||||||
// Verify workspace exists
|
// Verify workspace exists
|
||||||
if _, err := coreio.Local.List(srcDir); err != nil {
|
if _, err := coreio.Local.List(srcDir); err != nil {
|
||||||
|
|
@ -78,9 +70,9 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
||||||
|
|
||||||
// Write ANSWER.md if answer provided
|
// Write ANSWER.md if answer provided
|
||||||
if input.Answer != "" {
|
if input.Answer != "" {
|
||||||
answerPath := filepath.Join(srcDir, "ANSWER.md")
|
answerPath := core.JoinPath(srcDir, "ANSWER.md")
|
||||||
content := fmt.Sprintf("# Answer\n\n%s\n", input.Answer)
|
content := core.Sprintf("# Answer\n\n%s\n", input.Answer)
|
||||||
if err := writeAtomic(answerPath, content); err != nil {
|
if err := coreio.Local.Write(answerPath, content); err != nil {
|
||||||
return nil, ResumeOutput{}, coreerr.E("resume", "failed to write ANSWER.md", err)
|
return nil, ResumeOutput{}, coreerr.E("resume", "failed to write ANSWER.md", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -102,7 +94,7 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
||||||
}
|
}
|
||||||
|
|
||||||
// Spawn agent as detached process (survives parent death)
|
// Spawn agent as detached process (survives parent death)
|
||||||
outputFile := filepath.Join(wsDir, fmt.Sprintf("agent-%s-run%d.log", agent, st.Runs+1))
|
outputFile := core.JoinPath(wsDir, core.Sprintf("agent-%s-run%d.log", agent, st.Runs+1))
|
||||||
|
|
||||||
command, args, err := agentCommand(agent, prompt)
|
command, args, err := agentCommand(agent, prompt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -138,38 +130,11 @@ func (s *PrepSubsystem) resume(ctx context.Context, _ *mcp.CallToolRequest, inpu
|
||||||
st.PID = cmd.Process.Pid
|
st.PID = cmd.Process.Pid
|
||||||
st.Runs++
|
st.Runs++
|
||||||
st.Question = ""
|
st.Question = ""
|
||||||
s.saveStatus(wsDir, st)
|
writeStatus(wsDir, st)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
cmd.Wait()
|
cmd.Wait()
|
||||||
outFile.Close()
|
outFile.Close()
|
||||||
|
|
||||||
postCtx := context.WithoutCancel(ctx)
|
|
||||||
status := "completed"
|
|
||||||
channel := coremcp.ChannelAgentComplete
|
|
||||||
payload := map[string]any{
|
|
||||||
"workspace": input.Workspace,
|
|
||||||
"agent": agent,
|
|
||||||
"repo": st.Repo,
|
|
||||||
"branch": st.Branch,
|
|
||||||
}
|
|
||||||
|
|
||||||
if data, err := coreio.Local.Read(filepath.Join(srcDir, "BLOCKED.md")); err == nil {
|
|
||||||
status = "blocked"
|
|
||||||
channel = coremcp.ChannelAgentBlocked
|
|
||||||
st.Question = strings.TrimSpace(data)
|
|
||||||
if st.Question != "" {
|
|
||||||
payload["question"] = st.Question
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
st.Status = status
|
|
||||||
st.PID = 0
|
|
||||||
s.saveStatus(wsDir, st)
|
|
||||||
|
|
||||||
payload["status"] = status
|
|
||||||
s.emitChannel(postCtx, channel, payload)
|
|
||||||
s.emitChannel(postCtx, coremcp.ChannelAgentStatus, payload)
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
return nil, ResumeOutput{
|
return nil, ResumeOutput{
|
||||||
|
|
|
||||||
|
|
@ -1,273 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ReviewQueueInput controls the review queue runner.
|
|
||||||
type ReviewQueueInput struct {
|
|
||||||
Limit int `json:"limit,omitempty"`
|
|
||||||
Reviewer string `json:"reviewer,omitempty"`
|
|
||||||
DryRun bool `json:"dry_run,omitempty"`
|
|
||||||
LocalOnly bool `json:"local_only,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReviewQueueOutput reports what happened.
|
|
||||||
type ReviewQueueOutput struct {
|
|
||||||
Success bool `json:"success"`
|
|
||||||
Processed []ReviewResult `json:"processed"`
|
|
||||||
Skipped []string `json:"skipped,omitempty"`
|
|
||||||
RateLimit *RateLimitInfo `json:"rate_limit,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReviewResult is the outcome of reviewing one repo.
|
|
||||||
type ReviewResult struct {
|
|
||||||
Repo string `json:"repo"`
|
|
||||||
Verdict string `json:"verdict"`
|
|
||||||
Findings int `json:"findings"`
|
|
||||||
Action string `json:"action"`
|
|
||||||
Detail string `json:"detail,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// RateLimitInfo tracks review rate limit state.
|
|
||||||
type RateLimitInfo struct {
|
|
||||||
Limited bool `json:"limited"`
|
|
||||||
RetryAt time.Time `json:"retry_at,omitempty"`
|
|
||||||
Message string `json:"message,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func reviewQueueHomeDir() string {
|
|
||||||
if home := os.Getenv("DIR_HOME"); home != "" {
|
|
||||||
return home
|
|
||||||
}
|
|
||||||
home, _ := os.UserHomeDir()
|
|
||||||
return home
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerReviewQueueTool(svc *coremcp.Service) {
|
|
||||||
server := svc.Server()
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_review_queue",
|
|
||||||
Description: "Process repositories that are ahead of the GitHub mirror and summarise review findings.",
|
|
||||||
}, s.reviewQueue)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) reviewQueue(ctx context.Context, _ *mcp.CallToolRequest, input ReviewQueueInput) (*mcp.CallToolResult, ReviewQueueOutput, error) {
|
|
||||||
limit := input.Limit
|
|
||||||
if limit <= 0 {
|
|
||||||
limit = 4
|
|
||||||
}
|
|
||||||
|
|
||||||
basePath := repoRootFromCodePath(s.codePath)
|
|
||||||
candidates := s.findReviewCandidates(basePath)
|
|
||||||
if len(candidates) == 0 {
|
|
||||||
return nil, ReviewQueueOutput{Success: true, Processed: []ReviewResult{}}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
processed := make([]ReviewResult, 0, len(candidates))
|
|
||||||
skipped := make([]string, 0)
|
|
||||||
var rateInfo *RateLimitInfo
|
|
||||||
|
|
||||||
for _, repo := range candidates {
|
|
||||||
if len(processed) >= limit {
|
|
||||||
skipped = append(skipped, repo+" (limit reached)")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if rateInfo != nil && rateInfo.Limited && time.Now().Before(rateInfo.RetryAt) {
|
|
||||||
skipped = append(skipped, repo+" (rate limited)")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
repoDir := filepath.Join(basePath, repo)
|
|
||||||
reviewer := input.Reviewer
|
|
||||||
if reviewer == "" {
|
|
||||||
reviewer = "coderabbit"
|
|
||||||
}
|
|
||||||
|
|
||||||
result := s.reviewRepo(ctx, repoDir, repo, reviewer, input.DryRun, input.LocalOnly)
|
|
||||||
if result.Verdict == "rate_limited" {
|
|
||||||
retryAfter := parseRetryAfter(result.Detail)
|
|
||||||
rateInfo = &RateLimitInfo{
|
|
||||||
Limited: true,
|
|
||||||
RetryAt: time.Now().Add(retryAfter),
|
|
||||||
Message: result.Detail,
|
|
||||||
}
|
|
||||||
skipped = append(skipped, repo+" (rate limited)")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
processed = append(processed, result)
|
|
||||||
}
|
|
||||||
|
|
||||||
if rateInfo != nil {
|
|
||||||
s.saveRateLimitState(rateInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, ReviewQueueOutput{
|
|
||||||
Success: true,
|
|
||||||
Processed: processed,
|
|
||||||
Skipped: skipped,
|
|
||||||
RateLimit: rateInfo,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) findReviewCandidates(basePath string) []string {
|
|
||||||
entries, err := os.ReadDir(basePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
candidates := make([]string, 0, len(entries))
|
|
||||||
for _, entry := range entries {
|
|
||||||
if !entry.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
repoDir := filepath.Join(basePath, entry.Name())
|
|
||||||
if !hasRemote(repoDir, "github") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if commitsAhead(repoDir, "github/main", "HEAD") <= 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
candidates = append(candidates, entry.Name())
|
|
||||||
}
|
|
||||||
return candidates
|
|
||||||
}
|
|
||||||
|
|
||||||
// reviewRepo runs the configured reviewer CLI against one repository and
// classifies the run into a ReviewResult.
//
// Classification order matters: a persisted rate-limit window short-circuits
// before spawning anything; "rate limit" text in the output wins over the
// command's exit status; a non-zero exit only counts as an error when the
// output doesn't already read as clean. Clean reviews may trigger a gh
// auto-merge unless dryRun or localOnly is set.
func (s *PrepSubsystem) reviewRepo(ctx context.Context, repoDir, repo, reviewer string, dryRun, localOnly bool) ReviewResult {
	result := ReviewResult{Repo: repo}

	// Honour a previously persisted rate-limit window before doing any work.
	if rl := s.loadRateLimitState(); rl != nil && rl.Limited && time.Now().Before(rl.RetryAt) {
		result.Verdict = "rate_limited"
		result.Detail = fmt.Sprintf("retry after %s", rl.RetryAt.Format(time.RFC3339))
		return result
	}

	cmd := reviewerCommand(ctx, repoDir, reviewer)
	cmd.Dir = repoDir
	out, err := cmd.CombinedOutput()
	output := strings.TrimSpace(string(out))

	// Rate limiting is reported via output text, not a distinct exit code,
	// so check it before interpreting err.
	if strings.Contains(strings.ToLower(output), "rate limit") {
		result.Verdict = "rate_limited"
		result.Detail = output
		return result
	}

	// A non-zero exit with clean-looking output is treated as success;
	// otherwise surface the output (or the exec error when output is empty).
	if err != nil && !strings.Contains(output, "No findings") && !strings.Contains(output, "no issues") {
		result.Verdict = "error"
		if output != "" {
			result.Detail = output
		} else {
			result.Detail = err.Error()
		}
		return result
	}

	// Archive raw reviewer output as training data (best effort), then
	// count file:line hints as the findings metric.
	s.storeReviewOutput(repoDir, repo, reviewer, output)
	result.Findings = countFindingHints(output)

	if strings.Contains(output, "No findings") || strings.Contains(output, "no issues") || strings.Contains(output, "LGTM") {
		result.Verdict = "clean"
		if dryRun {
			result.Action = "skipped (dry run)"
			return result
		}
		if localOnly {
			result.Action = "local only"
			return result
		}

		// Clean review and an existing PR: enable auto-merge via the gh CLI.
		// A merge failure is not fatal — fall through to "waiting".
		if url, err := readGitHubPRURL(repoDir); err == nil && url != "" {
			mergeCmd := exec.CommandContext(ctx, "gh", "pr", "merge", "--auto", "--squash", "--delete-branch")
			mergeCmd.Dir = repoDir
			if mergeOut, err := mergeCmd.CombinedOutput(); err == nil {
				result.Action = "merged"
				result.Detail = strings.TrimSpace(string(mergeOut))
				return result
			}
		}

		result.Action = "waiting"
		return result
	}

	result.Verdict = "findings"
	if dryRun {
		result.Action = "skipped (dry run)"
		return result
	}

	result.Action = "waiting"
	return result
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) storeReviewOutput(repoDir, repo, reviewer, output string) {
|
|
||||||
home := reviewQueueHomeDir()
|
|
||||||
dataDir := filepath.Join(home, ".core", "training", "reviews")
|
|
||||||
if err := coreio.Local.EnsureDir(dataDir); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
payload := map[string]string{
|
|
||||||
"repo": repo,
|
|
||||||
"reviewer": reviewer,
|
|
||||||
"output": output,
|
|
||||||
"source": repoDir,
|
|
||||||
}
|
|
||||||
data, err := json.MarshalIndent(payload, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
name := fmt.Sprintf("%s-%s-%d.json", repo, reviewer, time.Now().Unix())
|
|
||||||
_ = writeAtomic(filepath.Join(dataDir, name), string(data))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) saveRateLimitState(info *RateLimitInfo) {
|
|
||||||
home := reviewQueueHomeDir()
|
|
||||||
path := filepath.Join(home, ".core", "coderabbit-ratelimit.json")
|
|
||||||
data, err := json.Marshal(info)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_ = writeAtomic(path, string(data))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) loadRateLimitState() *RateLimitInfo {
|
|
||||||
home := reviewQueueHomeDir()
|
|
||||||
path := filepath.Join(home, ".core", "coderabbit-ratelimit.json")
|
|
||||||
data, err := coreio.Local.Read(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var info RateLimitInfo
|
|
||||||
if err := json.Unmarshal([]byte(data), &info); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if !info.Limited {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &info
|
|
||||||
}
|
|
||||||
|
|
||||||
// findingHintRe matches "path/to/file.<ext>:<line>" references that
// reviewers emit per finding. Compiled once at package scope so the review
// path doesn't recompile the pattern on every call.
var findingHintRe = regexp.MustCompile(`(?m)[^ \t\n\r]+\.(?:go|php|ts|tsx|js|jsx|py|rb|java|cs|cpp|cxx|cc|md):\d+`)

// countFindingHints counts file:line references in reviewer output as a
// rough proxy for the number of findings.
func countFindingHints(output string) int {
	return len(findingHintRe.FindAllString(output, -1))
}
|
|
||||||
|
|
@ -4,11 +4,9 @@ package agentic
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
|
||||||
|
|
||||||
|
core "dappco.re/go/core"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
@ -81,7 +79,7 @@ func (s *PrepSubsystem) scan(ctx context.Context, _ *mcp.CallToolRequest, input
|
||||||
seen := make(map[string]bool)
|
seen := make(map[string]bool)
|
||||||
var unique []ScanIssue
|
var unique []ScanIssue
|
||||||
for _, issue := range allIssues {
|
for _, issue := range allIssues {
|
||||||
key := fmt.Sprintf("%s#%d", issue.Repo, issue.Number)
|
key := core.Sprintf("%s#%d", issue.Repo, issue.Number)
|
||||||
if !seen[key] {
|
if !seen[key] {
|
||||||
seen[key] = true
|
seen[key] = true
|
||||||
unique = append(unique, issue)
|
unique = append(unique, issue)
|
||||||
|
|
@ -100,8 +98,8 @@ func (s *PrepSubsystem) scan(ctx context.Context, _ *mcp.CallToolRequest, input
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) listOrgRepos(ctx context.Context, org string) ([]string, error) {
|
func (s *PrepSubsystem) listOrgRepos(ctx context.Context, org string) ([]string, error) {
|
||||||
url := fmt.Sprintf("%s/api/v1/orgs/%s/repos?limit=50", s.forgeURL, org)
|
orgReposURL := core.Sprintf("%s/api/v1/orgs/%s/repos?limit=50", s.forgeURL, org)
|
||||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, _ := http.NewRequestWithContext(ctx, "GET", orgReposURL, nil)
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
resp, err := s.client.Do(req)
|
||||||
|
|
@ -110,13 +108,14 @@ func (s *PrepSubsystem) listOrgRepos(ctx context.Context, org string) ([]string,
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
if resp.StatusCode != 200 {
|
if resp.StatusCode != 200 {
|
||||||
return nil, coreerr.E("listOrgRepos", fmt.Sprintf("HTTP %d listing repos", resp.StatusCode), nil)
|
resp.Body.Close()
|
||||||
|
return nil, coreerr.E("listOrgRepos", core.Sprintf("HTTP %d listing repos", resp.StatusCode), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
var repos []struct {
|
var repos []struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
}
|
}
|
||||||
json.NewDecoder(resp.Body).Decode(&repos)
|
core.JSONUnmarshalString(readBody(resp.Body), &repos)
|
||||||
|
|
||||||
var names []string
|
var names []string
|
||||||
for _, r := range repos {
|
for _, r := range repos {
|
||||||
|
|
@ -126,9 +125,9 @@ func (s *PrepSubsystem) listOrgRepos(ctx context.Context, org string) ([]string,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label string) ([]ScanIssue, error) {
|
func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label string) ([]ScanIssue, error) {
|
||||||
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues?state=open&labels=%s&limit=10&type=issues",
|
repoIssuesURL := core.Sprintf("%s/api/v1/repos/%s/%s/issues?state=open&labels=%s&limit=10&type=issues",
|
||||||
s.forgeURL, org, repo, label)
|
s.forgeURL, org, repo, label)
|
||||||
req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, _ := http.NewRequestWithContext(ctx, "GET", repoIssuesURL, nil)
|
||||||
req.Header.Set("Authorization", "token "+s.forgeToken)
|
req.Header.Set("Authorization", "token "+s.forgeToken)
|
||||||
|
|
||||||
resp, err := s.client.Do(req)
|
resp, err := s.client.Do(req)
|
||||||
|
|
@ -137,7 +136,8 @@ func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label str
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
if resp.StatusCode != 200 {
|
if resp.StatusCode != 200 {
|
||||||
return nil, coreerr.E("listRepoIssues", fmt.Sprintf("HTTP %d for "+repo, resp.StatusCode), nil)
|
resp.Body.Close()
|
||||||
|
return nil, coreerr.E("listRepoIssues", core.Sprintf("HTTP %d for "+repo, resp.StatusCode), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
var issues []struct {
|
var issues []struct {
|
||||||
|
|
@ -151,7 +151,7 @@ func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label str
|
||||||
} `json:"assignee"`
|
} `json:"assignee"`
|
||||||
HTMLURL string `json:"html_url"`
|
HTMLURL string `json:"html_url"`
|
||||||
}
|
}
|
||||||
json.NewDecoder(resp.Body).Decode(&issues)
|
core.JSONUnmarshalString(readBody(resp.Body), &issues)
|
||||||
|
|
||||||
var result []ScanIssue
|
var result []ScanIssue
|
||||||
for _, issue := range issues {
|
for _, issue := range issues {
|
||||||
|
|
@ -170,7 +170,7 @@ func (s *PrepSubsystem) listRepoIssues(ctx context.Context, org, repo, label str
|
||||||
Title: issue.Title,
|
Title: issue.Title,
|
||||||
Labels: labels,
|
Labels: labels,
|
||||||
Assignee: assignee,
|
Assignee: assignee,
|
||||||
URL: strings.Replace(issue.HTMLURL, "https://forge.lthn.ai", s.forgeURL, 1),
|
URL: core.Replace(issue.HTMLURL, "https://forge.lthn.ai", s.forgeURL),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -4,13 +4,10 @@ package agentic
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
|
|
@ -29,92 +26,71 @@ import (
|
||||||
// running → failed (agent crashed / non-zero exit)
|
// running → failed (agent crashed / non-zero exit)
|
||||||
|
|
||||||
// WorkspaceStatus represents the current state of an agent workspace.
|
// WorkspaceStatus represents the current state of an agent workspace.
|
||||||
//
|
|
||||||
// status := WorkspaceStatus{
|
|
||||||
// Status: "blocked",
|
|
||||||
// Agent: "claude",
|
|
||||||
// Repo: "go-mcp",
|
|
||||||
// }
|
|
||||||
type WorkspaceStatus struct {
|
type WorkspaceStatus struct {
|
||||||
Status string `json:"status"` // running, completed, blocked, failed
|
Status string `json:"status"` // running, completed, blocked, failed
|
||||||
Agent string `json:"agent"` // gemini, claude, codex
|
Agent string `json:"agent"` // gemini, claude, codex
|
||||||
Repo string `json:"repo"` // target repo
|
Repo string `json:"repo"` // target repo
|
||||||
Org string `json:"org,omitempty"` // forge org (e.g. "core")
|
Org string `json:"org,omitempty"` // forge org (e.g. "core")
|
||||||
Task string `json:"task"` // task description
|
Task string `json:"task"` // task description
|
||||||
Branch string `json:"branch,omitempty"` // git branch name
|
Branch string `json:"branch,omitempty"` // git branch name
|
||||||
Issue int `json:"issue,omitempty"` // forge issue number
|
Issue int `json:"issue,omitempty"` // forge issue number
|
||||||
PID int `json:"pid,omitempty"` // process ID (if running)
|
PID int `json:"pid,omitempty"` // process ID (if running)
|
||||||
StartedAt time.Time `json:"started_at"` // when dispatch started
|
StartedAt time.Time `json:"started_at"` // when dispatch started
|
||||||
UpdatedAt time.Time `json:"updated_at"` // last status change
|
UpdatedAt time.Time `json:"updated_at"` // last status change
|
||||||
Question string `json:"question,omitempty"` // from BLOCKED.md
|
Question string `json:"question,omitempty"` // from BLOCKED.md
|
||||||
Runs int `json:"runs"` // how many times dispatched/resumed
|
Runs int `json:"runs"` // how many times dispatched/resumed
|
||||||
PRURL string `json:"pr_url,omitempty"` // pull request URL (after PR created)
|
PRURL string `json:"pr_url,omitempty"` // pull request URL (after PR created)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// writeStatus serialises workspace status to status.json.
|
||||||
|
//
|
||||||
|
// writeStatus(wsDir, &WorkspaceStatus{Status: "running", Agent: "claude"})
|
||||||
func writeStatus(wsDir string, status *WorkspaceStatus) error {
|
func writeStatus(wsDir string, status *WorkspaceStatus) error {
|
||||||
status.UpdatedAt = time.Now()
|
status.UpdatedAt = time.Now()
|
||||||
data, err := json.MarshalIndent(status, "", " ")
|
return coreio.Local.Write(core.JoinPath(wsDir, "status.json"), core.JSONMarshalString(status))
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return writeAtomic(filepath.Join(wsDir, "status.json"), string(data))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) saveStatus(wsDir string, status *WorkspaceStatus) {
|
|
||||||
if err := writeStatus(wsDir, status); err != nil {
|
|
||||||
coreerr.Warn("failed to write workspace status", "workspace", filepath.Base(wsDir), "err", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// readStatus deserialises workspace status from status.json.
|
||||||
|
//
|
||||||
|
// st, err := readStatus(wsDir)
|
||||||
|
// // st.Status == "running", st.Agent == "claude"
|
||||||
func readStatus(wsDir string) (*WorkspaceStatus, error) {
|
func readStatus(wsDir string) (*WorkspaceStatus, error) {
|
||||||
data, err := coreio.Local.Read(filepath.Join(wsDir, "status.json"))
|
data, err := coreio.Local.Read(core.JoinPath(wsDir, "status.json"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var s WorkspaceStatus
|
var workspaceStatus WorkspaceStatus
|
||||||
if err := json.Unmarshal([]byte(data), &s); err != nil {
|
result := core.JSONUnmarshalString(data, &workspaceStatus)
|
||||||
return nil, err
|
if !result.OK {
|
||||||
|
return nil, coreerr.E("readStatus", "failed to parse status.json", nil)
|
||||||
}
|
}
|
||||||
return &s, nil
|
return &workspaceStatus, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- agentic_status tool ---
|
// --- agentic_status tool ---
|
||||||
|
|
||||||
// StatusInput is the input for agentic_status.
|
|
||||||
//
|
|
||||||
// input := StatusInput{Workspace: "go-mcp-1700000000"}
|
|
||||||
type StatusInput struct {
|
type StatusInput struct {
|
||||||
Workspace string `json:"workspace,omitempty"` // specific workspace name, or empty for all
|
Workspace string `json:"workspace,omitempty"` // specific workspace name, or empty for all
|
||||||
}
|
}
|
||||||
|
|
||||||
// StatusOutput is the output for agentic_status.
|
|
||||||
//
|
|
||||||
// // out.Count == 2, len(out.Workspaces) == 2
|
|
||||||
type StatusOutput struct {
|
type StatusOutput struct {
|
||||||
Workspaces []WorkspaceInfo `json:"workspaces"`
|
Workspaces []WorkspaceInfo `json:"workspaces"`
|
||||||
Count int `json:"count"`
|
Count int `json:"count"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// WorkspaceInfo summarizes a tracked workspace.
|
|
||||||
//
|
|
||||||
// // ws.Name == "go-mcp-1700000000", ws.Status == "running"
|
|
||||||
type WorkspaceInfo struct {
|
type WorkspaceInfo struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Agent string `json:"agent"`
|
Agent string `json:"agent"`
|
||||||
Repo string `json:"repo"`
|
Repo string `json:"repo"`
|
||||||
Branch string `json:"branch,omitempty"`
|
Task string `json:"task"`
|
||||||
Issue int `json:"issue,omitempty"`
|
Age string `json:"age"`
|
||||||
PRURL string `json:"pr_url,omitempty"`
|
Question string `json:"question,omitempty"`
|
||||||
Task string `json:"task"`
|
Runs int `json:"runs"`
|
||||||
Age string `json:"age"`
|
|
||||||
Question string `json:"question,omitempty"`
|
|
||||||
Runs int `json:"runs"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerStatusTool(svc *coremcp.Service) {
|
func (s *PrepSubsystem) registerStatusTool(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_status",
|
Name: "agentic_status",
|
||||||
Description: "List agent workspaces and their status (running, completed, blocked, failed). Shows blocked agents with their questions.",
|
Description: "List agent workspaces and their status (running, completed, blocked, failed). Shows blocked agents with their questions.",
|
||||||
}, s.status)
|
}, s.status)
|
||||||
|
|
@ -122,88 +98,67 @@ func (s *PrepSubsystem) registerStatusTool(svc *coremcp.Service) {
|
||||||
|
|
||||||
func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, input StatusInput) (*mcp.CallToolResult, StatusOutput, error) {
|
func (s *PrepSubsystem) status(ctx context.Context, _ *mcp.CallToolRequest, input StatusInput) (*mcp.CallToolResult, StatusOutput, error) {
|
||||||
wsDirs := s.listWorkspaceDirs()
|
wsDirs := s.listWorkspaceDirs()
|
||||||
|
if len(wsDirs) == 0 {
|
||||||
|
return nil, StatusOutput{}, coreerr.E("status", "no workspaces found", nil)
|
||||||
|
}
|
||||||
|
|
||||||
var workspaces []WorkspaceInfo
|
var workspaces []WorkspaceInfo
|
||||||
|
|
||||||
for _, wsDir := range wsDirs {
|
for _, wsDir := range wsDirs {
|
||||||
name := filepath.Base(wsDir)
|
workspaceName := core.PathBase(wsDir)
|
||||||
|
|
||||||
// Filter by specific workspace if requested
|
// Filter by specific workspace if requested
|
||||||
if input.Workspace != "" && name != input.Workspace {
|
if input.Workspace != "" && workspaceName != input.Workspace {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
info := WorkspaceInfo{Name: name}
|
info := WorkspaceInfo{Name: workspaceName}
|
||||||
|
|
||||||
// Try reading status.json
|
// Try reading status.json
|
||||||
st, err := readStatus(wsDir)
|
statusRecord, err := readStatus(wsDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Legacy workspace (no status.json) — check for log file
|
// Legacy workspace (no status.json) — check for log file
|
||||||
logFiles, _ := filepath.Glob(filepath.Join(wsDir, "agent-*.log"))
|
logFiles := core.PathGlob(core.JoinPath(wsDir, "agent-*.log"))
|
||||||
if len(logFiles) > 0 {
|
if len(logFiles) > 0 {
|
||||||
info.Status = "completed"
|
info.Status = "completed"
|
||||||
} else {
|
} else {
|
||||||
info.Status = "unknown"
|
info.Status = "unknown"
|
||||||
}
|
}
|
||||||
if fi, err := os.Stat(wsDir); err == nil {
|
if fileInfo, statErr := os.Stat(wsDir); statErr == nil {
|
||||||
info.Age = time.Since(fi.ModTime()).Truncate(time.Minute).String()
|
info.Age = time.Since(fileInfo.ModTime()).Truncate(time.Minute).String()
|
||||||
}
|
}
|
||||||
workspaces = append(workspaces, info)
|
workspaces = append(workspaces, info)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
info.Status = st.Status
|
info.Status = statusRecord.Status
|
||||||
info.Agent = st.Agent
|
info.Agent = statusRecord.Agent
|
||||||
info.Repo = st.Repo
|
info.Repo = statusRecord.Repo
|
||||||
info.Branch = st.Branch
|
info.Task = statusRecord.Task
|
||||||
info.Issue = st.Issue
|
info.Runs = statusRecord.Runs
|
||||||
info.PRURL = st.PRURL
|
info.Age = time.Since(statusRecord.StartedAt).Truncate(time.Minute).String()
|
||||||
info.Task = st.Task
|
|
||||||
info.Runs = st.Runs
|
|
||||||
info.Age = time.Since(st.StartedAt).Truncate(time.Minute).String()
|
|
||||||
|
|
||||||
// If status is "running", check if PID is still alive
|
// If status is "running", check if PID is still alive
|
||||||
if st.Status == "running" && st.PID > 0 {
|
if statusRecord.Status == "running" && statusRecord.PID > 0 {
|
||||||
proc, err := os.FindProcess(st.PID)
|
proc, err := os.FindProcess(statusRecord.PID)
|
||||||
if err != nil || proc.Signal(nil) != nil {
|
if err != nil || proc.Signal(nil) != nil {
|
||||||
prevStatus := st.Status
|
|
||||||
status := "completed"
|
|
||||||
channel := coremcp.ChannelAgentComplete
|
|
||||||
payload := map[string]any{
|
|
||||||
"workspace": name,
|
|
||||||
"agent": st.Agent,
|
|
||||||
"repo": st.Repo,
|
|
||||||
"branch": st.Branch,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process died — check for BLOCKED.md
|
// Process died — check for BLOCKED.md
|
||||||
blockedPath := filepath.Join(wsDir, "src", "BLOCKED.md")
|
blockedPath := core.JoinPath(wsDir, "src", "BLOCKED.md")
|
||||||
if data, err := coreio.Local.Read(blockedPath); err == nil {
|
if data, err := coreio.Local.Read(blockedPath); err == nil {
|
||||||
info.Status = "blocked"
|
info.Status = "blocked"
|
||||||
info.Question = strings.TrimSpace(data)
|
info.Question = core.Trim(data)
|
||||||
st.Status = "blocked"
|
statusRecord.Status = "blocked"
|
||||||
st.Question = info.Question
|
statusRecord.Question = info.Question
|
||||||
status = "blocked"
|
|
||||||
channel = coremcp.ChannelAgentBlocked
|
|
||||||
if st.Question != "" {
|
|
||||||
payload["question"] = st.Question
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
info.Status = "completed"
|
info.Status = "completed"
|
||||||
st.Status = "completed"
|
statusRecord.Status = "completed"
|
||||||
}
|
|
||||||
s.saveStatus(wsDir, st)
|
|
||||||
|
|
||||||
if prevStatus != status {
|
|
||||||
payload["status"] = status
|
|
||||||
s.emitChannel(ctx, channel, payload)
|
|
||||||
s.emitChannel(ctx, coremcp.ChannelAgentStatus, payload)
|
|
||||||
}
|
}
|
||||||
|
writeStatus(wsDir, statusRecord)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if st.Status == "blocked" {
|
if statusRecord.Status == "blocked" {
|
||||||
info.Question = st.Question
|
info.Question = statusRecord.Question
|
||||||
}
|
}
|
||||||
|
|
||||||
workspaces = append(workspaces, info)
|
workspaces = append(workspaces, info)
|
||||||
|
|
|
||||||
|
|
@ -1,94 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestStatus_Good_EmptyWorkspaceSet verifies that the agentic_status
// handler succeeds and reports zero workspaces for an empty code path.
func TestStatus_Good_EmptyWorkspaceSet(t *testing.T) {
	sub := &PrepSubsystem{codePath: t.TempDir()}

	_, out, err := sub.status(context.Background(), nil, StatusInput{})
	if err != nil {
		t.Fatalf("status failed: %v", err)
	}
	if out.Count != 0 {
		t.Fatalf("expected count 0, got %d", out.Count)
	}
	if len(out.Workspaces) != 0 {
		t.Fatalf("expected empty workspace list, got %d entries", len(out.Workspaces))
	}
}
|
|
||||||
|
|
||||||
// TestPlanRead_Good_ReturnsWrittenPlan round-trips a plan: write it with
// writePlan, read it back through the planRead handler, and check that the
// identifying fields survive.
func TestPlanRead_Good_ReturnsWrittenPlan(t *testing.T) {
	sub := &PrepSubsystem{codePath: t.TempDir()}

	plan := &Plan{
		ID:        "plan-1",
		Title:     "Read me",
		Status:    "ready",
		Objective: "Verify plan reads",
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if _, err := writePlan(sub.plansDir(), plan); err != nil {
		t.Fatalf("writePlan failed: %v", err)
	}

	_, out, err := sub.planRead(context.Background(), nil, PlanReadInput{ID: plan.ID})
	if err != nil {
		t.Fatalf("planRead failed: %v", err)
	}
	if !out.Success {
		t.Fatal("expected success output")
	}
	if out.Plan.ID != plan.ID {
		t.Fatalf("expected plan %q, got %q", plan.ID, out.Plan.ID)
	}
	if out.Plan.Title != plan.Title {
		t.Fatalf("expected title %q, got %q", plan.Title, out.Plan.Title)
	}
}
|
|
||||||
|
|
||||||
// TestStatus_Good_ExposesWorkspaceMetadata writes a completed workspace
// status record and checks that the status handler surfaces its branch,
// issue number, and PR URL in the workspace listing.
func TestStatus_Good_ExposesWorkspaceMetadata(t *testing.T) {
	root := t.TempDir()
	sub := &PrepSubsystem{codePath: root}

	wsDir := filepath.Join(root, ".core", "workspace", "repo-123")
	plan := &WorkspaceStatus{
		Status: "completed",
		Agent:  "claude",
		Repo:   "go-mcp",
		Branch: "agent/issue-42-fix-status",
		Issue:  42,
		PRURL:  "https://forge.example/pr/42",
		Task:   "Fix status output",
		Runs:   2,
	}
	if err := writeStatus(wsDir, plan); err != nil {
		t.Fatalf("writeStatus failed: %v", err)
	}

	_, out, err := sub.status(context.Background(), nil, StatusInput{})
	if err != nil {
		t.Fatalf("status failed: %v", err)
	}
	if out.Count != 1 {
		t.Fatalf("expected count 1, got %d", out.Count)
	}

	info := out.Workspaces[0]
	if info.Branch != plan.Branch {
		t.Fatalf("expected branch %q, got %q", plan.Branch, info.Branch)
	}
	if info.Issue != plan.Issue {
		t.Fatalf("expected issue %d, got %d", plan.Issue, info.Issue)
	}
	if info.PRURL != plan.PRURL {
		t.Fatalf("expected PR URL %q, got %q", plan.PRURL, info.PRURL)
	}
}
|
|
||||||
|
|
@ -1,167 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WatchInput is the input for agentic_watch.
type WatchInput struct {
	Workspaces   []string `json:"workspaces,omitempty"`    // specific workspaces to watch; empty watches all active ones
	PollInterval int      `json:"poll_interval,omitempty"` // seconds between status polls (default 5)
	Timeout      int      `json:"timeout,omitempty"`       // overall deadline in seconds (default 600)
}
|
|
||||||
|
|
||||||
// WatchOutput is the result of watching one or more workspaces.
type WatchOutput struct {
	Success   bool          `json:"success"`          // true when no workspace failed, blocked, or timed out
	Completed []WatchResult `json:"completed"`        // workspaces that finished successfully
	Failed    []WatchResult `json:"failed,omitempty"` // failed, blocked, or timed-out workspaces
	Duration  string        `json:"duration"`         // total wall-clock watch time, rounded to seconds
}
|
|
||||||
|
|
||||||
// WatchResult describes one workspace result.
type WatchResult struct {
	Workspace string `json:"workspace"`        // workspace name
	Agent     string `json:"agent"`            // agent that ran in the workspace
	Repo      string `json:"repo"`             // target repository
	Status    string `json:"status"`           // terminal status ("completed", "failed", "blocked", "timeout", ...)
	Branch    string `json:"branch,omitempty"` // git branch the agent worked on
	Issue     int    `json:"issue,omitempty"`  // forge issue number, if any
	PRURL     string `json:"pr_url,omitempty"` // pull request URL, if one was created
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) registerWatchTool(svc *coremcp.Service) {
|
|
||||||
server := svc.Server()
|
|
||||||
coremcp.AddToolRecorded(svc, server, "agentic", &mcp.Tool{
|
|
||||||
Name: "agentic_watch",
|
|
||||||
Description: "Watch running or queued agent workspaces until they finish and return a completion summary.",
|
|
||||||
}, s.watch)
|
|
||||||
}
|
|
||||||
|
|
||||||
// watch implements agentic_watch: it polls workspace status until every
// target workspace reaches a terminal state, the timeout expires, or the
// context is cancelled. Targets default to all currently active
// workspaces when none are named in the input.
func (s *PrepSubsystem) watch(ctx context.Context, req *mcp.CallToolRequest, input WatchInput) (*mcp.CallToolResult, WatchOutput, error) {
	pollInterval := time.Duration(input.PollInterval) * time.Second
	if pollInterval <= 0 {
		pollInterval = 5 * time.Second // default poll cadence
	}

	timeout := time.Duration(input.Timeout) * time.Second
	if timeout <= 0 {
		timeout = 10 * time.Minute // default overall deadline
	}

	start := time.Now()
	deadline := start.Add(timeout)

	targets := input.Workspaces
	if len(targets) == 0 {
		targets = s.findActiveWorkspaces()
	}

	// Nothing to watch: report immediate success.
	if len(targets) == 0 {
		return nil, WatchOutput{Success: true, Duration: "0s"}, nil
	}

	// remaining is the set of workspaces still awaiting a terminal state.
	remaining := make(map[string]struct{}, len(targets))
	for _, workspace := range targets {
		remaining[workspace] = struct{}{}
	}

	completed := make([]WatchResult, 0, len(targets))
	failed := make([]WatchResult, 0)

	for len(remaining) > 0 {
		// Deadline check happens before each sleep so the final poll's
		// results are still honoured.
		if time.Now().After(deadline) {
			for workspace := range remaining {
				failed = append(failed, WatchResult{
					Workspace: workspace,
					Status:    "timeout",
				})
			}
			break
		}

		// Sleep one poll interval, but abort promptly on cancellation.
		select {
		case <-ctx.Done():
			return nil, WatchOutput{}, coreerr.E("watch", "cancelled", ctx.Err())
		case <-time.After(pollInterval):
		}

		_, statusOut, err := s.status(ctx, req, StatusInput{})
		if err != nil {
			return nil, WatchOutput{}, coreerr.E("watch", "failed to refresh status", err)
		}

		for _, info := range statusOut.Workspaces {
			if _, ok := remaining[info.Name]; !ok {
				continue
			}

			// Non-terminal statuses fall through and are re-polled.
			switch info.Status {
			case "completed", "merged", "ready-for-review":
				completed = append(completed, WatchResult{
					Workspace: info.Name,
					Agent:     info.Agent,
					Repo:      info.Repo,
					Status:    info.Status,
					Branch:    info.Branch,
					Issue:     info.Issue,
					PRURL:     info.PRURL,
				})
				delete(remaining, info.Name)
			case "failed", "blocked":
				failed = append(failed, WatchResult{
					Workspace: info.Name,
					Agent:     info.Agent,
					Repo:      info.Repo,
					Status:    info.Status,
					Branch:    info.Branch,
					Issue:     info.Issue,
					PRURL:     info.PRURL,
				})
				delete(remaining, info.Name)
			}
		}
	}

	return nil, WatchOutput{
		Success:   len(failed) == 0,
		Completed: completed,
		Failed:    failed,
		Duration:  time.Since(start).Round(time.Second).String(),
	}, nil
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) findActiveWorkspaces() []string {
|
|
||||||
wsDirs := s.listWorkspaceDirs()
|
|
||||||
if len(wsDirs) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
active := make([]string, 0, len(wsDirs))
|
|
||||||
for _, wsDir := range wsDirs {
|
|
||||||
st, err := readStatus(wsDir)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch st.Status {
|
|
||||||
case "running", "queued":
|
|
||||||
active = append(active, filepath.Base(wsDir))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return active
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *PrepSubsystem) resolveWorkspaceDir(name string) string {
|
|
||||||
if filepath.IsAbs(name) {
|
|
||||||
return name
|
|
||||||
}
|
|
||||||
return filepath.Join(s.workspaceRoot(), name)
|
|
||||||
}
|
|
||||||
|
|
@ -1,51 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// writeAtomic writes content to path by staging it in a temporary file and
|
|
||||||
// renaming it into place.
|
|
||||||
//
|
|
||||||
// This avoids exposing partially written workspace files to agents that may
|
|
||||||
// read status, prompt, or plan documents while they are being updated.
|
|
||||||
func writeAtomic(path, content string) error {
|
|
||||||
dir := filepath.Dir(path)
|
|
||||||
if err := coreio.Local.EnsureDir(dir); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
tmp, err := os.CreateTemp(dir, "."+filepath.Base(path)+".*.tmp")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
tmpPath := tmp.Name()
|
|
||||||
|
|
||||||
cleanup := func() {
|
|
||||||
_ = tmp.Close()
|
|
||||||
_ = os.Remove(tmpPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := tmp.WriteString(content); err != nil {
|
|
||||||
cleanup()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := tmp.Sync(); err != nil {
|
|
||||||
cleanup()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := tmp.Close(); err != nil {
|
|
||||||
_ = os.Remove(tmpPath)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := os.Rename(tmpPath, path); err != nil {
|
|
||||||
_ = os.Remove(tmpPath)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -7,9 +7,9 @@ package brain
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// errBridgeNotAvailable is returned when a tool requires the Laravel bridge
|
// errBridgeNotAvailable is returned when a tool requires the Laravel bridge
|
||||||
|
|
@ -20,56 +20,31 @@ var errBridgeNotAvailable = coreerr.E("brain", "bridge not available", nil)
|
||||||
// It proxies brain_* tool calls to the Laravel backend via the shared IDE bridge.
|
// It proxies brain_* tool calls to the Laravel backend via the shared IDE bridge.
|
||||||
type Subsystem struct {
|
type Subsystem struct {
|
||||||
bridge *ide.Bridge
|
bridge *ide.Bridge
|
||||||
notifier coremcp.Notifier
|
notifier Notifier
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
_ coremcp.Subsystem = (*Subsystem)(nil)
|
|
||||||
_ coremcp.SubsystemWithShutdown = (*Subsystem)(nil)
|
|
||||||
_ coremcp.SubsystemWithNotifier = (*Subsystem)(nil)
|
|
||||||
)
|
|
||||||
|
|
||||||
// New creates a brain subsystem that uses the given IDE bridge for Laravel communication.
|
// New creates a brain subsystem that uses the given IDE bridge for Laravel communication.
|
||||||
//
|
|
||||||
// brain := New(ideBridge)
|
|
||||||
//
|
|
||||||
// Pass nil if headless (tools will return errBridgeNotAvailable).
|
// Pass nil if headless (tools will return errBridgeNotAvailable).
|
||||||
func New(bridge *ide.Bridge) *Subsystem {
|
func New(bridge *ide.Bridge) *Subsystem {
|
||||||
s := &Subsystem{bridge: bridge}
|
return &Subsystem{bridge: bridge}
|
||||||
if bridge != nil {
|
|
||||||
bridge.AddObserver(func(msg ide.BridgeMessage) {
|
|
||||||
s.handleBridgeMessage(msg)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name implements mcp.Subsystem.
|
// Name implements mcp.Subsystem.
|
||||||
func (s *Subsystem) Name() string { return "brain" }
|
func (s *Subsystem) Name() string { return "brain" }
|
||||||
|
|
||||||
|
// Notifier pushes events to MCP sessions (matches pkg/mcp.Notifier).
|
||||||
|
type Notifier interface {
|
||||||
|
ChannelSend(ctx context.Context, channel string, data any)
|
||||||
|
}
|
||||||
|
|
||||||
// SetNotifier stores the shared notifier so this subsystem can emit channel events.
|
// SetNotifier stores the shared notifier so this subsystem can emit channel events.
|
||||||
func (s *Subsystem) SetNotifier(n coremcp.Notifier) {
|
func (s *Subsystem) SetNotifier(n Notifier) {
|
||||||
s.notifier = n
|
s.notifier = n
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterTools implements mcp.Subsystem.
|
// RegisterTools implements mcp.Subsystem.
|
||||||
func (s *Subsystem) RegisterTools(svc *coremcp.Service) {
|
func (s *Subsystem) RegisterTools(server *mcp.Server) {
|
||||||
s.registerBrainTools(svc)
|
s.registerBrainTools(server)
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) handleBridgeMessage(msg ide.BridgeMessage) {
|
|
||||||
switch msg.Type {
|
|
||||||
case "brain_remember":
|
|
||||||
emitBridgeChannel(context.Background(), s.notifier, coremcp.ChannelBrainRememberDone, bridgePayload(msg.Data, "type", "project"))
|
|
||||||
case "brain_recall":
|
|
||||||
payload := bridgePayload(msg.Data, "query", "project", "type", "agent_id")
|
|
||||||
payload["count"] = bridgeCount(msg.Data)
|
|
||||||
emitBridgeChannel(context.Background(), s.notifier, coremcp.ChannelBrainRecallDone, payload)
|
|
||||||
case "brain_forget":
|
|
||||||
emitBridgeChannel(context.Background(), s.notifier, coremcp.ChannelBrainForgetDone, bridgePayload(msg.Data, "id", "reason"))
|
|
||||||
case "brain_list":
|
|
||||||
emitBridgeChannel(context.Background(), s.notifier, coremcp.ChannelBrainListDone, bridgePayload(msg.Data, "project", "type", "agent_id", "limit"))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown implements mcp.SubsystemWithShutdown.
|
// Shutdown implements mcp.SubsystemWithShutdown.
|
||||||
|
|
|
||||||
|
|
@ -7,20 +7,8 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type recordingNotifier struct {
|
|
||||||
channel string
|
|
||||||
data any
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *recordingNotifier) ChannelSend(_ context.Context, channel string, data any) {
|
|
||||||
r.channel = channel
|
|
||||||
r.data = data
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Nil bridge tests (headless mode) ---
|
// --- Nil bridge tests (headless mode) ---
|
||||||
|
|
||||||
func TestBrainRemember_Bad_NilBridge(t *testing.T) {
|
func TestBrainRemember_Bad_NilBridge(t *testing.T) {
|
||||||
|
|
@ -80,38 +68,6 @@ func TestSubsystem_Good_ShutdownNoop(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSubsystem_Good_BridgeRecallNotification(t *testing.T) {
|
|
||||||
sub := New(nil)
|
|
||||||
notifier := &recordingNotifier{}
|
|
||||||
sub.notifier = notifier
|
|
||||||
|
|
||||||
sub.handleBridgeMessage(ide.BridgeMessage{
|
|
||||||
Type: "brain_recall",
|
|
||||||
Data: map[string]any{
|
|
||||||
"query": "how does scoring work?",
|
|
||||||
"memories": []any{
|
|
||||||
map[string]any{"id": "m1"},
|
|
||||||
map[string]any{"id": "m2"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
if notifier.channel != "brain.recall.complete" {
|
|
||||||
t.Fatalf("expected brain.recall.complete, got %q", notifier.channel)
|
|
||||||
}
|
|
||||||
|
|
||||||
payload, ok := notifier.data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected payload map, got %T", notifier.data)
|
|
||||||
}
|
|
||||||
if payload["count"] != 2 {
|
|
||||||
t.Fatalf("expected count 2, got %v", payload["count"])
|
|
||||||
}
|
|
||||||
if payload["query"] != "how does scoring work?" {
|
|
||||||
t.Fatalf("expected query to be forwarded, got %v", payload["query"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Struct round-trip tests ---
|
// --- Struct round-trip tests ---
|
||||||
|
|
||||||
func TestRememberInput_Good_RoundTrip(t *testing.T) {
|
func TestRememberInput_Good_RoundTrip(t *testing.T) {
|
||||||
|
|
|
||||||
|
|
@ -1,59 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package brain
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
)
|
|
||||||
|
|
||||||
func bridgePayload(data any, keys ...string) map[string]any {
|
|
||||||
payload := make(map[string]any)
|
|
||||||
|
|
||||||
m, ok := data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
return payload
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, key := range keys {
|
|
||||||
if value, ok := m[key]; ok {
|
|
||||||
payload[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return payload
|
|
||||||
}
|
|
||||||
|
|
||||||
func bridgeCount(data any) int {
|
|
||||||
m, ok := data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if count, ok := m["count"]; ok {
|
|
||||||
switch v := count.(type) {
|
|
||||||
case int:
|
|
||||||
return v
|
|
||||||
case int32:
|
|
||||||
return int(v)
|
|
||||||
case int64:
|
|
||||||
return int(v)
|
|
||||||
case float64:
|
|
||||||
return int(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if memories, ok := m["memories"].([]any); ok {
|
|
||||||
return len(memories)
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func emitBridgeChannel(ctx context.Context, notifier coremcp.Notifier, channel string, data any) {
|
|
||||||
if notifier == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
notifier.ChannelSend(ctx, channel, data)
|
|
||||||
}
|
|
||||||
|
|
@ -5,24 +5,17 @@ package brain
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
goio "io"
|
goio "io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
core "dappco.re/go/core"
|
||||||
coreio "forge.lthn.ai/core/go-io"
|
coreio "forge.lthn.ai/core/go-io"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// channelSender is the callback for pushing channel events.
|
// channelSender is the callback for pushing channel events.
|
||||||
//
|
|
||||||
// fn := func(ctx context.Context, channel string, data any) { ... }
|
|
||||||
type channelSender func(ctx context.Context, channel string, data any)
|
type channelSender func(ctx context.Context, channel string, data any)
|
||||||
|
|
||||||
// DirectSubsystem implements mcp.Subsystem for OpenBrain via direct HTTP calls.
|
// DirectSubsystem implements mcp.Subsystem for OpenBrain via direct HTTP calls.
|
||||||
|
|
@ -35,12 +28,6 @@ type DirectSubsystem struct {
|
||||||
onChannel channelSender
|
onChannel channelSender
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
_ coremcp.Subsystem = (*DirectSubsystem)(nil)
|
|
||||||
_ coremcp.SubsystemWithShutdown = (*DirectSubsystem)(nil)
|
|
||||||
_ coremcp.SubsystemWithChannelCallback = (*DirectSubsystem)(nil)
|
|
||||||
)
|
|
||||||
|
|
||||||
// OnChannel sets a callback for channel event broadcasting.
|
// OnChannel sets a callback for channel event broadcasting.
|
||||||
// Called by the MCP service after creation to wire up notifications.
|
// Called by the MCP service after creation to wire up notifications.
|
||||||
//
|
//
|
||||||
|
|
@ -52,21 +39,22 @@ func (s *DirectSubsystem) OnChannel(fn func(ctx context.Context, channel string,
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDirect creates a brain subsystem that calls the OpenBrain API directly.
|
// NewDirect creates a brain subsystem that calls the OpenBrain API directly.
|
||||||
//
|
|
||||||
// brain := NewDirect()
|
|
||||||
//
|
|
||||||
// Reads CORE_BRAIN_URL and CORE_BRAIN_KEY from environment, or falls back
|
// Reads CORE_BRAIN_URL and CORE_BRAIN_KEY from environment, or falls back
|
||||||
// to ~/.claude/brain.key for the API key.
|
// to ~/.claude/brain.key for the API key.
|
||||||
|
//
|
||||||
|
// sub := brain.NewDirect()
|
||||||
|
// svc, _ := mcp.New(mcp.Options{Subsystems: []mcp.Subsystem{sub}})
|
||||||
func NewDirect() *DirectSubsystem {
|
func NewDirect() *DirectSubsystem {
|
||||||
apiURL := os.Getenv("CORE_BRAIN_URL")
|
apiURL := core.Env("CORE_BRAIN_URL")
|
||||||
if apiURL == "" {
|
if apiURL == "" {
|
||||||
apiURL = "https://api.lthn.sh"
|
apiURL = "https://api.lthn.sh"
|
||||||
}
|
}
|
||||||
|
|
||||||
apiKey := os.Getenv("CORE_BRAIN_KEY")
|
apiKey := core.Env("CORE_BRAIN_KEY")
|
||||||
if apiKey == "" {
|
if apiKey == "" {
|
||||||
if data, err := coreio.Local.Read(os.ExpandEnv("$HOME/.claude/brain.key")); err == nil {
|
keyPath := core.JoinPath(core.Env("HOME"), ".claude", "brain.key")
|
||||||
apiKey = strings.TrimSpace(data)
|
if data, err := coreio.Local.Read(keyPath); err == nil {
|
||||||
|
apiKey = core.Trim(data)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -81,27 +69,21 @@ func NewDirect() *DirectSubsystem {
|
||||||
func (s *DirectSubsystem) Name() string { return "brain" }
|
func (s *DirectSubsystem) Name() string { return "brain" }
|
||||||
|
|
||||||
// RegisterTools implements mcp.Subsystem.
|
// RegisterTools implements mcp.Subsystem.
|
||||||
func (s *DirectSubsystem) RegisterTools(svc *coremcp.Service) {
|
func (s *DirectSubsystem) RegisterTools(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
|
||||||
Name: "brain_remember",
|
Name: "brain_remember",
|
||||||
Description: "Store a memory in OpenBrain. Types: fact, decision, observation, plan, convention, architecture, research, documentation, service, bug, pattern, context, procedure.",
|
Description: "Store a memory in OpenBrain. Types: fact, decision, observation, plan, convention, architecture, research, documentation, service, bug, pattern, context, procedure.",
|
||||||
}, s.remember)
|
}, s.remember)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "brain_recall",
|
Name: "brain_recall",
|
||||||
Description: "Semantic search across OpenBrain memories. Returns memories ranked by similarity. Use agent_id 'cladius' for Cladius's memories.",
|
Description: "Semantic search across OpenBrain memories. Returns memories ranked by similarity. Use agent_id 'cladius' for Cladius's memories.",
|
||||||
}, s.recall)
|
}, s.recall)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "brain_forget",
|
Name: "brain_forget",
|
||||||
Description: "Remove a memory from OpenBrain by ID.",
|
Description: "Remove a memory from OpenBrain by ID.",
|
||||||
}, s.forget)
|
}, s.forget)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
|
||||||
Name: "brain_list",
|
|
||||||
Description: "List memories in OpenBrain with optional filtering by project, type, and agent.",
|
|
||||||
}, s.list)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown implements mcp.SubsystemWithShutdown.
|
// Shutdown implements mcp.SubsystemWithShutdown.
|
||||||
|
|
@ -114,11 +96,7 @@ func (s *DirectSubsystem) apiCall(ctx context.Context, method, path string, body
|
||||||
|
|
||||||
var reqBody goio.Reader
|
var reqBody goio.Reader
|
||||||
if body != nil {
|
if body != nil {
|
||||||
data, err := json.Marshal(body)
|
reqBody = bytes.NewReader([]byte(core.JSONMarshalString(body)))
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E("brain.apiCall", "marshal request", err)
|
|
||||||
}
|
|
||||||
reqBody = bytes.NewReader(data)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, method, s.apiURL+path, reqBody)
|
req, err := http.NewRequestWithContext(ctx, method, s.apiURL+path, reqBody)
|
||||||
|
|
@ -145,8 +123,9 @@ func (s *DirectSubsystem) apiCall(ctx context.Context, method, path string, body
|
||||||
}
|
}
|
||||||
|
|
||||||
var result map[string]any
|
var result map[string]any
|
||||||
if err := json.Unmarshal(respData, &result); err != nil {
|
r := core.JSONUnmarshalString(string(respData), &result)
|
||||||
return nil, coreerr.E("brain.apiCall", "parse response", err)
|
if !r.OK {
|
||||||
|
return nil, coreerr.E("brain.apiCall", "parse response", nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
return result, nil
|
return result, nil
|
||||||
|
|
@ -166,7 +145,7 @@ func (s *DirectSubsystem) remember(ctx context.Context, _ *mcp.CallToolRequest,
|
||||||
|
|
||||||
id, _ := result["id"].(string)
|
id, _ := result["id"].(string)
|
||||||
if s.onChannel != nil {
|
if s.onChannel != nil {
|
||||||
s.onChannel(ctx, coremcp.ChannelBrainRememberDone, map[string]any{
|
s.onChannel(ctx, "brain.remember.complete", map[string]any{
|
||||||
"id": id,
|
"id": id,
|
||||||
"type": input.Type,
|
"type": input.Type,
|
||||||
"project": input.Project,
|
"project": input.Project,
|
||||||
|
|
@ -205,11 +184,11 @@ func (s *DirectSubsystem) recall(ctx context.Context, _ *mcp.CallToolRequest, in
|
||||||
for _, m := range mems {
|
for _, m := range mems {
|
||||||
if mm, ok := m.(map[string]any); ok {
|
if mm, ok := m.(map[string]any); ok {
|
||||||
mem := Memory{
|
mem := Memory{
|
||||||
Content: fmt.Sprintf("%v", mm["content"]),
|
Content: core.Sprintf("%v", mm["content"]),
|
||||||
Type: fmt.Sprintf("%v", mm["type"]),
|
Type: core.Sprintf("%v", mm["type"]),
|
||||||
Project: fmt.Sprintf("%v", mm["project"]),
|
Project: core.Sprintf("%v", mm["project"]),
|
||||||
AgentID: fmt.Sprintf("%v", mm["agent_id"]),
|
AgentID: core.Sprintf("%v", mm["agent_id"]),
|
||||||
CreatedAt: fmt.Sprintf("%v", mm["created_at"]),
|
CreatedAt: core.Sprintf("%v", mm["created_at"]),
|
||||||
}
|
}
|
||||||
if id, ok := mm["id"].(string); ok {
|
if id, ok := mm["id"].(string); ok {
|
||||||
mem.ID = id
|
mem.ID = id
|
||||||
|
|
@ -226,7 +205,7 @@ func (s *DirectSubsystem) recall(ctx context.Context, _ *mcp.CallToolRequest, in
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.onChannel != nil {
|
if s.onChannel != nil {
|
||||||
s.onChannel(ctx, coremcp.ChannelBrainRecallDone, map[string]any{
|
s.onChannel(ctx, "brain.recall.complete", map[string]any{
|
||||||
"query": input.Query,
|
"query": input.Query,
|
||||||
"count": len(memories),
|
"count": len(memories),
|
||||||
})
|
})
|
||||||
|
|
@ -244,80 +223,9 @@ func (s *DirectSubsystem) forget(ctx context.Context, _ *mcp.CallToolRequest, in
|
||||||
return nil, ForgetOutput{}, err
|
return nil, ForgetOutput{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.onChannel != nil {
|
|
||||||
s.onChannel(ctx, coremcp.ChannelBrainForgetDone, map[string]any{
|
|
||||||
"id": input.ID,
|
|
||||||
"reason": input.Reason,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, ForgetOutput{
|
return nil, ForgetOutput{
|
||||||
Success: true,
|
Success: true,
|
||||||
Forgotten: input.ID,
|
Forgotten: input.ID,
|
||||||
Timestamp: time.Now(),
|
Timestamp: time.Now(),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *DirectSubsystem) list(ctx context.Context, _ *mcp.CallToolRequest, input ListInput) (*mcp.CallToolResult, ListOutput, error) {
|
|
||||||
limit := input.Limit
|
|
||||||
if limit == 0 {
|
|
||||||
limit = 50
|
|
||||||
}
|
|
||||||
|
|
||||||
values := url.Values{}
|
|
||||||
if input.Project != "" {
|
|
||||||
values.Set("project", input.Project)
|
|
||||||
}
|
|
||||||
if input.Type != "" {
|
|
||||||
values.Set("type", input.Type)
|
|
||||||
}
|
|
||||||
if input.AgentID != "" {
|
|
||||||
values.Set("agent_id", input.AgentID)
|
|
||||||
}
|
|
||||||
values.Set("limit", fmt.Sprintf("%d", limit))
|
|
||||||
|
|
||||||
result, err := s.apiCall(ctx, http.MethodGet, "/v1/brain/list?"+values.Encode(), nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, ListOutput{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var memories []Memory
|
|
||||||
if mems, ok := result["memories"].([]any); ok {
|
|
||||||
for _, m := range mems {
|
|
||||||
if mm, ok := m.(map[string]any); ok {
|
|
||||||
mem := Memory{
|
|
||||||
Content: fmt.Sprintf("%v", mm["content"]),
|
|
||||||
Type: fmt.Sprintf("%v", mm["type"]),
|
|
||||||
Project: fmt.Sprintf("%v", mm["project"]),
|
|
||||||
AgentID: fmt.Sprintf("%v", mm["agent_id"]),
|
|
||||||
CreatedAt: fmt.Sprintf("%v", mm["created_at"]),
|
|
||||||
}
|
|
||||||
if id, ok := mm["id"].(string); ok {
|
|
||||||
mem.ID = id
|
|
||||||
}
|
|
||||||
if score, ok := mm["score"].(float64); ok {
|
|
||||||
mem.Confidence = score
|
|
||||||
}
|
|
||||||
if source, ok := mm["source"].(string); ok {
|
|
||||||
mem.Tags = append(mem.Tags, "source:"+source)
|
|
||||||
}
|
|
||||||
memories = append(memories, mem)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.onChannel != nil {
|
|
||||||
s.onChannel(ctx, coremcp.ChannelBrainListDone, map[string]any{
|
|
||||||
"project": input.Project,
|
|
||||||
"type": input.Type,
|
|
||||||
"agent_id": input.AgentID,
|
|
||||||
"limit": limit,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, ListOutput{
|
|
||||||
Success: true,
|
|
||||||
Count: len(memories),
|
|
||||||
Memories: memories,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -207,8 +207,8 @@ func TestDirectRecall_Good(t *testing.T) {
|
||||||
|
|
||||||
s := newTestDirect(srv.URL)
|
s := newTestDirect(srv.URL)
|
||||||
_, out, err := s.recall(context.Background(), nil, RecallInput{
|
_, out, err := s.recall(context.Background(), nil, RecallInput{
|
||||||
Query: "scoring algorithm",
|
Query: "scoring algorithm",
|
||||||
TopK: 5,
|
TopK: 5,
|
||||||
Filter: RecallFilter{Project: "eaas"},
|
Filter: RecallFilter{Project: "eaas"},
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -290,48 +290,6 @@ func TestDirectForget_Good(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDirectForget_Good_EmitsChannel(t *testing.T) {
|
|
||||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(200)
|
|
||||||
json.NewEncoder(w).Encode(map[string]any{"success": true})
|
|
||||||
}))
|
|
||||||
defer srv.Close()
|
|
||||||
|
|
||||||
var gotChannel string
|
|
||||||
var gotPayload map[string]any
|
|
||||||
|
|
||||||
s := newTestDirect(srv.URL)
|
|
||||||
s.onChannel = func(_ context.Context, channel string, data any) {
|
|
||||||
gotChannel = channel
|
|
||||||
if payload, ok := data.(map[string]any); ok {
|
|
||||||
gotPayload = payload
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := s.forget(context.Background(), nil, ForgetInput{
|
|
||||||
ID: "mem-789",
|
|
||||||
Reason: "outdated",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("forget failed: %v", err)
|
|
||||||
}
|
|
||||||
if !out.Success {
|
|
||||||
t.Fatal("expected success=true")
|
|
||||||
}
|
|
||||||
if gotChannel != "brain.forget.complete" {
|
|
||||||
t.Fatalf("expected brain.forget.complete, got %q", gotChannel)
|
|
||||||
}
|
|
||||||
if gotPayload == nil {
|
|
||||||
t.Fatal("expected channel payload")
|
|
||||||
}
|
|
||||||
if gotPayload["id"] != "mem-789" {
|
|
||||||
t.Fatalf("expected id=mem-789, got %v", gotPayload["id"])
|
|
||||||
}
|
|
||||||
if gotPayload["reason"] != "outdated" {
|
|
||||||
t.Fatalf("expected reason=outdated, got %v", gotPayload["reason"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDirectForget_Bad_ApiError(t *testing.T) {
|
func TestDirectForget_Bad_ApiError(t *testing.T) {
|
||||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
w.WriteHeader(404)
|
w.WriteHeader(404)
|
||||||
|
|
@ -345,124 +303,3 @@ func TestDirectForget_Bad_ApiError(t *testing.T) {
|
||||||
t.Error("expected error on 404")
|
t.Error("expected error on 404")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- list tool tests ---
|
|
||||||
|
|
||||||
func TestDirectList_Good(t *testing.T) {
|
|
||||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if r.Method != http.MethodGet {
|
|
||||||
t.Errorf("expected GET, got %s", r.Method)
|
|
||||||
}
|
|
||||||
if got := r.URL.Query().Get("project"); got != "eaas" {
|
|
||||||
t.Errorf("expected project=eaas, got %q", got)
|
|
||||||
}
|
|
||||||
if got := r.URL.Query().Get("type"); got != "decision" {
|
|
||||||
t.Errorf("expected type=decision, got %q", got)
|
|
||||||
}
|
|
||||||
if got := r.URL.Query().Get("agent_id"); got != "virgil" {
|
|
||||||
t.Errorf("expected agent_id=virgil, got %q", got)
|
|
||||||
}
|
|
||||||
if got := r.URL.Query().Get("limit"); got != "20" {
|
|
||||||
t.Errorf("expected limit=20, got %q", got)
|
|
||||||
}
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
json.NewEncoder(w).Encode(map[string]any{
|
|
||||||
"memories": []any{
|
|
||||||
map[string]any{
|
|
||||||
"id": "mem-1",
|
|
||||||
"content": "use qdrant",
|
|
||||||
"type": "decision",
|
|
||||||
"project": "eaas",
|
|
||||||
"agent_id": "virgil",
|
|
||||||
"score": 0.88,
|
|
||||||
"created_at": "2026-03-01T00:00:00Z",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}))
|
|
||||||
defer srv.Close()
|
|
||||||
|
|
||||||
s := newTestDirect(srv.URL)
|
|
||||||
_, out, err := s.list(context.Background(), nil, ListInput{
|
|
||||||
Project: "eaas",
|
|
||||||
Type: "decision",
|
|
||||||
AgentID: "virgil",
|
|
||||||
Limit: 20,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("list failed: %v", err)
|
|
||||||
}
|
|
||||||
if !out.Success || out.Count != 1 {
|
|
||||||
t.Fatalf("expected 1 memory, got %+v", out)
|
|
||||||
}
|
|
||||||
if out.Memories[0].ID != "mem-1" {
|
|
||||||
t.Errorf("expected id=mem-1, got %q", out.Memories[0].ID)
|
|
||||||
}
|
|
||||||
if out.Memories[0].Confidence != 0.88 {
|
|
||||||
t.Errorf("expected score=0.88, got %f", out.Memories[0].Confidence)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDirectList_Good_EmitsAgentIDChannelPayload(t *testing.T) {
|
|
||||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
json.NewEncoder(w).Encode(map[string]any{"memories": []any{}})
|
|
||||||
}))
|
|
||||||
defer srv.Close()
|
|
||||||
|
|
||||||
var gotChannel string
|
|
||||||
var gotPayload map[string]any
|
|
||||||
|
|
||||||
s := newTestDirect(srv.URL)
|
|
||||||
s.onChannel = func(_ context.Context, channel string, data any) {
|
|
||||||
gotChannel = channel
|
|
||||||
if payload, ok := data.(map[string]any); ok {
|
|
||||||
gotPayload = payload
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := s.list(context.Background(), nil, ListInput{
|
|
||||||
Project: "eaas",
|
|
||||||
Type: "decision",
|
|
||||||
AgentID: "virgil",
|
|
||||||
Limit: 20,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("list failed: %v", err)
|
|
||||||
}
|
|
||||||
if !out.Success {
|
|
||||||
t.Fatal("expected list success")
|
|
||||||
}
|
|
||||||
if gotChannel != "brain.list.complete" {
|
|
||||||
t.Fatalf("expected brain.list.complete, got %q", gotChannel)
|
|
||||||
}
|
|
||||||
if gotPayload == nil {
|
|
||||||
t.Fatal("expected channel payload")
|
|
||||||
}
|
|
||||||
if gotPayload["agent_id"] != "virgil" {
|
|
||||||
t.Fatalf("expected agent_id=virgil, got %v", gotPayload["agent_id"])
|
|
||||||
}
|
|
||||||
if gotPayload["project"] != "eaas" {
|
|
||||||
t.Fatalf("expected project=eaas, got %v", gotPayload["project"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDirectList_Good_DefaultLimit(t *testing.T) {
|
|
||||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if got := r.URL.Query().Get("limit"); got != "50" {
|
|
||||||
t.Errorf("expected limit=50, got %q", got)
|
|
||||||
}
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
json.NewEncoder(w).Encode(map[string]any{"memories": []any{}})
|
|
||||||
}))
|
|
||||||
defer srv.Close()
|
|
||||||
|
|
||||||
s := newTestDirect(srv.URL)
|
|
||||||
_, out, err := s.list(context.Background(), nil, ListInput{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("list failed: %v", err)
|
|
||||||
}
|
|
||||||
if !out.Success || out.Count != 0 {
|
|
||||||
t.Fatalf("expected empty list, got %+v", out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -5,11 +5,10 @@ package brain
|
||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
|
||||||
"forge.lthn.ai/core/api"
|
"forge.lthn.ai/core/api"
|
||||||
"forge.lthn.ai/core/api/pkg/provider"
|
"forge.lthn.ai/core/api/pkg/provider"
|
||||||
"forge.lthn.ai/core/go-ws"
|
"forge.lthn.ai/core/go-ws"
|
||||||
|
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -31,16 +30,10 @@ var (
|
||||||
// NewProvider creates a brain provider that proxies to Laravel via the IDE bridge.
|
// NewProvider creates a brain provider that proxies to Laravel via the IDE bridge.
|
||||||
// The WS hub is used to emit brain events. Pass nil for hub if not needed.
|
// The WS hub is used to emit brain events. Pass nil for hub if not needed.
|
||||||
func NewProvider(bridge *ide.Bridge, hub *ws.Hub) *BrainProvider {
|
func NewProvider(bridge *ide.Bridge, hub *ws.Hub) *BrainProvider {
|
||||||
p := &BrainProvider{
|
return &BrainProvider{
|
||||||
bridge: bridge,
|
bridge: bridge,
|
||||||
hub: hub,
|
hub: hub,
|
||||||
}
|
}
|
||||||
if bridge != nil {
|
|
||||||
bridge.AddObserver(func(msg ide.BridgeMessage) {
|
|
||||||
p.handleBridgeMessage(msg)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return p
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name implements api.RouteGroup.
|
// Name implements api.RouteGroup.
|
||||||
|
|
@ -52,10 +45,9 @@ func (p *BrainProvider) BasePath() string { return "/api/brain" }
|
||||||
// Channels implements provider.Streamable.
|
// Channels implements provider.Streamable.
|
||||||
func (p *BrainProvider) Channels() []string {
|
func (p *BrainProvider) Channels() []string {
|
||||||
return []string{
|
return []string{
|
||||||
coremcp.ChannelBrainRememberDone,
|
"brain.remember.complete",
|
||||||
coremcp.ChannelBrainRecallDone,
|
"brain.recall.complete",
|
||||||
coremcp.ChannelBrainForgetDone,
|
"brain.forget.complete",
|
||||||
coremcp.ChannelBrainListDone,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -219,7 +211,7 @@ func (p *BrainProvider) remember(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p.emitEvent(coremcp.ChannelBrainRememberDone, map[string]any{
|
p.emitEvent("brain.remember.complete", map[string]any{
|
||||||
"type": input.Type,
|
"type": input.Type,
|
||||||
"project": input.Project,
|
"project": input.Project,
|
||||||
})
|
})
|
||||||
|
|
@ -252,6 +244,10 @@ func (p *BrainProvider) recall(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
p.emitEvent("brain.recall.complete", map[string]any{
|
||||||
|
"query": input.Query,
|
||||||
|
})
|
||||||
|
|
||||||
c.JSON(http.StatusOK, api.OK(RecallOutput{
|
c.JSON(http.StatusOK, api.OK(RecallOutput{
|
||||||
Success: true,
|
Success: true,
|
||||||
Memories: []Memory{},
|
Memories: []Memory{},
|
||||||
|
|
@ -282,7 +278,7 @@ func (p *BrainProvider) forget(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p.emitEvent(coremcp.ChannelBrainForgetDone, map[string]any{
|
p.emitEvent("brain.forget.complete", map[string]any{
|
||||||
"id": input.ID,
|
"id": input.ID,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|
@ -298,18 +294,13 @@ func (p *BrainProvider) list(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
project := c.Query("project")
|
|
||||||
typ := c.Query("type")
|
|
||||||
agentID := c.Query("agent_id")
|
|
||||||
limit := c.Query("limit")
|
|
||||||
|
|
||||||
err := p.bridge.Send(ide.BridgeMessage{
|
err := p.bridge.Send(ide.BridgeMessage{
|
||||||
Type: "brain_list",
|
Type: "brain_list",
|
||||||
Data: map[string]any{
|
Data: map[string]any{
|
||||||
"project": project,
|
"project": c.Query("project"),
|
||||||
"type": typ,
|
"type": c.Query("type"),
|
||||||
"agent_id": agentID,
|
"agent_id": c.Query("agent_id"),
|
||||||
"limit": limit,
|
"limit": c.Query("limit"),
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -317,13 +308,6 @@ func (p *BrainProvider) list(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p.emitEvent(coremcp.ChannelBrainListDone, map[string]any{
|
|
||||||
"project": project,
|
|
||||||
"type": typ,
|
|
||||||
"agent_id": agentID,
|
|
||||||
"limit": limit,
|
|
||||||
})
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, api.OK(ListOutput{
|
c.JSON(http.StatusOK, api.OK(ListOutput{
|
||||||
Success: true,
|
Success: true,
|
||||||
Memories: []Memory{},
|
Memories: []Memory{},
|
||||||
|
|
@ -350,18 +334,3 @@ func (p *BrainProvider) emitEvent(channel string, data any) {
|
||||||
Data: data,
|
Data: data,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *BrainProvider) handleBridgeMessage(msg ide.BridgeMessage) {
|
|
||||||
switch msg.Type {
|
|
||||||
case "brain_remember":
|
|
||||||
p.emitEvent(coremcp.ChannelBrainRememberDone, bridgePayload(msg.Data, "type", "project"))
|
|
||||||
case "brain_recall":
|
|
||||||
payload := bridgePayload(msg.Data, "query", "project", "type", "agent_id")
|
|
||||||
payload["count"] = bridgeCount(msg.Data)
|
|
||||||
p.emitEvent(coremcp.ChannelBrainRecallDone, payload)
|
|
||||||
case "brain_forget":
|
|
||||||
p.emitEvent(coremcp.ChannelBrainForgetDone, bridgePayload(msg.Data, "id", "reason"))
|
|
||||||
case "brain_list":
|
|
||||||
p.emitEvent(coremcp.ChannelBrainListDone, bridgePayload(msg.Data, "project", "type", "agent_id", "limit"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -1,38 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package brain
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBrainProviderChannels_Good_IncludesListComplete(t *testing.T) {
|
|
||||||
p := NewProvider(nil, nil)
|
|
||||||
|
|
||||||
channels := p.Channels()
|
|
||||||
found := false
|
|
||||||
for _, channel := range channels {
|
|
||||||
if channel == "brain.list.complete" {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !found {
|
|
||||||
t.Fatalf("expected brain.list.complete in provider channels: %#v", channels)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBrainProviderHandleBridgeMessage_Good_SupportsBrainEvents(t *testing.T) {
|
|
||||||
p := NewProvider(nil, nil)
|
|
||||||
for _, msg := range []ide.BridgeMessage{
|
|
||||||
{Type: "brain_remember", Data: map[string]any{"type": "bug", "project": "core/mcp"}},
|
|
||||||
{Type: "brain_recall", Data: map[string]any{"query": "test", "memories": []any{map[string]any{"id": "m1"}}}},
|
|
||||||
{Type: "brain_forget", Data: map[string]any{"id": "mem-123", "reason": "outdated"}},
|
|
||||||
{Type: "brain_list", Data: map[string]any{"project": "core/mcp", "limit": 10}},
|
|
||||||
} {
|
|
||||||
p.handleBridgeMessage(msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -6,7 +6,6 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
"dappco.re/go/mcp/pkg/mcp/ide"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
|
|
@ -22,8 +21,6 @@ func (s *Subsystem) emitChannel(ctx context.Context, channel string, data any) {
|
||||||
// -- Input/Output types -------------------------------------------------------
|
// -- Input/Output types -------------------------------------------------------
|
||||||
|
|
||||||
// RememberInput is the input for brain_remember.
|
// RememberInput is the input for brain_remember.
|
||||||
//
|
|
||||||
// input := RememberInput{Content: "Use Qdrant for vector search", Type: "decision"}
|
|
||||||
type RememberInput struct {
|
type RememberInput struct {
|
||||||
Content string `json:"content"`
|
Content string `json:"content"`
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
|
|
@ -35,8 +32,6 @@ type RememberInput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// RememberOutput is the output for brain_remember.
|
// RememberOutput is the output for brain_remember.
|
||||||
//
|
|
||||||
// // out.Success == true
|
|
||||||
type RememberOutput struct {
|
type RememberOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
MemoryID string `json:"memoryId,omitempty"`
|
MemoryID string `json:"memoryId,omitempty"`
|
||||||
|
|
@ -44,8 +39,6 @@ type RememberOutput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// RecallInput is the input for brain_recall.
|
// RecallInput is the input for brain_recall.
|
||||||
//
|
|
||||||
// input := RecallInput{Query: "vector search", TopK: 5}
|
|
||||||
type RecallInput struct {
|
type RecallInput struct {
|
||||||
Query string `json:"query"`
|
Query string `json:"query"`
|
||||||
TopK int `json:"top_k,omitempty"`
|
TopK int `json:"top_k,omitempty"`
|
||||||
|
|
@ -53,8 +46,6 @@ type RecallInput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// RecallFilter holds optional filter criteria for brain_recall.
|
// RecallFilter holds optional filter criteria for brain_recall.
|
||||||
//
|
|
||||||
// filter := RecallFilter{Project: "core/mcp", MinConfidence: 0.5}
|
|
||||||
type RecallFilter struct {
|
type RecallFilter struct {
|
||||||
Project string `json:"project,omitempty"`
|
Project string `json:"project,omitempty"`
|
||||||
Type any `json:"type,omitempty"`
|
Type any `json:"type,omitempty"`
|
||||||
|
|
@ -63,8 +54,6 @@ type RecallFilter struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// RecallOutput is the output for brain_recall.
|
// RecallOutput is the output for brain_recall.
|
||||||
//
|
|
||||||
// // out.Memories contains ranked matches
|
|
||||||
type RecallOutput struct {
|
type RecallOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Count int `json:"count"`
|
Count int `json:"count"`
|
||||||
|
|
@ -72,8 +61,6 @@ type RecallOutput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Memory is a single memory entry returned by recall or list.
|
// Memory is a single memory entry returned by recall or list.
|
||||||
//
|
|
||||||
// mem := Memory{ID: "m1", Type: "bug", Content: "Fix timeout handling"}
|
|
||||||
type Memory struct {
|
type Memory struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
AgentID string `json:"agent_id"`
|
AgentID string `json:"agent_id"`
|
||||||
|
|
@ -89,16 +76,12 @@ type Memory struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForgetInput is the input for brain_forget.
|
// ForgetInput is the input for brain_forget.
|
||||||
//
|
|
||||||
// input := ForgetInput{ID: "m1"}
|
|
||||||
type ForgetInput struct {
|
type ForgetInput struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
Reason string `json:"reason,omitempty"`
|
Reason string `json:"reason,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForgetOutput is the output for brain_forget.
|
// ForgetOutput is the output for brain_forget.
|
||||||
//
|
|
||||||
// // out.Forgotten contains the deleted memory ID
|
|
||||||
type ForgetOutput struct {
|
type ForgetOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Forgotten string `json:"forgotten"`
|
Forgotten string `json:"forgotten"`
|
||||||
|
|
@ -106,8 +89,6 @@ type ForgetOutput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListInput is the input for brain_list.
|
// ListInput is the input for brain_list.
|
||||||
//
|
|
||||||
// input := ListInput{Project: "core/mcp", Limit: 50}
|
|
||||||
type ListInput struct {
|
type ListInput struct {
|
||||||
Project string `json:"project,omitempty"`
|
Project string `json:"project,omitempty"`
|
||||||
Type string `json:"type,omitempty"`
|
Type string `json:"type,omitempty"`
|
||||||
|
|
@ -116,8 +97,6 @@ type ListInput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListOutput is the output for brain_list.
|
// ListOutput is the output for brain_list.
|
||||||
//
|
|
||||||
// // out.Count reports how many memories were returned
|
|
||||||
type ListOutput struct {
|
type ListOutput struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Count int `json:"count"`
|
Count int `json:"count"`
|
||||||
|
|
@ -126,24 +105,23 @@ type ListOutput struct {
|
||||||
|
|
||||||
// -- Tool registration --------------------------------------------------------
|
// -- Tool registration --------------------------------------------------------
|
||||||
|
|
||||||
func (s *Subsystem) registerBrainTools(svc *coremcp.Service) {
|
func (s *Subsystem) registerBrainTools(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
|
||||||
Name: "brain_remember",
|
Name: "brain_remember",
|
||||||
Description: "Store a memory in the shared OpenBrain knowledge store. Persists decisions, observations, conventions, research, plans, bugs, or architecture knowledge for other agents.",
|
Description: "Store a memory in the shared OpenBrain knowledge store. Persists decisions, observations, conventions, research, plans, bugs, or architecture knowledge for other agents.",
|
||||||
}, s.brainRemember)
|
}, s.brainRemember)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "brain_recall",
|
Name: "brain_recall",
|
||||||
Description: "Semantic search across the shared OpenBrain knowledge store. Returns memories ranked by similarity to your query, with optional filtering.",
|
Description: "Semantic search across the shared OpenBrain knowledge store. Returns memories ranked by similarity to your query, with optional filtering.",
|
||||||
}, s.brainRecall)
|
}, s.brainRecall)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "brain_forget",
|
Name: "brain_forget",
|
||||||
Description: "Remove a memory from the shared OpenBrain knowledge store. Permanently deletes from both database and vector index.",
|
Description: "Remove a memory from the shared OpenBrain knowledge store. Permanently deletes from both database and vector index.",
|
||||||
}, s.brainForget)
|
}, s.brainForget)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "brain", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "brain_list",
|
Name: "brain_list",
|
||||||
Description: "List memories in the shared OpenBrain knowledge store. Supports filtering by project, type, and agent. No vector search -- use brain_recall for semantic queries.",
|
Description: "List memories in the shared OpenBrain knowledge store. Supports filtering by project, type, and agent. No vector search -- use brain_recall for semantic queries.",
|
||||||
}, s.brainList)
|
}, s.brainList)
|
||||||
|
|
@ -172,7 +150,7 @@ func (s *Subsystem) brainRemember(ctx context.Context, _ *mcp.CallToolRequest, i
|
||||||
return nil, RememberOutput{}, coreerr.E("brain.remember", "failed to send brain_remember", err)
|
return nil, RememberOutput{}, coreerr.E("brain.remember", "failed to send brain_remember", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.emitChannel(ctx, coremcp.ChannelBrainRememberDone, map[string]any{
|
s.emitChannel(ctx, "brain.remember.complete", map[string]any{
|
||||||
"type": input.Type,
|
"type": input.Type,
|
||||||
"project": input.Project,
|
"project": input.Project,
|
||||||
})
|
})
|
||||||
|
|
@ -200,6 +178,11 @@ func (s *Subsystem) brainRecall(ctx context.Context, _ *mcp.CallToolRequest, inp
|
||||||
return nil, RecallOutput{}, coreerr.E("brain.recall", "failed to send brain_recall", err)
|
return nil, RecallOutput{}, coreerr.E("brain.recall", "failed to send brain_recall", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
s.emitChannel(ctx, "brain.recall.complete", map[string]any{
|
||||||
|
"query": input.Query,
|
||||||
|
"count": 0,
|
||||||
|
})
|
||||||
|
|
||||||
return nil, RecallOutput{
|
return nil, RecallOutput{
|
||||||
Success: true,
|
Success: true,
|
||||||
Memories: []Memory{},
|
Memories: []Memory{},
|
||||||
|
|
@ -222,7 +205,7 @@ func (s *Subsystem) brainForget(ctx context.Context, _ *mcp.CallToolRequest, inp
|
||||||
return nil, ForgetOutput{}, coreerr.E("brain.forget", "failed to send brain_forget", err)
|
return nil, ForgetOutput{}, coreerr.E("brain.forget", "failed to send brain_forget", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.emitChannel(ctx, coremcp.ChannelBrainForgetDone, map[string]any{
|
s.emitChannel(ctx, "brain.forget.complete", map[string]any{
|
||||||
"id": input.ID,
|
"id": input.ID,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|
@ -255,11 +238,11 @@ func (s *Subsystem) brainList(ctx context.Context, _ *mcp.CallToolRequest, input
|
||||||
return nil, ListOutput{}, coreerr.E("brain.list", "failed to send brain_list", err)
|
return nil, ListOutput{}, coreerr.E("brain.list", "failed to send brain_list", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.emitChannel(ctx, coremcp.ChannelBrainListDone, map[string]any{
|
s.emitChannel(ctx, "brain.list.complete", map[string]any{
|
||||||
"project": input.Project,
|
"project": input.Project,
|
||||||
"type": input.Type,
|
"type": input.Type,
|
||||||
"agent_id": input.AgentID,
|
"agent": input.AgentID,
|
||||||
"limit": limit,
|
"limit": limit,
|
||||||
})
|
})
|
||||||
|
|
||||||
return nil, ListOutput{
|
return nil, ListOutput{
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,6 @@
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
core "dappco.re/go/core"
|
core "dappco.re/go/core"
|
||||||
|
|
@ -24,10 +23,6 @@ const maxBodySize = 10 << 20 // 10 MB
|
||||||
// mcp.BridgeToAPI(svc, bridge)
|
// mcp.BridgeToAPI(svc, bridge)
|
||||||
// bridge.Mount(router, "/v1/tools")
|
// bridge.Mount(router, "/v1/tools")
|
||||||
func BridgeToAPI(svc *Service, bridge *api.ToolBridge) {
|
func BridgeToAPI(svc *Service, bridge *api.ToolBridge) {
|
||||||
if svc == nil || bridge == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for rec := range svc.ToolsSeq() {
|
for rec := range svc.ToolsSeq() {
|
||||||
desc := api.ToolDescriptor{
|
desc := api.ToolDescriptor{
|
||||||
Name: rec.Name,
|
Name: rec.Name,
|
||||||
|
|
@ -43,16 +38,8 @@ func BridgeToAPI(svc *Service, bridge *api.ToolBridge) {
|
||||||
bridge.Add(desc, func(c *gin.Context) {
|
bridge.Add(desc, func(c *gin.Context) {
|
||||||
var body []byte
|
var body []byte
|
||||||
if c.Request.Body != nil {
|
if c.Request.Body != nil {
|
||||||
c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, maxBodySize)
|
|
||||||
r := core.ReadAll(c.Request.Body)
|
r := core.ReadAll(c.Request.Body)
|
||||||
if !r.OK {
|
if !r.OK {
|
||||||
if err, ok := r.Value.(error); ok {
|
|
||||||
var maxBytesErr *http.MaxBytesError
|
|
||||||
if errors.As(err, &maxBytesErr) || core.Contains(err.Error(), "request body too large") {
|
|
||||||
c.JSON(http.StatusRequestEntityTooLarge, api.Fail("request_too_large", "Request body exceeds 10 MB limit"))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.JSON(http.StatusBadRequest, api.Fail("invalid_request", "Failed to read request body"))
|
c.JSON(http.StatusBadRequest, api.Fail("invalid_request", "Failed to read request body"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -63,7 +50,7 @@ func BridgeToAPI(svc *Service, bridge *api.ToolBridge) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Body present + error = likely bad input (malformed JSON).
|
// Body present + error = likely bad input (malformed JSON).
|
||||||
// No body + error = tool execution failure.
|
// No body + error = tool execution failure.
|
||||||
if errors.Is(err, errInvalidRESTInput) {
|
if len(body) > 0 && core.Contains(err.Error(), "unmarshal") {
|
||||||
c.JSON(http.StatusBadRequest, api.Fail("invalid_input", "Malformed JSON in request body"))
|
c.JSON(http.StatusBadRequest, api.Fail("invalid_input", "Malformed JSON in request body"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,6 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
// SPDX-License-Identifier: EUPL-1.2
|
||||||
|
|
||||||
package mcp_test
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
|
@ -13,10 +13,6 @@ import (
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
|
||||||
mcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
"dappco.re/go/mcp/pkg/mcp/agentic"
|
|
||||||
"dappco.re/go/mcp/pkg/mcp/brain"
|
|
||||||
"dappco.re/go/mcp/pkg/mcp/ide"
|
|
||||||
api "forge.lthn.ai/core/api"
|
api "forge.lthn.ai/core/api"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -25,20 +21,13 @@ func init() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBridgeToAPI_Good_AllTools(t *testing.T) {
|
func TestBridgeToAPI_Good_AllTools(t *testing.T) {
|
||||||
svc, err := mcp.New(mcp.Options{
|
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
||||||
WorkspaceRoot: t.TempDir(),
|
|
||||||
Subsystems: []mcp.Subsystem{
|
|
||||||
brain.New(nil),
|
|
||||||
agentic.NewPrep(),
|
|
||||||
ide.New(nil, ide.Config{}),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
bridge := api.NewToolBridge("/tools")
|
bridge := api.NewToolBridge("/tools")
|
||||||
mcp.BridgeToAPI(svc, bridge)
|
BridgeToAPI(svc, bridge)
|
||||||
|
|
||||||
svcCount := len(svc.Tools())
|
svcCount := len(svc.Tools())
|
||||||
bridgeCount := len(bridge.Tools())
|
bridgeCount := len(bridge.Tools())
|
||||||
|
|
@ -60,22 +49,16 @@ func TestBridgeToAPI_Good_AllTools(t *testing.T) {
|
||||||
t.Errorf("bridge has tool %q not found in service", td.Name)
|
t.Errorf("bridge has tool %q not found in service", td.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, want := range []string{"brain_list", "agentic_plan_create", "ide_dashboard_overview"} {
|
|
||||||
if !svcNames[want] {
|
|
||||||
t.Fatalf("expected recorded tool %q to be present", want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBridgeToAPI_Good_DescribableGroup(t *testing.T) {
|
func TestBridgeToAPI_Good_DescribableGroup(t *testing.T) {
|
||||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: t.TempDir()})
|
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
bridge := api.NewToolBridge("/tools")
|
bridge := api.NewToolBridge("/tools")
|
||||||
mcp.BridgeToAPI(svc, bridge)
|
BridgeToAPI(svc, bridge)
|
||||||
|
|
||||||
// ToolBridge implements DescribableGroup.
|
// ToolBridge implements DescribableGroup.
|
||||||
var dg api.DescribableGroup = bridge
|
var dg api.DescribableGroup = bridge
|
||||||
|
|
@ -107,13 +90,13 @@ func TestBridgeToAPI_Good_FileRead(t *testing.T) {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: tmpDir})
|
svc, err := New(Options{WorkspaceRoot: tmpDir})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
bridge := api.NewToolBridge("/tools")
|
bridge := api.NewToolBridge("/tools")
|
||||||
mcp.BridgeToAPI(svc, bridge)
|
BridgeToAPI(svc, bridge)
|
||||||
|
|
||||||
// Register with a Gin engine and make a request.
|
// Register with a Gin engine and make a request.
|
||||||
engine := gin.New()
|
engine := gin.New()
|
||||||
|
|
@ -131,7 +114,7 @@ func TestBridgeToAPI_Good_FileRead(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse the response envelope.
|
// Parse the response envelope.
|
||||||
var resp api.Response[mcp.ReadFileOutput]
|
var resp api.Response[ReadFileOutput]
|
||||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||||
t.Fatalf("unmarshal error: %v", err)
|
t.Fatalf("unmarshal error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -147,13 +130,13 @@ func TestBridgeToAPI_Good_FileRead(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBridgeToAPI_Bad_InvalidJSON(t *testing.T) {
|
func TestBridgeToAPI_Bad_InvalidJSON(t *testing.T) {
|
||||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: t.TempDir()})
|
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
bridge := api.NewToolBridge("/tools")
|
bridge := api.NewToolBridge("/tools")
|
||||||
mcp.BridgeToAPI(svc, bridge)
|
BridgeToAPI(svc, bridge)
|
||||||
|
|
||||||
engine := gin.New()
|
engine := gin.New()
|
||||||
rg := engine.Group(bridge.BasePath())
|
rg := engine.Group(bridge.BasePath())
|
||||||
|
|
@ -165,8 +148,13 @@ func TestBridgeToAPI_Bad_InvalidJSON(t *testing.T) {
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
engine.ServeHTTP(w, req)
|
engine.ServeHTTP(w, req)
|
||||||
|
|
||||||
if w.Code != http.StatusBadRequest {
|
if w.Code != http.StatusInternalServerError {
|
||||||
t.Fatalf("expected 400 for invalid JSON, got %d: %s", w.Code, w.Body.String())
|
// The handler unmarshals via RESTHandler which returns an error,
|
||||||
|
// but since it's a JSON parse error it ends up as tool_error.
|
||||||
|
// Check we get a non-200 with an error envelope.
|
||||||
|
if w.Code == http.StatusOK {
|
||||||
|
t.Fatalf("expected non-200 for invalid JSON, got 200")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var resp api.Response[any]
|
var resp api.Response[any]
|
||||||
|
|
@ -181,49 +169,14 @@ func TestBridgeToAPI_Bad_InvalidJSON(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBridgeToAPI_Bad_OversizedBody(t *testing.T) {
|
|
||||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: t.TempDir()})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
bridge := api.NewToolBridge("/tools")
|
|
||||||
mcp.BridgeToAPI(svc, bridge)
|
|
||||||
|
|
||||||
engine := gin.New()
|
|
||||||
rg := engine.Group(bridge.BasePath())
|
|
||||||
bridge.RegisterRoutes(rg)
|
|
||||||
|
|
||||||
body := strings.Repeat("a", 10<<20+1)
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
req, _ := http.NewRequest(http.MethodPost, "/tools/file_read", strings.NewReader(body))
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
engine.ServeHTTP(w, req)
|
|
||||||
|
|
||||||
if w.Code != http.StatusRequestEntityTooLarge {
|
|
||||||
t.Fatalf("expected 413 for oversized body, got %d: %s", w.Code, w.Body.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp api.Response[any]
|
|
||||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
|
||||||
t.Fatalf("unmarshal error: %v", err)
|
|
||||||
}
|
|
||||||
if resp.Success {
|
|
||||||
t.Fatal("expected Success=false for oversized body")
|
|
||||||
}
|
|
||||||
if resp.Error == nil {
|
|
||||||
t.Fatal("expected error in response")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBridgeToAPI_Good_EndToEnd(t *testing.T) {
|
func TestBridgeToAPI_Good_EndToEnd(t *testing.T) {
|
||||||
svc, err := mcp.New(mcp.Options{WorkspaceRoot: t.TempDir()})
|
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
bridge := api.NewToolBridge("/tools")
|
bridge := api.NewToolBridge("/tools")
|
||||||
mcp.BridgeToAPI(svc, bridge)
|
BridgeToAPI(svc, bridge)
|
||||||
|
|
||||||
// Create an api.Engine with the bridge registered and Swagger enabled.
|
// Create an api.Engine with the bridge registered and Swagger enabled.
|
||||||
e, err := api.New(
|
e, err := api.New(
|
||||||
|
|
@ -259,7 +212,7 @@ func TestBridgeToAPI_Good_EndToEnd(t *testing.T) {
|
||||||
t.Fatalf("expected 200 for /tools/lang_list, got %d", resp2.StatusCode)
|
t.Fatalf("expected 200 for /tools/lang_list, got %d", resp2.StatusCode)
|
||||||
}
|
}
|
||||||
|
|
||||||
var langResp api.Response[mcp.GetSupportedLanguagesOutput]
|
var langResp api.Response[GetSupportedLanguagesOutput]
|
||||||
if err := json.NewDecoder(resp2.Body).Decode(&langResp); err != nil {
|
if err := json.NewDecoder(resp2.Body).Decode(&langResp); err != nil {
|
||||||
t.Fatalf("unmarshal error: %v", err)
|
t.Fatalf("unmarshal error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,3 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package ide
|
package ide
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
@ -14,13 +12,7 @@ import (
|
||||||
"github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BridgeMessage is the wire format between the IDE bridge and Laravel.
|
// BridgeMessage is the wire format between the IDE and Laravel.
|
||||||
//
|
|
||||||
// msg := BridgeMessage{
|
|
||||||
// Type: "chat_send",
|
|
||||||
// SessionID: "sess-42",
|
|
||||||
// Data: "hello",
|
|
||||||
// }
|
|
||||||
type BridgeMessage struct {
|
type BridgeMessage struct {
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
Channel string `json:"channel,omitempty"`
|
Channel string `json:"channel,omitempty"`
|
||||||
|
|
@ -31,67 +23,32 @@ type BridgeMessage struct {
|
||||||
|
|
||||||
// Bridge maintains a WebSocket connection to the Laravel core-agentic
|
// Bridge maintains a WebSocket connection to the Laravel core-agentic
|
||||||
// backend and forwards responses to a local ws.Hub.
|
// backend and forwards responses to a local ws.Hub.
|
||||||
//
|
|
||||||
// bridge := NewBridge(hub, cfg)
|
|
||||||
type Bridge struct {
|
type Bridge struct {
|
||||||
cfg Config
|
config Config
|
||||||
hub *ws.Hub
|
hub *ws.Hub
|
||||||
conn *websocket.Conn
|
conn *websocket.Conn
|
||||||
|
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
connected bool
|
connected bool
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
observers []func(BridgeMessage)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBridge creates a bridge that will connect to the Laravel backend and
|
// NewBridge creates a bridge that will connect to the Laravel backend and
|
||||||
// forward incoming messages to the provided ws.Hub channels.
|
// forward incoming messages to the provided ws.Hub channels.
|
||||||
//
|
//
|
||||||
// bridge := NewBridge(hub, cfg)
|
// bridge := ide.NewBridge(hub, ide.DefaultConfig())
|
||||||
func NewBridge(hub *ws.Hub, cfg Config) *Bridge {
|
func NewBridge(hub *ws.Hub, configuration Config) *Bridge {
|
||||||
return &Bridge{cfg: cfg, hub: hub}
|
return &Bridge{config: configuration, hub: hub}
|
||||||
}
|
|
||||||
|
|
||||||
// SetObserver registers a callback for inbound bridge messages.
|
|
||||||
//
|
|
||||||
// bridge.SetObserver(func(msg BridgeMessage) {
|
|
||||||
// fmt.Println(msg.Type)
|
|
||||||
// })
|
|
||||||
func (b *Bridge) SetObserver(fn func(BridgeMessage)) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
if fn == nil {
|
|
||||||
b.observers = nil
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b.observers = []func(BridgeMessage){fn}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddObserver registers an additional bridge observer.
|
|
||||||
// Observers are invoked in registration order after each inbound message.
|
|
||||||
//
|
|
||||||
// bridge.AddObserver(func(msg BridgeMessage) { log.Println(msg.Type) })
|
|
||||||
func (b *Bridge) AddObserver(fn func(BridgeMessage)) {
|
|
||||||
if fn == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
b.observers = append(b.observers, fn)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start begins the connection loop in a background goroutine.
|
// Start begins the connection loop in a background goroutine.
|
||||||
// Call Shutdown to stop it.
|
// Call Shutdown to stop it.
|
||||||
//
|
|
||||||
// bridge.Start(ctx)
|
|
||||||
func (b *Bridge) Start(ctx context.Context) {
|
func (b *Bridge) Start(ctx context.Context) {
|
||||||
ctx, b.cancel = context.WithCancel(ctx)
|
ctx, b.cancel = context.WithCancel(ctx)
|
||||||
go b.connectLoop(ctx)
|
go b.connectLoop(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown cleanly closes the bridge.
|
// Shutdown cleanly closes the bridge.
|
||||||
//
|
|
||||||
// bridge.Shutdown()
|
|
||||||
func (b *Bridge) Shutdown() {
|
func (b *Bridge) Shutdown() {
|
||||||
if b.cancel != nil {
|
if b.cancel != nil {
|
||||||
b.cancel()
|
b.cancel()
|
||||||
|
|
@ -106,10 +63,6 @@ func (b *Bridge) Shutdown() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Connected reports whether the bridge has an active connection.
|
// Connected reports whether the bridge has an active connection.
|
||||||
//
|
|
||||||
// if bridge.Connected() {
|
|
||||||
// fmt.Println("online")
|
|
||||||
// }
|
|
||||||
func (b *Bridge) Connected() bool {
|
func (b *Bridge) Connected() bool {
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
@ -117,8 +70,6 @@ func (b *Bridge) Connected() bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send sends a message to the Laravel backend.
|
// Send sends a message to the Laravel backend.
|
||||||
//
|
|
||||||
// err := bridge.Send(BridgeMessage{Type: "dashboard_overview"})
|
|
||||||
func (b *Bridge) Send(msg BridgeMessage) error {
|
func (b *Bridge) Send(msg BridgeMessage) error {
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
@ -132,7 +83,7 @@ func (b *Bridge) Send(msg BridgeMessage) error {
|
||||||
|
|
||||||
// connectLoop reconnects to Laravel with exponential backoff.
|
// connectLoop reconnects to Laravel with exponential backoff.
|
||||||
func (b *Bridge) connectLoop(ctx context.Context) {
|
func (b *Bridge) connectLoop(ctx context.Context) {
|
||||||
delay := b.cfg.ReconnectInterval
|
delay := b.config.ReconnectInterval
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
|
|
@ -147,12 +98,12 @@ func (b *Bridge) connectLoop(ctx context.Context) {
|
||||||
return
|
return
|
||||||
case <-time.After(delay):
|
case <-time.After(delay):
|
||||||
}
|
}
|
||||||
delay = min(delay*2, b.cfg.MaxReconnectInterval)
|
delay = min(delay*2, b.config.MaxReconnectInterval)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reset backoff on successful connection
|
// Reset backoff on successful connection
|
||||||
delay = b.cfg.ReconnectInterval
|
delay = b.config.ReconnectInterval
|
||||||
b.readLoop(ctx)
|
b.readLoop(ctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -163,12 +114,12 @@ func (b *Bridge) dial(ctx context.Context) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
var header http.Header
|
var header http.Header
|
||||||
if b.cfg.Token != "" {
|
if b.config.Token != "" {
|
||||||
header = http.Header{}
|
header = http.Header{}
|
||||||
header.Set("Authorization", "Bearer "+b.cfg.Token)
|
header.Set("Authorization", "Bearer "+b.config.Token)
|
||||||
}
|
}
|
||||||
|
|
||||||
conn, _, err := dialer.DialContext(ctx, b.cfg.LaravelWSURL, header)
|
conn, _, err := dialer.DialContext(ctx, b.config.LaravelWSURL, header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
@ -178,7 +129,7 @@ func (b *Bridge) dial(ctx context.Context) error {
|
||||||
b.connected = true
|
b.connected = true
|
||||||
b.mu.Unlock()
|
b.mu.Unlock()
|
||||||
|
|
||||||
coreerr.Info("ide bridge: connected", "url", b.cfg.LaravelWSURL)
|
coreerr.Info("ide bridge: connected", "url", b.config.LaravelWSURL)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -212,24 +163,9 @@ func (b *Bridge) readLoop(ctx context.Context) {
|
||||||
}
|
}
|
||||||
|
|
||||||
b.dispatch(msg)
|
b.dispatch(msg)
|
||||||
for _, observer := range b.snapshotObservers() {
|
|
||||||
observer(msg)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Bridge) snapshotObservers() []func(BridgeMessage) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
if len(b.observers) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
observers := make([]func(BridgeMessage), len(b.observers))
|
|
||||||
copy(observers, b.observers)
|
|
||||||
return observers
|
|
||||||
}
|
|
||||||
|
|
||||||
// dispatch routes an incoming message to the appropriate ws.Hub channel.
|
// dispatch routes an incoming message to the appropriate ws.Hub channel.
|
||||||
func (b *Bridge) dispatch(msg BridgeMessage) {
|
func (b *Bridge) dispatch(msg BridgeMessage) {
|
||||||
if b.hub == nil {
|
if b.hub == nil {
|
||||||
|
|
|
||||||
|
|
@ -164,71 +164,6 @@ func TestBridge_Good_MessageDispatch(t *testing.T) {
|
||||||
// This confirms the dispatch path ran without error.
|
// This confirms the dispatch path ran without error.
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBridge_Good_MultipleObservers(t *testing.T) {
|
|
||||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
conn, err := testUpgrader.Upgrade(w, r, nil)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer conn.Close()
|
|
||||||
|
|
||||||
msg := BridgeMessage{
|
|
||||||
Type: "brain_recall",
|
|
||||||
Data: map[string]any{
|
|
||||||
"query": "test query",
|
|
||||||
"count": 3,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
data, _ := json.Marshal(msg)
|
|
||||||
_ = conn.WriteMessage(websocket.TextMessage, data)
|
|
||||||
|
|
||||||
for {
|
|
||||||
if _, _, err := conn.ReadMessage(); err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}))
|
|
||||||
defer ts.Close()
|
|
||||||
|
|
||||||
hub := ws.NewHub()
|
|
||||||
ctx := t.Context()
|
|
||||||
go hub.Run(ctx)
|
|
||||||
|
|
||||||
cfg := DefaultConfig()
|
|
||||||
cfg.LaravelWSURL = wsURL(ts)
|
|
||||||
cfg.ReconnectInterval = 100 * time.Millisecond
|
|
||||||
|
|
||||||
bridge := NewBridge(hub, cfg)
|
|
||||||
|
|
||||||
first := make(chan struct{}, 1)
|
|
||||||
second := make(chan struct{}, 1)
|
|
||||||
bridge.AddObserver(func(msg BridgeMessage) {
|
|
||||||
if msg.Type == "brain_recall" {
|
|
||||||
first <- struct{}{}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
bridge.AddObserver(func(msg BridgeMessage) {
|
|
||||||
if msg.Type == "brain_recall" {
|
|
||||||
second <- struct{}{}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
bridge.Start(ctx)
|
|
||||||
waitConnected(t, bridge, 2*time.Second)
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-first:
|
|
||||||
case <-time.After(2 * time.Second):
|
|
||||||
t.Fatal("timed out waiting for first observer")
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-second:
|
|
||||||
case <-time.After(2 * time.Second):
|
|
||||||
t.Fatal("timed out waiting for second observer")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBridge_Good_Reconnect(t *testing.T) {
|
func TestBridge_Good_Reconnect(t *testing.T) {
|
||||||
// Use atomic counter to avoid data race between HTTP handler goroutine
|
// Use atomic counter to avoid data race between HTTP handler goroutine
|
||||||
// and the test goroutine.
|
// and the test goroutine.
|
||||||
|
|
@ -477,10 +412,11 @@ func TestBridge_Good_NoAuthHeaderWhenTokenEmpty(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBridge_Good_ConfigToken(t *testing.T) {
|
func TestBridge_Good_WithTokenOption(t *testing.T) {
|
||||||
// Verify the Config DTO carries token settings through unchanged.
|
// Verify the WithToken option function works.
|
||||||
cfg := DefaultConfig()
|
cfg := DefaultConfig()
|
||||||
cfg.Token = "my-token"
|
opt := WithToken("my-token")
|
||||||
|
opt(&cfg)
|
||||||
|
|
||||||
if cfg.Token != "my-token" {
|
if cfg.Token != "my-token" {
|
||||||
t.Errorf("expected token 'my-token', got %q", cfg.Token)
|
t.Errorf("expected token 'my-token', got %q", cfg.Token)
|
||||||
|
|
@ -488,14 +424,14 @@ func TestBridge_Good_ConfigToken(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSubsystem_Good_Name(t *testing.T) {
|
func TestSubsystem_Good_Name(t *testing.T) {
|
||||||
sub := New(nil, Config{})
|
sub := New(nil)
|
||||||
if sub.Name() != "ide" {
|
if sub.Name() != "ide" {
|
||||||
t.Errorf("expected name 'ide', got %q", sub.Name())
|
t.Errorf("expected name 'ide', got %q", sub.Name())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSubsystem_Good_NilHub(t *testing.T) {
|
func TestSubsystem_Good_NilHub(t *testing.T) {
|
||||||
sub := New(nil, Config{})
|
sub := New(nil)
|
||||||
if sub.Bridge() != nil {
|
if sub.Bridge() != nil {
|
||||||
t.Error("expected nil bridge when hub is nil")
|
t.Error("expected nil bridge when hub is nil")
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,17 +1,10 @@
|
||||||
// Package ide provides an MCP subsystem that bridges the desktop IDE to
|
// Package ide provides an MCP subsystem that bridges the desktop IDE to
|
||||||
// a Laravel core-agentic backend over WebSocket.
|
// a Laravel core-agentic backend over WebSocket.
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package ide
|
package ide
|
||||||
|
|
||||||
import "time"
|
import "time"
|
||||||
|
|
||||||
// Config holds connection and workspace settings for the IDE subsystem.
|
// Config holds connection and workspace settings for the IDE subsystem.
|
||||||
//
|
|
||||||
// cfg := Config{
|
|
||||||
// LaravelWSURL: "ws://localhost:9876/ws",
|
|
||||||
// WorkspaceRoot: "/workspace",
|
|
||||||
// }
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
// LaravelWSURL is the WebSocket endpoint for the Laravel core-agentic backend.
|
// LaravelWSURL is the WebSocket endpoint for the Laravel core-agentic backend.
|
||||||
LaravelWSURL string
|
LaravelWSURL string
|
||||||
|
|
@ -31,27 +24,34 @@ type Config struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultConfig returns sensible defaults for local development.
|
// DefaultConfig returns sensible defaults for local development.
|
||||||
//
|
|
||||||
// cfg := DefaultConfig()
|
|
||||||
func DefaultConfig() Config {
|
func DefaultConfig() Config {
|
||||||
return Config{}.WithDefaults()
|
return Config{
|
||||||
|
LaravelWSURL: "ws://localhost:9876/ws",
|
||||||
|
WorkspaceRoot: ".",
|
||||||
|
ReconnectInterval: 2 * time.Second,
|
||||||
|
MaxReconnectInterval: 30 * time.Second,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithDefaults fills unset fields with the default development values.
|
// Option configures the IDE subsystem.
|
||||||
//
|
type Option func(*Config)
|
||||||
// cfg := Config{WorkspaceRoot: "/workspace"}.WithDefaults()
|
|
||||||
func (c Config) WithDefaults() Config {
|
// WithLaravelURL sets the Laravel WebSocket endpoint.
|
||||||
if c.LaravelWSURL == "" {
|
func WithLaravelURL(url string) Option {
|
||||||
c.LaravelWSURL = "ws://localhost:9876/ws"
|
return func(c *Config) { c.LaravelWSURL = url }
|
||||||
}
|
}
|
||||||
if c.WorkspaceRoot == "" {
|
|
||||||
c.WorkspaceRoot = "."
|
// WithWorkspaceRoot sets the workspace root directory.
|
||||||
}
|
func WithWorkspaceRoot(root string) Option {
|
||||||
if c.ReconnectInterval == 0 {
|
return func(c *Config) { c.WorkspaceRoot = root }
|
||||||
c.ReconnectInterval = 2 * time.Second
|
}
|
||||||
}
|
|
||||||
if c.MaxReconnectInterval == 0 {
|
// WithReconnectInterval sets the base reconnect interval.
|
||||||
c.MaxReconnectInterval = 30 * time.Second
|
func WithReconnectInterval(d time.Duration) Option {
|
||||||
}
|
return func(c *Config) { c.ReconnectInterval = d }
|
||||||
return c
|
}
|
||||||
|
|
||||||
|
// WithToken sets the Bearer token for WebSocket authentication.
|
||||||
|
func WithToken(token string) Option {
|
||||||
|
return func(c *Config) { c.Token = token }
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,17 +1,11 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package ide
|
package ide
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
core "dappco.re/go/core"
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"forge.lthn.ai/core/go-ws"
|
"forge.lthn.ai/core/go-ws"
|
||||||
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// errBridgeNotAvailable is returned when a tool requires the Laravel bridge
|
// errBridgeNotAvailable is returned when a tool requires the Laravel bridge
|
||||||
|
|
@ -20,62 +14,35 @@ var errBridgeNotAvailable = coreerr.E("ide", "bridge not available", nil)
|
||||||
|
|
||||||
// Subsystem implements mcp.Subsystem and mcp.SubsystemWithShutdown for the IDE.
|
// Subsystem implements mcp.Subsystem and mcp.SubsystemWithShutdown for the IDE.
|
||||||
type Subsystem struct {
|
type Subsystem struct {
|
||||||
cfg Config
|
config Config
|
||||||
bridge *Bridge
|
bridge *Bridge
|
||||||
hub *ws.Hub
|
hub *ws.Hub
|
||||||
notifier coremcp.Notifier
|
|
||||||
|
|
||||||
stateMu sync.Mutex
|
|
||||||
sessionOrder []string
|
|
||||||
sessions map[string]Session
|
|
||||||
chats map[string][]ChatMessage
|
|
||||||
buildOrder []string
|
|
||||||
builds map[string]BuildInfo
|
|
||||||
buildLogMap map[string][]string
|
|
||||||
activity []ActivityEvent
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
// New creates an IDE subsystem. The ws.Hub is used for real-time forwarding;
|
||||||
_ coremcp.Subsystem = (*Subsystem)(nil)
|
// pass nil if headless (tools still work but real-time streaming is disabled).
|
||||||
_ coremcp.SubsystemWithShutdown = (*Subsystem)(nil)
|
|
||||||
_ coremcp.SubsystemWithNotifier = (*Subsystem)(nil)
|
|
||||||
)
|
|
||||||
|
|
||||||
// New creates an IDE subsystem from a Config DTO.
|
|
||||||
//
|
//
|
||||||
// cfg := DefaultConfig()
|
// sub := ide.New(hub, ide.WithToken("sk-abc"))
|
||||||
// ide := New(hub, cfg)
|
func New(hub *ws.Hub, opts ...Option) *Subsystem {
|
||||||
//
|
configuration := DefaultConfig()
|
||||||
// The ws.Hub is used for real-time forwarding; pass nil if headless
|
for _, opt := range opts {
|
||||||
// (tools still work but real-time streaming is disabled).
|
opt(&configuration)
|
||||||
func New(hub *ws.Hub, cfg Config) *Subsystem {
|
|
||||||
cfg = cfg.WithDefaults()
|
|
||||||
s := &Subsystem{
|
|
||||||
cfg: cfg,
|
|
||||||
bridge: nil,
|
|
||||||
hub: hub,
|
|
||||||
sessions: make(map[string]Session),
|
|
||||||
chats: make(map[string][]ChatMessage),
|
|
||||||
builds: make(map[string]BuildInfo),
|
|
||||||
buildLogMap: make(map[string][]string),
|
|
||||||
}
|
}
|
||||||
|
var bridge *Bridge
|
||||||
if hub != nil {
|
if hub != nil {
|
||||||
s.bridge = NewBridge(hub, cfg)
|
bridge = NewBridge(hub, configuration)
|
||||||
s.bridge.AddObserver(func(msg BridgeMessage) {
|
|
||||||
s.handleBridgeMessage(msg)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
return s
|
return &Subsystem{config: configuration, bridge: bridge, hub: hub}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name implements mcp.Subsystem.
|
// Name implements mcp.Subsystem.
|
||||||
func (s *Subsystem) Name() string { return "ide" }
|
func (s *Subsystem) Name() string { return "ide" }
|
||||||
|
|
||||||
// RegisterTools implements mcp.Subsystem.
|
// RegisterTools implements mcp.Subsystem.
|
||||||
func (s *Subsystem) RegisterTools(svc *coremcp.Service) {
|
func (s *Subsystem) RegisterTools(server *mcp.Server) {
|
||||||
s.registerChatTools(svc)
|
s.registerChatTools(server)
|
||||||
s.registerBuildTools(svc)
|
s.registerBuildTools(server)
|
||||||
s.registerDashboardTools(svc)
|
s.registerDashboardTools(server)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown implements mcp.SubsystemWithShutdown.
|
// Shutdown implements mcp.SubsystemWithShutdown.
|
||||||
|
|
@ -86,11 +53,6 @@ func (s *Subsystem) Shutdown(_ context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNotifier wires the shared MCP notifier into the IDE subsystem.
|
|
||||||
func (s *Subsystem) SetNotifier(n coremcp.Notifier) {
|
|
||||||
s.notifier = n
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bridge returns the Laravel WebSocket bridge (may be nil in headless mode).
|
// Bridge returns the Laravel WebSocket bridge (may be nil in headless mode).
|
||||||
func (s *Subsystem) Bridge() *Bridge { return s.bridge }
|
func (s *Subsystem) Bridge() *Bridge { return s.bridge }
|
||||||
|
|
||||||
|
|
@ -100,469 +62,3 @@ func (s *Subsystem) StartBridge(ctx context.Context) {
|
||||||
s.bridge.Start(ctx)
|
s.bridge.Start(ctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Subsystem) addSession(session Session) {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
if s.sessions == nil {
|
|
||||||
s.sessions = make(map[string]Session)
|
|
||||||
}
|
|
||||||
if s.chats == nil {
|
|
||||||
s.chats = make(map[string][]ChatMessage)
|
|
||||||
}
|
|
||||||
if _, exists := s.sessions[session.ID]; !exists {
|
|
||||||
s.sessionOrder = append(s.sessionOrder, session.ID)
|
|
||||||
}
|
|
||||||
s.sessions[session.ID] = session
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) addBuild(build BuildInfo) {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
if s.builds == nil {
|
|
||||||
s.builds = make(map[string]BuildInfo)
|
|
||||||
}
|
|
||||||
if s.buildLogMap == nil {
|
|
||||||
s.buildLogMap = make(map[string][]string)
|
|
||||||
}
|
|
||||||
if _, exists := s.builds[build.ID]; !exists {
|
|
||||||
s.buildOrder = append(s.buildOrder, build.ID)
|
|
||||||
}
|
|
||||||
if build.StartedAt.IsZero() {
|
|
||||||
build.StartedAt = time.Now()
|
|
||||||
}
|
|
||||||
s.builds[build.ID] = build
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) listBuilds(repo string, limit int) []BuildInfo {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
if len(s.buildOrder) == 0 {
|
|
||||||
return []BuildInfo{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if limit <= 0 {
|
|
||||||
limit = len(s.buildOrder)
|
|
||||||
}
|
|
||||||
|
|
||||||
builds := make([]BuildInfo, 0, limit)
|
|
||||||
for i := len(s.buildOrder) - 1; i >= 0; i-- {
|
|
||||||
id := s.buildOrder[i]
|
|
||||||
build, ok := s.builds[id]
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if repo != "" && build.Repo != repo {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
builds = append(builds, build)
|
|
||||||
if len(builds) >= limit {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return builds
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) appendBuildLog(buildID, line string) {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
if s.buildLogMap == nil {
|
|
||||||
s.buildLogMap = make(map[string][]string)
|
|
||||||
}
|
|
||||||
s.buildLogMap[buildID] = append(s.buildLogMap[buildID], line)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) setBuildLogs(buildID string, lines []string) {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
if s.buildLogMap == nil {
|
|
||||||
s.buildLogMap = make(map[string][]string)
|
|
||||||
}
|
|
||||||
if len(lines) == 0 {
|
|
||||||
s.buildLogMap[buildID] = []string{}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
out := make([]string, len(lines))
|
|
||||||
copy(out, lines)
|
|
||||||
s.buildLogMap[buildID] = out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) buildLogTail(buildID string, tail int) []string {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
lines := s.buildLogMap[buildID]
|
|
||||||
if len(lines) == 0 {
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
if tail <= 0 || tail > len(lines) {
|
|
||||||
tail = len(lines)
|
|
||||||
}
|
|
||||||
start := len(lines) - tail
|
|
||||||
out := make([]string, tail)
|
|
||||||
copy(out, lines[start:])
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) buildSnapshot(buildID string) (BuildInfo, bool) {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
build, ok := s.builds[buildID]
|
|
||||||
return build, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) buildRepoCount() int {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
repos := make(map[string]struct{})
|
|
||||||
for _, build := range s.builds {
|
|
||||||
if build.Repo != "" {
|
|
||||||
repos[build.Repo] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(repos)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) listSessions() []Session {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
if len(s.sessionOrder) == 0 {
|
|
||||||
return []Session{}
|
|
||||||
}
|
|
||||||
|
|
||||||
result := make([]Session, 0, len(s.sessionOrder))
|
|
||||||
for _, id := range s.sessionOrder {
|
|
||||||
if session, ok := s.sessions[id]; ok {
|
|
||||||
result = append(result, session)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) appendChatMessage(sessionID, role, content string) {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
if s.chats == nil {
|
|
||||||
s.chats = make(map[string][]ChatMessage)
|
|
||||||
}
|
|
||||||
s.chats[sessionID] = append(s.chats[sessionID], ChatMessage{
|
|
||||||
Role: role,
|
|
||||||
Content: content,
|
|
||||||
Timestamp: time.Now(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) chatMessages(sessionID string) []ChatMessage {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
history := s.chats[sessionID]
|
|
||||||
if len(history) == 0 {
|
|
||||||
return []ChatMessage{}
|
|
||||||
}
|
|
||||||
out := make([]ChatMessage, len(history))
|
|
||||||
copy(out, history)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) recordActivity(typ, msg string) {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
s.activity = append(s.activity, ActivityEvent{
|
|
||||||
Type: typ,
|
|
||||||
Message: msg,
|
|
||||||
Timestamp: time.Now(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) activityFeed(limit int) []ActivityEvent {
|
|
||||||
s.stateMu.Lock()
|
|
||||||
defer s.stateMu.Unlock()
|
|
||||||
|
|
||||||
if limit <= 0 || limit > len(s.activity) {
|
|
||||||
limit = len(s.activity)
|
|
||||||
}
|
|
||||||
if limit == 0 {
|
|
||||||
return []ActivityEvent{}
|
|
||||||
}
|
|
||||||
|
|
||||||
start := len(s.activity) - limit
|
|
||||||
out := make([]ActivityEvent, limit)
|
|
||||||
copy(out, s.activity[start:])
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) handleBridgeMessage(msg BridgeMessage) {
|
|
||||||
switch msg.Type {
|
|
||||||
case "build_status":
|
|
||||||
if build, ok := buildInfoFromData(msg.Data); ok {
|
|
||||||
s.addBuild(build)
|
|
||||||
s.emitBuildLifecycle(build)
|
|
||||||
if lines := buildLinesFromData(msg.Data); len(lines) > 0 {
|
|
||||||
s.setBuildLogs(build.ID, lines)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "build_list":
|
|
||||||
for _, build := range buildInfosFromData(msg.Data) {
|
|
||||||
s.addBuild(build)
|
|
||||||
}
|
|
||||||
case "build_logs":
|
|
||||||
buildID, lines := buildLogsFromData(msg.Data)
|
|
||||||
if buildID != "" {
|
|
||||||
s.setBuildLogs(buildID, lines)
|
|
||||||
}
|
|
||||||
case "session_list":
|
|
||||||
for _, session := range sessionsFromData(msg.Data) {
|
|
||||||
s.addSession(session)
|
|
||||||
}
|
|
||||||
case "session_create":
|
|
||||||
if session, ok := sessionFromData(msg.Data); ok {
|
|
||||||
s.addSession(session)
|
|
||||||
}
|
|
||||||
case "chat_history":
|
|
||||||
if sessionID, messages := chatHistoryFromData(msg.Data); sessionID != "" {
|
|
||||||
for _, message := range messages {
|
|
||||||
s.appendChatMessage(sessionID, message.Role, message.Content)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Subsystem) emitBuildLifecycle(build BuildInfo) {
|
|
||||||
if s.notifier == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
channel := ""
|
|
||||||
switch build.Status {
|
|
||||||
case "running", "in_progress", "started":
|
|
||||||
channel = coremcp.ChannelBuildStart
|
|
||||||
case "success", "succeeded", "completed", "passed":
|
|
||||||
channel = coremcp.ChannelBuildComplete
|
|
||||||
case "failed", "error":
|
|
||||||
channel = coremcp.ChannelBuildFailed
|
|
||||||
default:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
payload := map[string]any{
|
|
||||||
"id": build.ID,
|
|
||||||
"repo": build.Repo,
|
|
||||||
"branch": build.Branch,
|
|
||||||
"status": build.Status,
|
|
||||||
"startedAt": build.StartedAt,
|
|
||||||
}
|
|
||||||
if build.Duration != "" {
|
|
||||||
payload["duration"] = build.Duration
|
|
||||||
}
|
|
||||||
s.notifier.ChannelSend(context.Background(), channel, payload)
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildInfoFromData(data any) (BuildInfo, bool) {
|
|
||||||
m, ok := data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
return BuildInfo{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
id, _ := m["buildId"].(string)
|
|
||||||
if id == "" {
|
|
||||||
id, _ = m["id"].(string)
|
|
||||||
}
|
|
||||||
if id == "" {
|
|
||||||
return BuildInfo{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
build := BuildInfo{
|
|
||||||
ID: id,
|
|
||||||
Repo: stringFromAny(m["repo"]),
|
|
||||||
Branch: stringFromAny(m["branch"]),
|
|
||||||
Status: stringFromAny(m["status"]),
|
|
||||||
}
|
|
||||||
if build.Status == "" {
|
|
||||||
build.Status = "unknown"
|
|
||||||
}
|
|
||||||
if startedAt, ok := m["startedAt"].(time.Time); ok {
|
|
||||||
build.StartedAt = startedAt
|
|
||||||
}
|
|
||||||
if duration := stringFromAny(m["duration"]); duration != "" {
|
|
||||||
build.Duration = duration
|
|
||||||
}
|
|
||||||
return build, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildInfosFromData(data any) []BuildInfo {
|
|
||||||
m, ok := data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
return []BuildInfo{}
|
|
||||||
}
|
|
||||||
|
|
||||||
raw, ok := m["builds"].([]any)
|
|
||||||
if !ok {
|
|
||||||
return []BuildInfo{}
|
|
||||||
}
|
|
||||||
|
|
||||||
builds := make([]BuildInfo, 0, len(raw))
|
|
||||||
for _, item := range raw {
|
|
||||||
build, ok := buildInfoFromData(item)
|
|
||||||
if ok {
|
|
||||||
builds = append(builds, build)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return builds
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildLinesFromData(data any) []string {
|
|
||||||
_, lines := buildLogsFromData(data)
|
|
||||||
return lines
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildLogsFromData(data any) (string, []string) {
|
|
||||||
m, ok := data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
return "", []string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
buildID, _ := m["buildId"].(string)
|
|
||||||
if buildID == "" {
|
|
||||||
buildID, _ = m["id"].(string)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch raw := m["lines"].(type) {
|
|
||||||
case []any:
|
|
||||||
lines := make([]string, 0, len(raw))
|
|
||||||
for _, item := range raw {
|
|
||||||
lines = append(lines, stringFromAny(item))
|
|
||||||
}
|
|
||||||
return buildID, lines
|
|
||||||
case []string:
|
|
||||||
lines := make([]string, len(raw))
|
|
||||||
copy(lines, raw)
|
|
||||||
return buildID, lines
|
|
||||||
}
|
|
||||||
|
|
||||||
if output := stringFromAny(m["output"]); output != "" {
|
|
||||||
return buildID, []string{output}
|
|
||||||
}
|
|
||||||
|
|
||||||
return buildID, []string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sessionsFromData(data any) []Session {
|
|
||||||
m, ok := data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
return []Session{}
|
|
||||||
}
|
|
||||||
|
|
||||||
raw, ok := m["sessions"].([]any)
|
|
||||||
if !ok {
|
|
||||||
return []Session{}
|
|
||||||
}
|
|
||||||
|
|
||||||
sessions := make([]Session, 0, len(raw))
|
|
||||||
for _, item := range raw {
|
|
||||||
session, ok := sessionFromData(item)
|
|
||||||
if ok {
|
|
||||||
sessions = append(sessions, session)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return sessions
|
|
||||||
}
|
|
||||||
|
|
||||||
func sessionFromData(data any) (Session, bool) {
|
|
||||||
m, ok := data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
return Session{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
id, _ := m["id"].(string)
|
|
||||||
if id == "" {
|
|
||||||
return Session{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
session := Session{
|
|
||||||
ID: id,
|
|
||||||
Name: stringFromAny(m["name"]),
|
|
||||||
Status: stringFromAny(m["status"]),
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
if createdAt, ok := m["createdAt"].(time.Time); ok {
|
|
||||||
session.CreatedAt = createdAt
|
|
||||||
}
|
|
||||||
if session.Status == "" {
|
|
||||||
session.Status = "unknown"
|
|
||||||
}
|
|
||||||
return session, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func chatHistoryFromData(data any) (string, []ChatMessage) {
|
|
||||||
m, ok := data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
return "", []ChatMessage{}
|
|
||||||
}
|
|
||||||
|
|
||||||
sessionID, _ := m["sessionId"].(string)
|
|
||||||
if sessionID == "" {
|
|
||||||
sessionID, _ = m["session_id"].(string)
|
|
||||||
}
|
|
||||||
|
|
||||||
raw, ok := m["messages"].([]any)
|
|
||||||
if !ok {
|
|
||||||
return sessionID, []ChatMessage{}
|
|
||||||
}
|
|
||||||
|
|
||||||
messages := make([]ChatMessage, 0, len(raw))
|
|
||||||
for _, item := range raw {
|
|
||||||
if msg, ok := chatMessageFromData(item); ok {
|
|
||||||
messages = append(messages, msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return sessionID, messages
|
|
||||||
}
|
|
||||||
|
|
||||||
func chatMessageFromData(data any) (ChatMessage, bool) {
|
|
||||||
m, ok := data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
return ChatMessage{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
role := stringFromAny(m["role"])
|
|
||||||
content := stringFromAny(m["content"])
|
|
||||||
if role == "" && content == "" {
|
|
||||||
return ChatMessage{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
msg := ChatMessage{
|
|
||||||
Role: role,
|
|
||||||
Content: content,
|
|
||||||
Timestamp: time.Now(),
|
|
||||||
}
|
|
||||||
if ts, ok := m["timestamp"].(time.Time); ok {
|
|
||||||
msg.Timestamp = ts
|
|
||||||
}
|
|
||||||
return msg, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func stringFromAny(v any) string {
|
|
||||||
switch value := v.(type) {
|
|
||||||
case string:
|
|
||||||
return value
|
|
||||||
case fmt.Stringer:
|
|
||||||
return value.String()
|
|
||||||
default:
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSessionID() string {
|
|
||||||
return core.ID()
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -1,27 +1,20 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package ide
|
package ide
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Build tool input/output types.
|
// Build tool input/output types.
|
||||||
|
|
||||||
// BuildStatusInput is the input for ide_build_status.
|
// BuildStatusInput is the input for ide_build_status.
|
||||||
//
|
|
||||||
// input := BuildStatusInput{BuildID: "build-123"}
|
|
||||||
type BuildStatusInput struct {
|
type BuildStatusInput struct {
|
||||||
BuildID string `json:"buildId"`
|
BuildID string `json:"buildId"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildInfo represents a single build.
|
// BuildInfo represents a single build.
|
||||||
//
|
|
||||||
// info := BuildInfo{ID: "build-123", Repo: "go-io", Status: "running"}
|
|
||||||
type BuildInfo struct {
|
type BuildInfo struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
Repo string `json:"repo"`
|
Repo string `json:"repo"`
|
||||||
|
|
@ -32,102 +25,90 @@ type BuildInfo struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildStatusOutput is the output for ide_build_status.
|
// BuildStatusOutput is the output for ide_build_status.
|
||||||
//
|
|
||||||
// // out.Build.Status == "running"
|
|
||||||
type BuildStatusOutput struct {
|
type BuildStatusOutput struct {
|
||||||
Build BuildInfo `json:"build"`
|
Build BuildInfo `json:"build"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildListInput is the input for ide_build_list.
|
// BuildListInput is the input for ide_build_list.
|
||||||
//
|
|
||||||
// input := BuildListInput{Repo: "go-io", Limit: 20}
|
|
||||||
type BuildListInput struct {
|
type BuildListInput struct {
|
||||||
Repo string `json:"repo,omitempty"`
|
Repo string `json:"repo,omitempty"`
|
||||||
Limit int `json:"limit,omitempty"`
|
Limit int `json:"limit,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildListOutput is the output for ide_build_list.
|
// BuildListOutput is the output for ide_build_list.
|
||||||
//
|
|
||||||
// // out.Builds holds the local build snapshot
|
|
||||||
type BuildListOutput struct {
|
type BuildListOutput struct {
|
||||||
Builds []BuildInfo `json:"builds"`
|
Builds []BuildInfo `json:"builds"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildLogsInput is the input for ide_build_logs.
|
// BuildLogsInput is the input for ide_build_logs.
|
||||||
//
|
|
||||||
// input := BuildLogsInput{BuildID: "build-123", Tail: 200}
|
|
||||||
type BuildLogsInput struct {
|
type BuildLogsInput struct {
|
||||||
BuildID string `json:"buildId"`
|
BuildID string `json:"buildId"`
|
||||||
Tail int `json:"tail,omitempty"`
|
Tail int `json:"tail,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildLogsOutput is the output for ide_build_logs.
|
// BuildLogsOutput is the output for ide_build_logs.
|
||||||
//
|
|
||||||
// // out.Lines contains the captured build log lines
|
|
||||||
type BuildLogsOutput struct {
|
type BuildLogsOutput struct {
|
||||||
BuildID string `json:"buildId"`
|
BuildID string `json:"buildId"`
|
||||||
Lines []string `json:"lines"`
|
Lines []string `json:"lines"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Subsystem) registerBuildTools(svc *coremcp.Service) {
|
func (s *Subsystem) registerBuildTools(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
|
||||||
Name: "ide_build_status",
|
Name: "ide_build_status",
|
||||||
Description: "Get the status of a specific build",
|
Description: "Get the status of a specific build",
|
||||||
}, s.buildStatus)
|
}, s.buildStatus)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ide_build_list",
|
Name: "ide_build_list",
|
||||||
Description: "List recent builds, optionally filtered by repository",
|
Description: "List recent builds, optionally filtered by repository",
|
||||||
}, s.buildList)
|
}, s.buildList)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ide_build_logs",
|
Name: "ide_build_logs",
|
||||||
Description: "Retrieve log output for a build",
|
Description: "Retrieve log output for a build",
|
||||||
}, s.buildLogs)
|
}, s.buildLogs)
|
||||||
}
|
}
|
||||||
|
|
||||||
// buildStatus returns a local best-effort build status and refreshes the
|
// buildStatus requests build status from the Laravel backend.
|
||||||
// Laravel backend when the bridge is available.
|
// Stub implementation: sends request via bridge, returns "unknown" status. Awaiting Laravel backend.
|
||||||
func (s *Subsystem) buildStatus(_ context.Context, _ *mcp.CallToolRequest, input BuildStatusInput) (*mcp.CallToolResult, BuildStatusOutput, error) {
|
func (s *Subsystem) buildStatus(_ context.Context, _ *mcp.CallToolRequest, input BuildStatusInput) (*mcp.CallToolResult, BuildStatusOutput, error) {
|
||||||
if s.bridge != nil {
|
if s.bridge == nil {
|
||||||
_ = s.bridge.Send(BridgeMessage{
|
return nil, BuildStatusOutput{}, errBridgeNotAvailable
|
||||||
Type: "build_status",
|
|
||||||
Data: map[string]any{"buildId": input.BuildID},
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
_ = s.bridge.Send(BridgeMessage{
|
||||||
build := BuildInfo{ID: input.BuildID, Status: "unknown"}
|
Type: "build_status",
|
||||||
if cached, ok := s.buildSnapshot(input.BuildID); ok {
|
Data: map[string]any{"buildId": input.BuildID},
|
||||||
build = cached
|
})
|
||||||
}
|
return nil, BuildStatusOutput{
|
||||||
|
Build: BuildInfo{ID: input.BuildID, Status: "unknown"},
|
||||||
return nil, BuildStatusOutput{Build: build}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// buildList returns the local build list snapshot and refreshes the Laravel
|
// buildList requests a list of builds from the Laravel backend.
|
||||||
// backend when the bridge is available.
|
// Stub implementation: sends request via bridge, returns empty list. Awaiting Laravel backend.
|
||||||
func (s *Subsystem) buildList(_ context.Context, _ *mcp.CallToolRequest, input BuildListInput) (*mcp.CallToolResult, BuildListOutput, error) {
|
func (s *Subsystem) buildList(_ context.Context, _ *mcp.CallToolRequest, input BuildListInput) (*mcp.CallToolResult, BuildListOutput, error) {
|
||||||
if s.bridge != nil {
|
if s.bridge == nil {
|
||||||
_ = s.bridge.Send(BridgeMessage{
|
return nil, BuildListOutput{}, errBridgeNotAvailable
|
||||||
Type: "build_list",
|
}
|
||||||
Data: map[string]any{"repo": input.Repo, "limit": input.Limit},
|
_ = s.bridge.Send(BridgeMessage{
|
||||||
})
|
Type: "build_list",
|
||||||
}
|
Data: map[string]any{"repo": input.Repo, "limit": input.Limit},
|
||||||
return nil, BuildListOutput{Builds: s.listBuilds(input.Repo, input.Limit)}, nil
|
})
|
||||||
}
|
return nil, BuildListOutput{Builds: []BuildInfo{}}, nil
|
||||||
|
}
|
||||||
// buildLogs returns the local build log snapshot and refreshes the Laravel
|
|
||||||
// backend when the bridge is available.
|
// buildLogs requests build log output from the Laravel backend.
|
||||||
func (s *Subsystem) buildLogs(_ context.Context, _ *mcp.CallToolRequest, input BuildLogsInput) (*mcp.CallToolResult, BuildLogsOutput, error) {
|
// Stub implementation: sends request via bridge, returns empty lines. Awaiting Laravel backend.
|
||||||
if s.bridge != nil {
|
func (s *Subsystem) buildLogs(_ context.Context, _ *mcp.CallToolRequest, input BuildLogsInput) (*mcp.CallToolResult, BuildLogsOutput, error) {
|
||||||
_ = s.bridge.Send(BridgeMessage{
|
if s.bridge == nil {
|
||||||
Type: "build_logs",
|
return nil, BuildLogsOutput{}, errBridgeNotAvailable
|
||||||
Data: map[string]any{"buildId": input.BuildID, "tail": input.Tail},
|
}
|
||||||
})
|
_ = s.bridge.Send(BridgeMessage{
|
||||||
}
|
Type: "build_logs",
|
||||||
return nil, BuildLogsOutput{
|
Data: map[string]any{"buildId": input.BuildID, "tail": input.Tail},
|
||||||
BuildID: input.BuildID,
|
})
|
||||||
Lines: s.buildLogTail(input.BuildID, input.Tail),
|
return nil, BuildLogsOutput{
|
||||||
|
BuildID: input.BuildID,
|
||||||
|
Lines: []string{},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,12 +1,9 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package ide
|
package ide
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
@ -14,16 +11,12 @@ import (
|
||||||
// Chat tool input/output types.
|
// Chat tool input/output types.
|
||||||
|
|
||||||
// ChatSendInput is the input for ide_chat_send.
|
// ChatSendInput is the input for ide_chat_send.
|
||||||
//
|
|
||||||
// input := ChatSendInput{SessionID: "sess-42", Message: "hello"}
|
|
||||||
type ChatSendInput struct {
|
type ChatSendInput struct {
|
||||||
SessionID string `json:"sessionId"`
|
SessionID string `json:"sessionId"`
|
||||||
Message string `json:"message"`
|
Message string `json:"message"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChatSendOutput is the output for ide_chat_send.
|
// ChatSendOutput is the output for ide_chat_send.
|
||||||
//
|
|
||||||
// // out.Sent == true, out.SessionID == "sess-42"
|
|
||||||
type ChatSendOutput struct {
|
type ChatSendOutput struct {
|
||||||
Sent bool `json:"sent"`
|
Sent bool `json:"sent"`
|
||||||
SessionID string `json:"sessionId"`
|
SessionID string `json:"sessionId"`
|
||||||
|
|
@ -31,16 +24,12 @@ type ChatSendOutput struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChatHistoryInput is the input for ide_chat_history.
|
// ChatHistoryInput is the input for ide_chat_history.
|
||||||
//
|
|
||||||
// input := ChatHistoryInput{SessionID: "sess-42", Limit: 50}
|
|
||||||
type ChatHistoryInput struct {
|
type ChatHistoryInput struct {
|
||||||
SessionID string `json:"sessionId"`
|
SessionID string `json:"sessionId"`
|
||||||
Limit int `json:"limit,omitempty"`
|
Limit int `json:"limit,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChatMessage represents a single message in history.
|
// ChatMessage represents a single message in history.
|
||||||
//
|
|
||||||
// msg := ChatMessage{Role: "user", Content: "hello"}
|
|
||||||
type ChatMessage struct {
|
type ChatMessage struct {
|
||||||
Role string `json:"role"`
|
Role string `json:"role"`
|
||||||
Content string `json:"content"`
|
Content string `json:"content"`
|
||||||
|
|
@ -48,21 +37,15 @@ type ChatMessage struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChatHistoryOutput is the output for ide_chat_history.
|
// ChatHistoryOutput is the output for ide_chat_history.
|
||||||
//
|
|
||||||
// // out.Messages contains the stored chat transcript
|
|
||||||
type ChatHistoryOutput struct {
|
type ChatHistoryOutput struct {
|
||||||
SessionID string `json:"sessionId"`
|
SessionID string `json:"sessionId"`
|
||||||
Messages []ChatMessage `json:"messages"`
|
Messages []ChatMessage `json:"messages"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// SessionListInput is the input for ide_session_list.
|
// SessionListInput is the input for ide_session_list.
|
||||||
//
|
|
||||||
// input := SessionListInput{}
|
|
||||||
type SessionListInput struct{}
|
type SessionListInput struct{}
|
||||||
|
|
||||||
// Session represents an agent session.
|
// Session represents an agent session.
|
||||||
//
|
|
||||||
// session := Session{ID: "sess-42", Name: "draft", Status: "running"}
|
|
||||||
type Session struct {
|
type Session struct {
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
|
|
@ -71,81 +54,67 @@ type Session struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// SessionListOutput is the output for ide_session_list.
|
// SessionListOutput is the output for ide_session_list.
|
||||||
//
|
|
||||||
// // out.Sessions contains every locally tracked session
|
|
||||||
type SessionListOutput struct {
|
type SessionListOutput struct {
|
||||||
Sessions []Session `json:"sessions"`
|
Sessions []Session `json:"sessions"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// SessionCreateInput is the input for ide_session_create.
|
// SessionCreateInput is the input for ide_session_create.
|
||||||
//
|
|
||||||
// input := SessionCreateInput{Name: "draft"}
|
|
||||||
type SessionCreateInput struct {
|
type SessionCreateInput struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// SessionCreateOutput is the output for ide_session_create.
|
// SessionCreateOutput is the output for ide_session_create.
|
||||||
//
|
|
||||||
// // out.Session.ID is assigned by the backend or local store
|
|
||||||
type SessionCreateOutput struct {
|
type SessionCreateOutput struct {
|
||||||
Session Session `json:"session"`
|
Session Session `json:"session"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanStatusInput is the input for ide_plan_status.
|
// PlanStatusInput is the input for ide_plan_status.
|
||||||
//
|
|
||||||
// input := PlanStatusInput{SessionID: "sess-42"}
|
|
||||||
type PlanStatusInput struct {
|
type PlanStatusInput struct {
|
||||||
SessionID string `json:"sessionId"`
|
SessionID string `json:"sessionId"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanStep is a single step in an agent plan.
|
// PlanStep is a single step in an agent plan.
|
||||||
//
|
|
||||||
// step := PlanStep{Name: "prep", Status: "done"}
|
|
||||||
type PlanStep struct {
|
type PlanStep struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PlanStatusOutput is the output for ide_plan_status.
|
// PlanStatusOutput is the output for ide_plan_status.
|
||||||
//
|
|
||||||
// // out.Steps contains the current plan breakdown
|
|
||||||
type PlanStatusOutput struct {
|
type PlanStatusOutput struct {
|
||||||
SessionID string `json:"sessionId"`
|
SessionID string `json:"sessionId"`
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Steps []PlanStep `json:"steps"`
|
Steps []PlanStep `json:"steps"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Subsystem) registerChatTools(svc *coremcp.Service) {
|
func (s *Subsystem) registerChatTools(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
|
||||||
Name: "ide_chat_send",
|
Name: "ide_chat_send",
|
||||||
Description: "Send a message to an agent chat session",
|
Description: "Send a message to an agent chat session",
|
||||||
}, s.chatSend)
|
}, s.chatSend)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ide_chat_history",
|
Name: "ide_chat_history",
|
||||||
Description: "Retrieve message history for a chat session",
|
Description: "Retrieve message history for a chat session",
|
||||||
}, s.chatHistory)
|
}, s.chatHistory)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ide_session_list",
|
Name: "ide_session_list",
|
||||||
Description: "List active agent sessions",
|
Description: "List active agent sessions",
|
||||||
}, s.sessionList)
|
}, s.sessionList)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ide_session_create",
|
Name: "ide_session_create",
|
||||||
Description: "Create a new agent session",
|
Description: "Create a new agent session",
|
||||||
}, s.sessionCreate)
|
}, s.sessionCreate)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ide_plan_status",
|
Name: "ide_plan_status",
|
||||||
Description: "Get the current plan status for a session",
|
Description: "Get the current plan status for a session",
|
||||||
}, s.planStatus)
|
}, s.planStatus)
|
||||||
}
|
}
|
||||||
|
|
||||||
// chatSend forwards a chat message to the Laravel backend via bridge.
|
// chatSend forwards a chat message to the Laravel backend via bridge.
|
||||||
// The subsystem also stores the message locally so history lookups can
|
// Stub implementation: delegates to bridge, real response arrives via WebSocket subscription.
|
||||||
// return something useful before the backend answers.
|
|
||||||
func (s *Subsystem) chatSend(_ context.Context, _ *mcp.CallToolRequest, input ChatSendInput) (*mcp.CallToolResult, ChatSendOutput, error) {
|
func (s *Subsystem) chatSend(_ context.Context, _ *mcp.CallToolRequest, input ChatSendInput) (*mcp.CallToolResult, ChatSendOutput, error) {
|
||||||
if s.bridge == nil {
|
if s.bridge == nil {
|
||||||
return nil, ChatSendOutput{}, errBridgeNotAvailable
|
return nil, ChatSendOutput{}, errBridgeNotAvailable
|
||||||
|
|
@ -159,10 +128,6 @@ func (s *Subsystem) chatSend(_ context.Context, _ *mcp.CallToolRequest, input Ch
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, ChatSendOutput{}, coreerr.E("ide.chatSend", "failed to send message", err)
|
return nil, ChatSendOutput{}, coreerr.E("ide.chatSend", "failed to send message", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.appendChatMessage(input.SessionID, "user", input.Message)
|
|
||||||
s.recordActivity("chat_send", "forwarded chat message for session "+input.SessionID)
|
|
||||||
|
|
||||||
return nil, ChatSendOutput{
|
return nil, ChatSendOutput{
|
||||||
Sent: true,
|
Sent: true,
|
||||||
SessionID: input.SessionID,
|
SessionID: input.SessionID,
|
||||||
|
|
@ -170,77 +135,67 @@ func (s *Subsystem) chatSend(_ context.Context, _ *mcp.CallToolRequest, input Ch
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// chatHistory returns the local message history for a session and refreshes
|
// chatHistory requests message history from the Laravel backend.
|
||||||
// the Laravel backend when the bridge is available.
|
// Stub implementation: sends request via bridge, returns empty messages. Real data arrives via WebSocket.
|
||||||
func (s *Subsystem) chatHistory(_ context.Context, _ *mcp.CallToolRequest, input ChatHistoryInput) (*mcp.CallToolResult, ChatHistoryOutput, error) {
|
func (s *Subsystem) chatHistory(_ context.Context, _ *mcp.CallToolRequest, input ChatHistoryInput) (*mcp.CallToolResult, ChatHistoryOutput, error) {
|
||||||
if s.bridge != nil {
|
if s.bridge == nil {
|
||||||
// Request history via bridge when available; the local cache still
|
return nil, ChatHistoryOutput{}, errBridgeNotAvailable
|
||||||
// provides an immediate response in headless mode.
|
|
||||||
_ = s.bridge.Send(BridgeMessage{
|
|
||||||
Type: "chat_history",
|
|
||||||
SessionID: input.SessionID,
|
|
||||||
Data: map[string]any{"limit": input.Limit},
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
// Request history via bridge; for now return placeholder indicating the
|
||||||
|
// request was forwarded. Real data arrives via WebSocket subscription.
|
||||||
|
_ = s.bridge.Send(BridgeMessage{
|
||||||
|
Type: "chat_history",
|
||||||
|
SessionID: input.SessionID,
|
||||||
|
Data: map[string]any{"limit": input.Limit},
|
||||||
|
})
|
||||||
return nil, ChatHistoryOutput{
|
return nil, ChatHistoryOutput{
|
||||||
SessionID: input.SessionID,
|
SessionID: input.SessionID,
|
||||||
Messages: s.chatMessages(input.SessionID),
|
Messages: []ChatMessage{},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// sessionList returns the local session cache and refreshes the Laravel
|
// sessionList requests the session list from the Laravel backend.
|
||||||
// backend when the bridge is available.
|
// Stub implementation: sends request via bridge, returns empty sessions. Awaiting Laravel backend.
|
||||||
func (s *Subsystem) sessionList(_ context.Context, _ *mcp.CallToolRequest, _ SessionListInput) (*mcp.CallToolResult, SessionListOutput, error) {
|
func (s *Subsystem) sessionList(_ context.Context, _ *mcp.CallToolRequest, _ SessionListInput) (*mcp.CallToolResult, SessionListOutput, error) {
|
||||||
if s.bridge != nil {
|
if s.bridge == nil {
|
||||||
_ = s.bridge.Send(BridgeMessage{Type: "session_list"})
|
return nil, SessionListOutput{}, errBridgeNotAvailable
|
||||||
}
|
}
|
||||||
return nil, SessionListOutput{Sessions: s.listSessions()}, nil
|
_ = s.bridge.Send(BridgeMessage{Type: "session_list"})
|
||||||
|
return nil, SessionListOutput{Sessions: []Session{}}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// sessionCreate creates a local session record immediately and forwards the
|
// sessionCreate requests a new session from the Laravel backend.
|
||||||
// request to the Laravel backend when the bridge is available.
|
// Stub implementation: sends request via bridge, returns placeholder session. Awaiting Laravel backend.
|
||||||
func (s *Subsystem) sessionCreate(_ context.Context, _ *mcp.CallToolRequest, input SessionCreateInput) (*mcp.CallToolResult, SessionCreateOutput, error) {
|
func (s *Subsystem) sessionCreate(_ context.Context, _ *mcp.CallToolRequest, input SessionCreateInput) (*mcp.CallToolResult, SessionCreateOutput, error) {
|
||||||
if s.bridge != nil {
|
if s.bridge == nil {
|
||||||
if err := s.bridge.Send(BridgeMessage{
|
return nil, SessionCreateOutput{}, errBridgeNotAvailable
|
||||||
Type: "session_create",
|
|
||||||
Data: map[string]any{"name": input.Name},
|
|
||||||
}); err != nil {
|
|
||||||
return nil, SessionCreateOutput{}, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
session := Session{
|
_ = s.bridge.Send(BridgeMessage{
|
||||||
ID: newSessionID(),
|
Type: "session_create",
|
||||||
Name: input.Name,
|
Data: map[string]any{"name": input.Name},
|
||||||
Status: "creating",
|
})
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
s.addSession(session)
|
|
||||||
s.recordActivity("session_create", "created session "+session.ID)
|
|
||||||
return nil, SessionCreateOutput{
|
return nil, SessionCreateOutput{
|
||||||
Session: session,
|
Session: Session{
|
||||||
|
Name: input.Name,
|
||||||
|
Status: "creating",
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// planStatus returns the local best-effort session status and refreshes the
|
// planStatus requests plan status from the Laravel backend.
|
||||||
// Laravel backend when the bridge is available.
|
// Stub implementation: sends request via bridge, returns "unknown" status. Awaiting Laravel backend.
|
||||||
func (s *Subsystem) planStatus(_ context.Context, _ *mcp.CallToolRequest, input PlanStatusInput) (*mcp.CallToolResult, PlanStatusOutput, error) {
|
func (s *Subsystem) planStatus(_ context.Context, _ *mcp.CallToolRequest, input PlanStatusInput) (*mcp.CallToolResult, PlanStatusOutput, error) {
|
||||||
if s.bridge != nil {
|
if s.bridge == nil {
|
||||||
_ = s.bridge.Send(BridgeMessage{
|
return nil, PlanStatusOutput{}, errBridgeNotAvailable
|
||||||
Type: "plan_status",
|
|
||||||
SessionID: input.SessionID,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
s.stateMu.Lock()
|
|
||||||
session, ok := s.sessions[input.SessionID]
|
|
||||||
s.stateMu.Unlock()
|
|
||||||
|
|
||||||
status := "unknown"
|
|
||||||
if ok && session.Status != "" {
|
|
||||||
status = session.Status
|
|
||||||
}
|
}
|
||||||
|
_ = s.bridge.Send(BridgeMessage{
|
||||||
|
Type: "plan_status",
|
||||||
|
SessionID: input.SessionID,
|
||||||
|
})
|
||||||
return nil, PlanStatusOutput{
|
return nil, PlanStatusOutput{
|
||||||
SessionID: input.SessionID,
|
SessionID: input.SessionID,
|
||||||
Status: status,
|
Status: "unknown",
|
||||||
Steps: []PlanStep{},
|
Steps: []PlanStep{},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,25 +1,18 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package ide
|
package ide
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Dashboard tool input/output types.
|
// Dashboard tool input/output types.
|
||||||
|
|
||||||
// DashboardOverviewInput is the input for ide_dashboard_overview.
|
// DashboardOverviewInput is the input for ide_dashboard_overview.
|
||||||
//
|
|
||||||
// input := DashboardOverviewInput{}
|
|
||||||
type DashboardOverviewInput struct{}
|
type DashboardOverviewInput struct{}
|
||||||
|
|
||||||
// DashboardOverview contains high-level platform stats.
|
// DashboardOverview contains high-level platform stats.
|
||||||
//
|
|
||||||
// overview := DashboardOverview{Repos: 12, ActiveSessions: 3}
|
|
||||||
type DashboardOverview struct {
|
type DashboardOverview struct {
|
||||||
Repos int `json:"repos"`
|
Repos int `json:"repos"`
|
||||||
Services int `json:"services"`
|
Services int `json:"services"`
|
||||||
|
|
@ -29,22 +22,16 @@ type DashboardOverview struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DashboardOverviewOutput is the output for ide_dashboard_overview.
|
// DashboardOverviewOutput is the output for ide_dashboard_overview.
|
||||||
//
|
|
||||||
// // out.Overview.BridgeOnline reports bridge connectivity
|
|
||||||
type DashboardOverviewOutput struct {
|
type DashboardOverviewOutput struct {
|
||||||
Overview DashboardOverview `json:"overview"`
|
Overview DashboardOverview `json:"overview"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// DashboardActivityInput is the input for ide_dashboard_activity.
|
// DashboardActivityInput is the input for ide_dashboard_activity.
|
||||||
//
|
|
||||||
// input := DashboardActivityInput{Limit: 25}
|
|
||||||
type DashboardActivityInput struct {
|
type DashboardActivityInput struct {
|
||||||
Limit int `json:"limit,omitempty"`
|
Limit int `json:"limit,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ActivityEvent represents a single activity feed item.
|
// ActivityEvent represents a single activity feed item.
|
||||||
//
|
|
||||||
// event := ActivityEvent{Type: "build", Message: "build finished"}
|
|
||||||
type ActivityEvent struct {
|
type ActivityEvent struct {
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
Message string `json:"message"`
|
Message string `json:"message"`
|
||||||
|
|
@ -52,22 +39,16 @@ type ActivityEvent struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DashboardActivityOutput is the output for ide_dashboard_activity.
|
// DashboardActivityOutput is the output for ide_dashboard_activity.
|
||||||
//
|
|
||||||
// // out.Events contains the recent activity feed
|
|
||||||
type DashboardActivityOutput struct {
|
type DashboardActivityOutput struct {
|
||||||
Events []ActivityEvent `json:"events"`
|
Events []ActivityEvent `json:"events"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// DashboardMetricsInput is the input for ide_dashboard_metrics.
|
// DashboardMetricsInput is the input for ide_dashboard_metrics.
|
||||||
//
|
|
||||||
// input := DashboardMetricsInput{Period: "24h"}
|
|
||||||
type DashboardMetricsInput struct {
|
type DashboardMetricsInput struct {
|
||||||
Period string `json:"period,omitempty"` // "1h", "24h", "7d"
|
Period string `json:"period,omitempty"` // "1h", "24h", "7d"
|
||||||
}
|
}
|
||||||
|
|
||||||
// DashboardMetrics contains aggregate metrics.
|
// DashboardMetrics contains aggregate metrics.
|
||||||
//
|
|
||||||
// metrics := DashboardMetrics{BuildsTotal: 42, SuccessRate: 0.95}
|
|
||||||
type DashboardMetrics struct {
|
type DashboardMetrics struct {
|
||||||
BuildsTotal int `json:"buildsTotal"`
|
BuildsTotal int `json:"buildsTotal"`
|
||||||
BuildsSuccess int `json:"buildsSuccess"`
|
BuildsSuccess int `json:"buildsSuccess"`
|
||||||
|
|
@ -79,38 +60,32 @@ type DashboardMetrics struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DashboardMetricsOutput is the output for ide_dashboard_metrics.
|
// DashboardMetricsOutput is the output for ide_dashboard_metrics.
|
||||||
//
|
|
||||||
// // out.Metrics summarises the selected time window
|
|
||||||
type DashboardMetricsOutput struct {
|
type DashboardMetricsOutput struct {
|
||||||
Period string `json:"period"`
|
Period string `json:"period"`
|
||||||
Metrics DashboardMetrics `json:"metrics"`
|
Metrics DashboardMetrics `json:"metrics"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Subsystem) registerDashboardTools(svc *coremcp.Service) {
|
func (s *Subsystem) registerDashboardTools(server *mcp.Server) {
|
||||||
server := svc.Server()
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
|
||||||
Name: "ide_dashboard_overview",
|
Name: "ide_dashboard_overview",
|
||||||
Description: "Get a high-level overview of the platform (repos, services, sessions, builds)",
|
Description: "Get a high-level overview of the platform (repos, services, sessions, builds)",
|
||||||
}, s.dashboardOverview)
|
}, s.dashboardOverview)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ide_dashboard_activity",
|
Name: "ide_dashboard_activity",
|
||||||
Description: "Get the recent activity feed",
|
Description: "Get the recent activity feed",
|
||||||
}, s.dashboardActivity)
|
}, s.dashboardActivity)
|
||||||
|
|
||||||
coremcp.AddToolRecorded(svc, server, "ide", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ide_dashboard_metrics",
|
Name: "ide_dashboard_metrics",
|
||||||
Description: "Get aggregate build and agent metrics for a time period",
|
Description: "Get aggregate build and agent metrics for a time period",
|
||||||
}, s.dashboardMetrics)
|
}, s.dashboardMetrics)
|
||||||
}
|
}
|
||||||
|
|
||||||
// dashboardOverview returns a platform overview with bridge status and
|
// dashboardOverview returns a platform overview with bridge status.
|
||||||
// locally tracked session counts.
|
// Stub implementation: only BridgeOnline is live; other fields return zero values. Awaiting Laravel backend.
|
||||||
func (s *Subsystem) dashboardOverview(_ context.Context, _ *mcp.CallToolRequest, _ DashboardOverviewInput) (*mcp.CallToolResult, DashboardOverviewOutput, error) {
|
func (s *Subsystem) dashboardOverview(_ context.Context, _ *mcp.CallToolRequest, _ DashboardOverviewInput) (*mcp.CallToolResult, DashboardOverviewOutput, error) {
|
||||||
connected := s.bridge != nil && s.bridge.Connected()
|
connected := s.bridge != nil && s.bridge.Connected()
|
||||||
activeSessions := len(s.listSessions())
|
|
||||||
builds := s.listBuilds("", 0)
|
|
||||||
repos := s.buildRepoCount()
|
|
||||||
|
|
||||||
if s.bridge != nil {
|
if s.bridge != nil {
|
||||||
_ = s.bridge.Send(BridgeMessage{Type: "dashboard_overview"})
|
_ = s.bridge.Send(BridgeMessage{Type: "dashboard_overview"})
|
||||||
|
|
@ -118,96 +93,40 @@ func (s *Subsystem) dashboardOverview(_ context.Context, _ *mcp.CallToolRequest,
|
||||||
|
|
||||||
return nil, DashboardOverviewOutput{
|
return nil, DashboardOverviewOutput{
|
||||||
Overview: DashboardOverview{
|
Overview: DashboardOverview{
|
||||||
Repos: repos,
|
BridgeOnline: connected,
|
||||||
Services: len(builds),
|
|
||||||
ActiveSessions: activeSessions,
|
|
||||||
RecentBuilds: len(builds),
|
|
||||||
BridgeOnline: connected,
|
|
||||||
},
|
},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// dashboardActivity returns the local activity feed and refreshes the Laravel
|
// dashboardActivity requests the activity feed from the Laravel backend.
|
||||||
// backend when the bridge is available.
|
// Stub implementation: sends request via bridge, returns empty events. Awaiting Laravel backend.
|
||||||
func (s *Subsystem) dashboardActivity(_ context.Context, _ *mcp.CallToolRequest, input DashboardActivityInput) (*mcp.CallToolResult, DashboardActivityOutput, error) {
|
func (s *Subsystem) dashboardActivity(_ context.Context, _ *mcp.CallToolRequest, input DashboardActivityInput) (*mcp.CallToolResult, DashboardActivityOutput, error) {
|
||||||
if s.bridge != nil {
|
if s.bridge == nil {
|
||||||
_ = s.bridge.Send(BridgeMessage{
|
return nil, DashboardActivityOutput{}, errBridgeNotAvailable
|
||||||
Type: "dashboard_activity",
|
|
||||||
Data: map[string]any{"limit": input.Limit},
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
return nil, DashboardActivityOutput{Events: s.activityFeed(input.Limit)}, nil
|
_ = s.bridge.Send(BridgeMessage{
|
||||||
|
Type: "dashboard_activity",
|
||||||
|
Data: map[string]any{"limit": input.Limit},
|
||||||
|
})
|
||||||
|
return nil, DashboardActivityOutput{Events: []ActivityEvent{}}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// dashboardMetrics returns local session and message counts and refreshes the
|
// dashboardMetrics requests aggregate metrics from the Laravel backend.
|
||||||
// Laravel backend when the bridge is available.
|
// Stub implementation: sends request via bridge, returns zero metrics. Awaiting Laravel backend.
|
||||||
func (s *Subsystem) dashboardMetrics(_ context.Context, _ *mcp.CallToolRequest, input DashboardMetricsInput) (*mcp.CallToolResult, DashboardMetricsOutput, error) {
|
func (s *Subsystem) dashboardMetrics(_ context.Context, _ *mcp.CallToolRequest, input DashboardMetricsInput) (*mcp.CallToolResult, DashboardMetricsOutput, error) {
|
||||||
|
if s.bridge == nil {
|
||||||
|
return nil, DashboardMetricsOutput{}, errBridgeNotAvailable
|
||||||
|
}
|
||||||
period := input.Period
|
period := input.Period
|
||||||
if period == "" {
|
if period == "" {
|
||||||
period = "24h"
|
period = "24h"
|
||||||
}
|
}
|
||||||
if s.bridge != nil {
|
_ = s.bridge.Send(BridgeMessage{
|
||||||
_ = s.bridge.Send(BridgeMessage{
|
Type: "dashboard_metrics",
|
||||||
Type: "dashboard_metrics",
|
Data: map[string]any{"period": period},
|
||||||
Data: map[string]any{"period": period},
|
})
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
s.stateMu.Lock()
|
|
||||||
sessions := len(s.sessions)
|
|
||||||
messages := 0
|
|
||||||
builds := make([]BuildInfo, 0, len(s.buildOrder))
|
|
||||||
for _, id := range s.buildOrder {
|
|
||||||
if build, ok := s.builds[id]; ok {
|
|
||||||
builds = append(builds, build)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, history := range s.chats {
|
|
||||||
messages += len(history)
|
|
||||||
}
|
|
||||||
s.stateMu.Unlock()
|
|
||||||
|
|
||||||
total := len(builds)
|
|
||||||
success := 0
|
|
||||||
failed := 0
|
|
||||||
var durationTotal time.Duration
|
|
||||||
var durationCount int
|
|
||||||
for _, build := range builds {
|
|
||||||
switch build.Status {
|
|
||||||
case "success", "succeeded", "completed", "passed":
|
|
||||||
success++
|
|
||||||
case "failed", "error":
|
|
||||||
failed++
|
|
||||||
}
|
|
||||||
if build.Duration == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if d, err := time.ParseDuration(build.Duration); err == nil {
|
|
||||||
durationTotal += d
|
|
||||||
durationCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
avgBuildTime := ""
|
|
||||||
if durationCount > 0 {
|
|
||||||
avgBuildTime = (durationTotal / time.Duration(durationCount)).String()
|
|
||||||
}
|
|
||||||
|
|
||||||
successRate := 0.0
|
|
||||||
if total > 0 {
|
|
||||||
successRate = float64(success) / float64(total)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, DashboardMetricsOutput{
|
return nil, DashboardMetricsOutput{
|
||||||
Period: period,
|
Period: period,
|
||||||
Metrics: DashboardMetrics{
|
Metrics: DashboardMetrics{},
|
||||||
BuildsTotal: total,
|
|
||||||
BuildsSuccess: success,
|
|
||||||
BuildsFailed: failed,
|
|
||||||
AvgBuildTime: avgBuildTime,
|
|
||||||
AgentSessions: sessions,
|
|
||||||
MessagesTotal: messages,
|
|
||||||
SuccessRate: successRate,
|
|
||||||
},
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -8,7 +8,6 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
coremcp "dappco.re/go/mcp/pkg/mcp"
|
|
||||||
"forge.lthn.ai/core/go-ws"
|
"forge.lthn.ai/core/go-ws"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -16,17 +15,7 @@ import (
|
||||||
|
|
||||||
// newNilBridgeSubsystem returns a Subsystem with no hub/bridge (headless mode).
|
// newNilBridgeSubsystem returns a Subsystem with no hub/bridge (headless mode).
|
||||||
func newNilBridgeSubsystem() *Subsystem {
|
func newNilBridgeSubsystem() *Subsystem {
|
||||||
return New(nil, Config{})
|
return New(nil)
|
||||||
}
|
|
||||||
|
|
||||||
type recordingNotifier struct {
|
|
||||||
channel string
|
|
||||||
data any
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *recordingNotifier) ChannelSend(_ context.Context, channel string, data any) {
|
|
||||||
r.channel = channel
|
|
||||||
r.data = data
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// newConnectedSubsystem returns a Subsystem with a connected bridge and a
|
// newConnectedSubsystem returns a Subsystem with a connected bridge and a
|
||||||
|
|
@ -53,10 +42,10 @@ func newConnectedSubsystem(t *testing.T) (*Subsystem, context.CancelFunc, *httpt
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
go hub.Run(ctx)
|
go hub.Run(ctx)
|
||||||
|
|
||||||
sub := New(hub, Config{
|
sub := New(hub,
|
||||||
LaravelWSURL: wsURL(ts),
|
WithLaravelURL(wsURL(ts)),
|
||||||
ReconnectInterval: 50 * time.Millisecond,
|
WithReconnectInterval(50*time.Millisecond),
|
||||||
})
|
)
|
||||||
sub.StartBridge(ctx)
|
sub.StartBridge(ctx)
|
||||||
|
|
||||||
waitConnected(t, sub.Bridge(), 2*time.Second)
|
waitConnected(t, sub.Bridge(), 2*time.Second)
|
||||||
|
|
@ -101,90 +90,56 @@ func TestChatSend_Good_Connected(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestChatHistory_Good_NilBridge verifies chatHistory returns local cache without a bridge.
|
// TestChatHistory_Bad_NilBridge verifies chatHistory returns error without a bridge.
|
||||||
func TestChatHistory_Good_NilBridge(t *testing.T) {
|
func TestChatHistory_Bad_NilBridge(t *testing.T) {
|
||||||
sub := newNilBridgeSubsystem()
|
sub := newNilBridgeSubsystem()
|
||||||
_, out, err := sub.chatHistory(context.Background(), nil, ChatHistoryInput{
|
_, _, err := sub.chatHistory(context.Background(), nil, ChatHistoryInput{
|
||||||
SessionID: "s1",
|
SessionID: "s1",
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err == nil {
|
||||||
t.Fatalf("chatHistory failed: %v", err)
|
t.Error("expected error when bridge is nil")
|
||||||
}
|
|
||||||
if out.SessionID != "s1" {
|
|
||||||
t.Errorf("expected sessionId 's1', got %q", out.SessionID)
|
|
||||||
}
|
|
||||||
if out.Messages == nil {
|
|
||||||
t.Error("expected non-nil messages slice")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestChatHistory_Good_Connected verifies chatHistory succeeds and returns stored messages.
|
// TestChatHistory_Good_Connected verifies chatHistory succeeds and returns empty messages.
|
||||||
func TestChatHistory_Good_Connected(t *testing.T) {
|
func TestChatHistory_Good_Connected(t *testing.T) {
|
||||||
sub, cancel, ts := newConnectedSubsystem(t)
|
sub, cancel, ts := newConnectedSubsystem(t)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
defer ts.Close()
|
defer ts.Close()
|
||||||
|
|
||||||
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
|
||||||
Name: "history-test",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("sessionCreate failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, err = sub.chatSend(context.Background(), nil, ChatSendInput{
|
|
||||||
SessionID: sub.listSessions()[0].ID,
|
|
||||||
Message: "hello history",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("chatSend failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := sub.chatHistory(context.Background(), nil, ChatHistoryInput{
|
_, out, err := sub.chatHistory(context.Background(), nil, ChatHistoryInput{
|
||||||
SessionID: sub.listSessions()[0].ID,
|
SessionID: "sess-1",
|
||||||
Limit: 50,
|
Limit: 50,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("chatHistory failed: %v", err)
|
t.Fatalf("chatHistory failed: %v", err)
|
||||||
}
|
}
|
||||||
if out.SessionID != sub.listSessions()[0].ID {
|
if out.SessionID != "sess-1" {
|
||||||
t.Errorf("expected sessionId %q, got %q", sub.listSessions()[0].ID, out.SessionID)
|
t.Errorf("expected sessionId 'sess-1', got %q", out.SessionID)
|
||||||
}
|
}
|
||||||
if out.Messages == nil {
|
if out.Messages == nil {
|
||||||
t.Error("expected non-nil messages slice")
|
t.Error("expected non-nil messages slice")
|
||||||
}
|
}
|
||||||
if len(out.Messages) != 1 {
|
if len(out.Messages) != 0 {
|
||||||
t.Errorf("expected 1 stored message, got %d", len(out.Messages))
|
t.Errorf("expected 0 messages (stub), got %d", len(out.Messages))
|
||||||
}
|
|
||||||
if out.Messages[0].Content != "hello history" {
|
|
||||||
t.Errorf("expected stored message content %q, got %q", "hello history", out.Messages[0].Content)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestSessionList_Good_NilBridge verifies sessionList returns local sessions without a bridge.
|
// TestSessionList_Bad_NilBridge verifies sessionList returns error without a bridge.
|
||||||
func TestSessionList_Good_NilBridge(t *testing.T) {
|
func TestSessionList_Bad_NilBridge(t *testing.T) {
|
||||||
sub := newNilBridgeSubsystem()
|
sub := newNilBridgeSubsystem()
|
||||||
_, out, err := sub.sessionList(context.Background(), nil, SessionListInput{})
|
_, _, err := sub.sessionList(context.Background(), nil, SessionListInput{})
|
||||||
if err != nil {
|
if err == nil {
|
||||||
t.Fatalf("sessionList failed: %v", err)
|
t.Error("expected error when bridge is nil")
|
||||||
}
|
|
||||||
if out.Sessions == nil {
|
|
||||||
t.Error("expected non-nil sessions slice")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestSessionList_Good_Connected verifies sessionList returns stored sessions.
|
// TestSessionList_Good_Connected verifies sessionList returns empty sessions.
|
||||||
func TestSessionList_Good_Connected(t *testing.T) {
|
func TestSessionList_Good_Connected(t *testing.T) {
|
||||||
sub, cancel, ts := newConnectedSubsystem(t)
|
sub, cancel, ts := newConnectedSubsystem(t)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
defer ts.Close()
|
defer ts.Close()
|
||||||
|
|
||||||
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
|
||||||
Name: "session-list-test",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("sessionCreate failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := sub.sessionList(context.Background(), nil, SessionListInput{})
|
_, out, err := sub.sessionList(context.Background(), nil, SessionListInput{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("sessionList failed: %v", err)
|
t.Fatalf("sessionList failed: %v", err)
|
||||||
|
|
@ -192,32 +147,23 @@ func TestSessionList_Good_Connected(t *testing.T) {
|
||||||
if out.Sessions == nil {
|
if out.Sessions == nil {
|
||||||
t.Error("expected non-nil sessions slice")
|
t.Error("expected non-nil sessions slice")
|
||||||
}
|
}
|
||||||
if len(out.Sessions) != 1 {
|
if len(out.Sessions) != 0 {
|
||||||
t.Errorf("expected 1 stored session, got %d", len(out.Sessions))
|
t.Errorf("expected 0 sessions (stub), got %d", len(out.Sessions))
|
||||||
}
|
|
||||||
if out.Sessions[0].ID == "" {
|
|
||||||
t.Error("expected stored session to have an ID")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestSessionCreate_Good_NilBridge verifies sessionCreate stores a local session without a bridge.
|
// TestSessionCreate_Bad_NilBridge verifies sessionCreate returns error without a bridge.
|
||||||
func TestSessionCreate_Good_NilBridge(t *testing.T) {
|
func TestSessionCreate_Bad_NilBridge(t *testing.T) {
|
||||||
sub := newNilBridgeSubsystem()
|
sub := newNilBridgeSubsystem()
|
||||||
_, out, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
||||||
Name: "test",
|
Name: "test",
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err == nil {
|
||||||
t.Fatalf("sessionCreate failed: %v", err)
|
t.Error("expected error when bridge is nil")
|
||||||
}
|
|
||||||
if out.Session.Name != "test" {
|
|
||||||
t.Errorf("expected session name 'test', got %q", out.Session.Name)
|
|
||||||
}
|
|
||||||
if out.Session.ID == "" {
|
|
||||||
t.Error("expected non-empty session ID")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestSessionCreate_Good_Connected verifies sessionCreate returns a stored session.
|
// TestSessionCreate_Good_Connected verifies sessionCreate returns a session stub.
|
||||||
func TestSessionCreate_Good_Connected(t *testing.T) {
|
func TestSessionCreate_Good_Connected(t *testing.T) {
|
||||||
sub, cancel, ts := newConnectedSubsystem(t)
|
sub, cancel, ts := newConnectedSubsystem(t)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
@ -238,52 +184,36 @@ func TestSessionCreate_Good_Connected(t *testing.T) {
|
||||||
if out.Session.CreatedAt.IsZero() {
|
if out.Session.CreatedAt.IsZero() {
|
||||||
t.Error("expected non-zero CreatedAt")
|
t.Error("expected non-zero CreatedAt")
|
||||||
}
|
}
|
||||||
if out.Session.ID == "" {
|
|
||||||
t.Error("expected non-empty session ID")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestPlanStatus_Good_NilBridge verifies planStatus returns local status without a bridge.
|
// TestPlanStatus_Bad_NilBridge verifies planStatus returns error without a bridge.
|
||||||
func TestPlanStatus_Good_NilBridge(t *testing.T) {
|
func TestPlanStatus_Bad_NilBridge(t *testing.T) {
|
||||||
sub := newNilBridgeSubsystem()
|
sub := newNilBridgeSubsystem()
|
||||||
_, out, err := sub.planStatus(context.Background(), nil, PlanStatusInput{
|
_, _, err := sub.planStatus(context.Background(), nil, PlanStatusInput{
|
||||||
SessionID: "s1",
|
SessionID: "s1",
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err == nil {
|
||||||
t.Fatalf("planStatus failed: %v", err)
|
t.Error("expected error when bridge is nil")
|
||||||
}
|
|
||||||
if out.SessionID != "s1" {
|
|
||||||
t.Errorf("expected sessionId 's1', got %q", out.SessionID)
|
|
||||||
}
|
|
||||||
if out.Status != "unknown" {
|
|
||||||
t.Errorf("expected status 'unknown', got %q", out.Status)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestPlanStatus_Good_Connected verifies planStatus returns a status for a known session.
|
// TestPlanStatus_Good_Connected verifies planStatus returns a stub status.
|
||||||
func TestPlanStatus_Good_Connected(t *testing.T) {
|
func TestPlanStatus_Good_Connected(t *testing.T) {
|
||||||
sub, cancel, ts := newConnectedSubsystem(t)
|
sub, cancel, ts := newConnectedSubsystem(t)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
defer ts.Close()
|
defer ts.Close()
|
||||||
|
|
||||||
_, createOut, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
|
||||||
Name: "plan-status-test",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("sessionCreate failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := sub.planStatus(context.Background(), nil, PlanStatusInput{
|
_, out, err := sub.planStatus(context.Background(), nil, PlanStatusInput{
|
||||||
SessionID: createOut.Session.ID,
|
SessionID: "sess-7",
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("planStatus failed: %v", err)
|
t.Fatalf("planStatus failed: %v", err)
|
||||||
}
|
}
|
||||||
if out.SessionID != createOut.Session.ID {
|
if out.SessionID != "sess-7" {
|
||||||
t.Errorf("expected sessionId %q, got %q", createOut.Session.ID, out.SessionID)
|
t.Errorf("expected sessionId 'sess-7', got %q", out.SessionID)
|
||||||
}
|
}
|
||||||
if out.Status != "creating" {
|
if out.Status != "unknown" {
|
||||||
t.Errorf("expected status 'creating', got %q", out.Status)
|
t.Errorf("expected status 'unknown', got %q", out.Status)
|
||||||
}
|
}
|
||||||
if out.Steps == nil {
|
if out.Steps == nil {
|
||||||
t.Error("expected non-nil steps slice")
|
t.Error("expected non-nil steps slice")
|
||||||
|
|
@ -292,20 +222,14 @@ func TestPlanStatus_Good_Connected(t *testing.T) {
|
||||||
|
|
||||||
// --- 4.3: Build tool tests ---
|
// --- 4.3: Build tool tests ---
|
||||||
|
|
||||||
// TestBuildStatus_Good_NilBridge verifies buildStatus returns a local stub without a bridge.
|
// TestBuildStatus_Bad_NilBridge verifies buildStatus returns error without a bridge.
|
||||||
func TestBuildStatus_Good_NilBridge(t *testing.T) {
|
func TestBuildStatus_Bad_NilBridge(t *testing.T) {
|
||||||
sub := newNilBridgeSubsystem()
|
sub := newNilBridgeSubsystem()
|
||||||
_, out, err := sub.buildStatus(context.Background(), nil, BuildStatusInput{
|
_, _, err := sub.buildStatus(context.Background(), nil, BuildStatusInput{
|
||||||
BuildID: "b1",
|
BuildID: "b1",
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err == nil {
|
||||||
t.Fatalf("buildStatus failed: %v", err)
|
t.Error("expected error when bridge is nil")
|
||||||
}
|
|
||||||
if out.Build.ID != "b1" {
|
|
||||||
t.Errorf("expected build ID 'b1', got %q", out.Build.ID)
|
|
||||||
}
|
|
||||||
if out.Build.Status != "unknown" {
|
|
||||||
t.Errorf("expected status 'unknown', got %q", out.Build.Status)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -329,74 +253,15 @@ func TestBuildStatus_Good_Connected(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestBuildStatus_Good_EmitsLifecycle verifies bridge updates broadcast build lifecycle events.
|
// TestBuildList_Bad_NilBridge verifies buildList returns error without a bridge.
|
||||||
func TestBuildStatus_Good_EmitsLifecycle(t *testing.T) {
|
func TestBuildList_Bad_NilBridge(t *testing.T) {
|
||||||
sub := newNilBridgeSubsystem()
|
sub := newNilBridgeSubsystem()
|
||||||
notifier := &recordingNotifier{}
|
_, _, err := sub.buildList(context.Background(), nil, BuildListInput{
|
||||||
sub.SetNotifier(notifier)
|
|
||||||
|
|
||||||
sub.handleBridgeMessage(BridgeMessage{
|
|
||||||
Type: "build_status",
|
|
||||||
Data: map[string]any{
|
|
||||||
"buildId": "build-1",
|
|
||||||
"repo": "core-php",
|
|
||||||
"branch": "main",
|
|
||||||
"status": "success",
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
if notifier.channel != coremcp.ChannelBuildComplete {
|
|
||||||
t.Fatalf("expected %s channel, got %q", coremcp.ChannelBuildComplete, notifier.channel)
|
|
||||||
}
|
|
||||||
payload, ok := notifier.data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected payload map, got %T", notifier.data)
|
|
||||||
}
|
|
||||||
if payload["id"] != "build-1" {
|
|
||||||
t.Fatalf("expected build id build-1, got %v", payload["id"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestBuildStatus_Good_EmitsStartLifecycle verifies running builds broadcast a start event.
|
|
||||||
func TestBuildStatus_Good_EmitsStartLifecycle(t *testing.T) {
|
|
||||||
sub := newNilBridgeSubsystem()
|
|
||||||
notifier := &recordingNotifier{}
|
|
||||||
sub.SetNotifier(notifier)
|
|
||||||
|
|
||||||
sub.handleBridgeMessage(BridgeMessage{
|
|
||||||
Type: "build_status",
|
|
||||||
Data: map[string]any{
|
|
||||||
"buildId": "build-2",
|
|
||||||
"repo": "core-php",
|
|
||||||
"branch": "main",
|
|
||||||
"status": "running",
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
if notifier.channel != coremcp.ChannelBuildStart {
|
|
||||||
t.Fatalf("expected %s channel, got %q", coremcp.ChannelBuildStart, notifier.channel)
|
|
||||||
}
|
|
||||||
payload, ok := notifier.data.(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected payload map, got %T", notifier.data)
|
|
||||||
}
|
|
||||||
if payload["id"] != "build-2" {
|
|
||||||
t.Fatalf("expected build id build-2, got %v", payload["id"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestBuildList_Good_NilBridge verifies buildList returns an empty list without a bridge.
|
|
||||||
func TestBuildList_Good_NilBridge(t *testing.T) {
|
|
||||||
sub := newNilBridgeSubsystem()
|
|
||||||
_, out, err := sub.buildList(context.Background(), nil, BuildListInput{
|
|
||||||
Repo: "core-php",
|
Repo: "core-php",
|
||||||
Limit: 10,
|
Limit: 10,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err == nil {
|
||||||
t.Fatalf("buildList failed: %v", err)
|
t.Error("expected error when bridge is nil")
|
||||||
}
|
|
||||||
if out.Builds == nil {
|
|
||||||
t.Error("expected non-nil builds slice")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -421,21 +286,15 @@ func TestBuildList_Good_Connected(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestBuildLogs_Good_NilBridge verifies buildLogs returns empty lines without a bridge.
|
// TestBuildLogs_Bad_NilBridge verifies buildLogs returns error without a bridge.
|
||||||
func TestBuildLogs_Good_NilBridge(t *testing.T) {
|
func TestBuildLogs_Bad_NilBridge(t *testing.T) {
|
||||||
sub := newNilBridgeSubsystem()
|
sub := newNilBridgeSubsystem()
|
||||||
_, out, err := sub.buildLogs(context.Background(), nil, BuildLogsInput{
|
_, _, err := sub.buildLogs(context.Background(), nil, BuildLogsInput{
|
||||||
BuildID: "b1",
|
BuildID: "b1",
|
||||||
Tail: 100,
|
Tail: 100,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err == nil {
|
||||||
t.Fatalf("buildLogs failed: %v", err)
|
t.Error("expected error when bridge is nil")
|
||||||
}
|
|
||||||
if out.BuildID != "b1" {
|
|
||||||
t.Errorf("expected buildId 'b1', got %q", out.BuildID)
|
|
||||||
}
|
|
||||||
if out.Lines == nil {
|
|
||||||
t.Error("expected non-nil lines slice")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -478,19 +337,12 @@ func TestDashboardOverview_Good_NilBridge(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestDashboardOverview_Good_Connected verifies dashboardOverview reports bridge online and local sessions.
|
// TestDashboardOverview_Good_Connected verifies dashboardOverview reports bridge online.
|
||||||
func TestDashboardOverview_Good_Connected(t *testing.T) {
|
func TestDashboardOverview_Good_Connected(t *testing.T) {
|
||||||
sub, cancel, ts := newConnectedSubsystem(t)
|
sub, cancel, ts := newConnectedSubsystem(t)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
defer ts.Close()
|
defer ts.Close()
|
||||||
|
|
||||||
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
|
||||||
Name: "dashboard-test",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("sessionCreate failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := sub.dashboardOverview(context.Background(), nil, DashboardOverviewInput{})
|
_, out, err := sub.dashboardOverview(context.Background(), nil, DashboardOverviewInput{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("dashboardOverview failed: %v", err)
|
t.Fatalf("dashboardOverview failed: %v", err)
|
||||||
|
|
@ -498,38 +350,25 @@ func TestDashboardOverview_Good_Connected(t *testing.T) {
|
||||||
if !out.Overview.BridgeOnline {
|
if !out.Overview.BridgeOnline {
|
||||||
t.Error("expected BridgeOnline=true when bridge is connected")
|
t.Error("expected BridgeOnline=true when bridge is connected")
|
||||||
}
|
}
|
||||||
if out.Overview.ActiveSessions != 1 {
|
|
||||||
t.Errorf("expected 1 active session, got %d", out.Overview.ActiveSessions)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestDashboardActivity_Good_NilBridge verifies dashboardActivity returns local activity without bridge.
|
// TestDashboardActivity_Bad_NilBridge verifies dashboardActivity returns error without bridge.
|
||||||
func TestDashboardActivity_Good_NilBridge(t *testing.T) {
|
func TestDashboardActivity_Bad_NilBridge(t *testing.T) {
|
||||||
sub := newNilBridgeSubsystem()
|
sub := newNilBridgeSubsystem()
|
||||||
_, out, err := sub.dashboardActivity(context.Background(), nil, DashboardActivityInput{
|
_, _, err := sub.dashboardActivity(context.Background(), nil, DashboardActivityInput{
|
||||||
Limit: 10,
|
Limit: 10,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err == nil {
|
||||||
t.Fatalf("dashboardActivity failed: %v", err)
|
t.Error("expected error when bridge is nil")
|
||||||
}
|
|
||||||
if out.Events == nil {
|
|
||||||
t.Error("expected non-nil events slice")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestDashboardActivity_Good_Connected verifies dashboardActivity returns stored events.
|
// TestDashboardActivity_Good_Connected verifies dashboardActivity returns empty events.
|
||||||
func TestDashboardActivity_Good_Connected(t *testing.T) {
|
func TestDashboardActivity_Good_Connected(t *testing.T) {
|
||||||
sub, cancel, ts := newConnectedSubsystem(t)
|
sub, cancel, ts := newConnectedSubsystem(t)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
defer ts.Close()
|
defer ts.Close()
|
||||||
|
|
||||||
_, _, err := sub.sessionCreate(context.Background(), nil, SessionCreateInput{
|
|
||||||
Name: "activity-test",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("sessionCreate failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := sub.dashboardActivity(context.Background(), nil, DashboardActivityInput{
|
_, out, err := sub.dashboardActivity(context.Background(), nil, DashboardActivityInput{
|
||||||
Limit: 20,
|
Limit: 20,
|
||||||
})
|
})
|
||||||
|
|
@ -539,25 +378,19 @@ func TestDashboardActivity_Good_Connected(t *testing.T) {
|
||||||
if out.Events == nil {
|
if out.Events == nil {
|
||||||
t.Error("expected non-nil events slice")
|
t.Error("expected non-nil events slice")
|
||||||
}
|
}
|
||||||
if len(out.Events) != 1 {
|
if len(out.Events) != 0 {
|
||||||
t.Errorf("expected 1 stored event, got %d", len(out.Events))
|
t.Errorf("expected 0 events (stub), got %d", len(out.Events))
|
||||||
}
|
|
||||||
if len(out.Events) > 0 && out.Events[0].Type != "session_create" {
|
|
||||||
t.Errorf("expected first event type 'session_create', got %q", out.Events[0].Type)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestDashboardMetrics_Good_NilBridge verifies dashboardMetrics returns local metrics without bridge.
|
// TestDashboardMetrics_Bad_NilBridge verifies dashboardMetrics returns error without bridge.
|
||||||
func TestDashboardMetrics_Good_NilBridge(t *testing.T) {
|
func TestDashboardMetrics_Bad_NilBridge(t *testing.T) {
|
||||||
sub := newNilBridgeSubsystem()
|
sub := newNilBridgeSubsystem()
|
||||||
_, out, err := sub.dashboardMetrics(context.Background(), nil, DashboardMetricsInput{
|
_, _, err := sub.dashboardMetrics(context.Background(), nil, DashboardMetricsInput{
|
||||||
Period: "1h",
|
Period: "1h",
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err == nil {
|
||||||
t.Fatalf("dashboardMetrics failed: %v", err)
|
t.Error("expected error when bridge is nil")
|
||||||
}
|
|
||||||
if out.Period != "1h" {
|
|
||||||
t.Errorf("expected period '1h', got %q", out.Period)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -857,7 +690,7 @@ func TestSubsystem_Good_RegisterTools(t *testing.T) {
|
||||||
// RegisterTools requires a real mcp.Server which is complex to construct
|
// RegisterTools requires a real mcp.Server which is complex to construct
|
||||||
// in isolation. This test verifies the Subsystem can be created and
|
// in isolation. This test verifies the Subsystem can be created and
|
||||||
// the Bridge/Shutdown path works end-to-end.
|
// the Bridge/Shutdown path works end-to-end.
|
||||||
sub := New(nil, Config{})
|
sub := New(nil)
|
||||||
if sub.Bridge() != nil {
|
if sub.Bridge() != nil {
|
||||||
t.Error("expected nil bridge with nil hub")
|
t.Error("expected nil bridge with nil hub")
|
||||||
}
|
}
|
||||||
|
|
@ -868,32 +701,32 @@ func TestSubsystem_Good_RegisterTools(t *testing.T) {
|
||||||
|
|
||||||
// TestSubsystem_Good_StartBridgeNilHub verifies StartBridge is a no-op with nil hub.
|
// TestSubsystem_Good_StartBridgeNilHub verifies StartBridge is a no-op with nil hub.
|
||||||
func TestSubsystem_Good_StartBridgeNilHub(t *testing.T) {
|
func TestSubsystem_Good_StartBridgeNilHub(t *testing.T) {
|
||||||
sub := New(nil, Config{})
|
sub := New(nil)
|
||||||
// Should not panic
|
// Should not panic
|
||||||
sub.StartBridge(context.Background())
|
sub.StartBridge(context.Background())
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestSubsystem_Good_WithConfig verifies the Config DTO applies correctly.
|
// TestSubsystem_Good_WithOptions verifies all config options apply correctly.
|
||||||
func TestSubsystem_Good_WithConfig(t *testing.T) {
|
func TestSubsystem_Good_WithOptions(t *testing.T) {
|
||||||
hub := ws.NewHub()
|
hub := ws.NewHub()
|
||||||
sub := New(hub, Config{
|
sub := New(hub,
|
||||||
LaravelWSURL: "ws://custom:1234/ws",
|
WithLaravelURL("ws://custom:1234/ws"),
|
||||||
WorkspaceRoot: "/tmp/test",
|
WithWorkspaceRoot("/tmp/test"),
|
||||||
ReconnectInterval: 5 * time.Second,
|
WithReconnectInterval(5*time.Second),
|
||||||
Token: "secret-123",
|
WithToken("secret-123"),
|
||||||
})
|
)
|
||||||
|
|
||||||
if sub.cfg.LaravelWSURL != "ws://custom:1234/ws" {
|
if sub.config.LaravelWSURL != "ws://custom:1234/ws" {
|
||||||
t.Errorf("expected custom URL, got %q", sub.cfg.LaravelWSURL)
|
t.Errorf("expected custom URL, got %q", sub.config.LaravelWSURL)
|
||||||
}
|
}
|
||||||
if sub.cfg.WorkspaceRoot != "/tmp/test" {
|
if sub.config.WorkspaceRoot != "/tmp/test" {
|
||||||
t.Errorf("expected workspace '/tmp/test', got %q", sub.cfg.WorkspaceRoot)
|
t.Errorf("expected workspace '/tmp/test', got %q", sub.config.WorkspaceRoot)
|
||||||
}
|
}
|
||||||
if sub.cfg.ReconnectInterval != 5*time.Second {
|
if sub.config.ReconnectInterval != 5*time.Second {
|
||||||
t.Errorf("expected 5s reconnect interval, got %v", sub.cfg.ReconnectInterval)
|
t.Errorf("expected 5s reconnect interval, got %v", sub.config.ReconnectInterval)
|
||||||
}
|
}
|
||||||
if sub.cfg.Token != "secret-123" {
|
if sub.config.Token != "secret-123" {
|
||||||
t.Errorf("expected token 'secret-123', got %q", sub.cfg.Token)
|
t.Errorf("expected token 'secret-123', got %q", sub.config.Token)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -928,10 +761,7 @@ func TestChatSend_Good_BridgeMessageType(t *testing.T) {
|
||||||
ctx := t.Context()
|
ctx := t.Context()
|
||||||
go hub.Run(ctx)
|
go hub.Run(ctx)
|
||||||
|
|
||||||
sub := New(hub, Config{
|
sub := New(hub, WithLaravelURL(wsURL(ts)), WithReconnectInterval(50*time.Millisecond))
|
||||||
LaravelWSURL: wsURL(ts),
|
|
||||||
ReconnectInterval: 50 * time.Millisecond,
|
|
||||||
})
|
|
||||||
sub.StartBridge(ctx)
|
sub.StartBridge(ctx)
|
||||||
waitConnected(t, sub.Bridge(), 2*time.Second)
|
waitConnected(t, sub.Bridge(), 2*time.Second)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -29,9 +29,9 @@ func TestService_Iterators(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRegistry_SplitTag(t *testing.T) {
|
func TestRegistry_SplitTagSeq(t *testing.T) {
|
||||||
tag := "name,omitempty,json"
|
tag := "name,omitempty,json"
|
||||||
parts := splitTag(tag)
|
parts := slices.Collect(splitTagSeq(tag))
|
||||||
expected := []string{"name", "omitempty", "json"}
|
expected := []string{"name", "omitempty", "json"}
|
||||||
|
|
||||||
if !slices.Equal(parts, expected) {
|
if !slices.Equal(parts, expected) {
|
||||||
|
|
|
||||||
362
pkg/mcp/mcp.go
362
pkg/mcp/mcp.go
|
|
@ -6,14 +6,10 @@ package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"iter"
|
"iter"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
|
||||||
"slices"
|
"slices"
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
core "dappco.re/go/core"
|
core "dappco.re/go/core"
|
||||||
|
|
@ -24,17 +20,16 @@ import (
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Service provides a lightweight MCP server with file operations and
|
// Service provides a lightweight MCP server with file operations only.
|
||||||
// optional subsystems.
|
|
||||||
// For full GUI features, use the core-gui package.
|
// For full GUI features, use the core-gui package.
|
||||||
//
|
//
|
||||||
// svc, err := mcp.New(mcp.Options{WorkspaceRoot: "/home/user/project"})
|
// svc, err := mcp.New(mcp.Options{WorkspaceRoot: "/home/user/project"})
|
||||||
// defer svc.Shutdown(ctx)
|
// defer svc.Shutdown(ctx)
|
||||||
type Service struct {
|
type Service struct {
|
||||||
*core.ServiceRuntime[struct{}] // Core access via s.Core()
|
*core.ServiceRuntime[McpOptions] // Core access via s.Core()
|
||||||
|
|
||||||
server *mcp.Server
|
server *mcp.Server
|
||||||
workspaceRoot string // Root directory for file operations (empty = cwd unless Unrestricted)
|
workspaceRoot string // Root directory for file operations (empty = unrestricted)
|
||||||
medium io.Medium // Filesystem medium for sandboxed operations
|
medium io.Medium // Filesystem medium for sandboxed operations
|
||||||
subsystems []Subsystem // Additional subsystems registered via Options.Subsystems
|
subsystems []Subsystem // Additional subsystems registered via Options.Subsystems
|
||||||
logger *log.Logger // Logger for tool execution auditing
|
logger *log.Logger // Logger for tool execution auditing
|
||||||
|
|
@ -43,11 +38,14 @@ type Service struct {
|
||||||
wsServer *http.Server // WebSocket HTTP server (optional)
|
wsServer *http.Server // WebSocket HTTP server (optional)
|
||||||
wsAddr string // WebSocket server address
|
wsAddr string // WebSocket server address
|
||||||
wsMu sync.Mutex // Protects wsServer and wsAddr
|
wsMu sync.Mutex // Protects wsServer and wsAddr
|
||||||
processMu sync.Mutex // Protects processMeta
|
stdioMode bool // True when running via stdio transport
|
||||||
processMeta map[string]processRuntime
|
tools []ToolRecord // Parallel tool registry for REST bridge
|
||||||
tools []ToolRecord // Parallel tool registry for REST bridge
|
coreRef any // Deprecated: use s.Core() via ServiceRuntime
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// McpOptions configures the MCP service runtime.
|
||||||
|
type McpOptions struct{}
|
||||||
|
|
||||||
// Options configures a Service.
|
// Options configures a Service.
|
||||||
//
|
//
|
||||||
// svc, err := mcp.New(mcp.Options{
|
// svc, err := mcp.New(mcp.Options{
|
||||||
|
|
@ -63,7 +61,7 @@ type Options struct {
|
||||||
Subsystems []Subsystem // Additional tool groups registered at startup
|
Subsystems []Subsystem // Additional tool groups registered at startup
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new MCP service with file operations and optional subsystems.
|
// New creates a new MCP service with file operations.
|
||||||
//
|
//
|
||||||
// svc, err := mcp.New(mcp.Options{WorkspaceRoot: "."})
|
// svc, err := mcp.New(mcp.Options{WorkspaceRoot: "."})
|
||||||
func New(opts Options) (*Service, error) {
|
func New(opts Options) (*Service, error) {
|
||||||
|
|
@ -84,8 +82,8 @@ func New(opts Options) (*Service, error) {
|
||||||
server: server,
|
server: server,
|
||||||
processService: opts.ProcessService,
|
processService: opts.ProcessService,
|
||||||
wsHub: opts.WSHub,
|
wsHub: opts.WSHub,
|
||||||
|
subsystems: opts.Subsystems,
|
||||||
logger: log.Default(),
|
logger: log.Default(),
|
||||||
processMeta: make(map[string]processRuntime),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Workspace root: unrestricted, explicit root, or default to cwd
|
// Workspace root: unrestricted, explicit root, or default to cwd
|
||||||
|
|
@ -95,18 +93,10 @@ func New(opts Options) (*Service, error) {
|
||||||
} else {
|
} else {
|
||||||
root := opts.WorkspaceRoot
|
root := opts.WorkspaceRoot
|
||||||
if root == "" {
|
if root == "" {
|
||||||
cwd, err := os.Getwd()
|
root = core.Env("DIR_CWD")
|
||||||
if err != nil {
|
|
||||||
return nil, core.E("mcp.New", "failed to get working directory", err)
|
|
||||||
}
|
|
||||||
root = cwd
|
|
||||||
}
|
}
|
||||||
abs, err := filepath.Abs(root)
|
s.workspaceRoot = root
|
||||||
if err != nil {
|
m, merr := io.NewSandboxed(root)
|
||||||
return nil, core.E("mcp.New", "failed to resolve workspace root", err)
|
|
||||||
}
|
|
||||||
s.workspaceRoot = abs
|
|
||||||
m, merr := io.NewSandboxed(abs)
|
|
||||||
if merr != nil {
|
if merr != nil {
|
||||||
return nil, core.E("mcp.New", "failed to create workspace medium", merr)
|
return nil, core.E("mcp.New", "failed to create workspace medium", merr)
|
||||||
}
|
}
|
||||||
|
|
@ -115,23 +105,21 @@ func New(opts Options) (*Service, error) {
|
||||||
|
|
||||||
s.registerTools(s.server)
|
s.registerTools(s.server)
|
||||||
|
|
||||||
s.subsystems = make([]Subsystem, 0, len(opts.Subsystems))
|
for _, sub := range s.subsystems {
|
||||||
for _, sub := range opts.Subsystems {
|
sub.RegisterTools(s.server)
|
||||||
if sub == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s.subsystems = append(s.subsystems, sub)
|
|
||||||
if sn, ok := sub.(SubsystemWithNotifier); ok {
|
if sn, ok := sub.(SubsystemWithNotifier); ok {
|
||||||
sn.SetNotifier(s)
|
sn.SetNotifier(s)
|
||||||
}
|
}
|
||||||
// Wire channel callback for subsystems that use func-based notification.
|
// Wire channel callback for subsystems that use func-based notification
|
||||||
if cw, ok := sub.(SubsystemWithChannelCallback); ok {
|
type channelWirer interface {
|
||||||
|
OnChannel(func(ctx context.Context, channel string, data any))
|
||||||
|
}
|
||||||
|
if cw, ok := sub.(channelWirer); ok {
|
||||||
svc := s // capture for closure
|
svc := s // capture for closure
|
||||||
cw.OnChannel(func(ctx context.Context, channel string, data any) {
|
cw.OnChannel(func(ctx context.Context, channel string, data any) {
|
||||||
svc.ChannelSend(ctx, channel, data)
|
svc.ChannelSend(ctx, channel, data)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
sub.RegisterTools(s)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return s, nil
|
return s, nil
|
||||||
|
|
@ -143,7 +131,7 @@ func New(opts Options) (*Service, error) {
|
||||||
// fmt.Println(sub.Name())
|
// fmt.Println(sub.Name())
|
||||||
// }
|
// }
|
||||||
func (s *Service) Subsystems() []Subsystem {
|
func (s *Service) Subsystems() []Subsystem {
|
||||||
return slices.Clone(s.subsystems)
|
return s.subsystems
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubsystemsSeq returns an iterator over the registered subsystems.
|
// SubsystemsSeq returns an iterator over the registered subsystems.
|
||||||
|
|
@ -152,7 +140,7 @@ func (s *Service) Subsystems() []Subsystem {
|
||||||
// fmt.Println(sub.Name())
|
// fmt.Println(sub.Name())
|
||||||
// }
|
// }
|
||||||
func (s *Service) SubsystemsSeq() iter.Seq[Subsystem] {
|
func (s *Service) SubsystemsSeq() iter.Seq[Subsystem] {
|
||||||
return slices.Values(slices.Clone(s.subsystems))
|
return slices.Values(s.subsystems)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tools returns all recorded tool metadata.
|
// Tools returns all recorded tool metadata.
|
||||||
|
|
@ -161,7 +149,7 @@ func (s *Service) SubsystemsSeq() iter.Seq[Subsystem] {
|
||||||
// fmt.Printf("%s (%s): %s\n", t.Name, t.Group, t.Description)
|
// fmt.Printf("%s (%s): %s\n", t.Name, t.Group, t.Description)
|
||||||
// }
|
// }
|
||||||
func (s *Service) Tools() []ToolRecord {
|
func (s *Service) Tools() []ToolRecord {
|
||||||
return slices.Clone(s.tools)
|
return s.tools
|
||||||
}
|
}
|
||||||
|
|
||||||
// ToolsSeq returns an iterator over all recorded tool metadata.
|
// ToolsSeq returns an iterator over all recorded tool metadata.
|
||||||
|
|
@ -170,7 +158,7 @@ func (s *Service) Tools() []ToolRecord {
|
||||||
// fmt.Println(rec.Name)
|
// fmt.Println(rec.Name)
|
||||||
// }
|
// }
|
||||||
func (s *Service) ToolsSeq() iter.Seq[ToolRecord] {
|
func (s *Service) ToolsSeq() iter.Seq[ToolRecord] {
|
||||||
return slices.Values(slices.Clone(s.tools))
|
return slices.Values(s.tools)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown gracefully shuts down all subsystems that support it.
|
// Shutdown gracefully shuts down all subsystems that support it.
|
||||||
|
|
@ -179,42 +167,17 @@ func (s *Service) ToolsSeq() iter.Seq[ToolRecord] {
|
||||||
// defer cancel()
|
// defer cancel()
|
||||||
// if err := svc.Shutdown(ctx); err != nil { log.Fatal(err) }
|
// if err := svc.Shutdown(ctx); err != nil { log.Fatal(err) }
|
||||||
func (s *Service) Shutdown(ctx context.Context) error {
|
func (s *Service) Shutdown(ctx context.Context) error {
|
||||||
var shutdownErr error
|
|
||||||
|
|
||||||
for _, sub := range s.subsystems {
|
for _, sub := range s.subsystems {
|
||||||
if sh, ok := sub.(SubsystemWithShutdown); ok {
|
if sh, ok := sub.(SubsystemWithShutdown); ok {
|
||||||
if err := sh.Shutdown(ctx); err != nil {
|
if err := sh.Shutdown(ctx); err != nil {
|
||||||
if shutdownErr == nil {
|
return log.E("mcp.Shutdown", "shutdown "+sub.Name(), err)
|
||||||
shutdownErr = log.E("mcp.Shutdown", "shutdown "+sub.Name(), err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
if s.wsServer != nil {
|
|
||||||
s.wsMu.Lock()
|
|
||||||
server := s.wsServer
|
|
||||||
s.wsMu.Unlock()
|
|
||||||
|
|
||||||
if err := server.Shutdown(ctx); err != nil && shutdownErr == nil {
|
|
||||||
shutdownErr = log.E("mcp.Shutdown", "shutdown websocket server", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.wsMu.Lock()
|
|
||||||
if s.wsServer == server {
|
|
||||||
s.wsServer = nil
|
|
||||||
s.wsAddr = ""
|
|
||||||
}
|
|
||||||
s.wsMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := closeWebviewConnection(); err != nil && shutdownErr == nil {
|
|
||||||
shutdownErr = log.E("mcp.Shutdown", "close webview connection", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return shutdownErr
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// WSHub returns the WebSocket hub, or nil if not configured.
|
// WSHub returns the WebSocket hub, or nil if not configured.
|
||||||
//
|
//
|
||||||
// if hub := svc.WSHub(); hub != nil {
|
// if hub := svc.WSHub(); hub != nil {
|
||||||
|
|
@ -233,30 +196,7 @@ func (s *Service) ProcessService() *process.Service {
|
||||||
return s.processService
|
return s.processService
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveWorkspacePath converts a tool path into the filesystem path the
|
// registerTools adds file operation tools to the MCP server.
|
||||||
// service actually operates on.
|
|
||||||
//
|
|
||||||
// Sandboxed services keep paths anchored under workspaceRoot. Unrestricted
|
|
||||||
// services preserve absolute paths and clean relative ones against the current
|
|
||||||
// working directory.
|
|
||||||
func (s *Service) resolveWorkspacePath(path string) string {
|
|
||||||
if path == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.workspaceRoot == "" {
|
|
||||||
return filepath.Clean(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
clean := filepath.Clean(string(filepath.Separator) + path)
|
|
||||||
clean = strings.TrimPrefix(clean, string(filepath.Separator))
|
|
||||||
if clean == "." || clean == "" {
|
|
||||||
return s.workspaceRoot
|
|
||||||
}
|
|
||||||
return filepath.Join(s.workspaceRoot, clean)
|
|
||||||
}
|
|
||||||
|
|
||||||
// registerTools adds the built-in tool groups to the MCP server.
|
|
||||||
func (s *Service) registerTools(server *mcp.Server) {
|
func (s *Service) registerTools(server *mcp.Server) {
|
||||||
// File operations
|
// File operations
|
||||||
addToolRecorded(s, server, "files", &mcp.Tool{
|
addToolRecorded(s, server, "files", &mcp.Tool{
|
||||||
|
|
@ -310,13 +250,6 @@ func (s *Service) registerTools(server *mcp.Server) {
|
||||||
Name: "lang_list",
|
Name: "lang_list",
|
||||||
Description: "Get list of supported programming languages",
|
Description: "Get list of supported programming languages",
|
||||||
}, s.getSupportedLanguages)
|
}, s.getSupportedLanguages)
|
||||||
|
|
||||||
// Additional built-in tool groups.
|
|
||||||
s.registerMetricsTools(server)
|
|
||||||
s.registerRAGTools(server)
|
|
||||||
s.registerProcessTools(server)
|
|
||||||
s.registerWebviewTools(server)
|
|
||||||
s.registerWSTools(server)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tool input/output types for MCP file operations.
|
// Tool input/output types for MCP file operations.
|
||||||
|
|
@ -466,7 +399,7 @@ type GetSupportedLanguagesInput struct{}
|
||||||
|
|
||||||
// GetSupportedLanguagesOutput contains the list of supported languages.
|
// GetSupportedLanguagesOutput contains the list of supported languages.
|
||||||
//
|
//
|
||||||
// // len(out.Languages) == 23
|
// // len(out.Languages) == 15
|
||||||
// // out.Languages[0].ID == "typescript"
|
// // out.Languages[0].ID == "typescript"
|
||||||
type GetSupportedLanguagesOutput struct {
|
type GetSupportedLanguagesOutput struct {
|
||||||
Languages []LanguageInfo `json:"languages"` // all recognised languages
|
Languages []LanguageInfo `json:"languages"` // all recognised languages
|
||||||
|
|
@ -490,8 +423,8 @@ type LanguageInfo struct {
|
||||||
// }
|
// }
|
||||||
type EditDiffInput struct {
|
type EditDiffInput struct {
|
||||||
Path string `json:"path"` // e.g. "main.go"
|
Path string `json:"path"` // e.g. "main.go"
|
||||||
OldString string `json:"old_string"` // text to find
|
OldString string `json:"old_string"` // text to find
|
||||||
NewString string `json:"new_string"` // replacement text
|
NewString string `json:"new_string"` // replacement text
|
||||||
ReplaceAll bool `json:"replace_all,omitempty"` // replace all occurrences (default: first only)
|
ReplaceAll bool `json:"replace_all,omitempty"` // replace all occurrences (default: first only)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -507,10 +440,6 @@ type EditDiffOutput struct {
|
||||||
// Tool handlers
|
// Tool handlers
|
||||||
|
|
||||||
func (s *Service) readFile(ctx context.Context, req *mcp.CallToolRequest, input ReadFileInput) (*mcp.CallToolResult, ReadFileOutput, error) {
|
func (s *Service) readFile(ctx context.Context, req *mcp.CallToolRequest, input ReadFileInput) (*mcp.CallToolResult, ReadFileOutput, error) {
|
||||||
if s.medium == nil {
|
|
||||||
return nil, ReadFileOutput{}, log.E("mcp.readFile", "workspace medium unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
content, err := s.medium.Read(input.Path)
|
content, err := s.medium.Read(input.Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, ReadFileOutput{}, log.E("mcp.readFile", "failed to read file", err)
|
return nil, ReadFileOutput{}, log.E("mcp.readFile", "failed to read file", err)
|
||||||
|
|
@ -523,10 +452,6 @@ func (s *Service) readFile(ctx context.Context, req *mcp.CallToolRequest, input
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) writeFile(ctx context.Context, req *mcp.CallToolRequest, input WriteFileInput) (*mcp.CallToolResult, WriteFileOutput, error) {
|
func (s *Service) writeFile(ctx context.Context, req *mcp.CallToolRequest, input WriteFileInput) (*mcp.CallToolResult, WriteFileOutput, error) {
|
||||||
if s.medium == nil {
|
|
||||||
return nil, WriteFileOutput{}, log.E("mcp.writeFile", "workspace medium unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Medium.Write creates parent directories automatically
|
// Medium.Write creates parent directories automatically
|
||||||
if err := s.medium.Write(input.Path, input.Content); err != nil {
|
if err := s.medium.Write(input.Path, input.Content); err != nil {
|
||||||
return nil, WriteFileOutput{}, log.E("mcp.writeFile", "failed to write file", err)
|
return nil, WriteFileOutput{}, log.E("mcp.writeFile", "failed to write file", err)
|
||||||
|
|
@ -535,17 +460,10 @@ func (s *Service) writeFile(ctx context.Context, req *mcp.CallToolRequest, input
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) listDirectory(ctx context.Context, req *mcp.CallToolRequest, input ListDirectoryInput) (*mcp.CallToolResult, ListDirectoryOutput, error) {
|
func (s *Service) listDirectory(ctx context.Context, req *mcp.CallToolRequest, input ListDirectoryInput) (*mcp.CallToolResult, ListDirectoryOutput, error) {
|
||||||
if s.medium == nil {
|
|
||||||
return nil, ListDirectoryOutput{}, log.E("mcp.listDirectory", "workspace medium unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
entries, err := s.medium.List(input.Path)
|
entries, err := s.medium.List(input.Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, ListDirectoryOutput{}, log.E("mcp.listDirectory", "failed to list directory", err)
|
return nil, ListDirectoryOutput{}, log.E("mcp.listDirectory", "failed to list directory", err)
|
||||||
}
|
}
|
||||||
sort.Slice(entries, func(i, j int) bool {
|
|
||||||
return entries[i].Name() < entries[j].Name()
|
|
||||||
})
|
|
||||||
result := make([]DirectoryEntry, 0, len(entries))
|
result := make([]DirectoryEntry, 0, len(entries))
|
||||||
for _, e := range entries {
|
for _, e := range entries {
|
||||||
info, _ := e.Info()
|
info, _ := e.Info()
|
||||||
|
|
@ -554,8 +472,11 @@ func (s *Service) listDirectory(ctx context.Context, req *mcp.CallToolRequest, i
|
||||||
size = info.Size()
|
size = info.Size()
|
||||||
}
|
}
|
||||||
result = append(result, DirectoryEntry{
|
result = append(result, DirectoryEntry{
|
||||||
Name: e.Name(),
|
Name: e.Name(),
|
||||||
Path: directoryEntryPath(input.Path, e.Name()),
|
Path: core.JoinPath(input.Path, e.Name()), // Note: This might be relative path, client might expect absolute?
|
||||||
|
// Issue 103 says "Replace ... with local.Medium sandboxing".
|
||||||
|
// Previous code returned `core.JoinPath(input.Path, e.Name())`.
|
||||||
|
// If input.Path is relative, this preserves it.
|
||||||
IsDir: e.IsDir(),
|
IsDir: e.IsDir(),
|
||||||
Size: size,
|
Size: size,
|
||||||
})
|
})
|
||||||
|
|
@ -563,23 +484,7 @@ func (s *Service) listDirectory(ctx context.Context, req *mcp.CallToolRequest, i
|
||||||
return nil, ListDirectoryOutput{Entries: result, Path: input.Path}, nil
|
return nil, ListDirectoryOutput{Entries: result, Path: input.Path}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// directoryEntryPath returns the documented display path for a directory entry.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// directoryEntryPath("src", "main.go") == "src/main.go"
|
|
||||||
func directoryEntryPath(dir, name string) string {
|
|
||||||
if dir == "" {
|
|
||||||
return name
|
|
||||||
}
|
|
||||||
return core.JoinPath(dir, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) createDirectory(ctx context.Context, req *mcp.CallToolRequest, input CreateDirectoryInput) (*mcp.CallToolResult, CreateDirectoryOutput, error) {
|
func (s *Service) createDirectory(ctx context.Context, req *mcp.CallToolRequest, input CreateDirectoryInput) (*mcp.CallToolResult, CreateDirectoryOutput, error) {
|
||||||
if s.medium == nil {
|
|
||||||
return nil, CreateDirectoryOutput{}, log.E("mcp.createDirectory", "workspace medium unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.medium.EnsureDir(input.Path); err != nil {
|
if err := s.medium.EnsureDir(input.Path); err != nil {
|
||||||
return nil, CreateDirectoryOutput{}, log.E("mcp.createDirectory", "failed to create directory", err)
|
return nil, CreateDirectoryOutput{}, log.E("mcp.createDirectory", "failed to create directory", err)
|
||||||
}
|
}
|
||||||
|
|
@ -587,10 +492,6 @@ func (s *Service) createDirectory(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) deleteFile(ctx context.Context, req *mcp.CallToolRequest, input DeleteFileInput) (*mcp.CallToolResult, DeleteFileOutput, error) {
|
func (s *Service) deleteFile(ctx context.Context, req *mcp.CallToolRequest, input DeleteFileInput) (*mcp.CallToolResult, DeleteFileOutput, error) {
|
||||||
if s.medium == nil {
|
|
||||||
return nil, DeleteFileOutput{}, log.E("mcp.deleteFile", "workspace medium unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.medium.Delete(input.Path); err != nil {
|
if err := s.medium.Delete(input.Path); err != nil {
|
||||||
return nil, DeleteFileOutput{}, log.E("mcp.deleteFile", "failed to delete file", err)
|
return nil, DeleteFileOutput{}, log.E("mcp.deleteFile", "failed to delete file", err)
|
||||||
}
|
}
|
||||||
|
|
@ -598,10 +499,6 @@ func (s *Service) deleteFile(ctx context.Context, req *mcp.CallToolRequest, inpu
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) renameFile(ctx context.Context, req *mcp.CallToolRequest, input RenameFileInput) (*mcp.CallToolResult, RenameFileOutput, error) {
|
func (s *Service) renameFile(ctx context.Context, req *mcp.CallToolRequest, input RenameFileInput) (*mcp.CallToolResult, RenameFileOutput, error) {
|
||||||
if s.medium == nil {
|
|
||||||
return nil, RenameFileOutput{}, log.E("mcp.renameFile", "workspace medium unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.medium.Rename(input.OldPath, input.NewPath); err != nil {
|
if err := s.medium.Rename(input.OldPath, input.NewPath); err != nil {
|
||||||
return nil, RenameFileOutput{}, log.E("mcp.renameFile", "failed to rename file", err)
|
return nil, RenameFileOutput{}, log.E("mcp.renameFile", "failed to rename file", err)
|
||||||
}
|
}
|
||||||
|
|
@ -609,22 +506,21 @@ func (s *Service) renameFile(ctx context.Context, req *mcp.CallToolRequest, inpu
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) fileExists(ctx context.Context, req *mcp.CallToolRequest, input FileExistsInput) (*mcp.CallToolResult, FileExistsOutput, error) {
|
func (s *Service) fileExists(ctx context.Context, req *mcp.CallToolRequest, input FileExistsInput) (*mcp.CallToolResult, FileExistsOutput, error) {
|
||||||
if s.medium == nil {
|
exists := s.medium.IsFile(input.Path)
|
||||||
return nil, FileExistsOutput{}, log.E("mcp.fileExists", "workspace medium unavailable", nil)
|
if exists {
|
||||||
|
return nil, FileExistsOutput{Exists: true, IsDir: false, Path: input.Path}, nil
|
||||||
}
|
}
|
||||||
|
// Check if it's a directory by attempting to list it
|
||||||
|
// List might fail if it's a file too (but we checked IsFile) or if doesn't exist.
|
||||||
|
_, err := s.medium.List(input.Path)
|
||||||
|
isDir := err == nil
|
||||||
|
|
||||||
info, err := s.medium.Stat(input.Path)
|
// If List failed, it might mean it doesn't exist OR it's a special file or permissions.
|
||||||
if err != nil {
|
// Assuming if List works, it's a directory.
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
return nil, FileExistsOutput{Exists: false, IsDir: false, Path: input.Path}, nil
|
// Refinement: If it doesn't exist, List returns error.
|
||||||
}
|
|
||||||
return nil, FileExistsOutput{}, log.E("mcp.fileExists", "failed to stat path", err)
|
return nil, FileExistsOutput{Exists: isDir, IsDir: isDir, Path: input.Path}, nil
|
||||||
}
|
|
||||||
return nil, FileExistsOutput{
|
|
||||||
Exists: true,
|
|
||||||
IsDir: info.IsDir(),
|
|
||||||
Path: input.Path,
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) detectLanguage(ctx context.Context, req *mcp.CallToolRequest, input DetectLanguageInput) (*mcp.CallToolResult, DetectLanguageOutput, error) {
|
func (s *Service) detectLanguage(ctx context.Context, req *mcp.CallToolRequest, input DetectLanguageInput) (*mcp.CallToolResult, DetectLanguageOutput, error) {
|
||||||
|
|
@ -633,14 +529,27 @@ func (s *Service) detectLanguage(ctx context.Context, req *mcp.CallToolRequest,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) getSupportedLanguages(ctx context.Context, req *mcp.CallToolRequest, input GetSupportedLanguagesInput) (*mcp.CallToolResult, GetSupportedLanguagesOutput, error) {
|
func (s *Service) getSupportedLanguages(ctx context.Context, req *mcp.CallToolRequest, input GetSupportedLanguagesInput) (*mcp.CallToolResult, GetSupportedLanguagesOutput, error) {
|
||||||
return nil, GetSupportedLanguagesOutput{Languages: supportedLanguages()}, nil
|
languages := []LanguageInfo{
|
||||||
|
{ID: "typescript", Name: "TypeScript", Extensions: []string{".ts", ".tsx"}},
|
||||||
|
{ID: "javascript", Name: "JavaScript", Extensions: []string{".js", ".jsx"}},
|
||||||
|
{ID: "go", Name: "Go", Extensions: []string{".go"}},
|
||||||
|
{ID: "python", Name: "Python", Extensions: []string{".py"}},
|
||||||
|
{ID: "rust", Name: "Rust", Extensions: []string{".rs"}},
|
||||||
|
{ID: "java", Name: "Java", Extensions: []string{".java"}},
|
||||||
|
{ID: "php", Name: "PHP", Extensions: []string{".php"}},
|
||||||
|
{ID: "ruby", Name: "Ruby", Extensions: []string{".rb"}},
|
||||||
|
{ID: "html", Name: "HTML", Extensions: []string{".html", ".htm"}},
|
||||||
|
{ID: "css", Name: "CSS", Extensions: []string{".css"}},
|
||||||
|
{ID: "json", Name: "JSON", Extensions: []string{".json"}},
|
||||||
|
{ID: "yaml", Name: "YAML", Extensions: []string{".yaml", ".yml"}},
|
||||||
|
{ID: "markdown", Name: "Markdown", Extensions: []string{".md", ".markdown"}},
|
||||||
|
{ID: "sql", Name: "SQL", Extensions: []string{".sql"}},
|
||||||
|
{ID: "shell", Name: "Shell", Extensions: []string{".sh", ".bash"}},
|
||||||
|
}
|
||||||
|
return nil, GetSupportedLanguagesOutput{Languages: languages}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) editDiff(ctx context.Context, req *mcp.CallToolRequest, input EditDiffInput) (*mcp.CallToolResult, EditDiffOutput, error) {
|
func (s *Service) editDiff(ctx context.Context, req *mcp.CallToolRequest, input EditDiffInput) (*mcp.CallToolResult, EditDiffOutput, error) {
|
||||||
if s.medium == nil {
|
|
||||||
return nil, EditDiffOutput{}, log.E("mcp.editDiff", "workspace medium unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.OldString == "" {
|
if input.OldString == "" {
|
||||||
return nil, EditDiffOutput{}, log.E("mcp.editDiff", "old_string cannot be empty", nil)
|
return nil, EditDiffOutput{}, log.E("mcp.editDiff", "old_string cannot be empty", nil)
|
||||||
}
|
}
|
||||||
|
|
@ -679,78 +588,57 @@ func (s *Service) editDiff(ctx context.Context, req *mcp.CallToolRequest, input
|
||||||
|
|
||||||
// detectLanguageFromPath maps file extensions to language IDs.
|
// detectLanguageFromPath maps file extensions to language IDs.
|
||||||
func detectLanguageFromPath(path string) string {
|
func detectLanguageFromPath(path string) string {
|
||||||
if core.PathBase(path) == "Dockerfile" {
|
|
||||||
return "dockerfile"
|
|
||||||
}
|
|
||||||
|
|
||||||
ext := core.PathExt(path)
|
ext := core.PathExt(path)
|
||||||
if lang, ok := languageByExtension[ext]; ok {
|
switch ext {
|
||||||
return lang
|
case ".ts", ".tsx":
|
||||||
}
|
return "typescript"
|
||||||
return "plaintext"
|
case ".js", ".jsx":
|
||||||
}
|
return "javascript"
|
||||||
|
case ".go":
|
||||||
var languageByExtension = map[string]string{
|
return "go"
|
||||||
".ts": "typescript",
|
case ".py":
|
||||||
".tsx": "typescript",
|
return "python"
|
||||||
".js": "javascript",
|
case ".rs":
|
||||||
".jsx": "javascript",
|
return "rust"
|
||||||
".go": "go",
|
case ".rb":
|
||||||
".py": "python",
|
return "ruby"
|
||||||
".rs": "rust",
|
case ".java":
|
||||||
".rb": "ruby",
|
return "java"
|
||||||
".java": "java",
|
case ".php":
|
||||||
".php": "php",
|
return "php"
|
||||||
".c": "c",
|
case ".c", ".h":
|
||||||
".h": "c",
|
return "c"
|
||||||
".cpp": "cpp",
|
case ".cpp", ".hpp", ".cc", ".cxx":
|
||||||
".hpp": "cpp",
|
return "cpp"
|
||||||
".cc": "cpp",
|
case ".cs":
|
||||||
".cxx": "cpp",
|
return "csharp"
|
||||||
".cs": "csharp",
|
case ".html", ".htm":
|
||||||
".html": "html",
|
return "html"
|
||||||
".htm": "html",
|
case ".css":
|
||||||
".css": "css",
|
return "css"
|
||||||
".scss": "scss",
|
case ".scss":
|
||||||
".json": "json",
|
return "scss"
|
||||||
".yaml": "yaml",
|
case ".json":
|
||||||
".yml": "yaml",
|
return "json"
|
||||||
".xml": "xml",
|
case ".yaml", ".yml":
|
||||||
".md": "markdown",
|
return "yaml"
|
||||||
".markdown": "markdown",
|
case ".xml":
|
||||||
".sql": "sql",
|
return "xml"
|
||||||
".sh": "shell",
|
case ".md", ".markdown":
|
||||||
".bash": "shell",
|
return "markdown"
|
||||||
".swift": "swift",
|
case ".sql":
|
||||||
".kt": "kotlin",
|
return "sql"
|
||||||
".kts": "kotlin",
|
case ".sh", ".bash":
|
||||||
}
|
return "shell"
|
||||||
|
case ".swift":
|
||||||
func supportedLanguages() []LanguageInfo {
|
return "swift"
|
||||||
return []LanguageInfo{
|
case ".kt", ".kts":
|
||||||
{ID: "typescript", Name: "TypeScript", Extensions: []string{".ts", ".tsx"}},
|
return "kotlin"
|
||||||
{ID: "javascript", Name: "JavaScript", Extensions: []string{".js", ".jsx"}},
|
default:
|
||||||
{ID: "go", Name: "Go", Extensions: []string{".go"}},
|
if core.PathBase(path) == "Dockerfile" {
|
||||||
{ID: "python", Name: "Python", Extensions: []string{".py"}},
|
return "dockerfile"
|
||||||
{ID: "rust", Name: "Rust", Extensions: []string{".rs"}},
|
}
|
||||||
{ID: "ruby", Name: "Ruby", Extensions: []string{".rb"}},
|
return "plaintext"
|
||||||
{ID: "java", Name: "Java", Extensions: []string{".java"}},
|
|
||||||
{ID: "php", Name: "PHP", Extensions: []string{".php"}},
|
|
||||||
{ID: "c", Name: "C", Extensions: []string{".c", ".h"}},
|
|
||||||
{ID: "cpp", Name: "C++", Extensions: []string{".cpp", ".hpp", ".cc", ".cxx"}},
|
|
||||||
{ID: "csharp", Name: "C#", Extensions: []string{".cs"}},
|
|
||||||
{ID: "html", Name: "HTML", Extensions: []string{".html", ".htm"}},
|
|
||||||
{ID: "css", Name: "CSS", Extensions: []string{".css"}},
|
|
||||||
{ID: "scss", Name: "SCSS", Extensions: []string{".scss"}},
|
|
||||||
{ID: "json", Name: "JSON", Extensions: []string{".json"}},
|
|
||||||
{ID: "yaml", Name: "YAML", Extensions: []string{".yaml", ".yml"}},
|
|
||||||
{ID: "xml", Name: "XML", Extensions: []string{".xml"}},
|
|
||||||
{ID: "markdown", Name: "Markdown", Extensions: []string{".md", ".markdown"}},
|
|
||||||
{ID: "sql", Name: "SQL", Extensions: []string{".sql"}},
|
|
||||||
{ID: "shell", Name: "Shell", Extensions: []string{".sh", ".bash"}},
|
|
||||||
{ID: "swift", Name: "Swift", Extensions: []string{".swift"}},
|
|
||||||
{ID: "kotlin", Name: "Kotlin", Extensions: []string{".kt", ".kts"}},
|
|
||||||
{ID: "dockerfile", Name: "Dockerfile", Extensions: []string{}},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -763,10 +651,6 @@ func supportedLanguages() []LanguageInfo {
|
||||||
// os.Setenv("MCP_ADDR", "127.0.0.1:9100")
|
// os.Setenv("MCP_ADDR", "127.0.0.1:9100")
|
||||||
// svc.Run(ctx)
|
// svc.Run(ctx)
|
||||||
//
|
//
|
||||||
// // Unix socket (set MCP_UNIX_SOCKET):
|
|
||||||
// os.Setenv("MCP_UNIX_SOCKET", "/tmp/core-mcp.sock")
|
|
||||||
// svc.Run(ctx)
|
|
||||||
//
|
|
||||||
// // HTTP (set MCP_HTTP_ADDR):
|
// // HTTP (set MCP_HTTP_ADDR):
|
||||||
// os.Setenv("MCP_HTTP_ADDR", "127.0.0.1:9101")
|
// os.Setenv("MCP_HTTP_ADDR", "127.0.0.1:9101")
|
||||||
// svc.Run(ctx)
|
// svc.Run(ctx)
|
||||||
|
|
@ -777,12 +661,14 @@ func (s *Service) Run(ctx context.Context) error {
|
||||||
if addr := core.Env("MCP_ADDR"); addr != "" {
|
if addr := core.Env("MCP_ADDR"); addr != "" {
|
||||||
return s.ServeTCP(ctx, addr)
|
return s.ServeTCP(ctx, addr)
|
||||||
}
|
}
|
||||||
if socketPath := core.Env("MCP_UNIX_SOCKET"); socketPath != "" {
|
s.stdioMode = true
|
||||||
return s.ServeUnix(ctx, socketPath)
|
return s.server.Run(ctx, &mcp.IOTransport{
|
||||||
}
|
Reader: os.Stdin,
|
||||||
return s.ServeStdio(ctx)
|
Writer: sharedStdout,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// countOccurrences counts non-overlapping instances of substr in s.
|
// countOccurrences counts non-overlapping instances of substr in s.
|
||||||
func countOccurrences(s, substr string) int {
|
func countOccurrences(s, substr string) int {
|
||||||
if substr == "" {
|
if substr == "" {
|
||||||
|
|
|
||||||
|
|
@ -55,114 +55,6 @@ func TestNew_Good_NoRestriction(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNew_Good_RegistersBuiltInTools(t *testing.T) {
|
|
||||||
s, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
tools := map[string]bool{}
|
|
||||||
for _, rec := range s.Tools() {
|
|
||||||
tools[rec.Name] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, name := range []string{
|
|
||||||
"metrics_record",
|
|
||||||
"metrics_query",
|
|
||||||
"rag_query",
|
|
||||||
"rag_ingest",
|
|
||||||
"rag_collections",
|
|
||||||
"webview_connect",
|
|
||||||
"webview_disconnect",
|
|
||||||
"webview_navigate",
|
|
||||||
"webview_click",
|
|
||||||
"webview_type",
|
|
||||||
"webview_query",
|
|
||||||
"webview_console",
|
|
||||||
"webview_eval",
|
|
||||||
"webview_screenshot",
|
|
||||||
"webview_wait",
|
|
||||||
} {
|
|
||||||
if !tools[name] {
|
|
||||||
t.Fatalf("expected tool %q to be registered", name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, name := range []string{"process_start", "ws_start"} {
|
|
||||||
if tools[name] {
|
|
||||||
t.Fatalf("did not expect tool %q to be registered without dependencies", name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetSupportedLanguages_Good_IncludesAllDetectedLanguages(t *testing.T) {
|
|
||||||
s, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := s.getSupportedLanguages(nil, nil, GetSupportedLanguagesInput{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("getSupportedLanguages failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if got, want := len(out.Languages), 23; got != want {
|
|
||||||
t.Fatalf("expected %d supported languages, got %d", want, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
got := map[string]bool{}
|
|
||||||
for _, lang := range out.Languages {
|
|
||||||
got[lang.ID] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, want := range []string{
|
|
||||||
"typescript",
|
|
||||||
"javascript",
|
|
||||||
"go",
|
|
||||||
"python",
|
|
||||||
"rust",
|
|
||||||
"ruby",
|
|
||||||
"java",
|
|
||||||
"php",
|
|
||||||
"c",
|
|
||||||
"cpp",
|
|
||||||
"csharp",
|
|
||||||
"html",
|
|
||||||
"css",
|
|
||||||
"scss",
|
|
||||||
"json",
|
|
||||||
"yaml",
|
|
||||||
"xml",
|
|
||||||
"markdown",
|
|
||||||
"sql",
|
|
||||||
"shell",
|
|
||||||
"swift",
|
|
||||||
"kotlin",
|
|
||||||
"dockerfile",
|
|
||||||
} {
|
|
||||||
if !got[want] {
|
|
||||||
t.Fatalf("expected language %q to be listed", want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDetectLanguageFromPath_Good_KnownExtensions(t *testing.T) {
|
|
||||||
cases := map[string]string{
|
|
||||||
"main.go": "go",
|
|
||||||
"index.tsx": "typescript",
|
|
||||||
"style.scss": "scss",
|
|
||||||
"Program.cs": "csharp",
|
|
||||||
"module.kt": "kotlin",
|
|
||||||
"docker/Dockerfile": "dockerfile",
|
|
||||||
}
|
|
||||||
|
|
||||||
for path, want := range cases {
|
|
||||||
if got := detectLanguageFromPath(path); got != want {
|
|
||||||
t.Fatalf("detectLanguageFromPath(%q) = %q, want %q", path, got, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMedium_Good_ReadWrite(t *testing.T) {
|
func TestMedium_Good_ReadWrite(t *testing.T) {
|
||||||
tmpDir := t.TempDir()
|
tmpDir := t.TempDir()
|
||||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
s, err := New(Options{WorkspaceRoot: tmpDir})
|
||||||
|
|
@ -216,71 +108,6 @@ func TestMedium_Good_EnsureDir(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFileExists_Good_FileAndDirectory(t *testing.T) {
|
|
||||||
tmpDir := t.TempDir()
|
|
||||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.medium.EnsureDir("nested"); err != nil {
|
|
||||||
t.Fatalf("Failed to create directory: %v", err)
|
|
||||||
}
|
|
||||||
if err := s.medium.Write("nested/file.txt", "content"); err != nil {
|
|
||||||
t.Fatalf("Failed to write file: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, fileOut, err := s.fileExists(nil, nil, FileExistsInput{Path: "nested/file.txt"})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("fileExists(file) failed: %v", err)
|
|
||||||
}
|
|
||||||
if !fileOut.Exists {
|
|
||||||
t.Fatal("expected file to exist")
|
|
||||||
}
|
|
||||||
if fileOut.IsDir {
|
|
||||||
t.Fatal("expected file to not be reported as a directory")
|
|
||||||
}
|
|
||||||
|
|
||||||
_, dirOut, err := s.fileExists(nil, nil, FileExistsInput{Path: "nested"})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("fileExists(dir) failed: %v", err)
|
|
||||||
}
|
|
||||||
if !dirOut.Exists {
|
|
||||||
t.Fatal("expected directory to exist")
|
|
||||||
}
|
|
||||||
if !dirOut.IsDir {
|
|
||||||
t.Fatal("expected directory to be reported as a directory")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListDirectory_Good_ReturnsDocumentedEntryPaths(t *testing.T) {
|
|
||||||
tmpDir := t.TempDir()
|
|
||||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.medium.EnsureDir("nested"); err != nil {
|
|
||||||
t.Fatalf("Failed to create directory: %v", err)
|
|
||||||
}
|
|
||||||
if err := s.medium.Write("nested/file.txt", "content"); err != nil {
|
|
||||||
t.Fatalf("Failed to write file: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, out, err := s.listDirectory(nil, nil, ListDirectoryInput{Path: "nested"})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("listDirectory failed: %v", err)
|
|
||||||
}
|
|
||||||
if len(out.Entries) != 1 {
|
|
||||||
t.Fatalf("expected one entry, got %d", len(out.Entries))
|
|
||||||
}
|
|
||||||
|
|
||||||
want := filepath.Join("nested", "file.txt")
|
|
||||||
if out.Entries[0].Path != want {
|
|
||||||
t.Fatalf("expected entry path %q, got %q", want, out.Entries[0].Path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMedium_Good_IsFile(t *testing.T) {
|
func TestMedium_Good_IsFile(t *testing.T) {
|
||||||
tmpDir := t.TempDir()
|
tmpDir := t.TempDir()
|
||||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
s, err := New(Options{WorkspaceRoot: tmpDir})
|
||||||
|
|
@ -302,40 +129,6 @@ func TestMedium_Good_IsFile(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestResolveWorkspacePath_Good(t *testing.T) {
|
|
||||||
tmpDir := t.TempDir()
|
|
||||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cases := map[string]string{
|
|
||||||
"docs/readme.md": filepath.Join(tmpDir, "docs", "readme.md"),
|
|
||||||
"/docs/readme.md": filepath.Join(tmpDir, "docs", "readme.md"),
|
|
||||||
"../escape/notes.md": filepath.Join(tmpDir, "escape", "notes.md"),
|
|
||||||
"": "",
|
|
||||||
}
|
|
||||||
for input, want := range cases {
|
|
||||||
if got := s.resolveWorkspacePath(input); got != want {
|
|
||||||
t.Fatalf("resolveWorkspacePath(%q) = %q, want %q", input, got, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestResolveWorkspacePath_Good_Unrestricted(t *testing.T) {
|
|
||||||
s, err := New(Options{Unrestricted: true})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if got, want := s.resolveWorkspacePath("docs/readme.md"), filepath.Clean("docs/readme.md"); got != want {
|
|
||||||
t.Fatalf("resolveWorkspacePath(relative) = %q, want %q", got, want)
|
|
||||||
}
|
|
||||||
if got, want := s.resolveWorkspacePath("/tmp/readme.md"), filepath.Clean("/tmp/readme.md"); got != want {
|
|
||||||
t.Fatalf("resolveWorkspacePath(absolute) = %q, want %q", got, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSandboxing_Traversal_Sanitized(t *testing.T) {
|
func TestSandboxing_Traversal_Sanitized(t *testing.T) {
|
||||||
tmpDir := t.TempDir()
|
tmpDir := t.TempDir()
|
||||||
s, err := New(Options{WorkspaceRoot: tmpDir})
|
s, err := New(Options{WorkspaceRoot: tmpDir})
|
||||||
|
|
|
||||||
|
|
@ -11,23 +11,11 @@ import (
|
||||||
"io"
|
"io"
|
||||||
"iter"
|
"iter"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
|
||||||
"slices"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
func normalizeNotificationContext(ctx context.Context) context.Context {
|
|
||||||
if ctx == nil {
|
|
||||||
return context.Background()
|
|
||||||
}
|
|
||||||
return ctx
|
|
||||||
}
|
|
||||||
|
|
||||||
// lockedWriter wraps an io.Writer with a mutex.
|
// lockedWriter wraps an io.Writer with a mutex.
|
||||||
// Both the SDK's transport and ChannelSend use this writer,
|
// Both the SDK's transport and ChannelSend use this writer,
|
||||||
// ensuring channel notifications don't interleave with SDK messages.
|
// ensuring channel notifications don't interleave with SDK messages.
|
||||||
|
|
@ -48,149 +36,20 @@ func (lw *lockedWriter) Close() error { return nil }
|
||||||
// Created once when the MCP service enters stdio mode.
|
// Created once when the MCP service enters stdio mode.
|
||||||
var sharedStdout = &lockedWriter{w: os.Stdout}
|
var sharedStdout = &lockedWriter{w: os.Stdout}
|
||||||
|
|
||||||
// ChannelNotificationMethod is the JSON-RPC method used for named channel
|
|
||||||
// events sent through claude/channel.
|
|
||||||
const ChannelNotificationMethod = "notifications/claude/channel"
|
|
||||||
|
|
||||||
// LoggingNotificationMethod is the JSON-RPC method used for log messages sent
|
|
||||||
// to connected MCP clients.
|
|
||||||
const LoggingNotificationMethod = "notifications/message"
|
|
||||||
|
|
||||||
// ClaudeChannelCapabilityName is the experimental capability key advertised
|
|
||||||
// by the MCP server for channel-based client notifications.
|
|
||||||
const ClaudeChannelCapabilityName = "claude/channel"
|
|
||||||
|
|
||||||
// Shared channel names. Keeping them central avoids drift between emitters
|
|
||||||
// and the advertised claude/channel capability.
|
|
||||||
//
|
|
||||||
// Use these names when emitting structured events from subsystems:
|
|
||||||
//
|
|
||||||
// s.ChannelSend(ctx, ChannelProcessStart, map[string]any{"id": "proc-1"})
|
|
||||||
const (
|
|
||||||
ChannelBuildStart = "build.start"
|
|
||||||
ChannelBuildComplete = "build.complete"
|
|
||||||
ChannelBuildFailed = "build.failed"
|
|
||||||
ChannelAgentComplete = "agent.complete"
|
|
||||||
ChannelAgentBlocked = "agent.blocked"
|
|
||||||
ChannelAgentStatus = "agent.status"
|
|
||||||
ChannelBrainForgetDone = "brain.forget.complete"
|
|
||||||
ChannelBrainListDone = "brain.list.complete"
|
|
||||||
ChannelBrainRecallDone = "brain.recall.complete"
|
|
||||||
ChannelBrainRememberDone = "brain.remember.complete"
|
|
||||||
ChannelHarvestComplete = "harvest.complete"
|
|
||||||
ChannelInboxMessage = "inbox.message"
|
|
||||||
ChannelProcessExit = "process.exit"
|
|
||||||
ChannelProcessStart = "process.start"
|
|
||||||
ChannelProcessOutput = "process.output"
|
|
||||||
ChannelTestResult = "test.result"
|
|
||||||
)
|
|
||||||
|
|
||||||
var channelCapabilityList = []string{
|
|
||||||
ChannelBuildStart,
|
|
||||||
ChannelAgentComplete,
|
|
||||||
ChannelAgentBlocked,
|
|
||||||
ChannelAgentStatus,
|
|
||||||
ChannelBuildComplete,
|
|
||||||
ChannelBuildFailed,
|
|
||||||
ChannelBrainForgetDone,
|
|
||||||
ChannelBrainListDone,
|
|
||||||
ChannelBrainRecallDone,
|
|
||||||
ChannelBrainRememberDone,
|
|
||||||
ChannelHarvestComplete,
|
|
||||||
ChannelInboxMessage,
|
|
||||||
ChannelProcessExit,
|
|
||||||
ChannelProcessStart,
|
|
||||||
ChannelProcessOutput,
|
|
||||||
ChannelTestResult,
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChannelCapabilitySpec describes the experimental claude/channel capability.
|
|
||||||
//
|
|
||||||
// spec := ChannelCapabilitySpec{
|
|
||||||
// Version: "1",
|
|
||||||
// Description: "Push events into client sessions via named channels",
|
|
||||||
// Channels: ChannelCapabilityChannels(),
|
|
||||||
// }
|
|
||||||
type ChannelCapabilitySpec struct {
|
|
||||||
Version string `json:"version"` // e.g. "1"
|
|
||||||
Description string `json:"description"` // capability summary shown to clients
|
|
||||||
Channels []string `json:"channels"` // e.g. []string{"build.complete", "agent.status"}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Map converts the typed capability into the wire-format map expected by the SDK.
|
|
||||||
//
|
|
||||||
// caps := ChannelCapabilitySpec{
|
|
||||||
// Version: "1",
|
|
||||||
// Description: "Push events into client sessions via named channels",
|
|
||||||
// Channels: ChannelCapabilityChannels(),
|
|
||||||
// }.Map()
|
|
||||||
func (c ChannelCapabilitySpec) Map() map[string]any {
|
|
||||||
return map[string]any{
|
|
||||||
"version": c.Version,
|
|
||||||
"description": c.Description,
|
|
||||||
"channels": slices.Clone(c.Channels),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChannelNotification is the payload sent through the experimental channel
|
|
||||||
// notification method.
|
|
||||||
//
|
|
||||||
// n := ChannelNotification{
|
|
||||||
// Channel: ChannelBuildComplete,
|
|
||||||
// Data: map[string]any{"repo": "core/mcp"},
|
|
||||||
// }
|
|
||||||
type ChannelNotification struct {
|
|
||||||
Channel string `json:"channel"` // e.g. "build.complete"
|
|
||||||
Data any `json:"data"` // arbitrary payload for the named channel
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendNotificationToAllClients broadcasts a log-level notification to every
|
// SendNotificationToAllClients broadcasts a log-level notification to every
|
||||||
// connected MCP session (stdio, HTTP, TCP, and Unix).
|
// connected MCP session (stdio, HTTP, TCP, and Unix).
|
||||||
// Errors on individual sessions are logged but do not stop the broadcast.
|
// Errors on individual sessions are logged but do not stop the broadcast.
|
||||||
//
|
//
|
||||||
// s.SendNotificationToAllClients(ctx, "info", "monitor", map[string]any{"event": "build complete"})
|
// s.SendNotificationToAllClients(ctx, "info", "monitor", map[string]any{"event": "build complete"})
|
||||||
func (s *Service) SendNotificationToAllClients(ctx context.Context, level mcp.LoggingLevel, logger string, data any) {
|
func (s *Service) SendNotificationToAllClients(ctx context.Context, level mcp.LoggingLevel, logger string, data any) {
|
||||||
if s == nil || s.server == nil {
|
for session := range s.server.Sessions() {
|
||||||
return
|
if err := session.Log(ctx, &mcp.LoggingMessageParams{
|
||||||
}
|
Level: level,
|
||||||
ctx = normalizeNotificationContext(ctx)
|
Logger: logger,
|
||||||
s.broadcastToSessions(func(session *mcp.ServerSession) {
|
Data: data,
|
||||||
s.sendLoggingNotificationToSession(ctx, session, level, logger, data)
|
}); err != nil {
|
||||||
})
|
s.logger.Debug("notify: failed to send to session", "session", session.ID(), "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SendNotificationToSession sends a log-level notification to one connected
|
|
||||||
// MCP session.
|
|
||||||
//
|
|
||||||
// s.SendNotificationToSession(ctx, session, "info", "monitor", data)
|
|
||||||
func (s *Service) SendNotificationToSession(ctx context.Context, session *mcp.ServerSession, level mcp.LoggingLevel, logger string, data any) {
|
|
||||||
if s == nil || s.server == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ctx = normalizeNotificationContext(ctx)
|
|
||||||
s.sendLoggingNotificationToSession(ctx, session, level, logger, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendNotificationToClient sends a log-level notification to one connected
|
|
||||||
// MCP client.
|
|
||||||
//
|
|
||||||
// s.SendNotificationToClient(ctx, client, "info", "monitor", data)
|
|
||||||
func (s *Service) SendNotificationToClient(ctx context.Context, client *mcp.ServerSession, level mcp.LoggingLevel, logger string, data any) {
|
|
||||||
s.SendNotificationToSession(ctx, client, level, logger, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) sendLoggingNotificationToSession(ctx context.Context, session *mcp.ServerSession, level mcp.LoggingLevel, logger string, data any) {
|
|
||||||
if s == nil || s.server == nil || session == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ctx = normalizeNotificationContext(ctx)
|
|
||||||
|
|
||||||
if err := sendSessionNotification(ctx, session, LoggingNotificationMethod, &mcp.LoggingMessageParams{
|
|
||||||
Level: level,
|
|
||||||
Logger: logger,
|
|
||||||
Data: data,
|
|
||||||
}); err != nil {
|
|
||||||
s.debugNotify("notify: failed to send to session", "session", session.ID(), "error", err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -200,39 +59,32 @@ func (s *Service) sendLoggingNotificationToSession(ctx context.Context, session
|
||||||
// s.ChannelSend(ctx, "agent.complete", map[string]any{"repo": "go-io", "workspace": "go-io-123"})
|
// s.ChannelSend(ctx, "agent.complete", map[string]any{"repo": "go-io", "workspace": "go-io-123"})
|
||||||
// s.ChannelSend(ctx, "build.failed", map[string]any{"repo": "core", "error": "test timeout"})
|
// s.ChannelSend(ctx, "build.failed", map[string]any{"repo": "core", "error": "test timeout"})
|
||||||
func (s *Service) ChannelSend(ctx context.Context, channel string, data any) {
|
func (s *Service) ChannelSend(ctx context.Context, channel string, data any) {
|
||||||
if s == nil || s.server == nil {
|
payload := map[string]any{
|
||||||
return
|
"channel": channel,
|
||||||
|
"data": data,
|
||||||
}
|
}
|
||||||
if strings.TrimSpace(channel) == "" {
|
s.SendNotificationToAllClients(ctx, mcp.LoggingLevel("info"), "channel", payload)
|
||||||
return
|
|
||||||
}
|
|
||||||
ctx = normalizeNotificationContext(ctx)
|
|
||||||
payload := ChannelNotification{Channel: channel, Data: data}
|
|
||||||
s.sendChannelNotificationToAllClients(ctx, payload)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChannelSendToSession pushes a channel event to a specific session.
|
// ChannelSendToSession pushes a channel event to a specific session.
|
||||||
//
|
//
|
||||||
// s.ChannelSendToSession(ctx, session, "agent.progress", progressData)
|
// s.ChannelSendToSession(ctx, session, "agent.progress", progressData)
|
||||||
func (s *Service) ChannelSendToSession(ctx context.Context, session *mcp.ServerSession, channel string, data any) {
|
func (s *Service) ChannelSendToSession(ctx context.Context, session *mcp.ServerSession, channel string, data any) {
|
||||||
if s == nil || s.server == nil || session == nil {
|
if session == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if strings.TrimSpace(channel) == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ctx = normalizeNotificationContext(ctx)
|
|
||||||
payload := ChannelNotification{Channel: channel, Data: data}
|
|
||||||
if err := sendSessionNotification(ctx, session, ChannelNotificationMethod, payload); err != nil {
|
|
||||||
s.debugNotify("channel: failed to send to session", "session", session.ID(), "error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChannelSendToClient pushes a channel event to one connected MCP client.
|
payload := map[string]any{
|
||||||
//
|
"channel": channel,
|
||||||
// s.ChannelSendToClient(ctx, client, "agent.progress", progressData)
|
"data": data,
|
||||||
func (s *Service) ChannelSendToClient(ctx context.Context, client *mcp.ServerSession, channel string, data any) {
|
}
|
||||||
s.ChannelSendToSession(ctx, client, channel, data)
|
if err := session.Log(ctx, &mcp.LoggingMessageParams{
|
||||||
|
Level: mcp.LoggingLevel("info"),
|
||||||
|
Logger: "channel",
|
||||||
|
Data: payload,
|
||||||
|
}); err != nil {
|
||||||
|
s.logger.Debug("channel: failed to send to session", "session", session.ID(), "error", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sessions returns an iterator over all connected MCP sessions.
|
// Sessions returns an iterator over all connected MCP sessions.
|
||||||
|
|
@ -241,171 +93,31 @@ func (s *Service) ChannelSendToClient(ctx context.Context, client *mcp.ServerSes
|
||||||
// s.ChannelSendToSession(ctx, session, "status", data)
|
// s.ChannelSendToSession(ctx, session, "status", data)
|
||||||
// }
|
// }
|
||||||
func (s *Service) Sessions() iter.Seq[*mcp.ServerSession] {
|
func (s *Service) Sessions() iter.Seq[*mcp.ServerSession] {
|
||||||
if s == nil || s.server == nil {
|
return s.server.Sessions()
|
||||||
return func(yield func(*mcp.ServerSession) bool) {}
|
|
||||||
}
|
|
||||||
return slices.Values(snapshotSessions(s.server))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) sendChannelNotificationToAllClients(ctx context.Context, payload ChannelNotification) {
|
|
||||||
if s == nil || s.server == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ctx = normalizeNotificationContext(ctx)
|
|
||||||
s.broadcastToSessions(func(session *mcp.ServerSession) {
|
|
||||||
if err := sendSessionNotification(ctx, session, ChannelNotificationMethod, payload); err != nil {
|
|
||||||
s.debugNotify("channel: failed to send to session", "session", session.ID(), "error", err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) broadcastToSessions(fn func(*mcp.ServerSession)) {
|
|
||||||
if s == nil || s.server == nil || fn == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, session := range snapshotSessions(s.server) {
|
|
||||||
fn(session)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) debugNotify(msg string, args ...any) {
|
|
||||||
if s == nil || s.logger == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s.logger.Debug(msg, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func sendSessionNotification(ctx context.Context, session *mcp.ServerSession, method string, payload any) error {
|
|
||||||
if session == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
ctx = normalizeNotificationContext(ctx)
|
|
||||||
|
|
||||||
if conn, err := sessionMCPConnection(session); err == nil {
|
|
||||||
if notifier, ok := conn.(interface {
|
|
||||||
Notify(context.Context, string, any) error
|
|
||||||
}); ok {
|
|
||||||
if err := notifier.Notify(ctx, method, payload); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
conn, err := sessionJSONRPCConnection(session)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
notifier, ok := conn.(interface {
|
|
||||||
Notify(context.Context, string, any) error
|
|
||||||
})
|
|
||||||
if !ok {
|
|
||||||
return coreNotifyError("connection Notify method unavailable")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := notifier.Notify(ctx, method, payload); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func sessionMCPConnection(session *mcp.ServerSession) (any, error) {
|
|
||||||
value := reflect.ValueOf(session)
|
|
||||||
if value.Kind() != reflect.Ptr || value.IsNil() {
|
|
||||||
return nil, coreNotifyError("invalid session")
|
|
||||||
}
|
|
||||||
|
|
||||||
field := value.Elem().FieldByName("mcpConn")
|
|
||||||
if !field.IsValid() {
|
|
||||||
return nil, coreNotifyError("session mcp connection field unavailable")
|
|
||||||
}
|
|
||||||
|
|
||||||
return reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem().Interface(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func sessionJSONRPCConnection(session *mcp.ServerSession) (any, error) {
|
|
||||||
value := reflect.ValueOf(session)
|
|
||||||
if value.Kind() != reflect.Ptr || value.IsNil() {
|
|
||||||
return nil, coreNotifyError("invalid session")
|
|
||||||
}
|
|
||||||
|
|
||||||
field := value.Elem().FieldByName("conn")
|
|
||||||
if !field.IsValid() {
|
|
||||||
return nil, coreNotifyError("session connection field unavailable")
|
|
||||||
}
|
|
||||||
|
|
||||||
return reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem().Interface(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func coreNotifyError(message string) error {
|
|
||||||
return ¬ificationError{message: message}
|
|
||||||
}
|
|
||||||
|
|
||||||
func snapshotSessions(server *mcp.Server) []*mcp.ServerSession {
|
|
||||||
if server == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
sessions := make([]*mcp.ServerSession, 0)
|
|
||||||
for session := range server.Sessions() {
|
|
||||||
if session != nil {
|
|
||||||
sessions = append(sessions, session)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Slice(sessions, func(i, j int) bool {
|
|
||||||
return sessions[i].ID() < sessions[j].ID()
|
|
||||||
})
|
|
||||||
|
|
||||||
return sessions
|
|
||||||
}
|
|
||||||
|
|
||||||
type notificationError struct {
|
|
||||||
message string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *notificationError) Error() string {
|
|
||||||
return e.message
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// channelCapability returns the experimental capability descriptor
|
// channelCapability returns the experimental capability descriptor
|
||||||
// for claude/channel, registered during New().
|
// for claude/channel, registered during New().
|
||||||
func channelCapability() map[string]any {
|
func channelCapability() map[string]any {
|
||||||
return map[string]any{
|
return map[string]any{
|
||||||
ClaudeChannelCapabilityName: ClaudeChannelCapability().Map(),
|
"claude/channel": map[string]any{
|
||||||
|
"version": "1",
|
||||||
|
"description": "Push events into client sessions via named channels",
|
||||||
|
"channels": []string{
|
||||||
|
"agent.complete",
|
||||||
|
"agent.blocked",
|
||||||
|
"agent.status",
|
||||||
|
"build.complete",
|
||||||
|
"build.failed",
|
||||||
|
"brain.list.complete",
|
||||||
|
"brain.forget.complete",
|
||||||
|
"brain.remember.complete",
|
||||||
|
"brain.recall.complete",
|
||||||
|
"inbox.message",
|
||||||
|
"process.exit",
|
||||||
|
"harvest.complete",
|
||||||
|
"test.result",
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClaudeChannelCapability returns the typed experimental capability descriptor.
|
|
||||||
//
|
|
||||||
// cap := ClaudeChannelCapability()
|
|
||||||
// caps := cap.Map()
|
|
||||||
func ClaudeChannelCapability() ChannelCapabilitySpec {
|
|
||||||
return ChannelCapabilitySpec{
|
|
||||||
Version: "1",
|
|
||||||
Description: "Push events into client sessions via named channels",
|
|
||||||
Channels: channelCapabilityChannels(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChannelCapability returns the experimental capability descriptor registered
|
|
||||||
// during New(). Callers can reuse it when exposing server metadata.
|
|
||||||
//
|
|
||||||
// caps := ChannelCapability()
|
|
||||||
func ChannelCapability() map[string]any {
|
|
||||||
return channelCapability()
|
|
||||||
}
|
|
||||||
|
|
||||||
// channelCapabilityChannels lists the named channel events advertised by the
|
|
||||||
// experimental capability.
|
|
||||||
func channelCapabilityChannels() []string {
|
|
||||||
return slices.Clone(channelCapabilityList)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChannelCapabilityChannels returns the named channel events advertised by the
|
|
||||||
// experimental capability.
|
|
||||||
//
|
|
||||||
// channels := ChannelCapabilityChannels()
|
|
||||||
func ChannelCapabilityChannels() []string {
|
|
||||||
return channelCapabilityChannels()
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -1,94 +1,10 @@
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
|
||||||
"net"
|
|
||||||
"reflect"
|
|
||||||
"slices"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type notificationReadResult struct {
|
|
||||||
msg map[string]any
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func connectNotificationSession(t *testing.T, svc *Service) (context.CancelFunc, *mcp.ServerSession, net.Conn) {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
serverConn, clientConn := net.Pipe()
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
|
||||||
if err != nil {
|
|
||||||
cancel()
|
|
||||||
clientConn.Close()
|
|
||||||
t.Fatalf("Connect() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return cancel, session, clientConn
|
|
||||||
}
|
|
||||||
|
|
||||||
func readNotificationMessage(t *testing.T, conn net.Conn) <-chan notificationReadResult {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
resultCh := make(chan notificationReadResult, 1)
|
|
||||||
go func() {
|
|
||||||
scanner := bufio.NewScanner(conn)
|
|
||||||
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
|
|
||||||
|
|
||||||
if !scanner.Scan() {
|
|
||||||
resultCh <- notificationReadResult{err: scanner.Err()}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var msg map[string]any
|
|
||||||
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
|
|
||||||
resultCh <- notificationReadResult{err: err}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
resultCh <- notificationReadResult{msg: msg}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return resultCh
|
|
||||||
}
|
|
||||||
|
|
||||||
func readNotificationMessageUntil(t *testing.T, conn net.Conn, match func(map[string]any) bool) <-chan notificationReadResult {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
resultCh := make(chan notificationReadResult, 1)
|
|
||||||
scanner := bufio.NewScanner(conn)
|
|
||||||
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
for scanner.Scan() {
|
|
||||||
var msg map[string]any
|
|
||||||
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
|
|
||||||
resultCh <- notificationReadResult{err: err}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if match(msg) {
|
|
||||||
resultCh <- notificationReadResult{msg: msg}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
resultCh <- notificationReadResult{err: err}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resultCh <- notificationReadResult{err: context.DeadlineExceeded}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return resultCh
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSendNotificationToAllClients_Good(t *testing.T) {
|
func TestSendNotificationToAllClients_Good(t *testing.T) {
|
||||||
svc, err := New(Options{})
|
svc, err := New(Options{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -97,141 +13,10 @@ func TestSendNotificationToAllClients_Good(t *testing.T) {
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{
|
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{
|
||||||
"event": ChannelBuildComplete,
|
"event": "build.complete",
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNotificationMethods_Good_NilService(t *testing.T) {
|
|
||||||
var svc *Service
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{"ok": true})
|
|
||||||
svc.SendNotificationToSession(ctx, nil, "info", "test", map[string]any{"ok": true})
|
|
||||||
svc.ChannelSend(ctx, ChannelBuildComplete, map[string]any{"ok": true})
|
|
||||||
svc.ChannelSendToSession(ctx, nil, ChannelBuildComplete, map[string]any{"ok": true})
|
|
||||||
|
|
||||||
for range svc.Sessions() {
|
|
||||||
t.Fatal("expected no sessions from nil service")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNotificationMethods_Good_NilServer(t *testing.T) {
|
|
||||||
svc := &Service{}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{"ok": true})
|
|
||||||
svc.SendNotificationToSession(ctx, nil, "info", "test", map[string]any{"ok": true})
|
|
||||||
svc.ChannelSend(ctx, ChannelBuildComplete, map[string]any{"ok": true})
|
|
||||||
svc.ChannelSendToSession(ctx, nil, ChannelBuildComplete, map[string]any{"ok": true})
|
|
||||||
|
|
||||||
for range svc.Sessions() {
|
|
||||||
t.Fatal("expected no sessions from service without a server")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSessions_Good_ReturnsSnapshot(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cancel, session, _ := connectNotificationSession(t, svc)
|
|
||||||
snapshot := svc.Sessions()
|
|
||||||
|
|
||||||
cancel()
|
|
||||||
session.Close()
|
|
||||||
|
|
||||||
var sessions []*mcp.ServerSession
|
|
||||||
for session := range snapshot {
|
|
||||||
sessions = append(sessions, session)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(sessions) != 1 {
|
|
||||||
t.Fatalf("expected snapshot to retain one session, got %d", len(sessions))
|
|
||||||
}
|
|
||||||
if sessions[0] == nil {
|
|
||||||
t.Fatal("expected snapshot session to be non-nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNotificationMethods_Good_NilContext(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
svc.SendNotificationToAllClients(nil, "info", "test", map[string]any{"ok": true})
|
|
||||||
svc.SendNotificationToSession(nil, nil, "info", "test", map[string]any{"ok": true})
|
|
||||||
svc.ChannelSend(nil, ChannelBuildComplete, map[string]any{"ok": true})
|
|
||||||
svc.ChannelSendToSession(nil, nil, ChannelBuildComplete, map[string]any{"ok": true})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSendNotificationToAllClients_Good_CustomNotification(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serverConn, clientConn := net.Pipe()
|
|
||||||
defer clientConn.Close()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Connect() failed: %v", err)
|
|
||||||
}
|
|
||||||
defer session.Close()
|
|
||||||
|
|
||||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
|
||||||
|
|
||||||
read := readNotificationMessageUntil(t, clientConn, func(msg map[string]any) bool {
|
|
||||||
return msg["method"] == LoggingNotificationMethod
|
|
||||||
})
|
|
||||||
|
|
||||||
sent := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{
|
|
||||||
"event": ChannelBuildComplete,
|
|
||||||
})
|
|
||||||
close(sent)
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-sent:
|
|
||||||
case <-time.After(5 * time.Second):
|
|
||||||
t.Fatal("timed out waiting for notification send to complete")
|
|
||||||
}
|
|
||||||
|
|
||||||
res := <-read
|
|
||||||
if res.err != nil {
|
|
||||||
t.Fatalf("failed to read notification: %v", res.err)
|
|
||||||
}
|
|
||||||
msg := res.msg
|
|
||||||
if msg["method"] != LoggingNotificationMethod {
|
|
||||||
t.Fatalf("expected method %q, got %v", LoggingNotificationMethod, msg["method"])
|
|
||||||
}
|
|
||||||
|
|
||||||
params, ok := msg["params"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected params object, got %T", msg["params"])
|
|
||||||
}
|
|
||||||
if params["logger"] != "test" {
|
|
||||||
t.Fatalf("expected logger test, got %v", params["logger"])
|
|
||||||
}
|
|
||||||
if params["level"] != "info" {
|
|
||||||
t.Fatalf("expected level info, got %v", params["level"])
|
|
||||||
}
|
|
||||||
data, ok := params["data"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected data object, got %T", params["data"])
|
|
||||||
}
|
|
||||||
if data["event"] != ChannelBuildComplete {
|
|
||||||
t.Fatalf("expected event %s, got %v", ChannelBuildComplete, data["event"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChannelSend_Good(t *testing.T) {
|
func TestChannelSend_Good(t *testing.T) {
|
||||||
svc, err := New(Options{})
|
svc, err := New(Options{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -239,7 +24,7 @@ func TestChannelSend_Good(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
svc.ChannelSend(ctx, ChannelBuildComplete, map[string]any{
|
svc.ChannelSend(ctx, "build.complete", map[string]any{
|
||||||
"repo": "go-io",
|
"repo": "go-io",
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
@ -251,185 +36,14 @@ func TestChannelSendToSession_Good_GuardNilSession(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
svc.ChannelSendToSession(ctx, nil, ChannelAgentStatus, map[string]any{
|
svc.ChannelSendToSession(ctx, nil, "agent.status", map[string]any{
|
||||||
"ok": true,
|
"ok": true,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSendNotificationToSession_Good_GuardNilSession(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
svc.SendNotificationToSession(ctx, nil, "info", "test", map[string]any{
|
|
||||||
"ok": true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChannelSendToSession_Good_CustomNotification(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serverConn, clientConn := net.Pipe()
|
|
||||||
defer clientConn.Close()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Connect() failed: %v", err)
|
|
||||||
}
|
|
||||||
defer session.Close()
|
|
||||||
|
|
||||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
|
||||||
|
|
||||||
read := readNotificationMessageUntil(t, clientConn, func(msg map[string]any) bool {
|
|
||||||
return msg["method"] == ChannelNotificationMethod
|
|
||||||
})
|
|
||||||
|
|
||||||
sent := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
svc.ChannelSendToSession(ctx, session, ChannelBuildComplete, map[string]any{
|
|
||||||
"repo": "go-io",
|
|
||||||
})
|
|
||||||
close(sent)
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-sent:
|
|
||||||
case <-time.After(5 * time.Second):
|
|
||||||
t.Fatal("timed out waiting for notification send to complete")
|
|
||||||
}
|
|
||||||
|
|
||||||
res := <-read
|
|
||||||
if res.err != nil {
|
|
||||||
t.Fatalf("failed to read custom notification: %v", res.err)
|
|
||||||
}
|
|
||||||
msg := res.msg
|
|
||||||
if msg["method"] != ChannelNotificationMethod {
|
|
||||||
t.Fatalf("expected method %q, got %v", ChannelNotificationMethod, msg["method"])
|
|
||||||
}
|
|
||||||
|
|
||||||
params, ok := msg["params"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected params object, got %T", msg["params"])
|
|
||||||
}
|
|
||||||
if params["channel"] != ChannelBuildComplete {
|
|
||||||
t.Fatalf("expected channel %s, got %v", ChannelBuildComplete, params["channel"])
|
|
||||||
}
|
|
||||||
payload, ok := params["data"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected data object, got %T", params["data"])
|
|
||||||
}
|
|
||||||
if payload["repo"] != "go-io" {
|
|
||||||
t.Fatalf("expected repo go-io, got %v", payload["repo"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChannelSendToClient_Good_CustomNotification(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serverConn, clientConn := net.Pipe()
|
|
||||||
defer clientConn.Close()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Connect() failed: %v", err)
|
|
||||||
}
|
|
||||||
defer session.Close()
|
|
||||||
|
|
||||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
|
||||||
|
|
||||||
read := readNotificationMessageUntil(t, clientConn, func(msg map[string]any) bool {
|
|
||||||
return msg["method"] == ChannelNotificationMethod
|
|
||||||
})
|
|
||||||
|
|
||||||
sent := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
svc.ChannelSendToClient(ctx, session, ChannelBuildComplete, map[string]any{
|
|
||||||
"repo": "go-io",
|
|
||||||
})
|
|
||||||
close(sent)
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-sent:
|
|
||||||
case <-time.After(5 * time.Second):
|
|
||||||
t.Fatal("timed out waiting for notification send to complete")
|
|
||||||
}
|
|
||||||
|
|
||||||
res := <-read
|
|
||||||
if res.err != nil {
|
|
||||||
t.Fatalf("failed to read custom notification: %v", res.err)
|
|
||||||
}
|
|
||||||
msg := res.msg
|
|
||||||
if msg["method"] != ChannelNotificationMethod {
|
|
||||||
t.Fatalf("expected method %q, got %v", ChannelNotificationMethod, msg["method"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSendNotificationToClient_Good_CustomNotification(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serverConn, clientConn := net.Pipe()
|
|
||||||
defer clientConn.Close()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Connect() failed: %v", err)
|
|
||||||
}
|
|
||||||
defer session.Close()
|
|
||||||
|
|
||||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
|
||||||
|
|
||||||
read := readNotificationMessageUntil(t, clientConn, func(msg map[string]any) bool {
|
|
||||||
return msg["method"] == LoggingNotificationMethod
|
|
||||||
})
|
|
||||||
|
|
||||||
sent := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
svc.SendNotificationToClient(ctx, session, "info", "test", map[string]any{
|
|
||||||
"event": ChannelBuildComplete,
|
|
||||||
})
|
|
||||||
close(sent)
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-sent:
|
|
||||||
case <-time.After(5 * time.Second):
|
|
||||||
t.Fatal("timed out waiting for notification send to complete")
|
|
||||||
}
|
|
||||||
|
|
||||||
res := <-read
|
|
||||||
if res.err != nil {
|
|
||||||
t.Fatalf("failed to read notification: %v", res.err)
|
|
||||||
}
|
|
||||||
msg := res.msg
|
|
||||||
if msg["method"] != LoggingNotificationMethod {
|
|
||||||
t.Fatalf("expected method %q, got %v", LoggingNotificationMethod, msg["method"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChannelCapability_Good(t *testing.T) {
|
func TestChannelCapability_Good(t *testing.T) {
|
||||||
caps := channelCapability()
|
caps := channelCapability()
|
||||||
raw, ok := caps[ClaudeChannelCapabilityName]
|
raw, ok := caps["claude/channel"]
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Fatal("expected claude/channel capability entry")
|
t.Fatal("expected claude/channel capability entry")
|
||||||
}
|
}
|
||||||
|
|
@ -450,121 +64,4 @@ func TestChannelCapability_Good(t *testing.T) {
|
||||||
if len(channels) == 0 {
|
if len(channels) == 0 {
|
||||||
t.Fatal("expected at least one channel in capability definition")
|
t.Fatal("expected at least one channel in capability definition")
|
||||||
}
|
}
|
||||||
|
|
||||||
want := channelCapabilityChannels()
|
|
||||||
if got, wantLen := len(channels), len(want); got != wantLen {
|
|
||||||
t.Fatalf("expected %d channels, got %d", wantLen, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, channel := range want {
|
|
||||||
if !slices.Contains(channels, channel) {
|
|
||||||
t.Fatalf("expected channel %q to be advertised in capability definition", channel)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChannelCapability_Good_PublicHelpers(t *testing.T) {
|
|
||||||
got := ChannelCapability()
|
|
||||||
want := channelCapability()
|
|
||||||
if !reflect.DeepEqual(got, want) {
|
|
||||||
t.Fatalf("expected public capability helper to match internal definition")
|
|
||||||
}
|
|
||||||
|
|
||||||
spec := ClaudeChannelCapability()
|
|
||||||
if spec.Version != "1" {
|
|
||||||
t.Fatalf("expected typed capability version 1, got %q", spec.Version)
|
|
||||||
}
|
|
||||||
if spec.Description == "" {
|
|
||||||
t.Fatal("expected typed capability description to be populated")
|
|
||||||
}
|
|
||||||
if !slices.Equal(spec.Channels, channelCapabilityChannels()) {
|
|
||||||
t.Fatalf("expected typed capability channels to match: got %v want %v", spec.Channels, channelCapabilityChannels())
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(spec.Map(), want[ClaudeChannelCapabilityName].(map[string]any)) {
|
|
||||||
t.Fatal("expected typed capability map to match wire-format descriptor")
|
|
||||||
}
|
|
||||||
|
|
||||||
gotChannels := ChannelCapabilityChannels()
|
|
||||||
wantChannels := channelCapabilityChannels()
|
|
||||||
if !slices.Equal(gotChannels, wantChannels) {
|
|
||||||
t.Fatalf("expected public channel list to match internal definition: got %v want %v", gotChannels, wantChannels)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChannelCapabilitySpec_Map_Good_ClonesChannels(t *testing.T) {
|
|
||||||
spec := ClaudeChannelCapability()
|
|
||||||
mapped := spec.Map()
|
|
||||||
|
|
||||||
channels, ok := mapped["channels"].([]string)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected channels to be []string, got %T", mapped["channels"])
|
|
||||||
}
|
|
||||||
if len(channels) == 0 {
|
|
||||||
t.Fatal("expected non-empty channels slice")
|
|
||||||
}
|
|
||||||
|
|
||||||
spec.Channels[0] = "mutated.channel"
|
|
||||||
if channels[0] == "mutated.channel" {
|
|
||||||
t.Fatal("expected Map() to clone the channels slice")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSendNotificationToAllClients_Good_BroadcastsToMultipleSessions(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cancel1, session1, clientConn1 := connectNotificationSession(t, svc)
|
|
||||||
defer cancel1()
|
|
||||||
defer session1.Close()
|
|
||||||
defer clientConn1.Close()
|
|
||||||
|
|
||||||
cancel2, session2, clientConn2 := connectNotificationSession(t, svc)
|
|
||||||
defer cancel2()
|
|
||||||
defer session2.Close()
|
|
||||||
defer clientConn2.Close()
|
|
||||||
|
|
||||||
read1 := readNotificationMessage(t, clientConn1)
|
|
||||||
read2 := readNotificationMessage(t, clientConn2)
|
|
||||||
|
|
||||||
sent := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
svc.SendNotificationToAllClients(ctx, "info", "test", map[string]any{
|
|
||||||
"event": ChannelBuildComplete,
|
|
||||||
})
|
|
||||||
close(sent)
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-sent:
|
|
||||||
case <-time.After(5 * time.Second):
|
|
||||||
t.Fatal("timed out waiting for broadcast to complete")
|
|
||||||
}
|
|
||||||
|
|
||||||
res1 := <-read1
|
|
||||||
if res1.err != nil {
|
|
||||||
t.Fatalf("failed to read notification from session 1: %v", res1.err)
|
|
||||||
}
|
|
||||||
res2 := <-read2
|
|
||||||
if res2.err != nil {
|
|
||||||
t.Fatalf("failed to read notification from session 2: %v", res2.err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for idx, res := range []notificationReadResult{res1, res2} {
|
|
||||||
if res.msg["method"] != LoggingNotificationMethod {
|
|
||||||
t.Fatalf("session %d: expected method %q, got %v", idx+1, LoggingNotificationMethod, res.msg["method"])
|
|
||||||
}
|
|
||||||
|
|
||||||
params, ok := res.msg["params"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("session %d: expected params object, got %T", idx+1, res.msg["params"])
|
|
||||||
}
|
|
||||||
if params["logger"] != "test" {
|
|
||||||
t.Fatalf("session %d: expected logger test, got %v", idx+1, params["logger"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,123 +0,0 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package mcp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type processRuntime struct {
|
|
||||||
Command string
|
|
||||||
Args []string
|
|
||||||
Dir string
|
|
||||||
StartedAt time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) recordProcessRuntime(id string, meta processRuntime) {
|
|
||||||
if id == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.processMu.Lock()
|
|
||||||
defer s.processMu.Unlock()
|
|
||||||
|
|
||||||
if s.processMeta == nil {
|
|
||||||
s.processMeta = make(map[string]processRuntime)
|
|
||||||
}
|
|
||||||
s.processMeta[id] = meta
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) processRuntimeFor(id string) (processRuntime, bool) {
|
|
||||||
s.processMu.Lock()
|
|
||||||
defer s.processMu.Unlock()
|
|
||||||
|
|
||||||
meta, ok := s.processMeta[id]
|
|
||||||
return meta, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) forgetProcessRuntime(id string) {
|
|
||||||
if id == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.processMu.Lock()
|
|
||||||
defer s.processMu.Unlock()
|
|
||||||
|
|
||||||
delete(s.processMeta, id)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isTestProcess(command string, args []string) bool {
|
|
||||||
base := strings.ToLower(filepath.Base(command))
|
|
||||||
if base == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
switch base {
|
|
||||||
case "go":
|
|
||||||
return len(args) > 0 && strings.EqualFold(args[0], "test")
|
|
||||||
case "cargo":
|
|
||||||
return len(args) > 0 && strings.EqualFold(args[0], "test")
|
|
||||||
case "npm", "pnpm", "yarn", "bun":
|
|
||||||
for _, arg := range args {
|
|
||||||
if strings.EqualFold(arg, "test") || strings.HasPrefix(strings.ToLower(arg), "test:") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
case "pytest", "phpunit", "jest", "vitest", "rspec", "go-test":
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) emitTestResult(ctx context.Context, processID string, exitCode int, duration time.Duration, signal string, errText string) {
|
|
||||||
defer s.forgetProcessRuntime(processID)
|
|
||||||
|
|
||||||
meta, ok := s.processRuntimeFor(processID)
|
|
||||||
if !ok || !isTestProcess(meta.Command, meta.Args) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if duration <= 0 && !meta.StartedAt.IsZero() {
|
|
||||||
duration = time.Since(meta.StartedAt)
|
|
||||||
}
|
|
||||||
|
|
||||||
status := "failed"
|
|
||||||
if signal != "" {
|
|
||||||
status = "aborted"
|
|
||||||
} else if exitCode == 0 {
|
|
||||||
status = "passed"
|
|
||||||
}
|
|
||||||
|
|
||||||
payload := map[string]any{
|
|
||||||
"id": processID,
|
|
||||||
"command": meta.Command,
|
|
||||||
"args": meta.Args,
|
|
||||||
"status": status,
|
|
||||||
"passed": status == "passed",
|
|
||||||
}
|
|
||||||
if meta.Dir != "" {
|
|
||||||
payload["dir"] = meta.Dir
|
|
||||||
}
|
|
||||||
if !meta.StartedAt.IsZero() {
|
|
||||||
payload["startedAt"] = meta.StartedAt
|
|
||||||
}
|
|
||||||
if duration > 0 {
|
|
||||||
payload["duration"] = duration
|
|
||||||
}
|
|
||||||
if signal == "" || exitCode != 0 {
|
|
||||||
payload["exitCode"] = exitCode
|
|
||||||
}
|
|
||||||
if signal != "" {
|
|
||||||
payload["signal"] = signal
|
|
||||||
}
|
|
||||||
if errText != "" {
|
|
||||||
payload["error"] = errText
|
|
||||||
}
|
|
||||||
|
|
||||||
s.ChannelSend(ctx, ChannelTestResult, payload)
|
|
||||||
}
|
|
||||||
|
|
@ -4,17 +4,14 @@ package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"time"
|
|
||||||
|
|
||||||
core "dappco.re/go/core"
|
core "dappco.re/go/core"
|
||||||
"forge.lthn.ai/core/go-process"
|
"forge.lthn.ai/core/go-log"
|
||||||
"forge.lthn.ai/core/go-ws"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register is the service factory for core.WithService.
|
// Register is the service factory for core.WithService.
|
||||||
// Creates the MCP service, discovers subsystems from other Core services,
|
// Creates the MCP service, discovers subsystems from other Core services,
|
||||||
// and wires optional process and WebSocket dependencies when they are
|
// and wires notifiers.
|
||||||
// already registered in Core.
|
|
||||||
//
|
//
|
||||||
// core.New(
|
// core.New(
|
||||||
// core.WithService(agentic.Register),
|
// core.WithService(agentic.Register),
|
||||||
|
|
@ -25,8 +22,6 @@ import (
|
||||||
func Register(c *core.Core) core.Result {
|
func Register(c *core.Core) core.Result {
|
||||||
// Collect subsystems from registered services
|
// Collect subsystems from registered services
|
||||||
var subsystems []Subsystem
|
var subsystems []Subsystem
|
||||||
var processService *process.Service
|
|
||||||
var wsHub *ws.Hub
|
|
||||||
for _, name := range c.Services() {
|
for _, name := range c.Services() {
|
||||||
r := c.Service(name)
|
r := c.Service(name)
|
||||||
if !r.OK {
|
if !r.OK {
|
||||||
|
|
@ -34,34 +29,24 @@ func Register(c *core.Core) core.Result {
|
||||||
}
|
}
|
||||||
if sub, ok := r.Value.(Subsystem); ok {
|
if sub, ok := r.Value.(Subsystem); ok {
|
||||||
subsystems = append(subsystems, sub)
|
subsystems = append(subsystems, sub)
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch v := r.Value.(type) {
|
|
||||||
case *process.Service:
|
|
||||||
processService = v
|
|
||||||
case *ws.Hub:
|
|
||||||
wsHub = v
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
svc, err := New(Options{
|
svc, err := New(Options{
|
||||||
ProcessService: processService,
|
Subsystems: subsystems,
|
||||||
WSHub: wsHub,
|
|
||||||
Subsystems: subsystems,
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return core.Result{Value: err, OK: false}
|
return core.Result{Value: err, OK: false}
|
||||||
}
|
}
|
||||||
|
|
||||||
svc.ServiceRuntime = core.NewServiceRuntime(c, struct{}{})
|
svc.ServiceRuntime = core.NewServiceRuntime(c, McpOptions{})
|
||||||
|
svc.coreRef = c // kept until all methods migrate to s.Core()
|
||||||
|
|
||||||
return core.Result{Value: svc, OK: true}
|
return core.Result{Value: svc, OK: true}
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnStartup implements core.Startable — registers MCP transport commands.
|
// OnStartup implements core.Startable — registers MCP transport commands.
|
||||||
//
|
//
|
||||||
// svc.OnStartup(context.Background())
|
|
||||||
//
|
|
||||||
// core-agent mcp — start MCP server on stdio
|
// core-agent mcp — start MCP server on stdio
|
||||||
// core-agent serve — start MCP server on HTTP
|
// core-agent serve — start MCP server on HTTP
|
||||||
func (s *Service) OnStartup(ctx context.Context) core.Result {
|
func (s *Service) OnStartup(ctx context.Context) core.Result {
|
||||||
|
|
@ -82,9 +67,9 @@ func (s *Service) OnStartup(ctx context.Context) core.Result {
|
||||||
})
|
})
|
||||||
|
|
||||||
c.Command("serve", core.Command{
|
c.Command("serve", core.Command{
|
||||||
Description: "Start the MCP server with auto-selected transport",
|
Description: "Start as a persistent HTTP daemon",
|
||||||
Action: func(opts core.Options) core.Result {
|
Action: func(opts core.Options) core.Result {
|
||||||
s.logger.Info("MCP server starting")
|
log.Default().Info("MCP HTTP server starting")
|
||||||
if err := s.Run(ctx); err != nil {
|
if err := s.Run(ctx); err != nil {
|
||||||
return core.Result{Value: err, OK: false}
|
return core.Result{Value: err, OK: false}
|
||||||
}
|
}
|
||||||
|
|
@ -96,89 +81,18 @@ func (s *Service) OnStartup(ctx context.Context) core.Result {
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleIPCEvents implements Core's IPC handler interface.
|
// HandleIPCEvents implements Core's IPC handler interface.
|
||||||
|
// Catches ChannelPush messages from other services and pushes them to Claude Code sessions.
|
||||||
//
|
//
|
||||||
// c.ACTION(mcp.ChannelPush{Channel: "agent.status", Data: statusMap})
|
// c.ACTION(mcp.ChannelPush{Channel: "agent.status", Data: statusMap})
|
||||||
// Catches ChannelPush messages from other services and pushes them to Claude Code sessions.
|
|
||||||
func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result {
|
func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result {
|
||||||
ctx := context.Background()
|
|
||||||
if c != nil {
|
|
||||||
if coreCtx := c.Context(); coreCtx != nil {
|
|
||||||
ctx = coreCtx
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch ev := msg.(type) {
|
switch ev := msg.(type) {
|
||||||
case ChannelPush:
|
case ChannelPush:
|
||||||
s.ChannelSend(ctx, ev.Channel, ev.Data)
|
s.ChannelSend(context.Background(), ev.Channel, ev.Data)
|
||||||
case process.ActionProcessStarted:
|
|
||||||
startedAt := time.Now()
|
|
||||||
s.recordProcessRuntime(ev.ID, processRuntime{
|
|
||||||
Command: ev.Command,
|
|
||||||
Args: ev.Args,
|
|
||||||
Dir: ev.Dir,
|
|
||||||
StartedAt: startedAt,
|
|
||||||
})
|
|
||||||
s.ChannelSend(ctx, ChannelProcessStart, map[string]any{
|
|
||||||
"id": ev.ID,
|
|
||||||
"command": ev.Command,
|
|
||||||
"args": ev.Args,
|
|
||||||
"dir": ev.Dir,
|
|
||||||
"pid": ev.PID,
|
|
||||||
"startedAt": startedAt,
|
|
||||||
})
|
|
||||||
case process.ActionProcessOutput:
|
|
||||||
s.ChannelSend(ctx, ChannelProcessOutput, map[string]any{
|
|
||||||
"id": ev.ID,
|
|
||||||
"line": ev.Line,
|
|
||||||
"stream": ev.Stream,
|
|
||||||
})
|
|
||||||
case process.ActionProcessExited:
|
|
||||||
meta, ok := s.processRuntimeFor(ev.ID)
|
|
||||||
payload := map[string]any{
|
|
||||||
"id": ev.ID,
|
|
||||||
"exitCode": ev.ExitCode,
|
|
||||||
"duration": ev.Duration,
|
|
||||||
}
|
|
||||||
if ok {
|
|
||||||
payload["command"] = meta.Command
|
|
||||||
payload["args"] = meta.Args
|
|
||||||
payload["dir"] = meta.Dir
|
|
||||||
if !meta.StartedAt.IsZero() {
|
|
||||||
payload["startedAt"] = meta.StartedAt
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ev.Error != nil {
|
|
||||||
payload["error"] = ev.Error.Error()
|
|
||||||
}
|
|
||||||
s.ChannelSend(ctx, ChannelProcessExit, payload)
|
|
||||||
errText := ""
|
|
||||||
if ev.Error != nil {
|
|
||||||
errText = ev.Error.Error()
|
|
||||||
}
|
|
||||||
s.emitTestResult(ctx, ev.ID, ev.ExitCode, ev.Duration, "", errText)
|
|
||||||
case process.ActionProcessKilled:
|
|
||||||
meta, ok := s.processRuntimeFor(ev.ID)
|
|
||||||
payload := map[string]any{
|
|
||||||
"id": ev.ID,
|
|
||||||
"signal": ev.Signal,
|
|
||||||
}
|
|
||||||
if ok {
|
|
||||||
payload["command"] = meta.Command
|
|
||||||
payload["args"] = meta.Args
|
|
||||||
payload["dir"] = meta.Dir
|
|
||||||
if !meta.StartedAt.IsZero() {
|
|
||||||
payload["startedAt"] = meta.StartedAt
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s.ChannelSend(ctx, ChannelProcessExit, payload)
|
|
||||||
s.emitTestResult(ctx, ev.ID, 0, 0, ev.Signal, "")
|
|
||||||
}
|
}
|
||||||
return core.Result{OK: true}
|
return core.Result{OK: true}
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnShutdown implements core.Stoppable — stops the MCP transport.
|
// OnShutdown implements core.Stoppable — stops the MCP transport.
|
||||||
//
|
|
||||||
// svc.OnShutdown(context.Background())
|
|
||||||
func (s *Service) OnShutdown(ctx context.Context) core.Result {
|
func (s *Service) OnShutdown(ctx context.Context) core.Result {
|
||||||
if err := s.Shutdown(ctx); err != nil {
|
if err := s.Shutdown(ctx); err != nil {
|
||||||
return core.Result{Value: err, OK: false}
|
return core.Result{Value: err, OK: false}
|
||||||
|
|
|
||||||
|
|
@ -1,334 +0,0 @@
|
||||||
package mcp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"dappco.re/go/core"
|
|
||||||
"forge.lthn.ai/core/go-process"
|
|
||||||
"forge.lthn.ai/core/go-ws"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestRegister_Good_WiresOptionalServices(t *testing.T) {
|
|
||||||
c := core.New()
|
|
||||||
|
|
||||||
ps := &process.Service{}
|
|
||||||
hub := ws.NewHub()
|
|
||||||
|
|
||||||
if r := c.RegisterService("process", ps); !r.OK {
|
|
||||||
t.Fatalf("failed to register process service: %v", r.Value)
|
|
||||||
}
|
|
||||||
if r := c.RegisterService("ws", hub); !r.OK {
|
|
||||||
t.Fatalf("failed to register ws hub: %v", r.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
result := Register(c)
|
|
||||||
if !result.OK {
|
|
||||||
t.Fatalf("Register() failed: %v", result.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
svc, ok := result.Value.(*Service)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected *Service, got %T", result.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
if svc.ProcessService() != ps {
|
|
||||||
t.Fatalf("expected process service to be wired")
|
|
||||||
}
|
|
||||||
if svc.WSHub() != hub {
|
|
||||||
t.Fatalf("expected ws hub to be wired")
|
|
||||||
}
|
|
||||||
|
|
||||||
tools := map[string]bool{}
|
|
||||||
for _, rec := range svc.Tools() {
|
|
||||||
tools[rec.Name] = true
|
|
||||||
}
|
|
||||||
if !tools["process_start"] {
|
|
||||||
t.Fatal("expected process tools to be registered when process service is available")
|
|
||||||
}
|
|
||||||
if !tools["ws_start"] {
|
|
||||||
t.Fatal("expected ws tools to be registered when ws hub is available")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHandleIPCEvents_Good_ForwardsProcessActions(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serverConn, clientConn := net.Pipe()
|
|
||||||
defer clientConn.Close()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Connect() failed: %v", err)
|
|
||||||
}
|
|
||||||
defer session.Close()
|
|
||||||
|
|
||||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
|
||||||
scanner := bufio.NewScanner(clientConn)
|
|
||||||
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
|
|
||||||
received := make(chan map[string]any, 8)
|
|
||||||
errCh := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
for scanner.Scan() {
|
|
||||||
var msg map[string]any
|
|
||||||
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
received <- msg
|
|
||||||
}
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
close(received)
|
|
||||||
}()
|
|
||||||
|
|
||||||
result := svc.HandleIPCEvents(nil, process.ActionProcessStarted{
|
|
||||||
ID: "proc-1",
|
|
||||||
Command: "go",
|
|
||||||
Args: []string{"test", "./..."},
|
|
||||||
Dir: "/workspace",
|
|
||||||
PID: 1234,
|
|
||||||
})
|
|
||||||
if !result.OK {
|
|
||||||
t.Fatalf("HandleIPCEvents() returned non-OK result: %#v", result.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
deadline := time.NewTimer(5 * time.Second)
|
|
||||||
defer deadline.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case err := <-errCh:
|
|
||||||
t.Fatalf("failed to read notification: %v", err)
|
|
||||||
case msg, ok := <-received:
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("notification stream closed before expected message arrived")
|
|
||||||
}
|
|
||||||
if msg["method"] != ChannelNotificationMethod {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
params, ok := msg["params"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected params object, got %T", msg["params"])
|
|
||||||
}
|
|
||||||
if params["channel"] != ChannelProcessStart {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
payload, ok := params["data"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected data object, got %T", params["data"])
|
|
||||||
}
|
|
||||||
if payload["id"] != "proc-1" || payload["command"] != "go" {
|
|
||||||
t.Fatalf("unexpected payload: %#v", payload)
|
|
||||||
}
|
|
||||||
if payload["dir"] != "/workspace" {
|
|
||||||
t.Fatalf("expected dir /workspace, got %#v", payload["dir"])
|
|
||||||
}
|
|
||||||
if payload["pid"] != float64(1234) {
|
|
||||||
t.Fatalf("expected pid 1234, got %#v", payload["pid"])
|
|
||||||
}
|
|
||||||
if payload["args"] == nil {
|
|
||||||
t.Fatalf("expected args in payload, got %#v", payload)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
case <-deadline.C:
|
|
||||||
t.Fatal("timed out waiting for process start notification")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHandleIPCEvents_Good_ForwardsProcessOutput(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serverConn, clientConn := net.Pipe()
|
|
||||||
defer clientConn.Close()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Connect() failed: %v", err)
|
|
||||||
}
|
|
||||||
defer session.Close()
|
|
||||||
|
|
||||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
|
||||||
scanner := bufio.NewScanner(clientConn)
|
|
||||||
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
|
|
||||||
received := make(chan map[string]any, 8)
|
|
||||||
errCh := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
for scanner.Scan() {
|
|
||||||
var msg map[string]any
|
|
||||||
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
received <- msg
|
|
||||||
}
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
close(received)
|
|
||||||
}()
|
|
||||||
|
|
||||||
result := svc.HandleIPCEvents(nil, process.ActionProcessOutput{
|
|
||||||
ID: "proc-1",
|
|
||||||
Line: "hello world",
|
|
||||||
Stream: process.StreamStdout,
|
|
||||||
})
|
|
||||||
if !result.OK {
|
|
||||||
t.Fatalf("HandleIPCEvents() returned non-OK result: %#v", result.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
deadline := time.NewTimer(5 * time.Second)
|
|
||||||
defer deadline.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case err := <-errCh:
|
|
||||||
t.Fatalf("failed to read notification: %v", err)
|
|
||||||
case msg, ok := <-received:
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("notification stream closed before expected message arrived")
|
|
||||||
}
|
|
||||||
if msg["method"] != ChannelNotificationMethod {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
params, ok := msg["params"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected params object, got %T", msg["params"])
|
|
||||||
}
|
|
||||||
if params["channel"] != ChannelProcessOutput {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
payload, ok := params["data"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected data object, got %T", msg["params"])
|
|
||||||
}
|
|
||||||
if payload["id"] != "proc-1" || payload["line"] != "hello world" || payload["stream"] != string(process.StreamStdout) {
|
|
||||||
t.Fatalf("unexpected payload: %#v", payload)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
case <-deadline.C:
|
|
||||||
t.Fatal("timed out waiting for process output notification")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHandleIPCEvents_Good_ForwardsTestResult(t *testing.T) {
|
|
||||||
svc, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serverConn, clientConn := net.Pipe()
|
|
||||||
defer clientConn.Close()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
session, err := svc.server.Connect(ctx, &connTransport{conn: serverConn}, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Connect() failed: %v", err)
|
|
||||||
}
|
|
||||||
defer session.Close()
|
|
||||||
|
|
||||||
svc.recordProcessRuntime("proc-test", processRuntime{
|
|
||||||
Command: "go",
|
|
||||||
Args: []string{"test", "./..."},
|
|
||||||
StartedAt: time.Now().Add(-2 * time.Second),
|
|
||||||
})
|
|
||||||
|
|
||||||
clientConn.SetDeadline(time.Now().Add(5 * time.Second))
|
|
||||||
scanner := bufio.NewScanner(clientConn)
|
|
||||||
scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
|
|
||||||
received := make(chan map[string]any, 8)
|
|
||||||
errCh := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
for scanner.Scan() {
|
|
||||||
var msg map[string]any
|
|
||||||
if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
received <- msg
|
|
||||||
}
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
close(received)
|
|
||||||
}()
|
|
||||||
|
|
||||||
result := svc.HandleIPCEvents(nil, process.ActionProcessExited{
|
|
||||||
ID: "proc-test",
|
|
||||||
ExitCode: 0,
|
|
||||||
Duration: 2 * time.Second,
|
|
||||||
})
|
|
||||||
if !result.OK {
|
|
||||||
t.Fatalf("HandleIPCEvents() returned non-OK result: %#v", result.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
deadline := time.NewTimer(5 * time.Second)
|
|
||||||
defer deadline.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case err := <-errCh:
|
|
||||||
t.Fatalf("failed to read notification: %v", err)
|
|
||||||
case msg, ok := <-received:
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("notification stream closed before expected message arrived")
|
|
||||||
}
|
|
||||||
if msg["method"] != ChannelNotificationMethod {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
params, ok := msg["params"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected params object, got %T", msg["params"])
|
|
||||||
}
|
|
||||||
if params["channel"] != ChannelTestResult {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
payload, ok := params["data"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected data object, got %T", msg["params"])
|
|
||||||
}
|
|
||||||
if payload["id"] != "proc-test" || payload["command"] != "go" {
|
|
||||||
t.Fatalf("unexpected payload: %#v", payload)
|
|
||||||
}
|
|
||||||
if payload["dir"] != nil {
|
|
||||||
t.Fatalf("expected dir to be absent when not recorded, got %#v", payload["dir"])
|
|
||||||
}
|
|
||||||
if payload["status"] != "passed" || payload["passed"] != true {
|
|
||||||
t.Fatalf("expected passed test result, got %#v", payload)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
case <-deadline.C:
|
|
||||||
t.Fatal("timed out waiting for test result notification")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -4,8 +4,8 @@ package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"iter"
|
||||||
"reflect"
|
"reflect"
|
||||||
"time"
|
|
||||||
|
|
||||||
core "dappco.re/go/core"
|
core "dappco.re/go/core"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
|
|
@ -21,38 +21,6 @@ import (
|
||||||
// }
|
// }
|
||||||
type RESTHandler func(ctx context.Context, body []byte) (any, error)
|
type RESTHandler func(ctx context.Context, body []byte) (any, error)
|
||||||
|
|
||||||
// errInvalidRESTInput marks malformed JSON bodies for the REST bridge.
|
|
||||||
var errInvalidRESTInput = &restInputError{}
|
|
||||||
|
|
||||||
// restInputError preserves invalid-REST-input identity without stdlib
|
|
||||||
// error constructors so bridge.go can keep using errors.Is.
|
|
||||||
type restInputError struct {
|
|
||||||
cause error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *restInputError) Error() string {
|
|
||||||
if e == nil || e.cause == nil {
|
|
||||||
return "invalid REST input"
|
|
||||||
}
|
|
||||||
return "invalid REST input: " + e.cause.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *restInputError) Unwrap() error {
|
|
||||||
if e == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return e.cause
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *restInputError) Is(target error) bool {
|
|
||||||
_, ok := target.(*restInputError)
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func invalidRESTInputError(cause error) error {
|
|
||||||
return &restInputError{cause: cause}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToolRecord captures metadata about a registered MCP tool.
|
// ToolRecord captures metadata about a registered MCP tool.
|
||||||
//
|
//
|
||||||
// for _, rec := range svc.Tools() {
|
// for _, rec := range svc.Tools() {
|
||||||
|
|
@ -67,17 +35,11 @@ type ToolRecord struct {
|
||||||
RESTHandler RESTHandler // REST-callable handler created at registration time
|
RESTHandler RESTHandler // REST-callable handler created at registration time
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddToolRecorded registers a tool with the MCP server and records its metadata.
|
// addToolRecorded registers a tool with the MCP server AND records its metadata.
|
||||||
// This is a generic function that captures the In/Out types for schema extraction.
|
// This is a generic function that captures the In/Out types for schema extraction.
|
||||||
// It also creates a RESTHandler closure that can unmarshal JSON to the correct
|
// It also creates a RESTHandler closure that can unmarshal JSON to the correct
|
||||||
// input type and call the handler directly, enabling the MCP-to-REST bridge.
|
// input type and call the handler directly, enabling the MCP-to-REST bridge.
|
||||||
//
|
func addToolRecorded[In, Out any](s *Service, server *mcp.Server, group string, t *mcp.Tool, h mcp.ToolHandlerFor[In, Out]) {
|
||||||
// svc, _ := mcp.New(mcp.Options{})
|
|
||||||
// mcp.AddToolRecorded(svc, svc.Server(), "files", &mcp.Tool{Name: "file_read"},
|
|
||||||
// func(context.Context, *mcp.CallToolRequest, ReadFileInput) (*mcp.CallToolResult, ReadFileOutput, error) {
|
|
||||||
// return nil, ReadFileOutput{Path: "src/main.go"}, nil
|
|
||||||
// })
|
|
||||||
func AddToolRecorded[In, Out any](s *Service, server *mcp.Server, group string, t *mcp.Tool, h mcp.ToolHandlerFor[In, Out]) {
|
|
||||||
mcp.AddTool(server, t, h)
|
mcp.AddTool(server, t, h)
|
||||||
|
|
||||||
restHandler := func(ctx context.Context, body []byte) (any, error) {
|
restHandler := func(ctx context.Context, body []byte) (any, error) {
|
||||||
|
|
@ -85,9 +47,9 @@ func AddToolRecorded[In, Out any](s *Service, server *mcp.Server, group string,
|
||||||
if len(body) > 0 {
|
if len(body) > 0 {
|
||||||
if r := core.JSONUnmarshal(body, &input); !r.OK {
|
if r := core.JSONUnmarshal(body, &input); !r.OK {
|
||||||
if err, ok := r.Value.(error); ok {
|
if err, ok := r.Value.(error); ok {
|
||||||
return nil, invalidRESTInputError(err)
|
return nil, err
|
||||||
}
|
}
|
||||||
return nil, invalidRESTInputError(nil)
|
return nil, core.E("registry.RESTHandler", "failed to unmarshal input", nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// nil: REST callers have no MCP request context.
|
// nil: REST callers have no MCP request context.
|
||||||
|
|
@ -106,10 +68,6 @@ func AddToolRecorded[In, Out any](s *Service, server *mcp.Server, group string,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func addToolRecorded[In, Out any](s *Service, server *mcp.Server, group string, t *mcp.Tool, h mcp.ToolHandlerFor[In, Out]) {
|
|
||||||
AddToolRecorded(s, server, group, t, h)
|
|
||||||
}
|
|
||||||
|
|
||||||
// structSchema builds a simple JSON Schema from a struct's json tags via reflection.
|
// structSchema builds a simple JSON Schema from a struct's json tags via reflection.
|
||||||
// Returns nil for non-struct types or empty structs.
|
// Returns nil for non-struct types or empty structs.
|
||||||
func structSchema(v any) map[string]any {
|
func structSchema(v any) map[string]any {
|
||||||
|
|
@ -123,7 +81,52 @@ func structSchema(v any) map[string]any {
|
||||||
if t.Kind() != reflect.Struct {
|
if t.Kind() != reflect.Struct {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return schemaForType(t, map[reflect.Type]bool{})
|
if t.NumField() == 0 {
|
||||||
|
return map[string]any{"type": "object", "properties": map[string]any{}}
|
||||||
|
}
|
||||||
|
|
||||||
|
properties := make(map[string]any)
|
||||||
|
required := make([]string, 0)
|
||||||
|
|
||||||
|
for f := range t.Fields() {
|
||||||
|
f := f
|
||||||
|
if !f.IsExported() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
jsonTag := f.Tag.Get("json")
|
||||||
|
if jsonTag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := f.Name
|
||||||
|
isOptional := false
|
||||||
|
if jsonTag != "" {
|
||||||
|
parts := splitTag(jsonTag)
|
||||||
|
name = parts[0]
|
||||||
|
for _, p := range parts[1:] {
|
||||||
|
if p == "omitempty" {
|
||||||
|
isOptional = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
prop := map[string]any{
|
||||||
|
"type": goTypeToJSONType(f.Type),
|
||||||
|
}
|
||||||
|
properties[name] = prop
|
||||||
|
|
||||||
|
if !isOptional {
|
||||||
|
required = append(required, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
schema := map[string]any{
|
||||||
|
"type": "object",
|
||||||
|
"properties": properties,
|
||||||
|
}
|
||||||
|
if len(required) > 0 {
|
||||||
|
schema["required"] = required
|
||||||
|
}
|
||||||
|
return schema
|
||||||
}
|
}
|
||||||
|
|
||||||
// splitTag splits a struct tag value by commas.
|
// splitTag splits a struct tag value by commas.
|
||||||
|
|
@ -131,6 +134,19 @@ func splitTag(tag string) []string {
|
||||||
return core.Split(tag, ",")
|
return core.Split(tag, ",")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// splitTagSeq returns an iterator over the tag parts.
|
||||||
|
func splitTagSeq(tag string) iter.Seq[string] {
|
||||||
|
// core.Split returns []string; wrap as iterator
|
||||||
|
parts := core.Split(tag, ",")
|
||||||
|
return func(yield func(string) bool) {
|
||||||
|
for _, p := range parts {
|
||||||
|
if !yield(p) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// goTypeToJSONType maps Go types to JSON Schema types.
|
// goTypeToJSONType maps Go types to JSON Schema types.
|
||||||
func goTypeToJSONType(t reflect.Type) string {
|
func goTypeToJSONType(t reflect.Type) string {
|
||||||
switch t.Kind() {
|
switch t.Kind() {
|
||||||
|
|
@ -151,120 +167,3 @@ func goTypeToJSONType(t reflect.Type) string {
|
||||||
return "string"
|
return "string"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func schemaForType(t reflect.Type, seen map[reflect.Type]bool) map[string]any {
|
|
||||||
if t == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for t.Kind() == reflect.Pointer {
|
|
||||||
t = t.Elem()
|
|
||||||
if t == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if isTimeType(t) {
|
|
||||||
return map[string]any{
|
|
||||||
"type": "string",
|
|
||||||
"format": "date-time",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Interface:
|
|
||||||
return map[string]any{}
|
|
||||||
|
|
||||||
case reflect.Struct:
|
|
||||||
if seen[t] {
|
|
||||||
return map[string]any{"type": "object"}
|
|
||||||
}
|
|
||||||
seen[t] = true
|
|
||||||
|
|
||||||
properties := make(map[string]any)
|
|
||||||
required := make([]string, 0, t.NumField())
|
|
||||||
|
|
||||||
for f := range t.Fields() {
|
|
||||||
f := f
|
|
||||||
if !f.IsExported() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
jsonTag := f.Tag.Get("json")
|
|
||||||
if jsonTag == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
name := f.Name
|
|
||||||
isOptional := false
|
|
||||||
if jsonTag != "" {
|
|
||||||
parts := splitTag(jsonTag)
|
|
||||||
name = parts[0]
|
|
||||||
for _, p := range parts[1:] {
|
|
||||||
if p == "omitempty" {
|
|
||||||
isOptional = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
prop := schemaForType(f.Type, cloneSeenSet(seen))
|
|
||||||
if prop == nil {
|
|
||||||
prop = map[string]any{"type": goTypeToJSONType(f.Type)}
|
|
||||||
}
|
|
||||||
properties[name] = prop
|
|
||||||
|
|
||||||
if !isOptional {
|
|
||||||
required = append(required, name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
schema := map[string]any{
|
|
||||||
"type": "object",
|
|
||||||
"properties": properties,
|
|
||||||
}
|
|
||||||
if len(required) > 0 {
|
|
||||||
schema["required"] = required
|
|
||||||
}
|
|
||||||
return schema
|
|
||||||
|
|
||||||
case reflect.Slice, reflect.Array:
|
|
||||||
schema := map[string]any{
|
|
||||||
"type": "array",
|
|
||||||
"items": schemaForType(t.Elem(), cloneSeenSet(seen)),
|
|
||||||
}
|
|
||||||
return schema
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
schema := map[string]any{
|
|
||||||
"type": "object",
|
|
||||||
}
|
|
||||||
if t.Key().Kind() == reflect.String {
|
|
||||||
if valueSchema := schemaForType(t.Elem(), cloneSeenSet(seen)); valueSchema != nil {
|
|
||||||
schema["additionalProperties"] = valueSchema
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return schema
|
|
||||||
|
|
||||||
default:
|
|
||||||
if typeName := goTypeToJSONType(t); typeName != "" {
|
|
||||||
return map[string]any{"type": typeName}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func cloneSeenSet(seen map[reflect.Type]bool) map[reflect.Type]bool {
|
|
||||||
if len(seen) == 0 {
|
|
||||||
return map[reflect.Type]bool{}
|
|
||||||
}
|
|
||||||
clone := make(map[reflect.Type]bool, len(seen))
|
|
||||||
for t := range seen {
|
|
||||||
clone[t] = true
|
|
||||||
}
|
|
||||||
return clone
|
|
||||||
}
|
|
||||||
|
|
||||||
func isTimeType(t reflect.Type) bool {
|
|
||||||
return t == reflect.TypeOf(time.Time{})
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -3,11 +3,7 @@
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"forge.lthn.ai/core/go-process"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestToolRegistry_Good_RecordsTools(t *testing.T) {
|
func TestToolRegistry_Good_RecordsTools(t *testing.T) {
|
||||||
|
|
@ -72,12 +68,8 @@ func TestToolRegistry_Good_ToolCount(t *testing.T) {
|
||||||
|
|
||||||
tools := svc.Tools()
|
tools := svc.Tools()
|
||||||
// Built-in tools: file_read, file_write, file_delete, file_rename,
|
// Built-in tools: file_read, file_write, file_delete, file_rename,
|
||||||
// file_exists, file_edit, dir_list, dir_create, lang_detect, lang_list,
|
// file_exists, file_edit, dir_list, dir_create, lang_detect, lang_list
|
||||||
// metrics_record, metrics_query, rag_query, rag_ingest, rag_collections,
|
const expectedCount = 10
|
||||||
// webview_connect, webview_disconnect, webview_navigate, webview_click,
|
|
||||||
// webview_type, webview_query, webview_console, webview_eval,
|
|
||||||
// webview_screenshot, webview_wait
|
|
||||||
const expectedCount = 25
|
|
||||||
if len(tools) != expectedCount {
|
if len(tools) != expectedCount {
|
||||||
t.Errorf("expected %d tools, got %d", expectedCount, len(tools))
|
t.Errorf("expected %d tools, got %d", expectedCount, len(tools))
|
||||||
for _, tr := range tools {
|
for _, tr := range tools {
|
||||||
|
|
@ -94,9 +86,6 @@ func TestToolRegistry_Good_GroupAssignment(t *testing.T) {
|
||||||
|
|
||||||
fileTools := []string{"file_read", "file_write", "file_delete", "file_rename", "file_exists", "file_edit", "dir_list", "dir_create"}
|
fileTools := []string{"file_read", "file_write", "file_delete", "file_rename", "file_exists", "file_edit", "dir_list", "dir_create"}
|
||||||
langTools := []string{"lang_detect", "lang_list"}
|
langTools := []string{"lang_detect", "lang_list"}
|
||||||
metricsTools := []string{"metrics_record", "metrics_query"}
|
|
||||||
ragTools := []string{"rag_query", "rag_ingest", "rag_collections"}
|
|
||||||
webviewTools := []string{"webview_connect", "webview_disconnect", "webview_navigate", "webview_click", "webview_type", "webview_query", "webview_console", "webview_eval", "webview_screenshot", "webview_wait"}
|
|
||||||
|
|
||||||
byName := make(map[string]ToolRecord)
|
byName := make(map[string]ToolRecord)
|
||||||
for _, tr := range svc.Tools() {
|
for _, tr := range svc.Tools() {
|
||||||
|
|
@ -124,39 +113,6 @@ func TestToolRegistry_Good_GroupAssignment(t *testing.T) {
|
||||||
t.Errorf("tool %s: expected group 'language', got %q", name, tr.Group)
|
t.Errorf("tool %s: expected group 'language', got %q", name, tr.Group)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, name := range metricsTools {
|
|
||||||
tr, ok := byName[name]
|
|
||||||
if !ok {
|
|
||||||
t.Errorf("tool %s not found in registry", name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if tr.Group != "metrics" {
|
|
||||||
t.Errorf("tool %s: expected group 'metrics', got %q", name, tr.Group)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, name := range ragTools {
|
|
||||||
tr, ok := byName[name]
|
|
||||||
if !ok {
|
|
||||||
t.Errorf("tool %s not found in registry", name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if tr.Group != "rag" {
|
|
||||||
t.Errorf("tool %s: expected group 'rag', got %q", name, tr.Group)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, name := range webviewTools {
|
|
||||||
tr, ok := byName[name]
|
|
||||||
if !ok {
|
|
||||||
t.Errorf("tool %s not found in registry", name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if tr.Group != "webview" {
|
|
||||||
t.Errorf("tool %s: expected group 'webview', got %q", name, tr.Group)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestToolRegistry_Good_ToolRecordFields(t *testing.T) {
|
func TestToolRegistry_Good_ToolRecordFields(t *testing.T) {
|
||||||
|
|
@ -192,93 +148,3 @@ func TestToolRegistry_Good_ToolRecordFields(t *testing.T) {
|
||||||
t.Error("expected non-nil OutputSchema")
|
t.Error("expected non-nil OutputSchema")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestToolRegistry_Good_TimeSchemas(t *testing.T) {
|
|
||||||
svc, err := New(Options{
|
|
||||||
WorkspaceRoot: t.TempDir(),
|
|
||||||
ProcessService: &process.Service{},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
byName := make(map[string]ToolRecord)
|
|
||||||
for _, tr := range svc.Tools() {
|
|
||||||
byName[tr.Name] = tr
|
|
||||||
}
|
|
||||||
|
|
||||||
metrics, ok := byName["metrics_record"]
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("metrics_record not found in registry")
|
|
||||||
}
|
|
||||||
inputProps, ok := metrics.InputSchema["properties"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("expected metrics_record input properties map")
|
|
||||||
}
|
|
||||||
dataSchema, ok := inputProps["data"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("expected data schema for metrics_record input")
|
|
||||||
}
|
|
||||||
if got := dataSchema["type"]; got != "object" {
|
|
||||||
t.Fatalf("expected metrics_record data type object, got %#v", got)
|
|
||||||
}
|
|
||||||
props, ok := metrics.OutputSchema["properties"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("expected metrics_record output properties map")
|
|
||||||
}
|
|
||||||
timestamp, ok := props["timestamp"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("expected timestamp schema for metrics_record output")
|
|
||||||
}
|
|
||||||
if got := timestamp["type"]; got != "string" {
|
|
||||||
t.Fatalf("expected metrics_record timestamp type string, got %#v", got)
|
|
||||||
}
|
|
||||||
if got := timestamp["format"]; got != "date-time" {
|
|
||||||
t.Fatalf("expected metrics_record timestamp format date-time, got %#v", got)
|
|
||||||
}
|
|
||||||
|
|
||||||
processStart, ok := byName["process_start"]
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("process_start not found in registry")
|
|
||||||
}
|
|
||||||
props, ok = processStart.OutputSchema["properties"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("expected process_start output properties map")
|
|
||||||
}
|
|
||||||
startedAt, ok := props["startedAt"].(map[string]any)
|
|
||||||
if !ok {
|
|
||||||
t.Fatal("expected startedAt schema for process_start output")
|
|
||||||
}
|
|
||||||
if got := startedAt["type"]; got != "string" {
|
|
||||||
t.Fatalf("expected process_start startedAt type string, got %#v", got)
|
|
||||||
}
|
|
||||||
if got := startedAt["format"]; got != "date-time" {
|
|
||||||
t.Fatalf("expected process_start startedAt format date-time, got %#v", got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToolRegistry_Bad_InvalidRESTInputIsClassified(t *testing.T) {
|
|
||||||
svc, err := New(Options{WorkspaceRoot: t.TempDir()})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var record ToolRecord
|
|
||||||
for _, tr := range svc.Tools() {
|
|
||||||
if tr.Name == "file_read" {
|
|
||||||
record = tr
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if record.Name == "" {
|
|
||||||
t.Fatal("file_read not found in registry")
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = record.RESTHandler(context.Background(), []byte("{bad json"))
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected REST handler error for malformed JSON")
|
|
||||||
}
|
|
||||||
if !errors.Is(err, errInvalidRESTInput) {
|
|
||||||
t.Fatalf("expected invalid REST input error, got %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -4,6 +4,8 @@ package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Subsystem registers additional MCP tools at startup.
|
// Subsystem registers additional MCP tools at startup.
|
||||||
|
|
@ -11,10 +13,10 @@ import (
|
||||||
//
|
//
|
||||||
// type BrainSubsystem struct{}
|
// type BrainSubsystem struct{}
|
||||||
// func (b *BrainSubsystem) Name() string { return "brain" }
|
// func (b *BrainSubsystem) Name() string { return "brain" }
|
||||||
// func (b *BrainSubsystem) RegisterTools(svc *Service) { ... }
|
// func (b *BrainSubsystem) RegisterTools(server *mcp.Server) { ... }
|
||||||
type Subsystem interface {
|
type Subsystem interface {
|
||||||
Name() string
|
Name() string
|
||||||
RegisterTools(svc *Service)
|
RegisterTools(server *mcp.Server)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubsystemWithShutdown extends Subsystem with graceful cleanup.
|
// SubsystemWithShutdown extends Subsystem with graceful cleanup.
|
||||||
|
|
@ -36,16 +38,11 @@ type Notifier interface {
|
||||||
ChannelSend(ctx context.Context, channel string, data any)
|
ChannelSend(ctx context.Context, channel string, data any)
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ Notifier = (*Service)(nil)
|
|
||||||
|
|
||||||
// ChannelPush is a Core IPC message that any service can send to push
|
// ChannelPush is a Core IPC message that any service can send to push
|
||||||
// a channel event to connected Claude Code sessions.
|
// a channel event to connected Claude Code sessions.
|
||||||
// The MCP service catches this in HandleIPCEvents and calls ChannelSend.
|
// The MCP service catches this in HandleIPCEvents and calls ChannelSend.
|
||||||
//
|
//
|
||||||
// c.ACTION(mcp.ChannelPush{
|
// c.ACTION(mcp.ChannelPush{Channel: "agent.status", Data: map[string]any{"repo": "go-io"}})
|
||||||
// Channel: "agent.status",
|
|
||||||
// Data: map[string]any{"repo": "go-io"},
|
|
||||||
// })
|
|
||||||
type ChannelPush struct {
|
type ChannelPush struct {
|
||||||
Channel string
|
Channel string
|
||||||
Data any
|
Data any
|
||||||
|
|
@ -61,14 +58,3 @@ type SubsystemWithNotifier interface {
|
||||||
Subsystem
|
Subsystem
|
||||||
SetNotifier(n Notifier)
|
SetNotifier(n Notifier)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubsystemWithChannelCallback extends Subsystem for implementations that
|
|
||||||
// expose an OnChannel callback instead of a Notifier interface.
|
|
||||||
//
|
|
||||||
// brain.OnChannel(func(ctx context.Context, channel string, data any) {
|
|
||||||
// mcpService.ChannelSend(ctx, channel, data)
|
|
||||||
// })
|
|
||||||
type SubsystemWithChannelCallback interface {
|
|
||||||
Subsystem
|
|
||||||
OnChannel(func(ctx context.Context, channel string, data any))
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -3,6 +3,8 @@ package mcp
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// stubSubsystem is a minimal Subsystem for testing.
|
// stubSubsystem is a minimal Subsystem for testing.
|
||||||
|
|
@ -13,23 +15,7 @@ type stubSubsystem struct {
|
||||||
|
|
||||||
func (s *stubSubsystem) Name() string { return s.name }
|
func (s *stubSubsystem) Name() string { return s.name }
|
||||||
|
|
||||||
func (s *stubSubsystem) RegisterTools(svc *Service) {
|
func (s *stubSubsystem) RegisterTools(server *mcp.Server) {
|
||||||
s.toolsRegistered = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// notifierSubsystem verifies notifier wiring happens before tool registration.
|
|
||||||
type notifierSubsystem struct {
|
|
||||||
stubSubsystem
|
|
||||||
notifierSet bool
|
|
||||||
sawNotifierAtRegistration bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *notifierSubsystem) SetNotifier(n Notifier) {
|
|
||||||
s.notifierSet = n != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *notifierSubsystem) RegisterTools(svc *Service) {
|
|
||||||
s.sawNotifierAtRegistration = s.notifierSet
|
|
||||||
s.toolsRegistered = true
|
s.toolsRegistered = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -86,41 +72,6 @@ func TestSubsystem_Good_MultipleSubsystems(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSubsystem_Good_NilEntriesIgnoredAndSnapshots(t *testing.T) {
|
|
||||||
sub := &stubSubsystem{name: "snap-sub"}
|
|
||||||
svc, err := New(Options{Subsystems: []Subsystem{nil, sub}})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
subs := svc.Subsystems()
|
|
||||||
if len(subs) != 1 {
|
|
||||||
t.Fatalf("expected 1 subsystem after filtering nil entries, got %d", len(subs))
|
|
||||||
}
|
|
||||||
if subs[0].Name() != "snap-sub" {
|
|
||||||
t.Fatalf("expected snap-sub, got %q", subs[0].Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
subs[0] = nil
|
|
||||||
if svc.Subsystems()[0] == nil {
|
|
||||||
t.Fatal("expected Subsystems() to return a snapshot, not the live slice")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSubsystem_Good_NotifierSetBeforeRegistration(t *testing.T) {
|
|
||||||
sub := ¬ifierSubsystem{stubSubsystem: stubSubsystem{name: "notifier-sub"}}
|
|
||||||
_, err := New(Options{Subsystems: []Subsystem{sub}})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("New() failed: %v", err)
|
|
||||||
}
|
|
||||||
if !sub.notifierSet {
|
|
||||||
t.Fatal("expected notifier to be set")
|
|
||||||
}
|
|
||||||
if !sub.sawNotifierAtRegistration {
|
|
||||||
t.Fatal("expected notifier to be available before RegisterTools ran")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSubsystemShutdown_Good(t *testing.T) {
|
func TestSubsystemShutdown_Good(t *testing.T) {
|
||||||
sub := &shutdownSubsystem{stubSubsystem: stubSubsystem{name: "shutdown-sub"}}
|
sub := &shutdownSubsystem{stubSubsystem: stubSubsystem{name: "shutdown-sub"}}
|
||||||
svc, err := New(Options{Subsystems: []Subsystem{sub}})
|
svc, err := New(Options{Subsystems: []Subsystem{sub}})
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,3 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
@ -7,8 +5,8 @@ import (
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
core "dappco.re/go/core"
|
|
||||||
"forge.lthn.ai/core/go-ai/ai"
|
"forge.lthn.ai/core/go-ai/ai"
|
||||||
|
core "dappco.re/go/core"
|
||||||
"forge.lthn.ai/core/go-log"
|
"forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
@ -73,19 +71,19 @@ type MetricCount struct {
|
||||||
// // ev.Type == "dispatch.complete", ev.AgentID == "cladius", ev.Repo == "core-php"
|
// // ev.Type == "dispatch.complete", ev.AgentID == "cladius", ev.Repo == "core-php"
|
||||||
type MetricEventBrief struct {
|
type MetricEventBrief struct {
|
||||||
Type string `json:"type"` // e.g. "dispatch.complete"
|
Type string `json:"type"` // e.g. "dispatch.complete"
|
||||||
Timestamp time.Time `json:"timestamp"` // when the event occurred
|
Timestamp time.Time `json:"timestamp"` // when the event occurred
|
||||||
AgentID string `json:"agent_id,omitempty"` // e.g. "cladius"
|
AgentID string `json:"agent_id,omitempty"` // e.g. "cladius"
|
||||||
Repo string `json:"repo,omitempty"` // e.g. "core-php"
|
Repo string `json:"repo,omitempty"` // e.g. "core-php"
|
||||||
}
|
}
|
||||||
|
|
||||||
// registerMetricsTools adds metrics tools to the MCP server.
|
// registerMetricsTools adds metrics tools to the MCP server.
|
||||||
func (s *Service) registerMetricsTools(server *mcp.Server) {
|
func (s *Service) registerMetricsTools(server *mcp.Server) {
|
||||||
addToolRecorded(s, server, "metrics", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "metrics_record",
|
Name: "metrics_record",
|
||||||
Description: "Record a metrics event for AI/security tracking. Events are stored in daily JSONL files.",
|
Description: "Record a metrics event for AI/security tracking. Events are stored in daily JSONL files.",
|
||||||
}, s.metricsRecord)
|
}, s.metricsRecord)
|
||||||
|
|
||||||
addToolRecorded(s, server, "metrics", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "metrics_query",
|
Name: "metrics_query",
|
||||||
Description: "Query metrics events and get aggregated statistics by type, repo, and agent.",
|
Description: "Query metrics events and get aggregated statistics by type, repo, and agent.",
|
||||||
}, s.metricsQuery)
|
}, s.metricsQuery)
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,3 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
@ -141,32 +139,32 @@ func (s *Service) registerProcessTools(server *mcp.Server) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "process_start",
|
Name: "process_start",
|
||||||
Description: "Start a new external process. Returns process ID for tracking.",
|
Description: "Start a new external process. Returns process ID for tracking.",
|
||||||
}, s.processStart)
|
}, s.processStart)
|
||||||
|
|
||||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "process_stop",
|
Name: "process_stop",
|
||||||
Description: "Gracefully stop a running process by ID.",
|
Description: "Gracefully stop a running process by ID.",
|
||||||
}, s.processStop)
|
}, s.processStop)
|
||||||
|
|
||||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "process_kill",
|
Name: "process_kill",
|
||||||
Description: "Force kill a process by ID. Use when process_stop doesn't work.",
|
Description: "Force kill a process by ID. Use when process_stop doesn't work.",
|
||||||
}, s.processKill)
|
}, s.processKill)
|
||||||
|
|
||||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "process_list",
|
Name: "process_list",
|
||||||
Description: "List all managed processes. Use running_only=true for only active processes.",
|
Description: "List all managed processes. Use running_only=true for only active processes.",
|
||||||
}, s.processList)
|
}, s.processList)
|
||||||
|
|
||||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "process_output",
|
Name: "process_output",
|
||||||
Description: "Get the captured output of a process by ID.",
|
Description: "Get the captured output of a process by ID.",
|
||||||
}, s.processOutput)
|
}, s.processOutput)
|
||||||
|
|
||||||
addToolRecorded(s, server, "process", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "process_input",
|
Name: "process_input",
|
||||||
Description: "Send input to a running process stdin.",
|
Description: "Send input to a running process stdin.",
|
||||||
}, s.processInput)
|
}, s.processInput)
|
||||||
|
|
@ -176,10 +174,6 @@ func (s *Service) registerProcessTools(server *mcp.Server) bool {
|
||||||
|
|
||||||
// processStart handles the process_start tool call.
|
// processStart handles the process_start tool call.
|
||||||
func (s *Service) processStart(ctx context.Context, req *mcp.CallToolRequest, input ProcessStartInput) (*mcp.CallToolResult, ProcessStartOutput, error) {
|
func (s *Service) processStart(ctx context.Context, req *mcp.CallToolRequest, input ProcessStartInput) (*mcp.CallToolResult, ProcessStartOutput, error) {
|
||||||
if s.processService == nil {
|
|
||||||
return nil, ProcessStartOutput{}, log.E("processStart", "process service unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Security("MCP tool execution", "tool", "process_start", "command", input.Command, "args", input.Args, "dir", input.Dir, "user", log.Username())
|
s.logger.Security("MCP tool execution", "tool", "process_start", "command", input.Command, "args", input.Args, "dir", input.Dir, "user", log.Username())
|
||||||
|
|
||||||
if input.Command == "" {
|
if input.Command == "" {
|
||||||
|
|
@ -189,7 +183,7 @@ func (s *Service) processStart(ctx context.Context, req *mcp.CallToolRequest, in
|
||||||
opts := process.RunOptions{
|
opts := process.RunOptions{
|
||||||
Command: input.Command,
|
Command: input.Command,
|
||||||
Args: input.Args,
|
Args: input.Args,
|
||||||
Dir: s.resolveWorkspacePath(input.Dir),
|
Dir: input.Dir,
|
||||||
Env: input.Env,
|
Env: input.Env,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -207,29 +201,14 @@ func (s *Service) processStart(ctx context.Context, req *mcp.CallToolRequest, in
|
||||||
Args: proc.Args,
|
Args: proc.Args,
|
||||||
StartedAt: proc.StartedAt,
|
StartedAt: proc.StartedAt,
|
||||||
}
|
}
|
||||||
s.recordProcessRuntime(output.ID, processRuntime{
|
s.ChannelSend(ctx, "process.start", map[string]any{
|
||||||
Command: output.Command,
|
"id": output.ID, "pid": output.PID, "command": output.Command,
|
||||||
Args: output.Args,
|
|
||||||
Dir: info.Dir,
|
|
||||||
StartedAt: output.StartedAt,
|
|
||||||
})
|
|
||||||
s.ChannelSend(ctx, ChannelProcessStart, map[string]any{
|
|
||||||
"id": output.ID,
|
|
||||||
"pid": output.PID,
|
|
||||||
"command": output.Command,
|
|
||||||
"args": output.Args,
|
|
||||||
"dir": info.Dir,
|
|
||||||
"startedAt": output.StartedAt,
|
|
||||||
})
|
})
|
||||||
return nil, output, nil
|
return nil, output, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// processStop handles the process_stop tool call.
|
// processStop handles the process_stop tool call.
|
||||||
func (s *Service) processStop(ctx context.Context, req *mcp.CallToolRequest, input ProcessStopInput) (*mcp.CallToolResult, ProcessStopOutput, error) {
|
func (s *Service) processStop(ctx context.Context, req *mcp.CallToolRequest, input ProcessStopInput) (*mcp.CallToolResult, ProcessStopOutput, error) {
|
||||||
if s.processService == nil {
|
|
||||||
return nil, ProcessStopOutput{}, log.E("processStop", "process service unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Security("MCP tool execution", "tool", "process_stop", "id", input.ID, "user", log.Username())
|
s.logger.Security("MCP tool execution", "tool", "process_stop", "id", input.ID, "user", log.Username())
|
||||||
|
|
||||||
if input.ID == "" {
|
if input.ID == "" {
|
||||||
|
|
@ -242,23 +221,14 @@ func (s *Service) processStop(ctx context.Context, req *mcp.CallToolRequest, inp
|
||||||
return nil, ProcessStopOutput{}, log.E("processStop", "process not found", err)
|
return nil, ProcessStopOutput{}, log.E("processStop", "process not found", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use the process service's graceful shutdown path first so callers get
|
// For graceful stop, we use Kill() which sends SIGKILL
|
||||||
// a real stop signal before we fall back to a hard kill internally.
|
// A more sophisticated implementation could use SIGTERM first
|
||||||
if err := proc.Shutdown(); err != nil {
|
if err := proc.Kill(); err != nil {
|
||||||
log.Error("mcp: process stop failed", "id", input.ID, "err", err)
|
log.Error("mcp: process stop kill failed", "id", input.ID, "err", err)
|
||||||
return nil, ProcessStopOutput{}, log.E("processStop", "failed to stop process", err)
|
return nil, ProcessStopOutput{}, log.E("processStop", "failed to stop process", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
info := proc.Info()
|
s.ChannelSend(ctx, "process.exit", map[string]any{"id": input.ID, "signal": "stop"})
|
||||||
s.ChannelSend(ctx, ChannelProcessExit, map[string]any{
|
|
||||||
"id": input.ID,
|
|
||||||
"signal": "stop",
|
|
||||||
"command": info.Command,
|
|
||||||
"args": info.Args,
|
|
||||||
"dir": info.Dir,
|
|
||||||
"startedAt": info.StartedAt,
|
|
||||||
})
|
|
||||||
s.emitTestResult(ctx, input.ID, 0, 0, "stop", "")
|
|
||||||
return nil, ProcessStopOutput{
|
return nil, ProcessStopOutput{
|
||||||
ID: input.ID,
|
ID: input.ID,
|
||||||
Success: true,
|
Success: true,
|
||||||
|
|
@ -268,37 +238,18 @@ func (s *Service) processStop(ctx context.Context, req *mcp.CallToolRequest, inp
|
||||||
|
|
||||||
// processKill handles the process_kill tool call.
|
// processKill handles the process_kill tool call.
|
||||||
func (s *Service) processKill(ctx context.Context, req *mcp.CallToolRequest, input ProcessKillInput) (*mcp.CallToolResult, ProcessKillOutput, error) {
|
func (s *Service) processKill(ctx context.Context, req *mcp.CallToolRequest, input ProcessKillInput) (*mcp.CallToolResult, ProcessKillOutput, error) {
|
||||||
if s.processService == nil {
|
|
||||||
return nil, ProcessKillOutput{}, log.E("processKill", "process service unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Security("MCP tool execution", "tool", "process_kill", "id", input.ID, "user", log.Username())
|
s.logger.Security("MCP tool execution", "tool", "process_kill", "id", input.ID, "user", log.Username())
|
||||||
|
|
||||||
if input.ID == "" {
|
if input.ID == "" {
|
||||||
return nil, ProcessKillOutput{}, errIDEmpty
|
return nil, ProcessKillOutput{}, errIDEmpty
|
||||||
}
|
}
|
||||||
|
|
||||||
proc, err := s.processService.Get(input.ID)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("mcp: process kill failed", "id", input.ID, "err", err)
|
|
||||||
return nil, ProcessKillOutput{}, log.E("processKill", "process not found", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.processService.Kill(input.ID); err != nil {
|
if err := s.processService.Kill(input.ID); err != nil {
|
||||||
log.Error("mcp: process kill failed", "id", input.ID, "err", err)
|
log.Error("mcp: process kill failed", "id", input.ID, "err", err)
|
||||||
return nil, ProcessKillOutput{}, log.E("processKill", "failed to kill process", err)
|
return nil, ProcessKillOutput{}, log.E("processKill", "failed to kill process", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
info := proc.Info()
|
s.ChannelSend(ctx, "process.exit", map[string]any{"id": input.ID, "signal": "kill"})
|
||||||
s.ChannelSend(ctx, ChannelProcessExit, map[string]any{
|
|
||||||
"id": input.ID,
|
|
||||||
"signal": "kill",
|
|
||||||
"command": info.Command,
|
|
||||||
"args": info.Args,
|
|
||||||
"dir": info.Dir,
|
|
||||||
"startedAt": info.StartedAt,
|
|
||||||
})
|
|
||||||
s.emitTestResult(ctx, input.ID, 0, 0, "kill", "")
|
|
||||||
return nil, ProcessKillOutput{
|
return nil, ProcessKillOutput{
|
||||||
ID: input.ID,
|
ID: input.ID,
|
||||||
Success: true,
|
Success: true,
|
||||||
|
|
@ -308,10 +259,6 @@ func (s *Service) processKill(ctx context.Context, req *mcp.CallToolRequest, inp
|
||||||
|
|
||||||
// processList handles the process_list tool call.
|
// processList handles the process_list tool call.
|
||||||
func (s *Service) processList(ctx context.Context, req *mcp.CallToolRequest, input ProcessListInput) (*mcp.CallToolResult, ProcessListOutput, error) {
|
func (s *Service) processList(ctx context.Context, req *mcp.CallToolRequest, input ProcessListInput) (*mcp.CallToolResult, ProcessListOutput, error) {
|
||||||
if s.processService == nil {
|
|
||||||
return nil, ProcessListOutput{}, log.E("processList", "process service unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Info("MCP tool execution", "tool", "process_list", "running_only", input.RunningOnly, "user", log.Username())
|
s.logger.Info("MCP tool execution", "tool", "process_list", "running_only", input.RunningOnly, "user", log.Username())
|
||||||
|
|
||||||
var procs []*process.Process
|
var procs []*process.Process
|
||||||
|
|
@ -345,10 +292,6 @@ func (s *Service) processList(ctx context.Context, req *mcp.CallToolRequest, inp
|
||||||
|
|
||||||
// processOutput handles the process_output tool call.
|
// processOutput handles the process_output tool call.
|
||||||
func (s *Service) processOutput(ctx context.Context, req *mcp.CallToolRequest, input ProcessOutputInput) (*mcp.CallToolResult, ProcessOutputOutput, error) {
|
func (s *Service) processOutput(ctx context.Context, req *mcp.CallToolRequest, input ProcessOutputInput) (*mcp.CallToolResult, ProcessOutputOutput, error) {
|
||||||
if s.processService == nil {
|
|
||||||
return nil, ProcessOutputOutput{}, log.E("processOutput", "process service unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Info("MCP tool execution", "tool", "process_output", "id", input.ID, "user", log.Username())
|
s.logger.Info("MCP tool execution", "tool", "process_output", "id", input.ID, "user", log.Username())
|
||||||
|
|
||||||
if input.ID == "" {
|
if input.ID == "" {
|
||||||
|
|
@ -369,10 +312,6 @@ func (s *Service) processOutput(ctx context.Context, req *mcp.CallToolRequest, i
|
||||||
|
|
||||||
// processInput handles the process_input tool call.
|
// processInput handles the process_input tool call.
|
||||||
func (s *Service) processInput(ctx context.Context, req *mcp.CallToolRequest, input ProcessInputInput) (*mcp.CallToolResult, ProcessInputOutput, error) {
|
func (s *Service) processInput(ctx context.Context, req *mcp.CallToolRequest, input ProcessInputInput) (*mcp.CallToolResult, ProcessInputOutput, error) {
|
||||||
if s.processService == nil {
|
|
||||||
return nil, ProcessInputOutput{}, log.E("processInput", "process service unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Security("MCP tool execution", "tool", "process_input", "id", input.ID, "user", log.Username())
|
s.logger.Security("MCP tool execution", "tool", "process_input", "id", input.ID, "user", log.Username())
|
||||||
|
|
||||||
if input.ID == "" {
|
if input.ID == "" {
|
||||||
|
|
|
||||||
|
|
@ -275,7 +275,7 @@ func TestProcessInfo_Good(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestWithProcessService_Good verifies Options{ProcessService: ...}.
|
// TestWithProcessService_Good verifies the WithProcessService option.
|
||||||
func TestWithProcessService_Good(t *testing.T) {
|
func TestWithProcessService_Good(t *testing.T) {
|
||||||
// Note: We can't easily create a real process.Service here without Core,
|
// Note: We can't easily create a real process.Service here without Core,
|
||||||
// so we just verify the option doesn't panic with nil.
|
// so we just verify the option doesn't panic with nil.
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,3 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
@ -101,17 +99,17 @@ type RAGCollectionsOutput struct {
|
||||||
|
|
||||||
// registerRAGTools adds RAG tools to the MCP server.
|
// registerRAGTools adds RAG tools to the MCP server.
|
||||||
func (s *Service) registerRAGTools(server *mcp.Server) {
|
func (s *Service) registerRAGTools(server *mcp.Server) {
|
||||||
addToolRecorded(s, server, "rag", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "rag_query",
|
Name: "rag_query",
|
||||||
Description: "Query the RAG vector database for relevant documentation. Returns semantically similar content based on the query.",
|
Description: "Query the RAG vector database for relevant documentation. Returns semantically similar content based on the query.",
|
||||||
}, s.ragQuery)
|
}, s.ragQuery)
|
||||||
|
|
||||||
addToolRecorded(s, server, "rag", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "rag_ingest",
|
Name: "rag_ingest",
|
||||||
Description: "Ingest documents into the RAG vector database. Supports both single files and directories.",
|
Description: "Ingest documents into the RAG vector database. Supports both single files and directories.",
|
||||||
}, s.ragIngest)
|
}, s.ragIngest)
|
||||||
|
|
||||||
addToolRecorded(s, server, "rag", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "rag_collections",
|
Name: "rag_collections",
|
||||||
Description: "List all available collections in the RAG vector database.",
|
Description: "List all available collections in the RAG vector database.",
|
||||||
}, s.ragCollections)
|
}, s.ragCollections)
|
||||||
|
|
@ -185,13 +183,12 @@ func (s *Service) ragIngest(ctx context.Context, req *mcp.CallToolRequest, input
|
||||||
log.Error("mcp: rag ingest stat failed", "path", input.Path, "err", err)
|
log.Error("mcp: rag ingest stat failed", "path", input.Path, "err", err)
|
||||||
return nil, RAGIngestOutput{}, log.E("ragIngest", "failed to access path", err)
|
return nil, RAGIngestOutput{}, log.E("ragIngest", "failed to access path", err)
|
||||||
}
|
}
|
||||||
resolvedPath := s.resolveWorkspacePath(input.Path)
|
|
||||||
|
|
||||||
var message string
|
var message string
|
||||||
var chunks int
|
var chunks int
|
||||||
if info.IsDir() {
|
if info.IsDir() {
|
||||||
// Ingest directory
|
// Ingest directory
|
||||||
err = rag.IngestDirectory(ctx, resolvedPath, collection, input.Recreate)
|
err = rag.IngestDirectory(ctx, input.Path, collection, input.Recreate)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("mcp: rag ingest directory failed", "path", input.Path, "collection", collection, "err", err)
|
log.Error("mcp: rag ingest directory failed", "path", input.Path, "collection", collection, "err", err)
|
||||||
return nil, RAGIngestOutput{}, log.E("ragIngest", "failed to ingest directory", err)
|
return nil, RAGIngestOutput{}, log.E("ragIngest", "failed to ingest directory", err)
|
||||||
|
|
@ -199,7 +196,7 @@ func (s *Service) ragIngest(ctx context.Context, req *mcp.CallToolRequest, input
|
||||||
message = core.Sprintf("Successfully ingested directory %s into collection %s", input.Path, collection)
|
message = core.Sprintf("Successfully ingested directory %s into collection %s", input.Path, collection)
|
||||||
} else {
|
} else {
|
||||||
// Ingest single file
|
// Ingest single file
|
||||||
chunks, err = rag.IngestSingleFile(ctx, resolvedPath, collection)
|
chunks, err = rag.IngestSingleFile(ctx, input.Path, collection)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("mcp: rag ingest file failed", "path", input.Path, "collection", collection, "err", err)
|
log.Error("mcp: rag ingest file failed", "path", input.Path, "collection", collection, "err", err)
|
||||||
return nil, RAGIngestOutput{}, log.E("ragIngest", "failed to ingest file", err)
|
return nil, RAGIngestOutput{}, log.E("ragIngest", "failed to ingest file", err)
|
||||||
|
|
|
||||||
|
|
@ -1,15 +1,8 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"image"
|
|
||||||
"image/jpeg"
|
|
||||||
_ "image/png"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
|
@ -32,20 +25,6 @@ var (
|
||||||
errSelectorRequired = log.E("webview", "selector is required", nil)
|
errSelectorRequired = log.E("webview", "selector is required", nil)
|
||||||
)
|
)
|
||||||
|
|
||||||
// closeWebviewConnection closes and clears the shared browser connection.
|
|
||||||
func closeWebviewConnection() error {
|
|
||||||
webviewMu.Lock()
|
|
||||||
defer webviewMu.Unlock()
|
|
||||||
|
|
||||||
if webviewInstance == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err := webviewInstance.Close()
|
|
||||||
webviewInstance = nil
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// WebviewConnectInput contains parameters for connecting to Chrome DevTools.
|
// WebviewConnectInput contains parameters for connecting to Chrome DevTools.
|
||||||
//
|
//
|
||||||
// input := WebviewConnectInput{DebugURL: "http://localhost:9222", Timeout: 10}
|
// input := WebviewConnectInput{DebugURL: "http://localhost:9222", Timeout: 10}
|
||||||
|
|
@ -222,52 +201,52 @@ type WebviewDisconnectOutput struct {
|
||||||
|
|
||||||
// registerWebviewTools adds webview tools to the MCP server.
|
// registerWebviewTools adds webview tools to the MCP server.
|
||||||
func (s *Service) registerWebviewTools(server *mcp.Server) {
|
func (s *Service) registerWebviewTools(server *mcp.Server) {
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_connect",
|
Name: "webview_connect",
|
||||||
Description: "Connect to Chrome DevTools Protocol. Start Chrome with --remote-debugging-port=9222 first.",
|
Description: "Connect to Chrome DevTools Protocol. Start Chrome with --remote-debugging-port=9222 first.",
|
||||||
}, s.webviewConnect)
|
}, s.webviewConnect)
|
||||||
|
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_disconnect",
|
Name: "webview_disconnect",
|
||||||
Description: "Disconnect from Chrome DevTools.",
|
Description: "Disconnect from Chrome DevTools.",
|
||||||
}, s.webviewDisconnect)
|
}, s.webviewDisconnect)
|
||||||
|
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_navigate",
|
Name: "webview_navigate",
|
||||||
Description: "Navigate the browser to a URL.",
|
Description: "Navigate the browser to a URL.",
|
||||||
}, s.webviewNavigate)
|
}, s.webviewNavigate)
|
||||||
|
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_click",
|
Name: "webview_click",
|
||||||
Description: "Click on an element by CSS selector.",
|
Description: "Click on an element by CSS selector.",
|
||||||
}, s.webviewClick)
|
}, s.webviewClick)
|
||||||
|
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_type",
|
Name: "webview_type",
|
||||||
Description: "Type text into an element by CSS selector.",
|
Description: "Type text into an element by CSS selector.",
|
||||||
}, s.webviewType)
|
}, s.webviewType)
|
||||||
|
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_query",
|
Name: "webview_query",
|
||||||
Description: "Query DOM elements by CSS selector.",
|
Description: "Query DOM elements by CSS selector.",
|
||||||
}, s.webviewQuery)
|
}, s.webviewQuery)
|
||||||
|
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_console",
|
Name: "webview_console",
|
||||||
Description: "Get browser console output.",
|
Description: "Get browser console output.",
|
||||||
}, s.webviewConsole)
|
}, s.webviewConsole)
|
||||||
|
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_eval",
|
Name: "webview_eval",
|
||||||
Description: "Evaluate JavaScript in the browser context.",
|
Description: "Evaluate JavaScript in the browser context.",
|
||||||
}, s.webviewEval)
|
}, s.webviewEval)
|
||||||
|
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_screenshot",
|
Name: "webview_screenshot",
|
||||||
Description: "Capture a screenshot of the browser window.",
|
Description: "Capture a screenshot of the browser window.",
|
||||||
}, s.webviewScreenshot)
|
}, s.webviewScreenshot)
|
||||||
|
|
||||||
addToolRecorded(s, server, "webview", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "webview_wait",
|
Name: "webview_wait",
|
||||||
Description: "Wait for an element to appear by CSS selector.",
|
Description: "Wait for an element to appear by CSS selector.",
|
||||||
}, s.webviewWait)
|
}, s.webviewWait)
|
||||||
|
|
@ -554,7 +533,6 @@ func (s *Service) webviewScreenshot(ctx context.Context, req *mcp.CallToolReques
|
||||||
if format == "" {
|
if format == "" {
|
||||||
format = "png"
|
format = "png"
|
||||||
}
|
}
|
||||||
format = strings.ToLower(format)
|
|
||||||
|
|
||||||
data, err := webviewInstance.Screenshot()
|
data, err := webviewInstance.Screenshot()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -562,40 +540,13 @@ func (s *Service) webviewScreenshot(ctx context.Context, req *mcp.CallToolReques
|
||||||
return nil, WebviewScreenshotOutput{}, log.E("webviewScreenshot", "failed to capture screenshot", err)
|
return nil, WebviewScreenshotOutput{}, log.E("webviewScreenshot", "failed to capture screenshot", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
encoded, outputFormat, err := normalizeScreenshotData(data, format)
|
|
||||||
if err != nil {
|
|
||||||
return nil, WebviewScreenshotOutput{}, log.E("webviewScreenshot", "failed to encode screenshot", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, WebviewScreenshotOutput{
|
return nil, WebviewScreenshotOutput{
|
||||||
Success: true,
|
Success: true,
|
||||||
Data: base64.StdEncoding.EncodeToString(encoded),
|
Data: base64.StdEncoding.EncodeToString(data),
|
||||||
Format: outputFormat,
|
Format: format,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// normalizeScreenshotData converts screenshot bytes into the requested format.
|
|
||||||
// PNG is preserved as-is. JPEG requests are re-encoded so the output matches
|
|
||||||
// the declared format in WebviewScreenshotOutput.
|
|
||||||
func normalizeScreenshotData(data []byte, format string) ([]byte, string, error) {
|
|
||||||
switch format {
|
|
||||||
case "", "png":
|
|
||||||
return data, "png", nil
|
|
||||||
case "jpeg", "jpg":
|
|
||||||
img, _, err := image.Decode(bytes.NewReader(data))
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := jpeg.Encode(&buf, img, &jpeg.Options{Quality: 90}); err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
return buf.Bytes(), "jpeg", nil
|
|
||||||
default:
|
|
||||||
return nil, "", log.E("webviewScreenshot", "unsupported screenshot format: "+format, nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// webviewWait handles the webview_wait tool call.
|
// webviewWait handles the webview_wait tool call.
|
||||||
func (s *Service) webviewWait(ctx context.Context, req *mcp.CallToolRequest, input WebviewWaitInput) (*mcp.CallToolResult, WebviewWaitOutput, error) {
|
func (s *Service) webviewWait(ctx context.Context, req *mcp.CallToolRequest, input WebviewWaitInput) (*mcp.CallToolResult, WebviewWaitOutput, error) {
|
||||||
webviewMu.Lock()
|
webviewMu.Lock()
|
||||||
|
|
@ -611,15 +562,7 @@ func (s *Service) webviewWait(ctx context.Context, req *mcp.CallToolRequest, inp
|
||||||
return nil, WebviewWaitOutput{}, errSelectorRequired
|
return nil, WebviewWaitOutput{}, errSelectorRequired
|
||||||
}
|
}
|
||||||
|
|
||||||
timeout := time.Duration(input.Timeout) * time.Second
|
if err := webviewInstance.WaitForSelector(input.Selector); err != nil {
|
||||||
if timeout <= 0 {
|
|
||||||
timeout = 30 * time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := waitForSelector(ctx, timeout, input.Selector, func(selector string) error {
|
|
||||||
_, err := webviewInstance.QuerySelector(selector)
|
|
||||||
return err
|
|
||||||
}); err != nil {
|
|
||||||
log.Error("mcp: webview wait failed", "selector", input.Selector, "err", err)
|
log.Error("mcp: webview wait failed", "selector", input.Selector, "err", err)
|
||||||
return nil, WebviewWaitOutput{}, log.E("webviewWait", "failed to wait for selector", err)
|
return nil, WebviewWaitOutput{}, log.E("webviewWait", "failed to wait for selector", err)
|
||||||
}
|
}
|
||||||
|
|
@ -629,34 +572,3 @@ func (s *Service) webviewWait(ctx context.Context, req *mcp.CallToolRequest, inp
|
||||||
Message: core.Sprintf("Element found: %s", input.Selector),
|
Message: core.Sprintf("Element found: %s", input.Selector),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// waitForSelector polls until the selector exists or the timeout elapses.
|
|
||||||
// Query helpers in go-webview report "element not found" as an error, so we
|
|
||||||
// keep retrying until we see the element or hit the deadline.
|
|
||||||
func waitForSelector(ctx context.Context, timeout time.Duration, selector string, query func(string) error) error {
|
|
||||||
if timeout <= 0 {
|
|
||||||
timeout = 30 * time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
waitCtx, cancel := context.WithTimeout(ctx, timeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
ticker := time.NewTicker(10 * time.Millisecond)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
err := query(selector)
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if !strings.Contains(err.Error(), "element not found") {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-waitCtx.Done():
|
|
||||||
return log.E("webviewWait", "timed out waiting for selector", waitCtx.Err())
|
|
||||||
case <-ticker.C:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -1,13 +1,6 @@
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"image"
|
|
||||||
"image/color"
|
|
||||||
"image/jpeg"
|
|
||||||
"image/png"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
|
@ -222,41 +215,6 @@ func TestWebviewWaitInput_Good(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWaitForSelector_Good(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
attempts := 0
|
|
||||||
err := waitForSelector(ctx, 200*time.Millisecond, "#ready", func(selector string) error {
|
|
||||||
attempts++
|
|
||||||
if attempts < 3 {
|
|
||||||
return errors.New("element not found: " + selector)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("waitForSelector failed: %v", err)
|
|
||||||
}
|
|
||||||
if attempts != 3 {
|
|
||||||
t.Fatalf("expected 3 attempts, got %d", attempts)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWaitForSelector_Bad_Timeout(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
err := waitForSelector(ctx, 50*time.Millisecond, "#missing", func(selector string) error {
|
|
||||||
return errors.New("element not found: " + selector)
|
|
||||||
})
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected waitForSelector to time out")
|
|
||||||
}
|
|
||||||
if time.Since(start) < 50*time.Millisecond {
|
|
||||||
t.Fatal("expected waitForSelector to honor timeout")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestWebviewConnectOutput_Good verifies the WebviewConnectOutput struct has expected fields.
|
// TestWebviewConnectOutput_Good verifies the WebviewConnectOutput struct has expected fields.
|
||||||
func TestWebviewConnectOutput_Good(t *testing.T) {
|
func TestWebviewConnectOutput_Good(t *testing.T) {
|
||||||
output := WebviewConnectOutput{
|
output := WebviewConnectOutput{
|
||||||
|
|
@ -400,61 +358,6 @@ func TestWebviewScreenshotOutput_Good(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNormalizeScreenshotData_Good_Png(t *testing.T) {
|
|
||||||
src := mustEncodeTestPNG(t)
|
|
||||||
|
|
||||||
out, format, err := normalizeScreenshotData(src, "png")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("normalizeScreenshotData failed: %v", err)
|
|
||||||
}
|
|
||||||
if format != "png" {
|
|
||||||
t.Fatalf("expected png format, got %q", format)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(out, src) {
|
|
||||||
t.Fatal("expected png output to preserve the original bytes")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNormalizeScreenshotData_Good_Jpeg(t *testing.T) {
|
|
||||||
src := mustEncodeTestPNG(t)
|
|
||||||
|
|
||||||
out, format, err := normalizeScreenshotData(src, "jpeg")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("normalizeScreenshotData failed: %v", err)
|
|
||||||
}
|
|
||||||
if format != "jpeg" {
|
|
||||||
t.Fatalf("expected jpeg format, got %q", format)
|
|
||||||
}
|
|
||||||
if bytes.Equal(out, src) {
|
|
||||||
t.Fatal("expected jpeg output to differ from png input")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := jpeg.Decode(bytes.NewReader(out)); err != nil {
|
|
||||||
t.Fatalf("expected output to decode as an image: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNormalizeScreenshotData_Bad_UnsupportedFormat(t *testing.T) {
|
|
||||||
src := mustEncodeTestPNG(t)
|
|
||||||
|
|
||||||
if _, _, err := normalizeScreenshotData(src, "gif"); err == nil {
|
|
||||||
t.Fatal("expected unsupported format error")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustEncodeTestPNG(t *testing.T) []byte {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
img := image.NewRGBA(image.Rect(0, 0, 1, 1))
|
|
||||||
img.Set(0, 0, color.RGBA{R: 200, G: 80, B: 40, A: 255})
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := png.Encode(&buf, img); err != nil {
|
|
||||||
t.Fatalf("png encode failed: %v", err)
|
|
||||||
}
|
|
||||||
return buf.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestWebviewElementInfo_Good verifies the WebviewElementInfo struct has expected fields.
|
// TestWebviewElementInfo_Good verifies the WebviewElementInfo struct has expected fields.
|
||||||
func TestWebviewElementInfo_Good(t *testing.T) {
|
func TestWebviewElementInfo_Good(t *testing.T) {
|
||||||
elem := WebviewElementInfo{
|
elem := WebviewElementInfo{
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,3 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
@ -49,12 +47,12 @@ func (s *Service) registerWSTools(server *mcp.Server) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
addToolRecorded(s, server, "ws", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ws_start",
|
Name: "ws_start",
|
||||||
Description: "Start the WebSocket server for real-time process output streaming.",
|
Description: "Start the WebSocket server for real-time process output streaming.",
|
||||||
}, s.wsStart)
|
}, s.wsStart)
|
||||||
|
|
||||||
addToolRecorded(s, server, "ws", &mcp.Tool{
|
mcp.AddTool(server, &mcp.Tool{
|
||||||
Name: "ws_info",
|
Name: "ws_info",
|
||||||
Description: "Get WebSocket hub statistics (connected clients and active channels).",
|
Description: "Get WebSocket hub statistics (connected clients and active channels).",
|
||||||
}, s.wsInfo)
|
}, s.wsInfo)
|
||||||
|
|
@ -64,10 +62,6 @@ func (s *Service) registerWSTools(server *mcp.Server) bool {
|
||||||
|
|
||||||
// wsStart handles the ws_start tool call.
|
// wsStart handles the ws_start tool call.
|
||||||
func (s *Service) wsStart(ctx context.Context, req *mcp.CallToolRequest, input WSStartInput) (*mcp.CallToolResult, WSStartOutput, error) {
|
func (s *Service) wsStart(ctx context.Context, req *mcp.CallToolRequest, input WSStartInput) (*mcp.CallToolResult, WSStartOutput, error) {
|
||||||
if s.wsHub == nil {
|
|
||||||
return nil, WSStartOutput{}, log.E("wsStart", "websocket hub unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
addr := input.Addr
|
addr := input.Addr
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
addr = ":8080"
|
addr = ":8080"
|
||||||
|
|
@ -123,10 +117,6 @@ func (s *Service) wsStart(ctx context.Context, req *mcp.CallToolRequest, input W
|
||||||
|
|
||||||
// wsInfo handles the ws_info tool call.
|
// wsInfo handles the ws_info tool call.
|
||||||
func (s *Service) wsInfo(ctx context.Context, req *mcp.CallToolRequest, input WSInfoInput) (*mcp.CallToolResult, WSInfoOutput, error) {
|
func (s *Service) wsInfo(ctx context.Context, req *mcp.CallToolRequest, input WSInfoInput) (*mcp.CallToolResult, WSInfoOutput, error) {
|
||||||
if s.wsHub == nil {
|
|
||||||
return nil, WSInfoOutput{}, log.E("wsInfo", "websocket hub unavailable", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Info("MCP tool execution", "tool", "ws_info", "user", log.Username())
|
s.logger.Info("MCP tool execution", "tool", "ws_info", "user", log.Username())
|
||||||
|
|
||||||
stats := s.wsHub.Stats()
|
stats := s.wsHub.Stats()
|
||||||
|
|
|
||||||
|
|
@ -83,7 +83,7 @@ func TestWSInfoOutput_Good(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestWithWSHub_Good verifies Options{WSHub: ...}.
|
// TestWithWSHub_Good verifies the WithWSHub option.
|
||||||
func TestWithWSHub_Good(t *testing.T) {
|
func TestWithWSHub_Good(t *testing.T) {
|
||||||
hub := ws.NewHub()
|
hub := ws.NewHub()
|
||||||
|
|
||||||
|
|
@ -97,7 +97,7 @@ func TestWithWSHub_Good(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestWithWSHub_Nil verifies Options{WSHub: nil}.
|
// TestWithWSHub_Nil verifies the WithWSHub option with nil.
|
||||||
func TestWithWSHub_Nil(t *testing.T) {
|
func TestWithWSHub_Nil(t *testing.T) {
|
||||||
s, err := New(Options{WSHub: nil})
|
s, err := New(Options{WSHub: nil})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
||||||
|
|
@ -7,10 +7,9 @@ import (
|
||||||
"crypto/subtle"
|
"crypto/subtle"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
core "dappco.re/go/core"
|
||||||
coreerr "forge.lthn.ai/core/go-log"
|
coreerr "forge.lthn.ai/core/go-log"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
@ -37,7 +36,7 @@ func (s *Service) ServeHTTP(ctx context.Context, addr string) error {
|
||||||
addr = DefaultHTTPAddr
|
addr = DefaultHTTPAddr
|
||||||
}
|
}
|
||||||
|
|
||||||
authToken := os.Getenv("MCP_AUTH_TOKEN")
|
authToken := core.Env("MCP_AUTH_TOKEN")
|
||||||
|
|
||||||
handler := mcp.NewStreamableHTTPHandler(
|
handler := mcp.NewStreamableHTTPHandler(
|
||||||
func(r *http.Request) *mcp.Server {
|
func(r *http.Request) *mcp.Server {
|
||||||
|
|
@ -82,21 +81,22 @@ func (s *Service) ServeHTTP(ctx context.Context, addr string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// withAuth wraps an http.Handler with Bearer token authentication.
|
// withAuth wraps an http.Handler with Bearer token authentication.
|
||||||
// If token is empty, authentication is disabled for local development.
|
// If token is empty, requests are rejected.
|
||||||
func withAuth(token string, next http.Handler) http.Handler {
|
func withAuth(token string, next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
if strings.TrimSpace(token) == "" {
|
if core.Trim(token) == "" {
|
||||||
next.ServeHTTP(w, r)
|
w.Header().Set("WWW-Authenticate", `Bearer`)
|
||||||
|
http.Error(w, `{"error":"authentication not configured"}`, http.StatusUnauthorized)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
auth := r.Header.Get("Authorization")
|
auth := r.Header.Get("Authorization")
|
||||||
if !strings.HasPrefix(auth, "Bearer ") {
|
if !core.HasPrefix(auth, "Bearer ") {
|
||||||
http.Error(w, `{"error":"missing Bearer token"}`, http.StatusUnauthorized)
|
http.Error(w, `{"error":"missing Bearer token"}`, http.StatusUnauthorized)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
provided := strings.TrimSpace(strings.TrimPrefix(auth, "Bearer "))
|
provided := core.Trim(core.TrimPrefix(auth, "Bearer "))
|
||||||
if len(provided) == 0 {
|
if len(provided) == 0 {
|
||||||
http.Error(w, `{"error":"missing Bearer token"}`, http.StatusUnauthorized)
|
http.Error(w, `{"error":"missing Bearer token"}`, http.StatusUnauthorized)
|
||||||
return
|
return
|
||||||
|
|
|
||||||
|
|
@ -107,44 +107,6 @@ func TestServeHTTP_Good_AuthRequired(t *testing.T) {
|
||||||
<-errCh
|
<-errCh
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServeHTTP_Good_NoAuthConfigured(t *testing.T) {
|
|
||||||
os.Unsetenv("MCP_AUTH_TOKEN")
|
|
||||||
|
|
||||||
s, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to find free port: %v", err)
|
|
||||||
}
|
|
||||||
addr := listener.Addr().String()
|
|
||||||
listener.Close()
|
|
||||||
|
|
||||||
errCh := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
errCh <- s.ServeHTTP(ctx, addr)
|
|
||||||
}()
|
|
||||||
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
|
||||||
|
|
||||||
resp, err := http.Get(fmt.Sprintf("http://%s/mcp", addr))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("request failed: %v", err)
|
|
||||||
}
|
|
||||||
resp.Body.Close()
|
|
||||||
if resp.StatusCode == 401 {
|
|
||||||
t.Fatalf("expected /mcp to be open without MCP_AUTH_TOKEN, got %d", resp.StatusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
cancel()
|
|
||||||
<-errCh
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithAuth_Good_ValidToken(t *testing.T) {
|
func TestWithAuth_Good_ValidToken(t *testing.T) {
|
||||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
w.WriteHeader(200)
|
w.WriteHeader(200)
|
||||||
|
|
@ -195,18 +157,19 @@ func TestWithAuth_Bad_MissingToken(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWithAuth_Good_EmptyConfiguredToken_DisablesAuth(t *testing.T) {
|
func TestWithAuth_Bad_EmptyConfiguredToken(t *testing.T) {
|
||||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
w.WriteHeader(200)
|
w.WriteHeader(200)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
// Empty token now requires explicit configuration
|
||||||
wrapped := withAuth("", handler)
|
wrapped := withAuth("", handler)
|
||||||
|
|
||||||
req, _ := http.NewRequest("GET", "/", nil)
|
req, _ := http.NewRequest("GET", "/", nil)
|
||||||
rr := &fakeResponseWriter{code: 200}
|
rr := &fakeResponseWriter{code: 200}
|
||||||
wrapped.ServeHTTP(rr, req)
|
wrapped.ServeHTTP(rr, req)
|
||||||
if rr.code != 200 {
|
if rr.code != 401 {
|
||||||
t.Errorf("expected 200 with empty configured token, got %d", rr.code)
|
t.Errorf("expected 401 with empty configured token, got %d", rr.code)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,3 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
@ -18,6 +16,7 @@ import (
|
||||||
// }
|
// }
|
||||||
func (s *Service) ServeStdio(ctx context.Context) error {
|
func (s *Service) ServeStdio(ctx context.Context) error {
|
||||||
s.logger.Info("MCP Stdio server starting", "user", log.Username())
|
s.logger.Info("MCP Stdio server starting", "user", log.Username())
|
||||||
|
s.stdioMode = true
|
||||||
return s.server.Run(ctx, &mcp.IOTransport{
|
return s.server.Run(ctx, &mcp.IOTransport{
|
||||||
Reader: os.Stdin,
|
Reader: os.Stdin,
|
||||||
Writer: sharedStdout,
|
Writer: sharedStdout,
|
||||||
|
|
|
||||||
|
|
@ -1,16 +1,14 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
goio "io"
|
goio "io"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
core "dappco.re/go/core"
|
||||||
"github.com/modelcontextprotocol/go-sdk/jsonrpc"
|
"github.com/modelcontextprotocol/go-sdk/jsonrpc"
|
||||||
"github.com/modelcontextprotocol/go-sdk/mcp"
|
"github.com/modelcontextprotocol/go-sdk/mcp"
|
||||||
)
|
)
|
||||||
|
|
@ -31,7 +29,7 @@ var diagWriter goio.Writer = os.Stderr
|
||||||
func diagPrintf(format string, args ...any) {
|
func diagPrintf(format string, args ...any) {
|
||||||
diagMu.Lock()
|
diagMu.Lock()
|
||||||
defer diagMu.Unlock()
|
defer diagMu.Unlock()
|
||||||
fmt.Fprintf(diagWriter, format, args...)
|
diagWriter.Write([]byte(core.Sprintf(format, args...))) //nolint:errcheck
|
||||||
}
|
}
|
||||||
|
|
||||||
// setDiagWriter swaps the diagnostic writer and returns the previous one.
|
// setDiagWriter swaps the diagnostic writer and returns the previous one.
|
||||||
|
|
@ -57,14 +55,11 @@ type TCPTransport struct {
|
||||||
|
|
||||||
// NewTCPTransport creates a new TCP transport listener.
|
// NewTCPTransport creates a new TCP transport listener.
|
||||||
// Defaults to 127.0.0.1 when the host component is empty (e.g. ":9100").
|
// Defaults to 127.0.0.1 when the host component is empty (e.g. ":9100").
|
||||||
// Defaults to DefaultTCPAddr when addr is empty.
|
|
||||||
// Emits a security warning when explicitly binding to 0.0.0.0 (all interfaces).
|
// Emits a security warning when explicitly binding to 0.0.0.0 (all interfaces).
|
||||||
//
|
//
|
||||||
// t, err := NewTCPTransport("127.0.0.1:9100")
|
// t, err := NewTCPTransport("127.0.0.1:9100")
|
||||||
// t, err := NewTCPTransport(":9100") // defaults to 127.0.0.1:9100
|
// t, err := NewTCPTransport(":9100") // defaults to 127.0.0.1:9100
|
||||||
func NewTCPTransport(addr string) (*TCPTransport, error) {
|
func NewTCPTransport(addr string) (*TCPTransport, error) {
|
||||||
addr = normalizeTCPAddr(addr)
|
|
||||||
|
|
||||||
host, port, _ := net.SplitHostPort(addr)
|
host, port, _ := net.SplitHostPort(addr)
|
||||||
if host == "" {
|
if host == "" {
|
||||||
addr = net.JoinHostPort("127.0.0.1", port)
|
addr = net.JoinHostPort("127.0.0.1", port)
|
||||||
|
|
@ -78,23 +73,6 @@ func NewTCPTransport(addr string) (*TCPTransport, error) {
|
||||||
return &TCPTransport{addr: addr, listener: listener}, nil
|
return &TCPTransport{addr: addr, listener: listener}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func normalizeTCPAddr(addr string) string {
|
|
||||||
if addr == "" {
|
|
||||||
return DefaultTCPAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
host, port, err := net.SplitHostPort(addr)
|
|
||||||
if err != nil {
|
|
||||||
return addr
|
|
||||||
}
|
|
||||||
|
|
||||||
if host == "" {
|
|
||||||
return net.JoinHostPort("127.0.0.1", port)
|
|
||||||
}
|
|
||||||
|
|
||||||
return addr
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeTCP starts a TCP server for the MCP service.
|
// ServeTCP starts a TCP server for the MCP service.
|
||||||
// It accepts connections and spawns a new MCP server session for each connection.
|
// It accepts connections and spawns a new MCP server session for each connection.
|
||||||
//
|
//
|
||||||
|
|
@ -113,7 +91,11 @@ func (s *Service) ServeTCP(ctx context.Context, addr string) error {
|
||||||
<-ctx.Done()
|
<-ctx.Done()
|
||||||
_ = t.listener.Close()
|
_ = t.listener.Close()
|
||||||
}()
|
}()
|
||||||
diagPrintf("MCP TCP server listening on %s\n", t.listener.Addr().String())
|
|
||||||
|
if addr == "" {
|
||||||
|
addr = t.listener.Addr().String()
|
||||||
|
}
|
||||||
|
diagPrintf("MCP TCP server listening on %s\n", addr)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
conn, err := t.listener.Accept()
|
conn, err := t.listener.Accept()
|
||||||
|
|
@ -141,7 +123,6 @@ func (s *Service) handleConnection(ctx context.Context, conn net.Conn) {
|
||||||
conn.Close()
|
conn.Close()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer session.Close()
|
|
||||||
// Block until the session ends
|
// Block until the session ends
|
||||||
if err := session.Wait(); err != nil {
|
if err := session.Wait(); err != nil {
|
||||||
diagPrintf("Session ended: %v\n", err)
|
diagPrintf("Session ended: %v\n", err)
|
||||||
|
|
|
||||||
|
|
@ -10,167 +10,137 @@ import (
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNewTCPTransport_Defaults(t *testing.T) {
|
func TestTransportTcp_NewTCPTransport_Good(t *testing.T) {
|
||||||
// Test that empty string gets replaced with default address constant
|
// Default constant is correctly set
|
||||||
// Note: We can't actually bind to 9100 as it may be in use,
|
|
||||||
// so we verify the address is set correctly before Listen is called
|
|
||||||
if DefaultTCPAddr != "127.0.0.1:9100" {
|
if DefaultTCPAddr != "127.0.0.1:9100" {
|
||||||
t.Errorf("Expected default constant 127.0.0.1:9100, got %s", DefaultTCPAddr)
|
t.Errorf("expected default constant 127.0.0.1:9100, got %s", DefaultTCPAddr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test with a dynamic port to verify transport creation works
|
// Create transport with dynamic port
|
||||||
tr, err := NewTCPTransport("127.0.0.1:0")
|
tr, err := NewTCPTransport("127.0.0.1:0")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to create transport with dynamic port: %v", err)
|
t.Fatalf("failed to create transport with dynamic port: %v", err)
|
||||||
}
|
}
|
||||||
defer tr.listener.Close()
|
defer tr.listener.Close()
|
||||||
|
|
||||||
// Verify we got a valid address
|
|
||||||
if tr.addr != "127.0.0.1:0" {
|
if tr.addr != "127.0.0.1:0" {
|
||||||
t.Errorf("Expected address to be set, got %s", tr.addr)
|
t.Errorf("expected address to be set, got %s", tr.addr)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNormalizeTCPAddr_Good_Defaults(t *testing.T) {
|
func TestTransportTcp_NewTCPTransport_Bad(t *testing.T) {
|
||||||
tests := []struct {
|
// Binding to an already-in-use port returns an error
|
||||||
name string
|
tr, err := NewTCPTransport("127.0.0.1:0")
|
||||||
in string
|
if err != nil {
|
||||||
want string
|
t.Fatalf("first bind failed unexpectedly: %v", err)
|
||||||
}{
|
|
||||||
{name: "empty", in: "", want: DefaultTCPAddr},
|
|
||||||
{name: "missing host", in: ":9100", want: "127.0.0.1:9100"},
|
|
||||||
{name: "explicit host", in: "127.0.0.1:9100", want: "127.0.0.1:9100"},
|
|
||||||
}
|
}
|
||||||
|
defer tr.listener.Close()
|
||||||
|
|
||||||
for _, tt := range tests {
|
addr := tr.listener.Addr().String()
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
_, err = NewTCPTransport(addr)
|
||||||
if got := normalizeTCPAddr(tt.in); got != tt.want {
|
if err == nil {
|
||||||
t.Fatalf("normalizeTCPAddr(%q) = %q, want %q", tt.in, got, tt.want)
|
t.Error("expected error when binding to already-in-use port, got nil")
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewTCPTransport_Warning(t *testing.T) {
|
func TestTransportTcp_NewTCPTransport_Ugly(t *testing.T) {
|
||||||
// Capture warning output via setDiagWriter (mutex-protected, no race).
|
// Empty host defaults to 127.0.0.1 — never binds to 0.0.0.0
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
old := setDiagWriter(&buf)
|
old := setDiagWriter(&buf)
|
||||||
defer setDiagWriter(old)
|
defer setDiagWriter(old)
|
||||||
|
|
||||||
// Trigger warning — use port 0 (OS assigns free port)
|
tr, err := NewTCPTransport(":0")
|
||||||
tr, err := NewTCPTransport("0.0.0.0:0")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to create transport: %v", err)
|
t.Fatalf("failed to create transport with empty host: %v", err)
|
||||||
}
|
}
|
||||||
defer tr.listener.Close()
|
defer tr.listener.Close()
|
||||||
|
|
||||||
output := buf.String()
|
// Should NOT have emitted a warning for 0.0.0.0
|
||||||
if !strings.Contains(output, "WARNING") {
|
if strings.Contains(buf.String(), "WARNING") {
|
||||||
t.Error("Expected warning for binding to 0.0.0.0, but didn't find it in stderr")
|
t.Error("unexpected warning for :0 (should default to 127.0.0.1, not 0.0.0.0)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// The bound address must be on 127.0.0.1, not 0.0.0.0
|
||||||
|
host, _, _ := net.SplitHostPort(tr.listener.Addr().String())
|
||||||
|
if host != "127.0.0.1" {
|
||||||
|
t.Errorf("expected 127.0.0.1, got %s", host)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServeTCP_Connection(t *testing.T) {
|
func TestTransportTcp_ServeTCP_Good(t *testing.T) {
|
||||||
s, err := New(Options{})
|
s, err := New(Options{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
t.Fatalf("failed to create service: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
// Use a random port for testing to avoid collisions
|
tr, err := NewTCPTransport("127.0.0.1:0")
|
||||||
addr := "127.0.0.1:0"
|
|
||||||
|
|
||||||
// Create transport first to get the actual address if we use :0
|
|
||||||
tr, err := NewTCPTransport(addr)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to create transport: %v", err)
|
t.Fatalf("failed to create transport: %v", err)
|
||||||
}
|
}
|
||||||
actualAddr := tr.listener.Addr().String()
|
actualAddr := tr.listener.Addr().String()
|
||||||
tr.listener.Close() // Close it so ServeTCP can re-open it or use the same address
|
tr.listener.Close()
|
||||||
|
|
||||||
// Start server in background
|
errCh := make(chan error, 1)
|
||||||
errCh := make(chan error, 1)
|
go func() {
|
||||||
go func() {
|
errCh <- s.ServeTCP(ctx, actualAddr)
|
||||||
errCh <- s.ServeTCP(ctx, actualAddr)
|
}()
|
||||||
}()
|
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
// Give it a moment to start
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
conn, err := net.Dial("tcp", actualAddr)
|
||||||
|
if err != nil {
|
||||||
// Connect to the server
|
t.Fatalf("failed to connect to server: %v", err)
|
||||||
conn, err := net.Dial("tcp", actualAddr)
|
}
|
||||||
if err != nil {
|
defer conn.Close()
|
||||||
t.Fatalf("Failed to connect to server: %v", err)
|
|
||||||
}
|
_, err = conn.Write([]byte("{}\n"))
|
||||||
defer conn.Close()
|
if err != nil {
|
||||||
|
t.Errorf("failed to write to connection: %v", err)
|
||||||
// Verify we can write to it
|
}
|
||||||
_, err = conn.Write([]byte("{}\n"))
|
|
||||||
if err != nil {
|
cancel()
|
||||||
t.Errorf("Failed to write to connection: %v", err)
|
if err = <-errCh; err != nil {
|
||||||
}
|
t.Errorf("ServeTCP returned error: %v", err)
|
||||||
|
}
|
||||||
// Shutdown server
|
}
|
||||||
cancel()
|
|
||||||
err = <-errCh
|
func TestTransportTcp_ServeTCP_Bad(t *testing.T) {
|
||||||
if err != nil {
|
// ServeTCP with an already-in-use address returns an error
|
||||||
t.Errorf("ServeTCP returned error: %v", err)
|
tr, err := NewTCPTransport("127.0.0.1:0")
|
||||||
}
|
if err != nil {
|
||||||
}
|
t.Fatalf("failed to create transport: %v", err)
|
||||||
|
}
|
||||||
func TestRun_TCPTrigger(t *testing.T) {
|
defer tr.listener.Close()
|
||||||
s, err := New(Options{})
|
addr := tr.listener.Addr().String()
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
s, err := New(Options{})
|
||||||
}
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create service: %v", err)
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
}
|
||||||
defer cancel()
|
|
||||||
|
ctx := context.Background()
|
||||||
// Set MCP_ADDR to empty to trigger default TCP
|
err = s.ServeTCP(ctx, addr)
|
||||||
os.Setenv("MCP_ADDR", "")
|
if err == nil {
|
||||||
defer os.Unsetenv("MCP_ADDR")
|
t.Error("expected error when binding to already-in-use port, got nil")
|
||||||
|
}
|
||||||
// We use a random port for testing, but Run will try to use 127.0.0.1:9100 by default if we don't override.
|
}
|
||||||
// Since 9100 might be in use, we'll set MCP_ADDR to use :0 (random port)
|
|
||||||
os.Setenv("MCP_ADDR", "127.0.0.1:0")
|
func TestTransportTcp_ServeTCP_Ugly(t *testing.T) {
|
||||||
|
// Multiple simultaneous clients can connect and write without error
|
||||||
errCh := make(chan error, 1)
|
s, err := New(Options{})
|
||||||
go func() {
|
if err != nil {
|
||||||
errCh <- s.Run(ctx)
|
t.Fatalf("failed to create service: %v", err)
|
||||||
}()
|
}
|
||||||
|
|
||||||
// Give it a moment to start
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
time.Sleep(100 * time.Millisecond)
|
defer cancel()
|
||||||
|
|
||||||
// Since we can't easily get the actual port used by Run (it's internal),
|
tr, err := NewTCPTransport("127.0.0.1:0")
|
||||||
// we just verify it didn't immediately fail.
|
if err != nil {
|
||||||
select {
|
t.Fatalf("failed to create transport: %v", err)
|
||||||
case err := <-errCh:
|
|
||||||
t.Fatalf("Run failed immediately: %v", err)
|
|
||||||
default:
|
|
||||||
// still running, which is good
|
|
||||||
}
|
|
||||||
|
|
||||||
cancel()
|
|
||||||
_ = <-errCh
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestServeTCP_MultipleConnections(t *testing.T) {
|
|
||||||
s, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
addr := "127.0.0.1:0"
|
|
||||||
tr, err := NewTCPTransport(addr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create transport: %v", err)
|
|
||||||
}
|
}
|
||||||
actualAddr := tr.listener.Addr().String()
|
actualAddr := tr.listener.Addr().String()
|
||||||
tr.listener.Close()
|
tr.listener.Close()
|
||||||
|
|
@ -182,23 +152,68 @@ func TestServeTCP_MultipleConnections(t *testing.T) {
|
||||||
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
// Connect multiple clients
|
|
||||||
const numClients = 3
|
const numClients = 3
|
||||||
for i := range numClients {
|
for i := range numClients {
|
||||||
conn, err := net.Dial("tcp", actualAddr)
|
conn, err := net.Dial("tcp", actualAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Client %d failed to connect: %v", i, err)
|
t.Fatalf("client %d failed to connect: %v", i, err)
|
||||||
}
|
}
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
_, err = conn.Write([]byte("{}\n"))
|
_, err = conn.Write([]byte("{}\n"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Client %d failed to write: %v", i, err)
|
t.Errorf("client %d failed to write: %v", i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
cancel()
|
cancel()
|
||||||
err = <-errCh
|
if err = <-errCh; err != nil {
|
||||||
if err != nil {
|
|
||||||
t.Errorf("ServeTCP returned error: %v", err)
|
t.Errorf("ServeTCP returned error: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTransportTcp_Run_Good(t *testing.T) {
|
||||||
|
s, err := New(Options{})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create service: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
os.Setenv("MCP_ADDR", "127.0.0.1:0")
|
||||||
|
defer os.Unsetenv("MCP_ADDR")
|
||||||
|
|
||||||
|
errCh := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
errCh <- s.Run(ctx)
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case err := <-errCh:
|
||||||
|
t.Fatalf("Run failed immediately: %v", err)
|
||||||
|
default:
|
||||||
|
// still running, which is good
|
||||||
|
}
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
_ = <-errCh
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTransportTcp_Warning_Ugly(t *testing.T) {
|
||||||
|
// Binding to 0.0.0.0 emits a security warning
|
||||||
|
var buf bytes.Buffer
|
||||||
|
old := setDiagWriter(&buf)
|
||||||
|
defer setDiagWriter(old)
|
||||||
|
|
||||||
|
tr, err := NewTCPTransport("0.0.0.0:0")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create transport: %v", err)
|
||||||
|
}
|
||||||
|
defer tr.listener.Close()
|
||||||
|
|
||||||
|
if !strings.Contains(buf.String(), "WARNING") {
|
||||||
|
t.Error("expected security warning for 0.0.0.0 binding, got none")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,3 @@
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
|
||||||
|
|
||||||
package mcp
|
package mcp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
|
||||||
|
|
@ -1,47 +0,0 @@
|
||||||
package mcp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestRun_Good_UnixTrigger(t *testing.T) {
|
|
||||||
s, err := New(Options{})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
socketPath := shortSocketPath(t, "run")
|
|
||||||
t.Setenv("MCP_UNIX_SOCKET", socketPath)
|
|
||||||
t.Setenv("MCP_HTTP_ADDR", "")
|
|
||||||
t.Setenv("MCP_ADDR", "")
|
|
||||||
|
|
||||||
errCh := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
errCh <- s.Run(ctx)
|
|
||||||
}()
|
|
||||||
|
|
||||||
var conn net.Conn
|
|
||||||
deadline := time.Now().Add(2 * time.Second)
|
|
||||||
for time.Now().Before(deadline) {
|
|
||||||
conn, err = net.DialTimeout("unix", socketPath, 200*time.Millisecond)
|
|
||||||
if err == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
time.Sleep(50 * time.Millisecond)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to connect to Unix socket at %s: %v", socketPath, err)
|
|
||||||
}
|
|
||||||
conn.Close()
|
|
||||||
|
|
||||||
cancel()
|
|
||||||
if err := <-errCh; err != nil {
|
|
||||||
t.Fatalf("Run failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -140,75 +140,6 @@ List all database tables in the application.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### describe_table
|
|
||||||
|
|
||||||
Describe a database table, including its columns and indexes.
|
|
||||||
|
|
||||||
**Description:** Describe a database table, including columns and indexes
|
|
||||||
|
|
||||||
**Parameters:**
|
|
||||||
|
|
||||||
| Name | Type | Required | Description |
|
|
||||||
|------|------|----------|-------------|
|
|
||||||
| `table` | string | Yes | Database table name to inspect |
|
|
||||||
|
|
||||||
**Example Request:**
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"tool": "describe_table",
|
|
||||||
"arguments": {
|
|
||||||
"table": "users"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
**Success Response:**
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"table": "users",
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"field": "id",
|
|
||||||
"type": "bigint unsigned",
|
|
||||||
"collation": null,
|
|
||||||
"null": "NO",
|
|
||||||
"key": "PRI",
|
|
||||||
"default": null,
|
|
||||||
"extra": "auto_increment",
|
|
||||||
"privileges": "select,insert,update,references",
|
|
||||||
"comment": "Primary key"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"indexes": [
|
|
||||||
{
|
|
||||||
"name": "PRIMARY",
|
|
||||||
"unique": true,
|
|
||||||
"type": "BTREE",
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id",
|
|
||||||
"order": 1,
|
|
||||||
"collation": "A",
|
|
||||||
"cardinality": 1,
|
|
||||||
"sub_part": null,
|
|
||||||
"nullable": "",
|
|
||||||
"comment": ""
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
**Security Notes:**
|
|
||||||
- Table names are validated to allow only letters, numbers, and underscores
|
|
||||||
- System tables are blocked
|
|
||||||
- Table access may be filtered based on configuration
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Commerce Tools
|
## Commerce Tools
|
||||||
|
|
||||||
### get_billing_status
|
### get_billing_status
|
||||||
|
|
@ -759,7 +690,6 @@ curl -X POST https://api.example.com/mcp/tools/call \
|
||||||
### Query Tools
|
### Query Tools
|
||||||
- `query_database` - Execute SQL queries
|
- `query_database` - Execute SQL queries
|
||||||
- `list_tables` - List database tables
|
- `list_tables` - List database tables
|
||||||
- `describe_table` - Inspect table columns and indexes
|
|
||||||
|
|
||||||
### Commerce Tools
|
### Commerce Tools
|
||||||
- `get_billing_status` - Get subscription status
|
- `get_billing_status` - Get subscription status
|
||||||
|
|
|
||||||
|
|
@ -113,8 +113,6 @@ class Boot extends ServiceProvider
|
||||||
->where('id', '[a-z0-9-]+');
|
->where('id', '[a-z0-9-]+');
|
||||||
Route::get('servers/{id}/tools', [Controllers\McpApiController::class, 'tools'])->name('servers.tools')
|
Route::get('servers/{id}/tools', [Controllers\McpApiController::class, 'tools'])->name('servers.tools')
|
||||||
->where('id', '[a-z0-9-]+');
|
->where('id', '[a-z0-9-]+');
|
||||||
Route::get('servers/{id}/resources', [Controllers\McpApiController::class, 'resources'])->name('servers.resources')
|
|
||||||
->where('id', '[a-z0-9-]+');
|
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -6,9 +6,6 @@ namespace Core\Mcp\Controllers;
|
||||||
|
|
||||||
use Core\Front\Controller;
|
use Core\Front\Controller;
|
||||||
use Core\Mcp\Services\McpQuotaService;
|
use Core\Mcp\Services\McpQuotaService;
|
||||||
use Core\Mod\Agentic\Models\AgentPlan;
|
|
||||||
use Core\Mod\Agentic\Models\AgentSession;
|
|
||||||
use Core\Mod\Content\Models\ContentItem;
|
|
||||||
use Illuminate\Http\JsonResponse;
|
use Illuminate\Http\JsonResponse;
|
||||||
use Illuminate\Http\Request;
|
use Illuminate\Http\Request;
|
||||||
use Illuminate\Support\Facades\Cache;
|
use Illuminate\Support\Facades\Cache;
|
||||||
|
|
@ -16,7 +13,6 @@ use Core\Api\Models\ApiKey;
|
||||||
use Core\Mcp\Models\McpApiRequest;
|
use Core\Mcp\Models\McpApiRequest;
|
||||||
use Core\Mcp\Models\McpToolCall;
|
use Core\Mcp\Models\McpToolCall;
|
||||||
use Core\Mcp\Services\McpWebhookDispatcher;
|
use Core\Mcp\Services\McpWebhookDispatcher;
|
||||||
use Core\Tenant\Models\Workspace;
|
|
||||||
use Symfony\Component\Yaml\Yaml;
|
use Symfony\Component\Yaml\Yaml;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -82,26 +78,6 @@ class McpApiController extends Controller
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* List resources for a specific server.
|
|
||||||
*
|
|
||||||
* GET /api/v1/mcp/servers/{id}/resources
|
|
||||||
*/
|
|
||||||
public function resources(Request $request, string $id): JsonResponse
|
|
||||||
{
|
|
||||||
$server = $this->loadServerFull($id);
|
|
||||||
|
|
||||||
if (! $server) {
|
|
||||||
return response()->json(['error' => 'Server not found'], 404);
|
|
||||||
}
|
|
||||||
|
|
||||||
return response()->json([
|
|
||||||
'server' => $id,
|
|
||||||
'resources' => array_values($server['resources'] ?? []),
|
|
||||||
'count' => count($server['resources'] ?? []),
|
|
||||||
]);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Execute a tool on an MCP server.
|
* Execute a tool on an MCP server.
|
||||||
*
|
*
|
||||||
|
|
@ -199,6 +175,8 @@ class McpApiController extends Controller
|
||||||
* Read a resource from an MCP server.
|
* Read a resource from an MCP server.
|
||||||
*
|
*
|
||||||
* GET /api/v1/mcp/resources/{uri}
|
* GET /api/v1/mcp/resources/{uri}
|
||||||
|
*
|
||||||
|
* NOTE: Resource reading is not yet implemented. Returns 501 Not Implemented.
|
||||||
*/
|
*/
|
||||||
public function resource(Request $request, string $uri): JsonResponse
|
public function resource(Request $request, string $uri): JsonResponse
|
||||||
{
|
{
|
||||||
|
|
@ -207,289 +185,19 @@ class McpApiController extends Controller
|
||||||
return response()->json(['error' => 'Invalid resource URI format'], 400);
|
return response()->json(['error' => 'Invalid resource URI format'], 400);
|
||||||
}
|
}
|
||||||
|
|
||||||
$scheme = $matches[1];
|
$serverId = $matches[1];
|
||||||
$content = $this->readResourceContent($scheme, $uri);
|
|
||||||
if ($content === null) {
|
|
||||||
return response()->json([
|
|
||||||
'error' => 'not_found',
|
|
||||||
'message' => 'Resource not found',
|
|
||||||
'uri' => $uri,
|
|
||||||
], 404);
|
|
||||||
}
|
|
||||||
|
|
||||||
return response()->json([
|
$server = $this->loadServerFull($serverId);
|
||||||
'uri' => $uri,
|
|
||||||
'content' => $content,
|
|
||||||
]);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Resolve a supported MCP resource URI into response content.
|
|
||||||
*/
|
|
||||||
protected function readResourceContent(string $scheme, string $uri): ?array
|
|
||||||
{
|
|
||||||
if (str_starts_with($uri, 'plans://')) {
|
|
||||||
return [
|
|
||||||
'mimeType' => 'text/markdown',
|
|
||||||
'text' => $this->resourcePlanContent($uri),
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
if (str_starts_with($uri, 'sessions://')) {
|
|
||||||
return [
|
|
||||||
'mimeType' => 'text/markdown',
|
|
||||||
'text' => $this->resourceSessionContent($uri),
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
if (str_starts_with($uri, 'content://')) {
|
|
||||||
return [
|
|
||||||
'mimeType' => 'text/markdown',
|
|
||||||
'text' => $this->resourceContentItem($uri),
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
return $this->resourceServerContent($scheme, $uri);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Render plan resources.
|
|
||||||
*/
|
|
||||||
protected function resourcePlanContent(string $uri): string
|
|
||||||
{
|
|
||||||
if ($uri === 'plans://all') {
|
|
||||||
$plans = AgentPlan::with('agentPhases')->notArchived()->orderBy('updated_at', 'desc')->get();
|
|
||||||
|
|
||||||
$md = "# Work Plans\n\n";
|
|
||||||
$md .= '**Total:** '.$plans->count()." plan(s)\n\n";
|
|
||||||
|
|
||||||
foreach ($plans->groupBy('status') as $status => $group) {
|
|
||||||
$md .= '## '.ucfirst($status).' ('.$group->count().")\n\n";
|
|
||||||
|
|
||||||
foreach ($group as $plan) {
|
|
||||||
$progress = $plan->getProgress();
|
|
||||||
$md .= "- **[{$plan->slug}]** {$plan->title} - {$progress['percentage']}%\n";
|
|
||||||
}
|
|
||||||
$md .= "\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
return $md;
|
|
||||||
}
|
|
||||||
|
|
||||||
$path = substr($uri, 9); // Remove "plans://"
|
|
||||||
$parts = explode('/', $path);
|
|
||||||
$slug = $parts[0];
|
|
||||||
|
|
||||||
$plan = AgentPlan::with('agentPhases')->where('slug', $slug)->first();
|
|
||||||
if (! $plan) {
|
|
||||||
return "Plan not found: {$slug}";
|
|
||||||
}
|
|
||||||
|
|
||||||
if (count($parts) === 3 && $parts[1] === 'phases') {
|
|
||||||
$phase = $plan->agentPhases()->where('order', (int) $parts[2])->first();
|
|
||||||
if (! $phase) {
|
|
||||||
return "Phase not found: {$parts[2]}";
|
|
||||||
}
|
|
||||||
|
|
||||||
$md = "# Phase {$phase->order}: {$phase->name}\n\n";
|
|
||||||
$md .= "**Status:** {$phase->getStatusIcon()} {$phase->status}\n\n";
|
|
||||||
|
|
||||||
if ($phase->description) {
|
|
||||||
$md .= "{$phase->description}\n\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
$md .= "## Tasks\n\n";
|
|
||||||
|
|
||||||
foreach ($phase->tasks ?? [] as $task) {
|
|
||||||
$status = is_string($task) ? 'pending' : ($task['status'] ?? 'pending');
|
|
||||||
$name = is_string($task) ? $task : ($task['name'] ?? 'Unknown');
|
|
||||||
$icon = $status === 'completed' ? '✅' : '⬜';
|
|
||||||
$md .= "- {$icon} {$name}\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
return $md;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (count($parts) === 3 && $parts[1] === 'state') {
|
|
||||||
$state = $plan->states()->where('key', $parts[2])->first();
|
|
||||||
if (! $state) {
|
|
||||||
return "State key not found: {$parts[2]}";
|
|
||||||
}
|
|
||||||
|
|
||||||
return $state->getFormattedValue();
|
|
||||||
}
|
|
||||||
|
|
||||||
return $plan->toMarkdown();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Render session resources.
|
|
||||||
*/
|
|
||||||
protected function resourceSessionContent(string $uri): string
|
|
||||||
{
|
|
||||||
$path = substr($uri, 11); // Remove "sessions://"
|
|
||||||
$parts = explode('/', $path);
|
|
||||||
|
|
||||||
if (count($parts) !== 2 || $parts[1] !== 'context') {
|
|
||||||
return "Resource not found: {$uri}";
|
|
||||||
}
|
|
||||||
|
|
||||||
$session = AgentSession::where('session_id', $parts[0])->first();
|
|
||||||
if (! $session) {
|
|
||||||
return "Session not found: {$parts[0]}";
|
|
||||||
}
|
|
||||||
|
|
||||||
$md = "# Session: {$session->session_id}\n\n";
|
|
||||||
$md .= "**Agent:** {$session->agent_type}\n";
|
|
||||||
$md .= "**Status:** {$session->status}\n";
|
|
||||||
$md .= "**Duration:** {$session->getDurationFormatted()}\n\n";
|
|
||||||
|
|
||||||
if ($session->plan) {
|
|
||||||
$md .= "## Plan\n\n";
|
|
||||||
$md .= "**{$session->plan->title}** ({$session->plan->slug})\n\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
$context = $session->getHandoffContext();
|
|
||||||
if (! empty($context['summary'])) {
|
|
||||||
$md .= "## Summary\n\n{$context['summary']}\n\n";
|
|
||||||
}
|
|
||||||
if (! empty($context['next_steps'])) {
|
|
||||||
$md .= "## Next Steps\n\n";
|
|
||||||
foreach ((array) $context['next_steps'] as $step) {
|
|
||||||
$md .= "- {$step}\n";
|
|
||||||
}
|
|
||||||
$md .= "\n";
|
|
||||||
}
|
|
||||||
if (! empty($context['blockers'])) {
|
|
||||||
$md .= "## Blockers\n\n";
|
|
||||||
foreach ((array) $context['blockers'] as $blocker) {
|
|
||||||
$md .= "- {$blocker}\n";
|
|
||||||
}
|
|
||||||
$md .= "\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
return $md;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Render content resources.
|
|
||||||
*/
|
|
||||||
protected function resourceContentItem(string $uri): string
|
|
||||||
{
|
|
||||||
if (! str_starts_with($uri, 'content://')) {
|
|
||||||
return "Resource not found: {$uri}";
|
|
||||||
}
|
|
||||||
|
|
||||||
$path = substr($uri, 10); // Remove "content://"
|
|
||||||
$parts = explode('/', $path, 2);
|
|
||||||
if (count($parts) < 2) {
|
|
||||||
return "Invalid URI format. Expected: content://{workspace}/{slug}";
|
|
||||||
}
|
|
||||||
|
|
||||||
[$workspaceSlug, $contentSlug] = $parts;
|
|
||||||
|
|
||||||
$workspace = Workspace::where('slug', $workspaceSlug)
|
|
||||||
->orWhere('id', $workspaceSlug)
|
|
||||||
->first();
|
|
||||||
|
|
||||||
if (! $workspace) {
|
|
||||||
return "Workspace not found: {$workspaceSlug}";
|
|
||||||
}
|
|
||||||
|
|
||||||
$item = ContentItem::forWorkspace($workspace->id)
|
|
||||||
->native()
|
|
||||||
->where('slug', $contentSlug)
|
|
||||||
->first();
|
|
||||||
|
|
||||||
if (! $item && is_numeric($contentSlug)) {
|
|
||||||
$item = ContentItem::forWorkspace($workspace->id)
|
|
||||||
->native()
|
|
||||||
->find($contentSlug);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (! $item) {
|
|
||||||
return "Content not found: {$contentSlug}";
|
|
||||||
}
|
|
||||||
|
|
||||||
$item->load(['author', 'taxonomies']);
|
|
||||||
|
|
||||||
$md = "---\n";
|
|
||||||
$md .= "title: \"{$item->title}\"\n";
|
|
||||||
$md .= "slug: {$item->slug}\n";
|
|
||||||
$md .= "workspace: {$workspace->slug}\n";
|
|
||||||
$md .= "type: {$item->type}\n";
|
|
||||||
$md .= "status: {$item->status}\n";
|
|
||||||
|
|
||||||
if ($item->author) {
|
|
||||||
$md .= "author: {$item->author->name}\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
$categories = $item->categories->pluck('name')->all();
|
|
||||||
if (! empty($categories)) {
|
|
||||||
$md .= 'categories: ['.implode(', ', $categories)."]\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
$tags = $item->tags->pluck('name')->all();
|
|
||||||
if (! empty($tags)) {
|
|
||||||
$md .= 'tags: ['.implode(', ', $tags)."]\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
if ($item->publish_at) {
|
|
||||||
$md .= 'publish_at: '.$item->publish_at->toIso8601String()."\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
$md .= 'created_at: '.$item->created_at->toIso8601String()."\n";
|
|
||||||
$md .= 'updated_at: '.$item->updated_at->toIso8601String()."\n";
|
|
||||||
|
|
||||||
if ($item->seo_meta) {
|
|
||||||
if (isset($item->seo_meta['title'])) {
|
|
||||||
$md .= "seo_title: \"{$item->seo_meta['title']}\"\n";
|
|
||||||
}
|
|
||||||
if (isset($item->seo_meta['description'])) {
|
|
||||||
$md .= "seo_description: \"{$item->seo_meta['description']}\"\n";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
$md .= "---\n\n";
|
|
||||||
|
|
||||||
if ($item->excerpt) {
|
|
||||||
$md .= "> {$item->excerpt}\n\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
$content = $item->content_markdown
|
|
||||||
?? strip_tags($item->content_html_clean ?? $item->content_html_original ?? '');
|
|
||||||
$md .= $content;
|
|
||||||
|
|
||||||
return $md;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Render server-defined static resources when available.
|
|
||||||
*/
|
|
||||||
protected function resourceServerContent(string $scheme, string $uri): ?array
|
|
||||||
{
|
|
||||||
$server = $this->loadServerFull($scheme);
|
|
||||||
if (! $server) {
|
if (! $server) {
|
||||||
return null;
|
return response()->json(['error' => 'Server not found'], 404);
|
||||||
}
|
}
|
||||||
|
|
||||||
foreach ($server['resources'] ?? [] as $resource) {
|
// Resource reading not yet implemented
|
||||||
if (($resource['uri'] ?? null) !== $uri) {
|
return response()->json([
|
||||||
continue;
|
'error' => 'not_implemented',
|
||||||
}
|
'message' => 'MCP resource reading is not yet implemented. Use tool calls instead.',
|
||||||
|
'uri' => $uri,
|
||||||
$text = $resource['content']['text'] ?? $resource['text'] ?? null;
|
], 501);
|
||||||
if ($text === null) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
return [
|
|
||||||
'mimeType' => $resource['mimeType'] ?? 'text/plain',
|
|
||||||
'text' => $text,
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
return null;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
||||||
|
|
@ -197,35 +197,6 @@ class OpenApiGenerator
|
||||||
],
|
],
|
||||||
];
|
];
|
||||||
|
|
||||||
$paths['/servers/{serverId}/resources'] = [
|
|
||||||
'get' => [
|
|
||||||
'tags' => ['Discovery'],
|
|
||||||
'summary' => 'List resources for a server',
|
|
||||||
'operationId' => 'listServerResources',
|
|
||||||
'security' => [['bearerAuth' => []], ['apiKeyAuth' => []]],
|
|
||||||
'parameters' => [
|
|
||||||
[
|
|
||||||
'name' => 'serverId',
|
|
||||||
'in' => 'path',
|
|
||||||
'required' => true,
|
|
||||||
'schema' => ['type' => 'string'],
|
|
||||||
],
|
|
||||||
],
|
|
||||||
'responses' => [
|
|
||||||
'200' => [
|
|
||||||
'description' => 'List of resources',
|
|
||||||
'content' => [
|
|
||||||
'application/json' => [
|
|
||||||
'schema' => [
|
|
||||||
'$ref' => '#/components/schemas/ResourceList',
|
|
||||||
],
|
|
||||||
],
|
|
||||||
],
|
|
||||||
],
|
|
||||||
],
|
|
||||||
],
|
|
||||||
];
|
|
||||||
|
|
||||||
// Execution endpoint
|
// Execution endpoint
|
||||||
$paths['/tools/call'] = [
|
$paths['/tools/call'] = [
|
||||||
'post' => [
|
'post' => [
|
||||||
|
|
@ -431,17 +402,6 @@ class OpenApiGenerator
|
||||||
],
|
],
|
||||||
],
|
],
|
||||||
],
|
],
|
||||||
'ResourceList' => [
|
|
||||||
'type' => 'object',
|
|
||||||
'properties' => [
|
|
||||||
'server' => ['type' => 'string'],
|
|
||||||
'resources' => [
|
|
||||||
'type' => 'array',
|
|
||||||
'items' => ['$ref' => '#/components/schemas/Resource'],
|
|
||||||
],
|
|
||||||
'count' => ['type' => 'integer'],
|
|
||||||
],
|
|
||||||
],
|
|
||||||
];
|
];
|
||||||
|
|
||||||
return $schemas;
|
return $schemas;
|
||||||
|
|
|
||||||
|
|
@ -33,9 +33,6 @@ class ToolRegistry
|
||||||
'query' => 'SELECT id, name FROM users LIMIT 10',
|
'query' => 'SELECT id, name FROM users LIMIT 10',
|
||||||
],
|
],
|
||||||
'list_tables' => [],
|
'list_tables' => [],
|
||||||
'describe_table' => [
|
|
||||||
'table' => 'users',
|
|
||||||
],
|
|
||||||
'list_routes' => [],
|
'list_routes' => [],
|
||||||
'list_sites' => [],
|
'list_sites' => [],
|
||||||
'get_stats' => [],
|
'get_stats' => [],
|
||||||
|
|
|
||||||
|
|
@ -1,151 +0,0 @@
|
||||||
<?php
|
|
||||||
|
|
||||||
declare(strict_types=1);
|
|
||||||
|
|
||||||
namespace Core\Mcp\Tools;
|
|
||||||
|
|
||||||
use Illuminate\Contracts\JsonSchema\JsonSchema;
|
|
||||||
use Illuminate\Support\Facades\Config;
|
|
||||||
use Illuminate\Support\Facades\DB;
|
|
||||||
use Laravel\Mcp\Request;
|
|
||||||
use Laravel\Mcp\Response;
|
|
||||||
use Laravel\Mcp\Server\Tool;
|
|
||||||
|
|
||||||
class DescribeTable extends Tool
|
|
||||||
{
|
|
||||||
protected string $description = 'Describe a database table, including columns and indexes';
|
|
||||||
|
|
||||||
public function handle(Request $request): Response
|
|
||||||
{
|
|
||||||
$table = trim((string) $request->input('table', ''));
|
|
||||||
|
|
||||||
if ($table === '') {
|
|
||||||
return $this->errorResponse('Table name is required');
|
|
||||||
}
|
|
||||||
|
|
||||||
if (! $this->isValidTableName($table)) {
|
|
||||||
return $this->errorResponse('Invalid table name. Use only letters, numbers, and underscores.');
|
|
||||||
}
|
|
||||||
|
|
||||||
if ($this->isBlockedTable($table)) {
|
|
||||||
return $this->errorResponse(sprintf("Access to table '%s' is not permitted", $table));
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
$columns = DB::select("SHOW FULL COLUMNS FROM `{$table}`");
|
|
||||||
$indexes = DB::select("SHOW INDEX FROM `{$table}`");
|
|
||||||
} catch (\Throwable $e) {
|
|
||||||
report($e);
|
|
||||||
|
|
||||||
return $this->errorResponse(sprintf('Unable to describe table "%s"', $table));
|
|
||||||
}
|
|
||||||
|
|
||||||
$result = [
|
|
||||||
'table' => $table,
|
|
||||||
'columns' => array_map(
|
|
||||||
fn (object $column): array => $this->normaliseColumn((array) $column),
|
|
||||||
$columns
|
|
||||||
),
|
|
||||||
'indexes' => $this->normaliseIndexes($indexes),
|
|
||||||
];
|
|
||||||
|
|
||||||
return Response::text(json_encode($result, JSON_PRETTY_PRINT));
|
|
||||||
}
|
|
||||||
|
|
||||||
public function schema(JsonSchema $schema): array
|
|
||||||
{
|
|
||||||
return [
|
|
||||||
'table' => $schema->string('Database table name to inspect'),
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Validate the table name before interpolating it into SQL.
|
|
||||||
*/
|
|
||||||
private function isValidTableName(string $table): bool
|
|
||||||
{
|
|
||||||
return (bool) preg_match('/^[A-Za-z0-9_]+$/', $table);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Check whether the table is blocked by configuration or is a system table.
|
|
||||||
*/
|
|
||||||
private function isBlockedTable(string $table): bool
|
|
||||||
{
|
|
||||||
$blockedTables = Config::get('mcp.database.blocked_tables', []);
|
|
||||||
|
|
||||||
if (in_array($table, $blockedTables, true)) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
$systemTables = ['information_schema', 'mysql', 'performance_schema', 'sys'];
|
|
||||||
|
|
||||||
return in_array(strtolower($table), $systemTables, true);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Normalise a SHOW FULL COLUMNS row into a predictable array shape.
|
|
||||||
*
|
|
||||||
* @param array<string, mixed> $column
|
|
||||||
* @return array<string, mixed>
|
|
||||||
*/
|
|
||||||
private function normaliseColumn(array $column): array
|
|
||||||
{
|
|
||||||
return [
|
|
||||||
'field' => $column['Field'] ?? null,
|
|
||||||
'type' => $column['Type'] ?? null,
|
|
||||||
'collation' => $column['Collation'] ?? null,
|
|
||||||
'null' => $column['Null'] ?? null,
|
|
||||||
'key' => $column['Key'] ?? null,
|
|
||||||
'default' => $column['Default'] ?? null,
|
|
||||||
'extra' => $column['Extra'] ?? null,
|
|
||||||
'privileges' => $column['Privileges'] ?? null,
|
|
||||||
'comment' => $column['Comment'] ?? null,
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Group SHOW INDEX rows by index name.
|
|
||||||
*
|
|
||||||
* @param array<int, object> $indexes
|
|
||||||
* @return array<int, array<string, mixed>>
|
|
||||||
*/
|
|
||||||
private function normaliseIndexes(array $indexes): array
|
|
||||||
{
|
|
||||||
$grouped = [];
|
|
||||||
|
|
||||||
foreach ($indexes as $index) {
|
|
||||||
$row = (array) $index;
|
|
||||||
$name = (string) ($row['Key_name'] ?? 'unknown');
|
|
||||||
|
|
||||||
if (! isset($grouped[$name])) {
|
|
||||||
$grouped[$name] = [
|
|
||||||
'name' => $name,
|
|
||||||
'unique' => ! (bool) ($row['Non_unique'] ?? 1),
|
|
||||||
'type' => $row['Index_type'] ?? null,
|
|
||||||
'columns' => [],
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
$grouped[$name]['columns'][] = [
|
|
||||||
'name' => $row['Column_name'] ?? null,
|
|
||||||
'order' => $row['Seq_in_index'] ?? null,
|
|
||||||
'collation' => $row['Collation'] ?? null,
|
|
||||||
'cardinality' => $row['Cardinality'] ?? null,
|
|
||||||
'sub_part' => $row['Sub_part'] ?? null,
|
|
||||||
'nullable' => $row['Null'] ?? null,
|
|
||||||
'comment' => $row['Comment'] ?? null,
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
return array_values($grouped);
|
|
||||||
}
|
|
||||||
|
|
||||||
private function errorResponse(string $message): Response
|
|
||||||
{
|
|
||||||
return Response::text(json_encode([
|
|
||||||
'error' => $message,
|
|
||||||
'code' => 'VALIDATION_ERROR',
|
|
||||||
]));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,113 +0,0 @@
|
||||||
<?php
|
|
||||||
|
|
||||||
declare(strict_types=1);
|
|
||||||
|
|
||||||
namespace Core\Mcp\Tests\Unit;
|
|
||||||
|
|
||||||
use Core\Mcp\Tools\DescribeTable;
|
|
||||||
use Illuminate\Support\Facades\Config;
|
|
||||||
use Illuminate\Support\Facades\DB;
|
|
||||||
use Laravel\Mcp\Request;
|
|
||||||
use Mockery;
|
|
||||||
use Tests\TestCase;
|
|
||||||
|
|
||||||
class DescribeTableTest extends TestCase
|
|
||||||
{
|
|
||||||
protected function tearDown(): void
|
|
||||||
{
|
|
||||||
Mockery::close();
|
|
||||||
parent::tearDown();
|
|
||||||
}
|
|
||||||
|
|
||||||
public function test_handle_returns_columns_and_indexes_for_a_table(): void
|
|
||||||
{
|
|
||||||
DB::shouldReceive('select')
|
|
||||||
->once()
|
|
||||||
->with('SHOW FULL COLUMNS FROM `users`')
|
|
||||||
->andReturn([
|
|
||||||
(object) [
|
|
||||||
'Field' => 'id',
|
|
||||||
'Type' => 'bigint unsigned',
|
|
||||||
'Null' => 'NO',
|
|
||||||
'Key' => 'PRI',
|
|
||||||
'Default' => null,
|
|
||||||
'Extra' => 'auto_increment',
|
|
||||||
'Privileges' => 'select,insert,update,references',
|
|
||||||
'Comment' => 'Primary key',
|
|
||||||
],
|
|
||||||
(object) [
|
|
||||||
'Field' => 'email',
|
|
||||||
'Type' => 'varchar(255)',
|
|
||||||
'Null' => 'NO',
|
|
||||||
'Key' => 'UNI',
|
|
||||||
'Default' => null,
|
|
||||||
'Extra' => '',
|
|
||||||
'Privileges' => 'select,insert,update,references',
|
|
||||||
'Comment' => '',
|
|
||||||
],
|
|
||||||
]);
|
|
||||||
|
|
||||||
DB::shouldReceive('select')
|
|
||||||
->once()
|
|
||||||
->with('SHOW INDEX FROM `users`')
|
|
||||||
->andReturn([
|
|
||||||
(object) [
|
|
||||||
'Key_name' => 'PRIMARY',
|
|
||||||
'Non_unique' => 0,
|
|
||||||
'Index_type' => 'BTREE',
|
|
||||||
'Column_name' => 'id',
|
|
||||||
'Seq_in_index' => 1,
|
|
||||||
'Collation' => 'A',
|
|
||||||
'Cardinality' => 1,
|
|
||||||
'Sub_part' => null,
|
|
||||||
'Null' => '',
|
|
||||||
'Comment' => '',
|
|
||||||
],
|
|
||||||
(object) [
|
|
||||||
'Key_name' => 'users_email_unique',
|
|
||||||
'Non_unique' => 0,
|
|
||||||
'Index_type' => 'BTREE',
|
|
||||||
'Column_name' => 'email',
|
|
||||||
'Seq_in_index' => 1,
|
|
||||||
'Collation' => 'A',
|
|
||||||
'Cardinality' => 1,
|
|
||||||
'Sub_part' => null,
|
|
||||||
'Null' => '',
|
|
||||||
'Comment' => '',
|
|
||||||
],
|
|
||||||
]);
|
|
||||||
|
|
||||||
$tool = new DescribeTable();
|
|
||||||
$response = $tool->handle(new Request(['table' => 'users']));
|
|
||||||
$data = json_decode($response->getContent(), true, flags: JSON_THROW_ON_ERROR);
|
|
||||||
|
|
||||||
$this->assertSame('users', $data['table']);
|
|
||||||
$this->assertCount(2, $data['columns']);
|
|
||||||
$this->assertSame('id', $data['columns'][0]['field']);
|
|
||||||
$this->assertSame('bigint unsigned', $data['columns'][0]['type']);
|
|
||||||
$this->assertSame('PRIMARY', $data['indexes'][0]['name']);
|
|
||||||
$this->assertSame(['id'], array_column($data['indexes'][0]['columns'], 'name'));
|
|
||||||
}
|
|
||||||
|
|
||||||
public function test_handle_rejects_invalid_table_names(): void
|
|
||||||
{
|
|
||||||
$tool = new DescribeTable();
|
|
||||||
$response = $tool->handle(new Request(['table' => 'users; DROP TABLE users']));
|
|
||||||
$data = json_decode($response->getContent(), true, flags: JSON_THROW_ON_ERROR);
|
|
||||||
|
|
||||||
$this->assertSame('VALIDATION_ERROR', $data['code']);
|
|
||||||
$this->assertStringContainsString('Invalid table name', $data['error']);
|
|
||||||
}
|
|
||||||
|
|
||||||
public function test_handle_blocks_system_tables(): void
|
|
||||||
{
|
|
||||||
Config::set('mcp.database.blocked_tables', []);
|
|
||||||
|
|
||||||
$tool = new DescribeTable();
|
|
||||||
$response = $tool->handle(new Request(['table' => 'information_schema']));
|
|
||||||
$data = json_decode($response->getContent(), true, flags: JSON_THROW_ON_ERROR);
|
|
||||||
|
|
||||||
$this->assertSame('VALIDATION_ERROR', $data['code']);
|
|
||||||
$this->assertStringContainsString('not permitted', $data['error']);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,67 +0,0 @@
|
||||||
<?php
|
|
||||||
|
|
||||||
declare(strict_types=1);
|
|
||||||
|
|
||||||
namespace Core\Mcp\Tests\Unit;
|
|
||||||
|
|
||||||
use Core\Mcp\Controllers\McpApiController;
|
|
||||||
use Core\Mcp\Services\OpenApiGenerator;
|
|
||||||
use Illuminate\Http\JsonResponse;
|
|
||||||
use Illuminate\Http\Request;
|
|
||||||
use Tests\TestCase;
|
|
||||||
|
|
||||||
class McpResourceListTest extends TestCase
|
|
||||||
{
|
|
||||||
public function test_resources_endpoint_returns_server_resources(): void
|
|
||||||
{
|
|
||||||
$controller = new class extends McpApiController {
|
|
||||||
protected function loadServerFull(string $id): ?array
|
|
||||||
{
|
|
||||||
if ($id !== 'demo-server') {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
return [
|
|
||||||
'id' => 'demo-server',
|
|
||||||
'resources' => [
|
|
||||||
[
|
|
||||||
'uri' => 'content://workspace/article',
|
|
||||||
'name' => 'Article',
|
|
||||||
'description' => 'Published article',
|
|
||||||
'mimeType' => 'text/markdown',
|
|
||||||
],
|
|
||||||
[
|
|
||||||
'uri' => 'plans://all',
|
|
||||||
'name' => 'Plans',
|
|
||||||
'description' => 'Work plan index',
|
|
||||||
'mimeType' => 'text/markdown',
|
|
||||||
],
|
|
||||||
],
|
|
||||||
];
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
$response = $controller->resources(Request::create('/api/v1/mcp/servers/demo-server/resources', 'GET'), 'demo-server');
|
|
||||||
|
|
||||||
$this->assertInstanceOf(JsonResponse::class, $response);
|
|
||||||
$this->assertSame(200, $response->getStatusCode());
|
|
||||||
|
|
||||||
$data = $response->getData(true);
|
|
||||||
$this->assertSame('demo-server', $data['server']);
|
|
||||||
$this->assertSame(2, $data['count']);
|
|
||||||
$this->assertSame('content://workspace/article', $data['resources'][0]['uri']);
|
|
||||||
$this->assertSame('plans://all', $data['resources'][1]['uri']);
|
|
||||||
}
|
|
||||||
|
|
||||||
public function test_openapi_includes_resource_list_endpoint(): void
|
|
||||||
{
|
|
||||||
$schema = (new OpenApiGenerator)->generate();
|
|
||||||
|
|
||||||
$this->assertArrayHasKey('/servers/{serverId}/resources', $schema['paths']);
|
|
||||||
$this->assertArrayHasKey('ResourceList', $schema['components']['schemas']);
|
|
||||||
$this->assertSame(
|
|
||||||
'#/components/schemas/ResourceList',
|
|
||||||
$schema['paths']['/servers/{serverId}/resources']['get']['responses']['200']['content']['application/json']['schema']['$ref']
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Loading…
Add table
Reference in a new issue