Merge pull request 'feat(agentci): package dispatch for multi-agent deployment' (#39) from feat/agentci-packaging into new

Virgil 2026-02-09 11:25:48 +00:00
commit 00dfd27072
30 changed files with 1839 additions and 1051 deletions


@@ -0,0 +1,336 @@
package ai
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/host-uk/core/pkg/agentci"
"github.com/host-uk/core/pkg/cli"
"github.com/host-uk/core/pkg/config"
)
// AddAgentCommands registers the 'agent' subcommand group under 'ai'.
func AddAgentCommands(parent *cli.Command) {
agentCmd := &cli.Command{
Use: "agent",
Short: "Manage AgentCI dispatch targets",
}
agentCmd.AddCommand(agentAddCmd())
agentCmd.AddCommand(agentListCmd())
agentCmd.AddCommand(agentStatusCmd())
agentCmd.AddCommand(agentLogsCmd())
agentCmd.AddCommand(agentSetupCmd())
agentCmd.AddCommand(agentRemoveCmd())
parent.AddCommand(agentCmd)
}
func loadConfig() (*config.Config, error) {
return config.New()
}
func agentAddCmd() *cli.Command {
cmd := &cli.Command{
Use: "add <name> <user@host>",
Short: "Add an agent to the config",
Args: cli.ExactArgs(2),
RunE: func(cmd *cli.Command, args []string) error {
name := args[0]
host := args[1]
forgejoUser, _ := cmd.Flags().GetString("forgejo-user")
if forgejoUser == "" {
forgejoUser = name
}
queueDir, _ := cmd.Flags().GetString("queue-dir")
if queueDir == "" {
queueDir = "/home/claude/ai-work/queue"
}
// Test SSH connectivity.
// TODO: Replace exec ssh with charmbracelet/ssh native Go client + keygen.
fmt.Printf("Testing SSH to %s... ", host)
out, err := exec.Command("ssh",
"-o", "StrictHostKeyChecking=accept-new",
"-o", "ConnectTimeout=10",
host, "echo ok").CombinedOutput()
if err != nil {
fmt.Println(errorStyle.Render("FAILED"))
return fmt.Errorf("SSH failed: %s", strings.TrimSpace(string(out)))
}
fmt.Println(successStyle.Render("OK"))
cfg, err := loadConfig()
if err != nil {
return err
}
ac := agentci.AgentConfig{
Host: host,
QueueDir: queueDir,
ForgejoUser: forgejoUser,
Active: true,
}
if err := agentci.SaveAgent(cfg, name, ac); err != nil {
return err
}
fmt.Printf("Agent %s added (%s)\n", successStyle.Render(name), host)
return nil
},
}
cmd.Flags().String("forgejo-user", "", "Forgejo username (defaults to agent name)")
cmd.Flags().String("queue-dir", "", "Remote queue directory (default: /home/claude/ai-work/queue)")
return cmd
}
func agentListCmd() *cli.Command {
return &cli.Command{
Use: "list",
Short: "List configured agents",
RunE: func(cmd *cli.Command, args []string) error {
cfg, err := loadConfig()
if err != nil {
return err
}
agents, err := agentci.ListAgents(cfg)
if err != nil {
return err
}
if len(agents) == 0 {
fmt.Println(dimStyle.Render("No agents configured. Use 'core ai agent add' to add one."))
return nil
}
table := cli.NewTable("NAME", "HOST", "FORGEJO USER", "ACTIVE", "QUEUE")
for name, ac := range agents {
active := dimStyle.Render("no")
if ac.Active {
active = successStyle.Render("yes")
}
// Quick SSH check for queue depth.
// TODO: Replace exec ssh with charmbracelet/ssh native Go client.
queue := dimStyle.Render("-")
out, err := exec.Command("ssh",
"-o", "StrictHostKeyChecking=accept-new",
"-o", "ConnectTimeout=5",
ac.Host,
fmt.Sprintf("ls %s/ticket-*.json 2>/dev/null | wc -l", ac.QueueDir),
).Output()
if err == nil {
n := strings.TrimSpace(string(out))
if n != "0" {
queue = n
} else {
queue = "0"
}
}
table.AddRow(name, ac.Host, ac.ForgejoUser, active, queue)
}
table.Render()
return nil
},
}
}
func agentStatusCmd() *cli.Command {
return &cli.Command{
Use: "status <name>",
Short: "Check agent status via SSH",
Args: cli.ExactArgs(1),
RunE: func(cmd *cli.Command, args []string) error {
name := args[0]
cfg, err := loadConfig()
if err != nil {
return err
}
agents, err := agentci.ListAgents(cfg)
if err != nil {
return err
}
ac, ok := agents[name]
if !ok {
return fmt.Errorf("agent %q not found", name)
}
script := `
echo "=== Queue ==="
ls ~/ai-work/queue/ticket-*.json 2>/dev/null | wc -l
echo "=== Active ==="
ls ~/ai-work/active/ticket-*.json 2>/dev/null || echo "none"
echo "=== Done ==="
ls ~/ai-work/done/ticket-*.json 2>/dev/null | wc -l
echo "=== Lock ==="
if [ -f ~/ai-work/.runner.lock ]; then
PID=$(cat ~/ai-work/.runner.lock)
if kill -0 "$PID" 2>/dev/null; then
echo "RUNNING (PID $PID)"
else
echo "STALE (PID $PID)"
fi
else
echo "IDLE"
fi
`
// TODO: Replace exec ssh with charmbracelet/ssh native Go client.
sshCmd := exec.Command("ssh",
"-o", "StrictHostKeyChecking=accept-new",
"-o", "ConnectTimeout=10",
ac.Host, script)
sshCmd.Stdout = os.Stdout
sshCmd.Stderr = os.Stderr
return sshCmd.Run()
},
}
}
func agentLogsCmd() *cli.Command {
cmd := &cli.Command{
Use: "logs <name>",
Short: "Stream agent runner logs",
Args: cli.ExactArgs(1),
RunE: func(cmd *cli.Command, args []string) error {
name := args[0]
follow, _ := cmd.Flags().GetBool("follow")
lines, _ := cmd.Flags().GetInt("lines")
cfg, err := loadConfig()
if err != nil {
return err
}
agents, err := agentci.ListAgents(cfg)
if err != nil {
return err
}
ac, ok := agents[name]
if !ok {
return fmt.Errorf("agent %q not found", name)
}
// TODO: Replace exec ssh with charmbracelet/ssh native Go client.
tailArgs := []string{
"-o", "StrictHostKeyChecking=accept-new",
"-o", "ConnectTimeout=10",
ac.Host,
}
if follow {
tailArgs = append(tailArgs, fmt.Sprintf("tail -f -n %d ~/ai-work/logs/runner.log", lines))
} else {
tailArgs = append(tailArgs, fmt.Sprintf("tail -n %d ~/ai-work/logs/runner.log", lines))
}
sshCmd := exec.Command("ssh", tailArgs...)
sshCmd.Stdout = os.Stdout
sshCmd.Stderr = os.Stderr
sshCmd.Stdin = os.Stdin
return sshCmd.Run()
},
}
cmd.Flags().BoolP("follow", "f", false, "Follow log output")
cmd.Flags().IntP("lines", "n", 50, "Number of lines to show")
return cmd
}
func agentSetupCmd() *cli.Command {
return &cli.Command{
Use: "setup <name>",
Short: "Bootstrap agent machine (create dirs, copy runner, install cron)",
Args: cli.ExactArgs(1),
RunE: func(cmd *cli.Command, args []string) error {
name := args[0]
cfg, err := loadConfig()
if err != nil {
return err
}
agents, err := agentci.ListAgents(cfg)
if err != nil {
return err
}
ac, ok := agents[name]
if !ok {
return fmt.Errorf("agent %q not found — use 'core ai agent add' first", name)
}
// Find the setup script relative to the binary or in known locations.
scriptPath := findSetupScript()
if scriptPath == "" {
return fmt.Errorf("agent-setup.sh not found — expected in scripts/ directory")
}
fmt.Printf("Setting up %s on %s...\n", name, ac.Host)
setupCmd := exec.Command("bash", scriptPath, ac.Host)
setupCmd.Stdout = os.Stdout
setupCmd.Stderr = os.Stderr
if err := setupCmd.Run(); err != nil {
return fmt.Errorf("setup failed: %w", err)
}
fmt.Println(successStyle.Render("Setup complete!"))
return nil
},
}
}
func agentRemoveCmd() *cli.Command {
return &cli.Command{
Use: "remove <name>",
Short: "Remove an agent from config",
Args: cli.ExactArgs(1),
RunE: func(cmd *cli.Command, args []string) error {
name := args[0]
cfg, err := loadConfig()
if err != nil {
return err
}
if err := agentci.RemoveAgent(cfg, name); err != nil {
return err
}
fmt.Printf("Agent %s removed.\n", name)
return nil
},
}
}
// findSetupScript looks for agent-setup.sh in common locations.
func findSetupScript() string {
// Relative to executable.
exe, _ := os.Executable()
if exe != "" {
dir := filepath.Dir(exe)
candidates := []string{
filepath.Join(dir, "scripts", "agent-setup.sh"),
filepath.Join(dir, "..", "scripts", "agent-setup.sh"),
}
for _, c := range candidates {
if _, err := os.Stat(c); err == nil {
return c
}
}
}
// Working directory.
cwd, _ := os.Getwd()
if cwd != "" {
p := filepath.Join(cwd, "scripts", "agent-setup.sh")
if _, err := os.Stat(p); err == nil {
return p
}
}
return ""
}
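
The TODOs in this file call for replacing the exec'd ssh binary with a native Go client. That replacement is not part of this commit; the following is a minimal illustrative sketch using the lower-level golang.org/x/crypto/ssh package, assuming key-based auth and taking user and host separately (the code above passes combined user@host strings):

package ai

import (
	"os"
	"time"

	"golang.org/x/crypto/ssh"
)

// runRemote is an illustrative stand-in for the exec'd ssh calls above.
// It assumes key-based auth with a private key on disk; the host key callback
// is a placeholder and would need real verification in production.
func runRemote(user, host, keyPath, command string) (string, error) {
	key, err := os.ReadFile(keyPath)
	if err != nil {
		return "", err
	}
	signer, err := ssh.ParsePrivateKey(key)
	if err != nil {
		return "", err
	}
	cfg := &ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder only
		Timeout:         10 * time.Second,
	}
	client, err := ssh.Dial("tcp", host+":22", cfg)
	if err != nil {
		return "", err
	}
	defer client.Close()
	session, err := client.NewSession()
	if err != nil {
		return "", err
	}
	defer session.Close()
	out, err := session.CombinedOutput(command)
	return string(out), err
}

Something of this shape could back the CombinedOutput/Output calls in agentAddCmd, agentListCmd, and agentStatusCmd without changing the surrounding logic.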


@@ -66,6 +66,9 @@ func initCommands() {
// Add metrics subcommand (core ai metrics)
addMetricsCommand(aiCmd)
// Add agent management commands (core ai agent ...)
AddAgentCommands(aiCmd)
}
// AddAICommands registers the 'ai' command and all subcommands.


@@ -11,9 +11,12 @@ import (
"syscall"
"time"
"github.com/host-uk/core/pkg/agentci"
"github.com/host-uk/core/pkg/cli"
"github.com/host-uk/core/pkg/config"
"github.com/host-uk/core/pkg/forge"
"github.com/host-uk/core/pkg/jobrunner"
"github.com/host-uk/core/pkg/jobrunner/github"
forgejosource "github.com/host-uk/core/pkg/jobrunner/forgejo"
"github.com/host-uk/core/pkg/jobrunner/handlers"
)
@@ -33,11 +36,6 @@ func startHeadless() {
ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer cancel()
// TODO: Updater integration — the internal/cmd/updater package cannot be
// imported from the core-ide module due to Go's internal package restriction
// (separate modules). Move updater to pkg/updater or export a public API to
// enable auto-update in headless mode.
// Journal
journalDir := filepath.Join(os.Getenv("HOME"), ".core", "journal")
journal, err := jobrunner.NewJournal(journalDir)
@@ -45,32 +43,52 @@ func startHeadless() {
log.Fatalf("Failed to create journal: %v", err)
}
// GitHub source — repos from CORE_REPOS env var or default
// Forge client
forgeURL, forgeToken, _ := forge.ResolveConfig("", "")
forgeClient, err := forge.New(forgeURL, forgeToken)
if err != nil {
log.Fatalf("Failed to create forge client: %v", err)
}
// Forgejo source — repos from CORE_REPOS env var or default
repos := parseRepoList(os.Getenv("CORE_REPOS"))
if len(repos) == 0 {
repos = []string{"host-uk/core", "host-uk/core-php", "host-uk/core-tenant", "host-uk/core-admin"}
}
ghSource := github.NewGitHubSource(github.Config{
source := forgejosource.New(forgejosource.Config{
Repos: repos,
})
}, forgeClient)
// Handlers (order matters — first match wins)
publishDraft := handlers.NewPublishDraftHandler(nil, "")
sendFix := handlers.NewSendFixCommandHandler(nil, "")
resolveThreads := handlers.NewResolveThreadsHandler(nil, "")
enableAutoMerge := handlers.NewEnableAutoMergeHandler()
tickParent := handlers.NewTickParentHandler()
publishDraft := handlers.NewPublishDraftHandler(forgeClient)
sendFix := handlers.NewSendFixCommandHandler(forgeClient)
dismissReviews := handlers.NewDismissReviewsHandler(forgeClient)
enableAutoMerge := handlers.NewEnableAutoMergeHandler(forgeClient)
tickParent := handlers.NewTickParentHandler(forgeClient)
// Agent dispatch — load targets from ~/.core/config.yaml
cfg, cfgErr := config.New()
var agentTargets map[string]handlers.AgentTarget
if cfgErr == nil {
agentTargets, _ = agentci.LoadAgents(cfg)
}
if agentTargets == nil {
agentTargets = map[string]handlers.AgentTarget{}
}
log.Printf("Loaded %d agent targets", len(agentTargets))
dispatch := handlers.NewDispatchHandler(forgeClient, forgeURL, forgeToken, agentTargets)
// Build poller
poller := jobrunner.NewPoller(jobrunner.PollerConfig{
Sources: []jobrunner.JobSource{ghSource},
Sources: []jobrunner.JobSource{source},
Handlers: []jobrunner.JobHandler{
publishDraft,
sendFix,
resolveThreads,
dismissReviews,
enableAutoMerge,
tickParent,
dispatch, // Last — only matches NeedsCoding signals
},
Journal: journal,
PollInterval: 60 * time.Second,

pkg/agentci/config.go (Normal file, 100 lines)

@@ -0,0 +1,100 @@
// Package agentci provides configuration and management for AgentCI dispatch targets.
package agentci
import (
"fmt"
"github.com/host-uk/core/pkg/config"
"github.com/host-uk/core/pkg/jobrunner/handlers"
"github.com/host-uk/core/pkg/log"
)
// AgentConfig represents a single agent machine in the config file.
type AgentConfig struct {
Host string `yaml:"host" mapstructure:"host"`
QueueDir string `yaml:"queue_dir" mapstructure:"queue_dir"`
ForgejoUser string `yaml:"forgejo_user" mapstructure:"forgejo_user"`
Model string `yaml:"model" mapstructure:"model"` // claude model: sonnet, haiku, opus (default: sonnet)
Runner string `yaml:"runner" mapstructure:"runner"` // runner binary: claude, codex (default: claude)
Active bool `yaml:"active" mapstructure:"active"`
}
// LoadAgents reads agent targets from config and returns a map suitable for the dispatch handler.
// Returns an empty map (not an error) if no agents are configured.
func LoadAgents(cfg *config.Config) (map[string]handlers.AgentTarget, error) {
var agents map[string]AgentConfig
if err := cfg.Get("agentci.agents", &agents); err != nil {
// No config is fine — just no agents.
return map[string]handlers.AgentTarget{}, nil
}
targets := make(map[string]handlers.AgentTarget)
for name, ac := range agents {
if !ac.Active {
continue
}
if ac.Host == "" {
return nil, log.E("agentci.LoadAgents", fmt.Sprintf("agent %q: host is required", name), nil)
}
queueDir := ac.QueueDir
if queueDir == "" {
queueDir = "/home/claude/ai-work/queue"
}
model := ac.Model
if model == "" {
model = "sonnet"
}
runner := ac.Runner
if runner == "" {
runner = "claude"
}
targets[name] = handlers.AgentTarget{
Host: ac.Host,
QueueDir: queueDir,
Model: model,
Runner: runner,
}
}
return targets, nil
}
// SaveAgent writes an agent config entry to the config file.
func SaveAgent(cfg *config.Config, name string, ac AgentConfig) error {
key := fmt.Sprintf("agentci.agents.%s", name)
data := map[string]any{
"host": ac.Host,
"queue_dir": ac.QueueDir,
"forgejo_user": ac.ForgejoUser,
"active": ac.Active,
}
if ac.Model != "" {
data["model"] = ac.Model
}
if ac.Runner != "" {
data["runner"] = ac.Runner
}
return cfg.Set(key, data)
}
// RemoveAgent removes an agent from the config file.
func RemoveAgent(cfg *config.Config, name string) error {
var agents map[string]AgentConfig
if err := cfg.Get("agentci.agents", &agents); err != nil {
return log.E("agentci.RemoveAgent", "no agents configured", err)
}
if _, ok := agents[name]; !ok {
return log.E("agentci.RemoveAgent", fmt.Sprintf("agent %q not found", name), nil)
}
delete(agents, name)
return cfg.Set("agentci.agents", agents)
}
// ListAgents returns all configured agents (active and inactive).
func ListAgents(cfg *config.Config) (map[string]AgentConfig, error) {
var agents map[string]AgentConfig
if err := cfg.Get("agentci.agents", &agents); err != nil {
return map[string]AgentConfig{}, nil
}
return agents, nil
}
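
For reference, the yaml/mapstructure tags above imply an agentci section in ~/.core/config.yaml shaped roughly like the example below. The values are illustrative only (the agent name and host are borrowed from the dispatch handler tests later in this diff); the defaults match those applied in LoadAgents:

// Illustrative only: the config shape implied by AgentConfig's tags.
const exampleAgentsConfig = `
agentci:
  agents:
    darbs-claude:
      host: claude@192.168.0.201
      queue_dir: /home/claude/ai-work/queue
      forgejo_user: darbs-claude
      model: sonnet
      runner: claude
      active: true
`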


@@ -16,8 +16,9 @@ import (
// Client wraps the Forgejo SDK client with config-based auth.
type Client struct {
api *forgejo.Client
url string
api *forgejo.Client
url string
token string
}
// New creates a new Forgejo API client for the given URL and token.
@@ -27,7 +28,7 @@ func New(url, token string) (*Client, error) {
return nil, log.E("forge.New", "failed to create client", err)
}
return &Client{api: api, url: url}, nil
return &Client{api: api, url: url, token: token}, nil
}
// API exposes the underlying SDK client for direct access.


@@ -117,3 +117,26 @@ func (c *Client) GetPullRequest(owner, repo string, number int64) (*forgejo.Pull
return pr, nil
}
// CreateIssueComment posts a comment on an issue or pull request.
func (c *Client) CreateIssueComment(owner, repo string, issue int64, body string) error {
_, _, err := c.api.CreateIssueComment(owner, repo, issue, forgejo.CreateIssueCommentOption{
Body: body,
})
if err != nil {
return log.E("forge.CreateIssueComment", "failed to create comment", err)
}
return nil
}
// CloseIssue closes an issue by setting its state to closed.
func (c *Client) CloseIssue(owner, repo string, number int64) error {
closed := forgejo.StateClosed
_, _, err := c.api.EditIssue(owner, repo, number, forgejo.EditIssueOption{
State: &closed,
})
if err != nil {
return log.E("forge.CloseIssue", "failed to close issue", err)
}
return nil
}

pkg/forge/prs.go (Normal file, 109 lines)

@@ -0,0 +1,109 @@
package forge
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2"
"github.com/host-uk/core/pkg/log"
)
// MergePullRequest merges a pull request with the given method ("squash", "rebase", "merge").
func (c *Client) MergePullRequest(owner, repo string, index int64, method string) error {
style := forgejo.MergeStyleMerge
switch method {
case "squash":
style = forgejo.MergeStyleSquash
case "rebase":
style = forgejo.MergeStyleRebase
}
merged, _, err := c.api.MergePullRequest(owner, repo, index, forgejo.MergePullRequestOption{
Style: style,
DeleteBranchAfterMerge: true,
})
if err != nil {
return log.E("forge.MergePullRequest", "failed to merge pull request", err)
}
if !merged {
return log.E("forge.MergePullRequest", fmt.Sprintf("merge returned false for %s/%s#%d", owner, repo, index), nil)
}
return nil
}
// SetPRDraft sets or clears the draft status on a pull request.
// The Forgejo SDK v2.2.0 doesn't expose the draft field on EditPullRequestOption,
// so we use a raw HTTP PATCH request.
func (c *Client) SetPRDraft(owner, repo string, index int64, draft bool) error {
payload := map[string]bool{"draft": draft}
body, err := json.Marshal(payload)
if err != nil {
return log.E("forge.SetPRDraft", "marshal payload", err)
}
url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls/%d", c.url, owner, repo, index)
req, err := http.NewRequest(http.MethodPatch, url, bytes.NewReader(body))
if err != nil {
return log.E("forge.SetPRDraft", "create request", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "token "+c.token)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return log.E("forge.SetPRDraft", "failed to update draft status", err)
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return log.E("forge.SetPRDraft", fmt.Sprintf("unexpected status %d", resp.StatusCode), nil)
}
return nil
}
// ListPRReviews returns all reviews for a pull request.
func (c *Client) ListPRReviews(owner, repo string, index int64) ([]*forgejo.PullReview, error) {
var all []*forgejo.PullReview
page := 1
for {
reviews, resp, err := c.api.ListPullReviews(owner, repo, index, forgejo.ListPullReviewsOptions{
ListOptions: forgejo.ListOptions{Page: page, PageSize: 50},
})
if err != nil {
return nil, log.E("forge.ListPRReviews", "failed to list reviews", err)
}
all = append(all, reviews...)
if resp == nil || page >= resp.LastPage {
break
}
page++
}
return all, nil
}
// GetCombinedStatus returns the combined commit status for a ref (SHA or branch).
func (c *Client) GetCombinedStatus(owner, repo string, ref string) (*forgejo.CombinedStatus, error) {
status, _, err := c.api.GetCombinedStatus(owner, repo, ref)
if err != nil {
return nil, log.E("forge.GetCombinedStatus", "failed to get combined status", err)
}
return status, nil
}
// DismissReview dismisses a pull request review by ID.
func (c *Client) DismissReview(owner, repo string, index, reviewID int64, message string) error {
_, err := c.api.DismissPullReview(owner, repo, index, reviewID, forgejo.DismissPullReviewOptions{
Message: message,
})
if err != nil {
return log.E("forge.DismissReview", "failed to dismiss review", err)
}
return nil
}


@@ -0,0 +1,114 @@
package forgejo
import (
"regexp"
"strconv"
forgejosdk "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2"
"github.com/host-uk/core/pkg/jobrunner"
)
// epicChildRe matches checklist items: - [ ] #42 or - [x] #42
var epicChildRe = regexp.MustCompile(`- \[([ x])\] #(\d+)`)
// parseEpicChildren extracts child issue numbers from an epic body's checklist.
func parseEpicChildren(body string) (unchecked []int, checked []int) {
matches := epicChildRe.FindAllStringSubmatch(body, -1)
for _, m := range matches {
num, err := strconv.Atoi(m[2])
if err != nil {
continue
}
if m[1] == "x" {
checked = append(checked, num)
} else {
unchecked = append(unchecked, num)
}
}
return unchecked, checked
}
// linkedPRRe matches "#N" references in PR bodies.
var linkedPRRe = regexp.MustCompile(`#(\d+)`)
// findLinkedPR finds the first PR whose body references the given issue number.
func findLinkedPR(prs []*forgejosdk.PullRequest, issueNumber int) *forgejosdk.PullRequest {
target := strconv.Itoa(issueNumber)
for _, pr := range prs {
matches := linkedPRRe.FindAllStringSubmatch(pr.Body, -1)
for _, m := range matches {
if m[1] == target {
return pr
}
}
}
return nil
}
// mapPRState maps Forgejo's PR state and merged flag to a canonical string.
func mapPRState(pr *forgejosdk.PullRequest) string {
if pr.HasMerged {
return "MERGED"
}
switch pr.State {
case forgejosdk.StateOpen:
return "OPEN"
case forgejosdk.StateClosed:
return "CLOSED"
default:
return "CLOSED"
}
}
// mapMergeable maps Forgejo's boolean Mergeable field to a canonical string.
func mapMergeable(pr *forgejosdk.PullRequest) string {
if pr.HasMerged {
return "UNKNOWN"
}
if pr.Mergeable {
return "MERGEABLE"
}
return "CONFLICTING"
}
// mapCombinedStatus maps a Forgejo CombinedStatus to SUCCESS/FAILURE/PENDING.
func mapCombinedStatus(cs *forgejosdk.CombinedStatus) string {
if cs == nil || cs.TotalCount == 0 {
return "PENDING"
}
switch cs.State {
case forgejosdk.StatusSuccess:
return "SUCCESS"
case forgejosdk.StatusFailure, forgejosdk.StatusError:
return "FAILURE"
default:
return "PENDING"
}
}
// buildSignal creates a PipelineSignal from Forgejo API data.
func buildSignal(
owner, repo string,
epicNumber, childNumber int,
pr *forgejosdk.PullRequest,
checkStatus string,
) *jobrunner.PipelineSignal {
sig := &jobrunner.PipelineSignal{
EpicNumber: epicNumber,
ChildNumber: childNumber,
PRNumber: int(pr.Index),
RepoOwner: owner,
RepoName: repo,
PRState: mapPRState(pr),
IsDraft: false, // SDK v2.2.0 doesn't expose Draft; treat as non-draft
Mergeable: mapMergeable(pr),
CheckStatus: checkStatus,
}
if pr.Head != nil {
sig.LastCommitSHA = pr.Head.Sha
}
return sig
}


@@ -0,0 +1,173 @@
package forgejo
import (
"context"
"fmt"
"strings"
"github.com/host-uk/core/pkg/forge"
"github.com/host-uk/core/pkg/jobrunner"
"github.com/host-uk/core/pkg/log"
)
// Config configures a ForgejoSource.
type Config struct {
Repos []string // "owner/repo" format
}
// ForgejoSource polls a Forgejo instance for pipeline signals from epic issues.
type ForgejoSource struct {
repos []string
forge *forge.Client
}
// New creates a ForgejoSource using the given forge client.
func New(cfg Config, client *forge.Client) *ForgejoSource {
return &ForgejoSource{
repos: cfg.Repos,
forge: client,
}
}
// Name returns the source identifier.
func (s *ForgejoSource) Name() string {
return "forgejo"
}
// Poll fetches epics and their linked PRs from all configured repositories,
// returning a PipelineSignal for each unchecked child that has a linked PR.
func (s *ForgejoSource) Poll(ctx context.Context) ([]*jobrunner.PipelineSignal, error) {
var signals []*jobrunner.PipelineSignal
for _, repoFull := range s.repos {
owner, repo, err := splitRepo(repoFull)
if err != nil {
log.Error("invalid repo format", "repo", repoFull, "err", err)
continue
}
repoSignals, err := s.pollRepo(ctx, owner, repo)
if err != nil {
log.Error("poll repo failed", "repo", repoFull, "err", err)
continue
}
signals = append(signals, repoSignals...)
}
return signals, nil
}
// Report posts the action result as a comment on the epic issue.
func (s *ForgejoSource) Report(ctx context.Context, result *jobrunner.ActionResult) error {
if result == nil {
return nil
}
status := "succeeded"
if !result.Success {
status = "failed"
}
body := fmt.Sprintf("**jobrunner** `%s` %s for #%d (PR #%d)", result.Action, status, result.ChildNumber, result.PRNumber)
if result.Error != "" {
body += fmt.Sprintf("\n\n```\n%s\n```", result.Error)
}
return s.forge.CreateIssueComment(result.RepoOwner, result.RepoName, int64(result.EpicNumber), body)
}
// pollRepo fetches epics and PRs for a single repository.
func (s *ForgejoSource) pollRepo(_ context.Context, owner, repo string) ([]*jobrunner.PipelineSignal, error) {
// Fetch epic issues (label=epic, state=open).
issues, err := s.forge.ListIssues(owner, repo, forge.ListIssuesOpts{State: "open"})
if err != nil {
return nil, log.E("forgejo.pollRepo", "fetch issues", err)
}
// Filter to epics only.
var epics []epicInfo
for _, issue := range issues {
for _, label := range issue.Labels {
if label.Name == "epic" {
epics = append(epics, epicInfo{
Number: int(issue.Index),
Body: issue.Body,
})
break
}
}
}
if len(epics) == 0 {
return nil, nil
}
// Fetch all open PRs (and also merged/closed to catch MERGED state).
prs, err := s.forge.ListPullRequests(owner, repo, "all")
if err != nil {
return nil, log.E("forgejo.pollRepo", "fetch PRs", err)
}
var signals []*jobrunner.PipelineSignal
for _, epic := range epics {
unchecked, _ := parseEpicChildren(epic.Body)
for _, childNum := range unchecked {
pr := findLinkedPR(prs, childNum)
if pr == nil {
// No PR yet — check if the child issue is assigned (needs coding).
childIssue, err := s.forge.GetIssue(owner, repo, int64(childNum))
if err != nil {
log.Error("fetch child issue failed", "repo", owner+"/"+repo, "issue", childNum, "err", err)
continue
}
if len(childIssue.Assignees) > 0 && childIssue.Assignees[0].UserName != "" {
sig := &jobrunner.PipelineSignal{
EpicNumber: epic.Number,
ChildNumber: childNum,
RepoOwner: owner,
RepoName: repo,
NeedsCoding: true,
Assignee: childIssue.Assignees[0].UserName,
IssueTitle: childIssue.Title,
IssueBody: childIssue.Body,
}
signals = append(signals, sig)
}
continue
}
// Get combined commit status for the PR's head SHA.
checkStatus := "PENDING"
if pr.Head != nil && pr.Head.Sha != "" {
cs, err := s.forge.GetCombinedStatus(owner, repo, pr.Head.Sha)
if err != nil {
log.Error("fetch combined status failed", "repo", owner+"/"+repo, "sha", pr.Head.Sha, "err", err)
} else {
checkStatus = mapCombinedStatus(cs)
}
}
sig := buildSignal(owner, repo, epic.Number, childNum, pr, checkStatus)
signals = append(signals, sig)
}
}
return signals, nil
}
type epicInfo struct {
Number int
Body string
}
// splitRepo parses "owner/repo" into its components.
func splitRepo(full string) (string, string, error) {
parts := strings.SplitN(full, "/", 2)
if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
return "", "", log.E("forgejo.splitRepo", fmt.Sprintf("expected owner/repo format, got %q", full), nil)
}
return parts[0], parts[1], nil
}


@@ -0,0 +1,177 @@
package forgejo
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/host-uk/core/pkg/forge"
"github.com/host-uk/core/pkg/jobrunner"
)
// withVersion wraps an HTTP handler to serve the Forgejo /api/v1/version
// endpoint that the SDK calls during NewClient initialization.
func withVersion(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.HasSuffix(r.URL.Path, "/version") {
w.Header().Set("Content-Type", "application/json")
_, _ = w.Write([]byte(`{"version":"9.0.0"}`))
return
}
next.ServeHTTP(w, r)
})
}
func newTestClient(t *testing.T, url string) *forge.Client {
t.Helper()
client, err := forge.New(url, "test-token")
require.NoError(t, err)
return client
}
func TestForgejoSource_Name(t *testing.T) {
s := New(Config{}, nil)
assert.Equal(t, "forgejo", s.Name())
}
func TestForgejoSource_Poll_Good(t *testing.T) {
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
w.Header().Set("Content-Type", "application/json")
switch {
// List issues — return one epic
case strings.Contains(path, "/issues"):
issues := []map[string]any{
{
"number": 10,
"body": "## Tasks\n- [ ] #11\n- [x] #12\n",
"labels": []map[string]string{{"name": "epic"}},
"state": "open",
},
}
_ = json.NewEncoder(w).Encode(issues)
// List PRs — return one open PR linked to #11
case strings.Contains(path, "/pulls"):
prs := []map[string]any{
{
"number": 20,
"body": "Fixes #11",
"state": "open",
"mergeable": true,
"merged": false,
"head": map[string]string{"sha": "abc123", "ref": "feature", "label": "feature"},
},
}
_ = json.NewEncoder(w).Encode(prs)
// Combined status
case strings.Contains(path, "/status"):
status := map[string]any{
"state": "success",
"total_count": 1,
"statuses": []map[string]any{{"status": "success", "context": "ci"}},
}
_ = json.NewEncoder(w).Encode(status)
default:
w.WriteHeader(http.StatusNotFound)
}
})))
defer srv.Close()
client := newTestClient(t, srv.URL)
s := New(Config{Repos: []string{"test-org/test-repo"}}, client)
signals, err := s.Poll(context.Background())
require.NoError(t, err)
require.Len(t, signals, 1)
sig := signals[0]
assert.Equal(t, 10, sig.EpicNumber)
assert.Equal(t, 11, sig.ChildNumber)
assert.Equal(t, 20, sig.PRNumber)
assert.Equal(t, "OPEN", sig.PRState)
assert.Equal(t, "MERGEABLE", sig.Mergeable)
assert.Equal(t, "SUCCESS", sig.CheckStatus)
assert.Equal(t, "test-org", sig.RepoOwner)
assert.Equal(t, "test-repo", sig.RepoName)
assert.Equal(t, "abc123", sig.LastCommitSHA)
}
func TestForgejoSource_Poll_NoEpics(t *testing.T) {
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(w).Encode([]any{})
})))
defer srv.Close()
client := newTestClient(t, srv.URL)
s := New(Config{Repos: []string{"test-org/test-repo"}}, client)
signals, err := s.Poll(context.Background())
require.NoError(t, err)
assert.Empty(t, signals)
}
func TestForgejoSource_Report_Good(t *testing.T) {
var capturedBody string
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
var body map[string]string
_ = json.NewDecoder(r.Body).Decode(&body)
capturedBody = body["body"]
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
})))
defer srv.Close()
client := newTestClient(t, srv.URL)
s := New(Config{}, client)
result := &jobrunner.ActionResult{
Action: "enable_auto_merge",
RepoOwner: "test-org",
RepoName: "test-repo",
EpicNumber: 10,
ChildNumber: 11,
PRNumber: 20,
Success: true,
}
err := s.Report(context.Background(), result)
require.NoError(t, err)
assert.Contains(t, capturedBody, "enable_auto_merge")
assert.Contains(t, capturedBody, "succeeded")
}
func TestParseEpicChildren(t *testing.T) {
body := "## Tasks\n- [x] #1\n- [ ] #7\n- [ ] #8\n- [x] #3\n"
unchecked, checked := parseEpicChildren(body)
assert.Equal(t, []int{7, 8}, unchecked)
assert.Equal(t, []int{1, 3}, checked)
}
func TestFindLinkedPR(t *testing.T) {
assert.Nil(t, findLinkedPR(nil, 7))
}
func TestSplitRepo(t *testing.T) {
owner, repo, err := splitRepo("host-uk/core")
require.NoError(t, err)
assert.Equal(t, "host-uk", owner)
assert.Equal(t, "core", repo)
_, _, err = splitRepo("invalid")
assert.Error(t, err)
_, _, err = splitRepo("")
assert.Error(t, err)
}


@@ -1,161 +0,0 @@
package github
import (
"regexp"
"strconv"
"time"
"github.com/host-uk/core/pkg/jobrunner"
)
// ghIssue is a minimal GitHub issue response.
type ghIssue struct {
Number int `json:"number"`
Title string `json:"title"`
Body string `json:"body"`
Labels []ghLabel `json:"labels"`
State string `json:"state"`
}
// ghLabel is a GitHub label.
type ghLabel struct {
Name string `json:"name"`
}
// ghPR is a minimal GitHub pull request response.
type ghPR struct {
Number int `json:"number"`
Title string `json:"title"`
Body string `json:"body"`
State string `json:"state"`
Draft bool `json:"draft"`
MergeableState string `json:"mergeable_state"`
Head ghRef `json:"head"`
}
// ghRef is a Git reference (branch head).
type ghRef struct {
SHA string `json:"sha"`
Ref string `json:"ref"`
}
// ghCheckSuites is the response for the check-suites endpoint.
type ghCheckSuites struct {
TotalCount int `json:"total_count"`
CheckSuites []ghCheckSuite `json:"check_suites"`
}
// ghCheckSuite is a single check suite.
type ghCheckSuite struct {
ID int `json:"id"`
Status string `json:"status"` // queued, in_progress, completed
Conclusion string `json:"conclusion"` // success, failure, neutral, cancelled, etc.
}
// epicChildRe matches checklist items in epic bodies: - [ ] #42 or - [x] #42
var epicChildRe = regexp.MustCompile(`- \[([ x])\] #(\d+)`)
// parseEpicChildren extracts child issue numbers from an epic body's checklist.
// Returns two slices: unchecked (pending) and checked (done) issue numbers.
func parseEpicChildren(body string) (unchecked []int, checked []int) {
matches := epicChildRe.FindAllStringSubmatch(body, -1)
for _, m := range matches {
num, err := strconv.Atoi(m[2])
if err != nil {
continue
}
if m[1] == "x" {
checked = append(checked, num)
} else {
unchecked = append(unchecked, num)
}
}
return unchecked, checked
}
// linkedPRRe matches "#N" references in PR bodies.
var linkedPRRe = regexp.MustCompile(`#(\d+)`)
// findLinkedPR finds the first PR whose body references the given issue number.
func findLinkedPR(prs []ghPR, issueNumber int) *ghPR {
target := strconv.Itoa(issueNumber)
for i := range prs {
matches := linkedPRRe.FindAllStringSubmatch(prs[i].Body, -1)
for _, m := range matches {
if m[1] == target {
return &prs[i]
}
}
}
return nil
}
// aggregateCheckStatus returns SUCCESS, FAILURE, or PENDING based on check suites.
func aggregateCheckStatus(suites []ghCheckSuite) string {
if len(suites) == 0 {
return "PENDING"
}
allComplete := true
for _, s := range suites {
if s.Status != "completed" {
allComplete = false
break
}
}
if !allComplete {
return "PENDING"
}
for _, s := range suites {
if s.Conclusion != "success" && s.Conclusion != "neutral" && s.Conclusion != "skipped" {
return "FAILURE"
}
}
return "SUCCESS"
}
// mergeableToString maps GitHub's mergeable_state to a canonical string.
func mergeableToString(state string) string {
switch state {
case "clean", "has_hooks", "unstable":
return "MERGEABLE"
case "dirty", "blocked":
return "CONFLICTING"
default:
return "UNKNOWN"
}
}
// buildSignal creates a PipelineSignal from parsed GitHub API data.
func buildSignal(
owner, repo string,
epicNumber, childNumber int,
pr *ghPR,
checkStatus string,
) *jobrunner.PipelineSignal {
prState := "OPEN"
switch pr.State {
case "closed":
prState = "CLOSED"
case "open":
prState = "OPEN"
}
return &jobrunner.PipelineSignal{
EpicNumber: epicNumber,
ChildNumber: childNumber,
PRNumber: pr.Number,
RepoOwner: owner,
RepoName: repo,
PRState: prState,
IsDraft: pr.Draft,
Mergeable: mergeableToString(pr.MergeableState),
CheckStatus: checkStatus,
LastCommitSHA: pr.Head.SHA,
LastCommitAt: time.Time{}, // Not available from list endpoint
LastReviewAt: time.Time{}, // Not available from list endpoint
}
}


@@ -1,196 +0,0 @@
package github
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"strings"
"sync"
"golang.org/x/oauth2"
"github.com/host-uk/core/pkg/jobrunner"
"github.com/host-uk/core/pkg/log"
)
// Config configures a GitHubSource.
type Config struct {
Repos []string // "owner/repo" format
APIURL string // override for testing (default: https://api.github.com)
}
// GitHubSource polls GitHub for pipeline signals from epic issues.
type GitHubSource struct {
repos []string
apiURL string
client *http.Client
etags map[string]string
mu sync.Mutex
}
// NewGitHubSource creates a GitHubSource from the given config.
func NewGitHubSource(cfg Config) *GitHubSource {
apiURL := cfg.APIURL
if apiURL == "" {
apiURL = "https://api.github.com"
}
// Build an authenticated HTTP client if GITHUB_TOKEN is set.
var client *http.Client
if token := os.Getenv("GITHUB_TOKEN"); token != "" {
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
client = oauth2.NewClient(context.Background(), ts)
} else {
client = http.DefaultClient
}
return &GitHubSource{
repos: cfg.Repos,
apiURL: strings.TrimRight(apiURL, "/"),
client: client,
etags: make(map[string]string),
}
}
// Name returns the source identifier.
func (g *GitHubSource) Name() string {
return "github"
}
// Poll fetches epics and their linked PRs from all configured repositories,
// returning a PipelineSignal for each unchecked child that has a linked PR.
func (g *GitHubSource) Poll(ctx context.Context) ([]*jobrunner.PipelineSignal, error) {
var signals []*jobrunner.PipelineSignal
for _, repoFull := range g.repos {
owner, repo, err := splitRepo(repoFull)
if err != nil {
log.Error("invalid repo format", "repo", repoFull, "err", err)
continue
}
repoSignals, err := g.pollRepo(ctx, owner, repo)
if err != nil {
log.Error("poll repo failed", "repo", repoFull, "err", err)
continue
}
signals = append(signals, repoSignals...)
}
return signals, nil
}
// Report is a no-op for the GitHub source.
func (g *GitHubSource) Report(_ context.Context, _ *jobrunner.ActionResult) error {
return nil
}
// pollRepo fetches epics and PRs for a single repository.
func (g *GitHubSource) pollRepo(ctx context.Context, owner, repo string) ([]*jobrunner.PipelineSignal, error) {
// Fetch epic issues (label=epic).
epicsURL := fmt.Sprintf("%s/repos/%s/%s/issues?labels=epic&state=open", g.apiURL, owner, repo)
var epics []ghIssue
notModified, err := g.fetchJSON(ctx, epicsURL, &epics)
if err != nil {
return nil, fmt.Errorf("fetch epics: %w", err)
}
if notModified {
log.Debug("epics not modified", "repo", owner+"/"+repo)
return nil, nil
}
if len(epics) == 0 {
return nil, nil
}
// Fetch open PRs.
prsURL := fmt.Sprintf("%s/repos/%s/%s/pulls?state=open", g.apiURL, owner, repo)
var prs []ghPR
_, err = g.fetchJSON(ctx, prsURL, &prs)
if err != nil {
return nil, fmt.Errorf("fetch PRs: %w", err)
}
var signals []*jobrunner.PipelineSignal
for _, epic := range epics {
unchecked, _ := parseEpicChildren(epic.Body)
for _, childNum := range unchecked {
pr := findLinkedPR(prs, childNum)
if pr == nil {
continue
}
// Fetch check suites for the PR's head SHA.
checksURL := fmt.Sprintf("%s/repos/%s/%s/commits/%s/check-suites", g.apiURL, owner, repo, pr.Head.SHA)
var checkResp ghCheckSuites
_, err := g.fetchJSON(ctx, checksURL, &checkResp)
if err != nil {
log.Error("fetch check suites failed", "repo", owner+"/"+repo, "sha", pr.Head.SHA, "err", err)
continue
}
checkStatus := aggregateCheckStatus(checkResp.CheckSuites)
sig := buildSignal(owner, repo, epic.Number, childNum, pr, checkStatus)
signals = append(signals, sig)
}
}
return signals, nil
}
// fetchJSON performs a GET request with ETag conditional headers.
// Returns true if the server responded with 304 Not Modified.
func (g *GitHubSource) fetchJSON(ctx context.Context, url string, target any) (bool, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return false, fmt.Errorf("create request: %w", err)
}
req.Header.Set("Accept", "application/vnd.github+json")
g.mu.Lock()
if etag, ok := g.etags[url]; ok {
req.Header.Set("If-None-Match", etag)
}
g.mu.Unlock()
resp, err := g.client.Do(req)
if err != nil {
return false, fmt.Errorf("execute request: %w", err)
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode == http.StatusNotModified {
return true, nil
}
if resp.StatusCode != http.StatusOK {
return false, fmt.Errorf("unexpected status %d for %s", resp.StatusCode, url)
}
// Store ETag for future conditional requests.
if etag := resp.Header.Get("ETag"); etag != "" {
g.mu.Lock()
g.etags[url] = etag
g.mu.Unlock()
}
if err := json.NewDecoder(resp.Body).Decode(target); err != nil {
return false, fmt.Errorf("decode response: %w", err)
}
return false, nil
}
// splitRepo parses "owner/repo" into its components.
func splitRepo(full string) (string, string, error) {
parts := strings.SplitN(full, "/", 2)
if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
return "", "", fmt.Errorf("expected owner/repo format, got %q", full)
}
return parts[0], parts[1], nil
}


@@ -1,270 +0,0 @@
package github
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGitHubSource_Name_Good(t *testing.T) {
src := NewGitHubSource(Config{Repos: []string{"owner/repo"}})
assert.Equal(t, "github", src.Name())
}
func TestGitHubSource_Poll_Good(t *testing.T) {
epic := ghIssue{
Number: 10,
Title: "Epic: feature rollout",
Body: "Tasks:\n- [ ] #5\n- [x] #6\n- [ ] #7",
Labels: []ghLabel{{Name: "epic"}},
State: "open",
}
pr5 := ghPR{
Number: 50,
Title: "Implement child #5",
Body: "Closes #5",
State: "open",
Draft: false,
MergeableState: "clean",
Head: ghRef{SHA: "abc123", Ref: "feature-5"},
}
// PR 7 has no linked reference to any child, so child #7 should not produce a signal.
pr99 := ghPR{
Number: 99,
Title: "Unrelated PR",
Body: "No issue links here",
State: "open",
Draft: false,
MergeableState: "dirty",
Head: ghRef{SHA: "def456", Ref: "feature-other"},
}
checkSuites := ghCheckSuites{
TotalCount: 1,
CheckSuites: []ghCheckSuite{
{ID: 1, Status: "completed", Conclusion: "success"},
},
}
mux := http.NewServeMux()
mux.HandleFunc("GET /repos/test-org/test-repo/issues", func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "epic", r.URL.Query().Get("labels"))
assert.Equal(t, "open", r.URL.Query().Get("state"))
w.Header().Set("ETag", `"epic-etag-1"`)
_ = json.NewEncoder(w).Encode([]ghIssue{epic})
})
mux.HandleFunc("GET /repos/test-org/test-repo/pulls", func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "open", r.URL.Query().Get("state"))
_ = json.NewEncoder(w).Encode([]ghPR{pr5, pr99})
})
mux.HandleFunc("GET /repos/test-org/test-repo/commits/abc123/check-suites", func(w http.ResponseWriter, _ *http.Request) {
_ = json.NewEncoder(w).Encode(checkSuites)
})
srv := httptest.NewServer(mux)
defer srv.Close()
src := NewGitHubSource(Config{
Repos: []string{"test-org/test-repo"},
APIURL: srv.URL,
})
signals, err := src.Poll(context.Background())
require.NoError(t, err)
// Only child #5 has a linked PR (pr5 references #5 in body).
// Child #7 is unchecked but no PR references it.
// Child #6 is checked so it's ignored.
require.Len(t, signals, 1)
sig := signals[0]
assert.Equal(t, 10, sig.EpicNumber)
assert.Equal(t, 5, sig.ChildNumber)
assert.Equal(t, 50, sig.PRNumber)
assert.Equal(t, "test-org", sig.RepoOwner)
assert.Equal(t, "test-repo", sig.RepoName)
assert.Equal(t, "OPEN", sig.PRState)
assert.Equal(t, false, sig.IsDraft)
assert.Equal(t, "MERGEABLE", sig.Mergeable)
assert.Equal(t, "SUCCESS", sig.CheckStatus)
assert.Equal(t, "abc123", sig.LastCommitSHA)
}
func TestGitHubSource_Poll_Good_NotModified(t *testing.T) {
callCount := 0
mux := http.NewServeMux()
mux.HandleFunc("GET /repos/test-org/test-repo/issues", func(w http.ResponseWriter, r *http.Request) {
callCount++
if callCount == 1 {
w.Header().Set("ETag", `"etag-v1"`)
_ = json.NewEncoder(w).Encode([]ghIssue{})
} else {
// Second call should have If-None-Match.
assert.Equal(t, `"etag-v1"`, r.Header.Get("If-None-Match"))
w.WriteHeader(http.StatusNotModified)
}
})
srv := httptest.NewServer(mux)
defer srv.Close()
src := NewGitHubSource(Config{
Repos: []string{"test-org/test-repo"},
APIURL: srv.URL,
})
// First poll — gets empty list, stores ETag.
signals, err := src.Poll(context.Background())
require.NoError(t, err)
assert.Empty(t, signals)
// Second poll — sends If-None-Match, gets 304.
signals, err = src.Poll(context.Background())
require.NoError(t, err)
assert.Empty(t, signals)
assert.Equal(t, 2, callCount)
}
func TestParseEpicChildren_Good(t *testing.T) {
body := `## Epic
Tasks to complete:
- [ ] #1
- [x] #2
- [ ] #3
- [x] #4
- [ ] #5
`
unchecked, checked := parseEpicChildren(body)
assert.Equal(t, []int{1, 3, 5}, unchecked)
assert.Equal(t, []int{2, 4}, checked)
}
func TestParseEpicChildren_Good_Empty(t *testing.T) {
unchecked, checked := parseEpicChildren("No checklist here")
assert.Nil(t, unchecked)
assert.Nil(t, checked)
}
func TestFindLinkedPR_Good(t *testing.T) {
prs := []ghPR{
{Number: 10, Body: "Unrelated work"},
{Number: 20, Body: "Fixes #42 and updates docs"},
{Number: 30, Body: "Closes #99"},
}
pr := findLinkedPR(prs, 42)
require.NotNil(t, pr)
assert.Equal(t, 20, pr.Number)
}
func TestFindLinkedPR_Good_NoMatch(t *testing.T) {
prs := []ghPR{
{Number: 10, Body: "Unrelated work"},
{Number: 20, Body: "Closes #99"},
}
pr := findLinkedPR(prs, 42)
assert.Nil(t, pr)
}
func TestAggregateCheckStatus_Good(t *testing.T) {
tests := []struct {
name string
suites []ghCheckSuite
want string
}{
{
name: "all success",
suites: []ghCheckSuite{{Status: "completed", Conclusion: "success"}},
want: "SUCCESS",
},
{
name: "one failure",
suites: []ghCheckSuite{{Status: "completed", Conclusion: "failure"}},
want: "FAILURE",
},
{
name: "in progress",
suites: []ghCheckSuite{{Status: "in_progress", Conclusion: ""}},
want: "PENDING",
},
{
name: "empty",
suites: nil,
want: "PENDING",
},
{
name: "mixed completed",
suites: []ghCheckSuite{
{Status: "completed", Conclusion: "success"},
{Status: "completed", Conclusion: "failure"},
},
want: "FAILURE",
},
{
name: "neutral is success",
suites: []ghCheckSuite{
{Status: "completed", Conclusion: "neutral"},
{Status: "completed", Conclusion: "success"},
},
want: "SUCCESS",
},
{
name: "skipped is success",
suites: []ghCheckSuite{
{Status: "completed", Conclusion: "skipped"},
},
want: "SUCCESS",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := aggregateCheckStatus(tc.suites)
assert.Equal(t, tc.want, got)
})
}
}
func TestMergeableToString_Good(t *testing.T) {
tests := []struct {
input string
want string
}{
{"clean", "MERGEABLE"},
{"has_hooks", "MERGEABLE"},
{"unstable", "MERGEABLE"},
{"dirty", "CONFLICTING"},
{"blocked", "CONFLICTING"},
{"unknown", "UNKNOWN"},
{"", "UNKNOWN"},
}
for _, tc := range tests {
t.Run(tc.input, func(t *testing.T) {
got := mergeableToString(tc.input)
assert.Equal(t, tc.want, got)
})
}
}
func TestGitHubSource_Report_Good(t *testing.T) {
src := NewGitHubSource(Config{Repos: []string{"owner/repo"}})
err := src.Report(context.Background(), nil)
assert.NoError(t, err)
}


@@ -0,0 +1,201 @@
package handlers
import (
"context"
"encoding/json"
"fmt"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/host-uk/core/pkg/forge"
"github.com/host-uk/core/pkg/jobrunner"
"github.com/host-uk/core/pkg/log"
)
// AgentTarget maps a Forgejo username to an SSH-reachable agent machine.
type AgentTarget struct {
Host string // SSH destination (e.g., "claude@192.168.0.201")
QueueDir string // Remote queue directory (e.g., "~/ai-work/queue")
Model string // AI model: sonnet, haiku, opus (default: sonnet)
Runner string // Runner binary: claude, codex (default: claude)
}
// DispatchTicket is the JSON payload written to the agent's queue.
type DispatchTicket struct {
ID string `json:"id"`
RepoOwner string `json:"repo_owner"`
RepoName string `json:"repo_name"`
IssueNumber int `json:"issue_number"`
IssueTitle string `json:"issue_title"`
IssueBody string `json:"issue_body"`
TargetBranch string `json:"target_branch"`
EpicNumber int `json:"epic_number"`
ForgeURL string `json:"forge_url"`
ForgeToken string `json:"forge_token"`
ForgeUser string `json:"forgejo_user"`
Model string `json:"model,omitempty"`
Runner string `json:"runner,omitempty"`
CreatedAt string `json:"created_at"`
}
// DispatchHandler dispatches coding work to remote agent machines via SSH/SCP.
type DispatchHandler struct {
forge *forge.Client
forgeURL string
token string
agents map[string]AgentTarget
}
// NewDispatchHandler creates a handler that dispatches tickets to agent machines.
func NewDispatchHandler(client *forge.Client, forgeURL, token string, agents map[string]AgentTarget) *DispatchHandler {
return &DispatchHandler{
forge: client,
forgeURL: forgeURL,
token: token,
agents: agents,
}
}
// Name returns the handler identifier.
func (h *DispatchHandler) Name() string {
return "dispatch"
}
// Match returns true for signals where a child issue needs coding (no PR yet)
// and the assignee is a known agent.
func (h *DispatchHandler) Match(signal *jobrunner.PipelineSignal) bool {
if !signal.NeedsCoding {
return false
}
_, ok := h.agents[signal.Assignee]
return ok
}
// Execute creates a ticket JSON and SCPs it to the agent's queue directory.
func (h *DispatchHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
start := time.Now()
agent, ok := h.agents[signal.Assignee]
if !ok {
return nil, log.E("dispatch.Execute", fmt.Sprintf("unknown agent: %s", signal.Assignee), nil)
}
// Determine target branch (default to repo default).
targetBranch := "new" // TODO: resolve from epic or repo default
ticket := DispatchTicket{
ID: fmt.Sprintf("%s-%s-%d-%d", signal.RepoOwner, signal.RepoName, signal.ChildNumber, time.Now().Unix()),
RepoOwner: signal.RepoOwner,
RepoName: signal.RepoName,
IssueNumber: signal.ChildNumber,
IssueTitle: signal.IssueTitle,
IssueBody: signal.IssueBody,
TargetBranch: targetBranch,
EpicNumber: signal.EpicNumber,
ForgeURL: h.forgeURL,
ForgeToken: h.token,
ForgeUser: signal.Assignee,
Model: agent.Model,
Runner: agent.Runner,
CreatedAt: time.Now().UTC().Format(time.RFC3339),
}
ticketJSON, err := json.MarshalIndent(ticket, "", " ")
if err != nil {
return &jobrunner.ActionResult{
Action: "dispatch",
RepoOwner: signal.RepoOwner,
RepoName: signal.RepoName,
EpicNumber: signal.EpicNumber,
ChildNumber: signal.ChildNumber,
Success: false,
Error: fmt.Sprintf("marshal ticket: %v", err),
Timestamp: time.Now(),
Duration: time.Since(start),
}, nil
}
// Check if ticket already exists on agent (dedup).
ticketName := fmt.Sprintf("ticket-%s-%s-%d.json", signal.RepoOwner, signal.RepoName, signal.ChildNumber)
if h.ticketExists(agent, ticketName) {
log.Info("ticket already queued, skipping", "ticket", ticketName, "agent", signal.Assignee)
return &jobrunner.ActionResult{
Action: "dispatch",
RepoOwner: signal.RepoOwner,
RepoName: signal.RepoName,
EpicNumber: signal.EpicNumber,
ChildNumber: signal.ChildNumber,
Success: true,
Timestamp: time.Now(),
Duration: time.Since(start),
}, nil
}
// SCP ticket to agent queue.
remotePath := filepath.Join(agent.QueueDir, ticketName)
if err := h.scpTicket(ctx, agent.Host, remotePath, ticketJSON); err != nil {
return &jobrunner.ActionResult{
Action: "dispatch",
RepoOwner: signal.RepoOwner,
RepoName: signal.RepoName,
EpicNumber: signal.EpicNumber,
ChildNumber: signal.ChildNumber,
Success: false,
Error: fmt.Sprintf("scp ticket: %v", err),
Timestamp: time.Now(),
Duration: time.Since(start),
}, nil
}
// Comment on issue.
comment := fmt.Sprintf("Dispatched to **%s** agent queue.", signal.Assignee)
_ = h.forge.CreateIssueComment(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber), comment)
return &jobrunner.ActionResult{
Action: "dispatch",
RepoOwner: signal.RepoOwner,
RepoName: signal.RepoName,
EpicNumber: signal.EpicNumber,
ChildNumber: signal.ChildNumber,
Success: true,
Timestamp: time.Now(),
Duration: time.Since(start),
}, nil
}
// scpTicket writes ticket data to a remote path via SSH.
// TODO: Replace exec ssh+cat with charmbracelet/ssh for native Go SSH.
func (h *DispatchHandler) scpTicket(ctx context.Context, host, remotePath string, data []byte) error {
// Use ssh + cat instead of scp for piping stdin.
// TODO: Use charmbracelet/keygen for key management, native Go SSH client for transport.
cmd := exec.CommandContext(ctx, "ssh",
"-o", "StrictHostKeyChecking=accept-new",
"-o", "ConnectTimeout=10",
host,
fmt.Sprintf("cat > %s", remotePath),
)
cmd.Stdin = strings.NewReader(string(data))
output, err := cmd.CombinedOutput()
if err != nil {
return log.E("dispatch.scp", fmt.Sprintf("ssh to %s failed: %s", host, string(output)), err)
}
return nil
}
// ticketExists checks if a ticket file already exists in queue, active, or done.
// TODO: Replace exec ssh with native Go SSH client (charmbracelet/ssh).
func (h *DispatchHandler) ticketExists(agent AgentTarget, ticketName string) bool {
cmd := exec.Command("ssh",
"-o", "StrictHostKeyChecking=accept-new",
"-o", "ConnectTimeout=10",
agent.Host,
fmt.Sprintf("test -f %s/%s || test -f %s/../active/%s || test -f %s/../done/%s",
agent.QueueDir, ticketName,
agent.QueueDir, ticketName,
agent.QueueDir, ticketName),
)
return cmd.Run() == nil
}
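
The runner that consumes these tickets lives on the agent machine and is not part of this diff. Purely to illustrate the contract implied by DispatchTicket, the ticket-*.json naming, and the queue/active/done layout that ticketExists and 'core ai agent status' inspect, a consumer loop might look roughly like this (hypothetical code, not the real runner):

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// ticket mirrors a subset of DispatchTicket's JSON tags; the full struct is
// defined in the handler above.
type ticket struct {
	ID          string `json:"id"`
	RepoOwner   string `json:"repo_owner"`
	RepoName    string `json:"repo_name"`
	IssueNumber int    `json:"issue_number"`
	IssueTitle  string `json:"issue_title"`
	Runner      string `json:"runner,omitempty"`
}

func main() {
	base := filepath.Join(os.Getenv("HOME"), "ai-work")
	paths, _ := filepath.Glob(filepath.Join(base, "queue", "ticket-*.json"))
	for _, p := range paths {
		// Claim the ticket by moving it out of the queue (queue -> active -> done,
		// matching the directories inspected by 'core ai agent status').
		active := filepath.Join(base, "active", filepath.Base(p))
		if err := os.Rename(p, active); err != nil {
			continue
		}
		data, err := os.ReadFile(active)
		if err != nil {
			continue
		}
		var t ticket
		if err := json.Unmarshal(data, &t); err != nil {
			continue
		}
		fmt.Printf("would invoke %s for %s/%s#%d (%s)\n",
			t.Runner, t.RepoOwner, t.RepoName, t.IssueNumber, t.IssueTitle)
		// ...run the coding agent here, open a PR, then archive the ticket...
		_ = os.Rename(active, filepath.Join(base, "done", filepath.Base(p)))
	}
}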


@@ -0,0 +1,53 @@
package handlers
import (
"testing"
"github.com/host-uk/core/pkg/jobrunner"
"github.com/stretchr/testify/assert"
)
func TestDispatch_Match_Good_NeedsCoding(t *testing.T) {
h := NewDispatchHandler(nil, "", "", map[string]AgentTarget{
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue"},
})
sig := &jobrunner.PipelineSignal{
NeedsCoding: true,
Assignee: "darbs-claude",
}
assert.True(t, h.Match(sig))
}
func TestDispatch_Match_Bad_HasPR(t *testing.T) {
h := NewDispatchHandler(nil, "", "", map[string]AgentTarget{
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue"},
})
sig := &jobrunner.PipelineSignal{
NeedsCoding: false,
PRNumber: 7,
Assignee: "darbs-claude",
}
assert.False(t, h.Match(sig))
}
func TestDispatch_Match_Bad_UnknownAgent(t *testing.T) {
h := NewDispatchHandler(nil, "", "", map[string]AgentTarget{
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue"},
})
sig := &jobrunner.PipelineSignal{
NeedsCoding: true,
Assignee: "unknown-user",
}
assert.False(t, h.Match(sig))
}
func TestDispatch_Match_Bad_NotAssigned(t *testing.T) {
h := NewDispatchHandler(nil, "", "", map[string]AgentTarget{
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue"},
})
sig := &jobrunner.PipelineSignal{
NeedsCoding: true,
Assignee: "",
}
assert.False(t, h.Match(sig))
}


@@ -5,15 +5,18 @@ import (
"fmt"
"time"
"github.com/host-uk/core/pkg/forge"
"github.com/host-uk/core/pkg/jobrunner"
)
// EnableAutoMergeHandler enables squash auto-merge on a PR that is ready.
type EnableAutoMergeHandler struct{}
// EnableAutoMergeHandler merges a PR that is ready using squash strategy.
type EnableAutoMergeHandler struct {
forge *forge.Client
}
// NewEnableAutoMergeHandler creates a handler that enables auto-merge.
func NewEnableAutoMergeHandler() *EnableAutoMergeHandler {
return &EnableAutoMergeHandler{}
// NewEnableAutoMergeHandler creates a handler that merges ready PRs.
func NewEnableAutoMergeHandler(f *forge.Client) *EnableAutoMergeHandler {
return &EnableAutoMergeHandler{forge: f}
}
// Name returns the handler identifier.
@@ -31,15 +34,11 @@ func (h *EnableAutoMergeHandler) Match(signal *jobrunner.PipelineSignal) bool {
!signal.HasUnresolvedThreads()
}
// Execute shells out to gh to enable auto-merge with squash strategy.
// Execute merges the pull request with squash strategy.
func (h *EnableAutoMergeHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
start := time.Now()
repoFlag := fmt.Sprintf("%s/%s", signal.RepoOwner, signal.RepoName)
prNumber := fmt.Sprintf("%d", signal.PRNumber)
cmd := execCommand(ctx, "gh", "pr", "merge", "--auto", "--squash", prNumber, "-R", repoFlag)
output, err := cmd.CombinedOutput()
err := h.forge.MergePullRequest(signal.RepoOwner, signal.RepoName, int64(signal.PRNumber), "squash")
result := &jobrunner.ActionResult{
Action: "enable_auto_merge",
@@ -52,7 +51,7 @@ func (h *EnableAutoMergeHandler) Execute(ctx context.Context, signal *jobrunner.
}
if err != nil {
result.Error = fmt.Sprintf("gh pr merge failed: %v: %s", err, string(output))
result.Error = fmt.Sprintf("merge failed: %v", err)
}
return result, nil


@@ -2,8 +2,9 @@ package handlers
import (
"context"
"os/exec"
"strings"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
@@ -13,7 +14,7 @@
)
func TestEnableAutoMerge_Match_Good(t *testing.T) {
h := NewEnableAutoMergeHandler()
h := NewEnableAutoMergeHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "OPEN",
IsDraft: false,
@@ -26,7 +27,7 @@ func TestEnableAutoMerge_Match_Good(t *testing.T) {
}
func TestEnableAutoMerge_Match_Bad_Draft(t *testing.T) {
h := NewEnableAutoMergeHandler()
h := NewEnableAutoMergeHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "OPEN",
IsDraft: true,
@@ -39,7 +40,7 @@ func TestEnableAutoMerge_Match_Bad_Draft(t *testing.T) {
}
func TestEnableAutoMerge_Match_Bad_UnresolvedThreads(t *testing.T) {
h := NewEnableAutoMergeHandler()
h := NewEnableAutoMergeHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "OPEN",
IsDraft: false,
@@ -52,17 +53,19 @@ func TestEnableAutoMerge_Match_Bad_UnresolvedThreads(t *testing.T) {
}
func TestEnableAutoMerge_Execute_Good(t *testing.T) {
// Save and restore the original execCommand.
original := execCommand
defer func() { execCommand = original }()
var capturedPath string
var capturedMethod string
var capturedArgs []string
execCommand = func(ctx context.Context, name string, args ...string) *exec.Cmd {
capturedArgs = append([]string{name}, args...)
return exec.CommandContext(ctx, "echo", append([]string{name}, args...)...)
}
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
capturedMethod = r.Method
capturedPath = r.URL.Path
w.WriteHeader(http.StatusOK)
})))
defer srv.Close()
h := NewEnableAutoMergeHandler()
client := newTestForgeClient(t, srv.URL)
h := NewEnableAutoMergeHandler(client)
sig := &jobrunner.PipelineSignal{
RepoOwner: "host-uk",
RepoName: "core-php",
@@ -74,11 +77,29 @@ func TestEnableAutoMerge_Execute_Good(t *testing.T) {
assert.True(t, result.Success)
assert.Equal(t, "enable_auto_merge", result.Action)
joined := strings.Join(capturedArgs, " ")
assert.Contains(t, joined, "--auto")
assert.Contains(t, joined, "--squash")
assert.Contains(t, joined, "55")
assert.Contains(t, joined, "-R")
assert.Contains(t, joined, "host-uk/core-php")
assert.Equal(t, http.MethodPost, capturedMethod)
assert.Equal(t, "/api/v1/repos/host-uk/core-php/pulls/55/merge", capturedPath)
}
func TestEnableAutoMerge_Execute_Bad_MergeFailed(t *testing.T) {
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusConflict)
_ = json.NewEncoder(w).Encode(map[string]string{"message": "merge conflict"})
})))
defer srv.Close()
client := newTestForgeClient(t, srv.URL)
h := NewEnableAutoMergeHandler(client)
sig := &jobrunner.PipelineSignal{
RepoOwner: "host-uk",
RepoName: "core-php",
PRNumber: 55,
}
result, err := h.Execute(context.Background(), sig)
require.NoError(t, err)
assert.False(t, result.Success)
assert.Contains(t, result.Error, "merge failed")
}

View file

@ -1,8 +0,0 @@
package handlers
import "os/exec"
// execCommand is a package-level variable for creating exec.Cmd instances.
// It defaults to exec.CommandContext and can be replaced in tests for
// mocking shell commands.
var execCommand = exec.CommandContext

View file

@ -1,34 +1,22 @@
package handlers
import (
"bytes"
"context"
"fmt"
"net/http"
"time"
"github.com/host-uk/core/pkg/forge"
"github.com/host-uk/core/pkg/jobrunner"
)
const defaultAPIURL = "https://api.github.com"
// PublishDraftHandler marks a draft PR as ready for review once its checks pass.
type PublishDraftHandler struct {
client *http.Client
apiURL string
forge *forge.Client
}
// NewPublishDraftHandler creates a handler that publishes draft PRs.
// If client is nil, http.DefaultClient is used.
// If apiURL is empty, the default GitHub API URL is used.
func NewPublishDraftHandler(client *http.Client, apiURL string) *PublishDraftHandler {
if client == nil {
client = http.DefaultClient
}
if apiURL == "" {
apiURL = defaultAPIURL
}
return &PublishDraftHandler{client: client, apiURL: apiURL}
func NewPublishDraftHandler(f *forge.Client) *PublishDraftHandler {
return &PublishDraftHandler{forge: f}
}
// Name returns the handler identifier.
@ -43,38 +31,24 @@ func (h *PublishDraftHandler) Match(signal *jobrunner.PipelineSignal) bool {
signal.CheckStatus == "SUCCESS"
}
// Execute patches the PR to mark it as no longer a draft.
// Execute marks the PR as no longer a draft.
func (h *PublishDraftHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
start := time.Now()
url := fmt.Sprintf("%s/repos/%s/%s/pulls/%d", h.apiURL, signal.RepoOwner, signal.RepoName, signal.PRNumber)
body := bytes.NewBufferString(`{"draft":false}`)
req, err := http.NewRequestWithContext(ctx, http.MethodPatch, url, body)
if err != nil {
return nil, fmt.Errorf("publish_draft: create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/vnd.github+json")
err := h.forge.SetPRDraft(signal.RepoOwner, signal.RepoName, int64(signal.PRNumber), false)
resp, err := h.client.Do(req)
if err != nil {
return nil, fmt.Errorf("publish_draft: execute request: %w", err)
}
defer func() { _ = resp.Body.Close() }()
success := resp.StatusCode >= 200 && resp.StatusCode < 300
result := &jobrunner.ActionResult{
Action: "publish_draft",
RepoOwner: signal.RepoOwner,
RepoName: signal.RepoName,
PRNumber: signal.PRNumber,
Success: success,
Success: err == nil,
Timestamp: time.Now(),
Duration: time.Since(start),
}
if !success {
result.Error = fmt.Sprintf("unexpected status %d", resp.StatusCode)
if err != nil {
result.Error = fmt.Sprintf("publish draft failed: %v", err)
}
return result, nil

View file

@ -14,7 +14,7 @@ import (
)
func TestPublishDraft_Match_Good(t *testing.T) {
h := NewPublishDraftHandler(nil, "")
h := NewPublishDraftHandler(nil)
sig := &jobrunner.PipelineSignal{
IsDraft: true,
PRState: "OPEN",
@ -24,7 +24,7 @@ func TestPublishDraft_Match_Good(t *testing.T) {
}
func TestPublishDraft_Match_Bad_NotDraft(t *testing.T) {
h := NewPublishDraftHandler(nil, "")
h := NewPublishDraftHandler(nil)
sig := &jobrunner.PipelineSignal{
IsDraft: false,
PRState: "OPEN",
@ -34,7 +34,7 @@ func TestPublishDraft_Match_Bad_NotDraft(t *testing.T) {
}
func TestPublishDraft_Match_Bad_ChecksFailing(t *testing.T) {
h := NewPublishDraftHandler(nil, "")
h := NewPublishDraftHandler(nil)
sig := &jobrunner.PipelineSignal{
IsDraft: true,
PRState: "OPEN",
@ -48,17 +48,19 @@ func TestPublishDraft_Execute_Good(t *testing.T) {
var capturedPath string
var capturedBody string
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
capturedMethod = r.Method
capturedPath = r.URL.Path
b, _ := io.ReadAll(r.Body)
capturedBody = string(b)
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(`{"draft":false}`))
}))
_, _ = w.Write([]byte(`{}`))
})))
defer srv.Close()
h := NewPublishDraftHandler(srv.Client(), srv.URL)
client := newTestForgeClient(t, srv.URL)
h := NewPublishDraftHandler(client)
sig := &jobrunner.PipelineSignal{
RepoOwner: "host-uk",
RepoName: "core-php",
@ -71,7 +73,7 @@ func TestPublishDraft_Execute_Good(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, http.MethodPatch, capturedMethod)
assert.Equal(t, "/repos/host-uk/core-php/pulls/42", capturedPath)
assert.Equal(t, "/api/v1/repos/host-uk/core-php/pulls/42", capturedPath)
assert.Contains(t, capturedBody, `"draft":false`)
assert.True(t, result.Success)

View file

@ -1,216 +1,79 @@
package handlers
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
forgejosdk "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2"
"github.com/host-uk/core/pkg/forge"
"github.com/host-uk/core/pkg/jobrunner"
)
const defaultGraphQLURL = "https://api.github.com/graphql"
// ResolveThreadsHandler resolves all unresolved review threads on a PR
// via the GitHub GraphQL API.
type ResolveThreadsHandler struct {
client *http.Client
graphqlURL string
// DismissReviewsHandler dismisses stale "request changes" reviews on a PR.
// This replaces the GitHub-only ResolveThreadsHandler because Forgejo does
// not have a thread resolution API.
type DismissReviewsHandler struct {
forge *forge.Client
}
// NewResolveThreadsHandler creates a handler that resolves review threads.
// If client is nil, http.DefaultClient is used.
// If graphqlURL is empty, the default GitHub GraphQL URL is used.
func NewResolveThreadsHandler(client *http.Client, graphqlURL string) *ResolveThreadsHandler {
if client == nil {
client = http.DefaultClient
}
if graphqlURL == "" {
graphqlURL = defaultGraphQLURL
}
return &ResolveThreadsHandler{client: client, graphqlURL: graphqlURL}
// NewDismissReviewsHandler creates a handler that dismisses stale reviews.
func NewDismissReviewsHandler(f *forge.Client) *DismissReviewsHandler {
return &DismissReviewsHandler{forge: f}
}
// Name returns the handler identifier.
func (h *ResolveThreadsHandler) Name() string {
return "resolve_threads"
func (h *DismissReviewsHandler) Name() string {
return "dismiss_reviews"
}
// Match returns true when the PR is open and has unresolved review threads.
func (h *ResolveThreadsHandler) Match(signal *jobrunner.PipelineSignal) bool {
func (h *DismissReviewsHandler) Match(signal *jobrunner.PipelineSignal) bool {
return signal.PRState == "OPEN" && signal.HasUnresolvedThreads()
}
// graphqlRequest is a generic GraphQL request body.
type graphqlRequest struct {
Query string `json:"query"`
Variables map[string]any `json:"variables,omitempty"`
}
// threadsResponse models the GraphQL response for fetching review threads.
type threadsResponse struct {
Data struct {
Repository struct {
PullRequest struct {
ReviewThreads struct {
Nodes []struct {
ID string `json:"id"`
IsResolved bool `json:"isResolved"`
} `json:"nodes"`
} `json:"reviewThreads"`
} `json:"pullRequest"`
} `json:"repository"`
} `json:"data"`
}
// resolveResponse models the GraphQL mutation response for resolving a thread.
type resolveResponse struct {
Data struct {
ResolveReviewThread struct {
Thread struct {
ID string `json:"id"`
} `json:"thread"`
} `json:"resolveReviewThread"`
} `json:"data"`
Errors []struct {
Message string `json:"message"`
} `json:"errors"`
}
// Execute fetches unresolved review threads and resolves each one.
func (h *ResolveThreadsHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
// Execute dismisses stale "request changes" reviews on the PR.
func (h *DismissReviewsHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
start := time.Now()
threadIDs, err := h.fetchUnresolvedThreads(ctx, signal)
reviews, err := h.forge.ListPRReviews(signal.RepoOwner, signal.RepoName, int64(signal.PRNumber))
if err != nil {
return nil, fmt.Errorf("resolve_threads: fetch threads: %w", err)
return nil, fmt.Errorf("dismiss_reviews: list reviews: %w", err)
}
var resolveErrors []string
for _, threadID := range threadIDs {
if err := h.resolveThread(ctx, threadID); err != nil {
resolveErrors = append(resolveErrors, err.Error())
var dismissErrors []string
dismissed := 0
for _, review := range reviews {
if review.State != forgejosdk.ReviewStateRequestChanges || review.Dismissed || !review.Stale {
continue
}
if err := h.forge.DismissReview(
signal.RepoOwner, signal.RepoName,
int64(signal.PRNumber), review.ID,
"Automatically dismissed: review is stale after new commits",
); err != nil {
dismissErrors = append(dismissErrors, err.Error())
} else {
dismissed++
}
}
result := &jobrunner.ActionResult{
Action: "resolve_threads",
Action: "dismiss_reviews",
RepoOwner: signal.RepoOwner,
RepoName: signal.RepoName,
PRNumber: signal.PRNumber,
Success: len(resolveErrors) == 0,
Success: len(dismissErrors) == 0,
Timestamp: time.Now(),
Duration: time.Since(start),
}
if len(resolveErrors) > 0 {
result.Error = fmt.Sprintf("failed to resolve %d thread(s): %s",
len(resolveErrors), resolveErrors[0])
if len(dismissErrors) > 0 {
result.Error = fmt.Sprintf("failed to dismiss %d review(s): %s",
len(dismissErrors), dismissErrors[0])
}
return result, nil
}
// fetchUnresolvedThreads queries the GraphQL API for unresolved review threads.
func (h *ResolveThreadsHandler) fetchUnresolvedThreads(ctx context.Context, signal *jobrunner.PipelineSignal) ([]string, error) {
query := `query($owner: String!, $repo: String!, $number: Int!) {
repository(owner: $owner, name: $repo) {
pullRequest(number: $number) {
reviewThreads(first: 100) {
nodes {
id
isResolved
}
}
}
}
}`
variables := map[string]any{
"owner": signal.RepoOwner,
"repo": signal.RepoName,
"number": signal.PRNumber,
}
gqlReq := graphqlRequest{Query: query, Variables: variables}
respBody, err := h.doGraphQL(ctx, gqlReq)
if err != nil {
return nil, err
}
var resp threadsResponse
if err := json.Unmarshal(respBody, &resp); err != nil {
return nil, fmt.Errorf("decode threads response: %w", err)
}
var ids []string
for _, node := range resp.Data.Repository.PullRequest.ReviewThreads.Nodes {
if !node.IsResolved {
ids = append(ids, node.ID)
}
}
return ids, nil
}
// resolveThread calls the resolveReviewThread GraphQL mutation.
func (h *ResolveThreadsHandler) resolveThread(ctx context.Context, threadID string) error {
mutation := `mutation($threadId: ID!) {
resolveReviewThread(input: {threadId: $threadId}) {
thread {
id
}
}
}`
variables := map[string]any{
"threadId": threadID,
}
gqlReq := graphqlRequest{Query: mutation, Variables: variables}
respBody, err := h.doGraphQL(ctx, gqlReq)
if err != nil {
return err
}
var resp resolveResponse
if err := json.Unmarshal(respBody, &resp); err != nil {
return fmt.Errorf("decode resolve response: %w", err)
}
if len(resp.Errors) > 0 {
return fmt.Errorf("graphql error: %s", resp.Errors[0].Message)
}
return nil
}
// doGraphQL sends a GraphQL request and returns the raw response body.
func (h *ResolveThreadsHandler) doGraphQL(ctx context.Context, gqlReq graphqlRequest) ([]byte, error) {
bodyBytes, err := json.Marshal(gqlReq)
if err != nil {
return nil, fmt.Errorf("marshal graphql request: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, h.graphqlURL, bytes.NewReader(bodyBytes))
if err != nil {
return nil, fmt.Errorf("create graphql request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
resp, err := h.client.Do(req)
if err != nil {
return nil, fmt.Errorf("execute graphql request: %w", err)
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("graphql unexpected status %d", resp.StatusCode)
}
return io.ReadAll(resp.Body)
}

View file

@ -3,7 +3,6 @@ package handlers
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"testing"
@ -14,8 +13,8 @@ import (
"github.com/host-uk/core/pkg/jobrunner"
)
func TestResolveThreads_Match_Good(t *testing.T) {
h := NewResolveThreadsHandler(nil, "")
func TestDismissReviews_Match_Good(t *testing.T) {
h := NewDismissReviewsHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "OPEN",
ThreadsTotal: 4,
@ -24,8 +23,8 @@ func TestResolveThreads_Match_Good(t *testing.T) {
assert.True(t, h.Match(sig))
}
func TestResolveThreads_Match_Bad_AllResolved(t *testing.T) {
h := NewResolveThreadsHandler(nil, "")
func TestDismissReviews_Match_Bad_AllResolved(t *testing.T) {
h := NewDismissReviewsHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "OPEN",
ThreadsTotal: 3,
@ -34,41 +33,41 @@ func TestResolveThreads_Match_Bad_AllResolved(t *testing.T) {
assert.False(t, h.Match(sig))
}
func TestResolveThreads_Execute_Good(t *testing.T) {
func TestDismissReviews_Execute_Good(t *testing.T) {
callCount := 0
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
b, _ := io.ReadAll(r.Body)
var gqlReq graphqlRequest
_ = json.Unmarshal(b, &gqlReq)
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
callCount++
w.Header().Set("Content-Type", "application/json")
if callCount == 1 {
// First call: fetch threads query.
resp := threadsResponse{}
resp.Data.Repository.PullRequest.ReviewThreads.Nodes = []struct {
ID string `json:"id"`
IsResolved bool `json:"isResolved"`
}{
{ID: "thread-1", IsResolved: false},
{ID: "thread-2", IsResolved: true},
{ID: "thread-3", IsResolved: false},
// ListPullReviews (GET)
if r.Method == http.MethodGet {
reviews := []map[string]any{
{
"id": 1, "state": "REQUEST_CHANGES", "dismissed": false, "stale": true,
"body": "fix this", "commit_id": "abc123",
},
{
"id": 2, "state": "APPROVED", "dismissed": false, "stale": false,
"body": "looks good", "commit_id": "abc123",
},
{
"id": 3, "state": "REQUEST_CHANGES", "dismissed": false, "stale": true,
"body": "needs work", "commit_id": "abc123",
},
}
w.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(w).Encode(resp)
_ = json.NewEncoder(w).Encode(reviews)
return
}
// Subsequent calls: resolve mutation.
resp := resolveResponse{}
resp.Data.ResolveReviewThread.Thread.ID = gqlReq.Variables["threadId"].(string)
w.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(w).Encode(resp)
}))
// DismissPullReview (POST to dismissals endpoint)
w.WriteHeader(http.StatusOK)
})))
defer srv.Close()
h := NewResolveThreadsHandler(srv.Client(), srv.URL)
client := newTestForgeClient(t, srv.URL)
h := NewDismissReviewsHandler(client)
sig := &jobrunner.PipelineSignal{
RepoOwner: "host-uk",
RepoName: "core-admin",
@ -82,11 +81,11 @@ func TestResolveThreads_Execute_Good(t *testing.T) {
require.NoError(t, err)
assert.True(t, result.Success)
assert.Equal(t, "resolve_threads", result.Action)
assert.Equal(t, "dismiss_reviews", result.Action)
assert.Equal(t, "host-uk", result.RepoOwner)
assert.Equal(t, "core-admin", result.RepoName)
assert.Equal(t, 33, result.PRNumber)
// 1 query + 2 mutations (thread-1 and thread-3 are unresolved).
// 1 list + 2 dismiss (reviews #1 and #3 are stale REQUEST_CHANGES)
assert.Equal(t, 3, callCount)
}

View file

@ -1,33 +1,23 @@
package handlers
import (
"bytes"
"context"
"fmt"
"net/http"
"time"
"github.com/host-uk/core/pkg/forge"
"github.com/host-uk/core/pkg/jobrunner"
)
// SendFixCommandHandler posts a comment on a PR asking for conflict or
// review fixes.
type SendFixCommandHandler struct {
client *http.Client
apiURL string
forge *forge.Client
}
// NewSendFixCommandHandler creates a handler that posts fix commands.
// If client is nil, http.DefaultClient is used.
// If apiURL is empty, the default GitHub API URL is used.
func NewSendFixCommandHandler(client *http.Client, apiURL string) *SendFixCommandHandler {
if client == nil {
client = http.DefaultClient
}
if apiURL == "" {
apiURL = defaultAPIURL
}
return &SendFixCommandHandler{client: client, apiURL: apiURL}
func NewSendFixCommandHandler(f *forge.Client) *SendFixCommandHandler {
return &SendFixCommandHandler{forge: f}
}
// Name returns the handler identifier.
@ -50,7 +40,7 @@ func (h *SendFixCommandHandler) Match(signal *jobrunner.PipelineSignal) bool {
return false
}
// Execute posts a comment on the PR issue asking for a fix.
// Execute posts a comment on the PR asking for a fix.
func (h *SendFixCommandHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
start := time.Now()
@ -61,36 +51,23 @@ func (h *SendFixCommandHandler) Execute(ctx context.Context, signal *jobrunner.P
message = "Can you fix the code reviews?"
}
url := fmt.Sprintf("%s/repos/%s/%s/issues/%d/comments", h.apiURL, signal.RepoOwner, signal.RepoName, signal.PRNumber)
bodyStr := fmt.Sprintf(`{"body":%q}`, message)
body := bytes.NewBufferString(bodyStr)
err := h.forge.CreateIssueComment(
signal.RepoOwner, signal.RepoName,
int64(signal.PRNumber), message,
)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body)
if err != nil {
return nil, fmt.Errorf("send_fix_command: create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/vnd.github+json")
resp, err := h.client.Do(req)
if err != nil {
return nil, fmt.Errorf("send_fix_command: execute request: %w", err)
}
defer func() { _ = resp.Body.Close() }()
success := resp.StatusCode >= 200 && resp.StatusCode < 300
result := &jobrunner.ActionResult{
Action: "send_fix_command",
RepoOwner: signal.RepoOwner,
RepoName: signal.RepoName,
PRNumber: signal.PRNumber,
Success: success,
Success: err == nil,
Timestamp: time.Now(),
Duration: time.Since(start),
}
if !success {
result.Error = fmt.Sprintf("unexpected status %d", resp.StatusCode)
if err != nil {
result.Error = fmt.Sprintf("post comment failed: %v", err)
}
return result, nil

View file

@ -14,7 +14,7 @@ import (
)
func TestSendFixCommand_Match_Good_Conflicting(t *testing.T) {
h := NewSendFixCommandHandler(nil, "")
h := NewSendFixCommandHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "OPEN",
Mergeable: "CONFLICTING",
@ -23,7 +23,7 @@ func TestSendFixCommand_Match_Good_Conflicting(t *testing.T) {
}
func TestSendFixCommand_Match_Good_UnresolvedThreads(t *testing.T) {
h := NewSendFixCommandHandler(nil, "")
h := NewSendFixCommandHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "OPEN",
Mergeable: "MERGEABLE",
@ -35,7 +35,7 @@ func TestSendFixCommand_Match_Good_UnresolvedThreads(t *testing.T) {
}
func TestSendFixCommand_Match_Bad_Clean(t *testing.T) {
h := NewSendFixCommandHandler(nil, "")
h := NewSendFixCommandHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "OPEN",
Mergeable: "MERGEABLE",
@ -51,17 +51,19 @@ func TestSendFixCommand_Execute_Good_Conflict(t *testing.T) {
var capturedPath string
var capturedBody string
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
capturedMethod = r.Method
capturedPath = r.URL.Path
b, _ := io.ReadAll(r.Body)
capturedBody = string(b)
w.WriteHeader(http.StatusCreated)
_, _ = w.Write([]byte(`{"id":1}`))
}))
})))
defer srv.Close()
h := NewSendFixCommandHandler(srv.Client(), srv.URL)
client := newTestForgeClient(t, srv.URL)
h := NewSendFixCommandHandler(client)
sig := &jobrunner.PipelineSignal{
RepoOwner: "host-uk",
RepoName: "core-tenant",
@ -74,7 +76,7 @@ func TestSendFixCommand_Execute_Good_Conflict(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, http.MethodPost, capturedMethod)
assert.Equal(t, "/repos/host-uk/core-tenant/issues/17/comments", capturedPath)
assert.Equal(t, "/api/v1/repos/host-uk/core-tenant/issues/17/comments", capturedPath)
assert.Contains(t, capturedBody, "fix the merge conflict")
assert.True(t, result.Success)

View file

@ -0,0 +1,35 @@
package handlers
import (
"net/http"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/host-uk/core/pkg/forge"
)
// forgejoVersionResponse is the JSON response for /api/v1/version.
const forgejoVersionResponse = `{"version":"9.0.0"}`
// withVersion wraps an HTTP handler to also serve the Forgejo version endpoint
// that the SDK calls during NewClient initialization.
func withVersion(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.HasSuffix(r.URL.Path, "/version") {
w.Header().Set("Content-Type", "application/json")
_, _ = w.Write([]byte(forgejoVersionResponse))
return
}
next.ServeHTTP(w, r)
})
}
// newTestForgeClient creates a forge.Client pointing at the given test server URL.
func newTestForgeClient(t *testing.T, url string) *forge.Client {
t.Helper()
client, err := forge.New(url, "test-token")
require.NoError(t, err)
return client
}

View file

@ -6,16 +6,21 @@ import (
"strings"
"time"
forgejosdk "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2"
"github.com/host-uk/core/pkg/forge"
"github.com/host-uk/core/pkg/jobrunner"
)
// TickParentHandler ticks a child checkbox in the parent epic issue body
// after the child's PR has been merged.
type TickParentHandler struct{}
type TickParentHandler struct {
forge *forge.Client
}
// NewTickParentHandler creates a handler that ticks parent epic checkboxes.
func NewTickParentHandler() *TickParentHandler {
return &TickParentHandler{}
func NewTickParentHandler(f *forge.Client) *TickParentHandler {
return &TickParentHandler{forge: f}
}
// Name returns the handler identifier.
@ -29,24 +34,17 @@ func (h *TickParentHandler) Match(signal *jobrunner.PipelineSignal) bool {
}
// Execute fetches the epic body, replaces the unchecked checkbox for the
// child issue with a checked one, and updates the epic.
// child issue with a checked one, updates the epic, and closes the child issue.
func (h *TickParentHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
start := time.Now()
repoFlag := signal.RepoFullName()
// Fetch the epic issue body.
viewCmd := execCommand(ctx, "gh", "issue", "view",
fmt.Sprintf("%d", signal.EpicNumber),
"-R", repoFlag,
"--json", "body",
"-q", ".body",
)
bodyBytes, err := viewCmd.Output()
epic, err := h.forge.GetIssue(signal.RepoOwner, signal.RepoName, int64(signal.EpicNumber))
if err != nil {
return nil, fmt.Errorf("tick_parent: fetch epic body: %w", err)
return nil, fmt.Errorf("tick_parent: fetch epic: %w", err)
}
oldBody := string(bodyBytes)
oldBody := epic.Body
unchecked := fmt.Sprintf("- [ ] #%d", signal.ChildNumber)
checked := fmt.Sprintf("- [x] #%d", signal.ChildNumber)
@ -65,30 +63,24 @@ func (h *TickParentHandler) Execute(ctx context.Context, signal *jobrunner.Pipel
newBody := strings.Replace(oldBody, unchecked, checked, 1)
editCmd := execCommand(ctx, "gh", "issue", "edit",
fmt.Sprintf("%d", signal.EpicNumber),
"-R", repoFlag,
"--body", newBody,
)
editOutput, err := editCmd.CombinedOutput()
// Update the epic body.
_, err = h.forge.EditIssue(signal.RepoOwner, signal.RepoName, int64(signal.EpicNumber), forgejosdk.EditIssueOption{
Body: &newBody,
})
if err != nil {
return &jobrunner.ActionResult{
Action: "tick_parent",
RepoOwner: signal.RepoOwner,
RepoName: signal.RepoName,
PRNumber: signal.PRNumber,
Error: fmt.Sprintf("gh issue edit failed: %v: %s", err, string(editOutput)),
Error: fmt.Sprintf("edit epic failed: %v", err),
Timestamp: time.Now(),
Duration: time.Since(start),
}, nil
}
// Also close the child issue (design steps 8+9 combined).
closeCmd := execCommand(ctx, "gh", "issue", "close",
fmt.Sprintf("%d", signal.ChildNumber),
"-R", repoFlag,
)
closeOutput, err := closeCmd.CombinedOutput()
// Close the child issue.
err = h.forge.CloseIssue(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber))
result := &jobrunner.ActionResult{
Action: "tick_parent",
@ -101,7 +93,7 @@ func (h *TickParentHandler) Execute(ctx context.Context, signal *jobrunner.Pipel
}
if err != nil {
result.Error = fmt.Sprintf("gh issue close failed: %v: %s", err, string(closeOutput))
result.Error = fmt.Sprintf("close child issue failed: %v", err)
}
return result, nil

View file

@ -2,8 +2,10 @@ package handlers
import (
"context"
"fmt"
"os/exec"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
@ -14,7 +16,7 @@ import (
)
func TestTickParent_Match_Good(t *testing.T) {
h := NewTickParentHandler()
h := NewTickParentHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "MERGED",
}
@ -22,7 +24,7 @@ func TestTickParent_Match_Good(t *testing.T) {
}
func TestTickParent_Match_Bad_Open(t *testing.T) {
h := NewTickParentHandler()
h := NewTickParentHandler(nil)
sig := &jobrunner.PipelineSignal{
PRState: "OPEN",
}
@ -30,32 +32,51 @@ func TestTickParent_Match_Bad_Open(t *testing.T) {
}
func TestTickParent_Execute_Good(t *testing.T) {
// Save and restore the original execCommand.
original := execCommand
defer func() { execCommand = original }()
epicBody := "## Tasks\n- [x] #1\n- [ ] #7\n- [ ] #8\n"
var callCount int
var editArgs []string
var closeArgs []string
var editBody string
var closeCalled bool
execCommand = func(ctx context.Context, name string, args ...string) *exec.Cmd {
callCount++
if callCount == 1 {
// First call: gh issue view — return the epic body.
return exec.CommandContext(ctx, "echo", "-n", epicBody)
}
if callCount == 2 {
// Second call: gh issue edit — capture args and succeed.
editArgs = append([]string{name}, args...)
return exec.CommandContext(ctx, "echo", "ok")
}
// Third call: gh issue close — capture args and succeed.
closeArgs = append([]string{name}, args...)
return exec.CommandContext(ctx, "echo", "ok")
}
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
method := r.Method
w.Header().Set("Content-Type", "application/json")
h := NewTickParentHandler()
switch {
// GET issue (fetch epic)
case method == http.MethodGet && strings.Contains(path, "/issues/42"):
_ = json.NewEncoder(w).Encode(map[string]any{
"number": 42,
"body": epicBody,
"title": "Epic",
})
// PATCH issue (edit epic body)
case method == http.MethodPatch && strings.Contains(path, "/issues/42"):
b, _ := io.ReadAll(r.Body)
editBody = string(b)
_ = json.NewEncoder(w).Encode(map[string]any{
"number": 42,
"body": editBody,
"title": "Epic",
})
// PATCH issue (close child — state: closed)
case method == http.MethodPatch && strings.Contains(path, "/issues/7"):
closeCalled = true
_ = json.NewEncoder(w).Encode(map[string]any{
"number": 7,
"state": "closed",
})
default:
w.WriteHeader(http.StatusNotFound)
}
})))
defer srv.Close()
client := newTestForgeClient(t, srv.URL)
h := NewTickParentHandler(client)
sig := &jobrunner.PipelineSignal{
RepoOwner: "host-uk",
RepoName: "core-php",
@ -70,21 +91,8 @@ func TestTickParent_Execute_Good(t *testing.T) {
assert.True(t, result.Success)
assert.Equal(t, "tick_parent", result.Action)
assert.Equal(t, 3, callCount, "expected three exec calls: view + edit + close")
// Verify the edit args contain the checked checkbox.
editJoined := strings.Join(editArgs, " ")
assert.Contains(t, editJoined, "issue")
assert.Contains(t, editJoined, "edit")
assert.Contains(t, editJoined, "42")
assert.Contains(t, editJoined, fmt.Sprintf("-R %s", sig.RepoFullName()))
assert.Contains(t, editJoined, "- [x] #7")
// Verify the close args target the child issue.
closeJoined := strings.Join(closeArgs, " ")
assert.Contains(t, closeJoined, "issue")
assert.Contains(t, closeJoined, "close")
assert.Contains(t, closeJoined, "7")
assert.Contains(t, closeJoined, "-R")
assert.Contains(t, closeJoined, "host-uk/core-php")
// Verify the edit body contains the checked checkbox.
assert.Contains(t, editBody, "- [x] #7")
assert.True(t, closeCalled, "expected child issue to be closed")
}

View file

@ -6,7 +6,7 @@ import (
)
// PipelineSignal is the structural snapshot of a child issue/PR.
// Never contains comment bodies or free text — structural signals only.
// Carries structural state plus issue title/body for dispatch prompts.
type PipelineSignal struct {
EpicNumber int
ChildNumber int
@ -22,6 +22,10 @@ type PipelineSignal struct {
LastCommitSHA string
LastCommitAt time.Time
LastReviewAt time.Time
NeedsCoding bool // true if child has no PR (work not started)
Assignee string // issue assignee username (for dispatch)
IssueTitle string // child issue title (for dispatch prompt)
IssueBody string // child issue body (for dispatch prompt)
}
// RepoFullName returns "owner/repo".

153
scripts/agent-runner.sh Executable file
View file

@ -0,0 +1,153 @@
#!/bin/bash
# agent-runner.sh — One-at-a-time queue runner for Claude Code agents.
# Deployed to agent machines, triggered by cron every 5 minutes.
#
# Usage: */5 * * * * ~/ai-work/agent-runner.sh >> ~/ai-work/logs/runner.log 2>&1
set -euo pipefail
WORK_DIR="${HOME}/ai-work"
QUEUE_DIR="${WORK_DIR}/queue"
ACTIVE_DIR="${WORK_DIR}/active"
DONE_DIR="${WORK_DIR}/done"
LOG_DIR="${WORK_DIR}/logs"
LOCK_FILE="${WORK_DIR}/.runner.lock"
# Ensure directories exist.
mkdir -p "$QUEUE_DIR" "$ACTIVE_DIR" "$DONE_DIR" "$LOG_DIR"
# --- 1. Check lock (is another run active?) ---
if [ -f "$LOCK_FILE" ]; then
PID=$(cat "$LOCK_FILE" 2>/dev/null || echo "")
if [ -n "$PID" ] && kill -0 "$PID" 2>/dev/null; then
echo "$(date -Iseconds) Runner already active (PID $PID), exiting."
exit 0
fi
echo "$(date -Iseconds) Removing stale lock (PID $PID)."
rm -f "$LOCK_FILE"
fi
# --- 2. Check credits ---
# Lightweight probe: confirm the claude CLI is installed and responding before
# picking up work. If the probe fails we still proceed.
# TODO: parse remaining usage and skip the run when less than 5% credit remains.
if command -v claude &>/dev/null; then
    if ! claude --output-format json -p "Reply with just the word OK" >/dev/null 2>&1; then
        echo "$(date -Iseconds) Warning: claude CLI probe failed; proceeding anyway."
    fi
fi
# --- 3. Pick oldest ticket ---
TICKET=$(find "$QUEUE_DIR" -name 'ticket-*.json' -type f 2>/dev/null | sort | head -1)
if [ -z "$TICKET" ]; then
exit 0 # No work
fi
TICKET_BASENAME=$(basename "$TICKET")
echo "$(date -Iseconds) Processing ticket: $TICKET_BASENAME"
# --- 4. Lock ---
echo $$ > "$LOCK_FILE"
cleanup() {
rm -f "$LOCK_FILE"
echo "$(date -Iseconds) Lock released."
}
trap cleanup EXIT
# --- 5. Move to active ---
mv "$TICKET" "$ACTIVE_DIR/"
TICKET_FILE="$ACTIVE_DIR/$TICKET_BASENAME"
# --- 6. Extract ticket data ---
REPO_OWNER=$(jq -r .repo_owner "$TICKET_FILE")
REPO_NAME=$(jq -r .repo_name "$TICKET_FILE")
ISSUE_NUM=$(jq -r .issue_number "$TICKET_FILE")
ISSUE_TITLE=$(jq -r .issue_title "$TICKET_FILE")
ISSUE_BODY=$(jq -r .issue_body "$TICKET_FILE")
TARGET_BRANCH=$(jq -r .target_branch "$TICKET_FILE")
FORGE_URL=$(jq -r .forge_url "$TICKET_FILE")
FORGE_TOKEN=$(jq -r .forge_token "$TICKET_FILE")
echo "$(date -Iseconds) Issue: ${REPO_OWNER}/${REPO_NAME}#${ISSUE_NUM} - ${ISSUE_TITLE}"
# --- 7. Clone or update repo ---
JOB_DIR="$WORK_DIR/jobs/${REPO_OWNER}-${REPO_NAME}-${ISSUE_NUM}"
REPO_DIR="$JOB_DIR/$REPO_NAME"
mkdir -p "$JOB_DIR"
FORGEJO_USER=$(jq -r '.forgejo_user // empty' "$TICKET_FILE")
if [ -z "$FORGEJO_USER" ]; then
FORGEJO_USER="$(hostname -s)-$(whoami)"
fi
# TODO: Replace token-in-URL with git credential helper or SSH clone via charmbracelet/keygen.
CLONE_URL="https://${FORGEJO_USER}:${FORGE_TOKEN}@${FORGE_URL#https://}/${REPO_OWNER}/${REPO_NAME}.git"
if [ -d "$REPO_DIR/.git" ]; then
echo "$(date -Iseconds) Updating existing clone..."
cd "$REPO_DIR"
git fetch origin
git checkout "$TARGET_BRANCH" 2>/dev/null || git checkout -b "$TARGET_BRANCH" "origin/$TARGET_BRANCH"
git pull origin "$TARGET_BRANCH"
else
echo "$(date -Iseconds) Cloning repo..."
git clone -b "$TARGET_BRANCH" "$CLONE_URL" "$REPO_DIR"
cd "$REPO_DIR"
fi
# --- 8. Build prompt ---
PROMPT="You are working on issue #${ISSUE_NUM} in ${REPO_OWNER}/${REPO_NAME}.
Title: ${ISSUE_TITLE}
Description:
${ISSUE_BODY}
The repo is cloned at the current directory on branch '${TARGET_BRANCH}'.
Create a feature branch from '${TARGET_BRANCH}', make minimal targeted changes, commit referencing #${ISSUE_NUM}, and push.
Then create a PR targeting '${TARGET_BRANCH}' using the forgejo MCP tools or git push."
# --- 9. Run AI agent ---
MODEL=$(jq -r '.model // "sonnet"' "$TICKET_FILE")
RUNNER=$(jq -r '.runner // "claude"' "$TICKET_FILE")
LOG_FILE="$LOG_DIR/${REPO_OWNER}-${REPO_NAME}-${ISSUE_NUM}.log"
echo "$(date -Iseconds) Running ${RUNNER} (model: ${MODEL})..."
# Initialise EXIT_CODE and capture failures with '|| EXIT_CODE=$?' so that
# 'set -e' does not abort the script before the result is reported in step 11.
EXIT_CODE=0
case "$RUNNER" in
codex)
    codex exec --full-auto "$PROMPT" \
        > "$LOG_FILE" 2>&1 || EXIT_CODE=$?
    ;;
gemini)
    MODEL_FLAG=""
    if [ -n "$MODEL" ] && [ "$MODEL" != "sonnet" ]; then
        MODEL_FLAG="-m $MODEL"
    fi
    # MODEL_FLAG is intentionally unquoted: when empty it must add no argument.
    echo "$PROMPT" | gemini -p - -y $MODEL_FLAG \
        > "$LOG_FILE" 2>&1 || EXIT_CODE=$?
    ;;
*)
    echo "$PROMPT" | claude -p \
        --model "$MODEL" \
        --dangerously-skip-permissions \
        --output-format text \
        > "$LOG_FILE" 2>&1 || EXIT_CODE=$?
    ;;
esac
echo "$(date -Iseconds) ${RUNNER} exited with code: $EXIT_CODE"
# --- 10. Move to done ---
mv "$TICKET_FILE" "$DONE_DIR/"
# --- 11. Report result back to Forgejo ---
if [ $EXIT_CODE -eq 0 ]; then
COMMENT="Agent completed work on #${ISSUE_NUM}. Exit code: 0."
else
COMMENT="Agent failed on #${ISSUE_NUM} (exit code: ${EXIT_CODE}). Check logs on agent machine."
fi
curl -s -X POST "${FORGE_URL}/api/v1/repos/${REPO_OWNER}/${REPO_NAME}/issues/${ISSUE_NUM}/comments" \
-H "Authorization: token $FORGE_TOKEN" \
-H "Content-Type: application/json" \
-d "$(jq -n --arg body "$COMMENT" '{body: $body}')" \
> /dev/null 2>&1 || true
echo "$(date -Iseconds) Done: $TICKET_BASENAME (exit: $EXIT_CODE)"
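For reference, a queued ticket only needs the fields the runner extracts above; 'model', 'runner', and 'forgejo_user' fall back to defaults when omitted. A minimal sketch of queueing one by hand (all values below are placeholders, not taken from this change):

# Hypothetical ticket, mirroring the jq reads in steps 6 and 9 above.
jq -n '{
  repo_owner:    "host-uk",
  repo_name:     "example-repo",
  issue_number:  123,
  issue_title:   "Add health endpoint",
  issue_body:    "Expose GET /healthz returning 200.",
  target_branch: "main",
  forge_url:     "https://git.example.com",
  forge_token:   "REPLACE_ME",
  model:         "sonnet",
  runner:        "claude"
}' > ~/ai-work/queue/ticket-$(date +%s).json

Epoch-stamped filenames keep the runner's 'sort | head -1' picking the oldest ticket first.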

86
scripts/agent-setup.sh Executable file
View file

@ -0,0 +1,86 @@
#!/bin/bash
# agent-setup.sh — Bootstrap an AgentCI agent machine via SSH.
#
# Usage: agent-setup.sh <user@host>
#
# Creates work directories, copies agent-runner.sh, installs cron,
# and verifies prerequisites.
set -euo pipefail
HOST="${1:?Usage: agent-setup.sh <user@host>}"
SSH_OPTS="-o StrictHostKeyChecking=accept-new -o ConnectTimeout=10"
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
RUNNER_SCRIPT="${SCRIPT_DIR}/agent-runner.sh"
if [ ! -f "$RUNNER_SCRIPT" ]; then
echo "ERROR: agent-runner.sh not found at $RUNNER_SCRIPT"
exit 1
fi
echo "=== AgentCI Setup: $HOST ==="
# --- 1. Test SSH ---
echo -n "Testing SSH... "
if ! ssh $SSH_OPTS "$HOST" "echo ok" >/dev/null 2>&1; then
echo "FAILED — cannot reach $HOST"
exit 1
fi
echo "OK"
# --- 2. Create directories ---
echo -n "Creating directories... "
ssh $SSH_OPTS "$HOST" "mkdir -p ~/ai-work/{queue,active,done,logs,jobs}"
echo "OK"
# --- 3. Copy runner script ---
echo -n "Copying agent-runner.sh... "
scp $SSH_OPTS "$RUNNER_SCRIPT" "${HOST}:~/ai-work/agent-runner.sh"
ssh $SSH_OPTS "$HOST" "chmod +x ~/ai-work/agent-runner.sh"
echo "OK"
# --- 4. Install cron (idempotent) ---
echo -n "Installing cron... "
CRON_LINE="*/5 * * * * ~/ai-work/agent-runner.sh >> ~/ai-work/logs/runner.log 2>&1"
ssh $SSH_OPTS "$HOST" "
if crontab -l 2>/dev/null | grep -qF 'agent-runner.sh'; then
echo 'already installed'
else
(crontab -l 2>/dev/null; echo '$CRON_LINE') | crontab -
echo 'installed'
fi
"
# --- 5. Verify prerequisites ---
echo "Checking prerequisites..."
MISSING=""
for tool in jq git claude; do
if ssh $SSH_OPTS "$HOST" "command -v $tool" >/dev/null 2>&1; then
echo " $tool: OK"
else
echo " $tool: MISSING"
MISSING="$MISSING $tool"
fi
done
if [ -n "$MISSING" ]; then
echo ""
echo "WARNING: Missing tools:$MISSING"
echo "Install them before the agent can process tickets."
fi
# --- 6. Round-trip test ---
echo -n "Round-trip test... "
TEST_FILE="queue/test-setup-$(date +%s).json"
ssh $SSH_OPTS "$HOST" "echo '{\"test\":true}' > ~/ai-work/$TEST_FILE"
RESULT=$(ssh $SSH_OPTS "$HOST" "cat ~/ai-work/$TEST_FILE && rm ~/ai-work/$TEST_FILE")
if [ "$RESULT" = '{"test":true}' ]; then
echo "OK"
else
echo "FAILED"
exit 1
fi
echo ""
echo "=== Setup complete ==="
echo "Agent queue: $HOST:~/ai-work/queue/"
echo "Runner log: $HOST:~/ai-work/logs/runner.log"
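Once setup reports OK, a quick way to exercise the agent end to end is to drop a throwaway ticket into the queue and watch the runner log; cron picks it up within five minutes. A sketch, assuming the same user@host given to this script and a ticket file prepared locally (both hypothetical):

# Hypothetical smoke test: queue one ticket, then follow the runner log.
scp my-ticket.json user@host:~/ai-work/queue/ticket-$(date +%s).json
ssh user@host 'tail -f ~/ai-work/logs/runner.log'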