refactor: extract agentci + jobrunner to core/go-agent
Both packages now live in forge.lthn.ai/core/go-agent as a unified agent orchestration repo. go-scm retains collect/, forge/, git/, gitea/. Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
148003e005
commit
c95aefed94
41 changed files with 6 additions and 9022 deletions
19
CLAUDE.md
19
CLAUDE.md
|
|
@ -4,7 +4,9 @@ You are a dedicated domain expert for `forge.lthn.ai/core/go-scm`. Virgil orches
|
|||
|
||||
## What This Package Does
|
||||
|
||||
SCM integration, AgentCI dispatch, and data collection for the Lethean ecosystem. See `docs/architecture.md` for the full package breakdown.
|
||||
SCM integration and data collection for the Lethean ecosystem. Four packages: `forge/` (Forgejo API client), `git/` (multi-repo git ops), `gitea/` (Gitea API client), `collect/` (data collection).
|
||||
|
||||
**Extracted to other repos:** `agentci/` + `jobrunner/` → `core/go-agent`, `git/` → `core/go-git`.
|
||||
|
||||
## Commands
|
||||
|
||||
|
|
@ -35,26 +37,13 @@ type RepoStatus struct {
|
|||
Modified, Untracked, Staged, Ahead, Behind int
|
||||
Error error
|
||||
}
|
||||
|
||||
// jobrunner/types.go
|
||||
type PipelineSignal struct {
|
||||
EpicNumber, ChildNumber, PRNumber int
|
||||
RepoOwner, RepoName string
|
||||
PRState, Mergeable, CheckStatus string
|
||||
NeedsCoding bool
|
||||
IssueTitle, IssueBody string
|
||||
}
|
||||
|
||||
// agentci/clotho.go
|
||||
type RunMode string // "standard" or "dual"
|
||||
type Spinner struct { Config ClothoConfig; Agents map[string]AgentConfig }
|
||||
```
|
||||
|
||||
## Coding Standards
|
||||
|
||||
- **UK English**: colour, organisation, centre
|
||||
- **Tests**: testify assert/require, table-driven preferred, `_Good`/`_Bad`/`_Ugly` naming
|
||||
- **Conventional commits**: `feat(forge):`, `fix(jobrunner):`, `test(collect):`
|
||||
- **Conventional commits**: `feat(forge):`, `fix(git):`, `test(collect):`
|
||||
- **Co-Author**: `Co-Authored-By: Virgil <virgil@lethean.io>`
|
||||
- **Licence**: EUPL-1.2
|
||||
- **Imports**: stdlib → forge.lthn.ai → third-party, each group separated by blank line
|
||||
|
|
|
|||
|
|
@ -1,87 +0,0 @@
|
|||
package agentci
|
||||
|
||||
import (
	"bytes"
	"context"
	"strings"

	"forge.lthn.ai/core/go-scm/jobrunner"
)
|
||||
|
||||
// RunMode selects the execution strategy for a dispatched task.
type RunMode string

// Supported run modes.
const (
	// ModeStandard executes the task once with the agent's primary model.
	ModeStandard RunMode = "standard"

	// ModeDual is the Clotho Protocol: the task is executed a second time
	// for dual-run verification and the two outputs are compared.
	ModeDual RunMode = "dual"
)
|
||||
|
||||
// Spinner is the Clotho orchestrator that determines the fate of each task.
|
||||
type Spinner struct {
|
||||
Config ClothoConfig
|
||||
Agents map[string]AgentConfig
|
||||
}
|
||||
|
||||
// NewSpinner creates a new Clotho orchestrator.
|
||||
func NewSpinner(cfg ClothoConfig, agents map[string]AgentConfig) *Spinner {
|
||||
return &Spinner{
|
||||
Config: cfg,
|
||||
Agents: agents,
|
||||
}
|
||||
}
|
||||
|
||||
// DeterminePlan decides if a signal requires dual-run verification based on
|
||||
// the global strategy, agent configuration, and repository criticality.
|
||||
func (s *Spinner) DeterminePlan(signal *jobrunner.PipelineSignal, agentName string) RunMode {
|
||||
if s.Config.Strategy != "clotho-verified" {
|
||||
return ModeStandard
|
||||
}
|
||||
|
||||
agent, ok := s.Agents[agentName]
|
||||
if !ok {
|
||||
return ModeStandard
|
||||
}
|
||||
if agent.DualRun {
|
||||
return ModeDual
|
||||
}
|
||||
|
||||
// Protect critical repos with dual-run (Axiom 1).
|
||||
if signal.RepoName == "core" || strings.Contains(signal.RepoName, "security") {
|
||||
return ModeDual
|
||||
}
|
||||
|
||||
return ModeStandard
|
||||
}
|
||||
|
||||
// GetVerifierModel returns the model for the secondary "signed" verification run.
|
||||
func (s *Spinner) GetVerifierModel(agentName string) string {
|
||||
agent, ok := s.Agents[agentName]
|
||||
if !ok || agent.VerifyModel == "" {
|
||||
return "gemini-1.5-pro"
|
||||
}
|
||||
return agent.VerifyModel
|
||||
}
|
||||
|
||||
// FindByForgejoUser resolves a Forgejo username to the agent config key and config.
|
||||
// This decouples agent naming (mythological roles) from Forgejo identity.
|
||||
func (s *Spinner) FindByForgejoUser(forgejoUser string) (string, AgentConfig, bool) {
|
||||
if forgejoUser == "" {
|
||||
return "", AgentConfig{}, false
|
||||
}
|
||||
// Direct match on config key first.
|
||||
if agent, ok := s.Agents[forgejoUser]; ok {
|
||||
return forgejoUser, agent, true
|
||||
}
|
||||
// Search by ForgejoUser field.
|
||||
for name, agent := range s.Agents {
|
||||
if agent.ForgejoUser != "" && agent.ForgejoUser == forgejoUser {
|
||||
return name, agent, true
|
||||
}
|
||||
}
|
||||
return "", AgentConfig{}, false
|
||||
}
|
||||
|
||||
// Weave compares primary and verifier outputs. Returns true if they converge.
|
||||
// This is a placeholder for future semantic diff logic.
|
||||
func (s *Spinner) Weave(ctx context.Context, primaryOutput, signedOutput []byte) (bool, error) {
|
||||
return string(primaryOutput) == string(signedOutput), nil
|
||||
}
|
||||
|
|
@ -1,194 +0,0 @@
|
|||
package agentci
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func newTestSpinner() *Spinner {
|
||||
return NewSpinner(
|
||||
ClothoConfig{
|
||||
Strategy: "clotho-verified",
|
||||
ValidationThreshold: 0.85,
|
||||
},
|
||||
map[string]AgentConfig{
|
||||
"claude-agent": {
|
||||
Host: "claude@10.0.0.1",
|
||||
Model: "opus",
|
||||
Runner: "claude",
|
||||
Active: true,
|
||||
DualRun: false,
|
||||
ForgejoUser: "claude-forge",
|
||||
},
|
||||
"gemini-agent": {
|
||||
Host: "localhost",
|
||||
Model: "gemini-2.0-flash",
|
||||
VerifyModel: "gemini-1.5-pro",
|
||||
Runner: "gemini",
|
||||
Active: true,
|
||||
DualRun: true,
|
||||
ForgejoUser: "gemini-forge",
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestNewSpinner_Good(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
assert.NotNil(t, spinner)
|
||||
assert.Equal(t, "clotho-verified", spinner.Config.Strategy)
|
||||
assert.Len(t, spinner.Agents, 2)
|
||||
}
|
||||
|
||||
func TestDeterminePlan_Good_Standard(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-php",
|
||||
}
|
||||
|
||||
mode := spinner.DeterminePlan(signal, "claude-agent")
|
||||
assert.Equal(t, ModeStandard, mode)
|
||||
}
|
||||
|
||||
func TestDeterminePlan_Good_DualRunByAgent(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "some-repo",
|
||||
}
|
||||
|
||||
mode := spinner.DeterminePlan(signal, "gemini-agent")
|
||||
assert.Equal(t, ModeDual, mode)
|
||||
}
|
||||
|
||||
func TestDeterminePlan_Good_DualRunByCriticalRepo(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
repoName string
|
||||
expected RunMode
|
||||
}{
|
||||
{name: "core repo", repoName: "core", expected: ModeDual},
|
||||
{name: "security repo", repoName: "auth-security", expected: ModeDual},
|
||||
{name: "security-audit", repoName: "security-audit", expected: ModeDual},
|
||||
{name: "regular repo", repoName: "docs", expected: ModeStandard},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: tt.repoName,
|
||||
}
|
||||
mode := spinner.DeterminePlan(signal, "claude-agent")
|
||||
assert.Equal(t, tt.expected, mode)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeterminePlan_Good_NonVerifiedStrategy(t *testing.T) {
|
||||
spinner := NewSpinner(
|
||||
ClothoConfig{Strategy: "direct"},
|
||||
map[string]AgentConfig{
|
||||
"agent": {Host: "localhost", DualRun: true, Active: true},
|
||||
},
|
||||
)
|
||||
|
||||
signal := &jobrunner.PipelineSignal{RepoName: "core"}
|
||||
mode := spinner.DeterminePlan(signal, "agent")
|
||||
assert.Equal(t, ModeStandard, mode, "non-verified strategy should always return standard")
|
||||
}
|
||||
|
||||
func TestDeterminePlan_Good_UnknownAgent(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
signal := &jobrunner.PipelineSignal{RepoName: "some-repo"}
|
||||
mode := spinner.DeterminePlan(signal, "nonexistent-agent")
|
||||
assert.Equal(t, ModeStandard, mode, "unknown agent should return standard")
|
||||
}
|
||||
|
||||
func TestGetVerifierModel_Good(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
model := spinner.GetVerifierModel("gemini-agent")
|
||||
assert.Equal(t, "gemini-1.5-pro", model)
|
||||
}
|
||||
|
||||
func TestGetVerifierModel_Good_Default(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
// claude-agent has no VerifyModel set.
|
||||
model := spinner.GetVerifierModel("claude-agent")
|
||||
assert.Equal(t, "gemini-1.5-pro", model, "should fall back to default")
|
||||
}
|
||||
|
||||
func TestGetVerifierModel_Good_UnknownAgent(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
model := spinner.GetVerifierModel("unknown")
|
||||
assert.Equal(t, "gemini-1.5-pro", model, "should fall back to default")
|
||||
}
|
||||
|
||||
func TestFindByForgejoUser_Good_DirectMatch(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
// Direct match on config key.
|
||||
name, agent, found := spinner.FindByForgejoUser("claude-agent")
|
||||
assert.True(t, found)
|
||||
assert.Equal(t, "claude-agent", name)
|
||||
assert.Equal(t, "opus", agent.Model)
|
||||
}
|
||||
|
||||
func TestFindByForgejoUser_Good_ByField(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
// Match by ForgejoUser field.
|
||||
name, agent, found := spinner.FindByForgejoUser("claude-forge")
|
||||
assert.True(t, found)
|
||||
assert.Equal(t, "claude-agent", name)
|
||||
assert.Equal(t, "opus", agent.Model)
|
||||
}
|
||||
|
||||
func TestFindByForgejoUser_Bad_NotFound(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
_, _, found := spinner.FindByForgejoUser("nonexistent")
|
||||
assert.False(t, found)
|
||||
}
|
||||
|
||||
func TestFindByForgejoUser_Bad_Empty(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
_, _, found := spinner.FindByForgejoUser("")
|
||||
assert.False(t, found)
|
||||
}
|
||||
|
||||
func TestWeave_Good_Matching(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
converge, err := spinner.Weave(context.Background(), []byte("output"), []byte("output"))
|
||||
require.NoError(t, err)
|
||||
assert.True(t, converge)
|
||||
}
|
||||
|
||||
func TestWeave_Good_Diverging(t *testing.T) {
|
||||
spinner := newTestSpinner()
|
||||
|
||||
converge, err := spinner.Weave(context.Background(), []byte("primary"), []byte("different"))
|
||||
require.NoError(t, err)
|
||||
assert.False(t, converge)
|
||||
}
|
||||
|
||||
func TestRunModeConstants(t *testing.T) {
|
||||
assert.Equal(t, RunMode("standard"), ModeStandard)
|
||||
assert.Equal(t, RunMode("dual"), ModeDual)
|
||||
}
|
||||
|
|
@ -1,144 +0,0 @@
|
|||
// Package agentci provides configuration, security, and orchestration for AgentCI dispatch targets.
|
||||
package agentci
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/config"
|
||||
)
|
||||
|
||||
// AgentConfig describes a single agent machine as declared in the config
// file under agentci.agents.<name>.
type AgentConfig struct {
	// Host is the agent machine address, e.g. "claude@10.0.0.1" or "localhost".
	Host string `yaml:"host" mapstructure:"host"`
	// QueueDir is the work-queue directory on the agent machine.
	QueueDir string `yaml:"queue_dir" mapstructure:"queue_dir"`
	// ForgejoUser is the agent's Forgejo identity.
	ForgejoUser string `yaml:"forgejo_user" mapstructure:"forgejo_user"`
	// Model is the primary AI model.
	Model string `yaml:"model" mapstructure:"model"`
	// Runner is the runner binary: claude, codex, gemini.
	Runner string `yaml:"runner" mapstructure:"runner"`
	// VerifyModel is the secondary model used for dual-run verification.
	VerifyModel string `yaml:"verify_model" mapstructure:"verify_model"`
	// SecurityLevel is "low" or "high".
	SecurityLevel string `yaml:"security_level" mapstructure:"security_level"`
	// Roles lists the roles this agent may take.
	Roles []string `yaml:"roles" mapstructure:"roles"`
	// DualRun forces dual-run verification for tasks on this agent.
	DualRun bool `yaml:"dual_run" mapstructure:"dual_run"`
	// Active marks whether the agent is currently dispatchable.
	Active bool `yaml:"active" mapstructure:"active"`
}
|
||||
|
||||
// ClothoConfig controls the orchestration strategy for AgentCI dispatch.
type ClothoConfig struct {
	// Strategy is "direct" or "clotho-verified".
	Strategy string `yaml:"strategy" mapstructure:"strategy"`
	// ValidationThreshold is the permitted divergence limit (0.0-1.0).
	ValidationThreshold float64 `yaml:"validation_threshold" mapstructure:"validation_threshold"`
	// SigningKeyPath is the filesystem path to the signing key.
	SigningKeyPath string `yaml:"signing_key_path" mapstructure:"signing_key_path"`
}
|
||||
|
||||
// LoadAgents reads agent targets from config and returns a map of AgentConfig.
|
||||
// Returns an empty map (not an error) if no agents are configured.
|
||||
func LoadAgents(cfg *config.Config) (map[string]AgentConfig, error) {
|
||||
var agents map[string]AgentConfig
|
||||
if err := cfg.Get("agentci.agents", &agents); err != nil {
|
||||
return map[string]AgentConfig{}, nil
|
||||
}
|
||||
|
||||
// Validate and apply defaults.
|
||||
for name, ac := range agents {
|
||||
if !ac.Active {
|
||||
continue
|
||||
}
|
||||
if ac.Host == "" {
|
||||
return nil, fmt.Errorf("agentci.LoadAgents: agent %q: host is required", name)
|
||||
}
|
||||
if ac.QueueDir == "" {
|
||||
ac.QueueDir = "/home/claude/ai-work/queue"
|
||||
}
|
||||
if ac.Model == "" {
|
||||
ac.Model = "sonnet"
|
||||
}
|
||||
if ac.Runner == "" {
|
||||
ac.Runner = "claude"
|
||||
}
|
||||
agents[name] = ac
|
||||
}
|
||||
|
||||
return agents, nil
|
||||
}
|
||||
|
||||
// LoadActiveAgents returns only active agents.
|
||||
func LoadActiveAgents(cfg *config.Config) (map[string]AgentConfig, error) {
|
||||
all, err := LoadAgents(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
active := make(map[string]AgentConfig)
|
||||
for name, ac := range all {
|
||||
if ac.Active {
|
||||
active[name] = ac
|
||||
}
|
||||
}
|
||||
return active, nil
|
||||
}
|
||||
|
||||
// LoadClothoConfig loads the Clotho orchestrator settings.
|
||||
// Returns sensible defaults if no config is present.
|
||||
func LoadClothoConfig(cfg *config.Config) (ClothoConfig, error) {
|
||||
var cc ClothoConfig
|
||||
if err := cfg.Get("agentci.clotho", &cc); err != nil {
|
||||
return ClothoConfig{
|
||||
Strategy: "direct",
|
||||
ValidationThreshold: 0.85,
|
||||
}, nil
|
||||
}
|
||||
if cc.Strategy == "" {
|
||||
cc.Strategy = "direct"
|
||||
}
|
||||
if cc.ValidationThreshold == 0 {
|
||||
cc.ValidationThreshold = 0.85
|
||||
}
|
||||
return cc, nil
|
||||
}
|
||||
|
||||
// SaveAgent writes an agent config entry to the config file.
|
||||
func SaveAgent(cfg *config.Config, name string, ac AgentConfig) error {
|
||||
key := fmt.Sprintf("agentci.agents.%s", name)
|
||||
data := map[string]any{
|
||||
"host": ac.Host,
|
||||
"queue_dir": ac.QueueDir,
|
||||
"forgejo_user": ac.ForgejoUser,
|
||||
"active": ac.Active,
|
||||
"dual_run": ac.DualRun,
|
||||
}
|
||||
if ac.Model != "" {
|
||||
data["model"] = ac.Model
|
||||
}
|
||||
if ac.Runner != "" {
|
||||
data["runner"] = ac.Runner
|
||||
}
|
||||
if ac.VerifyModel != "" {
|
||||
data["verify_model"] = ac.VerifyModel
|
||||
}
|
||||
if ac.SecurityLevel != "" {
|
||||
data["security_level"] = ac.SecurityLevel
|
||||
}
|
||||
if len(ac.Roles) > 0 {
|
||||
data["roles"] = ac.Roles
|
||||
}
|
||||
return cfg.Set(key, data)
|
||||
}
|
||||
|
||||
// RemoveAgent removes an agent from the config file.
|
||||
func RemoveAgent(cfg *config.Config, name string) error {
|
||||
var agents map[string]AgentConfig
|
||||
if err := cfg.Get("agentci.agents", &agents); err != nil {
|
||||
return fmt.Errorf("agentci.RemoveAgent: no agents configured")
|
||||
}
|
||||
if _, ok := agents[name]; !ok {
|
||||
return fmt.Errorf("agentci.RemoveAgent: agent %q not found", name)
|
||||
}
|
||||
delete(agents, name)
|
||||
return cfg.Set("agentci.agents", agents)
|
||||
}
|
||||
|
||||
// ListAgents returns all configured agents (active and inactive).
|
||||
func ListAgents(cfg *config.Config) (map[string]AgentConfig, error) {
|
||||
var agents map[string]AgentConfig
|
||||
if err := cfg.Get("agentci.agents", &agents); err != nil {
|
||||
return map[string]AgentConfig{}, nil
|
||||
}
|
||||
return agents, nil
|
||||
}
|
||||
|
|
@ -1,329 +0,0 @@
|
|||
package agentci
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/config"
|
||||
"forge.lthn.ai/core/go/pkg/io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func newTestConfig(t *testing.T, yaml string) *config.Config {
|
||||
t.Helper()
|
||||
m := io.NewMockMedium()
|
||||
if yaml != "" {
|
||||
m.Files["/tmp/test/config.yaml"] = yaml
|
||||
}
|
||||
cfg, err := config.New(config.WithMedium(m), config.WithPath("/tmp/test/config.yaml"))
|
||||
require.NoError(t, err)
|
||||
return cfg
|
||||
}
|
||||
|
||||
func TestLoadAgents_Good(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
darbs-claude:
|
||||
host: claude@192.168.0.201
|
||||
queue_dir: /home/claude/ai-work/queue
|
||||
forgejo_user: darbs-claude
|
||||
model: sonnet
|
||||
runner: claude
|
||||
active: true
|
||||
`)
|
||||
agents, err := LoadAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, agents, 1)
|
||||
|
||||
agent := agents["darbs-claude"]
|
||||
assert.Equal(t, "claude@192.168.0.201", agent.Host)
|
||||
assert.Equal(t, "/home/claude/ai-work/queue", agent.QueueDir)
|
||||
assert.Equal(t, "sonnet", agent.Model)
|
||||
assert.Equal(t, "claude", agent.Runner)
|
||||
}
|
||||
|
||||
func TestLoadAgents_Good_MultipleAgents(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
darbs-claude:
|
||||
host: claude@192.168.0.201
|
||||
queue_dir: /home/claude/ai-work/queue
|
||||
active: true
|
||||
local-codex:
|
||||
host: localhost
|
||||
queue_dir: /home/claude/ai-work/queue
|
||||
runner: codex
|
||||
active: true
|
||||
`)
|
||||
agents, err := LoadAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, agents, 2)
|
||||
assert.Contains(t, agents, "darbs-claude")
|
||||
assert.Contains(t, agents, "local-codex")
|
||||
}
|
||||
|
||||
func TestLoadAgents_Good_SkipsInactive(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
active-agent:
|
||||
host: claude@10.0.0.1
|
||||
active: true
|
||||
offline-agent:
|
||||
host: claude@10.0.0.2
|
||||
active: false
|
||||
`)
|
||||
agents, err := LoadAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
// Both are returned, but only active-agent has defaults applied.
|
||||
assert.Len(t, agents, 2)
|
||||
assert.Contains(t, agents, "active-agent")
|
||||
}
|
||||
|
||||
func TestLoadActiveAgents_Good(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
active-agent:
|
||||
host: claude@10.0.0.1
|
||||
active: true
|
||||
offline-agent:
|
||||
host: claude@10.0.0.2
|
||||
active: false
|
||||
`)
|
||||
active, err := LoadActiveAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, active, 1)
|
||||
assert.Contains(t, active, "active-agent")
|
||||
}
|
||||
|
||||
func TestLoadAgents_Good_Defaults(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
minimal:
|
||||
host: claude@10.0.0.1
|
||||
active: true
|
||||
`)
|
||||
agents, err := LoadAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, agents, 1)
|
||||
|
||||
agent := agents["minimal"]
|
||||
assert.Equal(t, "/home/claude/ai-work/queue", agent.QueueDir)
|
||||
assert.Equal(t, "sonnet", agent.Model)
|
||||
assert.Equal(t, "claude", agent.Runner)
|
||||
}
|
||||
|
||||
func TestLoadAgents_Good_NoConfig(t *testing.T) {
|
||||
cfg := newTestConfig(t, "")
|
||||
agents, err := LoadAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, agents)
|
||||
}
|
||||
|
||||
func TestLoadAgents_Bad_MissingHost(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
broken:
|
||||
queue_dir: /tmp
|
||||
active: true
|
||||
`)
|
||||
_, err := LoadAgents(cfg)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "host is required")
|
||||
}
|
||||
|
||||
func TestLoadAgents_Good_WithDualRun(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
gemini-agent:
|
||||
host: localhost
|
||||
runner: gemini
|
||||
model: gemini-2.0-flash
|
||||
verify_model: gemini-1.5-pro
|
||||
dual_run: true
|
||||
active: true
|
||||
`)
|
||||
agents, err := LoadAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
agent := agents["gemini-agent"]
|
||||
assert.Equal(t, "gemini", agent.Runner)
|
||||
assert.Equal(t, "gemini-2.0-flash", agent.Model)
|
||||
assert.Equal(t, "gemini-1.5-pro", agent.VerifyModel)
|
||||
assert.True(t, agent.DualRun)
|
||||
}
|
||||
|
||||
func TestLoadClothoConfig_Good(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
clotho:
|
||||
strategy: clotho-verified
|
||||
validation_threshold: 0.9
|
||||
signing_key_path: /etc/core/keys/clotho.pub
|
||||
`)
|
||||
cc, err := LoadClothoConfig(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "clotho-verified", cc.Strategy)
|
||||
assert.Equal(t, 0.9, cc.ValidationThreshold)
|
||||
assert.Equal(t, "/etc/core/keys/clotho.pub", cc.SigningKeyPath)
|
||||
}
|
||||
|
||||
func TestLoadClothoConfig_Good_Defaults(t *testing.T) {
|
||||
cfg := newTestConfig(t, "")
|
||||
cc, err := LoadClothoConfig(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "direct", cc.Strategy)
|
||||
assert.Equal(t, 0.85, cc.ValidationThreshold)
|
||||
}
|
||||
|
||||
func TestSaveAgent_Good(t *testing.T) {
|
||||
cfg := newTestConfig(t, "")
|
||||
|
||||
err := SaveAgent(cfg, "new-agent", AgentConfig{
|
||||
Host: "claude@10.0.0.5",
|
||||
QueueDir: "/home/claude/ai-work/queue",
|
||||
ForgejoUser: "new-agent",
|
||||
Model: "haiku",
|
||||
Runner: "claude",
|
||||
Active: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
agents, err := ListAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, agents, "new-agent")
|
||||
assert.Equal(t, "claude@10.0.0.5", agents["new-agent"].Host)
|
||||
assert.Equal(t, "haiku", agents["new-agent"].Model)
|
||||
}
|
||||
|
||||
func TestSaveAgent_Good_WithDualRun(t *testing.T) {
|
||||
cfg := newTestConfig(t, "")
|
||||
|
||||
err := SaveAgent(cfg, "verified-agent", AgentConfig{
|
||||
Host: "claude@10.0.0.5",
|
||||
Model: "gemini-2.0-flash",
|
||||
VerifyModel: "gemini-1.5-pro",
|
||||
DualRun: true,
|
||||
Active: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
agents, err := ListAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, agents, "verified-agent")
|
||||
assert.True(t, agents["verified-agent"].DualRun)
|
||||
}
|
||||
|
||||
func TestSaveAgent_Good_OmitsEmptyOptionals(t *testing.T) {
|
||||
cfg := newTestConfig(t, "")
|
||||
|
||||
err := SaveAgent(cfg, "minimal", AgentConfig{
|
||||
Host: "claude@10.0.0.1",
|
||||
Active: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
agents, err := ListAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, agents, "minimal")
|
||||
}
|
||||
|
||||
func TestRemoveAgent_Good(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
to-remove:
|
||||
host: claude@10.0.0.1
|
||||
active: true
|
||||
to-keep:
|
||||
host: claude@10.0.0.2
|
||||
active: true
|
||||
`)
|
||||
err := RemoveAgent(cfg, "to-remove")
|
||||
require.NoError(t, err)
|
||||
|
||||
agents, err := ListAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.NotContains(t, agents, "to-remove")
|
||||
assert.Contains(t, agents, "to-keep")
|
||||
}
|
||||
|
||||
func TestRemoveAgent_Bad_NotFound(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
existing:
|
||||
host: claude@10.0.0.1
|
||||
active: true
|
||||
`)
|
||||
err := RemoveAgent(cfg, "nonexistent")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not found")
|
||||
}
|
||||
|
||||
func TestRemoveAgent_Bad_NoAgents(t *testing.T) {
|
||||
cfg := newTestConfig(t, "")
|
||||
err := RemoveAgent(cfg, "anything")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no agents configured")
|
||||
}
|
||||
|
||||
func TestListAgents_Good(t *testing.T) {
|
||||
cfg := newTestConfig(t, `
|
||||
agentci:
|
||||
agents:
|
||||
agent-a:
|
||||
host: claude@10.0.0.1
|
||||
active: true
|
||||
agent-b:
|
||||
host: claude@10.0.0.2
|
||||
active: false
|
||||
`)
|
||||
agents, err := ListAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, agents, 2)
|
||||
assert.True(t, agents["agent-a"].Active)
|
||||
assert.False(t, agents["agent-b"].Active)
|
||||
}
|
||||
|
||||
func TestListAgents_Good_Empty(t *testing.T) {
|
||||
cfg := newTestConfig(t, "")
|
||||
agents, err := ListAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, agents)
|
||||
}
|
||||
|
||||
func TestRoundTrip_SaveThenLoad(t *testing.T) {
|
||||
cfg := newTestConfig(t, "")
|
||||
|
||||
err := SaveAgent(cfg, "alpha", AgentConfig{
|
||||
Host: "claude@alpha",
|
||||
QueueDir: "/home/claude/work/queue",
|
||||
ForgejoUser: "alpha-bot",
|
||||
Model: "opus",
|
||||
Runner: "claude",
|
||||
Active: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = SaveAgent(cfg, "beta", AgentConfig{
|
||||
Host: "claude@beta",
|
||||
ForgejoUser: "beta-bot",
|
||||
Runner: "codex",
|
||||
Active: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
agents, err := LoadActiveAgents(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, agents, 2)
|
||||
assert.Equal(t, "claude@alpha", agents["alpha"].Host)
|
||||
assert.Equal(t, "opus", agents["alpha"].Model)
|
||||
assert.Equal(t, "codex", agents["beta"].Runner)
|
||||
}
|
||||
|
|
@ -1,57 +0,0 @@
|
|||
package agentci
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// safeNameRegex accepts names built only from letters, digits, '-', '_' and '.'.
var safeNameRegex = regexp.MustCompile(`^[a-zA-Z0-9\-\_\.]+$`)

// SanitizePath validates a filename or directory name and prevents path
// traversal. Directory components are stripped with filepath.Base; the
// remaining element must consist solely of safe characters and must not be
// ".", ".." or "/". The sanitised base name is returned.
func SanitizePath(input string) (string, error) {
	name := filepath.Base(input)

	if !safeNameRegex.MatchString(name) {
		return "", fmt.Errorf("agentci.SanitizePath: invalid characters in path element: %s", input)
	}
	// "." and ".." pass the character check but are never valid elements.
	switch name {
	case ".", "..", "/":
		return "", fmt.Errorf("agentci.SanitizePath: invalid path element: %s", name)
	}

	return name, nil
}
|
||||
|
||||
// EscapeShellArg wraps arg in single quotes for safe insertion into a remote
// shell command line; embedded single quotes are closed, escaped, and
// reopened ('\''). Prefer exec.Command argument vectors over constructing
// shell strings where possible.
func EscapeShellArg(arg string) string {
	var b strings.Builder
	b.WriteByte('\'')
	b.WriteString(strings.ReplaceAll(arg, "'", `'\''`))
	b.WriteByte('\'')
	return b.String()
}
|
||||
|
||||
// SecureSSHCommand creates an SSH exec.Cmd with strict host key checking and
// batch mode.
//
// Deprecated: Use SecureSSHCommandContext for context-aware cancellation.
func SecureSSHCommand(host string, remoteCmd string) *exec.Cmd {
	return SecureSSHCommandContext(context.Background(), host, remoteCmd)
}

// SecureSSHCommandContext creates an SSH exec.Cmd bound to ctx for
// cancellation, with strict host key checking, batch mode (no interactive
// prompts), and a 10-second connection timeout.
func SecureSSHCommandContext(ctx context.Context, host string, remoteCmd string) *exec.Cmd {
	sshArgs := []string{
		"-o", "StrictHostKeyChecking=yes",
		"-o", "BatchMode=yes",
		"-o", "ConnectTimeout=10",
		host,
		remoteCmd,
	}
	return exec.CommandContext(ctx, "ssh", sshArgs...)
}
|
||||
|
||||
// MaskToken returns a masked version of a token for safe logging: the first
// and last four characters with the middle replaced by "****". Tokens of
// eight characters or fewer are fully masked, because showing four leading
// plus four trailing characters would reveal the entire value.
//
// NOTE(review): tokens of 9-11 characters still expose most of their
// content; consider a stricter minimum if such short tokens occur.
func MaskToken(token string) string {
	// At <= 8 chars the prefix and suffix would cover the whole token, so
	// the original `< 8` threshold disclosed 8-char tokens verbatim.
	if len(token) <= 8 {
		return "*****"
	}
	return token[:4] + "****" + token[len(token)-4:]
}
|
||||
|
|
@ -1,116 +0,0 @@
|
|||
package agentci
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSanitizePath_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{name: "simple name", input: "myfile.txt", expected: "myfile.txt"},
|
||||
{name: "with hyphen", input: "my-file", expected: "my-file"},
|
||||
{name: "with underscore", input: "my_file", expected: "my_file"},
|
||||
{name: "with dots", input: "file.tar.gz", expected: "file.tar.gz"},
|
||||
{name: "strips directory", input: "/path/to/file.txt", expected: "file.txt"},
|
||||
{name: "alphanumeric", input: "abc123", expected: "abc123"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := SanitizePath(tt.input)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizePath_Good_StripsDirTraversal(t *testing.T) {
|
||||
// filepath.Base("../secret") returns "secret" which is safe.
|
||||
result, err := SanitizePath("../secret")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "secret", result, "directory traversal component stripped by filepath.Base")
|
||||
}
|
||||
|
||||
func TestSanitizePath_Bad(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
}{
|
||||
{name: "spaces", input: "my file"},
|
||||
{name: "special chars", input: "file;rm -rf"},
|
||||
{name: "pipe", input: "file|cmd"},
|
||||
{name: "backtick", input: "file`cmd`"},
|
||||
{name: "dollar", input: "file$var"},
|
||||
{name: "single dot", input: "."},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := SanitizePath(tt.input)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEscapeShellArg_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{name: "simple string", input: "hello", expected: "'hello'"},
|
||||
{name: "with spaces", input: "hello world", expected: "'hello world'"},
|
||||
{name: "empty string", input: "", expected: "''"},
|
||||
{name: "with single quote", input: "it's", expected: "'it'\\''s'"},
|
||||
{name: "multiple single quotes", input: "a'b'c", expected: "'a'\\''b'\\''c'"},
|
||||
{name: "with special chars", input: "$(rm -rf /)", expected: "'$(rm -rf /)'"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := EscapeShellArg(tt.input)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSecureSSHCommand_Good(t *testing.T) {
|
||||
cmd := SecureSSHCommand("claude@10.0.0.1", "ls -la /tmp")
|
||||
|
||||
assert.Equal(t, "ssh", cmd.Path[len(cmd.Path)-3:])
|
||||
args := cmd.Args
|
||||
assert.Contains(t, args, "-o")
|
||||
assert.Contains(t, args, "StrictHostKeyChecking=yes")
|
||||
assert.Contains(t, args, "BatchMode=yes")
|
||||
assert.Contains(t, args, "ConnectTimeout=10")
|
||||
assert.Contains(t, args, "claude@10.0.0.1")
|
||||
assert.Contains(t, args, "ls -la /tmp")
|
||||
}
|
||||
|
||||
func TestMaskToken_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
token string
|
||||
expected string
|
||||
}{
|
||||
{name: "normal token", token: "abcdefghijkl", expected: "abcd****ijkl"},
|
||||
{name: "exactly 8 chars", token: "12345678", expected: "1234****5678"},
|
||||
{name: "short token", token: "abc", expected: "*****"},
|
||||
{name: "empty token", token: "", expected: "*****"},
|
||||
{name: "7 chars", token: "1234567", expected: "*****"},
|
||||
{name: "long token", token: "ghp_1234567890abcdef", expected: "ghp_****cdef"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := MaskToken(tt.token)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
3
go.sum
3
go.sum
|
|
@ -68,7 +68,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
|
|||
|
|
@ -1,389 +0,0 @@
|
|||
package jobrunner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// --- Journal: NewJournal error path ---
|
||||
|
||||
func TestNewJournal_Bad_EmptyBaseDir(t *testing.T) {
|
||||
_, err := NewJournal("")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "base directory is required")
|
||||
}
|
||||
|
||||
func TestNewJournal_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
j, err := NewJournal(dir)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, j)
|
||||
}
|
||||
|
||||
// --- Journal: sanitizePathComponent additional cases ---
|
||||
|
||||
func TestSanitizePathComponent_Good_ValidNames(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{"host-uk", "host-uk"},
|
||||
{"core", "core"},
|
||||
{"my_repo", "my_repo"},
|
||||
{"repo.v2", "repo.v2"},
|
||||
{"A123", "A123"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
got, err := sanitizePathComponent(tc.input)
|
||||
require.NoError(t, err, "input: %q", tc.input)
|
||||
assert.Equal(t, tc.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizePathComponent_Bad_Invalid(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
}{
|
||||
{"empty", ""},
|
||||
{"spaces", " "},
|
||||
{"dotdot", ".."},
|
||||
{"dot", "."},
|
||||
{"slash", "foo/bar"},
|
||||
{"backslash", `foo\bar`},
|
||||
{"special", "org$bad"},
|
||||
{"leading-dot", ".hidden"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
_, err := sanitizePathComponent(tc.input)
|
||||
assert.Error(t, err, "input: %q", tc.input)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// --- Journal: Append with readonly directory ---
|
||||
|
||||
func TestJournal_Append_Bad_ReadonlyDir(t *testing.T) {
|
||||
// Create a dir that we then make readonly (only works as non-root).
|
||||
dir := t.TempDir()
|
||||
readonlyDir := filepath.Join(dir, "readonly")
|
||||
require.NoError(t, os.MkdirAll(readonlyDir, 0o755))
|
||||
require.NoError(t, os.Chmod(readonlyDir, 0o444))
|
||||
t.Cleanup(func() { _ = os.Chmod(readonlyDir, 0o755) })
|
||||
|
||||
j, err := NewJournal(readonlyDir)
|
||||
require.NoError(t, err)
|
||||
|
||||
signal := &PipelineSignal{
|
||||
RepoOwner: "test-owner",
|
||||
RepoName: "test-repo",
|
||||
}
|
||||
result := &ActionResult{
|
||||
Action: "test",
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
|
||||
err = j.Append(signal, result)
|
||||
// Should fail because MkdirAll cannot create subdirectories in readonly dir.
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// --- Poller: error-returning source ---
|
||||
|
||||
type errorSource struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (e *errorSource) Name() string { return e.name }
|
||||
func (e *errorSource) Poll(_ context.Context) ([]*PipelineSignal, error) {
|
||||
return nil, fmt.Errorf("poll error")
|
||||
}
|
||||
func (e *errorSource) Report(_ context.Context, _ *ActionResult) error { return nil }
|
||||
|
||||
func TestPoller_RunOnce_Good_SourceError(t *testing.T) {
|
||||
src := &errorSource{name: "broken-source"}
|
||||
handler := &mockHandler{name: "test"}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
Handlers: []JobHandler{handler},
|
||||
})
|
||||
|
||||
err := p.RunOnce(context.Background())
|
||||
require.NoError(t, err) // Poll errors are logged, not returned
|
||||
|
||||
handler.mu.Lock()
|
||||
defer handler.mu.Unlock()
|
||||
assert.Empty(t, handler.executed, "handler should not be called when poll fails")
|
||||
}
|
||||
|
||||
// --- Poller: error-returning handler ---
|
||||
|
||||
type errorHandler struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (e *errorHandler) Name() string { return e.name }
|
||||
func (e *errorHandler) Match(_ *PipelineSignal) bool { return true }
|
||||
func (e *errorHandler) Execute(_ context.Context, _ *PipelineSignal) (*ActionResult, error) {
|
||||
return nil, fmt.Errorf("handler error")
|
||||
}
|
||||
|
||||
func TestPoller_RunOnce_Good_HandlerError(t *testing.T) {
|
||||
sig := &PipelineSignal{
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 1,
|
||||
PRNumber: 1,
|
||||
RepoOwner: "test",
|
||||
RepoName: "repo",
|
||||
}
|
||||
|
||||
src := &mockSource{
|
||||
name: "test-source",
|
||||
signals: []*PipelineSignal{sig},
|
||||
}
|
||||
|
||||
handler := &errorHandler{name: "broken-handler"}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
Handlers: []JobHandler{handler},
|
||||
})
|
||||
|
||||
err := p.RunOnce(context.Background())
|
||||
require.NoError(t, err) // Handler errors are logged, not returned
|
||||
|
||||
// Source should not have received a report (handler errored out).
|
||||
src.mu.Lock()
|
||||
defer src.mu.Unlock()
|
||||
assert.Empty(t, src.reports)
|
||||
}
|
||||
|
||||
// --- Poller: with Journal integration ---
|
||||
|
||||
func TestPoller_RunOnce_Good_WithJournal(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
journal, err := NewJournal(dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
sig := &PipelineSignal{
|
||||
EpicNumber: 10,
|
||||
ChildNumber: 3,
|
||||
PRNumber: 55,
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core",
|
||||
PRState: "OPEN",
|
||||
CheckStatus: "SUCCESS",
|
||||
Mergeable: "MERGEABLE",
|
||||
}
|
||||
|
||||
src := &mockSource{
|
||||
name: "test-source",
|
||||
signals: []*PipelineSignal{sig},
|
||||
}
|
||||
|
||||
handler := &mockHandler{
|
||||
name: "test-handler",
|
||||
matchFn: func(s *PipelineSignal) bool {
|
||||
return true
|
||||
},
|
||||
}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
Handlers: []JobHandler{handler},
|
||||
Journal: journal,
|
||||
})
|
||||
|
||||
err = p.RunOnce(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
handler.mu.Lock()
|
||||
require.Len(t, handler.executed, 1)
|
||||
handler.mu.Unlock()
|
||||
|
||||
// Verify the journal file was written.
|
||||
date := time.Now().UTC().Format("2006-01-02")
|
||||
journalPath := filepath.Join(dir, "host-uk", "core", date+".jsonl")
|
||||
_, statErr := os.Stat(journalPath)
|
||||
assert.NoError(t, statErr, "journal file should exist at %s", journalPath)
|
||||
}
|
||||
|
||||
// --- Poller: error-returning Report ---
|
||||
|
||||
type reportErrorSource struct {
|
||||
name string
|
||||
signals []*PipelineSignal
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (r *reportErrorSource) Name() string { return r.name }
|
||||
func (r *reportErrorSource) Poll(_ context.Context) ([]*PipelineSignal, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
return r.signals, nil
|
||||
}
|
||||
func (r *reportErrorSource) Report(_ context.Context, _ *ActionResult) error {
|
||||
return fmt.Errorf("report error")
|
||||
}
|
||||
|
||||
func TestPoller_RunOnce_Good_ReportError(t *testing.T) {
|
||||
sig := &PipelineSignal{
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 1,
|
||||
PRNumber: 1,
|
||||
RepoOwner: "test",
|
||||
RepoName: "repo",
|
||||
}
|
||||
|
||||
src := &reportErrorSource{
|
||||
name: "report-fail-source",
|
||||
signals: []*PipelineSignal{sig},
|
||||
}
|
||||
|
||||
handler := &mockHandler{
|
||||
name: "test-handler",
|
||||
matchFn: func(s *PipelineSignal) bool { return true },
|
||||
}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
Handlers: []JobHandler{handler},
|
||||
})
|
||||
|
||||
err := p.RunOnce(context.Background())
|
||||
require.NoError(t, err) // Report errors are logged, not returned
|
||||
|
||||
handler.mu.Lock()
|
||||
defer handler.mu.Unlock()
|
||||
assert.Len(t, handler.executed, 1, "handler should still execute even though report fails")
|
||||
}
|
||||
|
||||
// --- Poller: multiple sources and handlers ---
|
||||
|
||||
func TestPoller_RunOnce_Good_MultipleSources(t *testing.T) {
|
||||
sig1 := &PipelineSignal{
|
||||
EpicNumber: 1, ChildNumber: 1, PRNumber: 1,
|
||||
RepoOwner: "org1", RepoName: "repo1",
|
||||
}
|
||||
sig2 := &PipelineSignal{
|
||||
EpicNumber: 2, ChildNumber: 2, PRNumber: 2,
|
||||
RepoOwner: "org2", RepoName: "repo2",
|
||||
}
|
||||
|
||||
src1 := &mockSource{name: "source-1", signals: []*PipelineSignal{sig1}}
|
||||
src2 := &mockSource{name: "source-2", signals: []*PipelineSignal{sig2}}
|
||||
|
||||
handler := &mockHandler{
|
||||
name: "catch-all",
|
||||
matchFn: func(s *PipelineSignal) bool { return true },
|
||||
}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src1, src2},
|
||||
Handlers: []JobHandler{handler},
|
||||
})
|
||||
|
||||
err := p.RunOnce(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
handler.mu.Lock()
|
||||
defer handler.mu.Unlock()
|
||||
assert.Len(t, handler.executed, 2)
|
||||
}
|
||||
|
||||
// --- Poller: Run with immediate cancellation ---
|
||||
|
||||
func TestPoller_Run_Good_ImmediateCancel(t *testing.T) {
|
||||
src := &mockSource{name: "source", signals: nil}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
PollInterval: 1 * time.Hour, // long interval
|
||||
})
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
// Cancel after the first RunOnce completes.
|
||||
go func() {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
cancel()
|
||||
}()
|
||||
|
||||
err := p.Run(ctx)
|
||||
assert.ErrorIs(t, err, context.Canceled)
|
||||
assert.Equal(t, 1, p.Cycle()) // One cycle from the initial RunOnce
|
||||
}
|
||||
|
||||
// --- Journal: Append with journal error logging ---
|
||||
|
||||
func TestPoller_RunOnce_Good_JournalAppendError(t *testing.T) {
|
||||
// Use a directory that will cause journal writes to fail.
|
||||
dir := t.TempDir()
|
||||
journal, err := NewJournal(dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make the journal directory read-only to trigger append errors.
|
||||
require.NoError(t, os.Chmod(dir, 0o444))
|
||||
t.Cleanup(func() { _ = os.Chmod(dir, 0o755) })
|
||||
|
||||
sig := &PipelineSignal{
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 1,
|
||||
PRNumber: 1,
|
||||
RepoOwner: "test",
|
||||
RepoName: "repo",
|
||||
}
|
||||
|
||||
src := &mockSource{
|
||||
name: "test-source",
|
||||
signals: []*PipelineSignal{sig},
|
||||
}
|
||||
|
||||
handler := &mockHandler{
|
||||
name: "test-handler",
|
||||
matchFn: func(s *PipelineSignal) bool { return true },
|
||||
}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
Handlers: []JobHandler{handler},
|
||||
Journal: journal,
|
||||
})
|
||||
|
||||
err = p.RunOnce(context.Background())
|
||||
// Journal errors are logged, not returned.
|
||||
require.NoError(t, err)
|
||||
|
||||
handler.mu.Lock()
|
||||
defer handler.mu.Unlock()
|
||||
assert.Len(t, handler.executed, 1, "handler should still execute even when journal fails")
|
||||
}
|
||||
|
||||
// --- Poller: Cycle counter increments ---
|
||||
|
||||
func TestPoller_Cycle_Good_Increments(t *testing.T) {
|
||||
src := &mockSource{name: "source", signals: nil}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
})
|
||||
|
||||
assert.Equal(t, 0, p.Cycle())
|
||||
|
||||
_ = p.RunOnce(context.Background())
|
||||
assert.Equal(t, 1, p.Cycle())
|
||||
|
||||
_ = p.RunOnce(context.Background())
|
||||
assert.Equal(t, 2, p.Cycle())
|
||||
}
|
||||
|
|
@ -1,114 +0,0 @@
|
|||
package forgejo
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
forgejosdk "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// epicChildRe matches checklist items: - [ ] #42 or - [x] #42
var epicChildRe = regexp.MustCompile(`- \[([ x])\] #(\d+)`)

// parseEpicChildren extracts child issue numbers from an epic body's checklist,
// split into unchecked ("[ ]") and checked ("[x]") items.
func parseEpicChildren(body string) (unchecked []int, checked []int) {
	for _, match := range epicChildRe.FindAllStringSubmatch(body, -1) {
		n, convErr := strconv.Atoi(match[2])
		if convErr != nil {
			continue
		}
		switch match[1] {
		case "x":
			checked = append(checked, n)
		default:
			unchecked = append(unchecked, n)
		}
	}
	return unchecked, checked
}
|
||||
|
||||
// linkedPRRe matches "#N" references in PR bodies.
|
||||
var linkedPRRe = regexp.MustCompile(`#(\d+)`)
|
||||
|
||||
// findLinkedPR finds the first PR whose body references the given issue number.
|
||||
func findLinkedPR(prs []*forgejosdk.PullRequest, issueNumber int) *forgejosdk.PullRequest {
|
||||
target := strconv.Itoa(issueNumber)
|
||||
for _, pr := range prs {
|
||||
matches := linkedPRRe.FindAllStringSubmatch(pr.Body, -1)
|
||||
for _, m := range matches {
|
||||
if m[1] == target {
|
||||
return pr
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mapPRState maps Forgejo's PR state and merged flag to a canonical string.
|
||||
func mapPRState(pr *forgejosdk.PullRequest) string {
|
||||
if pr.HasMerged {
|
||||
return "MERGED"
|
||||
}
|
||||
switch pr.State {
|
||||
case forgejosdk.StateOpen:
|
||||
return "OPEN"
|
||||
case forgejosdk.StateClosed:
|
||||
return "CLOSED"
|
||||
default:
|
||||
return "CLOSED"
|
||||
}
|
||||
}
|
||||
|
||||
// mapMergeable maps Forgejo's boolean Mergeable field to a canonical string.
|
||||
func mapMergeable(pr *forgejosdk.PullRequest) string {
|
||||
if pr.HasMerged {
|
||||
return "UNKNOWN"
|
||||
}
|
||||
if pr.Mergeable {
|
||||
return "MERGEABLE"
|
||||
}
|
||||
return "CONFLICTING"
|
||||
}
|
||||
|
||||
// mapCombinedStatus maps a Forgejo CombinedStatus to SUCCESS/FAILURE/PENDING.
|
||||
func mapCombinedStatus(cs *forgejosdk.CombinedStatus) string {
|
||||
if cs == nil || cs.TotalCount == 0 {
|
||||
return "PENDING"
|
||||
}
|
||||
switch cs.State {
|
||||
case forgejosdk.StatusSuccess:
|
||||
return "SUCCESS"
|
||||
case forgejosdk.StatusFailure, forgejosdk.StatusError:
|
||||
return "FAILURE"
|
||||
default:
|
||||
return "PENDING"
|
||||
}
|
||||
}
|
||||
|
||||
// buildSignal creates a PipelineSignal from Forgejo API data.
|
||||
func buildSignal(
|
||||
owner, repo string,
|
||||
epicNumber, childNumber int,
|
||||
pr *forgejosdk.PullRequest,
|
||||
checkStatus string,
|
||||
) *jobrunner.PipelineSignal {
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
EpicNumber: epicNumber,
|
||||
ChildNumber: childNumber,
|
||||
PRNumber: int(pr.Index),
|
||||
RepoOwner: owner,
|
||||
RepoName: repo,
|
||||
PRState: mapPRState(pr),
|
||||
IsDraft: false, // SDK v2.2.0 doesn't expose Draft; treat as non-draft
|
||||
Mergeable: mapMergeable(pr),
|
||||
CheckStatus: checkStatus,
|
||||
}
|
||||
|
||||
if pr.Head != nil {
|
||||
sig.LastCommitSHA = pr.Head.Sha
|
||||
}
|
||||
|
||||
return sig
|
||||
}
|
||||
|
|
@ -1,205 +0,0 @@
|
|||
package forgejo
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
forgejosdk "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMapPRState_Good_Open(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{State: forgejosdk.StateOpen, HasMerged: false}
|
||||
assert.Equal(t, "OPEN", mapPRState(pr))
|
||||
}
|
||||
|
||||
func TestMapPRState_Good_Merged(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{State: forgejosdk.StateClosed, HasMerged: true}
|
||||
assert.Equal(t, "MERGED", mapPRState(pr))
|
||||
}
|
||||
|
||||
func TestMapPRState_Good_Closed(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{State: forgejosdk.StateClosed, HasMerged: false}
|
||||
assert.Equal(t, "CLOSED", mapPRState(pr))
|
||||
}
|
||||
|
||||
func TestMapPRState_Good_UnknownState(t *testing.T) {
|
||||
// Any unknown state should default to CLOSED.
|
||||
pr := &forgejosdk.PullRequest{State: "weird", HasMerged: false}
|
||||
assert.Equal(t, "CLOSED", mapPRState(pr))
|
||||
}
|
||||
|
||||
func TestMapMergeable_Good_Mergeable(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{Mergeable: true, HasMerged: false}
|
||||
assert.Equal(t, "MERGEABLE", mapMergeable(pr))
|
||||
}
|
||||
|
||||
func TestMapMergeable_Good_Conflicting(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{Mergeable: false, HasMerged: false}
|
||||
assert.Equal(t, "CONFLICTING", mapMergeable(pr))
|
||||
}
|
||||
|
||||
func TestMapMergeable_Good_Merged(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{HasMerged: true}
|
||||
assert.Equal(t, "UNKNOWN", mapMergeable(pr))
|
||||
}
|
||||
|
||||
func TestMapCombinedStatus_Good_Success(t *testing.T) {
|
||||
cs := &forgejosdk.CombinedStatus{
|
||||
State: forgejosdk.StatusSuccess,
|
||||
TotalCount: 1,
|
||||
}
|
||||
assert.Equal(t, "SUCCESS", mapCombinedStatus(cs))
|
||||
}
|
||||
|
||||
func TestMapCombinedStatus_Good_Failure(t *testing.T) {
|
||||
cs := &forgejosdk.CombinedStatus{
|
||||
State: forgejosdk.StatusFailure,
|
||||
TotalCount: 1,
|
||||
}
|
||||
assert.Equal(t, "FAILURE", mapCombinedStatus(cs))
|
||||
}
|
||||
|
||||
func TestMapCombinedStatus_Good_Error(t *testing.T) {
|
||||
cs := &forgejosdk.CombinedStatus{
|
||||
State: forgejosdk.StatusError,
|
||||
TotalCount: 1,
|
||||
}
|
||||
assert.Equal(t, "FAILURE", mapCombinedStatus(cs))
|
||||
}
|
||||
|
||||
func TestMapCombinedStatus_Good_Pending(t *testing.T) {
|
||||
cs := &forgejosdk.CombinedStatus{
|
||||
State: forgejosdk.StatusPending,
|
||||
TotalCount: 1,
|
||||
}
|
||||
assert.Equal(t, "PENDING", mapCombinedStatus(cs))
|
||||
}
|
||||
|
||||
func TestMapCombinedStatus_Good_Nil(t *testing.T) {
|
||||
assert.Equal(t, "PENDING", mapCombinedStatus(nil))
|
||||
}
|
||||
|
||||
func TestMapCombinedStatus_Good_ZeroCount(t *testing.T) {
|
||||
cs := &forgejosdk.CombinedStatus{
|
||||
State: forgejosdk.StatusSuccess,
|
||||
TotalCount: 0,
|
||||
}
|
||||
assert.Equal(t, "PENDING", mapCombinedStatus(cs))
|
||||
}
|
||||
|
||||
func TestParseEpicChildren_Good_Mixed(t *testing.T) {
|
||||
body := "## Sprint\n- [x] #1\n- [ ] #2\n- [x] #3\n- [ ] #4\nSome text\n"
|
||||
unchecked, checked := parseEpicChildren(body)
|
||||
assert.Equal(t, []int{2, 4}, unchecked)
|
||||
assert.Equal(t, []int{1, 3}, checked)
|
||||
}
|
||||
|
||||
func TestParseEpicChildren_Good_NoCheckboxes(t *testing.T) {
|
||||
body := "This is just a normal issue with no checkboxes."
|
||||
unchecked, checked := parseEpicChildren(body)
|
||||
assert.Nil(t, unchecked)
|
||||
assert.Nil(t, checked)
|
||||
}
|
||||
|
||||
func TestParseEpicChildren_Good_AllChecked(t *testing.T) {
|
||||
body := "- [x] #10\n- [x] #20\n"
|
||||
unchecked, checked := parseEpicChildren(body)
|
||||
assert.Nil(t, unchecked)
|
||||
assert.Equal(t, []int{10, 20}, checked)
|
||||
}
|
||||
|
||||
func TestParseEpicChildren_Good_AllUnchecked(t *testing.T) {
|
||||
body := "- [ ] #5\n- [ ] #6\n"
|
||||
unchecked, checked := parseEpicChildren(body)
|
||||
assert.Equal(t, []int{5, 6}, unchecked)
|
||||
assert.Nil(t, checked)
|
||||
}
|
||||
|
||||
func TestFindLinkedPR_Good(t *testing.T) {
|
||||
prs := []*forgejosdk.PullRequest{
|
||||
{Index: 10, Body: "Fixes #5"},
|
||||
{Index: 11, Body: "Resolves #7"},
|
||||
{Index: 12, Body: "Nothing here"},
|
||||
}
|
||||
|
||||
pr := findLinkedPR(prs, 7)
|
||||
assert.NotNil(t, pr)
|
||||
assert.Equal(t, int64(11), pr.Index)
|
||||
}
|
||||
|
||||
func TestFindLinkedPR_Good_NotFound(t *testing.T) {
|
||||
prs := []*forgejosdk.PullRequest{
|
||||
{Index: 10, Body: "Fixes #5"},
|
||||
}
|
||||
pr := findLinkedPR(prs, 99)
|
||||
assert.Nil(t, pr)
|
||||
}
|
||||
|
||||
func TestFindLinkedPR_Good_Nil(t *testing.T) {
|
||||
pr := findLinkedPR(nil, 1)
|
||||
assert.Nil(t, pr)
|
||||
}
|
||||
|
||||
func TestBuildSignal_Good(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{
|
||||
Index: 42,
|
||||
State: forgejosdk.StateOpen,
|
||||
Mergeable: true,
|
||||
Head: &forgejosdk.PRBranchInfo{Sha: "deadbeef"},
|
||||
}
|
||||
|
||||
sig := buildSignal("org", "repo", 10, 5, pr, "SUCCESS")
|
||||
|
||||
assert.Equal(t, 10, sig.EpicNumber)
|
||||
assert.Equal(t, 5, sig.ChildNumber)
|
||||
assert.Equal(t, 42, sig.PRNumber)
|
||||
assert.Equal(t, "org", sig.RepoOwner)
|
||||
assert.Equal(t, "repo", sig.RepoName)
|
||||
assert.Equal(t, "OPEN", sig.PRState)
|
||||
assert.Equal(t, "MERGEABLE", sig.Mergeable)
|
||||
assert.Equal(t, "SUCCESS", sig.CheckStatus)
|
||||
assert.Equal(t, "deadbeef", sig.LastCommitSHA)
|
||||
assert.False(t, sig.IsDraft)
|
||||
}
|
||||
|
||||
func TestBuildSignal_Good_NilHead(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{
|
||||
Index: 1,
|
||||
State: forgejosdk.StateClosed,
|
||||
HasMerged: true,
|
||||
}
|
||||
|
||||
sig := buildSignal("org", "repo", 1, 2, pr, "PENDING")
|
||||
assert.Equal(t, "", sig.LastCommitSHA)
|
||||
assert.Equal(t, "MERGED", sig.PRState)
|
||||
}
|
||||
|
||||
func TestSplitRepo_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
owner string
|
||||
repo string
|
||||
err bool
|
||||
}{
|
||||
{"host-uk/core", "host-uk", "core", false},
|
||||
{"a/b", "a", "b", false},
|
||||
{"org/repo-name", "org", "repo-name", false},
|
||||
{"invalid", "", "", true},
|
||||
{"", "", "", true},
|
||||
{"/repo", "", "", true},
|
||||
{"owner/", "", "", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
owner, repo, err := splitRepo(tt.input)
|
||||
if tt.err {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.owner, owner)
|
||||
assert.Equal(t, tt.repo, repo)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,173 +0,0 @@
|
|||
package forgejo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
"forge.lthn.ai/core/go/pkg/log"
|
||||
)
|
||||
|
||||
// Config configures a ForgejoSource.
|
||||
type Config struct {
|
||||
Repos []string // "owner/repo" format
|
||||
}
|
||||
|
||||
// ForgejoSource polls a Forgejo instance for pipeline signals from epic issues.
|
||||
type ForgejoSource struct {
|
||||
repos []string
|
||||
forge *forge.Client
|
||||
}
|
||||
|
||||
// New creates a ForgejoSource using the given forge client.
|
||||
func New(cfg Config, client *forge.Client) *ForgejoSource {
|
||||
return &ForgejoSource{
|
||||
repos: cfg.Repos,
|
||||
forge: client,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the source identifier.
|
||||
func (s *ForgejoSource) Name() string {
|
||||
return "forgejo"
|
||||
}
|
||||
|
||||
// Poll fetches epics and their linked PRs from all configured repositories,
|
||||
// returning a PipelineSignal for each unchecked child that has a linked PR.
|
||||
func (s *ForgejoSource) Poll(ctx context.Context) ([]*jobrunner.PipelineSignal, error) {
|
||||
var signals []*jobrunner.PipelineSignal
|
||||
|
||||
for _, repoFull := range s.repos {
|
||||
owner, repo, err := splitRepo(repoFull)
|
||||
if err != nil {
|
||||
log.Error("invalid repo format", "repo", repoFull, "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
repoSignals, err := s.pollRepo(ctx, owner, repo)
|
||||
if err != nil {
|
||||
log.Error("poll repo failed", "repo", repoFull, "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
signals = append(signals, repoSignals...)
|
||||
}
|
||||
|
||||
return signals, nil
|
||||
}
|
||||
|
||||
// Report posts the action result as a comment on the epic issue.
|
||||
func (s *ForgejoSource) Report(ctx context.Context, result *jobrunner.ActionResult) error {
|
||||
if result == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
status := "succeeded"
|
||||
if !result.Success {
|
||||
status = "failed"
|
||||
}
|
||||
|
||||
body := fmt.Sprintf("**jobrunner** `%s` %s for #%d (PR #%d)", result.Action, status, result.ChildNumber, result.PRNumber)
|
||||
if result.Error != "" {
|
||||
body += fmt.Sprintf("\n\n```\n%s\n```", result.Error)
|
||||
}
|
||||
|
||||
return s.forge.CreateIssueComment(result.RepoOwner, result.RepoName, int64(result.EpicNumber), body)
|
||||
}
|
||||
|
||||
// pollRepo fetches epics and PRs for a single repository.
|
||||
func (s *ForgejoSource) pollRepo(_ context.Context, owner, repo string) ([]*jobrunner.PipelineSignal, error) {
|
||||
// Fetch epic issues (label=epic, state=open).
|
||||
issues, err := s.forge.ListIssues(owner, repo, forge.ListIssuesOpts{State: "open"})
|
||||
if err != nil {
|
||||
return nil, log.E("forgejo.pollRepo", "fetch issues", err)
|
||||
}
|
||||
|
||||
// Filter to epics only.
|
||||
var epics []epicInfo
|
||||
for _, issue := range issues {
|
||||
for _, label := range issue.Labels {
|
||||
if label.Name == "epic" {
|
||||
epics = append(epics, epicInfo{
|
||||
Number: int(issue.Index),
|
||||
Body: issue.Body,
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(epics) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Fetch all open PRs (and also merged/closed to catch MERGED state).
|
||||
prs, err := s.forge.ListPullRequests(owner, repo, "all")
|
||||
if err != nil {
|
||||
return nil, log.E("forgejo.pollRepo", "fetch PRs", err)
|
||||
}
|
||||
|
||||
var signals []*jobrunner.PipelineSignal
|
||||
|
||||
for _, epic := range epics {
|
||||
unchecked, _ := parseEpicChildren(epic.Body)
|
||||
for _, childNum := range unchecked {
|
||||
pr := findLinkedPR(prs, childNum)
|
||||
|
||||
if pr == nil {
|
||||
// No PR yet — check if the child issue is assigned (needs coding).
|
||||
childIssue, err := s.forge.GetIssue(owner, repo, int64(childNum))
|
||||
if err != nil {
|
||||
log.Error("fetch child issue failed", "repo", owner+"/"+repo, "issue", childNum, "err", err)
|
||||
continue
|
||||
}
|
||||
if len(childIssue.Assignees) > 0 && childIssue.Assignees[0].UserName != "" {
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
EpicNumber: epic.Number,
|
||||
ChildNumber: childNum,
|
||||
RepoOwner: owner,
|
||||
RepoName: repo,
|
||||
NeedsCoding: true,
|
||||
Assignee: childIssue.Assignees[0].UserName,
|
||||
IssueTitle: childIssue.Title,
|
||||
IssueBody: childIssue.Body,
|
||||
}
|
||||
signals = append(signals, sig)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Get combined commit status for the PR's head SHA.
|
||||
checkStatus := "PENDING"
|
||||
if pr.Head != nil && pr.Head.Sha != "" {
|
||||
cs, err := s.forge.GetCombinedStatus(owner, repo, pr.Head.Sha)
|
||||
if err != nil {
|
||||
log.Error("fetch combined status failed", "repo", owner+"/"+repo, "sha", pr.Head.Sha, "err", err)
|
||||
} else {
|
||||
checkStatus = mapCombinedStatus(cs)
|
||||
}
|
||||
}
|
||||
|
||||
sig := buildSignal(owner, repo, epic.Number, childNum, pr, checkStatus)
|
||||
signals = append(signals, sig)
|
||||
}
|
||||
}
|
||||
|
||||
return signals, nil
|
||||
}
|
||||
|
||||
type epicInfo struct {
|
||||
Number int
|
||||
Body string
|
||||
}
|
||||
|
||||
// splitRepo parses "owner/repo" into its components.
|
||||
func splitRepo(full string) (string, string, error) {
|
||||
parts := strings.SplitN(full, "/", 2)
|
||||
if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
|
||||
return "", "", log.E("forgejo.splitRepo", fmt.Sprintf("expected owner/repo format, got %q", full), nil)
|
||||
}
|
||||
return parts[0], parts[1], nil
|
||||
}
|
||||
|
|
@ -1,320 +0,0 @@
|
|||
package forgejo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
func TestForgejoSource_Poll_Good_InvalidRepo(t *testing.T) {
|
||||
// Invalid repo format should be logged and skipped, not error.
|
||||
s := New(Config{Repos: []string{"invalid-no-slash"}}, nil)
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, signals)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_MultipleRepos(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
// Return one epic per repo.
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #2\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 10,
|
||||
"body": "Fixes #2",
|
||||
"state": "open",
|
||||
"mergeable": true,
|
||||
"merged": false,
|
||||
"head": map[string]string{"sha": "abc", "ref": "fix", "label": "fix"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
case strings.Contains(path, "/status"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"state": "success",
|
||||
"total_count": 1,
|
||||
"statuses": []any{},
|
||||
})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org-a/repo-1", "org-b/repo-2"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, signals, 2)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_NeedsCoding(t *testing.T) {
|
||||
// When a child issue has no linked PR but is assigned, NeedsCoding should be true.
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues/5"):
|
||||
// Child issue with assignee.
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": 5,
|
||||
"title": "Implement feature",
|
||||
"body": "Please implement this.",
|
||||
"state": "open",
|
||||
"assignees": []map[string]any{{"login": "darbs-claude", "username": "darbs-claude"}},
|
||||
})
|
||||
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #5\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
// No PRs linked.
|
||||
_ = json.NewEncoder(w).Encode([]any{})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"test-org/test-repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 1)
|
||||
sig := signals[0]
|
||||
assert.True(t, sig.NeedsCoding)
|
||||
assert.Equal(t, "darbs-claude", sig.Assignee)
|
||||
assert.Equal(t, "Implement feature", sig.IssueTitle)
|
||||
assert.Equal(t, "Please implement this.", sig.IssueBody)
|
||||
assert.Equal(t, 5, sig.ChildNumber)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_MergedPR(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #3\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 20,
|
||||
"body": "Fixes #3",
|
||||
"state": "closed",
|
||||
"mergeable": false,
|
||||
"merged": true,
|
||||
"head": map[string]string{"sha": "merged123", "ref": "fix", "label": "fix"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
case strings.Contains(path, "/status"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"state": "success",
|
||||
"total_count": 1,
|
||||
"statuses": []any{},
|
||||
})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 1)
|
||||
assert.Equal(t, "MERGED", signals[0].PRState)
|
||||
assert.Equal(t, "UNKNOWN", signals[0].Mergeable)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_NoHeadSHA(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #3\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 20,
|
||||
"body": "Fixes #3",
|
||||
"state": "open",
|
||||
"mergeable": true,
|
||||
"merged": false,
|
||||
// No head field.
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 1)
|
||||
// Without head SHA, check status stays PENDING.
|
||||
assert.Equal(t, "PENDING", signals[0].CheckStatus)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Report_Good_Nil(t *testing.T) {
|
||||
s := New(Config{}, nil)
|
||||
err := s.Report(context.Background(), nil)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Report_Good_Failed(t *testing.T) {
|
||||
var capturedBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
capturedBody = body["body"]
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{}, client)
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "dispatch",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 2,
|
||||
PRNumber: 3,
|
||||
Success: false,
|
||||
Error: "transfer failed",
|
||||
}
|
||||
|
||||
err := s.Report(context.Background(), result)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, capturedBody, "failed")
|
||||
assert.Contains(t, capturedBody, "transfer failed")
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_APIErrors(t *testing.T) {
|
||||
// When the issues API fails, poll should continue with other repos.
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, signals)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_EmptyRepos(t *testing.T) {
|
||||
s := New(Config{Repos: []string{}}, nil)
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, signals)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_NonEpicIssues(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
// Issues without the "epic" label.
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #2\n",
|
||||
"labels": []map[string]string{{"name": "bug"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, signals, "non-epic issues should not generate signals")
|
||||
}
|
||||
|
|
@ -1,672 +0,0 @@
|
|||
package forgejo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
forgejosdk "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// --- Signal parsing and filtering tests ---
|
||||
|
||||
func TestParseEpicChildren_Good_EmptyBody(t *testing.T) {
|
||||
unchecked, checked := parseEpicChildren("")
|
||||
assert.Nil(t, unchecked)
|
||||
assert.Nil(t, checked)
|
||||
}
|
||||
|
||||
func TestParseEpicChildren_Good_MixedContent(t *testing.T) {
|
||||
// Checkboxes mixed with regular markdown content.
|
||||
body := `## Epic: Refactor Auth
|
||||
|
||||
Some description of the epic.
|
||||
|
||||
### Tasks
|
||||
- [x] #10 — Migrate session store
|
||||
- [ ] #11 — Update OAuth flow
|
||||
- [x] #12 — Fix token refresh
|
||||
- [ ] #13 — Add 2FA support
|
||||
|
||||
### Notes
|
||||
This is a note, not a checkbox.
|
||||
- Regular list item
|
||||
- Another item
|
||||
`
|
||||
unchecked, checked := parseEpicChildren(body)
|
||||
assert.Equal(t, []int{11, 13}, unchecked)
|
||||
assert.Equal(t, []int{10, 12}, checked)
|
||||
}
|
||||
|
||||
func TestParseEpicChildren_Good_LargeIssueNumbers(t *testing.T) {
|
||||
body := "- [ ] #9999\n- [x] #10000\n"
|
||||
unchecked, checked := parseEpicChildren(body)
|
||||
assert.Equal(t, []int{9999}, unchecked)
|
||||
assert.Equal(t, []int{10000}, checked)
|
||||
}
|
||||
|
||||
func TestParseEpicChildren_Good_ConsecutiveCheckboxes(t *testing.T) {
|
||||
body := "- [ ] #1\n- [ ] #2\n- [ ] #3\n- [ ] #4\n- [ ] #5\n"
|
||||
unchecked, checked := parseEpicChildren(body)
|
||||
assert.Equal(t, []int{1, 2, 3, 4, 5}, unchecked)
|
||||
assert.Nil(t, checked)
|
||||
}
|
||||
|
||||
// --- findLinkedPR tests ---
|
||||
|
||||
func TestFindLinkedPR_Good_MultipleReferencesInBody(t *testing.T) {
|
||||
prs := []*forgejosdk.PullRequest{
|
||||
{Index: 10, Body: "Fixes #5 and relates to #7"},
|
||||
{Index: 11, Body: "Closes #8"},
|
||||
}
|
||||
|
||||
// Should find PR #10 because it references #7.
|
||||
pr := findLinkedPR(prs, 7)
|
||||
assert.NotNil(t, pr)
|
||||
assert.Equal(t, int64(10), pr.Index)
|
||||
|
||||
// Should find PR #10 because it references #5.
|
||||
pr = findLinkedPR(prs, 5)
|
||||
assert.NotNil(t, pr)
|
||||
assert.Equal(t, int64(10), pr.Index)
|
||||
}
|
||||
|
||||
func TestFindLinkedPR_Good_EmptyBodyPR(t *testing.T) {
|
||||
prs := []*forgejosdk.PullRequest{
|
||||
{Index: 10, Body: ""},
|
||||
{Index: 11, Body: "Fixes #7"},
|
||||
}
|
||||
|
||||
pr := findLinkedPR(prs, 7)
|
||||
assert.NotNil(t, pr)
|
||||
assert.Equal(t, int64(11), pr.Index)
|
||||
}
|
||||
|
||||
func TestFindLinkedPR_Good_FirstMatchWins(t *testing.T) {
|
||||
// Both PRs reference #7, first one should win.
|
||||
prs := []*forgejosdk.PullRequest{
|
||||
{Index: 10, Body: "Fixes #7"},
|
||||
{Index: 11, Body: "Also fixes #7"},
|
||||
}
|
||||
|
||||
pr := findLinkedPR(prs, 7)
|
||||
assert.NotNil(t, pr)
|
||||
assert.Equal(t, int64(10), pr.Index)
|
||||
}
|
||||
|
||||
func TestFindLinkedPR_Good_EmptySlice(t *testing.T) {
|
||||
prs := []*forgejosdk.PullRequest{}
|
||||
pr := findLinkedPR(prs, 1)
|
||||
assert.Nil(t, pr)
|
||||
}
|
||||
|
||||
// --- mapPRState edge case ---
|
||||
|
||||
func TestMapPRState_Good_MergedOverridesState(t *testing.T) {
|
||||
// HasMerged=true should return MERGED regardless of State.
|
||||
pr := &forgejosdk.PullRequest{State: forgejosdk.StateOpen, HasMerged: true}
|
||||
assert.Equal(t, "MERGED", mapPRState(pr))
|
||||
}
|
||||
|
||||
// --- mapCombinedStatus edge cases ---
|
||||
|
||||
func TestMapCombinedStatus_Good_WarningState(t *testing.T) {
|
||||
// Unknown/warning state should default to PENDING.
|
||||
cs := &forgejosdk.CombinedStatus{
|
||||
State: forgejosdk.StatusWarning,
|
||||
TotalCount: 1,
|
||||
}
|
||||
assert.Equal(t, "PENDING", mapCombinedStatus(cs))
|
||||
}
|
||||
|
||||
// --- buildSignal edge cases ---
|
||||
|
||||
func TestBuildSignal_Good_ClosedPR(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{
|
||||
Index: 5,
|
||||
State: forgejosdk.StateClosed,
|
||||
Mergeable: false,
|
||||
HasMerged: false,
|
||||
Head: &forgejosdk.PRBranchInfo{Sha: "abc"},
|
||||
}
|
||||
|
||||
sig := buildSignal("org", "repo", 1, 2, pr, "FAILURE")
|
||||
assert.Equal(t, "CLOSED", sig.PRState)
|
||||
assert.Equal(t, "CONFLICTING", sig.Mergeable)
|
||||
assert.Equal(t, "FAILURE", sig.CheckStatus)
|
||||
assert.Equal(t, "abc", sig.LastCommitSHA)
|
||||
}
|
||||
|
||||
func TestBuildSignal_Good_MergedPR(t *testing.T) {
|
||||
pr := &forgejosdk.PullRequest{
|
||||
Index: 99,
|
||||
State: forgejosdk.StateClosed,
|
||||
Mergeable: false,
|
||||
HasMerged: true,
|
||||
Head: &forgejosdk.PRBranchInfo{Sha: "merged123"},
|
||||
}
|
||||
|
||||
sig := buildSignal("owner", "repo", 10, 5, pr, "SUCCESS")
|
||||
assert.Equal(t, "MERGED", sig.PRState)
|
||||
assert.Equal(t, "UNKNOWN", sig.Mergeable)
|
||||
assert.Equal(t, 99, sig.PRNumber)
|
||||
assert.Equal(t, "merged123", sig.LastCommitSHA)
|
||||
}
|
||||
|
||||
// --- splitRepo edge cases ---
|
||||
|
||||
func TestSplitRepo_Bad_OnlySlash(t *testing.T) {
|
||||
_, _, err := splitRepo("/")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestSplitRepo_Bad_MultipleSlashes(t *testing.T) {
|
||||
// Should take only the first part as owner, rest as repo.
|
||||
owner, repo, err := splitRepo("a/b/c")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "a", owner)
|
||||
assert.Equal(t, "b/c", repo)
|
||||
}
|
||||
|
||||
// --- Poll with combined status failure ---
|
||||
|
||||
func TestForgejoSource_Poll_Good_CombinedStatusFailure(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #2\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 10,
|
||||
"body": "Fixes #2",
|
||||
"state": "open",
|
||||
"mergeable": true,
|
||||
"merged": false,
|
||||
"head": map[string]string{"sha": "fail123", "ref": "feature", "label": "feature"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
case strings.Contains(path, "/status"):
|
||||
status := map[string]any{
|
||||
"state": "failure",
|
||||
"total_count": 2,
|
||||
"statuses": []map[string]any{{"status": "failure", "context": "ci"}},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(status)
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 1)
|
||||
assert.Equal(t, "FAILURE", signals[0].CheckStatus)
|
||||
assert.Equal(t, "OPEN", signals[0].PRState)
|
||||
assert.Equal(t, "MERGEABLE", signals[0].Mergeable)
|
||||
}
|
||||
|
||||
// --- Poll with combined status error ---
|
||||
|
||||
func TestForgejoSource_Poll_Good_CombinedStatusError(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #3\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 20,
|
||||
"body": "Fixes #3",
|
||||
"state": "open",
|
||||
"mergeable": false,
|
||||
"merged": false,
|
||||
"head": map[string]string{"sha": "err123", "ref": "fix", "label": "fix"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
// Combined status endpoint returns 500 — should fall back to PENDING.
|
||||
case strings.Contains(path, "/status"):
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 1)
|
||||
// Combined status API error -> falls back to PENDING.
|
||||
assert.Equal(t, "PENDING", signals[0].CheckStatus)
|
||||
assert.Equal(t, "CONFLICTING", signals[0].Mergeable)
|
||||
}
|
||||
|
||||
// --- Poll with child that has no assignee (NeedsCoding path, no assignee) ---
|
||||
|
||||
func TestForgejoSource_Poll_Good_ChildNoAssignee(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues/5"):
|
||||
// Child issue with no assignee.
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": 5,
|
||||
"title": "Unassigned task",
|
||||
"body": "No one is working on this.",
|
||||
"state": "open",
|
||||
"assignees": []map[string]any{},
|
||||
})
|
||||
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #5\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
_ = json.NewEncoder(w).Encode([]any{})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// No signal should be emitted when child has no assignee and no PR.
|
||||
assert.Empty(t, signals)
|
||||
}
|
||||
|
||||
// --- Poll with child issue fetch failure ---
|
||||
|
||||
func TestForgejoSource_Poll_Good_ChildFetchFails(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues/5"):
|
||||
// Child issue fetch fails.
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #5\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
_ = json.NewEncoder(w).Encode([]any{})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Child fetch error should be logged and skipped, not returned as error.
|
||||
assert.Empty(t, signals)
|
||||
}
|
||||
|
||||
// --- Poll with multiple epics ---
|
||||
|
||||
func TestForgejoSource_Poll_Good_MultipleEpics(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #3\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
{
|
||||
"number": 2,
|
||||
"body": "- [ ] #4\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 10,
|
||||
"body": "Fixes #3",
|
||||
"state": "open",
|
||||
"mergeable": true,
|
||||
"merged": false,
|
||||
"head": map[string]string{"sha": "aaa", "ref": "f1", "label": "f1"},
|
||||
},
|
||||
{
|
||||
"number": 11,
|
||||
"body": "Fixes #4",
|
||||
"state": "open",
|
||||
"mergeable": true,
|
||||
"merged": false,
|
||||
"head": map[string]string{"sha": "bbb", "ref": "f2", "label": "f2"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
case strings.Contains(path, "/status"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"state": "success",
|
||||
"total_count": 1,
|
||||
"statuses": []any{},
|
||||
})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 2)
|
||||
assert.Equal(t, 1, signals[0].EpicNumber)
|
||||
assert.Equal(t, 3, signals[0].ChildNumber)
|
||||
assert.Equal(t, 10, signals[0].PRNumber)
|
||||
|
||||
assert.Equal(t, 2, signals[1].EpicNumber)
|
||||
assert.Equal(t, 4, signals[1].ChildNumber)
|
||||
assert.Equal(t, 11, signals[1].PRNumber)
|
||||
}
|
||||
|
||||
// --- Report with nil result ---
|
||||
|
||||
func TestForgejoSource_Report_Good_NilResult(t *testing.T) {
|
||||
s := New(Config{}, nil)
|
||||
err := s.Report(context.Background(), nil)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// --- Report constructs correct comment body ---
|
||||
|
||||
func TestForgejoSource_Report_Good_SuccessFormat(t *testing.T) {
|
||||
var capturedPath string
|
||||
var capturedBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
capturedPath = r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
capturedBody = body["body"]
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{}, client)
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "tick_parent",
|
||||
RepoOwner: "core",
|
||||
RepoName: "go-scm",
|
||||
EpicNumber: 5,
|
||||
ChildNumber: 10,
|
||||
PRNumber: 20,
|
||||
Success: true,
|
||||
}
|
||||
|
||||
err := s.Report(context.Background(), result)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Comment should be on the epic issue.
|
||||
assert.Contains(t, capturedPath, "/issues/5/comments")
|
||||
assert.Contains(t, capturedBody, "tick_parent")
|
||||
assert.Contains(t, capturedBody, "succeeded")
|
||||
assert.Contains(t, capturedBody, "#10")
|
||||
assert.Contains(t, capturedBody, "PR #20")
|
||||
}
|
||||
|
||||
func TestForgejoSource_Report_Good_FailureWithError(t *testing.T) {
|
||||
var capturedBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
capturedBody = body["body"]
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{}, client)
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "enable_auto_merge",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 2,
|
||||
PRNumber: 3,
|
||||
Success: false,
|
||||
Error: "merge conflict detected",
|
||||
}
|
||||
|
||||
err := s.Report(context.Background(), result)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, capturedBody, "failed")
|
||||
assert.Contains(t, capturedBody, "merge conflict detected")
|
||||
}
|
||||
|
||||
// --- Poll filters only epic-labelled issues ---
|
||||
|
||||
func TestForgejoSource_Poll_Good_MixedLabels(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #2\n",
|
||||
"labels": []map[string]string{{"name": "epic"}, {"name": "priority-high"}},
|
||||
"state": "open",
|
||||
},
|
||||
{
|
||||
"number": 3,
|
||||
"body": "- [ ] #4\n",
|
||||
"labels": []map[string]string{{"name": "bug"}},
|
||||
"state": "open",
|
||||
},
|
||||
{
|
||||
"number": 5,
|
||||
"body": "- [ ] #6\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 10,
|
||||
"body": "Fixes #2",
|
||||
"state": "open",
|
||||
"mergeable": true,
|
||||
"merged": false,
|
||||
"head": map[string]string{"sha": "sha1", "ref": "f1", "label": "f1"},
|
||||
},
|
||||
{
|
||||
"number": 11,
|
||||
"body": "Fixes #4",
|
||||
"state": "open",
|
||||
"mergeable": true,
|
||||
"merged": false,
|
||||
"head": map[string]string{"sha": "sha2", "ref": "f2", "label": "f2"},
|
||||
},
|
||||
{
|
||||
"number": 12,
|
||||
"body": "Fixes #6",
|
||||
"state": "open",
|
||||
"mergeable": true,
|
||||
"merged": false,
|
||||
"head": map[string]string{"sha": "sha3", "ref": "f3", "label": "f3"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
case strings.Contains(path, "/status"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"state": "success",
|
||||
"total_count": 1,
|
||||
"statuses": []any{},
|
||||
})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Only issues #1 and #5 have the "epic" label.
|
||||
require.Len(t, signals, 2)
|
||||
assert.Equal(t, 1, signals[0].EpicNumber)
|
||||
assert.Equal(t, 2, signals[0].ChildNumber)
|
||||
assert.Equal(t, 5, signals[1].EpicNumber)
|
||||
assert.Equal(t, 6, signals[1].ChildNumber)
|
||||
}
|
||||
|
||||
// --- Poll with PRs error after issues succeed ---
|
||||
|
||||
func TestForgejoSource_Poll_Good_PRsAPIError(t *testing.T) {
|
||||
callCount := 0
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
callCount++
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1,
|
||||
"body": "- [ ] #2\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client, err := forge.New(srv.URL, "test-token")
|
||||
require.NoError(t, err)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
// PR API failure -> repo is skipped, no signals.
|
||||
assert.Empty(t, signals)
|
||||
}
|
||||
|
||||
// --- New creates source correctly ---
|
||||
|
||||
func TestForgejoSource_New_Good(t *testing.T) {
|
||||
s := New(Config{Repos: []string{"a/b", "c/d"}}, nil)
|
||||
assert.Equal(t, "forgejo", s.Name())
|
||||
assert.Equal(t, []string{"a/b", "c/d"}, s.repos)
|
||||
}
|
||||
|
|
@ -1,409 +0,0 @@
|
|||
package forgejo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Supplementary Forgejo signal source tests — extends Phase 3 coverage
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestForgejoSource_Poll_Good_MultipleEpicsMultipleChildren(t *testing.T) {
|
||||
// Two epics, each with multiple unchecked children that have linked PRs.
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 10,
|
||||
"body": "## Sprint\n- [ ] #11\n- [ ] #12\n- [x] #13\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
{
|
||||
"number": 20,
|
||||
"body": "## Sprint 2\n- [ ] #21\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 30, "body": "Fixes #11", "state": "open",
|
||||
"mergeable": true, "merged": false,
|
||||
"head": map[string]string{"sha": "aaa111", "ref": "fix-11", "label": "fix-11"},
|
||||
},
|
||||
{
|
||||
"number": 31, "body": "Fixes #12", "state": "open",
|
||||
"mergeable": false, "merged": false,
|
||||
"head": map[string]string{"sha": "bbb222", "ref": "fix-12", "label": "fix-12"},
|
||||
},
|
||||
{
|
||||
"number": 32, "body": "Resolves #21", "state": "open",
|
||||
"mergeable": true, "merged": false,
|
||||
"head": map[string]string{"sha": "ccc333", "ref": "fix-21", "label": "fix-21"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
case strings.Contains(path, "/status"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"state": "success", "total_count": 1, "statuses": []any{},
|
||||
})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Epic 10 has #11 and #12 unchecked; epic 20 has #21 unchecked. Total 3 signals.
|
||||
require.Len(t, signals, 3, "expected three signals from two epics")
|
||||
|
||||
childNumbers := map[int]bool{}
|
||||
for _, sig := range signals {
|
||||
childNumbers[sig.ChildNumber] = true
|
||||
}
|
||||
assert.True(t, childNumbers[11])
|
||||
assert.True(t, childNumbers[12])
|
||||
assert.True(t, childNumbers[21])
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_CombinedStatusFetchErrorFallsToPending(t *testing.T) {
|
||||
// When combined status fetch fails, check status should default to PENDING.
|
||||
var statusFetched atomic.Bool
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1, "body": "- [ ] #2\n",
|
||||
"labels": []map[string]string{{"name": "epic"}}, "state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 10, "body": "Fixes #2", "state": "open",
|
||||
"mergeable": true, "merged": false,
|
||||
"head": map[string]string{"sha": "sha123", "ref": "fix", "label": "fix"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
case strings.Contains(path, "/status"):
|
||||
statusFetched.Store(true)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 1)
|
||||
assert.True(t, statusFetched.Load(), "status endpoint should have been called")
|
||||
assert.Equal(t, "PENDING", signals[0].CheckStatus, "failed status fetch should default to PENDING")
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_MixedReposFirstFailsSecondSucceeds(t *testing.T) {
|
||||
// First repo fails (issues endpoint 500), second repo succeeds.
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/repos/bad-org/bad-repo/issues"):
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
case strings.Contains(path, "/repos/good-org/good-repo/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1, "body": "- [ ] #2\n",
|
||||
"labels": []map[string]string{{"name": "epic"}}, "state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/repos/good-org/good-repo/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 10, "body": "Fixes #2", "state": "open",
|
||||
"mergeable": true, "merged": false,
|
||||
"head": map[string]string{"sha": "abc", "ref": "fix", "label": "fix"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
case strings.Contains(path, "/status"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"state": "success", "total_count": 1, "statuses": []any{},
|
||||
})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"bad-org/bad-repo", "good-org/good-repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
require.Len(t, signals, 1, "only the good repo should produce signals")
|
||||
assert.Equal(t, "good-org", signals[0].RepoOwner)
|
||||
assert.Equal(t, "good-repo", signals[0].RepoName)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Report_Good_CommentBodyTable(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
result *jobrunner.ActionResult
|
||||
wantContains []string
|
||||
}{
|
||||
{
|
||||
name: "successful action",
|
||||
result: &jobrunner.ActionResult{
|
||||
Action: "enable_auto_merge", RepoOwner: "org", RepoName: "repo",
|
||||
EpicNumber: 10, ChildNumber: 11, PRNumber: 20, Success: true,
|
||||
},
|
||||
wantContains: []string{"enable_auto_merge", "succeeded", "#11", "PR #20"},
|
||||
},
|
||||
{
|
||||
name: "failed action with error",
|
||||
result: &jobrunner.ActionResult{
|
||||
Action: "tick_parent", RepoOwner: "org", RepoName: "repo",
|
||||
EpicNumber: 10, ChildNumber: 11, PRNumber: 20,
|
||||
Success: false, Error: "rate limit exceeded",
|
||||
},
|
||||
wantContains: []string{"tick_parent", "failed", "#11", "PR #20", "rate limit exceeded"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var capturedBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
capturedBody = body["body"]
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{}, client)
|
||||
|
||||
err := s.Report(context.Background(), tt.result)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, want := range tt.wantContains {
|
||||
assert.Contains(t, capturedBody, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestForgejoSource_Report_Good_PostsToCorrectEpicIssue(t *testing.T) {
|
||||
var capturedPath string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if r.Method == http.MethodPost {
|
||||
capturedPath = r.URL.Path
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{}, client)
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "merge", RepoOwner: "test-org", RepoName: "test-repo",
|
||||
EpicNumber: 42, ChildNumber: 7, PRNumber: 99, Success: true,
|
||||
}
|
||||
|
||||
err := s.Report(context.Background(), result)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := fmt.Sprintf("/api/v1/repos/%s/%s/issues/%d/comments", result.RepoOwner, result.RepoName, result.EpicNumber)
|
||||
assert.Equal(t, expected, capturedPath, "comment should be posted on the epic issue")
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_SignalFieldCompleteness(t *testing.T) {
|
||||
// Verify that all expected signal fields are populated correctly.
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 100, "body": "## Work\n- [ ] #101\n- [x] #102\n",
|
||||
"labels": []map[string]string{{"name": "epic"}}, "state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 200, "body": "Closes #101", "state": "open",
|
||||
"mergeable": true, "merged": false,
|
||||
"head": map[string]string{"sha": "deadbeef", "ref": "feature", "label": "feature"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
case strings.Contains(path, "/status"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"state": "success", "total_count": 2,
|
||||
"statuses": []map[string]any{{"status": "success"}, {"status": "success"}},
|
||||
})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"acme/widgets"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 1)
|
||||
sig := signals[0]
|
||||
|
||||
assert.Equal(t, 100, sig.EpicNumber)
|
||||
assert.Equal(t, 101, sig.ChildNumber)
|
||||
assert.Equal(t, 200, sig.PRNumber)
|
||||
assert.Equal(t, "acme", sig.RepoOwner)
|
||||
assert.Equal(t, "widgets", sig.RepoName)
|
||||
assert.Equal(t, "OPEN", sig.PRState)
|
||||
assert.Equal(t, "MERGEABLE", sig.Mergeable)
|
||||
assert.Equal(t, "SUCCESS", sig.CheckStatus)
|
||||
assert.Equal(t, "deadbeef", sig.LastCommitSHA)
|
||||
assert.False(t, sig.NeedsCoding)
|
||||
assert.Equal(t, "acme/widgets", sig.RepoFullName())
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_AllChildrenCheckedNoSignals(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1, "body": "- [x] #2\n- [x] #3\n",
|
||||
"labels": []map[string]string{{"name": "epic"}}, "state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
_ = json.NewEncoder(w).Encode([]any{})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, signals, "all children checked means no work to do")
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good_NeedsCodingSignalFields(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case strings.Contains(path, "/issues/7"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": 7, "title": "Implement authentication",
|
||||
"body": "Add OAuth2 support.", "state": "open",
|
||||
"assignees": []map[string]any{{"login": "agent-bot", "username": "agent-bot"}},
|
||||
})
|
||||
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 1, "body": "- [ ] #7\n",
|
||||
"labels": []map[string]string{{"name": "epic"}}, "state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
case strings.Contains(path, "/pulls"):
|
||||
_ = json.NewEncoder(w).Encode([]any{})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"org/repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 1)
|
||||
sig := signals[0]
|
||||
assert.True(t, sig.NeedsCoding)
|
||||
assert.Equal(t, "agent-bot", sig.Assignee)
|
||||
assert.Equal(t, "Implement authentication", sig.IssueTitle)
|
||||
assert.Contains(t, sig.IssueBody, "OAuth2 support")
|
||||
assert.Equal(t, 0, sig.PRNumber, "PRNumber should be zero for NeedsCoding signals")
|
||||
}
|
||||
|
|
@ -1,177 +0,0 @@
|
|||
package forgejo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// withVersion wraps an HTTP handler to serve the Forgejo /api/v1/version
|
||||
// endpoint that the SDK calls during NewClient initialization.
|
||||
func withVersion(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if strings.HasSuffix(r.URL.Path, "/version") {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_, _ = w.Write([]byte(`{"version":"9.0.0"}`))
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func newTestClient(t *testing.T, url string) *forge.Client {
|
||||
t.Helper()
|
||||
client, err := forge.New(url, "test-token")
|
||||
require.NoError(t, err)
|
||||
return client
|
||||
}
|
||||
|
||||
func TestForgejoSource_Name(t *testing.T) {
|
||||
s := New(Config{}, nil)
|
||||
assert.Equal(t, "forgejo", s.Name())
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_Good(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
// List issues — return one epic
|
||||
case strings.Contains(path, "/issues"):
|
||||
issues := []map[string]any{
|
||||
{
|
||||
"number": 10,
|
||||
"body": "## Tasks\n- [ ] #11\n- [x] #12\n",
|
||||
"labels": []map[string]string{{"name": "epic"}},
|
||||
"state": "open",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(issues)
|
||||
|
||||
// List PRs — return one open PR linked to #11
|
||||
case strings.Contains(path, "/pulls"):
|
||||
prs := []map[string]any{
|
||||
{
|
||||
"number": 20,
|
||||
"body": "Fixes #11",
|
||||
"state": "open",
|
||||
"mergeable": true,
|
||||
"merged": false,
|
||||
"head": map[string]string{"sha": "abc123", "ref": "feature", "label": "feature"},
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(prs)
|
||||
|
||||
// Combined status
|
||||
case strings.Contains(path, "/status"):
|
||||
status := map[string]any{
|
||||
"state": "success",
|
||||
"total_count": 1,
|
||||
"statuses": []map[string]any{{"status": "success", "context": "ci"}},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(status)
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"test-org/test-repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, signals, 1)
|
||||
sig := signals[0]
|
||||
assert.Equal(t, 10, sig.EpicNumber)
|
||||
assert.Equal(t, 11, sig.ChildNumber)
|
||||
assert.Equal(t, 20, sig.PRNumber)
|
||||
assert.Equal(t, "OPEN", sig.PRState)
|
||||
assert.Equal(t, "MERGEABLE", sig.Mergeable)
|
||||
assert.Equal(t, "SUCCESS", sig.CheckStatus)
|
||||
assert.Equal(t, "test-org", sig.RepoOwner)
|
||||
assert.Equal(t, "test-repo", sig.RepoName)
|
||||
assert.Equal(t, "abc123", sig.LastCommitSHA)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Poll_NoEpics(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode([]any{})
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{Repos: []string{"test-org/test-repo"}}, client)
|
||||
|
||||
signals, err := s.Poll(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, signals)
|
||||
}
|
||||
|
||||
func TestForgejoSource_Report_Good(t *testing.T) {
|
||||
var capturedBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
capturedBody = body["body"]
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestClient(t, srv.URL)
|
||||
s := New(Config{}, client)
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "enable_auto_merge",
|
||||
RepoOwner: "test-org",
|
||||
RepoName: "test-repo",
|
||||
EpicNumber: 10,
|
||||
ChildNumber: 11,
|
||||
PRNumber: 20,
|
||||
Success: true,
|
||||
}
|
||||
|
||||
err := s.Report(context.Background(), result)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, capturedBody, "enable_auto_merge")
|
||||
assert.Contains(t, capturedBody, "succeeded")
|
||||
}
|
||||
|
||||
func TestParseEpicChildren(t *testing.T) {
|
||||
body := "## Tasks\n- [x] #1\n- [ ] #7\n- [ ] #8\n- [x] #3\n"
|
||||
unchecked, checked := parseEpicChildren(body)
|
||||
assert.Equal(t, []int{7, 8}, unchecked)
|
||||
assert.Equal(t, []int{1, 3}, checked)
|
||||
}
|
||||
|
||||
func TestFindLinkedPR(t *testing.T) {
|
||||
assert.Nil(t, findLinkedPR(nil, 7))
|
||||
}
|
||||
|
||||
func TestSplitRepo(t *testing.T) {
|
||||
owner, repo, err := splitRepo("host-uk/core")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "host-uk", owner)
|
||||
assert.Equal(t, "core", repo)
|
||||
|
||||
_, _, err = splitRepo("invalid")
|
||||
assert.Error(t, err)
|
||||
|
||||
_, _, err = splitRepo("")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
|
@ -1,87 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
const (
|
||||
ColorAgentComplete = "#0e8a16" // Green
|
||||
)
|
||||
|
||||
// CompletionHandler manages issue state when an agent finishes work.
|
||||
type CompletionHandler struct {
|
||||
forge *forge.Client
|
||||
}
|
||||
|
||||
// NewCompletionHandler creates a handler for agent completion events.
|
||||
func NewCompletionHandler(client *forge.Client) *CompletionHandler {
|
||||
return &CompletionHandler{
|
||||
forge: client,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the handler identifier.
|
||||
func (h *CompletionHandler) Name() string {
|
||||
return "completion"
|
||||
}
|
||||
|
||||
// Match returns true if the signal indicates an agent has finished a task.
|
||||
func (h *CompletionHandler) Match(signal *jobrunner.PipelineSignal) bool {
|
||||
return signal.Type == "agent_completion"
|
||||
}
|
||||
|
||||
// Execute updates the issue labels based on the completion status.
|
||||
func (h *CompletionHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
|
||||
start := time.Now()
|
||||
|
||||
// Remove in-progress label.
|
||||
if inProgressLabel, err := h.forge.GetLabelByName(signal.RepoOwner, signal.RepoName, LabelInProgress); err == nil {
|
||||
_ = h.forge.RemoveIssueLabel(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber), inProgressLabel.ID)
|
||||
}
|
||||
|
||||
if signal.Success {
|
||||
completeLabel, err := h.forge.EnsureLabel(signal.RepoOwner, signal.RepoName, LabelAgentComplete, ColorAgentComplete)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ensure label %s: %w", LabelAgentComplete, err)
|
||||
}
|
||||
|
||||
if err := h.forge.AddIssueLabels(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber), []int64{completeLabel.ID}); err != nil {
|
||||
return nil, fmt.Errorf("add completed label: %w", err)
|
||||
}
|
||||
|
||||
if signal.Message != "" {
|
||||
_ = h.forge.CreateIssueComment(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber), signal.Message)
|
||||
}
|
||||
} else {
|
||||
failedLabel, err := h.forge.EnsureLabel(signal.RepoOwner, signal.RepoName, LabelAgentFailed, ColorAgentFailed)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ensure label %s: %w", LabelAgentFailed, err)
|
||||
}
|
||||
|
||||
if err := h.forge.AddIssueLabels(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber), []int64{failedLabel.ID}); err != nil {
|
||||
return nil, fmt.Errorf("add failed label: %w", err)
|
||||
}
|
||||
|
||||
msg := "Agent reported failure."
|
||||
if signal.Error != "" {
|
||||
msg += fmt.Sprintf("\n\nError: %s", signal.Error)
|
||||
}
|
||||
_ = h.forge.CreateIssueComment(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber), msg)
|
||||
}
|
||||
|
||||
return &jobrunner.ActionResult{
|
||||
Action: "completion",
|
||||
RepoOwner: signal.RepoOwner,
|
||||
RepoName: signal.RepoName,
|
||||
EpicNumber: signal.EpicNumber,
|
||||
ChildNumber: signal.ChildNumber,
|
||||
Success: true,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}, nil
|
||||
}
|
||||
|
|
@ -1,291 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
func TestCompletion_Name_Good(t *testing.T) {
|
||||
h := NewCompletionHandler(nil)
|
||||
assert.Equal(t, "completion", h.Name())
|
||||
}
|
||||
|
||||
func TestCompletion_Match_Good_AgentCompletion(t *testing.T) {
|
||||
h := NewCompletionHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
Type: "agent_completion",
|
||||
}
|
||||
assert.True(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestCompletion_Match_Bad_WrongType(t *testing.T) {
|
||||
h := NewCompletionHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
Type: "pr_update",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestCompletion_Match_Bad_EmptyType(t *testing.T) {
|
||||
h := NewCompletionHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestCompletion_Execute_Good_Success(t *testing.T) {
|
||||
var labelRemoved bool
|
||||
var labelAdded bool
|
||||
var commentPosted bool
|
||||
var commentBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
// GetLabelByName (in-progress) — GET labels to find in-progress.
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/test-org/test-repo/labels":
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{
|
||||
{"id": 1, "name": "in-progress", "color": "#1d76db"},
|
||||
})
|
||||
|
||||
// RemoveIssueLabel (in-progress).
|
||||
case r.Method == http.MethodDelete && r.URL.Path == "/api/v1/repos/test-org/test-repo/issues/5/labels/1":
|
||||
labelRemoved = true
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
||||
// EnsureLabel (agent-completed) — POST to create.
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/test-org/test-repo/labels":
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 2, "name": "agent-completed", "color": "#0e8a16"})
|
||||
|
||||
// AddIssueLabels.
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/test-org/test-repo/issues/5/labels":
|
||||
labelAdded = true
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{{"id": 2, "name": "agent-completed"}})
|
||||
|
||||
// CreateIssueComment.
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/test-org/test-repo/issues/5/comments":
|
||||
commentPosted = true
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
commentBody = body["body"]
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "body": body["body"]})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{})
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewCompletionHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
Type: "agent_completion",
|
||||
RepoOwner: "test-org",
|
||||
RepoName: "test-repo",
|
||||
ChildNumber: 5,
|
||||
EpicNumber: 3,
|
||||
Success: true,
|
||||
Message: "Task completed successfully",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Success)
|
||||
assert.Equal(t, "completion", result.Action)
|
||||
assert.Equal(t, "test-org", result.RepoOwner)
|
||||
assert.Equal(t, "test-repo", result.RepoName)
|
||||
assert.Equal(t, 3, result.EpicNumber)
|
||||
assert.Equal(t, 5, result.ChildNumber)
|
||||
assert.True(t, labelRemoved, "in-progress label should be removed")
|
||||
assert.True(t, labelAdded, "agent-completed label should be added")
|
||||
assert.True(t, commentPosted, "comment should be posted")
|
||||
assert.Contains(t, commentBody, "Task completed successfully")
|
||||
}
|
||||
|
||||
func TestCompletion_Execute_Good_Failure(t *testing.T) {
|
||||
var labelAdded bool
|
||||
var commentBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/test-org/test-repo/labels":
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{})
|
||||
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/test-org/test-repo/labels":
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 3, "name": "agent-failed", "color": "#c0392b"})
|
||||
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/test-org/test-repo/issues/5/labels":
|
||||
labelAdded = true
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{{"id": 3, "name": "agent-failed"}})
|
||||
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/test-org/test-repo/issues/5/comments":
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
commentBody = body["body"]
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "body": body["body"]})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{})
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewCompletionHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
Type: "agent_completion",
|
||||
RepoOwner: "test-org",
|
||||
RepoName: "test-repo",
|
||||
ChildNumber: 5,
|
||||
EpicNumber: 3,
|
||||
Success: false,
|
||||
Error: "tests failed",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Success) // The handler itself succeeded
|
||||
assert.Equal(t, "completion", result.Action)
|
||||
assert.True(t, labelAdded, "agent-failed label should be added")
|
||||
assert.Contains(t, commentBody, "Agent reported failure")
|
||||
assert.Contains(t, commentBody, "tests failed")
|
||||
}
|
||||
|
||||
func TestCompletion_Execute_Good_FailureNoError(t *testing.T) {
|
||||
var commentBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{})
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 3, "name": "agent-failed", "color": "#c0392b"})
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/issues/1/labels":
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{})
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/issues/1/comments":
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
commentBody = body["body"]
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{})
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewCompletionHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
Type: "agent_completion",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
ChildNumber: 1,
|
||||
Success: false,
|
||||
Error: "", // No error message.
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success)
|
||||
assert.Contains(t, commentBody, "Agent reported failure")
|
||||
assert.NotContains(t, commentBody, "Error:") // No error detail.
|
||||
}
|
||||
|
||||
func TestCompletion_Execute_Good_SuccessNoMessage(t *testing.T) {
|
||||
var commentPosted bool
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{})
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 2, "name": "agent-completed", "color": "#0e8a16"})
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/issues/1/labels":
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{})
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/issues/1/comments":
|
||||
commentPosted = true
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{})
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewCompletionHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
Type: "agent_completion",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
ChildNumber: 1,
|
||||
Success: true,
|
||||
Message: "", // No message.
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success)
|
||||
assert.False(t, commentPosted, "no comment should be posted when message is empty")
|
||||
}
|
||||
|
||||
func TestCompletion_Execute_Bad_EnsureLabelFails(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
// Return empty so EnsureLabel tries to create.
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{})
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
// Label creation fails.
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{})
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewCompletionHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
Type: "agent_completion",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
ChildNumber: 1,
|
||||
Success: true,
|
||||
}
|
||||
|
||||
_, err := h.Execute(context.Background(), sig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "ensure label")
|
||||
}
|
||||
|
|
@ -1,704 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/agentci"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// --- Dispatch: Execute with invalid repo name ---
|
||||
|
||||
func TestDispatch_Execute_Bad_InvalidRepoNameSpecialChars(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "localhost", QueueDir: "/tmp/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(client, srv.URL, "test-token", spinner)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "darbs-claude",
|
||||
RepoOwner: "valid-org",
|
||||
RepoName: "repo$bad!",
|
||||
ChildNumber: 1,
|
||||
}
|
||||
|
||||
_, err := h.Execute(context.Background(), sig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "invalid repo name")
|
||||
}
|
||||
|
||||
// --- Dispatch: Execute when EnsureLabel fails ---
|
||||
|
||||
// TestDispatch_Execute_Bad_EnsureLabelCreationFails drives Execute against a
// mock server whose label listing is empty and whose label-creation endpoint
// returns 500, and asserts the handler propagates an "ensure label" error.
func TestDispatch_Execute_Bad_EnsureLabelCreationFails(t *testing.T) {
	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/labels"):
			// Empty label list forces EnsureLabel down the creation path.
			_ = json.NewEncoder(w).Encode([]map[string]any{})
		case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
			// Creation is rejected, which should abort the dispatch early.
			w.WriteHeader(http.StatusInternalServerError)
		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)
	spinner := newTestSpinner(map[string]agentci.AgentConfig{
		"darbs-claude": {Host: "localhost", QueueDir: "/tmp/queue", Active: true},
	})
	h := NewDispatchHandler(client, srv.URL, "test-token", spinner)

	sig := &jobrunner.PipelineSignal{
		NeedsCoding: true,
		Assignee:    "darbs-claude",
		RepoOwner:   "org",
		RepoName:    "repo",
		ChildNumber: 1,
	}

	_, err := h.Execute(context.Background(), sig)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "ensure label")
}
|
||||
|
||||
// dispatchMockServer creates a standard mock server for dispatch tests.
// It handles all the Forgejo API calls needed for a full dispatch flow.
// Note: GET issues/5 deliberately returns 404 so callers exercise the full
// dispatch path rather than the already-in-progress dedup short-circuit.
func dispatchMockServer(t *testing.T) *httptest.Server {
	t.Helper()
	return httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		// GetLabelByName / list labels: both pipeline labels already exist.
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/labels":
			_ = json.NewEncoder(w).Encode([]map[string]any{
				{"id": 1, "name": "in-progress", "color": "#1d76db"},
				{"id": 2, "name": "agent-ready", "color": "#00ff00"},
			})

		// CreateLabel (shouldn't normally be needed since we return it above)
		case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
			w.WriteHeader(http.StatusCreated)
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "name": "in-progress", "color": "#1d76db"})

		// GetIssue (returns issue with no label to trigger the full dispatch flow)
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
			w.WriteHeader(http.StatusNotFound) // Issue not found => full dispatch flow

		// AssignIssue
		case r.Method == http.MethodPatch && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 5, "number": 5})

		// AddIssueLabels
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/issues/5/labels"):
			_ = json.NewEncoder(w).Encode([]map[string]any{{"id": 1, "name": "in-progress"}})

		// RemoveIssueLabel
		case r.Method == http.MethodDelete && strings.Contains(r.URL.Path, "/labels/"):
			w.WriteHeader(http.StatusNoContent)

		// CreateIssueComment
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/issues/5/comments"):
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "body": "dispatched"})

		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
}
|
||||
|
||||
// --- Dispatch: Execute when GetIssue returns 404 (full dispatch path) ---
|
||||
|
||||
func TestDispatch_Execute_Good_GetIssueNotFound(t *testing.T) {
|
||||
srv := dispatchMockServer(t)
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "localhost", QueueDir: "/tmp/nonexistent-queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(client, srv.URL, "test-token", spinner)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "darbs-claude",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
ChildNumber: 5,
|
||||
EpicNumber: 3,
|
||||
IssueTitle: "Test issue",
|
||||
IssueBody: "Test body",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "dispatch", result.Action)
|
||||
}
|
||||
|
||||
// --- Completion: Execute when AddIssueLabels fails for success case ---
|
||||
|
||||
// TestCompletion_Execute_Bad_AddCompleteLabelFails: label lookup and creation
// succeed, but attaching the label to issue 5 returns 500, so Execute must
// fail with an "add completed label" error.
func TestCompletion_Execute_Bad_AddCompleteLabelFails(t *testing.T) {
	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/labels"):
			// No labels listed, so EnsureLabel falls through to creation.
			_ = json.NewEncoder(w).Encode([]map[string]any{})
		case r.Method == http.MethodPost && strings.HasSuffix(r.URL.Path, "/repo/labels"):
			// Repo-level label creation succeeds.
			w.WriteHeader(http.StatusCreated)
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 2, "name": "agent-completed", "color": "#0e8a16"})
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/issues/5/labels"):
			// Attaching the label to the issue fails.
			w.WriteHeader(http.StatusInternalServerError)
		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)
	h := NewCompletionHandler(client)

	sig := &jobrunner.PipelineSignal{
		Type:        "agent_completion",
		RepoOwner:   "org",
		RepoName:    "repo",
		ChildNumber: 5,
		Success:     true,
	}

	_, err := h.Execute(context.Background(), sig)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "add completed label")
}
|
||||
|
||||
// --- Completion: Execute when AddIssueLabels fails for failure case ---
|
||||
|
||||
// TestCompletion_Execute_Bad_AddFailLabelFails mirrors the completed-label
// test for the Success=false branch: the "agent-failed" label is created
// fine, but attaching it to issue 5 returns 500, so Execute must fail with
// an "add failed label" error.
func TestCompletion_Execute_Bad_AddFailLabelFails(t *testing.T) {
	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/labels"):
			// No labels listed, so EnsureLabel falls through to creation.
			_ = json.NewEncoder(w).Encode([]map[string]any{})
		case r.Method == http.MethodPost && strings.HasSuffix(r.URL.Path, "/repo/labels"):
			// Repo-level label creation succeeds.
			w.WriteHeader(http.StatusCreated)
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 3, "name": "agent-failed", "color": "#c0392b"})
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/issues/5/labels"):
			// Attaching the label to the issue fails.
			w.WriteHeader(http.StatusInternalServerError)
		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)
	h := NewCompletionHandler(client)

	sig := &jobrunner.PipelineSignal{
		Type:        "agent_completion",
		RepoOwner:   "org",
		RepoName:    "repo",
		ChildNumber: 5,
		Success:     false,
	}

	_, err := h.Execute(context.Background(), sig)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "add failed label")
}
|
||||
|
||||
// --- Completion: Execute with EnsureLabel failure on failure path ---
|
||||
|
||||
func TestCompletion_Execute_Bad_FailedPathEnsureLabelFails(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/labels"):
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{})
|
||||
case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/labels"):
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{})
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewCompletionHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
Type: "agent_completion",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
ChildNumber: 1,
|
||||
Success: false,
|
||||
}
|
||||
|
||||
_, err := h.Execute(context.Background(), sig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "ensure label")
|
||||
}
|
||||
|
||||
// --- EnableAutoMerge: additional edge case ---
|
||||
|
||||
func TestEnableAutoMerge_Match_Bad_PendingChecks(t *testing.T) {
|
||||
h := NewEnableAutoMergeHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
IsDraft: false,
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "PENDING",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestEnableAutoMerge_Execute_Bad_InternalServerError(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewEnableAutoMergeHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRNumber: 1,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Success)
|
||||
assert.Contains(t, result.Error, "merge failed")
|
||||
}
|
||||
|
||||
// --- PublishDraft: Match with MERGED state ---
|
||||
|
||||
func TestPublishDraft_Match_Bad_MergedState(t *testing.T) {
|
||||
h := NewPublishDraftHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
IsDraft: true,
|
||||
PRState: "MERGED",
|
||||
CheckStatus: "SUCCESS",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
// --- SendFixCommand: Execute merge conflict message ---
|
||||
|
||||
func TestSendFixCommand_Execute_Good_MergeConflictMessage(t *testing.T) {
|
||||
var capturedBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if r.Method == http.MethodPost {
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
capturedBody = body["body"]
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewSendFixCommandHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRNumber: 1,
|
||||
Mergeable: "CONFLICTING",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success)
|
||||
assert.Contains(t, capturedBody, "fix the merge conflict")
|
||||
}
|
||||
|
||||
// --- DismissReviews: Execute with stale review that gets dismissed ---
|
||||
|
||||
func TestDismissReviews_Execute_Good_StaleReviewDismissed(t *testing.T) {
|
||||
var dismissCalled bool
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/reviews") {
|
||||
reviews := []map[string]any{
|
||||
{
|
||||
"id": 1, "state": "REQUEST_CHANGES", "dismissed": false, "stale": true,
|
||||
"body": "fix it", "commit_id": "abc123",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(reviews)
|
||||
return
|
||||
}
|
||||
|
||||
if r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/dismissals") {
|
||||
dismissCalled = true
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "state": "DISMISSED"})
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewDismissReviewsHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRNumber: 1,
|
||||
PRState: "OPEN",
|
||||
ThreadsTotal: 1,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success)
|
||||
assert.True(t, dismissCalled)
|
||||
}
|
||||
|
||||
// --- TickParent: Execute ticks and closes ---
|
||||
|
||||
// TestTickParent_Execute_Good_TicksCheckboxAndCloses verifies the happy path:
// for a MERGED child PR, the handler rewrites the epic body to tick the
// child's checkbox and closes the child issue.
func TestTickParent_Execute_Good_TicksCheckboxAndCloses(t *testing.T) {
	epicBody := "## Tasks\n- [ ] #7\n- [ ] #8\n"
	var editedBody string
	var closedIssue bool

	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		// GetIssue on the epic returns the unticked task list.
		case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/issues/42"):
			_ = json.NewEncoder(w).Encode(map[string]any{
				"number": 42,
				"body":   epicBody,
				"title":  "Epic",
			})
		// EditIssue on the epic captures the rewritten body.
		case r.Method == http.MethodPatch && strings.Contains(r.URL.Path, "/issues/42"):
			var body map[string]any
			_ = json.NewDecoder(r.Body).Decode(&body)
			if b, ok := body["body"].(string); ok {
				editedBody = b
			}
			_ = json.NewEncoder(w).Encode(map[string]any{
				"number": 42,
				"body":   editedBody,
				"title":  "Epic",
			})
		// EditIssue on the child records that it was closed.
		case r.Method == http.MethodPatch && strings.Contains(r.URL.Path, "/issues/7"):
			closedIssue = true
			_ = json.NewEncoder(w).Encode(map[string]any{
				"number": 7,
				"state":  "closed",
			})
		default:
			w.WriteHeader(http.StatusOK)
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)
	h := NewTickParentHandler(client)

	sig := &jobrunner.PipelineSignal{
		RepoOwner:   "org",
		RepoName:    "repo",
		EpicNumber:  42,
		ChildNumber: 7,
		PRNumber:    99,
		PRState:     "MERGED",
	}

	result, err := h.Execute(context.Background(), sig)
	require.NoError(t, err)
	assert.True(t, result.Success)
	assert.Contains(t, editedBody, "- [x] #7")
	assert.True(t, closedIssue)
}
|
||||
|
||||
// --- Dispatch: DualRun mode ---
|
||||
|
||||
func TestDispatch_Execute_Good_DualRunModeDispatch(t *testing.T) {
|
||||
srv := dispatchMockServer(t)
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
|
||||
spinner := agentci.NewSpinner(
|
||||
agentci.ClothoConfig{Strategy: "clotho-verified"},
|
||||
map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {
|
||||
Host: "localhost",
|
||||
QueueDir: "/tmp/nonexistent-queue",
|
||||
Active: true,
|
||||
Model: "sonnet",
|
||||
DualRun: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
h := NewDispatchHandler(client, srv.URL, "test-token", spinner)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "darbs-claude",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
ChildNumber: 5,
|
||||
EpicNumber: 3,
|
||||
IssueTitle: "Test issue",
|
||||
IssueBody: "Test body",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "dispatch", result.Action)
|
||||
}
|
||||
|
||||
// --- TickParent: ChildNumber not found in epic body ---
|
||||
|
||||
func TestTickParent_Execute_Good_ChildNotInBody(t *testing.T) {
|
||||
epicBody := "## Tasks\n- [ ] #99\n"
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/issues/42") {
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": 42,
|
||||
"body": epicBody,
|
||||
"title": "Epic",
|
||||
})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewTickParentHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
EpicNumber: 42,
|
||||
ChildNumber: 50,
|
||||
PRNumber: 100,
|
||||
PRState: "MERGED",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success)
|
||||
}
|
||||
|
||||
// --- Dispatch: AssignIssue fails (warn, continue) ---
|
||||
|
||||
// TestDispatch_Execute_Good_AssignIssueFails covers the dispatch flow when
// the issue-assignment PATCH fails: the handler treats that as a warning and
// carries on rather than aborting the dispatch.
func TestDispatch_Execute_Good_AssignIssueFails(t *testing.T) {
	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		// ListLabels: both pipeline labels already exist.
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/labels":
			_ = json.NewEncoder(w).Encode([]map[string]any{
				{"id": 1, "name": "in-progress", "color": "#1d76db"},
				{"id": 2, "name": "agent-ready", "color": "#00ff00"},
			})
		case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
			w.WriteHeader(http.StatusCreated)
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "name": "in-progress"})
		// GetIssue returns issue with NO special labels
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
			_ = json.NewEncoder(w).Encode(map[string]any{
				"id": 5, "number": 5, "title": "Test Issue",
				"labels": []map[string]any{},
			})
		// AssignIssue FAILS
		case r.Method == http.MethodPatch && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
			w.WriteHeader(http.StatusInternalServerError)
			_, _ = w.Write([]byte(`{"message":"assign failed"}`))
		// AddIssueLabels succeeds
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/issues/5/labels"):
			_ = json.NewEncoder(w).Encode([]map[string]any{{"id": 1, "name": "in-progress"}})
		case r.Method == http.MethodDelete && strings.Contains(r.URL.Path, "/labels/"):
			w.WriteHeader(http.StatusNoContent)
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/issues/5/comments"):
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "body": "dispatched"})
		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)
	spinner := newTestSpinner(map[string]agentci.AgentConfig{
		"darbs-claude": {Host: "localhost", QueueDir: "/tmp/nonexistent-queue", Active: true},
	})
	h := NewDispatchHandler(client, srv.URL, "test-token", spinner)

	signal := &jobrunner.PipelineSignal{
		EpicNumber:  1,
		ChildNumber: 5,
		PRNumber:    10,
		RepoOwner:   "org",
		RepoName:    "repo",
		Assignee:    "darbs-claude",
		IssueTitle:  "Test Issue",
		IssueBody:   "Test body",
	}

	// Should not return error because AssignIssue failure is only a warning.
	result, err := h.Execute(context.Background(), signal)
	// secureTransfer will fail because SSH isn't available, but we exercised the assign-error path.
	// NOTE(review): nothing is asserted here, so this test only proves the
	// path runs without panicking — consider asserting on result/err once the
	// SSH transfer can be faked.
	_ = result
	_ = err
}
|
||||
|
||||
// --- Dispatch: AddIssueLabels fails ---
|
||||
|
||||
// TestDispatch_Execute_Bad_AddIssueLabelsError: the dispatch proceeds past
// label lookup, issue fetch, and assignment, but attaching the in-progress
// label fails with 500, so Execute must return an "add in-progress label"
// error.
func TestDispatch_Execute_Bad_AddIssueLabelsError(t *testing.T) {
	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		// ListLabels: the in-progress label already exists.
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/labels":
			_ = json.NewEncoder(w).Encode([]map[string]any{
				{"id": 1, "name": "in-progress", "color": "#1d76db"},
			})
		case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
			w.WriteHeader(http.StatusCreated)
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "name": "in-progress"})
		// GetIssue: the issue exists but has no pipeline labels.
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
			_ = json.NewEncoder(w).Encode(map[string]any{
				"id": 5, "number": 5, "title": "Test Issue",
				"labels": []map[string]any{},
			})
		// AssignIssue succeeds.
		case r.Method == http.MethodPatch && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 5, "number": 5})
		// AddIssueLabels FAILS
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/issues/5/labels"):
			w.WriteHeader(http.StatusInternalServerError)
			_, _ = w.Write([]byte(`{"message":"label add failed"}`))
		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)
	spinner := newTestSpinner(map[string]agentci.AgentConfig{
		"darbs-claude": {Host: "localhost", QueueDir: "/tmp/nonexistent-queue", Active: true},
	})
	h := NewDispatchHandler(client, srv.URL, "test-token", spinner)

	signal := &jobrunner.PipelineSignal{
		EpicNumber:  1,
		ChildNumber: 5,
		PRNumber:    10,
		RepoOwner:   "org",
		RepoName:    "repo",
		Assignee:    "darbs-claude",
		IssueTitle:  "Test Issue",
		IssueBody:   "Test body",
	}

	_, err := h.Execute(context.Background(), signal)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "add in-progress label")
}
|
||||
|
||||
// --- Dispatch: GetIssue returns issue with existing labels not matching ---
|
||||
|
||||
// TestDispatch_Execute_Good_IssueFoundNoSpecialLabels: the issue exists but
// carries only an unrelated label, so the dedup check does not short-circuit
// and the full dispatch flow runs.
func TestDispatch_Execute_Good_IssueFoundNoSpecialLabels(t *testing.T) {
	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		// ListLabels: both pipeline labels exist.
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/labels":
			_ = json.NewEncoder(w).Encode([]map[string]any{
				{"id": 1, "name": "in-progress", "color": "#1d76db"},
				{"id": 2, "name": "agent-ready", "color": "#00ff00"},
			})
		case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
			w.WriteHeader(http.StatusCreated)
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "name": "in-progress"})
		// GetIssue returns issue with unrelated labels
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
			_ = json.NewEncoder(w).Encode(map[string]any{
				"id": 5, "number": 5, "title": "Test Issue",
				"labels": []map[string]any{
					{"id": 10, "name": "enhancement"},
				},
			})
		case r.Method == http.MethodPatch && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 5, "number": 5})
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/issues/5/labels"):
			_ = json.NewEncoder(w).Encode([]map[string]any{{"id": 1, "name": "in-progress"}})
		case r.Method == http.MethodDelete && strings.Contains(r.URL.Path, "/labels/"):
			w.WriteHeader(http.StatusNoContent)
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/issues/5/comments"):
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "body": "dispatched"})
		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)
	spinner := newTestSpinner(map[string]agentci.AgentConfig{
		"darbs-claude": {Host: "localhost", QueueDir: "/tmp/nonexistent-queue", Active: true},
	})
	h := NewDispatchHandler(client, srv.URL, "test-token", spinner)

	signal := &jobrunner.PipelineSignal{
		EpicNumber:  1,
		ChildNumber: 5,
		PRNumber:    10,
		RepoOwner:   "org",
		RepoName:    "repo",
		Assignee:    "darbs-claude",
		IssueTitle:  "Test Issue",
		IssueBody:   "Test body",
	}

	// Execute will proceed past label check and try SSH (which fails).
	result, err := h.Execute(context.Background(), signal)
	// Should either succeed (if somehow SSH works) or fail at secureTransfer.
	// NOTE(review): no assertions here — this only proves the label-check
	// branch is exercised without panicking.
	_ = result
	_ = err
}
|
||||
|
|
@ -1,293 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/agentci"
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
"forge.lthn.ai/core/go/pkg/log"
|
||||
)
|
||||
|
||||
// Label names the pipeline handlers apply to issues, and the colours used
// when a handler has to create one of those labels on the repo.
const (
	LabelAgentReady    = "agent-ready"     // removed by dispatch once work starts
	LabelInProgress    = "in-progress"     // added by dispatch when an agent is assigned
	LabelAgentFailed   = "agent-failed"    // applied on unsuccessful agent completion
	LabelAgentComplete = "agent-completed" // applied on successful agent completion

	ColorInProgress  = "#1d76db" // Blue
	ColorAgentFailed = "#c0392b" // Red
)
|
||||
|
||||
// DispatchTicket is the JSON payload written to the agent's queue.
// The ForgeToken is transferred separately via a .env file with 0600 permissions.
type DispatchTicket struct {
	ID           string `json:"id"`           // "<owner>-<repo>-<issue>-<unixtime>"
	RepoOwner    string `json:"repo_owner"`   // sanitised repository owner
	RepoName     string `json:"repo_name"`    // sanitised repository name
	IssueNumber  int    `json:"issue_number"` // child issue the agent should implement
	IssueTitle   string `json:"issue_title"`
	IssueBody    string `json:"issue_body"`
	TargetBranch string `json:"target_branch"` // repo default branch, falling back to "main"
	EpicNumber   int    `json:"epic_number"`   // parent epic issue number
	ForgeURL     string `json:"forge_url"`
	ForgeUser    string `json:"forgejo_user"`           // NB: JSON key is "forgejo_user", not "forge_user"
	Model        string `json:"model,omitempty"`        // from the agent's config
	Runner       string `json:"runner,omitempty"`       // from the agent's config
	VerifyModel  string `json:"verify_model,omitempty"` // set only when dual-run mode is chosen
	DualRun      bool   `json:"dual_run"`               // true when Clotho planning selects ModeDual
	CreatedAt    string `json:"created_at"`             // RFC 3339 UTC timestamp
}
|
||||
|
||||
// DispatchHandler dispatches coding work to remote agent machines via SSH.
type DispatchHandler struct {
	forge    *forge.Client    // Forgejo API client used for labels, issues, and comments
	forgeURL string           // base URL embedded in dispatched tickets
	token    string           // API token; not embedded in the ticket JSON (see DispatchTicket)
	spinner  *agentci.Spinner // agent registry and Clotho run-mode planner
}

// NewDispatchHandler creates a handler that dispatches tickets to agent machines.
func NewDispatchHandler(client *forge.Client, forgeURL, token string, spinner *agentci.Spinner) *DispatchHandler {
	return &DispatchHandler{
		forge:    client,
		forgeURL: forgeURL,
		token:    token,
		spinner:  spinner,
	}
}
|
||||
|
||||
// Name returns the handler identifier.
// The same string is reported as the Action of the results Execute produces.
func (h *DispatchHandler) Name() string {
	return "dispatch"
}
|
||||
|
||||
// Match returns true for signals where a child issue needs coding (no PR yet)
|
||||
// and the assignee is a known agent (by config key or Forgejo username).
|
||||
func (h *DispatchHandler) Match(signal *jobrunner.PipelineSignal) bool {
|
||||
if !signal.NeedsCoding {
|
||||
return false
|
||||
}
|
||||
_, _, ok := h.spinner.FindByForgejoUser(signal.Assignee)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Execute creates a ticket JSON and transfers it securely to the agent's queue directory.
|
||||
func (h *DispatchHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
|
||||
start := time.Now()
|
||||
|
||||
agentName, agent, ok := h.spinner.FindByForgejoUser(signal.Assignee)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("handlers.Dispatch.Execute: unknown agent: %s", signal.Assignee)
|
||||
}
|
||||
|
||||
// Sanitize inputs to prevent path traversal.
|
||||
safeOwner, err := agentci.SanitizePath(signal.RepoOwner)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid repo owner: %w", err)
|
||||
}
|
||||
safeRepo, err := agentci.SanitizePath(signal.RepoName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid repo name: %w", err)
|
||||
}
|
||||
|
||||
// Ensure in-progress label exists on repo.
|
||||
inProgressLabel, err := h.forge.EnsureLabel(safeOwner, safeRepo, LabelInProgress, ColorInProgress)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ensure label %s: %w", LabelInProgress, err)
|
||||
}
|
||||
|
||||
// Check if already in progress to prevent double-dispatch.
|
||||
issue, err := h.forge.GetIssue(safeOwner, safeRepo, int64(signal.ChildNumber))
|
||||
if err == nil {
|
||||
for _, l := range issue.Labels {
|
||||
if l.Name == LabelInProgress || l.Name == LabelAgentComplete {
|
||||
log.Info("issue already processed, skipping", "issue", signal.ChildNumber, "label", l.Name)
|
||||
return &jobrunner.ActionResult{
|
||||
Action: "dispatch",
|
||||
Success: true,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Assign agent and add in-progress label.
|
||||
if err := h.forge.AssignIssue(safeOwner, safeRepo, int64(signal.ChildNumber), []string{signal.Assignee}); err != nil {
|
||||
log.Warn("failed to assign agent, continuing", "err", err)
|
||||
}
|
||||
|
||||
if err := h.forge.AddIssueLabels(safeOwner, safeRepo, int64(signal.ChildNumber), []int64{inProgressLabel.ID}); err != nil {
|
||||
return nil, fmt.Errorf("add in-progress label: %w", err)
|
||||
}
|
||||
|
||||
// Remove agent-ready label if present.
|
||||
if readyLabel, err := h.forge.GetLabelByName(safeOwner, safeRepo, LabelAgentReady); err == nil {
|
||||
_ = h.forge.RemoveIssueLabel(safeOwner, safeRepo, int64(signal.ChildNumber), readyLabel.ID)
|
||||
}
|
||||
|
||||
// Clotho planning — determine execution mode.
|
||||
runMode := h.spinner.DeterminePlan(signal, agentName)
|
||||
verifyModel := ""
|
||||
if runMode == agentci.ModeDual {
|
||||
verifyModel = h.spinner.GetVerifierModel(agentName)
|
||||
}
|
||||
|
||||
// Build ticket — resolve target branch from repo default.
|
||||
targetBranch := "main"
|
||||
if repo, err := h.forge.GetRepo(safeOwner, safeRepo); err == nil && repo.DefaultBranch != "" {
|
||||
targetBranch = repo.DefaultBranch
|
||||
}
|
||||
ticketID := fmt.Sprintf("%s-%s-%d-%d", safeOwner, safeRepo, signal.ChildNumber, time.Now().Unix())
|
||||
|
||||
ticket := DispatchTicket{
|
||||
ID: ticketID,
|
||||
RepoOwner: safeOwner,
|
||||
RepoName: safeRepo,
|
||||
IssueNumber: signal.ChildNumber,
|
||||
IssueTitle: signal.IssueTitle,
|
||||
IssueBody: signal.IssueBody,
|
||||
TargetBranch: targetBranch,
|
||||
EpicNumber: signal.EpicNumber,
|
||||
ForgeURL: h.forgeURL,
|
||||
ForgeUser: signal.Assignee,
|
||||
Model: agent.Model,
|
||||
Runner: agent.Runner,
|
||||
VerifyModel: verifyModel,
|
||||
DualRun: runMode == agentci.ModeDual,
|
||||
CreatedAt: time.Now().UTC().Format(time.RFC3339),
|
||||
}
|
||||
|
||||
ticketJSON, err := json.MarshalIndent(ticket, "", " ")
|
||||
if err != nil {
|
||||
h.failDispatch(signal, "Failed to marshal ticket JSON")
|
||||
return nil, fmt.Errorf("marshal ticket: %w", err)
|
||||
}
|
||||
|
||||
// Check if ticket already exists on agent (dedup).
|
||||
ticketName := fmt.Sprintf("ticket-%s-%s-%d.json", safeOwner, safeRepo, signal.ChildNumber)
|
||||
if h.ticketExists(ctx, agent, ticketName) {
|
||||
log.Info("ticket already queued, skipping", "ticket", ticketName, "agent", signal.Assignee)
|
||||
return &jobrunner.ActionResult{
|
||||
Action: "dispatch",
|
||||
RepoOwner: safeOwner,
|
||||
RepoName: safeRepo,
|
||||
EpicNumber: signal.EpicNumber,
|
||||
ChildNumber: signal.ChildNumber,
|
||||
Success: true,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Transfer ticket JSON.
|
||||
remoteTicketPath := filepath.Join(agent.QueueDir, ticketName)
|
||||
if err := h.secureTransfer(ctx, agent, remoteTicketPath, ticketJSON, 0644); err != nil {
|
||||
h.failDispatch(signal, fmt.Sprintf("Ticket transfer failed: %v", err))
|
||||
return &jobrunner.ActionResult{
|
||||
Action: "dispatch",
|
||||
RepoOwner: safeOwner,
|
||||
RepoName: safeRepo,
|
||||
EpicNumber: signal.EpicNumber,
|
||||
ChildNumber: signal.ChildNumber,
|
||||
Success: false,
|
||||
Error: fmt.Sprintf("transfer ticket: %v", err),
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Transfer token via separate .env file with 0600 permissions.
|
||||
envContent := fmt.Sprintf("FORGE_TOKEN=%s\n", h.token)
|
||||
remoteEnvPath := filepath.Join(agent.QueueDir, fmt.Sprintf(".env.%s", ticketID))
|
||||
if err := h.secureTransfer(ctx, agent, remoteEnvPath, []byte(envContent), 0600); err != nil {
|
||||
// Clean up the ticket if env transfer fails.
|
||||
_ = h.runRemote(ctx, agent, fmt.Sprintf("rm -f %s", agentci.EscapeShellArg(remoteTicketPath)))
|
||||
h.failDispatch(signal, fmt.Sprintf("Token transfer failed: %v", err))
|
||||
return &jobrunner.ActionResult{
|
||||
Action: "dispatch",
|
||||
RepoOwner: safeOwner,
|
||||
RepoName: safeRepo,
|
||||
EpicNumber: signal.EpicNumber,
|
||||
ChildNumber: signal.ChildNumber,
|
||||
Success: false,
|
||||
Error: fmt.Sprintf("transfer token: %v", err),
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Comment on issue.
|
||||
modeStr := "Standard"
|
||||
if runMode == agentci.ModeDual {
|
||||
modeStr = "Clotho Verified (Dual Run)"
|
||||
}
|
||||
comment := fmt.Sprintf("Dispatched to **%s** agent queue.\nMode: **%s**", signal.Assignee, modeStr)
|
||||
_ = h.forge.CreateIssueComment(safeOwner, safeRepo, int64(signal.ChildNumber), comment)
|
||||
|
||||
return &jobrunner.ActionResult{
|
||||
Action: "dispatch",
|
||||
RepoOwner: safeOwner,
|
||||
RepoName: safeRepo,
|
||||
EpicNumber: signal.EpicNumber,
|
||||
ChildNumber: signal.ChildNumber,
|
||||
Success: true,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// failDispatch handles cleanup when dispatch fails (adds failed label, removes in-progress).
|
||||
func (h *DispatchHandler) failDispatch(signal *jobrunner.PipelineSignal, reason string) {
|
||||
if failedLabel, err := h.forge.EnsureLabel(signal.RepoOwner, signal.RepoName, LabelAgentFailed, ColorAgentFailed); err == nil {
|
||||
_ = h.forge.AddIssueLabels(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber), []int64{failedLabel.ID})
|
||||
}
|
||||
|
||||
if inProgressLabel, err := h.forge.GetLabelByName(signal.RepoOwner, signal.RepoName, LabelInProgress); err == nil {
|
||||
_ = h.forge.RemoveIssueLabel(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber), inProgressLabel.ID)
|
||||
}
|
||||
|
||||
_ = h.forge.CreateIssueComment(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber), fmt.Sprintf("Agent dispatch failed: %s", reason))
|
||||
}
|
||||
|
||||
// secureTransfer writes data to a remote path via SSH stdin, preventing command injection.
|
||||
func (h *DispatchHandler) secureTransfer(ctx context.Context, agent agentci.AgentConfig, remotePath string, data []byte, mode int) error {
|
||||
safeRemotePath := agentci.EscapeShellArg(remotePath)
|
||||
remoteCmd := fmt.Sprintf("cat > %s && chmod %o %s", safeRemotePath, mode, safeRemotePath)
|
||||
|
||||
cmd := agentci.SecureSSHCommandContext(ctx, agent.Host, remoteCmd)
|
||||
cmd.Stdin = bytes.NewReader(data)
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return log.E("dispatch.transfer", fmt.Sprintf("ssh to %s failed: %s", agent.Host, string(output)), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// runRemote executes a command on the agent via SSH.
|
||||
func (h *DispatchHandler) runRemote(ctx context.Context, agent agentci.AgentConfig, cmdStr string) error {
|
||||
cmd := agentci.SecureSSHCommandContext(ctx, agent.Host, cmdStr)
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// ticketExists checks if a ticket file already exists in queue, active, or done.
|
||||
func (h *DispatchHandler) ticketExists(ctx context.Context, agent agentci.AgentConfig, ticketName string) bool {
|
||||
safeTicket, err := agentci.SanitizePath(ticketName)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
qDir := agent.QueueDir
|
||||
checkCmd := fmt.Sprintf(
|
||||
"test -f %s/%s || test -f %s/../active/%s || test -f %s/../done/%s",
|
||||
qDir, safeTicket, qDir, safeTicket, qDir, safeTicket,
|
||||
)
|
||||
cmd := agentci.SecureSSHCommandContext(ctx, agent.Host, checkCmd)
|
||||
return cmd.Run() == nil
|
||||
}
|
||||
|
|
@ -1,327 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/agentci"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// newTestSpinner creates a Spinner with the given agents for testing.
|
||||
func newTestSpinner(agents map[string]agentci.AgentConfig) *agentci.Spinner {
|
||||
return agentci.NewSpinner(agentci.ClothoConfig{Strategy: "direct"}, agents)
|
||||
}
|
||||
|
||||
// --- Match tests ---
|
||||
|
||||
func TestDispatch_Match_Good_NeedsCoding(t *testing.T) {
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(nil, "", "", spinner)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "darbs-claude",
|
||||
}
|
||||
assert.True(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestDispatch_Match_Good_MultipleAgents(t *testing.T) {
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue", Active: true},
|
||||
"local-codex": {Host: "localhost", QueueDir: "~/ai-work/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(nil, "", "", spinner)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "local-codex",
|
||||
}
|
||||
assert.True(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestDispatch_Match_Bad_HasPR(t *testing.T) {
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(nil, "", "", spinner)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: false,
|
||||
PRNumber: 7,
|
||||
Assignee: "darbs-claude",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestDispatch_Match_Bad_UnknownAgent(t *testing.T) {
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(nil, "", "", spinner)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "unknown-user",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestDispatch_Match_Bad_NotAssigned(t *testing.T) {
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(nil, "", "", spinner)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestDispatch_Match_Bad_EmptyAgentMap(t *testing.T) {
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{})
|
||||
h := NewDispatchHandler(nil, "", "", spinner)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "darbs-claude",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
// --- Name test ---
|
||||
|
||||
func TestDispatch_Name_Good(t *testing.T) {
|
||||
spinner := newTestSpinner(nil)
|
||||
h := NewDispatchHandler(nil, "", "", spinner)
|
||||
assert.Equal(t, "dispatch", h.Name())
|
||||
}
|
||||
|
||||
// --- Execute tests ---
|
||||
|
||||
func TestDispatch_Execute_Bad_UnknownAgent(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "claude@192.168.0.201", QueueDir: "~/ai-work/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(client, srv.URL, "test-token", spinner)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "nonexistent-agent",
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core",
|
||||
ChildNumber: 1,
|
||||
}
|
||||
|
||||
_, err := h.Execute(context.Background(), sig)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown agent")
|
||||
}
|
||||
|
||||
func TestDispatch_TicketJSON_Good(t *testing.T) {
|
||||
ticket := DispatchTicket{
|
||||
ID: "host-uk-core-5-1234567890",
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core",
|
||||
IssueNumber: 5,
|
||||
IssueTitle: "Fix the thing",
|
||||
IssueBody: "Please fix this bug",
|
||||
TargetBranch: "new",
|
||||
EpicNumber: 3,
|
||||
ForgeURL: "https://forge.lthn.ai",
|
||||
ForgeUser: "darbs-claude",
|
||||
Model: "sonnet",
|
||||
Runner: "claude",
|
||||
DualRun: false,
|
||||
CreatedAt: "2026-02-09T12:00:00Z",
|
||||
}
|
||||
|
||||
data, err := json.MarshalIndent(ticket, "", " ")
|
||||
require.NoError(t, err)
|
||||
|
||||
var decoded map[string]any
|
||||
err = json.Unmarshal(data, &decoded)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "host-uk-core-5-1234567890", decoded["id"])
|
||||
assert.Equal(t, "host-uk", decoded["repo_owner"])
|
||||
assert.Equal(t, "core", decoded["repo_name"])
|
||||
assert.Equal(t, float64(5), decoded["issue_number"])
|
||||
assert.Equal(t, "Fix the thing", decoded["issue_title"])
|
||||
assert.Equal(t, "Please fix this bug", decoded["issue_body"])
|
||||
assert.Equal(t, "new", decoded["target_branch"])
|
||||
assert.Equal(t, float64(3), decoded["epic_number"])
|
||||
assert.Equal(t, "https://forge.lthn.ai", decoded["forge_url"])
|
||||
assert.Equal(t, "darbs-claude", decoded["forgejo_user"])
|
||||
assert.Equal(t, "sonnet", decoded["model"])
|
||||
assert.Equal(t, "claude", decoded["runner"])
|
||||
// Token should NOT be present in the ticket.
|
||||
_, hasToken := decoded["forge_token"]
|
||||
assert.False(t, hasToken, "forge_token must not be in ticket JSON")
|
||||
}
|
||||
|
||||
func TestDispatch_TicketJSON_Good_DualRun(t *testing.T) {
|
||||
ticket := DispatchTicket{
|
||||
ID: "test-dual",
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core",
|
||||
IssueNumber: 1,
|
||||
ForgeURL: "https://forge.lthn.ai",
|
||||
Model: "gemini-2.0-flash",
|
||||
VerifyModel: "gemini-1.5-pro",
|
||||
DualRun: true,
|
||||
}
|
||||
|
||||
data, err := json.Marshal(ticket)
|
||||
require.NoError(t, err)
|
||||
|
||||
var roundtrip DispatchTicket
|
||||
err = json.Unmarshal(data, &roundtrip)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, roundtrip.DualRun)
|
||||
assert.Equal(t, "gemini-1.5-pro", roundtrip.VerifyModel)
|
||||
}
|
||||
|
||||
func TestDispatch_TicketJSON_Good_OmitsEmptyModelRunner(t *testing.T) {
|
||||
ticket := DispatchTicket{
|
||||
ID: "test-1",
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core",
|
||||
IssueNumber: 1,
|
||||
TargetBranch: "new",
|
||||
ForgeURL: "https://forge.lthn.ai",
|
||||
}
|
||||
|
||||
data, err := json.MarshalIndent(ticket, "", " ")
|
||||
require.NoError(t, err)
|
||||
|
||||
var decoded map[string]any
|
||||
err = json.Unmarshal(data, &decoded)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, hasModel := decoded["model"]
|
||||
_, hasRunner := decoded["runner"]
|
||||
assert.False(t, hasModel, "model should be omitted when empty")
|
||||
assert.False(t, hasRunner, "runner should be omitted when empty")
|
||||
}
|
||||
|
||||
func TestDispatch_TicketJSON_Good_ModelRunnerVariants(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
model string
|
||||
runner string
|
||||
}{
|
||||
{"claude-sonnet", "sonnet", "claude"},
|
||||
{"claude-opus", "opus", "claude"},
|
||||
{"codex-default", "", "codex"},
|
||||
{"gemini-default", "", "gemini"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ticket := DispatchTicket{
|
||||
ID: "test-" + tt.name,
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core",
|
||||
IssueNumber: 1,
|
||||
TargetBranch: "new",
|
||||
ForgeURL: "https://forge.lthn.ai",
|
||||
Model: tt.model,
|
||||
Runner: tt.runner,
|
||||
}
|
||||
|
||||
data, err := json.Marshal(ticket)
|
||||
require.NoError(t, err)
|
||||
|
||||
var roundtrip DispatchTicket
|
||||
err = json.Unmarshal(data, &roundtrip)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.model, roundtrip.Model)
|
||||
assert.Equal(t, tt.runner, roundtrip.Runner)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDispatch_Execute_Good_PostsComment(t *testing.T) {
|
||||
var commentPosted bool
|
||||
var commentBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/host-uk/core/labels":
|
||||
json.NewEncoder(w).Encode([]any{})
|
||||
return
|
||||
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/host-uk/core/labels":
|
||||
json.NewEncoder(w).Encode(map[string]any{"id": 1, "name": "in-progress", "color": "#1d76db"})
|
||||
return
|
||||
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/host-uk/core/issues/5":
|
||||
json.NewEncoder(w).Encode(map[string]any{"id": 5, "number": 5, "labels": []any{}, "title": "Test"})
|
||||
return
|
||||
|
||||
case r.Method == http.MethodPatch && r.URL.Path == "/api/v1/repos/host-uk/core/issues/5":
|
||||
json.NewEncoder(w).Encode(map[string]any{"id": 5, "number": 5})
|
||||
return
|
||||
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/host-uk/core/issues/5/labels":
|
||||
json.NewEncoder(w).Encode([]any{map[string]any{"id": 1, "name": "in-progress"}})
|
||||
return
|
||||
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/host-uk/core/issues/5/comments":
|
||||
commentPosted = true
|
||||
var body map[string]string
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
commentBody = body["body"]
|
||||
json.NewEncoder(w).Encode(map[string]any{"id": 1, "body": body["body"]})
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
json.NewEncoder(w).Encode(map[string]any{})
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "localhost", QueueDir: "/tmp/nonexistent-queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(client, srv.URL, "test-token", spinner)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "darbs-claude",
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core",
|
||||
ChildNumber: 5,
|
||||
EpicNumber: 3,
|
||||
IssueTitle: "Test issue",
|
||||
IssueBody: "Test body",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "dispatch", result.Action)
|
||||
assert.Equal(t, "host-uk", result.RepoOwner)
|
||||
assert.Equal(t, "core", result.RepoName)
|
||||
assert.Equal(t, 3, result.EpicNumber)
|
||||
assert.Equal(t, 5, result.ChildNumber)
|
||||
|
||||
if result.Success {
|
||||
assert.True(t, commentPosted)
|
||||
assert.Contains(t, commentBody, "darbs-claude")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// EnableAutoMergeHandler merges a PR that is ready using squash strategy.
|
||||
type EnableAutoMergeHandler struct {
|
||||
forge *forge.Client
|
||||
}
|
||||
|
||||
// NewEnableAutoMergeHandler creates a handler that merges ready PRs.
|
||||
func NewEnableAutoMergeHandler(f *forge.Client) *EnableAutoMergeHandler {
|
||||
return &EnableAutoMergeHandler{forge: f}
|
||||
}
|
||||
|
||||
// Name returns the handler identifier.
|
||||
func (h *EnableAutoMergeHandler) Name() string {
|
||||
return "enable_auto_merge"
|
||||
}
|
||||
|
||||
// Match returns true when the PR is open, not a draft, mergeable, checks
|
||||
// are passing, and there are no unresolved review threads.
|
||||
func (h *EnableAutoMergeHandler) Match(signal *jobrunner.PipelineSignal) bool {
|
||||
return signal.PRState == "OPEN" &&
|
||||
!signal.IsDraft &&
|
||||
signal.Mergeable == "MERGEABLE" &&
|
||||
signal.CheckStatus == "SUCCESS" &&
|
||||
!signal.HasUnresolvedThreads()
|
||||
}
|
||||
|
||||
// Execute merges the pull request with squash strategy.
|
||||
func (h *EnableAutoMergeHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
|
||||
start := time.Now()
|
||||
|
||||
err := h.forge.MergePullRequest(signal.RepoOwner, signal.RepoName, int64(signal.PRNumber), "squash")
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "enable_auto_merge",
|
||||
RepoOwner: signal.RepoOwner,
|
||||
RepoName: signal.RepoName,
|
||||
PRNumber: signal.PRNumber,
|
||||
Success: err == nil,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
result.Error = fmt.Sprintf("merge failed: %v", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
|
@ -1,105 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
func TestEnableAutoMerge_Match_Good(t *testing.T) {
|
||||
h := NewEnableAutoMergeHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
IsDraft: false,
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "SUCCESS",
|
||||
ThreadsTotal: 0,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
assert.True(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestEnableAutoMerge_Match_Bad_Draft(t *testing.T) {
|
||||
h := NewEnableAutoMergeHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
IsDraft: true,
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "SUCCESS",
|
||||
ThreadsTotal: 0,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestEnableAutoMerge_Match_Bad_UnresolvedThreads(t *testing.T) {
|
||||
h := NewEnableAutoMergeHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
IsDraft: false,
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "SUCCESS",
|
||||
ThreadsTotal: 5,
|
||||
ThreadsResolved: 3,
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestEnableAutoMerge_Execute_Good(t *testing.T) {
|
||||
var capturedPath string
|
||||
var capturedMethod string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
capturedMethod = r.Method
|
||||
capturedPath = r.URL.Path
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
|
||||
h := NewEnableAutoMergeHandler(client)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-php",
|
||||
PRNumber: 55,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Success)
|
||||
assert.Equal(t, "enable_auto_merge", result.Action)
|
||||
assert.Equal(t, http.MethodPost, capturedMethod)
|
||||
assert.Equal(t, "/api/v1/repos/host-uk/core-php/pulls/55/merge", capturedPath)
|
||||
}
|
||||
|
||||
func TestEnableAutoMerge_Execute_Bad_MergeFailed(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusConflict)
|
||||
_ = json.NewEncoder(w).Encode(map[string]string{"message": "merge conflict"})
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
|
||||
h := NewEnableAutoMergeHandler(client)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-php",
|
||||
PRNumber: 55,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.False(t, result.Success)
|
||||
assert.Contains(t, result.Error, "merge failed")
|
||||
}
|
||||
|
|
@ -1,583 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/agentci"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// --- Name tests for all handlers ---
|
||||
|
||||
func TestEnableAutoMerge_Name_Good(t *testing.T) {
|
||||
h := NewEnableAutoMergeHandler(nil)
|
||||
assert.Equal(t, "enable_auto_merge", h.Name())
|
||||
}
|
||||
|
||||
func TestPublishDraft_Name_Good(t *testing.T) {
|
||||
h := NewPublishDraftHandler(nil)
|
||||
assert.Equal(t, "publish_draft", h.Name())
|
||||
}
|
||||
|
||||
func TestDismissReviews_Name_Good(t *testing.T) {
|
||||
h := NewDismissReviewsHandler(nil)
|
||||
assert.Equal(t, "dismiss_reviews", h.Name())
|
||||
}
|
||||
|
||||
func TestSendFixCommand_Name_Good(t *testing.T) {
|
||||
h := NewSendFixCommandHandler(nil)
|
||||
assert.Equal(t, "send_fix_command", h.Name())
|
||||
}
|
||||
|
||||
func TestTickParent_Name_Good(t *testing.T) {
|
||||
h := NewTickParentHandler(nil)
|
||||
assert.Equal(t, "tick_parent", h.Name())
|
||||
}
|
||||
|
||||
// --- Additional Match tests ---
|
||||
|
||||
func TestEnableAutoMerge_Match_Bad_Closed(t *testing.T) {
|
||||
h := NewEnableAutoMergeHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "CLOSED",
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "SUCCESS",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestEnableAutoMerge_Match_Bad_ChecksFailing(t *testing.T) {
|
||||
h := NewEnableAutoMergeHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "FAILURE",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestEnableAutoMerge_Match_Bad_Conflicting(t *testing.T) {
|
||||
h := NewEnableAutoMergeHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
Mergeable: "CONFLICTING",
|
||||
CheckStatus: "SUCCESS",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestPublishDraft_Match_Bad_Closed(t *testing.T) {
|
||||
h := NewPublishDraftHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
IsDraft: true,
|
||||
PRState: "CLOSED",
|
||||
CheckStatus: "SUCCESS",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestDismissReviews_Match_Bad_Closed(t *testing.T) {
|
||||
h := NewDismissReviewsHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "CLOSED",
|
||||
ThreadsTotal: 3,
|
||||
ThreadsResolved: 1,
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestDismissReviews_Match_Bad_NoThreads(t *testing.T) {
|
||||
h := NewDismissReviewsHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
ThreadsTotal: 0,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestSendFixCommand_Match_Bad_Closed(t *testing.T) {
|
||||
h := NewSendFixCommandHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "CLOSED",
|
||||
Mergeable: "CONFLICTING",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestSendFixCommand_Match_Bad_NoIssues(t *testing.T) {
|
||||
h := NewSendFixCommandHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "SUCCESS",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestSendFixCommand_Match_Good_ThreadsFailure(t *testing.T) {
|
||||
h := NewSendFixCommandHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "FAILURE",
|
||||
ThreadsTotal: 2,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
assert.True(t, h.Match(sig))
|
||||
}
|
||||
|
||||
func TestTickParent_Match_Bad_Closed(t *testing.T) {
|
||||
h := NewTickParentHandler(nil)
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
PRState: "CLOSED",
|
||||
}
|
||||
assert.False(t, h.Match(sig))
|
||||
}
|
||||
|
||||
// --- Additional Execute tests ---
|
||||
|
||||
func TestPublishDraft_Execute_Bad_ServerError(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewPublishDraftHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRNumber: 1,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Success)
|
||||
assert.Contains(t, result.Error, "publish draft failed")
|
||||
}
|
||||
|
||||
func TestSendFixCommand_Execute_Good_Reviews(t *testing.T) {
|
||||
var capturedBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if r.Method == http.MethodPost {
|
||||
b, _ := io.ReadAll(r.Body)
|
||||
capturedBody = string(b)
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_, _ = w.Write([]byte(`{"id":1}`))
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewSendFixCommandHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRNumber: 5,
|
||||
PRState: "OPEN",
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "FAILURE",
|
||||
ThreadsTotal: 2,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success)
|
||||
assert.Contains(t, capturedBody, "fix the code reviews")
|
||||
}
|
||||
|
||||
func TestSendFixCommand_Execute_Bad_CommentFails(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewSendFixCommandHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRNumber: 1,
|
||||
Mergeable: "CONFLICTING",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Success)
|
||||
assert.Contains(t, result.Error, "post comment failed")
|
||||
}
|
||||
|
||||
func TestTickParent_Execute_Good_AlreadyTicked(t *testing.T) {
|
||||
epicBody := "## Tasks\n- [x] #7\n- [ ] #8\n"
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/issues/42") {
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": 42,
|
||||
"body": epicBody,
|
||||
"title": "Epic",
|
||||
})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewTickParentHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
EpicNumber: 42,
|
||||
ChildNumber: 7,
|
||||
PRNumber: 99,
|
||||
PRState: "MERGED",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success)
|
||||
assert.Equal(t, "tick_parent", result.Action)
|
||||
}
|
||||
|
||||
func TestTickParent_Execute_Bad_FetchEpicFails(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewTickParentHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
EpicNumber: 999,
|
||||
ChildNumber: 1,
|
||||
PRState: "MERGED",
|
||||
}
|
||||
|
||||
_, err := h.Execute(context.Background(), sig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "fetch epic")
|
||||
}
|
||||
|
||||
func TestTickParent_Execute_Bad_EditEpicFails(t *testing.T) {
|
||||
epicBody := "## Tasks\n- [ ] #7\n"
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/issues/42"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": 42,
|
||||
"body": epicBody,
|
||||
"title": "Epic",
|
||||
})
|
||||
case r.Method == http.MethodPatch && strings.Contains(r.URL.Path, "/issues/42"):
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewTickParentHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
EpicNumber: 42,
|
||||
ChildNumber: 7,
|
||||
PRNumber: 99,
|
||||
PRState: "MERGED",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Success)
|
||||
assert.Contains(t, result.Error, "edit epic failed")
|
||||
}
|
||||
|
||||
func TestTickParent_Execute_Bad_CloseChildFails(t *testing.T) {
|
||||
epicBody := "## Tasks\n- [ ] #7\n"
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/issues/42"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": 42,
|
||||
"body": epicBody,
|
||||
"title": "Epic",
|
||||
})
|
||||
case r.Method == http.MethodPatch && strings.Contains(r.URL.Path, "/issues/42"):
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": 42,
|
||||
"body": strings.Replace(epicBody, "- [ ] #7", "- [x] #7", 1),
|
||||
"title": "Epic",
|
||||
})
|
||||
case r.Method == http.MethodPatch && strings.Contains(r.URL.Path, "/issues/7"):
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewTickParentHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
EpicNumber: 42,
|
||||
ChildNumber: 7,
|
||||
PRNumber: 99,
|
||||
PRState: "MERGED",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Success)
|
||||
assert.Contains(t, result.Error, "close child issue failed")
|
||||
}
|
||||
|
||||
func TestDismissReviews_Execute_Bad_ListFails(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewDismissReviewsHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRNumber: 1,
|
||||
}
|
||||
|
||||
_, err := h.Execute(context.Background(), sig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "list reviews")
|
||||
}
|
||||
|
||||
func TestDismissReviews_Execute_Good_NothingToDismiss(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if r.Method == http.MethodGet {
|
||||
// All reviews are either approved or already dismissed.
|
||||
reviews := []map[string]any{
|
||||
{
|
||||
"id": 1, "state": "APPROVED", "dismissed": false, "stale": false,
|
||||
"body": "lgtm", "commit_id": "abc123",
|
||||
},
|
||||
{
|
||||
"id": 2, "state": "REQUEST_CHANGES", "dismissed": true, "stale": true,
|
||||
"body": "already dismissed", "commit_id": "abc123",
|
||||
},
|
||||
{
|
||||
"id": 3, "state": "REQUEST_CHANGES", "dismissed": false, "stale": false,
|
||||
"body": "not stale", "commit_id": "abc123",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(reviews)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewDismissReviewsHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRNumber: 1,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success, "nothing to dismiss should be success")
|
||||
}
|
||||
|
||||
func TestDismissReviews_Execute_Bad_DismissFails(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if r.Method == http.MethodGet {
|
||||
reviews := []map[string]any{
|
||||
{
|
||||
"id": 1, "state": "REQUEST_CHANGES", "dismissed": false, "stale": true,
|
||||
"body": "fix it", "commit_id": "abc123",
|
||||
},
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(reviews)
|
||||
return
|
||||
}
|
||||
|
||||
// Dismiss fails.
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewDismissReviewsHandler(client)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRNumber: 1,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Success)
|
||||
assert.Contains(t, result.Error, "failed to dismiss")
|
||||
}
|
||||
|
||||
// --- Dispatch Execute edge cases ---
|
||||
|
||||
func TestDispatch_Execute_Good_AlreadyInProgress(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{
|
||||
{"id": 1, "name": "in-progress", "color": "#1d76db"},
|
||||
})
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "name": "in-progress"})
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
|
||||
// Issue already has in-progress label.
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"id": 5,
|
||||
"number": 5,
|
||||
"labels": []map[string]any{{"name": "in-progress", "id": 1}},
|
||||
"title": "Test",
|
||||
})
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{})
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "localhost", QueueDir: "/tmp/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(client, srv.URL, "test-token", spinner)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "darbs-claude",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
ChildNumber: 5,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success, "already in-progress should be a no-op success")
|
||||
}
|
||||
|
||||
func TestDispatch_Execute_Good_AlreadyCompleted(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{
|
||||
{"id": 2, "name": "agent-completed", "color": "#0e8a16"},
|
||||
})
|
||||
case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/org/repo/labels":
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "name": "in-progress"})
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/org/repo/issues/5":
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"id": 5,
|
||||
"number": 5,
|
||||
"labels": []map[string]any{{"name": "agent-completed", "id": 2}},
|
||||
"title": "Done",
|
||||
})
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{})
|
||||
}
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "localhost", QueueDir: "/tmp/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(client, srv.URL, "test-token", spinner)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "darbs-claude",
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
ChildNumber: 5,
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), sig)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success)
|
||||
}
|
||||
|
||||
func TestDispatch_Execute_Bad_InvalidRepoOwner(t *testing.T) {
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
spinner := newTestSpinner(map[string]agentci.AgentConfig{
|
||||
"darbs-claude": {Host: "localhost", QueueDir: "/tmp/queue", Active: true},
|
||||
})
|
||||
h := NewDispatchHandler(client, srv.URL, "test-token", spinner)
|
||||
|
||||
sig := &jobrunner.PipelineSignal{
|
||||
NeedsCoding: true,
|
||||
Assignee: "darbs-claude",
|
||||
RepoOwner: "org$bad",
|
||||
RepoName: "repo",
|
||||
ChildNumber: 1,
|
||||
}
|
||||
|
||||
_, err := h.Execute(context.Background(), sig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "invalid repo owner")
|
||||
}
|
||||
|
|
@ -1,824 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// --- Integration: full signal -> handler -> result flow ---
|
||||
// These tests exercise the complete pipeline: a signal is created,
|
||||
// matched by a handler, executed against a mock Forgejo server,
|
||||
// and the result is verified.
|
||||
|
||||
// mockForgejoServer creates a comprehensive mock Forgejo API server
|
||||
// for integration testing. It supports issues, PRs, labels, comments,
|
||||
// and tracks all API calls made.
|
||||
// apiCall records a single request made against the mock Forgejo server:
// the HTTP method, the URL path, and the raw request body.
type apiCall struct {
	Method string
	Path   string
	Body   string
}
|
||||
|
||||
// forgejoMock is a stateful mock Forgejo API server for integration tests.
// It records every request it receives and captures the side effects the
// handlers are expected to produce (body edits, issue closes, comments).
type forgejoMock struct {
	epicBody    string // initial epic issue body served on GET
	calls       []apiCall // every request received, in order
	srv         *httptest.Server
	closedChild bool // set when a PATCH carries state "closed"
	editedBody  string // last issue body written via PATCH
	comments    []string // bodies of all POSTed comments, in order
}
|
||||
|
||||
func newForgejoMock(t *testing.T, epicBody string) *forgejoMock {
|
||||
t.Helper()
|
||||
m := &forgejoMock{
|
||||
epicBody: epicBody,
|
||||
}
|
||||
|
||||
m.srv = httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
bodyBytes, _ := io.ReadAll(r.Body)
|
||||
m.calls = append(m.calls, apiCall{
|
||||
Method: r.Method,
|
||||
Path: r.URL.Path,
|
||||
Body: string(bodyBytes),
|
||||
})
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
path := r.URL.Path
|
||||
|
||||
switch {
|
||||
// GET epic issue.
|
||||
case r.Method == http.MethodGet && strings.Contains(path, "/issues/") && !strings.Contains(path, "/comments"):
|
||||
issueNum := path[strings.LastIndex(path, "/")+1:]
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": json.Number(issueNum),
|
||||
"body": m.epicBody,
|
||||
"title": "Epic: Phase 3",
|
||||
"state": "open",
|
||||
"labels": []map[string]any{{"name": "epic", "id": 1}},
|
||||
})
|
||||
|
||||
// PATCH epic issue (edit body or close child).
|
||||
case r.Method == http.MethodPatch && strings.Contains(path, "/issues/"):
|
||||
var body map[string]any
|
||||
_ = json.Unmarshal(bodyBytes, &body)
|
||||
|
||||
if bodyStr, ok := body["body"].(string); ok {
|
||||
m.editedBody = bodyStr
|
||||
}
|
||||
if state, ok := body["state"].(string); ok && state == "closed" {
|
||||
m.closedChild = true
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"number": 1,
|
||||
"body": m.editedBody,
|
||||
"state": "open",
|
||||
})
|
||||
|
||||
// POST comment.
|
||||
case r.Method == http.MethodPost && strings.Contains(path, "/comments"):
|
||||
var body map[string]string
|
||||
_ = json.Unmarshal(bodyBytes, &body)
|
||||
m.comments = append(m.comments, body["body"])
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1, "body": body["body"]})
|
||||
|
||||
// GET labels.
|
||||
case r.Method == http.MethodGet && strings.Contains(path, "/labels"):
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{
|
||||
{"id": 1, "name": "epic", "color": "#ff0000"},
|
||||
{"id": 2, "name": "in-progress", "color": "#1d76db"},
|
||||
})
|
||||
|
||||
// POST labels.
|
||||
case r.Method == http.MethodPost && strings.HasSuffix(path, "/labels"):
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 10, "name": "new-label"})
|
||||
|
||||
// POST issue labels.
|
||||
case r.Method == http.MethodPost && strings.Contains(path, "/issues/") && strings.Contains(path, "/labels"):
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{})
|
||||
|
||||
// DELETE issue label.
|
||||
case r.Method == http.MethodDelete && strings.Contains(path, "/labels/"):
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
||||
// POST merge PR.
|
||||
case r.Method == http.MethodPost && strings.Contains(path, "/merge"):
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
// PATCH PR (publish draft).
|
||||
case r.Method == http.MethodPatch && strings.Contains(path, "/pulls/"):
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte(`{}`))
|
||||
|
||||
// GET reviews.
|
||||
case r.Method == http.MethodGet && strings.Contains(path, "/reviews"):
|
||||
_ = json.NewEncoder(w).Encode([]map[string]any{})
|
||||
|
||||
default:
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{})
|
||||
}
|
||||
})))
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// close shuts down the mock's underlying HTTP test server.
func (m *forgejoMock) close() {
	m.srv.Close()
}
|
||||
|
||||
// client returns a forge.Client pointed at the mock server, failing the
// test immediately if construction errors.
func (m *forgejoMock) client(t *testing.T) *forge.Client {
	t.Helper()
	c, err := forge.New(m.srv.URL, "test-token")
	require.NoError(t, err)
	return c
}
|
||||
|
||||
// --- TickParent integration: signal -> execute -> verify epic updated ---
|
||||
|
||||
func TestIntegration_TickParent_Good_FullFlow(t *testing.T) {
|
||||
epicBody := "## Tasks\n- [x] #1\n- [ ] #7\n- [ ] #8\n- [x] #3\n"
|
||||
|
||||
mock := newForgejoMock(t, epicBody)
|
||||
defer mock.close()
|
||||
|
||||
h := NewTickParentHandler(mock.client(t))
|
||||
|
||||
// Create signal representing a merged PR for child #7.
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
EpicNumber: 42,
|
||||
ChildNumber: 7,
|
||||
PRNumber: 99,
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-php",
|
||||
PRState: "MERGED",
|
||||
CheckStatus: "SUCCESS",
|
||||
Mergeable: "UNKNOWN",
|
||||
}
|
||||
|
||||
// Verify the handler matches.
|
||||
assert.True(t, h.Match(signal))
|
||||
|
||||
// Execute.
|
||||
result, err := h.Execute(context.Background(), signal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify result.
|
||||
assert.True(t, result.Success)
|
||||
assert.Equal(t, "tick_parent", result.Action)
|
||||
assert.Equal(t, "host-uk", result.RepoOwner)
|
||||
assert.Equal(t, "core-php", result.RepoName)
|
||||
assert.Equal(t, 99, result.PRNumber)
|
||||
|
||||
// Verify the epic body was updated: #7 should now be checked.
|
||||
assert.Contains(t, mock.editedBody, "- [x] #7")
|
||||
// #8 should still be unchecked.
|
||||
assert.Contains(t, mock.editedBody, "- [ ] #8")
|
||||
// #1 and #3 should remain checked.
|
||||
assert.Contains(t, mock.editedBody, "- [x] #1")
|
||||
assert.Contains(t, mock.editedBody, "- [x] #3")
|
||||
|
||||
// Verify the child issue was closed.
|
||||
assert.True(t, mock.closedChild)
|
||||
}
|
||||
|
||||
// --- TickParent integration: epic progress tracking ---
|
||||
|
||||
func TestIntegration_TickParent_Good_TrackEpicProgress(t *testing.T) {
|
||||
// Start with 4 tasks, 1 checked.
|
||||
epicBody := "## Tasks\n- [x] #1\n- [ ] #2\n- [ ] #3\n- [ ] #4\n"
|
||||
|
||||
mock := newForgejoMock(t, epicBody)
|
||||
defer mock.close()
|
||||
|
||||
h := NewTickParentHandler(mock.client(t))
|
||||
|
||||
// Tick child #2.
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
EpicNumber: 10,
|
||||
ChildNumber: 2,
|
||||
PRNumber: 20,
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRState: "MERGED",
|
||||
}
|
||||
|
||||
result, err := h.Execute(context.Background(), signal)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Success)
|
||||
|
||||
// Verify #2 is now checked.
|
||||
assert.Contains(t, mock.editedBody, "- [x] #2")
|
||||
// #3 and #4 should still be unchecked.
|
||||
assert.Contains(t, mock.editedBody, "- [ ] #3")
|
||||
assert.Contains(t, mock.editedBody, "- [ ] #4")
|
||||
|
||||
// Count progress: 2 out of 4 now checked.
|
||||
checked := strings.Count(mock.editedBody, "- [x]")
|
||||
unchecked := strings.Count(mock.editedBody, "- [ ]")
|
||||
assert.Equal(t, 2, checked)
|
||||
assert.Equal(t, 2, unchecked)
|
||||
}
|
||||
|
||||
// --- EnableAutoMerge integration: full flow ---
|
||||
|
||||
func TestIntegration_EnableAutoMerge_Good_FullFlow(t *testing.T) {
|
||||
var mergeMethod string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/merge") {
|
||||
bodyBytes, _ := io.ReadAll(r.Body)
|
||||
var body map[string]any
|
||||
_ = json.Unmarshal(bodyBytes, &body)
|
||||
if do, ok := body["Do"].(string); ok {
|
||||
mergeMethod = do
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewEnableAutoMergeHandler(client)
|
||||
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 5,
|
||||
PRNumber: 42,
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-tenant",
|
||||
PRState: "OPEN",
|
||||
IsDraft: false,
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "SUCCESS",
|
||||
}
|
||||
|
||||
// Verify match.
|
||||
assert.True(t, h.Match(signal))
|
||||
|
||||
// Execute.
|
||||
result, err := h.Execute(context.Background(), signal)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Success)
|
||||
assert.Equal(t, "enable_auto_merge", result.Action)
|
||||
assert.Equal(t, "host-uk", result.RepoOwner)
|
||||
assert.Equal(t, "core-tenant", result.RepoName)
|
||||
assert.Equal(t, 42, result.PRNumber)
|
||||
assert.Equal(t, "squash", mergeMethod)
|
||||
}
|
||||
|
||||
// --- PublishDraft integration: full flow ---
|
||||
|
||||
func TestIntegration_PublishDraft_Good_FullFlow(t *testing.T) {
|
||||
var patchedDraft bool
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if r.Method == http.MethodPatch && strings.Contains(r.URL.Path, "/pulls/") {
|
||||
bodyBytes, _ := io.ReadAll(r.Body)
|
||||
if strings.Contains(string(bodyBytes), `"draft":false`) {
|
||||
patchedDraft = true
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte(`{}`))
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewPublishDraftHandler(client)
|
||||
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
EpicNumber: 3,
|
||||
ChildNumber: 8,
|
||||
PRNumber: 15,
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRState: "OPEN",
|
||||
IsDraft: true,
|
||||
CheckStatus: "SUCCESS",
|
||||
Mergeable: "MERGEABLE",
|
||||
}
|
||||
|
||||
// Verify match.
|
||||
assert.True(t, h.Match(signal))
|
||||
|
||||
// Execute.
|
||||
result, err := h.Execute(context.Background(), signal)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Success)
|
||||
assert.Equal(t, "publish_draft", result.Action)
|
||||
assert.True(t, patchedDraft)
|
||||
}
|
||||
|
||||
// --- SendFixCommand integration: conflict message ---
|
||||
|
||||
func TestIntegration_SendFixCommand_Good_ConflictFlow(t *testing.T) {
|
||||
var commentBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/comments") {
|
||||
bodyBytes, _ := io.ReadAll(r.Body)
|
||||
var body map[string]string
|
||||
_ = json.Unmarshal(bodyBytes, &body)
|
||||
commentBody = body["body"]
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewSendFixCommandHandler(client)
|
||||
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 3,
|
||||
PRNumber: 10,
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRState: "OPEN",
|
||||
Mergeable: "CONFLICTING",
|
||||
CheckStatus: "SUCCESS",
|
||||
}
|
||||
|
||||
assert.True(t, h.Match(signal))
|
||||
|
||||
result, err := h.Execute(context.Background(), signal)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Success)
|
||||
assert.Equal(t, "send_fix_command", result.Action)
|
||||
assert.Contains(t, commentBody, "fix the merge conflict")
|
||||
}
|
||||
|
||||
// --- SendFixCommand integration: code review message ---
|
||||
|
||||
func TestIntegration_SendFixCommand_Good_ReviewFlow(t *testing.T) {
|
||||
var commentBody string
|
||||
|
||||
srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/comments") {
|
||||
bodyBytes, _ := io.ReadAll(r.Body)
|
||||
var body map[string]string
|
||||
_ = json.Unmarshal(bodyBytes, &body)
|
||||
commentBody = body["body"]
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})))
|
||||
defer srv.Close()
|
||||
|
||||
client := newTestForgeClient(t, srv.URL)
|
||||
h := NewSendFixCommandHandler(client)
|
||||
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 3,
|
||||
PRNumber: 10,
|
||||
RepoOwner: "org",
|
||||
RepoName: "repo",
|
||||
PRState: "OPEN",
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "FAILURE",
|
||||
ThreadsTotal: 3,
|
||||
ThreadsResolved: 1,
|
||||
}
|
||||
|
||||
assert.True(t, h.Match(signal))
|
||||
|
||||
result, err := h.Execute(context.Background(), signal)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Success)
|
||||
assert.Contains(t, commentBody, "fix the code reviews")
|
||||
}
|
||||
|
||||
// --- Completion integration: success flow ---
|
||||
|
||||
// TestIntegration_Completion_Good_SuccessFlow drives the Completion handler
// end-to-end against a mock Forgejo server: on a successful agent completion
// it should remove the in-progress label, add agent-completed, and post the
// completion message as an issue comment.
//
// NOTE: the mock's switch cases use overlapping matches (exact paths plus
// Contains checks), so their order is significant — keep it as-is.
func TestIntegration_Completion_Good_SuccessFlow(t *testing.T) {
	var labelAdded bool
	var labelRemoved bool
	var commentBody string

	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		// GetLabelByName — GET repo labels.
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/core/go-scm/labels":
			_ = json.NewEncoder(w).Encode([]map[string]any{
				{"id": 1, "name": "in-progress", "color": "#1d76db"},
			})

		// RemoveIssueLabel.
		case r.Method == http.MethodDelete && strings.Contains(r.URL.Path, "/labels/"):
			labelRemoved = true
			w.WriteHeader(http.StatusNoContent)

		// EnsureLabel — POST to create repo label.
		case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/core/go-scm/labels":
			w.WriteHeader(http.StatusCreated)
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 2, "name": "agent-completed", "color": "#0e8a16"})

		// AddIssueLabels — POST to issue labels.
		case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/core/go-scm/issues/12/labels":
			labelAdded = true
			_ = json.NewEncoder(w).Encode([]map[string]any{{"id": 2, "name": "agent-completed"}})

		// CreateIssueComment.
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/comments"):
			bodyBytes, _ := io.ReadAll(r.Body)
			var body map[string]string
			_ = json.Unmarshal(bodyBytes, &body)
			commentBody = body["body"]
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})

		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)
	h := NewCompletionHandler(client)

	signal := &jobrunner.PipelineSignal{
		Type:        "agent_completion",
		EpicNumber:  5,
		ChildNumber: 12,
		RepoOwner:   "core",
		RepoName:    "go-scm",
		Success:     true,
		Message:     "PR created and tests passing",
	}

	assert.True(t, h.Match(signal))

	result, err := h.Execute(context.Background(), signal)
	require.NoError(t, err)

	assert.True(t, result.Success)
	assert.Equal(t, "completion", result.Action)
	assert.Equal(t, "core", result.RepoOwner)
	assert.Equal(t, "go-scm", result.RepoName)
	assert.Equal(t, 5, result.EpicNumber)
	assert.Equal(t, 12, result.ChildNumber)
	assert.True(t, labelRemoved, "in-progress label should be removed")
	assert.True(t, labelAdded, "agent-completed label should be added")
	assert.Contains(t, commentBody, "PR created and tests passing")
}
|
||||
|
||||
// --- Full pipeline integration: signal -> match -> execute -> journal ---
|
||||
|
||||
// TestIntegration_FullPipeline_Good_TickParentWithJournal exercises the full
// loop the poller performs: match a signal, execute the tick_parent handler
// against the mock Forgejo, append the result to the JSONL journal, and read
// the entry back to verify its fields.
func TestIntegration_FullPipeline_Good_TickParentWithJournal(t *testing.T) {
	epicBody := "## Tasks\n- [ ] #7\n- [ ] #8\n"

	mock := newForgejoMock(t, epicBody)
	defer mock.close()

	dir := t.TempDir()
	journal, err := jobrunner.NewJournal(dir)
	require.NoError(t, err)

	client := mock.client(t)
	h := NewTickParentHandler(client)

	signal := &jobrunner.PipelineSignal{
		EpicNumber:  10,
		ChildNumber: 7,
		PRNumber:    55,
		RepoOwner:   "host-uk",
		RepoName:    "core-tenant",
		PRState:     "MERGED",
		CheckStatus: "SUCCESS",
		Mergeable:   "UNKNOWN",
	}

	// Verify match.
	assert.True(t, h.Match(signal))

	// Execute.
	start := time.Now()
	result, err := h.Execute(context.Background(), signal)
	require.NoError(t, err)

	assert.True(t, result.Success)

	// Write to journal (simulating what the poller does).
	result.EpicNumber = signal.EpicNumber
	result.ChildNumber = signal.ChildNumber
	result.Cycle = 1
	result.Duration = time.Since(start)

	err = journal.Append(signal, result)
	require.NoError(t, err)

	// Verify the journal file exists and contains the entry.
	// Journal layout appears to be <dir>/<owner>/<repo>/<UTC date>.jsonl.
	date := time.Now().UTC().Format("2006-01-02")
	journalPath := filepath.Join(dir, "host-uk", "core-tenant", date+".jsonl")

	_, statErr := os.Stat(journalPath)
	require.NoError(t, statErr)

	f, err := os.Open(journalPath)
	require.NoError(t, err)
	defer func() { _ = f.Close() }()

	var entry jobrunner.JournalEntry
	err = json.NewDecoder(f).Decode(&entry)
	require.NoError(t, err)

	assert.Equal(t, "tick_parent", entry.Action)
	assert.Equal(t, "host-uk/core-tenant", entry.Repo)
	assert.Equal(t, 10, entry.Epic)
	assert.Equal(t, 7, entry.Child)
	assert.Equal(t, 55, entry.PR)
	assert.Equal(t, 1, entry.Cycle)
	assert.True(t, entry.Result.Success)
	assert.Equal(t, "MERGED", entry.Signals.PRState)

	// Verify the epic was properly updated.
	assert.Contains(t, mock.editedBody, "- [x] #7")
	assert.Contains(t, mock.editedBody, "- [ ] #8")
	assert.True(t, mock.closedChild)
}
|
||||
|
||||
// --- Handler matching priority: first match wins ---
|
||||
|
||||
func TestIntegration_HandlerPriority_Good_FirstMatchWins(t *testing.T) {
|
||||
// Test that when multiple handlers could match, the first one wins.
|
||||
// This exercises the poller's findHandler logic.
|
||||
|
||||
// Signal with OPEN, not draft, MERGEABLE, SUCCESS, no threads:
|
||||
// This matches enable_auto_merge.
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
IsDraft: false,
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "SUCCESS",
|
||||
ThreadsTotal: 0,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
|
||||
autoMerge := NewEnableAutoMergeHandler(nil)
|
||||
publishDraft := NewPublishDraftHandler(nil)
|
||||
fixCommand := NewSendFixCommandHandler(nil)
|
||||
|
||||
// enable_auto_merge should match.
|
||||
assert.True(t, autoMerge.Match(signal))
|
||||
// publish_draft should NOT match (not a draft).
|
||||
assert.False(t, publishDraft.Match(signal))
|
||||
// send_fix_command should NOT match (mergeable and passing).
|
||||
assert.False(t, fixCommand.Match(signal))
|
||||
}
|
||||
|
||||
// --- Handler matching: draft PR path ---
|
||||
|
||||
func TestIntegration_HandlerPriority_Good_DraftPRPath(t *testing.T) {
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
IsDraft: true,
|
||||
Mergeable: "MERGEABLE",
|
||||
CheckStatus: "SUCCESS",
|
||||
ThreadsTotal: 0,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
|
||||
autoMerge := NewEnableAutoMergeHandler(nil)
|
||||
publishDraft := NewPublishDraftHandler(nil)
|
||||
fixCommand := NewSendFixCommandHandler(nil)
|
||||
|
||||
// enable_auto_merge should NOT match (is draft).
|
||||
assert.False(t, autoMerge.Match(signal))
|
||||
// publish_draft should match (draft + open + success).
|
||||
assert.True(t, publishDraft.Match(signal))
|
||||
// send_fix_command should NOT match.
|
||||
assert.False(t, fixCommand.Match(signal))
|
||||
}
|
||||
|
||||
// --- Handler matching: merged PR only matches tick_parent ---
|
||||
|
||||
func TestIntegration_HandlerPriority_Good_MergedPRPath(t *testing.T) {
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
PRState: "MERGED",
|
||||
IsDraft: false,
|
||||
Mergeable: "UNKNOWN",
|
||||
CheckStatus: "SUCCESS",
|
||||
ThreadsTotal: 0,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
|
||||
autoMerge := NewEnableAutoMergeHandler(nil)
|
||||
publishDraft := NewPublishDraftHandler(nil)
|
||||
fixCommand := NewSendFixCommandHandler(nil)
|
||||
tickParent := NewTickParentHandler(nil)
|
||||
|
||||
assert.False(t, autoMerge.Match(signal))
|
||||
assert.False(t, publishDraft.Match(signal))
|
||||
assert.False(t, fixCommand.Match(signal))
|
||||
assert.True(t, tickParent.Match(signal))
|
||||
}
|
||||
|
||||
// --- Handler matching: conflicting PR matches send_fix_command ---
|
||||
|
||||
func TestIntegration_HandlerPriority_Good_ConflictingPRPath(t *testing.T) {
|
||||
signal := &jobrunner.PipelineSignal{
|
||||
PRState: "OPEN",
|
||||
IsDraft: false,
|
||||
Mergeable: "CONFLICTING",
|
||||
CheckStatus: "SUCCESS",
|
||||
ThreadsTotal: 0,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
|
||||
autoMerge := NewEnableAutoMergeHandler(nil)
|
||||
fixCommand := NewSendFixCommandHandler(nil)
|
||||
|
||||
// enable_auto_merge should NOT match (conflicting).
|
||||
assert.False(t, autoMerge.Match(signal))
|
||||
// send_fix_command should match (conflicting).
|
||||
assert.True(t, fixCommand.Match(signal))
|
||||
}
|
||||
|
||||
// --- Completion integration: failure flow ---

// TestIntegration_Completion_Good_FailureFlow drives the completion handler
// against a stub Forgejo server for a failed agent run. The stub answers the
// label endpoints (lookup, remove, create, add) and captures the body of the
// failure comment so the test can assert on its contents.
func TestIntegration_Completion_Good_FailureFlow(t *testing.T) {
	var commentBody string

	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		// NOTE: case order matters — the bare MethodDelete arm catches every
		// DELETE regardless of path, so it must come after any more specific
		// GET case and before nothing it could shadow.
		switch {
		// GetLabelByName — GET repo labels.
		case r.Method == http.MethodGet && r.URL.Path == "/api/v1/repos/core/go-scm/labels":
			_ = json.NewEncoder(w).Encode([]map[string]any{
				{"id": 1, "name": "in-progress", "color": "#1d76db"},
			})

		// RemoveIssueLabel.
		case r.Method == http.MethodDelete:
			w.WriteHeader(http.StatusNoContent)

		// EnsureLabel — POST to create repo label.
		case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/core/go-scm/labels":
			w.WriteHeader(http.StatusCreated)
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 3, "name": "agent-failed", "color": "#c0392b"})

		// AddIssueLabels — POST to issue labels.
		case r.Method == http.MethodPost && r.URL.Path == "/api/v1/repos/core/go-scm/issues/12/labels":
			_ = json.NewEncoder(w).Encode([]map[string]any{{"id": 3, "name": "agent-failed"}})

		// CreateIssueComment — capture the posted comment body.
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/comments"):
			bodyBytes, _ := io.ReadAll(r.Body)
			var body map[string]string
			_ = json.Unmarshal(bodyBytes, &body)
			commentBody = body["body"]
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})

		// Any other request gets an empty 200 so the handler keeps going.
		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)
	h := NewCompletionHandler(client)

	// A failed agent completion for child issue #12 of epic #5.
	signal := &jobrunner.PipelineSignal{
		Type:        "agent_completion",
		EpicNumber:  5,
		ChildNumber: 12,
		RepoOwner:   "core",
		RepoName:    "go-scm",
		Success:     false,
		Error:       "tests failed: 3 assertions",
	}

	result, err := h.Execute(context.Background(), signal)
	require.NoError(t, err)

	assert.True(t, result.Success) // The handler itself succeeded.
	assert.Contains(t, commentBody, "Agent reported failure")
	assert.Contains(t, commentBody, "tests failed: 3 assertions")
}
|
||||
|
||||
// --- Multiple handlers execute in sequence for different signals ---

// TestIntegration_MultipleHandlers_Good_DifferentSignals checks that two
// different handlers match disjoint signals and both execute successfully
// against the same stub server: a mergeable PR is auto-merged while a
// conflicting PR receives a fix-request comment.
func TestIntegration_MultipleHandlers_Good_DifferentSignals(t *testing.T) {
	var commentBodies []string
	// NOTE(review): mergedPRs is collected below but never asserted on —
	// consider adding an assertion or dropping the capture.
	var mergedPRs []int64

	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		switch {
		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/merge"):
			// Extract PR number from path.
			parts := strings.Split(r.URL.Path, "/")
			for i, p := range parts {
				if p == "pulls" && i+1 < len(parts) {
					var prNum int64
					// Parses the decimal path segment as a JSON number;
					// a non-numeric segment leaves prNum at 0 (error ignored).
					_ = json.Unmarshal([]byte(parts[i+1]), &prNum)
					mergedPRs = append(mergedPRs, prNum)
				}
			}
			w.WriteHeader(http.StatusOK)

		case r.Method == http.MethodPost && strings.Contains(r.URL.Path, "/comments"):
			bodyBytes, _ := io.ReadAll(r.Body)
			var body map[string]string
			_ = json.Unmarshal(bodyBytes, &body)
			commentBodies = append(commentBodies, body["body"])
			w.WriteHeader(http.StatusCreated)
			_ = json.NewEncoder(w).Encode(map[string]any{"id": 1})

		case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/issues/"):
			_ = json.NewEncoder(w).Encode(map[string]any{
				"number": 42,
				"body":   "## Tasks\n- [ ] #7\n- [ ] #8\n",
				"title":  "Epic",
			})

		case r.Method == http.MethodPatch:
			_ = json.NewEncoder(w).Encode(map[string]any{"number": 1, "body": "", "state": "open"})

		default:
			w.WriteHeader(http.StatusOK)
			_ = json.NewEncoder(w).Encode(map[string]any{})
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)

	autoMergeHandler := NewEnableAutoMergeHandler(client)
	fixCommandHandler := NewSendFixCommandHandler(client)

	// Signal 1: should trigger auto merge.
	sig1 := &jobrunner.PipelineSignal{
		PRState: "OPEN", IsDraft: false, Mergeable: "MERGEABLE",
		CheckStatus: "SUCCESS", PRNumber: 10,
		RepoOwner: "org", RepoName: "repo",
	}

	// Signal 2: should trigger fix command.
	sig2 := &jobrunner.PipelineSignal{
		PRState: "OPEN", Mergeable: "CONFLICTING",
		CheckStatus: "SUCCESS", PRNumber: 20,
		RepoOwner: "org", RepoName: "repo",
	}

	// Each signal is claimed by exactly one of the two handlers.
	assert.True(t, autoMergeHandler.Match(sig1))
	assert.False(t, autoMergeHandler.Match(sig2))

	assert.False(t, fixCommandHandler.Match(sig1))
	assert.True(t, fixCommandHandler.Match(sig2))

	// Execute both.
	result1, err := autoMergeHandler.Execute(context.Background(), sig1)
	require.NoError(t, err)
	assert.True(t, result1.Success)

	result2, err := fixCommandHandler.Execute(context.Background(), sig2)
	require.NoError(t, err)
	assert.True(t, result2.Success)

	// Verify correct comment was posted for the conflicting PR.
	require.Len(t, commentBodies, 1)
	assert.Contains(t, commentBodies[0], "fix the merge conflict")
}
|
||||
|
|
@ -1,55 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// PublishDraftHandler marks a draft PR as ready for review once its checks pass.
|
||||
type PublishDraftHandler struct {
|
||||
forge *forge.Client
|
||||
}
|
||||
|
||||
// NewPublishDraftHandler creates a handler that publishes draft PRs.
|
||||
func NewPublishDraftHandler(f *forge.Client) *PublishDraftHandler {
|
||||
return &PublishDraftHandler{forge: f}
|
||||
}
|
||||
|
||||
// Name returns the handler identifier.
|
||||
func (h *PublishDraftHandler) Name() string {
|
||||
return "publish_draft"
|
||||
}
|
||||
|
||||
// Match returns true when the PR is a draft, open, and all checks have passed.
|
||||
func (h *PublishDraftHandler) Match(signal *jobrunner.PipelineSignal) bool {
|
||||
return signal.IsDraft &&
|
||||
signal.PRState == "OPEN" &&
|
||||
signal.CheckStatus == "SUCCESS"
|
||||
}
|
||||
|
||||
// Execute marks the PR as no longer a draft.
|
||||
func (h *PublishDraftHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
|
||||
start := time.Now()
|
||||
|
||||
err := h.forge.SetPRDraft(signal.RepoOwner, signal.RepoName, int64(signal.PRNumber), false)
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "publish_draft",
|
||||
RepoOwner: signal.RepoOwner,
|
||||
RepoName: signal.RepoName,
|
||||
PRNumber: signal.PRNumber,
|
||||
Success: err == nil,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
result.Error = fmt.Sprintf("publish draft failed: %v", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
|
@ -1,84 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// TestPublishDraft_Match_Good: an open draft with passing checks is publishable.
func TestPublishDraft_Match_Good(t *testing.T) {
	// A nil client is safe here: Match never touches the forge API.
	h := NewPublishDraftHandler(nil)
	sig := &jobrunner.PipelineSignal{
		IsDraft:     true,
		PRState:     "OPEN",
		CheckStatus: "SUCCESS",
	}
	assert.True(t, h.Match(sig))
}

// TestPublishDraft_Match_Bad_NotDraft: non-draft PRs are never published.
func TestPublishDraft_Match_Bad_NotDraft(t *testing.T) {
	h := NewPublishDraftHandler(nil)
	sig := &jobrunner.PipelineSignal{
		IsDraft:     false,
		PRState:     "OPEN",
		CheckStatus: "SUCCESS",
	}
	assert.False(t, h.Match(sig))
}

// TestPublishDraft_Match_Bad_ChecksFailing: failing checks block publication.
func TestPublishDraft_Match_Bad_ChecksFailing(t *testing.T) {
	h := NewPublishDraftHandler(nil)
	sig := &jobrunner.PipelineSignal{
		IsDraft:     true,
		PRState:     "OPEN",
		CheckStatus: "FAILURE",
	}
	assert.False(t, h.Match(sig))
}

// TestPublishDraft_Execute_Good verifies that Execute issues a PATCH to the
// pulls endpoint with draft:false and fills in the ActionResult fields.
func TestPublishDraft_Execute_Good(t *testing.T) {
	var capturedMethod string
	var capturedPath string
	var capturedBody string

	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Record the single API call the handler is expected to make.
		capturedMethod = r.Method
		capturedPath = r.URL.Path
		b, _ := io.ReadAll(r.Body)
		capturedBody = string(b)
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte(`{}`))
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)

	h := NewPublishDraftHandler(client)
	sig := &jobrunner.PipelineSignal{
		RepoOwner: "host-uk",
		RepoName:  "core-php",
		PRNumber:  42,
		IsDraft:   true,
		PRState:   "OPEN",
	}

	result, err := h.Execute(context.Background(), sig)
	require.NoError(t, err)

	// The PR is edited via PATCH with the draft flag cleared.
	assert.Equal(t, http.MethodPatch, capturedMethod)
	assert.Equal(t, "/api/v1/repos/host-uk/core-php/pulls/42", capturedPath)
	assert.Contains(t, capturedBody, `"draft":false`)

	assert.True(t, result.Success)
	assert.Equal(t, "publish_draft", result.Action)
	assert.Equal(t, "host-uk", result.RepoOwner)
	assert.Equal(t, "core-php", result.RepoName)
	assert.Equal(t, 42, result.PRNumber)
}
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
forgejosdk "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// DismissReviewsHandler dismisses stale "request changes" reviews on a PR.
|
||||
// This replaces the GitHub-only ResolveThreadsHandler because Forgejo does
|
||||
// not have a thread resolution API.
|
||||
type DismissReviewsHandler struct {
|
||||
forge *forge.Client
|
||||
}
|
||||
|
||||
// NewDismissReviewsHandler creates a handler that dismisses stale reviews.
|
||||
func NewDismissReviewsHandler(f *forge.Client) *DismissReviewsHandler {
|
||||
return &DismissReviewsHandler{forge: f}
|
||||
}
|
||||
|
||||
// Name returns the handler identifier.
|
||||
func (h *DismissReviewsHandler) Name() string {
|
||||
return "dismiss_reviews"
|
||||
}
|
||||
|
||||
// Match returns true when the PR is open and has unresolved review threads.
|
||||
func (h *DismissReviewsHandler) Match(signal *jobrunner.PipelineSignal) bool {
|
||||
return signal.PRState == "OPEN" && signal.HasUnresolvedThreads()
|
||||
}
|
||||
|
||||
// Execute dismisses stale "request changes" reviews on the PR.
|
||||
func (h *DismissReviewsHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
|
||||
start := time.Now()
|
||||
|
||||
reviews, err := h.forge.ListPRReviews(signal.RepoOwner, signal.RepoName, int64(signal.PRNumber))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dismiss_reviews: list reviews: %w", err)
|
||||
}
|
||||
|
||||
var dismissErrors []string
|
||||
dismissed := 0
|
||||
for _, review := range reviews {
|
||||
if review.State != forgejosdk.ReviewStateRequestChanges || review.Dismissed || !review.Stale {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := h.forge.DismissReview(
|
||||
signal.RepoOwner, signal.RepoName,
|
||||
int64(signal.PRNumber), review.ID,
|
||||
"Automatically dismissed: review is stale after new commits",
|
||||
); err != nil {
|
||||
dismissErrors = append(dismissErrors, err.Error())
|
||||
} else {
|
||||
dismissed++
|
||||
}
|
||||
}
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "dismiss_reviews",
|
||||
RepoOwner: signal.RepoOwner,
|
||||
RepoName: signal.RepoName,
|
||||
PRNumber: signal.PRNumber,
|
||||
Success: len(dismissErrors) == 0,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}
|
||||
|
||||
if len(dismissErrors) > 0 {
|
||||
result.Error = fmt.Sprintf("failed to dismiss %d review(s): %s",
|
||||
len(dismissErrors), dismissErrors[0])
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
|
@ -1,91 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// TestDismissReviews_Match_Good: an open PR with unresolved threads matches.
func TestDismissReviews_Match_Good(t *testing.T) {
	h := NewDismissReviewsHandler(nil)
	sig := &jobrunner.PipelineSignal{
		PRState:         "OPEN",
		ThreadsTotal:    4,
		ThreadsResolved: 2,
	}
	assert.True(t, h.Match(sig))
}

// TestDismissReviews_Match_Bad_AllResolved: fully resolved threads do not match.
func TestDismissReviews_Match_Bad_AllResolved(t *testing.T) {
	h := NewDismissReviewsHandler(nil)
	sig := &jobrunner.PipelineSignal{
		PRState:         "OPEN",
		ThreadsTotal:    3,
		ThreadsResolved: 3,
	}
	assert.False(t, h.Match(sig))
}

// TestDismissReviews_Execute_Good drives Execute against a stub server that
// lists three reviews; only the two stale REQUEST_CHANGES reviews should be
// dismissed, which the test verifies by counting HTTP calls.
func TestDismissReviews_Execute_Good(t *testing.T) {
	callCount := 0

	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		callCount++
		w.Header().Set("Content-Type", "application/json")

		// ListPullReviews (GET)
		if r.Method == http.MethodGet {
			reviews := []map[string]any{
				{
					"id": 1, "state": "REQUEST_CHANGES", "dismissed": false, "stale": true,
					"body": "fix this", "commit_id": "abc123",
				},
				{
					"id": 2, "state": "APPROVED", "dismissed": false, "stale": false,
					"body": "looks good", "commit_id": "abc123",
				},
				{
					"id": 3, "state": "REQUEST_CHANGES", "dismissed": false, "stale": true,
					"body": "needs work", "commit_id": "abc123",
				},
			}
			_ = json.NewEncoder(w).Encode(reviews)
			return
		}

		// DismissPullReview (POST to dismissals endpoint)
		w.WriteHeader(http.StatusOK)
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)

	h := NewDismissReviewsHandler(client)
	sig := &jobrunner.PipelineSignal{
		RepoOwner:       "host-uk",
		RepoName:        "core-admin",
		PRNumber:        33,
		PRState:         "OPEN",
		ThreadsTotal:    3,
		ThreadsResolved: 1,
	}

	result, err := h.Execute(context.Background(), sig)
	require.NoError(t, err)

	assert.True(t, result.Success)
	assert.Equal(t, "dismiss_reviews", result.Action)
	assert.Equal(t, "host-uk", result.RepoOwner)
	assert.Equal(t, "core-admin", result.RepoName)
	assert.Equal(t, 33, result.PRNumber)

	// 1 list + 2 dismiss (reviews #1 and #3 are stale REQUEST_CHANGES)
	assert.Equal(t, 3, callCount)
}
|
||||
|
|
@ -1,74 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// SendFixCommandHandler posts a comment on a PR asking for conflict or
|
||||
// review fixes.
|
||||
type SendFixCommandHandler struct {
|
||||
forge *forge.Client
|
||||
}
|
||||
|
||||
// NewSendFixCommandHandler creates a handler that posts fix commands.
|
||||
func NewSendFixCommandHandler(f *forge.Client) *SendFixCommandHandler {
|
||||
return &SendFixCommandHandler{forge: f}
|
||||
}
|
||||
|
||||
// Name returns the handler identifier.
|
||||
func (h *SendFixCommandHandler) Name() string {
|
||||
return "send_fix_command"
|
||||
}
|
||||
|
||||
// Match returns true when the PR is open and either has merge conflicts or
|
||||
// has unresolved threads with failing checks.
|
||||
func (h *SendFixCommandHandler) Match(signal *jobrunner.PipelineSignal) bool {
|
||||
if signal.PRState != "OPEN" {
|
||||
return false
|
||||
}
|
||||
if signal.Mergeable == "CONFLICTING" {
|
||||
return true
|
||||
}
|
||||
if signal.HasUnresolvedThreads() && signal.CheckStatus == "FAILURE" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Execute posts a comment on the PR asking for a fix.
|
||||
func (h *SendFixCommandHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
|
||||
start := time.Now()
|
||||
|
||||
var message string
|
||||
if signal.Mergeable == "CONFLICTING" {
|
||||
message = "Can you fix the merge conflict?"
|
||||
} else {
|
||||
message = "Can you fix the code reviews?"
|
||||
}
|
||||
|
||||
err := h.forge.CreateIssueComment(
|
||||
signal.RepoOwner, signal.RepoName,
|
||||
int64(signal.PRNumber), message,
|
||||
)
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "send_fix_command",
|
||||
RepoOwner: signal.RepoOwner,
|
||||
RepoName: signal.RepoName,
|
||||
PRNumber: signal.PRNumber,
|
||||
Success: err == nil,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
result.Error = fmt.Sprintf("post comment failed: %v", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
|
@ -1,87 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// TestSendFixCommand_Match_Good_Conflicting: a conflicting open PR matches.
func TestSendFixCommand_Match_Good_Conflicting(t *testing.T) {
	h := NewSendFixCommandHandler(nil)
	sig := &jobrunner.PipelineSignal{
		PRState:   "OPEN",
		Mergeable: "CONFLICTING",
	}
	assert.True(t, h.Match(sig))
}

// TestSendFixCommand_Match_Good_UnresolvedThreads: unresolved threads plus
// failing checks also match, even when the PR is mergeable.
func TestSendFixCommand_Match_Good_UnresolvedThreads(t *testing.T) {
	h := NewSendFixCommandHandler(nil)
	sig := &jobrunner.PipelineSignal{
		PRState:         "OPEN",
		Mergeable:       "MERGEABLE",
		CheckStatus:     "FAILURE",
		ThreadsTotal:    3,
		ThreadsResolved: 1,
	}
	assert.True(t, h.Match(sig))
}

// TestSendFixCommand_Match_Bad_Clean: a mergeable PR with green checks and
// all threads resolved has nothing to fix.
func TestSendFixCommand_Match_Bad_Clean(t *testing.T) {
	h := NewSendFixCommandHandler(nil)
	sig := &jobrunner.PipelineSignal{
		PRState:         "OPEN",
		Mergeable:       "MERGEABLE",
		CheckStatus:     "SUCCESS",
		ThreadsTotal:    2,
		ThreadsResolved: 2,
	}
	assert.False(t, h.Match(sig))
}

// TestSendFixCommand_Execute_Good_Conflict verifies the conflict message is
// POSTed to the issue comments endpoint and the ActionResult is populated.
func TestSendFixCommand_Execute_Good_Conflict(t *testing.T) {
	var capturedMethod string
	var capturedPath string
	var capturedBody string

	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Record the single comment-creation call.
		capturedMethod = r.Method
		capturedPath = r.URL.Path
		b, _ := io.ReadAll(r.Body)
		capturedBody = string(b)
		w.WriteHeader(http.StatusCreated)
		_, _ = w.Write([]byte(`{"id":1}`))
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)

	h := NewSendFixCommandHandler(client)
	sig := &jobrunner.PipelineSignal{
		RepoOwner: "host-uk",
		RepoName:  "core-tenant",
		PRNumber:  17,
		PRState:   "OPEN",
		Mergeable: "CONFLICTING",
	}

	result, err := h.Execute(context.Background(), sig)
	require.NoError(t, err)

	assert.Equal(t, http.MethodPost, capturedMethod)
	assert.Equal(t, "/api/v1/repos/host-uk/core-tenant/issues/17/comments", capturedPath)
	assert.Contains(t, capturedBody, "fix the merge conflict")

	assert.True(t, result.Success)
	assert.Equal(t, "send_fix_command", result.Action)
	assert.Equal(t, "host-uk", result.RepoOwner)
	assert.Equal(t, "core-tenant", result.RepoName)
	assert.Equal(t, 17, result.PRNumber)
}
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
)
|
||||
|
||||
// forgejoVersionResponse is the JSON response for /api/v1/version.
|
||||
const forgejoVersionResponse = `{"version":"9.0.0"}`
|
||||
|
||||
// withVersion wraps an HTTP handler to also serve the Forgejo version endpoint
|
||||
// that the SDK calls during NewClient initialization.
|
||||
func withVersion(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if strings.HasSuffix(r.URL.Path, "/version") {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_, _ = w.Write([]byte(forgejoVersionResponse))
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// newTestForgeClient creates a forge.Client pointing at the given test server URL.
// The "test-token" credential is a dummy value; the stub servers in this
// package do not inspect authentication headers.
func newTestForgeClient(t *testing.T, url string) *forge.Client {
	t.Helper()
	client, err := forge.New(url, "test-token")
	// NewClient probes /api/v1/version, which withVersion answers.
	require.NoError(t, err)
	return client
}
|
||||
|
|
@ -1,100 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
forgejosdk "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/forge"
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// TickParentHandler ticks a child checkbox in the parent epic issue body
|
||||
// after the child's PR has been merged.
|
||||
type TickParentHandler struct {
|
||||
forge *forge.Client
|
||||
}
|
||||
|
||||
// NewTickParentHandler creates a handler that ticks parent epic checkboxes.
|
||||
func NewTickParentHandler(f *forge.Client) *TickParentHandler {
|
||||
return &TickParentHandler{forge: f}
|
||||
}
|
||||
|
||||
// Name returns the handler identifier.
|
||||
func (h *TickParentHandler) Name() string {
|
||||
return "tick_parent"
|
||||
}
|
||||
|
||||
// Match returns true when the child PR has been merged.
|
||||
func (h *TickParentHandler) Match(signal *jobrunner.PipelineSignal) bool {
|
||||
return signal.PRState == "MERGED"
|
||||
}
|
||||
|
||||
// Execute fetches the epic body, replaces the unchecked checkbox for the
|
||||
// child issue with a checked one, updates the epic, and closes the child issue.
|
||||
func (h *TickParentHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
|
||||
start := time.Now()
|
||||
|
||||
// Fetch the epic issue body.
|
||||
epic, err := h.forge.GetIssue(signal.RepoOwner, signal.RepoName, int64(signal.EpicNumber))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tick_parent: fetch epic: %w", err)
|
||||
}
|
||||
|
||||
oldBody := epic.Body
|
||||
unchecked := fmt.Sprintf("- [ ] #%d", signal.ChildNumber)
|
||||
checked := fmt.Sprintf("- [x] #%d", signal.ChildNumber)
|
||||
|
||||
if !strings.Contains(oldBody, unchecked) {
|
||||
// Already ticked or not found -- nothing to do.
|
||||
return &jobrunner.ActionResult{
|
||||
Action: "tick_parent",
|
||||
RepoOwner: signal.RepoOwner,
|
||||
RepoName: signal.RepoName,
|
||||
PRNumber: signal.PRNumber,
|
||||
Success: true,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}, nil
|
||||
}
|
||||
|
||||
newBody := strings.Replace(oldBody, unchecked, checked, 1)
|
||||
|
||||
// Update the epic body.
|
||||
_, err = h.forge.EditIssue(signal.RepoOwner, signal.RepoName, int64(signal.EpicNumber), forgejosdk.EditIssueOption{
|
||||
Body: &newBody,
|
||||
})
|
||||
if err != nil {
|
||||
return &jobrunner.ActionResult{
|
||||
Action: "tick_parent",
|
||||
RepoOwner: signal.RepoOwner,
|
||||
RepoName: signal.RepoName,
|
||||
PRNumber: signal.PRNumber,
|
||||
Error: fmt.Sprintf("edit epic failed: %v", err),
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close the child issue.
|
||||
err = h.forge.CloseIssue(signal.RepoOwner, signal.RepoName, int64(signal.ChildNumber))
|
||||
|
||||
result := &jobrunner.ActionResult{
|
||||
Action: "tick_parent",
|
||||
RepoOwner: signal.RepoOwner,
|
||||
RepoName: signal.RepoName,
|
||||
PRNumber: signal.PRNumber,
|
||||
Success: err == nil,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
result.Error = fmt.Sprintf("close child issue failed: %v", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
|
@ -1,98 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"forge.lthn.ai/core/go-scm/jobrunner"
|
||||
)
|
||||
|
||||
// TestTickParent_Match_Good: a merged PR matches tick_parent.
func TestTickParent_Match_Good(t *testing.T) {
	h := NewTickParentHandler(nil)
	sig := &jobrunner.PipelineSignal{
		PRState: "MERGED",
	}
	assert.True(t, h.Match(sig))
}

// TestTickParent_Match_Bad_Open: open PRs are not tick_parent's business.
func TestTickParent_Match_Bad_Open(t *testing.T) {
	h := NewTickParentHandler(nil)
	sig := &jobrunner.PipelineSignal{
		PRState: "OPEN",
	}
	assert.False(t, h.Match(sig))
}

// TestTickParent_Execute_Good stubs the epic fetch/edit and child close
// endpoints, then verifies the child checkbox is ticked and the child issue
// is closed.
func TestTickParent_Execute_Good(t *testing.T) {
	epicBody := "## Tasks\n- [x] #1\n- [ ] #7\n- [ ] #8\n"
	var editBody string
	var closeCalled bool

	srv := httptest.NewServer(withVersion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		path := r.URL.Path
		method := r.Method
		w.Header().Set("Content-Type", "application/json")

		switch {
		// GET issue (fetch epic)
		case method == http.MethodGet && strings.Contains(path, "/issues/42"):
			_ = json.NewEncoder(w).Encode(map[string]any{
				"number": 42,
				"body":   epicBody,
				"title":  "Epic",
			})

		// PATCH issue (edit epic body)
		case method == http.MethodPatch && strings.Contains(path, "/issues/42"):
			b, _ := io.ReadAll(r.Body)
			editBody = string(b)
			_ = json.NewEncoder(w).Encode(map[string]any{
				"number": 42,
				"body":   editBody,
				"title":  "Epic",
			})

		// PATCH issue (close child — state: closed)
		case method == http.MethodPatch && strings.Contains(path, "/issues/7"):
			closeCalled = true
			_ = json.NewEncoder(w).Encode(map[string]any{
				"number": 7,
				"state":  "closed",
			})

		// Anything else is a test failure surfaced as a 404 from the stub.
		default:
			w.WriteHeader(http.StatusNotFound)
		}
	})))
	defer srv.Close()

	client := newTestForgeClient(t, srv.URL)

	h := NewTickParentHandler(client)
	sig := &jobrunner.PipelineSignal{
		RepoOwner:   "host-uk",
		RepoName:    "core-php",
		EpicNumber:  42,
		ChildNumber: 7,
		PRNumber:    99,
		PRState:     "MERGED",
	}

	result, err := h.Execute(context.Background(), sig)
	require.NoError(t, err)

	assert.True(t, result.Success)
	assert.Equal(t, "tick_parent", result.Action)

	// Verify the edit body contains the checked checkbox.
	assert.Contains(t, editBody, "- [x] #7")
	assert.True(t, closeCalled, "expected child issue to be closed")
}
|
||||
|
|
@ -1,170 +0,0 @@
|
|||
package jobrunner
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// validPathComponent matches safe repo owner/name characters (alphanumeric, hyphen, underscore, dot).
// The first character must be alphanumeric, so leading dots (including "..")
// and leading hyphens are rejected outright.
var validPathComponent = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9._-]*$`)
|
||||
|
||||
// JournalEntry is a single line in the JSONL audit log.
type JournalEntry struct {
	Timestamp string         `json:"ts"`      // UTC, formatted "2006-01-02T15:04:05Z"
	Epic      int            `json:"epic"`    // parent epic issue number
	Child     int            `json:"child"`   // child issue number
	PR        int            `json:"pr"`      // pull request number
	Repo      string         `json:"repo"`    // full repo name (from PipelineSignal.RepoFullName)
	Action    string         `json:"action"`  // handler action identifier, e.g. "tick_parent"
	Signals   SignalSnapshot `json:"signals"` // PR state at the time of the action
	Result    ResultSnapshot `json:"result"`  // outcome of the action
	Cycle     int            `json:"cycle"`   // poll cycle copied from the ActionResult
}

// SignalSnapshot captures the structural state of a PR at the time of action.
type SignalSnapshot struct {
	PRState         string `json:"pr_state"`     // e.g. "OPEN", "MERGED"
	IsDraft         bool   `json:"is_draft"`     // draft flag at action time
	CheckStatus     string `json:"check_status"` // e.g. "SUCCESS", "FAILURE"
	Mergeable       string `json:"mergeable"`    // e.g. "MERGEABLE", "CONFLICTING"
	ThreadsTotal    int    `json:"threads_total"`
	ThreadsResolved int    `json:"threads_resolved"`
}

// ResultSnapshot captures the outcome of an action.
type ResultSnapshot struct {
	Success    bool   `json:"success"`
	Error      string `json:"error,omitempty"` // empty on success, so omitted
	DurationMs int64  `json:"duration_ms"`     // action duration in milliseconds
}
|
||||
|
||||
// Journal writes ActionResult entries to date-partitioned JSONL files.
type Journal struct {
	baseDir string     // root directory for journal files
	mu      sync.Mutex // guards writes; Journal must not be copied
}

// NewJournal creates a new Journal rooted at baseDir. An empty baseDir is
// rejected because the journal would have nowhere to write.
func NewJournal(baseDir string) (*Journal, error) {
	if baseDir != "" {
		return &Journal{baseDir: baseDir}, nil
	}
	return nil, fmt.Errorf("journal.NewJournal: base directory is required")
}
|
||||
|
||||
// sanitizePathComponent validates a single path component (owner or repo name)
|
||||
// to prevent path traversal attacks. It rejects "..", empty strings, paths
|
||||
// containing separators, and any value outside the safe character set.
|
||||
func sanitizePathComponent(name string) (string, error) {
|
||||
// Reject empty or whitespace-only values.
|
||||
if name == "" || strings.TrimSpace(name) == "" {
|
||||
return "", fmt.Errorf("journal.sanitizePathComponent: invalid path component: %q", name)
|
||||
}
|
||||
|
||||
// Reject inputs containing path separators (directory traversal attempt).
|
||||
if strings.ContainsAny(name, `/\`) {
|
||||
return "", fmt.Errorf("journal.sanitizePathComponent: path component contains directory separator: %q", name)
|
||||
}
|
||||
|
||||
// Use filepath.Clean to normalize (e.g., collapse redundant dots).
|
||||
clean := filepath.Clean(name)
|
||||
|
||||
// Reject traversal components.
|
||||
if clean == "." || clean == ".." {
|
||||
return "", fmt.Errorf("journal.sanitizePathComponent: invalid path component: %q", name)
|
||||
}
|
||||
|
||||
// Validate against the safe character set.
|
||||
if !validPathComponent.MatchString(clean) {
|
||||
return "", fmt.Errorf("journal.sanitizePathComponent: path component contains invalid characters: %q", name)
|
||||
}
|
||||
|
||||
return clean, nil
|
||||
}
|
||||
|
||||
// Append writes a journal entry for the given signal and result.
|
||||
func (j *Journal) Append(signal *PipelineSignal, result *ActionResult) error {
|
||||
if signal == nil {
|
||||
return fmt.Errorf("journal.Append: signal is required")
|
||||
}
|
||||
if result == nil {
|
||||
return fmt.Errorf("journal.Append: result is required")
|
||||
}
|
||||
|
||||
entry := JournalEntry{
|
||||
Timestamp: result.Timestamp.UTC().Format("2006-01-02T15:04:05Z"),
|
||||
Epic: signal.EpicNumber,
|
||||
Child: signal.ChildNumber,
|
||||
PR: signal.PRNumber,
|
||||
Repo: signal.RepoFullName(),
|
||||
Action: result.Action,
|
||||
Signals: SignalSnapshot{
|
||||
PRState: signal.PRState,
|
||||
IsDraft: signal.IsDraft,
|
||||
CheckStatus: signal.CheckStatus,
|
||||
Mergeable: signal.Mergeable,
|
||||
ThreadsTotal: signal.ThreadsTotal,
|
||||
ThreadsResolved: signal.ThreadsResolved,
|
||||
},
|
||||
Result: ResultSnapshot{
|
||||
Success: result.Success,
|
||||
Error: result.Error,
|
||||
DurationMs: result.Duration.Milliseconds(),
|
||||
},
|
||||
Cycle: result.Cycle,
|
||||
}
|
||||
|
||||
data, err := json.Marshal(entry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("journal.Append: marshal entry: %w", err)
|
||||
}
|
||||
data = append(data, '\n')
|
||||
|
||||
// Sanitize path components to prevent path traversal (CVE: issue #46).
|
||||
owner, err := sanitizePathComponent(signal.RepoOwner)
|
||||
if err != nil {
|
||||
return fmt.Errorf("journal.Append: invalid repo owner: %w", err)
|
||||
}
|
||||
repo, err := sanitizePathComponent(signal.RepoName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("journal.Append: invalid repo name: %w", err)
|
||||
}
|
||||
|
||||
date := result.Timestamp.UTC().Format("2006-01-02")
|
||||
dir := filepath.Join(j.baseDir, owner, repo)
|
||||
|
||||
// Resolve to absolute path and verify it stays within baseDir.
|
||||
absBase, err := filepath.Abs(j.baseDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("journal.Append: resolve base directory: %w", err)
|
||||
}
|
||||
absDir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("journal.Append: resolve journal directory: %w", err)
|
||||
}
|
||||
if !strings.HasPrefix(absDir, absBase+string(filepath.Separator)) {
|
||||
return fmt.Errorf("journal.Append: path %q escapes base directory %q", absDir, absBase)
|
||||
}
|
||||
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
|
||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||
return fmt.Errorf("journal.Append: create directory: %w", err)
|
||||
}
|
||||
|
||||
path := filepath.Join(dir, date+".jsonl")
|
||||
f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("journal.Append: open file: %w", err)
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
_, err = f.Write(data)
|
||||
return err
|
||||
}
|
||||
|
|
@ -1,540 +0,0 @@
|
|||
package jobrunner
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// readJournalEntries reads all JSONL entries from a given file path.
|
||||
func readJournalEntries(t *testing.T, path string) []JournalEntry {
|
||||
t.Helper()
|
||||
f, err := os.Open(path)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
var entries []JournalEntry
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
var entry JournalEntry
|
||||
err := json.Unmarshal(scanner.Bytes(), &entry)
|
||||
require.NoError(t, err)
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
require.NoError(t, scanner.Err())
|
||||
return entries
|
||||
}
|
||||
|
||||
// readAllJournalFiles reads all .jsonl files recursively under a base directory.
// Entries are returned in filepath.Walk order (lexical by path), not in the
// order they were written across repositories.
func readAllJournalFiles(t *testing.T, baseDir string) []JournalEntry {
	t.Helper()
	var all []JournalEntry
	err := filepath.Walk(baseDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Only journal files are collected; directories and other files are skipped.
		if filepath.Ext(path) == ".jsonl" {
			entries := readJournalEntries(t, path)
			all = append(all, entries...)
		}
		return nil
	})
	require.NoError(t, err)
	return all
}
|
||||
|
||||
// --- Journal replay: write multiple entries, read back, verify round-trip ---
|
||||
|
||||
// TestJournal_Replay_Good_WriteAndReadBack appends five entries across two
// repos, reads them all back, and verifies the round-trip preserves the
// key fields of representative entries.
func TestJournal_Replay_Good_WriteAndReadBack(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	baseTime := time.Date(2026, 2, 10, 10, 0, 0, 0, time.UTC)

	// Write 5 entries with different actions, times, and repos.
	entries := []struct {
		signal *PipelineSignal
		result *ActionResult
	}{
		{
			signal: &PipelineSignal{
				EpicNumber: 1, ChildNumber: 2, PRNumber: 10,
				RepoOwner: "org-a", RepoName: "repo-1",
				PRState: "OPEN", CheckStatus: "SUCCESS", Mergeable: "MERGEABLE",
			},
			result: &ActionResult{
				Action:    "enable_auto_merge",
				RepoOwner: "org-a", RepoName: "repo-1",
				Success: true, Timestamp: baseTime, Duration: 100 * time.Millisecond, Cycle: 1,
			},
		},
		{
			signal: &PipelineSignal{
				EpicNumber: 1, ChildNumber: 3, PRNumber: 11,
				RepoOwner: "org-a", RepoName: "repo-1",
				PRState: "OPEN", CheckStatus: "FAILURE", Mergeable: "CONFLICTING",
			},
			result: &ActionResult{
				Action:    "send_fix_command",
				RepoOwner: "org-a", RepoName: "repo-1",
				Success: true, Timestamp: baseTime.Add(5 * time.Minute), Duration: 50 * time.Millisecond, Cycle: 1,
			},
		},
		{
			signal: &PipelineSignal{
				EpicNumber: 5, ChildNumber: 10, PRNumber: 20,
				RepoOwner: "org-b", RepoName: "repo-2",
				PRState: "MERGED", CheckStatus: "SUCCESS", Mergeable: "UNKNOWN",
			},
			result: &ActionResult{
				Action:    "tick_parent",
				RepoOwner: "org-b", RepoName: "repo-2",
				Success: true, Timestamp: baseTime.Add(10 * time.Minute), Duration: 200 * time.Millisecond, Cycle: 2,
			},
		},
		{
			signal: &PipelineSignal{
				EpicNumber: 5, ChildNumber: 11, PRNumber: 21,
				RepoOwner: "org-b", RepoName: "repo-2",
				PRState: "OPEN", CheckStatus: "PENDING", Mergeable: "MERGEABLE",
				IsDraft: true,
			},
			result: &ActionResult{
				Action:    "publish_draft",
				RepoOwner: "org-b", RepoName: "repo-2",
				Success: false, Error: "API error", Timestamp: baseTime.Add(15 * time.Minute),
				Duration: 300 * time.Millisecond, Cycle: 2,
			},
		},
		{
			signal: &PipelineSignal{
				EpicNumber: 1, ChildNumber: 4, PRNumber: 12,
				RepoOwner: "org-a", RepoName: "repo-1",
				PRState: "OPEN", CheckStatus: "SUCCESS", Mergeable: "MERGEABLE",
				ThreadsTotal: 3, ThreadsResolved: 1,
			},
			result: &ActionResult{
				Action:    "dismiss_reviews",
				RepoOwner: "org-a", RepoName: "repo-1",
				Success: true, Timestamp: baseTime.Add(20 * time.Minute), Duration: 150 * time.Millisecond, Cycle: 3,
			},
		},
	}

	for _, e := range entries {
		err := j.Append(e.signal, e.result)
		require.NoError(t, err)
	}

	// Read back all entries.
	all := readAllJournalFiles(t, dir)
	require.Len(t, all, 5)

	// Build a map by action for flexible lookup (filepath.Walk order is by path, not insertion).
	byAction := make(map[string][]JournalEntry)
	for _, e := range all {
		byAction[e.Action] = append(byAction[e.Action], e)
	}

	// Verify enable_auto_merge entry (org-a/repo-1).
	require.Len(t, byAction["enable_auto_merge"], 1)
	eam := byAction["enable_auto_merge"][0]
	assert.Equal(t, "org-a/repo-1", eam.Repo)
	assert.Equal(t, 1, eam.Epic)
	assert.Equal(t, 2, eam.Child)
	assert.Equal(t, 10, eam.PR)
	assert.Equal(t, 1, eam.Cycle)
	assert.True(t, eam.Result.Success)
	assert.Equal(t, int64(100), eam.Result.DurationMs)

	// Verify publish_draft (failed entry has error).
	require.Len(t, byAction["publish_draft"], 1)
	pd := byAction["publish_draft"][0]
	assert.Equal(t, "publish_draft", pd.Action)
	assert.False(t, pd.Result.Success)
	assert.Equal(t, "API error", pd.Result.Error)

	// Verify signal snapshot preserves state.
	assert.True(t, pd.Signals.IsDraft)
	assert.Equal(t, "PENDING", pd.Signals.CheckStatus)

	// Verify dismiss_reviews has thread counts preserved.
	require.Len(t, byAction["dismiss_reviews"], 1)
	dr := byAction["dismiss_reviews"][0]
	assert.Equal(t, 3, dr.Signals.ThreadsTotal)
	assert.Equal(t, 1, dr.Signals.ThreadsResolved)
}
|
||||
|
||||
// --- Journal replay: filter by action ---
|
||||
|
||||
// TestJournal_Replay_Good_FilterByAction writes five entries with mixed
// actions to one repo, then replays the journal filtering by a single
// action name and verifies the matching entries (and their order).
func TestJournal_Replay_Good_FilterByAction(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	ts := time.Date(2026, 2, 10, 12, 0, 0, 0, time.UTC)

	// "tick_parent" appears twice (indices 1 and 3) — those are the targets.
	actions := []string{"enable_auto_merge", "tick_parent", "send_fix_command", "tick_parent", "publish_draft"}
	for i, action := range actions {
		signal := &PipelineSignal{
			EpicNumber: 1, ChildNumber: i + 1, PRNumber: 10 + i,
			RepoOwner: "org", RepoName: "repo",
			PRState: "OPEN", CheckStatus: "SUCCESS", Mergeable: "MERGEABLE",
		}
		result := &ActionResult{
			Action:    action,
			RepoOwner: "org", RepoName: "repo",
			Success:   true,
			Timestamp: ts.Add(time.Duration(i) * time.Minute),
			Duration:  100 * time.Millisecond,
			Cycle:     i + 1,
		}
		require.NoError(t, j.Append(signal, result))
	}

	all := readAllJournalFiles(t, dir)
	require.Len(t, all, 5)

	// Filter by action=tick_parent.
	var tickParentEntries []JournalEntry
	for _, e := range all {
		if e.Action == "tick_parent" {
			tickParentEntries = append(tickParentEntries, e)
		}
	}

	// Child numbers 2 and 4 correspond to the two tick_parent writes.
	assert.Len(t, tickParentEntries, 2)
	assert.Equal(t, 2, tickParentEntries[0].Child)
	assert.Equal(t, 4, tickParentEntries[1].Child)
}
|
||||
|
||||
// --- Journal replay: filter by repo ---
|
||||
|
||||
// TestJournal_Replay_Good_FilterByRepo writes entries across three repos
// and verifies the owner/repo directory partitioning lets each repo's
// journal be read independently.
func TestJournal_Replay_Good_FilterByRepo(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	ts := time.Date(2026, 2, 10, 12, 0, 0, 0, time.UTC)

	// Two entries each for core-php and core-tenant, one for go-scm.
	repos := []struct {
		owner string
		name  string
	}{
		{"host-uk", "core-php"},
		{"host-uk", "core-tenant"},
		{"host-uk", "core-php"},
		{"lethean", "go-scm"},
		{"host-uk", "core-tenant"},
	}

	for i, r := range repos {
		signal := &PipelineSignal{
			EpicNumber: 1, ChildNumber: i + 1, PRNumber: 10 + i,
			RepoOwner: r.owner, RepoName: r.name,
			PRState: "OPEN", CheckStatus: "SUCCESS", Mergeable: "MERGEABLE",
		}
		result := &ActionResult{
			Action:    "tick_parent",
			RepoOwner: r.owner, RepoName: r.name,
			Success:   true,
			Timestamp: ts.Add(time.Duration(i) * time.Minute),
			Duration:  50 * time.Millisecond,
			Cycle:     i + 1,
		}
		require.NoError(t, j.Append(signal, result))
	}

	// Read entries for host-uk/core-php.
	phpPath := filepath.Join(dir, "host-uk", "core-php", "2026-02-10.jsonl")
	phpEntries := readJournalEntries(t, phpPath)
	assert.Len(t, phpEntries, 2)
	for _, e := range phpEntries {
		assert.Equal(t, "host-uk/core-php", e.Repo)
	}

	// Read entries for host-uk/core-tenant.
	tenantPath := filepath.Join(dir, "host-uk", "core-tenant", "2026-02-10.jsonl")
	tenantEntries := readJournalEntries(t, tenantPath)
	assert.Len(t, tenantEntries, 2)
	for _, e := range tenantEntries {
		assert.Equal(t, "host-uk/core-tenant", e.Repo)
	}

	// Read entries for lethean/go-scm.
	scmPath := filepath.Join(dir, "lethean", "go-scm", "2026-02-10.jsonl")
	scmEntries := readJournalEntries(t, scmPath)
	assert.Len(t, scmEntries, 1)
	assert.Equal(t, "lethean/go-scm", scmEntries[0].Repo)
}
|
||||
|
||||
// --- Journal replay: filter by time range (date partitioning) ---
|
||||
|
||||
// TestJournal_Replay_Good_FilterByTimeRange writes entries across three
// days, verifies each day's file holds the expected entries, then replays
// all entries and filters them by a half-open [start, end) time range.
func TestJournal_Replay_Good_FilterByTimeRange(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	// Write entries across three different days.
	dates := []time.Time{
		time.Date(2026, 2, 8, 9, 0, 0, 0, time.UTC),
		time.Date(2026, 2, 9, 10, 0, 0, 0, time.UTC),
		time.Date(2026, 2, 9, 14, 0, 0, 0, time.UTC),
		time.Date(2026, 2, 10, 8, 0, 0, 0, time.UTC),
		time.Date(2026, 2, 10, 16, 0, 0, 0, time.UTC),
	}

	for i, ts := range dates {
		signal := &PipelineSignal{
			EpicNumber: 1, ChildNumber: i + 1, PRNumber: 10 + i,
			RepoOwner: "org", RepoName: "repo",
			PRState: "OPEN", CheckStatus: "SUCCESS", Mergeable: "MERGEABLE",
		}
		result := &ActionResult{
			Action:    "merge",
			RepoOwner: "org", RepoName: "repo",
			Success:   true,
			Timestamp: ts,
			Duration:  100 * time.Millisecond,
			Cycle:     i + 1,
		}
		require.NoError(t, j.Append(signal, result))
	}

	// Verify each date file has the correct number of entries.
	day8Path := filepath.Join(dir, "org", "repo", "2026-02-08.jsonl")
	day8Entries := readJournalEntries(t, day8Path)
	assert.Len(t, day8Entries, 1)
	assert.Equal(t, "2026-02-08T09:00:00Z", day8Entries[0].Timestamp)

	day9Path := filepath.Join(dir, "org", "repo", "2026-02-09.jsonl")
	day9Entries := readJournalEntries(t, day9Path)
	assert.Len(t, day9Entries, 2)
	assert.Equal(t, "2026-02-09T10:00:00Z", day9Entries[0].Timestamp)
	assert.Equal(t, "2026-02-09T14:00:00Z", day9Entries[1].Timestamp)

	day10Path := filepath.Join(dir, "org", "repo", "2026-02-10.jsonl")
	day10Entries := readJournalEntries(t, day10Path)
	assert.Len(t, day10Entries, 2)

	// Simulate a time range query: get entries for Feb 9 only.
	// In a real system, you'd list files matching the date range.
	// Here we verify the date partitioning is correct.
	rangeStart := time.Date(2026, 2, 9, 0, 0, 0, 0, time.UTC)
	rangeEnd := time.Date(2026, 2, 10, 0, 0, 0, 0, time.UTC) // exclusive

	var filtered []JournalEntry
	all := readAllJournalFiles(t, dir)
	for _, e := range all {
		ts, err := time.Parse("2006-01-02T15:04:05Z", e.Timestamp)
		require.NoError(t, err)
		if !ts.Before(rangeStart) && ts.Before(rangeEnd) {
			filtered = append(filtered, e)
		}
	}

	// Only the two Feb 9 entries (children 2 and 3) fall in the range.
	assert.Len(t, filtered, 2)
	assert.Equal(t, 2, filtered[0].Child)
	assert.Equal(t, 3, filtered[1].Child)
}
|
||||
|
||||
// --- Journal replay: combined filter (action + repo + time) ---
|
||||
|
||||
// TestJournal_Replay_Good_CombinedFilter writes entries across two repos
// and two days, then replays with a combined filter (repo directory walk
// plus action + repo field match) and verifies the selection.
func TestJournal_Replay_Good_CombinedFilter(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	ts1 := time.Date(2026, 2, 10, 10, 0, 0, 0, time.UTC)
	ts2 := time.Date(2026, 2, 10, 11, 0, 0, 0, time.UTC)
	ts3 := time.Date(2026, 2, 11, 9, 0, 0, 0, time.UTC)

	testData := []struct {
		owner  string
		name   string
		action string
		ts     time.Time
	}{
		{"org", "repo-a", "tick_parent", ts1},
		{"org", "repo-a", "enable_auto_merge", ts1},
		{"org", "repo-b", "tick_parent", ts2},
		{"org", "repo-a", "tick_parent", ts3},
		{"org", "repo-b", "send_fix_command", ts3},
	}

	for i, td := range testData {
		signal := &PipelineSignal{
			EpicNumber: 1, ChildNumber: i + 1, PRNumber: 100 + i,
			RepoOwner: td.owner, RepoName: td.name,
			PRState: "MERGED", CheckStatus: "SUCCESS", Mergeable: "UNKNOWN",
		}
		result := &ActionResult{
			Action:    td.action,
			RepoOwner: td.owner, RepoName: td.name,
			Success:   true,
			Timestamp: td.ts,
			Duration:  50 * time.Millisecond,
			Cycle:     i + 1,
		}
		require.NoError(t, j.Append(signal, result))
	}

	// Filter: action=tick_parent AND repo=org/repo-a.
	// First narrow by walking only repo-a's directory subtree.
	repoAPath := filepath.Join(dir, "org", "repo-a")
	var repoAEntries []JournalEntry
	err = filepath.Walk(repoAPath, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if filepath.Ext(path) == ".jsonl" {
			entries := readJournalEntries(t, path)
			repoAEntries = append(repoAEntries, entries...)
		}
		return nil
	})
	require.NoError(t, err)

	// Then narrow by action within those entries.
	var tickParentRepoA []JournalEntry
	for _, e := range repoAEntries {
		if e.Action == "tick_parent" && e.Repo == "org/repo-a" {
			tickParentRepoA = append(tickParentRepoA, e)
		}
	}

	// repo-a has tick_parent writes at children 1 (day 1) and 4 (day 2).
	assert.Len(t, tickParentRepoA, 2)
	assert.Equal(t, 1, tickParentRepoA[0].Child)
	assert.Equal(t, 4, tickParentRepoA[1].Child)
}
|
||||
|
||||
// --- Journal replay: empty journal returns no entries ---
|
||||
|
||||
func TestJournal_Replay_Good_EmptyJournal(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
all := readAllJournalFiles(t, dir)
|
||||
assert.Empty(t, all)
|
||||
}
|
||||
|
||||
// --- Journal replay: single entry round-trip preserves all fields ---
|
||||
|
||||
// TestJournal_Replay_Good_FullFieldRoundTrip appends a single entry with
// every signal and result field populated, then reads it back and asserts
// each field survives the JSONL round-trip exactly.
func TestJournal_Replay_Good_FullFieldRoundTrip(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	ts := time.Date(2026, 2, 15, 14, 30, 45, 0, time.UTC)

	signal := &PipelineSignal{
		EpicNumber:      42,
		ChildNumber:     7,
		PRNumber:        99,
		RepoOwner:       "host-uk",
		RepoName:        "core-admin",
		PRState:         "OPEN",
		IsDraft:         true,
		Mergeable:       "CONFLICTING",
		CheckStatus:     "FAILURE",
		ThreadsTotal:    5,
		ThreadsResolved: 2,
	}

	result := &ActionResult{
		Action:    "send_fix_command",
		RepoOwner: "host-uk",
		RepoName:  "core-admin",
		Success:   false,
		Error:     "comment API returned 503",
		Timestamp: ts,
		Duration:  1500 * time.Millisecond,
		Cycle:     7,
	}

	require.NoError(t, j.Append(signal, result))

	// The file path encodes owner, repo, and the UTC date of the timestamp.
	path := filepath.Join(dir, "host-uk", "core-admin", "2026-02-15.jsonl")
	entries := readJournalEntries(t, path)
	require.Len(t, entries, 1)

	e := entries[0]
	assert.Equal(t, "2026-02-15T14:30:45Z", e.Timestamp)
	assert.Equal(t, 42, e.Epic)
	assert.Equal(t, 7, e.Child)
	assert.Equal(t, 99, e.PR)
	assert.Equal(t, "host-uk/core-admin", e.Repo)
	assert.Equal(t, "send_fix_command", e.Action)
	assert.Equal(t, 7, e.Cycle)

	// Signal snapshot.
	assert.Equal(t, "OPEN", e.Signals.PRState)
	assert.True(t, e.Signals.IsDraft)
	assert.Equal(t, "CONFLICTING", e.Signals.Mergeable)
	assert.Equal(t, "FAILURE", e.Signals.CheckStatus)
	assert.Equal(t, 5, e.Signals.ThreadsTotal)
	assert.Equal(t, 2, e.Signals.ThreadsResolved)

	// Result snapshot.
	assert.False(t, e.Result.Success)
	assert.Equal(t, "comment API returned 503", e.Result.Error)
	assert.Equal(t, int64(1500), e.Result.DurationMs)
}
|
||||
|
||||
// --- Journal replay: concurrent writes produce valid JSONL ---
|
||||
|
||||
// TestJournal_Replay_Good_ConcurrentWrites appends 20 entries from
// concurrent goroutines and verifies the JSONL file contains exactly 20
// well-formed lines — i.e. the journal's mutex prevents interleaving.
func TestJournal_Replay_Good_ConcurrentWrites(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	ts := time.Date(2026, 2, 10, 12, 0, 0, 0, time.UTC)

	// Write 20 entries concurrently.
	done := make(chan struct{}, 20)
	for i := 0; i < 20; i++ {
		go func(idx int) {
			signal := &PipelineSignal{
				EpicNumber: 1, ChildNumber: idx, PRNumber: idx,
				RepoOwner: "org", RepoName: "repo",
				PRState: "OPEN", CheckStatus: "SUCCESS", Mergeable: "MERGEABLE",
			}
			result := &ActionResult{
				Action:    "test",
				RepoOwner: "org", RepoName: "repo",
				Success:   true,
				Timestamp: ts,
				Duration:  10 * time.Millisecond,
				Cycle:     idx,
			}
			// Error intentionally ignored: this goroutine cannot call t.Fatal;
			// corruption or a missing line is caught by the count check below.
			_ = j.Append(signal, result)
			done <- struct{}{}
		}(i)
	}

	// Wait for all 20 writers to finish before reading.
	for i := 0; i < 20; i++ {
		<-done
	}

	// All entries should be parseable and present.
	path := filepath.Join(dir, "org", "repo", "2026-02-10.jsonl")
	entries := readJournalEntries(t, path)
	assert.Len(t, entries, 20)

	// Each entry should have valid JSON (no corruption from concurrent writes).
	for _, e := range entries {
		assert.NotEmpty(t, e.Action)
		assert.Equal(t, "org/repo", e.Repo)
	}
}
|
||||
|
|
@ -1,263 +0,0 @@
|
|||
package jobrunner
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestJournal_Append_Good exercises the happy path: one append produces a
// correctly-named JSONL file with a fully-populated entry, and a second
// append adds a second line to the same file.
func TestJournal_Append_Good(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	ts := time.Date(2026, 2, 5, 14, 30, 0, 0, time.UTC)

	signal := &PipelineSignal{
		EpicNumber:      10,
		ChildNumber:     3,
		PRNumber:        55,
		RepoOwner:       "host-uk",
		RepoName:        "core-tenant",
		PRState:         "OPEN",
		IsDraft:         false,
		Mergeable:       "MERGEABLE",
		CheckStatus:     "SUCCESS",
		ThreadsTotal:    2,
		ThreadsResolved: 1,
		LastCommitSHA:   "abc123",
		LastCommitAt:    ts,
		LastReviewAt:    ts,
	}

	result := &ActionResult{
		Action:      "merge",
		RepoOwner:   "host-uk",
		RepoName:    "core-tenant",
		EpicNumber:  10,
		ChildNumber: 3,
		PRNumber:    55,
		Success:     true,
		Timestamp:   ts,
		Duration:    1200 * time.Millisecond,
		Cycle:       1,
	}

	err = j.Append(signal, result)
	require.NoError(t, err)

	// Read the file back.
	expectedPath := filepath.Join(dir, "host-uk", "core-tenant", "2026-02-05.jsonl")
	f, err := os.Open(expectedPath)
	require.NoError(t, err)
	defer func() { _ = f.Close() }()

	scanner := bufio.NewScanner(f)
	require.True(t, scanner.Scan(), "expected at least one line in JSONL file")

	var entry JournalEntry
	err = json.Unmarshal(scanner.Bytes(), &entry)
	require.NoError(t, err)

	assert.Equal(t, "2026-02-05T14:30:00Z", entry.Timestamp)
	assert.Equal(t, 10, entry.Epic)
	assert.Equal(t, 3, entry.Child)
	assert.Equal(t, 55, entry.PR)
	assert.Equal(t, "host-uk/core-tenant", entry.Repo)
	assert.Equal(t, "merge", entry.Action)
	assert.Equal(t, 1, entry.Cycle)

	// Verify signal snapshot.
	assert.Equal(t, "OPEN", entry.Signals.PRState)
	assert.Equal(t, false, entry.Signals.IsDraft)
	assert.Equal(t, "SUCCESS", entry.Signals.CheckStatus)
	assert.Equal(t, "MERGEABLE", entry.Signals.Mergeable)
	assert.Equal(t, 2, entry.Signals.ThreadsTotal)
	assert.Equal(t, 1, entry.Signals.ThreadsResolved)

	// Verify result snapshot.
	assert.Equal(t, true, entry.Result.Success)
	assert.Equal(t, "", entry.Result.Error)
	assert.Equal(t, int64(1200), entry.Result.DurationMs)

	// Append a second entry and verify two lines exist.
	result2 := &ActionResult{
		Action:    "comment",
		RepoOwner: "host-uk",
		RepoName:  "core-tenant",
		Success:   false,
		Error:     "rate limited",
		Timestamp: ts,
		Duration:  50 * time.Millisecond,
		Cycle:     2,
	}
	err = j.Append(signal, result2)
	require.NoError(t, err)

	data, err := os.ReadFile(expectedPath)
	require.NoError(t, err)

	lines := 0
	sc := bufio.NewScanner(strings.NewReader(string(data)))
	for sc.Scan() {
		lines++
	}
	assert.Equal(t, 2, lines, "expected two JSONL lines after two appends")
}
|
||||
|
||||
// TestJournal_Append_Bad_PathTraversal verifies that Append rejects owner
// and repo names that could escape the journal base directory: "..",
// separators, absolute paths, empty, and whitespace-only values.
func TestJournal_Append_Bad_PathTraversal(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	ts := time.Now()

	tests := []struct {
		name      string
		repoOwner string
		repoName  string
		wantErr   string
	}{
		{
			name:      "dotdot owner",
			repoOwner: "..",
			repoName:  "core",
			wantErr:   "invalid repo owner",
		},
		{
			name:      "dotdot repo",
			repoOwner: "host-uk",
			repoName:  "../../etc/cron.d",
			wantErr:   "invalid repo name",
		},
		{
			name:      "slash in owner",
			repoOwner: "../etc",
			repoName:  "core",
			wantErr:   "invalid repo owner",
		},
		{
			name:      "absolute path in repo",
			repoOwner: "host-uk",
			repoName:  "/etc/passwd",
			wantErr:   "invalid repo name",
		},
		{
			name:      "empty owner",
			repoOwner: "",
			repoName:  "core",
			wantErr:   "invalid repo owner",
		},
		{
			name:      "empty repo",
			repoOwner: "host-uk",
			repoName:  "",
			wantErr:   "invalid repo name",
		},
		{
			name:      "dot only owner",
			repoOwner: ".",
			repoName:  "core",
			wantErr:   "invalid repo owner",
		},
		{
			name:      "spaces only owner",
			repoOwner: " ",
			repoName:  "core",
			wantErr:   "invalid repo owner",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			signal := &PipelineSignal{
				RepoOwner: tc.repoOwner,
				RepoName:  tc.repoName,
			}
			result := &ActionResult{
				Action:    "merge",
				Timestamp: ts,
			}

			err := j.Append(signal, result)
			require.Error(t, err)
			assert.Contains(t, err.Error(), tc.wantErr)
		})
	}
}
|
||||
|
||||
// TestJournal_Append_Good_ValidNames verifies that legitimate owner/repo
// names — including dots, hyphens, underscores, and mixed case — pass the
// path-component sanitiser and append successfully.
func TestJournal_Append_Good_ValidNames(t *testing.T) {
	dir := t.TempDir()

	j, err := NewJournal(dir)
	require.NoError(t, err)

	ts := time.Date(2026, 2, 5, 14, 30, 0, 0, time.UTC)

	// Verify valid names with dots, hyphens, underscores all work.
	validNames := []struct {
		owner string
		repo  string
	}{
		{"host-uk", "core"},
		{"my_org", "my_repo"},
		{"org.name", "repo.v2"},
		{"a", "b"},
		{"Org-123", "Repo_456.go"},
	}

	for _, vn := range validNames {
		signal := &PipelineSignal{
			RepoOwner: vn.owner,
			RepoName:  vn.repo,
		}
		result := &ActionResult{
			Action:    "test",
			Timestamp: ts,
		}

		err := j.Append(signal, result)
		assert.NoError(t, err, "expected valid name pair %s/%s to succeed", vn.owner, vn.repo)
	}
}
|
||||
|
||||
func TestJournal_Append_Bad_NilSignal(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
j, err := NewJournal(dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
result := &ActionResult{
|
||||
Action: "merge",
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
|
||||
err = j.Append(nil, result)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "signal is required")
|
||||
}
|
||||
|
||||
func TestJournal_Append_Bad_NilResult(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
j, err := NewJournal(dir)
|
||||
require.NoError(t, err)
|
||||
|
||||
signal := &PipelineSignal{
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-php",
|
||||
}
|
||||
|
||||
err = j.Append(signal, nil)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "result is required")
|
||||
}
|
||||
|
|
@ -1,195 +0,0 @@
|
|||
package jobrunner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"forge.lthn.ai/core/go/pkg/log"
|
||||
)
|
||||
|
||||
// PollerConfig configures a Poller.
type PollerConfig struct {
	// Sources are polled each cycle for pipeline signals.
	Sources []JobSource
	// Handlers are matched against each signal; the first match executes.
	Handlers []JobHandler
	// Journal, if non-nil, records every executed action.
	Journal *Journal
	// PollInterval is the delay between cycles; values <= 0 default to 60s.
	PollInterval time.Duration
	// DryRun, when true, logs would-be actions without executing them.
	DryRun bool
}
|
||||
|
||||
// Poller discovers signals from sources and dispatches them to handlers.
// All fields are guarded by mu; accessor methods make the type safe for
// concurrent use.
type Poller struct {
	mu       sync.RWMutex
	sources  []JobSource
	handlers []JobHandler
	journal  *Journal
	interval time.Duration
	dryRun   bool
	// cycle counts completed poll-dispatch cycles.
	cycle int
}
|
||||
|
||||
// NewPoller creates a Poller from the given config.
|
||||
func NewPoller(cfg PollerConfig) *Poller {
|
||||
interval := cfg.PollInterval
|
||||
if interval <= 0 {
|
||||
interval = 60 * time.Second
|
||||
}
|
||||
|
||||
return &Poller{
|
||||
sources: cfg.Sources,
|
||||
handlers: cfg.Handlers,
|
||||
journal: cfg.Journal,
|
||||
interval: interval,
|
||||
dryRun: cfg.DryRun,
|
||||
}
|
||||
}
|
||||
|
||||
// Cycle returns the number of completed poll-dispatch cycles.
|
||||
func (p *Poller) Cycle() int {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
return p.cycle
|
||||
}
|
||||
|
||||
// DryRun returns whether dry-run mode is enabled.
|
||||
func (p *Poller) DryRun() bool {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
return p.dryRun
|
||||
}
|
||||
|
||||
// SetDryRun enables or disables dry-run mode.
|
||||
func (p *Poller) SetDryRun(v bool) {
|
||||
p.mu.Lock()
|
||||
p.dryRun = v
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// AddSource appends a source to the poller.
|
||||
func (p *Poller) AddSource(s JobSource) {
|
||||
p.mu.Lock()
|
||||
p.sources = append(p.sources, s)
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// AddHandler appends a handler to the poller.
|
||||
func (p *Poller) AddHandler(h JobHandler) {
|
||||
p.mu.Lock()
|
||||
p.handlers = append(p.handlers, h)
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// Run starts a blocking poll-dispatch loop. It runs one cycle immediately,
// then repeats on each tick of the configured interval until the context
// is cancelled.
//
// It returns the first error from RunOnce, or ctx.Err() when the context
// is cancelled. Note that individual source/handler failures are logged
// inside RunOnce and do not stop the loop.
func (p *Poller) Run(ctx context.Context) error {
	// Run an immediate first cycle so callers don't wait a full interval
	// before any work happens.
	if err := p.RunOnce(ctx); err != nil {
		return err
	}

	ticker := time.NewTicker(p.interval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			if err := p.RunOnce(ctx); err != nil {
				return err
			}
		}
	}
}
|
||||
|
||||
// RunOnce performs a single poll-dispatch cycle: iterate sources, poll each,
|
||||
// find the first matching handler for each signal, and execute it.
|
||||
func (p *Poller) RunOnce(ctx context.Context) error {
|
||||
p.mu.Lock()
|
||||
p.cycle++
|
||||
cycle := p.cycle
|
||||
dryRun := p.dryRun
|
||||
sources := make([]JobSource, len(p.sources))
|
||||
copy(sources, p.sources)
|
||||
handlers := make([]JobHandler, len(p.handlers))
|
||||
copy(handlers, p.handlers)
|
||||
p.mu.Unlock()
|
||||
|
||||
log.Info("poller cycle starting", "cycle", cycle, "sources", len(sources), "handlers", len(handlers))
|
||||
|
||||
for _, src := range sources {
|
||||
signals, err := src.Poll(ctx)
|
||||
if err != nil {
|
||||
log.Error("poll failed", "source", src.Name(), "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Info("polled source", "source", src.Name(), "signals", len(signals))
|
||||
|
||||
for _, sig := range signals {
|
||||
handler := p.findHandler(handlers, sig)
|
||||
if handler == nil {
|
||||
log.Debug("no matching handler", "epic", sig.EpicNumber, "child", sig.ChildNumber)
|
||||
continue
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
log.Info("dry-run: would execute",
|
||||
"handler", handler.Name(),
|
||||
"epic", sig.EpicNumber,
|
||||
"child", sig.ChildNumber,
|
||||
"pr", sig.PRNumber,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
result, err := handler.Execute(ctx, sig)
|
||||
elapsed := time.Since(start)
|
||||
|
||||
if err != nil {
|
||||
log.Error("handler execution failed",
|
||||
"handler", handler.Name(),
|
||||
"epic", sig.EpicNumber,
|
||||
"child", sig.ChildNumber,
|
||||
"err", err,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
result.Cycle = cycle
|
||||
result.EpicNumber = sig.EpicNumber
|
||||
result.ChildNumber = sig.ChildNumber
|
||||
result.Duration = elapsed
|
||||
|
||||
if p.journal != nil {
|
||||
if jErr := p.journal.Append(sig, result); jErr != nil {
|
||||
log.Error("journal append failed", "err", jErr)
|
||||
}
|
||||
}
|
||||
|
||||
if rErr := src.Report(ctx, result); rErr != nil {
|
||||
log.Error("source report failed", "source", src.Name(), "err", rErr)
|
||||
}
|
||||
|
||||
log.Info("handler executed",
|
||||
"handler", handler.Name(),
|
||||
"action", result.Action,
|
||||
"success", result.Success,
|
||||
"duration", elapsed,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findHandler returns the first handler that matches the signal, or nil.
|
||||
func (p *Poller) findHandler(handlers []JobHandler, sig *PipelineSignal) JobHandler {
|
||||
for _, h := range handlers {
|
||||
if h.Match(sig) {
|
||||
return h
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,307 +0,0 @@
|
|||
package jobrunner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// --- Mock source ---
|
||||
|
||||
type mockSource struct {
|
||||
name string
|
||||
signals []*PipelineSignal
|
||||
reports []*ActionResult
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (m *mockSource) Name() string { return m.name }
|
||||
|
||||
func (m *mockSource) Poll(_ context.Context) ([]*PipelineSignal, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.signals, nil
|
||||
}
|
||||
|
||||
func (m *mockSource) Report(_ context.Context, result *ActionResult) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.reports = append(m.reports, result)
|
||||
return nil
|
||||
}
|
||||
|
||||
// --- Mock handler ---
|
||||
|
||||
type mockHandler struct {
|
||||
name string
|
||||
matchFn func(*PipelineSignal) bool
|
||||
executed []*PipelineSignal
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (m *mockHandler) Name() string { return m.name }
|
||||
|
||||
func (m *mockHandler) Match(sig *PipelineSignal) bool {
|
||||
if m.matchFn != nil {
|
||||
return m.matchFn(sig)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *mockHandler) Execute(_ context.Context, sig *PipelineSignal) (*ActionResult, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.executed = append(m.executed, sig)
|
||||
return &ActionResult{
|
||||
Action: m.name,
|
||||
RepoOwner: sig.RepoOwner,
|
||||
RepoName: sig.RepoName,
|
||||
PRNumber: sig.PRNumber,
|
||||
Success: true,
|
||||
Timestamp: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestPoller_RunOnce_Good(t *testing.T) {
|
||||
sig := &PipelineSignal{
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 2,
|
||||
PRNumber: 10,
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-php",
|
||||
PRState: "OPEN",
|
||||
CheckStatus: "SUCCESS",
|
||||
Mergeable: "MERGEABLE",
|
||||
}
|
||||
|
||||
src := &mockSource{
|
||||
name: "test-source",
|
||||
signals: []*PipelineSignal{sig},
|
||||
}
|
||||
|
||||
handler := &mockHandler{
|
||||
name: "test-handler",
|
||||
matchFn: func(s *PipelineSignal) bool {
|
||||
return s.PRNumber == 10
|
||||
},
|
||||
}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
Handlers: []JobHandler{handler},
|
||||
})
|
||||
|
||||
err := p.RunOnce(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handler should have been called with our signal.
|
||||
handler.mu.Lock()
|
||||
defer handler.mu.Unlock()
|
||||
require.Len(t, handler.executed, 1)
|
||||
assert.Equal(t, 10, handler.executed[0].PRNumber)
|
||||
|
||||
// Source should have received a report.
|
||||
src.mu.Lock()
|
||||
defer src.mu.Unlock()
|
||||
require.Len(t, src.reports, 1)
|
||||
assert.Equal(t, "test-handler", src.reports[0].Action)
|
||||
assert.True(t, src.reports[0].Success)
|
||||
assert.Equal(t, 1, src.reports[0].Cycle)
|
||||
assert.Equal(t, 1, src.reports[0].EpicNumber)
|
||||
assert.Equal(t, 2, src.reports[0].ChildNumber)
|
||||
|
||||
// Cycle counter should have incremented.
|
||||
assert.Equal(t, 1, p.Cycle())
|
||||
}
|
||||
|
||||
func TestPoller_RunOnce_Good_NoSignals(t *testing.T) {
|
||||
src := &mockSource{
|
||||
name: "empty-source",
|
||||
signals: nil,
|
||||
}
|
||||
|
||||
handler := &mockHandler{
|
||||
name: "unused-handler",
|
||||
}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
Handlers: []JobHandler{handler},
|
||||
})
|
||||
|
||||
err := p.RunOnce(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handler should not have been called.
|
||||
handler.mu.Lock()
|
||||
defer handler.mu.Unlock()
|
||||
assert.Empty(t, handler.executed)
|
||||
|
||||
// Source should not have received reports.
|
||||
src.mu.Lock()
|
||||
defer src.mu.Unlock()
|
||||
assert.Empty(t, src.reports)
|
||||
|
||||
assert.Equal(t, 1, p.Cycle())
|
||||
}
|
||||
|
||||
func TestPoller_RunOnce_Good_NoMatchingHandler(t *testing.T) {
|
||||
sig := &PipelineSignal{
|
||||
EpicNumber: 5,
|
||||
ChildNumber: 8,
|
||||
PRNumber: 42,
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-tenant",
|
||||
PRState: "OPEN",
|
||||
}
|
||||
|
||||
src := &mockSource{
|
||||
name: "test-source",
|
||||
signals: []*PipelineSignal{sig},
|
||||
}
|
||||
|
||||
handler := &mockHandler{
|
||||
name: "picky-handler",
|
||||
matchFn: func(s *PipelineSignal) bool {
|
||||
return false // never matches
|
||||
},
|
||||
}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
Handlers: []JobHandler{handler},
|
||||
})
|
||||
|
||||
err := p.RunOnce(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handler should not have been called.
|
||||
handler.mu.Lock()
|
||||
defer handler.mu.Unlock()
|
||||
assert.Empty(t, handler.executed)
|
||||
|
||||
// Source should not have received reports (no action taken).
|
||||
src.mu.Lock()
|
||||
defer src.mu.Unlock()
|
||||
assert.Empty(t, src.reports)
|
||||
}
|
||||
|
||||
func TestPoller_RunOnce_Good_DryRun(t *testing.T) {
|
||||
sig := &PipelineSignal{
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 3,
|
||||
PRNumber: 20,
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-admin",
|
||||
PRState: "OPEN",
|
||||
CheckStatus: "SUCCESS",
|
||||
Mergeable: "MERGEABLE",
|
||||
}
|
||||
|
||||
src := &mockSource{
|
||||
name: "test-source",
|
||||
signals: []*PipelineSignal{sig},
|
||||
}
|
||||
|
||||
handler := &mockHandler{
|
||||
name: "merge-handler",
|
||||
matchFn: func(s *PipelineSignal) bool {
|
||||
return true
|
||||
},
|
||||
}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
Handlers: []JobHandler{handler},
|
||||
DryRun: true,
|
||||
})
|
||||
|
||||
assert.True(t, p.DryRun())
|
||||
|
||||
err := p.RunOnce(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handler should NOT have been called in dry-run mode.
|
||||
handler.mu.Lock()
|
||||
defer handler.mu.Unlock()
|
||||
assert.Empty(t, handler.executed)
|
||||
|
||||
// Source should not have received reports.
|
||||
src.mu.Lock()
|
||||
defer src.mu.Unlock()
|
||||
assert.Empty(t, src.reports)
|
||||
}
|
||||
|
||||
func TestPoller_SetDryRun_Good(t *testing.T) {
|
||||
p := NewPoller(PollerConfig{})
|
||||
|
||||
assert.False(t, p.DryRun())
|
||||
p.SetDryRun(true)
|
||||
assert.True(t, p.DryRun())
|
||||
p.SetDryRun(false)
|
||||
assert.False(t, p.DryRun())
|
||||
}
|
||||
|
||||
func TestPoller_AddSourceAndHandler_Good(t *testing.T) {
|
||||
p := NewPoller(PollerConfig{})
|
||||
|
||||
sig := &PipelineSignal{
|
||||
EpicNumber: 1,
|
||||
ChildNumber: 1,
|
||||
PRNumber: 5,
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-php",
|
||||
PRState: "OPEN",
|
||||
}
|
||||
|
||||
src := &mockSource{
|
||||
name: "added-source",
|
||||
signals: []*PipelineSignal{sig},
|
||||
}
|
||||
|
||||
handler := &mockHandler{
|
||||
name: "added-handler",
|
||||
matchFn: func(s *PipelineSignal) bool { return true },
|
||||
}
|
||||
|
||||
p.AddSource(src)
|
||||
p.AddHandler(handler)
|
||||
|
||||
err := p.RunOnce(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
handler.mu.Lock()
|
||||
defer handler.mu.Unlock()
|
||||
require.Len(t, handler.executed, 1)
|
||||
assert.Equal(t, 5, handler.executed[0].PRNumber)
|
||||
}
|
||||
|
||||
func TestPoller_Run_Good(t *testing.T) {
|
||||
src := &mockSource{
|
||||
name: "tick-source",
|
||||
signals: nil,
|
||||
}
|
||||
|
||||
p := NewPoller(PollerConfig{
|
||||
Sources: []JobSource{src},
|
||||
PollInterval: 50 * time.Millisecond,
|
||||
})
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 180*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
err := p.Run(ctx)
|
||||
assert.ErrorIs(t, err, context.DeadlineExceeded)
|
||||
|
||||
// Should have completed at least 2 cycles (one immediate + at least one tick).
|
||||
assert.GreaterOrEqual(t, p.Cycle(), 2)
|
||||
}
|
||||
|
||||
func TestPoller_DefaultInterval_Good(t *testing.T) {
|
||||
p := NewPoller(PollerConfig{})
|
||||
assert.Equal(t, 60*time.Second, p.interval)
|
||||
}
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
package jobrunner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PipelineSignal is the structural snapshot of a child issue/PR.
// Carries structural state plus issue title/body for dispatch prompts.
type PipelineSignal struct {
	// Issue/PR identity within the epic pipeline.
	EpicNumber  int
	ChildNumber int
	PRNumber    int
	RepoOwner   string
	RepoName    string

	// PR review state.
	PRState         string // OPEN, MERGED, CLOSED
	IsDraft         bool
	Mergeable       string // MERGEABLE, CONFLICTING, UNKNOWN
	CheckStatus     string // SUCCESS, FAILURE, PENDING
	ThreadsTotal    int    // total review threads on the PR
	ThreadsResolved int    // review threads marked resolved
	LastCommitSHA   string
	LastCommitAt    time.Time
	LastReviewAt    time.Time

	// Dispatch inputs.
	NeedsCoding bool   // true if child has no PR (work not started)
	Assignee    string // issue assignee username (for dispatch)
	IssueTitle  string // child issue title (for dispatch prompt)
	IssueBody   string // child issue body (for dispatch prompt)

	// Agent completion fields.
	Type    string // signal type (e.g., "agent_completion")
	Success bool   // agent completion success flag
	Error   string // agent error message
	Message string // agent completion message
}
|
||||
|
||||
// RepoFullName returns "owner/repo".
|
||||
func (s *PipelineSignal) RepoFullName() string {
|
||||
return s.RepoOwner + "/" + s.RepoName
|
||||
}
|
||||
|
||||
// HasUnresolvedThreads returns true if there are unresolved review threads.
|
||||
func (s *PipelineSignal) HasUnresolvedThreads() bool {
|
||||
return s.ThreadsTotal > s.ThreadsResolved
|
||||
}
|
||||
|
||||
// ActionResult carries the outcome of a handler execution. It is JSON-encoded
// for journaling and for reporting back to the originating JobSource.
type ActionResult struct {
	Action      string        `json:"action"`       // handler action name (e.g. "merge")
	RepoOwner   string        `json:"repo_owner"`
	RepoName    string        `json:"repo_name"`
	EpicNumber  int           `json:"epic"`
	ChildNumber int           `json:"child"`
	PRNumber    int           `json:"pr"`
	Success     bool          `json:"success"`
	Error       string        `json:"error,omitempty"` // omitted on success
	Timestamp   time.Time     `json:"ts"`
	// NOTE(review): time.Duration JSON-encodes as nanoseconds, not
	// milliseconds, despite the "duration_ms" tag — confirm consumers.
	Duration    time.Duration `json:"duration_ms"`
	Cycle       int           `json:"cycle"` // poller cycle that produced this result
}
|
||||
|
||||
// JobSource discovers actionable work from an external system.
type JobSource interface {
	// Name identifies the source in logs.
	Name() string
	// Poll returns the current batch of actionable signals.
	Poll(ctx context.Context) ([]*PipelineSignal, error)
	// Report delivers the outcome of an executed action back to the source.
	Report(ctx context.Context, result *ActionResult) error
}
|
||||
|
||||
// JobHandler processes a single pipeline signal.
type JobHandler interface {
	// Name identifies the handler in logs and results.
	Name() string
	// Match reports whether this handler should process the signal.
	Match(signal *PipelineSignal) bool
	// Execute performs the action and returns its result.
	Execute(ctx context.Context, signal *PipelineSignal) (*ActionResult, error)
}
|
||||
|
|
@ -1,98 +0,0 @@
|
|||
package jobrunner
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestPipelineSignal_RepoFullName_Good(t *testing.T) {
|
||||
sig := &PipelineSignal{
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-php",
|
||||
}
|
||||
assert.Equal(t, "host-uk/core-php", sig.RepoFullName())
|
||||
}
|
||||
|
||||
func TestPipelineSignal_HasUnresolvedThreads_Good(t *testing.T) {
|
||||
sig := &PipelineSignal{
|
||||
ThreadsTotal: 5,
|
||||
ThreadsResolved: 3,
|
||||
}
|
||||
assert.True(t, sig.HasUnresolvedThreads())
|
||||
}
|
||||
|
||||
func TestPipelineSignal_HasUnresolvedThreads_Bad_AllResolved(t *testing.T) {
|
||||
sig := &PipelineSignal{
|
||||
ThreadsTotal: 4,
|
||||
ThreadsResolved: 4,
|
||||
}
|
||||
assert.False(t, sig.HasUnresolvedThreads())
|
||||
|
||||
// Also verify zero threads is not unresolved.
|
||||
sigZero := &PipelineSignal{
|
||||
ThreadsTotal: 0,
|
||||
ThreadsResolved: 0,
|
||||
}
|
||||
assert.False(t, sigZero.HasUnresolvedThreads())
|
||||
}
|
||||
|
||||
func TestActionResult_JSON_Good(t *testing.T) {
|
||||
ts := time.Date(2026, 2, 5, 12, 0, 0, 0, time.UTC)
|
||||
result := &ActionResult{
|
||||
Action: "merge",
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-tenant",
|
||||
EpicNumber: 42,
|
||||
ChildNumber: 7,
|
||||
PRNumber: 99,
|
||||
Success: true,
|
||||
Timestamp: ts,
|
||||
Duration: 1500 * time.Millisecond,
|
||||
Cycle: 3,
|
||||
}
|
||||
|
||||
data, err := json.Marshal(result)
|
||||
require.NoError(t, err)
|
||||
|
||||
var decoded map[string]any
|
||||
err = json.Unmarshal(data, &decoded)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "merge", decoded["action"])
|
||||
assert.Equal(t, "host-uk", decoded["repo_owner"])
|
||||
assert.Equal(t, "core-tenant", decoded["repo_name"])
|
||||
assert.Equal(t, float64(42), decoded["epic"])
|
||||
assert.Equal(t, float64(7), decoded["child"])
|
||||
assert.Equal(t, float64(99), decoded["pr"])
|
||||
assert.Equal(t, true, decoded["success"])
|
||||
assert.Equal(t, float64(3), decoded["cycle"])
|
||||
|
||||
// Error field should be omitted when empty.
|
||||
_, hasError := decoded["error"]
|
||||
assert.False(t, hasError, "error field should be omitted when empty")
|
||||
|
||||
// Verify round-trip with error field present.
|
||||
resultWithErr := &ActionResult{
|
||||
Action: "merge",
|
||||
RepoOwner: "host-uk",
|
||||
RepoName: "core-tenant",
|
||||
Success: false,
|
||||
Error: "checks failing",
|
||||
Timestamp: ts,
|
||||
Duration: 200 * time.Millisecond,
|
||||
Cycle: 1,
|
||||
}
|
||||
data2, err := json.Marshal(resultWithErr)
|
||||
require.NoError(t, err)
|
||||
|
||||
var decoded2 map[string]any
|
||||
err = json.Unmarshal(data2, &decoded2)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "checks failing", decoded2["error"])
|
||||
assert.Equal(t, false, decoded2["success"])
|
||||
}
|
||||
Loading…
Add table
Reference in a new issue