diff --git a/.gitignore b/.gitignore index f6e26ac..fdb5520 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ tasks patch_cov.* +go.work.sum diff --git a/docs/plans/2026-02-05-core-ide-job-runner-design.md b/docs/plans/2026-02-05-core-ide-job-runner-design.md new file mode 100644 index 0000000..bec933a --- /dev/null +++ b/docs/plans/2026-02-05-core-ide-job-runner-design.md @@ -0,0 +1,271 @@ +# Core-IDE Job Runner Design + +**Date:** 2026-02-05 +**Status:** Approved +**Author:** @Snider + Claude + +--- + +## Goal + +Turn core-ide into an autonomous job runner that polls for actionable pipeline work, executes it via typed MCP tool handlers, captures JSONL training data, and self-updates. Supports 12 nodes running headless on servers and desktop on developer machines. + +--- + +## Architecture Overview + +``` ++-------------------------------------------------+ +| core-ide | +| | +| +----------+ +-----------+ +----------+ | +| | Poller |-->| Dispatcher|-->| Handler | | +| | (Source) | | (MCP route)| | Registry | | +| +----------+ +-----------+ +----------+ | +| | | | | +| | +----v----+ +---v-------+ | +| | | Journal | | JobSource | | +| | | (JSONL) | | (adapter) | | +| | +---------+ +-----------+ | +| +----v-----+ | +| | Updater | (existing internal/cmd/updater) | +| +----------+ | ++-------------------------------------------------+ +``` + +**Three components:** +- **Poller** -- Periodic scan via pluggable JobSource adapters. Builds PipelineSignal structs from API responses. Never reads comment bodies (injection vector). +- **Dispatcher** -- Matches signals against handler registry in priority order. One action per signal per cycle (prevents cascades). +- **Journal** -- Appends JSONL after each completed action per issue-epic step 10 spec. Structural signals only -- IDs, SHAs, timestamps, cycle counts, instructions sent, automations performed. + +--- + +## Job Source Abstraction + +GitHub is the first adapter. 
The platform's own Agentic API replaces it later. Handler logic is source-agnostic. + +```go +type JobSource interface { + Name() string + Poll(ctx context.Context) ([]*PipelineSignal, error) + Report(ctx context.Context, result *ActionResult) error +} +``` + +| Adapter | When | Transport | +|-------------------|-------|----------------------------------------| +| `GitHubSource` | Now | REST API + conditional requests (ETag) | +| `HostUKSource` | Next | Agentic API (WebSocket or poll) | +| `HyperswarmSource`| Later | P2P encrypted channels via Holepunch | + +**Multi-source:** Poller runs multiple sources concurrently. Own repos get priority. When idle (zero signals for N consecutive cycles), external project sources activate (WailsApp first). + +**API budget:** 50% credit allocation for harvest mode is a config value on the source, not hardcoded. + +--- + +## Pipeline Signal + +The structural snapshot passed to handlers. Never contains comment bodies or free text. + +```go +type PipelineSignal struct { + EpicNumber int + ChildNumber int + PRNumber int + RepoOwner string + RepoName string + PRState string // OPEN, MERGED, CLOSED + IsDraft bool + Mergeable string // MERGEABLE, CONFLICTING, UNKNOWN + CheckStatus string // SUCCESS, FAILURE, PENDING + ThreadsTotal int + ThreadsResolved int + LastCommitSHA string + LastCommitAt time.Time + LastReviewAt time.Time +} +``` + +--- + +## Handler Registry + +Each action from the issue-epic flow is a registered handler. All Go functions with typed inputs/outputs. 
+ +```go +type JobHandler interface { + Name() string + Match(signal *PipelineSignal) bool + Execute(ctx context.Context, signal *PipelineSignal) (*ActionResult, error) +} +``` + +| Handler | Epic Stage | Input Signals | Action | +|--------------------|-----------|---------------------------------------------------|---------------------------------------------| +| `publish_draft` | 3 | PR draft=true, checks=SUCCESS | Mark PR as ready for review | +| `send_fix_command` | 4/6 | PR CONFLICTING or threads without fix commit | Comment "fix merge conflict" / "fix the code reviews" | +| `resolve_threads` | 5 | Unresolved threads, fix commit exists after review | Resolve all pre-commit threads | +| `enable_auto_merge`| 7 | PR MERGEABLE, checks passing, threads resolved | Enable auto-merge via API | +| `tick_parent` | 8 | Child PR merged | Update epic issue checklist | +| `close_child` | 9 | Child PR merged + parent ticked | Close child issue | +| `capture_journal` | 10 | Any completed action | Append JSONL entry | + +**ActionResult** carries what was done -- action name, target IDs, success/failure, timestamps. Feeds directly into JSONL journal. + +Handlers register at init time, same pattern as CLI commands in the existing codebase. + +--- + +## Headless vs Desktop Mode + +Same binary, same handlers, different UI surface. 
+ +**Detection:** + +```go +func hasDisplay() bool { + if runtime.GOOS == "windows" { return true } + return os.Getenv("DISPLAY") != "" || os.Getenv("WAYLAND_DISPLAY") != "" +} +``` + +**Headless mode** (Linux server, no display): +- Skip Wails window creation +- Start poller immediately +- Start MCP bridge (port 9877) for external tool access +- Log to stdout/file (structured JSON) +- Updater: check on startup, auto-apply + restart via watcher +- Managed by systemd: `Restart=always` + +**Desktop mode** (display available): +- Full Wails system tray + webview panel +- Tray icon shows status: idle, polling, executing, error +- Tray menu: Start/Stop poller, Force update, Open journal, Configure sources +- Poller off by default (developer toggle) +- Same MCP bridge, same handlers, same journal + +**CLI override:** `core-ide --headless` forces headless. `core-ide --desktop` forces GUI. + +**Shared startup:** + +```go +func main() { + // 1. Load config (repos, interval, channel, sources) + // 2. Build handler registry + // 3. Init journal + // 4. Init updater (check on startup) + // 5. Branch: + if hasDisplay() { + startDesktop() // Wails + tray + optional poller + } else { + startHeadless() // Poller + MCP bridge + signal handling + } +} +``` + +--- + +## Poller Configuration + +```go +type PollerConfig struct { + Sources []JobSource + Handlers []JobHandler + Journal *Journal + PollInterval time.Duration // default: 60s + DryRun bool // log without executing +} +``` + +**Rate limiting:** GitHub API allows 5000 req/hr with token. Full scan of 4 repos with ~30 PRs uses ~150 requests. Poller uses conditional requests (If-None-Match/ETag) to avoid counting unchanged responses. Backs off to 5min interval when idle. 
+ +**CLI flags:** +- `--poll-interval` (default: 60s) +- `--repos` (comma-separated: `host-uk/core,host-uk/core-php`) +- `--dry-run` (log actions without executing) +- `--headless` / `--desktop` (mode override) + +--- + +## Self-Update + +Uses existing `internal/cmd/updater` package. Binary-safe replacement with platform-specific watcher process, SemVer channel selection (stable/beta/alpha/dev), automatic rollback on failure. + +**Integration:** +- Headless: `CheckAndUpdateOnStartup` -- auto-apply + restart +- Desktop: `CheckOnStartup` -- notify via tray, user confirms + +--- + +## Training Data (Journal) + +JSONL format per issue-epic step 10. One record per completed action. + +```json +{ + "ts": "2026-02-05T12:00:00Z", + "epic": 299, + "child": 212, + "pr": 316, + "repo": "host-uk/core", + "action": "publish_draft", + "signals": { + "pr_state": "OPEN", + "is_draft": true, + "check_status": "SUCCESS", + "mergeable": "UNKNOWN", + "threads_total": 0, + "threads_resolved": 0 + }, + "result": { + "success": true, + "duration_ms": 340 + }, + "cycle": 1 +} +``` + +**Rules:** +- NO content (no comments, no messages, no bodies) +- Structural signals only -- safe for training +- Append-only JSONL file per node +- File path: `~/.core/journal//.jsonl` + +--- + +## Files Summary + +| File | Action | +|------|--------| +| `pkg/jobrunner/types.go` | CREATE -- JobSource, JobHandler, PipelineSignal, ActionResult interfaces | +| `pkg/jobrunner/poller.go` | CREATE -- Poller, Dispatcher, multi-source orchestration | +| `pkg/jobrunner/journal.go` | CREATE -- JSONL writer, append-only, structured records | +| `pkg/jobrunner/github/source.go` | CREATE -- GitHubSource adapter, conditional requests | +| `pkg/jobrunner/github/signals.go` | CREATE -- PR/issue state extraction, signal building | +| `internal/core-ide/handlers/publish_draft.go` | CREATE -- Publish draft PR handler | +| `internal/core-ide/handlers/resolve_threads.go` | CREATE -- Resolve review threads handler | +| 
`internal/core-ide/handlers/send_fix_command.go` | CREATE -- Send fix command handler | +| `internal/core-ide/handlers/enable_auto_merge.go` | CREATE -- Enable auto-merge handler | +| `internal/core-ide/handlers/tick_parent.go` | CREATE -- Tick epic checklist handler | +| `internal/core-ide/handlers/close_child.go` | CREATE -- Close child issue handler | +| `internal/core-ide/main.go` | MODIFY -- Headless/desktop branching, poller integration | +| `internal/core-ide/mcp_bridge.go` | MODIFY -- Register job handlers as MCP tools | + +--- + +## What Doesn't Ship Yet + +- HostUK Agentic API adapter (future -- replaces GitHub) +- Hyperswarm P2P adapter (future) +- External project scanning / harvest mode (future -- WailsApp first) +- LoRA training pipeline (separate concern -- reads JSONL journal) + +--- + +## Testing Strategy + +- **Handlers:** Unit-testable. Mock PipelineSignal in, assert API calls out. +- **Poller:** httptest server returning fixture responses. +- **Journal:** Read back JSONL, verify schema. +- **Integration:** Dry-run mode against real repos, verify signals match expected state. diff --git a/docs/plans/2026-02-05-core-ide-job-runner-plan.md b/docs/plans/2026-02-05-core-ide-job-runner-plan.md new file mode 100644 index 0000000..c0bbbb3 --- /dev/null +++ b/docs/plans/2026-02-05-core-ide-job-runner-plan.md @@ -0,0 +1,2116 @@ +# Core-IDE Job Runner Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Turn core-ide into an autonomous job runner that polls GitHub for pipeline work, executes it via typed handlers, and captures JSONL training data. + +**Architecture:** Go workspace (`go.work`) linking root module + core-ide module. Pluggable `JobSource` interface with GitHub as first adapter. `JobHandler` interface for each pipeline action (publish draft, resolve threads, etc.). `Poller` orchestrates discovery and dispatch. `Journal` writes JSONL. 
Headless mode reuses existing `pkg/cli.Daemon` infrastructure. Handlers live in `pkg/jobrunner/` (root module), core-ide imports them via workspace. + +**Tech Stack:** Go 1.25, GitHub REST API (via `oauth2`), `pkg/cli.Daemon` for headless, `testify/assert` + `httptest` for tests. + +--- + +### Task 0: Set Up Go Workspace (`go.work`) + +**Files:** +- Create: `go.work` + +**Context:** The repo has two real modules — the root (`github.com/host-uk/core`) and core-ide (`github.com/host-uk/core/internal/core-ide`). Without a workspace, core-ide can't import `pkg/jobrunner` from the root module during local development without fragile `replace` directives. A `go.work` file makes cross-module imports resolve locally, keeps each module's `go.mod` clean, and lets CI build each variant independently. + +**Step 1: Create the workspace file** + +```bash +cd /Users/snider/Code/host-uk/core +go work init . ./internal/core-ide +``` + +This generates `go.work`: +``` +go 1.25.5 + +use ( + . + ./internal/core-ide +) +``` + +**Step 2: Sync dependency versions across modules** + +```bash +go work sync +``` + +This aligns shared dependency versions between the two modules. + +**Step 3: Verify the workspace** + +Run: `go build ./...` +Expected: Root module builds successfully. + +Run: `cd internal/core-ide && go build .` +Expected: core-ide builds successfully. + +Run: `go test ./pkg/... -count=1` +Expected: All existing tests pass (workspace doesn't change behaviour, just resolution). + +**Step 4: Add go.work.sum to gitignore** + +`go.work.sum` is generated and shouldn't be committed (it's machine-specific like `go.sum` but for the workspace). Check if `.gitignore` already excludes it: + +```bash +grep -q 'go.work.sum' .gitignore || echo 'go.work.sum' >> .gitignore +``` + +**Note:** Whether to commit `go.work` itself is a choice. Committing it means all developers and CI share the same workspace layout. 
Since the module layout is fixed (root + core-ide), committing it is the right call — it documents the build variants explicitly. + +**Step 5: Commit** + +```bash +git add go.work .gitignore +git commit -m "build: add Go workspace for root + core-ide modules" +``` + +--- + +### Task 1: Core Types (`pkg/jobrunner/types.go`) + +**Files:** +- Create: `pkg/jobrunner/types.go` +- Test: `pkg/jobrunner/types_test.go` + +**Step 1: Write the test file** + +```go +package jobrunner + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestPipelineSignal_RepoFullName_Good(t *testing.T) { + s := &PipelineSignal{RepoOwner: "host-uk", RepoName: "core"} + assert.Equal(t, "host-uk/core", s.RepoFullName()) +} + +func TestPipelineSignal_HasUnresolvedThreads_Good(t *testing.T) { + s := &PipelineSignal{ThreadsTotal: 5, ThreadsResolved: 3} + assert.True(t, s.HasUnresolvedThreads()) +} + +func TestPipelineSignal_HasUnresolvedThreads_Bad_AllResolved(t *testing.T) { + s := &PipelineSignal{ThreadsTotal: 5, ThreadsResolved: 5} + assert.False(t, s.HasUnresolvedThreads()) +} + +func TestActionResult_JSON_Good(t *testing.T) { + r := &ActionResult{ + Action: "publish_draft", + RepoOwner: "host-uk", + RepoName: "core", + PRNumber: 315, + Success: true, + Timestamp: time.Date(2026, 2, 5, 12, 0, 0, 0, time.UTC), + } + assert.Equal(t, "publish_draft", r.Action) + assert.True(t, r.Success) +} +``` + +**Step 2: Run test to verify it fails** + +Run: `go test ./pkg/jobrunner/ -v -count=1` +Expected: FAIL — package does not exist yet. + +**Step 3: Write the types** + +```go +package jobrunner + +import ( + "context" + "time" +) + +// PipelineSignal is the structural snapshot of a child issue/PR. +// Never contains comment bodies or free text — structural signals only. 
+type PipelineSignal struct { + EpicNumber int + ChildNumber int + PRNumber int + RepoOwner string + RepoName string + PRState string // OPEN, MERGED, CLOSED + IsDraft bool + Mergeable string // MERGEABLE, CONFLICTING, UNKNOWN + CheckStatus string // SUCCESS, FAILURE, PENDING + ThreadsTotal int + ThreadsResolved int + LastCommitSHA string + LastCommitAt time.Time + LastReviewAt time.Time +} + +// RepoFullName returns "owner/repo". +func (s *PipelineSignal) RepoFullName() string { + return s.RepoOwner + "/" + s.RepoName +} + +// HasUnresolvedThreads returns true if there are unresolved review threads. +func (s *PipelineSignal) HasUnresolvedThreads() bool { + return s.ThreadsTotal > s.ThreadsResolved +} + +// ActionResult carries the outcome of a handler execution. +type ActionResult struct { + Action string `json:"action"` + RepoOwner string `json:"repo_owner"` + RepoName string `json:"repo_name"` + EpicNumber int `json:"epic"` + ChildNumber int `json:"child"` + PRNumber int `json:"pr"` + Success bool `json:"success"` + Error string `json:"error,omitempty"` + Timestamp time.Time `json:"ts"` + Duration time.Duration `json:"duration_ms"` + Cycle int `json:"cycle"` +} + +// JobSource discovers actionable work from an external system. +type JobSource interface { + Name() string + Poll(ctx context.Context) ([]*PipelineSignal, error) + Report(ctx context.Context, result *ActionResult) error +} + +// JobHandler processes a single pipeline signal. +type JobHandler interface { + Name() string + Match(signal *PipelineSignal) bool + Execute(ctx context.Context, signal *PipelineSignal) (*ActionResult, error) +} +``` + +**Step 4: Run tests** + +Run: `go test ./pkg/jobrunner/ -v -count=1` +Expected: PASS (4 tests). 
+ +**Step 5: Commit** + +```bash +git add pkg/jobrunner/types.go pkg/jobrunner/types_test.go +git commit -m "feat(jobrunner): add core types — PipelineSignal, ActionResult, JobSource, JobHandler" +``` + +--- + +### Task 2: Journal JSONL Writer (`pkg/jobrunner/journal.go`) + +**Files:** +- Create: `pkg/jobrunner/journal.go` +- Test: `pkg/jobrunner/journal_test.go` + +**Step 1: Write the test** + +```go +package jobrunner + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestJournal_Append_Good(t *testing.T) { + dir := t.TempDir() + j, err := NewJournal(dir) + require.NoError(t, err) + + signal := &PipelineSignal{ + EpicNumber: 299, + ChildNumber: 212, + PRNumber: 316, + RepoOwner: "host-uk", + RepoName: "core", + PRState: "OPEN", + IsDraft: true, + CheckStatus: "SUCCESS", + } + + result := &ActionResult{ + Action: "publish_draft", + RepoOwner: "host-uk", + RepoName: "core", + PRNumber: 316, + Success: true, + Timestamp: time.Date(2026, 2, 5, 12, 0, 0, 0, time.UTC), + Duration: 340 * time.Millisecond, + Cycle: 1, + } + + err = j.Append(signal, result) + require.NoError(t, err) + + // Read the file back + pattern := filepath.Join(dir, "host-uk", "core", "*.jsonl") + files, _ := filepath.Glob(pattern) + require.Len(t, files, 1) + + data, err := os.ReadFile(files[0]) + require.NoError(t, err) + + var entry JournalEntry + err = json.Unmarshal([]byte(strings.TrimSpace(string(data))), &entry) + require.NoError(t, err) + + assert.Equal(t, "publish_draft", entry.Action) + assert.Equal(t, 316, entry.PR) + assert.Equal(t, 299, entry.Epic) + assert.True(t, entry.Result.Success) +} + +func TestJournal_Append_Bad_NilSignal(t *testing.T) { + dir := t.TempDir() + j, err := NewJournal(dir) + require.NoError(t, err) + + err = j.Append(nil, &ActionResult{}) + assert.Error(t, err) +} +``` + +**Step 2: Run test to verify it fails** + +Run: `go test 
./pkg/jobrunner/ -run TestJournal -v -count=1`
+Expected: FAIL — `NewJournal` undefined.
+
+**Step 3: Write the implementation**
+
+```go
+package jobrunner
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+)
+
+// JournalEntry is a single JSONL record for training data.
+type JournalEntry struct {
+	Timestamp time.Time      `json:"ts"`
+	Epic      int            `json:"epic"`
+	Child     int            `json:"child"`
+	PR        int            `json:"pr"`
+	Repo      string         `json:"repo"`
+	Action    string         `json:"action"`
+	Signals   SignalSnapshot `json:"signals"`
+	Result    ResultSnapshot `json:"result"`
+	Cycle     int            `json:"cycle"`
+}
+
+// SignalSnapshot captures the structural state at action time.
+type SignalSnapshot struct {
+	PRState         string `json:"pr_state"`
+	IsDraft         bool   `json:"is_draft"`
+	CheckStatus     string `json:"check_status"`
+	Mergeable       string `json:"mergeable"`
+	ThreadsTotal    int    `json:"threads_total"`
+	ThreadsResolved int    `json:"threads_resolved"`
+}
+
+// ResultSnapshot captures the action outcome.
+type ResultSnapshot struct {
+	Success    bool   `json:"success"`
+	Error      string `json:"error,omitempty"`
+	DurationMs int64  `json:"duration_ms"`
+}
+
+// Journal writes append-only JSONL files organised by repo and date.
+type Journal struct {
+	baseDir string
+	mu      sync.Mutex
+}
+
+// NewJournal creates a journal writer rooted at baseDir.
+// Files are written to baseDir/<owner>/<repo>/YYYY-MM-DD.jsonl.
+func NewJournal(baseDir string) (*Journal, error) {
+	if baseDir == "" {
+		return nil, fmt.Errorf("journal base directory is required")
+	}
+	return &Journal{baseDir: baseDir}, nil
+}
+
+// Append writes a journal entry for the given signal and result.
+func (j *Journal) Append(signal *PipelineSignal, result *ActionResult) error { + if signal == nil { + return fmt.Errorf("signal is required") + } + if result == nil { + return fmt.Errorf("result is required") + } + + entry := JournalEntry{ + Timestamp: result.Timestamp, + Epic: signal.EpicNumber, + Child: signal.ChildNumber, + PR: signal.PRNumber, + Repo: signal.RepoFullName(), + Action: result.Action, + Signals: SignalSnapshot{ + PRState: signal.PRState, + IsDraft: signal.IsDraft, + CheckStatus: signal.CheckStatus, + Mergeable: signal.Mergeable, + ThreadsTotal: signal.ThreadsTotal, + ThreadsResolved: signal.ThreadsResolved, + }, + Result: ResultSnapshot{ + Success: result.Success, + Error: result.Error, + DurationMs: result.Duration.Milliseconds(), + }, + Cycle: result.Cycle, + } + + data, err := json.Marshal(entry) + if err != nil { + return fmt.Errorf("marshal journal entry: %w", err) + } + data = append(data, '\n') + + // Build path: baseDir/owner/repo/YYYY-MM-DD.jsonl + date := result.Timestamp.UTC().Format("2006-01-02") + dir := filepath.Join(j.baseDir, signal.RepoOwner, signal.RepoName) + + j.mu.Lock() + defer j.mu.Unlock() + + if err := os.MkdirAll(dir, 0o755); err != nil { + return fmt.Errorf("create journal directory: %w", err) + } + + path := filepath.Join(dir, date+".jsonl") + f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return fmt.Errorf("open journal file: %w", err) + } + defer f.Close() + + _, err = f.Write(data) + return err +} +``` + +**Step 4: Run tests** + +Run: `go test ./pkg/jobrunner/ -v -count=1` +Expected: PASS (all tests including Task 1). 
+ +**Step 5: Commit** + +```bash +git add pkg/jobrunner/journal.go pkg/jobrunner/journal_test.go +git commit -m "feat(jobrunner): add JSONL journal writer for training data" +``` + +--- + +### Task 3: Poller and Dispatcher (`pkg/jobrunner/poller.go`) + +**Files:** +- Create: `pkg/jobrunner/poller.go` +- Test: `pkg/jobrunner/poller_test.go` + +**Step 1: Write the test** + +```go +package jobrunner + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type mockSource struct { + name string + signals []*PipelineSignal + reports []*ActionResult + mu sync.Mutex +} + +func (m *mockSource) Name() string { return m.name } +func (m *mockSource) Poll(_ context.Context) ([]*PipelineSignal, error) { + return m.signals, nil +} +func (m *mockSource) Report(_ context.Context, r *ActionResult) error { + m.mu.Lock() + m.reports = append(m.reports, r) + m.mu.Unlock() + return nil +} + +type mockHandler struct { + name string + matchFn func(*PipelineSignal) bool + executed []*PipelineSignal + mu sync.Mutex +} + +func (m *mockHandler) Name() string { return m.name } +func (m *mockHandler) Match(s *PipelineSignal) bool { + if m.matchFn != nil { + return m.matchFn(s) + } + return true +} +func (m *mockHandler) Execute(_ context.Context, s *PipelineSignal) (*ActionResult, error) { + m.mu.Lock() + m.executed = append(m.executed, s) + m.mu.Unlock() + return &ActionResult{ + Action: m.name, + Success: true, + Timestamp: time.Now().UTC(), + }, nil +} + +func TestPoller_RunOnce_Good(t *testing.T) { + signal := &PipelineSignal{ + PRNumber: 315, + RepoOwner: "host-uk", + RepoName: "core", + IsDraft: true, + PRState: "OPEN", + } + + source := &mockSource{name: "test", signals: []*PipelineSignal{signal}} + handler := &mockHandler{name: "publish_draft"} + journal, err := NewJournal(t.TempDir()) + require.NoError(t, err) + + p := NewPoller(PollerConfig{ + Sources: []JobSource{source}, + Handlers: 
[]JobHandler{handler},
+		Journal:      journal,
+		PollInterval: time.Second,
+	})
+
+	err = p.RunOnce(context.Background())
+	require.NoError(t, err)
+
+	handler.mu.Lock()
+	assert.Len(t, handler.executed, 1)
+	handler.mu.Unlock()
+}
+
+func TestPoller_RunOnce_Good_NoSignals(t *testing.T) {
+	source := &mockSource{name: "test", signals: nil}
+	handler := &mockHandler{name: "noop"}
+	journal, err := NewJournal(t.TempDir())
+	require.NoError(t, err)
+
+	p := NewPoller(PollerConfig{
+		Sources:  []JobSource{source},
+		Handlers: []JobHandler{handler},
+		Journal:  journal,
+	})
+
+	err = p.RunOnce(context.Background())
+	require.NoError(t, err)
+
+	handler.mu.Lock()
+	assert.Len(t, handler.executed, 0)
+	handler.mu.Unlock()
+}
+
+func TestPoller_RunOnce_Good_NoMatchingHandler(t *testing.T) {
+	signal := &PipelineSignal{PRNumber: 1, RepoOwner: "a", RepoName: "b"}
+	source := &mockSource{name: "test", signals: []*PipelineSignal{signal}}
+	handler := &mockHandler{
+		name:    "never_match",
+		matchFn: func(*PipelineSignal) bool { return false },
+	}
+	journal, err := NewJournal(t.TempDir())
+	require.NoError(t, err)
+
+	p := NewPoller(PollerConfig{
+		Sources:  []JobSource{source},
+		Handlers: []JobHandler{handler},
+		Journal:  journal,
+	})
+
+	err = p.RunOnce(context.Background())
+	require.NoError(t, err)
+
+	handler.mu.Lock()
+	assert.Len(t, handler.executed, 0)
+	handler.mu.Unlock()
+}
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `go test ./pkg/jobrunner/ -run TestPoller -v -count=1`
+Expected: FAIL — `NewPoller` undefined.
+
+**Step 3: Write the implementation**
+
+```go
+package jobrunner
+
+import (
+	"context"
+	"time"
+
+	"github.com/host-uk/core/pkg/log"
+)
+
+// PollerConfig configures the job runner poller.
+type PollerConfig struct {
+	Sources      []JobSource
+	Handlers     []JobHandler
+	Journal      *Journal
+	PollInterval time.Duration
+	DryRun       bool
+}
+
+// Poller discovers and dispatches pipeline work.
+type Poller struct { + cfg PollerConfig + cycle int +} + +// NewPoller creates a poller with the given configuration. +func NewPoller(cfg PollerConfig) *Poller { + if cfg.PollInterval == 0 { + cfg.PollInterval = 60 * time.Second + } + return &Poller{cfg: cfg} +} + +// Run starts the polling loop. Blocks until context is cancelled. +func (p *Poller) Run(ctx context.Context) error { + ticker := time.NewTicker(p.cfg.PollInterval) + defer ticker.Stop() + + // Run once immediately + if err := p.RunOnce(ctx); err != nil { + log.Info("poller", "cycle_error", err) + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + if err := p.RunOnce(ctx); err != nil { + log.Info("poller", "cycle_error", err) + } + } + } +} + +// RunOnce performs a single poll-dispatch cycle across all sources. +func (p *Poller) RunOnce(ctx context.Context) error { + p.cycle++ + + for _, source := range p.cfg.Sources { + if err := ctx.Err(); err != nil { + return err + } + + signals, err := source.Poll(ctx) + if err != nil { + log.Info("poller", "source", source.Name(), "poll_error", err) + continue + } + + for _, signal := range signals { + if err := ctx.Err(); err != nil { + return err + } + p.dispatch(ctx, source, signal) + } + } + + return nil +} + +// dispatch finds the first matching handler and executes it. +// One action per signal per cycle. 
+func (p *Poller) dispatch(ctx context.Context, source JobSource, signal *PipelineSignal) {
+	for _, handler := range p.cfg.Handlers {
+		if !handler.Match(signal) {
+			continue
+		}
+
+		if p.cfg.DryRun {
+			log.Info("poller",
+				"dry_run", handler.Name(),
+				"repo", signal.RepoFullName(),
+				"pr", signal.PRNumber,
+			)
+			return
+		}
+
+		start := time.Now()
+		result, err := handler.Execute(ctx, signal)
+		if err != nil {
+			log.Info("poller",
+				"handler", handler.Name(),
+				"error", err,
+				"repo", signal.RepoFullName(),
+				"pr", signal.PRNumber,
+			)
+			return
+		}
+
+		result.Cycle = p.cycle
+		result.EpicNumber = signal.EpicNumber
+		result.ChildNumber = signal.ChildNumber
+		result.Duration = time.Since(start)
+
+		// Write to journal
+		if p.cfg.Journal != nil {
+			if err := p.cfg.Journal.Append(signal, result); err != nil {
+				log.Info("poller", "journal_error", err)
+			}
+		}
+
+		// Report back to source
+		if err := source.Report(ctx, result); err != nil {
+			log.Info("poller", "report_error", err)
+		}
+
+		return // one action per signal per cycle
+	}
+}
+
+// Cycle returns the current cycle count.
+func (p *Poller) Cycle() int {
+	return p.cycle
+}
+
+// DryRun returns whether the poller is in dry-run mode.
+func (p *Poller) DryRun() bool {
+	return p.cfg.DryRun
+}
+
+// SetDryRun enables or disables dry-run mode.
+func (p *Poller) SetDryRun(v bool) {
+	p.cfg.DryRun = v
+}
+
+// AddSource appends a job source to the poller.
+func (p *Poller) AddSource(s JobSource) {
+	p.cfg.Sources = append(p.cfg.Sources, s)
+}
+
+// AddHandler appends a job handler to the poller.
+func (p *Poller) AddHandler(h JobHandler) {
+	p.cfg.Handlers = append(p.cfg.Handlers, h)
+}
+```
+
+**Note:** The implementation does not use `fmt` directly, so `fmt` must not appear in the import block — only `context`, `time`, and `github.com/host-uk/core/pkg/log` are needed (an unused import is a compile error in Go).
+ +**Step 4: Run tests** + +Run: `go test ./pkg/jobrunner/ -v -count=1` +Expected: PASS (all tests). + +**Step 5: Commit** + +```bash +git add pkg/jobrunner/poller.go pkg/jobrunner/poller_test.go +git commit -m "feat(jobrunner): add Poller with multi-source dispatch and journal integration" +``` + +--- + +### Task 4: GitHub Source — Signal Builder (`pkg/jobrunner/github/`) + +**Files:** +- Create: `pkg/jobrunner/github/source.go` +- Create: `pkg/jobrunner/github/signals.go` +- Test: `pkg/jobrunner/github/source_test.go` + +**Context:** This package lives in the root go.mod (`github.com/host-uk/core`), NOT in the core-ide module. It uses `oauth2` and the GitHub REST API (same pattern as `internal/cmd/updater/github.go`). Uses conditional requests (ETag/If-None-Match) to conserve rate limit. + +**Step 1: Write the test** + +```go +package github + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/host-uk/core/pkg/jobrunner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGitHubSource_Poll_Good(t *testing.T) { + // Mock GitHub API: return one open PR that's a draft with passing checks + mux := http.NewServeMux() + + // GET /repos/host-uk/core/issues?labels=epic&state=open + mux.HandleFunc("/repos/host-uk/core/issues", func(w http.ResponseWriter, r *http.Request) { + if r.URL.Query().Get("labels") == "epic" { + json.NewEncoder(w).Encode([]map[string]any{ + { + "number": 299, + "body": "- [ ] #212\n- [x] #213", + "state": "open", + }, + }) + return + } + json.NewEncoder(w).Encode([]map[string]any{}) + }) + + // GET /repos/host-uk/core/pulls?state=open + mux.HandleFunc("/repos/host-uk/core/pulls", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode([]map[string]any{ + { + "number": 316, + "state": "open", + "draft": true, + "mergeable_state": "clean", + "body": "Closes #212", + "head": map[string]any{"sha": "abc123"}, + }, + }) + }) + + // GET 
/repos/host-uk/core/commits/abc123/check-suites + mux.HandleFunc("/repos/host-uk/core/commits/", func(w http.ResponseWriter, r *http.Request) { + json.NewEncoder(w).Encode(map[string]any{ + "check_suites": []map[string]any{ + {"conclusion": "success", "status": "completed"}, + }, + }) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + src := NewGitHubSource(Config{ + Repos: []string{"host-uk/core"}, + APIURL: server.URL, + }) + + signals, err := src.Poll(context.Background()) + require.NoError(t, err) + require.NotEmpty(t, signals) + + assert.Equal(t, 316, signals[0].PRNumber) + assert.True(t, signals[0].IsDraft) + assert.Equal(t, "host-uk", signals[0].RepoOwner) + assert.Equal(t, "core", signals[0].RepoName) +} + +func TestGitHubSource_Name_Good(t *testing.T) { + src := NewGitHubSource(Config{Repos: []string{"host-uk/core"}}) + assert.Equal(t, "github", src.Name()) +} +``` + +**Step 2: Run test to verify it fails** + +Run: `go test ./pkg/jobrunner/github/ -v -count=1` +Expected: FAIL — package does not exist. + +**Step 3: Write `signals.go`** — PR/issue data structures and signal extraction + +```go +package github + +import ( + "regexp" + "strconv" + "strings" + "time" + + "github.com/host-uk/core/pkg/jobrunner" +) + +// ghIssue is the minimal structure from GitHub Issues API. +type ghIssue struct { + Number int `json:"number"` + Body string `json:"body"` + State string `json:"state"` +} + +// ghPR is the minimal structure from GitHub Pull Requests API. +type ghPR struct { + Number int `json:"number"` + State string `json:"state"` + Draft bool `json:"draft"` + MergeableState string `json:"mergeable_state"` + Body string `json:"body"` + Head ghRef `json:"head"` + UpdatedAt time.Time `json:"updated_at"` +} + +type ghRef struct { + SHA string `json:"sha"` +} + +// ghCheckSuites is the response from /commits/:sha/check-suites. 
+type ghCheckSuites struct { + CheckSuites []ghCheckSuite `json:"check_suites"` +} + +type ghCheckSuite struct { + Conclusion string `json:"conclusion"` + Status string `json:"status"` +} + +// ghReviewThread counts (from GraphQL or approximated from review comments). +type ghReviewCounts struct { + Total int + Resolved int +} + +// parseEpicChildren extracts unchecked child issue numbers from an epic body. +// Matches: - [ ] #123 +var checklistRe = regexp.MustCompile(`- \[( |x)\] #(\d+)`) + +func parseEpicChildren(body string) (unchecked []int, checked []int) { + matches := checklistRe.FindAllStringSubmatch(body, -1) + for _, m := range matches { + num, _ := strconv.Atoi(m[2]) + if m[1] == "x" { + checked = append(checked, num) + } else { + unchecked = append(unchecked, num) + } + } + return +} + +// findLinkedPR finds a PR that references an issue number in its body. +// Matches: Closes #123, Fixes #123, Resolves #123 +func findLinkedPR(prs []ghPR, issueNumber int) *ghPR { + pattern := strconv.Itoa(issueNumber) + for i := range prs { + if strings.Contains(prs[i].Body, "#"+pattern) { + return &prs[i] + } + } + return nil +} + +// aggregateCheckStatus returns the overall check status from check suites. +func aggregateCheckStatus(suites []ghCheckSuite) string { + if len(suites) == 0 { + return "PENDING" + } + for _, s := range suites { + if s.Status != "completed" { + return "PENDING" + } + if s.Conclusion == "failure" || s.Conclusion == "timed_out" || s.Conclusion == "cancelled" { + return "FAILURE" + } + } + return "SUCCESS" +} + +// mergeableToString normalises GitHub's mergeable_state to our enum. +func mergeableToString(state string) string { + switch state { + case "clean", "has_hooks", "unstable": + return "MERGEABLE" + case "dirty": + return "CONFLICTING" + default: + return "UNKNOWN" + } +} + +// buildSignal creates a PipelineSignal from GitHub API data. 
+func buildSignal(owner, repo string, epic ghIssue, childNum int, pr ghPR, checks ghCheckSuites) *jobrunner.PipelineSignal { + return &jobrunner.PipelineSignal{ + EpicNumber: epic.Number, + ChildNumber: childNum, + PRNumber: pr.Number, + RepoOwner: owner, + RepoName: repo, + PRState: strings.ToUpper(pr.State), + IsDraft: pr.Draft, + Mergeable: mergeableToString(pr.MergeableState), + CheckStatus: aggregateCheckStatus(checks.CheckSuites), + LastCommitSHA: pr.Head.SHA, + LastCommitAt: pr.UpdatedAt, + } +} +``` + +**Step 4: Write `source.go`** — GitHubSource implementing JobSource + +```go +package github + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + + "github.com/host-uk/core/pkg/jobrunner" + "github.com/host-uk/core/pkg/log" + "golang.org/x/oauth2" +) + +// Config for the GitHub job source. +type Config struct { + Repos []string // "owner/repo" format + APIURL string // override for testing (default: https://api.github.com) +} + +// GitHubSource polls GitHub for pipeline signals. +type GitHubSource struct { + cfg Config + client *http.Client + etags map[string]string // URL -> ETag for conditional requests +} + +// NewGitHubSource creates a GitHub job source. +func NewGitHubSource(cfg Config) *GitHubSource { + if cfg.APIURL == "" { + cfg.APIURL = "https://api.github.com" + } + + var client *http.Client + token := os.Getenv("GITHUB_TOKEN") + if token != "" { + ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}) + client = oauth2.NewClient(context.Background(), ts) + } else { + client = http.DefaultClient + } + + return &GitHubSource{ + cfg: cfg, + client: client, + etags: make(map[string]string), + } +} + +func (g *GitHubSource) Name() string { return "github" } + +// Poll scans all configured repos for actionable pipeline signals. 
+func (g *GitHubSource) Poll(ctx context.Context) ([]*jobrunner.PipelineSignal, error) { + var all []*jobrunner.PipelineSignal + + for _, repoSpec := range g.cfg.Repos { + parts := strings.SplitN(repoSpec, "/", 2) + if len(parts) != 2 { + continue + } + owner, repo := parts[0], parts[1] + + signals, err := g.pollRepo(ctx, owner, repo) + if err != nil { + log.Info("github_source", "repo", repoSpec, "error", err) + continue + } + all = append(all, signals...) + } + + return all, nil +} + +func (g *GitHubSource) pollRepo(ctx context.Context, owner, repo string) ([]*jobrunner.PipelineSignal, error) { + // 1. Fetch epic issues + epics, err := g.fetchEpics(ctx, owner, repo) + if err != nil { + return nil, err + } + + // 2. Fetch open PRs + prs, err := g.fetchPRs(ctx, owner, repo) + if err != nil { + return nil, err + } + + var signals []*jobrunner.PipelineSignal + + for _, epic := range epics { + unchecked, _ := parseEpicChildren(epic.Body) + for _, childNum := range unchecked { + pr := findLinkedPR(prs, childNum) + if pr == nil { + continue // no PR yet for this child + } + + checks, err := g.fetchCheckSuites(ctx, owner, repo, pr.Head.SHA) + if err != nil { + log.Info("github_source", "pr", pr.Number, "check_error", err) + checks = ghCheckSuites{} + } + + signals = append(signals, buildSignal(owner, repo, epic, childNum, *pr, checks)) + } + } + + return signals, nil +} + +func (g *GitHubSource) fetchEpics(ctx context.Context, owner, repo string) ([]ghIssue, error) { + url := fmt.Sprintf("%s/repos/%s/%s/issues?labels=epic&state=open&per_page=100", g.cfg.APIURL, owner, repo) + var issues []ghIssue + return issues, g.getJSON(ctx, url, &issues) +} + +func (g *GitHubSource) fetchPRs(ctx context.Context, owner, repo string) ([]ghPR, error) { + url := fmt.Sprintf("%s/repos/%s/%s/pulls?state=open&per_page=100", g.cfg.APIURL, owner, repo) + var prs []ghPR + return prs, g.getJSON(ctx, url, &prs) +} + +func (g *GitHubSource) fetchCheckSuites(ctx context.Context, owner, repo, sha 
string) (ghCheckSuites, error) { + url := fmt.Sprintf("%s/repos/%s/%s/commits/%s/check-suites", g.cfg.APIURL, owner, repo, sha) + var result ghCheckSuites + return result, g.getJSON(ctx, url, &result) +} + +// getJSON performs a GET with conditional request support. +func (g *GitHubSource) getJSON(ctx context.Context, url string, out any) error { + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return err + } + req.Header.Set("Accept", "application/vnd.github+json") + + if etag, ok := g.etags[url]; ok { + req.Header.Set("If-None-Match", etag) + } + + resp, err := g.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + // Store ETag for next request + if etag := resp.Header.Get("ETag"); etag != "" { + g.etags[url] = etag + } + + if resp.StatusCode == http.StatusNotModified { + return nil // no change since last poll + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("HTTP %d for %s", resp.StatusCode, url) + } + + return json.NewDecoder(resp.Body).Decode(out) +} + +// Report is a no-op for GitHub (actions are performed directly via API). +func (g *GitHubSource) Report(_ context.Context, _ *jobrunner.ActionResult) error { + return nil +} +``` + +**Step 5: Run tests** + +Run: `go test ./pkg/jobrunner/github/ -v -count=1` +Expected: PASS. + +**Step 6: Commit** + +```bash +git add pkg/jobrunner/github/ +git commit -m "feat(jobrunner): add GitHub source adapter with ETag conditional requests" +``` + +--- + +### Task 5: Publish Draft Handler (`pkg/jobrunner/handlers/`) + +**Files:** +- Create: `pkg/jobrunner/handlers/publish_draft.go` +- Test: `pkg/jobrunner/handlers/publish_draft_test.go` + +**Context:** Handlers live in `pkg/jobrunner/handlers/` (root module). They use `net/http` to call GitHub REST API directly. Each handler implements `jobrunner.JobHandler`. 
+ +**Step 1: Write the test** + +```go +package handlers + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/host-uk/core/pkg/jobrunner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPublishDraft_Match_Good(t *testing.T) { + h := NewPublishDraft(nil) + signal := &jobrunner.PipelineSignal{ + IsDraft: true, + PRState: "OPEN", + CheckStatus: "SUCCESS", + } + assert.True(t, h.Match(signal)) +} + +func TestPublishDraft_Match_Bad_NotDraft(t *testing.T) { + h := NewPublishDraft(nil) + signal := &jobrunner.PipelineSignal{ + IsDraft: false, + PRState: "OPEN", + CheckStatus: "SUCCESS", + } + assert.False(t, h.Match(signal)) +} + +func TestPublishDraft_Match_Bad_ChecksFailing(t *testing.T) { + h := NewPublishDraft(nil) + signal := &jobrunner.PipelineSignal{ + IsDraft: true, + PRState: "OPEN", + CheckStatus: "FAILURE", + } + assert.False(t, h.Match(signal)) +} + +func TestPublishDraft_Execute_Good(t *testing.T) { + var calledURL string + var calledMethod string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + calledURL = r.URL.Path + calledMethod = r.Method + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"number":316}`)) + })) + defer server.Close() + + h := NewPublishDraft(&http.Client{}) + h.apiURL = server.URL + + signal := &jobrunner.PipelineSignal{ + PRNumber: 316, + RepoOwner: "host-uk", + RepoName: "core", + IsDraft: true, + PRState: "OPEN", + } + + result, err := h.Execute(context.Background(), signal) + require.NoError(t, err) + assert.True(t, result.Success) + assert.Equal(t, "publish_draft", result.Action) + assert.Equal(t, "/repos/host-uk/core/pulls/316", calledURL) + assert.Equal(t, "PATCH", calledMethod) +} +``` + +**Step 2: Run test to verify it fails** + +Run: `go test ./pkg/jobrunner/handlers/ -run TestPublishDraft -v -count=1` +Expected: FAIL — package does not exist. 
+ +**Step 3: Write the implementation** + +```go +package handlers + +import ( + "bytes" + "context" + "fmt" + "net/http" + "time" + + "github.com/host-uk/core/pkg/jobrunner" +) + +// PublishDraft marks a draft PR as ready for review. +type PublishDraft struct { + client *http.Client + apiURL string +} + +// NewPublishDraft creates a publish_draft handler. +// Pass nil client to use http.DefaultClient. +func NewPublishDraft(client *http.Client) *PublishDraft { + if client == nil { + client = http.DefaultClient + } + return &PublishDraft{ + client: client, + apiURL: "https://api.github.com", + } +} + +func (h *PublishDraft) Name() string { return "publish_draft" } + +// Match returns true for open draft PRs with passing checks. +func (h *PublishDraft) Match(s *jobrunner.PipelineSignal) bool { + return s.IsDraft && s.PRState == "OPEN" && s.CheckStatus == "SUCCESS" +} + +// Execute calls PATCH /repos/:owner/:repo/pulls/:number with draft=false. +func (h *PublishDraft) Execute(ctx context.Context, s *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) { + url := fmt.Sprintf("%s/repos/%s/%s/pulls/%d", h.apiURL, s.RepoOwner, s.RepoName, s.PRNumber) + body := bytes.NewBufferString(`{"draft":false}`) + + req, err := http.NewRequestWithContext(ctx, "PATCH", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/vnd.github+json") + req.Header.Set("Content-Type", "application/json") + + resp, err := h.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + result := &jobrunner.ActionResult{ + Action: "publish_draft", + RepoOwner: s.RepoOwner, + RepoName: s.RepoName, + PRNumber: s.PRNumber, + Timestamp: time.Now().UTC(), + } + + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + result.Success = true + } else { + result.Error = fmt.Sprintf("HTTP %d", resp.StatusCode) + } + + return result, nil +} +``` + +**Step 4: Run tests** + +Run: `go test ./pkg/jobrunner/handlers/ -v -count=1` +Expected: PASS. 
+ +**Step 5: Commit** + +```bash +git add pkg/jobrunner/handlers/ +git commit -m "feat(jobrunner): add publish_draft handler" +``` + +--- + +### Task 6: Send Fix Command Handler + +**Files:** +- Create: `pkg/jobrunner/handlers/send_fix_command.go` +- Test: `pkg/jobrunner/handlers/send_fix_command_test.go` + +**Step 1: Write the test** + +```go +package handlers + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/host-uk/core/pkg/jobrunner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSendFixCommand_Match_Good_Conflicting(t *testing.T) { + h := NewSendFixCommand(nil) + signal := &jobrunner.PipelineSignal{ + PRState: "OPEN", + Mergeable: "CONFLICTING", + } + assert.True(t, h.Match(signal)) +} + +func TestSendFixCommand_Match_Good_UnresolvedThreads(t *testing.T) { + h := NewSendFixCommand(nil) + signal := &jobrunner.PipelineSignal{ + PRState: "OPEN", + Mergeable: "MERGEABLE", + ThreadsTotal: 3, + ThreadsResolved: 1, + CheckStatus: "FAILURE", + } + assert.True(t, h.Match(signal)) +} + +func TestSendFixCommand_Match_Bad_Clean(t *testing.T) { + h := NewSendFixCommand(nil) + signal := &jobrunner.PipelineSignal{ + PRState: "OPEN", + Mergeable: "MERGEABLE", + CheckStatus: "SUCCESS", + ThreadsTotal: 0, + ThreadsResolved: 0, + } + assert.False(t, h.Match(signal)) +} + +func TestSendFixCommand_Execute_Good_Conflict(t *testing.T) { + var postedBody map[string]string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + json.NewDecoder(r.Body).Decode(&postedBody) + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"id":1}`)) + })) + defer server.Close() + + h := NewSendFixCommand(&http.Client{}) + h.apiURL = server.URL + + signal := &jobrunner.PipelineSignal{ + PRNumber: 296, + RepoOwner: "host-uk", + RepoName: "core", + PRState: "OPEN", + Mergeable: "CONFLICTING", + } + + result, err := h.Execute(context.Background(), signal) + 
require.NoError(t, err) + assert.True(t, result.Success) + assert.Contains(t, postedBody["body"], "fix the merge conflict") +} +``` + +**Step 2: Run test to verify it fails** + +Run: `go test ./pkg/jobrunner/handlers/ -run TestSendFixCommand -v -count=1` +Expected: FAIL — `NewSendFixCommand` undefined. + +**Step 3: Write the implementation** + +```go +package handlers + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/host-uk/core/pkg/jobrunner" +) + +// SendFixCommand comments on a PR to request a fix. +type SendFixCommand struct { + client *http.Client + apiURL string +} + +func NewSendFixCommand(client *http.Client) *SendFixCommand { + if client == nil { + client = http.DefaultClient + } + return &SendFixCommand{client: client, apiURL: "https://api.github.com"} +} + +func (h *SendFixCommand) Name() string { return "send_fix_command" } + +// Match returns true for open PRs that are conflicting OR have unresolved +// review threads with failing checks (indicating reviews need fixing). +func (h *SendFixCommand) Match(s *jobrunner.PipelineSignal) bool { + if s.PRState != "OPEN" { + return false + } + if s.Mergeable == "CONFLICTING" { + return true + } + if s.HasUnresolvedThreads() && s.CheckStatus == "FAILURE" { + return true + } + return false +} + +// Execute posts a comment with the appropriate fix command. +func (h *SendFixCommand) Execute(ctx context.Context, s *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) { + msg := "Can you fix the code reviews?" + if s.Mergeable == "CONFLICTING" { + msg = "Can you fix the merge conflict?" 
+ } + + url := fmt.Sprintf("%s/repos/%s/%s/issues/%d/comments", h.apiURL, s.RepoOwner, s.RepoName, s.PRNumber) + payload, _ := json.Marshal(map[string]string{"body": msg}) + + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload)) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/vnd.github+json") + req.Header.Set("Content-Type", "application/json") + + resp, err := h.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + result := &jobrunner.ActionResult{ + Action: "send_fix_command", + RepoOwner: s.RepoOwner, + RepoName: s.RepoName, + PRNumber: s.PRNumber, + Timestamp: time.Now().UTC(), + } + + if resp.StatusCode == http.StatusCreated { + result.Success = true + } else { + result.Error = fmt.Sprintf("HTTP %d", resp.StatusCode) + } + + return result, nil +} +``` + +**Step 4: Run tests** + +Run: `go test ./pkg/jobrunner/handlers/ -v -count=1` +Expected: PASS. + +**Step 5: Commit** + +```bash +git add pkg/jobrunner/handlers/send_fix_command.go pkg/jobrunner/handlers/send_fix_command_test.go +git commit -m "feat(jobrunner): add send_fix_command handler" +``` + +--- + +### Task 7: Remaining Handlers (enable_auto_merge, tick_parent, close_child) + +**Files:** +- Create: `pkg/jobrunner/handlers/enable_auto_merge.go` + test +- Create: `pkg/jobrunner/handlers/tick_parent.go` + test +- Create: `pkg/jobrunner/handlers/close_child.go` + test + +**Context:** Same pattern as Tasks 5-6. Each handler: Match checks signal conditions, Execute calls GitHub REST API. Tests use httptest. + +**Step 1: Write tests for all three** (one test file per handler, same pattern as above) + +**enable_auto_merge:** +- Match: `PRState=OPEN && Mergeable=MERGEABLE && CheckStatus=SUCCESS && !IsDraft && ThreadsTotal==ThreadsResolved` +- Execute: `PUT /repos/:owner/:repo/pulls/:number/merge` with `merge_method=squash` — actually, auto-merge uses `gh api` to enable. 
However, there is no standard REST endpoint for enabling auto-merge; it is enabled via the GraphQL `enablePullRequestAutoMerge` mutation:
+
+```graphql
+mutation { enablePullRequestAutoMerge(input: {pullRequestId: "..."}) { ... } }
+```
+
+**Simpler approach:** Shell out to `gh pr merge --auto -R owner/repo`. This is what the pipeline flow does today. Let's use `os/exec` with the `gh` CLI.
+
+```go
+// enable_auto_merge.go
+package handlers
+
+import (
+	"context"
+	"fmt"
+	"os/exec"
+	"time"
+
+	"github.com/host-uk/core/pkg/jobrunner"
+)
+
+type EnableAutoMerge struct{}
+
+func NewEnableAutoMerge() *EnableAutoMerge { return &EnableAutoMerge{} }
+
+func (h *EnableAutoMerge) Name() string { return "enable_auto_merge" }
+
+func (h *EnableAutoMerge) Match(s *jobrunner.PipelineSignal) bool {
+	return s.PRState == "OPEN" &&
+		!s.IsDraft &&
+		s.Mergeable == "MERGEABLE" &&
+		s.CheckStatus == "SUCCESS" &&
+		!s.HasUnresolvedThreads()
+}
+
+func (h *EnableAutoMerge) Execute(ctx context.Context, s *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
+	cmd := exec.CommandContext(ctx, "gh", "pr", "merge", "--auto",
+		fmt.Sprintf("%d", s.PRNumber),
+		"-R", s.RepoFullName(),
+	)
+	output, err := cmd.CombinedOutput()
+
+	result := &jobrunner.ActionResult{
+		Action:    "enable_auto_merge",
+		RepoOwner: s.RepoOwner,
+		RepoName:  s.RepoName,
+		PRNumber:  s.PRNumber,
+		Timestamp: time.Now().UTC(),
+	}
+
+	if err != nil {
+		result.Error = fmt.Sprintf("%v: %s", err, string(output))
+	} else {
+		result.Success = true
+	}
+
+	return result, nil
+}
+```
+
+**tick_parent and close_child** follow the same `gh` CLI pattern:
+- `tick_parent`: Reads epic issue body, checks the child's checkbox, updates via `gh issue edit`
+- 
`close_child`: `gh issue close -R owner/repo`
+
+**Steps 2-5:** Same TDD cycle as Tasks 5-6. Write test, verify fail, implement, verify pass, commit.
+
+For brevity, the exact test code follows the same pattern. Key test assertions:
+- `tick_parent`: Verify `gh issue edit` is called with updated body
+- `close_child`: Verify `gh issue close` is called
+- `enable_auto_merge`: Verify `gh pr merge --auto` is called
+
+**Testability:** Use a command factory variable for mocking `exec.Command`:
+
+```go
+// In each handler file:
+var execCommand = exec.CommandContext
+
+// In tests:
+originalExecCommand := execCommand
+defer func() { execCommand = originalExecCommand }()
+execCommand = func(ctx context.Context, name string, args ...string) *exec.Cmd {
+	// return a mock command
+}
+```
+
+Note: for this mock to take effect, the handler implementations (including the `enable_auto_merge` snippet in Task 7) must call the `execCommand` variable rather than `exec.CommandContext` directly.
+
+**Step 6: Commit**
+
+```bash
+git add pkg/jobrunner/handlers/enable_auto_merge.go pkg/jobrunner/handlers/enable_auto_merge_test.go
+git add pkg/jobrunner/handlers/tick_parent.go pkg/jobrunner/handlers/tick_parent_test.go
+git add pkg/jobrunner/handlers/close_child.go pkg/jobrunner/handlers/close_child_test.go
+git commit -m "feat(jobrunner): add enable_auto_merge, tick_parent, close_child handlers"
+```
+
+---
+
+### Task 8: Resolve Threads Handler
+
+**Files:**
+- Create: `pkg/jobrunner/handlers/resolve_threads.go`
+- Test: `pkg/jobrunner/handlers/resolve_threads_test.go`
+
+**Context:** This handler is special — it needs GraphQL to resolve review threads (no REST endpoint exists). Use a minimal GraphQL client (raw `net/http` POST to `https://api.github.com/graphql`). 
+ +**Step 1: Write the test** + +```go +package handlers + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/host-uk/core/pkg/jobrunner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestResolveThreads_Match_Good(t *testing.T) { + h := NewResolveThreads(nil) + signal := &jobrunner.PipelineSignal{ + PRState: "OPEN", + ThreadsTotal: 3, + ThreadsResolved: 1, + } + assert.True(t, h.Match(signal)) +} + +func TestResolveThreads_Match_Bad_AllResolved(t *testing.T) { + h := NewResolveThreads(nil) + signal := &jobrunner.PipelineSignal{ + PRState: "OPEN", + ThreadsTotal: 3, + ThreadsResolved: 3, + } + assert.False(t, h.Match(signal)) +} + +func TestResolveThreads_Execute_Good(t *testing.T) { + callCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + callCount++ + var req map[string]any + json.NewDecoder(r.Body).Decode(&req) + + query := req["query"].(string) + + // First call: fetch threads + if callCount == 1 { + json.NewEncoder(w).Encode(map[string]any{ + "data": map[string]any{ + "repository": map[string]any{ + "pullRequest": map[string]any{ + "reviewThreads": map[string]any{ + "nodes": []map[string]any{ + {"id": "PRRT_1", "isResolved": false}, + {"id": "PRRT_2", "isResolved": true}, + }, + }, + }, + }, + }, + }) + return + } + + // Subsequent calls: resolve thread + json.NewEncoder(w).Encode(map[string]any{ + "data": map[string]any{ + "resolveReviewThread": map[string]any{ + "thread": map[string]any{"isResolved": true}, + }, + }, + }) + })) + defer server.Close() + + h := NewResolveThreads(&http.Client{}) + h.graphqlURL = server.URL + + signal := &jobrunner.PipelineSignal{ + PRNumber: 315, + RepoOwner: "host-uk", + RepoName: "core", + PRState: "OPEN", + ThreadsTotal: 2, + ThreadsResolved: 1, + } + + result, err := h.Execute(context.Background(), signal) + require.NoError(t, err) + assert.True(t, result.Success) + 
assert.Equal(t, 2, callCount) // 1 fetch + 1 resolve (only PRRT_1 unresolved) +} +``` + +**Step 2: Run test to verify it fails** + +Run: `go test ./pkg/jobrunner/handlers/ -run TestResolveThreads -v -count=1` +Expected: FAIL — `NewResolveThreads` undefined. + +**Step 3: Write the implementation** + +```go +package handlers + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/host-uk/core/pkg/jobrunner" +) + +// ResolveThreads resolves all unresolved review threads on a PR. +type ResolveThreads struct { + client *http.Client + graphqlURL string +} + +func NewResolveThreads(client *http.Client) *ResolveThreads { + if client == nil { + client = http.DefaultClient + } + return &ResolveThreads{ + client: client, + graphqlURL: "https://api.github.com/graphql", + } +} + +func (h *ResolveThreads) Name() string { return "resolve_threads" } + +func (h *ResolveThreads) Match(s *jobrunner.PipelineSignal) bool { + return s.PRState == "OPEN" && s.HasUnresolvedThreads() +} + +func (h *ResolveThreads) Execute(ctx context.Context, s *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) { + // 1. Fetch unresolved thread IDs + threadIDs, err := h.fetchUnresolvedThreads(ctx, s.RepoOwner, s.RepoName, s.PRNumber) + if err != nil { + return nil, fmt.Errorf("fetch threads: %w", err) + } + + // 2. 
Resolve each thread + resolved := 0 + for _, id := range threadIDs { + if err := h.resolveThread(ctx, id); err != nil { + // Log but continue — some threads may not be resolvable + continue + } + resolved++ + } + + result := &jobrunner.ActionResult{ + Action: "resolve_threads", + RepoOwner: s.RepoOwner, + RepoName: s.RepoName, + PRNumber: s.PRNumber, + Success: resolved > 0, + Timestamp: time.Now().UTC(), + } + + if resolved == 0 && len(threadIDs) > 0 { + result.Error = fmt.Sprintf("0/%d threads resolved", len(threadIDs)) + } + + return result, nil +} + +func (h *ResolveThreads) fetchUnresolvedThreads(ctx context.Context, owner, repo string, pr int) ([]string, error) { + query := fmt.Sprintf(`{ + repository(owner: %q, name: %q) { + pullRequest(number: %d) { + reviewThreads(first: 100) { + nodes { id isResolved } + } + } + } + }`, owner, repo, pr) + + resp, err := h.graphql(ctx, query) + if err != nil { + return nil, err + } + + type thread struct { + ID string `json:"id"` + IsResolved bool `json:"isResolved"` + } + var result struct { + Data struct { + Repository struct { + PullRequest struct { + ReviewThreads struct { + Nodes []thread `json:"nodes"` + } `json:"reviewThreads"` + } `json:"pullRequest"` + } `json:"repository"` + } `json:"data"` + } + + if err := json.Unmarshal(resp, &result); err != nil { + return nil, err + } + + var ids []string + for _, t := range result.Data.Repository.PullRequest.ReviewThreads.Nodes { + if !t.IsResolved { + ids = append(ids, t.ID) + } + } + return ids, nil +} + +func (h *ResolveThreads) resolveThread(ctx context.Context, threadID string) error { + mutation := fmt.Sprintf(`mutation { + resolveReviewThread(input: {threadId: %q}) { + thread { isResolved } + } + }`, threadID) + + _, err := h.graphql(ctx, mutation) + return err +} + +func (h *ResolveThreads) graphql(ctx context.Context, query string) (json.RawMessage, error) { + payload, _ := json.Marshal(map[string]string{"query": query}) + + req, err := 
http.NewRequestWithContext(ctx, "POST", h.graphqlURL, bytes.NewReader(payload)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + resp, err := h.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("GraphQL HTTP %d", resp.StatusCode) + } + + var raw json.RawMessage + err = json.NewDecoder(resp.Body).Decode(&raw) + return raw, err +} +``` + +**Step 4: Run tests** + +Run: `go test ./pkg/jobrunner/handlers/ -v -count=1` +Expected: PASS. + +**Step 5: Commit** + +```bash +git add pkg/jobrunner/handlers/resolve_threads.go pkg/jobrunner/handlers/resolve_threads_test.go +git commit -m "feat(jobrunner): add resolve_threads handler with GraphQL" +``` + +--- + +### Task 9: Headless Mode in core-ide + +**Files:** +- Modify: `internal/core-ide/main.go` + +**Context:** core-ide currently always creates a Wails app. We need to branch: headless starts the poller + MCP bridge directly; desktop mode keeps the existing Wails app with poller as an optional service. + +Note: core-ide has its own `go.mod` (`github.com/host-uk/core/internal/core-ide`). The jobrunner package lives in the root module. We need to add the root module as a dependency of core-ide, OR move the handler wiring into the root module. **Simplest approach:** core-ide imports `github.com/host-uk/core/pkg/jobrunner` — this requires adding the root module as a dependency in core-ide's go.mod. + +**Step 1: Update core-ide go.mod** + +Run: `cd /Users/snider/Code/host-uk/core/internal/core-ide && go get github.com/host-uk/core/pkg/jobrunner` + +If this fails because the package isn't published yet, use a `replace` directive temporarily: + +``` +replace github.com/host-uk/core => ../.. +``` + +Then `go mod tidy`. + +**Step 2: Modify main.go** + +Add `--headless` flag parsing, `hasDisplay()` detection, and the headless startup path. + +The headless path: +1. 
Create `cli.Daemon` with PID file + health server +2. Create `Journal` at `~/.core/journal/` +3. Create `GitHubSource` with repos from config/env +4. Create all handlers +5. Create `Poller` with sources + handlers + journal +6. Start daemon, run poller in goroutine, block on `daemon.Run(ctx)` + +The desktop path: +- Existing Wails app code, unchanged for now +- Poller can be added as a Wails service later + +```go +// At top of main(): +headless := false +for _, arg := range os.Args[1:] { + if arg == "--headless" { + headless = true + } +} + +if headless || !hasDisplay() { + startHeadless() + return +} +// ... existing Wails app code ... +``` + +**Step 3: Run core-ide with --headless --dry-run to verify** + +Run: `cd /Users/snider/Code/host-uk/core/internal/core-ide && go run . --headless --dry-run` +Expected: Starts, logs poll cycle, exits cleanly on Ctrl+C. + +**Step 4: Commit** + +```bash +git add internal/core-ide/main.go internal/core-ide/go.mod internal/core-ide/go.sum +git commit -m "feat(core-ide): add headless mode with job runner poller" +``` + +--- + +### Task 10: Register Handlers as MCP Tools + +**Files:** +- Modify: `internal/core-ide/mcp_bridge.go` + +**Context:** Register each JobHandler as an MCP tool so they're callable via the HTTP API (POST /mcp/call). This lets external tools invoke handlers manually. + +**Step 1: Add handler registration to MCPBridge** + +Add a `handlers` field and register them in `ServiceStartup`. Add a `job_*` prefix to distinguish from webview tools. + +```go +// In handleMCPTools — append job handler tools to the tool list +// In handleMCPCall — add a job_* dispatch path +``` + +**Step 2: Test via curl** + +Run: `curl -X POST http://localhost:9877/mcp/call -d '{"tool":"job_publish_draft","params":{"pr":316,"owner":"host-uk","repo":"core"}}'` +Expected: Returns handler result JSON. 
+ +**Step 3: Commit** + +```bash +git add internal/core-ide/mcp_bridge.go +git commit -m "feat(core-ide): register job handlers as MCP tools" +``` + +--- + +### Task 11: Updater Integration in core-ide + +**Files:** +- Modify: `internal/core-ide/main.go` (headless startup path) + +**Context:** Wire the existing `internal/cmd/updater` package into core-ide's headless startup. Check for updates on startup, auto-apply in headless mode. + +**Step 1: Add updater to headless startup** + +```go +// In startHeadless(), before starting poller: +updaterSvc, err := updater.NewUpdateService(updater.UpdateServiceConfig{ + RepoURL: "https://github.com/host-uk/core", + Channel: "alpha", + CheckOnStartup: updater.CheckAndUpdateOnStartup, +}) +if err == nil { + _ = updaterSvc.Start() // will auto-update and restart if newer version exists +} +``` + +**Step 2: Test by running headless** + +Run: `core-ide --headless` — should check for updates on startup, then start polling. + +**Step 3: Commit** + +```bash +git add internal/core-ide/main.go +git commit -m "feat(core-ide): integrate updater for headless auto-update" +``` + +--- + +### Task 12: Systemd Service File + +**Files:** +- Create: `internal/core-ide/build/linux/core-ide.service` + +**Step 1: Write the systemd unit** + +```ini +[Unit] +Description=Core IDE Job Runner +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/local/bin/core-ide --headless +Restart=always +RestartSec=10 +Environment=CORE_DAEMON=1 +Environment=GITHUB_TOKEN= + +[Install] +WantedBy=multi-user.target +``` + +**Step 2: Add to nfpm.yaml** so it's included in the Linux package: + +In `internal/core-ide/build/linux/nfpm/nfpm.yaml`, add to `contents`: +```yaml +- src: ../core-ide.service + dst: /etc/systemd/system/core-ide.service + type: config +``` + +**Step 3: Commit** + +```bash +git add internal/core-ide/build/linux/core-ide.service internal/core-ide/build/linux/nfpm/nfpm.yaml +git commit -m "feat(core-ide): 
add systemd service for headless mode" +``` + +--- + +### Task 13: Run Full Test Suite + +**Step 1: Run all jobrunner tests** + +Run: `go test ./pkg/jobrunner/... -v -count=1` +Expected: All tests pass. + +**Step 2: Run core-ide build** + +Run: `cd /Users/snider/Code/host-uk/core/internal/core-ide && go build -o /dev/null .` +Expected: Builds without errors. + +**Step 3: Run dry-run integration test** + +Run: `cd /Users/snider/Code/host-uk/core/internal/core-ide && go run . --headless --dry-run` +Expected: Polls GitHub, logs signals, takes no actions, exits on Ctrl+C. + +--- + +## Batch Execution Plan + +| Batch | Tasks | Description | +|-------|-------|-------------| +| 0 | 0 | Go workspace setup | +| 1 | 1-2 | Core types + Journal | +| 2 | 3-4 | Poller + GitHub Source | +| 3 | 5-8 | All handlers | +| 4 | 9-11 | core-ide integration (headless, MCP, updater) | +| 5 | 12-13 | Systemd + verification | diff --git a/go.mod b/go.mod index 775cc0a..ea9b957 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,6 @@ require ( github.com/42wim/httpsig v1.2.3 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.3.0 // indirect - github.com/Snider/Enchantrix v0.0.2 // indirect github.com/TwiN/go-color v1.4.1 // indirect github.com/adrg/xdg v0.5.3 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect @@ -62,8 +61,6 @@ require ( github.com/godbus/dbus/v5 v5.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect - github.com/google/go-github/v39 v39.2.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect github.com/google/jsonschema-go v0.4.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect @@ -80,7 +77,6 @@ require ( github.com/mailru/easyjson v0.9.1 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - 
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect @@ -92,7 +88,6 @@ require ( github.com/rivo/uniseg v0.4.7 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/samber/lo v1.52.0 // indirect - github.com/schollz/progressbar/v3 v3.18.0 // indirect github.com/sergi/go-diff v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/skeema/knownhosts v1.3.2 // indirect diff --git a/go.sum b/go.sum index 587df43..58a940c 100644 --- a/go.sum +++ b/go.sum @@ -14,12 +14,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= -github.com/Snider/Borg v0.1.0 h1:tLvrytPMIM2To0xByYP+KHLcT9pg9P9y9uRTyG6r9oc= -github.com/Snider/Borg v0.1.0/go.mod h1:0GMzdXYzdFZpR25IFne7ErqV/YFQHsX1THm1BbncMPo= github.com/Snider/Borg v0.2.0 h1:iCyDhY4WTXi39+FexRwXbn2YpZ2U9FUXVXDZk9xRCXQ= github.com/Snider/Borg v0.2.0/go.mod h1:TqlKnfRo9okioHbgrZPfWjQsztBV0Nfskz4Om1/vdMY= -github.com/Snider/Enchantrix v0.0.2 h1:ExZQiBhfS/p/AHFTKhY80TOd+BXZjK95EzByAEgwvjs= -github.com/Snider/Enchantrix v0.0.2/go.mod h1:CtFcLAvnDT1KcuF1JBb/DJj0KplY8jHryO06KzQ1hsQ= github.com/TwiN/go-color v1.4.1 h1:mqG0P/KBgHKVqmtL5ye7K0/Gr4l6hTksPgTgMk3mUzc= github.com/TwiN/go-color v1.4.1/go.mod h1:WcPf/jtiW95WBIsEeY1Lc/b8aaWoiqQpu5cf8WFxu+s= github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= @@ -101,18 +97,10 @@ github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeD github.com/golang-jwt/jwt/v5 
v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-github/v39 v39.2.0 h1:rNNM311XtPOz5rDdsJXAp2o8F67X9FnROXTvto3aSnQ= -github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -168,8 +156,6 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/minio/selfupdate v0.6.0 h1:i76PgT0K5xO9+hjzKcacQtO7+MjJ4JKA8Ak8XQ9DDwU= github.com/minio/selfupdate v0.6.0/go.mod h1:bO02GTIPCMQFTEvE5h4DjYB58bCoZ35XLeBf0buTDdM= 
-github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/modelcontextprotocol/go-sdk v1.2.0 h1:Y23co09300CEk8iZ/tMxIX1dVmKZkzoSBZOpJwUnc/s= github.com/modelcontextprotocol/go-sdk v1.2.0/go.mod h1:6fM3LCm3yV7pAs8isnKLn07oKtB0MP9LHd3DfAcKw10= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= @@ -209,8 +195,6 @@ github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDc github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw= github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= -github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= -github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -290,7 +274,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= @@ -300,12 +283,10 @@ golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHi golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= @@ -329,7 +310,6 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= @@ -337,10 +317,8 @@ golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba h1:UKgtfRM7Yh93Sya0Fo8ZzhDP4qBckrrxEr2oF5UIVb8= google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= diff --git a/go.work b/go.work new file mode 100644 index 0000000..feefe4f --- /dev/null +++ b/go.work @@ -0,0 +1,7 @@ +go 1.25.5 + +use ( + . 
+ ../core-gui + ./internal/core-ide +) diff --git a/internal/core-ide/build/linux/core-ide.service b/internal/core-ide/build/linux/core-ide.service new file mode 100644 index 0000000..5b7e3a4 --- /dev/null +++ b/internal/core-ide/build/linux/core-ide.service @@ -0,0 +1,15 @@ +[Unit] +Description=Core IDE Job Runner +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/local/bin/core-ide --headless +Restart=always +RestartSec=10 +Environment=CORE_DAEMON=1 +Environment=GITHUB_TOKEN= + +[Install] +WantedBy=multi-user.target diff --git a/internal/core-ide/build/linux/nfpm/nfpm.yaml b/internal/core-ide/build/linux/nfpm/nfpm.yaml index c993b68..ab0d3a7 100644 --- a/internal/core-ide/build/linux/nfpm/nfpm.yaml +++ b/internal/core-ide/build/linux/nfpm/nfpm.yaml @@ -23,6 +23,9 @@ contents: dst: "/usr/share/icons/hicolor/128x128/apps/core-ide.png" - src: "./build/linux/core-ide.desktop" dst: "/usr/share/applications/core-ide.desktop" + - src: "./build/linux/core-ide.service" + dst: "/etc/systemd/system/core-ide.service" + type: config # Default dependencies for Debian 12/Ubuntu 22.04+ with WebKit 4.1 depends: diff --git a/internal/core-ide/go.mod b/internal/core-ide/go.mod index db4550b..7c92ead 100644 --- a/internal/core-ide/go.mod +++ b/internal/core-ide/go.mod @@ -6,7 +6,12 @@ require github.com/wailsapp/wails/v3 v3.0.0-alpha.64 require ( github.com/coder/websocket v1.8.14 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/spf13/cobra v1.10.2 // indirect + github.com/spf13/pflag v1.0.10 // indirect + golang.org/x/oauth2 v0.34.0 // indirect + golang.org/x/term v0.39.0 // indirect ) require ( @@ -27,7 +32,8 @@ require ( github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 - github.com/host-uk/core-gui v0.0.0-20260131214111-6e2460834a87 + 
github.com/host-uk/core v0.0.0 + github.com/host-uk/core-gui v0.0.0 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jchv/go-winloader v0.0.0-20250406163304-c1995be93bd1 // indirect github.com/kevinburke/ssh_config v1.4.0 // indirect @@ -50,3 +56,7 @@ require ( golang.org/x/text v0.33.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect ) + +replace github.com/host-uk/core => ../.. + +replace github.com/host-uk/core-gui => ../../../core-gui diff --git a/internal/core-ide/go.sum b/internal/core-ide/go.sum index a907d9b..920d80d 100644 --- a/internal/core-ide/go.sum +++ b/internal/core-ide/go.sum @@ -17,11 +17,13 @@ github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g= github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A= github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/elazarl/goproxy v1.7.2 
h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= @@ -52,7 +54,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/host-uk/core-gui v0.0.0-20260131214111-6e2460834a87/go.mod h1:yOBnW4of0/82O6GSxFl2Pxepq9yTlJg2pLVwaU9cWHo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jchv/go-winloader v0.0.0-20250406163304-c1995be93bd1 h1:njuLRcjAuMKr7kI3D85AXWkw6/+v9PwtV6M6o11sWHQ= @@ -89,13 +92,15 @@ github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmd github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 
h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw= github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= @@ -103,6 +108,11 @@ github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepq github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/skeema/knownhosts v1.3.2 h1:EDL9mgf4NzwMXCTfaxSD/o/a5fxDw/xL9nkU28JjdBg= github.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -114,6 +124,7 @@ github.com/wailsapp/wails/v3 v3.0.0-alpha.64 h1:xAhLFVfdbg7XdZQ5mMQmBv2BglWu8hMq github.com/wailsapp/wails/v3 v3.0.0-alpha.64/go.mod h1:zvgNL/mlFcX8aRGu6KOz9AHrMmTBD+4hJRQIONqF/Yw= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= 
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= @@ -122,6 +133,8 @@ golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHi golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/internal/core-ide/headless.go b/internal/core-ide/headless.go new file mode 100644 index 0000000..f0d5ecb --- /dev/null +++ b/internal/core-ide/headless.go @@ -0,0 +1,132 @@ +package main + +import ( + "context" + "log" + "os" + "os/signal" + "path/filepath" + "runtime" + "strings" + "syscall" + "time" + + "github.com/host-uk/core/pkg/cli" + "github.com/host-uk/core/pkg/jobrunner" + "github.com/host-uk/core/pkg/jobrunner/github" + "github.com/host-uk/core/pkg/jobrunner/handlers" +) + +// hasDisplay returns true if a graphical display is available. 
+func hasDisplay() bool {
+	if runtime.GOOS == "windows" {
+		return true
+	}
+	return os.Getenv("DISPLAY") != "" || os.Getenv("WAYLAND_DISPLAY") != ""
+}
+
+// startHeadless runs the job runner in daemon mode without GUI.
+func startHeadless() {
+	log.Println("Starting Core IDE in headless mode...")
+
+	// Signal handling
+	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+	defer cancel()
+
+	// TODO: Updater integration — the internal/cmd/updater package cannot be
+	// imported from the core-ide module due to Go's internal package restriction
+	// (separate modules). Move updater to pkg/updater or export a public API to
+	// enable auto-update in headless mode.
+
+	// Journal
+	journalDir := filepath.Join(os.Getenv("HOME"), ".core", "journal")
+	journal, err := jobrunner.NewJournal(journalDir)
+	if err != nil {
+		log.Fatalf("Failed to create journal: %v", err)
+	}
+
+	// GitHub source — repos from CORE_REPOS env var or default
+	repos := parseRepoList(os.Getenv("CORE_REPOS"))
+	if len(repos) == 0 {
+		repos = []string{"host-uk/core", "host-uk/core-php", "host-uk/core-tenant", "host-uk/core-admin"}
+	}
+
+	ghSource := github.NewGitHubSource(github.Config{
+		Repos: repos,
+	})
+
+	// Handlers (order matters — first match wins)
+	publishDraft := handlers.NewPublishDraftHandler(nil, "")
+	sendFix := handlers.NewSendFixCommandHandler(nil, "")
+	resolveThreads := handlers.NewResolveThreadsHandler(nil, "")
+	enableAutoMerge := handlers.NewEnableAutoMergeHandler()
+	tickParent := handlers.NewTickParentHandler()
+
+	// Build poller
+	poller := jobrunner.NewPoller(jobrunner.PollerConfig{
+		Sources: []jobrunner.JobSource{ghSource},
+		Handlers: []jobrunner.JobHandler{
+			publishDraft,
+			sendFix,
+			resolveThreads,
+			enableAutoMerge,
+			tickParent,
+		},
+		Journal:      journal,
+		PollInterval: 60 * time.Second,
+		DryRun:       isDryRun(),
+	})
+
+	// Daemon with PID file and health check
+	daemon := cli.NewDaemon(cli.DaemonOptions{
+		PIDFile:    filepath.Join(os.Getenv("HOME"), ".core", "core-ide.pid"),
+		HealthAddr: "127.0.0.1:9878",
+	})
+
+	if err := daemon.Start(); err != nil {
+		log.Fatalf("Failed to start daemon: %v", err)
+	}
+	daemon.SetReady(true)
+
+	// Start MCP bridge in headless mode too (port 9877)
+	go startHeadlessMCP(poller)
+
+	log.Printf("Polling %d repos every %v (dry-run: %v)", len(repos), 60*time.Second, poller.DryRun())
+
+	// Run poller in goroutine, block on context
+	go func() {
+		if err := poller.Run(ctx); err != nil && err != context.Canceled {
+			log.Printf("Poller error: %v", err)
+		}
+	}()
+
+	// Block until signal
+	<-ctx.Done()
+	log.Println("Shutting down...")
+	_ = daemon.Stop()
+}
+
+// parseRepoList splits a comma-separated repo list.
+func parseRepoList(s string) []string {
+	if s == "" {
+		return nil
+	}
+	var repos []string
+	for _, r := range strings.Split(s, ",") {
+		r = strings.TrimSpace(r)
+		if r != "" {
+			repos = append(repos, r)
+		}
+	}
+	return repos
+}
+
+// isDryRun checks if --dry-run flag was passed.
+func isDryRun() bool {
+	for _, arg := range os.Args[1:] {
+		if arg == "--dry-run" {
+			return true
+		}
+	}
+	return false
+}
diff --git a/internal/core-ide/headless_mcp.go b/internal/core-ide/headless_mcp.go
new file mode 100644
index 0000000..a9752ae
--- /dev/null
+++ b/internal/core-ide/headless_mcp.go
@@ -0,0 +1,90 @@
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+
+	"github.com/host-uk/core/pkg/jobrunner"
+)
+
+// startHeadlessMCP starts a minimal MCP HTTP server for headless mode.
+// It exposes job handler tools and health endpoints.
+func startHeadlessMCP(poller *jobrunner.Poller) {
+	mux := http.NewServeMux()
+
+	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		json.NewEncoder(w).Encode(map[string]any{
+			"status": "ok",
+			"mode":   "headless",
+			"cycle":  poller.Cycle(),
+		})
+	})
+
+	mux.HandleFunc("/mcp", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		json.NewEncoder(w).Encode(map[string]any{
+			"name":    "core-ide",
+			"version": "0.1.0",
+			"mode":    "headless",
+		})
+	})
+
+	mux.HandleFunc("/mcp/tools", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		tools := []map[string]string{
+			{"name": "job_status", "description": "Get poller status (cycle count, dry-run)"},
+			{"name": "job_set_dry_run", "description": "Enable/disable dry-run mode"},
+			{"name": "job_run_once", "description": "Trigger a single poll-dispatch cycle"},
+		}
+		json.NewEncoder(w).Encode(map[string]any{"tools": tools})
+	})
+
+	mux.HandleFunc("/mcp/call", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		if r.Method != "POST" {
+			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+			return
+		}
+
+		var req struct {
+			Tool   string         `json:"tool"`
+			Params map[string]any `json:"params"`
+		}
+		r.Body = http.MaxBytesReader(w, r.Body, 1<<20)
+		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+			http.Error(w, "invalid request body", http.StatusBadRequest)
+			return
+		}
+
+		switch req.Tool {
+		case "job_status":
+			json.NewEncoder(w).Encode(map[string]any{
+				"cycle":   poller.Cycle(),
+				"dry_run": poller.DryRun(),
+			})
+		case "job_set_dry_run":
+			if v, ok := req.Params["enabled"].(bool); ok {
+				poller.SetDryRun(v)
+			}
+			json.NewEncoder(w).Encode(map[string]any{"dry_run": poller.DryRun()})
+		case "job_run_once":
+			err := poller.RunOnce(context.Background())
+			json.NewEncoder(w).Encode(map[string]any{
+				"success": err == nil,
+				"cycle":   poller.Cycle(),
+			})
+		default:
+			json.NewEncoder(w).Encode(map[string]any{"error": "unknown tool"})
+		}
+	})
+
+	addr := fmt.Sprintf("127.0.0.1:%d", mcpPort)
+	log.Printf("Headless MCP server listening on %s", addr)
+	if err := http.ListenAndServe(addr, mux); err != nil {
+		log.Printf("Headless MCP server error: %v", err)
+	}
+}
diff --git a/internal/core-ide/main.go b/internal/core-ide/main.go
index 646bd70..23bc1e3 100644
--- a/internal/core-ide/main.go
+++ b/internal/core-ide/main.go
@@ -4,6 +4,7 @@ import (
 	"embed"
 	"io/fs"
 	"log"
+	"os"
 	"runtime"
 
 	"github.com/host-uk/core/internal/core-ide/icons"
@@ -17,6 +18,19 @@ var assets embed.FS
 const mcpPort = 9877
 
 func main() {
+	// Check for headless mode
+	headless := false
+	for _, arg := range os.Args[1:] {
+		if arg == "--headless" {
+			headless = true
+		}
+	}
+
+	if headless || !hasDisplay() {
+		startHeadless()
+		return
+	}
+
 	// Strip the embed path prefix so files are served from root
 	staticAssets, err := fs.Sub(assets, "frontend/dist/wails-angular-template/browser")
 	if err != nil {
diff --git a/pkg/jobrunner/github/signals.go b/pkg/jobrunner/github/signals.go
new file mode 100644
index 0000000..c1fad24
--- /dev/null
+++ b/pkg/jobrunner/github/signals.go
@@ -0,0 +1,161 @@
+package github
+
+import (
+	"regexp"
+	"strconv"
+	"time"
+
+	"github.com/host-uk/core/pkg/jobrunner"
+)
+
+// ghIssue is a minimal GitHub issue response.
+type ghIssue struct {
+	Number int       `json:"number"`
+	Title  string    `json:"title"`
+	Body   string    `json:"body"`
+	Labels []ghLabel `json:"labels"`
+	State  string    `json:"state"`
+}
+
+// ghLabel is a GitHub label.
+type ghLabel struct {
+	Name string `json:"name"`
+}
+
+// ghPR is a minimal GitHub pull request response.
+type ghPR struct {
+	Number         int    `json:"number"`
+	Title          string `json:"title"`
+	Body           string `json:"body"`
+	State          string `json:"state"`
+	Draft          bool   `json:"draft"`
+	MergeableState string `json:"mergeable_state"`
+	Head           ghRef  `json:"head"`
+}
+
+// ghRef is a Git reference (branch head).
+type ghRef struct {
+	SHA string `json:"sha"`
+	Ref string `json:"ref"`
+}
+
+// ghCheckSuites is the response for the check-suites endpoint.
+type ghCheckSuites struct {
+	TotalCount  int            `json:"total_count"`
+	CheckSuites []ghCheckSuite `json:"check_suites"`
+}
+
+// ghCheckSuite is a single check suite.
+type ghCheckSuite struct {
+	ID         int    `json:"id"`
+	Status     string `json:"status"`     // queued, in_progress, completed
+	Conclusion string `json:"conclusion"` // success, failure, neutral, cancelled, etc.
+}
+
+// epicChildRe matches checklist items in epic bodies: - [ ] #42 or - [x] #42
+var epicChildRe = regexp.MustCompile(`- \[([ x])\] #(\d+)`)
+
+// parseEpicChildren extracts child issue numbers from an epic body's checklist.
+// Returns two slices: unchecked (pending) and checked (done) issue numbers.
+func parseEpicChildren(body string) (unchecked []int, checked []int) {
+	matches := epicChildRe.FindAllStringSubmatch(body, -1)
+	for _, m := range matches {
+		num, err := strconv.Atoi(m[2])
+		if err != nil {
+			continue
+		}
+		if m[1] == "x" {
+			checked = append(checked, num)
+		} else {
+			unchecked = append(unchecked, num)
+		}
+	}
+	return unchecked, checked
+}
+
+// linkedPRRe matches "#N" references in PR bodies.
+var linkedPRRe = regexp.MustCompile(`#(\d+)`)
+
+// findLinkedPR finds the first PR whose body references the given issue number.
+func findLinkedPR(prs []ghPR, issueNumber int) *ghPR {
+	target := strconv.Itoa(issueNumber)
+	for i := range prs {
+		matches := linkedPRRe.FindAllStringSubmatch(prs[i].Body, -1)
+		for _, m := range matches {
+			if m[1] == target {
+				return &prs[i]
+			}
+		}
+	}
+	return nil
+}
+
+// aggregateCheckStatus returns SUCCESS, FAILURE, or PENDING based on check suites.
+func aggregateCheckStatus(suites []ghCheckSuite) string {
+	if len(suites) == 0 {
+		return "PENDING"
+	}
+
+	allComplete := true
+	for _, s := range suites {
+		if s.Status != "completed" {
+			allComplete = false
+			break
+		}
+	}
+
+	if !allComplete {
+		return "PENDING"
+	}
+
+	for _, s := range suites {
+		if s.Conclusion != "success" && s.Conclusion != "neutral" && s.Conclusion != "skipped" {
+			return "FAILURE"
+		}
+	}
+
+	return "SUCCESS"
+}
+
+// mergeableToString maps GitHub's mergeable_state to a canonical string.
+func mergeableToString(state string) string {
+	switch state {
+	case "clean", "has_hooks", "unstable":
+		return "MERGEABLE"
+	case "dirty", "blocked":
+		return "CONFLICTING"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+// buildSignal creates a PipelineSignal from parsed GitHub API data.
+func buildSignal(
+	owner, repo string,
+	epicNumber, childNumber int,
+	pr *ghPR,
+	checkStatus string,
+) *jobrunner.PipelineSignal {
+	prState := "OPEN"
+	switch pr.State {
+	case "closed":
+		prState = "CLOSED"
+	case "open":
+		prState = "OPEN"
+	}
+
+	return &jobrunner.PipelineSignal{
+		EpicNumber:    epicNumber,
+		ChildNumber:   childNumber,
+		PRNumber:      pr.Number,
+		RepoOwner:     owner,
+		RepoName:      repo,
+		PRState:       prState,
+		IsDraft:       pr.Draft,
+		Mergeable:     mergeableToString(pr.MergeableState),
+		CheckStatus:   checkStatus,
+		LastCommitSHA: pr.Head.SHA,
+		LastCommitAt:  time.Time{}, // Not available from list endpoint
+		LastReviewAt:  time.Time{}, // Not available from list endpoint
+	}
+}
diff --git a/pkg/jobrunner/github/source.go b/pkg/jobrunner/github/source.go
new file mode 100644
index 0000000..7d7fc5d
--- /dev/null
+++ b/pkg/jobrunner/github/source.go
@@ -0,0 +1,196 @@
+package github
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+	"sync"
+
+	"golang.org/x/oauth2"
+
+	"github.com/host-uk/core/pkg/jobrunner"
+	"github.com/host-uk/core/pkg/log"
+)
+
+// Config configures a GitHubSource.
+type Config struct {
+	Repos  []string // "owner/repo" format
+	APIURL string   // override for testing (default: https://api.github.com)
+}
+
+// GitHubSource polls GitHub for pipeline signals from epic issues.
+type GitHubSource struct {
+	repos  []string
+	apiURL string
+	client *http.Client
+	etags  map[string]string
+	mu     sync.Mutex
+}
+
+// NewGitHubSource creates a GitHubSource from the given config.
+func NewGitHubSource(cfg Config) *GitHubSource {
+	apiURL := cfg.APIURL
+	if apiURL == "" {
+		apiURL = "https://api.github.com"
+	}
+
+	// Build an authenticated HTTP client if GITHUB_TOKEN is set.
+ var client *http.Client + if token := os.Getenv("GITHUB_TOKEN"); token != "" { + ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}) + client = oauth2.NewClient(context.Background(), ts) + } else { + client = http.DefaultClient + } + + return &GitHubSource{ + repos: cfg.Repos, + apiURL: strings.TrimRight(apiURL, "/"), + client: client, + etags: make(map[string]string), + } +} + +// Name returns the source identifier. +func (g *GitHubSource) Name() string { + return "github" +} + +// Poll fetches epics and their linked PRs from all configured repositories, +// returning a PipelineSignal for each unchecked child that has a linked PR. +func (g *GitHubSource) Poll(ctx context.Context) ([]*jobrunner.PipelineSignal, error) { + var signals []*jobrunner.PipelineSignal + + for _, repoFull := range g.repos { + owner, repo, err := splitRepo(repoFull) + if err != nil { + log.Error("invalid repo format", "repo", repoFull, "err", err) + continue + } + + repoSignals, err := g.pollRepo(ctx, owner, repo) + if err != nil { + log.Error("poll repo failed", "repo", repoFull, "err", err) + continue + } + + signals = append(signals, repoSignals...) + } + + return signals, nil +} + +// Report is a no-op for the GitHub source. +func (g *GitHubSource) Report(_ context.Context, _ *jobrunner.ActionResult) error { + return nil +} + +// pollRepo fetches epics and PRs for a single repository. +func (g *GitHubSource) pollRepo(ctx context.Context, owner, repo string) ([]*jobrunner.PipelineSignal, error) { + // Fetch epic issues (label=epic). + epicsURL := fmt.Sprintf("%s/repos/%s/%s/issues?labels=epic&state=open", g.apiURL, owner, repo) + var epics []ghIssue + notModified, err := g.fetchJSON(ctx, epicsURL, &epics) + if err != nil { + return nil, fmt.Errorf("fetch epics: %w", err) + } + if notModified { + log.Debug("epics not modified", "repo", owner+"/"+repo) + return nil, nil + } + + if len(epics) == 0 { + return nil, nil + } + + // Fetch open PRs. 
+	prsURL := fmt.Sprintf("%s/repos/%s/%s/pulls?state=open", g.apiURL, owner, repo)
+	var prs []ghPR
+	// NOTE(review): the notModified flag is discarded here. fetchJSON stores
+	// the ETag for this URL, so once the PR list is unchanged the next poll
+	// gets a 304, prs stays empty, findLinkedPR matches nothing, and every
+	// signal for this repo is silently dropped until the PR list changes.
+	// Cache the decoded body alongside the ETag, or skip conditional
+	// requests for this endpoint.
+	_, err = g.fetchJSON(ctx, prsURL, &prs)
+	if err != nil {
+		return nil, fmt.Errorf("fetch PRs: %w", err)
+	}
+
+	var signals []*jobrunner.PipelineSignal
+
+	for _, epic := range epics {
+		unchecked, _ := parseEpicChildren(epic.Body)
+		for _, childNum := range unchecked {
+			pr := findLinkedPR(prs, childNum)
+			if pr == nil {
+				continue
+			}
+
+			// Fetch check suites for the PR's head SHA.
+			// NOTE(review): same 304 issue as the PR fetch above -- an
+			// unchanged check-suite response decodes to an empty struct
+			// and the status then aggregates from zero suites.
+			checksURL := fmt.Sprintf("%s/repos/%s/%s/commits/%s/check-suites", g.apiURL, owner, repo, pr.Head.SHA)
+			var checkResp ghCheckSuites
+			_, err := g.fetchJSON(ctx, checksURL, &checkResp)
+			if err != nil {
+				log.Error("fetch check suites failed", "repo", owner+"/"+repo, "sha", pr.Head.SHA, "err", err)
+				continue
+			}
+
+			checkStatus := aggregateCheckStatus(checkResp.CheckSuites)
+			sig := buildSignal(owner, repo, epic.Number, childNum, pr, checkStatus)
+			signals = append(signals, sig)
+		}
+	}
+
+	return signals, nil
+}
+
+// fetchJSON performs a GET request with ETag conditional headers.
+// Returns true if the server responded with 304 Not Modified.
+// On a 304 the target is left untouched (i.e. empty for a fresh variable);
+// callers must treat notModified==true as "no data", not "no change".
+func (g *GitHubSource) fetchJSON(ctx context.Context, url string, target any) (bool, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return false, fmt.Errorf("create request: %w", err)
+	}
+
+	req.Header.Set("Accept", "application/vnd.github+json")
+
+	// Send If-None-Match when we have a previous ETag for this URL.
+	g.mu.Lock()
+	if etag, ok := g.etags[url]; ok {
+		req.Header.Set("If-None-Match", etag)
+	}
+	g.mu.Unlock()
+
+	resp, err := g.client.Do(req)
+	if err != nil {
+		return false, fmt.Errorf("execute request: %w", err)
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode == http.StatusNotModified {
+		return true, nil
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return false, fmt.Errorf("unexpected status %d for %s", resp.StatusCode, url)
+	}
+
+	// Store ETag for future conditional requests.
+ if etag := resp.Header.Get("ETag"); etag != "" { + g.mu.Lock() + g.etags[url] = etag + g.mu.Unlock() + } + + if err := json.NewDecoder(resp.Body).Decode(target); err != nil { + return false, fmt.Errorf("decode response: %w", err) + } + + return false, nil +} + +// splitRepo parses "owner/repo" into its components. +func splitRepo(full string) (string, string, error) { + parts := strings.SplitN(full, "/", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("expected owner/repo format, got %q", full) + } + return parts[0], parts[1], nil +} diff --git a/pkg/jobrunner/github/source_test.go b/pkg/jobrunner/github/source_test.go new file mode 100644 index 0000000..4b244df --- /dev/null +++ b/pkg/jobrunner/github/source_test.go @@ -0,0 +1,270 @@ +package github + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGitHubSource_Name_Good(t *testing.T) { + src := NewGitHubSource(Config{Repos: []string{"owner/repo"}}) + assert.Equal(t, "github", src.Name()) +} + +func TestGitHubSource_Poll_Good(t *testing.T) { + epic := ghIssue{ + Number: 10, + Title: "Epic: feature rollout", + Body: "Tasks:\n- [ ] #5\n- [x] #6\n- [ ] #7", + Labels: []ghLabel{{Name: "epic"}}, + State: "open", + } + + pr5 := ghPR{ + Number: 50, + Title: "Implement child #5", + Body: "Closes #5", + State: "open", + Draft: false, + MergeableState: "clean", + Head: ghRef{SHA: "abc123", Ref: "feature-5"}, + } + + // PR 7 has no linked reference to any child, so child #7 should not produce a signal. 
+ pr99 := ghPR{ + Number: 99, + Title: "Unrelated PR", + Body: "No issue links here", + State: "open", + Draft: false, + MergeableState: "dirty", + Head: ghRef{SHA: "def456", Ref: "feature-other"}, + } + + checkSuites := ghCheckSuites{ + TotalCount: 1, + CheckSuites: []ghCheckSuite{ + {ID: 1, Status: "completed", Conclusion: "success"}, + }, + } + + mux := http.NewServeMux() + + mux.HandleFunc("GET /repos/test-org/test-repo/issues", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "epic", r.URL.Query().Get("labels")) + assert.Equal(t, "open", r.URL.Query().Get("state")) + w.Header().Set("ETag", `"epic-etag-1"`) + _ = json.NewEncoder(w).Encode([]ghIssue{epic}) + }) + + mux.HandleFunc("GET /repos/test-org/test-repo/pulls", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "open", r.URL.Query().Get("state")) + _ = json.NewEncoder(w).Encode([]ghPR{pr5, pr99}) + }) + + mux.HandleFunc("GET /repos/test-org/test-repo/commits/abc123/check-suites", func(w http.ResponseWriter, _ *http.Request) { + _ = json.NewEncoder(w).Encode(checkSuites) + }) + + srv := httptest.NewServer(mux) + defer srv.Close() + + src := NewGitHubSource(Config{ + Repos: []string{"test-org/test-repo"}, + APIURL: srv.URL, + }) + + signals, err := src.Poll(context.Background()) + require.NoError(t, err) + + // Only child #5 has a linked PR (pr5 references #5 in body). + // Child #7 is unchecked but no PR references it. + // Child #6 is checked so it's ignored. 
+ require.Len(t, signals, 1) + + sig := signals[0] + assert.Equal(t, 10, sig.EpicNumber) + assert.Equal(t, 5, sig.ChildNumber) + assert.Equal(t, 50, sig.PRNumber) + assert.Equal(t, "test-org", sig.RepoOwner) + assert.Equal(t, "test-repo", sig.RepoName) + assert.Equal(t, "OPEN", sig.PRState) + assert.Equal(t, false, sig.IsDraft) + assert.Equal(t, "MERGEABLE", sig.Mergeable) + assert.Equal(t, "SUCCESS", sig.CheckStatus) + assert.Equal(t, "abc123", sig.LastCommitSHA) +} + +func TestGitHubSource_Poll_Good_NotModified(t *testing.T) { + callCount := 0 + + mux := http.NewServeMux() + mux.HandleFunc("GET /repos/test-org/test-repo/issues", func(w http.ResponseWriter, r *http.Request) { + callCount++ + if callCount == 1 { + w.Header().Set("ETag", `"etag-v1"`) + _ = json.NewEncoder(w).Encode([]ghIssue{}) + } else { + // Second call should have If-None-Match. + assert.Equal(t, `"etag-v1"`, r.Header.Get("If-None-Match")) + w.WriteHeader(http.StatusNotModified) + } + }) + + srv := httptest.NewServer(mux) + defer srv.Close() + + src := NewGitHubSource(Config{ + Repos: []string{"test-org/test-repo"}, + APIURL: srv.URL, + }) + + // First poll — gets empty list, stores ETag. + signals, err := src.Poll(context.Background()) + require.NoError(t, err) + assert.Empty(t, signals) + + // Second poll — sends If-None-Match, gets 304. 
+ signals, err = src.Poll(context.Background()) + require.NoError(t, err) + assert.Empty(t, signals) + + assert.Equal(t, 2, callCount) +} + +func TestParseEpicChildren_Good(t *testing.T) { + body := `## Epic + +Tasks to complete: +- [ ] #1 +- [x] #2 +- [ ] #3 +- [x] #4 +- [ ] #5 +` + + unchecked, checked := parseEpicChildren(body) + + assert.Equal(t, []int{1, 3, 5}, unchecked) + assert.Equal(t, []int{2, 4}, checked) +} + +func TestParseEpicChildren_Good_Empty(t *testing.T) { + unchecked, checked := parseEpicChildren("No checklist here") + assert.Nil(t, unchecked) + assert.Nil(t, checked) +} + +func TestFindLinkedPR_Good(t *testing.T) { + prs := []ghPR{ + {Number: 10, Body: "Unrelated work"}, + {Number: 20, Body: "Fixes #42 and updates docs"}, + {Number: 30, Body: "Closes #99"}, + } + + pr := findLinkedPR(prs, 42) + require.NotNil(t, pr) + assert.Equal(t, 20, pr.Number) +} + +func TestFindLinkedPR_Good_NoMatch(t *testing.T) { + prs := []ghPR{ + {Number: 10, Body: "Unrelated work"}, + {Number: 20, Body: "Closes #99"}, + } + + pr := findLinkedPR(prs, 42) + assert.Nil(t, pr) +} + +func TestAggregateCheckStatus_Good(t *testing.T) { + tests := []struct { + name string + suites []ghCheckSuite + want string + }{ + { + name: "all success", + suites: []ghCheckSuite{{Status: "completed", Conclusion: "success"}}, + want: "SUCCESS", + }, + { + name: "one failure", + suites: []ghCheckSuite{{Status: "completed", Conclusion: "failure"}}, + want: "FAILURE", + }, + { + name: "in progress", + suites: []ghCheckSuite{{Status: "in_progress", Conclusion: ""}}, + want: "PENDING", + }, + { + name: "empty", + suites: nil, + want: "PENDING", + }, + { + name: "mixed completed", + suites: []ghCheckSuite{ + {Status: "completed", Conclusion: "success"}, + {Status: "completed", Conclusion: "failure"}, + }, + want: "FAILURE", + }, + { + name: "neutral is success", + suites: []ghCheckSuite{ + {Status: "completed", Conclusion: "neutral"}, + {Status: "completed", Conclusion: "success"}, + }, + want: 
"SUCCESS", + }, + { + name: "skipped is success", + suites: []ghCheckSuite{ + {Status: "completed", Conclusion: "skipped"}, + }, + want: "SUCCESS", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := aggregateCheckStatus(tc.suites) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestMergeableToString_Good(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"clean", "MERGEABLE"}, + {"has_hooks", "MERGEABLE"}, + {"unstable", "MERGEABLE"}, + {"dirty", "CONFLICTING"}, + {"blocked", "CONFLICTING"}, + {"unknown", "UNKNOWN"}, + {"", "UNKNOWN"}, + } + + for _, tc := range tests { + t.Run(tc.input, func(t *testing.T) { + got := mergeableToString(tc.input) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestGitHubSource_Report_Good(t *testing.T) { + src := NewGitHubSource(Config{Repos: []string{"owner/repo"}}) + err := src.Report(context.Background(), nil) + assert.NoError(t, err) +} diff --git a/pkg/jobrunner/handlers/enable_auto_merge.go b/pkg/jobrunner/handlers/enable_auto_merge.go new file mode 100644 index 0000000..ca8433d --- /dev/null +++ b/pkg/jobrunner/handlers/enable_auto_merge.go @@ -0,0 +1,59 @@ +package handlers + +import ( + "context" + "fmt" + "time" + + "github.com/host-uk/core/pkg/jobrunner" +) + +// EnableAutoMergeHandler enables squash auto-merge on a PR that is ready. +type EnableAutoMergeHandler struct{} + +// NewEnableAutoMergeHandler creates a handler that enables auto-merge. +func NewEnableAutoMergeHandler() *EnableAutoMergeHandler { + return &EnableAutoMergeHandler{} +} + +// Name returns the handler identifier. +func (h *EnableAutoMergeHandler) Name() string { + return "enable_auto_merge" +} + +// Match returns true when the PR is open, not a draft, mergeable, checks +// are passing, and there are no unresolved review threads. 
+func (h *EnableAutoMergeHandler) Match(signal *jobrunner.PipelineSignal) bool { + return signal.PRState == "OPEN" && + !signal.IsDraft && + signal.Mergeable == "MERGEABLE" && + signal.CheckStatus == "SUCCESS" && + !signal.HasUnresolvedThreads() +} + +// Execute shells out to gh to enable auto-merge with squash strategy. +func (h *EnableAutoMergeHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) { + start := time.Now() + + repoFlag := fmt.Sprintf("%s/%s", signal.RepoOwner, signal.RepoName) + prNumber := fmt.Sprintf("%d", signal.PRNumber) + + cmd := execCommand(ctx, "gh", "pr", "merge", "--auto", "--squash", prNumber, "-R", repoFlag) + output, err := cmd.CombinedOutput() + + result := &jobrunner.ActionResult{ + Action: "enable_auto_merge", + RepoOwner: signal.RepoOwner, + RepoName: signal.RepoName, + PRNumber: signal.PRNumber, + Success: err == nil, + Timestamp: time.Now(), + Duration: time.Since(start), + } + + if err != nil { + result.Error = fmt.Sprintf("gh pr merge failed: %v: %s", err, string(output)) + } + + return result, nil +} diff --git a/pkg/jobrunner/handlers/enable_auto_merge_test.go b/pkg/jobrunner/handlers/enable_auto_merge_test.go new file mode 100644 index 0000000..30a4031 --- /dev/null +++ b/pkg/jobrunner/handlers/enable_auto_merge_test.go @@ -0,0 +1,84 @@ +package handlers + +import ( + "context" + "os/exec" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/host-uk/core/pkg/jobrunner" +) + +func TestEnableAutoMerge_Match_Good(t *testing.T) { + h := NewEnableAutoMergeHandler() + sig := &jobrunner.PipelineSignal{ + PRState: "OPEN", + IsDraft: false, + Mergeable: "MERGEABLE", + CheckStatus: "SUCCESS", + ThreadsTotal: 0, + ThreadsResolved: 0, + } + assert.True(t, h.Match(sig)) +} + +func TestEnableAutoMerge_Match_Bad_Draft(t *testing.T) { + h := NewEnableAutoMergeHandler() + sig := &jobrunner.PipelineSignal{ + PRState: "OPEN", + 
IsDraft: true, + Mergeable: "MERGEABLE", + CheckStatus: "SUCCESS", + ThreadsTotal: 0, + ThreadsResolved: 0, + } + assert.False(t, h.Match(sig)) +} + +func TestEnableAutoMerge_Match_Bad_UnresolvedThreads(t *testing.T) { + h := NewEnableAutoMergeHandler() + sig := &jobrunner.PipelineSignal{ + PRState: "OPEN", + IsDraft: false, + Mergeable: "MERGEABLE", + CheckStatus: "SUCCESS", + ThreadsTotal: 5, + ThreadsResolved: 3, + } + assert.False(t, h.Match(sig)) +} + +func TestEnableAutoMerge_Execute_Good(t *testing.T) { + // Save and restore the original execCommand. + original := execCommand + defer func() { execCommand = original }() + + var capturedArgs []string + execCommand = func(ctx context.Context, name string, args ...string) *exec.Cmd { + capturedArgs = append([]string{name}, args...) + return exec.CommandContext(ctx, "echo", append([]string{name}, args...)...) + } + + h := NewEnableAutoMergeHandler() + sig := &jobrunner.PipelineSignal{ + RepoOwner: "host-uk", + RepoName: "core-php", + PRNumber: 55, + } + + result, err := h.Execute(context.Background(), sig) + require.NoError(t, err) + + assert.True(t, result.Success) + assert.Equal(t, "enable_auto_merge", result.Action) + + joined := strings.Join(capturedArgs, " ") + assert.Contains(t, joined, "--auto") + assert.Contains(t, joined, "--squash") + assert.Contains(t, joined, "55") + assert.Contains(t, joined, "-R") + assert.Contains(t, joined, "host-uk/core-php") +} diff --git a/pkg/jobrunner/handlers/exec.go b/pkg/jobrunner/handlers/exec.go new file mode 100644 index 0000000..fb4c97e --- /dev/null +++ b/pkg/jobrunner/handlers/exec.go @@ -0,0 +1,8 @@ +package handlers + +import "os/exec" + +// execCommand is a package-level variable for creating exec.Cmd instances. +// It defaults to exec.CommandContext and can be replaced in tests for +// mocking shell commands. 
+var execCommand = exec.CommandContext diff --git a/pkg/jobrunner/handlers/publish_draft.go b/pkg/jobrunner/handlers/publish_draft.go new file mode 100644 index 0000000..9656ceb --- /dev/null +++ b/pkg/jobrunner/handlers/publish_draft.go @@ -0,0 +1,81 @@ +package handlers + +import ( + "bytes" + "context" + "fmt" + "net/http" + "time" + + "github.com/host-uk/core/pkg/jobrunner" +) + +const defaultAPIURL = "https://api.github.com" + +// PublishDraftHandler marks a draft PR as ready for review once its checks pass. +type PublishDraftHandler struct { + client *http.Client + apiURL string +} + +// NewPublishDraftHandler creates a handler that publishes draft PRs. +// If client is nil, http.DefaultClient is used. +// If apiURL is empty, the default GitHub API URL is used. +func NewPublishDraftHandler(client *http.Client, apiURL string) *PublishDraftHandler { + if client == nil { + client = http.DefaultClient + } + if apiURL == "" { + apiURL = defaultAPIURL + } + return &PublishDraftHandler{client: client, apiURL: apiURL} +} + +// Name returns the handler identifier. +func (h *PublishDraftHandler) Name() string { + return "publish_draft" +} + +// Match returns true when the PR is a draft, open, and all checks have passed. +func (h *PublishDraftHandler) Match(signal *jobrunner.PipelineSignal) bool { + return signal.IsDraft && + signal.PRState == "OPEN" && + signal.CheckStatus == "SUCCESS" +} + +// Execute patches the PR to mark it as no longer a draft. 
+func (h *PublishDraftHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
+	start := time.Now()
+	url := fmt.Sprintf("%s/repos/%s/%s/pulls/%d", h.apiURL, signal.RepoOwner, signal.RepoName, signal.PRNumber)
+
+	// NOTE(review): the REST "update a pull request" endpoint does not
+	// document a "draft" field; unrecognized body fields are ignored, so
+	// GitHub is expected to answer 200 while leaving the PR in draft.
+	// Converting draft -> ready normally requires the GraphQL
+	// markPullRequestReadyForReview mutation (or `gh pr ready`) -- verify
+	// against the live API before relying on this handler.
+	body := bytes.NewBufferString(`{"draft":false}`)
+	req, err := http.NewRequestWithContext(ctx, http.MethodPatch, url, body)
+	if err != nil {
+		return nil, fmt.Errorf("publish_draft: create request: %w", err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Accept", "application/vnd.github+json")
+
+	resp, err := h.client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("publish_draft: execute request: %w", err)
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	// Any 2xx status counts as success; the result carries structural
+	// metadata only (per the journal's no-free-text rule).
+	success := resp.StatusCode >= 200 && resp.StatusCode < 300
+	result := &jobrunner.ActionResult{
+		Action:    "publish_draft",
+		RepoOwner: signal.RepoOwner,
+		RepoName:  signal.RepoName,
+		PRNumber:  signal.PRNumber,
+		Success:   success,
+		Timestamp: time.Now(),
+		Duration:  time.Since(start),
+	}
+
+	if !success {
+		result.Error = fmt.Sprintf("unexpected status %d", resp.StatusCode)
+	}
+
+	return result, nil
+}
diff --git a/pkg/jobrunner/handlers/publish_draft_test.go b/pkg/jobrunner/handlers/publish_draft_test.go
new file mode 100644
index 0000000..965a2db
--- /dev/null
+++ b/pkg/jobrunner/handlers/publish_draft_test.go
@@ -0,0 +1,82 @@
+package handlers
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/host-uk/core/pkg/jobrunner"
+)
+
+func TestPublishDraft_Match_Good(t *testing.T) {
+	h := NewPublishDraftHandler(nil, "")
+	sig := &jobrunner.PipelineSignal{
+		IsDraft:     true,
+		PRState:     "OPEN",
+		CheckStatus: "SUCCESS",
+	}
+	assert.True(t, h.Match(sig))
+}
+
+func TestPublishDraft_Match_Bad_NotDraft(t *testing.T) {
+	h := NewPublishDraftHandler(nil, "")
+	sig := &jobrunner.PipelineSignal{
+		IsDraft:
false, + PRState: "OPEN", + CheckStatus: "SUCCESS", + } + assert.False(t, h.Match(sig)) +} + +func TestPublishDraft_Match_Bad_ChecksFailing(t *testing.T) { + h := NewPublishDraftHandler(nil, "") + sig := &jobrunner.PipelineSignal{ + IsDraft: true, + PRState: "OPEN", + CheckStatus: "FAILURE", + } + assert.False(t, h.Match(sig)) +} + +func TestPublishDraft_Execute_Good(t *testing.T) { + var capturedMethod string + var capturedPath string + var capturedBody string + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + capturedMethod = r.Method + capturedPath = r.URL.Path + b, _ := io.ReadAll(r.Body) + capturedBody = string(b) + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"draft":false}`)) + })) + defer srv.Close() + + h := NewPublishDraftHandler(srv.Client(), srv.URL) + sig := &jobrunner.PipelineSignal{ + RepoOwner: "host-uk", + RepoName: "core-php", + PRNumber: 42, + IsDraft: true, + PRState: "OPEN", + } + + result, err := h.Execute(context.Background(), sig) + require.NoError(t, err) + + assert.Equal(t, http.MethodPatch, capturedMethod) + assert.Equal(t, "/repos/host-uk/core-php/pulls/42", capturedPath) + assert.Contains(t, capturedBody, `"draft":false`) + + assert.True(t, result.Success) + assert.Equal(t, "publish_draft", result.Action) + assert.Equal(t, "host-uk", result.RepoOwner) + assert.Equal(t, "core-php", result.RepoName) + assert.Equal(t, 42, result.PRNumber) +} diff --git a/pkg/jobrunner/handlers/resolve_threads.go b/pkg/jobrunner/handlers/resolve_threads.go new file mode 100644 index 0000000..82bea0b --- /dev/null +++ b/pkg/jobrunner/handlers/resolve_threads.go @@ -0,0 +1,216 @@ +package handlers + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/host-uk/core/pkg/jobrunner" +) + +const defaultGraphQLURL = "https://api.github.com/graphql" + +// ResolveThreadsHandler resolves all unresolved review threads on a PR +// via the GitHub GraphQL API. 
+type ResolveThreadsHandler struct { + client *http.Client + graphqlURL string +} + +// NewResolveThreadsHandler creates a handler that resolves review threads. +// If client is nil, http.DefaultClient is used. +// If graphqlURL is empty, the default GitHub GraphQL URL is used. +func NewResolveThreadsHandler(client *http.Client, graphqlURL string) *ResolveThreadsHandler { + if client == nil { + client = http.DefaultClient + } + if graphqlURL == "" { + graphqlURL = defaultGraphQLURL + } + return &ResolveThreadsHandler{client: client, graphqlURL: graphqlURL} +} + +// Name returns the handler identifier. +func (h *ResolveThreadsHandler) Name() string { + return "resolve_threads" +} + +// Match returns true when the PR is open and has unresolved review threads. +func (h *ResolveThreadsHandler) Match(signal *jobrunner.PipelineSignal) bool { + return signal.PRState == "OPEN" && signal.HasUnresolvedThreads() +} + +// graphqlRequest is a generic GraphQL request body. +type graphqlRequest struct { + Query string `json:"query"` + Variables map[string]any `json:"variables,omitempty"` +} + +// threadsResponse models the GraphQL response for fetching review threads. +type threadsResponse struct { + Data struct { + Repository struct { + PullRequest struct { + ReviewThreads struct { + Nodes []struct { + ID string `json:"id"` + IsResolved bool `json:"isResolved"` + } `json:"nodes"` + } `json:"reviewThreads"` + } `json:"pullRequest"` + } `json:"repository"` + } `json:"data"` +} + +// resolveResponse models the GraphQL mutation response for resolving a thread. +type resolveResponse struct { + Data struct { + ResolveReviewThread struct { + Thread struct { + ID string `json:"id"` + } `json:"thread"` + } `json:"resolveReviewThread"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` +} + +// Execute fetches unresolved review threads and resolves each one. 
+func (h *ResolveThreadsHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) { + start := time.Now() + + threadIDs, err := h.fetchUnresolvedThreads(ctx, signal) + if err != nil { + return nil, fmt.Errorf("resolve_threads: fetch threads: %w", err) + } + + var resolveErrors []string + for _, threadID := range threadIDs { + if err := h.resolveThread(ctx, threadID); err != nil { + resolveErrors = append(resolveErrors, err.Error()) + } + } + + result := &jobrunner.ActionResult{ + Action: "resolve_threads", + RepoOwner: signal.RepoOwner, + RepoName: signal.RepoName, + PRNumber: signal.PRNumber, + Success: len(resolveErrors) == 0, + Timestamp: time.Now(), + Duration: time.Since(start), + } + + if len(resolveErrors) > 0 { + result.Error = fmt.Sprintf("failed to resolve %d thread(s): %s", + len(resolveErrors), resolveErrors[0]) + } + + return result, nil +} + +// fetchUnresolvedThreads queries the GraphQL API for unresolved review threads. +func (h *ResolveThreadsHandler) fetchUnresolvedThreads(ctx context.Context, signal *jobrunner.PipelineSignal) ([]string, error) { + query := `query($owner: String!, $repo: String!, $number: Int!) 
{ + repository(owner: $owner, name: $repo) { + pullRequest(number: $number) { + reviewThreads(first: 100) { + nodes { + id + isResolved + } + } + } + } + }` + + variables := map[string]any{ + "owner": signal.RepoOwner, + "repo": signal.RepoName, + "number": signal.PRNumber, + } + + gqlReq := graphqlRequest{Query: query, Variables: variables} + respBody, err := h.doGraphQL(ctx, gqlReq) + if err != nil { + return nil, err + } + + var resp threadsResponse + if err := json.Unmarshal(respBody, &resp); err != nil { + return nil, fmt.Errorf("decode threads response: %w", err) + } + + var ids []string + for _, node := range resp.Data.Repository.PullRequest.ReviewThreads.Nodes { + if !node.IsResolved { + ids = append(ids, node.ID) + } + } + + return ids, nil +} + +// resolveThread calls the resolveReviewThread GraphQL mutation. +func (h *ResolveThreadsHandler) resolveThread(ctx context.Context, threadID string) error { + mutation := `mutation($threadId: ID!) { + resolveReviewThread(input: {threadId: $threadId}) { + thread { + id + } + } + }` + + variables := map[string]any{ + "threadId": threadID, + } + + gqlReq := graphqlRequest{Query: mutation, Variables: variables} + respBody, err := h.doGraphQL(ctx, gqlReq) + if err != nil { + return err + } + + var resp resolveResponse + if err := json.Unmarshal(respBody, &resp); err != nil { + return fmt.Errorf("decode resolve response: %w", err) + } + + if len(resp.Errors) > 0 { + return fmt.Errorf("graphql error: %s", resp.Errors[0].Message) + } + + return nil +} + +// doGraphQL sends a GraphQL request and returns the raw response body. 
+func (h *ResolveThreadsHandler) doGraphQL(ctx context.Context, gqlReq graphqlRequest) ([]byte, error) { + bodyBytes, err := json.Marshal(gqlReq) + if err != nil { + return nil, fmt.Errorf("marshal graphql request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, h.graphqlURL, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, fmt.Errorf("create graphql request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + + resp, err := h.client.Do(req) + if err != nil { + return nil, fmt.Errorf("execute graphql request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("graphql unexpected status %d", resp.StatusCode) + } + + return io.ReadAll(resp.Body) +} diff --git a/pkg/jobrunner/handlers/resolve_threads_test.go b/pkg/jobrunner/handlers/resolve_threads_test.go new file mode 100644 index 0000000..c7ea384 --- /dev/null +++ b/pkg/jobrunner/handlers/resolve_threads_test.go @@ -0,0 +1,92 @@ +package handlers + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/host-uk/core/pkg/jobrunner" +) + +func TestResolveThreads_Match_Good(t *testing.T) { + h := NewResolveThreadsHandler(nil, "") + sig := &jobrunner.PipelineSignal{ + PRState: "OPEN", + ThreadsTotal: 4, + ThreadsResolved: 2, + } + assert.True(t, h.Match(sig)) +} + +func TestResolveThreads_Match_Bad_AllResolved(t *testing.T) { + h := NewResolveThreadsHandler(nil, "") + sig := &jobrunner.PipelineSignal{ + PRState: "OPEN", + ThreadsTotal: 3, + ThreadsResolved: 3, + } + assert.False(t, h.Match(sig)) +} + +func TestResolveThreads_Execute_Good(t *testing.T) { + callCount := 0 + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b, _ := io.ReadAll(r.Body) + var gqlReq 
graphqlRequest + _ = json.Unmarshal(b, &gqlReq) + + callCount++ + + if callCount == 1 { + // First call: fetch threads query. + resp := threadsResponse{} + resp.Data.Repository.PullRequest.ReviewThreads.Nodes = []struct { + ID string `json:"id"` + IsResolved bool `json:"isResolved"` + }{ + {ID: "thread-1", IsResolved: false}, + {ID: "thread-2", IsResolved: true}, + {ID: "thread-3", IsResolved: false}, + } + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(resp) + return + } + + // Subsequent calls: resolve mutation. + resp := resolveResponse{} + resp.Data.ResolveReviewThread.Thread.ID = gqlReq.Variables["threadId"].(string) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(resp) + })) + defer srv.Close() + + h := NewResolveThreadsHandler(srv.Client(), srv.URL) + sig := &jobrunner.PipelineSignal{ + RepoOwner: "host-uk", + RepoName: "core-admin", + PRNumber: 33, + PRState: "OPEN", + ThreadsTotal: 3, + ThreadsResolved: 1, + } + + result, err := h.Execute(context.Background(), sig) + require.NoError(t, err) + + assert.True(t, result.Success) + assert.Equal(t, "resolve_threads", result.Action) + assert.Equal(t, "host-uk", result.RepoOwner) + assert.Equal(t, "core-admin", result.RepoName) + assert.Equal(t, 33, result.PRNumber) + + // 1 query + 2 mutations (thread-1 and thread-3 are unresolved). + assert.Equal(t, 3, callCount) +} diff --git a/pkg/jobrunner/handlers/send_fix_command.go b/pkg/jobrunner/handlers/send_fix_command.go new file mode 100644 index 0000000..5c4d7ef --- /dev/null +++ b/pkg/jobrunner/handlers/send_fix_command.go @@ -0,0 +1,97 @@ +package handlers + +import ( + "bytes" + "context" + "fmt" + "net/http" + "time" + + "github.com/host-uk/core/pkg/jobrunner" +) + +// SendFixCommandHandler posts a comment on a PR asking for conflict or +// review fixes. 
+type SendFixCommandHandler struct { + client *http.Client + apiURL string +} + +// NewSendFixCommandHandler creates a handler that posts fix commands. +// If client is nil, http.DefaultClient is used. +// If apiURL is empty, the default GitHub API URL is used. +func NewSendFixCommandHandler(client *http.Client, apiURL string) *SendFixCommandHandler { + if client == nil { + client = http.DefaultClient + } + if apiURL == "" { + apiURL = defaultAPIURL + } + return &SendFixCommandHandler{client: client, apiURL: apiURL} +} + +// Name returns the handler identifier. +func (h *SendFixCommandHandler) Name() string { + return "send_fix_command" +} + +// Match returns true when the PR is open and either has merge conflicts or +// has unresolved threads with failing checks. +func (h *SendFixCommandHandler) Match(signal *jobrunner.PipelineSignal) bool { + if signal.PRState != "OPEN" { + return false + } + if signal.Mergeable == "CONFLICTING" { + return true + } + if signal.HasUnresolvedThreads() && signal.CheckStatus == "FAILURE" { + return true + } + return false +} + +// Execute posts a comment on the PR issue asking for a fix. +func (h *SendFixCommandHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) { + start := time.Now() + + var message string + if signal.Mergeable == "CONFLICTING" { + message = "Can you fix the merge conflict?" + } else { + message = "Can you fix the code reviews?" 
+ } + + url := fmt.Sprintf("%s/repos/%s/%s/issues/%d/comments", h.apiURL, signal.RepoOwner, signal.RepoName, signal.PRNumber) + bodyStr := fmt.Sprintf(`{"body":%q}`, message) + body := bytes.NewBufferString(bodyStr) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body) + if err != nil { + return nil, fmt.Errorf("send_fix_command: create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/vnd.github+json") + + resp, err := h.client.Do(req) + if err != nil { + return nil, fmt.Errorf("send_fix_command: execute request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + success := resp.StatusCode >= 200 && resp.StatusCode < 300 + result := &jobrunner.ActionResult{ + Action: "send_fix_command", + RepoOwner: signal.RepoOwner, + RepoName: signal.RepoName, + PRNumber: signal.PRNumber, + Success: success, + Timestamp: time.Now(), + Duration: time.Since(start), + } + + if !success { + result.Error = fmt.Sprintf("unexpected status %d", resp.StatusCode) + } + + return result, nil +} diff --git a/pkg/jobrunner/handlers/send_fix_command_test.go b/pkg/jobrunner/handlers/send_fix_command_test.go new file mode 100644 index 0000000..4e9d478 --- /dev/null +++ b/pkg/jobrunner/handlers/send_fix_command_test.go @@ -0,0 +1,85 @@ +package handlers + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/host-uk/core/pkg/jobrunner" +) + +func TestSendFixCommand_Match_Good_Conflicting(t *testing.T) { + h := NewSendFixCommandHandler(nil, "") + sig := &jobrunner.PipelineSignal{ + PRState: "OPEN", + Mergeable: "CONFLICTING", + } + assert.True(t, h.Match(sig)) +} + +func TestSendFixCommand_Match_Good_UnresolvedThreads(t *testing.T) { + h := NewSendFixCommandHandler(nil, "") + sig := &jobrunner.PipelineSignal{ + PRState: "OPEN", + Mergeable: "MERGEABLE", + CheckStatus: "FAILURE", + 
ThreadsTotal: 3, + ThreadsResolved: 1, + } + assert.True(t, h.Match(sig)) +} + +func TestSendFixCommand_Match_Bad_Clean(t *testing.T) { + h := NewSendFixCommandHandler(nil, "") + sig := &jobrunner.PipelineSignal{ + PRState: "OPEN", + Mergeable: "MERGEABLE", + CheckStatus: "SUCCESS", + ThreadsTotal: 2, + ThreadsResolved: 2, + } + assert.False(t, h.Match(sig)) +} + +func TestSendFixCommand_Execute_Good_Conflict(t *testing.T) { + var capturedMethod string + var capturedPath string + var capturedBody string + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + capturedMethod = r.Method + capturedPath = r.URL.Path + b, _ := io.ReadAll(r.Body) + capturedBody = string(b) + w.WriteHeader(http.StatusCreated) + _, _ = w.Write([]byte(`{"id":1}`)) + })) + defer srv.Close() + + h := NewSendFixCommandHandler(srv.Client(), srv.URL) + sig := &jobrunner.PipelineSignal{ + RepoOwner: "host-uk", + RepoName: "core-tenant", + PRNumber: 17, + PRState: "OPEN", + Mergeable: "CONFLICTING", + } + + result, err := h.Execute(context.Background(), sig) + require.NoError(t, err) + + assert.Equal(t, http.MethodPost, capturedMethod) + assert.Equal(t, "/repos/host-uk/core-tenant/issues/17/comments", capturedPath) + assert.Contains(t, capturedBody, "fix the merge conflict") + + assert.True(t, result.Success) + assert.Equal(t, "send_fix_command", result.Action) + assert.Equal(t, "host-uk", result.RepoOwner) + assert.Equal(t, "core-tenant", result.RepoName) + assert.Equal(t, 17, result.PRNumber) +} diff --git a/pkg/jobrunner/handlers/tick_parent.go b/pkg/jobrunner/handlers/tick_parent.go new file mode 100644 index 0000000..12174a2 --- /dev/null +++ b/pkg/jobrunner/handlers/tick_parent.go @@ -0,0 +1,108 @@ +package handlers + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/host-uk/core/pkg/jobrunner" +) + +// TickParentHandler ticks a child checkbox in the parent epic issue body +// after the child's PR has been merged. 
+type TickParentHandler struct{}
+
+// NewTickParentHandler creates a handler that ticks parent epic checkboxes.
+func NewTickParentHandler() *TickParentHandler {
+	return &TickParentHandler{}
+}
+
+// Name returns the handler identifier.
+func (h *TickParentHandler) Name() string {
+	return "tick_parent"
+}
+
+// Match returns true when the child PR has been merged.
+func (h *TickParentHandler) Match(signal *jobrunner.PipelineSignal) bool {
+	return signal.PRState == "MERGED"
+}
+
+// Execute fetches the epic body, replaces the unchecked checkbox for the
+// child issue with a checked one, updates the epic, and closes the child.
+func (h *TickParentHandler) Execute(ctx context.Context, signal *jobrunner.PipelineSignal) (*jobrunner.ActionResult, error) {
+	start := time.Now()
+	repoFlag := signal.RepoFullName()
+
+	// Fetch the epic issue body.
+	viewCmd := execCommand(ctx, "gh", "issue", "view",
+		fmt.Sprintf("%d", signal.EpicNumber),
+		"-R", repoFlag,
+		"--json", "body",
+		"-q", ".body",
+	)
+	bodyBytes, err := viewCmd.Output()
+	if err != nil {
+		return nil, fmt.Errorf("tick_parent: fetch epic body: %w", err)
+	}
+
+	oldBody := string(bodyBytes)
+	unchecked := fmt.Sprintf("- [ ] #%d", signal.ChildNumber)
+	checked := fmt.Sprintf("- [x] #%d", signal.ChildNumber)
+
+	// Tick the checkbox line by line. A naive strings.Contains/Replace on
+	// the whole body would treat "- [ ] #7" as a match inside "- [ ] #70",
+	// ticking the wrong child. Require the reference to end the line or be
+	// followed by a space (e.g. "- [ ] #7 Add poller").
+	lines := strings.Split(oldBody, "\n")
+	ticked := false
+	for i, line := range lines {
+		trimmed := strings.TrimSpace(line)
+		if trimmed == unchecked || strings.HasPrefix(trimmed, unchecked+" ") {
+			lines[i] = strings.Replace(line, unchecked, checked, 1)
+			ticked = true
+			break
+		}
+	}
+
+	if !ticked {
+		// Already ticked or not found -- nothing to do.
+		return &jobrunner.ActionResult{
+			Action:    "tick_parent",
+			RepoOwner: signal.RepoOwner,
+			RepoName:  signal.RepoName,
+			PRNumber:  signal.PRNumber,
+			Success:   true,
+			Timestamp: time.Now(),
+			Duration:  time.Since(start),
+		}, nil
+	}
+
+	newBody := strings.Join(lines, "\n")
+
+	editCmd := execCommand(ctx, "gh", "issue", "edit",
+		fmt.Sprintf("%d", signal.EpicNumber),
+		"-R", repoFlag,
+		"--body", newBody,
+	)
+	editOutput, err := editCmd.CombinedOutput()
+	if err != nil {
+		return &jobrunner.ActionResult{
+			Action:    "tick_parent",
+			RepoOwner: signal.RepoOwner,
+			RepoName:  signal.RepoName,
+			PRNumber:  signal.PRNumber,
+			Error:     fmt.Sprintf("gh issue edit failed: %v: %s", err, string(editOutput)),
+			Timestamp: time.Now(),
+			Duration:  time.Since(start),
+		}, nil
+	}
+
+	// Also close the child issue (design steps 8+9 combined).
+	closeCmd := execCommand(ctx, "gh", "issue", "close",
+		fmt.Sprintf("%d", signal.ChildNumber),
+		"-R", repoFlag,
+	)
+	closeOutput, err := closeCmd.CombinedOutput()
+
+	result := &jobrunner.ActionResult{
+		Action:    "tick_parent",
+		RepoOwner: signal.RepoOwner,
+		RepoName:  signal.RepoName,
+		PRNumber:  signal.PRNumber,
+		Success:   err == nil,
+		Timestamp: time.Now(),
+		Duration:  time.Since(start),
+	}
+
+	if err != nil {
+		result.Error = fmt.Sprintf("gh issue close failed: %v: %s", err, string(closeOutput))
+	}
+
+	return result, nil
+}
diff --git a/pkg/jobrunner/handlers/tick_parent_test.go b/pkg/jobrunner/handlers/tick_parent_test.go
new file mode 100644
index 0000000..89bc91c
--- /dev/null
+++ b/pkg/jobrunner/handlers/tick_parent_test.go
@@ -0,0 +1,90 @@
+package handlers
+
+import (
+	"context"
+	"fmt"
+	"os/exec"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/host-uk/core/pkg/jobrunner"
+)
+
+func TestTickParent_Match_Good(t *testing.T) {
+	h := NewTickParentHandler()
+	sig := &jobrunner.PipelineSignal{
+		PRState: "MERGED",
+	}
+	assert.True(t,
h.Match(sig)) +} + +func TestTickParent_Match_Bad_Open(t *testing.T) { + h := NewTickParentHandler() + sig := &jobrunner.PipelineSignal{ + PRState: "OPEN", + } + assert.False(t, h.Match(sig)) +} + +func TestTickParent_Execute_Good(t *testing.T) { + // Save and restore the original execCommand. + original := execCommand + defer func() { execCommand = original }() + + epicBody := "## Tasks\n- [x] #1\n- [ ] #7\n- [ ] #8\n" + var callCount int + var editArgs []string + var closeArgs []string + + execCommand = func(ctx context.Context, name string, args ...string) *exec.Cmd { + callCount++ + if callCount == 1 { + // First call: gh issue view — return the epic body. + return exec.CommandContext(ctx, "echo", "-n", epicBody) + } + if callCount == 2 { + // Second call: gh issue edit — capture args and succeed. + editArgs = append([]string{name}, args...) + return exec.CommandContext(ctx, "echo", "ok") + } + // Third call: gh issue close — capture args and succeed. + closeArgs = append([]string{name}, args...) + return exec.CommandContext(ctx, "echo", "ok") + } + + h := NewTickParentHandler() + sig := &jobrunner.PipelineSignal{ + RepoOwner: "host-uk", + RepoName: "core-php", + EpicNumber: 42, + ChildNumber: 7, + PRNumber: 99, + PRState: "MERGED", + } + + result, err := h.Execute(context.Background(), sig) + require.NoError(t, err) + + assert.True(t, result.Success) + assert.Equal(t, "tick_parent", result.Action) + assert.Equal(t, 3, callCount, "expected three exec calls: view + edit + close") + + // Verify the edit args contain the checked checkbox. + editJoined := strings.Join(editArgs, " ") + assert.Contains(t, editJoined, "issue") + assert.Contains(t, editJoined, "edit") + assert.Contains(t, editJoined, "42") + assert.Contains(t, editJoined, fmt.Sprintf("-R %s", sig.RepoFullName())) + assert.Contains(t, editJoined, "- [x] #7") + + // Verify the close args target the child issue. 
+ closeJoined := strings.Join(closeArgs, " ") + assert.Contains(t, closeJoined, "issue") + assert.Contains(t, closeJoined, "close") + assert.Contains(t, closeJoined, "7") + assert.Contains(t, closeJoined, "-R") + assert.Contains(t, closeJoined, "host-uk/core-php") +} diff --git a/pkg/jobrunner/journal.go b/pkg/jobrunner/journal.go new file mode 100644 index 0000000..b5ee9f5 --- /dev/null +++ b/pkg/jobrunner/journal.go @@ -0,0 +1,112 @@ +package jobrunner + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sync" +) + +// JournalEntry is a single line in the JSONL audit log. +type JournalEntry struct { + Timestamp string `json:"ts"` + Epic int `json:"epic"` + Child int `json:"child"` + PR int `json:"pr"` + Repo string `json:"repo"` + Action string `json:"action"` + Signals SignalSnapshot `json:"signals"` + Result ResultSnapshot `json:"result"` + Cycle int `json:"cycle"` +} + +// SignalSnapshot captures the structural state of a PR at the time of action. +type SignalSnapshot struct { + PRState string `json:"pr_state"` + IsDraft bool `json:"is_draft"` + CheckStatus string `json:"check_status"` + Mergeable string `json:"mergeable"` + ThreadsTotal int `json:"threads_total"` + ThreadsResolved int `json:"threads_resolved"` +} + +// ResultSnapshot captures the outcome of an action. +type ResultSnapshot struct { + Success bool `json:"success"` + Error string `json:"error,omitempty"` + DurationMs int64 `json:"duration_ms"` +} + +// Journal writes ActionResult entries to date-partitioned JSONL files. +type Journal struct { + baseDir string + mu sync.Mutex +} + +// NewJournal creates a new Journal rooted at baseDir. +func NewJournal(baseDir string) (*Journal, error) { + if baseDir == "" { + return nil, fmt.Errorf("journal base directory is required") + } + return &Journal{baseDir: baseDir}, nil +} + +// Append writes a journal entry for the given signal and result. 
+func (j *Journal) Append(signal *PipelineSignal, result *ActionResult) error {
+	if signal == nil {
+		return fmt.Errorf("signal is required")
+	}
+	if result == nil {
+		return fmt.Errorf("result is required")
+	}
+
+	entry := JournalEntry{
+		// Canonical RFC 3339 layout; with .UTC() applied the zone renders
+		// as "Z", matching the journal's on-disk timestamp format.
+		Timestamp: result.Timestamp.UTC().Format("2006-01-02T15:04:05Z07:00"),
+		Epic:      signal.EpicNumber,
+		Child:     signal.ChildNumber,
+		PR:        signal.PRNumber,
+		Repo:      signal.RepoFullName(),
+		Action:    result.Action,
+		Signals: SignalSnapshot{
+			PRState:         signal.PRState,
+			IsDraft:         signal.IsDraft,
+			CheckStatus:     signal.CheckStatus,
+			Mergeable:       signal.Mergeable,
+			ThreadsTotal:    signal.ThreadsTotal,
+			ThreadsResolved: signal.ThreadsResolved,
+		},
+		Result: ResultSnapshot{
+			Success:    result.Success,
+			Error:      result.Error,
+			DurationMs: result.Duration.Milliseconds(),
+		},
+		Cycle: result.Cycle,
+	}
+
+	data, err := json.Marshal(entry)
+	if err != nil {
+		return fmt.Errorf("marshal journal entry: %w", err)
+	}
+	data = append(data, '\n')
+
+	// Files are partitioned by owner/repo directory and UTC date.
+	date := result.Timestamp.UTC().Format("2006-01-02")
+	dir := filepath.Join(j.baseDir, signal.RepoOwner, signal.RepoName)
+
+	// Serialize writers so concurrent appends cannot interleave JSONL lines.
+	j.mu.Lock()
+	defer j.mu.Unlock()
+
+	if err := os.MkdirAll(dir, 0o755); err != nil {
+		return fmt.Errorf("create journal directory: %w", err)
+	}
+
+	path := filepath.Join(dir, date+".jsonl")
+	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
+	if err != nil {
+		return fmt.Errorf("open journal file: %w", err)
+	}
+	defer func() { _ = f.Close() }()
+
+	// Wrap the write error for consistency with every other failure path
+	// in this function (the original returned it bare).
+	if _, err := f.Write(data); err != nil {
+		return fmt.Errorf("write journal entry: %w", err)
+	}
+	return nil
+}
diff --git a/pkg/jobrunner/journal_test.go b/pkg/jobrunner/journal_test.go
new file mode 100644
index 0000000..dac14a3
--- /dev/null
+++ b/pkg/jobrunner/journal_test.go
@@ -0,0 +1,146 @@
+package jobrunner
+
+import (
+	"bufio"
+	"encoding/json"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestJournal_Append_Good(t *testing.T) {
+	dir := t.TempDir()
+
+	j, err :=
NewJournal(dir) + require.NoError(t, err) + + ts := time.Date(2026, 2, 5, 14, 30, 0, 0, time.UTC) + + signal := &PipelineSignal{ + EpicNumber: 10, + ChildNumber: 3, + PRNumber: 55, + RepoOwner: "host-uk", + RepoName: "core-tenant", + PRState: "OPEN", + IsDraft: false, + Mergeable: "MERGEABLE", + CheckStatus: "SUCCESS", + ThreadsTotal: 2, + ThreadsResolved: 1, + LastCommitSHA: "abc123", + LastCommitAt: ts, + LastReviewAt: ts, + } + + result := &ActionResult{ + Action: "merge", + RepoOwner: "host-uk", + RepoName: "core-tenant", + EpicNumber: 10, + ChildNumber: 3, + PRNumber: 55, + Success: true, + Timestamp: ts, + Duration: 1200 * time.Millisecond, + Cycle: 1, + } + + err = j.Append(signal, result) + require.NoError(t, err) + + // Read the file back. + expectedPath := filepath.Join(dir, "host-uk", "core-tenant", "2026-02-05.jsonl") + f, err := os.Open(expectedPath) + require.NoError(t, err) + defer func() { _ = f.Close() }() + + scanner := bufio.NewScanner(f) + require.True(t, scanner.Scan(), "expected at least one line in JSONL file") + + var entry JournalEntry + err = json.Unmarshal(scanner.Bytes(), &entry) + require.NoError(t, err) + + assert.Equal(t, "2026-02-05T14:30:00Z", entry.Timestamp) + assert.Equal(t, 10, entry.Epic) + assert.Equal(t, 3, entry.Child) + assert.Equal(t, 55, entry.PR) + assert.Equal(t, "host-uk/core-tenant", entry.Repo) + assert.Equal(t, "merge", entry.Action) + assert.Equal(t, 1, entry.Cycle) + + // Verify signal snapshot. + assert.Equal(t, "OPEN", entry.Signals.PRState) + assert.Equal(t, false, entry.Signals.IsDraft) + assert.Equal(t, "SUCCESS", entry.Signals.CheckStatus) + assert.Equal(t, "MERGEABLE", entry.Signals.Mergeable) + assert.Equal(t, 2, entry.Signals.ThreadsTotal) + assert.Equal(t, 1, entry.Signals.ThreadsResolved) + + // Verify result snapshot. 
+ assert.Equal(t, true, entry.Result.Success) + assert.Equal(t, "", entry.Result.Error) + assert.Equal(t, int64(1200), entry.Result.DurationMs) + + // Append a second entry and verify two lines exist. + result2 := &ActionResult{ + Action: "comment", + RepoOwner: "host-uk", + RepoName: "core-tenant", + Success: false, + Error: "rate limited", + Timestamp: ts, + Duration: 50 * time.Millisecond, + Cycle: 2, + } + err = j.Append(signal, result2) + require.NoError(t, err) + + data, err := os.ReadFile(expectedPath) + require.NoError(t, err) + + lines := 0 + sc := bufio.NewScanner(strings.NewReader(string(data))) + for sc.Scan() { + lines++ + } + assert.Equal(t, 2, lines, "expected two JSONL lines after two appends") +} + +func TestJournal_Append_Bad_NilSignal(t *testing.T) { + dir := t.TempDir() + + j, err := NewJournal(dir) + require.NoError(t, err) + + result := &ActionResult{ + Action: "merge", + Timestamp: time.Now(), + } + + err = j.Append(nil, result) + require.Error(t, err) + assert.Contains(t, err.Error(), "signal is required") +} + +func TestJournal_Append_Bad_NilResult(t *testing.T) { + dir := t.TempDir() + + j, err := NewJournal(dir) + require.NoError(t, err) + + signal := &PipelineSignal{ + RepoOwner: "host-uk", + RepoName: "core-php", + } + + err = j.Append(signal, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "result is required") +} diff --git a/pkg/jobrunner/poller.go b/pkg/jobrunner/poller.go new file mode 100644 index 0000000..d8440ff --- /dev/null +++ b/pkg/jobrunner/poller.go @@ -0,0 +1,195 @@ +package jobrunner + +import ( + "context" + "sync" + "time" + + "github.com/host-uk/core/pkg/log" +) + +// PollerConfig configures a Poller. +type PollerConfig struct { + Sources []JobSource + Handlers []JobHandler + Journal *Journal + PollInterval time.Duration + DryRun bool +} + +// Poller discovers signals from sources and dispatches them to handlers. 
+type Poller struct { + mu sync.RWMutex + sources []JobSource + handlers []JobHandler + journal *Journal + interval time.Duration + dryRun bool + cycle int +} + +// NewPoller creates a Poller from the given config. +func NewPoller(cfg PollerConfig) *Poller { + interval := cfg.PollInterval + if interval <= 0 { + interval = 60 * time.Second + } + + return &Poller{ + sources: cfg.Sources, + handlers: cfg.Handlers, + journal: cfg.Journal, + interval: interval, + dryRun: cfg.DryRun, + } +} + +// Cycle returns the number of completed poll-dispatch cycles. +func (p *Poller) Cycle() int { + p.mu.RLock() + defer p.mu.RUnlock() + return p.cycle +} + +// DryRun returns whether dry-run mode is enabled. +func (p *Poller) DryRun() bool { + p.mu.RLock() + defer p.mu.RUnlock() + return p.dryRun +} + +// SetDryRun enables or disables dry-run mode. +func (p *Poller) SetDryRun(v bool) { + p.mu.Lock() + p.dryRun = v + p.mu.Unlock() +} + +// AddSource appends a source to the poller. +func (p *Poller) AddSource(s JobSource) { + p.mu.Lock() + p.sources = append(p.sources, s) + p.mu.Unlock() +} + +// AddHandler appends a handler to the poller. +func (p *Poller) AddHandler(h JobHandler) { + p.mu.Lock() + p.handlers = append(p.handlers, h) + p.mu.Unlock() +} + +// Run starts a blocking poll-dispatch loop. It runs one cycle immediately, +// then repeats on each tick of the configured interval until the context +// is cancelled. +func (p *Poller) Run(ctx context.Context) error { + if err := p.RunOnce(ctx); err != nil { + return err + } + + ticker := time.NewTicker(p.interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + if err := p.RunOnce(ctx); err != nil { + return err + } + } + } +} + +// RunOnce performs a single poll-dispatch cycle: iterate sources, poll each, +// find the first matching handler for each signal, and execute it. 
+func (p *Poller) RunOnce(ctx context.Context) error {
+	// Snapshot mutable state under the lock so the dispatch loop below can
+	// run without holding it (handlers may be slow).
+	p.mu.Lock()
+	p.cycle++
+	cycle := p.cycle
+	dryRun := p.dryRun
+	sources := make([]JobSource, len(p.sources))
+	copy(sources, p.sources)
+	handlers := make([]JobHandler, len(p.handlers))
+	copy(handlers, p.handlers)
+	p.mu.Unlock()
+
+	log.Info("poller cycle starting", "cycle", cycle, "sources", len(sources), "handlers", len(handlers))
+
+	for _, src := range sources {
+		signals, err := src.Poll(ctx)
+		if err != nil {
+			// A failing source must not block the remaining sources.
+			log.Error("poll failed", "source", src.Name(), "err", err)
+			continue
+		}
+
+		log.Info("polled source", "source", src.Name(), "signals", len(signals))
+
+		for _, sig := range signals {
+			handler := p.findHandler(handlers, sig)
+			if handler == nil {
+				log.Debug("no matching handler", "epic", sig.EpicNumber, "child", sig.ChildNumber)
+				continue
+			}
+
+			if dryRun {
+				log.Info("dry-run: would execute",
+					"handler", handler.Name(),
+					"epic", sig.EpicNumber,
+					"child", sig.ChildNumber,
+					"pr", sig.PRNumber,
+				)
+				continue
+			}
+
+			start := time.Now()
+			result, err := handler.Execute(ctx, sig)
+			elapsed := time.Since(start)
+
+			if err != nil {
+				log.Error("handler execution failed",
+					"handler", handler.Name(),
+					"epic", sig.EpicNumber,
+					"child", sig.ChildNumber,
+					"err", err,
+				)
+				continue
+			}
+			if result == nil {
+				// Defensive guard: a handler returning (nil, nil) would
+				// otherwise panic on the field assignments below.
+				log.Error("handler returned nil result", "handler", handler.Name())
+				continue
+			}
+
+			// Enrich the result with dispatch metadata before persisting.
+			result.Cycle = cycle
+			result.EpicNumber = sig.EpicNumber
+			result.ChildNumber = sig.ChildNumber
+			result.Duration = elapsed
+
+			if p.journal != nil {
+				if jErr := p.journal.Append(sig, result); jErr != nil {
+					log.Error("journal append failed", "err", jErr)
+				}
+			}
+
+			if rErr := src.Report(ctx, result); rErr != nil {
+				log.Error("source report failed", "source", src.Name(), "err", rErr)
+			}
+
+			log.Info("handler executed",
+				"handler", handler.Name(),
+				"action", result.Action,
+				"success", result.Success,
+				"duration", elapsed,
+			)
+		}
+	}
+
+	return nil
+}
+
+// findHandler returns the first handler that matches the signal, or nil.
+func (p *Poller) findHandler(handlers []JobHandler, sig *PipelineSignal) JobHandler { + for _, h := range handlers { + if h.Match(sig) { + return h + } + } + return nil +} diff --git a/pkg/jobrunner/poller_test.go b/pkg/jobrunner/poller_test.go new file mode 100644 index 0000000..1d3a908 --- /dev/null +++ b/pkg/jobrunner/poller_test.go @@ -0,0 +1,307 @@ +package jobrunner + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// --- Mock source --- + +type mockSource struct { + name string + signals []*PipelineSignal + reports []*ActionResult + mu sync.Mutex +} + +func (m *mockSource) Name() string { return m.name } + +func (m *mockSource) Poll(_ context.Context) ([]*PipelineSignal, error) { + m.mu.Lock() + defer m.mu.Unlock() + return m.signals, nil +} + +func (m *mockSource) Report(_ context.Context, result *ActionResult) error { + m.mu.Lock() + defer m.mu.Unlock() + m.reports = append(m.reports, result) + return nil +} + +// --- Mock handler --- + +type mockHandler struct { + name string + matchFn func(*PipelineSignal) bool + executed []*PipelineSignal + mu sync.Mutex +} + +func (m *mockHandler) Name() string { return m.name } + +func (m *mockHandler) Match(sig *PipelineSignal) bool { + if m.matchFn != nil { + return m.matchFn(sig) + } + return true +} + +func (m *mockHandler) Execute(_ context.Context, sig *PipelineSignal) (*ActionResult, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.executed = append(m.executed, sig) + return &ActionResult{ + Action: m.name, + RepoOwner: sig.RepoOwner, + RepoName: sig.RepoName, + PRNumber: sig.PRNumber, + Success: true, + Timestamp: time.Now(), + }, nil +} + +func TestPoller_RunOnce_Good(t *testing.T) { + sig := &PipelineSignal{ + EpicNumber: 1, + ChildNumber: 2, + PRNumber: 10, + RepoOwner: "host-uk", + RepoName: "core-php", + PRState: "OPEN", + CheckStatus: "SUCCESS", + Mergeable: "MERGEABLE", + } + + src := &mockSource{ + name: 
"test-source", + signals: []*PipelineSignal{sig}, + } + + handler := &mockHandler{ + name: "test-handler", + matchFn: func(s *PipelineSignal) bool { + return s.PRNumber == 10 + }, + } + + p := NewPoller(PollerConfig{ + Sources: []JobSource{src}, + Handlers: []JobHandler{handler}, + }) + + err := p.RunOnce(context.Background()) + require.NoError(t, err) + + // Handler should have been called with our signal. + handler.mu.Lock() + defer handler.mu.Unlock() + require.Len(t, handler.executed, 1) + assert.Equal(t, 10, handler.executed[0].PRNumber) + + // Source should have received a report. + src.mu.Lock() + defer src.mu.Unlock() + require.Len(t, src.reports, 1) + assert.Equal(t, "test-handler", src.reports[0].Action) + assert.True(t, src.reports[0].Success) + assert.Equal(t, 1, src.reports[0].Cycle) + assert.Equal(t, 1, src.reports[0].EpicNumber) + assert.Equal(t, 2, src.reports[0].ChildNumber) + + // Cycle counter should have incremented. + assert.Equal(t, 1, p.Cycle()) +} + +func TestPoller_RunOnce_Good_NoSignals(t *testing.T) { + src := &mockSource{ + name: "empty-source", + signals: nil, + } + + handler := &mockHandler{ + name: "unused-handler", + } + + p := NewPoller(PollerConfig{ + Sources: []JobSource{src}, + Handlers: []JobHandler{handler}, + }) + + err := p.RunOnce(context.Background()) + require.NoError(t, err) + + // Handler should not have been called. + handler.mu.Lock() + defer handler.mu.Unlock() + assert.Empty(t, handler.executed) + + // Source should not have received reports. 
+ src.mu.Lock() + defer src.mu.Unlock() + assert.Empty(t, src.reports) + + assert.Equal(t, 1, p.Cycle()) +} + +func TestPoller_RunOnce_Good_NoMatchingHandler(t *testing.T) { + sig := &PipelineSignal{ + EpicNumber: 5, + ChildNumber: 8, + PRNumber: 42, + RepoOwner: "host-uk", + RepoName: "core-tenant", + PRState: "OPEN", + } + + src := &mockSource{ + name: "test-source", + signals: []*PipelineSignal{sig}, + } + + handler := &mockHandler{ + name: "picky-handler", + matchFn: func(s *PipelineSignal) bool { + return false // never matches + }, + } + + p := NewPoller(PollerConfig{ + Sources: []JobSource{src}, + Handlers: []JobHandler{handler}, + }) + + err := p.RunOnce(context.Background()) + require.NoError(t, err) + + // Handler should not have been called. + handler.mu.Lock() + defer handler.mu.Unlock() + assert.Empty(t, handler.executed) + + // Source should not have received reports (no action taken). + src.mu.Lock() + defer src.mu.Unlock() + assert.Empty(t, src.reports) +} + +func TestPoller_RunOnce_Good_DryRun(t *testing.T) { + sig := &PipelineSignal{ + EpicNumber: 1, + ChildNumber: 3, + PRNumber: 20, + RepoOwner: "host-uk", + RepoName: "core-admin", + PRState: "OPEN", + CheckStatus: "SUCCESS", + Mergeable: "MERGEABLE", + } + + src := &mockSource{ + name: "test-source", + signals: []*PipelineSignal{sig}, + } + + handler := &mockHandler{ + name: "merge-handler", + matchFn: func(s *PipelineSignal) bool { + return true + }, + } + + p := NewPoller(PollerConfig{ + Sources: []JobSource{src}, + Handlers: []JobHandler{handler}, + DryRun: true, + }) + + assert.True(t, p.DryRun()) + + err := p.RunOnce(context.Background()) + require.NoError(t, err) + + // Handler should NOT have been called in dry-run mode. + handler.mu.Lock() + defer handler.mu.Unlock() + assert.Empty(t, handler.executed) + + // Source should not have received reports. 
+ src.mu.Lock() + defer src.mu.Unlock() + assert.Empty(t, src.reports) +} + +func TestPoller_SetDryRun_Good(t *testing.T) { + p := NewPoller(PollerConfig{}) + + assert.False(t, p.DryRun()) + p.SetDryRun(true) + assert.True(t, p.DryRun()) + p.SetDryRun(false) + assert.False(t, p.DryRun()) +} + +func TestPoller_AddSourceAndHandler_Good(t *testing.T) { + p := NewPoller(PollerConfig{}) + + sig := &PipelineSignal{ + EpicNumber: 1, + ChildNumber: 1, + PRNumber: 5, + RepoOwner: "host-uk", + RepoName: "core-php", + PRState: "OPEN", + } + + src := &mockSource{ + name: "added-source", + signals: []*PipelineSignal{sig}, + } + + handler := &mockHandler{ + name: "added-handler", + matchFn: func(s *PipelineSignal) bool { return true }, + } + + p.AddSource(src) + p.AddHandler(handler) + + err := p.RunOnce(context.Background()) + require.NoError(t, err) + + handler.mu.Lock() + defer handler.mu.Unlock() + require.Len(t, handler.executed, 1) + assert.Equal(t, 5, handler.executed[0].PRNumber) +} + +func TestPoller_Run_Good(t *testing.T) { + src := &mockSource{ + name: "tick-source", + signals: nil, + } + + p := NewPoller(PollerConfig{ + Sources: []JobSource{src}, + PollInterval: 50 * time.Millisecond, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 180*time.Millisecond) + defer cancel() + + err := p.Run(ctx) + assert.ErrorIs(t, err, context.DeadlineExceeded) + + // Should have completed at least 2 cycles (one immediate + at least one tick). + assert.GreaterOrEqual(t, p.Cycle(), 2) +} + +func TestPoller_DefaultInterval_Good(t *testing.T) { + p := NewPoller(PollerConfig{}) + assert.Equal(t, 60*time.Second, p.interval) +} diff --git a/pkg/jobrunner/types.go b/pkg/jobrunner/types.go new file mode 100644 index 0000000..3d04da2 --- /dev/null +++ b/pkg/jobrunner/types.go @@ -0,0 +1,64 @@ +package jobrunner + +import ( + "context" + "time" +) + +// PipelineSignal is the structural snapshot of a child issue/PR. 
+// Never contains comment bodies or free text — structural signals only.
+type PipelineSignal struct {
+	EpicNumber      int
+	ChildNumber     int
+	PRNumber        int
+	RepoOwner       string
+	RepoName        string
+	PRState         string // OPEN, MERGED, CLOSED
+	IsDraft         bool
+	Mergeable       string // MERGEABLE, CONFLICTING, UNKNOWN
+	CheckStatus     string // SUCCESS, FAILURE, PENDING
+	ThreadsTotal    int
+	ThreadsResolved int
+	LastCommitSHA   string
+	LastCommitAt    time.Time
+	LastReviewAt    time.Time
+}
+
+// RepoFullName returns "owner/repo".
+func (s *PipelineSignal) RepoFullName() string {
+	return s.RepoOwner + "/" + s.RepoName
+}
+
+// HasUnresolvedThreads returns true if there are unresolved review threads.
+func (s *PipelineSignal) HasUnresolvedThreads() bool {
+	return s.ThreadsTotal > s.ThreadsResolved
+}
+
+// ActionResult carries the outcome of a handler execution.
+type ActionResult struct {
+	Action      string        `json:"action"`
+	RepoOwner   string        `json:"repo_owner"`
+	RepoName    string        `json:"repo_name"`
+	EpicNumber  int           `json:"epic"`
+	ChildNumber int           `json:"child"`
+	PRNumber    int           `json:"pr"`
+	Success     bool          `json:"success"`
+	Error       string        `json:"error,omitempty"`
+	Timestamp   time.Time     `json:"ts"`
+	// NOTE(review): time.Duration marshals as an int64 of NANOSECONDS, but
+	// the tag names the field "duration_ms" — JSON consumers will misread
+	// ns as ms. Journal.ResultSnapshot converts correctly via
+	// Milliseconds(); confirm whether this raw field is consumed anywhere
+	// before renaming the tag or adding a custom marshaler.
+	Duration    time.Duration `json:"duration_ms"`
+	Cycle       int           `json:"cycle"`
+}
+
+// JobSource discovers actionable work from an external system.
+type JobSource interface {
+	// Name identifies the source (e.g. for logs).
+	Name() string
+	// Poll returns the current batch of actionable signals.
+	Poll(ctx context.Context) ([]*PipelineSignal, error)
+	// Report delivers the outcome of an executed action back to the source.
+	Report(ctx context.Context, result *ActionResult) error
+}
+
+// JobHandler processes a single pipeline signal.
+type JobHandler interface { + Name() string + Match(signal *PipelineSignal) bool + Execute(ctx context.Context, signal *PipelineSignal) (*ActionResult, error) +} diff --git a/pkg/jobrunner/types_test.go b/pkg/jobrunner/types_test.go new file mode 100644 index 0000000..c81a840 --- /dev/null +++ b/pkg/jobrunner/types_test.go @@ -0,0 +1,98 @@ +package jobrunner + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPipelineSignal_RepoFullName_Good(t *testing.T) { + sig := &PipelineSignal{ + RepoOwner: "host-uk", + RepoName: "core-php", + } + assert.Equal(t, "host-uk/core-php", sig.RepoFullName()) +} + +func TestPipelineSignal_HasUnresolvedThreads_Good(t *testing.T) { + sig := &PipelineSignal{ + ThreadsTotal: 5, + ThreadsResolved: 3, + } + assert.True(t, sig.HasUnresolvedThreads()) +} + +func TestPipelineSignal_HasUnresolvedThreads_Bad_AllResolved(t *testing.T) { + sig := &PipelineSignal{ + ThreadsTotal: 4, + ThreadsResolved: 4, + } + assert.False(t, sig.HasUnresolvedThreads()) + + // Also verify zero threads is not unresolved. 
+ sigZero := &PipelineSignal{ + ThreadsTotal: 0, + ThreadsResolved: 0, + } + assert.False(t, sigZero.HasUnresolvedThreads()) +} + +func TestActionResult_JSON_Good(t *testing.T) { + ts := time.Date(2026, 2, 5, 12, 0, 0, 0, time.UTC) + result := &ActionResult{ + Action: "merge", + RepoOwner: "host-uk", + RepoName: "core-tenant", + EpicNumber: 42, + ChildNumber: 7, + PRNumber: 99, + Success: true, + Timestamp: ts, + Duration: 1500 * time.Millisecond, + Cycle: 3, + } + + data, err := json.Marshal(result) + require.NoError(t, err) + + var decoded map[string]any + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + assert.Equal(t, "merge", decoded["action"]) + assert.Equal(t, "host-uk", decoded["repo_owner"]) + assert.Equal(t, "core-tenant", decoded["repo_name"]) + assert.Equal(t, float64(42), decoded["epic"]) + assert.Equal(t, float64(7), decoded["child"]) + assert.Equal(t, float64(99), decoded["pr"]) + assert.Equal(t, true, decoded["success"]) + assert.Equal(t, float64(3), decoded["cycle"]) + + // Error field should be omitted when empty. + _, hasError := decoded["error"] + assert.False(t, hasError, "error field should be omitted when empty") + + // Verify round-trip with error field present. + resultWithErr := &ActionResult{ + Action: "merge", + RepoOwner: "host-uk", + RepoName: "core-tenant", + Success: false, + Error: "checks failing", + Timestamp: ts, + Duration: 200 * time.Millisecond, + Cycle: 1, + } + data2, err := json.Marshal(resultWithErr) + require.NoError(t, err) + + var decoded2 map[string]any + err = json.Unmarshal(data2, &decoded2) + require.NoError(t, err) + + assert.Equal(t, "checks failing", decoded2["error"]) + assert.Equal(t, false, decoded2["success"]) +}