From d2916db6403a55087e582bfbb67d656e5f8816f0 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 8 Feb 2026 13:25:06 +0000 Subject: [PATCH] feat: add Woodpecker CI pipeline and workspace improvements (#1) Co-authored-by: Claude Co-committed-by: Claude --- .forgejo/workflows/security-scan.yml | 50 ++ .gitleaks.toml | 10 + .woodpecker.yml | 21 + Taskfile.yml | 27 +- cmd/bugseti/go.mod | 5 + cmd/bugseti/go.sum | 10 +- cmd/bugseti/main.go | 2 + cmd/bugseti/workspace.go | 268 ++++++++ docs/examples/build-cpp.yaml | 83 +++ go.mod | 1 + go.sum | 2 + internal/cmd/forge/cmd_config.go | 106 ++++ internal/cmd/forge/cmd_forge.go | 53 ++ internal/cmd/forge/cmd_issues.go | 200 ++++++ internal/cmd/forge/cmd_labels.go | 120 ++++ internal/cmd/forge/cmd_migrate.go | 121 ++++ internal/cmd/forge/cmd_orgs.go | 66 ++ internal/cmd/forge/cmd_prs.go | 98 +++ internal/cmd/forge/cmd_repos.go | 94 +++ internal/cmd/forge/cmd_status.go | 63 ++ internal/cmd/forge/cmd_sync.go | 334 ++++++++++ internal/cmd/forge/helpers.go | 33 + internal/cmd/php/detect_test.go | 3 + internal/cmd/pkgcmd/cmd_pkg.go | 1 + internal/cmd/pkgcmd/cmd_remove.go | 144 +++++ internal/cmd/pkgcmd/cmd_remove_test.go | 92 +++ internal/cmd/workspace/cmd_agent.go | 288 +++++++++ internal/cmd/workspace/cmd_agent_test.go | 79 +++ internal/cmd/workspace/cmd_task.go | 466 ++++++++++++++ internal/cmd/workspace/cmd_task_test.go | 109 ++++ internal/cmd/workspace/cmd_workspace.go | 2 + internal/variants/full.go | 2 + pkg/build/build.go | 2 + pkg/build/buildcmd/cmd_project.go | 2 + pkg/build/builders/cpp.go | 253 ++++++++ pkg/build/builders/cpp_test.go | 149 +++++ pkg/build/discovery.go | 7 + pkg/build/discovery_test.go | 21 + pkg/build/testdata/cpp-project/CMakeLists.txt | 2 + pkg/cli/app.go | 34 +- pkg/forge/client.go | 37 ++ pkg/forge/config.go | 92 +++ pkg/forge/issues.go | 119 ++++ pkg/forge/labels.go | 60 ++ pkg/forge/meta.go | 144 +++++ pkg/forge/orgs.go | 51 ++ pkg/forge/repos.go | 96 +++ pkg/forge/webhooks.go | 41 ++ 
pkg/i18n/locales/en_GB.json | 16 +- pkg/io/datanode/client.go | 573 ++++++++++++++++++ pkg/io/datanode/client_test.go | 352 +++++++++++ pkg/release/config_test.go | 3 + pkg/release/release_test.go | 3 + 53 files changed, 4998 insertions(+), 12 deletions(-) create mode 100644 .forgejo/workflows/security-scan.yml create mode 100644 .gitleaks.toml create mode 100644 .woodpecker.yml create mode 100644 cmd/bugseti/workspace.go create mode 100644 docs/examples/build-cpp.yaml create mode 100644 internal/cmd/forge/cmd_config.go create mode 100644 internal/cmd/forge/cmd_forge.go create mode 100644 internal/cmd/forge/cmd_issues.go create mode 100644 internal/cmd/forge/cmd_labels.go create mode 100644 internal/cmd/forge/cmd_migrate.go create mode 100644 internal/cmd/forge/cmd_orgs.go create mode 100644 internal/cmd/forge/cmd_prs.go create mode 100644 internal/cmd/forge/cmd_repos.go create mode 100644 internal/cmd/forge/cmd_status.go create mode 100644 internal/cmd/forge/cmd_sync.go create mode 100644 internal/cmd/forge/helpers.go create mode 100644 internal/cmd/pkgcmd/cmd_remove.go create mode 100644 internal/cmd/pkgcmd/cmd_remove_test.go create mode 100644 internal/cmd/workspace/cmd_agent.go create mode 100644 internal/cmd/workspace/cmd_agent_test.go create mode 100644 internal/cmd/workspace/cmd_task.go create mode 100644 internal/cmd/workspace/cmd_task_test.go create mode 100644 pkg/build/builders/cpp.go create mode 100644 pkg/build/builders/cpp_test.go create mode 100644 pkg/build/testdata/cpp-project/CMakeLists.txt create mode 100644 pkg/forge/client.go create mode 100644 pkg/forge/config.go create mode 100644 pkg/forge/issues.go create mode 100644 pkg/forge/labels.go create mode 100644 pkg/forge/meta.go create mode 100644 pkg/forge/orgs.go create mode 100644 pkg/forge/repos.go create mode 100644 pkg/forge/webhooks.go create mode 100644 pkg/io/datanode/client.go create mode 100644 pkg/io/datanode/client_test.go diff --git a/.forgejo/workflows/security-scan.yml 
b/.forgejo/workflows/security-scan.yml new file mode 100644 index 0000000..7544d94 --- /dev/null +++ b/.forgejo/workflows/security-scan.yml @@ -0,0 +1,50 @@ +# Sovereign security scanning — no cloud dependencies +# Replaces: GitHub Dependabot, CodeQL, Advanced Security +# PCI DSS: Req 6.3.2 (code review), Req 11.3 (vulnerability scanning) + +name: Security Scan + +on: + push: + branches: [main, dev, 'feat/*'] + pull_request: + branches: [main] + +jobs: + govulncheck: + name: Go Vulnerability Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: '1.25' + - name: Install govulncheck + run: go install golang.org/x/vuln/cmd/govulncheck@latest + - name: Run govulncheck + run: govulncheck ./... + + gitleaks: + name: Secret Detection + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Install gitleaks + run: | + GITLEAKS_VERSION=$(curl -s https://api.github.com/repos/gitleaks/gitleaks/releases/latest | jq -r '.tag_name' | tr -d 'v') + curl -sL "https://github.com/gitleaks/gitleaks/releases/download/v${GITLEAKS_VERSION}/gitleaks_${GITLEAKS_VERSION}_linux_x64.tar.gz" | tar xz -C /usr/local/bin gitleaks + - name: Scan for secrets + run: gitleaks detect --source . --no-banner + + trivy: + name: Dependency & Config Scan + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Trivy + run: | + curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin + - name: Filesystem scan + run: trivy fs --scanners vuln,secret,misconfig --severity HIGH,CRITICAL --exit-code 1 . diff --git a/.gitleaks.toml b/.gitleaks.toml new file mode 100644 index 0000000..893d718 --- /dev/null +++ b/.gitleaks.toml @@ -0,0 +1,10 @@ +# Gitleaks configuration for host-uk/core +# Test fixtures contain private keys for cryptographic testing — not real secrets. 
+ +[allowlist] + description = "Test fixture allowlist" + paths = [ + '''pkg/crypt/pgp/pgp_test\.go''', + '''pkg/crypt/rsa/rsa_test\.go''', + '''pkg/crypt/openpgp/test_util\.go''', + ] diff --git a/.woodpecker.yml b/.woodpecker.yml new file mode 100644 index 0000000..7e1e7b2 --- /dev/null +++ b/.woodpecker.yml @@ -0,0 +1,21 @@ +when: + - event: [push, pull_request, manual] + +steps: + - name: build + image: golang:1.25-bookworm + commands: + - go version + - go mod download + - >- + go build + -ldflags "-X github.com/host-uk/core/pkg/cli.AppVersion=ci + -X github.com/host-uk/core/pkg/cli.BuildCommit=${CI_COMMIT_SHA:0:7} + -X github.com/host-uk/core/pkg/cli.BuildDate=$(date -u +%Y%m%d)" + -o ./bin/core . + - ./bin/core --version + + - name: test + image: golang:1.25-bookworm + commands: + - go test -short -count=1 -timeout 120s ./... diff --git a/Taskfile.yml b/Taskfile.yml index 1e26746..d3ceddb 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -1,14 +1,33 @@ version: '3' vars: - VERSION: - sh: git describe --tags --exact-match 2>/dev/null || echo "dev" - # Base ldflags for version injection - LDFLAGS_BASE: "-X github.com/host-uk/core/pkg/cli.AppVersion={{.VERSION}}" + # SemVer 2.0.0 build variables + SEMVER_TAG: + sh: git describe --tags --abbrev=0 2>/dev/null || echo "0.0.0" + SEMVER_VERSION: + sh: echo "{{.SEMVER_TAG}}" | sed 's/^v//' + SEMVER_COMMITS: + sh: git rev-list {{.SEMVER_TAG}}..HEAD --count 2>/dev/null || echo "0" + SEMVER_COMMIT: + sh: git rev-parse --short HEAD 2>/dev/null || echo "unknown" + SEMVER_DATE: + sh: date -u +%Y%m%d + SEMVER_PRERELEASE: + sh: '[ "{{.SEMVER_COMMITS}}" = "0" ] && echo "" || echo "dev.{{.SEMVER_COMMITS}}"' + # ldflags + PKG: "github.com/host-uk/core/pkg/cli" + LDFLAGS_BASE: >- + -X {{.PKG}}.AppVersion={{.SEMVER_VERSION}} + -X {{.PKG}}.BuildCommit={{.SEMVER_COMMIT}} + -X {{.PKG}}.BuildDate={{.SEMVER_DATE}} + -X {{.PKG}}.BuildPreRelease={{.SEMVER_PRERELEASE}} # Development build: includes debug info LDFLAGS: "{{.LDFLAGS_BASE}}" # 
Release build: strips debug info and symbol table for smaller binary LDFLAGS_RELEASE: "-s -w {{.LDFLAGS_BASE}}" + # Compat alias + VERSION: + sh: git describe --tags --exact-match 2>/dev/null || echo "dev" tasks: # --- CLI Management --- diff --git a/cmd/bugseti/go.mod b/cmd/bugseti/go.mod index 99cabc6..8d363e9 100644 --- a/cmd/bugseti/go.mod +++ b/cmd/bugseti/go.mod @@ -3,11 +3,15 @@ module github.com/host-uk/core/cmd/bugseti go 1.25.5 require ( + github.com/Snider/Borg v0.2.0 + github.com/host-uk/core v0.0.0 github.com/host-uk/core/internal/bugseti v0.0.0 github.com/host-uk/core/internal/bugseti/updater v0.0.0 github.com/wailsapp/wails/v3 v3.0.0-alpha.64 ) +replace github.com/host-uk/core => ../.. + replace github.com/host-uk/core/internal/bugseti => ../../internal/bugseti replace github.com/host-uk/core/internal/bugseti/updater => ../../internal/bugseti/updater @@ -16,6 +20,7 @@ require ( dario.cat/mergo v1.0.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.3.0 // indirect + github.com/Snider/Enchantrix v0.0.2 // indirect github.com/adrg/xdg v0.5.3 // indirect github.com/bep/debounce v1.2.1 // indirect github.com/cloudflare/circl v1.6.3 // indirect diff --git a/cmd/bugseti/go.sum b/cmd/bugseti/go.sum index 0e3453c..35a3244 100644 --- a/cmd/bugseti/go.sum +++ b/cmd/bugseti/go.sum @@ -5,6 +5,10 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= +github.com/Snider/Borg v0.2.0 h1:iCyDhY4WTXi39+FexRwXbn2YpZ2U9FUXVXDZk9xRCXQ= +github.com/Snider/Borg v0.2.0/go.mod h1:TqlKnfRo9okioHbgrZPfWjQsztBV0Nfskz4Om1/vdMY= +github.com/Snider/Enchantrix v0.0.2 h1:ExZQiBhfS/p/AHFTKhY80TOd+BXZjK95EzByAEgwvjs= 
+github.com/Snider/Enchantrix v0.0.2/go.mod h1:CtFcLAvnDT1KcuF1JBb/DJj0KplY8jHryO06KzQ1hsQ= github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= @@ -20,8 +24,9 @@ github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6p github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A= github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= @@ -86,8 +91,9 @@ github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmd github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= diff --git a/cmd/bugseti/main.go b/cmd/bugseti/main.go index 4e23dba..458f53a 100644 --- a/cmd/bugseti/main.go +++ b/cmd/bugseti/main.go @@ -44,6 +44,7 @@ func main() { seederService := bugseti.NewSeederService(configService) submitService := bugseti.NewSubmitService(configService, notifyService, statsService) versionService := bugseti.NewVersionService() + workspaceService := NewWorkspaceService(configService) // Initialize update service updateService, err := updater.NewService(configService) @@ -64,6 +65,7 @@ func main() { application.NewService(seederService), application.NewService(submitService), application.NewService(versionService), + application.NewService(workspaceService), application.NewService(trayService), } diff --git a/cmd/bugseti/workspace.go b/cmd/bugseti/workspace.go new file mode 100644 index 0000000..df2c02b --- /dev/null +++ b/cmd/bugseti/workspace.go @@ -0,0 +1,268 @@ +// Package main provides the BugSETI system tray application. +package main + +import ( + "fmt" + "io/fs" + "log" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Snider/Borg/pkg/tim" + "github.com/host-uk/core/internal/bugseti" + "github.com/host-uk/core/pkg/io/datanode" +) + +// WorkspaceService manages DataNode-backed workspaces for issues. +// Each issue gets a sandboxed in-memory filesystem that can be +// snapshotted, packaged as a TIM container, or shipped as a crash report. 
+type WorkspaceService struct { + config *bugseti.ConfigService + workspaces map[string]*Workspace // issue ID → workspace + mu sync.RWMutex +} + +// Workspace tracks a DataNode-backed workspace for an issue. +type Workspace struct { + Issue *bugseti.Issue `json:"issue"` + Medium *datanode.Medium + DiskPath string `json:"diskPath"` + CreatedAt time.Time `json:"createdAt"` + Snapshots int `json:"snapshots"` +} + +// CrashReport contains a packaged workspace state for debugging. +type CrashReport struct { + IssueID string `json:"issueId"` + Repo string `json:"repo"` + Number int `json:"number"` + Title string `json:"title"` + Error string `json:"error"` + Timestamp time.Time `json:"timestamp"` + Data []byte `json:"data"` // tar snapshot + Files int `json:"files"` + Size int64 `json:"size"` +} + +// NewWorkspaceService creates a new WorkspaceService. +func NewWorkspaceService(config *bugseti.ConfigService) *WorkspaceService { + return &WorkspaceService{ + config: config, + workspaces: make(map[string]*Workspace), + } +} + +// ServiceName returns the service name for Wails. +func (w *WorkspaceService) ServiceName() string { + return "WorkspaceService" +} + +// Capture loads a filesystem workspace into a DataNode Medium. +// Call this after git clone to create the in-memory snapshot. +func (w *WorkspaceService) Capture(issue *bugseti.Issue, diskPath string) error { + if issue == nil { + return fmt.Errorf("issue is nil") + } + + m := datanode.New() + + // Walk the filesystem and load all files into the DataNode + err := filepath.WalkDir(diskPath, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return nil // skip errors + } + + // Get relative path + rel, err := filepath.Rel(diskPath, path) + if err != nil { + return nil + } + if rel == "." 
{ + return nil + } + + // Skip .git internals (keep .git marker but not the pack files) + if rel == ".git" { + return fs.SkipDir + } + + if d.IsDir() { + return m.EnsureDir(rel) + } + + // Skip large files (>1MB) to keep DataNode lightweight + info, err := d.Info() + if err != nil || info.Size() > 1<<20 { + return nil + } + + content, err := os.ReadFile(path) + if err != nil { + return nil + } + return m.Write(rel, string(content)) + }) + if err != nil { + return fmt.Errorf("failed to capture workspace: %w", err) + } + + w.mu.Lock() + w.workspaces[issue.ID] = &Workspace{ + Issue: issue, + Medium: m, + DiskPath: diskPath, + CreatedAt: time.Now(), + } + w.mu.Unlock() + + log.Printf("Captured workspace for issue #%d (%s)", issue.Number, issue.Repo) + return nil +} + +// GetMedium returns the DataNode Medium for an issue's workspace. +func (w *WorkspaceService) GetMedium(issueID string) *datanode.Medium { + w.mu.RLock() + defer w.mu.RUnlock() + + ws := w.workspaces[issueID] + if ws == nil { + return nil + } + return ws.Medium +} + +// Snapshot takes a tar snapshot of the workspace. +func (w *WorkspaceService) Snapshot(issueID string) ([]byte, error) { + w.mu.Lock() + defer w.mu.Unlock() + + ws := w.workspaces[issueID] + if ws == nil { + return nil, fmt.Errorf("workspace not found: %s", issueID) + } + + data, err := ws.Medium.Snapshot() + if err != nil { + return nil, fmt.Errorf("snapshot failed: %w", err) + } + + ws.Snapshots++ + return data, nil +} + +// PackageCrashReport captures the current workspace state as a crash report. +// Re-reads from disk to get the latest state (including git changes). 
+func (w *WorkspaceService) PackageCrashReport(issue *bugseti.Issue, errMsg string) (*CrashReport, error) { + if issue == nil { + return nil, fmt.Errorf("issue is nil") + } + + w.mu.RLock() + ws := w.workspaces[issue.ID] + w.mu.RUnlock() + + var diskPath string + if ws != nil { + diskPath = ws.DiskPath + } else { + // Try to find the workspace on disk + baseDir := w.config.GetWorkspaceDir() + if baseDir == "" { + baseDir = filepath.Join(os.TempDir(), "bugseti") + } + diskPath = filepath.Join(baseDir, sanitizeForPath(issue.Repo), fmt.Sprintf("issue-%d", issue.Number)) + } + + // Re-capture from disk to get latest state + if err := w.Capture(issue, diskPath); err != nil { + return nil, fmt.Errorf("capture failed: %w", err) + } + + // Snapshot the captured workspace + data, err := w.Snapshot(issue.ID) + if err != nil { + return nil, fmt.Errorf("snapshot failed: %w", err) + } + + return &CrashReport{ + IssueID: issue.ID, + Repo: issue.Repo, + Number: issue.Number, + Title: issue.Title, + Error: errMsg, + Timestamp: time.Now(), + Data: data, + Size: int64(len(data)), + }, nil +} + +// PackageTIM wraps the workspace as a TIM container (runc-compatible bundle). +// The resulting TIM can be executed via runc or encrypted to .stim for transit. +func (w *WorkspaceService) PackageTIM(issueID string) (*tim.TerminalIsolationMatrix, error) { + w.mu.RLock() + ws := w.workspaces[issueID] + w.mu.RUnlock() + + if ws == nil { + return nil, fmt.Errorf("workspace not found: %s", issueID) + } + + dn := ws.Medium.DataNode() + return tim.FromDataNode(dn) +} + +// SaveCrashReport writes a crash report to the data directory. 
+func (w *WorkspaceService) SaveCrashReport(report *CrashReport) (string, error) { + dataDir := w.config.GetDataDir() + if dataDir == "" { + dataDir = filepath.Join(os.TempDir(), "bugseti") + } + + crashDir := filepath.Join(dataDir, "crash-reports") + if err := os.MkdirAll(crashDir, 0755); err != nil { + return "", fmt.Errorf("failed to create crash dir: %w", err) + } + + filename := fmt.Sprintf("crash-%s-issue-%d-%s.tar", + sanitizeForPath(report.Repo), + report.Number, + report.Timestamp.Format("20060102-150405"), + ) + path := filepath.Join(crashDir, filename) + + if err := os.WriteFile(path, report.Data, 0644); err != nil { + return "", fmt.Errorf("failed to write crash report: %w", err) + } + + log.Printf("Crash report saved: %s (%d bytes)", path, report.Size) + return path, nil +} + +// Release removes a workspace from memory. +func (w *WorkspaceService) Release(issueID string) { + w.mu.Lock() + delete(w.workspaces, issueID) + w.mu.Unlock() +} + +// ActiveWorkspaces returns the count of active workspaces. +func (w *WorkspaceService) ActiveWorkspaces() int { + w.mu.RLock() + defer w.mu.RUnlock() + return len(w.workspaces) +} + +// sanitizeForPath converts owner/repo to a safe directory name. 
+func sanitizeForPath(s string) string { + result := make([]byte, 0, len(s)) + for _, c := range s { + if c == '/' || c == '\\' || c == ':' { + result = append(result, '-') + } else { + result = append(result, byte(c)) + } + } + return string(result) +} diff --git a/docs/examples/build-cpp.yaml b/docs/examples/build-cpp.yaml new file mode 100644 index 0000000..3cee856 --- /dev/null +++ b/docs/examples/build-cpp.yaml @@ -0,0 +1,83 @@ +# Example: C++ Build Configuration +# CMake + Conan 2 project using host-uk/build system + +version: 1 + +project: + name: my-cpp-project + type: cpp + description: "A C++ application" + +cpp: + standard: 17 + build_type: Release + static: false + + # Conan package manager + conan: + version: "2.21.0" + requires: + - zlib/1.3.1 + - boost/1.85.0 + - openssl/3.2.0 + tool_requires: + - cmake/3.31.9 + options: + boost/*:without_test: true + registry: + url: http://forge.snider.dev:4000/api/packages/host-uk/conan + remote: conan_build + + # CMake settings + cmake: + minimum_version: "3.16" + variables: + USE_CCACHE: "ON" + presets: + - conan-release + - conan-debug + + # Optional project-specific build options + options: + testnet: false + +# Cross-compilation targets +targets: + - os: linux + arch: x86_64 + profile: gcc-linux-x86_64 + - os: linux + arch: arm64 + profile: gcc-linux-armv8 + - os: darwin + arch: arm64 + profile: apple-clang-armv8 + - os: darwin + arch: x86_64 + profile: apple-clang-x86_64 + - os: windows + arch: x86_64 + profile: msvc-194-x86_64 + +# Packaging +package: + generators: + - TGZ + - ZIP + vendor: host-uk + contact: developers@lethean.io + website: https://lt.hn + +# Docker output +docker: + dockerfile: .core/build/docker/Dockerfile + platforms: + - linux/amd64 + - linux/arm64 + tags: + - latest + - "{{.Version}}" + build_args: + BUILD_THREADS: auto + BUILD_STATIC: "0" + BUILD_TYPE: Release diff --git a/go.mod b/go.mod index ea9b957..7324523 100644 --- a/go.mod +++ b/go.mod @@ -31,6 +31,7 @@ require ( require ( 
aead.dev/minisign v0.3.0 // indirect cloud.google.com/go v0.123.0 // indirect + codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2 v2.2.0 // indirect dario.cat/mergo v1.0.2 // indirect github.com/42wim/httpsig v1.2.3 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/go.sum b/go.sum index 58a940c..5846a46 100644 --- a/go.sum +++ b/go.sum @@ -5,6 +5,8 @@ cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= code.gitea.io/sdk/gitea v0.23.2 h1:iJB1FDmLegwfwjX8gotBDHdPSbk/ZR8V9VmEJaVsJYg= code.gitea.io/sdk/gitea v0.23.2/go.mod h1:yyF5+GhljqvA30sRDreoyHILruNiy4ASufugzYg0VHM= +codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2 v2.2.0 h1:HTCWpzyWQOHDWt3LzI6/d2jvUDsw/vgGRWm/8BTvcqI= +codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2 v2.2.0/go.mod h1:ZglEEDj+qkxYUb+SQIeqGtFxQrbaMYqIOgahNKb7uxs= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= github.com/42wim/httpsig v1.2.3 h1:xb0YyWhkYj57SPtfSttIobJUPJZB9as1nsfo7KWVcEs= diff --git a/internal/cmd/forge/cmd_config.go b/internal/cmd/forge/cmd_config.go new file mode 100644 index 0000000..7dd5554 --- /dev/null +++ b/internal/cmd/forge/cmd_config.go @@ -0,0 +1,106 @@ +package forge + +import ( + "fmt" + + "github.com/host-uk/core/pkg/cli" + fg "github.com/host-uk/core/pkg/forge" +) + +// Config command flags. +var ( + configURL string + configToken string + configTest bool +) + +// addConfigCommand adds the 'config' subcommand for Forgejo connection setup. 
+func addConfigCommand(parent *cli.Command) { + cmd := &cli.Command{ + Use: "config", + Short: "Configure Forgejo connection", + Long: "Set the Forgejo instance URL and API token, or test the current connection.", + RunE: func(cmd *cli.Command, args []string) error { + return runConfig() + }, + } + + cmd.Flags().StringVar(&configURL, "url", "", "Forgejo instance URL") + cmd.Flags().StringVar(&configToken, "token", "", "Forgejo API token") + cmd.Flags().BoolVar(&configTest, "test", false, "Test the current connection") + + parent.AddCommand(cmd) +} + +func runConfig() error { + // If setting values, save them first + if configURL != "" || configToken != "" { + if err := fg.SaveConfig(configURL, configToken); err != nil { + return err + } + + if configURL != "" { + cli.Success(fmt.Sprintf("Forgejo URL set to %s", configURL)) + } + if configToken != "" { + cli.Success("Forgejo token saved") + } + } + + // If testing, verify the connection + if configTest { + return runConfigTest() + } + + // If no flags, show current config + if configURL == "" && configToken == "" && !configTest { + return showConfig() + } + + return nil +} + +func showConfig() error { + url, token, err := fg.ResolveConfig("", "") + if err != nil { + return err + } + + cli.Blank() + cli.Print(" %s %s\n", dimStyle.Render("URL:"), valueStyle.Render(url)) + + if token != "" { + masked := token + if len(token) >= 8 { + masked = token[:4] + "..." 
+ token[len(token)-4:] + } + cli.Print(" %s %s\n", dimStyle.Render("Token:"), valueStyle.Render(masked)) + } else { + cli.Print(" %s %s\n", dimStyle.Render("Token:"), warningStyle.Render("not set")) + } + + cli.Blank() + + return nil +} + +func runConfigTest() error { + client, err := fg.NewFromConfig(configURL, configToken) + if err != nil { + return err + } + + user, _, err := client.API().GetMyUserInfo() + if err != nil { + cli.Error("Connection failed") + return cli.WrapVerb(err, "connect to", "Forgejo") + } + + cli.Blank() + cli.Success(fmt.Sprintf("Connected to %s", client.URL())) + cli.Print(" %s %s\n", dimStyle.Render("User:"), valueStyle.Render(user.UserName)) + cli.Print(" %s %s\n", dimStyle.Render("Email:"), valueStyle.Render(user.Email)) + cli.Blank() + + return nil +} diff --git a/internal/cmd/forge/cmd_forge.go b/internal/cmd/forge/cmd_forge.go new file mode 100644 index 0000000..62aa33e --- /dev/null +++ b/internal/cmd/forge/cmd_forge.go @@ -0,0 +1,53 @@ +// Package forge provides CLI commands for managing a Forgejo instance. +// +// Commands: +// - config: Configure Forgejo connection (URL, token) +// - status: Show instance status and version +// - repos: List repositories +// - issues: List and create issues +// - prs: List pull requests +// - migrate: Migrate repos from external services +// - sync: Sync GitHub repos to Forgejo upstream branches +// - orgs: List organisations +// - labels: List and create labels +package forge + +import ( + "github.com/host-uk/core/pkg/cli" +) + +func init() { + cli.RegisterCommands(AddForgeCommands) +} + +// Style aliases from shared package. +var ( + successStyle = cli.SuccessStyle + errorStyle = cli.ErrorStyle + warningStyle = cli.WarningStyle + dimStyle = cli.DimStyle + valueStyle = cli.ValueStyle + repoStyle = cli.RepoStyle + numberStyle = cli.NumberStyle + infoStyle = cli.InfoStyle +) + +// AddForgeCommands registers the 'forge' command and all subcommands. 
+func AddForgeCommands(root *cli.Command) { + forgeCmd := &cli.Command{ + Use: "forge", + Short: "Forgejo instance management", + Long: "Manage repositories, issues, pull requests, and organisations on your Forgejo instance.", + } + root.AddCommand(forgeCmd) + + addConfigCommand(forgeCmd) + addStatusCommand(forgeCmd) + addReposCommand(forgeCmd) + addIssuesCommand(forgeCmd) + addPRsCommand(forgeCmd) + addMigrateCommand(forgeCmd) + addSyncCommand(forgeCmd) + addOrgsCommand(forgeCmd) + addLabelsCommand(forgeCmd) +} diff --git a/internal/cmd/forge/cmd_issues.go b/internal/cmd/forge/cmd_issues.go new file mode 100644 index 0000000..b66ef7c --- /dev/null +++ b/internal/cmd/forge/cmd_issues.go @@ -0,0 +1,200 @@ +package forge + +import ( + "fmt" + "strings" + + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/cli" + fg "github.com/host-uk/core/pkg/forge" +) + +// Issues command flags. +var ( + issuesState string + issuesTitle string + issuesBody string +) + +// addIssuesCommand adds the 'issues' subcommand for listing and creating issues. 
+func addIssuesCommand(parent *cli.Command) { + cmd := &cli.Command{ + Use: "issues [owner/repo]", + Short: "List and manage issues", + Long: "List issues for a repository, or list all open issues across all your repos.", + Args: cli.MaximumNArgs(1), + RunE: func(cmd *cli.Command, args []string) error { + if len(args) == 0 { + return runListAllIssues() + } + + owner, repo, err := splitOwnerRepo(args[0]) + if err != nil { + return err + } + + // If title is set, create an issue instead + if issuesTitle != "" { + return runCreateIssue(owner, repo) + } + + return runListIssues(owner, repo) + }, + } + + cmd.Flags().StringVar(&issuesState, "state", "open", "Filter by state (open, closed, all)") + cmd.Flags().StringVar(&issuesTitle, "title", "", "Create issue with this title") + cmd.Flags().StringVar(&issuesBody, "body", "", "Issue body (used with --title)") + + parent.AddCommand(cmd) +} + +func runListAllIssues() error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + // Collect all repos: user repos + all org repos, deduplicated + seen := make(map[string]bool) + var allRepos []*forgejo.Repository + + userRepos, err := client.ListUserRepos() + if err == nil { + for _, r := range userRepos { + if !seen[r.FullName] { + seen[r.FullName] = true + allRepos = append(allRepos, r) + } + } + } + + orgs, err := client.ListMyOrgs() + if err != nil { + return err + } + + for _, org := range orgs { + repos, err := client.ListOrgRepos(org.UserName) + if err != nil { + continue + } + for _, r := range repos { + if !seen[r.FullName] { + seen[r.FullName] = true + allRepos = append(allRepos, r) + } + } + } + + total := 0 + cli.Blank() + + for _, repo := range allRepos { + if repo.OpenIssues == 0 { + continue + } + + owner, name := repo.Owner.UserName, repo.Name + issues, err := client.ListIssues(owner, name, fg.ListIssuesOpts{ + State: issuesState, + }) + if err != nil || len(issues) == 0 { + continue + } + + cli.Print(" %s %s\n", 
repoStyle.Render(repo.FullName), dimStyle.Render(fmt.Sprintf("(%d)", len(issues)))) + for _, issue := range issues { + printForgeIssue(issue) + } + cli.Blank() + total += len(issues) + } + + if total == 0 { + cli.Text(fmt.Sprintf("No %s issues found.", issuesState)) + } else { + cli.Print(" %s\n", dimStyle.Render(fmt.Sprintf("%d %s issues total", total, issuesState))) + } + cli.Blank() + + return nil +} + +func runListIssues(owner, repo string) error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + issues, err := client.ListIssues(owner, repo, fg.ListIssuesOpts{ + State: issuesState, + }) + if err != nil { + return err + } + + if len(issues) == 0 { + cli.Text(fmt.Sprintf("No %s issues in %s/%s.", issuesState, owner, repo)) + return nil + } + + cli.Blank() + cli.Print(" %s\n\n", fmt.Sprintf("%d %s issues in %s/%s", len(issues), issuesState, owner, repo)) + + for _, issue := range issues { + printForgeIssue(issue) + } + + return nil +} + +func runCreateIssue(owner, repo string) error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + issue, err := client.CreateIssue(owner, repo, forgejo.CreateIssueOption{ + Title: issuesTitle, + Body: issuesBody, + }) + if err != nil { + return err + } + + cli.Blank() + cli.Success(fmt.Sprintf("Created issue #%d: %s", issue.Index, issue.Title)) + cli.Print(" %s %s\n", dimStyle.Render("URL:"), valueStyle.Render(issue.HTMLURL)) + cli.Blank() + + return nil +} + +func printForgeIssue(issue *forgejo.Issue) { + num := numberStyle.Render(fmt.Sprintf("#%d", issue.Index)) + title := valueStyle.Render(cli.Truncate(issue.Title, 60)) + + line := fmt.Sprintf(" %s %s", num, title) + + // Add labels + if len(issue.Labels) > 0 { + var labels []string + for _, l := range issue.Labels { + labels = append(labels, l.Name) + } + line += " " + warningStyle.Render("["+strings.Join(labels, ", ")+"]") + } + + // Add assignees + if len(issue.Assignees) > 0 { + var assignees []string + for _, a 
:= range issue.Assignees { + assignees = append(assignees, "@"+a.UserName) + } + line += " " + infoStyle.Render(strings.Join(assignees, ", ")) + } + + cli.Text(line) +} diff --git a/internal/cmd/forge/cmd_labels.go b/internal/cmd/forge/cmd_labels.go new file mode 100644 index 0000000..ada96c5 --- /dev/null +++ b/internal/cmd/forge/cmd_labels.go @@ -0,0 +1,120 @@ +package forge + +import ( + "fmt" + + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/cli" + fg "github.com/host-uk/core/pkg/forge" +) + +// Labels command flags. +var ( + labelsCreate string + labelsColor string + labelsRepo string +) + +// addLabelsCommand adds the 'labels' subcommand for listing and creating labels. +func addLabelsCommand(parent *cli.Command) { + cmd := &cli.Command{ + Use: "labels ", + Short: "List and manage labels", + Long: `List labels from an organisation's repos, or create a new label. + +Labels are listed from the first repo in the organisation. Use --repo to target a specific repo. + +Examples: + core forge labels Private-Host-UK + core forge labels Private-Host-UK --create "feature" --color "00aabb" + core forge labels Private-Host-UK --repo Enchantrix`, + Args: cli.ExactArgs(1), + RunE: func(cmd *cli.Command, args []string) error { + if labelsCreate != "" { + return runCreateLabel(args[0]) + } + return runListLabels(args[0]) + }, + } + + cmd.Flags().StringVar(&labelsCreate, "create", "", "Create a label with this name") + cmd.Flags().StringVar(&labelsColor, "color", "0075ca", "Label colour (hex, e.g. 
00aabb)") + cmd.Flags().StringVar(&labelsRepo, "repo", "", "Target a specific repo (default: first org repo)") + + parent.AddCommand(cmd) +} + +func runListLabels(org string) error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + var labels []*forgejo.Label + if labelsRepo != "" { + labels, err = client.ListRepoLabels(org, labelsRepo) + } else { + labels, err = client.ListOrgLabels(org) + } + if err != nil { + return err + } + + if len(labels) == 0 { + cli.Text("No labels found.") + return nil + } + + cli.Blank() + cli.Print(" %s\n\n", fmt.Sprintf("%d labels", len(labels))) + + table := cli.NewTable("Name", "Color", "Description") + + for _, l := range labels { + table.AddRow( + warningStyle.Render(l.Name), + dimStyle.Render("#"+l.Color), + cli.Truncate(l.Description, 50), + ) + } + + table.Render() + + return nil +} + +func runCreateLabel(org string) error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + // Determine target repo + repo := labelsRepo + if repo == "" { + repos, err := client.ListOrgRepos(org) + if err != nil { + return err + } + if len(repos) == 0 { + return cli.Err("no repos in org %s to create label on", org) + } + repo = repos[0].Name + org = repos[0].Owner.UserName + } + + label, err := client.CreateRepoLabel(org, repo, forgejo.CreateLabelOption{ + Name: labelsCreate, + Color: "#" + labelsColor, + }) + if err != nil { + return err + } + + cli.Blank() + cli.Success(fmt.Sprintf("Created label %q on %s/%s", label.Name, org, repo)) + cli.Blank() + + return nil +} diff --git a/internal/cmd/forge/cmd_migrate.go b/internal/cmd/forge/cmd_migrate.go new file mode 100644 index 0000000..a37e1a6 --- /dev/null +++ b/internal/cmd/forge/cmd_migrate.go @@ -0,0 +1,121 @@ +package forge + +import ( + "fmt" + + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/cli" + fg "github.com/host-uk/core/pkg/forge" +) + +// Migrate command flags. 
+var ( + migrateOrg string + migrateService string + migrateToken string + migrateMirror bool +) + +// addMigrateCommand adds the 'migrate' subcommand for importing repos from external services. +func addMigrateCommand(parent *cli.Command) { + cmd := &cli.Command{ + Use: "migrate ", + Short: "Migrate a repo from an external service", + Long: `Migrate a repository from GitHub, GitLab, Gitea, or other services into Forgejo. + +Unlike a simple mirror, migration imports issues, labels, pull requests, releases, and more. + +Examples: + core forge migrate https://github.com/owner/repo --org MyOrg --service github + core forge migrate https://gitea.example.com/owner/repo --service gitea --token TOKEN`, + Args: cli.ExactArgs(1), + RunE: func(cmd *cli.Command, args []string) error { + return runMigrate(args[0]) + }, + } + + cmd.Flags().StringVar(&migrateOrg, "org", "", "Forgejo organisation to migrate into (default: your user account)") + cmd.Flags().StringVar(&migrateService, "service", "github", "Source service type (github, gitlab, gitea, forgejo, gogs, git)") + cmd.Flags().StringVar(&migrateToken, "token", "", "Auth token for the source service") + cmd.Flags().BoolVar(&migrateMirror, "mirror", false, "Set up as a mirror (periodic sync)") + + parent.AddCommand(cmd) +} + +func runMigrate(cloneURL string) error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + // Determine target owner on Forgejo + targetOwner := migrateOrg + if targetOwner == "" { + user, _, err := client.API().GetMyUserInfo() + if err != nil { + return cli.WrapVerb(err, "get", "current user") + } + targetOwner = user.UserName + } + + // Extract repo name from clone URL + repoName := extractRepoName(cloneURL) + if repoName == "" { + return cli.Err("could not extract repo name from URL: %s", cloneURL) + } + + // Map service flag to SDK type + service := mapServiceType(migrateService) + + cli.Print(" Migrating %s -> %s/%s on Forgejo...\n", cloneURL, targetOwner, repoName) + 
+ opts := forgejo.MigrateRepoOption{ + RepoName: repoName, + RepoOwner: targetOwner, + CloneAddr: cloneURL, + Service: service, + Mirror: migrateMirror, + AuthToken: migrateToken, + Issues: true, + Labels: true, + PullRequests: true, + Releases: true, + Milestones: true, + Wiki: true, + Description: "Migrated from " + cloneURL, + } + + repo, err := client.MigrateRepo(opts) + if err != nil { + return err + } + + cli.Blank() + cli.Success(fmt.Sprintf("Migration complete: %s", repo.FullName)) + cli.Print(" %s %s\n", dimStyle.Render("URL:"), valueStyle.Render(repo.HTMLURL)) + cli.Print(" %s %s\n", dimStyle.Render("Clone:"), valueStyle.Render(repo.CloneURL)) + if migrateMirror { + cli.Print(" %s %s\n", dimStyle.Render("Type:"), dimStyle.Render("mirror (periodic sync)")) + } + cli.Blank() + + return nil +} + +func mapServiceType(s string) forgejo.GitServiceType { + switch s { + case "github": + return forgejo.GitServiceGithub + case "gitlab": + return forgejo.GitServiceGitlab + case "gitea": + return forgejo.GitServiceGitea + case "forgejo": + return forgejo.GitServiceForgejo + case "gogs": + return forgejo.GitServiceGogs + default: + return forgejo.GitServicePlain + } +} diff --git a/internal/cmd/forge/cmd_orgs.go b/internal/cmd/forge/cmd_orgs.go new file mode 100644 index 0000000..d33bc74 --- /dev/null +++ b/internal/cmd/forge/cmd_orgs.go @@ -0,0 +1,66 @@ +package forge + +import ( + "fmt" + + "github.com/host-uk/core/pkg/cli" + fg "github.com/host-uk/core/pkg/forge" +) + +// addOrgsCommand adds the 'orgs' subcommand for listing organisations. 
+func addOrgsCommand(parent *cli.Command) { + cmd := &cli.Command{ + Use: "orgs", + Short: "List organisations", + Long: "List all organisations the authenticated user belongs to.", + RunE: func(cmd *cli.Command, args []string) error { + return runOrgs() + }, + } + + parent.AddCommand(cmd) +} + +func runOrgs() error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + orgs, err := client.ListMyOrgs() + if err != nil { + return err + } + + if len(orgs) == 0 { + cli.Text("No organisations found.") + return nil + } + + cli.Blank() + cli.Print(" %s\n\n", fmt.Sprintf("%d organisations", len(orgs))) + + table := cli.NewTable("Name", "Visibility", "Description") + + for _, org := range orgs { + visibility := successStyle.Render(org.Visibility) + if org.Visibility == "private" { + visibility = warningStyle.Render(org.Visibility) + } + + desc := cli.Truncate(org.Description, 50) + if desc == "" { + desc = dimStyle.Render("-") + } + + table.AddRow( + repoStyle.Render(org.UserName), + visibility, + desc, + ) + } + + table.Render() + + return nil +} diff --git a/internal/cmd/forge/cmd_prs.go b/internal/cmd/forge/cmd_prs.go new file mode 100644 index 0000000..3be1951 --- /dev/null +++ b/internal/cmd/forge/cmd_prs.go @@ -0,0 +1,98 @@ +package forge + +import ( + "fmt" + "strings" + + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/cli" + fg "github.com/host-uk/core/pkg/forge" +) + +// PRs command flags. +var ( + prsState string +) + +// addPRsCommand adds the 'prs' subcommand for listing pull requests. 
+func addPRsCommand(parent *cli.Command) { + cmd := &cli.Command{ + Use: "prs ", + Short: "List pull requests", + Long: "List pull requests for a repository.", + Args: cli.ExactArgs(1), + RunE: func(cmd *cli.Command, args []string) error { + owner, repo, err := splitOwnerRepo(args[0]) + if err != nil { + return err + } + return runListPRs(owner, repo) + }, + } + + cmd.Flags().StringVar(&prsState, "state", "open", "Filter by state (open, closed, all)") + + parent.AddCommand(cmd) +} + +func runListPRs(owner, repo string) error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + prs, err := client.ListPullRequests(owner, repo, prsState) + if err != nil { + return err + } + + if len(prs) == 0 { + cli.Text(fmt.Sprintf("No %s pull requests in %s/%s.", prsState, owner, repo)) + return nil + } + + cli.Blank() + cli.Print(" %s\n\n", fmt.Sprintf("%d %s pull requests in %s/%s", len(prs), prsState, owner, repo)) + + for _, pr := range prs { + printForgePR(pr) + } + + return nil +} + +func printForgePR(pr *forgejo.PullRequest) { + num := numberStyle.Render(fmt.Sprintf("#%d", pr.Index)) + title := valueStyle.Render(cli.Truncate(pr.Title, 50)) + + var author string + if pr.Poster != nil { + author = infoStyle.Render("@" + pr.Poster.UserName) + } + + // Branch info + branch := dimStyle.Render(pr.Head.Ref + " -> " + pr.Base.Ref) + + // Merge status + var status string + if pr.HasMerged { + status = successStyle.Render("merged") + } else if pr.State == forgejo.StateClosed { + status = errorStyle.Render("closed") + } else { + status = warningStyle.Render("open") + } + + // Labels + var labelStr string + if len(pr.Labels) > 0 { + var labels []string + for _, l := range pr.Labels { + labels = append(labels, l.Name) + } + labelStr = " " + warningStyle.Render("["+strings.Join(labels, ", ")+"]") + } + + cli.Print(" %s %s %s %s %s%s\n", num, title, author, status, branch, labelStr) +} diff --git a/internal/cmd/forge/cmd_repos.go 
b/internal/cmd/forge/cmd_repos.go new file mode 100644 index 0000000..5b0ffc7 --- /dev/null +++ b/internal/cmd/forge/cmd_repos.go @@ -0,0 +1,94 @@ +package forge + +import ( + "fmt" + + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/cli" + fg "github.com/host-uk/core/pkg/forge" +) + +// Repos command flags. +var ( + reposOrg string + reposMirrors bool +) + +// addReposCommand adds the 'repos' subcommand for listing repositories. +func addReposCommand(parent *cli.Command) { + cmd := &cli.Command{ + Use: "repos", + Short: "List repositories", + Long: "List repositories from your Forgejo instance, optionally filtered by organisation or mirror status.", + RunE: func(cmd *cli.Command, args []string) error { + return runRepos() + }, + } + + cmd.Flags().StringVar(&reposOrg, "org", "", "Filter by organisation") + cmd.Flags().BoolVar(&reposMirrors, "mirrors", false, "Show only mirror repositories") + + parent.AddCommand(cmd) +} + +func runRepos() error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + var repos []*forgejo.Repository + if reposOrg != "" { + repos, err = client.ListOrgRepos(reposOrg) + } else { + repos, err = client.ListUserRepos() + } + if err != nil { + return err + } + + // Filter mirrors if requested + if reposMirrors { + var filtered []*forgejo.Repository + for _, r := range repos { + if r.Mirror { + filtered = append(filtered, r) + } + } + repos = filtered + } + + if len(repos) == 0 { + cli.Text("No repositories found.") + return nil + } + + // Build table + table := cli.NewTable("Name", "Type", "Visibility", "Stars") + + for _, r := range repos { + repoType := "source" + if r.Mirror { + repoType = "mirror" + } + + visibility := successStyle.Render("public") + if r.Private { + visibility = warningStyle.Render("private") + } + + table.AddRow( + repoStyle.Render(r.FullName), + dimStyle.Render(repoType), + visibility, + fmt.Sprintf("%d", r.Stars), + ) + } + + cli.Blank() + 
cli.Print(" %s\n\n", fmt.Sprintf("%d repositories", len(repos))) + table.Render() + + return nil +} diff --git a/internal/cmd/forge/cmd_status.go b/internal/cmd/forge/cmd_status.go new file mode 100644 index 0000000..8361950 --- /dev/null +++ b/internal/cmd/forge/cmd_status.go @@ -0,0 +1,63 @@ +package forge + +import ( + "fmt" + + "github.com/host-uk/core/pkg/cli" + fg "github.com/host-uk/core/pkg/forge" +) + +// addStatusCommand adds the 'status' subcommand for instance info. +func addStatusCommand(parent *cli.Command) { + cmd := &cli.Command{ + Use: "status", + Short: "Show Forgejo instance status", + Long: "Display Forgejo instance version, authenticated user, and summary counts.", + RunE: func(cmd *cli.Command, args []string) error { + return runStatus() + }, + } + + parent.AddCommand(cmd) +} + +func runStatus() error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + // Get server version + ver, _, err := client.API().ServerVersion() + if err != nil { + return cli.WrapVerb(err, "get", "server version") + } + + // Get authenticated user + user, _, err := client.API().GetMyUserInfo() + if err != nil { + return cli.WrapVerb(err, "get", "user info") + } + + // Get org count + orgs, err := client.ListMyOrgs() + if err != nil { + return cli.WrapVerb(err, "list", "organisations") + } + + // Get repo count + repos, err := client.ListUserRepos() + if err != nil { + return cli.WrapVerb(err, "list", "repositories") + } + + cli.Blank() + cli.Print(" %s %s\n", dimStyle.Render("Instance:"), valueStyle.Render(client.URL())) + cli.Print(" %s %s\n", dimStyle.Render("Version:"), valueStyle.Render(ver)) + cli.Print(" %s %s\n", dimStyle.Render("User:"), valueStyle.Render(user.UserName)) + cli.Print(" %s %s\n", dimStyle.Render("Orgs:"), numberStyle.Render(fmt.Sprintf("%d", len(orgs)))) + cli.Print(" %s %s\n", dimStyle.Render("Repos:"), numberStyle.Render(fmt.Sprintf("%d", len(repos)))) + cli.Blank() + + return nil +} diff --git 
a/internal/cmd/forge/cmd_sync.go b/internal/cmd/forge/cmd_sync.go new file mode 100644 index 0000000..93fc12a --- /dev/null +++ b/internal/cmd/forge/cmd_sync.go @@ -0,0 +1,334 @@ +package forge + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/cli" + fg "github.com/host-uk/core/pkg/forge" +) + +// Sync command flags. +var ( + syncOrg string + syncBasePath string + syncSetup bool +) + +// addSyncCommand adds the 'sync' subcommand for syncing GitHub repos to Forgejo upstream branches. +func addSyncCommand(parent *cli.Command) { + cmd := &cli.Command{ + Use: "sync [owner/repo...]", + Short: "Sync GitHub repos to Forgejo upstream branches", + Long: `Push local GitHub content to Forgejo as 'upstream' branches. + +Each repo gets: + - An 'upstream' branch tracking the GitHub default branch + - A 'main' branch (default) for private tasks, processes, and AI workflows + +Use --setup on first run to create the Forgejo repos and configure remotes. +Without --setup, updates existing upstream branches from local clones.`, + Args: cli.MinimumNArgs(0), + RunE: func(cmd *cli.Command, args []string) error { + return runSync(args) + }, + } + + cmd.Flags().StringVar(&syncOrg, "org", "Host-UK", "Forgejo organisation") + cmd.Flags().StringVar(&syncBasePath, "base-path", "~/Code/host-uk", "Base path for local repo clones") + cmd.Flags().BoolVar(&syncSetup, "setup", false, "Initial setup: create repos, configure remotes, push upstream branches") + + parent.AddCommand(cmd) +} + +// syncRepoEntry holds info for a repo to sync. 
+type syncRepoEntry struct { + name string + localPath string + defaultBranch string +} + +func runSync(args []string) error { + client, err := fg.NewFromConfig("", "") + if err != nil { + return err + } + + // Expand base path + basePath := syncBasePath + if strings.HasPrefix(basePath, "~/") { + home, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("failed to resolve home directory: %w", err) + } + basePath = filepath.Join(home, basePath[2:]) + } + + // Build repo list: either from args or from the Forgejo org + repos, err := buildSyncRepoList(client, args, basePath) + if err != nil { + return err + } + + if len(repos) == 0 { + cli.Text("No repos to sync.") + return nil + } + + forgeURL := client.URL() + + if syncSetup { + return runSyncSetup(client, repos, forgeURL) + } + + return runSyncUpdate(repos, forgeURL) +} + +func buildSyncRepoList(client *fg.Client, args []string, basePath string) ([]syncRepoEntry, error) { + var repos []syncRepoEntry + + if len(args) > 0 { + for _, arg := range args { + name := arg + if parts := strings.SplitN(arg, "/", 2); len(parts) == 2 { + name = parts[1] + } + localPath := filepath.Join(basePath, name) + branch := syncDetectDefaultBranch(localPath) + repos = append(repos, syncRepoEntry{ + name: name, + localPath: localPath, + defaultBranch: branch, + }) + } + } else { + orgRepos, err := client.ListOrgRepos(syncOrg) + if err != nil { + return nil, err + } + for _, r := range orgRepos { + localPath := filepath.Join(basePath, r.Name) + branch := syncDetectDefaultBranch(localPath) + repos = append(repos, syncRepoEntry{ + name: r.Name, + localPath: localPath, + defaultBranch: branch, + }) + } + } + + return repos, nil +} + +func runSyncSetup(client *fg.Client, repos []syncRepoEntry, forgeURL string) error { + cli.Blank() + cli.Print(" Setting up %d repos in %s with upstream branches...\n\n", len(repos), syncOrg) + + var succeeded, failed int + + for _, repo := range repos { + cli.Print(" %s %s\n", dimStyle.Render(">>"), 
repoStyle.Render(repo.name)) + + // Step 1: Delete existing repo if it exists + cli.Print(" Deleting existing repo... ") + err := client.DeleteRepo(syncOrg, repo.name) + if err != nil { + cli.Print("%s (may not exist)\n", dimStyle.Render("skipped")) + } else { + cli.Print("%s\n", successStyle.Render("done")) + } + + // Step 2: Create empty repo + cli.Print(" Creating repo... ") + _, err = client.CreateOrgRepo(syncOrg, forgejo.CreateRepoOption{ + Name: repo.name, + AutoInit: false, + DefaultBranch: "main", + }) + if err != nil { + cli.Print("%s\n", errorStyle.Render(err.Error())) + failed++ + continue + } + cli.Print("%s\n", successStyle.Render("done")) + + // Step 3: Add forge remote to local clone + cli.Print(" Configuring remote... ") + remoteURL := fmt.Sprintf("%s/%s/%s.git", forgeURL, syncOrg, repo.name) + err = syncConfigureForgeRemote(repo.localPath, remoteURL) + if err != nil { + cli.Print("%s\n", errorStyle.Render(err.Error())) + failed++ + continue + } + cli.Print("%s\n", successStyle.Render("done")) + + // Step 4: Push default branch as 'upstream' to Forgejo + cli.Print(" Pushing %s -> upstream... ", repo.defaultBranch) + err = syncPushUpstream(repo.localPath, repo.defaultBranch) + if err != nil { + cli.Print("%s\n", errorStyle.Render(err.Error())) + failed++ + continue + } + cli.Print("%s\n", successStyle.Render("done")) + + // Step 5: Create 'main' branch from 'upstream' on Forgejo + cli.Print(" Creating main branch... ") + err = syncCreateMainFromUpstream(client, syncOrg, repo.name) + if err != nil { + if strings.Contains(err.Error(), "already exists") || strings.Contains(err.Error(), "409") { + cli.Print("%s\n", dimStyle.Render("exists")) + } else { + cli.Print("%s\n", errorStyle.Render(err.Error())) + failed++ + continue + } + } else { + cli.Print("%s\n", successStyle.Render("done")) + } + + // Step 6: Set default branch to 'main' + cli.Print(" Setting default branch... 
") + _, _, err = client.API().EditRepo(syncOrg, repo.name, forgejo.EditRepoOption{ + DefaultBranch: strPtr("main"), + }) + if err != nil { + cli.Print("%s\n", warningStyle.Render(err.Error())) + } else { + cli.Print("%s\n", successStyle.Render("main")) + } + + succeeded++ + cli.Blank() + } + + cli.Print(" %s", successStyle.Render(fmt.Sprintf("%d repos set up", succeeded))) + if failed > 0 { + cli.Print(", %s", errorStyle.Render(fmt.Sprintf("%d failed", failed))) + } + cli.Blank() + + return nil +} + +func runSyncUpdate(repos []syncRepoEntry, forgeURL string) error { + cli.Blank() + cli.Print(" Syncing %d repos to %s upstream branches...\n\n", len(repos), syncOrg) + + var succeeded, failed int + + for _, repo := range repos { + cli.Print(" %s -> upstream ", repoStyle.Render(repo.name)) + + // Ensure remote exists + remoteURL := fmt.Sprintf("%s/%s/%s.git", forgeURL, syncOrg, repo.name) + _ = syncConfigureForgeRemote(repo.localPath, remoteURL) + + // Fetch latest from GitHub (origin) + err := syncGitFetch(repo.localPath, "origin") + if err != nil { + cli.Print("%s\n", errorStyle.Render("fetch failed: "+err.Error())) + failed++ + continue + } + + // Push to Forgejo upstream branch + err = syncPushUpstream(repo.localPath, repo.defaultBranch) + if err != nil { + cli.Print("%s\n", errorStyle.Render(err.Error())) + failed++ + continue + } + + cli.Print("%s\n", successStyle.Render("ok")) + succeeded++ + } + + cli.Blank() + cli.Print(" %s", successStyle.Render(fmt.Sprintf("%d synced", succeeded))) + if failed > 0 { + cli.Print(", %s", errorStyle.Render(fmt.Sprintf("%d failed", failed))) + } + cli.Blank() + + return nil +} + +func syncDetectDefaultBranch(path string) string { + out, err := exec.Command("git", "-C", path, "symbolic-ref", "refs/remotes/origin/HEAD").Output() + if err == nil { + ref := strings.TrimSpace(string(out)) + if parts := strings.Split(ref, "/"); len(parts) > 0 { + return parts[len(parts)-1] + } + } + + out, err = exec.Command("git", "-C", path, 
// syncConfigureForgeRemote ensures the local clone at localPath has a
// 'forge' remote pointing at remoteURL, adding it if missing or
// repointing it if the URL has changed.
func syncConfigureForgeRemote(localPath, remoteURL string) error {
	out, err := exec.Command("git", "-C", localPath, "remote", "get-url", "forge").Output()
	if err == nil {
		// Remote already exists; update only when the URL differs.
		if existing := strings.TrimSpace(string(out)); existing != remoteURL {
			if err := exec.Command("git", "-C", localPath, "remote", "set-url", "forge", remoteURL).Run(); err != nil {
				return fmt.Errorf("failed to update remote: %w", err)
			}
		}
		return nil
	}

	if err := exec.Command("git", "-C", localPath, "remote", "add", "forge", remoteURL).Run(); err != nil {
		return fmt.Errorf("failed to add remote: %w", err)
	}

	return nil
}

// syncPushUpstream force-pushes the tracked origin default branch to the
// 'upstream' branch on the forge remote.
func syncPushUpstream(localPath, defaultBranch string) error {
	refspec := fmt.Sprintf("refs/remotes/origin/%s:refs/heads/upstream", defaultBranch)
	cmd := exec.Command("git", "-C", localPath, "push", "--force", "forge", refspec)
	if output, err := cmd.CombinedOutput(); err != nil {
		// Wrap the underlying error as well as git's output: output can be
		// empty (e.g. missing git binary or bad path), and the original
		// fmt.Errorf("%s", output) then produced an empty error message.
		return fmt.Errorf("git push: %w: %s", err, strings.TrimSpace(string(output)))
	}
	return nil
}

// syncGitFetch runs 'git fetch <remote>' in localPath.
func syncGitFetch(localPath, remote string) error {
	cmd := exec.Command("git", "-C", localPath, "fetch", remote)
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("git fetch: %w: %s", err, strings.TrimSpace(string(output)))
	}
	return nil
}
// extractRepoName extracts a repository name from a clone URL.
// e.g. "https://github.com/owner/repo.git" -> "repo"
func extractRepoName(cloneURL string) string {
	// Last path segment with any .git suffix removed.
	base := strings.TrimSuffix(path.Base(cloneURL), ".git")
	switch base {
	case "", ".", "/":
		// path.Base yields these for empty or degenerate inputs.
		return ""
	default:
		return base
	}
}
No unpushed branches exist +// This prevents accidental data loss from agents or tools that might +// attempt to remove packages without cleaning up first. +package pkgcmd + +import ( + "errors" + "fmt" + "os/exec" + "path/filepath" + "strings" + + "github.com/host-uk/core/pkg/i18n" + coreio "github.com/host-uk/core/pkg/io" + "github.com/host-uk/core/pkg/repos" + "github.com/spf13/cobra" +) + +var removeForce bool + +func addPkgRemoveCommand(parent *cobra.Command) { + removeCmd := &cobra.Command{ + Use: "remove ", + Short: "Remove a package (with safety checks)", + Long: `Removes a package directory after verifying it has no uncommitted +changes or unpushed branches. Use --force to skip safety checks.`, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New(i18n.T("cmd.pkg.error.repo_required")) + } + return runPkgRemove(args[0], removeForce) + }, + } + + removeCmd.Flags().BoolVar(&removeForce, "force", false, "Skip safety checks (dangerous)") + + parent.AddCommand(removeCmd) +} + +func runPkgRemove(name string, force bool) error { + // Find package path via registry + regPath, err := repos.FindRegistry(coreio.Local) + if err != nil { + return errors.New(i18n.T("cmd.pkg.error.no_repos_yaml")) + } + + reg, err := repos.LoadRegistry(coreio.Local, regPath) + if err != nil { + return fmt.Errorf("%s: %w", i18n.T("i18n.fail.load", "registry"), err) + } + + basePath := reg.BasePath + if basePath == "" { + basePath = "." 
+ } + if !filepath.IsAbs(basePath) { + basePath = filepath.Join(filepath.Dir(regPath), basePath) + } + + repoPath := filepath.Join(basePath, name) + + if !coreio.Local.IsDir(filepath.Join(repoPath, ".git")) { + return fmt.Errorf("package %s is not installed at %s", name, repoPath) + } + + if !force { + blocked, reasons := checkRepoSafety(repoPath) + if blocked { + fmt.Printf("%s Cannot remove %s:\n", errorStyle.Render("Blocked:"), repoNameStyle.Render(name)) + for _, r := range reasons { + fmt.Printf(" %s %s\n", errorStyle.Render("·"), r) + } + fmt.Printf("\nResolve the issues above or use --force to override.\n") + return errors.New("package has unresolved changes") + } + } + + // Remove the directory + fmt.Printf("%s %s... ", dimStyle.Render("Removing"), repoNameStyle.Render(name)) + + if err := coreio.Local.DeleteAll(repoPath); err != nil { + fmt.Printf("%s\n", errorStyle.Render("x "+err.Error())) + return err + } + + fmt.Printf("%s\n", successStyle.Render("ok")) + return nil +} + +// checkRepoSafety checks a git repo for uncommitted changes and unpushed branches. 
// checkRepoSafety inspects the git repo at repoPath and reports reasons
// why removal would lose work: uncommitted changes, unpushed commits on
// the current branch, local branches unmerged into origin/HEAD, and
// stash entries. A failing git command (e.g. no upstream configured)
// silently skips that probe, matching best-effort semantics.
func checkRepoSafety(repoPath string) (blocked bool, reasons []string) {
	// run executes a git command in repoPath, returning trimmed stdout.
	run := func(args ...string) (string, error) {
		out, err := exec.Command("git", append([]string{"-C", repoPath}, args...)...).Output()
		return strings.TrimSpace(string(out)), err
	}
	countLines := func(s string) int { return len(strings.Split(s, "\n")) }

	// Staged, unstaged, and untracked files.
	if out, err := run("status", "--porcelain"); err == nil && out != "" {
		blocked = true
		reasons = append(reasons, fmt.Sprintf("has %d uncommitted changes", countLines(out)))
	}

	// Commits on the current branch ahead of its upstream.
	if out, err := run("log", "--oneline", "@{u}..HEAD"); err == nil && out != "" {
		blocked = true
		reasons = append(reasons, fmt.Sprintf("has %d unpushed commits on current branch", countLines(out)))
	}

	// Local branches not merged into origin/HEAD. The command's error is
	// deliberately ignored (origin/HEAD may be unset); any output counts,
	// exactly as before.
	if out, _ := run("branch", "--no-merged", "origin/HEAD"); out != "" {
		var unmerged []string
		for _, b := range strings.Split(out, "\n") {
			b = strings.TrimPrefix(strings.TrimSpace(b), "* ")
			if b != "" {
				unmerged = append(unmerged, b)
			}
		}
		if len(unmerged) > 0 {
			blocked = true
			reasons = append(reasons, fmt.Sprintf("has %d unmerged branches: %s",
				len(unmerged), strings.Join(unmerged, ", ")))
		}
	}

	// Stashed changes.
	if out, err := run("stash", "list"); err == nil && out != "" {
		blocked = true
		reasons = append(reasons, fmt.Sprintf("has %d stashed entries", countLines(out)))
	}

	return blocked, reasons
}
0000000..442a08e --- /dev/null +++ b/internal/cmd/pkgcmd/cmd_remove_test.go @@ -0,0 +1,92 @@ +package pkgcmd + +import ( + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func setupTestRepo(t *testing.T, dir, name string) string { + t.Helper() + repoPath := filepath.Join(dir, name) + require.NoError(t, os.MkdirAll(repoPath, 0755)) + + cmds := [][]string{ + {"git", "init"}, + {"git", "config", "user.email", "test@test.com"}, + {"git", "config", "user.name", "Test"}, + {"git", "commit", "--allow-empty", "-m", "initial"}, + } + for _, c := range cmds { + cmd := exec.Command(c[0], c[1:]...) + cmd.Dir = repoPath + out, err := cmd.CombinedOutput() + require.NoError(t, err, "cmd %v failed: %s", c, string(out)) + } + return repoPath +} + +func TestCheckRepoSafety_Clean(t *testing.T) { + tmp := t.TempDir() + repoPath := setupTestRepo(t, tmp, "clean-repo") + + blocked, reasons := checkRepoSafety(repoPath) + assert.False(t, blocked) + assert.Empty(t, reasons) +} + +func TestCheckRepoSafety_UncommittedChanges(t *testing.T) { + tmp := t.TempDir() + repoPath := setupTestRepo(t, tmp, "dirty-repo") + + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "new.txt"), []byte("data"), 0644)) + + blocked, reasons := checkRepoSafety(repoPath) + assert.True(t, blocked) + assert.NotEmpty(t, reasons) + assert.Contains(t, reasons[0], "uncommitted changes") +} + +func TestCheckRepoSafety_Stash(t *testing.T) { + tmp := t.TempDir() + repoPath := setupTestRepo(t, tmp, "stash-repo") + + // Create a file, add, stash + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "stash.txt"), []byte("data"), 0644)) + cmd := exec.Command("git", "add", ".") + cmd.Dir = repoPath + require.NoError(t, cmd.Run()) + + cmd = exec.Command("git", "stash") + cmd.Dir = repoPath + require.NoError(t, cmd.Run()) + + blocked, reasons := checkRepoSafety(repoPath) + assert.True(t, blocked) + found := false + for _, r := range 
reasons { + if assert.ObjectsAreEqual("stashed", "") || len(r) > 0 { + if contains(r, "stash") { + found = true + } + } + } + assert.True(t, found, "expected stash warning in reasons: %v", reasons) +} + +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(s) > 0 && containsStr(s, substr)) +} + +func containsStr(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/internal/cmd/workspace/cmd_agent.go b/internal/cmd/workspace/cmd_agent.go new file mode 100644 index 0000000..84a64cf --- /dev/null +++ b/internal/cmd/workspace/cmd_agent.go @@ -0,0 +1,288 @@ +// cmd_agent.go manages persistent agent context within task workspaces. +// +// Each agent gets a directory at: +// .core/workspace/p{epic}/i{issue}/agents/{provider}/{agent-name}/ +// +// This directory persists across invocations, allowing agents to build +// understanding over time — QA agents accumulate findings, reviewers +// track patterns, implementors record decisions. +// +// Layout: +// +// agents/ +// ├── claude-opus/implementor/ +// │ ├── memory.md # Persistent notes, decisions, context +// │ └── artifacts/ # Generated artifacts (reports, diffs, etc.) 
+// ├── claude-opus/qa/ +// │ ├── memory.md +// │ └── artifacts/ +// └── gemini/reviewer/ +// └── memory.md +package workspace + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/host-uk/core/pkg/cli" + coreio "github.com/host-uk/core/pkg/io" + "github.com/spf13/cobra" +) + +var ( + agentProvider string + agentName string +) + +func addAgentCommands(parent *cobra.Command) { + agentCmd := &cobra.Command{ + Use: "agent", + Short: "Manage persistent agent context within task workspaces", + } + + initCmd := &cobra.Command{ + Use: "init ", + Short: "Initialize an agent's context directory in the task workspace", + Long: `Creates agents/{provider}/{agent-name}/ with memory.md and artifacts/ +directory. The agent can read/write memory.md across invocations to +build understanding over time.`, + Args: cobra.ExactArgs(1), + RunE: runAgentInit, + } + initCmd.Flags().IntVar(&taskEpic, "epic", 0, "Epic/project number") + initCmd.Flags().IntVar(&taskIssue, "issue", 0, "Issue number") + _ = initCmd.MarkFlagRequired("epic") + _ = initCmd.MarkFlagRequired("issue") + + agentListCmd := &cobra.Command{ + Use: "list", + Short: "List agents in a task workspace", + RunE: runAgentList, + } + agentListCmd.Flags().IntVar(&taskEpic, "epic", 0, "Epic/project number") + agentListCmd.Flags().IntVar(&taskIssue, "issue", 0, "Issue number") + _ = agentListCmd.MarkFlagRequired("epic") + _ = agentListCmd.MarkFlagRequired("issue") + + pathCmd := &cobra.Command{ + Use: "path ", + Short: "Print the agent's context directory path", + Args: cobra.ExactArgs(1), + RunE: runAgentPath, + } + pathCmd.Flags().IntVar(&taskEpic, "epic", 0, "Epic/project number") + pathCmd.Flags().IntVar(&taskIssue, "issue", 0, "Issue number") + _ = pathCmd.MarkFlagRequired("epic") + _ = pathCmd.MarkFlagRequired("issue") + + agentCmd.AddCommand(initCmd, agentListCmd, pathCmd) + parent.AddCommand(agentCmd) +} + +// agentContextPath returns the path for an agent's context directory. 
+func agentContextPath(wsPath, provider, name string) string { + return filepath.Join(wsPath, "agents", provider, name) +} + +// parseAgentID splits "provider/agent-name" into parts. +func parseAgentID(id string) (provider, name string, err error) { + parts := strings.SplitN(id, "/", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("agent ID must be provider/agent-name (e.g. claude-opus/qa)") + } + return parts[0], parts[1], nil +} + +// AgentManifest tracks agent metadata for a task workspace. +type AgentManifest struct { + Provider string `json:"provider"` + Name string `json:"name"` + CreatedAt time.Time `json:"created_at"` + LastSeen time.Time `json:"last_seen"` +} + +func runAgentInit(cmd *cobra.Command, args []string) error { + provider, name, err := parseAgentID(args[0]) + if err != nil { + return err + } + + root, err := FindWorkspaceRoot() + if err != nil { + return cli.Err("not in a workspace") + } + + wsPath := taskWorkspacePath(root, taskEpic, taskIssue) + if !coreio.Local.IsDir(wsPath) { + return cli.Err("task workspace does not exist: p%d/i%d — create it first with `core workspace task create`", taskEpic, taskIssue) + } + + agentDir := agentContextPath(wsPath, provider, name) + + if coreio.Local.IsDir(agentDir) { + // Update last_seen + updateAgentManifest(agentDir, provider, name) + cli.Print("Agent %s/%s already initialized at p%d/i%d\n", + cli.ValueStyle.Render(provider), cli.ValueStyle.Render(name), taskEpic, taskIssue) + cli.Print("Path: %s\n", cli.DimStyle.Render(agentDir)) + return nil + } + + // Create directory structure + if err := coreio.Local.EnsureDir(agentDir); err != nil { + return fmt.Errorf("failed to create agent directory: %w", err) + } + if err := coreio.Local.EnsureDir(filepath.Join(agentDir, "artifacts")); err != nil { + return fmt.Errorf("failed to create artifacts directory: %w", err) + } + + // Create initial memory.md + memoryContent := fmt.Sprintf(`# %s/%s — Issue #%d (EPIC #%d) + +## 
Context +- **Task workspace:** p%d/i%d +- **Initialized:** %s + +## Notes + + +`, provider, name, taskIssue, taskEpic, taskEpic, taskIssue, time.Now().Format(time.RFC3339)) + + if err := coreio.Local.Write(filepath.Join(agentDir, "memory.md"), memoryContent); err != nil { + return fmt.Errorf("failed to create memory.md: %w", err) + } + + // Write manifest + updateAgentManifest(agentDir, provider, name) + + cli.Print("%s Agent %s/%s initialized at p%d/i%d\n", + cli.SuccessStyle.Render("Done:"), + cli.ValueStyle.Render(provider), cli.ValueStyle.Render(name), + taskEpic, taskIssue) + cli.Print("Memory: %s\n", cli.DimStyle.Render(filepath.Join(agentDir, "memory.md"))) + + return nil +} + +func runAgentList(cmd *cobra.Command, args []string) error { + root, err := FindWorkspaceRoot() + if err != nil { + return cli.Err("not in a workspace") + } + + wsPath := taskWorkspacePath(root, taskEpic, taskIssue) + agentsDir := filepath.Join(wsPath, "agents") + + if !coreio.Local.IsDir(agentsDir) { + cli.Println("No agents in this workspace.") + return nil + } + + providers, err := coreio.Local.List(agentsDir) + if err != nil { + return fmt.Errorf("failed to list agents: %w", err) + } + + found := false + for _, providerEntry := range providers { + if !providerEntry.IsDir() { + continue + } + providerDir := filepath.Join(agentsDir, providerEntry.Name()) + agents, err := coreio.Local.List(providerDir) + if err != nil { + continue + } + + for _, agentEntry := range agents { + if !agentEntry.IsDir() { + continue + } + found = true + agentDir := filepath.Join(providerDir, agentEntry.Name()) + + // Read manifest for last_seen + lastSeen := "" + manifestPath := filepath.Join(agentDir, "manifest.json") + if data, err := coreio.Local.Read(manifestPath); err == nil { + var m AgentManifest + if json.Unmarshal([]byte(data), &m) == nil { + lastSeen = m.LastSeen.Format("2006-01-02 15:04") + } + } + + // Check if memory has content beyond the template + memorySize := "" + if content, err := 
coreio.Local.Read(filepath.Join(agentDir, "memory.md")); err == nil { + lines := len(strings.Split(content, "\n")) + memorySize = fmt.Sprintf("%d lines", lines) + } + + cli.Print(" %s/%s %s", + cli.ValueStyle.Render(providerEntry.Name()), + cli.ValueStyle.Render(agentEntry.Name()), + cli.DimStyle.Render(memorySize)) + if lastSeen != "" { + cli.Print(" last: %s", cli.DimStyle.Render(lastSeen)) + } + cli.Print("\n") + } + } + + if !found { + cli.Println("No agents in this workspace.") + } + + return nil +} + +func runAgentPath(cmd *cobra.Command, args []string) error { + provider, name, err := parseAgentID(args[0]) + if err != nil { + return err + } + + root, err := FindWorkspaceRoot() + if err != nil { + return cli.Err("not in a workspace") + } + + wsPath := taskWorkspacePath(root, taskEpic, taskIssue) + agentDir := agentContextPath(wsPath, provider, name) + + if !coreio.Local.IsDir(agentDir) { + return cli.Err("agent %s/%s not initialized — run `core workspace agent init %s/%s`", provider, name, provider, name) + } + + // Print just the path (useful for scripting: cd $(core workspace agent path ...)) + cli.Text(agentDir) + return nil +} + +func updateAgentManifest(agentDir, provider, name string) { + now := time.Now() + manifest := AgentManifest{ + Provider: provider, + Name: name, + CreatedAt: now, + LastSeen: now, + } + + // Try to preserve created_at from existing manifest + manifestPath := filepath.Join(agentDir, "manifest.json") + if data, err := coreio.Local.Read(manifestPath); err == nil { + var existing AgentManifest + if json.Unmarshal([]byte(data), &existing) == nil { + manifest.CreatedAt = existing.CreatedAt + } + } + + data, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return + } + _ = coreio.Local.Write(manifestPath, string(data)) +} diff --git a/internal/cmd/workspace/cmd_agent_test.go b/internal/cmd/workspace/cmd_agent_test.go new file mode 100644 index 0000000..e414cb0 --- /dev/null +++ b/internal/cmd/workspace/cmd_agent_test.go 
@@ -0,0 +1,79 @@ +package workspace + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseAgentID_Good(t *testing.T) { + provider, name, err := parseAgentID("claude-opus/qa") + require.NoError(t, err) + assert.Equal(t, "claude-opus", provider) + assert.Equal(t, "qa", name) +} + +func TestParseAgentID_Bad(t *testing.T) { + tests := []string{ + "noslash", + "/missing-provider", + "missing-name/", + "", + } + for _, id := range tests { + _, _, err := parseAgentID(id) + assert.Error(t, err, "expected error for: %q", id) + } +} + +func TestAgentContextPath(t *testing.T) { + path := agentContextPath("/ws/p101/i343", "claude-opus", "qa") + assert.Equal(t, "/ws/p101/i343/agents/claude-opus/qa", path) +} + +func TestUpdateAgentManifest_Good(t *testing.T) { + tmp := t.TempDir() + agentDir := filepath.Join(tmp, "agents", "test-provider", "test-agent") + require.NoError(t, os.MkdirAll(agentDir, 0755)) + + updateAgentManifest(agentDir, "test-provider", "test-agent") + + data, err := os.ReadFile(filepath.Join(agentDir, "manifest.json")) + require.NoError(t, err) + + var m AgentManifest + require.NoError(t, json.Unmarshal(data, &m)) + assert.Equal(t, "test-provider", m.Provider) + assert.Equal(t, "test-agent", m.Name) + assert.False(t, m.CreatedAt.IsZero()) + assert.False(t, m.LastSeen.IsZero()) +} + +func TestUpdateAgentManifest_PreservesCreatedAt(t *testing.T) { + tmp := t.TempDir() + agentDir := filepath.Join(tmp, "agents", "p", "a") + require.NoError(t, os.MkdirAll(agentDir, 0755)) + + // First call sets created_at + updateAgentManifest(agentDir, "p", "a") + + data, err := os.ReadFile(filepath.Join(agentDir, "manifest.json")) + require.NoError(t, err) + var first AgentManifest + require.NoError(t, json.Unmarshal(data, &first)) + + // Second call should preserve created_at + updateAgentManifest(agentDir, "p", "a") + + data, err = os.ReadFile(filepath.Join(agentDir, 
"manifest.json")) + require.NoError(t, err) + var second AgentManifest + require.NoError(t, json.Unmarshal(data, &second)) + + assert.Equal(t, first.CreatedAt, second.CreatedAt) + assert.True(t, second.LastSeen.After(first.CreatedAt) || second.LastSeen.Equal(first.CreatedAt)) +} diff --git a/internal/cmd/workspace/cmd_task.go b/internal/cmd/workspace/cmd_task.go new file mode 100644 index 0000000..fcb0b83 --- /dev/null +++ b/internal/cmd/workspace/cmd_task.go @@ -0,0 +1,466 @@ +// cmd_task.go implements task workspace isolation using git worktrees. +// +// Each task gets an isolated workspace at .core/workspace/p{epic}/i{issue}/ +// containing git worktrees of required repos. This prevents agents from +// writing to the implementor's working tree. +// +// Safety checks enforce that workspaces cannot be removed if they contain +// uncommitted changes or unpushed branches. +package workspace + +import ( + "context" + "errors" + "fmt" + "os/exec" + "path/filepath" + "strconv" + "strings" + + "github.com/host-uk/core/pkg/cli" + coreio "github.com/host-uk/core/pkg/io" + "github.com/host-uk/core/pkg/repos" + "github.com/spf13/cobra" +) + +var ( + taskEpic int + taskIssue int + taskRepos []string + taskForce bool + taskBranch string +) + +func addTaskCommands(parent *cobra.Command) { + taskCmd := &cobra.Command{ + Use: "task", + Short: "Manage isolated task workspaces for agents", + } + + createCmd := &cobra.Command{ + Use: "create", + Short: "Create an isolated task workspace with git worktrees", + Long: `Creates a workspace at .core/workspace/p{epic}/i{issue}/ with git +worktrees for each specified repo. 
Each worktree gets a fresh branch +(issue/{id} by default) so agents work in isolation.`, + RunE: runTaskCreate, + } + createCmd.Flags().IntVar(&taskEpic, "epic", 0, "Epic/project number") + createCmd.Flags().IntVar(&taskIssue, "issue", 0, "Issue number") + createCmd.Flags().StringSliceVar(&taskRepos, "repo", nil, "Repos to include (default: all from registry)") + createCmd.Flags().StringVar(&taskBranch, "branch", "", "Branch name (default: issue/{issue})") + _ = createCmd.MarkFlagRequired("epic") + _ = createCmd.MarkFlagRequired("issue") + + removeCmd := &cobra.Command{ + Use: "remove", + Short: "Remove a task workspace (with safety checks)", + Long: `Removes a task workspace after checking for uncommitted changes and +unpushed branches. Use --force to skip safety checks.`, + RunE: runTaskRemove, + } + removeCmd.Flags().IntVar(&taskEpic, "epic", 0, "Epic/project number") + removeCmd.Flags().IntVar(&taskIssue, "issue", 0, "Issue number") + removeCmd.Flags().BoolVar(&taskForce, "force", false, "Skip safety checks") + _ = removeCmd.MarkFlagRequired("epic") + _ = removeCmd.MarkFlagRequired("issue") + + listCmd := &cobra.Command{ + Use: "list", + Short: "List all task workspaces", + RunE: runTaskList, + } + + statusCmd := &cobra.Command{ + Use: "status", + Short: "Show status of a task workspace", + RunE: runTaskStatus, + } + statusCmd.Flags().IntVar(&taskEpic, "epic", 0, "Epic/project number") + statusCmd.Flags().IntVar(&taskIssue, "issue", 0, "Issue number") + _ = statusCmd.MarkFlagRequired("epic") + _ = statusCmd.MarkFlagRequired("issue") + + addAgentCommands(taskCmd) + + taskCmd.AddCommand(createCmd, removeCmd, listCmd, statusCmd) + parent.AddCommand(taskCmd) +} + +// taskWorkspacePath returns the path for a task workspace. 
// taskWorkspacePath returns the path for a task workspace:
// {root}/.core/workspace/p{epic}/i{issue}.
func taskWorkspacePath(root string, epic, issue int) string {
	return filepath.Join(root, ".core", "workspace", fmt.Sprintf("p%d", epic), fmt.Sprintf("i%d", issue))
}

// runTaskCreate builds a task workspace: one git worktree per repo, each on
// the requested branch (issue/{issue} by default). Repos that are not
// cloned locally, or whose worktree creation fails, are skipped and counted
// rather than aborting the whole run.
func runTaskCreate(cmd *cobra.Command, args []string) error {
	ctx := context.Background()
	root, err := FindWorkspaceRoot()
	if err != nil {
		return cli.Err("not in a workspace — run from workspace root or a package directory")
	}

	wsPath := taskWorkspacePath(root, taskEpic, taskIssue)

	// Refuse to clobber an existing workspace for the same epic/issue.
	if coreio.Local.IsDir(wsPath) {
		return cli.Err("task workspace already exists: %s", wsPath)
	}

	branch := taskBranch
	if branch == "" {
		branch = fmt.Sprintf("issue/%d", taskIssue)
	}

	// Determine repos to include: explicit --repo flags win, otherwise fall
	// back to the workspace registry.
	repoNames := taskRepos
	if len(repoNames) == 0 {
		repoNames, err = registryRepoNames(root)
		if err != nil {
			return fmt.Errorf("failed to load registry: %w", err)
		}
	}

	if len(repoNames) == 0 {
		return cli.Err("no repos specified and no registry found")
	}

	// Resolve the packages directory; config may override the default, and
	// relative values are anchored at the workspace root.
	config, _ := LoadConfig(root)
	pkgDir := "./packages"
	if config != nil && config.PackagesDir != "" {
		pkgDir = config.PackagesDir
	}
	if !filepath.IsAbs(pkgDir) {
		pkgDir = filepath.Join(root, pkgDir)
	}

	if err := coreio.Local.EnsureDir(wsPath); err != nil {
		return fmt.Errorf("failed to create workspace directory: %w", err)
	}

	cli.Print("Creating task workspace: %s\n", cli.ValueStyle.Render(fmt.Sprintf("p%d/i%d", taskEpic, taskIssue)))
	cli.Print("Branch: %s\n", cli.ValueStyle.Render(branch))
	cli.Print("Path: %s\n\n", cli.DimStyle.Render(wsPath))

	var created, skipped int
	for _, repoName := range repoNames {
		repoPath := filepath.Join(pkgDir, repoName)
		// A repo listed in the registry but never cloned has no .git to
		// attach a worktree to — skip it.
		if !coreio.Local.IsDir(filepath.Join(repoPath, ".git")) {
			cli.Print(" %s %s (not cloned, skipping)\n", cli.DimStyle.Render("·"), repoName)
			skipped++
			continue
		}

		worktreePath := filepath.Join(wsPath, repoName)
		// Progress line without trailing newline; completed below by
		// either "ok" or the error text.
		cli.Print(" %s %s... ", cli.DimStyle.Render("·"), repoName)

		if err := createWorktree(ctx, repoPath, worktreePath, branch); err != nil {
			cli.Print("%s\n", cli.ErrorStyle.Render("x "+err.Error()))
			skipped++
			continue
		}

		cli.Print("%s\n", cli.SuccessStyle.Render("ok"))
		created++
	}

	cli.Print("\n%s %d worktrees created", cli.SuccessStyle.Render("Done:"), created)
	if skipped > 0 {
		cli.Print(", %d skipped", skipped)
	}
	cli.Print("\n")

	return nil
}

// runTaskRemove deletes a task workspace. Unless --force is given, it
// first refuses if any worktree holds uncommitted changes or unpushed
// commits; it then detaches each worktree from its source repo before
// deleting the directory tree.
func runTaskRemove(cmd *cobra.Command, args []string) error {
	root, err := FindWorkspaceRoot()
	if err != nil {
		return cli.Err("not in a workspace")
	}

	wsPath := taskWorkspacePath(root, taskEpic, taskIssue)
	if !coreio.Local.IsDir(wsPath) {
		return cli.Err("task workspace does not exist: p%d/i%d", taskEpic, taskIssue)
	}

	if !taskForce {
		dirty, reasons := checkWorkspaceSafety(wsPath)
		if dirty {
			cli.Print("%s Cannot remove workspace p%d/i%d:\n", cli.ErrorStyle.Render("Blocked:"), taskEpic, taskIssue)
			for _, r := range reasons {
				cli.Print(" %s %s\n", cli.ErrorStyle.Render("·"), r)
			}
			cli.Print("\nUse --force to override or resolve the issues first.\n")
			return errors.New("workspace has unresolved changes")
		}
	}

	// Remove worktrees first (so git knows they're gone)
	entries, err := coreio.Local.List(wsPath)
	if err != nil {
		return fmt.Errorf("failed to list workspace: %w", err)
	}

	// Same packages-dir resolution as runTaskCreate: config override plus
	// workspace-root anchoring for relative paths.
	config, _ := LoadConfig(root)
	pkgDir := "./packages"
	if config != nil && config.PackagesDir != "" {
		pkgDir = config.PackagesDir
	}
	if !filepath.IsAbs(pkgDir) {
		pkgDir = filepath.Join(root, pkgDir)
	}

	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		worktreePath := filepath.Join(wsPath, entry.Name())
		repoPath := filepath.Join(pkgDir, entry.Name())

		// Remove worktree from git (only when the source repo still exists)
		if coreio.Local.IsDir(filepath.Join(repoPath, ".git")) {
			removeWorktree(repoPath, worktreePath)
		}
	}

	// Remove the workspace directory
	if err := coreio.Local.DeleteAll(wsPath); err != nil {
		return fmt.Errorf("failed to remove workspace directory: %w", err)
	}

	// Clean up empty parent (p{epic}/) if it's now empty; best-effort, so
	// the error is deliberately ignored.
	epicDir := filepath.Dir(wsPath)
	if entries, err := coreio.Local.List(epicDir); err == nil && len(entries) == 0 {
		coreio.Local.DeleteAll(epicDir)
	}

	cli.Print("%s Removed workspace p%d/i%d\n", cli.SuccessStyle.Render("Done:"), taskEpic, taskIssue)
	return nil
}

// runTaskList prints one line per p{epic}/i{issue} workspace found under
// .core/workspace/, with a repo count and a clean/dirty safety verdict.
func runTaskList(cmd *cobra.Command, args []string) error {
	root, err := FindWorkspaceRoot()
	if err != nil {
		return cli.Err("not in a workspace")
	}

	wsRoot := filepath.Join(root, ".core", "workspace")
	if !coreio.Local.IsDir(wsRoot) {
		cli.Println("No task workspaces found.")
		return nil
	}

	epics, err := coreio.Local.List(wsRoot)
	if err != nil {
		return fmt.Errorf("failed to list workspaces: %w", err)
	}

	found := false
	for _, epicEntry := range epics {
		// Only p{N}/i{N} directories are workspaces; anything else under
		// .core/workspace is ignored.
		if !epicEntry.IsDir() || !strings.HasPrefix(epicEntry.Name(), "p") {
			continue
		}
		epicDir := filepath.Join(wsRoot, epicEntry.Name())
		issues, err := coreio.Local.List(epicDir)
		if err != nil {
			continue
		}
		for _, issueEntry := range issues {
			if !issueEntry.IsDir() || !strings.HasPrefix(issueEntry.Name(), "i") {
				continue
			}
			found = true
			wsPath := filepath.Join(epicDir, issueEntry.Name())

			// Count worktrees (each repo is one subdirectory)
			entries, _ := coreio.Local.List(wsPath)
			dirCount := 0
			for _, e := range entries {
				if e.IsDir() {
					dirCount++
				}
			}

			// Check safety: any uncommitted/unpushed work marks it dirty
			dirty, _ := checkWorkspaceSafety(wsPath)
			status := cli.SuccessStyle.Render("clean")
			if dirty {
				status = cli.ErrorStyle.Render("dirty")
			}

			cli.Print(" %s/%s %d repos %s\n",
				epicEntry.Name(), issueEntry.Name(),
				dirCount, status)
		}
	}

	if !found {
		cli.Println("No task workspaces found.")
	}

	return nil
}

// runTaskStatus prints, for each worktree in one workspace: its current
// branch, a clean/N-changes label, and an unpushed-commit count when the
// branch is ahead of its upstream.
func runTaskStatus(cmd *cobra.Command, args []string) error {
	root, err := FindWorkspaceRoot()
	if err != nil {
		return cli.Err("not in a workspace")
	}

	wsPath := taskWorkspacePath(root, taskEpic, taskIssue)
	if !coreio.Local.IsDir(wsPath) {
		return cli.Err("task workspace does not exist: p%d/i%d", taskEpic, taskIssue)
	}

	cli.Print("Workspace: %s\n", cli.ValueStyle.Render(fmt.Sprintf("p%d/i%d", taskEpic, taskIssue)))
	cli.Print("Path: %s\n\n", cli.DimStyle.Render(wsPath))

	entries, err := coreio.Local.List(wsPath)
	if err != nil {
		return fmt.Errorf("failed to list workspace: %w", err)
	}

	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		worktreePath := filepath.Join(wsPath, entry.Name())

		// Get branch
		branch := gitOutput(worktreePath, "rev-parse", "--abbrev-ref", "HEAD")
		branch = strings.TrimSpace(branch)

		// Get status: one porcelain line per changed/untracked file
		status := gitOutput(worktreePath, "status", "--porcelain")
		statusLabel := cli.SuccessStyle.Render("clean")
		if strings.TrimSpace(status) != "" {
			lines := len(strings.Split(strings.TrimSpace(status), "\n"))
			statusLabel = cli.ErrorStyle.Render(fmt.Sprintf("%d changes", lines))
		}

		// Get unpushed commits (empty when there is no upstream)
		unpushed := gitOutput(worktreePath, "log", "--oneline", "@{u}..HEAD")
		unpushedLabel := ""
		if trimmed := strings.TrimSpace(unpushed); trimmed != "" {
			count := len(strings.Split(trimmed, "\n"))
			unpushedLabel = cli.WarningStyle.Render(fmt.Sprintf(" %d unpushed", count))
		}

		cli.Print(" %s %s %s%s\n",
			cli.RepoStyle.Render(entry.Name()),
			cli.DimStyle.Render(branch),
			statusLabel,
			unpushedLabel)
	}

	return nil
}
+func createWorktree(ctx context.Context, repoPath, worktreePath, branch string) error { + // Check if branch exists on remote first + cmd := exec.CommandContext(ctx, "git", "worktree", "add", "-b", branch, worktreePath) + cmd.Dir = repoPath + output, err := cmd.CombinedOutput() + if err != nil { + errStr := strings.TrimSpace(string(output)) + // If branch already exists, try without -b + if strings.Contains(errStr, "already exists") { + cmd = exec.CommandContext(ctx, "git", "worktree", "add", worktreePath, branch) + cmd.Dir = repoPath + output, err = cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("%s", strings.TrimSpace(string(output))) + } + return nil + } + return fmt.Errorf("%s", errStr) + } + return nil +} + +// removeWorktree removes a git worktree. +func removeWorktree(repoPath, worktreePath string) { + cmd := exec.Command("git", "worktree", "remove", worktreePath) + cmd.Dir = repoPath + _ = cmd.Run() + + // Prune stale worktrees + cmd = exec.Command("git", "worktree", "prune") + cmd.Dir = repoPath + _ = cmd.Run() +} + +// checkWorkspaceSafety checks all worktrees in a workspace for uncommitted/unpushed changes. 
+func checkWorkspaceSafety(wsPath string) (dirty bool, reasons []string) { + entries, err := coreio.Local.List(wsPath) + if err != nil { + return false, nil + } + + for _, entry := range entries { + if !entry.IsDir() { + continue + } + worktreePath := filepath.Join(wsPath, entry.Name()) + + // Check for uncommitted changes + status := gitOutput(worktreePath, "status", "--porcelain") + if strings.TrimSpace(status) != "" { + dirty = true + reasons = append(reasons, fmt.Sprintf("%s: has uncommitted changes", entry.Name())) + } + + // Check for unpushed commits + unpushed := gitOutput(worktreePath, "log", "--oneline", "@{u}..HEAD") + if strings.TrimSpace(unpushed) != "" { + dirty = true + count := len(strings.Split(strings.TrimSpace(unpushed), "\n")) + reasons = append(reasons, fmt.Sprintf("%s: %d unpushed commits", entry.Name(), count)) + } + } + + return dirty, reasons +} + +// gitOutput runs a git command and returns stdout. +func gitOutput(dir string, args ...string) string { + cmd := exec.Command("git", args...) + cmd.Dir = dir + out, _ := cmd.Output() + return string(out) +} + +// registryRepoNames returns repo names from the workspace registry. +func registryRepoNames(root string) ([]string, error) { + // Try to find repos.yaml + regPath, err := repos.FindRegistry(coreio.Local) + if err != nil { + return nil, err + } + + reg, err := repos.LoadRegistry(coreio.Local, regPath) + if err != nil { + return nil, err + } + + var names []string + for _, repo := range reg.List() { + // Only include cloneable repos + if repo.Clone != nil && !*repo.Clone { + continue + } + // Skip meta repos + if repo.Type == "meta" { + continue + } + names = append(names, repo.Name) + } + + return names, nil +} + +// epicBranchName returns the branch name for an EPIC. 
+func epicBranchName(epicID int) string { + return "epic/" + strconv.Itoa(epicID) +} diff --git a/internal/cmd/workspace/cmd_task_test.go b/internal/cmd/workspace/cmd_task_test.go new file mode 100644 index 0000000..6340470 --- /dev/null +++ b/internal/cmd/workspace/cmd_task_test.go @@ -0,0 +1,109 @@ +package workspace + +import ( + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func setupTestRepo(t *testing.T, dir, name string) string { + t.Helper() + repoPath := filepath.Join(dir, name) + require.NoError(t, os.MkdirAll(repoPath, 0755)) + + cmds := [][]string{ + {"git", "init"}, + {"git", "config", "user.email", "test@test.com"}, + {"git", "config", "user.name", "Test"}, + {"git", "commit", "--allow-empty", "-m", "initial"}, + } + for _, c := range cmds { + cmd := exec.Command(c[0], c[1:]...) + cmd.Dir = repoPath + out, err := cmd.CombinedOutput() + require.NoError(t, err, "cmd %v failed: %s", c, string(out)) + } + return repoPath +} + +func TestTaskWorkspacePath(t *testing.T) { + path := taskWorkspacePath("/home/user/Code/host-uk", 101, 343) + assert.Equal(t, "/home/user/Code/host-uk/.core/workspace/p101/i343", path) +} + +func TestCreateWorktree_Good(t *testing.T) { + tmp := t.TempDir() + repoPath := setupTestRepo(t, tmp, "test-repo") + worktreePath := filepath.Join(tmp, "workspace", "test-repo") + + err := createWorktree(t.Context(), repoPath, worktreePath, "issue/123") + require.NoError(t, err) + + // Verify worktree exists + assert.DirExists(t, worktreePath) + assert.FileExists(t, filepath.Join(worktreePath, ".git")) + + // Verify branch + branch := gitOutput(worktreePath, "rev-parse", "--abbrev-ref", "HEAD") + assert.Equal(t, "issue/123", trimNL(branch)) +} + +func TestCreateWorktree_BranchExists(t *testing.T) { + tmp := t.TempDir() + repoPath := setupTestRepo(t, tmp, "test-repo") + + // Create branch first + cmd := exec.Command("git", "branch", "issue/456") + cmd.Dir = 
repoPath + require.NoError(t, cmd.Run()) + + worktreePath := filepath.Join(tmp, "workspace", "test-repo") + err := createWorktree(t.Context(), repoPath, worktreePath, "issue/456") + require.NoError(t, err) + + assert.DirExists(t, worktreePath) +} + +func TestCheckWorkspaceSafety_Clean(t *testing.T) { + tmp := t.TempDir() + wsPath := filepath.Join(tmp, "workspace") + require.NoError(t, os.MkdirAll(wsPath, 0755)) + + repoPath := setupTestRepo(t, tmp, "origin-repo") + worktreePath := filepath.Join(wsPath, "origin-repo") + require.NoError(t, createWorktree(t.Context(), repoPath, worktreePath, "test-branch")) + + dirty, reasons := checkWorkspaceSafety(wsPath) + assert.False(t, dirty) + assert.Empty(t, reasons) +} + +func TestCheckWorkspaceSafety_Dirty(t *testing.T) { + tmp := t.TempDir() + wsPath := filepath.Join(tmp, "workspace") + require.NoError(t, os.MkdirAll(wsPath, 0755)) + + repoPath := setupTestRepo(t, tmp, "origin-repo") + worktreePath := filepath.Join(wsPath, "origin-repo") + require.NoError(t, createWorktree(t.Context(), repoPath, worktreePath, "test-branch")) + + // Create uncommitted file + require.NoError(t, os.WriteFile(filepath.Join(worktreePath, "dirty.txt"), []byte("dirty"), 0644)) + + dirty, reasons := checkWorkspaceSafety(wsPath) + assert.True(t, dirty) + assert.Contains(t, reasons[0], "uncommitted changes") +} + +func TestEpicBranchName(t *testing.T) { + assert.Equal(t, "epic/101", epicBranchName(101)) + assert.Equal(t, "epic/42", epicBranchName(42)) +} + +func trimNL(s string) string { + return s[:len(s)-1] +} diff --git a/internal/cmd/workspace/cmd_workspace.go b/internal/cmd/workspace/cmd_workspace.go index 204efe1..c90bf63 100644 --- a/internal/cmd/workspace/cmd_workspace.go +++ b/internal/cmd/workspace/cmd_workspace.go @@ -21,6 +21,8 @@ func AddWorkspaceCommands(root *cobra.Command) { RunE: runWorkspaceActive, }) + addTaskCommands(wsCmd) + root.AddCommand(wsCmd) } diff --git a/internal/variants/full.go b/internal/variants/full.go index 
f80e34f..1fb33c3 100644 --- a/internal/variants/full.go +++ b/internal/variants/full.go @@ -21,6 +21,7 @@ // - qa: Quality assurance workflows // - monitor: Security monitoring aggregation // - gitea: Gitea instance management (repos, issues, PRs, mirrors) +// - forge: Forgejo instance management (repos, issues, PRs, migration, orgs, labels) // - unifi: UniFi network management (sites, devices, clients) package variants @@ -36,6 +37,7 @@ import ( _ "github.com/host-uk/core/internal/cmd/dev" _ "github.com/host-uk/core/internal/cmd/docs" _ "github.com/host-uk/core/internal/cmd/doctor" + _ "github.com/host-uk/core/internal/cmd/forge" _ "github.com/host-uk/core/internal/cmd/gitcmd" _ "github.com/host-uk/core/internal/cmd/gitea" _ "github.com/host-uk/core/internal/cmd/go" diff --git a/pkg/build/build.go b/pkg/build/build.go index 86f660e..8d68607 100644 --- a/pkg/build/build.go +++ b/pkg/build/build.go @@ -22,6 +22,8 @@ const ( ProjectTypeNode ProjectType = "node" // ProjectTypePHP indicates a PHP/Laravel project with composer.json. ProjectTypePHP ProjectType = "php" + // ProjectTypeCPP indicates a C++ project with CMakeLists.txt. + ProjectTypeCPP ProjectType = "cpp" // ProjectTypeDocker indicates a Docker-based project with Dockerfile. ProjectTypeDocker ProjectType = "docker" // ProjectTypeLinuxKit indicates a LinuxKit VM configuration. 
diff --git a/pkg/build/buildcmd/cmd_project.go b/pkg/build/buildcmd/cmd_project.go index 25a09dd..e13b9ea 100644 --- a/pkg/build/buildcmd/cmd_project.go +++ b/pkg/build/buildcmd/cmd_project.go @@ -380,6 +380,8 @@ func getBuilder(projectType build.ProjectType) (build.Builder, error) { return builders.NewLinuxKitBuilder(), nil case build.ProjectTypeTaskfile: return builders.NewTaskfileBuilder(), nil + case build.ProjectTypeCPP: + return builders.NewCPPBuilder(), nil case build.ProjectTypeNode: return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.node_not_implemented")) case build.ProjectTypePHP: diff --git a/pkg/build/builders/cpp.go b/pkg/build/builders/cpp.go new file mode 100644 index 0000000..f5cf6f4 --- /dev/null +++ b/pkg/build/builders/cpp.go @@ -0,0 +1,253 @@ +// Package builders provides build implementations for different project types. +package builders + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/host-uk/core/pkg/build" + "github.com/host-uk/core/pkg/io" +) + +// CPPBuilder implements the Builder interface for C++ projects using CMake + Conan. +// It wraps the Makefile-based build system from the .core/build submodule. +type CPPBuilder struct{} + +// NewCPPBuilder creates a new CPPBuilder instance. +func NewCPPBuilder() *CPPBuilder { + return &CPPBuilder{} +} + +// Name returns the builder's identifier. +func (b *CPPBuilder) Name() string { + return "cpp" +} + +// Detect checks if this builder can handle the project in the given directory. +func (b *CPPBuilder) Detect(fs io.Medium, dir string) (bool, error) { + return build.IsCPPProject(fs, dir), nil +} + +// Build compiles the C++ project using Make targets. +// The build flow is: make configure → make build → make package. +// Cross-compilation is handled via Conan profiles specified in .core/build.yaml. 
+func (b *CPPBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) { + if cfg == nil { + return nil, fmt.Errorf("builders.CPPBuilder.Build: config is nil") + } + + // Validate make is available + if err := b.validateMake(); err != nil { + return nil, err + } + + // For C++ projects, the Makefile handles everything. + // We don't iterate per-target like Go — the Makefile's configure + build + // produces binaries for the host platform, and cross-compilation uses + // named Conan profiles (e.g., make gcc-linux-armv8). + if len(targets) == 0 { + // Default to host platform + targets = []build.Target{{OS: runtime.GOOS, Arch: runtime.GOARCH}} + } + + var artifacts []build.Artifact + + for _, target := range targets { + built, err := b.buildTarget(ctx, cfg, target) + if err != nil { + return artifacts, fmt.Errorf("builders.CPPBuilder.Build: %w", err) + } + artifacts = append(artifacts, built...) + } + + return artifacts, nil +} + +// buildTarget compiles for a single target platform. +func (b *CPPBuilder) buildTarget(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) { + // Determine if this is a cross-compile or host build + isHostBuild := target.OS == runtime.GOOS && target.Arch == runtime.GOARCH + + if isHostBuild { + return b.buildHost(ctx, cfg, target) + } + + return b.buildCross(ctx, cfg, target) +} + +// buildHost runs the standard make configure → make build → make package flow. 
+func (b *CPPBuilder) buildHost(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) { + fmt.Printf("Building C++ project for %s/%s (host)\n", target.OS, target.Arch) + + // Step 1: Configure (runs conan install + cmake configure) + if err := b.runMake(ctx, cfg.ProjectDir, "configure"); err != nil { + return nil, fmt.Errorf("configure failed: %w", err) + } + + // Step 2: Build + if err := b.runMake(ctx, cfg.ProjectDir, "build"); err != nil { + return nil, fmt.Errorf("build failed: %w", err) + } + + // Step 3: Package + if err := b.runMake(ctx, cfg.ProjectDir, "package"); err != nil { + return nil, fmt.Errorf("package failed: %w", err) + } + + // Discover artifacts from build/packages/ + return b.findArtifacts(cfg.FS, cfg.ProjectDir, target) +} + +// buildCross runs a cross-compilation using a Conan profile name. +// The Makefile supports profile targets like: make gcc-linux-armv8 +func (b *CPPBuilder) buildCross(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) { + // Map target to a Conan profile name + profile := b.targetToProfile(target) + if profile == "" { + return nil, fmt.Errorf("no Conan profile mapped for target %s/%s", target.OS, target.Arch) + } + + fmt.Printf("Building C++ project for %s/%s (cross: %s)\n", target.OS, target.Arch, profile) + + // The Makefile exposes each profile as a top-level target + if err := b.runMake(ctx, cfg.ProjectDir, profile); err != nil { + return nil, fmt.Errorf("cross-compile for %s failed: %w", profile, err) + } + + return b.findArtifacts(cfg.FS, cfg.ProjectDir, target) +} + +// runMake executes a make target in the project directory. 
+func (b *CPPBuilder) runMake(ctx context.Context, projectDir string, target string) error { + cmd := exec.CommandContext(ctx, "make", target) + cmd.Dir = projectDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = os.Environ() + + if err := cmd.Run(); err != nil { + return fmt.Errorf("make %s: %w", target, err) + } + return nil +} + +// findArtifacts searches for built packages in build/packages/. +func (b *CPPBuilder) findArtifacts(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) { + packagesDir := filepath.Join(projectDir, "build", "packages") + + if !fs.IsDir(packagesDir) { + // Fall back to searching build/release/src/ for raw binaries + return b.findBinaries(fs, projectDir, target) + } + + entries, err := fs.List(packagesDir) + if err != nil { + return nil, fmt.Errorf("failed to list packages directory: %w", err) + } + + var artifacts []build.Artifact + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + // Skip checksum files and hidden files + if strings.HasSuffix(name, ".sha256") || strings.HasPrefix(name, ".") { + continue + } + + artifacts = append(artifacts, build.Artifact{ + Path: filepath.Join(packagesDir, name), + OS: target.OS, + Arch: target.Arch, + }) + } + + return artifacts, nil +} + +// findBinaries searches for compiled binaries in build/release/src/. +func (b *CPPBuilder) findBinaries(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) { + binDir := filepath.Join(projectDir, "build", "release", "src") + + if !fs.IsDir(binDir) { + return nil, fmt.Errorf("no build output found in %s", binDir) + } + + entries, err := fs.List(binDir) + if err != nil { + return nil, fmt.Errorf("failed to list build directory: %w", err) + } + + var artifacts []build.Artifact + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + // Skip non-executable files (libraries, cmake files, etc.) 
+ if strings.HasSuffix(name, ".a") || strings.HasSuffix(name, ".o") || + strings.HasSuffix(name, ".cmake") || strings.HasPrefix(name, ".") { + continue + } + + fullPath := filepath.Join(binDir, name) + + // On Unix, check if file is executable + if target.OS != "windows" { + info, err := os.Stat(fullPath) + if err != nil { + continue + } + if info.Mode()&0111 == 0 { + continue + } + } + + artifacts = append(artifacts, build.Artifact{ + Path: fullPath, + OS: target.OS, + Arch: target.Arch, + }) + } + + return artifacts, nil +} + +// targetToProfile maps a build target to a Conan cross-compilation profile name. +// Profile names match those in .core/build/cmake/profiles/. +func (b *CPPBuilder) targetToProfile(target build.Target) string { + key := target.OS + "/" + target.Arch + profiles := map[string]string{ + "linux/amd64": "gcc-linux-x86_64", + "linux/x86_64": "gcc-linux-x86_64", + "linux/arm64": "gcc-linux-armv8", + "linux/armv8": "gcc-linux-armv8", + "darwin/arm64": "apple-clang-armv8", + "darwin/armv8": "apple-clang-armv8", + "darwin/amd64": "apple-clang-x86_64", + "darwin/x86_64": "apple-clang-x86_64", + "windows/amd64": "msvc-194-x86_64", + "windows/x86_64": "msvc-194-x86_64", + } + + return profiles[key] +} + +// validateMake checks if make is available. +func (b *CPPBuilder) validateMake() error { + if _, err := exec.LookPath("make"); err != nil { + return fmt.Errorf("cpp: make not found. Install build-essential (Linux) or Xcode Command Line Tools (macOS)") + } + return nil +} + +// Ensure CPPBuilder implements the Builder interface. 
+var _ build.Builder = (*CPPBuilder)(nil) diff --git a/pkg/build/builders/cpp_test.go b/pkg/build/builders/cpp_test.go new file mode 100644 index 0000000..f78c16c --- /dev/null +++ b/pkg/build/builders/cpp_test.go @@ -0,0 +1,149 @@ +package builders + +import ( + "os" + "path/filepath" + "testing" + + "github.com/host-uk/core/pkg/build" + "github.com/host-uk/core/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCPPBuilder_Name_Good(t *testing.T) { + builder := NewCPPBuilder() + assert.Equal(t, "cpp", builder.Name()) +} + +func TestCPPBuilder_Detect_Good(t *testing.T) { + fs := io.Local + + t.Run("detects C++ project with CMakeLists.txt", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "CMakeLists.txt"), []byte("cmake_minimum_required(VERSION 3.16)"), 0644) + require.NoError(t, err) + + builder := NewCPPBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.True(t, detected) + }) + + t.Run("returns false for non-C++ project", func(t *testing.T) { + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644) + require.NoError(t, err) + + builder := NewCPPBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.False(t, detected) + }) + + t.Run("returns false for empty directory", func(t *testing.T) { + dir := t.TempDir() + + builder := NewCPPBuilder() + detected, err := builder.Detect(fs, dir) + assert.NoError(t, err) + assert.False(t, detected) + }) +} + +func TestCPPBuilder_Build_Bad(t *testing.T) { + t.Run("returns error for nil config", func(t *testing.T) { + builder := NewCPPBuilder() + artifacts, err := builder.Build(nil, nil, []build.Target{{OS: "linux", Arch: "amd64"}}) + assert.Error(t, err) + assert.Nil(t, artifacts) + assert.Contains(t, err.Error(), "config is nil") + }) +} + +func TestCPPBuilder_TargetToProfile_Good(t *testing.T) { + builder := NewCPPBuilder() + + 
tests := []struct { + os, arch string + expected string + }{ + {"linux", "amd64", "gcc-linux-x86_64"}, + {"linux", "x86_64", "gcc-linux-x86_64"}, + {"linux", "arm64", "gcc-linux-armv8"}, + {"darwin", "arm64", "apple-clang-armv8"}, + {"darwin", "amd64", "apple-clang-x86_64"}, + {"windows", "amd64", "msvc-194-x86_64"}, + } + + for _, tt := range tests { + t.Run(tt.os+"/"+tt.arch, func(t *testing.T) { + profile := builder.targetToProfile(build.Target{OS: tt.os, Arch: tt.arch}) + assert.Equal(t, tt.expected, profile) + }) + } +} + +func TestCPPBuilder_TargetToProfile_Bad(t *testing.T) { + builder := NewCPPBuilder() + + t.Run("returns empty for unknown target", func(t *testing.T) { + profile := builder.targetToProfile(build.Target{OS: "plan9", Arch: "mips"}) + assert.Empty(t, profile) + }) +} + +func TestCPPBuilder_FindArtifacts_Good(t *testing.T) { + fs := io.Local + + t.Run("finds packages in build/packages", func(t *testing.T) { + dir := t.TempDir() + packagesDir := filepath.Join(dir, "build", "packages") + require.NoError(t, os.MkdirAll(packagesDir, 0755)) + + // Create mock package files + require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz"), []byte("pkg"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz.sha256"), []byte("checksum"), 0644)) + require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.rpm"), []byte("rpm"), 0644)) + + builder := NewCPPBuilder() + target := build.Target{OS: "linux", Arch: "amd64"} + artifacts, err := builder.findArtifacts(fs, dir, target) + require.NoError(t, err) + + // Should find tar.xz and rpm but not sha256 + assert.Len(t, artifacts, 2) + for _, a := range artifacts { + assert.Equal(t, "linux", a.OS) + assert.Equal(t, "amd64", a.Arch) + assert.False(t, filepath.Ext(a.Path) == ".sha256") + } + }) + + t.Run("falls back to binaries in build/release/src", func(t *testing.T) { + dir := t.TempDir() + binDir := 
filepath.Join(dir, "build", "release", "src") + require.NoError(t, os.MkdirAll(binDir, 0755)) + + // Create mock binary (executable) + binPath := filepath.Join(binDir, "test-daemon") + require.NoError(t, os.WriteFile(binPath, []byte("binary"), 0755)) + + // Create a library (should be skipped) + require.NoError(t, os.WriteFile(filepath.Join(binDir, "libcrypto.a"), []byte("lib"), 0644)) + + builder := NewCPPBuilder() + target := build.Target{OS: "linux", Arch: "amd64"} + artifacts, err := builder.findArtifacts(fs, dir, target) + require.NoError(t, err) + + // Should find the executable but not the library + assert.Len(t, artifacts, 1) + assert.Contains(t, artifacts[0].Path, "test-daemon") + }) +} + +func TestCPPBuilder_Interface_Good(t *testing.T) { + var _ build.Builder = (*CPPBuilder)(nil) + var _ build.Builder = NewCPPBuilder() +} diff --git a/pkg/build/discovery.go b/pkg/build/discovery.go index ea4ee12..209c2cf 100644 --- a/pkg/build/discovery.go +++ b/pkg/build/discovery.go @@ -13,6 +13,7 @@ const ( markerWails = "wails.json" markerNodePackage = "package.json" markerComposer = "composer.json" + markerCMake = "CMakeLists.txt" ) // projectMarker maps a marker file to its project type. @@ -28,6 +29,7 @@ var markers = []projectMarker{ {markerGoMod, ProjectTypeGo}, {markerNodePackage, ProjectTypeNode}, {markerComposer, ProjectTypePHP}, + {markerCMake, ProjectTypeCPP}, } // Discover detects project types in the given directory by checking for marker files. @@ -83,6 +85,11 @@ func IsPHPProject(fs io.Medium, dir string) bool { return fileExists(fs, filepath.Join(dir, markerComposer)) } +// IsCPPProject checks if the directory contains a C++ project. +func IsCPPProject(fs io.Medium, dir string) bool { + return fileExists(fs, filepath.Join(dir, markerCMake)) +} + // fileExists checks if a file exists and is not a directory. 
func fileExists(fs io.Medium, path string) bool { return fs.IsFile(path) diff --git a/pkg/build/discovery_test.go b/pkg/build/discovery_test.go index 414b1a3..11b4cc6 100644 --- a/pkg/build/discovery_test.go +++ b/pkg/build/discovery_test.go @@ -52,6 +52,13 @@ func TestDiscover_Good(t *testing.T) { assert.Equal(t, []ProjectType{ProjectTypePHP}, types) }) + t.Run("detects C++ project", func(t *testing.T) { + dir := setupTestDir(t, "CMakeLists.txt") + types, err := Discover(fs, dir) + assert.NoError(t, err) + assert.Equal(t, []ProjectType{ProjectTypeCPP}, types) + }) + t.Run("detects multiple project types", func(t *testing.T) { dir := setupTestDir(t, "go.mod", "package.json") types, err := Discover(fs, dir) @@ -155,6 +162,19 @@ func TestIsNodeProject_Good(t *testing.T) { }) } +func TestIsCPPProject_Good(t *testing.T) { + fs := io.Local + t.Run("true with CMakeLists.txt", func(t *testing.T) { + dir := setupTestDir(t, "CMakeLists.txt") + assert.True(t, IsCPPProject(fs, dir)) + }) + + t.Run("false without CMakeLists.txt", func(t *testing.T) { + dir := t.TempDir() + assert.False(t, IsCPPProject(fs, dir)) + }) +} + func TestIsPHPProject_Good(t *testing.T) { fs := io.Local t.Run("true with composer.json", func(t *testing.T) { @@ -209,6 +229,7 @@ func TestDiscover_Testdata(t *testing.T) { {"wails-project", "wails-project", []ProjectType{ProjectTypeWails, ProjectTypeGo}}, {"node-project", "node-project", []ProjectType{ProjectTypeNode}}, {"php-project", "php-project", []ProjectType{ProjectTypePHP}}, + {"cpp-project", "cpp-project", []ProjectType{ProjectTypeCPP}}, {"multi-project", "multi-project", []ProjectType{ProjectTypeGo, ProjectTypeNode}}, {"empty-project", "empty-project", []ProjectType{}}, } diff --git a/pkg/build/testdata/cpp-project/CMakeLists.txt b/pkg/build/testdata/cpp-project/CMakeLists.txt new file mode 100644 index 0000000..f6ba2c7 --- /dev/null +++ b/pkg/build/testdata/cpp-project/CMakeLists.txt @@ -0,0 +1,2 @@ +cmake_minimum_required(VERSION 3.16) 
+project(TestCPP) diff --git a/pkg/cli/app.go b/pkg/cli/app.go index e904b17..a9f6054 100644 --- a/pkg/cli/app.go +++ b/pkg/cli/app.go @@ -17,10 +17,36 @@ const ( AppName = "core" ) -// AppVersion is set at build time via ldflags: +// Build-time variables set via ldflags (SemVer 2.0.0): // -// go build -ldflags="-X github.com/host-uk/core/pkg/cli.AppVersion=v1.0.0" -var AppVersion = "dev" +// go build -ldflags="-X github.com/host-uk/core/pkg/cli.AppVersion=1.2.0 \ +// -X github.com/host-uk/core/pkg/cli.BuildCommit=df94c24 \ +// -X github.com/host-uk/core/pkg/cli.BuildDate=2026-02-06 \ +// -X github.com/host-uk/core/pkg/cli.BuildPreRelease=dev.8" +var ( + AppVersion = "0.0.0" + BuildCommit = "unknown" + BuildDate = "unknown" + BuildPreRelease = "" +) + +// SemVer returns the full SemVer 2.0.0 version string. +// - Release: 1.2.0 +// - Pre-release: 1.2.0-dev.8 +// - Full: 1.2.0-dev.8+df94c24.20260206 +func SemVer() string { + v := AppVersion + if BuildPreRelease != "" { + v += "-" + BuildPreRelease + } + if BuildCommit != "unknown" { + v += "+" + BuildCommit + if BuildDate != "unknown" { + v += "." + BuildDate + } + } + return v +} // Main initialises and runs the CLI application. // This is the main entry point for the CLI. @@ -38,7 +64,7 @@ func Main() { // Initialise CLI runtime with services if err := Init(Options{ AppName: AppName, - Version: AppVersion, + Version: SemVer(), Services: []framework.Option{ framework.WithName("i18n", NewI18nService(I18nOptions{})), framework.WithName("log", NewLogService(log.Options{ diff --git a/pkg/forge/client.go b/pkg/forge/client.go new file mode 100644 index 0000000..601d1cf --- /dev/null +++ b/pkg/forge/client.go @@ -0,0 +1,37 @@ +// Package forge provides a thin wrapper around the Forgejo Go SDK +// for managing repositories, issues, and pull requests on a Forgejo instance. +// +// Authentication is resolved from config file, environment variables, or flag overrides: +// +// 1. 
~/.core/config.yaml keys: forge.token, forge.url +// 2. FORGE_TOKEN + FORGE_URL environment variables (override config file) +// 3. Flag overrides via core forge config --url/--token (highest priority) +package forge + +import ( + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/log" +) + +// Client wraps the Forgejo SDK client with config-based auth. +type Client struct { + api *forgejo.Client + url string +} + +// New creates a new Forgejo API client for the given URL and token. +func New(url, token string) (*Client, error) { + api, err := forgejo.NewClient(url, forgejo.SetToken(token)) + if err != nil { + return nil, log.E("forge.New", "failed to create client", err) + } + + return &Client{api: api, url: url}, nil +} + +// API exposes the underlying SDK client for direct access. +func (c *Client) API() *forgejo.Client { return c.api } + +// URL returns the Forgejo instance URL. +func (c *Client) URL() string { return c.url } diff --git a/pkg/forge/config.go b/pkg/forge/config.go new file mode 100644 index 0000000..e641001 --- /dev/null +++ b/pkg/forge/config.go @@ -0,0 +1,92 @@ +package forge + +import ( + "os" + + "github.com/host-uk/core/pkg/config" + "github.com/host-uk/core/pkg/log" +) + +const ( + // ConfigKeyURL is the config key for the Forgejo instance URL. + ConfigKeyURL = "forge.url" + // ConfigKeyToken is the config key for the Forgejo API token. + ConfigKeyToken = "forge.token" + + // DefaultURL is the default Forgejo instance URL. + DefaultURL = "http://localhost:4000" +) + +// NewFromConfig creates a Forgejo client using the standard config resolution: +// +// 1. ~/.core/config.yaml keys: forge.token, forge.url +// 2. FORGE_TOKEN + FORGE_URL environment variables (override config file) +// 3. 
Provided flag overrides (highest priority; pass empty to skip) +func NewFromConfig(flagURL, flagToken string) (*Client, error) { + url, token, err := ResolveConfig(flagURL, flagToken) + if err != nil { + return nil, err + } + + if token == "" { + return nil, log.E("forge.NewFromConfig", "no API token configured (set FORGE_TOKEN or run: core forge config --token TOKEN)", nil) + } + + return New(url, token) +} + +// ResolveConfig resolves the Forgejo URL and token from all config sources. +// Flag values take highest priority, then env vars, then config file. +func ResolveConfig(flagURL, flagToken string) (url, token string, err error) { + // Start with config file values + cfg, cfgErr := config.New() + if cfgErr == nil { + _ = cfg.Get(ConfigKeyURL, &url) + _ = cfg.Get(ConfigKeyToken, &token) + } + + // Overlay environment variables + if envURL := os.Getenv("FORGE_URL"); envURL != "" { + url = envURL + } + if envToken := os.Getenv("FORGE_TOKEN"); envToken != "" { + token = envToken + } + + // Overlay flag values (highest priority) + if flagURL != "" { + url = flagURL + } + if flagToken != "" { + token = flagToken + } + + // Default URL if nothing configured + if url == "" { + url = DefaultURL + } + + return url, token, nil +} + +// SaveConfig persists the Forgejo URL and/or token to the config file. 
+func SaveConfig(url, token string) error { + cfg, err := config.New() + if err != nil { + return log.E("forge.SaveConfig", "failed to load config", err) + } + + if url != "" { + if err := cfg.Set(ConfigKeyURL, url); err != nil { + return log.E("forge.SaveConfig", "failed to save URL", err) + } + } + + if token != "" { + if err := cfg.Set(ConfigKeyToken, token); err != nil { + return log.E("forge.SaveConfig", "failed to save token", err) + } + } + + return nil +} diff --git a/pkg/forge/issues.go b/pkg/forge/issues.go new file mode 100644 index 0000000..80c6099 --- /dev/null +++ b/pkg/forge/issues.go @@ -0,0 +1,119 @@ +package forge + +import ( + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/log" +) + +// ListIssuesOpts configures issue listing. +type ListIssuesOpts struct { + State string // "open", "closed", "all" + Page int + Limit int +} + +// ListIssues returns issues for the given repository. +func (c *Client) ListIssues(owner, repo string, opts ListIssuesOpts) ([]*forgejo.Issue, error) { + state := forgejo.StateOpen + switch opts.State { + case "closed": + state = forgejo.StateClosed + case "all": + state = forgejo.StateAll + } + + limit := opts.Limit + if limit == 0 { + limit = 50 + } + + page := opts.Page + if page == 0 { + page = 1 + } + + issues, _, err := c.api.ListRepoIssues(owner, repo, forgejo.ListIssueOption{ + ListOptions: forgejo.ListOptions{Page: page, PageSize: limit}, + State: state, + Type: forgejo.IssueTypeIssue, + }) + if err != nil { + return nil, log.E("forge.ListIssues", "failed to list issues", err) + } + + return issues, nil +} + +// GetIssue returns a single issue by number. +func (c *Client) GetIssue(owner, repo string, number int64) (*forgejo.Issue, error) { + issue, _, err := c.api.GetIssue(owner, repo, number) + if err != nil { + return nil, log.E("forge.GetIssue", "failed to get issue", err) + } + + return issue, nil +} + +// CreateIssue creates a new issue in the given repository. 
+func (c *Client) CreateIssue(owner, repo string, opts forgejo.CreateIssueOption) (*forgejo.Issue, error) { + issue, _, err := c.api.CreateIssue(owner, repo, opts) + if err != nil { + return nil, log.E("forge.CreateIssue", "failed to create issue", err) + } + + return issue, nil +} + +// EditIssue edits an existing issue. +func (c *Client) EditIssue(owner, repo string, number int64, opts forgejo.EditIssueOption) (*forgejo.Issue, error) { + issue, _, err := c.api.EditIssue(owner, repo, number, opts) + if err != nil { + return nil, log.E("forge.EditIssue", "failed to edit issue", err) + } + + return issue, nil +} + +// ListPullRequests returns pull requests for the given repository. +func (c *Client) ListPullRequests(owner, repo string, state string) ([]*forgejo.PullRequest, error) { + st := forgejo.StateOpen + switch state { + case "closed": + st = forgejo.StateClosed + case "all": + st = forgejo.StateAll + } + + var all []*forgejo.PullRequest + page := 1 + + for { + prs, resp, err := c.api.ListRepoPullRequests(owner, repo, forgejo.ListPullRequestsOptions{ + ListOptions: forgejo.ListOptions{Page: page, PageSize: 50}, + State: st, + }) + if err != nil { + return nil, log.E("forge.ListPullRequests", "failed to list pull requests", err) + } + + all = append(all, prs...) + + if resp == nil || page >= resp.LastPage { + break + } + page++ + } + + return all, nil +} + +// GetPullRequest returns a single pull request by number. 
+func (c *Client) GetPullRequest(owner, repo string, number int64) (*forgejo.PullRequest, error) { + pr, _, err := c.api.GetPullRequest(owner, repo, number) + if err != nil { + return nil, log.E("forge.GetPullRequest", "failed to get pull request", err) + } + + return pr, nil +} diff --git a/pkg/forge/labels.go b/pkg/forge/labels.go new file mode 100644 index 0000000..89f2de2 --- /dev/null +++ b/pkg/forge/labels.go @@ -0,0 +1,60 @@ +package forge + +import ( + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/log" +) + +// ListOrgLabels returns all labels for repos in the given organisation. +// Note: The Forgejo SDK does not have a dedicated org-level labels endpoint. +// This lists labels from the first repo found, which works when orgs use shared label sets. +// For org-wide label management, use ListRepoLabels with a specific repo. +func (c *Client) ListOrgLabels(org string) ([]*forgejo.Label, error) { + // Forgejo doesn't expose org-level labels via SDK — list repos and aggregate unique labels. + repos, err := c.ListOrgRepos(org) + if err != nil { + return nil, err + } + + if len(repos) == 0 { + return nil, nil + } + + // Use the first repo's labels as representative of the org's label set. + return c.ListRepoLabels(repos[0].Owner.UserName, repos[0].Name) +} + +// ListRepoLabels returns all labels for a repository. +func (c *Client) ListRepoLabels(owner, repo string) ([]*forgejo.Label, error) { + var all []*forgejo.Label + page := 1 + + for { + labels, resp, err := c.api.ListRepoLabels(owner, repo, forgejo.ListLabelsOptions{ + ListOptions: forgejo.ListOptions{Page: page, PageSize: 50}, + }) + if err != nil { + return nil, log.E("forge.ListRepoLabels", "failed to list repo labels", err) + } + + all = append(all, labels...) + + if resp == nil || page >= resp.LastPage { + break + } + page++ + } + + return all, nil +} + +// CreateRepoLabel creates a label on a repository. 
+func (c *Client) CreateRepoLabel(owner, repo string, opts forgejo.CreateLabelOption) (*forgejo.Label, error) { + label, _, err := c.api.CreateLabel(owner, repo, opts) + if err != nil { + return nil, log.E("forge.CreateRepoLabel", "failed to create repo label", err) + } + + return label, nil +} diff --git a/pkg/forge/meta.go b/pkg/forge/meta.go new file mode 100644 index 0000000..642f676 --- /dev/null +++ b/pkg/forge/meta.go @@ -0,0 +1,144 @@ +package forge + +import ( + "time" + + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/log" +) + +// PRMeta holds structural signals from a pull request, +// used by the pipeline MetaReader for AI-driven workflows. +type PRMeta struct { + Number int64 + Title string + State string + Author string + Branch string + BaseBranch string + Labels []string + Assignees []string + IsMerged bool + CreatedAt time.Time + UpdatedAt time.Time + CommentCount int +} + +// Comment represents a comment with metadata. +type Comment struct { + ID int64 + Author string + Body string + CreatedAt time.Time + UpdatedAt time.Time +} + +const commentPageSize = 50 + +// GetPRMeta returns structural signals for a pull request. +// This is the Forgejo side of the dual MetaReader described in the pipeline design. 
+func (c *Client) GetPRMeta(owner, repo string, pr int64) (*PRMeta, error) { + pull, _, err := c.api.GetPullRequest(owner, repo, pr) + if err != nil { + return nil, log.E("forge.GetPRMeta", "failed to get PR metadata", err) + } + + meta := &PRMeta{ + Number: pull.Index, + Title: pull.Title, + State: string(pull.State), + Branch: pull.Head.Ref, + BaseBranch: pull.Base.Ref, + IsMerged: pull.HasMerged, + } + + if pull.Created != nil { + meta.CreatedAt = *pull.Created + } + if pull.Updated != nil { + meta.UpdatedAt = *pull.Updated + } + + if pull.Poster != nil { + meta.Author = pull.Poster.UserName + } + + for _, label := range pull.Labels { + meta.Labels = append(meta.Labels, label.Name) + } + + for _, assignee := range pull.Assignees { + meta.Assignees = append(meta.Assignees, assignee.UserName) + } + + // Fetch comment count from the issue side (PRs are issues in Forgejo). + // Paginate to get an accurate count. + count := 0 + page := 1 + for { + comments, _, listErr := c.api.ListIssueComments(owner, repo, pr, forgejo.ListIssueCommentOptions{ + ListOptions: forgejo.ListOptions{Page: page, PageSize: commentPageSize}, + }) + if listErr != nil { + break + } + count += len(comments) + if len(comments) < commentPageSize { + break + } + page++ + } + meta.CommentCount = count + + return meta, nil +} + +// GetCommentBodies returns all comment bodies for a pull request. 
+func (c *Client) GetCommentBodies(owner, repo string, pr int64) ([]Comment, error) { + var comments []Comment + page := 1 + + for { + raw, _, err := c.api.ListIssueComments(owner, repo, pr, forgejo.ListIssueCommentOptions{ + ListOptions: forgejo.ListOptions{Page: page, PageSize: commentPageSize}, + }) + if err != nil { + return nil, log.E("forge.GetCommentBodies", "failed to get PR comments", err) + } + + if len(raw) == 0 { + break + } + + for _, rc := range raw { + comment := Comment{ + ID: rc.ID, + Body: rc.Body, + CreatedAt: rc.Created, + UpdatedAt: rc.Updated, + } + if rc.Poster != nil { + comment.Author = rc.Poster.UserName + } + comments = append(comments, comment) + } + + if len(raw) < commentPageSize { + break + } + page++ + } + + return comments, nil +} + +// GetIssueBody returns the body text of an issue. +func (c *Client) GetIssueBody(owner, repo string, issue int64) (string, error) { + iss, _, err := c.api.GetIssue(owner, repo, issue) + if err != nil { + return "", log.E("forge.GetIssueBody", "failed to get issue body", err) + } + + return iss.Body, nil +} diff --git a/pkg/forge/orgs.go b/pkg/forge/orgs.go new file mode 100644 index 0000000..0c559d1 --- /dev/null +++ b/pkg/forge/orgs.go @@ -0,0 +1,51 @@ +package forge + +import ( + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/log" +) + +// ListMyOrgs returns all organisations for the authenticated user. +func (c *Client) ListMyOrgs() ([]*forgejo.Organization, error) { + var all []*forgejo.Organization + page := 1 + + for { + orgs, resp, err := c.api.ListMyOrgs(forgejo.ListOrgsOptions{ + ListOptions: forgejo.ListOptions{Page: page, PageSize: 50}, + }) + if err != nil { + return nil, log.E("forge.ListMyOrgs", "failed to list orgs", err) + } + + all = append(all, orgs...) + + if resp == nil || page >= resp.LastPage { + break + } + page++ + } + + return all, nil +} + +// GetOrg returns a single organisation by name. 
+func (c *Client) GetOrg(name string) (*forgejo.Organization, error) { + org, _, err := c.api.GetOrg(name) + if err != nil { + return nil, log.E("forge.GetOrg", "failed to get org", err) + } + + return org, nil +} + +// CreateOrg creates a new organisation. +func (c *Client) CreateOrg(opts forgejo.CreateOrgOption) (*forgejo.Organization, error) { + org, _, err := c.api.CreateOrg(opts) + if err != nil { + return nil, log.E("forge.CreateOrg", "failed to create org", err) + } + + return org, nil +} diff --git a/pkg/forge/repos.go b/pkg/forge/repos.go new file mode 100644 index 0000000..62f6b74 --- /dev/null +++ b/pkg/forge/repos.go @@ -0,0 +1,96 @@ +package forge + +import ( + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/log" +) + +// ListOrgRepos returns all repositories for the given organisation. +func (c *Client) ListOrgRepos(org string) ([]*forgejo.Repository, error) { + var all []*forgejo.Repository + page := 1 + + for { + repos, resp, err := c.api.ListOrgRepos(org, forgejo.ListOrgReposOptions{ + ListOptions: forgejo.ListOptions{Page: page, PageSize: 50}, + }) + if err != nil { + return nil, log.E("forge.ListOrgRepos", "failed to list org repos", err) + } + + all = append(all, repos...) + + if resp == nil || page >= resp.LastPage { + break + } + page++ + } + + return all, nil +} + +// ListUserRepos returns all repositories for the authenticated user. +func (c *Client) ListUserRepos() ([]*forgejo.Repository, error) { + var all []*forgejo.Repository + page := 1 + + for { + repos, resp, err := c.api.ListMyRepos(forgejo.ListReposOptions{ + ListOptions: forgejo.ListOptions{Page: page, PageSize: 50}, + }) + if err != nil { + return nil, log.E("forge.ListUserRepos", "failed to list user repos", err) + } + + all = append(all, repos...) + + if resp == nil || page >= resp.LastPage { + break + } + page++ + } + + return all, nil +} + +// GetRepo returns a single repository by owner and name. 
+func (c *Client) GetRepo(owner, name string) (*forgejo.Repository, error) { + repo, _, err := c.api.GetRepo(owner, name) + if err != nil { + return nil, log.E("forge.GetRepo", "failed to get repo", err) + } + + return repo, nil +} + +// CreateOrgRepo creates a new empty repository under an organisation. +func (c *Client) CreateOrgRepo(org string, opts forgejo.CreateRepoOption) (*forgejo.Repository, error) { + repo, _, err := c.api.CreateOrgRepo(org, opts) + if err != nil { + return nil, log.E("forge.CreateOrgRepo", "failed to create org repo", err) + } + + return repo, nil +} + +// DeleteRepo deletes a repository from Forgejo. +func (c *Client) DeleteRepo(owner, name string) error { + _, err := c.api.DeleteRepo(owner, name) + if err != nil { + return log.E("forge.DeleteRepo", "failed to delete repo", err) + } + + return nil +} + +// MigrateRepo migrates a repository from an external service using the Forgejo migration API. +// Unlike CreateMirror, this supports importing issues, labels, PRs, and more. +func (c *Client) MigrateRepo(opts forgejo.MigrateRepoOption) (*forgejo.Repository, error) { + repo, _, err := c.api.MigrateRepo(opts) + if err != nil { + return nil, log.E("forge.MigrateRepo", "failed to migrate repo", err) + } + + return repo, nil +} diff --git a/pkg/forge/webhooks.go b/pkg/forge/webhooks.go new file mode 100644 index 0000000..a2c49bd --- /dev/null +++ b/pkg/forge/webhooks.go @@ -0,0 +1,41 @@ +package forge + +import ( + forgejo "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2" + + "github.com/host-uk/core/pkg/log" +) + +// CreateRepoWebhook creates a webhook on a repository. +func (c *Client) CreateRepoWebhook(owner, repo string, opts forgejo.CreateHookOption) (*forgejo.Hook, error) { + hook, _, err := c.api.CreateRepoHook(owner, repo, opts) + if err != nil { + return nil, log.E("forge.CreateRepoWebhook", "failed to create repo webhook", err) + } + + return hook, nil +} + +// ListRepoWebhooks returns all webhooks for a repository. 
+func (c *Client) ListRepoWebhooks(owner, repo string) ([]*forgejo.Hook, error) { + var all []*forgejo.Hook + page := 1 + + for { + hooks, resp, err := c.api.ListRepoHooks(owner, repo, forgejo.ListHooksOptions{ + ListOptions: forgejo.ListOptions{Page: page, PageSize: 50}, + }) + if err != nil { + return nil, log.E("forge.ListRepoWebhooks", "failed to list repo webhooks", err) + } + + all = append(all, hooks...) + + if resp == nil || page >= resp.LastPage { + break + } + page++ + } + + return all, nil +} diff --git a/pkg/i18n/locales/en_GB.json b/pkg/i18n/locales/en_GB.json index 4f6d8f4..b901d3d 100644 --- a/pkg/i18n/locales/en_GB.json +++ b/pkg/i18n/locales/en_GB.json @@ -282,6 +282,9 @@ "vm.status.short": "Show development VM status", "no_changes": "No uncommitted changes found.", "no_git_repos": "No git repositories found.", + "modified": "{{.Count}} modified", + "staged": "{{.Count}} staged", + "untracked": "{{.Count}} untracked", "confirm_claude_commit": "Have Claude commit these repos?", "health.short": "Quick health check across all repos", "health.long": "Shows a summary of repository health across all repos in the workspace.", @@ -298,6 +301,12 @@ "status.clean": "clean", "commit.short": "Claude-assisted commits across repos", "push.short": "Push commits across all repos", + "push.long": "Push commits to remote across all repos in the workspace.", + "push.flag.force": "Push without confirmation", + "push.all_up_to_date": "All repos are up to date.", + "push.confirm_push": "Push {{.Commits}} commit(s) across {{.Repos}} repo(s)?", + "push.done_pushed": "Pushed {{.Count}} repo(s)", + "push.pull_and_retry": "Pull and retry push?", "push.diverged": "branch has diverged from remote", "push.diverged_help": "Some repos have diverged (local and remote have different commits).", "push.uncommitted_changes_commit": "You have uncommitted changes. 
Commit with Claude first?", @@ -731,7 +740,9 @@ "succeeded": "{{.Count}} succeeded", "failed": "{{.Count}} failed", "skipped": "{{.Count}} skipped", - "passed": "{{.Count}} passed" + "passed": "{{.Count}} passed", + "commits": "{{.Count}} commit(s) ahead", + "repos_unpushed": "{{.Count}} repo(s) with unpushed commits" } }, "error": { @@ -748,6 +759,7 @@ }, "cli": { "pass": "PASS", - "fail": "FAIL" + "fail": "FAIL", + "aborted": "Aborted." } } diff --git a/pkg/io/datanode/client.go b/pkg/io/datanode/client.go new file mode 100644 index 0000000..8a385a5 --- /dev/null +++ b/pkg/io/datanode/client.go @@ -0,0 +1,573 @@ +// Package datanode provides an in-memory io.Medium backed by Borg's DataNode. +// +// DataNode is an in-memory fs.FS that serializes to tar. Wrapping it as a +// Medium lets any code that works with io.Medium transparently operate on +// an in-memory filesystem that can be snapshotted, shipped as a crash report, +// or wrapped in a TIM container for runc execution. +package datanode + +import ( + goio "io" + "io/fs" + "os" + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/Snider/Borg/pkg/datanode" + coreerr "github.com/host-uk/core/pkg/framework/core" +) + +// Medium is an in-memory storage backend backed by a Borg DataNode. +// All paths are relative (no leading slash). Thread-safe via RWMutex. +type Medium struct { + dn *datanode.DataNode + dirs map[string]bool // explicit directory tracking + mu sync.RWMutex +} + +// New creates a new empty DataNode Medium. +func New() *Medium { + return &Medium{ + dn: datanode.New(), + dirs: make(map[string]bool), + } +} + +// FromTar creates a Medium from a tarball, restoring all files. +func FromTar(data []byte) (*Medium, error) { + dn, err := datanode.FromTar(data) + if err != nil { + return nil, coreerr.E("datanode.FromTar", "failed to restore", err) + } + return &Medium{ + dn: dn, + dirs: make(map[string]bool), + }, nil +} + +// Snapshot serializes the entire filesystem to a tarball. 
+// Use this for crash reports, workspace packaging, or TIM creation. +func (m *Medium) Snapshot() ([]byte, error) { + m.mu.RLock() + defer m.mu.RUnlock() + data, err := m.dn.ToTar() + if err != nil { + return nil, coreerr.E("datanode.Snapshot", "tar failed", err) + } + return data, nil +} + +// Restore replaces the filesystem contents from a tarball. +func (m *Medium) Restore(data []byte) error { + dn, err := datanode.FromTar(data) + if err != nil { + return coreerr.E("datanode.Restore", "tar failed", err) + } + m.mu.Lock() + defer m.mu.Unlock() + m.dn = dn + m.dirs = make(map[string]bool) + return nil +} + +// DataNode returns the underlying Borg DataNode. +// Use this to wrap the filesystem in a TIM container. +func (m *Medium) DataNode() *datanode.DataNode { + m.mu.RLock() + defer m.mu.RUnlock() + return m.dn +} + +// clean normalizes a path: strips leading slash, cleans traversal. +func clean(p string) string { + p = strings.TrimPrefix(p, "/") + p = path.Clean(p) + if p == "." { + return "" + } + return p +} + +// --- io.Medium interface --- + +func (m *Medium) Read(p string) (string, error) { + m.mu.RLock() + defer m.mu.RUnlock() + + p = clean(p) + f, err := m.dn.Open(p) + if err != nil { + return "", coreerr.E("datanode.Read", "not found: "+p, os.ErrNotExist) + } + defer f.Close() + + info, err := f.Stat() + if err != nil { + return "", coreerr.E("datanode.Read", "stat failed: "+p, err) + } + if info.IsDir() { + return "", coreerr.E("datanode.Read", "is a directory: "+p, os.ErrInvalid) + } + + data, err := goio.ReadAll(f) + if err != nil { + return "", coreerr.E("datanode.Read", "read failed: "+p, err) + } + return string(data), nil +} + +func (m *Medium) Write(p, content string) error { + m.mu.Lock() + defer m.mu.Unlock() + + p = clean(p) + if p == "" { + return coreerr.E("datanode.Write", "empty path", os.ErrInvalid) + } + m.dn.AddData(p, []byte(content)) + + // ensure parent dirs are tracked + m.ensureDirsLocked(path.Dir(p)) + return nil +} + +func (m 
*Medium) EnsureDir(p string) error { + m.mu.Lock() + defer m.mu.Unlock() + + p = clean(p) + if p == "" { + return nil + } + m.ensureDirsLocked(p) + return nil +} + +// ensureDirsLocked marks a directory and all ancestors as existing. +// Caller must hold m.mu. +func (m *Medium) ensureDirsLocked(p string) { + for p != "" && p != "." { + m.dirs[p] = true + p = path.Dir(p) + if p == "." { + break + } + } +} + +func (m *Medium) IsFile(p string) bool { + m.mu.RLock() + defer m.mu.RUnlock() + + p = clean(p) + info, err := m.dn.Stat(p) + return err == nil && !info.IsDir() +} + +func (m *Medium) FileGet(p string) (string, error) { + return m.Read(p) +} + +func (m *Medium) FileSet(p, content string) error { + return m.Write(p, content) +} + +func (m *Medium) Delete(p string) error { + m.mu.Lock() + defer m.mu.Unlock() + + p = clean(p) + if p == "" { + return coreerr.E("datanode.Delete", "cannot delete root", os.ErrPermission) + } + + // Check if it's a file in the DataNode + info, err := m.dn.Stat(p) + if err != nil { + // Check explicit dirs + if m.dirs[p] { + // Check if dir is empty + if m.hasPrefixLocked(p + "/") { + return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) + } + delete(m.dirs, p) + return nil + } + return coreerr.E("datanode.Delete", "not found: "+p, os.ErrNotExist) + } + + if info.IsDir() { + if m.hasPrefixLocked(p + "/") { + return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) + } + delete(m.dirs, p) + return nil + } + + // Remove the file by creating a new DataNode without it + m.removeFileLocked(p) + return nil +} + +func (m *Medium) DeleteAll(p string) error { + m.mu.Lock() + defer m.mu.Unlock() + + p = clean(p) + if p == "" { + return coreerr.E("datanode.DeleteAll", "cannot delete root", os.ErrPermission) + } + + prefix := p + "/" + found := false + + // Check if p itself is a file + info, err := m.dn.Stat(p) + if err == nil && !info.IsDir() { + m.removeFileLocked(p) + found = true + } + + // Remove all 
files under prefix + entries, _ := m.collectAllLocked() + for _, name := range entries { + if name == p || strings.HasPrefix(name, prefix) { + m.removeFileLocked(name) + found = true + } + } + + // Remove explicit dirs under prefix + for d := range m.dirs { + if d == p || strings.HasPrefix(d, prefix) { + delete(m.dirs, d) + found = true + } + } + + if !found { + return coreerr.E("datanode.DeleteAll", "not found: "+p, os.ErrNotExist) + } + return nil +} + +func (m *Medium) Rename(oldPath, newPath string) error { + m.mu.Lock() + defer m.mu.Unlock() + + oldPath = clean(oldPath) + newPath = clean(newPath) + + // Check if source is a file + info, err := m.dn.Stat(oldPath) + if err != nil { + return coreerr.E("datanode.Rename", "not found: "+oldPath, os.ErrNotExist) + } + + if !info.IsDir() { + // Read old, write new, delete old + f, err := m.dn.Open(oldPath) + if err != nil { + return coreerr.E("datanode.Rename", "open failed: "+oldPath, err) + } + data, err := goio.ReadAll(f) + f.Close() + if err != nil { + return coreerr.E("datanode.Rename", "read failed: "+oldPath, err) + } + m.dn.AddData(newPath, data) + m.ensureDirsLocked(path.Dir(newPath)) + m.removeFileLocked(oldPath) + return nil + } + + // Directory rename: move all files under oldPath to newPath + oldPrefix := oldPath + "/" + newPrefix := newPath + "/" + + entries, _ := m.collectAllLocked() + for _, name := range entries { + if strings.HasPrefix(name, oldPrefix) { + newName := newPrefix + strings.TrimPrefix(name, oldPrefix) + f, err := m.dn.Open(name) + if err != nil { + continue + } + data, _ := goio.ReadAll(f) + f.Close() + m.dn.AddData(newName, data) + m.removeFileLocked(name) + } + } + + // Move explicit dirs + dirsToMove := make(map[string]string) + for d := range m.dirs { + if d == oldPath || strings.HasPrefix(d, oldPrefix) { + newD := newPath + strings.TrimPrefix(d, oldPath) + dirsToMove[d] = newD + } + } + for old, nw := range dirsToMove { + delete(m.dirs, old) + m.dirs[nw] = true + } + + return nil +} 
+ +func (m *Medium) List(p string) ([]fs.DirEntry, error) { + m.mu.RLock() + defer m.mu.RUnlock() + + p = clean(p) + + entries, err := m.dn.ReadDir(p) + if err != nil { + // Check explicit dirs + if p == "" || m.dirs[p] { + return []fs.DirEntry{}, nil + } + return nil, coreerr.E("datanode.List", "not found: "+p, os.ErrNotExist) + } + + // Also include explicit subdirectories not discovered via files + prefix := p + if prefix != "" { + prefix += "/" + } + seen := make(map[string]bool) + for _, e := range entries { + seen[e.Name()] = true + } + + for d := range m.dirs { + if !strings.HasPrefix(d, prefix) { + continue + } + rest := strings.TrimPrefix(d, prefix) + if rest == "" { + continue + } + first := strings.SplitN(rest, "/", 2)[0] + if !seen[first] { + seen[first] = true + entries = append(entries, &dirEntry{name: first}) + } + } + + sort.Slice(entries, func(i, j int) bool { + return entries[i].Name() < entries[j].Name() + }) + + return entries, nil +} + +func (m *Medium) Stat(p string) (fs.FileInfo, error) { + m.mu.RLock() + defer m.mu.RUnlock() + + p = clean(p) + if p == "" { + return &fileInfo{name: ".", isDir: true, mode: fs.ModeDir | 0755}, nil + } + + info, err := m.dn.Stat(p) + if err == nil { + return info, nil + } + + if m.dirs[p] { + return &fileInfo{name: path.Base(p), isDir: true, mode: fs.ModeDir | 0755}, nil + } + return nil, coreerr.E("datanode.Stat", "not found: "+p, os.ErrNotExist) +} + +func (m *Medium) Open(p string) (fs.File, error) { + m.mu.RLock() + defer m.mu.RUnlock() + + p = clean(p) + return m.dn.Open(p) +} + +func (m *Medium) Create(p string) (goio.WriteCloser, error) { + p = clean(p) + if p == "" { + return nil, coreerr.E("datanode.Create", "empty path", os.ErrInvalid) + } + return &writeCloser{m: m, path: p}, nil +} + +func (m *Medium) Append(p string) (goio.WriteCloser, error) { + p = clean(p) + if p == "" { + return nil, coreerr.E("datanode.Append", "empty path", os.ErrInvalid) + } + + // Read existing content + var existing []byte 
+ m.mu.RLock() + f, err := m.dn.Open(p) + if err == nil { + existing, _ = goio.ReadAll(f) + f.Close() + } + m.mu.RUnlock() + + return &writeCloser{m: m, path: p, buf: existing}, nil +} + +func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { + m.mu.RLock() + defer m.mu.RUnlock() + + p = clean(p) + f, err := m.dn.Open(p) + if err != nil { + return nil, coreerr.E("datanode.ReadStream", "not found: "+p, os.ErrNotExist) + } + return f.(goio.ReadCloser), nil +} + +func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { + return m.Create(p) +} + +func (m *Medium) Exists(p string) bool { + m.mu.RLock() + defer m.mu.RUnlock() + + p = clean(p) + if p == "" { + return true // root always exists + } + _, err := m.dn.Stat(p) + if err == nil { + return true + } + return m.dirs[p] +} + +func (m *Medium) IsDir(p string) bool { + m.mu.RLock() + defer m.mu.RUnlock() + + p = clean(p) + if p == "" { + return true + } + info, err := m.dn.Stat(p) + if err == nil { + return info.IsDir() + } + return m.dirs[p] +} + +// --- internal helpers --- + +// hasPrefixLocked checks if any file path starts with prefix. Caller holds lock. +func (m *Medium) hasPrefixLocked(prefix string) bool { + entries, _ := m.collectAllLocked() + for _, name := range entries { + if strings.HasPrefix(name, prefix) { + return true + } + } + for d := range m.dirs { + if strings.HasPrefix(d, prefix) { + return true + } + } + return false +} + +// collectAllLocked returns all file paths in the DataNode. Caller holds lock. +func (m *Medium) collectAllLocked() ([]string, error) { + var names []string + err := fs.WalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error { + if err != nil { + return nil + } + if !d.IsDir() { + names = append(names, p) + } + return nil + }) + return names, err +} + +// removeFileLocked removes a single file by rebuilding the DataNode. +// This is necessary because Borg's DataNode doesn't expose a Remove method. +// Caller must hold m.mu write lock. 
+func (m *Medium) removeFileLocked(target string) { + entries, _ := m.collectAllLocked() + newDN := datanode.New() + for _, name := range entries { + if name == target { + continue + } + f, err := m.dn.Open(name) + if err != nil { + continue + } + data, err := goio.ReadAll(f) + f.Close() + if err != nil { + continue + } + newDN.AddData(name, data) + } + m.dn = newDN +} + +// --- writeCloser buffers writes and flushes to DataNode on Close --- + +type writeCloser struct { + m *Medium + path string + buf []byte +} + +func (w *writeCloser) Write(p []byte) (int, error) { + w.buf = append(w.buf, p...) + return len(p), nil +} + +func (w *writeCloser) Close() error { + w.m.mu.Lock() + defer w.m.mu.Unlock() + + w.m.dn.AddData(w.path, w.buf) + w.m.ensureDirsLocked(path.Dir(w.path)) + return nil +} + +// --- fs types for explicit directories --- + +type dirEntry struct { + name string +} + +func (d *dirEntry) Name() string { return d.name } +func (d *dirEntry) IsDir() bool { return true } +func (d *dirEntry) Type() fs.FileMode { return fs.ModeDir } +func (d *dirEntry) Info() (fs.FileInfo, error) { return &fileInfo{name: d.name, isDir: true, mode: fs.ModeDir | 0755}, nil } + +type fileInfo struct { + name string + size int64 + mode fs.FileMode + modTime time.Time + isDir bool +} + +func (fi *fileInfo) Name() string { return fi.name } +func (fi *fileInfo) Size() int64 { return fi.size } +func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } +func (fi *fileInfo) ModTime() time.Time { return fi.modTime } +func (fi *fileInfo) IsDir() bool { return fi.isDir } +func (fi *fileInfo) Sys() any { return nil } diff --git a/pkg/io/datanode/client_test.go b/pkg/io/datanode/client_test.go new file mode 100644 index 0000000..70ed2cc --- /dev/null +++ b/pkg/io/datanode/client_test.go @@ -0,0 +1,352 @@ +package datanode + +import ( + "io" + "testing" + + coreio "github.com/host-uk/core/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// 
Compile-time check: Medium implements io.Medium. +var _ coreio.Medium = (*Medium)(nil) + +func TestReadWrite_Good(t *testing.T) { + m := New() + + err := m.Write("hello.txt", "world") + require.NoError(t, err) + + got, err := m.Read("hello.txt") + require.NoError(t, err) + assert.Equal(t, "world", got) +} + +func TestReadWrite_Bad(t *testing.T) { + m := New() + + _, err := m.Read("missing.txt") + assert.Error(t, err) + + err = m.Write("", "content") + assert.Error(t, err) +} + +func TestNestedPaths_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("a/b/c/deep.txt", "deep")) + + got, err := m.Read("a/b/c/deep.txt") + require.NoError(t, err) + assert.Equal(t, "deep", got) + + assert.True(t, m.IsDir("a")) + assert.True(t, m.IsDir("a/b")) + assert.True(t, m.IsDir("a/b/c")) +} + +func TestLeadingSlash_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("/leading/file.txt", "stripped")) + got, err := m.Read("leading/file.txt") + require.NoError(t, err) + assert.Equal(t, "stripped", got) + + got, err = m.Read("/leading/file.txt") + require.NoError(t, err) + assert.Equal(t, "stripped", got) +} + +func TestIsFile_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("file.go", "package main")) + + assert.True(t, m.IsFile("file.go")) + assert.False(t, m.IsFile("missing.go")) + assert.False(t, m.IsFile("")) // empty path +} + +func TestEnsureDir_Good(t *testing.T) { + m := New() + + require.NoError(t, m.EnsureDir("foo/bar/baz")) + + assert.True(t, m.IsDir("foo")) + assert.True(t, m.IsDir("foo/bar")) + assert.True(t, m.IsDir("foo/bar/baz")) + assert.True(t, m.Exists("foo/bar/baz")) +} + +func TestDelete_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("delete-me.txt", "bye")) + assert.True(t, m.Exists("delete-me.txt")) + + require.NoError(t, m.Delete("delete-me.txt")) + assert.False(t, m.Exists("delete-me.txt")) +} + +func TestDelete_Bad(t *testing.T) { + m := New() + + // Delete non-existent + assert.Error(t, 
m.Delete("ghost.txt")) + + // Delete non-empty dir + require.NoError(t, m.Write("dir/file.txt", "content")) + assert.Error(t, m.Delete("dir")) +} + +func TestDeleteAll_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("tree/a.txt", "a")) + require.NoError(t, m.Write("tree/sub/b.txt", "b")) + require.NoError(t, m.Write("keep.txt", "keep")) + + require.NoError(t, m.DeleteAll("tree")) + + assert.False(t, m.Exists("tree/a.txt")) + assert.False(t, m.Exists("tree/sub/b.txt")) + assert.True(t, m.Exists("keep.txt")) +} + +func TestRename_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("old.txt", "content")) + require.NoError(t, m.Rename("old.txt", "new.txt")) + + assert.False(t, m.Exists("old.txt")) + got, err := m.Read("new.txt") + require.NoError(t, err) + assert.Equal(t, "content", got) +} + +func TestRenameDir_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("src/a.go", "package a")) + require.NoError(t, m.Write("src/sub/b.go", "package b")) + + require.NoError(t, m.Rename("src", "dst")) + + assert.False(t, m.Exists("src/a.go")) + + got, err := m.Read("dst/a.go") + require.NoError(t, err) + assert.Equal(t, "package a", got) + + got, err = m.Read("dst/sub/b.go") + require.NoError(t, err) + assert.Equal(t, "package b", got) +} + +func TestList_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("root.txt", "r")) + require.NoError(t, m.Write("pkg/a.go", "a")) + require.NoError(t, m.Write("pkg/b.go", "b")) + require.NoError(t, m.Write("pkg/sub/c.go", "c")) + + entries, err := m.List("") + require.NoError(t, err) + + names := make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + assert.Contains(t, names, "root.txt") + assert.Contains(t, names, "pkg") + + entries, err = m.List("pkg") + require.NoError(t, err) + names = make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + assert.Contains(t, names, "a.go") + assert.Contains(t, names, "b.go") + 
assert.Contains(t, names, "sub") +} + +func TestStat_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("stat.txt", "hello")) + + info, err := m.Stat("stat.txt") + require.NoError(t, err) + assert.Equal(t, int64(5), info.Size()) + assert.False(t, info.IsDir()) + + // Root stat + info, err = m.Stat("") + require.NoError(t, err) + assert.True(t, info.IsDir()) +} + +func TestOpen_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("open.txt", "opened")) + + f, err := m.Open("open.txt") + require.NoError(t, err) + defer f.Close() + + data, err := io.ReadAll(f) + require.NoError(t, err) + assert.Equal(t, "opened", string(data)) +} + +func TestCreateAppend_Good(t *testing.T) { + m := New() + + // Create + w, err := m.Create("new.txt") + require.NoError(t, err) + w.Write([]byte("hello")) + w.Close() + + got, err := m.Read("new.txt") + require.NoError(t, err) + assert.Equal(t, "hello", got) + + // Append + w, err = m.Append("new.txt") + require.NoError(t, err) + w.Write([]byte(" world")) + w.Close() + + got, err = m.Read("new.txt") + require.NoError(t, err) + assert.Equal(t, "hello world", got) +} + +func TestStreams_Good(t *testing.T) { + m := New() + + // WriteStream + ws, err := m.WriteStream("stream.txt") + require.NoError(t, err) + ws.Write([]byte("streamed")) + ws.Close() + + // ReadStream + rs, err := m.ReadStream("stream.txt") + require.NoError(t, err) + data, err := io.ReadAll(rs) + require.NoError(t, err) + assert.Equal(t, "streamed", string(data)) + rs.Close() +} + +func TestFileGetFileSet_Good(t *testing.T) { + m := New() + + require.NoError(t, m.FileSet("alias.txt", "via set")) + + got, err := m.FileGet("alias.txt") + require.NoError(t, err) + assert.Equal(t, "via set", got) +} + +func TestSnapshotRestore_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("a.txt", "alpha")) + require.NoError(t, m.Write("b/c.txt", "charlie")) + + snap, err := m.Snapshot() + require.NoError(t, err) + assert.NotEmpty(t, snap) + + // 
Restore into a new Medium + m2, err := FromTar(snap) + require.NoError(t, err) + + got, err := m2.Read("a.txt") + require.NoError(t, err) + assert.Equal(t, "alpha", got) + + got, err = m2.Read("b/c.txt") + require.NoError(t, err) + assert.Equal(t, "charlie", got) +} + +func TestRestore_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("original.txt", "before")) + + snap, err := m.Snapshot() + require.NoError(t, err) + + // Modify + require.NoError(t, m.Write("original.txt", "after")) + require.NoError(t, m.Write("extra.txt", "extra")) + + // Restore to snapshot + require.NoError(t, m.Restore(snap)) + + got, err := m.Read("original.txt") + require.NoError(t, err) + assert.Equal(t, "before", got) + + assert.False(t, m.Exists("extra.txt")) +} + +func TestDataNode_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("test.txt", "borg")) + + dn := m.DataNode() + assert.NotNil(t, dn) + + // Verify we can use the DataNode directly + f, err := dn.Open("test.txt") + require.NoError(t, err) + defer f.Close() + + data, err := io.ReadAll(f) + require.NoError(t, err) + assert.Equal(t, "borg", string(data)) +} + +func TestOverwrite_Good(t *testing.T) { + m := New() + + require.NoError(t, m.Write("file.txt", "v1")) + require.NoError(t, m.Write("file.txt", "v2")) + + got, err := m.Read("file.txt") + require.NoError(t, err) + assert.Equal(t, "v2", got) +} + +func TestExists_Good(t *testing.T) { + m := New() + + assert.True(t, m.Exists("")) // root + assert.False(t, m.Exists("x")) + + require.NoError(t, m.Write("x", "y")) + assert.True(t, m.Exists("x")) +} + +func TestReadDir_Ugly(t *testing.T) { + m := New() + + // Read from a file path (not a dir) should return empty or error + require.NoError(t, m.Write("file.txt", "content")) + _, err := m.Read("file.txt") + require.NoError(t, err) +} diff --git a/pkg/release/config_test.go b/pkg/release/config_test.go index 7af80e9..59d47e8 100644 --- a/pkg/release/config_test.go +++ b/pkg/release/config_test.go @@ 
-327,6 +327,9 @@ func TestWriteConfig_Bad(t *testing.T) { }) t.Run("returns error when directory creation fails", func(t *testing.T) { + if os.Geteuid() == 0 { + t.Skip("root can create directories anywhere") + } // Use a path that doesn't exist and can't be created cfg := DefaultConfig() err := WriteConfig(io.Local, cfg, "/nonexistent/path/that/cannot/be/created") diff --git a/pkg/release/release_test.go b/pkg/release/release_test.go index d768e92..a0dce08 100644 --- a/pkg/release/release_test.go +++ b/pkg/release/release_test.go @@ -141,6 +141,9 @@ func TestFindArtifacts_Bad(t *testing.T) { }) t.Run("returns error when dist directory is unreadable", func(t *testing.T) { + if os.Geteuid() == 0 { + t.Skip("root can read any directory") + } dir := t.TempDir() distDir := filepath.Join(dir, "dist") require.NoError(t, os.MkdirAll(distDir, 0755))