From 1c27bc4ca9f46560a4c968c3c4b3a93b92731c27 Mon Sep 17 00:00:00 2001 From: Snider Date: Sat, 21 Feb 2026 21:45:58 +0000 Subject: [PATCH] feat: absorb dev tooling commands from CLI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cmd/dev — multi-repo workflow (work, push, pull, commit, health) cmd/setup — project bootstrap, CI, GitHub config cmd/qa — code quality, review, docblock checking cmd/docs — docs scan/sync/list cmd/gitcmd — git helpers cmd/monitor — monitoring Co-Authored-By: Virgil --- cmd/dev/cmd_api.go | 22 ++ cmd/dev/cmd_apply.go | 304 +++++++++++++++++ cmd/dev/cmd_bundles.go | 86 +++++ cmd/dev/cmd_ci.go | 261 +++++++++++++++ cmd/dev/cmd_commit.go | 201 +++++++++++ cmd/dev/cmd_dev.go | 96 ++++++ cmd/dev/cmd_file_sync.go | 340 +++++++++++++++++++ cmd/dev/cmd_health.go | 185 +++++++++++ cmd/dev/cmd_impact.go | 184 ++++++++++ cmd/dev/cmd_issues.go | 208 ++++++++++++ cmd/dev/cmd_pull.go | 130 ++++++++ cmd/dev/cmd_push.go | 275 +++++++++++++++ cmd/dev/cmd_reviews.go | 237 +++++++++++++ cmd/dev/cmd_sync.go | 174 ++++++++++ cmd/dev/cmd_vm.go | 510 ++++++++++++++++++++++++++++ cmd/dev/cmd_work.go | 344 +++++++++++++++++++ cmd/dev/cmd_workflow.go | 307 +++++++++++++++++ cmd/dev/cmd_workflow_test.go | 108 ++++++ cmd/dev/registry.go | 69 ++++ cmd/dev/service.go | 291 ++++++++++++++++ cmd/docs/cmd_commands.go | 20 ++ cmd/docs/cmd_docs.go | 30 ++ cmd/docs/cmd_list.go | 83 +++++ cmd/docs/cmd_scan.go | 159 +++++++++ cmd/docs/cmd_sync.go | 327 ++++++++++++++++++ cmd/gitcmd/cmd_git.go | 44 +++ cmd/monitor/cmd_commands.go | 47 +++ cmd/monitor/cmd_monitor.go | 590 +++++++++++++++++++++++++++++++++ cmd/qa/cmd_docblock.go | 353 ++++++++++++++++++++ cmd/qa/cmd_health.go | 289 ++++++++++++++++ cmd/qa/cmd_issues.go | 401 ++++++++++++++++++++++ cmd/qa/cmd_qa.go | 45 +++ cmd/qa/cmd_review.go | 322 ++++++++++++++++++ cmd/qa/cmd_watch.go | 444 +++++++++++++++++++++++++ cmd/setup/cmd_bootstrap.go | 176 ++++++++++ cmd/setup/cmd_ci.go | 300 
+++++++++++++++++ cmd/setup/cmd_commands.go | 38 +++ cmd/setup/cmd_github.go | 230 +++++++++++++ cmd/setup/cmd_registry.go | 264 +++++++++++++++ cmd/setup/cmd_repo.go | 289 ++++++++++++++++ cmd/setup/cmd_setup.go | 59 ++++ cmd/setup/cmd_wizard.go | 93 ++++++ cmd/setup/github_config.go | 204 ++++++++++++ cmd/setup/github_diff.go | 288 ++++++++++++++++ cmd/setup/github_labels.go | 152 +++++++++ cmd/setup/github_protection.go | 299 +++++++++++++++++ cmd/setup/github_security.go | 281 ++++++++++++++++ cmd/setup/github_webhooks.go | 263 +++++++++++++++ 48 files changed, 10422 insertions(+) create mode 100644 cmd/dev/cmd_api.go create mode 100644 cmd/dev/cmd_apply.go create mode 100644 cmd/dev/cmd_bundles.go create mode 100644 cmd/dev/cmd_ci.go create mode 100644 cmd/dev/cmd_commit.go create mode 100644 cmd/dev/cmd_dev.go create mode 100644 cmd/dev/cmd_file_sync.go create mode 100644 cmd/dev/cmd_health.go create mode 100644 cmd/dev/cmd_impact.go create mode 100644 cmd/dev/cmd_issues.go create mode 100644 cmd/dev/cmd_pull.go create mode 100644 cmd/dev/cmd_push.go create mode 100644 cmd/dev/cmd_reviews.go create mode 100644 cmd/dev/cmd_sync.go create mode 100644 cmd/dev/cmd_vm.go create mode 100644 cmd/dev/cmd_work.go create mode 100644 cmd/dev/cmd_workflow.go create mode 100644 cmd/dev/cmd_workflow_test.go create mode 100644 cmd/dev/registry.go create mode 100644 cmd/dev/service.go create mode 100644 cmd/docs/cmd_commands.go create mode 100644 cmd/docs/cmd_docs.go create mode 100644 cmd/docs/cmd_list.go create mode 100644 cmd/docs/cmd_scan.go create mode 100644 cmd/docs/cmd_sync.go create mode 100644 cmd/gitcmd/cmd_git.go create mode 100644 cmd/monitor/cmd_commands.go create mode 100644 cmd/monitor/cmd_monitor.go create mode 100644 cmd/qa/cmd_docblock.go create mode 100644 cmd/qa/cmd_health.go create mode 100644 cmd/qa/cmd_issues.go create mode 100644 cmd/qa/cmd_qa.go create mode 100644 cmd/qa/cmd_review.go create mode 100644 cmd/qa/cmd_watch.go create mode 100644 
cmd/setup/cmd_bootstrap.go create mode 100644 cmd/setup/cmd_ci.go create mode 100644 cmd/setup/cmd_commands.go create mode 100644 cmd/setup/cmd_github.go create mode 100644 cmd/setup/cmd_registry.go create mode 100644 cmd/setup/cmd_repo.go create mode 100644 cmd/setup/cmd_setup.go create mode 100644 cmd/setup/cmd_wizard.go create mode 100644 cmd/setup/github_config.go create mode 100644 cmd/setup/github_diff.go create mode 100644 cmd/setup/github_labels.go create mode 100644 cmd/setup/github_protection.go create mode 100644 cmd/setup/github_security.go create mode 100644 cmd/setup/github_webhooks.go diff --git a/cmd/dev/cmd_api.go b/cmd/dev/cmd_api.go new file mode 100644 index 0000000..405085b --- /dev/null +++ b/cmd/dev/cmd_api.go @@ -0,0 +1,22 @@ +package dev + +import ( + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// addAPICommands adds the 'api' command and its subcommands to the given parent command. +func addAPICommands(parent *cli.Command) { + // Create the 'api' command + apiCmd := &cli.Command{ + Use: "api", + Short: i18n.T("cmd.dev.api.short"), + } + parent.AddCommand(apiCmd) + + // Add the 'sync' command to 'api' + addSyncCommand(apiCmd) + + // TODO: Add the 'test-gen' command to 'api' + // addTestGenCommand(apiCmd) +} diff --git a/cmd/dev/cmd_apply.go b/cmd/dev/cmd_apply.go new file mode 100644 index 0000000..75c392d --- /dev/null +++ b/cmd/dev/cmd_apply.go @@ -0,0 +1,304 @@ +// cmd_apply.go implements safe command/script execution across repos for AI agents. +// +// Usage: +// core dev apply --command="sed -i 's/old/new/g' README.md" +// core dev apply --script="./scripts/update-version.sh" +// core dev apply --command="..." 
--commit --message="chore: update" + +package dev + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go/pkg/cli" + core "forge.lthn.ai/core/go/pkg/framework/core" + "forge.lthn.ai/core/go-scm/git" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/repos" +) + +// Apply command flags +var ( + applyCommand string + applyScript string + applyRepos string + applyCommit bool + applyMessage string + applyCoAuthor string + applyDryRun bool + applyPush bool + applyContinue bool // Continue on error + applyYes bool // Skip confirmation prompt +) + +// AddApplyCommand adds the 'apply' command to dev. +func AddApplyCommand(parent *cli.Command) { + applyCmd := &cli.Command{ + Use: "apply", + Short: i18n.T("cmd.dev.apply.short"), + Long: i18n.T("cmd.dev.apply.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runApply() + }, + } + + applyCmd.Flags().StringVar(&applyCommand, "command", "", i18n.T("cmd.dev.apply.flag.command")) + applyCmd.Flags().StringVar(&applyScript, "script", "", i18n.T("cmd.dev.apply.flag.script")) + applyCmd.Flags().StringVar(&applyRepos, "repos", "", i18n.T("cmd.dev.apply.flag.repos")) + applyCmd.Flags().BoolVar(&applyCommit, "commit", false, i18n.T("cmd.dev.apply.flag.commit")) + applyCmd.Flags().StringVarP(&applyMessage, "message", "m", "", i18n.T("cmd.dev.apply.flag.message")) + applyCmd.Flags().StringVar(&applyCoAuthor, "co-author", "", i18n.T("cmd.dev.apply.flag.co_author")) + applyCmd.Flags().BoolVar(&applyDryRun, "dry-run", false, i18n.T("cmd.dev.apply.flag.dry_run")) + applyCmd.Flags().BoolVar(&applyPush, "push", false, i18n.T("cmd.dev.apply.flag.push")) + applyCmd.Flags().BoolVar(&applyContinue, "continue", false, i18n.T("cmd.dev.apply.flag.continue")) + applyCmd.Flags().BoolVarP(&applyYes, "yes", "y", false, i18n.T("cmd.dev.apply.flag.yes")) + + parent.AddCommand(applyCmd) +} + +func runApply() error { + ctx := context.Background() + + 
// Validate inputs + if applyCommand == "" && applyScript == "" { + return core.E("dev.apply", i18n.T("cmd.dev.apply.error.no_command"), nil) + } + if applyCommand != "" && applyScript != "" { + return core.E("dev.apply", i18n.T("cmd.dev.apply.error.both_command_script"), nil) + } + if applyCommit && applyMessage == "" { + return core.E("dev.apply", i18n.T("cmd.dev.apply.error.commit_needs_message"), nil) + } + + // Validate script exists + if applyScript != "" { + if !io.Local.IsFile(applyScript) { + return core.E("dev.apply", "script not found: "+applyScript, nil) // Error mismatch? IsFile returns bool + } + } + + // Get target repos + targetRepos, err := getApplyTargetRepos() + if err != nil { + return err + } + + if len(targetRepos) == 0 { + return core.E("dev.apply", i18n.T("cmd.dev.apply.error.no_repos"), nil) + } + + // Show plan + action := applyCommand + if applyScript != "" { + action = applyScript + } + cli.Print("%s: %s\n", dimStyle.Render(i18n.T("cmd.dev.apply.action")), action) + cli.Print("%s: %d repos\n", dimStyle.Render(i18n.T("cmd.dev.apply.targets")), len(targetRepos)) + if applyDryRun { + cli.Print("%s\n", warningStyle.Render(i18n.T("cmd.dev.apply.dry_run_mode"))) + } + cli.Blank() + + // Require confirmation unless --yes or --dry-run + if !applyYes && !applyDryRun { + cli.Print("%s\n", warningStyle.Render(i18n.T("cmd.dev.apply.warning"))) + cli.Blank() + + if !cli.Confirm(i18n.T("cmd.dev.apply.confirm"), cli.Required()) { + cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.dev.apply.cancelled"))) + return nil + } + cli.Blank() + } + + var succeeded, skipped, failed int + + for _, repo := range targetRepos { + repoName := filepath.Base(repo.Path) + + if applyDryRun { + cli.Print(" %s %s\n", dimStyle.Render("[dry-run]"), repoName) + succeeded++ + continue + } + + // Step 1: Run command or script + var cmdErr error + if applyCommand != "" { + cmdErr = runCommandInRepo(ctx, repo.Path, applyCommand) + } else { + cmdErr = runScriptInRepo(ctx, repo.Path, 
applyScript) + } + + if cmdErr != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), repoName, cmdErr) + failed++ + if !applyContinue { + return cli.Err("%s", i18n.T("cmd.dev.apply.error.command_failed")) + } + continue + } + + // Step 2: Check if anything changed + statuses := git.Status(ctx, git.StatusOptions{ + Paths: []string{repo.Path}, + Names: map[string]string{repo.Path: repoName}, + }) + if len(statuses) == 0 || !statuses[0].IsDirty() { + cli.Print(" %s %s: %s\n", dimStyle.Render("-"), repoName, i18n.T("cmd.dev.apply.no_changes")) + skipped++ + continue + } + + // Step 3: Commit if requested + if applyCommit { + commitMsg := applyMessage + if applyCoAuthor != "" { + commitMsg += "\n\nCo-Authored-By: " + applyCoAuthor + } + + // Stage all changes + if _, err := gitCommandQuiet(ctx, repo.Path, "add", "-A"); err != nil { + cli.Print(" %s %s: stage failed: %s\n", errorStyle.Render("x"), repoName, err) + failed++ + if !applyContinue { + return err + } + continue + } + + // Commit + if _, err := gitCommandQuiet(ctx, repo.Path, "commit", "-m", commitMsg); err != nil { + cli.Print(" %s %s: commit failed: %s\n", errorStyle.Render("x"), repoName, err) + failed++ + if !applyContinue { + return err + } + continue + } + + // Step 4: Push if requested + if applyPush { + if err := safePush(ctx, repo.Path); err != nil { + cli.Print(" %s %s: push failed: %s\n", errorStyle.Render("x"), repoName, err) + failed++ + if !applyContinue { + return err + } + continue + } + } + } + + cli.Print(" %s %s\n", successStyle.Render("v"), repoName) + succeeded++ + } + + // Summary + cli.Blank() + cli.Print("%s: ", i18n.T("cmd.dev.apply.summary")) + if succeeded > 0 { + cli.Print("%s", successStyle.Render(i18n.T("common.count.succeeded", map[string]interface{}{"Count": succeeded}))) + } + if skipped > 0 { + if succeeded > 0 { + cli.Print(", ") + } + cli.Print("%s", dimStyle.Render(i18n.T("common.count.skipped", map[string]interface{}{"Count": skipped}))) + } + if failed > 0 { + if 
succeeded > 0 || skipped > 0 { + cli.Print(", ") + } + cli.Print("%s", errorStyle.Render(i18n.T("common.count.failed", map[string]interface{}{"Count": failed}))) + } + cli.Blank() + + return nil +} + +// getApplyTargetRepos gets repos to apply command to +func getApplyTargetRepos() ([]*repos.Repo, error) { + // Load registry + registryPath, err := repos.FindRegistry(io.Local) + if err != nil { + return nil, core.E("dev.apply", "failed to find registry", err) + } + + registry, err := repos.LoadRegistry(io.Local, registryPath) + if err != nil { + return nil, core.E("dev.apply", "failed to load registry", err) + } + + // If --repos specified, filter to those + if applyRepos != "" { + repoNames := strings.Split(applyRepos, ",") + nameSet := make(map[string]bool) + for _, name := range repoNames { + nameSet[strings.TrimSpace(name)] = true + } + + var matched []*repos.Repo + for _, repo := range registry.Repos { + if nameSet[repo.Name] { + matched = append(matched, repo) + } + } + return matched, nil + } + + // Return all repos as slice + var all []*repos.Repo + for _, repo := range registry.Repos { + all = append(all, repo) + } + return all, nil +} + +// runCommandInRepo runs a shell command in a repo directory +func runCommandInRepo(ctx context.Context, repoPath, command string) error { + // Use shell to execute command + var cmd *exec.Cmd + if isWindows() { + cmd = exec.CommandContext(ctx, "cmd", "/C", command) + } else { + cmd = exec.CommandContext(ctx, "sh", "-c", command) + } + cmd.Dir = repoPath + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + return cmd.Run() +} + +// runScriptInRepo runs a script in a repo directory +func runScriptInRepo(ctx context.Context, repoPath, scriptPath string) error { + // Get absolute path to script + absScript, err := filepath.Abs(scriptPath) + if err != nil { + return err + } + + var cmd *exec.Cmd + if isWindows() { + cmd = exec.CommandContext(ctx, "cmd", "/C", absScript) + } else { + // Execute script directly to honor shebang 
+ cmd = exec.CommandContext(ctx, absScript) + } + cmd.Dir = repoPath + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + return cmd.Run() +} + +// isWindows returns true if running on Windows +func isWindows() bool { + return os.PathSeparator == '\\' +} diff --git a/cmd/dev/cmd_bundles.go b/cmd/dev/cmd_bundles.go new file mode 100644 index 0000000..8721713 --- /dev/null +++ b/cmd/dev/cmd_bundles.go @@ -0,0 +1,86 @@ +package dev + +import ( + "context" + + "forge.lthn.ai/core/go-agentic" + "forge.lthn.ai/core/go/pkg/framework" + "forge.lthn.ai/core/go-scm/git" +) + +// WorkBundle contains the Core instance for dev work operations. +type WorkBundle struct { + Core *framework.Core +} + +// WorkBundleOptions configures the work bundle. +type WorkBundleOptions struct { + RegistryPath string + AllowEdit bool // Allow agentic to use Write/Edit tools +} + +// NewWorkBundle creates a bundle for dev work operations. +// Includes: dev (orchestration), git, agentic services. +func NewWorkBundle(opts WorkBundleOptions) (*WorkBundle, error) { + c, err := framework.New( + framework.WithService(NewService(ServiceOptions{ + RegistryPath: opts.RegistryPath, + })), + framework.WithService(git.NewService(git.ServiceOptions{})), + framework.WithService(agentic.NewService(agentic.ServiceOptions{ + AllowEdit: opts.AllowEdit, + })), + framework.WithServiceLock(), + ) + if err != nil { + return nil, err + } + + return &WorkBundle{Core: c}, nil +} + +// Start initialises the bundle services. +func (b *WorkBundle) Start(ctx context.Context) error { + return b.Core.ServiceStartup(ctx, nil) +} + +// Stop shuts down the bundle services. +func (b *WorkBundle) Stop(ctx context.Context) error { + return b.Core.ServiceShutdown(ctx) +} + +// StatusBundle contains the Core instance for status-only operations. +type StatusBundle struct { + Core *framework.Core +} + +// StatusBundleOptions configures the status bundle. 
+type StatusBundleOptions struct { + RegistryPath string +} + +// NewStatusBundle creates a bundle for status-only operations. +// Includes: dev (orchestration), git services. No agentic - commits not available. +func NewStatusBundle(opts StatusBundleOptions) (*StatusBundle, error) { + c, err := framework.New( + framework.WithService(NewService(ServiceOptions(opts))), + framework.WithService(git.NewService(git.ServiceOptions{})), + // No agentic service - TaskCommit will be unhandled + framework.WithServiceLock(), + ) + if err != nil { + return nil, err + } + + return &StatusBundle{Core: c}, nil +} + +// Start initialises the bundle services. +func (b *StatusBundle) Start(ctx context.Context) error { + return b.Core.ServiceStartup(ctx, nil) +} + +// Stop shuts down the bundle services. +func (b *StatusBundle) Stop(ctx context.Context) error { + return b.Core.ServiceShutdown(ctx) +} diff --git a/cmd/dev/cmd_ci.go b/cmd/dev/cmd_ci.go new file mode 100644 index 0000000..46b1cab --- /dev/null +++ b/cmd/dev/cmd_ci.go @@ -0,0 +1,261 @@ +package dev + +import ( + "encoding/json" + "errors" + "os" + "os/exec" + "strings" + "time" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/repos" +) + +// CI-specific styles (aliases to shared) +var ( + ciSuccessStyle = cli.SuccessStyle + ciFailureStyle = cli.ErrorStyle + ciPendingStyle = cli.WarningStyle + ciSkippedStyle = cli.DimStyle +) + +// WorkflowRun represents a GitHub Actions workflow run +type WorkflowRun struct { + Name string `json:"name"` + Status string `json:"status"` + Conclusion string `json:"conclusion"` + HeadBranch string `json:"headBranch"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + URL string `json:"url"` + + // Added by us + RepoName string `json:"-"` +} + +// CI command flags +var ( + ciRegistryPath string + ciBranch string + ciFailedOnly bool +) + +// addCICommand adds the 'ci' command to 
the given parent command. +func addCICommand(parent *cli.Command) { + ciCmd := &cli.Command{ + Use: "ci", + Short: i18n.T("cmd.dev.ci.short"), + Long: i18n.T("cmd.dev.ci.long"), + RunE: func(cmd *cli.Command, args []string) error { + branch := ciBranch + if branch == "" { + branch = "main" + } + return runCI(ciRegistryPath, branch, ciFailedOnly) + }, + } + + ciCmd.Flags().StringVar(&ciRegistryPath, "registry", "", i18n.T("common.flag.registry")) + ciCmd.Flags().StringVarP(&ciBranch, "branch", "b", "main", i18n.T("cmd.dev.ci.flag.branch")) + ciCmd.Flags().BoolVar(&ciFailedOnly, "failed", false, i18n.T("cmd.dev.ci.flag.failed")) + + parent.AddCommand(ciCmd) +} + +func runCI(registryPath string, branch string, failedOnly bool) error { + // Check gh is available + if _, err := exec.LookPath("gh"); err != nil { + return errors.New(i18n.T("error.gh_not_found")) + } + + // Find or use provided registry + var reg *repos.Registry + var err error + + if registryPath != "" { + reg, err = repos.LoadRegistry(io.Local, registryPath) + if err != nil { + return cli.Wrap(err, "failed to load registry") + } + } else { + registryPath, err = repos.FindRegistry(io.Local) + if err == nil { + reg, err = repos.LoadRegistry(io.Local, registryPath) + if err != nil { + return cli.Wrap(err, "failed to load registry") + } + } else { + cwd, _ := os.Getwd() + reg, err = repos.ScanDirectory(io.Local, cwd) + if err != nil { + return cli.Wrap(err, "failed to scan directory") + } + } + } + + // Fetch CI status sequentially + var allRuns []WorkflowRun + var fetchErrors []error + var noCI []string + + repoList := reg.List() + for i, repo := range repoList { + repoFullName := cli.Sprintf("%s/%s", reg.Org, repo.Name) + cli.Print("\033[2K\r%s %d/%d %s", dimStyle.Render(i18n.T("i18n.progress.check")), i+1, len(repoList), repo.Name) + + runs, err := fetchWorkflowRuns(repoFullName, repo.Name, branch) + if err != nil { + if strings.Contains(err.Error(), "no workflows") { + noCI = append(noCI, repo.Name) + } 
else { + fetchErrors = append(fetchErrors, cli.Wrap(err, repo.Name)) + } + continue + } + + if len(runs) > 0 { + // Just get the latest run + allRuns = append(allRuns, runs[0]) + } else { + noCI = append(noCI, repo.Name) + } + } + cli.Print("\033[2K\r") // Clear progress line + + // Count by status + var success, failed, pending, other int + for _, run := range allRuns { + switch run.Conclusion { + case "success": + success++ + case "failure": + failed++ + case "": + if run.Status == "in_progress" || run.Status == "queued" { + pending++ + } else { + other++ + } + default: + other++ + } + } + + // Print summary + cli.Blank() + cli.Print("%s", i18n.T("cmd.dev.ci.repos_checked", map[string]interface{}{"Count": len(repoList)})) + if success > 0 { + cli.Print(" * %s", ciSuccessStyle.Render(i18n.T("cmd.dev.ci.passing", map[string]interface{}{"Count": success}))) + } + if failed > 0 { + cli.Print(" * %s", ciFailureStyle.Render(i18n.T("cmd.dev.ci.failing", map[string]interface{}{"Count": failed}))) + } + if pending > 0 { + cli.Print(" * %s", ciPendingStyle.Render(i18n.T("common.count.pending", map[string]interface{}{"Count": pending}))) + } + if len(noCI) > 0 { + cli.Print(" * %s", ciSkippedStyle.Render(i18n.T("cmd.dev.ci.no_ci", map[string]interface{}{"Count": len(noCI)}))) + } + cli.Blank() + cli.Blank() + + // Filter if needed + displayRuns := allRuns + if failedOnly { + displayRuns = nil + for _, run := range allRuns { + if run.Conclusion == "failure" { + displayRuns = append(displayRuns, run) + } + } + } + + // Print details + for _, run := range displayRuns { + printWorkflowRun(run) + } + + // Print errors + if len(fetchErrors) > 0 { + cli.Blank() + for _, err := range fetchErrors { + cli.Print("%s %s\n", errorStyle.Render(i18n.Label("error")), err) + } + } + + return nil +} + +func fetchWorkflowRuns(repoFullName, repoName string, branch string) ([]WorkflowRun, error) { + args := []string{ + "run", "list", + "--repo", repoFullName, + "--branch", branch, + "--limit", 
"1", + "--json", "name,status,conclusion,headBranch,createdAt,updatedAt,url", + } + + cmd := exec.Command("gh", args...) + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + stderr := string(exitErr.Stderr) + return nil, cli.Err("%s", strings.TrimSpace(stderr)) + } + return nil, err + } + + var runs []WorkflowRun + if err := json.Unmarshal(output, &runs); err != nil { + return nil, err + } + + // Tag with repo name + for i := range runs { + runs[i].RepoName = repoName + } + + return runs, nil +} + +func printWorkflowRun(run WorkflowRun) { + // Status icon + var status string + switch run.Conclusion { + case "success": + status = ciSuccessStyle.Render("v") + case "failure": + status = ciFailureStyle.Render("x") + case "": + switch run.Status { + case "in_progress": + status = ciPendingStyle.Render("*") + case "queued": + status = ciPendingStyle.Render("o") + default: + status = ciSkippedStyle.Render("-") + } + case "skipped": + status = ciSkippedStyle.Render("-") + case "cancelled": + status = ciSkippedStyle.Render("o") + default: + status = ciSkippedStyle.Render("?") + } + + // Workflow name (truncated) + workflowName := cli.Truncate(run.Name, 20) + + // Age + age := cli.FormatAge(run.UpdatedAt) + + cli.Print(" %s %-18s %-22s %s\n", + status, + repoNameStyle.Render(run.RepoName), + dimStyle.Render(workflowName), + issueAgeStyle.Render(age), + ) +} diff --git a/cmd/dev/cmd_commit.go b/cmd/dev/cmd_commit.go new file mode 100644 index 0000000..644e64e --- /dev/null +++ b/cmd/dev/cmd_commit.go @@ -0,0 +1,201 @@ +package dev + +import ( + "context" + "os" + "path/filepath" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go-scm/git" + "forge.lthn.ai/core/go/pkg/i18n" + coreio "forge.lthn.ai/core/go/pkg/io" +) + +// Commit command flags +var ( + commitRegistryPath string + commitAll bool +) + +// AddCommitCommand adds the 'commit' command to the given parent command. 
+func AddCommitCommand(parent *cli.Command) { + commitCmd := &cli.Command{ + Use: "commit", + Short: i18n.T("cmd.dev.commit.short"), + Long: i18n.T("cmd.dev.commit.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runCommit(commitRegistryPath, commitAll) + }, + } + + commitCmd.Flags().StringVar(&commitRegistryPath, "registry", "", i18n.T("common.flag.registry")) + commitCmd.Flags().BoolVar(&commitAll, "all", false, i18n.T("cmd.dev.commit.flag.all")) + + parent.AddCommand(commitCmd) +} + +func runCommit(registryPath string, all bool) error { + ctx := context.Background() + cwd, _ := os.Getwd() + + // Check if current directory is a git repo (single-repo mode) + if registryPath == "" && isGitRepo(cwd) { + return runCommitSingleRepo(ctx, cwd, all) + } + + // Multi-repo mode: find or use provided registry + reg, regDir, err := loadRegistryWithConfig(registryPath) + if err != nil { + return err + } + registryPath = regDir // Use resolved registry directory for relative paths + + // Build paths and names for git operations + var paths []string + names := make(map[string]string) + + for _, repo := range reg.List() { + if repo.IsGitRepo() { + paths = append(paths, repo.Path) + names[repo.Path] = repo.Name + } + } + + if len(paths) == 0 { + cli.Text(i18n.T("cmd.dev.no_git_repos")) + return nil + } + + // Get status for all repos + statuses := git.Status(ctx, git.StatusOptions{ + Paths: paths, + Names: names, + }) + + // Find dirty repos + var dirtyRepos []git.RepoStatus + for _, s := range statuses { + if s.Error == nil && s.IsDirty() { + dirtyRepos = append(dirtyRepos, s) + } + } + + if len(dirtyRepos) == 0 { + cli.Text(i18n.T("cmd.dev.no_changes")) + return nil + } + + // Show dirty repos + cli.Print("\n%s\n\n", i18n.T("cmd.dev.repos_with_changes", map[string]interface{}{"Count": len(dirtyRepos)})) + for _, s := range dirtyRepos { + cli.Print(" %s: ", repoNameStyle.Render(s.Name)) + if s.Modified > 0 { + cli.Print("%s ", 
dirtyStyle.Render(i18n.T("cmd.dev.modified", map[string]interface{}{"Count": s.Modified}))) + } + if s.Untracked > 0 { + cli.Print("%s ", dirtyStyle.Render(i18n.T("cmd.dev.untracked", map[string]interface{}{"Count": s.Untracked}))) + } + if s.Staged > 0 { + cli.Print("%s ", aheadStyle.Render(i18n.T("cmd.dev.staged", map[string]interface{}{"Count": s.Staged}))) + } + cli.Blank() + } + + // Confirm unless --all + if !all { + cli.Blank() + if !cli.Confirm(i18n.T("cmd.dev.confirm_claude_commit")) { + cli.Text(i18n.T("cli.aborted")) + return nil + } + } + + cli.Blank() + + // Commit each dirty repo + var succeeded, failed int + for _, s := range dirtyRepos { + cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.dev.committing")), s.Name) + + if err := claudeCommit(ctx, s.Path, s.Name, registryPath); err != nil { + cli.Print(" %s %s\n", errorStyle.Render("x"), err) + failed++ + } else { + cli.Print(" %s %s\n", successStyle.Render("v"), i18n.T("cmd.dev.committed")) + succeeded++ + } + cli.Blank() + } + + // Summary + cli.Print("%s", successStyle.Render(i18n.T("cmd.dev.done_succeeded", map[string]interface{}{"Count": succeeded}))) + if failed > 0 { + cli.Print(", %s", errorStyle.Render(i18n.T("common.count.failed", map[string]interface{}{"Count": failed}))) + } + cli.Blank() + + return nil +} + +// isGitRepo checks if a directory is a git repository. +func isGitRepo(path string) bool { + gitDir := path + "/.git" + _, err := coreio.Local.List(gitDir) + return err == nil +} + +// runCommitSingleRepo handles commit for a single repo (current directory). 
+func runCommitSingleRepo(ctx context.Context, repoPath string, all bool) error { + repoName := filepath.Base(repoPath) + + // Get status + statuses := git.Status(ctx, git.StatusOptions{ + Paths: []string{repoPath}, + Names: map[string]string{repoPath: repoName}, + }) + + if len(statuses) == 0 || statuses[0].Error != nil { + if len(statuses) > 0 && statuses[0].Error != nil { + return statuses[0].Error + } + return cli.Err("failed to get repo status") + } + + s := statuses[0] + if !s.IsDirty() { + cli.Text(i18n.T("cmd.dev.no_changes")) + return nil + } + + // Show status + cli.Print("%s: ", repoNameStyle.Render(s.Name)) + if s.Modified > 0 { + cli.Print("%s ", dirtyStyle.Render(i18n.T("cmd.dev.modified", map[string]interface{}{"Count": s.Modified}))) + } + if s.Untracked > 0 { + cli.Print("%s ", dirtyStyle.Render(i18n.T("cmd.dev.untracked", map[string]interface{}{"Count": s.Untracked}))) + } + if s.Staged > 0 { + cli.Print("%s ", aheadStyle.Render(i18n.T("cmd.dev.staged", map[string]interface{}{"Count": s.Staged}))) + } + cli.Blank() + + // Confirm unless --all + if !all { + cli.Blank() + if !cli.Confirm(i18n.T("cmd.dev.confirm_claude_commit")) { + cli.Text(i18n.T("cli.aborted")) + return nil + } + } + + cli.Blank() + + // Commit + if err := claudeCommit(ctx, repoPath, repoName, ""); err != nil { + cli.Print(" %s %s\n", errorStyle.Render("x"), err) + return err + } + cli.Print(" %s %s\n", successStyle.Render("v"), i18n.T("cmd.dev.committed")) + return nil +} diff --git a/cmd/dev/cmd_dev.go b/cmd/dev/cmd_dev.go new file mode 100644 index 0000000..5477719 --- /dev/null +++ b/cmd/dev/cmd_dev.go @@ -0,0 +1,96 @@ +// Package dev provides multi-repo development workflow commands. 
+// +// Git Operations: +// - work: Combined status, commit, and push workflow +// - health: Quick health check across all repos +// - commit: Claude-assisted commit message generation +// - push: Push repos with unpushed commits +// - pull: Pull repos that are behind remote +// +// GitHub Integration (requires gh CLI): +// - issues: List open issues across repos +// - reviews: List PRs needing review +// - ci: Check GitHub Actions CI status +// - impact: Analyse dependency impact of changes +// +// CI/Workflow Management: +// - workflow list: Show table of repos vs workflows +// - workflow sync: Copy workflow template to all repos +// +// API Tools: +// - api sync: Synchronize public service APIs +// +// Dev Environment (VM management): +// - install: Download dev environment image +// - boot: Start dev environment VM +// - stop: Stop dev environment VM +// - status: Check dev VM status +// - shell: Open shell in dev VM +// - serve: Mount project and start dev server +// - test: Run tests in dev environment +// - claude: Start sandboxed Claude session +// - update: Check for and apply updates +package dev + +import ( + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +func init() { + cli.RegisterCommands(AddDevCommands) +} + +// Style aliases from shared package +var ( + successStyle = cli.SuccessStyle + errorStyle = cli.ErrorStyle + warningStyle = cli.WarningStyle + dimStyle = cli.DimStyle + valueStyle = cli.ValueStyle + headerStyle = cli.HeaderStyle + repoNameStyle = cli.RepoStyle +) + +// Table styles for status display (extends shared styles with cell padding) +var ( + dirtyStyle = cli.NewStyle().Foreground(cli.ColourRed500) + aheadStyle = cli.NewStyle().Foreground(cli.ColourAmber500) + cleanStyle = cli.NewStyle().Foreground(cli.ColourGreen500) +) + +// AddDevCommands registers the 'dev' command and all subcommands. 
+func AddDevCommands(root *cli.Command) { + devCmd := &cli.Command{ + Use: "dev", + Short: i18n.T("cmd.dev.short"), + Long: i18n.T("cmd.dev.long"), + } + root.AddCommand(devCmd) + + // Git operations (also available under 'core git') + AddWorkCommand(devCmd) + AddHealthCommand(devCmd) + AddCommitCommand(devCmd) + AddPushCommand(devCmd) + AddPullCommand(devCmd) + + // Safe git operations for AI agents (also available under 'core git') + AddFileSyncCommand(devCmd) + AddApplyCommand(devCmd) + + // GitHub integration + addIssuesCommand(devCmd) + addReviewsCommand(devCmd) + addCICommand(devCmd) + addImpactCommand(devCmd) + + // CI/Workflow management + addWorkflowCommands(devCmd) + + // API tools + addAPICommands(devCmd) + + // Dev environment + addVMCommands(devCmd) +} diff --git a/cmd/dev/cmd_file_sync.go b/cmd/dev/cmd_file_sync.go new file mode 100644 index 0000000..436bf79 --- /dev/null +++ b/cmd/dev/cmd_file_sync.go @@ -0,0 +1,340 @@ +// cmd_file_sync.go implements safe file synchronization across repos for AI agents. +// +// Usage: +// core dev sync workflow.yml --to="packages/core-*" +// core dev sync .github/workflows/ --to="packages/core-*" --message="feat: add CI" +// core dev sync config.yaml --to="packages/core-*" --dry-run + +package dev + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go-scm/git" + "forge.lthn.ai/core/go/pkg/i18n" + coreio "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/log" + "forge.lthn.ai/core/go/pkg/repos" +) + +// File sync command flags +var ( + fileSyncTo string + fileSyncMessage string + fileSyncCoAuthor string + fileSyncDryRun bool + fileSyncPush bool +) + +// AddFileSyncCommand adds the 'sync' command to dev for file syncing. 
+func AddFileSyncCommand(parent *cli.Command) { + syncCmd := &cli.Command{ + Use: "sync ", + Short: i18n.T("cmd.dev.file_sync.short"), + Long: i18n.T("cmd.dev.file_sync.long"), + Args: cli.MinimumNArgs(1), + RunE: func(cmd *cli.Command, args []string) error { + return runFileSync(args[0]) + }, + } + + syncCmd.Flags().StringVar(&fileSyncTo, "to", "", i18n.T("cmd.dev.file_sync.flag.to")) + syncCmd.Flags().StringVarP(&fileSyncMessage, "message", "m", "", i18n.T("cmd.dev.file_sync.flag.message")) + syncCmd.Flags().StringVar(&fileSyncCoAuthor, "co-author", "", i18n.T("cmd.dev.file_sync.flag.co_author")) + syncCmd.Flags().BoolVar(&fileSyncDryRun, "dry-run", false, i18n.T("cmd.dev.file_sync.flag.dry_run")) + syncCmd.Flags().BoolVar(&fileSyncPush, "push", false, i18n.T("cmd.dev.file_sync.flag.push")) + + _ = syncCmd.MarkFlagRequired("to") + + parent.AddCommand(syncCmd) +} + +func runFileSync(source string) error { + ctx := context.Background() + + // Security: Reject path traversal attempts + if strings.Contains(source, "..") { + return log.E("dev.sync", "path traversal not allowed", nil) + } + + // Validate source exists + sourceInfo, err := os.Stat(source) // Keep os.Stat for local source check or use coreio? coreio.Local.IsFile is bool. + // If source is local file on disk (not in medium), we can use os.Stat. + // But concept is everything is via Medium? + // User is running CLI on host. `source` is relative to CWD. + // coreio.Local uses absolute path or relative to root (which is "/" by default). + // So coreio.Local works. + if !coreio.Local.IsFile(source) { + // Might be directory + // IsFile returns false for directory. + } + // Let's rely on os.Stat for initial source check to distinguish dir vs file easily if coreio doesn't expose Stat. + // coreio doesn't expose Stat. + + // Check using standard os for source determination as we are outside strict sandbox for input args potentially? + // But we should use coreio where possible. 
+ // coreio.Local.List worked for dirs. + // Let's stick to os.Stat for source properties finding as typically allowed for CLI args. + + if err != nil { + return log.E("dev.sync", i18n.T("cmd.dev.file_sync.error.source_not_found", map[string]interface{}{"Path": source}), err) + } + + // Find target repos + targetRepos, err := resolveTargetRepos(fileSyncTo) + if err != nil { + return err + } + + if len(targetRepos) == 0 { + return cli.Err("%s", i18n.T("cmd.dev.file_sync.error.no_targets")) + } + + // Show plan + cli.Print("%s: %s\n", dimStyle.Render(i18n.T("cmd.dev.file_sync.source")), source) + cli.Print("%s: %d repos\n", dimStyle.Render(i18n.T("cmd.dev.file_sync.targets")), len(targetRepos)) + if fileSyncDryRun { + cli.Print("%s\n", warningStyle.Render(i18n.T("cmd.dev.file_sync.dry_run_mode"))) + } + cli.Blank() + + var succeeded, skipped, failed int + + for _, repo := range targetRepos { + repoName := filepath.Base(repo.Path) + + if fileSyncDryRun { + cli.Print(" %s %s\n", dimStyle.Render("[dry-run]"), repoName) + succeeded++ + continue + } + + // Step 1: Pull latest (safe sync) + if err := safePull(ctx, repo.Path); err != nil { + cli.Print(" %s %s: pull failed: %s\n", errorStyle.Render("x"), repoName, err) + failed++ + continue + } + + // Step 2: Copy file(s) + destPath := filepath.Join(repo.Path, source) + if sourceInfo.IsDir() { + if err := copyDir(source, destPath); err != nil { + cli.Print(" %s %s: copy failed: %s\n", errorStyle.Render("x"), repoName, err) + failed++ + continue + } + } else { + // Ensure dir exists + if err := coreio.Local.EnsureDir(filepath.Dir(destPath)); err != nil { + cli.Print(" %s %s: copy failed: %s\n", errorStyle.Render("x"), repoName, err) + failed++ + continue + } + if err := coreio.Copy(coreio.Local, source, coreio.Local, destPath); err != nil { + cli.Print(" %s %s: copy failed: %s\n", errorStyle.Render("x"), repoName, err) + failed++ + continue + } + } + + // Step 3: Check if anything changed + statuses := git.Status(ctx, 
git.StatusOptions{ + Paths: []string{repo.Path}, + Names: map[string]string{repo.Path: repoName}, + }) + if len(statuses) == 0 || !statuses[0].IsDirty() { + cli.Print(" %s %s: %s\n", dimStyle.Render("-"), repoName, i18n.T("cmd.dev.file_sync.no_changes")) + skipped++ + continue + } + + // Step 4: Commit if message provided + if fileSyncMessage != "" { + commitMsg := fileSyncMessage + if fileSyncCoAuthor != "" { + commitMsg += "\n\nCo-Authored-By: " + fileSyncCoAuthor + } + + if err := gitAddCommit(ctx, repo.Path, source, commitMsg); err != nil { + cli.Print(" %s %s: commit failed: %s\n", errorStyle.Render("x"), repoName, err) + failed++ + continue + } + + // Step 5: Push if requested + if fileSyncPush { + if err := safePush(ctx, repo.Path); err != nil { + cli.Print(" %s %s: push failed: %s\n", errorStyle.Render("x"), repoName, err) + failed++ + continue + } + } + } + + cli.Print(" %s %s\n", successStyle.Render("v"), repoName) + succeeded++ + } + + // Summary + cli.Blank() + cli.Print("%s: ", i18n.T("cmd.dev.file_sync.summary")) + if succeeded > 0 { + cli.Print("%s", successStyle.Render(i18n.T("common.count.succeeded", map[string]interface{}{"Count": succeeded}))) + } + if skipped > 0 { + if succeeded > 0 { + cli.Print(", ") + } + cli.Print("%s", dimStyle.Render(i18n.T("common.count.skipped", map[string]interface{}{"Count": skipped}))) + } + if failed > 0 { + if succeeded > 0 || skipped > 0 { + cli.Print(", ") + } + cli.Print("%s", errorStyle.Render(i18n.T("common.count.failed", map[string]interface{}{"Count": failed}))) + } + cli.Blank() + + return nil +} + +// resolveTargetRepos resolves the --to pattern to actual repos +func resolveTargetRepos(pattern string) ([]*repos.Repo, error) { + // Load registry + registryPath, err := repos.FindRegistry(coreio.Local) + if err != nil { + return nil, log.E("dev.sync", "failed to find registry", err) + } + + registry, err := repos.LoadRegistry(coreio.Local, registryPath) + if err != nil { + return nil, log.E("dev.sync", 
"failed to load registry", err) + } + + // Match pattern against repo names + var matched []*repos.Repo + for _, repo := range registry.Repos { + if matchGlob(repo.Name, pattern) || matchGlob(repo.Path, pattern) { + matched = append(matched, repo) + } + } + + return matched, nil +} + +// matchGlob performs simple glob matching with * wildcards +func matchGlob(s, pattern string) bool { + // Handle exact match + if s == pattern { + return true + } + + // Handle * at end + if strings.HasSuffix(pattern, "*") { + prefix := strings.TrimSuffix(pattern, "*") + return strings.HasPrefix(s, prefix) + } + + // Handle * at start + if strings.HasPrefix(pattern, "*") { + suffix := strings.TrimPrefix(pattern, "*") + return strings.HasSuffix(s, suffix) + } + + // Handle * in middle + if strings.Contains(pattern, "*") { + parts := strings.SplitN(pattern, "*", 2) + return strings.HasPrefix(s, parts[0]) && strings.HasSuffix(s, parts[1]) + } + + return false +} + +// safePull pulls with rebase, handling errors gracefully +func safePull(ctx context.Context, path string) error { + // Check if we have upstream + _, err := gitCommandQuiet(ctx, path, "rev-parse", "--abbrev-ref", "@{u}") + if err != nil { + // No upstream set, skip pull + return nil + } + + return git.Pull(ctx, path) +} + +// safePush pushes with automatic pull-rebase on rejection +func safePush(ctx context.Context, path string) error { + err := git.Push(ctx, path) + if err == nil { + return nil + } + + // If non-fast-forward, try pull and push again + if git.IsNonFastForward(err) { + if pullErr := git.Pull(ctx, path); pullErr != nil { + return pullErr + } + return git.Push(ctx, path) + } + + return err +} + +// gitAddCommit stages and commits a file/directory +func gitAddCommit(ctx context.Context, repoPath, filePath, message string) error { + // Stage the file(s) + if _, err := gitCommandQuiet(ctx, repoPath, "add", filePath); err != nil { + return err + } + + // Commit + _, err := gitCommandQuiet(ctx, repoPath, "commit", 
"-m", message) + return err +} + +// gitCommandQuiet runs a git command without output +func gitCommandQuiet(ctx context.Context, dir string, args ...string) (string, error) { + cmd := exec.CommandContext(ctx, "git", args...) + cmd.Dir = dir + + output, err := cmd.CombinedOutput() + if err != nil { + return "", cli.Err("%s", strings.TrimSpace(string(output))) + } + return string(output), nil +} + +// copyDir recursively copies a directory +func copyDir(src, dst string) error { + entries, err := coreio.Local.List(src) + if err != nil { + return err + } + + if err := coreio.Local.EnsureDir(dst); err != nil { + return err + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if entry.IsDir() { + if err := copyDir(srcPath, dstPath); err != nil { + return err + } + } else { + if err := coreio.Copy(coreio.Local, srcPath, coreio.Local, dstPath); err != nil { + return err + } + } + } + + return nil +} diff --git a/cmd/dev/cmd_health.go b/cmd/dev/cmd_health.go new file mode 100644 index 0000000..b898e3a --- /dev/null +++ b/cmd/dev/cmd_health.go @@ -0,0 +1,185 @@ +package dev + +import ( + "context" + "fmt" + "sort" + "strings" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go-scm/git" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// Health command flags +var ( + healthRegistryPath string + healthVerbose bool +) + +// AddHealthCommand adds the 'health' command to the given parent command. 
+func AddHealthCommand(parent *cli.Command) { + healthCmd := &cli.Command{ + Use: "health", + Short: i18n.T("cmd.dev.health.short"), + Long: i18n.T("cmd.dev.health.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runHealth(healthRegistryPath, healthVerbose) + }, + } + + healthCmd.Flags().StringVar(&healthRegistryPath, "registry", "", i18n.T("common.flag.registry")) + healthCmd.Flags().BoolVarP(&healthVerbose, "verbose", "v", false, i18n.T("cmd.dev.health.flag.verbose")) + + parent.AddCommand(healthCmd) +} + +func runHealth(registryPath string, verbose bool) error { + ctx := context.Background() + + // Load registry and get paths + reg, _, err := loadRegistryWithConfig(registryPath) + if err != nil { + return err + } + + // Build paths and names for git operations + var paths []string + names := make(map[string]string) + + for _, repo := range reg.List() { + if repo.IsGitRepo() { + paths = append(paths, repo.Path) + names[repo.Path] = repo.Name + } + } + + if len(paths) == 0 { + cli.Text(i18n.T("cmd.dev.no_git_repos")) + return nil + } + + // Get status for all repos + statuses := git.Status(ctx, git.StatusOptions{ + Paths: paths, + Names: names, + }) + + // Sort for consistent verbose output + sort.Slice(statuses, func(i, j int) bool { + return statuses[i].Name < statuses[j].Name + }) + + // Aggregate stats + var ( + totalRepos = len(statuses) + dirtyRepos []string + aheadRepos []string + behindRepos []string + errorRepos []string + ) + + for _, s := range statuses { + if s.Error != nil { + errorRepos = append(errorRepos, s.Name) + continue + } + if s.IsDirty() { + dirtyRepos = append(dirtyRepos, s.Name) + } + if s.HasUnpushed() { + aheadRepos = append(aheadRepos, s.Name) + } + if s.HasUnpulled() { + behindRepos = append(behindRepos, s.Name) + } + } + + // Print summary line + cli.Blank() + printHealthSummary(totalRepos, dirtyRepos, aheadRepos, behindRepos, errorRepos) + cli.Blank() + + // Verbose output + if verbose { + if len(dirtyRepos) > 0 
{ + cli.Print("%s %s\n", warningStyle.Render(i18n.T("cmd.dev.health.dirty_label")), formatRepoList(dirtyRepos)) + } + if len(aheadRepos) > 0 { + cli.Print("%s %s\n", successStyle.Render(i18n.T("cmd.dev.health.ahead_label")), formatRepoList(aheadRepos)) + } + if len(behindRepos) > 0 { + cli.Print("%s %s\n", warningStyle.Render(i18n.T("cmd.dev.health.behind_label")), formatRepoList(behindRepos)) + } + if len(errorRepos) > 0 { + cli.Print("%s %s\n", errorStyle.Render(i18n.T("cmd.dev.health.errors_label")), formatRepoList(errorRepos)) + } + cli.Blank() + } + + return nil +} + +func printHealthSummary(total int, dirty, ahead, behind, errors []string) { + parts := []string{ + statusPart(total, i18n.T("cmd.dev.health.repos"), cli.ValueStyle), + } + + // Dirty status + if len(dirty) > 0 { + parts = append(parts, statusPart(len(dirty), i18n.T("common.status.dirty"), cli.WarningStyle)) + } else { + parts = append(parts, statusText(i18n.T("cmd.dev.status.clean"), cli.SuccessStyle)) + } + + // Push status + if len(ahead) > 0 { + parts = append(parts, statusPart(len(ahead), i18n.T("cmd.dev.health.to_push"), cli.ValueStyle)) + } else { + parts = append(parts, statusText(i18n.T("common.status.synced"), cli.SuccessStyle)) + } + + // Pull status + if len(behind) > 0 { + parts = append(parts, statusPart(len(behind), i18n.T("cmd.dev.health.to_pull"), cli.WarningStyle)) + } else { + parts = append(parts, statusText(i18n.T("common.status.up_to_date"), cli.SuccessStyle)) + } + + // Errors (only if any) + if len(errors) > 0 { + parts = append(parts, statusPart(len(errors), i18n.T("cmd.dev.health.errors"), cli.ErrorStyle)) + } + + cli.Text(statusLine(parts...)) +} + +func formatRepoList(reposList []string) string { + if len(reposList) <= 5 { + return joinRepos(reposList) + } + return joinRepos(reposList[:5]) + " " + i18n.T("cmd.dev.health.more", map[string]interface{}{"Count": len(reposList) - 5}) +} + +func joinRepos(reposList []string) string { + result := "" + for i, r := range 
reposList { + if i > 0 { + result += ", " + } + result += r + } + return result +} + +func statusPart(count int, label string, style *cli.AnsiStyle) string { + return style.Render(fmt.Sprintf("%d %s", count, label)) +} + +func statusText(text string, style *cli.AnsiStyle) string { + return style.Render(text) +} + +func statusLine(parts ...string) string { + return strings.Join(parts, " | ") +} diff --git a/cmd/dev/cmd_impact.go b/cmd/dev/cmd_impact.go new file mode 100644 index 0000000..46ef28d --- /dev/null +++ b/cmd/dev/cmd_impact.go @@ -0,0 +1,184 @@ +package dev + +import ( + "errors" + "sort" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/repos" +) + +// Impact-specific styles (aliases to shared) +var ( + impactDirectStyle = cli.ErrorStyle + impactIndirectStyle = cli.WarningStyle + impactSafeStyle = cli.SuccessStyle +) + +// Impact command flags +var impactRegistryPath string + +// addImpactCommand adds the 'impact' command to the given parent command. 
+func addImpactCommand(parent *cli.Command) { + impactCmd := &cli.Command{ + Use: "impact ", + Short: i18n.T("cmd.dev.impact.short"), + Long: i18n.T("cmd.dev.impact.long"), + Args: cli.ExactArgs(1), + RunE: func(cmd *cli.Command, args []string) error { + return runImpact(impactRegistryPath, args[0]) + }, + } + + impactCmd.Flags().StringVar(&impactRegistryPath, "registry", "", i18n.T("common.flag.registry")) + + parent.AddCommand(impactCmd) +} + +func runImpact(registryPath string, repoName string) error { + // Find or use provided registry + var reg *repos.Registry + var err error + + if registryPath != "" { + reg, err = repos.LoadRegistry(io.Local, registryPath) + if err != nil { + return cli.Wrap(err, "failed to load registry") + } + } else { + registryPath, err = repos.FindRegistry(io.Local) + if err == nil { + reg, err = repos.LoadRegistry(io.Local, registryPath) + if err != nil { + return cli.Wrap(err, "failed to load registry") + } + } else { + return errors.New(i18n.T("cmd.dev.impact.requires_registry")) + } + } + + // Check repo exists + repo, exists := reg.Get(repoName) + if !exists { + return errors.New(i18n.T("error.repo_not_found", map[string]interface{}{"Name": repoName})) + } + + // Build reverse dependency graph + dependents := buildDependentsGraph(reg) + + // Find all affected repos (direct and transitive) + direct := dependents[repoName] + allAffected := findAllDependents(repoName, dependents) + + // Separate direct vs indirect + directSet := make(map[string]bool) + for _, d := range direct { + directSet[d] = true + } + + var indirect []string + for _, a := range allAffected { + if !directSet[a] { + indirect = append(indirect, a) + } + } + + // Sort for consistent output + sort.Strings(direct) + sort.Strings(indirect) + + // Print results + cli.Blank() + cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.dev.impact.analysis_for")), repoNameStyle.Render(repoName)) + if repo.Description != "" { + cli.Print("%s\n", dimStyle.Render(repo.Description)) + } 
+ cli.Blank() + + if len(allAffected) == 0 { + cli.Print("%s %s\n", impactSafeStyle.Render("v"), i18n.T("cmd.dev.impact.no_dependents", map[string]interface{}{"Name": repoName})) + return nil + } + + // Direct dependents + if len(direct) > 0 { + cli.Print("%s %s\n", + impactDirectStyle.Render("*"), + i18n.T("cmd.dev.impact.direct_dependents", map[string]interface{}{"Count": len(direct)}), + ) + for _, d := range direct { + r, _ := reg.Get(d) + desc := "" + if r != nil && r.Description != "" { + desc = dimStyle.Render(" - " + cli.Truncate(r.Description, 40)) + } + cli.Print(" %s%s\n", d, desc) + } + cli.Blank() + } + + // Indirect dependents + if len(indirect) > 0 { + cli.Print("%s %s\n", + impactIndirectStyle.Render("o"), + i18n.T("cmd.dev.impact.transitive_dependents", map[string]interface{}{"Count": len(indirect)}), + ) + for _, d := range indirect { + r, _ := reg.Get(d) + desc := "" + if r != nil && r.Description != "" { + desc = dimStyle.Render(" - " + cli.Truncate(r.Description, 40)) + } + cli.Print(" %s%s\n", d, desc) + } + cli.Blank() + } + + // Summary + cli.Print("%s %s\n", + dimStyle.Render(i18n.Label("summary")), + i18n.T("cmd.dev.impact.changes_affect", map[string]interface{}{ + "Repo": repoNameStyle.Render(repoName), + "Affected": len(allAffected), + "Total": len(reg.Repos) - 1, + }), + ) + + return nil +} + +// buildDependentsGraph creates a reverse dependency map +// key = repo, value = repos that depend on it +func buildDependentsGraph(reg *repos.Registry) map[string][]string { + dependents := make(map[string][]string) + + for name, repo := range reg.Repos { + for _, dep := range repo.DependsOn { + dependents[dep] = append(dependents[dep], name) + } + } + + return dependents +} + +// findAllDependents recursively finds all repos that depend on the given repo +func findAllDependents(repoName string, dependents map[string][]string) []string { + visited := make(map[string]bool) + var result []string + + var visit func(name string) + visit = func(name 
string) { + for _, dep := range dependents[name] { + if !visited[dep] { + visited[dep] = true + result = append(result, dep) + visit(dep) // Recurse for transitive deps + } + } + } + + visit(repoName) + return result +} diff --git a/cmd/dev/cmd_issues.go b/cmd/dev/cmd_issues.go new file mode 100644 index 0000000..934d86e --- /dev/null +++ b/cmd/dev/cmd_issues.go @@ -0,0 +1,208 @@ +package dev + +import ( + "encoding/json" + "errors" + "os/exec" + "sort" + "strings" + "time" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// Issue-specific styles (aliases to shared) +var ( + issueRepoStyle = cli.DimStyle + issueNumberStyle = cli.TitleStyle + issueTitleStyle = cli.ValueStyle + issueLabelStyle = cli.WarningStyle + issueAssigneeStyle = cli.SuccessStyle + issueAgeStyle = cli.DimStyle +) + +// GitHubIssue represents a GitHub issue from the API. +type GitHubIssue struct { + Number int `json:"number"` + Title string `json:"title"` + State string `json:"state"` + CreatedAt time.Time `json:"createdAt"` + Author struct { + Login string `json:"login"` + } `json:"author"` + Assignees struct { + Nodes []struct { + Login string `json:"login"` + } `json:"nodes"` + } `json:"assignees"` + Labels struct { + Nodes []struct { + Name string `json:"name"` + } `json:"nodes"` + } `json:"labels"` + URL string `json:"url"` + + // Added by us + RepoName string `json:"-"` +} + +// Issues command flags +var ( + issuesRegistryPath string + issuesLimit int + issuesAssignee string +) + +// addIssuesCommand adds the 'issues' command to the given parent command. 
+func addIssuesCommand(parent *cli.Command) { + issuesCmd := &cli.Command{ + Use: "issues", + Short: i18n.T("cmd.dev.issues.short"), + Long: i18n.T("cmd.dev.issues.long"), + RunE: func(cmd *cli.Command, args []string) error { + limit := issuesLimit + if limit == 0 { + limit = 10 + } + return runIssues(issuesRegistryPath, limit, issuesAssignee) + }, + } + + issuesCmd.Flags().StringVar(&issuesRegistryPath, "registry", "", i18n.T("common.flag.registry")) + issuesCmd.Flags().IntVarP(&issuesLimit, "limit", "l", 10, i18n.T("cmd.dev.issues.flag.limit")) + issuesCmd.Flags().StringVarP(&issuesAssignee, "assignee", "a", "", i18n.T("cmd.dev.issues.flag.assignee")) + + parent.AddCommand(issuesCmd) +} + +func runIssues(registryPath string, limit int, assignee string) error { + // Check gh is available + if _, err := exec.LookPath("gh"); err != nil { + return errors.New(i18n.T("error.gh_not_found")) + } + + // Find or use provided registry + reg, _, err := loadRegistryWithConfig(registryPath) + if err != nil { + return err + } + + // Fetch issues sequentially (avoid GitHub rate limits) + var allIssues []GitHubIssue + var fetchErrors []error + + repoList := reg.List() + for i, repo := range repoList { + repoFullName := cli.Sprintf("%s/%s", reg.Org, repo.Name) + cli.Print("\033[2K\r%s %d/%d %s", dimStyle.Render(i18n.T("i18n.progress.fetch")), i+1, len(repoList), repo.Name) + + issues, err := fetchIssues(repoFullName, repo.Name, limit, assignee) + if err != nil { + fetchErrors = append(fetchErrors, cli.Wrap(err, repo.Name)) + continue + } + allIssues = append(allIssues, issues...) 
+ } + cli.Print("\033[2K\r") // Clear progress line + + // Sort by created date (newest first) + sort.Slice(allIssues, func(i, j int) bool { + return allIssues[i].CreatedAt.After(allIssues[j].CreatedAt) + }) + + // Print issues + if len(allIssues) == 0 { + cli.Text(i18n.T("cmd.dev.issues.no_issues")) + return nil + } + + cli.Print("\n%s\n\n", i18n.T("cmd.dev.issues.open_issues", map[string]interface{}{"Count": len(allIssues)})) + + for _, issue := range allIssues { + printIssue(issue) + } + + // Print any errors + if len(fetchErrors) > 0 { + cli.Blank() + for _, err := range fetchErrors { + cli.Print("%s %s\n", errorStyle.Render(i18n.Label("error")), err) + } + } + + return nil +} + +func fetchIssues(repoFullName, repoName string, limit int, assignee string) ([]GitHubIssue, error) { + args := []string{ + "issue", "list", + "--repo", repoFullName, + "--state", "open", + "--limit", cli.Sprintf("%d", limit), + "--json", "number,title,state,createdAt,author,assignees,labels,url", + } + + if assignee != "" { + args = append(args, "--assignee", assignee) + } + + cmd := exec.Command("gh", args...) 
+ output, err := cmd.Output() + if err != nil { + // Check if it's just "no issues" vs actual error + if exitErr, ok := err.(*exec.ExitError); ok { + stderr := string(exitErr.Stderr) + if strings.Contains(stderr, "no issues") || strings.Contains(stderr, "Could not resolve") { + return nil, nil + } + return nil, cli.Err("%s", stderr) + } + return nil, err + } + + var issues []GitHubIssue + if err := json.Unmarshal(output, &issues); err != nil { + return nil, err + } + + // Tag with repo name + for i := range issues { + issues[i].RepoName = repoName + } + + return issues, nil +} + +func printIssue(issue GitHubIssue) { + // #42 [core-bio] Fix avatar upload + num := issueNumberStyle.Render(cli.Sprintf("#%d", issue.Number)) + repo := issueRepoStyle.Render(cli.Sprintf("[%s]", issue.RepoName)) + title := issueTitleStyle.Render(cli.Truncate(issue.Title, 60)) + + line := cli.Sprintf(" %s %s %s", num, repo, title) + + // Add labels if any + if len(issue.Labels.Nodes) > 0 { + var labels []string + for _, l := range issue.Labels.Nodes { + labels = append(labels, l.Name) + } + line += " " + issueLabelStyle.Render("["+strings.Join(labels, ", ")+"]") + } + + // Add assignee if any + if len(issue.Assignees.Nodes) > 0 { + var assignees []string + for _, a := range issue.Assignees.Nodes { + assignees = append(assignees, "@"+a.Login) + } + line += " " + issueAssigneeStyle.Render(strings.Join(assignees, ", ")) + } + + // Add age + age := cli.FormatAge(issue.CreatedAt) + line += " " + issueAgeStyle.Render(age) + + cli.Text(line) +} diff --git a/cmd/dev/cmd_pull.go b/cmd/dev/cmd_pull.go new file mode 100644 index 0000000..ab8d2c7 --- /dev/null +++ b/cmd/dev/cmd_pull.go @@ -0,0 +1,130 @@ +package dev + +import ( + "context" + "os/exec" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go-scm/git" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// Pull command flags +var ( + pullRegistryPath string + pullAll bool +) + +// AddPullCommand adds the 'pull' command to the given parent 
command. +func AddPullCommand(parent *cli.Command) { + pullCmd := &cli.Command{ + Use: "pull", + Short: i18n.T("cmd.dev.pull.short"), + Long: i18n.T("cmd.dev.pull.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runPull(pullRegistryPath, pullAll) + }, + } + + pullCmd.Flags().StringVar(&pullRegistryPath, "registry", "", i18n.T("common.flag.registry")) + pullCmd.Flags().BoolVar(&pullAll, "all", false, i18n.T("cmd.dev.pull.flag.all")) + + parent.AddCommand(pullCmd) +} + +func runPull(registryPath string, all bool) error { + ctx := context.Background() + + // Find or use provided registry + reg, _, err := loadRegistryWithConfig(registryPath) + if err != nil { + return err + } + + // Build paths and names for git operations + var paths []string + names := make(map[string]string) + + for _, repo := range reg.List() { + if repo.IsGitRepo() { + paths = append(paths, repo.Path) + names[repo.Path] = repo.Name + } + } + + if len(paths) == 0 { + cli.Text(i18n.T("cmd.dev.no_git_repos")) + return nil + } + + // Get status for all repos + statuses := git.Status(ctx, git.StatusOptions{ + Paths: paths, + Names: names, + }) + + // Find repos to pull + var toPull []git.RepoStatus + for _, s := range statuses { + if s.Error != nil { + continue + } + if all || s.HasUnpulled() { + toPull = append(toPull, s) + } + } + + if len(toPull) == 0 { + cli.Text(i18n.T("cmd.dev.pull.all_up_to_date")) + return nil + } + + // Show what we're pulling + if all { + cli.Print("\n%s\n\n", i18n.T("cmd.dev.pull.pulling_repos", map[string]interface{}{"Count": len(toPull)})) + } else { + cli.Print("\n%s\n\n", i18n.T("cmd.dev.pull.repos_behind", map[string]interface{}{"Count": len(toPull)})) + for _, s := range toPull { + cli.Print(" %s: %s\n", + repoNameStyle.Render(s.Name), + dimStyle.Render(i18n.T("cmd.dev.pull.commits_behind", map[string]interface{}{"Count": s.Behind})), + ) + } + cli.Blank() + } + + // Pull each repo + var succeeded, failed int + for _, s := range toPull { + 
cli.Print(" %s %s... ", dimStyle.Render(i18n.T("cmd.dev.pull.pulling")), s.Name) + + err := gitPull(ctx, s.Path) + if err != nil { + cli.Print("%s\n", errorStyle.Render("x "+err.Error())) + failed++ + } else { + cli.Print("%s\n", successStyle.Render("v")) + succeeded++ + } + } + + // Summary + cli.Blank() + cli.Print("%s", successStyle.Render(i18n.T("cmd.dev.pull.done_pulled", map[string]interface{}{"Count": succeeded}))) + if failed > 0 { + cli.Print(", %s", errorStyle.Render(i18n.T("common.count.failed", map[string]interface{}{"Count": failed}))) + } + cli.Blank() + + return nil +} + +func gitPull(ctx context.Context, path string) error { + cmd := exec.CommandContext(ctx, "git", "pull", "--ff-only") + cmd.Dir = path + output, err := cmd.CombinedOutput() + if err != nil { + return cli.Err("%s", string(output)) + } + return nil +} diff --git a/cmd/dev/cmd_push.go b/cmd/dev/cmd_push.go new file mode 100644 index 0000000..48ca78e --- /dev/null +++ b/cmd/dev/cmd_push.go @@ -0,0 +1,275 @@ +package dev + +import ( + "context" + "os" + "path/filepath" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go-scm/git" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// Push command flags +var ( + pushRegistryPath string + pushForce bool +) + +// AddPushCommand adds the 'push' command to the given parent command. 
+func AddPushCommand(parent *cli.Command) { + pushCmd := &cli.Command{ + Use: "push", + Short: i18n.T("cmd.dev.push.short"), + Long: i18n.T("cmd.dev.push.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runPush(pushRegistryPath, pushForce) + }, + } + + pushCmd.Flags().StringVar(&pushRegistryPath, "registry", "", i18n.T("common.flag.registry")) + pushCmd.Flags().BoolVarP(&pushForce, "force", "f", false, i18n.T("cmd.dev.push.flag.force")) + + parent.AddCommand(pushCmd) +} + +func runPush(registryPath string, force bool) error { + ctx := context.Background() + cwd, _ := os.Getwd() + + // Check if current directory is a git repo (single-repo mode) + if registryPath == "" && isGitRepo(cwd) { + return runPushSingleRepo(ctx, cwd, force) + } + + // Multi-repo mode: find or use provided registry + reg, _, err := loadRegistryWithConfig(registryPath) + if err != nil { + return err + } + + // Build paths and names for git operations + var paths []string + names := make(map[string]string) + + for _, repo := range reg.List() { + if repo.IsGitRepo() { + paths = append(paths, repo.Path) + names[repo.Path] = repo.Name + } + } + + if len(paths) == 0 { + cli.Text(i18n.T("cmd.dev.no_git_repos")) + return nil + } + + // Get status for all repos + statuses := git.Status(ctx, git.StatusOptions{ + Paths: paths, + Names: names, + }) + + // Find repos with unpushed commits + var aheadRepos []git.RepoStatus + for _, s := range statuses { + if s.Error == nil && s.HasUnpushed() { + aheadRepos = append(aheadRepos, s) + } + } + + if len(aheadRepos) == 0 { + cli.Text(i18n.T("cmd.dev.push.all_up_to_date")) + return nil + } + + // Show repos to push + cli.Print("\n%s\n\n", i18n.T("common.count.repos_unpushed", map[string]interface{}{"Count": len(aheadRepos)})) + totalCommits := 0 + for _, s := range aheadRepos { + cli.Print(" %s: %s\n", + repoNameStyle.Render(s.Name), + aheadStyle.Render(i18n.T("common.count.commits", map[string]interface{}{"Count": s.Ahead})), + ) + 
totalCommits += s.Ahead + } + + // Confirm unless --force + if !force { + cli.Blank() + if !cli.Confirm(i18n.T("cmd.dev.push.confirm_push", map[string]interface{}{"Commits": totalCommits, "Repos": len(aheadRepos)})) { + cli.Text(i18n.T("cli.aborted")) + return nil + } + } + + cli.Blank() + + // Push sequentially (SSH passphrase needs interaction) + var pushPaths []string + for _, s := range aheadRepos { + pushPaths = append(pushPaths, s.Path) + } + + results := git.PushMultiple(ctx, pushPaths, names) + + var succeeded, failed int + var divergedRepos []git.PushResult + + for _, r := range results { + if r.Success { + cli.Print(" %s %s\n", successStyle.Render("v"), r.Name) + succeeded++ + } else { + // Check if this is a non-fast-forward error (diverged branch) + if git.IsNonFastForward(r.Error) { + cli.Print(" %s %s: %s\n", warningStyle.Render("!"), r.Name, i18n.T("cmd.dev.push.diverged")) + divergedRepos = append(divergedRepos, r) + } else { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), r.Name, r.Error) + } + failed++ + } + } + + // Handle diverged repos - offer to pull and retry + if len(divergedRepos) > 0 { + cli.Blank() + cli.Print("%s\n", i18n.T("cmd.dev.push.diverged_help")) + if cli.Confirm(i18n.T("cmd.dev.push.pull_and_retry")) { + cli.Blank() + for _, r := range divergedRepos { + cli.Print(" %s %s...\n", dimStyle.Render("↓"), r.Name) + if err := git.Pull(ctx, r.Path); err != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), r.Name, err) + continue + } + cli.Print(" %s %s...\n", dimStyle.Render("↑"), r.Name) + if err := git.Push(ctx, r.Path); err != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), r.Name, err) + continue + } + cli.Print(" %s %s\n", successStyle.Render("v"), r.Name) + succeeded++ + failed-- + } + } + } + + // Summary + cli.Blank() + cli.Print("%s", successStyle.Render(i18n.T("cmd.dev.push.done_pushed", map[string]interface{}{"Count": succeeded}))) + if failed > 0 { + cli.Print(", %s", 
errorStyle.Render(i18n.T("common.count.failed", map[string]interface{}{"Count": failed}))) + } + cli.Blank() + + return nil +} + +// runPushSingleRepo handles push for a single repo (current directory). +func runPushSingleRepo(ctx context.Context, repoPath string, force bool) error { + repoName := filepath.Base(repoPath) + + // Get status + statuses := git.Status(ctx, git.StatusOptions{ + Paths: []string{repoPath}, + Names: map[string]string{repoPath: repoName}, + }) + + if len(statuses) == 0 { + return cli.Err("failed to get repo status") + } + + s := statuses[0] + if s.Error != nil { + return s.Error + } + + if !s.HasUnpushed() { + // Check if there are uncommitted changes + if s.IsDirty() { + cli.Print("%s: ", repoNameStyle.Render(s.Name)) + if s.Modified > 0 { + cli.Print("%s ", dirtyStyle.Render(i18n.T("cmd.dev.modified", map[string]interface{}{"Count": s.Modified}))) + } + if s.Untracked > 0 { + cli.Print("%s ", dirtyStyle.Render(i18n.T("cmd.dev.untracked", map[string]interface{}{"Count": s.Untracked}))) + } + if s.Staged > 0 { + cli.Print("%s ", aheadStyle.Render(i18n.T("cmd.dev.staged", map[string]interface{}{"Count": s.Staged}))) + } + cli.Blank() + cli.Blank() + if cli.Confirm(i18n.T("cmd.dev.push.uncommitted_changes_commit")) { + cli.Blank() + // Use edit-enabled commit if only untracked files (may need .gitignore fix) + var err error + if s.Modified == 0 && s.Staged == 0 && s.Untracked > 0 { + err = claudeEditCommit(ctx, repoPath, repoName, "") + } else { + err = runCommitSingleRepo(ctx, repoPath, false) + } + if err != nil { + return err + } + // Re-check - only push if Claude created commits + newStatuses := git.Status(ctx, git.StatusOptions{ + Paths: []string{repoPath}, + Names: map[string]string{repoPath: repoName}, + }) + if len(newStatuses) > 0 && newStatuses[0].HasUnpushed() { + return runPushSingleRepo(ctx, repoPath, force) + } + } + return nil + } + cli.Text(i18n.T("cmd.dev.push.all_up_to_date")) + return nil + } + + // Show commits to push + 
cli.Print("%s: %s\n", repoNameStyle.Render(s.Name), + aheadStyle.Render(i18n.T("common.count.commits", map[string]interface{}{"Count": s.Ahead}))) + + // Confirm unless --force + if !force { + cli.Blank() + if !cli.Confirm(i18n.T("cmd.dev.push.confirm_push", map[string]interface{}{"Commits": s.Ahead, "Repos": 1})) { + cli.Text(i18n.T("cli.aborted")) + return nil + } + } + + cli.Blank() + + // Push + err := git.Push(ctx, repoPath) + if err != nil { + if git.IsNonFastForward(err) { + cli.Print(" %s %s: %s\n", warningStyle.Render("!"), repoName, i18n.T("cmd.dev.push.diverged")) + cli.Blank() + cli.Print("%s\n", i18n.T("cmd.dev.push.diverged_help")) + if cli.Confirm(i18n.T("cmd.dev.push.pull_and_retry")) { + cli.Blank() + cli.Print(" %s %s...\n", dimStyle.Render("↓"), repoName) + if pullErr := git.Pull(ctx, repoPath); pullErr != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), repoName, pullErr) + return pullErr + } + cli.Print(" %s %s...\n", dimStyle.Render("↑"), repoName) + if pushErr := git.Push(ctx, repoPath); pushErr != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), repoName, pushErr) + return pushErr + } + cli.Print(" %s %s\n", successStyle.Render("v"), repoName) + return nil + } + } + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), repoName, err) + return err + } + + cli.Print(" %s %s\n", successStyle.Render("v"), repoName) + return nil +} diff --git a/cmd/dev/cmd_reviews.go b/cmd/dev/cmd_reviews.go new file mode 100644 index 0000000..3750da1 --- /dev/null +++ b/cmd/dev/cmd_reviews.go @@ -0,0 +1,237 @@ +package dev + +import ( + "encoding/json" + "errors" + "os/exec" + "sort" + "strings" + "time" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// PR-specific styles (aliases to shared) +var ( + prNumberStyle = cli.NumberStyle + prTitleStyle = cli.ValueStyle + prAuthorStyle = cli.InfoStyle + prApprovedStyle = cli.SuccessStyle + prChangesStyle = cli.WarningStyle + prPendingStyle = cli.DimStyle + prDraftStyle = 
cli.DimStyle +) + +// GitHubPR represents a GitHub pull request. +type GitHubPR struct { + Number int `json:"number"` + Title string `json:"title"` + State string `json:"state"` + IsDraft bool `json:"isDraft"` + CreatedAt time.Time `json:"createdAt"` + Author struct { + Login string `json:"login"` + } `json:"author"` + ReviewDecision string `json:"reviewDecision"` + Reviews struct { + Nodes []struct { + State string `json:"state"` + Author struct { + Login string `json:"login"` + } `json:"author"` + } `json:"nodes"` + } `json:"reviews"` + URL string `json:"url"` + + // Added by us + RepoName string `json:"-"` +} + +// Reviews command flags +var ( + reviewsRegistryPath string + reviewsAuthor string + reviewsShowAll bool +) + +// addReviewsCommand adds the 'reviews' command to the given parent command. +func addReviewsCommand(parent *cli.Command) { + reviewsCmd := &cli.Command{ + Use: "reviews", + Short: i18n.T("cmd.dev.reviews.short"), + Long: i18n.T("cmd.dev.reviews.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runReviews(reviewsRegistryPath, reviewsAuthor, reviewsShowAll) + }, + } + + reviewsCmd.Flags().StringVar(&reviewsRegistryPath, "registry", "", i18n.T("common.flag.registry")) + reviewsCmd.Flags().StringVar(&reviewsAuthor, "author", "", i18n.T("cmd.dev.reviews.flag.author")) + reviewsCmd.Flags().BoolVar(&reviewsShowAll, "all", false, i18n.T("cmd.dev.reviews.flag.all")) + + parent.AddCommand(reviewsCmd) +} + +func runReviews(registryPath string, author string, showAll bool) error { + // Check gh is available + if _, err := exec.LookPath("gh"); err != nil { + return errors.New(i18n.T("error.gh_not_found")) + } + + // Find or use provided registry + reg, _, err := loadRegistryWithConfig(registryPath) + if err != nil { + return err + } + + // Fetch PRs sequentially (avoid GitHub rate limits) + var allPRs []GitHubPR + var fetchErrors []error + + repoList := reg.List() + for i, repo := range repoList { + repoFullName := cli.Sprintf("%s/%s", 
reg.Org, repo.Name) + cli.Print("\033[2K\r%s %d/%d %s", dimStyle.Render(i18n.T("i18n.progress.fetch")), i+1, len(repoList), repo.Name) + + prs, err := fetchPRs(repoFullName, repo.Name, author) + if err != nil { + fetchErrors = append(fetchErrors, cli.Wrap(err, repo.Name)) + continue + } + + for _, pr := range prs { + // Filter drafts unless --all + if !showAll && pr.IsDraft { + continue + } + allPRs = append(allPRs, pr) + } + } + cli.Print("\033[2K\r") // Clear progress line + + // Sort: pending review first, then by date + sort.Slice(allPRs, func(i, j int) bool { + // Pending reviews come first + iPending := allPRs[i].ReviewDecision == "" || allPRs[i].ReviewDecision == "REVIEW_REQUIRED" + jPending := allPRs[j].ReviewDecision == "" || allPRs[j].ReviewDecision == "REVIEW_REQUIRED" + if iPending != jPending { + return iPending + } + return allPRs[i].CreatedAt.After(allPRs[j].CreatedAt) + }) + + // Print PRs + if len(allPRs) == 0 { + cli.Text(i18n.T("cmd.dev.reviews.no_prs")) + return nil + } + + // Count by status + var pending, approved, changesRequested int + for _, pr := range allPRs { + switch pr.ReviewDecision { + case "APPROVED": + approved++ + case "CHANGES_REQUESTED": + changesRequested++ + default: + pending++ + } + } + + cli.Blank() + cli.Print("%s", i18n.T("cmd.dev.reviews.open_prs", map[string]interface{}{"Count": len(allPRs)})) + if pending > 0 { + cli.Print(" * %s", prPendingStyle.Render(i18n.T("common.count.pending", map[string]interface{}{"Count": pending}))) + } + if approved > 0 { + cli.Print(" * %s", prApprovedStyle.Render(i18n.T("cmd.dev.reviews.approved", map[string]interface{}{"Count": approved}))) + } + if changesRequested > 0 { + cli.Print(" * %s", prChangesStyle.Render(i18n.T("cmd.dev.reviews.changes_requested", map[string]interface{}{"Count": changesRequested}))) + } + cli.Blank() + cli.Blank() + + for _, pr := range allPRs { + printPR(pr) + } + + // Print any errors + if len(fetchErrors) > 0 { + cli.Blank() + for _, err := range fetchErrors 
{ + cli.Print("%s %s\n", errorStyle.Render(i18n.Label("error")), err) + } + } + + return nil +} + +func fetchPRs(repoFullName, repoName string, author string) ([]GitHubPR, error) { + args := []string{ + "pr", "list", + "--repo", repoFullName, + "--state", "open", + "--json", "number,title,state,isDraft,createdAt,author,reviewDecision,reviews,url", + } + + if author != "" { + args = append(args, "--author", author) + } + + cmd := exec.Command("gh", args...) + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + stderr := string(exitErr.Stderr) + if strings.Contains(stderr, "no pull requests") || strings.Contains(stderr, "Could not resolve") { + return nil, nil + } + return nil, cli.Err("%s", stderr) + } + return nil, err + } + + var prs []GitHubPR + if err := json.Unmarshal(output, &prs); err != nil { + return nil, err + } + + // Tag with repo name + for i := range prs { + prs[i].RepoName = repoName + } + + return prs, nil +} + +func printPR(pr GitHubPR) { + // #12 [core-php] Webhook validation + num := prNumberStyle.Render(cli.Sprintf("#%d", pr.Number)) + repo := issueRepoStyle.Render(cli.Sprintf("[%s]", pr.RepoName)) + title := prTitleStyle.Render(cli.Truncate(pr.Title, 50)) + author := prAuthorStyle.Render("@" + pr.Author.Login) + + // Review status + var status string + switch pr.ReviewDecision { + case "APPROVED": + status = prApprovedStyle.Render(i18n.T("cmd.dev.reviews.status_approved")) + case "CHANGES_REQUESTED": + status = prChangesStyle.Render(i18n.T("cmd.dev.reviews.status_changes")) + default: + status = prPendingStyle.Render(i18n.T("cmd.dev.reviews.status_pending")) + } + + // Draft indicator + draft := "" + if pr.IsDraft { + draft = prDraftStyle.Render(" " + i18n.T("cmd.dev.reviews.draft")) + } + + age := cli.FormatAge(pr.CreatedAt) + + cli.Print(" %s %s %s%s %s %s %s\n", num, repo, title, draft, author, status, issueAgeStyle.Render(age)) +} diff --git a/cmd/dev/cmd_sync.go b/cmd/dev/cmd_sync.go new file mode 
100644 index 0000000..fc425cd --- /dev/null +++ b/cmd/dev/cmd_sync.go @@ -0,0 +1,174 @@ +package dev + +import ( + "bytes" + "go/ast" + "go/parser" + "go/token" + "path/filepath" + "text/template" + + "forge.lthn.ai/core/go/pkg/cli" // Added + "forge.lthn.ai/core/go/pkg/i18n" // Added + coreio "forge.lthn.ai/core/go/pkg/io" + // Added + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +// addSyncCommand adds the 'sync' command to the given parent command. +func addSyncCommand(parent *cli.Command) { + syncCmd := &cli.Command{ + Use: "sync", + Short: i18n.T("cmd.dev.sync.short"), + Long: i18n.T("cmd.dev.sync.long"), + RunE: func(cmd *cli.Command, args []string) error { + if err := runSync(); err != nil { + return cli.Wrap(err, i18n.Label("error")) + } + cli.Text(i18n.T("i18n.done.sync", "public APIs")) + return nil + }, + } + + parent.AddCommand(syncCmd) +} + +type symbolInfo struct { + Name string + Kind string // "var", "func", "type", "const" +} + +func runSync() error { + pkgDir := "pkg" + internalDirs, err := coreio.Local.List(pkgDir) + if err != nil { + return cli.Wrap(err, "failed to read pkg directory") + } + + for _, dir := range internalDirs { + if !dir.IsDir() || dir.Name() == "core" { + continue + } + + serviceName := dir.Name() + internalFile := filepath.Join(pkgDir, serviceName, serviceName+".go") + publicDir := serviceName + publicFile := filepath.Join(publicDir, serviceName+".go") + + if !coreio.Local.IsFile(internalFile) { + continue + } + + symbols, err := getExportedSymbols(internalFile) + if err != nil { + return cli.Wrap(err, cli.Sprintf("error getting symbols for service '%s'", serviceName)) + } + + if err := generatePublicAPIFile(publicDir, publicFile, serviceName, symbols); err != nil { + return cli.Wrap(err, cli.Sprintf("error generating public API file for service '%s'", serviceName)) + } + } + + return nil +} + +func getExportedSymbols(path string) ([]symbolInfo, error) { + // ParseFile expects a filename/path and reads it 
using os.Open by default if content is nil. + // Since we want to use our Medium abstraction, we should read the file content first. + content, err := coreio.Local.Read(path) + if err != nil { + return nil, err + } + + fset := token.NewFileSet() + // ParseFile can take content as string (src argument). + node, err := parser.ParseFile(fset, path, content, parser.ParseComments) + if err != nil { + return nil, err + } + + var symbols []symbolInfo + for name, obj := range node.Scope.Objects { + if ast.IsExported(name) { + kind := "unknown" + switch obj.Kind { + case ast.Con: + kind = "const" + case ast.Var: + kind = "var" + case ast.Fun: + kind = "func" + case ast.Typ: + kind = "type" + } + if kind != "unknown" { + symbols = append(symbols, symbolInfo{Name: name, Kind: kind}) + } + } + } + return symbols, nil +} + +const publicAPITemplate = `// package {{.ServiceName}} provides the public API for the {{.ServiceName}} service. +package {{.ServiceName}} + +import ( + // Import the internal implementation with an alias. + impl "forge.lthn.ai/core/cli/{{.ServiceName}}" + + // Import the core contracts to re-export the interface. + "forge.lthn.ai/core/cli/core" +) + +{{range .Symbols}} +{{- if eq .Kind "type"}} +// {{.Name}} is the public type for the {{.Name}} service. It is a type alias +// to the underlying implementation, making it transparent to the user. +type {{.Name}} = impl.{{.Name}} +{{else if eq .Kind "const"}} +// {{.Name}} is a public constant that points to the real constant in the implementation package. +const {{.Name}} = impl.{{.Name}} +{{else if eq .Kind "var"}} +// {{.Name}} is a public variable that points to the real variable in the implementation package. +var {{.Name}} = impl.{{.Name}} +{{else if eq .Kind "func"}} +// {{.Name}} is a public function that points to the real function in the implementation package. +var {{.Name}} = impl.{{.Name}} +{{end}} +{{end}} + +// {{.InterfaceName}} is the public interface for the {{.ServiceName}} service. 
+type {{.InterfaceName}} = core.{{.InterfaceName}} +` + +func generatePublicAPIFile(dir, path, serviceName string, symbols []symbolInfo) error { + if err := coreio.Local.EnsureDir(dir); err != nil { + return err + } + + tmpl, err := template.New("publicAPI").Parse(publicAPITemplate) + if err != nil { + return err + } + + tcaser := cases.Title(language.English) + interfaceName := tcaser.String(serviceName) + + data := struct { + ServiceName string + Symbols []symbolInfo + InterfaceName string + }{ + ServiceName: serviceName, + Symbols: symbols, + InterfaceName: interfaceName, + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return err + } + + return coreio.Local.Write(path, buf.String()) +} diff --git a/cmd/dev/cmd_vm.go b/cmd/dev/cmd_vm.go new file mode 100644 index 0000000..fdfa16b --- /dev/null +++ b/cmd/dev/cmd_vm.go @@ -0,0 +1,510 @@ +package dev + +import ( + "context" + "errors" + "os" + "time" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go-devops/devops" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" +) + +// addVMCommands adds the dev environment VM commands to the dev parent command. +// These are added as direct subcommands: core dev install, core dev boot, etc. +func addVMCommands(parent *cli.Command) { + addVMInstallCommand(parent) + addVMBootCommand(parent) + addVMStopCommand(parent) + addVMStatusCommand(parent) + addVMShellCommand(parent) + addVMServeCommand(parent) + addVMTestCommand(parent) + addVMClaudeCommand(parent) + addVMUpdateCommand(parent) +} + +// addVMInstallCommand adds the 'dev install' command. 
+func addVMInstallCommand(parent *cli.Command) { + installCmd := &cli.Command{ + Use: "install", + Short: i18n.T("cmd.dev.vm.install.short"), + Long: i18n.T("cmd.dev.vm.install.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runVMInstall() + }, + } + + parent.AddCommand(installCmd) +} + +func runVMInstall() error { + d, err := devops.New(io.Local) + if err != nil { + return err + } + + if d.IsInstalled() { + cli.Text(successStyle.Render(i18n.T("cmd.dev.vm.already_installed"))) + cli.Blank() + cli.Text(i18n.T("cmd.dev.vm.check_updates", map[string]interface{}{"Command": dimStyle.Render("core dev update")})) + return nil + } + + cli.Print("%s %s\n", dimStyle.Render(i18n.Label("image")), devops.ImageName()) + cli.Blank() + cli.Text(i18n.T("cmd.dev.vm.downloading")) + cli.Blank() + + ctx := context.Background() + start := time.Now() + var lastProgress int64 + + err = d.Install(ctx, func(downloaded, total int64) { + if total > 0 { + pct := int(float64(downloaded) / float64(total) * 100) + if pct != int(float64(lastProgress)/float64(total)*100) { + cli.Print("\r%s %d%%", dimStyle.Render(i18n.T("cmd.dev.vm.progress_label")), pct) + lastProgress = downloaded + } + } + }) + + cli.Blank() // Clear progress line + + if err != nil { + return cli.Wrap(err, "install failed") + } + + elapsed := time.Since(start).Round(time.Second) + cli.Blank() + cli.Text(i18n.T("cmd.dev.vm.installed_in", map[string]interface{}{"Duration": elapsed})) + cli.Blank() + cli.Text(i18n.T("cmd.dev.vm.start_with", map[string]interface{}{"Command": dimStyle.Render("core dev boot")})) + + return nil +} + +// VM boot command flags +var ( + vmBootMemory int + vmBootCPUs int + vmBootFresh bool +) + +// addVMBootCommand adds the 'devops boot' command. 
+func addVMBootCommand(parent *cli.Command) { + bootCmd := &cli.Command{ + Use: "boot", + Short: i18n.T("cmd.dev.vm.boot.short"), + Long: i18n.T("cmd.dev.vm.boot.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runVMBoot(vmBootMemory, vmBootCPUs, vmBootFresh) + }, + } + + bootCmd.Flags().IntVar(&vmBootMemory, "memory", 0, i18n.T("cmd.dev.vm.boot.flag.memory")) + bootCmd.Flags().IntVar(&vmBootCPUs, "cpus", 0, i18n.T("cmd.dev.vm.boot.flag.cpus")) + bootCmd.Flags().BoolVar(&vmBootFresh, "fresh", false, i18n.T("cmd.dev.vm.boot.flag.fresh")) + + parent.AddCommand(bootCmd) +} + +func runVMBoot(memory, cpus int, fresh bool) error { + d, err := devops.New(io.Local) + if err != nil { + return err + } + + if !d.IsInstalled() { + return errors.New(i18n.T("cmd.dev.vm.not_installed")) + } + + opts := devops.DefaultBootOptions() + if memory > 0 { + opts.Memory = memory + } + if cpus > 0 { + opts.CPUs = cpus + } + opts.Fresh = fresh + + cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.dev.vm.config_label")), i18n.T("cmd.dev.vm.config_value", map[string]interface{}{"Memory": opts.Memory, "CPUs": opts.CPUs})) + cli.Blank() + cli.Text(i18n.T("cmd.dev.vm.booting")) + + ctx := context.Background() + if err := d.Boot(ctx, opts); err != nil { + return err + } + + cli.Blank() + cli.Text(successStyle.Render(i18n.T("cmd.dev.vm.running"))) + cli.Blank() + cli.Text(i18n.T("cmd.dev.vm.connect_with", map[string]interface{}{"Command": dimStyle.Render("core dev shell")})) + cli.Print("%s %s\n", i18n.T("cmd.dev.vm.ssh_port"), dimStyle.Render("2222")) + + return nil +} + +// addVMStopCommand adds the 'devops stop' command. 
+func addVMStopCommand(parent *cli.Command) { + stopCmd := &cli.Command{ + Use: "stop", + Short: i18n.T("cmd.dev.vm.stop.short"), + Long: i18n.T("cmd.dev.vm.stop.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runVMStop() + }, + } + + parent.AddCommand(stopCmd) +} + +func runVMStop() error { + d, err := devops.New(io.Local) + if err != nil { + return err + } + + ctx := context.Background() + running, err := d.IsRunning(ctx) + if err != nil { + return err + } + + if !running { + cli.Text(dimStyle.Render(i18n.T("cmd.dev.vm.not_running"))) + return nil + } + + cli.Text(i18n.T("cmd.dev.vm.stopping")) + + if err := d.Stop(ctx); err != nil { + return err + } + + cli.Text(successStyle.Render(i18n.T("common.status.stopped"))) + return nil +} + +// addVMStatusCommand adds the 'devops status' command. +func addVMStatusCommand(parent *cli.Command) { + statusCmd := &cli.Command{ + Use: "vm-status", + Short: i18n.T("cmd.dev.vm.status.short"), + Long: i18n.T("cmd.dev.vm.status.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runVMStatus() + }, + } + + parent.AddCommand(statusCmd) +} + +func runVMStatus() error { + d, err := devops.New(io.Local) + if err != nil { + return err + } + + ctx := context.Background() + status, err := d.Status(ctx) + if err != nil { + return err + } + + cli.Text(headerStyle.Render(i18n.T("cmd.dev.vm.status_title"))) + cli.Blank() + + // Installation status + if status.Installed { + cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.dev.vm.installed_label")), successStyle.Render(i18n.T("cmd.dev.vm.installed_yes"))) + if status.ImageVersion != "" { + cli.Print("%s %s\n", dimStyle.Render(i18n.Label("version")), status.ImageVersion) + } + } else { + cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.dev.vm.installed_label")), errorStyle.Render(i18n.T("cmd.dev.vm.installed_no"))) + cli.Blank() + cli.Text(i18n.T("cmd.dev.vm.install_with", map[string]interface{}{"Command": dimStyle.Render("core dev install")})) + return 
nil + } + + cli.Blank() + + // Running status + if status.Running { + cli.Print("%s %s\n", dimStyle.Render(i18n.Label("status")), successStyle.Render(i18n.T("common.status.running"))) + cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.dev.vm.container_label")), status.ContainerID[:8]) + cli.Print("%s %dMB\n", dimStyle.Render(i18n.T("cmd.dev.vm.memory_label")), status.Memory) + cli.Print("%s %d\n", dimStyle.Render(i18n.T("cmd.dev.vm.cpus_label")), status.CPUs) + cli.Print("%s %d\n", dimStyle.Render(i18n.T("cmd.dev.vm.ssh_port")), status.SSHPort) + cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.dev.vm.uptime_label")), formatVMUptime(status.Uptime)) + } else { + cli.Print("%s %s\n", dimStyle.Render(i18n.Label("status")), dimStyle.Render(i18n.T("common.status.stopped"))) + cli.Blank() + cli.Text(i18n.T("cmd.dev.vm.start_with", map[string]interface{}{"Command": dimStyle.Render("core dev boot")})) + } + + return nil +} + +func formatVMUptime(d time.Duration) string { + if d < time.Minute { + return cli.Sprintf("%ds", int(d.Seconds())) + } + if d < time.Hour { + return cli.Sprintf("%dm", int(d.Minutes())) + } + if d < 24*time.Hour { + return cli.Sprintf("%dh %dm", int(d.Hours()), int(d.Minutes())%60) + } + return cli.Sprintf("%dd %dh", int(d.Hours()/24), int(d.Hours())%24) +} + +// VM shell command flags +var vmShellConsole bool + +// addVMShellCommand adds the 'devops shell' command. 
+func addVMShellCommand(parent *cli.Command) { + shellCmd := &cli.Command{ + Use: "shell [-- command...]", + Short: i18n.T("cmd.dev.vm.shell.short"), + Long: i18n.T("cmd.dev.vm.shell.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runVMShell(vmShellConsole, args) + }, + } + + shellCmd.Flags().BoolVar(&vmShellConsole, "console", false, i18n.T("cmd.dev.vm.shell.flag.console")) + + parent.AddCommand(shellCmd) +} + +func runVMShell(console bool, command []string) error { + d, err := devops.New(io.Local) + if err != nil { + return err + } + + opts := devops.ShellOptions{ + Console: console, + Command: command, + } + + ctx := context.Background() + return d.Shell(ctx, opts) +} + +// VM serve command flags +var ( + vmServePort int + vmServePath string +) + +// addVMServeCommand adds the 'devops serve' command. +func addVMServeCommand(parent *cli.Command) { + serveCmd := &cli.Command{ + Use: "serve", + Short: i18n.T("cmd.dev.vm.serve.short"), + Long: i18n.T("cmd.dev.vm.serve.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runVMServe(vmServePort, vmServePath) + }, + } + + serveCmd.Flags().IntVarP(&vmServePort, "port", "p", 0, i18n.T("cmd.dev.vm.serve.flag.port")) + serveCmd.Flags().StringVar(&vmServePath, "path", "", i18n.T("cmd.dev.vm.serve.flag.path")) + + parent.AddCommand(serveCmd) +} + +func runVMServe(port int, path string) error { + d, err := devops.New(io.Local) + if err != nil { + return err + } + + projectDir, err := os.Getwd() + if err != nil { + return err + } + + opts := devops.ServeOptions{ + Port: port, + Path: path, + } + + ctx := context.Background() + return d.Serve(ctx, projectDir, opts) +} + +// VM test command flags +var vmTestName string + +// addVMTestCommand adds the 'devops test' command. 
+func addVMTestCommand(parent *cli.Command) { + testCmd := &cli.Command{ + Use: "test [-- command...]", + Short: i18n.T("cmd.dev.vm.test.short"), + Long: i18n.T("cmd.dev.vm.test.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runVMTest(vmTestName, args) + }, + } + + testCmd.Flags().StringVarP(&vmTestName, "name", "n", "", i18n.T("cmd.dev.vm.test.flag.name")) + + parent.AddCommand(testCmd) +} + +func runVMTest(name string, command []string) error { + d, err := devops.New(io.Local) + if err != nil { + return err + } + + projectDir, err := os.Getwd() + if err != nil { + return err + } + + opts := devops.TestOptions{ + Name: name, + Command: command, + } + + ctx := context.Background() + return d.Test(ctx, projectDir, opts) +} + +// VM claude command flags +var ( + vmClaudeNoAuth bool + vmClaudeModel string + vmClaudeAuthFlags []string +) + +// addVMClaudeCommand adds the 'devops claude' command. +func addVMClaudeCommand(parent *cli.Command) { + claudeCmd := &cli.Command{ + Use: "claude", + Short: i18n.T("cmd.dev.vm.claude.short"), + Long: i18n.T("cmd.dev.vm.claude.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runVMClaude(vmClaudeNoAuth, vmClaudeModel, vmClaudeAuthFlags) + }, + } + + claudeCmd.Flags().BoolVar(&vmClaudeNoAuth, "no-auth", false, i18n.T("cmd.dev.vm.claude.flag.no_auth")) + claudeCmd.Flags().StringVarP(&vmClaudeModel, "model", "m", "", i18n.T("cmd.dev.vm.claude.flag.model")) + claudeCmd.Flags().StringSliceVar(&vmClaudeAuthFlags, "auth", nil, i18n.T("cmd.dev.vm.claude.flag.auth")) + + parent.AddCommand(claudeCmd) +} + +func runVMClaude(noAuth bool, model string, authFlags []string) error { + d, err := devops.New(io.Local) + if err != nil { + return err + } + + projectDir, err := os.Getwd() + if err != nil { + return err + } + + opts := devops.ClaudeOptions{ + NoAuth: noAuth, + Model: model, + Auth: authFlags, + } + + ctx := context.Background() + return d.Claude(ctx, projectDir, opts) +} + +// VM update 
command flags +var vmUpdateApply bool + +// addVMUpdateCommand adds the 'devops update' command. +func addVMUpdateCommand(parent *cli.Command) { + updateCmd := &cli.Command{ + Use: "update", + Short: i18n.T("cmd.dev.vm.update.short"), + Long: i18n.T("cmd.dev.vm.update.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runVMUpdate(vmUpdateApply) + }, + } + + updateCmd.Flags().BoolVar(&vmUpdateApply, "apply", false, i18n.T("cmd.dev.vm.update.flag.apply")) + + parent.AddCommand(updateCmd) +} + +func runVMUpdate(apply bool) error { + d, err := devops.New(io.Local) + if err != nil { + return err + } + + ctx := context.Background() + + cli.Text(i18n.T("common.progress.checking_updates")) + cli.Blank() + + current, latest, hasUpdate, err := d.CheckUpdate(ctx) + if err != nil { + return cli.Wrap(err, "failed to check for updates") + } + + cli.Print("%s %s\n", dimStyle.Render(i18n.Label("current")), valueStyle.Render(current)) + cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.dev.vm.latest_label")), valueStyle.Render(latest)) + cli.Blank() + + if !hasUpdate { + cli.Text(successStyle.Render(i18n.T("cmd.dev.vm.up_to_date"))) + return nil + } + + cli.Text(warningStyle.Render(i18n.T("cmd.dev.vm.update_available"))) + cli.Blank() + + if !apply { + cli.Text(i18n.T("cmd.dev.vm.run_to_update", map[string]interface{}{"Command": dimStyle.Render("core dev update --apply")})) + return nil + } + + // Stop if running + running, _ := d.IsRunning(ctx) + if running { + cli.Text(i18n.T("cmd.dev.vm.stopping_current")) + _ = d.Stop(ctx) + } + + cli.Text(i18n.T("cmd.dev.vm.downloading_update")) + cli.Blank() + + start := time.Now() + err = d.Install(ctx, func(downloaded, total int64) { + if total > 0 { + pct := int(float64(downloaded) / float64(total) * 100) + cli.Print("\r%s %d%%", dimStyle.Render(i18n.T("cmd.dev.vm.progress_label")), pct) + } + }) + + cli.Blank() + + if err != nil { + return cli.Wrap(err, "update failed") + } + + elapsed := 
time.Since(start).Round(time.Second) + cli.Blank() + cli.Text(i18n.T("cmd.dev.vm.updated_in", map[string]interface{}{"Duration": elapsed})) + + return nil +} diff --git a/cmd/dev/cmd_work.go b/cmd/dev/cmd_work.go new file mode 100644 index 0000000..5394608 --- /dev/null +++ b/cmd/dev/cmd_work.go @@ -0,0 +1,344 @@ +package dev + +import ( + "context" + "os" + "os/exec" + "sort" + "strings" + + "forge.lthn.ai/core/go-agentic" + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go-scm/git" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// Work command flags +var ( + workStatusOnly bool + workAutoCommit bool + workRegistryPath string +) + +// AddWorkCommand adds the 'work' command to the given parent command. +func AddWorkCommand(parent *cli.Command) { + workCmd := &cli.Command{ + Use: "work", + Short: i18n.T("cmd.dev.work.short"), + Long: i18n.T("cmd.dev.work.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runWork(workRegistryPath, workStatusOnly, workAutoCommit) + }, + } + + workCmd.Flags().BoolVar(&workStatusOnly, "status", false, i18n.T("cmd.dev.work.flag.status")) + workCmd.Flags().BoolVar(&workAutoCommit, "commit", false, i18n.T("cmd.dev.work.flag.commit")) + workCmd.Flags().StringVar(&workRegistryPath, "registry", "", i18n.T("common.flag.registry")) + + parent.AddCommand(workCmd) +} + +func runWork(registryPath string, statusOnly, autoCommit bool) error { + ctx := context.Background() + + // Build worker bundle with required services + bundle, err := NewWorkBundle(WorkBundleOptions{ + RegistryPath: registryPath, + }) + if err != nil { + return err + } + + // Start services (registers handlers) + if err := bundle.Start(ctx); err != nil { + return err + } + defer func() { _ = bundle.Stop(ctx) }() + + // Load registry and get paths + paths, names, err := func() ([]string, map[string]string, error) { + reg, _, err := loadRegistryWithConfig(registryPath) + if err != nil { + return nil, nil, err + } + var paths []string + names := 
make(map[string]string) + for _, repo := range reg.List() { + if repo.IsGitRepo() { + paths = append(paths, repo.Path) + names[repo.Path] = repo.Name + } + } + return paths, names, nil + }() + if err != nil { + return err + } + + if len(paths) == 0 { + cli.Text(i18n.T("cmd.dev.no_git_repos")) + return nil + } + + // QUERY git status + result, handled, err := bundle.Core.QUERY(git.QueryStatus{ + Paths: paths, + Names: names, + }) + if !handled { + return cli.Err("git service not available") + } + if err != nil { + return err + } + statuses := result.([]git.RepoStatus) + + // Sort by repo name for consistent output + sort.Slice(statuses, func(i, j int) bool { + return statuses[i].Name < statuses[j].Name + }) + + // Display status table + printStatusTable(statuses) + + // Collect dirty and ahead repos + var dirtyRepos []git.RepoStatus + var aheadRepos []git.RepoStatus + + for _, s := range statuses { + if s.Error != nil { + continue + } + if s.IsDirty() { + dirtyRepos = append(dirtyRepos, s) + } + if s.HasUnpushed() { + aheadRepos = append(aheadRepos, s) + } + } + + // Auto-commit dirty repos if requested + if autoCommit && len(dirtyRepos) > 0 { + cli.Blank() + cli.Print("%s\n", cli.TitleStyle.Render(i18n.T("cmd.dev.commit.committing"))) + cli.Blank() + + for _, s := range dirtyRepos { + // PERFORM commit via agentic service + _, handled, err := bundle.Core.PERFORM(agentic.TaskCommit{ + Path: s.Path, + Name: s.Name, + }) + if !handled { + cli.Print(" %s %s: %s\n", warningStyle.Render("!"), s.Name, "agentic service not available") + continue + } + if err != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), s.Name, err) + } else { + cli.Print(" %s %s\n", successStyle.Render("v"), s.Name) + } + } + + // Re-QUERY status after commits + result, _, _ = bundle.Core.QUERY(git.QueryStatus{ + Paths: paths, + Names: names, + }) + statuses = result.([]git.RepoStatus) + + // Rebuild ahead repos list + aheadRepos = nil + for _, s := range statuses { + if s.Error == nil && 
s.HasUnpushed() { + aheadRepos = append(aheadRepos, s) + } + } + } + + // If status only, we're done + if statusOnly { + if len(dirtyRepos) > 0 && !autoCommit { + cli.Blank() + cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.dev.work.use_commit_flag"))) + } + return nil + } + + // Push repos with unpushed commits + if len(aheadRepos) == 0 { + cli.Blank() + cli.Text(i18n.T("cmd.dev.work.all_up_to_date")) + return nil + } + + cli.Blank() + cli.Print("%s\n", i18n.T("common.count.repos_unpushed", map[string]interface{}{"Count": len(aheadRepos)})) + for _, s := range aheadRepos { + cli.Print(" %s: %s\n", s.Name, i18n.T("common.count.commits", map[string]interface{}{"Count": s.Ahead})) + } + + cli.Blank() + if !cli.Confirm(i18n.T("cmd.dev.push.confirm")) { + cli.Text(i18n.T("cli.aborted")) + return nil + } + + cli.Blank() + + // PERFORM push for each repo + var divergedRepos []git.RepoStatus + + for _, s := range aheadRepos { + _, handled, err := bundle.Core.PERFORM(git.TaskPush{ + Path: s.Path, + Name: s.Name, + }) + if !handled { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), s.Name, "git service not available") + continue + } + if err != nil { + if git.IsNonFastForward(err) { + cli.Print(" %s %s: %s\n", warningStyle.Render("!"), s.Name, i18n.T("cmd.dev.push.diverged")) + divergedRepos = append(divergedRepos, s) + } else { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), s.Name, err) + } + } else { + cli.Print(" %s %s\n", successStyle.Render("v"), s.Name) + } + } + + // Handle diverged repos - offer to pull and retry + if len(divergedRepos) > 0 { + cli.Blank() + cli.Print("%s\n", i18n.T("cmd.dev.push.diverged_help")) + if cli.Confirm(i18n.T("cmd.dev.push.pull_and_retry")) { + cli.Blank() + for _, s := range divergedRepos { + cli.Print(" %s %s...\n", dimStyle.Render("↓"), s.Name) + + // PERFORM pull + _, _, err := bundle.Core.PERFORM(git.TaskPull{Path: s.Path, Name: s.Name}) + if err != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), s.Name, err) + 
continue + } + + cli.Print(" %s %s...\n", dimStyle.Render("↑"), s.Name) + + // PERFORM push + _, _, err = bundle.Core.PERFORM(git.TaskPush{Path: s.Path, Name: s.Name}) + if err != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("x"), s.Name, err) + continue + } + + cli.Print(" %s %s\n", successStyle.Render("v"), s.Name) + } + } + } + + return nil +} + +func printStatusTable(statuses []git.RepoStatus) { + // Calculate column widths + nameWidth := 4 // "Repo" + for _, s := range statuses { + if len(s.Name) > nameWidth { + nameWidth = len(s.Name) + } + } + + // Print header with fixed-width formatting + cli.Print("%-*s %8s %9s %6s %5s\n", + nameWidth, + cli.TitleStyle.Render(i18n.Label("repo")), + cli.TitleStyle.Render(i18n.T("cmd.dev.work.table_modified")), + cli.TitleStyle.Render(i18n.T("cmd.dev.work.table_untracked")), + cli.TitleStyle.Render(i18n.T("cmd.dev.work.table_staged")), + cli.TitleStyle.Render(i18n.T("cmd.dev.work.table_ahead")), + ) + + // Print separator + cli.Text(strings.Repeat("-", nameWidth+2+10+11+8+7)) + + // Print rows + for _, s := range statuses { + if s.Error != nil { + paddedName := cli.Sprintf("%-*s", nameWidth, s.Name) + cli.Print("%s %s\n", + repoNameStyle.Render(paddedName), + errorStyle.Render(i18n.T("cmd.dev.work.error_prefix")+" "+s.Error.Error()), + ) + continue + } + + // Style numbers based on values + modStr := cli.Sprintf("%d", s.Modified) + if s.Modified > 0 { + modStr = dirtyStyle.Render(modStr) + } else { + modStr = cleanStyle.Render(modStr) + } + + untrackedStr := cli.Sprintf("%d", s.Untracked) + if s.Untracked > 0 { + untrackedStr = dirtyStyle.Render(untrackedStr) + } else { + untrackedStr = cleanStyle.Render(untrackedStr) + } + + stagedStr := cli.Sprintf("%d", s.Staged) + if s.Staged > 0 { + stagedStr = aheadStyle.Render(stagedStr) + } else { + stagedStr = cleanStyle.Render(stagedStr) + } + + aheadStr := cli.Sprintf("%d", s.Ahead) + if s.Ahead > 0 { + aheadStr = aheadStyle.Render(aheadStr) + } else { + aheadStr = 
cleanStyle.Render(aheadStr) + } + + // Pad name before styling to avoid ANSI code length issues + paddedName := cli.Sprintf("%-*s", nameWidth, s.Name) + cli.Print("%s %8s %9s %6s %5s\n", + repoNameStyle.Render(paddedName), + modStr, + untrackedStr, + stagedStr, + aheadStr, + ) + } +} + +// claudeCommit shells out to claude for committing (legacy helper for other commands) +func claudeCommit(ctx context.Context, repoPath, repoName, registryPath string) error { + prompt := agentic.Prompt("commit") + + cmd := exec.CommandContext(ctx, "claude", "-p", prompt, "--allowedTools", "Bash,Read,Glob,Grep") + cmd.Dir = repoPath + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + + return cmd.Run() +} + +// claudeEditCommit shells out to claude with edit permissions (legacy helper) +func claudeEditCommit(ctx context.Context, repoPath, repoName, registryPath string) error { + prompt := agentic.Prompt("commit") + + cmd := exec.CommandContext(ctx, "claude", "-p", prompt, "--allowedTools", "Bash,Read,Write,Edit,Glob,Grep") + cmd.Dir = repoPath + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + + return cmd.Run() +} diff --git a/cmd/dev/cmd_workflow.go b/cmd/dev/cmd_workflow.go new file mode 100644 index 0000000..e9adb08 --- /dev/null +++ b/cmd/dev/cmd_workflow.go @@ -0,0 +1,307 @@ +package dev + +import ( + "path/filepath" + "sort" + "strings" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" +) + +// Workflow command flags +var ( + workflowRegistryPath string + workflowDryRun bool +) + +// addWorkflowCommands adds the 'workflow' subcommand and its subcommands. 
+func addWorkflowCommands(parent *cli.Command) { + workflowCmd := &cli.Command{ + Use: "workflow", + Short: i18n.T("cmd.dev.workflow.short"), + Long: i18n.T("cmd.dev.workflow.long"), + } + + // Shared flags + workflowCmd.PersistentFlags().StringVar(&workflowRegistryPath, "registry", "", i18n.T("common.flag.registry")) + + // Subcommands + addWorkflowListCommand(workflowCmd) + addWorkflowSyncCommand(workflowCmd) + + parent.AddCommand(workflowCmd) +} + +// addWorkflowListCommand adds the 'workflow list' subcommand. +func addWorkflowListCommand(parent *cli.Command) { + listCmd := &cli.Command{ + Use: "list", + Short: i18n.T("cmd.dev.workflow.list.short"), + Long: i18n.T("cmd.dev.workflow.list.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runWorkflowList(workflowRegistryPath) + }, + } + + parent.AddCommand(listCmd) +} + +// addWorkflowSyncCommand adds the 'workflow sync' subcommand. +func addWorkflowSyncCommand(parent *cli.Command) { + syncCmd := &cli.Command{ + Use: "sync ", + Short: i18n.T("cmd.dev.workflow.sync.short"), + Long: i18n.T("cmd.dev.workflow.sync.long"), + Args: cli.ExactArgs(1), + RunE: func(cmd *cli.Command, args []string) error { + return runWorkflowSync(workflowRegistryPath, args[0], workflowDryRun) + }, + } + + syncCmd.Flags().BoolVar(&workflowDryRun, "dry-run", false, i18n.T("cmd.dev.workflow.sync.flag.dry_run")) + + parent.AddCommand(syncCmd) +} + +// runWorkflowList shows a table of repos vs workflows. 
+func runWorkflowList(registryPath string) error { + reg, registryDir, err := loadRegistryWithConfig(registryPath) + if err != nil { + return err + } + + repoList := reg.List() + if len(repoList) == 0 { + cli.Text(i18n.T("cmd.dev.no_git_repos")) + return nil + } + + // Sort repos by name for consistent output + sort.Slice(repoList, func(i, j int) bool { + return repoList[i].Name < repoList[j].Name + }) + + // Collect all unique workflow files across all repos + workflowSet := make(map[string]bool) + repoWorkflows := make(map[string]map[string]bool) + + for _, repo := range repoList { + workflows := findWorkflows(repo.Path) + repoWorkflows[repo.Name] = make(map[string]bool) + for _, wf := range workflows { + workflowSet[wf] = true + repoWorkflows[repo.Name][wf] = true + } + } + + // Sort workflow names + var workflowNames []string + for wf := range workflowSet { + workflowNames = append(workflowNames, wf) + } + sort.Strings(workflowNames) + + if len(workflowNames) == 0 { + cli.Text(i18n.T("cmd.dev.workflow.no_workflows")) + return nil + } + + // Check for template workflows in the registry directory + templateWorkflows := findWorkflows(filepath.Join(registryDir, ".github", "workflow-templates")) + if len(templateWorkflows) == 0 { + // Also check .github/workflows in the devops repo itself + templateWorkflows = findWorkflows(filepath.Join(registryDir, ".github", "workflows")) + } + templateSet := make(map[string]bool) + for _, wf := range templateWorkflows { + templateSet[wf] = true + } + + // Build table + headers := []string{i18n.T("cmd.dev.workflow.header.repo")} + headers = append(headers, workflowNames...) + table := cli.NewTable(headers...) + + for _, repo := range repoList { + row := []string{repo.Name} + for _, wf := range workflowNames { + if repoWorkflows[repo.Name][wf] { + row = append(row, successStyle.Render(cli.Glyph(":check:"))) + } else { + row = append(row, errorStyle.Render(cli.Glyph(":cross:"))) + } + } + table.AddRow(row...) 
+ } + + cli.Blank() + table.Render() + + return nil +} + +// runWorkflowSync copies a workflow template to all repos. +func runWorkflowSync(registryPath string, workflowFile string, dryRun bool) error { + reg, registryDir, err := loadRegistryWithConfig(registryPath) + if err != nil { + return err + } + + // Find the template workflow + templatePath := findTemplateWorkflow(registryDir, workflowFile) + if templatePath == "" { + return cli.Err("%s", i18n.T("cmd.dev.workflow.template_not_found", map[string]interface{}{"File": workflowFile})) + } + + // Read template content + templateContent, err := io.Local.Read(templatePath) + if err != nil { + return cli.Wrap(err, i18n.T("cmd.dev.workflow.read_template_error")) + } + + repoList := reg.List() + if len(repoList) == 0 { + cli.Text(i18n.T("cmd.dev.no_git_repos")) + return nil + } + + // Sort repos by name for consistent output + sort.Slice(repoList, func(i, j int) bool { + return repoList[i].Name < repoList[j].Name + }) + + if dryRun { + cli.Text(i18n.T("cmd.dev.workflow.dry_run_mode")) + cli.Blank() + } + + var synced, skipped, failed int + + for _, repo := range repoList { + if !repo.IsGitRepo() { + skipped++ + continue + } + + destDir := filepath.Join(repo.Path, ".github", "workflows") + destPath := filepath.Join(destDir, workflowFile) + + // Check if workflow already exists and is identical + if existingContent, err := io.Local.Read(destPath); err == nil { + if existingContent == templateContent { + cli.Print(" %s %s %s\n", + dimStyle.Render("-"), + repoNameStyle.Render(repo.Name), + dimStyle.Render(i18n.T("cmd.dev.workflow.up_to_date"))) + skipped++ + continue + } + } + + if dryRun { + cli.Print(" %s %s %s\n", + warningStyle.Render("*"), + repoNameStyle.Render(repo.Name), + i18n.T("cmd.dev.workflow.would_sync")) + synced++ + continue + } + + // Create .github/workflows directory if needed + if err := io.Local.EnsureDir(destDir); err != nil { + cli.Print(" %s %s %s\n", + errorStyle.Render(cli.Glyph(":cross:")), + 
repoNameStyle.Render(repo.Name), + err.Error()) + failed++ + continue + } + + // Write workflow file + if err := io.Local.Write(destPath, templateContent); err != nil { + cli.Print(" %s %s %s\n", + errorStyle.Render(cli.Glyph(":cross:")), + repoNameStyle.Render(repo.Name), + err.Error()) + failed++ + continue + } + + cli.Print(" %s %s %s\n", + successStyle.Render(cli.Glyph(":check:")), + repoNameStyle.Render(repo.Name), + i18n.T("cmd.dev.workflow.synced")) + synced++ + } + + cli.Blank() + + // Summary + if dryRun { + cli.Print("%s %s\n", + i18n.T("cmd.dev.workflow.would_sync_count", map[string]interface{}{"Count": synced}), + dimStyle.Render(i18n.T("cmd.dev.workflow.skipped_count", map[string]interface{}{"Count": skipped}))) + cli.Text(i18n.T("cmd.dev.workflow.run_without_dry_run")) + } else { + cli.Print("%s %s\n", + successStyle.Render(i18n.T("cmd.dev.workflow.synced_count", map[string]interface{}{"Count": synced})), + dimStyle.Render(i18n.T("cmd.dev.workflow.skipped_count", map[string]interface{}{"Count": skipped}))) + if failed > 0 { + cli.Print("%s\n", errorStyle.Render(i18n.T("cmd.dev.workflow.failed_count", map[string]interface{}{"Count": failed}))) + } + } + + return nil +} + +// findWorkflows returns a list of workflow file names in a directory. +func findWorkflows(dir string) []string { + workflowsDir := filepath.Join(dir, ".github", "workflows") + // If dir already ends with workflows path, use it directly + if strings.HasSuffix(dir, "workflows") || strings.HasSuffix(dir, "workflow-templates") { + workflowsDir = dir + } + + entries, err := io.Local.List(workflowsDir) + if err != nil { + return nil + } + + var workflows []string + for _, entry := range entries { + if entry.IsDir() { + continue + } + name := entry.Name() + if strings.HasSuffix(name, ".yml") || strings.HasSuffix(name, ".yaml") { + workflows = append(workflows, name) + } + } + + return workflows +} + +// findTemplateWorkflow finds a workflow template file in common locations. 
+func findTemplateWorkflow(registryDir, workflowFile string) string { + // Ensure .yml extension + if !strings.HasSuffix(workflowFile, ".yml") && !strings.HasSuffix(workflowFile, ".yaml") { + workflowFile = workflowFile + ".yml" + } + + // Check common template locations + candidates := []string{ + filepath.Join(registryDir, ".github", "workflow-templates", workflowFile), + filepath.Join(registryDir, ".github", "workflows", workflowFile), + filepath.Join(registryDir, "workflow-templates", workflowFile), + } + + for _, candidate := range candidates { + if io.Local.IsFile(candidate) { + return candidate + } + } + + return "" +} diff --git a/cmd/dev/cmd_workflow_test.go b/cmd/dev/cmd_workflow_test.go new file mode 100644 index 0000000..1aa31d0 --- /dev/null +++ b/cmd/dev/cmd_workflow_test.go @@ -0,0 +1,108 @@ +package dev + +import ( + "path/filepath" + "testing" + + "forge.lthn.ai/core/go/pkg/io" +) + +func TestFindWorkflows_Good(t *testing.T) { + // Create a temp directory with workflow files + tmpDir := t.TempDir() + workflowsDir := filepath.Join(tmpDir, ".github", "workflows") + if err := io.Local.EnsureDir(workflowsDir); err != nil { + t.Fatalf("Failed to create workflows dir: %v", err) + } + + // Create some workflow files + for _, name := range []string{"qa.yml", "tests.yml", "codeql.yaml"} { + if err := io.Local.Write(filepath.Join(workflowsDir, name), "name: Test"); err != nil { + t.Fatalf("Failed to create workflow file: %v", err) + } + } + + // Create a non-workflow file (should be ignored) + if err := io.Local.Write(filepath.Join(workflowsDir, "readme.md"), "# Workflows"); err != nil { + t.Fatalf("Failed to create readme file: %v", err) + } + + workflows := findWorkflows(tmpDir) + + if len(workflows) != 3 { + t.Errorf("Expected 3 workflows, got %d", len(workflows)) + } + + // Check that all expected workflows are found + found := make(map[string]bool) + for _, wf := range workflows { + found[wf] = true + } + + for _, expected := range []string{"qa.yml", 
"tests.yml", "codeql.yaml"} { + if !found[expected] { + t.Errorf("Expected to find workflow %s", expected) + } + } +} + +func TestFindWorkflows_NoWorkflowsDir(t *testing.T) { + tmpDir := t.TempDir() + workflows := findWorkflows(tmpDir) + + if len(workflows) != 0 { + t.Errorf("Expected 0 workflows for non-existent dir, got %d", len(workflows)) + } +} + +func TestFindTemplateWorkflow_Good(t *testing.T) { + tmpDir := t.TempDir() + templatesDir := filepath.Join(tmpDir, ".github", "workflow-templates") + if err := io.Local.EnsureDir(templatesDir); err != nil { + t.Fatalf("Failed to create templates dir: %v", err) + } + + templateContent := "name: QA\non: [push]" + if err := io.Local.Write(filepath.Join(templatesDir, "qa.yml"), templateContent); err != nil { + t.Fatalf("Failed to create template file: %v", err) + } + + // Test finding with .yml extension + result := findTemplateWorkflow(tmpDir, "qa.yml") + if result == "" { + t.Error("Expected to find qa.yml template") + } + + // Test finding without extension (should auto-add .yml) + result = findTemplateWorkflow(tmpDir, "qa") + if result == "" { + t.Error("Expected to find qa template without extension") + } +} + +func TestFindTemplateWorkflow_FallbackToWorkflows(t *testing.T) { + tmpDir := t.TempDir() + workflowsDir := filepath.Join(tmpDir, ".github", "workflows") + if err := io.Local.EnsureDir(workflowsDir); err != nil { + t.Fatalf("Failed to create workflows dir: %v", err) + } + + templateContent := "name: Tests\non: [push]" + if err := io.Local.Write(filepath.Join(workflowsDir, "tests.yml"), templateContent); err != nil { + t.Fatalf("Failed to create workflow file: %v", err) + } + + result := findTemplateWorkflow(tmpDir, "tests.yml") + if result == "" { + t.Error("Expected to find tests.yml in workflows dir") + } +} + +func TestFindTemplateWorkflow_NotFound(t *testing.T) { + tmpDir := t.TempDir() + + result := findTemplateWorkflow(tmpDir, "nonexistent.yml") + if result != "" { + t.Errorf("Expected empty string for 
non-existent template, got %s", result) + } +} diff --git a/cmd/dev/registry.go b/cmd/dev/registry.go new file mode 100644 index 0000000..119d0b7 --- /dev/null +++ b/cmd/dev/registry.go @@ -0,0 +1,69 @@ +package dev + +import ( + "os" + "path/filepath" + "strings" + + "forge.lthn.ai/core/cli/cmd/workspace" + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/repos" +) + +// loadRegistryWithConfig loads the registry and applies workspace configuration. +func loadRegistryWithConfig(registryPath string) (*repos.Registry, string, error) { + var reg *repos.Registry + var err error + var registryDir string + + if registryPath != "" { + reg, err = repos.LoadRegistry(io.Local, registryPath) + if err != nil { + return nil, "", cli.Wrap(err, "failed to load registry") + } + cli.Print("%s %s\n\n", dimStyle.Render(i18n.Label("registry")), registryPath) + registryDir = filepath.Dir(registryPath) + } else { + registryPath, err = repos.FindRegistry(io.Local) + if err == nil { + reg, err = repos.LoadRegistry(io.Local, registryPath) + if err != nil { + return nil, "", cli.Wrap(err, "failed to load registry") + } + cli.Print("%s %s\n\n", dimStyle.Render(i18n.Label("registry")), registryPath) + registryDir = filepath.Dir(registryPath) + } else { + // Fallback: scan current directory + cwd, _ := os.Getwd() + reg, err = repos.ScanDirectory(io.Local, cwd) + if err != nil { + return nil, "", cli.Wrap(err, "failed to scan directory") + } + cli.Print("%s %s\n\n", dimStyle.Render(i18n.T("cmd.dev.scanning_label")), cwd) + registryDir = cwd + } + } + // Load workspace config to respect packages_dir (only if config exists) + if wsConfig, err := workspace.LoadConfig(registryDir); err == nil && wsConfig != nil { + if wsConfig.PackagesDir != "" { + pkgDir := wsConfig.PackagesDir + // Expand ~ + if strings.HasPrefix(pkgDir, "~/") { + home, _ := os.UserHomeDir() + pkgDir = filepath.Join(home, pkgDir[2:]) + } + if 
!filepath.IsAbs(pkgDir) { + pkgDir = filepath.Join(registryDir, pkgDir) + } + + // Update repo paths + for _, repo := range reg.Repos { + repo.Path = filepath.Join(pkgDir, repo.Name) + } + } + } + + return reg, registryDir, nil +} diff --git a/cmd/dev/service.go b/cmd/dev/service.go new file mode 100644 index 0000000..ab64cb1 --- /dev/null +++ b/cmd/dev/service.go @@ -0,0 +1,291 @@ +package dev + +import ( + "context" + "sort" + "strings" + + "forge.lthn.ai/core/go-agentic" + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/framework" + "forge.lthn.ai/core/go-scm/git" +) + +// Tasks for dev service + +// TaskWork runs the full dev workflow: status, commit, push. +type TaskWork struct { + RegistryPath string + StatusOnly bool + AutoCommit bool + AutoPush bool +} + +// TaskStatus displays git status for all repos. +type TaskStatus struct { + RegistryPath string +} + +// ServiceOptions for configuring the dev service. +type ServiceOptions struct { + RegistryPath string +} + +// Service provides dev workflow orchestration as a Core service. +type Service struct { + *framework.ServiceRuntime[ServiceOptions] +} + +// NewService creates a dev service factory. +func NewService(opts ServiceOptions) func(*framework.Core) (any, error) { + return func(c *framework.Core) (any, error) { + return &Service{ + ServiceRuntime: framework.NewServiceRuntime(c, opts), + }, nil + } +} + +// OnStartup registers task handlers. 
+func (s *Service) OnStartup(ctx context.Context) error { + s.Core().RegisterTask(s.handleTask) + return nil +} + +func (s *Service) handleTask(c *framework.Core, t framework.Task) (any, bool, error) { + switch m := t.(type) { + case TaskWork: + err := s.runWork(m) + return nil, true, err + + case TaskStatus: + err := s.runStatus(m) + return nil, true, err + } + return nil, false, nil +} + +func (s *Service) runWork(task TaskWork) error { + // Load registry + paths, names, err := s.loadRegistry(task.RegistryPath) + if err != nil { + return err + } + + if len(paths) == 0 { + cli.Println("No git repositories found") + return nil + } + + // QUERY git status + result, handled, err := s.Core().QUERY(git.QueryStatus{ + Paths: paths, + Names: names, + }) + if !handled { + return cli.Err("git service not available") + } + if err != nil { + return err + } + statuses := result.([]git.RepoStatus) + + // Sort by name + sort.Slice(statuses, func(i, j int) bool { + return statuses[i].Name < statuses[j].Name + }) + + // Display status table + s.printStatusTable(statuses) + + // Collect dirty and ahead repos + var dirtyRepos []git.RepoStatus + var aheadRepos []git.RepoStatus + + for _, st := range statuses { + if st.Error != nil { + continue + } + if st.IsDirty() { + dirtyRepos = append(dirtyRepos, st) + } + if st.HasUnpushed() { + aheadRepos = append(aheadRepos, st) + } + } + + // Auto-commit dirty repos if requested + if task.AutoCommit && len(dirtyRepos) > 0 { + cli.Blank() + cli.Println("Committing changes...") + cli.Blank() + + for _, repo := range dirtyRepos { + _, handled, err := s.Core().PERFORM(agentic.TaskCommit{ + Path: repo.Path, + Name: repo.Name, + }) + if !handled { + // Agentic service not available - skip silently + cli.Print(" - %s: agentic service not available\n", repo.Name) + continue + } + if err != nil { + cli.Print(" x %s: %s\n", repo.Name, err) + } else { + cli.Print(" v %s\n", repo.Name) + } + } + + // Re-query status after commits + result, _, _ = 
s.Core().QUERY(git.QueryStatus{ + Paths: paths, + Names: names, + }) + statuses = result.([]git.RepoStatus) + + // Rebuild ahead repos list + aheadRepos = nil + for _, st := range statuses { + if st.Error == nil && st.HasUnpushed() { + aheadRepos = append(aheadRepos, st) + } + } + } + + // If status only, we're done + if task.StatusOnly { + if len(dirtyRepos) > 0 && !task.AutoCommit { + cli.Blank() + cli.Println("Use --commit flag to auto-commit dirty repos") + } + return nil + } + + // Push repos with unpushed commits + if len(aheadRepos) == 0 { + cli.Blank() + cli.Println("All repositories are up to date") + return nil + } + + cli.Blank() + cli.Print("%d repos with unpushed commits:\n", len(aheadRepos)) + for _, st := range aheadRepos { + cli.Print(" %s: %d commits\n", st.Name, st.Ahead) + } + + if !task.AutoPush { + cli.Blank() + cli.Print("Push all? [y/N] ") + var answer string + _, _ = cli.Scanln(&answer) + if strings.ToLower(answer) != "y" { + cli.Println("Aborted") + return nil + } + } + + cli.Blank() + + // Push each repo + for _, st := range aheadRepos { + _, handled, err := s.Core().PERFORM(git.TaskPush{ + Path: st.Path, + Name: st.Name, + }) + if !handled { + cli.Print(" x %s: git service not available\n", st.Name) + continue + } + if err != nil { + if git.IsNonFastForward(err) { + cli.Print(" ! 
%s: branch has diverged\n", st.Name) + } else { + cli.Print(" x %s: %s\n", st.Name, err) + } + } else { + cli.Print(" v %s\n", st.Name) + } + } + + return nil +} + +func (s *Service) runStatus(task TaskStatus) error { + paths, names, err := s.loadRegistry(task.RegistryPath) + if err != nil { + return err + } + + if len(paths) == 0 { + cli.Println("No git repositories found") + return nil + } + + result, handled, err := s.Core().QUERY(git.QueryStatus{ + Paths: paths, + Names: names, + }) + if !handled { + return cli.Err("git service not available") + } + if err != nil { + return err + } + + statuses := result.([]git.RepoStatus) + sort.Slice(statuses, func(i, j int) bool { + return statuses[i].Name < statuses[j].Name + }) + + s.printStatusTable(statuses) + return nil +} + +func (s *Service) loadRegistry(registryPath string) ([]string, map[string]string, error) { + reg, _, err := loadRegistryWithConfig(registryPath) + if err != nil { + return nil, nil, err + } + + var paths []string + names := make(map[string]string) + + for _, repo := range reg.List() { + if repo.IsGitRepo() { + paths = append(paths, repo.Path) + names[repo.Path] = repo.Name + } + } + + return paths, names, nil +} + +func (s *Service) printStatusTable(statuses []git.RepoStatus) { + // Calculate column widths + nameWidth := 4 // "Repo" + for _, st := range statuses { + if len(st.Name) > nameWidth { + nameWidth = len(st.Name) + } + } + + // Print header + cli.Print("%-*s %8s %9s %6s %5s\n", + nameWidth, "Repo", "Modified", "Untracked", "Staged", "Ahead") + + // Print separator + cli.Text(strings.Repeat("-", nameWidth+2+10+11+8+7)) + + // Print rows + for _, st := range statuses { + if st.Error != nil { + cli.Print("%-*s error: %s\n", nameWidth, st.Name, st.Error) + continue + } + + cli.Print("%-*s %8d %9d %6d %5d\n", + nameWidth, st.Name, + st.Modified, st.Untracked, st.Staged, st.Ahead) + } +} diff --git a/cmd/docs/cmd_commands.go b/cmd/docs/cmd_commands.go new file mode 100644 index 0000000..86970ed 
--- /dev/null +++ b/cmd/docs/cmd_commands.go @@ -0,0 +1,20 @@ +// Package docs provides documentation management commands for multi-repo workspaces. +// +// Commands: +// - list: Scan repos for README.md, CLAUDE.md, CHANGELOG.md, docs/ +// - sync: Copy docs/ files from all repos to core-php/docs/packages/ +// +// Works with repos.yaml to discover repositories and sync documentation +// to a central location for unified documentation builds. +package docs + +import "forge.lthn.ai/core/go/pkg/cli" + +func init() { + cli.RegisterCommands(AddDocsCommands) +} + +// AddDocsCommands registers the 'docs' command and all subcommands. +func AddDocsCommands(root *cli.Command) { + root.AddCommand(docsCmd) +} diff --git a/cmd/docs/cmd_docs.go b/cmd/docs/cmd_docs.go new file mode 100644 index 0000000..b1b83a1 --- /dev/null +++ b/cmd/docs/cmd_docs.go @@ -0,0 +1,30 @@ +// Package docs provides documentation management commands. +package docs + +import ( + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// Style and utility aliases from shared +var ( + repoNameStyle = cli.RepoStyle + successStyle = cli.SuccessStyle + errorStyle = cli.ErrorStyle + dimStyle = cli.DimStyle + headerStyle = cli.HeaderStyle + confirm = cli.Confirm + docsFoundStyle = cli.SuccessStyle + docsFileStyle = cli.InfoStyle +) + +var docsCmd = &cli.Command{ + Use: "docs", + Short: i18n.T("cmd.docs.short"), + Long: i18n.T("cmd.docs.long"), +} + +func init() { + docsCmd.AddCommand(docsSyncCmd) + docsCmd.AddCommand(docsListCmd) +} diff --git a/cmd/docs/cmd_list.go b/cmd/docs/cmd_list.go new file mode 100644 index 0000000..4a4fd5e --- /dev/null +++ b/cmd/docs/cmd_list.go @@ -0,0 +1,83 @@ +package docs + +import ( + "strings" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// Flag variable for list command +var docsListRegistryPath string + +var docsListCmd = &cli.Command{ + Use: "list", + Short: i18n.T("cmd.docs.list.short"), + Long: i18n.T("cmd.docs.list.long"), + 
RunE: func(cmd *cli.Command, args []string) error { + return runDocsList(docsListRegistryPath) + }, +} + +func init() { + docsListCmd.Flags().StringVar(&docsListRegistryPath, "registry", "", i18n.T("common.flag.registry")) +} + +func runDocsList(registryPath string) error { + reg, _, err := loadRegistry(registryPath) + if err != nil { + return err + } + + cli.Print("\n%-20s %-8s %-8s %-10s %s\n", + headerStyle.Render(i18n.Label("repo")), + headerStyle.Render(i18n.T("cmd.docs.list.header.readme")), + headerStyle.Render(i18n.T("cmd.docs.list.header.claude")), + headerStyle.Render(i18n.T("cmd.docs.list.header.changelog")), + headerStyle.Render(i18n.T("cmd.docs.list.header.docs")), + ) + cli.Text(strings.Repeat("─", 70)) + + var withDocs, withoutDocs int + for _, repo := range reg.List() { + info := scanRepoDocs(repo) + + readme := checkMark(info.Readme != "") + claude := checkMark(info.ClaudeMd != "") + changelog := checkMark(info.Changelog != "") + + docsDir := checkMark(false) + if len(info.DocsFiles) > 0 { + docsDir = docsFoundStyle.Render(i18n.T("common.count.files", map[string]interface{}{"Count": len(info.DocsFiles)})) + } + + cli.Print("%-20s %-8s %-8s %-10s %s\n", + repoNameStyle.Render(info.Name), + readme, + claude, + changelog, + docsDir, + ) + + if info.HasDocs { + withDocs++ + } else { + withoutDocs++ + } + } + + cli.Blank() + cli.Print("%s %s\n", + cli.KeyStyle.Render(i18n.Label("coverage")), + i18n.T("cmd.docs.list.coverage_summary", map[string]interface{}{"WithDocs": withDocs, "WithoutDocs": withoutDocs}), + ) + + return nil +} + +func checkMark(ok bool) string { + if ok { + return cli.Glyph(":check:") + } + return cli.Glyph(":cross:") +} diff --git a/cmd/docs/cmd_scan.go b/cmd/docs/cmd_scan.go new file mode 100644 index 0000000..73f4542 --- /dev/null +++ b/cmd/docs/cmd_scan.go @@ -0,0 +1,159 @@ +package docs + +import ( + "io/fs" + "os" + "path/filepath" + "strings" + + "forge.lthn.ai/core/cli/cmd/workspace" + "forge.lthn.ai/core/go/pkg/cli" + 
"forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/repos" +) + +// RepoDocInfo holds documentation info for a repo +type RepoDocInfo struct { + Name string + Path string + HasDocs bool + Readme string + ClaudeMd string + Changelog string + DocsFiles []string // All files in docs/ directory (recursive) + KBFiles []string // All files in KB/ directory (recursive) +} + +func loadRegistry(registryPath string) (*repos.Registry, string, error) { + var reg *repos.Registry + var err error + var registryDir string + + if registryPath != "" { + reg, err = repos.LoadRegistry(io.Local, registryPath) + if err != nil { + return nil, "", cli.Wrap(err, i18n.T("i18n.fail.load", "registry")) + } + registryDir = filepath.Dir(registryPath) + } else { + registryPath, err = repos.FindRegistry(io.Local) + if err == nil { + reg, err = repos.LoadRegistry(io.Local, registryPath) + if err != nil { + return nil, "", cli.Wrap(err, i18n.T("i18n.fail.load", "registry")) + } + registryDir = filepath.Dir(registryPath) + } else { + cwd, _ := os.Getwd() + reg, err = repos.ScanDirectory(io.Local, cwd) + if err != nil { + return nil, "", cli.Wrap(err, i18n.T("i18n.fail.scan", "directory")) + } + registryDir = cwd + } + } + + // Load workspace config to respect packages_dir + wsConfig, err := workspace.LoadConfig(registryDir) + if err != nil { + return nil, "", cli.Wrap(err, i18n.T("i18n.fail.load", "workspace config")) + } + + basePath := registryDir + + if wsConfig != nil && wsConfig.PackagesDir != "" && wsConfig.PackagesDir != "./packages" { + pkgDir := wsConfig.PackagesDir + + // Expand ~ + if strings.HasPrefix(pkgDir, "~/") { + home, _ := os.UserHomeDir() + pkgDir = filepath.Join(home, pkgDir[2:]) + } + + if !filepath.IsAbs(pkgDir) { + pkgDir = filepath.Join(registryDir, pkgDir) + } + basePath = pkgDir + + // Update repo paths if they were relative to registry + // This ensures consistency when packages_dir overrides the default + reg.BasePath = basePath 
+ for _, repo := range reg.Repos { + repo.Path = filepath.Join(basePath, repo.Name) + } + } + + return reg, basePath, nil +} + +func scanRepoDocs(repo *repos.Repo) RepoDocInfo { + info := RepoDocInfo{ + Name: repo.Name, + Path: repo.Path, + } + + // Check for README.md + readme := filepath.Join(repo.Path, "README.md") + if io.Local.IsFile(readme) { + info.Readme = readme + info.HasDocs = true + } + + // Check for CLAUDE.md + claudeMd := filepath.Join(repo.Path, "CLAUDE.md") + if io.Local.IsFile(claudeMd) { + info.ClaudeMd = claudeMd + info.HasDocs = true + } + + // Check for CHANGELOG.md + changelog := filepath.Join(repo.Path, "CHANGELOG.md") + if io.Local.IsFile(changelog) { + info.Changelog = changelog + info.HasDocs = true + } + + // Recursively scan docs/ directory for .md files + docsDir := filepath.Join(repo.Path, "docs") + // Check if directory exists by listing it + if _, err := io.Local.List(docsDir); err == nil { + _ = filepath.WalkDir(docsDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return nil + } + // Skip plans/ directory + if d.IsDir() && d.Name() == "plans" { + return filepath.SkipDir + } + // Skip non-markdown files + if d.IsDir() || !strings.HasSuffix(d.Name(), ".md") { + return nil + } + // Get relative path from docs/ + relPath, _ := filepath.Rel(docsDir, path) + info.DocsFiles = append(info.DocsFiles, relPath) + info.HasDocs = true + return nil + }) + } + + // Recursively scan KB/ directory for .md files + kbDir := filepath.Join(repo.Path, "KB") + if _, err := io.Local.List(kbDir); err == nil { + _ = filepath.WalkDir(kbDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return nil + } + if d.IsDir() || !strings.HasSuffix(d.Name(), ".md") { + return nil + } + relPath, _ := filepath.Rel(kbDir, path) + info.KBFiles = append(info.KBFiles, relPath) + info.HasDocs = true + return nil + }) + } + + return info +} diff --git a/cmd/docs/cmd_sync.go b/cmd/docs/cmd_sync.go new file mode 100644 index 
0000000..fb2d6cb --- /dev/null +++ b/cmd/docs/cmd_sync.go @@ -0,0 +1,327 @@ +package docs + +import ( + "bytes" + "fmt" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/repos" +) + +// Flag variables for sync command +var ( + docsSyncRegistryPath string + docsSyncDryRun bool + docsSyncOutputDir string + docsSyncTarget string +) + +var docsSyncCmd = &cli.Command{ + Use: "sync", + Short: i18n.T("cmd.docs.sync.short"), + Long: i18n.T("cmd.docs.sync.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runDocsSync(docsSyncRegistryPath, docsSyncOutputDir, docsSyncDryRun, docsSyncTarget) + }, +} + +func init() { + docsSyncCmd.Flags().StringVar(&docsSyncRegistryPath, "registry", "", i18n.T("common.flag.registry")) + docsSyncCmd.Flags().BoolVar(&docsSyncDryRun, "dry-run", false, i18n.T("cmd.docs.sync.flag.dry_run")) + docsSyncCmd.Flags().StringVar(&docsSyncOutputDir, "output", "", i18n.T("cmd.docs.sync.flag.output")) + docsSyncCmd.Flags().StringVar(&docsSyncTarget, "target", "php", "Target format: php (default) or hugo") +} + +// packageOutputName maps repo name to output folder name +func packageOutputName(repoName string) string { + // core -> go (the Go framework) + if repoName == "core" { + return "go" + } + // core-admin -> admin, core-api -> api, etc. 
+ if strings.HasPrefix(repoName, "core-") { + return strings.TrimPrefix(repoName, "core-") + } + return repoName +} + +// shouldSyncRepo returns true if this repo should be synced +func shouldSyncRepo(repoName string) bool { + // Skip core-php (it's the destination) + if repoName == "core-php" { + return false + } + // Skip template + if repoName == "core-template" { + return false + } + return true +} + +func runDocsSync(registryPath string, outputDir string, dryRun bool, target string) error { + reg, basePath, err := loadRegistry(registryPath) + if err != nil { + return err + } + + switch target { + case "hugo": + return runHugoSync(reg, basePath, outputDir, dryRun) + default: + return runPHPSync(reg, basePath, outputDir, dryRun) + } +} + +func runPHPSync(reg *repos.Registry, basePath string, outputDir string, dryRun bool) error { + // Default output to core-php/docs/packages relative to registry + if outputDir == "" { + outputDir = filepath.Join(basePath, "core-php", "docs", "packages") + } + + // Scan all repos for docs + var docsInfo []RepoDocInfo + for _, repo := range reg.List() { + if !shouldSyncRepo(repo.Name) { + continue + } + info := scanRepoDocs(repo) + if info.HasDocs && len(info.DocsFiles) > 0 { + docsInfo = append(docsInfo, info) + } + } + + if len(docsInfo) == 0 { + cli.Text(i18n.T("cmd.docs.sync.no_docs_found")) + return nil + } + + cli.Print("\n%s %s\n\n", dimStyle.Render(i18n.T("cmd.docs.sync.found_label")), i18n.T("cmd.docs.sync.repos_with_docs", map[string]interface{}{"Count": len(docsInfo)})) + + // Show what will be synced + var totalFiles int + for _, info := range docsInfo { + totalFiles += len(info.DocsFiles) + outName := packageOutputName(info.Name) + cli.Print(" %s → %s %s\n", + repoNameStyle.Render(info.Name), + docsFileStyle.Render("packages/"+outName+"/"), + dimStyle.Render(i18n.T("cmd.docs.sync.files_count", map[string]interface{}{"Count": len(info.DocsFiles)}))) + + for _, f := range info.DocsFiles { + cli.Print(" %s\n", 
dimStyle.Render(f)) + } + } + + cli.Print("\n%s %s\n", + dimStyle.Render(i18n.Label("total")), + i18n.T("cmd.docs.sync.total_summary", map[string]interface{}{"Files": totalFiles, "Repos": len(docsInfo), "Output": outputDir})) + + if dryRun { + cli.Print("\n%s\n", dimStyle.Render(i18n.T("cmd.docs.sync.dry_run_notice"))) + return nil + } + + // Confirm + cli.Blank() + if !confirm(i18n.T("cmd.docs.sync.confirm")) { + cli.Text(i18n.T("common.prompt.abort")) + return nil + } + + // Sync docs + cli.Blank() + var synced int + for _, info := range docsInfo { + outName := packageOutputName(info.Name) + repoOutDir := filepath.Join(outputDir, outName) + + // Clear existing directory (recursively) + _ = io.Local.DeleteAll(repoOutDir) + + if err := io.Local.EnsureDir(repoOutDir); err != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("✗"), info.Name, err) + continue + } + + // Copy all docs files + docsDir := filepath.Join(info.Path, "docs") + for _, f := range info.DocsFiles { + src := filepath.Join(docsDir, f) + dst := filepath.Join(repoOutDir, f) + // Ensure parent dir + if err := io.Local.EnsureDir(filepath.Dir(dst)); err != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("✗"), f, err) + continue + } + + if err := io.Copy(io.Local, src, io.Local, dst); err != nil { + cli.Print(" %s %s: %s\n", errorStyle.Render("✗"), f, err) + } + } + + cli.Print(" %s %s → packages/%s/\n", successStyle.Render("✓"), info.Name, outName) + synced++ + } + + cli.Print("\n%s %s\n", successStyle.Render(i18n.T("i18n.done.sync")), i18n.T("cmd.docs.sync.synced_packages", map[string]interface{}{"Count": synced})) + + return nil +} + +// hugoOutputName maps repo name to Hugo content section and folder. 
+func hugoOutputName(repoName string) (string, string) { + if repoName == "cli" { + return "getting-started", "" + } + if repoName == "core" { + return "cli", "" + } + if strings.HasPrefix(repoName, "go-") { + return "go", repoName + } + if strings.HasPrefix(repoName, "core-") { + return "php", strings.TrimPrefix(repoName, "core-") + } + return "go", repoName +} + +// injectFrontMatter prepends Hugo front matter to markdown content if missing. +func injectFrontMatter(content []byte, title string, weight int) []byte { + if bytes.HasPrefix(bytes.TrimSpace(content), []byte("---")) { + return content + } + fm := fmt.Sprintf("---\ntitle: %q\nweight: %d\n---\n\n", title, weight) + return append([]byte(fm), content...) +} + +// titleFromFilename derives a human-readable title from a filename. +func titleFromFilename(filename string) string { + name := strings.TrimSuffix(filepath.Base(filename), ".md") + name = strings.ReplaceAll(name, "-", " ") + name = strings.ReplaceAll(name, "_", " ") + words := strings.Fields(name) + for i, w := range words { + if len(w) > 0 { + words[i] = strings.ToUpper(w[:1]) + w[1:] + } + } + return strings.Join(words, " ") +} + +// copyWithFrontMatter copies a markdown file, injecting front matter if missing. 
+func copyWithFrontMatter(src, dst string, weight int) error { + if err := io.Local.EnsureDir(filepath.Dir(dst)); err != nil { + return err + } + content, err := io.Local.Read(src) + if err != nil { + return err + } + title := titleFromFilename(src) + result := injectFrontMatter([]byte(content), title, weight) + return io.Local.Write(dst, string(result)) +} + +func runHugoSync(reg *repos.Registry, basePath string, outputDir string, dryRun bool) error { + if outputDir == "" { + outputDir = filepath.Join(basePath, "docs-site", "content") + } + + var docsInfo []RepoDocInfo + for _, repo := range reg.List() { + if repo.Name == "core-template" || repo.Name == "core-claude" { + continue + } + info := scanRepoDocs(repo) + if info.HasDocs { + docsInfo = append(docsInfo, info) + } + } + + if len(docsInfo) == 0 { + cli.Text("No documentation found") + return nil + } + + cli.Print("\n Hugo sync: %d repos with docs → %s\n\n", len(docsInfo), outputDir) + + for _, info := range docsInfo { + section, folder := hugoOutputName(info.Name) + target := section + if folder != "" { + target = section + "/" + folder + } + fileCount := len(info.DocsFiles) + len(info.KBFiles) + if info.Readme != "" { + fileCount++ + } + cli.Print(" %s → %s/ (%d files)\n", repoNameStyle.Render(info.Name), target, fileCount) + } + + if dryRun { + cli.Print("\n Dry run — no files written\n") + return nil + } + + cli.Blank() + if !confirm("Sync to Hugo content directory?") { + cli.Text("Aborted") + return nil + } + + cli.Blank() + var synced int + for _, info := range docsInfo { + section, folder := hugoOutputName(info.Name) + + destDir := filepath.Join(outputDir, section) + if folder != "" { + destDir = filepath.Join(destDir, folder) + } + + weight := 10 + docsDir := filepath.Join(info.Path, "docs") + for _, f := range info.DocsFiles { + src := filepath.Join(docsDir, f) + dst := filepath.Join(destDir, f) + if err := copyWithFrontMatter(src, dst, weight); err != nil { + cli.Print(" %s %s: %s\n", 
errorStyle.Render("✗"), f, err) + continue + } + weight += 10 + } + + if info.Readme != "" && folder != "" { + dst := filepath.Join(destDir, "_index.md") + if err := copyWithFrontMatter(info.Readme, dst, 1); err != nil { + cli.Print(" %s README: %s\n", errorStyle.Render("✗"), err) + } + } + + if len(info.KBFiles) > 0 { + suffix := strings.TrimPrefix(info.Name, "go-") + kbDestDir := filepath.Join(outputDir, "kb", suffix) + kbDir := filepath.Join(info.Path, "KB") + kbWeight := 10 + for _, f := range info.KBFiles { + src := filepath.Join(kbDir, f) + dst := filepath.Join(kbDestDir, f) + if err := copyWithFrontMatter(src, dst, kbWeight); err != nil { + cli.Print(" %s KB/%s: %s\n", errorStyle.Render("✗"), f, err) + continue + } + kbWeight += 10 + } + } + + cli.Print(" %s %s\n", successStyle.Render("✓"), info.Name) + synced++ + } + + cli.Print("\n Synced %d repos to Hugo content\n", synced) + return nil +} diff --git a/cmd/gitcmd/cmd_git.go b/cmd/gitcmd/cmd_git.go new file mode 100644 index 0000000..0024ecd --- /dev/null +++ b/cmd/gitcmd/cmd_git.go @@ -0,0 +1,44 @@ +// Package gitcmd provides git workflow commands as a root-level command. +// +// Git Operations: +// - health: Show status across repos +// - commit: Claude-assisted commit message generation +// - push: Push repos with unpushed commits +// - pull: Pull repos that are behind remote +// - work: Combined status, commit, and push workflow +// +// Safe Operations (for AI agents): +// - file-sync: Sync files across repos with auto commit/push +// - apply: Run command across repos with auto commit/push +package gitcmd + +import ( + "forge.lthn.ai/core/cli/cmd/dev" + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +func init() { + cli.RegisterCommands(AddGitCommands) +} + +// AddGitCommands registers the 'git' command and all subcommands. 
+func AddGitCommands(root *cli.Command) { + gitCmd := &cli.Command{ + Use: "git", + Short: i18n.T("cmd.git.short"), + Long: i18n.T("cmd.git.long"), + } + root.AddCommand(gitCmd) + + // Import git commands from dev package + dev.AddHealthCommand(gitCmd) // Shows repo status + dev.AddCommitCommand(gitCmd) + dev.AddPushCommand(gitCmd) + dev.AddPullCommand(gitCmd) + dev.AddWorkCommand(gitCmd) + + // Safe operations for AI agents + dev.AddFileSyncCommand(gitCmd) + dev.AddApplyCommand(gitCmd) +} diff --git a/cmd/monitor/cmd_commands.go b/cmd/monitor/cmd_commands.go new file mode 100644 index 0000000..0028da4 --- /dev/null +++ b/cmd/monitor/cmd_commands.go @@ -0,0 +1,47 @@ +// Package monitor provides security monitoring commands. +// +// Commands: +// - monitor: Aggregate security findings from GitHub Security Tab, workflow artifacts, and PR comments +// +// Data sources (all free tier): +// - Code scanning: Semgrep, Trivy, Gitleaks, OSV-Scanner, Checkov, CodeQL +// - Dependabot: Dependency vulnerability alerts +// - Secret scanning: Exposed secrets/credentials +package monitor + +import ( + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +func init() { + cli.RegisterCommands(AddMonitorCommands) +} + +// Style aliases from shared package +var ( + successStyle = cli.SuccessStyle + errorStyle = cli.ErrorStyle + warningStyle = cli.WarningStyle + dimStyle = cli.DimStyle +) + +// AddMonitorCommands registers the 'monitor' command. 
+func AddMonitorCommands(root *cli.Command) { + monitorCmd := &cli.Command{ + Use: "monitor", + Short: i18n.T("cmd.monitor.short"), + Long: i18n.T("cmd.monitor.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runMonitor() + }, + } + + // Flags + monitorCmd.Flags().StringVarP(&monitorRepo, "repo", "r", "", i18n.T("cmd.monitor.flag.repo")) + monitorCmd.Flags().StringSliceVarP(&monitorSeverity, "severity", "s", []string{}, i18n.T("cmd.monitor.flag.severity")) + monitorCmd.Flags().BoolVar(&monitorJSON, "json", false, i18n.T("cmd.monitor.flag.json")) + monitorCmd.Flags().BoolVar(&monitorAll, "all", false, i18n.T("cmd.monitor.flag.all")) + + root.AddCommand(monitorCmd) +} diff --git a/cmd/monitor/cmd_monitor.go b/cmd/monitor/cmd_monitor.go new file mode 100644 index 0000000..bd19b3e --- /dev/null +++ b/cmd/monitor/cmd_monitor.go @@ -0,0 +1,590 @@ +// cmd_monitor.go implements the 'monitor' command for aggregating security findings. +// +// Usage: +// core monitor # Monitor current repo +// core monitor --repo X # Monitor specific repo +// core monitor --all # Monitor all repos in registry +// core monitor --severity high # Filter by severity +// core monitor --json # Output as JSON + +package monitor + +import ( + "encoding/json" + "fmt" + "os/exec" + "sort" + "strings" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/log" + "forge.lthn.ai/core/go/pkg/repos" +) + +// Command flags +var ( + monitorRepo string + monitorSeverity []string + monitorJSON bool + monitorAll bool +) + +// Finding represents a security finding from any source +type Finding struct { + Source string `json:"source"` // semgrep, trivy, dependabot, secret-scanning, etc. 
+ Severity string `json:"severity"` // critical, high, medium, low + Rule string `json:"rule"` // Rule ID or CVE + File string `json:"file"` // Affected file path + Line int `json:"line"` // Line number (0 if N/A) + Message string `json:"message"` // Description + URL string `json:"url"` // Link to finding + State string `json:"state"` // open, dismissed, fixed + RepoName string `json:"repo"` // Repository name + CreatedAt string `json:"created_at"` // When found + Labels []string `json:"suggested_labels,omitempty"` +} + +// CodeScanningAlert represents a GitHub code scanning alert +type CodeScanningAlert struct { + Number int `json:"number"` + State string `json:"state"` // open, dismissed, fixed + Rule struct { + ID string `json:"id"` + Severity string `json:"severity"` + Description string `json:"description"` + } `json:"rule"` + Tool struct { + Name string `json:"name"` + } `json:"tool"` + MostRecentInstance struct { + Location struct { + Path string `json:"path"` + StartLine int `json:"start_line"` + } `json:"location"` + Message struct { + Text string `json:"text"` + } `json:"message"` + } `json:"most_recent_instance"` + HTMLURL string `json:"html_url"` + CreatedAt string `json:"created_at"` +} + +// DependabotAlert represents a GitHub Dependabot alert +type DependabotAlert struct { + Number int `json:"number"` + State string `json:"state"` // open, dismissed, fixed + SecurityVulnerability struct { + Severity string `json:"severity"` + Package struct { + Name string `json:"name"` + Ecosystem string `json:"ecosystem"` + } `json:"package"` + } `json:"security_vulnerability"` + SecurityAdvisory struct { + CVEID string `json:"cve_id"` + Summary string `json:"summary"` + Description string `json:"description"` + } `json:"security_advisory"` + Dependency struct { + ManifestPath string `json:"manifest_path"` + } `json:"dependency"` + HTMLURL string `json:"html_url"` + CreatedAt string `json:"created_at"` +} + +// SecretScanningAlert represents a GitHub secret 
scanning alert +type SecretScanningAlert struct { + Number int `json:"number"` + State string `json:"state"` // open, resolved + SecretType string `json:"secret_type"` + Secret string `json:"secret"` // Partial, redacted + HTMLURL string `json:"html_url"` + LocationType string `json:"location_type"` + CreatedAt string `json:"created_at"` +} + +func runMonitor() error { + // Check gh is available + if _, err := exec.LookPath("gh"); err != nil { + return log.E("monitor", i18n.T("error.gh_not_found"), err) + } + + // Determine repos to scan + repoList, err := resolveRepos() + if err != nil { + return err + } + + if len(repoList) == 0 { + return log.E("monitor", i18n.T("cmd.monitor.error.no_repos"), nil) + } + + // Collect all findings and errors + var allFindings []Finding + var fetchErrors []string + for _, repo := range repoList { + if !monitorJSON { + cli.Print("\033[2K\r%s %s...", dimStyle.Render(i18n.T("cmd.monitor.scanning")), repo) + } + + findings, errs := fetchRepoFindings(repo) + allFindings = append(allFindings, findings...) + fetchErrors = append(fetchErrors, errs...) 
+ } + + // Filter by severity if specified + if len(monitorSeverity) > 0 { + allFindings = filterBySeverity(allFindings, monitorSeverity) + } + + // Sort by severity (critical first) + sortBySeverity(allFindings) + + // Output + if monitorJSON { + return outputJSON(allFindings) + } + + cli.Print("\033[2K\r") // Clear scanning line + + // Show any fetch errors as warnings + if len(fetchErrors) > 0 { + for _, e := range fetchErrors { + cli.Print("%s %s\n", warningStyle.Render("!"), e) + } + cli.Blank() + } + + return outputTable(allFindings) +} + +// resolveRepos determines which repos to scan +func resolveRepos() ([]string, error) { + if monitorRepo != "" { + // Specific repo - if fully qualified (org/repo), use as-is + if strings.Contains(monitorRepo, "/") { + return []string{monitorRepo}, nil + } + // Otherwise, try to detect org from git remote, fallback to host-uk + // Note: Users outside host-uk org should use fully qualified names + org := detectOrgFromGit() + if org == "" { + org = "host-uk" + } + return []string{org + "/" + monitorRepo}, nil + } + + if monitorAll { + // All repos from registry + registry, err := repos.FindRegistry(io.Local) + if err != nil { + return nil, log.E("monitor", "failed to find registry", err) + } + + loaded, err := repos.LoadRegistry(io.Local, registry) + if err != nil { + return nil, log.E("monitor", "failed to load registry", err) + } + + var repoList []string + for _, r := range loaded.Repos { + repoList = append(repoList, loaded.Org+"/"+r.Name) + } + return repoList, nil + } + + // Default to current repo + repo, err := detectRepoFromGit() + if err != nil { + return nil, err + } + return []string{repo}, nil +} + +// fetchRepoFindings fetches all security findings for a repo +// Returns findings and any errors encountered (errors don't stop other fetches) +func fetchRepoFindings(repoFullName string) ([]Finding, []string) { + var findings []Finding + var errs []string + repoName := strings.Split(repoFullName, "/")[1] + + // 
Fetch code scanning alerts + codeFindings, err := fetchCodeScanningAlerts(repoFullName) + if err != nil { + errs = append(errs, fmt.Sprintf("%s: code-scanning: %s", repoName, err)) + } + findings = append(findings, codeFindings...) + + // Fetch Dependabot alerts + depFindings, err := fetchDependabotAlerts(repoFullName) + if err != nil { + errs = append(errs, fmt.Sprintf("%s: dependabot: %s", repoName, err)) + } + findings = append(findings, depFindings...) + + // Fetch secret scanning alerts + secretFindings, err := fetchSecretScanningAlerts(repoFullName) + if err != nil { + errs = append(errs, fmt.Sprintf("%s: secret-scanning: %s", repoName, err)) + } + findings = append(findings, secretFindings...) + + return findings, errs +} + +// fetchCodeScanningAlerts fetches code scanning alerts +func fetchCodeScanningAlerts(repoFullName string) ([]Finding, error) { + args := []string{ + "api", + fmt.Sprintf("repos/%s/code-scanning/alerts", repoFullName), + } + + cmd := exec.Command("gh", args...) 
+ output, err := cmd.Output() + if err != nil { + // Check for expected "not enabled" responses vs actual errors + if exitErr, ok := err.(*exec.ExitError); ok { + stderr := string(exitErr.Stderr) + // These are expected conditions, not errors + if strings.Contains(stderr, "Advanced Security must be enabled") || + strings.Contains(stderr, "no analysis found") || + strings.Contains(stderr, "Not Found") { + return nil, nil + } + } + return nil, log.E("monitor.fetchCodeScanning", "API request failed", err) + } + + var alerts []CodeScanningAlert + if err := json.Unmarshal(output, &alerts); err != nil { + return nil, log.E("monitor.fetchCodeScanning", "failed to parse response", err) + } + + repoName := strings.Split(repoFullName, "/")[1] + var findings []Finding + for _, alert := range alerts { + if alert.State != "open" { + continue + } + f := Finding{ + Source: alert.Tool.Name, + Severity: normalizeSeverity(alert.Rule.Severity), + Rule: alert.Rule.ID, + File: alert.MostRecentInstance.Location.Path, + Line: alert.MostRecentInstance.Location.StartLine, + Message: alert.MostRecentInstance.Message.Text, + URL: alert.HTMLURL, + State: alert.State, + RepoName: repoName, + CreatedAt: alert.CreatedAt, + Labels: []string{"type:security"}, + } + if f.Message == "" { + f.Message = alert.Rule.Description + } + findings = append(findings, f) + } + + return findings, nil +} + +// fetchDependabotAlerts fetches Dependabot alerts +func fetchDependabotAlerts(repoFullName string) ([]Finding, error) { + args := []string{ + "api", + fmt.Sprintf("repos/%s/dependabot/alerts", repoFullName), + } + + cmd := exec.Command("gh", args...) 
+ output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + stderr := string(exitErr.Stderr) + // Dependabot not enabled is expected + if strings.Contains(stderr, "Dependabot alerts are not enabled") || + strings.Contains(stderr, "Not Found") { + return nil, nil + } + } + return nil, log.E("monitor.fetchDependabot", "API request failed", err) + } + + var alerts []DependabotAlert + if err := json.Unmarshal(output, &alerts); err != nil { + return nil, log.E("monitor.fetchDependabot", "failed to parse response", err) + } + + repoName := strings.Split(repoFullName, "/")[1] + var findings []Finding + for _, alert := range alerts { + if alert.State != "open" { + continue + } + f := Finding{ + Source: "dependabot", + Severity: normalizeSeverity(alert.SecurityVulnerability.Severity), + Rule: alert.SecurityAdvisory.CVEID, + File: alert.Dependency.ManifestPath, + Line: 0, + Message: fmt.Sprintf("%s: %s", alert.SecurityVulnerability.Package.Name, alert.SecurityAdvisory.Summary), + URL: alert.HTMLURL, + State: alert.State, + RepoName: repoName, + CreatedAt: alert.CreatedAt, + Labels: []string{"type:security", "dependencies"}, + } + findings = append(findings, f) + } + + return findings, nil +} + +// fetchSecretScanningAlerts fetches secret scanning alerts +func fetchSecretScanningAlerts(repoFullName string) ([]Finding, error) { + args := []string{ + "api", + fmt.Sprintf("repos/%s/secret-scanning/alerts", repoFullName), + } + + cmd := exec.Command("gh", args...) 
+ output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + stderr := string(exitErr.Stderr) + // Secret scanning not enabled is expected + if strings.Contains(stderr, "Secret scanning is disabled") || + strings.Contains(stderr, "Not Found") { + return nil, nil + } + } + return nil, log.E("monitor.fetchSecretScanning", "API request failed", err) + } + + var alerts []SecretScanningAlert + if err := json.Unmarshal(output, &alerts); err != nil { + return nil, log.E("monitor.fetchSecretScanning", "failed to parse response", err) + } + + repoName := strings.Split(repoFullName, "/")[1] + var findings []Finding + for _, alert := range alerts { + if alert.State != "open" { + continue + } + f := Finding{ + Source: "secret-scanning", + Severity: "critical", // Secrets are always critical + Rule: alert.SecretType, + File: alert.LocationType, + Line: 0, + Message: fmt.Sprintf("Exposed %s detected", alert.SecretType), + URL: alert.HTMLURL, + State: alert.State, + RepoName: repoName, + CreatedAt: alert.CreatedAt, + Labels: []string{"type:security", "secrets"}, + } + findings = append(findings, f) + } + + return findings, nil +} + +// normalizeSeverity normalizes severity strings to standard values +func normalizeSeverity(s string) string { + s = strings.ToLower(s) + switch s { + case "critical", "crit": + return "critical" + case "high", "error": + return "high" + case "medium", "moderate", "warning": + return "medium" + case "low", "info", "note": + return "low" + default: + return "medium" + } +} + +// filterBySeverity filters findings by severity +func filterBySeverity(findings []Finding, severities []string) []Finding { + sevSet := make(map[string]bool) + for _, s := range severities { + sevSet[strings.ToLower(s)] = true + } + + var filtered []Finding + for _, f := range findings { + if sevSet[f.Severity] { + filtered = append(filtered, f) + } + } + return filtered +} + +// sortBySeverity sorts findings by severity (critical first) +func 
sortBySeverity(findings []Finding) { + severityOrder := map[string]int{ + "critical": 0, + "high": 1, + "medium": 2, + "low": 3, + } + + sort.Slice(findings, func(i, j int) bool { + oi := severityOrder[findings[i].Severity] + oj := severityOrder[findings[j].Severity] + if oi != oj { + return oi < oj + } + return findings[i].RepoName < findings[j].RepoName + }) +} + +// outputJSON outputs findings as JSON +func outputJSON(findings []Finding) error { + data, err := json.MarshalIndent(findings, "", " ") + if err != nil { + return log.E("monitor", "failed to marshal findings", err) + } + cli.Print("%s\n", string(data)) + return nil +} + +// outputTable outputs findings as a formatted table +func outputTable(findings []Finding) error { + if len(findings) == 0 { + cli.Print("%s\n", successStyle.Render(i18n.T("cmd.monitor.no_findings"))) + return nil + } + + // Count by severity + counts := make(map[string]int) + for _, f := range findings { + counts[f.Severity]++ + } + + // Header summary + var parts []string + if counts["critical"] > 0 { + parts = append(parts, errorStyle.Render(fmt.Sprintf("%d critical", counts["critical"]))) + } + if counts["high"] > 0 { + parts = append(parts, errorStyle.Render(fmt.Sprintf("%d high", counts["high"]))) + } + if counts["medium"] > 0 { + parts = append(parts, warningStyle.Render(fmt.Sprintf("%d medium", counts["medium"]))) + } + if counts["low"] > 0 { + parts = append(parts, dimStyle.Render(fmt.Sprintf("%d low", counts["low"]))) + } + cli.Print("%s: %s\n", i18n.T("cmd.monitor.found"), strings.Join(parts, ", ")) + cli.Blank() + + // Group by repo + byRepo := make(map[string][]Finding) + for _, f := range findings { + byRepo[f.RepoName] = append(byRepo[f.RepoName], f) + } + + // Sort repos for consistent output + repoNames := make([]string, 0, len(byRepo)) + for repo := range byRepo { + repoNames = append(repoNames, repo) + } + sort.Strings(repoNames) + + // Print by repo + for _, repo := range repoNames { + repoFindings := byRepo[repo] + 
cli.Print("%s\n", cli.BoldStyle.Render(repo)) + for _, f := range repoFindings { + sevStyle := dimStyle + switch f.Severity { + case "critical", "high": + sevStyle = errorStyle + case "medium": + sevStyle = warningStyle + } + + // Format: [severity] source: message (file:line) + location := "" + if f.File != "" { + location = f.File + if f.Line > 0 { + location = fmt.Sprintf("%s:%d", f.File, f.Line) + } + } + + cli.Print(" %s %s: %s", + sevStyle.Render(fmt.Sprintf("[%s]", f.Severity)), + dimStyle.Render(f.Source), + truncate(f.Message, 60)) + if location != "" { + cli.Print(" %s", dimStyle.Render("("+location+")")) + } + cli.Blank() + } + cli.Blank() + } + + return nil +} + +// truncate truncates a string to max runes (Unicode-safe) +func truncate(s string, max int) string { + runes := []rune(s) + if len(runes) <= max { + return s + } + return string(runes[:max-3]) + "..." +} + +// detectRepoFromGit detects the repo from git remote +func detectRepoFromGit() (string, error) { + cmd := exec.Command("git", "remote", "get-url", "origin") + output, err := cmd.Output() + if err != nil { + return "", log.E("monitor", i18n.T("cmd.monitor.error.not_git_repo"), err) + } + + url := strings.TrimSpace(string(output)) + return parseGitHubRepo(url) +} + +// detectOrgFromGit tries to detect the org from git remote +func detectOrgFromGit() string { + repo, err := detectRepoFromGit() + if err != nil { + return "" + } + parts := strings.Split(repo, "/") + if len(parts) >= 1 { + return parts[0] + } + return "" +} + +// parseGitHubRepo extracts org/repo from a git URL +func parseGitHubRepo(url string) (string, error) { + // Handle SSH URLs: git@github.com:org/repo.git + if strings.HasPrefix(url, "git@github.com:") { + path := strings.TrimPrefix(url, "git@github.com:") + path = strings.TrimSuffix(path, ".git") + return path, nil + } + + // Handle HTTPS URLs: https://github.com/org/repo.git + if strings.Contains(url, "github.com/") { + parts := strings.Split(url, "github.com/") + if 
len(parts) >= 2 { + path := strings.TrimSuffix(parts[1], ".git") + return path, nil + } + } + + return "", fmt.Errorf("could not parse GitHub repo from URL: %s", url) +} diff --git a/cmd/qa/cmd_docblock.go b/cmd/qa/cmd_docblock.go new file mode 100644 index 0000000..7e9cde7 --- /dev/null +++ b/cmd/qa/cmd_docblock.go @@ -0,0 +1,353 @@ +// cmd_docblock.go implements docblock/docstring coverage checking for Go code. +// +// Usage: +// +// core qa docblock # Check current directory +// core qa docblock ./pkg/... # Check specific packages +// core qa docblock --threshold=80 # Require 80% coverage +package qa + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "path/filepath" + "sort" + "strings" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +// Docblock command flags +var ( + docblockThreshold float64 + docblockVerbose bool + docblockJSON bool +) + +// addDocblockCommand adds the 'docblock' command to qa. +func addDocblockCommand(parent *cli.Command) { + docblockCmd := &cli.Command{ + Use: "docblock [packages...]", + Short: i18n.T("cmd.qa.docblock.short"), + Long: i18n.T("cmd.qa.docblock.long"), + RunE: func(cmd *cli.Command, args []string) error { + paths := args + if len(paths) == 0 { + paths = []string{"./..."} + } + return RunDocblockCheck(paths, docblockThreshold, docblockVerbose, docblockJSON) + }, + } + + docblockCmd.Flags().Float64Var(&docblockThreshold, "threshold", 80, i18n.T("cmd.qa.docblock.flag.threshold")) + docblockCmd.Flags().BoolVarP(&docblockVerbose, "verbose", "v", false, i18n.T("common.flag.verbose")) + docblockCmd.Flags().BoolVar(&docblockJSON, "json", false, i18n.T("common.flag.json")) + + parent.AddCommand(docblockCmd) +} + +// DocblockResult holds the result of a docblock coverage check. 
+type DocblockResult struct { + Coverage float64 `json:"coverage"` + Threshold float64 `json:"threshold"` + Total int `json:"total"` + Documented int `json:"documented"` + Missing []MissingDocblock `json:"missing,omitempty"` + Passed bool `json:"passed"` +} + +// MissingDocblock represents an exported symbol without documentation. +type MissingDocblock struct { + File string `json:"file"` + Line int `json:"line"` + Name string `json:"name"` + Kind string `json:"kind"` // func, type, const, var + Reason string `json:"reason,omitempty"` +} + +// RunDocblockCheck checks docblock coverage for the given packages. +func RunDocblockCheck(paths []string, threshold float64, verbose, jsonOutput bool) error { + result, err := CheckDocblockCoverage(paths) + if err != nil { + return err + } + result.Threshold = threshold + result.Passed = result.Coverage >= threshold + + if jsonOutput { + data, err := json.MarshalIndent(result, "", " ") + if err != nil { + return err + } + fmt.Println(string(data)) + if !result.Passed { + return cli.Err("docblock coverage %.1f%% below threshold %.1f%%", result.Coverage, threshold) + } + return nil + } + + // Sort missing by file then line + sort.Slice(result.Missing, func(i, j int) bool { + if result.Missing[i].File != result.Missing[j].File { + return result.Missing[i].File < result.Missing[j].File + } + return result.Missing[i].Line < result.Missing[j].Line + }) + + // Print result + if verbose && len(result.Missing) > 0 { + cli.Print("%s\n\n", i18n.T("cmd.qa.docblock.missing_docs")) + for _, m := range result.Missing { + cli.Print(" %s:%d: %s %s\n", + dimStyle.Render(m.File), + m.Line, + dimStyle.Render(m.Kind), + m.Name, + ) + } + cli.Blank() + } + + // Summary + coverageStr := fmt.Sprintf("%.1f%%", result.Coverage) + thresholdStr := fmt.Sprintf("%.1f%%", threshold) + + if result.Passed { + cli.Print("%s %s %s/%s (%s >= %s)\n", + successStyle.Render(i18n.T("common.label.success")), + i18n.T("cmd.qa.docblock.coverage"), + fmt.Sprintf("%d", 
result.Documented), + fmt.Sprintf("%d", result.Total), + successStyle.Render(coverageStr), + thresholdStr, + ) + return nil + } + + cli.Print("%s %s %s/%s (%s < %s)\n", + errorStyle.Render(i18n.T("common.label.error")), + i18n.T("cmd.qa.docblock.coverage"), + fmt.Sprintf("%d", result.Documented), + fmt.Sprintf("%d", result.Total), + errorStyle.Render(coverageStr), + thresholdStr, + ) + + // Always show compact file:line list when failing (token-efficient for AI agents) + if len(result.Missing) > 0 { + cli.Blank() + for _, m := range result.Missing { + cli.Print("%s:%d\n", m.File, m.Line) + } + } + + return cli.Err("docblock coverage %.1f%% below threshold %.1f%%", result.Coverage, threshold) +} + +// CheckDocblockCoverage analyzes Go packages for docblock coverage. +func CheckDocblockCoverage(patterns []string) (*DocblockResult, error) { + result := &DocblockResult{} + + // Expand patterns to actual directories + dirs, err := expandPatterns(patterns) + if err != nil { + return nil, err + } + + fset := token.NewFileSet() + + for _, dir := range dirs { + pkgs, err := parser.ParseDir(fset, dir, func(fi os.FileInfo) bool { + return !strings.HasSuffix(fi.Name(), "_test.go") + }, parser.ParseComments) + if err != nil { + // Log parse errors but continue to check other directories + cli.Warnf("failed to parse %s: %v", dir, err) + continue + } + + for _, pkg := range pkgs { + for filename, file := range pkg.Files { + checkFile(fset, filename, file, result) + } + } + } + + if result.Total > 0 { + result.Coverage = float64(result.Documented) / float64(result.Total) * 100 + } + + return result, nil +} + +// expandPatterns expands Go package patterns like ./... to actual directories. +func expandPatterns(patterns []string) ([]string, error) { + var dirs []string + seen := make(map[string]bool) + + for _, pattern := range patterns { + if strings.HasSuffix(pattern, "/...") { + // Recursive pattern + base := strings.TrimSuffix(pattern, "/...") + if base == "." { + base = "." 
+ } + err := filepath.Walk(base, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil // Skip errors + } + if !info.IsDir() { + return nil + } + // Skip vendor, testdata, and hidden directories (but not "." itself) + name := info.Name() + if name == "vendor" || name == "testdata" || (strings.HasPrefix(name, ".") && name != ".") { + return filepath.SkipDir + } + // Check if directory has Go files + if hasGoFiles(path) && !seen[path] { + dirs = append(dirs, path) + seen[path] = true + } + return nil + }) + if err != nil { + return nil, err + } + } else { + // Single directory + path := pattern + if !seen[path] && hasGoFiles(path) { + dirs = append(dirs, path) + seen[path] = true + } + } + } + + return dirs, nil +} + +// hasGoFiles checks if a directory contains Go files. +func hasGoFiles(dir string) bool { + entries, err := os.ReadDir(dir) + if err != nil { + return false + } + for _, entry := range entries { + if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".go") && !strings.HasSuffix(entry.Name(), "_test.go") { + return true + } + } + return false +} + +// checkFile analyzes a single file for docblock coverage. 
+func checkFile(fset *token.FileSet, filename string, file *ast.File, result *DocblockResult) { + // Make filename relative if possible + if cwd, err := os.Getwd(); err == nil { + if rel, err := filepath.Rel(cwd, filename); err == nil { + filename = rel + } + } + + for _, decl := range file.Decls { + switch d := decl.(type) { + case *ast.FuncDecl: + // Skip unexported functions + if !ast.IsExported(d.Name.Name) { + continue + } + // Skip methods on unexported types + if d.Recv != nil && len(d.Recv.List) > 0 { + if recvType := getReceiverTypeName(d.Recv.List[0].Type); recvType != "" && !ast.IsExported(recvType) { + continue + } + } + + result.Total++ + if d.Doc != nil && len(d.Doc.List) > 0 { + result.Documented++ + } else { + pos := fset.Position(d.Pos()) + result.Missing = append(result.Missing, MissingDocblock{ + File: filename, + Line: pos.Line, + Name: d.Name.Name, + Kind: "func", + }) + } + + case *ast.GenDecl: + for _, spec := range d.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + if !ast.IsExported(s.Name.Name) { + continue + } + result.Total++ + // Type can have doc on GenDecl or TypeSpec + if (d.Doc != nil && len(d.Doc.List) > 0) || (s.Doc != nil && len(s.Doc.List) > 0) { + result.Documented++ + } else { + pos := fset.Position(s.Pos()) + result.Missing = append(result.Missing, MissingDocblock{ + File: filename, + Line: pos.Line, + Name: s.Name.Name, + Kind: "type", + }) + } + + case *ast.ValueSpec: + // Check exported consts and vars + for _, name := range s.Names { + if !ast.IsExported(name.Name) { + continue + } + result.Total++ + // Value can have doc on GenDecl or ValueSpec + if (d.Doc != nil && len(d.Doc.List) > 0) || (s.Doc != nil && len(s.Doc.List) > 0) { + result.Documented++ + } else { + pos := fset.Position(name.Pos()) + result.Missing = append(result.Missing, MissingDocblock{ + File: filename, + Line: pos.Line, + Name: name.Name, + Kind: kindFromToken(d.Tok), + }) + } + } + } + } + } + } +} + +// getReceiverTypeName extracts the 
type name from a method receiver. +func getReceiverTypeName(expr ast.Expr) string { + switch t := expr.(type) { + case *ast.Ident: + return t.Name + case *ast.StarExpr: + return getReceiverTypeName(t.X) + } + return "" +} + +// kindFromToken returns a string representation of the token kind. +func kindFromToken(tok token.Token) string { + switch tok { + case token.CONST: + return "const" + case token.VAR: + return "var" + default: + return "value" + } +} diff --git a/cmd/qa/cmd_health.go b/cmd/qa/cmd_health.go new file mode 100644 index 0000000..2389c4d --- /dev/null +++ b/cmd/qa/cmd_health.go @@ -0,0 +1,289 @@ +// cmd_health.go implements the 'qa health' command for aggregate CI health. +// +// Usage: +// core qa health # Show CI health summary +// core qa health --problems # Show only repos with problems + +package qa + +import ( + "encoding/json" + "os/exec" + "sort" + "strings" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/log" + "forge.lthn.ai/core/go/pkg/repos" +) + +// Health command flags +var ( + healthProblems bool + healthRegistry string +) + +// HealthWorkflowRun represents a GitHub Actions workflow run +type HealthWorkflowRun struct { + Status string `json:"status"` + Conclusion string `json:"conclusion"` + Name string `json:"name"` + HeadSha string `json:"headSha"` + UpdatedAt string `json:"updatedAt"` + URL string `json:"url"` +} + +// RepoHealth represents the CI health of a single repo +type RepoHealth struct { + Name string + Status string // "passing", "failing", "pending", "no_ci", "disabled" + Message string + URL string + FailingSince string +} + +// addHealthCommand adds the 'health' subcommand to qa. 
func addHealthCommand(parent *cli.Command) {
	healthCmd := &cli.Command{
		Use:   "health",
		Short: i18n.T("cmd.qa.health.short"),
		Long:  i18n.T("cmd.qa.health.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			return runHealth()
		},
	}

	healthCmd.Flags().BoolVarP(&healthProblems, "problems", "p", false, i18n.T("cmd.qa.health.flag.problems"))
	healthCmd.Flags().StringVar(&healthRegistry, "registry", "", i18n.T("common.flag.registry"))

	parent.AddCommand(healthCmd)
}

// runHealth loads the repo registry, queries the latest workflow run for
// every repo via the gh CLI, and prints a grouped health summary
// (failing/pending/no_ci/disabled first, passing last).
func runHealth() error {
	// Check gh is available
	if _, err := exec.LookPath("gh"); err != nil {
		return log.E("qa.health", i18n.T("error.gh_not_found"), nil)
	}

	// Load registry: explicit path wins, otherwise discover it.
	var reg *repos.Registry
	var err error

	if healthRegistry != "" {
		reg, err = repos.LoadRegistry(io.Local, healthRegistry)
	} else {
		registryPath, findErr := repos.FindRegistry(io.Local)
		if findErr != nil {
			return log.E("qa.health", i18n.T("error.registry_not_found"), nil)
		}
		reg, err = repos.LoadRegistry(io.Local, registryPath)
	}
	if err != nil {
		return log.E("qa.health", "failed to load registry", err)
	}

	// Fetch CI status from all repos, with an in-place progress line.
	// "\033[2K\r" clears the current terminal line and returns the cursor.
	var healthResults []RepoHealth
	repoList := reg.List()

	for i, repo := range repoList {
		cli.Print("\033[2K\r%s %d/%d %s",
			dimStyle.Render(i18n.T("cmd.qa.issues.fetching")),
			i+1, len(repoList), repo.Name)

		health := fetchRepoHealth(reg.Org, repo.Name)
		healthResults = append(healthResults, health)
	}
	cli.Print("\033[2K\r") // Clear progress

	// Sort: problems first, then passing
	sort.Slice(healthResults, func(i, j int) bool {
		return healthPriority(healthResults[i].Status) < healthPriority(healthResults[j].Status)
	})

	// Filter if --problems flag
	if healthProblems {
		var problems []RepoHealth
		for _, h := range healthResults {
			if h.Status != "passing" {
				problems = append(problems, h)
			}
		}
		healthResults = problems
	}

	// Calculate summary (percentage is over ALL repos, even when filtered).
	passing := 0
	for _, h := range healthResults {
		if h.Status == "passing" {
			passing++
		}
	}
	total := len(repoList)
	percentage := 0
	if total > 0 {
		percentage = (passing * 100) / total
	}

	// Print summary
	cli.Print("%s: %d/%d repos healthy (%d%%)\n\n",
		i18n.T("cmd.qa.health.summary"),
		passing, total, percentage)

	if len(healthResults) == 0 {
		cli.Text(i18n.T("cmd.qa.health.all_healthy"))
		return nil
	}

	// Group by status
	grouped := make(map[string][]RepoHealth)
	for _, h := range healthResults {
		grouped[h.Status] = append(grouped[h.Status], h)
	}

	// Print problems first
	printHealthGroup("failing", grouped["failing"], errorStyle)
	printHealthGroup("pending", grouped["pending"], warningStyle)
	printHealthGroup("no_ci", grouped["no_ci"], dimStyle)
	printHealthGroup("disabled", grouped["disabled"], dimStyle)

	if !healthProblems {
		printHealthGroup("passing", grouped["passing"], successStyle)
	}

	return nil
}

// fetchRepoHealth asks gh for the single most recent workflow run of
// org/repoName and maps its status/conclusion onto a RepoHealth. All
// failure modes degrade to "no_ci" with an explanatory message rather
// than returning an error, so one bad repo cannot abort the scan.
func fetchRepoHealth(org, repoName string) RepoHealth {
	repoFullName := cli.Sprintf("%s/%s", org, repoName)

	args := []string{
		"run", "list",
		"--repo", repoFullName,
		"--limit", "1",
		"--json", "status,conclusion,name,headSha,updatedAt,url",
	}

	cmd := exec.Command("gh", args...)
	output, err := cmd.Output()
	if err != nil {
		// Check if it's a 404 (no workflows); gh reports this on stderr.
		if exitErr, ok := err.(*exec.ExitError); ok {
			stderr := string(exitErr.Stderr)
			if strings.Contains(stderr, "no workflows") || strings.Contains(stderr, "not found") {
				return RepoHealth{
					Name:    repoName,
					Status:  "no_ci",
					Message: i18n.T("cmd.qa.health.no_ci_configured"),
				}
			}
		}
		return RepoHealth{
			Name:    repoName,
			Status:  "no_ci",
			Message: i18n.T("cmd.qa.health.fetch_error"),
		}
	}

	var runs []HealthWorkflowRun
	if err := json.Unmarshal(output, &runs); err != nil {
		return RepoHealth{
			Name:    repoName,
			Status:  "no_ci",
			Message: i18n.T("cmd.qa.health.parse_error"),
		}
	}

	if len(runs) == 0 {
		return RepoHealth{
			Name:    repoName,
			Status:  "no_ci",
			Message: i18n.T("cmd.qa.health.no_ci_configured"),
		}
	}

	run := runs[0]
	health := RepoHealth{
		Name: repoName,
		URL:  run.URL,
	}

	// Map gh's status/conclusion pair onto our coarse buckets.
	// Note: "cancelled" is treated as pending and "skipped" as passing.
	switch run.Status {
	case "completed":
		switch run.Conclusion {
		case "success":
			health.Status = "passing"
			health.Message = i18n.T("cmd.qa.health.passing")
		case "failure":
			health.Status = "failing"
			health.Message = i18n.T("cmd.qa.health.tests_failing")
		case "cancelled":
			health.Status = "pending"
			health.Message = i18n.T("cmd.qa.health.cancelled")
		case "skipped":
			health.Status = "passing"
			health.Message = i18n.T("cmd.qa.health.skipped")
		default:
			health.Status = "failing"
			health.Message = run.Conclusion
		}
	case "in_progress", "queued", "waiting":
		health.Status = "pending"
		health.Message = i18n.T("cmd.qa.health.running")
	default:
		health.Status = "no_ci"
		health.Message = run.Status
	}

	return health
}

// healthPriority gives each status a sort rank; lower sorts first so
// problems appear at the top of the report.
func healthPriority(status string) int {
	switch status {
	case "failing":
		return 0
	case "pending":
		return 1
	case "no_ci":
		return 2
	case "disabled":
		return 3
	case "passing":
		return 4
	default:
		return 5
	}
}

// printHealthGroup prints one status bucket with a styled count label.
func printHealthGroup(status string, repos []RepoHealth, style *cli.AnsiStyle)
{
	if len(repos) == 0 {
		return
	}

	var label string
	switch status {
	case "failing":
		label = i18n.T("cmd.qa.health.count_failing")
	case "pending":
		label = i18n.T("cmd.qa.health.count_pending")
	case "no_ci":
		label = i18n.T("cmd.qa.health.count_no_ci")
	case "disabled":
		label = i18n.T("cmd.qa.health.count_disabled")
	case "passing":
		label = i18n.T("cmd.qa.health.count_passing")
	}

	cli.Print("%s (%d):\n", style.Render(label), len(repos))
	for _, repo := range repos {
		cli.Print(" %s %s\n",
			cli.RepoStyle.Render(repo.Name),
			dimStyle.Render(repo.Message))
		// Only failing repos get a direct link to the run.
		if repo.URL != "" && status == "failing" {
			cli.Print(" -> %s\n", dimStyle.Render(repo.URL))
		}
	}
	cli.Blank()
}
// cmd_issues.go implements the 'qa issues' command for intelligent issue triage.
//
// Usage:
//
//	core qa issues # Show prioritised, actionable issues
//	core qa issues --mine # Show issues assigned to you
//	core qa issues --triage # Show issues needing triage (no labels/assignee)
//	core qa issues --blocked # Show blocked issues
package qa

import (
	"encoding/json"
	"os/exec"
	"sort"
	"strings"
	"time"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go/pkg/i18n"
	"forge.lthn.ai/core/go/pkg/io"
	"forge.lthn.ai/core/go/pkg/log"
	"forge.lthn.ai/core/go/pkg/repos"
)

// Issue command flags
var (
	issuesMine     bool   // only issues assigned to the current gh user (--mine)
	issuesTriage   bool   // only issues with no labels and no assignee (--triage)
	issuesBlocked  bool   // only blocked/waiting issues (--blocked)
	issuesRegistry string // explicit registry path override (--registry)
	issuesLimit    int    // per-repo fetch limit (--limit)
)

// Issue represents a GitHub issue with triage metadata.
//
// NOTE(review): the nested Nodes wrappers mirror the GraphQL schema, but
// `gh issue list --json` emits flat arrays for assignees/labels and comment
// objects directly — verify these fields actually populate after unmarshal,
// otherwise every issue silently lands in the "triage" bucket.
type Issue struct {
	Number    int       `json:"number"`
	Title     string    `json:"title"`
	State     string    `json:"state"`
	Body      string    `json:"body"`
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"updatedAt"`
	Author    struct {
		Login string `json:"login"`
	} `json:"author"`
	Assignees struct {
		Nodes []struct {
			Login string `json:"login"`
		} `json:"nodes"`
	} `json:"assignees"`
	Labels struct {
		Nodes []struct {
			Name string `json:"name"`
		} `json:"nodes"`
	} `json:"labels"`
	Comments struct {
		TotalCount int `json:"totalCount"`
		Nodes      []struct {
			Author struct {
				Login string `json:"login"`
			} `json:"author"`
			CreatedAt time.Time `json:"createdAt"`
		} `json:"nodes"`
	} `json:"comments"`
	URL string `json:"url"`

	// Computed fields
	RepoName   string
	Priority   int    // Lower = higher priority
	Category   string // "needs_response", "ready", "blocked", "triage"
	ActionHint string
}

// addIssuesCommand adds the 'issues' subcommand to qa.
func addIssuesCommand(parent *cli.Command) {
	issuesCmd := &cli.Command{
		Use:   "issues",
		Short: i18n.T("cmd.qa.issues.short"),
		Long:  i18n.T("cmd.qa.issues.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			return runQAIssues()
		},
	}

	issuesCmd.Flags().BoolVarP(&issuesMine, "mine", "m", false, i18n.T("cmd.qa.issues.flag.mine"))
	issuesCmd.Flags().BoolVarP(&issuesTriage, "triage", "t", false, i18n.T("cmd.qa.issues.flag.triage"))
	issuesCmd.Flags().BoolVarP(&issuesBlocked, "blocked", "b", false, i18n.T("cmd.qa.issues.flag.blocked"))
	issuesCmd.Flags().StringVar(&issuesRegistry, "registry", "", i18n.T("common.flag.registry"))
	issuesCmd.Flags().IntVarP(&issuesLimit, "limit", "l", 50, i18n.T("cmd.qa.issues.flag.limit"))

	parent.AddCommand(issuesCmd)
}

// runQAIssues fetches open issues from every registry repo, categorises
// and prioritises them, applies the --mine/--triage/--blocked filters,
// and prints the result grouped by category.
func runQAIssues() error {
	// Check gh is available
	if _, err := exec.LookPath("gh"); err != nil {
		return log.E("qa.issues", i18n.T("error.gh_not_found"), nil)
	}

	// Load registry: explicit path wins, otherwise discover it.
	var reg *repos.Registry
	var err error

	if issuesRegistry != "" {
		reg, err = repos.LoadRegistry(io.Local, issuesRegistry)
	} else {
		registryPath, findErr := repos.FindRegistry(io.Local)
		if findErr != nil {
			return log.E("qa.issues", i18n.T("error.registry_not_found"), nil)
		}
		reg, err = repos.LoadRegistry(io.Local, registryPath)
	}
	if err != nil {
		return log.E("qa.issues", "failed to load registry", err)
	}

	// Fetch issues from all repos with an in-place progress line.
	var allIssues []Issue
	repoList := reg.List()

	for i, repo := range repoList {
		cli.Print("\033[2K\r%s %d/%d %s",
			dimStyle.Render(i18n.T("cmd.qa.issues.fetching")),
			i+1, len(repoList), repo.Name)

		issues, err := fetchQAIssues(reg.Org, repo.Name, issuesLimit)
		if err != nil {
			continue // Skip repos with errors
		}
		allIssues = append(allIssues, issues...)
	}
	cli.Print("\033[2K\r") // Clear progress

	if len(allIssues) == 0 {
		cli.Text(i18n.T("cmd.qa.issues.no_issues"))
		return nil
	}

	// Categorise and prioritise issues
	categorised := categoriseIssues(allIssues)

	// Filter based on flags
	if issuesMine {
		categorised = filterMine(categorised)
	}
	if issuesTriage {
		categorised = filterCategory(categorised, "triage")
	}
	if issuesBlocked {
		categorised = filterCategory(categorised, "blocked")
	}

	// Print categorised issues
	printCategorisedIssues(categorised)

	return nil
}

// fetchQAIssues lists up to limit open issues for org/repoName via gh and
// tags each one with the repo it came from.
func fetchQAIssues(org, repoName string, limit int) ([]Issue, error) {
	repoFullName := cli.Sprintf("%s/%s", org, repoName)

	args := []string{
		"issue", "list",
		"--repo", repoFullName,
		"--state", "open",
		"--limit", cli.Sprintf("%d", limit),
		"--json", "number,title,state,body,createdAt,updatedAt,author,assignees,labels,comments,url",
	}

	cmd := exec.Command("gh", args...)
	output, err := cmd.Output()
	if err != nil {
		return nil, err
	}

	var issues []Issue
	if err := json.Unmarshal(output, &issues); err != nil {
		return nil, err
	}

	// Tag with repo name
	for i := range issues {
		issues[i].RepoName = repoName
	}

	return issues, nil
}

// categoriseIssues buckets issues into the four triage categories and
// sorts each bucket by ascending Priority.
func categoriseIssues(issues []Issue) map[string][]Issue {
	result := map[string][]Issue{
		"needs_response": {},
		"ready":          {},
		"blocked":        {},
		"triage":         {},
	}

	currentUser := getCurrentUser()

	for i := range issues {
		issue := &issues[i]
		categoriseIssue(issue, currentUser)
		result[issue.Category] = append(result[issue.Category], *issue)
	}

	// Sort each category by priority
	for cat := range result {
		sort.Slice(result[cat], func(i, j int) bool {
			return result[cat][i].Priority < result[cat][j].Priority
		})
	}

	return result
}

// categoriseIssue assigns Category, Priority and ActionHint in place.
// Precedence: blocked > triage > needs_response > ready.
func categoriseIssue(issue *Issue, currentUser string) {
	labels := getLabels(issue)

	// Check if blocked
	for _, l := range labels {
		if strings.HasPrefix(l, "blocked") || l == "waiting" {
			issue.Category = "blocked"
			issue.Priority = 30
			issue.ActionHint = i18n.T("cmd.qa.issues.hint.blocked")
			return
		}
	}

	// Check if needs triage (no labels, no assignee)
	if len(issue.Labels.Nodes) == 0 && len(issue.Assignees.Nodes) == 0 {
		issue.Category = "triage"
		issue.Priority = 20
		issue.ActionHint = i18n.T("cmd.qa.issues.hint.triage")
		return
	}

	// Check if needs response (comment from someone else within 48h)
	if issue.Comments.TotalCount > 0 && len(issue.Comments.Nodes) > 0 {
		lastComment := issue.Comments.Nodes[len(issue.Comments.Nodes)-1]
		// If last comment is not from current user and is recent
		if lastComment.Author.Login != currentUser {
			age := time.Since(lastComment.CreatedAt)
			if age < 48*time.Hour {
				issue.Category = "needs_response"
				issue.Priority = 10
				issue.ActionHint = cli.Sprintf("@%s %s", lastComment.Author.Login, i18n.T("cmd.qa.issues.hint.needs_response"))
				return
			}
		}
	}

	// Default: ready to work
	issue.Category = "ready"
	issue.Priority = calculatePriority(issue, labels)
	issue.ActionHint = ""
}

// calculatePriority derives a numeric priority (lower = more urgent) from
// the issue's labels; 50 is the unlabelled baseline. Note the switch takes
// the first matching case per label, so later labels can only lower (never
// raise) an already-boosted priority via the min() cases.
func calculatePriority(issue *Issue, labels []string) int {
	priority := 50

	// Priority labels
	for _, l := range labels {
		switch {
		case strings.Contains(l, "critical") || strings.Contains(l, "urgent"):
			priority = 1
		case strings.Contains(l, "high"):
			priority = 10
		case strings.Contains(l, "medium"):
			priority = 30
		case strings.Contains(l, "low"):
			priority = 70
		case l == "good-first-issue" || l == "good first issue":
			priority = min(priority, 15) // Boost good first issues
		case l == "help-wanted" || l == "help wanted":
			priority = min(priority, 20)
		case l == "agent:ready" || l == "agentic":
			priority = min(priority, 5) // AI-ready issues are high priority
		}
	}

	return priority
}

// getLabels returns the issue's label names, lower-cased for matching.
func getLabels(issue *Issue) []string {
	var labels []string
	for _, l := range issue.Labels.Nodes {
		labels = append(labels, strings.ToLower(l.Name))
	}
	return labels
}

// getCurrentUser returns the authenticated gh login, or "" on any error.
func getCurrentUser() string {
	cmd := exec.Command("gh", "api", "user", "--jq", ".login")
	output, err := cmd.Output()
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(output))
}

// filterMine keeps only issues assigned to the current gh user; empty
// categories are dropped entirely.
func filterMine(categorised map[string][]Issue) map[string][]Issue {
	currentUser := getCurrentUser()
	result := make(map[string][]Issue)

	for cat, issues := range categorised {
		var filtered []Issue
		for _, issue := range issues {
			for _, a := range issue.Assignees.Nodes {
				if a.Login == currentUser {
					filtered = append(filtered, issue)
					break
				}
			}
		}
		if len(filtered) > 0 {
			result[cat] = filtered
		}
	}

	return result
}

// filterCategory reduces the map to just the named category (or to an
// empty map when that category has no issues).
func filterCategory(categorised map[string][]Issue, category string) map[string][]Issue {
	if issues, ok := categorised[category]; ok && len(issues) > 0 {
		return map[string][]Issue{category: issues}
	}
	return map[string][]Issue{}
}

// printCategorisedIssues prints the buckets in fixed order with styled
// headings, or a "no issues" message when everything is empty/filtered.
func printCategorisedIssues(categorised map[string][]Issue) {
	// Print in order: needs_response, ready, blocked, triage
	categories := []struct {
		key   string
		title string
		style *cli.AnsiStyle
	}{
		{"needs_response", i18n.T("cmd.qa.issues.category.needs_response"), warningStyle},
		{"ready", i18n.T("cmd.qa.issues.category.ready"), successStyle},
		{"blocked", i18n.T("cmd.qa.issues.category.blocked"), errorStyle},
		{"triage", i18n.T("cmd.qa.issues.category.triage"), dimStyle},
	}

	first := true
	for _, cat := range categories {
		issues := categorised[cat.key]
		if len(issues) == 0 {
			continue
		}

		if !first {
			cli.Blank()
		}
		first = false

		cli.Print("%s (%d):\n", cat.style.Render(cat.title), len(issues))

		for _, issue := range issues {
			printTriagedIssue(issue)
		}
	}

	if first {
		cli.Text(i18n.T("cmd.qa.issues.no_issues"))
	}
}

// printTriagedIssue prints one issue line plus optional labels, age and
// action hint, e.g.: #42 [core-bio] Fix avatar upload
func printTriagedIssue(issue Issue) {
	// #42 [core-bio] Fix avatar upload
	num := cli.TitleStyle.Render(cli.Sprintf("#%d", issue.Number))
	repo := dimStyle.Render(cli.Sprintf("[%s]", issue.RepoName))
	title := cli.ValueStyle.Render(truncate(issue.Title, 50))

	cli.Print(" %s %s %s", num, repo, title)

	// Add labels if priority-related
	var importantLabels []string
	for _, l := range issue.Labels.Nodes {
		name := strings.ToLower(l.Name)
		if strings.Contains(name, "priority") || strings.Contains(name, "critical") ||
			name == "good-first-issue" || name == "agent:ready" || name == "agentic" {
			importantLabels = append(importantLabels, l.Name)
		}
	}
	if len(importantLabels) > 0 {
		cli.Print(" %s", warningStyle.Render("["+strings.Join(importantLabels, ", ")+"]"))
	}

	// Add age
	age := cli.FormatAge(issue.UpdatedAt)
	cli.Print(" %s\n", dimStyle.Render(age))

	// Add action hint if present
	if issue.ActionHint != "" {
		cli.Print(" %s %s\n", dimStyle.Render("->"), issue.ActionHint)
	}
}

// min returns the smaller of two ints. Kept for pre-1.21 toolchains; on
// Go 1.21+ this shadows the identically-behaved builtin within the package.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
100644 index 0000000..5a9eff8 --- /dev/null +++ b/cmd/qa/cmd_qa.go @@ -0,0 +1,45 @@ +// Package qa provides quality assurance workflow commands. +// +// Unlike `core dev` which is about doing work (commit, push, pull), +// `core qa` is about verifying work (CI status, reviews, issues). +// +// Commands: +// - watch: Monitor GitHub Actions after a push, report actionable data +// - review: PR review status with actionable next steps +// - health: Aggregate CI health across all repos +// - issues: Intelligent issue triage +package qa + +import ( + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" +) + +func init() { + cli.RegisterCommands(AddQACommands) +} + +// Style aliases from shared package +var ( + successStyle = cli.SuccessStyle + errorStyle = cli.ErrorStyle + warningStyle = cli.WarningStyle + dimStyle = cli.DimStyle +) + +// AddQACommands registers the 'qa' command and all subcommands. +func AddQACommands(root *cli.Command) { + qaCmd := &cli.Command{ + Use: "qa", + Short: i18n.T("cmd.qa.short"), + Long: i18n.T("cmd.qa.long"), + } + root.AddCommand(qaCmd) + + // Subcommands + addWatchCommand(qaCmd) + addReviewCommand(qaCmd) + addHealthCommand(qaCmd) + addIssuesCommand(qaCmd) + addDocblockCommand(qaCmd) +} diff --git a/cmd/qa/cmd_review.go b/cmd/qa/cmd_review.go new file mode 100644 index 0000000..1b30ac9 --- /dev/null +++ b/cmd/qa/cmd_review.go @@ -0,0 +1,322 @@ +// cmd_review.go implements the 'qa review' command for PR review status. 
+// +// Usage: +// core qa review # Show all PRs needing attention +// core qa review --mine # Show status of your open PRs +// core qa review --requested # Show PRs you need to review + +package qa + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" + "time" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/log" +) + +// Review command flags +var ( + reviewMine bool + reviewRequested bool + reviewRepo string +) + +// PullRequest represents a GitHub pull request +type PullRequest struct { + Number int `json:"number"` + Title string `json:"title"` + Author Author `json:"author"` + State string `json:"state"` + IsDraft bool `json:"isDraft"` + Mergeable string `json:"mergeable"` + ReviewDecision string `json:"reviewDecision"` + URL string `json:"url"` + HeadRefName string `json:"headRefName"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + Additions int `json:"additions"` + Deletions int `json:"deletions"` + ChangedFiles int `json:"changedFiles"` + StatusChecks *StatusCheckRollup `json:"statusCheckRollup"` + ReviewRequests ReviewRequests `json:"reviewRequests"` + Reviews []Review `json:"reviews"` +} + +// Author represents a GitHub user +type Author struct { + Login string `json:"login"` +} + +// StatusCheckRollup contains CI check status +type StatusCheckRollup struct { + Contexts []StatusContext `json:"contexts"` +} + +// StatusContext represents a single check +type StatusContext struct { + State string `json:"state"` + Conclusion string `json:"conclusion"` + Name string `json:"name"` +} + +// ReviewRequests contains pending review requests +type ReviewRequests struct { + Nodes []ReviewRequest `json:"nodes"` +} + +// ReviewRequest represents a review request +type ReviewRequest struct { + RequestedReviewer Author `json:"requestedReviewer"` +} + +// Review represents a PR review +type Review struct { + Author Author `json:"author"` + State string 
`json:"state"` +} + +// addReviewCommand adds the 'review' subcommand to the qa command. +func addReviewCommand(parent *cli.Command) { + reviewCmd := &cli.Command{ + Use: "review", + Short: i18n.T("cmd.qa.review.short"), + Long: i18n.T("cmd.qa.review.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runReview() + }, + } + + reviewCmd.Flags().BoolVarP(&reviewMine, "mine", "m", false, i18n.T("cmd.qa.review.flag.mine")) + reviewCmd.Flags().BoolVarP(&reviewRequested, "requested", "r", false, i18n.T("cmd.qa.review.flag.requested")) + reviewCmd.Flags().StringVar(&reviewRepo, "repo", "", i18n.T("cmd.qa.review.flag.repo")) + + parent.AddCommand(reviewCmd) +} + +func runReview() error { + // Check gh is available + if _, err := exec.LookPath("gh"); err != nil { + return log.E("qa.review", i18n.T("error.gh_not_found"), nil) + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Determine repo + repoFullName := reviewRepo + if repoFullName == "" { + var err error + repoFullName, err = detectRepoFromGit() + if err != nil { + return log.E("qa.review", i18n.T("cmd.qa.review.error.no_repo"), nil) + } + } + + // Default: show both mine and requested if neither flag is set + showMine := reviewMine || (!reviewMine && !reviewRequested) + showRequested := reviewRequested || (!reviewMine && !reviewRequested) + + if showMine { + if err := showMyPRs(ctx, repoFullName); err != nil { + return err + } + } + + if showRequested { + if showMine { + cli.Blank() + } + if err := showRequestedReviews(ctx, repoFullName); err != nil { + return err + } + } + + return nil +} + +// showMyPRs shows the user's open PRs with status +func showMyPRs(ctx context.Context, repo string) error { + prs, err := fetchPRs(ctx, repo, "author:@me") + if err != nil { + return log.E("qa.review", "failed to fetch your PRs", err) + } + + if len(prs) == 0 { + cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.qa.review.no_prs"))) + return nil + } + + 
cli.Print("%s (%d):\n", i18n.T("cmd.qa.review.your_prs"), len(prs)) + + for _, pr := range prs { + printPRStatus(pr) + } + + return nil +} + +// showRequestedReviews shows PRs where user's review is requested +func showRequestedReviews(ctx context.Context, repo string) error { + prs, err := fetchPRs(ctx, repo, "review-requested:@me") + if err != nil { + return log.E("qa.review", "failed to fetch review requests", err) + } + + if len(prs) == 0 { + cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.qa.review.no_reviews"))) + return nil + } + + cli.Print("%s (%d):\n", i18n.T("cmd.qa.review.review_requested"), len(prs)) + + for _, pr := range prs { + printPRForReview(pr) + } + + return nil +} + +// fetchPRs fetches PRs matching the search query +func fetchPRs(ctx context.Context, repo, search string) ([]PullRequest, error) { + args := []string{ + "pr", "list", + "--state", "open", + "--search", search, + "--json", "number,title,author,state,isDraft,mergeable,reviewDecision,url,headRefName,createdAt,updatedAt,additions,deletions,changedFiles,statusCheckRollup,reviewRequests,reviews", + } + + if repo != "" { + args = append(args, "--repo", repo) + } + + cmd := exec.CommandContext(ctx, "gh", args...) 
+ output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return nil, fmt.Errorf("%s", strings.TrimSpace(string(exitErr.Stderr))) + } + return nil, err + } + + var prs []PullRequest + if err := json.Unmarshal(output, &prs); err != nil { + return nil, err + } + + return prs, nil +} + +// printPRStatus prints a PR with its merge status +func printPRStatus(pr PullRequest) { + // Determine status icon and color + status, style, action := analyzePRStatus(pr) + + cli.Print(" %s #%d %s\n", + style.Render(status), + pr.Number, + truncate(pr.Title, 50)) + + if action != "" { + cli.Print(" %s %s\n", dimStyle.Render("->"), action) + } +} + +// printPRForReview prints a PR that needs review +func printPRForReview(pr PullRequest) { + // Show PR info with stats + stats := fmt.Sprintf("+%d/-%d, %d files", + pr.Additions, pr.Deletions, pr.ChangedFiles) + + cli.Print(" %s #%d %s\n", + warningStyle.Render("◯"), + pr.Number, + truncate(pr.Title, 50)) + cli.Print(" %s @%s, %s\n", + dimStyle.Render("->"), + pr.Author.Login, + stats) + cli.Print(" %s gh pr checkout %d\n", + dimStyle.Render("->"), + pr.Number) +} + +// analyzePRStatus determines the status, style, and action for a PR +func analyzePRStatus(pr PullRequest) (status string, style *cli.AnsiStyle, action string) { + // Check if draft + if pr.IsDraft { + return "◯", dimStyle, "Draft - convert to ready when done" + } + + // Check CI status + ciPassed := true + ciFailed := false + ciPending := false + var failedCheck string + + if pr.StatusChecks != nil { + for _, check := range pr.StatusChecks.Contexts { + switch check.Conclusion { + case "FAILURE", "failure": + ciFailed = true + ciPassed = false + if failedCheck == "" { + failedCheck = check.Name + } + case "PENDING", "pending", "": + if check.State == "PENDING" || check.State == "" { + ciPending = true + ciPassed = false + } + } + } + } + + // Check review status + approved := pr.ReviewDecision == "APPROVED" + changesRequested := 
pr.ReviewDecision == "CHANGES_REQUESTED" + + // Check mergeable status + hasConflicts := pr.Mergeable == "CONFLICTING" + + // Determine overall status + if hasConflicts { + return "✗", errorStyle, "Needs rebase - has merge conflicts" + } + + if ciFailed { + return "✗", errorStyle, fmt.Sprintf("CI failed: %s", failedCheck) + } + + if changesRequested { + return "✗", warningStyle, "Changes requested - address review feedback" + } + + if ciPending { + return "◯", warningStyle, "CI running..." + } + + if !approved && pr.ReviewDecision != "" { + return "◯", warningStyle, "Awaiting review" + } + + if approved && ciPassed { + return "✓", successStyle, "Ready to merge" + } + + return "◯", dimStyle, "" +} + +// truncate shortens a string to max length (rune-safe for UTF-8) +func truncate(s string, max int) string { + runes := []rune(s) + if len(runes) <= max { + return s + } + return string(runes[:max-3]) + "..." +} diff --git a/cmd/qa/cmd_watch.go b/cmd/qa/cmd_watch.go new file mode 100644 index 0000000..5f308b5 --- /dev/null +++ b/cmd/qa/cmd_watch.go @@ -0,0 +1,444 @@ +// cmd_watch.go implements the 'qa watch' command for monitoring GitHub Actions. 
+// +// Usage: +// core qa watch # Watch current repo's latest push +// core qa watch --repo X # Watch specific repo +// core qa watch --commit SHA # Watch specific commit +// core qa watch --timeout 5m # Custom timeout (default: 10m) + +package qa + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" + "time" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/log" +) + +// Watch command flags +var ( + watchRepo string + watchCommit string + watchTimeout time.Duration +) + +// WorkflowRun represents a GitHub Actions workflow run +type WorkflowRun struct { + ID int64 `json:"databaseId"` + Name string `json:"name"` + DisplayTitle string `json:"displayTitle"` + Status string `json:"status"` + Conclusion string `json:"conclusion"` + HeadSha string `json:"headSha"` + URL string `json:"url"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +// WorkflowJob represents a job within a workflow run +type WorkflowJob struct { + ID int64 `json:"databaseId"` + Name string `json:"name"` + Status string `json:"status"` + Conclusion string `json:"conclusion"` + URL string `json:"url"` +} + +// JobStep represents a step within a job +type JobStep struct { + Name string `json:"name"` + Status string `json:"status"` + Conclusion string `json:"conclusion"` + Number int `json:"number"` +} + +// addWatchCommand adds the 'watch' subcommand to the qa command. 
+func addWatchCommand(parent *cli.Command) { + watchCmd := &cli.Command{ + Use: "watch", + Short: i18n.T("cmd.qa.watch.short"), + Long: i18n.T("cmd.qa.watch.long"), + RunE: func(cmd *cli.Command, args []string) error { + return runWatch() + }, + } + + watchCmd.Flags().StringVarP(&watchRepo, "repo", "r", "", i18n.T("cmd.qa.watch.flag.repo")) + watchCmd.Flags().StringVarP(&watchCommit, "commit", "c", "", i18n.T("cmd.qa.watch.flag.commit")) + watchCmd.Flags().DurationVarP(&watchTimeout, "timeout", "t", 10*time.Minute, i18n.T("cmd.qa.watch.flag.timeout")) + + parent.AddCommand(watchCmd) +} + +func runWatch() error { + // Check gh is available + if _, err := exec.LookPath("gh"); err != nil { + return log.E("qa.watch", i18n.T("error.gh_not_found"), nil) + } + + // Determine repo + repoFullName, err := resolveRepo(watchRepo) + if err != nil { + return err + } + + // Determine commit + commitSha, err := resolveCommit(watchCommit) + if err != nil { + return err + } + + cli.Print("%s %s\n", dimStyle.Render(i18n.Label("repo")), repoFullName) + // Safe prefix for display - handle short SHAs gracefully + shaPrefix := commitSha + if len(commitSha) > 8 { + shaPrefix = commitSha[:8] + } + cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.qa.watch.commit")), shaPrefix) + cli.Blank() + + // Create context with timeout for all gh commands + ctx, cancel := context.WithTimeout(context.Background(), watchTimeout) + defer cancel() + + // Poll for workflow runs + pollInterval := 3 * time.Second + var lastStatus string + + for { + // Check if context deadline exceeded + if ctx.Err() != nil { + cli.Blank() + return log.E("qa.watch", i18n.T("cmd.qa.watch.timeout", map[string]interface{}{"Duration": watchTimeout}), nil) + } + + runs, err := fetchWorkflowRunsForCommit(ctx, repoFullName, commitSha) + if err != nil { + return log.Wrap(err, "qa.watch", "failed to fetch workflow runs") + } + + if len(runs) == 0 { + // No workflows triggered yet, keep waiting + cli.Print("\033[2K\r%s", 
dimStyle.Render(i18n.T("cmd.qa.watch.waiting_for_workflows"))) + time.Sleep(pollInterval) + continue + } + + // Check status of all runs + allComplete := true + var pending, success, failed int + for _, run := range runs { + switch run.Status { + case "completed": + if run.Conclusion == "success" { + success++ + } else { + // Count all non-success conclusions as failed + // (failure, cancelled, timed_out, action_required, stale, etc.) + failed++ + } + default: + allComplete = false + pending++ + } + } + + // Build status line + status := fmt.Sprintf("%d workflow(s): ", len(runs)) + if pending > 0 { + status += warningStyle.Render(fmt.Sprintf("%d running", pending)) + if success > 0 || failed > 0 { + status += ", " + } + } + if success > 0 { + status += successStyle.Render(fmt.Sprintf("%d passed", success)) + if failed > 0 { + status += ", " + } + } + if failed > 0 { + status += errorStyle.Render(fmt.Sprintf("%d failed", failed)) + } + + // Only print if status changed + if status != lastStatus { + cli.Print("\033[2K\r%s", status) + lastStatus = status + } + + if allComplete { + cli.Blank() + cli.Blank() + return printResults(ctx, repoFullName, runs) + } + + time.Sleep(pollInterval) + } +} + +// resolveRepo determines the repo to watch +func resolveRepo(specified string) (string, error) { + if specified != "" { + // If it contains /, assume it's already full name + if strings.Contains(specified, "/") { + return specified, nil + } + // Try to get org from current directory + org := detectOrgFromGit() + if org != "" { + return org + "/" + specified, nil + } + return "", log.E("qa.watch", i18n.T("cmd.qa.watch.error.repo_format"), nil) + } + + // Detect from current directory + return detectRepoFromGit() +} + +// resolveCommit determines the commit to watch +func resolveCommit(specified string) (string, error) { + if specified != "" { + return specified, nil + } + + // Get HEAD commit + cmd := exec.Command("git", "rev-parse", "HEAD") + output, err := cmd.Output() + if 
err != nil { + return "", log.Wrap(err, "qa.watch", "failed to get HEAD commit") + } + + return strings.TrimSpace(string(output)), nil +} + +// detectRepoFromGit detects the repo from git remote +func detectRepoFromGit() (string, error) { + cmd := exec.Command("git", "remote", "get-url", "origin") + output, err := cmd.Output() + if err != nil { + return "", log.E("qa.watch", i18n.T("cmd.qa.watch.error.not_git_repo"), nil) + } + + url := strings.TrimSpace(string(output)) + return parseGitHubRepo(url) +} + +// detectOrgFromGit tries to detect the org from git remote +func detectOrgFromGit() string { + repo, err := detectRepoFromGit() + if err != nil { + return "" + } + parts := strings.Split(repo, "/") + if len(parts) >= 1 { + return parts[0] + } + return "" +} + +// parseGitHubRepo extracts org/repo from a git URL +func parseGitHubRepo(url string) (string, error) { + // Handle SSH URLs: git@github.com:org/repo.git + if strings.HasPrefix(url, "git@github.com:") { + path := strings.TrimPrefix(url, "git@github.com:") + path = strings.TrimSuffix(path, ".git") + return path, nil + } + + // Handle HTTPS URLs: https://github.com/org/repo.git + if strings.Contains(url, "github.com/") { + parts := strings.Split(url, "github.com/") + if len(parts) >= 2 { + path := strings.TrimSuffix(parts[1], ".git") + return path, nil + } + } + + return "", fmt.Errorf("could not parse GitHub repo from URL: %s", url) +} + +// fetchWorkflowRunsForCommit fetches workflow runs for a specific commit +func fetchWorkflowRunsForCommit(ctx context.Context, repoFullName, commitSha string) ([]WorkflowRun, error) { + args := []string{ + "run", "list", + "--repo", repoFullName, + "--commit", commitSha, + "--json", "databaseId,name,displayTitle,status,conclusion,headSha,url,createdAt,updatedAt", + } + + cmd := exec.CommandContext(ctx, "gh", args...) 
+ output, err := cmd.Output() + if err != nil { + // Check if context was cancelled/deadline exceeded + if ctx.Err() != nil { + return nil, ctx.Err() + } + if exitErr, ok := err.(*exec.ExitError); ok { + return nil, cli.Err("%s", strings.TrimSpace(string(exitErr.Stderr))) + } + return nil, err + } + + var runs []WorkflowRun + if err := json.Unmarshal(output, &runs); err != nil { + return nil, err + } + + return runs, nil +} + +// printResults prints the final results with actionable information +func printResults(ctx context.Context, repoFullName string, runs []WorkflowRun) error { + var failures []WorkflowRun + var successes []WorkflowRun + + for _, run := range runs { + if run.Conclusion == "success" { + successes = append(successes, run) + } else { + // Treat all non-success as failures (failure, cancelled, timed_out, etc.) + failures = append(failures, run) + } + } + + // Print successes briefly + for _, run := range successes { + cli.Print("%s %s\n", successStyle.Render(cli.Glyph(":check:")), run.Name) + } + + // Print failures with details + for _, run := range failures { + cli.Print("%s %s\n", errorStyle.Render(cli.Glyph(":cross:")), run.Name) + + // Fetch failed job details + failedJob, failedStep, errorLine := fetchFailureDetails(ctx, repoFullName, run.ID) + if failedJob != "" { + cli.Print(" %s Job: %s", dimStyle.Render("->"), failedJob) + if failedStep != "" { + cli.Print(" (step: %s)", failedStep) + } + cli.Blank() + } + if errorLine != "" { + cli.Print(" %s Error: %s\n", dimStyle.Render("->"), errorLine) + } + cli.Print(" %s %s\n", dimStyle.Render("->"), run.URL) + } + + // Exit with error if any failures + if len(failures) > 0 { + cli.Blank() + return cli.Err("%s", i18n.T("cmd.qa.watch.workflows_failed", map[string]interface{}{"Count": len(failures)})) + } + + cli.Blank() + cli.Print("%s\n", successStyle.Render(i18n.T("cmd.qa.watch.all_passed"))) + return nil +} + +// fetchFailureDetails fetches details about why a workflow failed +func 
fetchFailureDetails(ctx context.Context, repoFullName string, runID int64) (jobName, stepName, errorLine string) { + // Fetch jobs for this run + args := []string{ + "run", "view", fmt.Sprintf("%d", runID), + "--repo", repoFullName, + "--json", "jobs", + } + + cmd := exec.CommandContext(ctx, "gh", args...) + output, err := cmd.Output() + if err != nil { + return "", "", "" + } + + var result struct { + Jobs []struct { + Name string `json:"name"` + Conclusion string `json:"conclusion"` + Steps []struct { + Name string `json:"name"` + Conclusion string `json:"conclusion"` + Number int `json:"number"` + } `json:"steps"` + } `json:"jobs"` + } + + if err := json.Unmarshal(output, &result); err != nil { + return "", "", "" + } + + // Find the failed job and step + for _, job := range result.Jobs { + if job.Conclusion == "failure" { + jobName = job.Name + for _, step := range job.Steps { + if step.Conclusion == "failure" { + stepName = fmt.Sprintf("%d: %s", step.Number, step.Name) + break + } + } + break + } + } + + // Try to get the error line from logs (if available) + errorLine = fetchErrorFromLogs(ctx, repoFullName, runID) + + return jobName, stepName, errorLine +} + +// fetchErrorFromLogs attempts to extract the first error line from workflow logs +func fetchErrorFromLogs(ctx context.Context, repoFullName string, runID int64) string { + // Use gh run view --log-failed to get failed step logs + args := []string{ + "run", "view", fmt.Sprintf("%d", runID), + "--repo", repoFullName, + "--log-failed", + } + + cmd := exec.CommandContext(ctx, "gh", args...) 
+ output, err := cmd.Output() + if err != nil { + return "" + } + + // Parse output to find the first meaningful error line + lines := strings.Split(string(output), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + // Skip common metadata/progress lines + lower := strings.ToLower(line) + if strings.HasPrefix(lower, "##[") { // GitHub Actions command markers + continue + } + if strings.HasPrefix(line, "Run ") || strings.HasPrefix(line, "Running ") { + continue + } + + // Look for error indicators + if strings.Contains(lower, "error") || + strings.Contains(lower, "failed") || + strings.Contains(lower, "fatal") || + strings.Contains(lower, "panic") || + strings.Contains(line, ": ") { // Likely a file:line or key: value format + // Truncate long lines + if len(line) > 120 { + line = line[:117] + "..." + } + return line + } + } + + return "" +} diff --git a/cmd/setup/cmd_bootstrap.go b/cmd/setup/cmd_bootstrap.go new file mode 100644 index 0000000..3473d6f --- /dev/null +++ b/cmd/setup/cmd_bootstrap.go @@ -0,0 +1,176 @@ +// cmd_bootstrap.go implements bootstrap mode for new workspaces. +// +// Bootstrap mode is activated when no repos.yaml exists in the current +// directory or any parent. It clones core-devops first, then uses its +// repos.yaml to present the package wizard. + +package setup + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "forge.lthn.ai/core/cli/cmd/workspace" + "forge.lthn.ai/core/go/pkg/i18n" + coreio "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/repos" +) + +// runSetupOrchestrator decides between registry mode and bootstrap mode. 
+func runSetupOrchestrator(registryPath, only string, dryRun, all bool, projectName string, runBuild bool) error { + ctx := context.Background() + + // Try to find an existing registry + var foundRegistry string + var err error + + if registryPath != "" { + foundRegistry = registryPath + } else { + foundRegistry, err = repos.FindRegistry(coreio.Local) + } + + // If registry exists, use registry mode + if err == nil && foundRegistry != "" { + return runRegistrySetup(ctx, foundRegistry, only, dryRun, all, runBuild) + } + + // No registry found - enter bootstrap mode + return runBootstrap(ctx, only, dryRun, all, projectName, runBuild) +} + +// runBootstrap handles the case where no repos.yaml exists. +func runBootstrap(ctx context.Context, only string, dryRun, all bool, projectName string, runBuild bool) error { + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get working directory: %w", err) + } + + fmt.Printf("%s %s\n", dimStyle.Render(">>"), i18n.T("cmd.setup.bootstrap_mode")) + + var targetDir string + + // Check if current directory is empty + empty, err := isDirEmpty(cwd) + if err != nil { + return fmt.Errorf("failed to check directory: %w", err) + } + + if empty { + // Clone into current directory + targetDir = cwd + fmt.Printf("%s %s\n", dimStyle.Render(">>"), i18n.T("cmd.setup.cloning_current_dir")) + } else { + // Directory has content - check if it's a git repo root + isRepo := isGitRepoRoot(cwd) + + if isRepo && isTerminal() && !all { + // Offer choice: setup working directory or create package + choice, err := promptSetupChoice() + if err != nil { + return fmt.Errorf("failed to get choice: %w", err) + } + + if choice == "setup" { + // Setup this working directory with .core/ config + return runRepoSetup(cwd, dryRun) + } + // Otherwise continue to "create package" flow + } + + // Create package flow - need a project name + if projectName == "" { + if !isTerminal() || all { + projectName = defaultOrg + } else { + projectName, err = 
promptProjectName(defaultOrg) + if err != nil { + return fmt.Errorf("failed to get project name: %w", err) + } + } + } + + targetDir = filepath.Join(cwd, projectName) + fmt.Printf("%s %s: %s\n", dimStyle.Render(">>"), i18n.T("cmd.setup.creating_project_dir"), projectName) + + if !dryRun { + if err := coreio.Local.EnsureDir(targetDir); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + } + } + + // Clone core-devops first + devopsPath := filepath.Join(targetDir, devopsRepo) + if !coreio.Local.Exists(filepath.Join(devopsPath, ".git")) { + fmt.Printf("%s %s %s...\n", dimStyle.Render(">>"), i18n.T("common.status.cloning"), devopsRepo) + + if !dryRun { + if err := gitClone(ctx, defaultOrg, devopsRepo, devopsPath); err != nil { + return fmt.Errorf("failed to clone %s: %w", devopsRepo, err) + } + fmt.Printf("%s %s %s\n", successStyle.Render(">>"), devopsRepo, i18n.T("cmd.setup.cloned")) + } else { + fmt.Printf(" %s %s/%s to %s\n", i18n.T("cmd.setup.would_clone"), defaultOrg, devopsRepo, devopsPath) + } + } else { + fmt.Printf("%s %s %s\n", dimStyle.Render(">>"), devopsRepo, i18n.T("cmd.setup.already_exists")) + } + + // Load the repos.yaml from core-devops + registryPath := filepath.Join(devopsPath, devopsReposYaml) + + if dryRun { + fmt.Printf("\n%s %s %s\n", dimStyle.Render(">>"), i18n.T("cmd.setup.would_load_registry"), registryPath) + return nil + } + + reg, err := repos.LoadRegistry(coreio.Local, registryPath) + if err != nil { + return fmt.Errorf("failed to load registry from %s: %w", devopsRepo, err) + } + + // Override base path to target directory + reg.BasePath = targetDir + + // Check workspace config for default_only if no filter specified + if only == "" { + if wsConfig, err := workspace.LoadConfig(devopsPath); err == nil && wsConfig != nil && len(wsConfig.DefaultOnly) > 0 { + only = strings.Join(wsConfig.DefaultOnly, ",") + } + } + + // Now run the regular setup with the loaded registry + return runRegistrySetupWithReg(ctx, reg, 
registryPath, only, dryRun, all, runBuild) +} + +// isGitRepoRoot returns true if the directory is a git repository root. +// Handles both regular repos (.git is a directory) and worktrees (.git is a file). +func isGitRepoRoot(path string) bool { + return coreio.Local.Exists(filepath.Join(path, ".git")) +} + +// isDirEmpty returns true if the directory is empty or contains only hidden files. +func isDirEmpty(path string) (bool, error) { + entries, err := coreio.Local.List(path) + if err != nil { + return false, err + } + + for _, e := range entries { + name := e.Name() + // Ignore common hidden/metadata files + if name == ".DS_Store" || name == ".git" || name == ".gitignore" { + continue + } + // Any other non-hidden file means directory is not empty + if len(name) > 0 && name[0] != '.' { + return false, nil + } + } + + return true, nil +} diff --git a/cmd/setup/cmd_ci.go b/cmd/setup/cmd_ci.go new file mode 100644 index 0000000..fafc933 --- /dev/null +++ b/cmd/setup/cmd_ci.go @@ -0,0 +1,300 @@ +package setup + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "forge.lthn.ai/core/go/pkg/cli" + coreio "forge.lthn.ai/core/go/pkg/io" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" +) + +// CIConfig holds CI setup configuration from .core/ci.yaml +type CIConfig struct { + // Homebrew tap (e.g., "host-uk/tap") + Tap string `yaml:"tap"` + // Formula name (defaults to "core") + Formula string `yaml:"formula"` + // Scoop bucket URL + ScoopBucket string `yaml:"scoop_bucket"` + // Chocolatey package name + ChocolateyPkg string `yaml:"chocolatey_pkg"` + // GitHub repository for direct downloads + Repository string `yaml:"repository"` + // Default version to install + DefaultVersion string `yaml:"default_version"` +} + +// DefaultCIConfig returns the default CI configuration. 
+func DefaultCIConfig() *CIConfig {
+	return &CIConfig{
+		Tap:     "host-uk/tap",
+		Formula: "core",
+		ScoopBucket:    "https://forge.lthn.ai/core/scoop-bucket.git",
+		ChocolateyPkg:  "core-cli",
+		Repository:     "host-uk/core",
+		DefaultVersion: "dev",
+	}
+}
+
+// LoadCIConfig loads CI configuration from .core/ci.yaml
+func LoadCIConfig() *CIConfig {
+	cfg := DefaultCIConfig()
+
+	// Try to find .core/ci.yaml in current directory or parents
+	dir, err := os.Getwd()
+	if err != nil {
+		return cfg
+	}
+
+	for {
+		configPath := filepath.Join(dir, ".core", "ci.yaml")
+		data, err := coreio.Local.Read(configPath)
+		if err == nil {
+			if err := yaml.Unmarshal([]byte(data), cfg); err == nil {
+				return cfg
+			}
+		}
+
+		parent := filepath.Dir(dir)
+		if parent == dir {
+			break
+		}
+		dir = parent
+	}
+
+	return cfg
+}
+
+// CI setup command flags
+var (
+	ciShell   string
+	ciVersion string
+)
+
+func init() {
+	ciCmd := &cobra.Command{
+		Use:   "ci",
+		Short: "Output CI installation commands for core CLI",
+		Long: `Output installation commands for the core CLI in CI environments.
+
+Generates shell commands to install the core CLI using the appropriate
+package manager for each platform:
+
+  macOS/Linux: Homebrew (brew install host-uk/tap/core)
+  Windows: Scoop or Chocolatey, or direct download
+
+Configuration can be customized via .core/ci.yaml:
+
+  tap: host-uk/tap # Homebrew tap
+  formula: core # Homebrew formula name
+  scoop_bucket: https://...
# Scoop bucket URL + chocolatey_pkg: core-cli # Chocolatey package name + repository: host-uk/core # GitHub repo for direct downloads + default_version: dev # Default version to install + +Examples: + # Output installation commands for current platform + core setup ci + + # Output for specific shell (bash, powershell, yaml) + core setup ci --shell=bash + core setup ci --shell=powershell + core setup ci --shell=yaml + + # Install specific version + core setup ci --version=v1.0.0 + + # Use in GitHub Actions (pipe to shell) + eval "$(core setup ci --shell=bash)"`, + RunE: runSetupCI, + } + + ciCmd.Flags().StringVar(&ciShell, "shell", "", "Output format: bash, powershell, yaml (auto-detected if not specified)") + ciCmd.Flags().StringVar(&ciVersion, "version", "", "Version to install (tag name or 'dev' for latest dev build)") + + setupCmd.AddCommand(ciCmd) +} + +func runSetupCI(cmd *cobra.Command, args []string) error { + cfg := LoadCIConfig() + + // Use flag version or config default + version := ciVersion + if version == "" { + version = cfg.DefaultVersion + } + + // Auto-detect shell if not specified + shell := ciShell + if shell == "" { + if runtime.GOOS == "windows" { + shell = "powershell" + } else { + shell = "bash" + } + } + + switch shell { + case "bash", "sh": + return outputBashInstall(cfg, version) + case "powershell", "pwsh", "ps1": + return outputPowershellInstall(cfg, version) + case "yaml", "yml", "gha", "github": + return outputGitHubActionsYAML(cfg, version) + default: + return cli.Err("unsupported shell: %s (use bash, powershell, or yaml)", shell) + } +} + +func outputBashInstall(cfg *CIConfig, version string) error { + script := fmt.Sprintf(`#!/bin/bash +set -e + +VERSION="%s" +REPO="%s" +TAP="%s" +FORMULA="%s" + +# Detect OS and architecture +OS="$(uname -s | tr '[:upper:]' '[:lower:]')" +ARCH="$(uname -m)" + +case "$ARCH" in + x86_64|amd64) ARCH="amd64" ;; + arm64|aarch64) ARCH="arm64" ;; + *) echo "Unsupported architecture: $ARCH"; exit 1 ;; +esac 
+ +# Try Homebrew first on macOS/Linux +if command -v brew &>/dev/null; then + echo "Installing via Homebrew..." + brew tap "$TAP" 2>/dev/null || true + if [ "$VERSION" = "dev" ]; then + brew install "${TAP}/${FORMULA}" --HEAD 2>/dev/null || brew upgrade "${TAP}/${FORMULA}" --fetch-HEAD 2>/dev/null || brew install "${TAP}/${FORMULA}" + else + brew install "${TAP}/${FORMULA}" + fi + %s --version + exit 0 +fi + +# Fall back to direct download +echo "Installing %s CLI ${VERSION} for ${OS}/${ARCH}..." + +DOWNLOAD_URL="https://github.com/${REPO}/releases/download/${VERSION}/%s-${OS}-${ARCH}" + +# Download binary +curl -fsSL "$DOWNLOAD_URL" -o /tmp/%s +chmod +x /tmp/%s + +# Install to /usr/local/bin (requires sudo on most systems) +if [ -w /usr/local/bin ]; then + mv /tmp/%s /usr/local/bin/%s +else + sudo mv /tmp/%s /usr/local/bin/%s +fi + +echo "Installed:" +%s --version +`, version, cfg.Repository, cfg.Tap, cfg.Formula, + cfg.Formula, cfg.Formula, cfg.Formula, + cfg.Formula, cfg.Formula, cfg.Formula, cfg.Formula, cfg.Formula, cfg.Formula, cfg.Formula) + + fmt.Print(script) + return nil +} + +func outputPowershellInstall(cfg *CIConfig, version string) error { + script := fmt.Sprintf(`# PowerShell installation script for %s CLI +$ErrorActionPreference = "Stop" + +$Version = "%s" +$Repo = "%s" +$ScoopBucket = "%s" +$ChocoPkg = "%s" +$BinaryName = "%s" +$Arch = if ([Environment]::Is64BitOperatingSystem) { "amd64" } else { "386" } + +# Try Scoop first +if (Get-Command scoop -ErrorAction SilentlyContinue) { + Write-Host "Installing via Scoop..." + scoop bucket add host-uk $ScoopBucket 2>$null + scoop install "host-uk/$BinaryName" + & $BinaryName --version + exit 0 +} + +# Try Chocolatey +if (Get-Command choco -ErrorAction SilentlyContinue) { + Write-Host "Installing via Chocolatey..." + choco install $ChocoPkg -y + & $BinaryName --version + exit 0 +} + +# Fall back to direct download +Write-Host "Installing $BinaryName CLI $Version for windows/$Arch..." 
+ +$DownloadUrl = "https://github.com/$Repo/releases/download/$Version/$BinaryName-windows-$Arch.exe" +$InstallDir = "$env:LOCALAPPDATA\Programs\$BinaryName" +$BinaryPath = "$InstallDir\$BinaryName.exe" + +# Create install directory +New-Item -ItemType Directory -Force -Path $InstallDir | Out-Null + +# Download binary +Invoke-WebRequest -Uri $DownloadUrl -OutFile $BinaryPath + +# Add to PATH if not already there +$CurrentPath = [Environment]::GetEnvironmentVariable("Path", "User") +if ($CurrentPath -notlike "*$InstallDir*") { + [Environment]::SetEnvironmentVariable("Path", "$CurrentPath;$InstallDir", "User") + $env:Path = "$env:Path;$InstallDir" +} + +Write-Host "Installed:" +& $BinaryPath --version +`, cfg.Formula, version, cfg.Repository, cfg.ScoopBucket, cfg.ChocolateyPkg, cfg.Formula) + + fmt.Print(script) + return nil +} + +func outputGitHubActionsYAML(cfg *CIConfig, version string) error { + yaml := fmt.Sprintf(`# GitHub Actions steps to install %s CLI +# Add these to your workflow file + +# Option 1: Direct download (fastest, no extra dependencies) +- name: Install %s CLI + shell: bash + run: | + VERSION="%s" + REPO="%s" + BINARY="%s" + OS="$(uname -s | tr '[:upper:]' '[:lower:]')" + ARCH="$(uname -m)" + case "$ARCH" in + x86_64|amd64) ARCH="amd64" ;; + arm64|aarch64) ARCH="arm64" ;; + esac + curl -fsSL "https://github.com/${REPO}/releases/download/${VERSION}/${BINARY}-${OS}-${ARCH}" -o "${BINARY}" + chmod +x "${BINARY}" + sudo mv "${BINARY}" /usr/local/bin/ + %s --version + +# Option 2: Homebrew (better for caching, includes dependencies) +- name: Install %s CLI (Homebrew) + run: | + brew tap %s + brew install %s/%s + %s --version +`, cfg.Formula, cfg.Formula, version, cfg.Repository, cfg.Formula, cfg.Formula, + cfg.Formula, cfg.Tap, cfg.Tap, cfg.Formula, cfg.Formula) + + fmt.Print(yaml) + return nil +} diff --git a/cmd/setup/cmd_commands.go b/cmd/setup/cmd_commands.go new file mode 100644 index 0000000..15fbade --- /dev/null +++ b/cmd/setup/cmd_commands.go 
@@ -0,0 +1,38 @@ +// Package setup provides workspace bootstrap and package cloning commands. +// +// Two modes of operation: +// +// REGISTRY MODE (repos.yaml exists): +// - Clones all repositories defined in repos.yaml into packages/ +// - Skips repos that already exist +// - Supports filtering by type with --only +// +// BOOTSTRAP MODE (no repos.yaml): +// - Clones core-devops to set up the workspace foundation +// - Presents an interactive wizard to select packages (unless --all) +// - Clones selected packages +// +// Flags: +// - --registry: Path to repos.yaml (auto-detected if not specified) +// - --only: Filter by repo type (foundation, module, product) +// - --dry-run: Preview what would be cloned +// - --all: Skip wizard, clone all packages (non-interactive) +// - --name: Project directory name for bootstrap mode +// - --build: Run build after cloning +// +// Uses gh CLI with HTTPS when authenticated, falls back to SSH. +package setup + +import ( + "forge.lthn.ai/core/go/pkg/cli" + "github.com/spf13/cobra" +) + +func init() { + cli.RegisterCommands(AddSetupCommands) +} + +// AddSetupCommands registers the 'setup' command and all subcommands. +func AddSetupCommands(root *cobra.Command) { + AddSetupCommand(root) +} diff --git a/cmd/setup/cmd_github.go b/cmd/setup/cmd_github.go new file mode 100644 index 0000000..68afd13 --- /dev/null +++ b/cmd/setup/cmd_github.go @@ -0,0 +1,230 @@ +// cmd_github.go implements the 'setup github' command for configuring +// GitHub repositories with organization standards. 
+// +// Usage: +// core setup github [flags] +// +// Flags: +// -r, --repo string Specific repo to setup +// -a, --all Setup all repos in registry +// -l, --labels Only sync labels +// -w, --webhooks Only sync webhooks +// -p, --protection Only sync branch protection +// -s, --security Only sync security settings +// -c, --check Dry-run: show what would change +// --config string Path to github.yaml config +// --verbose Show detailed output + +package setup + +import ( + "errors" + "os/exec" + "path/filepath" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + coreio "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/repos" + "github.com/spf13/cobra" +) + +// GitHub command flags +var ( + ghRepo string + ghAll bool + ghLabels bool + ghWebhooks bool + ghProtection bool + ghSecurity bool + ghCheck bool + ghConfigPath string + ghVerbose bool +) + +// addGitHubCommand adds the 'github' subcommand to the setup command. +func addGitHubCommand(parent *cobra.Command) { + ghCmd := &cobra.Command{ + Use: "github", + Aliases: []string{"gh"}, + Short: i18n.T("cmd.setup.github.short"), + Long: i18n.T("cmd.setup.github.long"), + RunE: func(cmd *cobra.Command, args []string) error { + return runGitHubSetup() + }, + } + + ghCmd.Flags().StringVarP(&ghRepo, "repo", "r", "", i18n.T("cmd.setup.github.flag.repo")) + ghCmd.Flags().BoolVarP(&ghAll, "all", "a", false, i18n.T("cmd.setup.github.flag.all")) + ghCmd.Flags().BoolVarP(&ghLabels, "labels", "l", false, i18n.T("cmd.setup.github.flag.labels")) + ghCmd.Flags().BoolVarP(&ghWebhooks, "webhooks", "w", false, i18n.T("cmd.setup.github.flag.webhooks")) + ghCmd.Flags().BoolVarP(&ghProtection, "protection", "p", false, i18n.T("cmd.setup.github.flag.protection")) + ghCmd.Flags().BoolVarP(&ghSecurity, "security", "s", false, i18n.T("cmd.setup.github.flag.security")) + ghCmd.Flags().BoolVarP(&ghCheck, "check", "c", false, i18n.T("cmd.setup.github.flag.check")) + ghCmd.Flags().StringVar(&ghConfigPath, "config", "", 
i18n.T("cmd.setup.github.flag.config")) + ghCmd.Flags().BoolVarP(&ghVerbose, "verbose", "v", false, i18n.T("common.flag.verbose")) + + parent.AddCommand(ghCmd) +} + +func runGitHubSetup() error { + // Check gh is available + if _, err := exec.LookPath("gh"); err != nil { + return errors.New(i18n.T("error.gh_not_found")) + } + + // Check gh is authenticated + if !cli.GhAuthenticated() { + return errors.New(i18n.T("cmd.setup.github.error.not_authenticated")) + } + + // Find registry + registryPath, err := repos.FindRegistry(coreio.Local) + if err != nil { + return cli.Wrap(err, i18n.T("error.registry_not_found")) + } + + reg, err := repos.LoadRegistry(coreio.Local, registryPath) + if err != nil { + return cli.Wrap(err, "failed to load registry") + } + + registryDir := filepath.Dir(registryPath) + + // Find GitHub config + configPath, err := FindGitHubConfig(registryDir, ghConfigPath) + if err != nil { + return cli.Wrap(err, i18n.T("cmd.setup.github.error.config_not_found")) + } + + config, err := LoadGitHubConfig(configPath) + if err != nil { + return cli.Wrap(err, "failed to load GitHub config") + } + + if err := config.Validate(); err != nil { + return cli.Wrap(err, "invalid GitHub config") + } + + // Print header + cli.Print("%s %s\n", dimStyle.Render(i18n.Label("registry")), registryPath) + cli.Print("%s %s\n", dimStyle.Render(i18n.Label("config")), configPath) + + if ghCheck { + cli.Print("%s\n", warningStyle.Render(i18n.T("cmd.setup.github.dry_run_mode"))) + } + + // Determine which repos to process + var reposToProcess []*repos.Repo + + // Reject conflicting flags + if ghRepo != "" && ghAll { + return errors.New(i18n.T("cmd.setup.github.error.conflicting_flags")) + } + + if ghRepo != "" { + // Single repo mode + repo, ok := reg.Get(ghRepo) + if !ok { + return errors.New(i18n.T("error.repo_not_found", map[string]interface{}{"Name": ghRepo})) + } + reposToProcess = []*repos.Repo{repo} + } else if ghAll { + // All repos mode + reposToProcess = reg.List() + } else 
{ + // No repos specified + cli.Print("\n%s\n", i18n.T("cmd.setup.github.no_repos_specified")) + cli.Print(" %s\n", i18n.T("cmd.setup.github.usage_hint")) + return nil + } + + // Determine which operations to run + runAll := !ghLabels && !ghWebhooks && !ghProtection && !ghSecurity + runLabels := runAll || ghLabels + runWebhooks := runAll || ghWebhooks + runProtection := runAll || ghProtection + runSecurity := runAll || ghSecurity + + // Process each repo + aggregate := NewAggregate() + + for i, repo := range reposToProcess { + repoFullName := cli.Sprintf("%s/%s", reg.Org, repo.Name) + + // Show progress + cli.Print("\033[2K\r%s %d/%d %s", + dimStyle.Render(i18n.T("common.progress.checking")), + i+1, len(reposToProcess), repo.Name) + + changes := NewChangeSet(repo.Name) + + // Sync labels + if runLabels { + labelChanges, err := SyncLabels(repoFullName, config, ghCheck) + if err != nil { + cli.Print("\033[2K\r") + cli.Print("%s %s: %s\n", errorStyle.Render(cli.Glyph(":cross:")), repo.Name, err) + aggregate.Add(changes) // Preserve partial results + continue + } + changes.Changes = append(changes.Changes, labelChanges.Changes...) + } + + // Sync webhooks + if runWebhooks { + webhookChanges, err := SyncWebhooks(repoFullName, config, ghCheck) + if err != nil { + cli.Print("\033[2K\r") + cli.Print("%s %s: %s\n", errorStyle.Render(cli.Glyph(":cross:")), repo.Name, err) + aggregate.Add(changes) // Preserve partial results + continue + } + changes.Changes = append(changes.Changes, webhookChanges.Changes...) + } + + // Sync branch protection + if runProtection { + protectionChanges, err := SyncBranchProtection(repoFullName, config, ghCheck) + if err != nil { + cli.Print("\033[2K\r") + cli.Print("%s %s: %s\n", errorStyle.Render(cli.Glyph(":cross:")), repo.Name, err) + aggregate.Add(changes) // Preserve partial results + continue + } + changes.Changes = append(changes.Changes, protectionChanges.Changes...) 
+ } + + // Sync security settings + if runSecurity { + securityChanges, err := SyncSecuritySettings(repoFullName, config, ghCheck) + if err != nil { + cli.Print("\033[2K\r") + cli.Print("%s %s: %s\n", errorStyle.Render(cli.Glyph(":cross:")), repo.Name, err) + aggregate.Add(changes) // Preserve partial results + continue + } + changes.Changes = append(changes.Changes, securityChanges.Changes...) + } + + aggregate.Add(changes) + } + + // Clear progress line + cli.Print("\033[2K\r") + + // Print results + for _, cs := range aggregate.Sets { + cs.Print(ghVerbose || ghCheck) + } + + // Print summary + aggregate.PrintSummary() + + // Suggest permission fix if needed + if ghCheck { + cli.Print("\n%s\n", i18n.T("cmd.setup.github.run_without_check")) + } + + return nil +} diff --git a/cmd/setup/cmd_registry.go b/cmd/setup/cmd_registry.go new file mode 100644 index 0000000..6af2040 --- /dev/null +++ b/cmd/setup/cmd_registry.go @@ -0,0 +1,264 @@ +// cmd_registry.go implements registry mode for cloning packages. +// +// Registry mode is activated when a repos.yaml exists. It reads the registry +// and clones all (or selected) packages into the configured packages directory. + +package setup + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/cli/cmd/workspace" + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + coreio "forge.lthn.ai/core/go/pkg/io" + "forge.lthn.ai/core/go/pkg/repos" +) + +// runRegistrySetup loads a registry from path and runs setup. 
+func runRegistrySetup(ctx context.Context, registryPath, only string, dryRun, all, runBuild bool) error { + reg, err := repos.LoadRegistry(coreio.Local, registryPath) + if err != nil { + return fmt.Errorf("failed to load registry: %w", err) + } + + // Check workspace config for default_only if no filter specified + if only == "" { + registryDir := filepath.Dir(registryPath) + if wsConfig, err := workspace.LoadConfig(registryDir); err == nil && wsConfig != nil && len(wsConfig.DefaultOnly) > 0 { + only = strings.Join(wsConfig.DefaultOnly, ",") + } + } + + return runRegistrySetupWithReg(ctx, reg, registryPath, only, dryRun, all, runBuild) +} + +// runRegistrySetupWithReg runs setup with an already-loaded registry. +func runRegistrySetupWithReg(ctx context.Context, reg *repos.Registry, registryPath, only string, dryRun, all, runBuild bool) error { + fmt.Printf("%s %s\n", dimStyle.Render(i18n.Label("registry")), registryPath) + fmt.Printf("%s %s\n", dimStyle.Render(i18n.T("cmd.setup.org_label")), reg.Org) + + registryDir := filepath.Dir(registryPath) + + // Determine base path for cloning + basePath := reg.BasePath + if basePath == "" { + // Load workspace config to see if packages_dir is set (ignore errors, fall back to default) + wsConfig, _ := workspace.LoadConfig(registryDir) + if wsConfig != nil && wsConfig.PackagesDir != "" { + basePath = wsConfig.PackagesDir + } else { + basePath = "./packages" + } + } + + // Expand ~ + if strings.HasPrefix(basePath, "~/") { + home, _ := os.UserHomeDir() + basePath = filepath.Join(home, basePath[2:]) + } + + // Resolve relative to registry location + if !filepath.IsAbs(basePath) { + basePath = filepath.Join(registryDir, basePath) + } + + fmt.Printf("%s %s\n", dimStyle.Render(i18n.Label("target")), basePath) + + // Parse type filter + var typeFilter []string + if only != "" { + for _, t := range strings.Split(only, ",") { + typeFilter = append(typeFilter, strings.TrimSpace(t)) + } + fmt.Printf("%s %s\n", 
dimStyle.Render(i18n.Label("filter")), only) + } + + // Ensure base path exists + if !dryRun { + if err := coreio.Local.EnsureDir(basePath); err != nil { + return fmt.Errorf("failed to create packages directory: %w", err) + } + } + + // Get all available repos + allRepos := reg.List() + + // Determine which repos to clone + var toClone []*repos.Repo + var skipped, exists int + + // Use wizard in interactive mode, unless --all specified + useWizard := isTerminal() && !all && !dryRun + + if useWizard { + selected, err := runPackageWizard(reg, typeFilter) + if err != nil { + return fmt.Errorf("wizard error: %w", err) + } + + // Build set of selected repos + selectedSet := make(map[string]bool) + for _, name := range selected { + selectedSet[name] = true + } + + // Filter repos based on selection + for _, repo := range allRepos { + if !selectedSet[repo.Name] { + skipped++ + continue + } + + // Check if already exists + repoPath := filepath.Join(basePath, repo.Name) + // Check .git dir existence via Exists + if coreio.Local.Exists(filepath.Join(repoPath, ".git")) { + exists++ + continue + } + + toClone = append(toClone, repo) + } + } else { + // Non-interactive: filter by type + typeFilterSet := make(map[string]bool) + for _, t := range typeFilter { + typeFilterSet[t] = true + } + + for _, repo := range allRepos { + // Skip if type filter doesn't match (when filter is specified) + if len(typeFilterSet) > 0 && !typeFilterSet[repo.Type] { + skipped++ + continue + } + + // Skip if clone: false + if repo.Clone != nil && !*repo.Clone { + skipped++ + continue + } + + // Check if already exists + repoPath := filepath.Join(basePath, repo.Name) + if coreio.Local.Exists(filepath.Join(repoPath, ".git")) { + exists++ + continue + } + + toClone = append(toClone, repo) + } + } + + // Summary + fmt.Println() + fmt.Printf("%s, %s, %s\n", + i18n.T("cmd.setup.to_clone", map[string]interface{}{"Count": len(toClone)}), + i18n.T("cmd.setup.exist", map[string]interface{}{"Count": exists}), + 
i18n.T("common.count.skipped", map[string]interface{}{"Count": skipped})) + + if len(toClone) == 0 { + fmt.Printf("\n%s\n", i18n.T("cmd.setup.nothing_to_clone")) + return nil + } + + if dryRun { + fmt.Printf("\n%s\n", i18n.T("cmd.setup.would_clone_list")) + for _, repo := range toClone { + fmt.Printf(" %s (%s)\n", repoNameStyle.Render(repo.Name), repo.Type) + } + return nil + } + + // Confirm in interactive mode + if useWizard { + confirmed, err := confirmClone(len(toClone), basePath) + if err != nil { + return err + } + if !confirmed { + fmt.Println(i18n.T("cmd.setup.cancelled")) + return nil + } + } + + // Clone repos + fmt.Println() + var succeeded, failed int + + for _, repo := range toClone { + fmt.Printf(" %s %s... ", dimStyle.Render(i18n.T("common.status.cloning")), repo.Name) + + repoPath := filepath.Join(basePath, repo.Name) + + err := gitClone(ctx, reg.Org, repo.Name, repoPath) + if err != nil { + fmt.Printf("%s\n", errorStyle.Render("x "+err.Error())) + failed++ + } else { + fmt.Printf("%s\n", successStyle.Render(i18n.T("cmd.setup.done"))) + succeeded++ + } + } + + // Summary + fmt.Println() + fmt.Printf("%s %s", successStyle.Render(i18n.Label("done")), i18n.T("cmd.setup.cloned_count", map[string]interface{}{"Count": succeeded})) + if failed > 0 { + fmt.Printf(", %s", errorStyle.Render(i18n.T("i18n.count.failed", failed))) + } + if exists > 0 { + fmt.Printf(", %s", i18n.T("cmd.setup.already_exist_count", map[string]interface{}{"Count": exists})) + } + fmt.Println() + + // Run build if requested + if runBuild && succeeded > 0 { + fmt.Println() + fmt.Printf("%s %s\n", dimStyle.Render(">>"), i18n.ProgressSubject("run", "build")) + buildCmd := exec.Command("core", "build") + buildCmd.Dir = basePath + buildCmd.Stdout = os.Stdout + buildCmd.Stderr = os.Stderr + if err := buildCmd.Run(); err != nil { + return fmt.Errorf("%s: %w", i18n.T("i18n.fail.run", "build"), err) + } + } + + return nil +} + +// gitClone clones a repository using gh CLI or git. 
+func gitClone(ctx context.Context, org, repo, path string) error { + // Try gh clone first with HTTPS (works without SSH keys) + if cli.GhAuthenticated() { + // Use HTTPS URL directly to bypass git_protocol config + httpsURL := fmt.Sprintf("https://github.com/%s/%s.git", org, repo) + cmd := exec.CommandContext(ctx, "gh", "repo", "clone", httpsURL, path) + output, err := cmd.CombinedOutput() + if err == nil { + return nil + } + errStr := strings.TrimSpace(string(output)) + // Only fall through to SSH if it's an auth error + if !strings.Contains(errStr, "Permission denied") && + !strings.Contains(errStr, "could not read") { + return fmt.Errorf("%s", errStr) + } + } + + // Fallback to git clone via SSH + url := fmt.Sprintf("git@github.com:%s/%s.git", org, repo) + cmd := exec.CommandContext(ctx, "git", "clone", url, path) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("%s", strings.TrimSpace(string(output))) + } + return nil +} diff --git a/cmd/setup/cmd_repo.go b/cmd/setup/cmd_repo.go new file mode 100644 index 0000000..fac6874 --- /dev/null +++ b/cmd/setup/cmd_repo.go @@ -0,0 +1,289 @@ +// cmd_repo.go implements repository setup with .core/ configuration. +// +// When running setup in an existing git repository, this generates +// build.yaml, release.yaml, and test.yaml configurations based on +// detected project type. + +package setup + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" + + "forge.lthn.ai/core/go/pkg/i18n" + coreio "forge.lthn.ai/core/go/pkg/io" +) + +// runRepoSetup sets up the current repository with .core/ configuration. 
+func runRepoSetup(repoPath string, dryRun bool) error { + fmt.Printf("%s %s: %s\n", dimStyle.Render(">>"), i18n.T("cmd.setup.repo.setting_up"), repoPath) + + // Detect project type + projectType := detectProjectType(repoPath) + fmt.Printf("%s %s: %s\n", dimStyle.Render(">>"), i18n.T("cmd.setup.repo.detected_type"), projectType) + + // Create .core directory + coreDir := filepath.Join(repoPath, ".core") + if !dryRun { + if err := coreio.Local.EnsureDir(coreDir); err != nil { + return fmt.Errorf("failed to create .core directory: %w", err) + } + } + + // Generate configs based on project type + name := filepath.Base(repoPath) + configs := map[string]string{ + "build.yaml": generateBuildConfig(repoPath, projectType), + "release.yaml": generateReleaseConfig(name, projectType), + "test.yaml": generateTestConfig(projectType), + } + + if dryRun { + fmt.Printf("\n%s %s:\n", dimStyle.Render(">>"), i18n.T("cmd.setup.repo.would_create")) + for filename, content := range configs { + fmt.Printf("\n %s:\n", filepath.Join(coreDir, filename)) + // Indent content for display + for _, line := range strings.Split(content, "\n") { + fmt.Printf(" %s\n", line) + } + } + return nil + } + + for filename, content := range configs { + configPath := filepath.Join(coreDir, filename) + if err := coreio.Local.Write(configPath, content); err != nil { + return fmt.Errorf("failed to write %s: %w", filename, err) + } + fmt.Printf("%s %s %s\n", successStyle.Render(">>"), i18n.T("cmd.setup.repo.created"), configPath) + } + + return nil +} + +// detectProjectType identifies the project type from files present. 
+func detectProjectType(path string) string { + // Check in priority order + if coreio.Local.IsFile(filepath.Join(path, "wails.json")) { + return "wails" + } + if coreio.Local.IsFile(filepath.Join(path, "go.mod")) { + return "go" + } + if coreio.Local.IsFile(filepath.Join(path, "composer.json")) { + return "php" + } + if coreio.Local.IsFile(filepath.Join(path, "package.json")) { + return "node" + } + return "unknown" +} + +// generateBuildConfig creates a build.yaml configuration based on project type. +func generateBuildConfig(path, projectType string) string { + name := filepath.Base(path) + + switch projectType { + case "go", "wails": + return fmt.Sprintf(`version: 1 +project: + name: %s + description: Go application + main: ./cmd/%s + binary: %s +build: + cgo: false + flags: + - -trimpath + ldflags: + - -s + - -w +targets: + - os: linux + arch: amd64 + - os: linux + arch: arm64 + - os: darwin + arch: amd64 + - os: darwin + arch: arm64 + - os: windows + arch: amd64 +`, name, name, name) + + case "php": + return fmt.Sprintf(`version: 1 +project: + name: %s + description: PHP application + type: php +build: + dockerfile: Dockerfile + image: %s +`, name, name) + + case "node": + return fmt.Sprintf(`version: 1 +project: + name: %s + description: Node.js application + type: node +build: + script: npm run build + output: dist +`, name) + + default: + return fmt.Sprintf(`version: 1 +project: + name: %s + description: Application +`, name) + } +} + +// generateReleaseConfig creates a release.yaml configuration. 
+func generateReleaseConfig(name, projectType string) string { + // Try to detect GitHub repo from git remote + repo := detectGitHubRepo() + if repo == "" { + repo = "owner/" + name + } + + base := fmt.Sprintf(`version: 1 +project: + name: %s + repository: %s +`, name, repo) + + switch projectType { + case "go", "wails": + return base + ` +changelog: + include: + - feat + - fix + - perf + - refactor + exclude: + - chore + - docs + - style + - test + +publishers: + - type: github + draft: false + prerelease: false +` + case "php": + return base + ` +changelog: + include: + - feat + - fix + - perf + +publishers: + - type: github + draft: false +` + default: + return base + ` +changelog: + include: + - feat + - fix + +publishers: + - type: github +` + } +} + +// generateTestConfig creates a test.yaml configuration. +func generateTestConfig(projectType string) string { + switch projectType { + case "go", "wails": + return `version: 1 + +commands: + - name: unit + run: go test ./... + - name: coverage + run: go test -coverprofile=coverage.out ./... + - name: race + run: go test -race ./... + +env: + CGO_ENABLED: "0" +` + case "php": + return `version: 1 + +commands: + - name: unit + run: vendor/bin/pest --parallel + - name: types + run: vendor/bin/phpstan analyse + - name: lint + run: vendor/bin/pint --test + +env: + APP_ENV: testing + DB_CONNECTION: sqlite +` + case "node": + return `version: 1 + +commands: + - name: unit + run: npm test + - name: lint + run: npm run lint + - name: typecheck + run: npm run typecheck + +env: + NODE_ENV: test +` + default: + return `version: 1 + +commands: + - name: test + run: echo "No tests configured" +` + } +} + +// detectGitHubRepo tries to extract owner/repo from git remote. 
+func detectGitHubRepo() string { + cmd := exec.Command("git", "remote", "get-url", "origin") + output, err := cmd.Output() + if err != nil { + return "" + } + + url := strings.TrimSpace(string(output)) + + // Handle SSH format: git@github.com:owner/repo.git + if strings.HasPrefix(url, "git@github.com:") { + repo := strings.TrimPrefix(url, "git@github.com:") + repo = strings.TrimSuffix(repo, ".git") + return repo + } + + // Handle HTTPS format: https://github.com/owner/repo.git + if strings.Contains(url, "github.com/") { + parts := strings.Split(url, "github.com/") + if len(parts) == 2 { + repo := strings.TrimSuffix(parts[1], ".git") + return repo + } + } + + return "" +} diff --git a/cmd/setup/cmd_setup.go b/cmd/setup/cmd_setup.go new file mode 100644 index 0000000..89da437 --- /dev/null +++ b/cmd/setup/cmd_setup.go @@ -0,0 +1,59 @@ +// Package setup provides workspace setup and bootstrap commands. +package setup + +import ( + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "github.com/spf13/cobra" +) + +// Style aliases from shared package +var ( + repoNameStyle = cli.RepoStyle + successStyle = cli.SuccessStyle + errorStyle = cli.ErrorStyle + warningStyle = cli.WarningStyle + dimStyle = cli.DimStyle +) + +// Default organization and devops repo for bootstrap +const ( + defaultOrg = "host-uk" + devopsRepo = "core-devops" + devopsReposYaml = "repos.yaml" +) + +// Setup command flags +var ( + registryPath string + only string + dryRun bool + all bool + name string + build bool +) + +var setupCmd = &cobra.Command{ + Use: "setup", + Short: i18n.T("cmd.setup.short"), + Long: i18n.T("cmd.setup.long"), + RunE: func(cmd *cobra.Command, args []string) error { + return runSetupOrchestrator(registryPath, only, dryRun, all, name, build) + }, +} + +func initSetupFlags() { + setupCmd.Flags().StringVar(®istryPath, "registry", "", i18n.T("cmd.setup.flag.registry")) + setupCmd.Flags().StringVar(&only, "only", "", i18n.T("cmd.setup.flag.only")) + 
setupCmd.Flags().BoolVar(&dryRun, "dry-run", false, i18n.T("cmd.setup.flag.dry_run")) + setupCmd.Flags().BoolVar(&all, "all", false, i18n.T("cmd.setup.flag.all")) + setupCmd.Flags().StringVar(&name, "name", "", i18n.T("cmd.setup.flag.name")) + setupCmd.Flags().BoolVar(&build, "build", false, i18n.T("cmd.setup.flag.build")) +} + +// AddSetupCommand adds the 'setup' command to the given parent command. +func AddSetupCommand(root *cobra.Command) { + initSetupFlags() + addGitHubCommand(setupCmd) + root.AddCommand(setupCmd) +} diff --git a/cmd/setup/cmd_wizard.go b/cmd/setup/cmd_wizard.go new file mode 100644 index 0000000..8f595b8 --- /dev/null +++ b/cmd/setup/cmd_wizard.go @@ -0,0 +1,93 @@ +// cmd_wizard.go implements the interactive package selection wizard. +package setup + +import ( + "fmt" + "os" + "sort" + + "forge.lthn.ai/core/go/pkg/cli" + "forge.lthn.ai/core/go/pkg/i18n" + "forge.lthn.ai/core/go/pkg/repos" + "golang.org/x/term" +) + +// isTerminal returns true if stdin is a terminal. +func isTerminal() bool { + return term.IsTerminal(int(os.Stdin.Fd())) +} + +// promptSetupChoice asks the user whether to setup the working directory or create a package. +func promptSetupChoice() (string, error) { + fmt.Println(cli.TitleStyle.Render(i18n.T("cmd.setup.wizard.git_repo_title"))) + fmt.Println(i18n.T("cmd.setup.wizard.what_to_do")) + + choice, err := cli.Select("Choose action", []string{"setup", "package"}) + if err != nil { + return "", err + } + return choice, nil +} + +// promptProjectName asks the user for a project directory name. +func promptProjectName(defaultName string) (string, error) { + fmt.Println(cli.TitleStyle.Render(i18n.T("cmd.setup.wizard.project_name_title"))) + return cli.Prompt(i18n.T("cmd.setup.wizard.project_name_desc"), defaultName) +} + +// runPackageWizard presents an interactive multi-select UI for package selection. 
+func runPackageWizard(reg *repos.Registry, preselectedTypes []string) ([]string, error) { + allRepos := reg.List() + + // Build options + var options []string + + // Sort by name + sort.Slice(allRepos, func(i, j int) bool { + return allRepos[i].Name < allRepos[j].Name + }) + + for _, repo := range allRepos { + if repo.Clone != nil && !*repo.Clone { + continue + } + // Format: name (type) + label := fmt.Sprintf("%s (%s)", repo.Name, repo.Type) + options = append(options, label) + } + + fmt.Println(cli.TitleStyle.Render(i18n.T("cmd.setup.wizard.package_selection"))) + fmt.Println(i18n.T("cmd.setup.wizard.selection_hint")) + + selectedLabels, err := cli.MultiSelect(i18n.T("cmd.setup.wizard.select_packages"), options) + if err != nil { + return nil, err + } + + // Extract names from labels + var selected []string + for _, label := range selectedLabels { + // Basic parsing assuming "name (type)" format + // Find last space + var name string + // Since we constructed it, we know it ends with (type) + // but repo name might have spaces? Repos usually don't. + // Let's iterate repos to find match + for _, repo := range allRepos { + if label == fmt.Sprintf("%s (%s)", repo.Name, repo.Type) { + name = repo.Name + break + } + } + if name != "" { + selected = append(selected, name) + } + } + return selected, nil +} + +// confirmClone asks for confirmation before cloning. +func confirmClone(count int, target string) (bool, error) { + confirmed := cli.Confirm(i18n.T("cmd.setup.wizard.confirm_clone", map[string]interface{}{"Count": count, "Target": target})) + return confirmed, nil +} diff --git a/cmd/setup/github_config.go b/cmd/setup/github_config.go new file mode 100644 index 0000000..82b72cf --- /dev/null +++ b/cmd/setup/github_config.go @@ -0,0 +1,204 @@ +// github_config.go defines configuration types for GitHub repository setup. +// +// Configuration is loaded from .core/github.yaml and supports environment +// variable expansion using ${VAR} or ${VAR:-default} syntax. 
+ +package setup + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + coreio "forge.lthn.ai/core/go/pkg/io" + "gopkg.in/yaml.v3" +) + +// GitHubConfig represents the full GitHub setup configuration. +type GitHubConfig struct { + Version int `yaml:"version"` + Labels []LabelConfig `yaml:"labels"` + Webhooks map[string]WebhookConfig `yaml:"webhooks"` + BranchProtection []BranchProtectionConfig `yaml:"branch_protection"` + Security SecurityConfig `yaml:"security"` +} + +// LabelConfig defines a GitHub issue/PR label. +type LabelConfig struct { + Name string `yaml:"name"` + Color string `yaml:"color"` + Description string `yaml:"description"` +} + +// WebhookConfig defines a GitHub webhook configuration. +type WebhookConfig struct { + URL string `yaml:"url"` // Webhook URL (supports ${ENV_VAR}) + ContentType string `yaml:"content_type"` // json or form (default: json) + Secret string `yaml:"secret"` // Optional secret (supports ${ENV_VAR}) + Events []string `yaml:"events"` // Events to trigger on + Active *bool `yaml:"active"` // Whether webhook is active (default: true) +} + +// BranchProtectionConfig defines branch protection rules. +type BranchProtectionConfig struct { + Branch string `yaml:"branch"` + RequiredReviews int `yaml:"required_reviews"` + DismissStale bool `yaml:"dismiss_stale"` + RequireCodeOwnerReviews bool `yaml:"require_code_owner_reviews"` + RequiredStatusChecks []string `yaml:"required_status_checks"` + RequireLinearHistory bool `yaml:"require_linear_history"` + AllowForcePushes bool `yaml:"allow_force_pushes"` + AllowDeletions bool `yaml:"allow_deletions"` + EnforceAdmins bool `yaml:"enforce_admins"` + RequireConversationResolution bool `yaml:"require_conversation_resolution"` +} + +// SecurityConfig defines repository security settings. 
+type SecurityConfig struct { + DependabotAlerts bool `yaml:"dependabot_alerts"` + DependabotSecurityUpdates bool `yaml:"dependabot_security_updates"` + SecretScanning bool `yaml:"secret_scanning"` + SecretScanningPushProtection bool `yaml:"push_protection"` +} + +// LoadGitHubConfig reads and parses a GitHub configuration file. +func LoadGitHubConfig(path string) (*GitHubConfig, error) { + data, err := coreio.Local.Read(path) + if err != nil { + return nil, fmt.Errorf("failed to read config file: %w", err) + } + + // Expand environment variables before parsing + expanded := expandEnvVars(data) + + var config GitHubConfig + if err := yaml.Unmarshal([]byte(expanded), &config); err != nil { + return nil, fmt.Errorf("failed to parse config file: %w", err) + } + + // Set defaults + for i := range config.Webhooks { + wh := config.Webhooks[i] + if wh.ContentType == "" { + wh.ContentType = "json" + } + if wh.Active == nil { + active := true + wh.Active = &active + } + config.Webhooks[i] = wh + } + + return &config, nil +} + +// envVarPattern matches ${VAR} or ${VAR:-default} patterns. +var envVarPattern = regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*)(?::-([^}]*))?\}`) + +// expandEnvVars expands environment variables in the input string. +// Supports ${VAR} and ${VAR:-default} syntax. +func expandEnvVars(input string) string { + return envVarPattern.ReplaceAllStringFunc(input, func(match string) string { + // Parse the match + submatch := envVarPattern.FindStringSubmatch(match) + if len(submatch) < 2 { + return match + } + + varName := submatch[1] + defaultValue := "" + if len(submatch) >= 3 { + defaultValue = submatch[2] + } + + // Look up the environment variable + if value, ok := os.LookupEnv(varName); ok { + return value + } + return defaultValue + }) +} + +// FindGitHubConfig searches for github.yaml in common locations. +// Search order: +// 1. Specified path (if non-empty) +// 2. .core/github.yaml (relative to registry) +// 3. 
github.yaml (relative to registry) +func FindGitHubConfig(registryDir, specifiedPath string) (string, error) { + if specifiedPath != "" { + if coreio.Local.IsFile(specifiedPath) { + return specifiedPath, nil + } + return "", fmt.Errorf("config file not found: %s", specifiedPath) + } + + // Search in common locations (using filepath.Join for OS-portable paths) + candidates := []string{ + filepath.Join(registryDir, ".core", "github.yaml"), + filepath.Join(registryDir, "github.yaml"), + } + + for _, path := range candidates { + if coreio.Local.IsFile(path) { + return path, nil + } + } + + return "", fmt.Errorf("github.yaml not found in %s/.core/ or %s/", registryDir, registryDir) +} + +// Validate checks the configuration for errors. +func (c *GitHubConfig) Validate() error { + if c.Version != 1 { + return fmt.Errorf("unsupported config version: %d (expected 1)", c.Version) + } + + // Validate labels + for i, label := range c.Labels { + if label.Name == "" { + return fmt.Errorf("label %d: name is required", i+1) + } + if label.Color == "" { + return fmt.Errorf("label %q: color is required", label.Name) + } + // Validate color format (hex without #) + if !isValidHexColor(label.Color) { + return fmt.Errorf("label %q: invalid color %q (expected 6-digit hex without #)", label.Name, label.Color) + } + } + + // Validate webhooks (skip those with empty URLs - allows optional webhooks via env vars) + for name, wh := range c.Webhooks { + if wh.URL == "" { + // Empty URL is allowed - webhook will be skipped during sync + continue + } + if len(wh.Events) == 0 { + return fmt.Errorf("webhook %q: at least one event is required", name) + } + } + + // Validate branch protection + for i, bp := range c.BranchProtection { + if bp.Branch == "" { + return fmt.Errorf("branch_protection %d: branch is required", i+1) + } + } + + return nil +} + +// isValidHexColor checks if a string is a valid 6-digit hex color (without #). 
// isValidHexColor reports whether color is exactly six hex digits (no '#').
func isValidHexColor(color string) bool {
	if len(color) != 6 {
		return false
	}
	for _, r := range strings.ToLower(color) {
		if !strings.ContainsRune("0123456789abcdef", r) {
			return false
		}
	}
	return true
}

// github_diff.go provides change tracking for dry-run output.

// ChangeType indicates the type of change being made.
type ChangeType string

// Change type constants for GitHub configuration diffs.
const (
	// ChangeCreate indicates a new resource to be created.
	ChangeCreate ChangeType = "create"
	// ChangeUpdate indicates an existing resource to be updated.
	ChangeUpdate ChangeType = "update"
	// ChangeDelete indicates a resource to be deleted.
	ChangeDelete ChangeType = "delete"
	// ChangeSkip indicates a resource that requires no changes.
	ChangeSkip ChangeType = "skip"
)

// ChangeCategory groups changes by type.
type ChangeCategory string

// Change category constants for grouping GitHub configuration changes.
const (
	// CategoryLabel indicates label-related changes.
	CategoryLabel ChangeCategory = "label"
	// CategoryWebhook indicates webhook-related changes.
	CategoryWebhook ChangeCategory = "webhook"
	// CategoryProtection indicates branch protection changes.
	CategoryProtection ChangeCategory = "protection"
	// CategorySecurity indicates security settings changes.
	CategorySecurity ChangeCategory = "security"
)

// Change represents a single change to be made.
type Change struct {
	Category    ChangeCategory
	Type        ChangeType
	Name        string
	Description string
	Details     map[string]string // key-value details about the change
}

// ChangeSet tracks all changes for a repository.
type ChangeSet struct {
	Repo    string
	Changes []Change
}

// NewChangeSet creates an empty change set for a repository.
func NewChangeSet(repo string) *ChangeSet {
	return &ChangeSet{Repo: repo, Changes: make([]Change, 0)}
}

// Add records a change with no extra details.
func (cs *ChangeSet) Add(category ChangeCategory, changeType ChangeType, name, description string) {
	cs.AddWithDetails(category, changeType, name, description, make(map[string]string))
}

// AddWithDetails records a change together with key-value details.
func (cs *ChangeSet) AddWithDetails(category ChangeCategory, changeType ChangeType, name, description string, details map[string]string) {
	cs.Changes = append(cs.Changes, Change{
		Category:    category,
		Type:        changeType,
		Name:        name,
		Description: description,
		Details:     details,
	})
}

// HasChanges reports whether the set contains any non-skip change.
func (cs *ChangeSet) HasChanges() bool {
	for _, change := range cs.Changes {
		if change.Type != ChangeSkip {
			return true
		}
	}
	return false
}

// Count returns the number of changes by type.
func (cs *ChangeSet) Count() (creates, updates, deletes, skips int) {
	tally := make(map[ChangeType]int, 4)
	for _, change := range cs.Changes {
		tally[change.Type]++
	}
	return tally[ChangeCreate], tally[ChangeUpdate], tally[ChangeDelete], tally[ChangeSkip]
}

// CountByCategory returns the non-skip changes grouped by category.
func (cs *ChangeSet) CountByCategory() map[ChangeCategory]int {
	counts := make(map[ChangeCategory]int)
	for _, change := range cs.Changes {
		if change.Type != ChangeSkip {
			counts[change.Category]++
		}
	}
	return counts
}
// Print outputs the change set to the console: a repo header, a one-line
// +N/~N/-N summary, and (in verbose mode) a per-category breakdown.
// NOTE(review): leading-space widths inside the format strings may have been
// collapsed during extraction — confirm indentation against the original file.
func (cs *ChangeSet) Print(verbose bool) {
	creates, updates, deletes, skips := cs.Count()

	// Print header
	fmt.Printf("\n%s %s\n", dimStyle.Render(i18n.Label("repo")), repoNameStyle.Render(cs.Repo))

	if !cs.HasChanges() {
		fmt.Printf(" %s\n", dimStyle.Render(i18n.T("cmd.setup.github.no_changes")))
		return
	}

	// Print summary: +creates ~updates -deletes, plus =skips when verbose.
	var parts []string
	if creates > 0 {
		parts = append(parts, successStyle.Render(fmt.Sprintf("+%d", creates)))
	}
	if updates > 0 {
		parts = append(parts, warningStyle.Render(fmt.Sprintf("~%d", updates)))
	}
	if deletes > 0 {
		parts = append(parts, errorStyle.Render(fmt.Sprintf("-%d", deletes)))
	}
	if skips > 0 && verbose {
		parts = append(parts, dimStyle.Render(fmt.Sprintf("=%d", skips)))
	}
	fmt.Printf(" %s\n", strings.Join(parts, " "))

	// Print details if verbose
	if verbose {
		cs.printByCategory(CategoryLabel, "Labels")
		cs.printByCategory(CategoryWebhook, "Webhooks")
		cs.printByCategory(CategoryProtection, "Branch protection")
		cs.printByCategory(CategorySecurity, "Security")
	}
}

// printByCategory prints the non-skip changes of one category under a title,
// one line per change (icon + name + optional description), followed by the
// change's key-value details sorted by key.
func (cs *ChangeSet) printByCategory(category ChangeCategory, title string) {
	var categoryChanges []Change
	for _, c := range cs.Changes {
		if c.Category == category && c.Type != ChangeSkip {
			categoryChanges = append(categoryChanges, c)
		}
	}

	if len(categoryChanges) == 0 {
		return
	}

	fmt.Printf("\n %s:\n", dimStyle.Render(title))
	for _, c := range categoryChanges {
		icon := getChangeIcon(c.Type)
		style := getChangeStyle(c.Type)
		fmt.Printf(" %s %s", style.Render(icon), c.Name)
		if c.Description != "" {
			fmt.Printf(" %s", dimStyle.Render(c.Description))
		}
		fmt.Println()

		// Print details (sorted for deterministic output)
		keys := make([]string, 0, len(c.Details))
		for k := range c.Details {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			fmt.Printf(" %s: %s\n", dimStyle.Render(k), c.Details[k])
		}
	}
}

// getChangeIcon maps a change type to its one-character console marker.
func getChangeIcon(t ChangeType) string {
	switch t {
	case ChangeCreate:
		return "+"
	case ChangeUpdate:
		return "~"
	case ChangeDelete:
		return "-"
	default:
		return "="
	}
}

// getChangeStyle maps a change type to the console style used to render it.
func getChangeStyle(t ChangeType) *cli.AnsiStyle {
	switch t {
	case ChangeCreate:
		return successStyle
	case ChangeUpdate:
		return warningStyle
	case ChangeDelete:
		return errorStyle
	default:
		return dimStyle
	}
}

// Aggregate combines multiple change sets into a summary.
type Aggregate struct {
	Sets []*ChangeSet
}

// NewAggregate creates a new, empty aggregate.
func NewAggregate() *Aggregate {
	return &Aggregate{
		Sets: make([]*ChangeSet, 0),
	}
}

// Add adds a change set to the aggregate.
func (a *Aggregate) Add(cs *ChangeSet) {
	a.Sets = append(a.Sets, cs)
}

// TotalChanges returns the total number of changes (by type) across all sets.
func (a *Aggregate) TotalChanges() (creates, updates, deletes, skips int) {
	for _, cs := range a.Sets {
		c, u, d, s := cs.Count()
		creates += c
		updates += u
		deletes += d
		skips += s
	}
	return
}

// ReposWithChanges returns the number of repos that have non-skip changes.
func (a *Aggregate) ReposWithChanges() int {
	count := 0
	for _, cs := range a.Sets {
		if cs.HasChanges() {
			count++
		}
	}
	return count
}
// PrintSummary outputs the aggregate summary: repos checked, repos with
// changes, and totals to create/update/delete. Skips are not reported here.
func (a *Aggregate) PrintSummary() {
	creates, updates, deletes, _ := a.TotalChanges()
	reposWithChanges := a.ReposWithChanges()

	fmt.Println()
	fmt.Printf("%s\n", dimStyle.Render(i18n.Label("summary")))
	fmt.Printf(" %s: %d\n", i18n.T("cmd.setup.github.repos_checked"), len(a.Sets))

	if reposWithChanges == 0 {
		fmt.Printf(" %s\n", dimStyle.Render(i18n.T("cmd.setup.github.all_up_to_date")))
		return
	}

	fmt.Printf(" %s: %d\n", i18n.T("cmd.setup.github.repos_with_changes"), reposWithChanges)
	if creates > 0 {
		fmt.Printf(" %s: %s\n", i18n.T("cmd.setup.github.to_create"), successStyle.Render(fmt.Sprintf("%d", creates)))
	}
	if updates > 0 {
		fmt.Printf(" %s: %s\n", i18n.T("cmd.setup.github.to_update"), warningStyle.Render(fmt.Sprintf("%d", updates)))
	}
	if deletes > 0 {
		fmt.Printf(" %s: %s\n", i18n.T("cmd.setup.github.to_delete"), errorStyle.Render(fmt.Sprintf("%d", deletes)))
	}
}

// github_labels.go implements GitHub label synchronization.
//
// Uses the gh CLI for label operations:
//   - gh label list --repo {repo} --json name,color,description
//   - gh label create --repo {repo} {name} --color {color} --description {desc}
//   - gh label edit --repo {repo} {name} --color {color} --description {desc}

// GitHubLabel represents a label as returned by the GitHub API.
type GitHubLabel struct {
	Name        string `json:"name"`
	Color       string `json:"color"` // 6-digit hex without '#'
	Description string `json:"description"`
}
+func ListLabels(repoFullName string) ([]GitHubLabel, error) { + args := []string{ + "label", "list", + "--repo", repoFullName, + "--json", "name,color,description", + "--limit", "200", + } + + cmd := exec.Command("gh", args...) + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return nil, cli.Err("%s", strings.TrimSpace(string(exitErr.Stderr))) + } + return nil, err + } + + var labels []GitHubLabel + if err := json.Unmarshal(output, &labels); err != nil { + return nil, err + } + + return labels, nil +} + +// CreateLabel creates a new label in a repository. +func CreateLabel(repoFullName string, label LabelConfig) error { + args := []string{ + "label", "create", + "--repo", repoFullName, + label.Name, + "--color", label.Color, + } + + if label.Description != "" { + args = append(args, "--description", label.Description) + } + + cmd := exec.Command("gh", args...) + output, err := cmd.CombinedOutput() + if err != nil { + return cli.Err("%s", strings.TrimSpace(string(output))) + } + return nil +} + +// EditLabel updates an existing label in a repository. +func EditLabel(repoFullName string, label LabelConfig) error { + args := []string{ + "label", "edit", + "--repo", repoFullName, + label.Name, + "--color", label.Color, + } + + if label.Description != "" { + args = append(args, "--description", label.Description) + } + + cmd := exec.Command("gh", args...) + output, err := cmd.CombinedOutput() + if err != nil { + return cli.Err("%s", strings.TrimSpace(string(output))) + } + return nil +} + +// SyncLabels synchronizes labels for a repository. +// Returns a ChangeSet describing what was changed (or would be changed in dry-run mode). 
+func SyncLabels(repoFullName string, config *GitHubConfig, dryRun bool) (*ChangeSet, error) { + changes := NewChangeSet(repoFullName) + + // Get existing labels + existing, err := ListLabels(repoFullName) + if err != nil { + return nil, cli.Wrap(err, "failed to list labels") + } + + // Build lookup map + existingMap := make(map[string]GitHubLabel) + for _, label := range existing { + existingMap[strings.ToLower(label.Name)] = label + } + + // Process each configured label + for _, wantLabel := range config.Labels { + key := strings.ToLower(wantLabel.Name) + existing, exists := existingMap[key] + + if !exists { + // Create new label + changes.Add(CategoryLabel, ChangeCreate, wantLabel.Name, wantLabel.Description) + if !dryRun { + if err := CreateLabel(repoFullName, wantLabel); err != nil { + return changes, cli.Wrap(err, "failed to create label "+wantLabel.Name) + } + } + continue + } + + // Check if update is needed + needsUpdate := false + details := make(map[string]string) + + if !strings.EqualFold(existing.Color, wantLabel.Color) { + needsUpdate = true + details["color"] = existing.Color + " -> " + wantLabel.Color + } + if existing.Description != wantLabel.Description { + needsUpdate = true + details["description"] = "updated" + } + + if needsUpdate { + changes.AddWithDetails(CategoryLabel, ChangeUpdate, wantLabel.Name, "", details) + if !dryRun { + if err := EditLabel(repoFullName, wantLabel); err != nil { + return changes, cli.Wrap(err, "failed to update label "+wantLabel.Name) + } + } + } else { + changes.Add(CategoryLabel, ChangeSkip, wantLabel.Name, "up to date") + } + } + + return changes, nil +} diff --git a/cmd/setup/github_protection.go b/cmd/setup/github_protection.go new file mode 100644 index 0000000..f421be1 --- /dev/null +++ b/cmd/setup/github_protection.go @@ -0,0 +1,299 @@ +// github_protection.go implements GitHub branch protection synchronization. 
// github_protection.go implements GitHub branch protection synchronization.
//
// Uses the gh api command for branch protection operations:
//   - gh api repos/{owner}/{repo}/branches/{branch}/protection --method GET
//   - gh api repos/{owner}/{repo}/branches/{branch}/protection --method PUT

// GitHubBranchProtection represents branch protection rules from the GitHub API.
// Every field is a pointer: nil means the corresponding rule is absent from
// the API response (callers treat nil as "not enabled").
type GitHubBranchProtection struct {
	RequiredStatusChecks           *RequiredStatusChecks           `json:"required_status_checks"`
	RequiredPullRequestReviews     *RequiredPullRequestReviews     `json:"required_pull_request_reviews"`
	EnforceAdmins                  *EnforceAdmins                  `json:"enforce_admins"`
	RequiredLinearHistory          *RequiredLinearHistory          `json:"required_linear_history"`
	AllowForcePushes               *AllowForcePushes               `json:"allow_force_pushes"`
	AllowDeletions                 *AllowDeletions                 `json:"allow_deletions"`
	RequiredConversationResolution *RequiredConversationResolution `json:"required_conversation_resolution"`
}

// RequiredStatusChecks defines required CI checks.
type RequiredStatusChecks struct {
	Strict   bool     `json:"strict"`   // branch must be up to date with base before merging
	Contexts []string `json:"contexts"` // names of the required check contexts
}

// RequiredPullRequestReviews defines review requirements.
type RequiredPullRequestReviews struct {
	DismissStaleReviews          bool `json:"dismiss_stale_reviews"`
	RequireCodeOwnerReviews      bool `json:"require_code_owner_reviews"`
	RequiredApprovingReviewCount int  `json:"required_approving_review_count"`
}

// EnforceAdmins indicates if admins are subject to rules.
type EnforceAdmins struct {
	Enabled bool `json:"enabled"`
}

// RequiredLinearHistory indicates if linear history is required.
type RequiredLinearHistory struct {
	Enabled bool `json:"enabled"`
}

// AllowForcePushes indicates if force pushes are allowed.
type AllowForcePushes struct {
	Enabled bool `json:"enabled"`
}
+type AllowDeletions struct { + Enabled bool `json:"enabled"` +} + +// RequiredConversationResolution indicates if conversation resolution is required. +type RequiredConversationResolution struct { + Enabled bool `json:"enabled"` +} + +// GetBranchProtection fetches branch protection rules for a branch. +func GetBranchProtection(repoFullName, branch string) (*GitHubBranchProtection, error) { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid repo format: %s", repoFullName) + } + + endpoint := fmt.Sprintf("repos/%s/%s/branches/%s/protection", parts[0], parts[1], branch) + cmd := exec.Command("gh", "api", endpoint) + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + stderr := strings.TrimSpace(string(exitErr.Stderr)) + // Branch protection not enabled returns 404 + if strings.Contains(stderr, "404") || strings.Contains(stderr, "Branch not protected") { + return nil, nil // No protection set + } + if strings.Contains(stderr, "403") { + return nil, cli.Err("insufficient permissions to manage branch protection (requires admin)") + } + return nil, cli.Err("%s", stderr) + } + return nil, err + } + + var protection GitHubBranchProtection + if err := json.Unmarshal(output, &protection); err != nil { + return nil, err + } + + return &protection, nil +} + +// SetBranchProtection sets branch protection rules for a branch. 
+func SetBranchProtection(repoFullName, branch string, config BranchProtectionConfig) error { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid repo format: %s", repoFullName) + } + + // Build the protection payload + payload := map[string]interface{}{ + "enforce_admins": config.EnforceAdmins, + "required_linear_history": config.RequireLinearHistory, + "allow_force_pushes": config.AllowForcePushes, + "allow_deletions": config.AllowDeletions, + "required_conversation_resolution": config.RequireConversationResolution, + } + + // Required pull request reviews + if config.RequiredReviews > 0 { + payload["required_pull_request_reviews"] = map[string]interface{}{ + "dismiss_stale_reviews": config.DismissStale, + "require_code_owner_reviews": config.RequireCodeOwnerReviews, + "required_approving_review_count": config.RequiredReviews, + } + } else { + payload["required_pull_request_reviews"] = nil + } + + // Required status checks + if len(config.RequiredStatusChecks) > 0 { + payload["required_status_checks"] = map[string]interface{}{ + "strict": true, + "contexts": config.RequiredStatusChecks, + } + } else { + payload["required_status_checks"] = nil + } + + // Restrictions (required but can be empty for non-org repos) + payload["restrictions"] = nil + + payloadJSON, err := json.Marshal(payload) + if err != nil { + return err + } + + endpoint := fmt.Sprintf("repos/%s/%s/branches/%s/protection", parts[0], parts[1], branch) + cmd := exec.Command("gh", "api", endpoint, "--method", "PUT", "--input", "-") + cmd.Stdin = strings.NewReader(string(payloadJSON)) + output, err := cmd.CombinedOutput() + if err != nil { + return cli.Err("%s", strings.TrimSpace(string(output))) + } + return nil +} + +// SyncBranchProtection synchronizes branch protection for a repository. 
// SyncBranchProtection synchronizes branch protection for a repository.
// For each configured branch it fetches the current protection, diffs it
// field-by-field against the desired config, and (unless dryRun) applies the
// full desired ruleset via SetBranchProtection when anything differs.
// Returns a ChangeSet describing the work done or planned.
func SyncBranchProtection(repoFullName string, config *GitHubConfig, dryRun bool) (*ChangeSet, error) {
	changes := NewChangeSet(repoFullName)

	// Skip if no branch protection configured
	if len(config.BranchProtection) == 0 {
		return changes, nil
	}

	// Process each configured branch
	for _, wantProtection := range config.BranchProtection {
		branch := wantProtection.Branch

		// Get existing protection (nil means the branch is unprotected).
		existing, err := GetBranchProtection(repoFullName, branch)
		if err != nil {
			// If permission denied, note it but don't fail
			if strings.Contains(err.Error(), "insufficient permissions") {
				changes.Add(CategoryProtection, ChangeSkip, branch, "insufficient permissions")
				continue
			}
			return nil, cli.Wrap(err, "failed to get protection for "+branch)
		}

		// Check if protection needs to be created or updated
		if existing == nil {
			// Create new protection
			changes.Add(CategoryProtection, ChangeCreate, branch, describeProtection(wantProtection))
			if !dryRun {
				if err := SetBranchProtection(repoFullName, branch, wantProtection); err != nil {
					return changes, cli.Wrap(err, "failed to set protection for "+branch)
				}
			}
			continue
		}

		// Compare and check if update is needed. Each differing field is
		// recorded in details as "old -> new" for the dry-run report.
		needsUpdate := false
		details := make(map[string]string)

		// Check required reviews (nil sub-struct means none required).
		existingReviews := 0
		existingDismissStale := false
		existingCodeOwner := false
		if existing.RequiredPullRequestReviews != nil {
			existingReviews = existing.RequiredPullRequestReviews.RequiredApprovingReviewCount
			existingDismissStale = existing.RequiredPullRequestReviews.DismissStaleReviews
			existingCodeOwner = existing.RequiredPullRequestReviews.RequireCodeOwnerReviews
		}

		if existingReviews != wantProtection.RequiredReviews {
			needsUpdate = true
			details["required_reviews"] = fmt.Sprintf("%d -> %d", existingReviews, wantProtection.RequiredReviews)
		}
		if existingDismissStale != wantProtection.DismissStale {
			needsUpdate = true
			details["dismiss_stale"] = fmt.Sprintf("%v -> %v", existingDismissStale, wantProtection.DismissStale)
		}
		if existingCodeOwner != wantProtection.RequireCodeOwnerReviews {
			needsUpdate = true
			details["code_owner_reviews"] = fmt.Sprintf("%v -> %v", existingCodeOwner, wantProtection.RequireCodeOwnerReviews)
		}

		// Check enforce admins
		existingEnforceAdmins := false
		if existing.EnforceAdmins != nil {
			existingEnforceAdmins = existing.EnforceAdmins.Enabled
		}
		if existingEnforceAdmins != wantProtection.EnforceAdmins {
			needsUpdate = true
			details["enforce_admins"] = fmt.Sprintf("%v -> %v", existingEnforceAdmins, wantProtection.EnforceAdmins)
		}

		// Check linear history
		existingLinear := false
		if existing.RequiredLinearHistory != nil {
			existingLinear = existing.RequiredLinearHistory.Enabled
		}
		if existingLinear != wantProtection.RequireLinearHistory {
			needsUpdate = true
			details["linear_history"] = fmt.Sprintf("%v -> %v", existingLinear, wantProtection.RequireLinearHistory)
		}

		// Check force pushes
		existingForcePush := false
		if existing.AllowForcePushes != nil {
			existingForcePush = existing.AllowForcePushes.Enabled
		}
		if existingForcePush != wantProtection.AllowForcePushes {
			needsUpdate = true
			details["allow_force_pushes"] = fmt.Sprintf("%v -> %v", existingForcePush, wantProtection.AllowForcePushes)
		}

		// Check deletions
		existingDeletions := false
		if existing.AllowDeletions != nil {
			existingDeletions = existing.AllowDeletions.Enabled
		}
		if existingDeletions != wantProtection.AllowDeletions {
			needsUpdate = true
			details["allow_deletions"] = fmt.Sprintf("%v -> %v", existingDeletions, wantProtection.AllowDeletions)
		}

		// Check required status checks.
		// NOTE(review): stringSliceEqual is defined elsewhere in this package;
		// if it compares order-sensitively, a reordered contexts list will
		// trigger a spurious update — confirm its semantics.
		var existingStatusChecks []string
		if existing.RequiredStatusChecks != nil {
			existingStatusChecks = existing.RequiredStatusChecks.Contexts
		}
		if !stringSliceEqual(existingStatusChecks, wantProtection.RequiredStatusChecks) {
			needsUpdate = true
			details["status_checks"] = fmt.Sprintf("%v -> %v", existingStatusChecks, wantProtection.RequiredStatusChecks)
		}

		if needsUpdate {
			changes.AddWithDetails(CategoryProtection, ChangeUpdate, branch, "", details)
			if !dryRun {
				if err := SetBranchProtection(repoFullName, branch, wantProtection); err != nil {
					return changes, cli.Wrap(err, "failed to update protection for "+branch)
				}
			}
		} else {
			changes.Add(CategoryProtection, ChangeSkip, branch, "up to date")
		}
	}

	return changes, nil
}

// describeProtection returns a human-readable description of protection rules
// (used as the description for newly created protection in the ChangeSet).
func describeProtection(p BranchProtectionConfig) string {
	var parts []string
	if p.RequiredReviews > 0 {
		parts = append(parts, fmt.Sprintf("%d review(s)", p.RequiredReviews))
	}
	if p.DismissStale {
		parts = append(parts, "dismiss stale")
	}
	if p.EnforceAdmins {
		parts = append(parts, "enforce admins")
	}
	if len(parts) == 0 {
		return "basic protection"
	}
	return strings.Join(parts, ", ")
}

// github_security.go implements GitHub security settings synchronization.
//
// Uses the gh api command for security settings:
//   - gh api repos/{owner}/{repo}/vulnerability-alerts --method GET (check if enabled)
//   - gh api repos/{owner}/{repo}/vulnerability-alerts --method PUT (enable)
//   - gh api repos/{owner}/{repo}/automated-security-fixes --method PUT (enable dependabot updates)
//   - gh api repos/{owner}/{repo} --method PATCH (security_and_analysis settings)
+type GitHubSecurityStatus struct { + DependabotAlerts bool + DependabotSecurityUpdates bool + SecretScanning bool + SecretScanningPushProtection bool +} + +// GitHubRepoResponse contains security-related fields from repo API. +type GitHubRepoResponse struct { + SecurityAndAnalysis *SecurityAndAnalysis `json:"security_and_analysis"` +} + +// SecurityAndAnalysis contains security feature settings. +type SecurityAndAnalysis struct { + SecretScanning *SecurityFeature `json:"secret_scanning"` + SecretScanningPushProtection *SecurityFeature `json:"secret_scanning_push_protection"` + DependabotSecurityUpdates *SecurityFeature `json:"dependabot_security_updates"` +} + +// SecurityFeature represents a single security feature status. +type SecurityFeature struct { + Status string `json:"status"` // "enabled" or "disabled" +} + +// GetSecuritySettings fetches current security settings for a repository. +func GetSecuritySettings(repoFullName string) (*GitHubSecurityStatus, error) { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid repo format: %s", repoFullName) + } + + status := &GitHubSecurityStatus{} + + // Check Dependabot alerts (vulnerability alerts) + endpoint := fmt.Sprintf("repos/%s/%s/vulnerability-alerts", parts[0], parts[1]) + cmd := exec.Command("gh", "api", endpoint, "--method", "GET") + _, err := cmd.Output() + if err == nil { + status.DependabotAlerts = true + } else if exitErr, ok := err.(*exec.ExitError); ok { + stderr := string(exitErr.Stderr) + // 404 means alerts are disabled, 204 means enabled + if strings.Contains(stderr, "403") { + return nil, cli.Err("insufficient permissions to check security settings") + } + // Other errors (like 404) mean alerts are disabled + status.DependabotAlerts = false + } + + // Get repo security_and_analysis settings + endpoint = fmt.Sprintf("repos/%s/%s", parts[0], parts[1]) + cmd = exec.Command("gh", "api", endpoint) + output, err := cmd.Output() + if err != nil { + if 
exitErr, ok := err.(*exec.ExitError); ok { + return nil, cli.Err("%s", strings.TrimSpace(string(exitErr.Stderr))) + } + return nil, err + } + + var repo GitHubRepoResponse + if err := json.Unmarshal(output, &repo); err != nil { + return nil, err + } + + if repo.SecurityAndAnalysis != nil { + if repo.SecurityAndAnalysis.SecretScanning != nil { + status.SecretScanning = repo.SecurityAndAnalysis.SecretScanning.Status == "enabled" + } + if repo.SecurityAndAnalysis.SecretScanningPushProtection != nil { + status.SecretScanningPushProtection = repo.SecurityAndAnalysis.SecretScanningPushProtection.Status == "enabled" + } + if repo.SecurityAndAnalysis.DependabotSecurityUpdates != nil { + status.DependabotSecurityUpdates = repo.SecurityAndAnalysis.DependabotSecurityUpdates.Status == "enabled" + } + } + + return status, nil +} + +// EnableDependabotAlerts enables Dependabot vulnerability alerts. +func EnableDependabotAlerts(repoFullName string) error { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid repo format: %s", repoFullName) + } + + endpoint := fmt.Sprintf("repos/%s/%s/vulnerability-alerts", parts[0], parts[1]) + cmd := exec.Command("gh", "api", endpoint, "--method", "PUT") + output, err := cmd.CombinedOutput() + if err != nil { + return cli.Err("%s", strings.TrimSpace(string(output))) + } + return nil +} + +// EnableDependabotSecurityUpdates enables automated Dependabot security updates. 
+func EnableDependabotSecurityUpdates(repoFullName string) error { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid repo format: %s", repoFullName) + } + + endpoint := fmt.Sprintf("repos/%s/%s/automated-security-fixes", parts[0], parts[1]) + cmd := exec.Command("gh", "api", endpoint, "--method", "PUT") + output, err := cmd.CombinedOutput() + if err != nil { + return cli.Err("%s", strings.TrimSpace(string(output))) + } + return nil +} + +// DisableDependabotSecurityUpdates disables automated Dependabot security updates. +func DisableDependabotSecurityUpdates(repoFullName string) error { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid repo format: %s", repoFullName) + } + + endpoint := fmt.Sprintf("repos/%s/%s/automated-security-fixes", parts[0], parts[1]) + cmd := exec.Command("gh", "api", endpoint, "--method", "DELETE") + output, err := cmd.CombinedOutput() + if err != nil { + return cli.Err("%s", strings.TrimSpace(string(output))) + } + return nil +} + +// UpdateSecurityAndAnalysis updates security_and_analysis settings. 
+func UpdateSecurityAndAnalysis(repoFullName string, secretScanning, pushProtection bool) error { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid repo format: %s", repoFullName) + } + + // Build the payload + payload := map[string]interface{}{ + "security_and_analysis": map[string]interface{}{ + "secret_scanning": map[string]string{ + "status": boolToStatus(secretScanning), + }, + "secret_scanning_push_protection": map[string]string{ + "status": boolToStatus(pushProtection), + }, + }, + } + + payloadJSON, err := json.Marshal(payload) + if err != nil { + return err + } + + endpoint := fmt.Sprintf("repos/%s/%s", parts[0], parts[1]) + cmd := exec.Command("gh", "api", endpoint, "--method", "PATCH", "--input", "-") + cmd.Stdin = strings.NewReader(string(payloadJSON)) + output, err := cmd.CombinedOutput() + if err != nil { + errStr := strings.TrimSpace(string(output)) + // Some repos (private without GHAS) don't support these features + if strings.Contains(errStr, "secret scanning") || strings.Contains(errStr, "not available") { + return nil // Silently skip unsupported features + } + return cli.Err("%s", errStr) + } + return nil +} + +func boolToStatus(b bool) string { + if b { + return "enabled" + } + return "disabled" +} + +// SyncSecuritySettings synchronizes security settings for a repository. 
// SyncSecuritySettings synchronizes security settings for a repository.
// It diffs the desired config against GetSecuritySettings and, unless dryRun,
// applies each change. Returns a ChangeSet describing the work done/planned.
func SyncSecuritySettings(repoFullName string, config *GitHubConfig, dryRun bool) (*ChangeSet, error) {
	changes := NewChangeSet(repoFullName)

	// Get current settings
	existing, err := GetSecuritySettings(repoFullName)
	if err != nil {
		// If permission denied, note it but don't fail
		if strings.Contains(err.Error(), "insufficient permissions") {
			changes.Add(CategorySecurity, ChangeSkip, "all", "insufficient permissions")
			return changes, nil
		}
		return nil, cli.Wrap(err, "failed to get security settings")
	}

	wantConfig := config.Security

	// Check Dependabot alerts. Disabling is not supported via the API, so a
	// want=false / have=true mismatch is recorded as a skip.
	if wantConfig.DependabotAlerts && !existing.DependabotAlerts {
		changes.Add(CategorySecurity, ChangeCreate, "dependabot_alerts", "enable")
		if !dryRun {
			if err := EnableDependabotAlerts(repoFullName); err != nil {
				return changes, cli.Wrap(err, "failed to enable dependabot alerts")
			}
		}
	} else if !wantConfig.DependabotAlerts && existing.DependabotAlerts {
		changes.Add(CategorySecurity, ChangeSkip, "dependabot_alerts", "cannot disable via API")
	} else {
		changes.Add(CategorySecurity, ChangeSkip, "dependabot_alerts", "up to date")
	}

	// Check Dependabot security updates
	if wantConfig.DependabotSecurityUpdates && !existing.DependabotSecurityUpdates {
		changes.Add(CategorySecurity, ChangeCreate, "dependabot_security_updates", "enable")
		if !dryRun {
			if err := EnableDependabotSecurityUpdates(repoFullName); err != nil {
				// This might fail if alerts aren't enabled first
				return changes, cli.Wrap(err, "failed to enable dependabot security updates")
			}
		}
	} else if !wantConfig.DependabotSecurityUpdates && existing.DependabotSecurityUpdates {
		changes.Add(CategorySecurity, ChangeDelete, "dependabot_security_updates", "disable")
		if !dryRun {
			if err := DisableDependabotSecurityUpdates(repoFullName); err != nil {
				return changes, cli.Wrap(err, "failed to disable dependabot security updates")
			}
		}
	} else {
		changes.Add(CategorySecurity, ChangeSkip, "dependabot_security_updates", "up to date")
	}

	// Check secret scanning and push protection; both are applied together
	// via a single security_and_analysis PATCH below.
	needsSecurityUpdate := false
	if wantConfig.SecretScanning != existing.SecretScanning {
		needsSecurityUpdate = true
		if wantConfig.SecretScanning {
			changes.Add(CategorySecurity, ChangeCreate, "secret_scanning", "enable")
		} else {
			changes.Add(CategorySecurity, ChangeDelete, "secret_scanning", "disable")
		}
	} else {
		changes.Add(CategorySecurity, ChangeSkip, "secret_scanning", "up to date")
	}

	if wantConfig.SecretScanningPushProtection != existing.SecretScanningPushProtection {
		needsSecurityUpdate = true
		if wantConfig.SecretScanningPushProtection {
			changes.Add(CategorySecurity, ChangeCreate, "push_protection", "enable")
		} else {
			changes.Add(CategorySecurity, ChangeDelete, "push_protection", "disable")
		}
	} else {
		changes.Add(CategorySecurity, ChangeSkip, "push_protection", "up to date")
	}

	// Apply security_and_analysis changes
	if needsSecurityUpdate && !dryRun {
		if err := UpdateSecurityAndAnalysis(repoFullName, wantConfig.SecretScanning, wantConfig.SecretScanningPushProtection); err != nil {
			// Don't fail on unsupported features.
			// NOTE(review): UpdateSecurityAndAnalysis already returns nil for
			// "not available" errors, so this guard looks redundant — confirm.
			if !strings.Contains(err.Error(), "not available") {
				return changes, cli.Wrap(err, "failed to update security settings")
			}
		}
	}

	return changes, nil
}

// github_webhooks.go implements GitHub webhook synchronization.
//
// Uses the gh api command for webhook operations:
//   - gh api repos/{owner}/{repo}/hooks --method GET
//   - gh api repos/{owner}/{repo}/hooks --method POST
+type GitHubWebhook struct { + ID int `json:"id"` + Name string `json:"name"` + Active bool `json:"active"` + Events []string `json:"events"` + Config GitHubWebhookConfig `json:"config"` +} + +// GitHubWebhookConfig contains webhook configuration details. +type GitHubWebhookConfig struct { + URL string `json:"url"` + ContentType string `json:"content_type"` + InsecureSSL string `json:"insecure_ssl"` +} + +// ListWebhooks fetches all webhooks for a repository. +func ListWebhooks(repoFullName string) ([]GitHubWebhook, error) { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid repo format: %s", repoFullName) + } + + endpoint := fmt.Sprintf("repos/%s/%s/hooks", parts[0], parts[1]) + cmd := exec.Command("gh", "api", endpoint) + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + stderr := strings.TrimSpace(string(exitErr.Stderr)) + // Check for permission error + if strings.Contains(stderr, "Must have admin rights") || strings.Contains(stderr, "403") { + return nil, cli.Err("insufficient permissions to manage webhooks (requires admin)") + } + return nil, cli.Err("%s", stderr) + } + return nil, err + } + + var hooks []GitHubWebhook + if err := json.Unmarshal(output, &hooks); err != nil { + return nil, err + } + + return hooks, nil +} + +// CreateWebhook creates a new webhook in a repository. 
+func CreateWebhook(repoFullName string, name string, config WebhookConfig) error { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid repo format: %s", repoFullName) + } + + // Build the webhook payload + payload := map[string]interface{}{ + "name": "web", + "active": true, + "events": config.Events, + "config": map[string]interface{}{ + "url": config.URL, + "content_type": config.ContentType, + "insecure_ssl": "0", + }, + } + + if config.Active != nil { + payload["active"] = *config.Active + } + + if config.Secret != "" { + configMap := payload["config"].(map[string]interface{}) + configMap["secret"] = config.Secret + } + + payloadJSON, err := json.Marshal(payload) + if err != nil { + return err + } + + endpoint := fmt.Sprintf("repos/%s/%s/hooks", parts[0], parts[1]) + cmd := exec.Command("gh", "api", endpoint, "--method", "POST", "--input", "-") + cmd.Stdin = strings.NewReader(string(payloadJSON)) + output, err := cmd.CombinedOutput() + if err != nil { + return cli.Err("%s", strings.TrimSpace(string(output))) + } + return nil +} + +// UpdateWebhook updates an existing webhook. 
+func UpdateWebhook(repoFullName string, hookID int, config WebhookConfig) error { + parts := strings.Split(repoFullName, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid repo format: %s", repoFullName) + } + + payload := map[string]interface{}{ + "active": true, + "events": config.Events, + "config": map[string]interface{}{ + "url": config.URL, + "content_type": config.ContentType, + "insecure_ssl": "0", + }, + } + + if config.Active != nil { + payload["active"] = *config.Active + } + + if config.Secret != "" { + configMap := payload["config"].(map[string]interface{}) + configMap["secret"] = config.Secret + } + + payloadJSON, err := json.Marshal(payload) + if err != nil { + return err + } + + endpoint := fmt.Sprintf("repos/%s/%s/hooks/%d", parts[0], parts[1], hookID) + cmd := exec.Command("gh", "api", endpoint, "--method", "PATCH", "--input", "-") + cmd.Stdin = strings.NewReader(string(payloadJSON)) + output, err := cmd.CombinedOutput() + if err != nil { + return cli.Err("%s", strings.TrimSpace(string(output))) + } + return nil +} + +// SyncWebhooks synchronizes webhooks for a repository. +// Webhooks are matched by URL - if a webhook with the same URL exists, it's updated. +// Otherwise, a new webhook is created. 
+func SyncWebhooks(repoFullName string, config *GitHubConfig, dryRun bool) (*ChangeSet, error) { + changes := NewChangeSet(repoFullName) + + // Skip if no webhooks configured + if len(config.Webhooks) == 0 { + return changes, nil + } + + // Get existing webhooks + existing, err := ListWebhooks(repoFullName) + if err != nil { + // If permission denied, note it but don't fail entirely + if strings.Contains(err.Error(), "insufficient permissions") { + changes.Add(CategoryWebhook, ChangeSkip, "all", "insufficient permissions") + return changes, nil + } + return nil, cli.Wrap(err, "failed to list webhooks") + } + + // Build lookup map by URL + existingByURL := make(map[string]GitHubWebhook) + for _, hook := range existing { + existingByURL[hook.Config.URL] = hook + } + + // Process each configured webhook + for name, wantHook := range config.Webhooks { + // Skip webhooks with empty URLs (env var not set) + if wantHook.URL == "" { + changes.Add(CategoryWebhook, ChangeSkip, name, "URL not configured") + continue + } + + existingHook, exists := existingByURL[wantHook.URL] + + if !exists { + // Create new webhook + changes.Add(CategoryWebhook, ChangeCreate, name, wantHook.URL) + if !dryRun { + if err := CreateWebhook(repoFullName, name, wantHook); err != nil { + return changes, cli.Wrap(err, "failed to create webhook "+name) + } + } + continue + } + + // Check if update is needed + needsUpdate := false + details := make(map[string]string) + + // Check events + if !stringSliceEqual(existingHook.Events, wantHook.Events) { + needsUpdate = true + details["events"] = fmt.Sprintf("%v -> %v", existingHook.Events, wantHook.Events) + } + + // Check content type + if existingHook.Config.ContentType != wantHook.ContentType { + needsUpdate = true + details["content_type"] = fmt.Sprintf("%s -> %s", existingHook.Config.ContentType, wantHook.ContentType) + } + + // Check active state + wantActive := true + if wantHook.Active != nil { + wantActive = *wantHook.Active + } + if 
existingHook.Active != wantActive { + needsUpdate = true + details["active"] = fmt.Sprintf("%v -> %v", existingHook.Active, wantActive) + } + + if needsUpdate { + changes.AddWithDetails(CategoryWebhook, ChangeUpdate, name, "", details) + if !dryRun { + if err := UpdateWebhook(repoFullName, existingHook.ID, wantHook); err != nil { + return changes, cli.Wrap(err, "failed to update webhook "+name) + } + } + } else { + changes.Add(CategoryWebhook, ChangeSkip, name, "up to date") + } + } + + return changes, nil +} + +// stringSliceEqual compares two string slices for equality (order-independent). +// Uses frequency counting to properly handle duplicates. +func stringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + // Count frequencies in slice a + counts := make(map[string]int) + for _, s := range a { + counts[s]++ + } + // Decrement for each element in slice b + for _, s := range b { + counts[s]-- + if counts[s] < 0 { + return false + } + } + // All counts should be zero if slices are equal + for _, count := range counts { + if count != 0 { + return false + } + } + return true +}