Compare commits

..

78 commits
v0.3.2 ... dev

Author SHA1 Message Date
Snider
525d8b993b fix: migrate module paths from forge.lthn.ai to dappco.re
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 16:21:14 +01:00
Virgil
7b0f800e08 fix(lint): skip hidden configured file paths
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 13:06:47 +00:00
Virgil
6ee67362ca refactor(lint): expand adapter helper names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:59:22 +00:00
Virgil
d1264dd88a fix(lint): exclude infra tools from language shortcuts
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:52:49 +00:00
Virgil
85dc5f75d0 refactor(lint): align naming with AX principles
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:48:12 +00:00
Virgil
10f89a83f2 fix(lint): add threshold summary to run failures
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:39:22 +00:00
Virgil
e3ae8caae2 refactor(lint): expand CLI flag names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:29:58 +00:00
Virgil
a567b72b18 docs(lint): add AX usage examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:24:29 +00:00
Virgil
602ea8bec0 docs(lint): add AX usage examples to orchestration types
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:19:35 +00:00
Virgil
63c4c51f21 fix(lint): skip hidden dirs in scanner
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:14:51 +00:00
Virgil
86ec27ca03 refactor(lint): expand service names for AX clarity
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:10:57 +00:00
Virgil
c7d6db8ee2 feat(lint): add sarif output for catalog checks
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:59:46 +00:00
Virgil
19f098cf43 fix(lint): preserve explicit empty file scopes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:53:19 +00:00
Virgil
b24021b8f8 refactor(lint): short-circuit explicit output resolution
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:47:18 +00:00
Virgil
5da4a1dbd1 fix(lint): preserve explicit empty file scopes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:43:25 +00:00
Virgil
71529076b3 refactor(lint): centralise run output resolution
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:37:01 +00:00
Virgil
a26a4e1301 refactor(lint): clarify orchestration names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:30:49 +00:00
Virgil
48acea0ef4 refactor(lint): add semantic tracked comment API
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:18:16 +00:00
Virgil
1e1ed30d04 fix(lint): honour lang precedence over ci and sbom groups
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:14:33 +00:00
Virgil
7e32c0c21c fix(lint): normalise report output levels
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:07:50 +00:00
Virgil
e7b41af939 feat(lint): detect cpp source files
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 10:58:09 +00:00
Virgil
ebc2c04c3d fix(lint): normalise empty orchestration outputs
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 10:53:06 +00:00
Virgil
23c5d20b1b feat(lint): add named schedule presets
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 10:46:41 +00:00
Virgil
3db0553082 feat(lint): honour configured exclude paths
Co-authored-by: Virgil <virgil@lethean.io>
2026-04-01 10:38:57 +00:00
Virgil
54a82bfe1a feat(lint): honour configured scan paths
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 10:34:40 +00:00
Virgil
7c92d313a6 Fix hook mode with no staged files 2026-04-01 09:54:32 +00:00
Virgil
be7f9fe966 Improve lint tool inventory output 2026-04-01 07:19:43 +00:00
Virgil
c82c57748c Add core-lint files flag 2026-04-01 07:11:28 +00:00
Virgil
0f5648aba9 Improve lint API usage examples 2026-04-01 07:04:18 +00:00
Virgil
e9085f6489 Refine core-lint command naming 2026-04-01 06:57:15 +00:00
Virgil
4500d5eb80 Fix catalog scanner language detection 2026-04-01 06:49:10 +00:00
Virgil
7a86afbc65 Improve lint command AX ergonomics 2026-04-01 06:43:30 +00:00
Virgil
877a757d8c Preserve hook content on removal 2026-04-01 06:36:55 +00:00
Virgil
b8ee543bae Populate adapter versions in lint reports 2026-04-01 06:28:04 +00:00
Virgil
20875bc066 Deduplicate merged lint findings 2026-04-01 05:41:25 +00:00
Virgil
8798210a4d Add prettier lint adapter 2026-04-01 05:13:43 +00:00
Virgil
382fe209de chore: verify lint RFC implementation 2026-03-30 15:18:30 +00:00
Virgil
0b41f3caeb docs: clarify lint RFC stream semantics 2026-03-30 15:10:39 +00:00
Virgil
4414aea2b0 docs: clarify lint RFC execution semantics 2026-03-30 15:05:08 +00:00
Virgil
6d226ce8e2 docs: clarify lint tool inventory contract 2026-03-30 14:59:41 +00:00
Virgil
e772a1f0f6 docs: clarify lint compliance shortcut semantics 2026-03-30 14:53:40 +00:00
Virgil
d5be05c578 docs: align lint RFC with AX standard 2026-03-30 14:32:40 +00:00
Virgil
8622e582ab docs: clarify lint RFC contract 2026-03-30 14:27:10 +00:00
Virgil
6ed4ab5ac5 docs: tighten lint RFC contract details 2026-03-30 14:20:32 +00:00
Virgil
0d9fbd7906 docs: rewrite lint RFC to match implementation 2026-03-30 14:14:58 +00:00
Virgil
7ab634bcd2 feat(ax): implement RFC lint orchestration CLI 2026-03-30 13:50:39 +00:00
Snider
119df680d7 docs: add lint RFC and AX RFC to repo docs for agent access
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-30 14:24:33 +01:00
Virgil
eec45af6cc fix(ax): harden structured output paths 2026-03-30 12:28:42 +00:00
Virgil
1660fe025f test(ax): sync health and security coverage 2026-03-30 12:20:10 +00:00
Virgil
7b2bb529e1 fix(ax): honour php security flags 2026-03-30 12:11:28 +00:00
Virgil
364b4b96de fix(ax): normalise audit and health machine output 2026-03-30 11:59:38 +00:00
Virgil
140d2b0583 test(cli): add artifact validation harnesses 2026-03-30 11:45:35 +00:00
Virgil
4a6f59b6fc fix(ax): fail total review and issue outages 2026-03-30 11:22:50 +00:00
Virgil
e05d7cf070 fix(ax): stabilise watch output
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-30 10:52:49 +00:00
Virgil
d5bc922325 fix(ax): stabilise map-derived ordering 2026-03-30 10:46:52 +00:00
Virgil
e1616a055d fix(ax): stabilise remaining qa output
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-30 10:41:49 +00:00
Virgil
30691b883c fix(ax): stabilise issue triage ordering
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-30 10:38:01 +00:00
Virgil
72b4fc4871 fix(ax): keep health summaries accurate under filters 2026-03-30 10:28:16 +00:00
Virgil
1f34ead44f fix(ax): make health and issues machine-friendly 2026-03-30 10:24:38 +00:00
Virgil
95c32c21ca fix(ax): preserve partial review results 2026-03-30 10:14:03 +00:00
Virgil
29a2722eda fix(ax): preserve docblock partial results 2026-03-30 10:05:38 +00:00
Virgil
8c8c6a9d2e fix(ax): clean php structured output modes 2026-03-30 08:14:46 +00:00
Virgil
6d202bb1d9 fix(qa): remove duplicated audit JSON helpers 2026-03-30 07:57:32 +00:00
Virgil
d9d7ae7ffa fix(ax): complete machine-friendly QA output updates 2026-03-30 07:55:44 +00:00
Virgil
3af8556d64 feat(qa): include fetch errors in issues/review JSON and continue on partial failures 2026-03-30 07:43:38 +00:00
Virgil
dfed5e3ab1 feat(qa): make php commands JSON-clean and deterministic 2026-03-30 07:43:29 +00:00
Virgil
a3648041ce feat(qa,lint): add deterministic and JSON-friendly command output 2026-03-30 07:43:29 +00:00
Virgil
cf9e43f0ad feat: improve qa output determinism and JSON modes 2026-03-30 07:43:29 +00:00
Virgil
aa57d1e09f feat(qa): align php command output with AX machine-friendly mode 2026-03-30 07:35:19 +00:00
Virgil
182f108d37 feat: align qa and lint outputs with agent experience 2026-03-30 07:21:21 +00:00
Snider
8ab944d0e7 chore: sync dependencies for v0.3.5
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 17:54:30 +00:00
Snider
876c65bd70 chore: sync dependencies for v0.3.4
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 17:49:52 +00:00
Snider
471266200e refactor: pass locales via RegisterCommands
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 01:38:32 +00:00
Snider
e7d469cc8d feat: embed and load locale translations on init
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 00:45:18 +00:00
Snider
94df217e84 feat: add en-GB locale file for QA commands
69 translation keys for qa, format, test, coverage, watch commands.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 00:33:04 +00:00
Snider
a001224b68 chore: sync dependencies for v0.3.3
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-16 22:18:05 +00:00
Snider
9681b062ac refactor: replace all remaining fmt.Errorf with coreerr.E from go-log
Replaces ~46 fmt.Errorf and errors.New calls in production code across
pkg/lint, pkg/php, cmd/core-lint, and cmd/qa with structured coreerr.E()
calls using the forge.lthn.ai/core/go-log convention.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-16 21:48:58 +00:00
Snider
a36f835fe0 refactor: replace os.* and fmt.Errorf with go-io/go-log conventions
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-16 19:44:45 +00:00
119 changed files with 8576 additions and 447 deletions

View file

@ -1,13 +1,17 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"strings"
"forge.lthn.ai/core/cli/pkg/cli"
lint "forge.lthn.ai/core/lint"
coreerr "forge.lthn.ai/core/go-log"
cataloglint "forge.lthn.ai/core/lint"
lintpkg "forge.lthn.ai/core/lint/pkg/lint"
)
@ -17,37 +21,258 @@ func main() {
}
func addLintCommands(root *cli.Command) {
lintCmd := cli.NewGroup("lint", "Pattern-based code linter", "")
addRFCCommands(root)
// ── check ──────────────────────────────────────────────────────────────
lintCmd := cli.NewGroup("lint", "Pattern-based code linter", "")
lintCmd.AddCommand(newCheckCommand(), newCatalogCommand())
addRFCCommands(lintCmd)
root.AddCommand(lintCmd)
}
// addRFCCommands wires the RFC lint orchestration commands onto parent:
// the generic run command, discovery helpers (detect, tools, init),
// per-language and per-category run shortcuts, and the git hook manager.
func addRFCCommands(parent *cli.Command) {
	commands := []*cli.Command{
		newRunCommand("run", "Run configured linters", lintpkg.RunInput{}),
		newDetectCommand("detect", "Detect project languages"),
		newToolsCommand("tools", "List supported linter tools"),
		newInitCommand("init", "Generate .core/lint.yaml"),
		newRunCommand("go", "Run Go linters", lintpkg.RunInput{Lang: "go"}),
		newRunCommand("php", "Run PHP linters", lintpkg.RunInput{Lang: "php"}),
		newRunCommand("js", "Run JS/TS linters", lintpkg.RunInput{Lang: "js"}),
		newRunCommand("python", "Run Python linters", lintpkg.RunInput{Lang: "python"}),
		newRunCommand("security", "Run security linters", lintpkg.RunInput{Category: "security"}),
		newRunCommand("compliance", "Run compliance linters", lintpkg.RunInput{Category: "compliance"}),
		newHookCommand(),
	}
	parent.AddCommand(commands...)
}
func newRunCommand(commandName string, summary string, defaults lintpkg.RunInput) *cli.Command {
var (
checkFormat string
checkLang string
checkSeverity string
outputFormat string
configPath string
scheduleName string
failOnLevel string
categoryName string
languageName string
filePaths []string
hookMode bool
ciMode bool
sbomMode bool
)
checkCmd := cli.NewCommand("check", "Scan files for pattern matches", "", func(cmd *cli.Command, args []string) error {
cat, err := lint.LoadEmbeddedCatalog()
if err != nil {
return fmt.Errorf("loading catalog: %w", err)
command := cli.NewCommand(commandName, summary, "", func(command *cli.Command, args []string) error {
input := defaults
input.Output = outputFormat
input.Config = configPath
input.Schedule = scheduleName
input.FailOn = failOnLevel
input.Category = categoryName
input.Lang = languageName
input.Files = filePaths
input.Hook = hookMode
input.CI = ciMode
input.SBOM = sbomMode
if len(args) > 0 {
input.Path = args[0]
}
if input.Path == "" {
input.Path = "."
}
rules := cat.Rules
resolvedOutputFormat, err := lintpkg.ResolveRunOutputFormat(input)
if err != nil {
return err
}
input.Output = resolvedOutputFormat
// Filter by language if specified.
if checkLang != "" {
rules = cat.ForLanguage(checkLang)
service := lintpkg.NewService()
report, err := service.Run(context.Background(), input)
if err != nil {
return err
}
if err := writeReport(command.OutOrStdout(), input.Output, report); err != nil {
return err
}
if !report.Summary.Passed {
return coreerr.E(
"cmd."+commandName,
fmt.Sprintf(
"lint failed (fail-on=%s): %d error(s), %d warning(s), %d info finding(s)",
input.FailOn,
report.Summary.Errors,
report.Summary.Warnings,
report.Summary.Info,
),
nil,
)
}
return nil
})
cli.StringFlag(command, &outputFormat, "output", "o", defaults.Output, "Output format: json, text, github, sarif")
cli.StringFlag(command, &configPath, "config", "c", defaults.Config, "Config path (default: .core/lint.yaml)")
cli.StringFlag(command, &scheduleName, "schedule", "", "", "Run a named schedule from the config")
cli.StringFlag(command, &failOnLevel, "fail-on", "", defaults.FailOn, "Fail threshold: error, warning, info")
cli.StringFlag(command, &categoryName, "category", "", defaults.Category, "Restrict to one category")
cli.StringFlag(command, &languageName, "lang", "l", defaults.Lang, "Restrict to one language")
cli.StringSliceFlag(command, &filePaths, "files", "", defaults.Files, "Restrict scanning to specific files")
cli.BoolFlag(command, &hookMode, "hook", "", defaults.Hook, "Run in pre-commit mode against staged files")
cli.BoolFlag(command, &ciMode, "ci", "", defaults.CI, "GitHub Actions mode (github annotations)")
cli.BoolFlag(command, &sbomMode, "sbom", "", defaults.SBOM, "Enable compliance/SBOM tools")
return command
}
// newDetectCommand builds the detect subcommand, which reports the
// languages detected in a project directory (optional first argument,
// default ".") either as plain text, one language per line, or as
// indented JSON. Unknown output formats produce a structured error.
func newDetectCommand(commandName string, summary string) *cli.Command {
	var outputFormat string
	command := cli.NewCommand(commandName, summary, "", func(command *cli.Command, args []string) error {
		root := "."
		if len(args) > 0 {
			root = args[0]
		}
		detected := lintpkg.Detect(root)
		if outputFormat == "json" {
			return writeIndentedJSON(command.OutOrStdout(), detected)
		}
		if outputFormat == "" || outputFormat == "text" {
			for _, name := range detected {
				fmt.Fprintln(command.OutOrStdout(), name)
			}
			return nil
		}
		return coreerr.E("cmd.detect", "unsupported output format "+outputFormat, nil)
	})
	cli.StringFlag(command, &outputFormat, "output", "o", "text", "Output format: text, json")
	return command
}
// newToolsCommand builds the tools subcommand, which lists the linter
// tools known to the service — name, category, PATH availability,
// supported languages, and any required entitlement — optionally
// restricted to a single language, as aligned text or indented JSON.
func newToolsCommand(commandName string, summary string) *cli.Command {
	var (
		outputFormat string
		langFilter   string
	)
	command := cli.NewCommand(commandName, summary, "", func(command *cli.Command, args []string) error {
		// An empty filter means "all languages" (nil slice).
		var scope []string
		if langFilter != "" {
			scope = []string{langFilter}
		}
		inventory := lintpkg.NewService().Tools(scope)
		switch outputFormat {
		case "json":
			return writeIndentedJSON(command.OutOrStdout(), inventory)
		case "", "text":
			for _, tool := range inventory {
				availability := "missing"
				if tool.Available {
					availability = "available"
				}
				row := fmt.Sprintf("%-14s [%-11s] %s langs=%s", tool.Name, tool.Category, availability, strings.Join(tool.Languages, ","))
				if tool.Entitlement != "" {
					row += " entitlement=" + tool.Entitlement
				}
				fmt.Fprintln(command.OutOrStdout(), row)
			}
			return nil
		default:
			return coreerr.E("cmd.tools", "unsupported output format "+outputFormat, nil)
		}
	})
	cli.StringFlag(command, &outputFormat, "output", "o", "text", "Output format: text, json")
	cli.StringFlag(command, &langFilter, "lang", "l", "", "Filter by language")
	return command
}
// newInitCommand builds the init subcommand, which writes a default
// lint config into the project (optional first argument, default ".")
// and prints the path that was written. --force overwrites an existing
// config.
func newInitCommand(commandName string, summary string) *cli.Command {
	var overwrite bool
	command := cli.NewCommand(commandName, summary, "", func(command *cli.Command, args []string) error {
		root := "."
		if len(args) > 0 {
			root = args[0]
		}
		configPath, err := lintpkg.NewService().WriteDefaultConfig(root, overwrite)
		if err != nil {
			return err
		}
		fmt.Fprintln(command.OutOrStdout(), configPath)
		return nil
	})
	cli.BoolFlag(command, &overwrite, "force", "f", false, "Overwrite an existing config")
	return command
}
// newHookCommand builds the hook command group with install and remove
// subcommands that manage the git pre-commit hook for a project
// (optional first argument, default "."). The two subcommands share the
// same shape and differ only in the service call and the confirmation
// word printed on success, so the common wiring lives in
// newHookActionCommand.
func newHookCommand() *cli.Command {
	hookCmd := cli.NewGroup("hook", "Install or remove the git pre-commit hook", "")
	installCmd := newHookActionCommand("install", "Install the pre-commit hook", "installed", func(projectPath string) error {
		return lintpkg.NewService().InstallHook(projectPath)
	})
	removeCmd := newHookActionCommand("remove", "Remove the pre-commit hook", "removed", func(projectPath string) error {
		return lintpkg.NewService().RemoveHook(projectPath)
	})
	hookCmd.AddCommand(installCmd, removeCmd)
	return hookCmd
}

// newHookActionCommand builds one hook subcommand: it resolves the
// project path from the optional first argument (default "."), runs
// action against it, and prints confirmation on success.
func newHookActionCommand(name string, summary string, confirmation string, action func(projectPath string) error) *cli.Command {
	return cli.NewCommand(name, summary, "", func(command *cli.Command, args []string) error {
		projectPath := "."
		if len(args) > 0 {
			projectPath = args[0]
		}
		if err := action(projectPath); err != nil {
			return err
		}
		fmt.Fprintln(command.OutOrStdout(), confirmation)
		return nil
	})
}
func newCheckCommand() *cli.Command {
var (
format string
language string
severity string
)
command := cli.NewCommand("check", "Scan files for pattern matches", "", func(command *cli.Command, args []string) error {
catalog, err := cataloglint.LoadEmbeddedCatalog()
if err != nil {
return coreerr.E("cmd.check", "loading catalog", err)
}
rules := catalog.Rules
if language != "" {
rules = catalog.ForLanguage(language)
if len(rules) == 0 {
fmt.Fprintf(os.Stderr, "no rules for language %q\n", checkLang)
fmt.Fprintf(os.Stderr, "no rules for language %q\n", language)
return nil
}
}
// Filter by severity threshold if specified.
if checkSeverity != "" {
filtered := (&lintpkg.Catalog{Rules: rules}).AtSeverity(checkSeverity)
if severity != "" {
filtered := (&lintpkg.Catalog{Rules: rules}).AtSeverity(severity)
if len(filtered) == 0 {
fmt.Fprintf(os.Stderr, "no rules at severity %q or above\n", checkSeverity)
fmt.Fprintf(os.Stderr, "no rules at severity %q or above\n", severity)
return nil
}
rules = filtered
@ -55,7 +280,7 @@ func addLintCommands(root *cli.Command) {
scanner, err := lintpkg.NewScanner(rules)
if err != nil {
return fmt.Errorf("creating scanner: %w", err)
return coreerr.E("cmd.check", "creating scanner", err)
}
paths := args
@ -63,110 +288,176 @@ func addLintCommands(root *cli.Command) {
paths = []string{"."}
}
var allFindings []lintpkg.Finding
for _, p := range paths {
info, err := os.Stat(p)
var findings []lintpkg.Finding
for _, path := range paths {
info, err := os.Stat(path)
if err != nil {
return fmt.Errorf("stat %s: %w", p, err)
return coreerr.E("cmd.check", "stat "+path, err)
}
var findings []lintpkg.Finding
if info.IsDir() {
findings, err = scanner.ScanDir(p)
} else {
findings, err = scanner.ScanFile(p)
pathFindings, err := scanner.ScanDir(path)
if err != nil {
return err
}
findings = append(findings, pathFindings...)
continue
}
pathFindings, err := scanner.ScanFile(path)
if err != nil {
return err
}
allFindings = append(allFindings, findings...)
findings = append(findings, pathFindings...)
}
switch checkFormat {
switch format {
case "json":
return lintpkg.WriteJSON(os.Stdout, allFindings)
return lintpkg.WriteJSON(command.OutOrStdout(), findings)
case "jsonl":
return lintpkg.WriteJSONL(os.Stdout, allFindings)
return lintpkg.WriteJSONL(command.OutOrStdout(), findings)
case "sarif":
report := lintpkg.Report{
Findings: findings,
Summary: lintpkg.Summarise(findings),
}
return lintpkg.WriteReportSARIF(command.OutOrStdout(), report)
default:
lintpkg.WriteText(os.Stdout, allFindings)
}
if len(allFindings) > 0 {
summary := lintpkg.Summarise(allFindings)
fmt.Fprintf(os.Stderr, "\n%d finding(s)", summary.Total)
var parts []string
for sev, count := range summary.BySeverity {
parts = append(parts, fmt.Sprintf("%d %s", count, sev))
lintpkg.WriteText(command.OutOrStdout(), findings)
if format == "text" && len(findings) > 0 {
writeCatalogSummary(command.OutOrStdout(), findings)
}
if len(parts) > 0 {
fmt.Fprintf(os.Stderr, " (%s)", strings.Join(parts, ", "))
}
fmt.Fprintln(os.Stderr)
return nil
}
return nil
})
cli.StringFlag(checkCmd, &checkFormat, "format", "f", "text", "Output format: text, json, jsonl")
cli.StringFlag(checkCmd, &checkLang, "lang", "l", "", "Filter rules by language (e.g. go, php, ts)")
cli.StringFlag(checkCmd, &checkSeverity, "severity", "s", "", "Minimum severity threshold (info, low, medium, high, critical)")
cli.StringFlag(command, &format, "format", "f", "text", "Output format: text, json, jsonl, sarif")
cli.StringFlag(command, &language, "lang", "l", "", "Filter rules by language")
cli.StringFlag(command, &severity, "severity", "s", "", "Minimum severity threshold (info, low, medium, high, critical)")
// ── catalog ────────────────────────────────────────────────────────────
return command
}
func newCatalogCommand() *cli.Command {
catalogCmd := cli.NewGroup("catalog", "Browse the pattern catalog", "")
// catalog list
var listLang string
listCmd := cli.NewCommand("list", "List all rules in the catalog", "", func(cmd *cli.Command, args []string) error {
cat, err := lint.LoadEmbeddedCatalog()
var listLanguage string
listCmd := cli.NewCommand("list", "List all rules in the catalog", "", func(command *cli.Command, args []string) error {
catalog, err := cataloglint.LoadEmbeddedCatalog()
if err != nil {
return fmt.Errorf("loading catalog: %w", err)
return coreerr.E("cmd.catalog.list", "loading catalog", err)
}
rules := cat.Rules
if listLang != "" {
rules = cat.ForLanguage(listLang)
rules := catalog.Rules
if listLanguage != "" {
rules = catalog.ForLanguage(listLanguage)
}
if len(rules) == 0 {
fmt.Println("No rules found.")
fmt.Fprintln(command.OutOrStdout(), "No rules found.")
return nil
}
for _, r := range rules {
fmt.Printf("%-14s [%-8s] %s\n", r.ID, r.Severity, r.Title)
rules = append([]lintpkg.Rule(nil), rules...)
sort.Slice(rules, func(left int, right int) bool {
if rules[left].Severity == rules[right].Severity {
return strings.Compare(rules[left].ID, rules[right].ID) < 0
}
return strings.Compare(rules[left].Severity, rules[right].Severity) < 0
})
for _, rule := range rules {
fmt.Fprintf(command.OutOrStdout(), "%-14s [%-8s] %s\n", rule.ID, rule.Severity, rule.Title)
}
fmt.Fprintf(os.Stderr, "\n%d rule(s)\n", len(rules))
return nil
})
cli.StringFlag(listCmd, &listLanguage, "lang", "l", "", "Filter by language")
cli.StringFlag(listCmd, &listLang, "lang", "l", "", "Filter by language")
// catalog show
showCmd := cli.NewCommand("show", "Show details of a specific rule", "", func(cmd *cli.Command, args []string) error {
showCmd := cli.NewCommand("show", "Show details of a specific rule", "", func(command *cli.Command, args []string) error {
if len(args) == 0 {
return fmt.Errorf("rule ID required")
return coreerr.E("cmd.catalog.show", "rule ID required", nil)
}
cat, err := lint.LoadEmbeddedCatalog()
catalog, err := cataloglint.LoadEmbeddedCatalog()
if err != nil {
return fmt.Errorf("loading catalog: %w", err)
return coreerr.E("cmd.catalog.show", "loading catalog", err)
}
r := cat.ByID(args[0])
if r == nil {
return fmt.Errorf("rule %q not found", args[0])
rule := catalog.ByID(args[0])
if rule == nil {
return coreerr.E("cmd.catalog.show", "rule "+args[0]+" not found", nil)
}
data, err := json.MarshalIndent(r, "", " ")
data, err := json.MarshalIndent(rule, "", " ")
if err != nil {
return err
}
fmt.Println(string(data))
fmt.Fprintf(command.OutOrStdout(), "%s\n", string(data))
return nil
})
catalogCmd.AddCommand(listCmd, showCmd)
lintCmd.AddCommand(checkCmd, catalogCmd)
root.AddCommand(lintCmd)
return catalogCmd
}
// writeReport renders report to writer in the requested output format.
// json and sarif delegate to writers that may themselves fail; text and
// github always succeed. Any other format yields a structured error.
func writeReport(writer io.Writer, output string, report lintpkg.Report) error {
	if output == "json" {
		return lintpkg.WriteReportJSON(writer, report)
	}
	if output == "sarif" {
		return lintpkg.WriteReportSARIF(writer, report)
	}
	if output == "text" {
		lintpkg.WriteReportText(writer, report)
		return nil
	}
	if output == "github" {
		lintpkg.WriteReportGitHub(writer, report)
		return nil
	}
	return coreerr.E("writeReport", "unsupported output format "+output, nil)
}
func writeIndentedJSON(writer io.Writer, value any) error {
encoder := json.NewEncoder(writer)
encoder.SetIndent("", " ")
return encoder.Encode(value)
}
// writeCatalogSummary prints a one-line severity breakdown for findings:
// a leading blank line, the total count, and per-severity counts in
// parentheses. Known severities appear in a fixed order; any other
// severity names present in the summary follow alphabetically. Zero
// counts are omitted.
func writeCatalogSummary(writer io.Writer, findings []lintpkg.Finding) {
	summary := lintpkg.Summarise(findings)
	fmt.Fprintf(writer, "\n%d finding(s)", summary.Total)
	preferredOrder := []string{"critical", "high", "medium", "low", "info", "error", "warning"}
	preferred := make(map[string]bool, len(preferredOrder))
	for _, name := range preferredOrder {
		preferred[name] = true
	}
	// Severities outside the preferred list are appended in sorted order
	// so the output stays deterministic.
	var leftovers []string
	for name := range summary.BySeverity {
		if !preferred[name] {
			leftovers = append(leftovers, name)
		}
	}
	sort.Strings(leftovers)
	ordered := append(append([]string(nil), preferredOrder...), leftovers...)
	var segments []string
	for _, name := range ordered {
		if count := summary.BySeverity[name]; count > 0 {
			segments = append(segments, fmt.Sprintf("%d %s", count, name))
		}
	}
	if len(segments) > 0 {
		fmt.Fprintf(writer, " (%s)", strings.Join(segments, ", "))
	}
	fmt.Fprintln(writer)
}

286
cmd/core-lint/main_test.go Normal file
View file

@ -0,0 +1,286 @@
package main
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"testing"
lintpkg "forge.lthn.ai/core/lint/pkg/lint"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
buildBinaryOnce sync.Once
builtBinaryPath string
buildBinaryErr error
)
// TestCLI_Run_JSON runs `run --output json --fail-on warning` against a
// fixture expected to yield one catalog finding, and checks the exit
// code, the stderr threshold message, and the JSON report on stdout.
func TestCLI_Run_JSON(t *testing.T) {
dir := t.TempDir()
buildCLI(t)
// NOTE(review): PATH is pointed at an empty temp dir — presumably so
// external linter tools resolve as unavailable and only built-in rules
// contribute findings; confirm against the service's tool lookup.
t.Setenv("PATH", t.TempDir())
require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
// Fixture that the test expects to trigger exactly one go-cor-003 finding.
require.NoError(t, os.WriteFile(filepath.Join(dir, "input.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("data")
}
`), 0o644))
stdout, stderr, exitCode := runCLI(t, dir, "run", "--output", "json", "--fail-on", "warning", dir)
// With --fail-on=warning a warning-level finding must fail the run.
assert.Equal(t, 1, exitCode, stderr)
assert.Contains(t, stderr, "lint failed (fail-on=warning)")
var report lintpkg.Report
require.NoError(t, json.Unmarshal([]byte(stdout), &report))
require.Len(t, report.Findings, 1)
assert.Equal(t, "go-cor-003", report.Findings[0].Code)
assert.Equal(t, 1, report.Summary.Total)
assert.False(t, report.Summary.Passed)
}
// TestCLI_Run_FilesFlagLimitsScanning verifies that --files restricts
// scanning: with a clean file selected and a dirty file present but not
// selected, the run passes with zero findings.
func TestCLI_Run_FilesFlagLimitsScanning(t *testing.T) {
dir := t.TempDir()
buildCLI(t)
t.Setenv("PATH", t.TempDir())
require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
// clean.go has nothing to flag; ignored.go would produce findings if
// it were scanned.
require.NoError(t, os.WriteFile(filepath.Join(dir, "clean.go"), []byte(`package sample
func Clean() {}
`), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(dir, "ignored.go"), []byte(`package sample
func Run() {
_ = helper()
}
func helper() error { return nil }
`), 0o644))
stdout, stderr, exitCode := runCLI(t, dir, "run", "--output", "json", "--files", "clean.go", dir)
assert.Equal(t, 0, exitCode, stderr)
var report lintpkg.Report
require.NoError(t, json.Unmarshal([]byte(stdout), &report))
assert.Empty(t, report.Findings)
assert.Equal(t, 0, report.Summary.Total)
assert.True(t, report.Summary.Passed)
}
// TestCLI_Run_ScheduleAppliesPreset verifies that --schedule applies a
// named preset from .core/lint.yaml: the "nightly" schedule overrides
// output to json and restricts paths to services/ (which is clean), so
// the run passes with no findings even though root.go would be dirty.
func TestCLI_Run_ScheduleAppliesPreset(t *testing.T) {
dir := t.TempDir()
buildCLI(t)
t.Setenv("PATH", t.TempDir())
require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
// root.go would trigger a finding if it were in scope.
require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("root")
}
`), 0o644))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "services"), 0o755))
require.NoError(t, os.WriteFile(filepath.Join(dir, "services", "clean.go"), []byte(`package sample
func Clean() {}
`), 0o644))
// Config default is text output; the nightly schedule switches to json
// and narrows scanning to the services directory.
require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(`output: text
schedules:
nightly:
output: json
paths:
- services
`), 0o644))
stdout, stderr, exitCode := runCLI(t, dir, "run", "--schedule", "nightly", dir)
assert.Equal(t, 0, exitCode, stderr)
var report lintpkg.Report
require.NoError(t, json.Unmarshal([]byte(stdout), &report))
assert.Empty(t, report.Findings)
assert.Equal(t, 0, report.Summary.Total)
assert.True(t, report.Summary.Passed)
}
// TestCLI_Detect_JSON verifies that `detect --output json` reports the
// languages inferred from marker files (go.mod -> go, package.json -> js)
// as a JSON array.
func TestCLI_Detect_JSON(t *testing.T) {
dir := t.TempDir()
require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}\n"), 0o644))
stdout, stderr, exitCode := runCLI(t, dir, "detect", "--output", "json", dir)
assert.Equal(t, 0, exitCode, stderr)
var languages []string
require.NoError(t, json.Unmarshal([]byte(stdout), &languages))
assert.Equal(t, []string{"go", "js"}, languages)
}
// TestCLI_Init_WritesConfig verifies that `init` writes a default
// .core/lint.yaml, prints its path on stdout, and that the generated
// config contains expected defaults (golangci-lint, fail_on: error).
func TestCLI_Init_WritesConfig(t *testing.T) {
dir := t.TempDir()
stdout, stderr, exitCode := runCLI(t, dir, "init", dir)
assert.Equal(t, 0, exitCode, stderr)
assert.Contains(t, stdout, ".core/lint.yaml")
configPath := filepath.Join(dir, ".core", "lint.yaml")
content, err := os.ReadFile(configPath)
require.NoError(t, err)
assert.Contains(t, string(content), "golangci-lint")
assert.Contains(t, string(content), "fail_on: error")
}
// TestCLI_Tools_TextIncludesMetadata verifies that `tools --lang go`
// text output includes the tool name, its languages, and its
// entitlement when the tool is resolvable on PATH.
func TestCLI_Tools_TextIncludesMetadata(t *testing.T) {
// Build first so the sync.Once-cached binary exists before PATH is
// modified below.
buildCLI(t)
binDir := t.TempDir()
// Plant a fake executable named gosec so the tool reports as available.
fakeToolPath := filepath.Join(binDir, "gosec")
require.NoError(t, os.WriteFile(fakeToolPath, []byte("#!/bin/sh\nexit 0\n"), 0o755))
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
command := exec.Command(buildCLI(t), "tools", "--lang", "go")
command.Dir = t.TempDir()
command.Env = os.Environ()
output, err := command.CombinedOutput()
require.NoError(t, err, string(output))
text := string(output)
assert.Contains(t, text, "gosec")
assert.Contains(t, text, "langs=go")
assert.Contains(t, text, "entitlement=lint.security")
}
// TestCLI_LintCheck_SARIF runs `lint check --format sarif` against the
// repo's check fixtures and validates the SARIF envelope: version
// 2.1.0, a single run with driver name core-lint, and one result with
// rule ID go-cor-003.
func TestCLI_LintCheck_SARIF(t *testing.T) {
buildCLI(t)
repoRoot := repoRoot(t)
stdout, stderr, exitCode := runCLI(t, repoRoot, "lint", "check", "--format", "sarif", "tests/cli/lint/check/fixtures")
assert.Equal(t, 0, exitCode, stderr)
// Minimal SARIF shape: only the fields under test are decoded.
var sarif struct {
Version string `json:"version"`
Runs []struct {
Tool struct {
Driver struct {
Name string `json:"name"`
} `json:"driver"`
} `json:"tool"`
Results []struct {
RuleID string `json:"ruleId"`
} `json:"results"`
} `json:"runs"`
}
require.NoError(t, json.Unmarshal([]byte(stdout), &sarif))
require.Equal(t, "2.1.0", sarif.Version)
require.Len(t, sarif.Runs, 1)
assert.Equal(t, "core-lint", sarif.Runs[0].Tool.Driver.Name)
require.Len(t, sarif.Runs[0].Results, 1)
assert.Equal(t, "go-cor-003", sarif.Runs[0].Results[0].RuleID)
}
// TestCLI_HookInstallRemove verifies hook lifecycle in a fresh git
// repo: install writes a pre-commit hook invoking `core-lint run
// --hook`, and remove strips that invocation (the hook file itself may
// legitimately remain if it held other content).
func TestCLI_HookInstallRemove(t *testing.T) {
if _, err := exec.LookPath("git"); err != nil {
t.Skip("git not available")
}
dir := t.TempDir()
runCLIExpectSuccess(t, dir, "git", "init")
runCLIExpectSuccess(t, dir, "git", "config", "user.email", "test@example.com")
runCLIExpectSuccess(t, dir, "git", "config", "user.name", "Test User")
_, stderr, exitCode := runCLI(t, dir, "hook", "install", dir)
assert.Equal(t, 0, exitCode, stderr)
hookPath := filepath.Join(dir, ".git", "hooks", "pre-commit")
hookContent, err := os.ReadFile(hookPath)
require.NoError(t, err)
assert.Contains(t, string(hookContent), "core-lint run --hook")
_, stderr, exitCode = runCLI(t, dir, "hook", "remove", dir)
assert.Equal(t, 0, exitCode, stderr)
// After removal the file may be gone entirely, or present without the
// core-lint invocation — both are acceptable.
removedContent, err := os.ReadFile(hookPath)
if err == nil {
assert.NotContains(t, string(removedContent), "core-lint run --hook")
}
}
// runCLI executes the built core-lint binary with args in workdir and
// returns its stdout, stderr, and exit code. Unlike exec.Cmd.Output,
// stderr is captured on success as well as on failure, and the exit
// code is read from the process state rather than only from ExitError.
// An exit code of -1 means the binary could not be started at all.
func runCLI(t *testing.T, workdir string, args ...string) (string, string, int) {
	t.Helper()
	command := exec.Command(buildCLI(t), args...)
	command.Dir = workdir
	command.Env = os.Environ()
	var stdout, stderr strings.Builder
	command.Stdout = &stdout
	command.Stderr = &stderr
	err := command.Run()
	exitCode := 0
	if err != nil {
		// Default to -1 for start failures (binary missing, etc.).
		exitCode = -1
		if exitErr, ok := err.(*exec.ExitError); ok {
			exitCode = exitErr.ExitCode()
		}
	}
	return stdout.String(), stderr.String(), exitCode
}
// runCLIExpectSuccess runs an arbitrary command (e.g. git) in dir and
// fails the test immediately if it exits non-zero, attaching the
// combined stdout/stderr to the failure message.
func runCLIExpectSuccess(t *testing.T, dir string, name string, args ...string) {
	t.Helper()
	cmd := exec.Command(name, args...)
	cmd.Dir = dir
	combined, err := cmd.CombinedOutput()
	require.NoError(t, err, string(combined))
}
// buildCLI compiles ./cmd/core-lint exactly once per test binary (guarded
// by the package-level buildBinaryOnce) into a fresh temp dir and returns
// the binary's path. A build failure is cached in buildBinaryErr so every
// subsequent caller fails fast with the same error.
func buildCLI(t *testing.T) string {
	t.Helper()
	buildBinaryOnce.Do(func() {
		repoRoot := repoRoot(t)
		binDir, err := os.MkdirTemp("", "core-lint-bin-*")
		if err != nil {
			buildBinaryErr = err
			return
		}
		builtBinaryPath = filepath.Join(binDir, "core-lint")
		command := exec.Command("go", "build", "-o", builtBinaryPath, "./cmd/core-lint")
		command.Dir = repoRoot
		output, err := command.CombinedOutput()
		if err != nil {
			// Include trimmed compiler output so the cached error is actionable.
			buildBinaryErr = fmt.Errorf("build core-lint: %w: %s", err, strings.TrimSpace(string(output)))
		}
	})
	require.NoError(t, buildBinaryErr)
	return builtBinaryPath
}
// repoRoot resolves the repository root, assumed to sit two directories
// above the test's working directory.
func repoRoot(t *testing.T) string {
	t.Helper()
	absolute, err := filepath.Abs(filepath.Join("..", ".."))
	require.NoError(t, err)
	return absolute
}

View file

@ -59,6 +59,7 @@ type DocblockResult struct {
Total int `json:"total"`
Documented int `json:"documented"`
Missing []MissingDocblock `json:"missing,omitempty"`
Warnings []DocblockWarning `json:"warnings,omitempty"`
Passed bool `json:"passed"`
}
@ -71,6 +72,13 @@ type MissingDocblock struct {
Reason string `json:"reason,omitempty"`
}
// DocblockWarning captures a partial parse failure while still preserving
// the successfully parsed files in the same directory.
type DocblockWarning struct {
	Path  string `json:"path"`  // directory whose parse failed
	Error string `json:"error"` // parser error text (err.Error())
}
// RunDocblockCheck checks docblock coverage for the given packages.
func RunDocblockCheck(paths []string, threshold float64, verbose, jsonOutput bool) error {
result, err := CheckDocblockCoverage(paths)
@ -85,21 +93,13 @@ func RunDocblockCheck(paths []string, threshold float64, verbose, jsonOutput boo
if err != nil {
return err
}
fmt.Println(string(data))
cli.Print("%s\n", string(data))
if !result.Passed {
return cli.Err("docblock coverage %.1f%% below threshold %.1f%%", result.Coverage, threshold)
}
return nil
}
// Sort missing by file then line
slices.SortFunc(result.Missing, func(a, b MissingDocblock) int {
return cmp.Or(
cmp.Compare(a.File, b.File),
cmp.Compare(a.Line, b.Line),
)
})
// Print result
if verbose && len(result.Missing) > 0 {
cli.Print("%s\n\n", i18n.T("cmd.qa.docblock.missing_docs"))
@ -114,6 +114,13 @@ func RunDocblockCheck(paths []string, threshold float64, verbose, jsonOutput boo
cli.Blank()
}
if len(result.Warnings) > 0 {
for _, warning := range result.Warnings {
cli.Warnf("failed to parse %s: %s", warning.Path, warning.Error)
}
cli.Blank()
}
// Summary
coverageStr := fmt.Sprintf("%.1f%%", result.Coverage)
thresholdStr := fmt.Sprintf("%.1f%%", threshold)
@ -167,9 +174,12 @@ func CheckDocblockCoverage(patterns []string) (*DocblockResult, error) {
return !strings.HasSuffix(fi.Name(), "_test.go")
}, parser.ParseComments)
if err != nil {
// Log parse errors but continue to check other directories
cli.Warnf("failed to parse %s: %v", dir, err)
continue
// Preserve partial results when a directory contains both valid and
// invalid files. The caller decides how to present the warning.
result.Warnings = append(result.Warnings, DocblockWarning{
Path: dir,
Error: err.Error(),
})
}
for _, pkg := range pkgs {
@ -183,6 +193,21 @@ func CheckDocblockCoverage(patterns []string) (*DocblockResult, error) {
result.Coverage = float64(result.Documented) / float64(result.Total) * 100
}
slices.SortFunc(result.Missing, func(a, b MissingDocblock) int {
return cmp.Or(
cmp.Compare(a.File, b.File),
cmp.Compare(a.Line, b.Line),
cmp.Compare(a.Kind, b.Kind),
cmp.Compare(a.Name, b.Name),
)
})
slices.SortFunc(result.Warnings, func(a, b DocblockWarning) int {
return cmp.Or(
cmp.Compare(a.Path, b.Path),
cmp.Compare(a.Error, b.Error),
)
})
return result, nil
}

View file

@ -0,0 +1,36 @@
package qa
import (
"encoding/json"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRunDocblockCheckJSONOutput_IsDeterministicAndKeepsWarnings writes two
// undocumented files plus one unparsable file, then asserts that the JSON
// output sorts missing entries by file name and surfaces the parse failure
// as a warning instead of dropping the partially parsed results.
func TestRunDocblockCheckJSONOutput_IsDeterministicAndKeepsWarnings(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "b.go"), "package sample\n\nfunc Beta() {}\n")
	writeTestFile(t, filepath.Join(dir, "a.go"), "package sample\n\nfunc Alpha() {}\n")
	writeTestFile(t, filepath.Join(dir, "broken.go"), "package sample\n\nfunc Broken(\n")
	restoreWorkingDir(t, dir)
	var result DocblockResult
	output := captureStdout(t, func() {
		// A 100% threshold with zero documented symbols must fail.
		err := RunDocblockCheck([]string{"."}, 100, false, true)
		require.Error(t, err)
	})
	require.NoError(t, json.Unmarshal([]byte(output), &result))
	assert.False(t, result.Passed)
	assert.Equal(t, 2, result.Total)
	assert.Equal(t, 0, result.Documented)
	require.Len(t, result.Missing, 2)
	assert.Equal(t, "a.go", result.Missing[0].File)
	assert.Equal(t, "b.go", result.Missing[1].File)
	require.Len(t, result.Warnings, 1)
	assert.Equal(t, ".", result.Warnings[0].Path)
	assert.NotEmpty(t, result.Warnings[0].Error)
}

View file

@ -1,4 +1,4 @@
// cmd_health.go implements the 'qa health' command for aggregate CI health.
// cmd_health.go implements the `qa health` command for aggregate CI health.
//
// Usage:
// core qa health # Show CI health summary
@ -20,13 +20,14 @@ import (
"forge.lthn.ai/core/go-scm/repos"
)
// Health command flags
// Health command flags.
var (
healthProblems bool
healthRegistry string
healthJSON bool
)
// HealthWorkflowRun represents a GitHub Actions workflow run
// HealthWorkflowRun represents a GitHub Actions workflow run.
type HealthWorkflowRun struct {
Status string `json:"status"`
Conclusion string `json:"conclusion"`
@ -36,16 +37,36 @@ type HealthWorkflowRun struct {
URL string `json:"url"`
}
// RepoHealth represents the CI health of a single repo
// RepoHealth represents the CI health of a single repo.
type RepoHealth struct {
Name string
Status string // "passing", "failing", "pending", "no_ci", "disabled"
Message string
URL string
FailingSince string
Name string `json:"name"`
Status string `json:"status"` // passing, failing, error, pending, no_ci, disabled
Message string `json:"message"`
URL string `json:"url"`
FailingSince string `json:"failing_since,omitempty"`
}
// HealthSummary captures aggregate health counts.
type HealthSummary struct {
	TotalRepos    int            `json:"total_repos"`    // all repos in the registry, before any filtering
	FilteredRepos int            `json:"filtered_repos"` // repos remaining after --problems filtering
	Passing       int            `json:"passing"`
	Failing       int            `json:"failing"`
	Errors        int            `json:"errors"` // fetch/parse failures (status "error"), not CI failures
	Pending       int            `json:"pending"`
	Disabled      int            `json:"disabled"`
	NotConfigured int            `json:"not_configured"` // repos with status "no_ci"
	PassingRate   int            `json:"passing_rate"`   // integer percentage of TotalRepos that are passing
	ProblemsOnly  bool           `json:"problems_only"`  // true when the --problems flag was set
	ByStatus      map[string]int `json:"by_status"`      // raw per-status counts, pre-seeded with the known statuses
}
// HealthOutput is the JSON payload for `qa health --json`.
type HealthOutput struct {
	Summary HealthSummary `json:"summary"` // aggregate counts over the unfiltered repo list
	Repos   []RepoHealth  `json:"repos"`   // per-repo detail (the filtered list when --problems is set)
}
// addHealthCommand adds the 'health' subcommand to qa.
func addHealthCommand(parent *cli.Command) {
healthCmd := &cli.Command{
Use: "health",
@ -58,20 +79,18 @@ func addHealthCommand(parent *cli.Command) {
healthCmd.Flags().BoolVarP(&healthProblems, "problems", "p", false, i18n.T("cmd.qa.health.flag.problems"))
healthCmd.Flags().StringVar(&healthRegistry, "registry", "", i18n.T("common.flag.registry"))
healthCmd.Flags().BoolVar(&healthJSON, "json", false, i18n.T("common.flag.json"))
parent.AddCommand(healthCmd)
}
func runHealth() error {
// Check gh is available
if _, err := exec.LookPath("gh"); err != nil {
return log.E("qa.health", i18n.T("error.gh_not_found"), nil)
}
// Load registry
var reg *repos.Registry
var err error
if healthRegistry != "" {
reg, err = repos.LoadRegistry(io.Local, healthRegistry)
} else {
@ -85,28 +104,24 @@ func runHealth() error {
return log.E("qa.health", "failed to load registry", err)
}
// Fetch CI status from all repos
var healthResults []RepoHealth
repoList := reg.List()
for i, repo := range repoList {
cli.Print("\033[2K\r%s %d/%d %s",
dimStyle.Render(i18n.T("cmd.qa.issues.fetching")),
i+1, len(repoList), repo.Name)
allHealthResults := make([]RepoHealth, 0, len(repoList))
for _, repo := range repoList {
health := fetchRepoHealth(reg.Org, repo.Name)
healthResults = append(healthResults, health)
allHealthResults = append(allHealthResults, health)
}
cli.Print("\033[2K\r") // Clear progress
// Sort: problems first, then passing
slices.SortFunc(healthResults, func(a, b RepoHealth) int {
return cmp.Compare(healthPriority(a.Status), healthPriority(b.Status))
// Sort by severity first, then repo name for deterministic output.
slices.SortFunc(allHealthResults, func(a, b RepoHealth) int {
if p := cmp.Compare(healthPriority(a.Status), healthPriority(b.Status)); p != 0 {
return p
}
return strings.Compare(a.Name, b.Name)
})
// Filter if --problems flag
healthResults := allHealthResults
if healthProblems {
var problems []RepoHealth
problems := make([]RepoHealth, 0, len(healthResults))
for _, h := range healthResults {
if h.Status != "passing" {
problems = append(problems, h)
@ -115,37 +130,29 @@ func runHealth() error {
healthResults = problems
}
// Calculate summary
passing := 0
for _, h := range healthResults {
if h.Status == "passing" {
passing++
}
}
total := len(repoList)
percentage := 0
if total > 0 {
percentage = (passing * 100) / total
summary := summariseHealthResults(len(repoList), len(healthResults), allHealthResults, healthProblems)
if healthJSON {
return printHealthJSON(summary, healthResults)
}
// Print summary
cli.Print("%s: %d/%d repos healthy (%d%%)\n\n",
i18n.T("cmd.qa.health.summary"),
passing, total, percentage)
summary.Passing,
summary.TotalRepos,
summary.PassingRate)
if len(healthResults) == 0 {
cli.Text(i18n.T("cmd.qa.health.all_healthy"))
return nil
}
// Group by status
grouped := make(map[string][]RepoHealth)
for _, h := range healthResults {
grouped[h.Status] = append(grouped[h.Status], h)
}
// Print problems first
printHealthGroup("failing", grouped["failing"], errorStyle)
printHealthGroup("error", grouped["error"], errorStyle)
printHealthGroup("pending", grouped["pending"], warningStyle)
printHealthGroup("no_ci", grouped["no_ci"], dimStyle)
printHealthGroup("disabled", grouped["disabled"], dimStyle)
@ -159,7 +166,6 @@ func runHealth() error {
func fetchRepoHealth(org, repoName string) RepoHealth {
repoFullName := cli.Sprintf("%s/%s", org, repoName)
args := []string{
"run", "list",
"--repo", repoFullName,
@ -170,7 +176,6 @@ func fetchRepoHealth(org, repoName string) RepoHealth {
cmd := exec.Command("gh", args...)
output, err := cmd.Output()
if err != nil {
// Check if it's a 404 (no workflows)
if exitErr, ok := err.(*exec.ExitError); ok {
stderr := string(exitErr.Stderr)
if strings.Contains(stderr, "no workflows") || strings.Contains(stderr, "not found") {
@ -183,7 +188,7 @@ func fetchRepoHealth(org, repoName string) RepoHealth {
}
return RepoHealth{
Name: repoName,
Status: "no_ci",
Status: "error",
Message: i18n.T("cmd.qa.health.fetch_error"),
}
}
@ -192,7 +197,7 @@ func fetchRepoHealth(org, repoName string) RepoHealth {
if err := json.Unmarshal(output, &runs); err != nil {
return RepoHealth{
Name: repoName,
Status: "no_ci",
Status: "error",
Message: i18n.T("cmd.qa.health.parse_error"),
}
}
@ -245,28 +250,88 @@ func healthPriority(status string) int {
switch status {
case "failing":
return 0
case "pending":
case "error":
return 1
case "no_ci":
case "pending":
return 2
case "disabled":
case "no_ci":
return 3
case "passing":
case "disabled":
return 4
default:
case "passing":
return 5
default:
return 6
}
}
// summariseHealthResults aggregates per-repo health statuses into a
// HealthSummary. totalRepos is the unfiltered registry size, filteredRepos
// is the count left after any --problems filter, and results is the
// unfiltered list (the call site passes allHealthResults) so the summary
// reflects every repo. PassingRate is an integer percentage of totalRepos.
func summariseHealthResults(totalRepos int, filteredRepos int, results []RepoHealth, problemsOnly bool) HealthSummary {
	// Pre-seed the known statuses so JSON output always carries all keys.
	byStatus := map[string]int{
		"passing":  0,
		"failing":  0,
		"error":    0,
		"pending":  0,
		"disabled": 0,
		"no_ci":    0,
	}
	for _, health := range results {
		byStatus[health.Status]++
	}
	summary := HealthSummary{
		TotalRepos:    totalRepos,
		FilteredRepos: filteredRepos,
		Passing:       byStatus["passing"],
		Failing:       byStatus["failing"],
		Errors:        byStatus["error"],
		Pending:       byStatus["pending"],
		Disabled:      byStatus["disabled"],
		NotConfigured: byStatus["no_ci"],
		ProblemsOnly:  problemsOnly,
		ByStatus:      byStatus,
	}
	if totalRepos > 0 {
		summary.PassingRate = (summary.Passing * 100) / totalRepos
	}
	return summary
}
// printHealthJSON emits the health summary plus the repo list as indented
// JSON on stdout.
func printHealthJSON(summary HealthSummary, repos []RepoHealth) error {
	payload := HealthOutput{
		Summary: summary,
		Repos:   repos,
	}
	encoded, marshalErr := json.MarshalIndent(payload, "", " ")
	if marshalErr != nil {
		return marshalErr
	}
	cli.Print("%s\n", string(encoded))
	return nil
}
func printHealthGroup(status string, repos []RepoHealth, style *cli.AnsiStyle) {
if len(repos) == 0 {
return
}
slices.SortFunc(repos, func(a, b RepoHealth) int {
return strings.Compare(a.Name, b.Name)
})
var label string
switch status {
case "failing":
label = i18n.T("cmd.qa.health.count_failing")
case "error":
label = i18n.T("cmd.qa.health.count_error")
case "pending":
label = i18n.T("cmd.qa.health.count_pending")
case "no_ci":

240
cmd/qa/cmd_health_test.go Normal file
View file

@ -0,0 +1,240 @@
package qa
import (
"encoding/json"
"path/filepath"
"testing"
"forge.lthn.ai/core/cli/pkg/cli"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRunHealthJSONOutput_UsesMachineFriendlyKeysAndKeepsFetchErrors stubs
// `gh` with a shell script (alpha succeeds, beta exits non-zero) and
// asserts the --json payload uses snake_case keys, counts the failed fetch
// under status "error", and lists repos severity-first then by name.
func TestRunHealthJSONOutput_UsesMachineFriendlyKeysAndKeepsFetchErrors(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
alpha:
type: module
beta:
type: module
`)
	// Fake `gh` placed on PATH: alpha returns one successful run, beta fails.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"--repo forge/alpha"*)
cat <<'JSON'
[
{
"status": "completed",
"conclusion": "success",
"name": "CI",
"headSha": "abc123",
"updatedAt": "2026-03-30T00:00:00Z",
"url": "https://example.com/alpha/run/1"
}
]
JSON
;;
*"--repo forge/beta"*)
printf '%s\n' 'simulated workflow lookup failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetHealthFlags(t)
	t.Cleanup(func() {
		healthRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addHealthCommand(parent)
	command := findSubcommand(t, parent, "health")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	var payload HealthOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	assert.Equal(t, 2, payload.Summary.TotalRepos)
	assert.Equal(t, 1, payload.Summary.Passing)
	assert.Equal(t, 1, payload.Summary.Errors)
	assert.Equal(t, 2, payload.Summary.FilteredRepos)
	assert.Len(t, payload.Summary.ByStatus, 6)
	assert.Equal(t, 1, payload.Summary.ByStatus["passing"])
	assert.Equal(t, 1, payload.Summary.ByStatus["error"])
	assert.Equal(t, 0, payload.Summary.ByStatus["pending"])
	assert.Equal(t, 0, payload.Summary.ByStatus["disabled"])
	assert.Equal(t, 0, payload.Summary.ByStatus["no_ci"])
	require.Len(t, payload.Repos, 2)
	// "error" sorts before "passing", so beta comes first.
	assert.Equal(t, "error", payload.Repos[0].Status)
	assert.Equal(t, "beta", payload.Repos[0].Name)
	assert.Equal(t, "passing", payload.Repos[1].Status)
	assert.Equal(t, "alpha", payload.Repos[1].Name)
	assert.Contains(t, output, `"status"`)
	assert.NotContains(t, output, `"Status"`)
	assert.NotContains(t, output, `"FailingSince"`)
}
// TestRunHealthJSONOutput_ProblemsOnlyKeepsOverallSummary verifies that
// --problems trims the repo list to non-passing entries while the summary
// still reports totals over every repo in the registry.
func TestRunHealthJSONOutput_ProblemsOnlyKeepsOverallSummary(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
alpha:
type: module
beta:
type: module
`)
	// Fake `gh`: alpha passes, beta's fetch fails.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"--repo forge/alpha"*)
cat <<'JSON'
[
{
"status": "completed",
"conclusion": "success",
"name": "CI",
"headSha": "abc123",
"updatedAt": "2026-03-30T00:00:00Z",
"url": "https://example.com/alpha/run/1"
}
]
JSON
;;
*"--repo forge/beta"*)
printf '%s\n' 'simulated workflow lookup failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetHealthFlags(t)
	t.Cleanup(func() {
		healthRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addHealthCommand(parent)
	command := findSubcommand(t, parent, "health")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	require.NoError(t, command.Flags().Set("problems", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	var payload HealthOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	assert.Equal(t, 2, payload.Summary.TotalRepos)
	assert.Equal(t, 1, payload.Summary.Passing)
	assert.Equal(t, 1, payload.Summary.Errors)
	assert.Equal(t, 1, payload.Summary.FilteredRepos)
	assert.True(t, payload.Summary.ProblemsOnly)
	assert.Len(t, payload.Summary.ByStatus, 6)
	assert.Equal(t, 1, payload.Summary.ByStatus["passing"])
	assert.Equal(t, 1, payload.Summary.ByStatus["error"])
	assert.Equal(t, 0, payload.Summary.ByStatus["pending"])
	assert.Equal(t, 0, payload.Summary.ByStatus["disabled"])
	assert.Equal(t, 0, payload.Summary.ByStatus["no_ci"])
	// Only the problematic repo remains in the repo list.
	require.Len(t, payload.Repos, 1)
	assert.Equal(t, "error", payload.Repos[0].Status)
	assert.Equal(t, "beta", payload.Repos[0].Name)
}
// TestRunHealthHumanOutput_ShowsFetchErrorsAsErrors runs the human-readable
// path (no --json) and checks a failed `gh` fetch is reported with the
// fetch_error message rather than being mislabelled as missing CI.
func TestRunHealthHumanOutput_ShowsFetchErrorsAsErrors(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
alpha:
type: module
beta:
type: module
`)
	// Fake `gh`: alpha passes, beta's fetch fails.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"--repo forge/alpha"*)
cat <<'JSON'
[
{
"status": "completed",
"conclusion": "success",
"name": "CI",
"headSha": "abc123",
"updatedAt": "2026-03-30T00:00:00Z",
"url": "https://example.com/alpha/run/1"
}
]
JSON
;;
*"--repo forge/beta"*)
printf '%s\n' 'simulated workflow lookup failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetHealthFlags(t)
	t.Cleanup(func() {
		healthRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addHealthCommand(parent)
	command := findSubcommand(t, parent, "health")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	assert.Contains(t, output, "cmd.qa.health.summary")
	assert.Contains(t, output, "alpha")
	assert.Contains(t, output, "beta")
	assert.Contains(t, output, "cmd.qa.health.fetch_error")
	assert.NotContains(t, output, "no CI")
}
// resetHealthFlags zeroes the package-level health command flags for the
// duration of a test and restores the caller's values on cleanup.
func resetHealthFlags(t *testing.T) {
	t.Helper()
	savedProblems, savedRegistry, savedJSON := healthProblems, healthRegistry, healthJSON
	healthProblems = false
	healthRegistry = ""
	healthJSON = false
	t.Cleanup(func() {
		healthProblems = savedProblems
		healthRegistry = savedRegistry
		healthJSON = savedJSON
	})
}

View file

@ -30,6 +30,7 @@ var (
issuesBlocked bool
issuesRegistry string
issuesLimit int
issuesJSON bool
)
// Issue represents a GitHub issue with triage metadata
@ -65,10 +66,31 @@ type Issue struct {
URL string `json:"url"`
// Computed fields
RepoName string
Priority int // Lower = higher priority
Category string // "needs_response", "ready", "blocked", "triage"
ActionHint string
RepoName string `json:"repo_name"`
Priority int `json:"priority"` // Lower = higher priority
Category string `json:"category"` // "needs_response", "ready", "blocked", "triage"
ActionHint string `json:"action_hint,omitempty"`
}
// IssueFetchError records a repository whose issues could not be fetched,
// so JSON consumers still see partial-failure detail.
type IssueFetchError struct {
	Repo  string `json:"repo"`  // repo name from the registry
	Error string `json:"error"` // trimmed fetch error text
}
// IssueCategoryOutput groups the issues that fell into one triage category.
type IssueCategoryOutput struct {
	Category string  `json:"category"` // needs_response | ready | blocked | triage
	Count    int     `json:"count"`    // len(Issues)
	Issues   []Issue `json:"issues"`
}
// IssuesOutput is the JSON payload for `qa issues --json`.
type IssuesOutput struct {
	TotalIssues    int                   `json:"total_issues"`    // issues fetched before category filtering
	FilteredIssues int                   `json:"filtered_issues"` // issues across the emitted categories
	ShowingMine    bool                  `json:"showing_mine"`    // --mine flag
	ShowingTriage  bool                  `json:"showing_triage"`  // --triage flag
	ShowingBlocked bool                  `json:"showing_blocked"` // --blocked flag
	Categories     []IssueCategoryOutput `json:"categories"`      // fixed order: needs_response, ready, blocked, triage
	FetchErrors    []IssueFetchError     `json:"fetch_errors"`    // repos whose gh fetch failed
}
// addIssuesCommand adds the 'issues' subcommand to qa.
@ -87,6 +109,7 @@ func addIssuesCommand(parent *cli.Command) {
issuesCmd.Flags().BoolVarP(&issuesBlocked, "blocked", "b", false, i18n.T("cmd.qa.issues.flag.blocked"))
issuesCmd.Flags().StringVar(&issuesRegistry, "registry", "", i18n.T("common.flag.registry"))
issuesCmd.Flags().IntVarP(&issuesLimit, "limit", "l", 50, i18n.T("cmd.qa.issues.flag.limit"))
issuesCmd.Flags().BoolVar(&issuesJSON, "json", false, i18n.T("common.flag.json"))
parent.AddCommand(issuesCmd)
}
@ -116,22 +139,59 @@ func runQAIssues() error {
// Fetch issues from all repos
var allIssues []Issue
fetchErrors := make([]IssueFetchError, 0)
repoList := reg.List()
// Registry repos are map-backed, so sort before fetching to keep output stable.
slices.SortFunc(repoList, func(a, b *repos.Repo) int {
return cmp.Compare(a.Name, b.Name)
})
successfulFetches := 0
for i, repo := range repoList {
cli.Print("\033[2K\r%s %d/%d %s",
dimStyle.Render(i18n.T("cmd.qa.issues.fetching")),
i+1, len(repoList), repo.Name)
if !issuesJSON {
cli.Print("%s %d/%d %s\n",
dimStyle.Render(i18n.T("cmd.qa.issues.fetching")),
i+1, len(repoList), repo.Name)
}
issues, err := fetchQAIssues(reg.Org, repo.Name, issuesLimit)
if err != nil {
fetchErrors = append(fetchErrors, IssueFetchError{
Repo: repo.Name,
Error: strings.TrimSpace(err.Error()),
})
if !issuesJSON {
cli.Print("%s\n", warningStyle.Render(i18n.T(
"cmd.qa.issues.fetch_error",
map[string]any{"Repo": repo.Name, "Error": strings.TrimSpace(err.Error())},
)))
}
continue // Skip repos with errors
}
allIssues = append(allIssues, issues...)
successfulFetches++
}
cli.Print("\033[2K\r") // Clear progress
totalIssues := len(allIssues)
if len(allIssues) == 0 {
emptyCategorised := map[string][]Issue{
"needs_response": {},
"ready": {},
"blocked": {},
"triage": {},
}
if issuesJSON {
if err := printCategorisedIssuesJSON(0, emptyCategorised, fetchErrors); err != nil {
return err
}
if successfulFetches == 0 && len(fetchErrors) > 0 {
return cli.Err("failed to fetch issues from any repository")
}
return nil
}
if successfulFetches == 0 && len(fetchErrors) > 0 {
return cli.Err("failed to fetch issues from any repository")
}
cli.Text(i18n.T("cmd.qa.issues.no_issues"))
return nil
}
@ -150,6 +210,10 @@ func runQAIssues() error {
categorised = filterCategory(categorised, "blocked")
}
if issuesJSON {
return printCategorisedIssuesJSON(totalIssues, categorised, fetchErrors)
}
// Print categorised issues
printCategorisedIssues(categorised)
@ -170,6 +234,9 @@ func fetchQAIssues(org, repoName string, limit int) ([]Issue, error) {
cmd := exec.Command("gh", args...)
output, err := cmd.Output()
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
return nil, log.E("qa.fetchQAIssues", strings.TrimSpace(string(exitErr.Stderr)), nil)
}
return nil, err
}
@ -205,7 +272,16 @@ func categoriseIssues(issues []Issue) map[string][]Issue {
// Sort each category by priority
for cat := range result {
slices.SortFunc(result[cat], func(a, b Issue) int {
return cmp.Compare(a.Priority, b.Priority)
if priority := cmp.Compare(a.Priority, b.Priority); priority != 0 {
return priority
}
if byDate := cmp.Compare(b.UpdatedAt.Unix(), a.UpdatedAt.Unix()); byDate != 0 {
return byDate
}
if repo := cmp.Compare(a.RepoName, b.RepoName); repo != 0 {
return repo
}
return cmp.Compare(a.Number, b.Number)
})
}
@ -250,24 +326,26 @@ func categoriseIssue(issue *Issue, currentUser string) {
// Default: ready to work
issue.Category = "ready"
issue.Priority = calculatePriority(issue, labels)
issue.Priority = calculatePriority(labels)
issue.ActionHint = ""
}
func calculatePriority(issue *Issue, labels []string) int {
// calculatePriority chooses the most urgent matching label so label order
// does not change how issues are ranked.
func calculatePriority(labels []string) int {
priority := 50
// Priority labels
for _, l := range labels {
switch {
case strings.Contains(l, "critical") || strings.Contains(l, "urgent"):
priority = 1
priority = min(priority, 1)
case strings.Contains(l, "high"):
priority = 10
priority = min(priority, 10)
case strings.Contains(l, "medium"):
priority = 30
priority = min(priority, 30)
case strings.Contains(l, "low"):
priority = 70
priority = min(priority, 70)
case l == "good-first-issue" || l == "good first issue":
priority = min(priority, 15) // Boost good first issues
case l == "help-wanted" || l == "help wanted":
@ -363,6 +441,39 @@ func printCategorisedIssues(categorised map[string][]Issue) {
}
}
// printCategorisedIssuesJSON writes the categorised issue listing as
// indented JSON on stdout, always emitting all four categories (even when
// empty) in a fixed order so output is deterministic.
func printCategorisedIssuesJSON(totalIssues int, categorised map[string][]Issue, fetchErrors []IssueFetchError) error {
	order := []string{"needs_response", "ready", "blocked", "triage"}
	filtered := 0
	sections := make([]IssueCategoryOutput, 0, len(order))
	for _, name := range order {
		bucket := categorised[name]
		filtered += len(bucket)
		sections = append(sections, IssueCategoryOutput{
			Category: name,
			Count:    len(bucket),
			Issues:   bucket,
		})
	}
	payload := IssuesOutput{
		TotalIssues:    totalIssues,
		FilteredIssues: filtered,
		ShowingMine:    issuesMine,
		ShowingTriage:  issuesTriage,
		ShowingBlocked: issuesBlocked,
		Categories:     sections,
		FetchErrors:    fetchErrors,
	}
	encoded, marshalErr := json.MarshalIndent(payload, "", " ")
	if marshalErr != nil {
		return marshalErr
	}
	cli.Print("%s\n", string(encoded))
	return nil
}
func printTriagedIssue(issue Issue) {
// #42 [core-bio] Fix avatar upload
num := cli.TitleStyle.Render(cli.Sprintf("#%d", issue.Number))
@ -381,6 +492,7 @@ func printTriagedIssue(issue Issue) {
}
}
if len(importantLabels) > 0 {
slices.Sort(importantLabels)
cli.Print(" %s", warningStyle.Render("["+strings.Join(importantLabels, ", ")+"]"))
}

316
cmd/qa/cmd_issues_test.go Normal file
View file

@ -0,0 +1,316 @@
package qa
import (
"encoding/json"
"fmt"
"path/filepath"
"testing"
"time"
"forge.lthn.ai/core/cli/pkg/cli"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRunQAIssuesJSONOutput_UsesMachineFriendlyKeys stubs `gh` so one repo
// returns a single issue with a recent non-author comment, then asserts
// the --json payload uses snake_case keys and triages that issue into the
// needs_response category with priority 10.
func TestRunQAIssuesJSONOutput_UsesMachineFriendlyKeys(t *testing.T) {
	dir := t.TempDir()
	commentTime := time.Now().UTC().Add(-1 * time.Hour).Format(time.RFC3339)
	updatedAt := time.Now().UTC().Format(time.RFC3339)
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
alpha:
type: module
`)
	// Fake `gh`: answers `api user` and the alpha issue listing; anything
	// else is an unexpected invocation and fails loudly. %% escapes are for
	// the surrounding Sprintf.
	writeExecutable(t, filepath.Join(dir, "gh"), fmt.Sprintf(`#!/bin/sh
case "$*" in
*"api user"*)
printf '%%s\n' 'alice'
;;
*"issue list --repo forge/alpha"*)
cat <<JSON
[
{
"number": 7,
"title": "Clarify agent output",
"state": "OPEN",
"body": "Explain behaviour",
"createdAt": "2026-03-30T00:00:00Z",
"updatedAt": %q,
"author": {"login": "bob"},
"assignees": {"nodes": []},
"labels": {"nodes": [{"name": "agent:ready"}]},
"comments": {
"totalCount": 1,
"nodes": [
{
"author": {"login": "carol"},
"createdAt": %q
}
]
},
"url": "https://example.com/issues/7"
}
]
JSON
;;
*)
printf '%%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`, updatedAt, commentTime))
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetIssuesFlags(t)
	t.Cleanup(func() {
		issuesRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addIssuesCommand(parent)
	command := findSubcommand(t, parent, "issues")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	var payload IssuesOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	assert.Equal(t, 1, payload.TotalIssues)
	assert.Equal(t, 1, payload.FilteredIssues)
	require.Len(t, payload.Categories, 4)
	require.Len(t, payload.Categories[0].Issues, 1)
	issue := payload.Categories[0].Issues[0]
	assert.Equal(t, "needs_response", payload.Categories[0].Category)
	assert.Equal(t, "alpha", issue.RepoName)
	assert.Equal(t, 10, issue.Priority)
	assert.Equal(t, "needs_response", issue.Category)
	assert.Equal(t, "@carol cmd.qa.issues.hint.needs_response", issue.ActionHint)
	assert.Contains(t, output, `"repo_name"`)
	assert.Contains(t, output, `"action_hint"`)
	assert.NotContains(t, output, `"RepoName"`)
	assert.NotContains(t, output, `"ActionHint"`)
}
// TestRunQAIssuesJSONOutput_SortsFetchErrorsByRepoName declares repos in
// non-alphabetical registry order and makes every fetch fail, asserting the
// JSON fetch_errors list still comes out sorted by repo name.
func TestRunQAIssuesJSONOutput_SortsFetchErrorsByRepoName(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
beta:
type: module
alpha:
type: module
`)
	// Fake `gh`: both repos fail with distinct stderr text.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"issue list --repo forge/alpha"*)
printf '%s\n' 'alpha failed' >&2
exit 1
;;
*"issue list --repo forge/beta"*)
printf '%s\n' 'beta failed' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetIssuesFlags(t)
	t.Cleanup(func() {
		issuesRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addIssuesCommand(parent)
	command := findSubcommand(t, parent, "issues")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	var runErr error
	output := captureStdout(t, func() {
		runErr = command.RunE(command, nil)
	})
	// All fetches failed, so the command itself errors while still having
	// printed the JSON payload.
	require.Error(t, runErr)
	var payload IssuesOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	require.Len(t, payload.FetchErrors, 2)
	assert.Equal(t, "alpha", payload.FetchErrors[0].Repo)
	assert.Equal(t, "beta", payload.FetchErrors[1].Repo)
}
// TestRunQAIssuesJSONOutput_ReturnsErrorWhenAllFetchesFail verifies that
// when every repo fetch fails in JSON mode, the command still prints a
// well-formed payload (all four categories, empty) and returns an error.
func TestRunQAIssuesJSONOutput_ReturnsErrorWhenAllFetchesFail(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
beta:
type: module
alpha:
type: module
`)
	// Fake `gh`: both repos fail.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"issue list --repo forge/alpha"*)
printf '%s\n' 'alpha failed' >&2
exit 1
;;
*"issue list --repo forge/beta"*)
printf '%s\n' 'beta failed' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetIssuesFlags(t)
	t.Cleanup(func() {
		issuesRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addIssuesCommand(parent)
	command := findSubcommand(t, parent, "issues")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	var runErr error
	output := captureStdout(t, func() {
		runErr = command.RunE(command, nil)
	})
	require.Error(t, runErr)
	var payload IssuesOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	require.Len(t, payload.Categories, 4)
	assert.Empty(t, payload.Categories[0].Issues)
	require.Len(t, payload.FetchErrors, 2)
	assert.Equal(t, "alpha", payload.FetchErrors[0].Repo)
	assert.Equal(t, "beta", payload.FetchErrors[1].Repo)
}
// TestRunQAIssuesHumanOutput_ReturnsErrorWhenAllFetchesFail runs the
// human-readable path (no --json) with every fetch failing and asserts the
// command errors rather than claiming there are no issues.
func TestRunQAIssuesHumanOutput_ReturnsErrorWhenAllFetchesFail(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
beta:
type: module
alpha:
type: module
`)
	// Fake `gh`: both repos fail.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"issue list --repo forge/alpha"*)
printf '%s\n' 'alpha failed' >&2
exit 1
;;
*"issue list --repo forge/beta"*)
printf '%s\n' 'beta failed' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetIssuesFlags(t)
	t.Cleanup(func() {
		issuesRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addIssuesCommand(parent)
	command := findSubcommand(t, parent, "issues")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	var runErr error
	output := captureStdout(t, func() {
		runErr = command.RunE(command, nil)
	})
	require.Error(t, runErr)
	assert.NotContains(t, output, "cmd.qa.issues.no_issues")
}
// TestCalculatePriority_UsesMostUrgentLabelRegardlessOfOrder verifies that
// label ordering cannot demote an issue: "critical" wins whether it appears
// before or after a lower-priority label.
func TestCalculatePriority_UsesMostUrgentLabelRegardlessOfOrder(t *testing.T) {
	for _, labels := range [][]string{
		{"low", "critical"},
		{"critical", "low"},
	} {
		assert.Equal(t, 1, calculatePriority(labels))
	}
}
// TestPrintTriagedIssue_SortsImportantLabels feeds an issue whose labels
// arrive in non-alphabetical order and asserts the rendered label list is
// sorted, so output is deterministic across runs.
func TestPrintTriagedIssue_SortsImportantLabels(t *testing.T) {
	var issue Issue
	require.NoError(t, json.Unmarshal([]byte(`{
"number": 7,
"title": "Stabilise output",
"updatedAt": "2026-03-30T00:00:00Z",
"labels": {
"nodes": [
{"name": "priority:urgent"},
{"name": "agent:ready"}
]
}
}`), &issue))
	issue.RepoName = "alpha"
	output := captureStdout(t, func() {
		printTriagedIssue(issue)
	})
	assert.Contains(t, output, "[agent:ready, priority:urgent]")
	assert.NotContains(t, output, "[priority:urgent, agent:ready]")
}
// resetIssuesFlags zeroes the package-level issues command flags for the
// duration of a test and restores the previous values via t.Cleanup.
func resetIssuesFlags(t *testing.T) {
	t.Helper()
	saved := struct {
		mine, triage, blocked, json bool
		registry                    string
		limit                       int
	}{issuesMine, issuesTriage, issuesBlocked, issuesJSON, issuesRegistry, issuesLimit}
	issuesMine = false
	issuesTriage = false
	issuesBlocked = false
	issuesRegistry = ""
	issuesLimit = 50
	issuesJSON = false
	t.Cleanup(func() {
		issuesMine = saved.mine
		issuesTriage = saved.triage
		issuesBlocked = saved.blocked
		issuesRegistry = saved.registry
		issuesLimit = saved.limit
		issuesJSON = saved.json
	})
}

View file

@ -14,8 +14,10 @@ package qa
import (
"context"
"encoding/json"
"fmt"
"os"
"sort"
"strings"
"forge.lthn.ai/core/cli/pkg/cli"
@ -65,8 +67,10 @@ func addPHPFmtCommand(parent *cli.Command) {
return cli.Err("not a PHP project (no composer.json found)")
}
cli.Print("%s %s\n", headerStyle.Render("PHP Format"), dimStyle.Render("(Pint)"))
cli.Blank()
if !isMachineReadableOutput(phpFmtJSON) {
cli.Print("%s %s\n", headerStyle.Render("PHP Format"), dimStyle.Render("(Pint)"))
cli.Blank()
}
return php.Format(context.Background(), php.FormatOptions{
Dir: cwd,
@ -111,8 +115,10 @@ func addPHPStanCommand(parent *cli.Command) {
return cli.Err("no static analyser found (install PHPStan: composer require phpstan/phpstan --dev)")
}
cli.Print("%s %s\n", headerStyle.Render("PHP Static Analysis"), dimStyle.Render(fmt.Sprintf("(%s)", analyser)))
cli.Blank()
if !isMachineReadableOutput(phpStanJSON, phpStanSARIF) {
cli.Print("%s %s\n", headerStyle.Render("PHP Static Analysis"), dimStyle.Render(fmt.Sprintf("(%s)", analyser)))
cli.Blank()
}
err = php.Analyse(context.Background(), php.AnalyseOptions{
Dir: cwd,
@ -125,8 +131,10 @@ func addPHPStanCommand(parent *cli.Command) {
return cli.Err("static analysis found issues")
}
cli.Blank()
cli.Print("%s\n", successStyle.Render("Static analysis passed"))
if !isMachineReadableOutput(phpStanJSON, phpStanSARIF) {
cli.Blank()
cli.Print("%s\n", successStyle.Render("Static analysis passed"))
}
return nil
},
}
@ -168,8 +176,10 @@ func addPHPPsalmCommand(parent *cli.Command) {
return cli.Err("Psalm not found (install: composer require vimeo/psalm --dev)")
}
cli.Print("%s\n", headerStyle.Render("PHP Psalm Analysis"))
cli.Blank()
if !isMachineReadableOutput(phpPsalmJSON, phpPsalmSARIF) {
cli.Print("%s\n", headerStyle.Render("PHP Psalm Analysis"))
cli.Blank()
}
err = php.RunPsalm(context.Background(), php.PsalmOptions{
Dir: cwd,
@ -184,8 +194,10 @@ func addPHPPsalmCommand(parent *cli.Command) {
return cli.Err("Psalm found issues")
}
cli.Blank()
cli.Print("%s\n", successStyle.Render("Psalm analysis passed"))
if !isMachineReadableOutput(phpPsalmJSON, phpPsalmSARIF) {
cli.Blank()
cli.Print("%s\n", successStyle.Render("Psalm analysis passed"))
}
return nil
},
}
@ -220,8 +232,10 @@ func addPHPAuditCommand(parent *cli.Command) {
return cli.Err("not a PHP project (no composer.json found)")
}
cli.Print("%s\n", headerStyle.Render("Dependency Audit"))
cli.Blank()
if !isMachineReadableOutput(phpAuditJSON) {
cli.Print("%s\n", headerStyle.Render("Dependency Audit"))
cli.Blank()
}
results, err := php.RunAudit(context.Background(), php.AuditOptions{
Dir: cwd,
@ -232,6 +246,20 @@ func addPHPAuditCommand(parent *cli.Command) {
return err
}
if phpAuditJSON {
payload := mapAuditResultsForJSON(results)
data, err := json.MarshalIndent(payload, "", " ")
if err != nil {
return err
}
cli.Print("%s\n", string(data))
if payload.HasVulnerabilities {
return cli.Err("vulnerabilities found in dependencies")
}
return nil
}
hasVulns := false
for _, result := range results {
if result.Error != nil {
@ -293,8 +321,10 @@ func addPHPSecurityCommand(parent *cli.Command) {
return cli.Err("not a PHP project (no composer.json found)")
}
cli.Print("%s\n", headerStyle.Render("Security Checks"))
cli.Blank()
if !isMachineReadableOutput(phpSecurityJSON, phpSecuritySARIF) {
cli.Print("%s\n", headerStyle.Render("Security Checks"))
cli.Blank()
}
result, err := php.RunSecurityChecks(context.Background(), php.SecurityOptions{
Dir: cwd,
@ -307,6 +337,36 @@ func addPHPSecurityCommand(parent *cli.Command) {
return err
}
result.Checks = sortSecurityChecks(result.Checks)
if phpSecuritySARIF {
data, err := json.MarshalIndent(mapSecurityResultForSARIF(result), "", " ")
if err != nil {
return err
}
cli.Print("%s\n", string(data))
summary := result.Summary
if summary.Critical > 0 || summary.High > 0 {
return cli.Err("security checks failed")
}
return nil
}
if phpSecurityJSON {
data, err := json.MarshalIndent(result, "", " ")
if err != nil {
return err
}
cli.Print("%s\n", string(data))
summary := result.Summary
if summary.Critical > 0 || summary.High > 0 {
return cli.Err("security checks failed")
}
return nil
}
// Print each check result
for _, check := range result.Checks {
if check.Passed {
@ -363,6 +423,74 @@ func addPHPSecurityCommand(parent *cli.Command) {
parent.AddCommand(cmd)
}
// auditJSONOutput is the machine-readable envelope emitted by
// `qa php audit --json`: per-tool results plus aggregate vulnerability totals.
type auditJSONOutput struct {
Results []auditResultJSON `json:"results"`
HasVulnerabilities bool `json:"has_vulnerabilities"`
Vulnerabilities int `json:"vulnerabilities"`
}
// auditResultJSON is one audit tool's outcome in lower-case JSON form.
type auditResultJSON struct {
Tool string `json:"tool"`
Vulnerabilities int `json:"vulnerabilities"`
Advisories []auditAdvisoryJSON `json:"advisories"`
Error string `json:"error,omitempty"`
}
// auditAdvisoryJSON is a single security advisory reported for a package.
type auditAdvisoryJSON struct {
Package string `json:"package"`
Severity string `json:"severity,omitempty"`
Title string `json:"title,omitempty"`
URL string `json:"url,omitempty"`
Identifiers []string `json:"identifiers,omitempty"`
}
// mapAuditResultsForJSON converts audit results into the stable, lower-case
// JSON shape used by `qa php audit --json`.
//
// Results are ordered by tool name, and advisories by package then title, so
// the output is deterministic. Unlike the previous version, the caller's
// results slice is no longer sorted in place: we sort a shallow copy, so the
// caller's ordering is preserved. Advisory identifier slices are copied too.
func mapAuditResultsForJSON(results []php.AuditResult) auditJSONOutput {
	output := auditJSONOutput{
		Results: make([]auditResultJSON, 0, len(results)),
	}
	// Sort a copy rather than mutating the input slice.
	ordered := append([]php.AuditResult(nil), results...)
	sort.Slice(ordered, func(i, j int) bool {
		return ordered[i].Tool < ordered[j].Tool
	})
	for _, result := range ordered {
		entry := auditResultJSON{
			Tool:            result.Tool,
			Vulnerabilities: result.Vulnerabilities,
		}
		if result.Error != nil {
			entry.Error = result.Error.Error()
		}
		entry.Advisories = make([]auditAdvisoryJSON, 0, len(result.Advisories))
		for _, advisory := range result.Advisories {
			entry.Advisories = append(entry.Advisories, auditAdvisoryJSON{
				Package:     advisory.Package,
				Severity:    advisory.Severity,
				Title:       advisory.Title,
				URL:         advisory.URL,
				Identifiers: append([]string(nil), advisory.Identifiers...),
			})
		}
		sort.Slice(entry.Advisories, func(i, j int) bool {
			if entry.Advisories[i].Package == entry.Advisories[j].Package {
				return entry.Advisories[i].Title < entry.Advisories[j].Title
			}
			return entry.Advisories[i].Package < entry.Advisories[j].Package
		})
		output.Results = append(output.Results, entry)
		output.Vulnerabilities += entry.Vulnerabilities
	}
	output.HasVulnerabilities = output.Vulnerabilities > 0
	return output
}
// sortSecurityChecks orders checks by ID for deterministic output.
// NOTE: the sort happens in place on the caller's slice; the same slice is
// returned for call-site convenience (callers assign the result back).
func sortSecurityChecks(checks []php.SecurityCheck) []php.SecurityCheck {
sort.Slice(checks, func(i, j int) bool {
return checks[i].ID < checks[j].ID
})
return checks
}
// PHP rector command flags.
var (
phpRectorFix bool
@ -499,8 +627,10 @@ func addPHPTestCommand(parent *cli.Command) {
}
runner := php.DetectTestRunner(cwd)
cli.Print("%s %s\n", headerStyle.Render("PHP Tests"), dimStyle.Render(fmt.Sprintf("(%s)", runner)))
cli.Blank()
if !isMachineReadableOutput(phpTestJUnit) {
cli.Print("%s %s\n", headerStyle.Render("PHP Tests"), dimStyle.Render(fmt.Sprintf("(%s)", runner)))
cli.Blank()
}
var groups []string
if phpTestGroup != "" {
@ -519,8 +649,10 @@ func addPHPTestCommand(parent *cli.Command) {
return cli.Err("tests failed")
}
cli.Blank()
cli.Print("%s\n", successStyle.Render("All tests passed"))
if !isMachineReadableOutput(phpTestJUnit) {
cli.Blank()
cli.Print("%s\n", successStyle.Render("All tests passed"))
}
return nil
},
}
@ -549,3 +681,124 @@ func getSeverityStyle(severity string) *cli.AnsiStyle {
return dimStyle
}
}
// isMachineReadableOutput reports whether any machine-readable output flag
// (JSON, SARIF, JUnit, ...) is enabled; when it is, human-facing chrome such
// as headers and success banners is suppressed.
func isMachineReadableOutput(flags ...bool) bool {
	enabled := false
	for _, flag := range flags {
		enabled = enabled || flag
	}
	return enabled
}
// sarifLog is the top-level SARIF 2.1.0 document.
type sarifLog struct {
Version string `json:"version"`
Schema string `json:"$schema"`
Runs []sarifRun `json:"runs"`
}
// sarifRun is a single analysis run: the tool that ran plus its results.
type sarifRun struct {
Tool sarifTool `json:"tool"`
Results []sarifResult `json:"results"`
}
// sarifTool wraps the driver description, per the SARIF schema.
type sarifTool struct {
Driver sarifDriver `json:"driver"`
}
// sarifDriver names the analysis tool and declares its rule catalog.
type sarifDriver struct {
Name string `json:"name"`
Rules []sarifRule `json:"rules"`
}
// sarifRule describes one reportable check (rule) in the driver's catalog.
type sarifRule struct {
ID string `json:"id"`
Name string `json:"name"`
ShortDescription sarifMessage `json:"shortDescription"`
FullDescription sarifMessage `json:"fullDescription"`
Help sarifMessage `json:"help,omitempty"`
Properties any `json:"properties,omitempty"`
}
// sarifResult is one finding, referencing a rule by ID with a level and message.
type sarifResult struct {
RuleID string `json:"ruleId"`
Level string `json:"level"`
Message sarifMessage `json:"message"`
Properties any `json:"properties,omitempty"`
}
// sarifMessage is SARIF's plain-text message wrapper.
type sarifMessage struct {
Text string `json:"text"`
}
// mapSecurityResultForSARIF converts a security check result into a SARIF
// 2.1.0 log. Every check becomes a rule in the driver catalog; only failed
// checks additionally become results, with severity mapped via sarifLevel.
func mapSecurityResultForSARIF(result *php.SecurityResult) sarifLog {
rules := make([]sarifRule, 0, len(result.Checks))
sarifResults := make([]sarifResult, 0, len(result.Checks))
for _, check := range result.Checks {
// Catalog entry for the check, emitted regardless of pass/fail.
rule := sarifRule{
ID: check.ID,
Name: check.Name,
ShortDescription: sarifMessage{Text: check.Name},
FullDescription: sarifMessage{Text: check.Description},
}
if check.Fix != "" {
rule.Help = sarifMessage{Text: check.Fix}
}
if check.CWE != "" {
rule.Properties = map[string]any{"cwe": check.CWE}
}
rules = append(rules, rule)
// Passed checks contribute no result entry.
if check.Passed {
continue
}
// Fall back to the description when the check carries no message.
message := check.Message
if message == "" {
message = check.Description
}
properties := map[string]any{
"severity": check.Severity,
}
if check.CWE != "" {
properties["cwe"] = check.CWE
}
if check.Fix != "" {
properties["fix"] = check.Fix
}
sarifResults = append(sarifResults, sarifResult{
RuleID: check.ID,
Level: sarifLevel(check.Severity),
Message: sarifMessage{Text: message},
Properties: properties,
})
}
// Single-run SARIF document identifying this command as the driver.
return sarifLog{
Version: "2.1.0",
Schema: "https://json.schemastore.org/sarif-2.1.0.json",
Runs: []sarifRun{{
Tool: sarifTool{
Driver: sarifDriver{
Name: "core qa security",
Rules: rules,
},
},
Results: sarifResults,
}},
}
}
// sarifLevel maps a security-check severity (case-insensitive) to a SARIF
// result level: critical/high -> "error", medium -> "warning", else "note".
func sarifLevel(severity string) string {
	normalized := strings.ToLower(severity)
	if normalized == "critical" || normalized == "high" {
		return "error"
	}
	if normalized == "medium" {
		return "warning"
	}
	return "note"
}

432
cmd/qa/cmd_php_test.go Normal file
View file

@ -0,0 +1,432 @@
package qa
import (
"encoding/json"
"io"
"os"
"path/filepath"
"testing"
"forge.lthn.ai/core/cli/pkg/cli"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestPHPStanJSONOutput_DoesNotAppendSuccessBanner verifies that --json mode
// prints only the tool's JSON, with no header or success banner.
func TestPHPStanJSONOutput_DoesNotAppendSuccessBanner(t *testing.T) {
dir := t.TempDir()
writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
// Fake phpstan that prints a fixed JSON line.
writeExecutable(t, filepath.Join(dir, "vendor", "bin", "phpstan"), "#!/bin/sh\nprintf '%s\\n' '{\"tool\":\"phpstan\",\"status\":\"ok\"}'\n")
restoreWorkingDir(t, dir)
resetPHPStanFlags(t)
parent := &cli.Command{Use: "qa"}
addPHPStanCommand(parent)
command := findSubcommand(t, parent, "stan")
require.NoError(t, command.Flags().Set("json", "true"))
output := captureStdout(t, func() {
require.NoError(t, command.RunE(command, nil))
})
// Output must be exactly the tool JSON — nothing prepended or appended.
assert.Equal(t, "{\"tool\":\"phpstan\",\"status\":\"ok\"}\n", output)
assert.NotContains(t, output, "Static analysis passed")
assert.NotContains(t, output, "PHP Static Analysis")
}
// TestPHPPsalmJSONOutput_DoesNotAppendSuccessBanner is the Psalm analogue of
// the PHPStan JSON test above: --json output must be chrome-free.
func TestPHPPsalmJSONOutput_DoesNotAppendSuccessBanner(t *testing.T) {
dir := t.TempDir()
writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
writeExecutable(t, filepath.Join(dir, "vendor", "bin", "psalm"), "#!/bin/sh\nprintf '%s\\n' '{\"tool\":\"psalm\",\"status\":\"ok\"}'\n")
restoreWorkingDir(t, dir)
resetPHPPsalmFlags(t)
parent := &cli.Command{Use: "qa"}
addPHPPsalmCommand(parent)
command := findSubcommand(t, parent, "psalm")
require.NoError(t, command.Flags().Set("json", "true"))
output := captureStdout(t, func() {
require.NoError(t, command.RunE(command, nil))
})
assert.Equal(t, "{\"tool\":\"psalm\",\"status\":\"ok\"}\n", output)
assert.NotContains(t, output, "Psalm analysis passed")
assert.NotContains(t, output, "PHP Psalm Analysis")
}
// TestPHPStanSARIFOutput_DoesNotAppendSuccessBanner verifies the same
// chrome suppression for --sarif output from PHPStan.
func TestPHPStanSARIFOutput_DoesNotAppendSuccessBanner(t *testing.T) {
dir := t.TempDir()
writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
writeExecutable(t, filepath.Join(dir, "vendor", "bin", "phpstan"), "#!/bin/sh\nprintf '%s\\n' '{\"version\":\"2.1.0\",\"runs\":[]}'\n")
restoreWorkingDir(t, dir)
resetPHPStanFlags(t)
parent := &cli.Command{Use: "qa"}
addPHPStanCommand(parent)
command := findSubcommand(t, parent, "stan")
require.NoError(t, command.Flags().Set("sarif", "true"))
output := captureStdout(t, func() {
require.NoError(t, command.RunE(command, nil))
})
assert.Equal(t, "{\"version\":\"2.1.0\",\"runs\":[]}\n", output)
assert.NotContains(t, output, "Static analysis passed")
assert.NotContains(t, output, "PHP Static Analysis")
}
// TestPHPPsalmSARIFOutput_DoesNotAppendSuccessBanner verifies the same
// chrome suppression for --sarif output from Psalm.
func TestPHPPsalmSARIFOutput_DoesNotAppendSuccessBanner(t *testing.T) {
dir := t.TempDir()
writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
writeExecutable(t, filepath.Join(dir, "vendor", "bin", "psalm"), "#!/bin/sh\nprintf '%s\\n' '{\"version\":\"2.1.0\",\"runs\":[]}'\n")
restoreWorkingDir(t, dir)
resetPHPPsalmFlags(t)
parent := &cli.Command{Use: "qa"}
addPHPPsalmCommand(parent)
command := findSubcommand(t, parent, "psalm")
require.NoError(t, command.Flags().Set("sarif", "true"))
output := captureStdout(t, func() {
require.NoError(t, command.RunE(command, nil))
})
assert.Equal(t, "{\"version\":\"2.1.0\",\"runs\":[]}\n", output)
assert.NotContains(t, output, "Psalm analysis passed")
assert.NotContains(t, output, "PHP Psalm Analysis")
}
// TestPHPSecurityJSONOutput_UsesMachineFriendlyKeys verifies that --json
// security output uses lower-case snake_case keys and carries no header text.
func TestPHPSecurityJSONOutput_UsesMachineFriendlyKeys(t *testing.T) {
dir := t.TempDir()
writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
// .env deliberately fails checks (debug on, short key, plain http URL).
writeTestFile(t, filepath.Join(dir, ".env"), "APP_DEBUG=true\nAPP_KEY=short\nAPP_URL=http://example.com\n")
writeExecutable(t, filepath.Join(dir, "bin", "composer"), "#!/bin/sh\nprintf '%s\\n' '{\"advisories\":{}}'\n")
restoreWorkingDir(t, dir)
prependPath(t, filepath.Join(dir, "bin"))
resetPHPSecurityFlags(t)
parent := &cli.Command{Use: "qa"}
addPHPSecurityCommand(parent)
command := findSubcommand(t, parent, "security")
require.NoError(t, command.Flags().Set("json", "true"))
// Run is expected to error: the fixture env fails security checks.
output := captureStdout(t, func() {
require.Error(t, command.RunE(command, nil))
})
assert.Contains(t, output, "\"checks\"")
assert.Contains(t, output, "\"summary\"")
assert.Contains(t, output, "\"app_key_set\"")
// Exported-field (capitalised) keys must not leak into the JSON.
assert.NotContains(t, output, "\"Checks\"")
assert.NotContains(t, output, "Security Checks")
}
// TestPHPSecuritySARIFOutput_IsStructuredAndChromeFree verifies --sarif emits
// a parseable SARIF 2.1.0 document with failed checks as results, no chrome.
func TestPHPSecuritySARIFOutput_IsStructuredAndChromeFree(t *testing.T) {
dir := t.TempDir()
writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
writeTestFile(t, filepath.Join(dir, ".env"), "APP_DEBUG=true\nAPP_KEY=short\nAPP_URL=http://example.com\n")
writeExecutable(t, filepath.Join(dir, "bin", "composer"), "#!/bin/sh\nprintf '%s\\n' '{\"advisories\":{}}'\n")
restoreWorkingDir(t, dir)
prependPath(t, filepath.Join(dir, "bin"))
resetPHPSecurityFlags(t)
parent := &cli.Command{Use: "qa"}
addPHPSecurityCommand(parent)
command := findSubcommand(t, parent, "security")
require.NoError(t, command.Flags().Set("sarif", "true"))
output := captureStdout(t, func() {
require.Error(t, command.RunE(command, nil))
})
// Output must be valid JSON with the SARIF version marker.
var payload map[string]any
require.NoError(t, json.Unmarshal([]byte(output), &payload))
assert.Equal(t, "2.1.0", payload["version"])
assert.Contains(t, output, "\"ruleId\": \"app_key_set\"")
assert.NotContains(t, output, "Security Checks")
assert.NotContains(t, output, "Summary:")
}
// TestPHPSecurityJSONOutput_RespectsSeverityFilter verifies that the
// --severity filter limits the reported checks and summary counts in JSON.
func TestPHPSecurityJSONOutput_RespectsSeverityFilter(t *testing.T) {
dir := t.TempDir()
writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
writeTestFile(t, filepath.Join(dir, ".env"), "APP_DEBUG=true\nAPP_KEY=short\nAPP_URL=http://example.com\n")
writeExecutable(t, filepath.Join(dir, "bin", "composer"), "#!/bin/sh\nprintf '%s\\n' '{\"advisories\":{}}'\n")
restoreWorkingDir(t, dir)
prependPath(t, filepath.Join(dir, "bin"))
resetPHPSecurityFlags(t)
parent := &cli.Command{Use: "qa"}
addPHPSecurityCommand(parent)
command := findSubcommand(t, parent, "security")
require.NoError(t, command.Flags().Set("json", "true"))
require.NoError(t, command.Flags().Set("severity", "critical"))
output := captureStdout(t, func() {
require.Error(t, command.RunE(command, nil))
})
// Decode just the fields under test from the JSON payload.
var payload struct {
Checks []struct {
ID string `json:"id"`
Severity string `json:"severity"`
} `json:"checks"`
Summary struct {
Total int `json:"total"`
Passed int `json:"passed"`
Critical int `json:"critical"`
High int `json:"high"`
} `json:"summary"`
}
require.NoError(t, json.Unmarshal([]byte(output), &payload))
assert.Equal(t, 3, payload.Summary.Total)
assert.Equal(t, 1, payload.Summary.Passed)
assert.Equal(t, 2, payload.Summary.Critical)
assert.Zero(t, payload.Summary.High)
require.Len(t, payload.Checks, 3)
// Non-critical checks (e.g. https_enforced) must be filtered out.
assert.NotContains(t, output, "https_enforced")
}
// TestPHPAuditJSONOutput_UsesLowerCaseAdvisoryKeys verifies that audit --json
// output uses lower-case keys, counts vulnerabilities, and omits the header,
// while still returning an error because vulnerabilities were found.
func TestPHPAuditJSONOutput_UsesLowerCaseAdvisoryKeys(t *testing.T) {
dir := t.TempDir()
writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
// Fake composer reporting a single advisory for vendor/package-a.
writeExecutable(t, filepath.Join(dir, "composer"), `#!/bin/sh
cat <<'JSON'
{
  "advisories": {
    "vendor/package-a": [
      {
        "title": "Remote Code Execution",
        "link": "https://example.com/advisory/1",
        "cve": "CVE-2025-1234",
        "affectedVersions": ">=1.0,<1.5"
      }
    ]
  }
}
JSON
`)
restoreWorkingDir(t, dir)
prependPath(t, dir)
resetPHPAuditFlags(t)
parent := &cli.Command{Use: "qa"}
addPHPAuditCommand(parent)
command := findSubcommand(t, parent, "audit")
require.NoError(t, command.Flags().Set("json", "true"))
var runErr error
output := captureStdout(t, func() {
runErr = command.RunE(command, nil)
})
// Vulnerabilities present, so the command must error after printing JSON.
require.Error(t, runErr)
var payload struct {
Results []struct {
Tool string `json:"tool"`
Advisories []struct {
Package string `json:"package"`
} `json:"advisories"`
} `json:"results"`
HasVulnerabilities bool `json:"has_vulnerabilities"`
Vulnerabilities int `json:"vulnerabilities"`
}
require.NoError(t, json.Unmarshal([]byte(output), &payload))
require.Len(t, payload.Results, 1)
assert.Equal(t, "composer", payload.Results[0].Tool)
require.Len(t, payload.Results[0].Advisories, 1)
assert.Equal(t, "vendor/package-a", payload.Results[0].Advisories[0].Package)
assert.True(t, payload.HasVulnerabilities)
assert.Equal(t, 1, payload.Vulnerabilities)
assert.NotContains(t, output, "\"Package\"")
assert.NotContains(t, output, "Dependency Audit")
}
// TestPHPTestJUnitOutput_PrintsOnlyXML verifies that --junit mode prints only
// the JUnit XML report, suppressing the runner's human output and banners.
func TestPHPTestJUnitOutput_PrintsOnlyXML(t *testing.T) {
dir := t.TempDir()
writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
// Fake phpunit: parses --log-junit, writes XML there, prints noise to stdout.
writeExecutable(t, filepath.Join(dir, "vendor", "bin", "phpunit"), "#!/bin/sh\njunit=''\nwhile [ $# -gt 0 ]; do\n if [ \"$1\" = \"--log-junit\" ]; then\n shift\n junit=\"$1\"\n fi\n shift\ndone\nprintf '%s\\n' 'human output should be suppressed'\nprintf '%s' '<testsuite tests=\"1\"></testsuite>' > \"$junit\"\n")
restoreWorkingDir(t, dir)
resetPHPTestFlags(t)
parent := &cli.Command{Use: "qa"}
addPHPTestCommand(parent)
command := findSubcommand(t, parent, "test")
require.NoError(t, command.Flags().Set("junit", "true"))
output := captureStdout(t, func() {
require.NoError(t, command.RunE(command, nil))
})
assert.Equal(t, "<testsuite tests=\"1\"></testsuite>\n", output)
assert.NotContains(t, output, "human output should be suppressed")
assert.NotContains(t, output, "PHP Tests")
assert.NotContains(t, output, "All tests passed")
}
// writeTestFile creates path (and any missing parent directories) with the
// given content, failing the test on any error.
func writeTestFile(t *testing.T, path string, content string) {
	t.Helper()
	parent := filepath.Dir(path)
	require.NoError(t, os.MkdirAll(parent, 0o755))
	require.NoError(t, os.WriteFile(path, []byte(content), 0o644))
}
// writeExecutable creates path (and any missing parent directories) with the
// given content and executable (0755) permissions, failing the test on error.
func writeExecutable(t *testing.T, path string, content string) {
	t.Helper()
	parent := filepath.Dir(path)
	require.NoError(t, os.MkdirAll(parent, 0o755))
	require.NoError(t, os.WriteFile(path, []byte(content), 0o755))
}
// restoreWorkingDir changes the working directory to dir for the duration of
// the test, registering a cleanup that returns to the original directory.
func restoreWorkingDir(t *testing.T, dir string) {
	t.Helper()
	previous, err := os.Getwd()
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, os.Chdir(previous))
	})
	require.NoError(t, os.Chdir(dir))
}
// resetPHPStanFlags zeroes the stan command flags for a test and restores the
// previous values via t.Cleanup.
func resetPHPStanFlags(t *testing.T) {
t.Helper()
oldLevel := phpStanLevel
oldMemory := phpStanMemory
oldJSON := phpStanJSON
oldSARIF := phpStanSARIF
phpStanLevel = 0
phpStanMemory = ""
phpStanJSON = false
phpStanSARIF = false
t.Cleanup(func() {
phpStanLevel = oldLevel
phpStanMemory = oldMemory
phpStanJSON = oldJSON
phpStanSARIF = oldSARIF
})
}
// resetPHPPsalmFlags zeroes the psalm command flags for a test and restores
// the previous values via t.Cleanup.
func resetPHPPsalmFlags(t *testing.T) {
t.Helper()
oldLevel := phpPsalmLevel
oldFix := phpPsalmFix
oldBaseline := phpPsalmBaseline
oldShowInfo := phpPsalmShowInfo
oldJSON := phpPsalmJSON
oldSARIF := phpPsalmSARIF
phpPsalmLevel = 0
phpPsalmFix = false
phpPsalmBaseline = false
phpPsalmShowInfo = false
phpPsalmJSON = false
phpPsalmSARIF = false
t.Cleanup(func() {
phpPsalmLevel = oldLevel
phpPsalmFix = oldFix
phpPsalmBaseline = oldBaseline
phpPsalmShowInfo = oldShowInfo
phpPsalmJSON = oldJSON
phpPsalmSARIF = oldSARIF
})
}
// resetPHPSecurityFlags zeroes the security command flags for a test and
// restores the previous values via t.Cleanup.
func resetPHPSecurityFlags(t *testing.T) {
t.Helper()
oldSeverity := phpSecuritySeverity
oldJSON := phpSecurityJSON
oldSARIF := phpSecuritySARIF
oldURL := phpSecurityURL
phpSecuritySeverity = ""
phpSecurityJSON = false
phpSecuritySARIF = false
phpSecurityURL = ""
t.Cleanup(func() {
phpSecuritySeverity = oldSeverity
phpSecurityJSON = oldJSON
phpSecuritySARIF = oldSARIF
phpSecurityURL = oldURL
})
}
// resetPHPAuditFlags zeroes the audit command flags for a test and restores
// the previous values via t.Cleanup.
func resetPHPAuditFlags(t *testing.T) {
t.Helper()
oldJSON := phpAuditJSON
oldFix := phpAuditFix
phpAuditJSON = false
phpAuditFix = false
t.Cleanup(func() {
phpAuditJSON = oldJSON
phpAuditFix = oldFix
})
}
// resetPHPTestFlags zeroes the test command flags for a test and restores the
// previous values via t.Cleanup.
func resetPHPTestFlags(t *testing.T) {
t.Helper()
oldParallel := phpTestParallel
oldCoverage := phpTestCoverage
oldFilter := phpTestFilter
oldGroup := phpTestGroup
oldJUnit := phpTestJUnit
phpTestParallel = false
phpTestCoverage = false
phpTestFilter = ""
phpTestGroup = ""
phpTestJUnit = false
t.Cleanup(func() {
phpTestParallel = oldParallel
phpTestCoverage = oldCoverage
phpTestFilter = oldFilter
phpTestGroup = oldGroup
phpTestJUnit = oldJUnit
})
}
// findSubcommand returns the direct child of parent whose name matches name,
// failing the test when no such subcommand is registered.
func findSubcommand(t *testing.T, parent *cli.Command, name string) *cli.Command {
	t.Helper()
	commands := parent.Commands()
	for index := range commands {
		if commands[index].Name() == name {
			return commands[index]
		}
	}
	t.Fatalf("subcommand %q not found", name)
	return nil
}
// captureStdout redirects os.Stdout to a pipe while fn runs and returns
// everything fn wrote to standard output.
//
// The pipe is drained by a background goroutine while fn executes. The
// original version only read after fn returned, so any fn writing more than
// the OS pipe buffer (typically 64 KiB) would block forever on a full pipe
// and deadlock the test. Draining concurrently removes that limit.
func captureStdout(t *testing.T, fn func()) string {
	t.Helper()
	oldStdout := os.Stdout
	reader, writer, err := os.Pipe()
	require.NoError(t, err)
	os.Stdout = writer
	defer func() {
		os.Stdout = oldStdout
	}()
	type readResult struct {
		data []byte
		err  error
	}
	// Drain concurrently so fn can never fill the pipe buffer and stall.
	done := make(chan readResult, 1)
	go func() {
		data, readErr := io.ReadAll(reader)
		done <- readResult{data: data, err: readErr}
	}()
	fn()
	// Closing the writer signals EOF to the reader goroutine.
	require.NoError(t, writer.Close())
	result := <-done
	require.NoError(t, reader.Close())
	require.NoError(t, result.err)
	return string(result.data)
}
// prependPath puts dir at the front of the PATH environment variable for the
// duration of the test and restores the original PATH via t.Cleanup.
func prependPath(t *testing.T, dir string) {
	t.Helper()
	original := os.Getenv("PATH")
	t.Cleanup(func() {
		require.NoError(t, os.Setenv("PATH", original))
	})
	require.NoError(t, os.Setenv("PATH", dir+string(os.PathListSeparator)+original))
}

View file

@ -13,10 +13,11 @@ package qa
import (
"forge.lthn.ai/core/cli/pkg/cli"
"forge.lthn.ai/core/go-i18n"
"forge.lthn.ai/core/lint/locales"
)
func init() {
cli.RegisterCommands(AddQACommands)
cli.RegisterCommands(AddQACommands, locales.FS)
}
// Style aliases from shared package

View file

@ -12,6 +12,7 @@ import (
"encoding/json"
"fmt"
"os/exec"
"sort"
"strings"
"time"
@ -25,6 +26,7 @@ var (
reviewMine bool
reviewRequested bool
reviewRepo string
reviewJSON bool
)
// PullRequest represents a GitHub pull request
@ -81,6 +83,24 @@ type Review struct {
State string `json:"state"`
}
// ReviewFetchError captures a partial fetch failure while preserving any
// successfully fetched PRs in the same review run.
// ReviewFetchError captures a partial fetch failure while preserving any
// successfully fetched PRs in the same review run.
type ReviewFetchError struct {
Repo string `json:"repo"`
Scope string `json:"scope"`
Error string `json:"error"`
}
// reviewOutput is the JSON payload emitted by `qa review --json`: the PR
// lists for each requested scope, their counts, which scopes were queried,
// and any per-scope fetch errors.
type reviewOutput struct {
Mine []PullRequest `json:"mine"`
Requested []PullRequest `json:"requested"`
TotalMine int `json:"total_mine"`
TotalRequested int `json:"total_requested"`
ShowingMine bool `json:"showing_mine"`
ShowingRequested bool `json:"showing_requested"`
FetchErrors []ReviewFetchError `json:"fetch_errors"`
}
// addReviewCommand adds the 'review' subcommand to the qa command.
func addReviewCommand(parent *cli.Command) {
reviewCmd := &cli.Command{
@ -95,6 +115,7 @@ func addReviewCommand(parent *cli.Command) {
reviewCmd.Flags().BoolVarP(&reviewMine, "mine", "m", false, i18n.T("cmd.qa.review.flag.mine"))
reviewCmd.Flags().BoolVarP(&reviewRequested, "requested", "r", false, i18n.T("cmd.qa.review.flag.requested"))
reviewCmd.Flags().StringVar(&reviewRepo, "repo", "", i18n.T("cmd.qa.review.flag.repo"))
reviewCmd.Flags().BoolVar(&reviewJSON, "json", false, i18n.T("common.flag.json"))
parent.AddCommand(reviewCmd)
}
@ -121,18 +142,98 @@ func runReview() error {
// Default: show both mine and requested if neither flag is set
showMine := reviewMine || (!reviewMine && !reviewRequested)
showRequested := reviewRequested || (!reviewMine && !reviewRequested)
minePRs := []PullRequest{}
requestedPRs := []PullRequest{}
fetchErrors := make([]ReviewFetchError, 0)
mineFetched := false
requestedFetched := false
successfulFetches := 0
if showMine {
if err := showMyPRs(ctx, repoFullName); err != nil {
return err
prs, err := fetchPRs(ctx, repoFullName, "author:@me")
if err != nil {
fetchErrors = append(fetchErrors, ReviewFetchError{
Repo: repoFullName,
Scope: "mine",
Error: strings.TrimSpace(err.Error()),
})
if !reviewJSON {
cli.Warnf("failed to fetch your PRs for %s: %s", repoFullName, strings.TrimSpace(err.Error()))
}
} else {
sort.Slice(prs, func(i, j int) bool {
if prs[i].Number == prs[j].Number {
return strings.Compare(prs[i].Title, prs[j].Title) < 0
}
return prs[i].Number < prs[j].Number
})
minePRs = prs
mineFetched = true
successfulFetches++
}
}
if showRequested {
if showMine {
prs, err := fetchPRs(ctx, repoFullName, "review-requested:@me")
if err != nil {
fetchErrors = append(fetchErrors, ReviewFetchError{
Repo: repoFullName,
Scope: "requested",
Error: strings.TrimSpace(err.Error()),
})
if !reviewJSON {
cli.Warnf("failed to fetch review requested PRs for %s: %s", repoFullName, strings.TrimSpace(err.Error()))
}
} else {
sort.Slice(prs, func(i, j int) bool {
if prs[i].Number == prs[j].Number {
return strings.Compare(prs[i].Title, prs[j].Title) < 0
}
return prs[i].Number < prs[j].Number
})
requestedPRs = prs
requestedFetched = true
successfulFetches++
}
}
output := reviewOutput{
Mine: minePRs,
Requested: requestedPRs,
TotalMine: len(minePRs),
TotalRequested: len(requestedPRs),
ShowingMine: showMine,
ShowingRequested: showRequested,
FetchErrors: fetchErrors,
}
if reviewJSON {
data, err := json.MarshalIndent(output, "", " ")
if err != nil {
return err
}
cli.Print("%s\n", string(data))
if successfulFetches == 0 && len(fetchErrors) > 0 {
return cli.Err("failed to fetch pull requests for %s", repoFullName)
}
return nil
}
if successfulFetches == 0 && len(fetchErrors) > 0 {
return cli.Err("failed to fetch pull requests for %s", repoFullName)
}
if showMine && mineFetched {
if err := printMyPRs(minePRs); err != nil {
return err
}
}
if showRequested && requestedFetched {
if showMine && mineFetched {
cli.Blank()
}
if err := showRequestedReviews(ctx, repoFullName); err != nil {
if err := printRequestedPRs(requestedPRs); err != nil {
return err
}
}
@ -140,13 +241,8 @@ func runReview() error {
return nil
}
// showMyPRs shows the user's open PRs with status
func showMyPRs(ctx context.Context, repo string) error {
prs, err := fetchPRs(ctx, repo, "author:@me")
if err != nil {
return log.E("qa.review", "failed to fetch your PRs", err)
}
// printMyPRs shows the user's open PRs with status
func printMyPRs(prs []PullRequest) error {
if len(prs) == 0 {
cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.qa.review.no_prs")))
return nil
@ -161,13 +257,8 @@ func showMyPRs(ctx context.Context, repo string) error {
return nil
}
// showRequestedReviews shows PRs where user's review is requested
func showRequestedReviews(ctx context.Context, repo string) error {
prs, err := fetchPRs(ctx, repo, "review-requested:@me")
if err != nil {
return log.E("qa.review", "failed to fetch review requests", err)
}
// printRequestedPRs shows PRs where user's review is requested
func printRequestedPRs(prs []PullRequest) error {
if len(prs) == 0 {
cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.qa.review.no_reviews")))
return nil
@ -199,7 +290,7 @@ func fetchPRs(ctx context.Context, repo, search string) ([]PullRequest, error) {
output, err := cmd.Output()
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
return nil, fmt.Errorf("%s", strings.TrimSpace(string(exitErr.Stderr)))
return nil, log.E("qa.fetchPRs", strings.TrimSpace(string(exitErr.Stderr)), nil)
}
return nil, err
}
@ -257,7 +348,7 @@ func analyzePRStatus(pr PullRequest) (status string, style *cli.AnsiStyle, actio
ciPassed := true
ciFailed := false
ciPending := false
var failedCheck string
var failedChecks []string
if pr.StatusChecks != nil {
for _, check := range pr.StatusChecks.Contexts {
@ -265,9 +356,7 @@ func analyzePRStatus(pr PullRequest) (status string, style *cli.AnsiStyle, actio
case "FAILURE", "failure":
ciFailed = true
ciPassed = false
if failedCheck == "" {
failedCheck = check.Name
}
failedChecks = append(failedChecks, check.Name)
case "PENDING", "pending", "":
if check.State == "PENDING" || check.State == "" {
ciPending = true
@ -290,7 +379,11 @@ func analyzePRStatus(pr PullRequest) (status string, style *cli.AnsiStyle, actio
}
if ciFailed {
return "✗", errorStyle, fmt.Sprintf("CI failed: %s", failedCheck)
if len(failedChecks) > 0 {
sort.Strings(failedChecks)
return "✗", errorStyle, fmt.Sprintf("CI failed: %s", failedChecks[0])
}
return "✗", errorStyle, "CI failed"
}
if changesRequested {

269
cmd/qa/cmd_review_test.go Normal file
View file

@ -0,0 +1,269 @@
package qa
import (
"encoding/json"
"path/filepath"
"testing"
"forge.lthn.ai/core/cli/pkg/cli"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRunReviewJSONOutput_PreservesPartialResultsAndFetchErrors verifies that
// when one scope's gh query fails and the other succeeds, --json output keeps
// the successful PRs and records the failure under fetch_errors without error.
func TestRunReviewJSONOutput_PreservesPartialResultsAndFetchErrors(t *testing.T) {
dir := t.TempDir()
// Fake gh: author query fails; review-requested query returns one PR.
writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"author:@me"*)
printf '%s\n' 'simulated author query failure' >&2
exit 1
;;
*"review-requested:@me"*)
cat <<'JSON'
[
  {
    "number": 42,
    "title": "Refine agent output",
    "author": {"login": "alice"},
    "state": "OPEN",
    "isDraft": false,
    "mergeable": "MERGEABLE",
    "reviewDecision": "",
    "url": "https://example.com/pull/42",
    "headRefName": "feature/agent-output",
    "createdAt": "2026-03-30T00:00:00Z",
    "updatedAt": "2026-03-30T00:00:00Z",
    "additions": 12,
    "deletions": 3,
    "changedFiles": 2,
    "reviewRequests": {"nodes": []},
    "reviews": []
  }
]
JSON
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
restoreWorkingDir(t, dir)
prependPath(t, dir)
resetReviewFlags(t)
t.Cleanup(func() {
reviewRepo = ""
})
parent := &cli.Command{Use: "qa"}
addReviewCommand(parent)
command := findSubcommand(t, parent, "review")
require.NoError(t, command.Flags().Set("repo", "forge/example"))
require.NoError(t, command.Flags().Set("json", "true"))
// Partial success must not surface as a command error.
output := captureStdout(t, func() {
require.NoError(t, command.RunE(command, nil))
})
var payload reviewOutput
require.NoError(t, json.Unmarshal([]byte(output), &payload))
assert.True(t, payload.ShowingMine)
assert.True(t, payload.ShowingRequested)
require.Len(t, payload.Mine, 0)
require.Len(t, payload.Requested, 1)
assert.Equal(t, 42, payload.Requested[0].Number)
assert.Equal(t, "Refine agent output", payload.Requested[0].Title)
// The failed "mine" fetch must be recorded with repo, scope, and message.
require.Len(t, payload.FetchErrors, 1)
assert.Equal(t, "forge/example", payload.FetchErrors[0].Repo)
assert.Equal(t, "mine", payload.FetchErrors[0].Scope)
assert.Contains(t, payload.FetchErrors[0].Error, "simulated author query failure")
}
// TestRunReviewJSONOutput_ReturnsErrorWhenAllFetchesFail verifies that when
// both scope queries fail, the JSON payload is still printed (with both fetch
// errors recorded) and the command returns an error.
func TestRunReviewJSONOutput_ReturnsErrorWhenAllFetchesFail(t *testing.T) {
dir := t.TempDir()
// Fake gh: both scope queries exit non-zero with distinct messages.
writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"author:@me"*)
printf '%s\n' 'simulated author query failure' >&2
exit 1
;;
*"review-requested:@me"*)
printf '%s\n' 'simulated requested query failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
restoreWorkingDir(t, dir)
prependPath(t, dir)
resetReviewFlags(t)
t.Cleanup(func() {
reviewRepo = ""
})
parent := &cli.Command{Use: "qa"}
addReviewCommand(parent)
command := findSubcommand(t, parent, "review")
require.NoError(t, command.Flags().Set("repo", "forge/example"))
require.NoError(t, command.Flags().Set("json", "true"))
var runErr error
output := captureStdout(t, func() {
runErr = command.RunE(command, nil)
})
// Total failure: error returned, but the JSON document is still emitted.
require.Error(t, runErr)
var payload reviewOutput
require.NoError(t, json.Unmarshal([]byte(output), &payload))
assert.Empty(t, payload.Mine)
assert.Empty(t, payload.Requested)
require.Len(t, payload.FetchErrors, 2)
assert.Equal(t, "mine", payload.FetchErrors[0].Scope)
assert.Equal(t, "requested", payload.FetchErrors[1].Scope)
}
// TestRunReviewHumanOutput_PreservesSuccessfulSectionWhenOneFetchFails checks
// that the human-readable output still renders the section whose fetch
// succeeded (review-requested) when the other fetch (author/"mine") fails,
// and that the failed section leaves no header or empty-state text behind.
func TestRunReviewHumanOutput_PreservesSuccessfulSectionWhenOneFetchFails(t *testing.T) {
	// Stub `gh` with a shell script: the author query fails, while the
	// review-requested query returns one open PR (#42) as JSON.
	dir := t.TempDir()
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"author:@me"*)
printf '%s\n' 'simulated author query failure' >&2
exit 1
;;
*"review-requested:@me"*)
cat <<'JSON'
[
{
"number": 42,
"title": "Refine agent output",
"author": {"login": "alice"},
"state": "OPEN",
"isDraft": false,
"mergeable": "MERGEABLE",
"reviewDecision": "",
"url": "https://example.com/pull/42",
"headRefName": "feature/agent-output",
"createdAt": "2026-03-30T00:00:00Z",
"updatedAt": "2026-03-30T00:00:00Z",
"additions": 12,
"deletions": 3,
"changedFiles": 2,
"reviewRequests": {"nodes": []},
"reviews": []
}
]
JSON
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	// Run from the temp dir with the stub `gh` first on PATH; restore the
	// package-level review flags afterwards.
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetReviewFlags(t)
	t.Cleanup(func() {
		reviewRepo = ""
	})
	// Build the `qa review` command and target an explicit repo so the
	// stubbed queries are exercised.
	parent := &cli.Command{Use: "qa"}
	addReviewCommand(parent)
	command := findSubcommand(t, parent, "review")
	require.NoError(t, command.Flags().Set("repo", "forge/example"))
	output := captureStdout(t, func() {
		// A single failed fetch must not fail the whole command.
		require.NoError(t, command.RunE(command, nil))
	})
	// The successful review-requested section is rendered with its PR line
	// and checkout hint...
	assert.Contains(t, output, "#42 Refine agent output")
	assert.Contains(t, output, "gh pr checkout 42")
	// ...while the failed "mine" section prints neither its header nor the
	// no-PRs i18n key.
	assert.NotContains(t, output, "Your pull requests")
	assert.NotContains(t, output, "cmd.qa.review.no_prs")
}
// TestRunReviewHumanOutput_ReturnsErrorWhenAllFetchesFail verifies that the
// human-readable mode returns an error when both `gh` queries fail, and that
// neither section header is printed for the failed fetches.
func TestRunReviewHumanOutput_ReturnsErrorWhenAllFetchesFail(t *testing.T) {
	// Stub `gh` so both queries exit non-zero with distinct messages.
	stubDir := t.TempDir()
	writeExecutable(t, filepath.Join(stubDir, "gh"), `#!/bin/sh
case "$*" in
*"author:@me"*)
printf '%s\n' 'simulated author query failure' >&2
exit 1
;;
*"review-requested:@me"*)
printf '%s\n' 'simulated requested query failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	// Run from the temp dir with the stub first on PATH; restore the
	// package-level review flags afterwards.
	restoreWorkingDir(t, stubDir)
	prependPath(t, stubDir)
	resetReviewFlags(t)
	t.Cleanup(func() {
		reviewRepo = ""
	})
	// Build `qa review` targeting an explicit repo (no --json flag here).
	parent := &cli.Command{Use: "qa"}
	addReviewCommand(parent)
	reviewCmd := findSubcommand(t, parent, "review")
	require.NoError(t, reviewCmd.Flags().Set("repo", "forge/example"))
	var execErr error
	stdout := captureStdout(t, func() {
		execErr = reviewCmd.RunE(reviewCmd, nil)
	})
	require.Error(t, execErr)
	// Neither section header appears when every fetch failed.
	assert.NotContains(t, stdout, "Your pull requests")
	assert.NotContains(t, stdout, "Review requested")
}
func TestAnalyzePRStatus_UsesDeterministicFailedCheckName(t *testing.T) {
pr := PullRequest{
Mergeable: "MERGEABLE",
ReviewDecision: "",
StatusChecks: &StatusCheckRollup{
Contexts: []StatusContext{
{State: "FAILURE", Conclusion: "failure", Name: "Zulu"},
{State: "FAILURE", Conclusion: "failure", Name: "Alpha"},
},
},
}
status, _, action := analyzePRStatus(pr)
assert.Equal(t, "✗", status)
assert.Equal(t, "CI failed: Alpha", action)
}
// resetReviewFlags zeroes the package-level review command flags for the
// duration of a test and restores their previous values on cleanup.
func resetReviewFlags(t *testing.T) {
	t.Helper()
	// Capture the current values before touching anything.
	savedMine, savedRequested := reviewMine, reviewRequested
	savedRepo, savedJSON := reviewRepo, reviewJSON
	t.Cleanup(func() {
		reviewMine, reviewRequested = savedMine, savedRequested
		reviewRepo, reviewJSON = savedRepo, savedJSON
	})
	// Start every test from a clean flag state.
	reviewMine = false
	reviewRequested = false
	reviewRepo = ""
	reviewJSON = false
}

View file

@ -9,10 +9,12 @@
package qa
import (
"cmp"
"context"
"encoding/json"
"fmt"
"os/exec"
"slices"
"strings"
"time"
@ -43,11 +45,12 @@ type WorkflowRun struct {
// WorkflowJob represents a job within a workflow run
type WorkflowJob struct {
ID int64 `json:"databaseId"`
Name string `json:"name"`
Status string `json:"status"`
Conclusion string `json:"conclusion"`
URL string `json:"url"`
ID int64 `json:"databaseId"`
Name string `json:"name"`
Status string `json:"status"`
Conclusion string `json:"conclusion"`
URL string `json:"url"`
Steps []JobStep `json:"steps"`
}
// JobStep represents a step within a job
@ -110,6 +113,7 @@ func runWatch() error {
// Poll for workflow runs
pollInterval := 3 * time.Second
var lastStatus string
waitingStatus := dimStyle.Render(i18n.T("cmd.qa.watch.waiting_for_workflows"))
for {
// Check if context deadline exceeded
@ -125,7 +129,10 @@ func runWatch() error {
if len(runs) == 0 {
// No workflows triggered yet, keep waiting
cli.Print("\033[2K\r%s", dimStyle.Render(i18n.T("cmd.qa.watch.waiting_for_workflows")))
if waitingStatus != lastStatus {
cli.Print("%s\n", waitingStatus)
lastStatus = waitingStatus
}
time.Sleep(pollInterval)
continue
}
@ -169,12 +176,11 @@ func runWatch() error {
// Only print if status changed
if status != lastStatus {
cli.Print("\033[2K\r%s", status)
cli.Print("%s\n", status)
lastStatus = status
}
if allComplete {
cli.Blank()
cli.Blank()
return printResults(ctx, repoFullName, runs)
}
@ -261,7 +267,7 @@ func parseGitHubRepo(url string) (string, error) {
}
}
return "", fmt.Errorf("could not parse GitHub repo from URL: %s", url)
return "", log.E("qa.parseGitHubRepo", "could not parse GitHub repo from URL: "+url, nil)
}
// fetchWorkflowRunsForCommit fetches workflow runs for a specific commit
@ -308,14 +314,17 @@ func printResults(ctx context.Context, repoFullName string, runs []WorkflowRun)
}
}
slices.SortFunc(successes, compareWorkflowRun)
slices.SortFunc(failures, compareWorkflowRun)
// Print successes briefly
for _, run := range successes {
cli.Print("%s %s\n", successStyle.Render(cli.Glyph(":check:")), run.Name)
cli.Print("%s %s\n", successStyle.Render(i18n.T("common.label.success")), run.Name)
}
// Print failures with details
for _, run := range failures {
cli.Print("%s %s\n", errorStyle.Render(cli.Glyph(":cross:")), run.Name)
cli.Print("%s %s\n", errorStyle.Render(i18n.T("common.label.error")), run.Name)
// Fetch failed job details
failedJob, failedStep, errorLine := fetchFailureDetails(ctx, repoFullName, run.ID)
@ -359,25 +368,20 @@ func fetchFailureDetails(ctx context.Context, repoFullName string, runID int64)
}
var result struct {
Jobs []struct {
Name string `json:"name"`
Conclusion string `json:"conclusion"`
Steps []struct {
Name string `json:"name"`
Conclusion string `json:"conclusion"`
Number int `json:"number"`
} `json:"steps"`
} `json:"jobs"`
Jobs []WorkflowJob `json:"jobs"`
}
if err := json.Unmarshal(output, &result); err != nil {
return "", "", ""
}
slices.SortFunc(result.Jobs, compareWorkflowJob)
// Find the failed job and step
for _, job := range result.Jobs {
if job.Conclusion == "failure" {
jobName = job.Name
slices.SortFunc(job.Steps, compareJobStep)
for _, step := range job.Steps {
if step.Conclusion == "failure" {
stepName = fmt.Sprintf("%d: %s", step.Number, step.Name)
@ -442,3 +446,33 @@ func fetchErrorFromLogs(ctx context.Context, repoFullName string, runID int64) s
return ""
}
// compareWorkflowRun gives WorkflowRun values a total, deterministic sort
// order: Name first, then DisplayTitle, CreatedAt, UpdatedAt, ID, and URL as
// successive tie-breakers (cmp.Or returns the first non-zero comparison).
func compareWorkflowRun(a, b WorkflowRun) int {
	return cmp.Or(
		cmp.Compare(a.Name, b.Name),
		cmp.Compare(a.DisplayTitle, b.DisplayTitle),
		a.CreatedAt.Compare(b.CreatedAt),
		a.UpdatedAt.Compare(b.UpdatedAt),
		cmp.Compare(a.ID, b.ID),
		cmp.Compare(a.URL, b.URL),
	)
}
// compareWorkflowJob orders WorkflowJob values deterministically by Name,
// then Conclusion, Status, ID, and URL as successive tie-breakers.
func compareWorkflowJob(a, b WorkflowJob) int {
	return cmp.Or(
		cmp.Compare(a.Name, b.Name),
		cmp.Compare(a.Conclusion, b.Conclusion),
		cmp.Compare(a.Status, b.Status),
		cmp.Compare(a.ID, b.ID),
		cmp.Compare(a.URL, b.URL),
	)
}
// compareJobStep orders JobStep values deterministically by Number first,
// then Name, Conclusion, and Status as tie-breakers.
func compareJobStep(a, b JobStep) int {
	return cmp.Or(
		cmp.Compare(a.Number, b.Number),
		cmp.Compare(a.Name, b.Name),
		cmp.Compare(a.Conclusion, b.Conclusion),
		cmp.Compare(a.Status, b.Status),
	)
}

103
cmd/qa/cmd_watch_test.go Normal file
View file

@ -0,0 +1,103 @@
package qa
import (
"context"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestPrintResults_SortsRunsAndUsesDeterministicDetails checks that
// printResults prints successes and failures in name-sorted order, emits no
// ANSI clear-line escape sequences, and selects failure details
// deterministically (sorted job, then sorted step) regardless of the order
// `gh` returns them.
func TestPrintResults_SortsRunsAndUsesDeterministicDetails(t *testing.T) {
	// Stub `gh` so the job/log queries for the two failed runs (IDs 2 and 4)
	// return fixed payloads; jobs and steps are deliberately out of order.
	dir := t.TempDir()
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"run view 2 --repo forge/alpha --json jobs"*)
cat <<'JSON'
{"jobs":[
{
"databaseId": 20,
"name": "Zulu Job",
"status": "completed",
"conclusion": "failure",
"steps": [
{"name": "Zulu Step", "status": "completed", "conclusion": "failure", "number": 2}
]
},
{
"databaseId": 10,
"name": "Alpha Job",
"status": "completed",
"conclusion": "failure",
"steps": [
{"name": "Zulu Step", "status": "completed", "conclusion": "failure", "number": 2},
{"name": "Alpha Step", "status": "completed", "conclusion": "failure", "number": 1}
]
}
]}
JSON
;;
*"run view 2 --repo forge/alpha --log-failed"*)
cat <<'EOF'
Alpha error detail
EOF
;;
*"run view 4 --repo forge/alpha --json jobs"*)
cat <<'JSON'
{"jobs":[
{
"databaseId": 40,
"name": "Omega Job",
"status": "completed",
"conclusion": "failure",
"steps": [
{"name": "Omega Step", "status": "completed", "conclusion": "failure", "number": 1}
]
}
]}
JSON
;;
*"run view 4 --repo forge/alpha --log-failed"*)
cat <<'EOF'
Omega error detail
EOF
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	prependPath(t, dir)
	// Two successes and two failures, each pair listed in reverse name order
	// to prove the output is sorted rather than input-ordered.
	runs := []WorkflowRun{
		{ID: 3, Name: "Zulu Build", Conclusion: "success", URL: "https://example.com/zulu"},
		{ID: 1, Name: "Alpha Build", Conclusion: "success", URL: "https://example.com/alpha"},
		{ID: 4, Name: "Omega Failure", Conclusion: "failure", URL: "https://example.com/omega"},
		{ID: 2, Name: "Beta Failure", Conclusion: "failure", URL: "https://example.com/beta"},
	}
	output := captureStdout(t, func() {
		// Failed runs make printResults report an error.
		err := printResults(context.Background(), "forge/alpha", runs)
		require.Error(t, err)
	})
	// No cursor-control escapes in the captured output.
	assert.NotContains(t, output, "\033[2K\r")
	// Successes are printed in name order: Alpha Build before Zulu Build.
	alphaBuild := strings.Index(output, "Alpha Build")
	require.NotEqual(t, -1, alphaBuild)
	zuluBuild := strings.Index(output, "Zulu Build")
	require.NotEqual(t, -1, zuluBuild)
	assert.Less(t, alphaBuild, zuluBuild)
	// Failures likewise: Beta Failure before Omega Failure.
	betaFailure := strings.Index(output, "Beta Failure")
	require.NotEqual(t, -1, betaFailure)
	omegaFailure := strings.Index(output, "Omega Failure")
	require.NotEqual(t, -1, omegaFailure)
	assert.Less(t, betaFailure, omegaFailure)
	// Details for run 2 come from the name-sorted first job ("Alpha Job")
	// and its number-sorted first failed step, not from the job `gh`
	// happened to list first ("Zulu Job").
	assert.Contains(t, output, "Job: Alpha Job (step: 1: Alpha Step)")
	assert.Contains(t, output, "Error: Alpha error detail")
	assert.NotContains(t, output, "Job: Zulu Job")
}

View file

@ -0,0 +1,440 @@
# RFC-025: Agent Experience (AX) Design Principles
- **Status:** Draft
- **Authors:** Snider, Cladius
- **Date:** 2026-03-19
- **Applies to:** All Core ecosystem packages (CoreGO, CorePHP, CoreTS, core-agent)
## Abstract
Agent Experience (AX) is a design paradigm for software systems where the primary code consumer is an AI agent, not a human developer. AX sits alongside User Experience (UX) and Developer Experience (DX) as the third era of interface design.
This RFC establishes AX as a formal design principle for the Core ecosystem and defines the conventions that follow from it.
## Motivation
As of early 2026, AI agents write, review, and maintain the majority of code in the Core ecosystem. The original author has not manually edited code (outside of Core struct design) since October 2025. Code is processed semantically — agents reason about intent, not characters.
Design patterns inherited from the human-developer era optimise for the wrong consumer:
- **Short names** save keystrokes but increase semantic ambiguity
- **Functional option chains** are fluent for humans but opaque for agents tracing configuration
- **Error-at-every-call-site** produces 50% boilerplate that obscures intent
- **Generic type parameters** force agents to carry type context that the runtime already has
- **Panic-hiding conventions** (`Must*`) create implicit control flow that agents must special-case
AX acknowledges this shift and provides principles for designing code, APIs, file structures, and conventions that serve AI agents as first-class consumers.
## The Three Eras
| Era | Primary Consumer | Optimises For | Key Metric |
|-----|-----------------|---------------|------------|
| UX | End users | Discoverability, forgiveness, visual clarity | Task completion time |
| DX | Developers | Typing speed, IDE support, convention familiarity | Time to first commit |
| AX | AI agents | Predictability, composability, semantic navigation | Correct-on-first-pass rate |
AX does not replace UX or DX. End users still need good UX. Developers still need good DX. But when the primary code author and maintainer is an AI agent, the codebase should be designed for that consumer first.
## Principles
### 1. Predictable Names Over Short Names
Names are tokens that agents pattern-match across languages and contexts. Abbreviations introduce mapping overhead.
```
Config not Cfg
Service not Srv
Embed not Emb
Error not Err (as a subsystem name; err for local variables is fine)
Options not Opts
```
**Rule:** If a name would require a comment to explain, it is too short.
**Exception:** Industry-standard abbreviations that are universally understood (`HTTP`, `URL`, `ID`, `IPC`, `I18n`) are acceptable. The test: would an agent trained on any mainstream language recognise it without context?
### 2. Comments as Usage Examples
The function signature tells WHAT. The comment shows HOW with real values.
```go
// Detect the project type from files present
setup.Detect("/path/to/project")
// Set up a workspace with auto-detected template
setup.Run(setup.Options{Path: ".", Template: "auto"})
// Scaffold a PHP module workspace
setup.Run(setup.Options{Path: "./my-module", Template: "php"})
```
**Rule:** If a comment restates what the type signature already says, delete it. If a comment shows a concrete usage with realistic values, keep it.
**Rationale:** Agents learn from examples more effectively than from descriptions. A comment like "Run executes the setup process" adds zero information. A comment like `setup.Run(setup.Options{Path: ".", Template: "auto"})` teaches an agent exactly how to call the function.
### 3. Path Is Documentation
File and directory paths should be self-describing. An agent navigating the filesystem should understand what it is looking at without reading a README.
```
flow/deploy/to/homelab.yaml — deploy TO the homelab
flow/deploy/from/github.yaml — deploy FROM GitHub
flow/code/review.yaml — code review flow
template/file/go/struct.go.tmpl — Go struct file template
template/dir/workspace/php/ — PHP workspace scaffold
```
**Rule:** If an agent needs to read a file to understand what a directory contains, the directory naming has failed.
**Corollary:** The unified path convention (folder structure = HTTP route = CLI command = test path) is AX-native. One path, every surface.
### 4. Templates Over Freeform
When an agent generates code from a template, the output is constrained to known-good shapes. When an agent writes freeform, the output varies.
```go
// Template-driven — consistent output
lib.RenderFile("php/action", data)
lib.ExtractDir("php", targetDir, data)
// Freeform — variance in output
"write a PHP action class that..."
```
**Rule:** For any code pattern that recurs, provide a template. Templates are guardrails for agents.
**Scope:** Templates apply to file generation, workspace scaffolding, config generation, and commit messages. They do NOT apply to novel logic — agents should write business logic freeform with the domain knowledge available.
### 5. Declarative Over Imperative
Agents reason better about declarations of intent than sequences of operations.
```yaml
# Declarative — agent sees what should happen
steps:
- name: build
flow: tools/docker-build
with:
context: "{{ .app_dir }}"
image_name: "{{ .image_name }}"
- name: deploy
flow: deploy/with/docker
with:
host: "{{ .host }}"
```
```go
// Imperative — agent must trace execution
cmd := exec.Command("docker", "build", "--platform", "linux/amd64", "-t", imageName, ".")
cmd.Dir = appDir
if err := cmd.Run(); err != nil {
return fmt.Errorf("docker build: %w", err)
}
```
**Rule:** Orchestration, configuration, and pipeline logic should be declarative (YAML/JSON). Implementation logic should be imperative (Go/PHP/TS). The boundary is: if an agent needs to compose or modify the logic, make it declarative.
### 6. Universal Types (Core Primitives)
Every component in the ecosystem accepts and returns the same primitive types. An agent processing any level of the tree sees identical shapes.
```go
// Universal contract
setup.Run(core.Options{Path: ".", Template: "auto"})
brain.New(core.Options{Name: "openbrain"})
deploy.Run(core.Options{Flow: "deploy/to/homelab"})
// Fractal — Core itself is a Service
core.New(core.Options{
Services: []core.Service{
process.New(core.Options{Name: "process"}),
brain.New(core.Options{Name: "brain"}),
},
})
```
**Core primitive types:**
| Type | Purpose |
|------|---------|
| `core.Options` | Input configuration (what you want) |
| `core.Config` | Runtime settings (what is active) |
| `core.Data` | Embedded or stored content |
| `core.Service` | A managed component with lifecycle |
| `core.Result[T]` | Return value with OK/fail state |
**What this replaces:**
| Go Convention | Core AX | Why |
|--------------|---------|-----|
| `func With*(v) Option` | `core.Options{Field: v}` | Struct literal is parseable; option chain requires tracing |
| `func Must*(v) T` | `core.Result[T]` | No hidden panics; errors flow through Core |
| `func *For[T](c) T` | `c.Service("name")` | String lookup is greppable; generics require type context |
| `val, err :=` everywhere | Single return via `core.Result` | Intent not obscured by error handling |
| `_ = err` | Never needed | Core handles all errors internally |
### 7. Directory as Semantics
The directory structure tells an agent the intent before it reads a word. Top-level directories are semantic categories, not organisational bins.
```
plans/
├── code/ # Pure primitives — read for WHAT exists
├── project/ # Products — read for WHAT we're building and WHY
└── rfc/ # Contracts — read for constraints and rules
```
**Rule:** An agent should know what kind of document it's reading from the path alone. `code/core/go/io/RFC.md` = a lib primitive spec. `project/ofm/RFC.md` = a product spec that cross-references code/. `rfc/snider/borg/RFC-BORG-006-SMSG-FORMAT.md` = an immutable contract for the Borg SMSG protocol.
**Corollary:** The three-way split (code/project/rfc) extends principle 3 (Path Is Documentation) from files to entire subtrees. The path IS the metadata.
### 8. Lib Never Imports Consumer
Dependency flows one direction. Libraries define primitives. Consumers compose from them. A new feature in a consumer can never break a library.
```
code/core/go/* → lib tier (stable foundation)
code/core/agent/ → consumer tier (composes from go/*)
code/core/cli/ → consumer tier (composes from go/*)
code/core/gui/ → consumer tier (composes from go/*)
```
**Rule:** If package A is in `go/` and package B is in the consumer tier, B may import A but A must never import B. The repo naming convention enforces this: `go-{name}` = lib, bare `{name}` = consumer.
**Why this matters for agents:** When an agent is dispatched to implement a feature in `core/agent`, it can freely import from `go-io`, `go-scm`, `go-process`. But if an agent is dispatched to `go-io`, it knows its changes are foundational — every consumer depends on it, so the contract must not break.
### 9. Issues Are N+(rounds) Deep
Problems in code and specs are layered. Surface issues mask deeper issues. Fixing the surface reveals the next layer. This is not a failure mode — it is the discovery process.
```
Pass 1: Find 16 issues (surface — naming, imports, obvious errors)
Pass 2: Find 11 issues (structural — contradictions, missing types)
Pass 3: Find 5 issues (architectural — signature mismatches, registration gaps)
Pass 4: Find 4 issues (contract — cross-spec API mismatches)
Pass 5: Find 2 issues (mechanical — path format, nil safety)
Pass N: Findings are trivial → spec/code is complete
```
**Rule:** Iteration is required, not a failure. Each pass sees what the previous pass could not, because the context changed. An agent dispatched with the same task on the same repo will find different things each time — this is correct behaviour.
**Corollary:** The cheapest model should do the most passes (surface work). The frontier model should arrive last, when only deep issues remain. Tiered iteration: grunt model grinds → mid model pre-warms → frontier model polishes.
**Anti-pattern:** One-shot generation expecting valid output. No model, no human, produces correct-on-first-pass for non-trivial work. Expecting it wastes the first pass on surface issues that a cheaper pass would have caught.
### 10. CLI Tests as Artifact Validation
Unit tests verify the code. CLI tests verify the binary. The directory structure IS the command structure — path maps to command, Taskfile runs the test.
```
tests/cli/
├── core/
│ └── lint/
│ ├── Taskfile.yaml ← test `core-lint` (root)
│ ├── run/
│ │ ├── Taskfile.yaml ← test `core-lint run`
│ │ └── fixtures/
│ ├── go/
│ │ ├── Taskfile.yaml ← test `core-lint go`
│ │ └── fixtures/
│ └── security/
│ ├── Taskfile.yaml ← test `core-lint security`
│ └── fixtures/
```
**Rule:** Every CLI command has a matching `tests/cli/{path}/Taskfile.yaml`. The Taskfile runs the compiled binary against fixtures with known inputs and validates the output. If the CLI test passes, the underlying actions work — because CLI commands call actions, MCP tools call actions, API endpoints call actions. Test the CLI, trust the rest.
**Pattern:**
```yaml
# tests/cli/core/lint/go/Taskfile.yaml
version: '3'
tasks:
test:
cmds:
- core-lint go --output json fixtures/ > /tmp/result.json
- jq -e '.findings | length > 0' /tmp/result.json
- jq -e '.summary.passed == false' /tmp/result.json
```
**Why this matters for agents:** An agent can validate its own work by running `task test` in the matching `tests/cli/` directory. No test framework, no mocking, no setup — just the binary, fixtures, and `jq` assertions. The agent builds the binary, runs the test, sees the result. If it fails, the agent can read the fixture, read the output, and fix the code.
**Corollary:** Fixtures are planted bugs. Each fixture file has a known issue that the linter must find. If the linter doesn't find it, the test fails. Fixtures are the spec for what the tool must detect — they ARE the test cases, not descriptions of test cases.
## Applying AX to Existing Patterns
### File Structure
```
# AX-native: path describes content
core/agent/
├── go/ # Go source
├── php/ # PHP source
├── ui/ # Frontend source
├── claude/ # Claude Code plugin
└── codex/ # Codex plugin
# Not AX: generic names requiring README
src/
├── lib/
├── utils/
└── helpers/
```
### Error Handling
```go
// AX-native: errors are infrastructure, not application logic
svc := c.Service("brain")
cfg := c.Config().Get("database.host")
// Errors logged by Core. Code reads like a spec.
// Not AX: errors dominate the code
svc, err := c.ServiceFor[brain.Service]()
if err != nil {
return fmt.Errorf("get brain service: %w", err)
}
cfg, err := c.Config().Get("database.host")
if err != nil {
_ = err // silenced because "it'll be fine"
}
```
### API Design
```go
// AX-native: one shape, every surface
core.New(core.Options{
Name: "my-app",
Services: []core.Service{...},
Config: core.Config{...},
})
// Not AX: multiple patterns for the same thing
core.New(
core.WithName("my-app"),
core.WithService(factory1),
core.WithService(factory2),
core.WithConfig(cfg),
)
```
## The Plans Convention — AX Development Lifecycle
The `plans/` directory structure encodes a development methodology designed for how generative AI actually works: iterative refinement across structured phases, not one-shot generation.
### The Three-Way Split
```
plans/
├── project/ # 1. WHAT and WHY — start here
├── rfc/ # 2. CONSTRAINTS — immutable contracts
└── code/ # 3. HOW — implementation specs
```
Each directory is a phase. Work flows from project → rfc → code. Each transition forces a refinement pass — you cannot write a code spec without discovering gaps in the project spec, and you cannot write an RFC without discovering assumptions in both.
**Three places for data that can't be written simultaneously = three guaranteed iterations of "actually, this needs changing."** Refinement is baked into the structure, not bolted on as a review step.
### Phase 1: Project (Vision)
Start with `project/`. No code exists yet. Define:
- What the product IS and who it serves
- What existing primitives it consumes (cross-ref to `code/`)
- What constraints it operates under (cross-ref to `rfc/`)
This is where creativity lives. Map features to building blocks. Connect systems. The project spec is integrative — it references everything else.
### Phase 2: RFC (Contracts)
Extract the immutable rules into `rfc/`. These are constraints that don't change with implementation:
- Wire formats, protocols, hash algorithms
- Security properties that must hold
- Compatibility guarantees
RFCs are numbered per component (`RFC-BORG-006-SMSG-FORMAT.md`) and never modified after acceptance. If the contract changes, write a new RFC.
### Phase 3: Code (Implementation Specs)
Define the implementation in `code/`. Each component gets an RFC.md that an agent can implement from:
- Struct definitions (the DTOs — see principle 6)
- Method signatures and behaviour
- Error conditions and edge cases
- Cross-references to other code/ specs
The code spec IS the product. Write the spec → dispatch to an agent → review output → iterate.
### Pre-Launch: Alignment Protocol
Before dispatching for implementation, verify spec-model alignment:
```
1. REVIEW — The implementation model (Codex/Jules) reads the spec
and reports missing elements. This surfaces the delta between
the model's training and the spec's assumptions.
"I need X, Y, Z to implement this" is the model saying
"I hear you but I'm missing context" — without asking.
2. ADJUST — Update the spec to close the gaps. Add examples,
clarify ambiguities, provide the context the model needs.
This is shared alignment, not compromise.
3. VERIFY — A different model (or sub-agent) reviews the adjusted
spec without the planner's bias. Fresh eyes on the contract.
"Does this make sense to someone who wasn't in the room?"
4. READY — When the review findings are trivial or deployment-
related (not architectural), the spec is ready to dispatch.
```
### Implementation: Iterative Dispatch
Same prompt, multiple runs. Each pass sees deeper because the context evolved:
```
Round 1: Build features (the obvious gaps)
Round 2: Write tests (verify what was built)
Round 3: Harden security (what can go wrong?)
Round 4: Next RFC section (what's still missing?)
Round N: Findings are trivial → implementation is complete
```
Re-running is not failure. It is the process. Each pass changes the codebase, which changes what the next pass can see. The iteration IS the refinement.
### Post-Implementation: Auto-Documentation
The QA/verify chain produces artefacts that feed forward:
- Test results document the contract (what works, what doesn't)
- Coverage reports surface untested paths
- Diff summaries prep the changelog for the next release
- Doc site updates from the spec (the spec IS the documentation)
The output of one cycle is the input to the next. The plans repo stays current because the specs drive the code, not the other way round.
## Compatibility
AX conventions are valid, idiomatic Go/PHP/TS. They do not require language extensions, code generation, or non-standard tooling. An AX-designed codebase compiles, tests, and deploys with standard toolchains.
The conventions diverge from community patterns (functional options, Must/For, etc.) but do not violate language specifications. This is a style choice, not a fork.
## Adoption
AX applies to all new code in the Core ecosystem. Existing code migrates incrementally as it is touched — no big-bang rewrite.
Priority order:
1. **Public APIs** (package-level functions, struct constructors)
2. **File structure** (path naming, template locations)
3. **Internal fields** (struct field names, local variables)
## References
- dAppServer unified path convention (2024)
- CoreGO DTO pattern refactor (2026-03-18)
- Core primitives design (2026-03-19)
- Go Proverbs, Rob Pike (2015) — AX provides an updated lens
## Changelog
- 2026-03-19: Initial draft

685
docs/RFC-LINT.md Normal file
View file

@ -0,0 +1,685 @@
# RFC-LINT: core/lint Agent-Native CLI and Adapter Contract
- **Status:** Implemented
- **Date:** 2026-03-30
- **Applies to:** `forge.lthn.ai/core/lint`
- **Standard:** [`docs/RFC-CORE-008-AGENT-EXPERIENCE.md`](./RFC-CORE-008-AGENT-EXPERIENCE.md)
## Abstract
`core/lint` is a standalone Go CLI and library that detects project languages, runs matching lint adapters, merges their findings into one report, and writes machine-readable output for local development, CI, and agent QA.
The binary does not bundle external linters. It orchestrates tools already present in `PATH`, treats missing tools as `skipped`, and keeps the orchestration report contract separate from the legacy catalog commands.
This RFC describes the implementation that exists in this repository. It replaces the earlier draft that described a future Core service with Tasks, IPC actions, MCP wrapping, build stages, artifact stages, entitlement gates, and scheduled runs. Those designs are not the current contract.
## Motivation
Earlier drafts described a future `core/lint` service that does not exist in this module. Agents dispatched to this repository need the contract that is implemented now, not the architecture that might exist later.
The current implementation has three properties that matter for AX:
- one CLI binary with explicit command paths
- one orchestration DTO (`RunInput`) and one orchestration report (`Report`)
- one clear split between adapter-driven runs and the older embedded catalog commands
An agent should be able to read the paths, map the commands, and predict the output shapes without reverse-engineering aspirational features from an outdated RFC.
## AX Principles Applied
This RFC follows the Agent Experience standard directly:
1. Predictable names over short names: `RunInput`, `Report`, `ToolRun`, `ToolInfo`, `Service`, and `Adapter` are the contract nouns across the CLI and package boundary.
2. Comments as usage examples: command examples use real flags and real paths such as `core-lint run --output json .` and `core-lint tools --output json --lang go`.
3. Path is documentation: the implementation map is the contract, and `tests/cli/lint/{path}` mirrors the command path it validates.
4. Declarative over imperative: `.core/lint.yaml` declares tool groups, thresholds, and output defaults instead of encoding those decisions in hidden CLI behavior.
5. One input shape for orchestration: `pkg/lint/service.go` owns `RunInput`.
6. One output shape for orchestration: `pkg/lint/service.go` owns `Report`.
7. CLI tests as artifact validation: the Taskfiles under `tests/cli/lint/...` are the runnable contract for the binary surface.
8. Stable sequencing over hidden magic: adapters run sequentially, then tool runs and findings are sorted before output.
## Path Map
An agent should be able to navigate the module from the path alone:
| Path | Meaning |
|------|---------|
| `cmd/core-lint/main.go` | CLI surface for `run`, `detect`, `tools`, `init`, language shortcuts, `hook`, and the legacy `lint` namespace |
| `pkg/lint/service.go` | Orchestrator for config loading, language selection, adapter selection, hook mode, and report assembly |
| `pkg/lint/adapter.go` | Adapter interface, external adapter registry, built-in catalog fallback, external command execution, and output parsers |
| `pkg/lint/config.go` | Repo-local config contract and defaults for `core-lint init` |
| `pkg/lint/detect_project.go` | Project language detection from markers and file names |
| `pkg/lint/report.go` | `Summary` aggregation and JSON/text/GitHub/SARIF writers |
| `lint.go` | Embedded catalog loader for `lint check` and `lint catalog` |
| `catalog/*.yaml` | Embedded pattern catalog files used by the legacy catalog commands |
| `tests/cli/lint/...` | CLI artifact tests; the path is the command |
## Scope
In scope:
- Project language detection
- Config-driven lint tool selection
- Embedded catalog scanning
- External linter orchestration
- Structured report generation
- Git pre-commit hook installation and removal
- CLI artifact tests in `tests/cli/lint/...`
Out of scope:
- Core service registration
- IPC or MCP exposure
- Build-stage compilation checks
- Artifact-stage scans against compiled binaries or images
- Scheduler integration
- Sidecar SBOM file writing
- Automatic tool installation
- Entitlement enforcement
## Command Surface
The repository ships two CLI surfaces:
- The root AX surface: `core-lint run`, `core-lint detect`, `core-lint tools`, and friends
- The legacy catalog surface: `core-lint lint check` and `core-lint lint catalog ...`
The RFC commands are mounted twice: once at the root and once under `core-lint lint ...`. Both surfaces are real. The root surface is shorter to type; the namespaced surface keeps the command path self-describing.
| Capability | Root path | Namespaced alias | Example |
|------------|-----------|------------------|---------|
| Full orchestration | `core-lint run [path]` | `core-lint lint run [path]` | `core-lint run --output json .` |
| Go only | `core-lint go [path]` | `core-lint lint go [path]` | `core-lint go .` |
| PHP only | `core-lint php [path]` | `core-lint lint php [path]` | `core-lint php .` |
| JS group shortcut | `core-lint js [path]` | `core-lint lint js [path]` | `core-lint js .` |
| Python only | `core-lint python [path]` | `core-lint lint python [path]` | `core-lint python .` |
| Security group shortcut | `core-lint security [path]` | `core-lint lint security [path]` | `core-lint security --ci .` |
| Compliance tools only | `core-lint compliance [path]` | `core-lint lint compliance [path]` | `core-lint compliance --output json .` |
| Language detection | `core-lint detect [path]` | `core-lint lint detect [path]` | `core-lint detect --output json .` |
| Tool inventory | `core-lint tools` | `core-lint lint tools` | `core-lint tools --output json --lang go` |
| Default config | `core-lint init [path]` | `core-lint lint init [path]` | `core-lint init /tmp/project` |
| Pre-commit hook install | `core-lint hook install [path]` | `core-lint lint hook install [path]` | `core-lint hook install .` |
| Pre-commit hook remove | `core-lint hook remove [path]` | `core-lint lint hook remove [path]` | `core-lint hook remove .` |
| Embedded catalog scan | none | `core-lint lint check [path...]` | `core-lint lint check --format json tests/cli/lint/check/fixtures` |
| Embedded catalog list | none | `core-lint lint catalog list` | `core-lint lint catalog list --lang go` |
| Embedded catalog show | none | `core-lint lint catalog show RULE_ID` | `core-lint lint catalog show go-sec-001` |
`core-lint js` is a shortcut for `Lang=js`, not a dedicated TypeScript command. TypeScript-only runs use `core-lint run --lang ts ...` or plain `run` with auto-detection.
`core-lint compliance` is also not identical to `core-lint run --sbom`. The shortcut sets `Category=compliance`, so the final adapter filter keeps only adapters whose runtime category is `compliance`. `run --sbom` appends the compliance config group without that category filter.
## RunInput Contract
All orchestration commands resolve into one DTO:
```go
type RunInput struct {
Path string `json:"path"`
Output string `json:"output,omitempty"`
Config string `json:"config,omitempty"`
FailOn string `json:"fail_on,omitempty"`
Category string `json:"category,omitempty"`
Lang string `json:"lang,omitempty"`
Hook bool `json:"hook,omitempty"`
CI bool `json:"ci,omitempty"`
Files []string `json:"files,omitempty"`
SBOM bool `json:"sbom,omitempty"`
}
```
### Input Resolution Rules
`Service.Run()` resolves input in this order:
1. Empty `Path` becomes `.`
2. `CI=true` sets `Output=github` only when `Output` was not provided explicitly
3. Config is loaded from `--config` or `.core/lint.yaml`
4. Empty `FailOn` falls back to the loaded config
5. `Hook=true` with no explicit `Files` reads staged files from `git diff --cached --name-only`
6. `Lang` overrides auto-detection
7. `Files` override directory detection for language inference
### CLI Output Resolution
The CLI resolves output before it calls `Service.Run()`:
1. explicit `--output` wins
2. otherwise `--ci` becomes `github`
3. otherwise the loaded config `output` value is used
4. if the config output is empty, the CLI falls back to `text`
### Category and Language Precedence
Tool group selection is intentionally simple and deterministic:
1. `Category=security` selects the `lint.security` config group
2. `Category=compliance` selects only the `lint.compliance` config group
3. `Lang=go|php|js|ts|python|...` means only that language group
4. Plain `run` uses all detected language groups plus `infra`
5. Plain `run --ci` adds the `security` group
6. Plain `run --sbom` adds the `compliance` group
`Lang` is stronger than `CI` and `SBOM`. If `Lang` is set, the language group wins and the extra groups are not appended.
`Category=style`, `Category=correctness`, and other non-group categories act as adapter-side filters only. They do not map to dedicated config groups.
One current consequence is that `grype` is listed in the default `lint.compliance` config group but advertises `Category() == "security"`. `core-lint compliance` therefore filters it out, while plain `core-lint run --sbom` still leaves it eligible.
Final adapter selection has one extra Go-specific exception: if Go is present and `Category != "compliance"`, then after registry filtering has run, `Service.Run()` inserts the built-in `catalog` adapter at the front of the adapter list. That means `core-lint security` on a Go project can still emit `catalog` findings tagged `security`.
## Config Contract
Repo-local config lives at `.core/lint.yaml`.
`core-lint init /path/to/project` writes the default file from `pkg/lint/config.go`.
```yaml
lint:
go:
- golangci-lint
- gosec
- govulncheck
- staticcheck
- revive
- errcheck
php:
- phpstan
- psalm
- phpcs
- phpmd
- pint
js:
- biome
- oxlint
- eslint
- prettier
ts:
- biome
- oxlint
- typescript
python:
- ruff
- mypy
- bandit
- pylint
infra:
- shellcheck
- hadolint
- yamllint
- jsonlint
- markdownlint
security:
- gitleaks
- trivy
- gosec
- bandit
- semgrep
compliance:
- syft
- grype
- scancode
output: json
fail_on: error
paths:
- .
exclude:
- vendor/
- node_modules/
- .core/
```
### Config Rules
- If `.core/lint.yaml` does not exist, `DefaultConfig()` is used in memory
- Relative `--config` paths resolve relative to `Path`
- Unknown tool names in config are inert; the adapter registry is authoritative
- The current default config includes `prettier`, but the adapter registry does not yet provide a `prettier` adapter
- `paths` and `exclude` are part of the file schema, but the current orchestration path does not read them; detection and scanning still rely on built-in defaults
- `LintConfig` still accepts a `schedules` map, but no current CLI command reads or executes it
## Detection Contract
`pkg/lint/detect_project.go` is the only project-language detector used by orchestration commands.
### Marker Files
| Marker | Language |
|--------|----------|
| `go.mod` | `go` |
| `composer.json` | `php` |
| `package.json` | `js` |
| `tsconfig.json` | `ts` |
| `requirements.txt` | `python` |
| `pyproject.toml` | `python` |
| `Cargo.toml` | `rust` |
| `Dockerfile*` | `dockerfile` |
### File Extensions
| Extension | Language |
|-----------|----------|
| `.go` | `go` |
| `.php` | `php` |
| `.js`, `.jsx` | `js` |
| `.ts`, `.tsx` | `ts` |
| `.py` | `python` |
| `.rs` | `rust` |
| `.sh` | `shell` |
| `.yaml`, `.yml` | `yaml` |
| `.json` | `json` |
| `.md` | `markdown` |
### Detection Rules
- Directory traversal skips `vendor`, `node_modules`, `.git`, `testdata`, `.core`, and any hidden directory
- Results are de-duplicated and returned in sorted order
- `core-lint detect --output json tests/cli/lint/check/fixtures` currently returns `["go"]`
## Execution Model
`Service.Run()` is the orchestrator. The current implementation is sequential, not parallel.
### Step 1: Load Config
`LoadProjectConfig()` returns the repo-local config or the in-memory default.
### Step 2: Resolve File Scope
- If `Files` was provided, only those files are considered for language detection and adapter arguments
- If `Hook=true` and `Files` is empty, staged files are read from Git
- Otherwise the whole project path is scanned
### Step 3: Resolve Languages
- `Lang` wins first
- `Files` are used next
- `Detect(Path)` is the fallback
### Step 4: Select Adapters
`pkg/lint/service.go` builds a set of enabled tool names from config, then filters the registry from `pkg/lint/adapter.go`.
Special case:
- If `go` is present in the final language set and `Category != "compliance"`, a built-in `catalog` adapter is prepended automatically
### Step 5: Run Adapters
Every selected adapter runs with the same contract:
```go
type Adapter interface {
Name() string
Available() bool
Languages() []string
Command() string
Entitlement() string
RequiresEntitlement() bool
MatchesLanguage(languages []string) bool
Category() string
Fast() bool
Run(ctx context.Context, input RunInput, files []string) AdapterResult
}
```
Execution rules:
- Missing binaries become `ToolRun{Status: "skipped"}`
- External commands run with a 5 minute timeout
- Hook mode marks non-fast adapters as `skipped`
- Parsed findings are normalised, sorted, and merged into one report
- Adapter order becomes deterministic after `sortToolRuns()` and `sortFindings()`
### Step 6: Compute Pass or Fail
`passesThreshold()` applies the configured threshold:
| `fail_on` | Passes when |
|-----------|-------------|
| `error` or empty | `summary.errors == 0` |
| `warning` | `summary.errors == 0 && summary.warnings == 0` |
| `info` | `summary.total == 0` |
CLI exit status follows `report.Summary.Passed`, not raw tool state. A `skipped` or `timeout` tool run does not fail the command by itself.
## Catalog Surfaces
The repository has two catalog paths. They are related, but they are not the same implementation.
### Legacy Embedded Catalog
These commands load the embedded YAML catalog via `lint.go`:
- `core-lint lint check`
- `core-lint lint catalog list`
- `core-lint lint catalog show`
The source of truth is `catalog/*.yaml`.
### Orchestration Catalog Adapter
`core-lint run`, `core-lint go`, and the other orchestration commands prepend a smaller built-in `catalog` adapter from `pkg/lint/adapter.go`.
That adapter reads the hard-coded `defaultCatalogRulesYAML` constant, not `catalog/*.yaml`.
Today the fallback adapter contains these Go rules:
- `go-cor-003`
- `go-cor-004`
- `go-sec-001`
- `go-sec-002`
- `go-sec-004`
The overlap is intentional, but the surfaces are different:
- `lint check` returns raw catalog findings with catalog severities such as `medium` or `high`
- `run` normalises those findings into report severities `warning`, `error`, or `info`
An agent must not assume that `core-lint lint check` and `core-lint run` execute the same rule set.
## Adapter Inventory
The implementation has two adapter sources in `pkg/lint/adapter.go`:
- `defaultAdapters()` defines the external-tool registry exposed by `core-lint tools`
- `newCatalogAdapter()` defines the built-in Go fallback injected by `Service.Run()` when Go is in scope
### ToolInfo Contract
`core-lint tools` returns the runtime inventory from `Service.Tools()`:
```go
type ToolInfo struct {
Name string `json:"name"`
Available bool `json:"available"`
Languages []string `json:"languages"`
Category string `json:"category"`
Entitlement string `json:"entitlement,omitempty"`
}
```
Inventory rules:
- results are sorted by `Name`
- `--lang` filters via `Adapter.MatchesLanguage()`, not strict equality on the `Languages` field
- wildcard adapters with `Languages() == []string{"*"}` still appear under any `--lang` filter
- category tokens also match, so `core-lint tools --lang security` returns security adapters plus wildcard adapters
- `Available` reflects a `PATH` lookup at runtime, not config membership
- `Entitlement` is descriptive metadata; the current implementation does not enforce it
- the built-in `catalog` adapter is not returned by `core-lint tools`; it is injected only during `run`-style orchestration on Go projects
### Injected During Run
| Adapter | Languages | Category | Fast | Notes |
|---------|-----------|----------|------|-------|
| `catalog` | `go` | `correctness` | yes | Built-in regex fallback rules; injected by `Service.Run()`, not listed by `core-lint tools` |
### Go
| Adapter | Category | Fast |
|---------|----------|------|
| `golangci-lint` | `correctness` | yes |
| `gosec` | `security` | no |
| `govulncheck` | `security` | no |
| `staticcheck` | `correctness` | yes |
| `revive` | `style` | yes |
| `errcheck` | `correctness` | yes |
### PHP
| Adapter | Category | Fast |
|---------|----------|------|
| `phpstan` | `correctness` | yes |
| `psalm` | `correctness` | yes |
| `phpcs` | `style` | yes |
| `phpmd` | `correctness` | yes |
| `pint` | `style` | yes |
### JS and TS
| Adapter | Category | Fast |
|---------|----------|------|
| `biome` | `style` | yes |
| `oxlint` | `style` | yes |
| `eslint` | `style` | yes |
| `typescript` | `correctness` | yes |
### Python
| Adapter | Category | Fast |
|---------|----------|------|
| `ruff` | `style` | yes |
| `mypy` | `correctness` | yes |
| `bandit` | `security` | no |
| `pylint` | `style` | yes |
### Infra and Cross-Project
| Adapter | Category | Fast |
|---------|----------|------|
| `shellcheck` | `correctness` | yes |
| `hadolint` | `security` | yes |
| `yamllint` | `style` | yes |
| `jsonlint` | `style` | yes |
| `markdownlint` | `style` | yes |
| `gitleaks` | `security` | no |
| `trivy` | `security` | no |
| `semgrep` | `security` | no |
| `syft` | `compliance` | no |
| `grype` | `security` | no |
| `scancode` | `compliance` | no |
### Adapter Parsing Rules
- JSON tools are parsed recursively and schema-tolerantly by searching for common keys such as `file`, `line`, `column`, `code`, `message`, and `severity`
- Text tools are parsed from `file:line[:column]: message`
- Non-empty output that does not match either parser becomes one synthetic finding with `code: diagnostic`
- A failed command with no usable parsed output becomes one synthetic finding with `code: command-failed`
- Duplicate findings are collapsed on `tool|file|line|column|code|message`
- `ToolRun.Version` exists in the report schema but is not populated yet
### Entitlement Metadata
Adapters still expose `Entitlement()` and `RequiresEntitlement()`, but `Service.Run()` does not enforce them today. The metadata is present; the gate is not.
## Output Contract
Orchestration commands return one report document:
```go
type Report struct {
Project string `json:"project"`
Timestamp time.Time `json:"timestamp"`
Duration string `json:"duration"`
Languages []string `json:"languages"`
Tools []ToolRun `json:"tools"`
Findings []Finding `json:"findings"`
Summary Summary `json:"summary"`
}
type ToolRun struct {
Name string `json:"name"`
Version string `json:"version,omitempty"`
Status string `json:"status"`
Duration string `json:"duration"`
Findings int `json:"findings"`
}
type Summary struct {
Total int `json:"total"`
Errors int `json:"errors"`
Warnings int `json:"warnings"`
Info int `json:"info"`
Passed bool `json:"passed"`
BySeverity map[string]int `json:"by_severity,omitempty"`
}
```
`ToolRun.Status` has four implemented values:
| Status | Meaning |
|--------|---------|
| `passed` | The adapter ran and emitted no findings |
| `failed` | The adapter ran and emitted findings or the command exited non-zero |
| `skipped` | The binary was missing or hook mode skipped a non-fast adapter |
| `timeout` | The command exceeded the 5 minute adapter timeout |
`Finding` is shared with the legacy catalog scanner:
```go
type Finding struct {
Tool string `json:"tool,omitempty"`
File string `json:"file"`
Line int `json:"line"`
Column int `json:"column,omitempty"`
Severity string `json:"severity"`
Code string `json:"code,omitempty"`
Message string `json:"message,omitempty"`
Category string `json:"category,omitempty"`
Fix string `json:"fix,omitempty"`
RuleID string `json:"rule_id,omitempty"`
Title string `json:"title,omitempty"`
Match string `json:"match,omitempty"`
Repo string `json:"repo,omitempty"`
}
```
### Finding Normalisation
During orchestration:
- `Code` falls back to `RuleID`
- `Message` falls back to `Title`
- empty `Tool` becomes `catalog`
- file paths are made relative to `Path` when possible
- severities are collapsed to report levels:
| Raw severity | Report severity |
|--------------|-----------------|
| `critical`, `high`, `error`, `errors` | `error` |
| `medium`, `low`, `warning`, `warn` | `warning` |
| `info`, `note` | `info` |
### Output Modes
| Mode | How to request it | Writer |
|------|-------------------|--------|
| JSON | `--output json` | `WriteReportJSON` |
| Text | `--output text` | `WriteReportText` |
| GitHub annotations | `--output github` or `--ci` | `WriteReportGitHub` |
| SARIF | `--output sarif` | `WriteReportSARIF` |
### Stream Contract
For `run`-style commands, the selected writer always writes the report document to `stdout`.
If the report fails the configured threshold, the CLI still writes the report to `stdout`, then returns an error. The error path adds human-facing diagnostics on `stderr`.
Agents and CI jobs that need machine-readable output should parse `stdout` and treat `stderr` as diagnostic text.
## Hook Mode
`core-lint run --hook` is the installed pre-commit path.
Implementation details:
- staged files come from `git diff --cached --name-only`
- language detection runs only on those staged files
- adapters with `Fast() == false` are marked `skipped`
- output format still follows normal resolution rules; hook mode does not force text output
- `core-lint hook install` writes a managed block into `.git/hooks/pre-commit`
- `core-lint hook remove` removes only the managed block
Installed hook block:
```sh
# core-lint hook start
# Installed by core-lint
exec core-lint run --hook
# core-lint hook end
```
If the hook file already exists, install appends a guarded block instead of overwriting the file. In that appended case the command line becomes `core-lint run --hook || exit $?` rather than `exec core-lint run --hook`.
## Test Contract
The CLI artifact tests are the runnable contract for this RFC:
| Path | Command under test |
|------|--------------------|
| `tests/cli/lint/check/Taskfile.yaml` | `core-lint lint check` |
| `tests/cli/lint/catalog/list/Taskfile.yaml` | `core-lint lint catalog list` |
| `tests/cli/lint/catalog/show/Taskfile.yaml` | `core-lint lint catalog show` |
| `tests/cli/lint/detect/Taskfile.yaml` | `core-lint detect` |
| `tests/cli/lint/tools/Taskfile.yaml` | `core-lint tools` |
| `tests/cli/lint/init/Taskfile.yaml` | `core-lint init` |
| `tests/cli/lint/run/Taskfile.yaml` | `core-lint run` |
| `tests/cli/lint/Taskfile.yaml` | aggregate CLI suite |
The planted bug fixture is `tests/cli/lint/check/fixtures/input.go`.
Current expectations from the test suite:
- `lint check --format=json` finds `go-cor-003` in `input.go`
- `run --output json --fail-on warning` writes one report document to `stdout`, emits failure diagnostics on `stderr`, and exits non-zero
- `detect --output json` returns `["go"]` for the shipped fixture
- `tools --output json --lang go` includes `golangci-lint` and `govulncheck`
- `init` writes `.core/lint.yaml`
Unit-level confirmation also exists in:
- `cmd/core-lint/main_test.go`
- `pkg/lint/service_test.go`
- `pkg/lint/detect_project_test.go`
## Explicit Non-Goals
These items are intentionally not part of the current contract:
- no Core runtime integration
- no `core.Task` pipeline
- no `lint.static`, `lint.build`, or `lint.artifact` action graph
- no scheduled cron registration
- no sidecar `sbom.cdx.json` or `sbom.spdx.json` output
- no parallel adapter execution
- no adapter entitlement enforcement
- no guarantee that every config tool name has a matching adapter
Any future RFC that adds those capabilities must describe the code that implements them, not just the aspiration.
## Compatibility
This RFC matches the code that ships today:
- a standard Go CLI binary built from `cmd/core-lint`
- external tools resolved from `PATH` at runtime
- no required Core runtime, IPC layer, scheduler, or generated action graph
The contract is compatible with the current unit tests and CLI Taskfile tests because it describes the existing paths, flags, DTOs, and outputs rather than a future service boundary.
## Adoption
This contract applies immediately to:
- the root orchestration commands such as `core-lint run`, `core-lint detect`, `core-lint tools`, `core-lint init`, and `core-lint hook`
- the namespaced aliases under `core-lint lint ...`
- the legacy embedded catalog commands under `core-lint lint check` and `core-lint lint catalog ...`
Future work that adds scheduler support, runtime registration, entitlement enforcement, parallel execution, or SBOM file outputs must land behind a new RFC revision that points to implemented code.
## References
- `docs/RFC-CORE-008-AGENT-EXPERIENCE.md`
- `docs/index.md`
- `docs/development.md`
- `cmd/core-lint/main.go`
- `pkg/lint/service.go`
- `pkg/lint/adapter.go`
- `tests/cli/lint/Taskfile.yaml`
## Changelog
- 2026-03-30: Rewrote the RFC to match the implemented standalone CLI, adapter registry, fallback catalog adapter, hook mode, and CLI test paths
- 2026-03-30: Clarified the implemented report boundary, category filtering semantics, ignored config fields, and AX-style motivation/compatibility/adoption sections
- 2026-03-30: Documented the `stdout` versus `stderr` contract for failing `run` commands and the non-strict `tools --lang` matching rules

22
go.mod
View file

@ -1,23 +1,21 @@
module forge.lthn.ai/core/lint
module dappco.re/go/core/lint
go 1.26.0
require (
forge.lthn.ai/core/cli v0.3.1
forge.lthn.ai/core/go-i18n v0.1.4
forge.lthn.ai/core/go-io v0.1.2
forge.lthn.ai/core/go-log v0.0.4
forge.lthn.ai/core/go-process v0.2.3
forge.lthn.ai/core/go-scm v0.3.1
dappco.re/go/core/cli v0.3.7
dappco.re/go/core/i18n v0.1.7
dappco.re/go/core/io v0.1.7
dappco.re/go/core/log v0.0.4
dappco.re/go/core/process v0.2.9
dappco.re/go/core/scm v0.3.6
github.com/stretchr/testify v1.11.1
gopkg.in/yaml.v3 v3.0.1
)
require (
forge.lthn.ai/core/go v0.3.1 // indirect
forge.lthn.ai/core/go-crypt v0.1.7 // indirect
forge.lthn.ai/core/go-inference v0.1.4 // indirect
github.com/ProtonMail/go-crypto v1.4.0 // indirect
dappco.re/go/core v0.3.3 // indirect
dappco.re/go/core/inference v0.1.7 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/charmbracelet/bubbletea v1.3.10 // indirect
github.com/charmbracelet/colorprofile v0.4.3 // indirect
@ -27,7 +25,6 @@ require (
github.com/charmbracelet/x/term v0.2.2 // indirect
github.com/clipperhouse/displaywidth v0.11.0 // indirect
github.com/clipperhouse/uax29/v2 v2.7.0 // indirect
github.com/cloudflare/circl v1.6.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@ -43,7 +40,6 @@ require (
github.com/spf13/cobra v1.10.2 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/crypto v0.49.0 // indirect
golang.org/x/sys v0.42.0 // indirect
golang.org/x/term v0.41.0 // indirect
golang.org/x/text v0.35.0 // indirect

36
go.sum
View file

@ -1,23 +1,19 @@
forge.lthn.ai/core/cli v0.3.1 h1:ZpHhaDrdbaV98JDxj/f0E5nytYk9tTMRu3qohGyK4M0=
forge.lthn.ai/core/cli v0.3.1/go.mod h1:28cOl9eK0H033Otkjrv9f/QCmtHcJl+IIx4om8JskOg=
forge.lthn.ai/core/go v0.3.1 h1:5FMTsUhLcxSr07F9q3uG0Goy4zq4eLivoqi8shSY4UM=
forge.lthn.ai/core/go v0.3.1/go.mod h1:gE6c8h+PJ2287qNhVUJ5SOe1kopEwHEquvinstpuyJc=
forge.lthn.ai/core/go-crypt v0.1.7 h1:tyDFnXjEksHFQpkFwCpEn+x7zvwh4LnaU+/fP3WmqZc=
forge.lthn.ai/core/go-crypt v0.1.7/go.mod h1:mQdr6K8lWOcyHmSEW24vZPTThQF8fteVgZi8CO+Ko3Y=
forge.lthn.ai/core/go-i18n v0.1.4 h1:zOHUUJDgRo88/3tj++kN+VELg/buyZ4T2OSdG3HBbLQ=
forge.lthn.ai/core/go-i18n v0.1.4/go.mod h1:aDyAfz7MMgWYgLkZCptfFmZ7jJg3ocwjEJ1WkJSvv4U=
forge.lthn.ai/core/go-inference v0.1.4 h1:fuAgWbqsEDajHniqAKyvHYbRcBrkGEiGSqR2pfTMRY0=
forge.lthn.ai/core/go-inference v0.1.4/go.mod h1:jfWz+IJX55wAH98+ic6FEqqGB6/P31CHlg7VY7pxREw=
forge.lthn.ai/core/go-io v0.1.2 h1:q8hj2jtOFqAgHlBr5wsUAOXtaFkxy9gqGrQT/il0WYA=
forge.lthn.ai/core/go-io v0.1.2/go.mod h1:PbNKW1Q25ywSOoQXeGdQHbV5aiIrTXvHIQ5uhplA//g=
forge.lthn.ai/core/cli v0.3.7 h1:1GrbaGg0wDGHr6+klSbbGyN/9sSbHvFbdySJznymhwg=
forge.lthn.ai/core/cli v0.3.7/go.mod h1:DBUppJkA9P45ZFGgI2B8VXw1rAZxamHoI/KG7fRvTNs=
forge.lthn.ai/core/go v0.3.3 h1:kYYZ2nRYy0/Be3cyuLJspRjLqTMxpckVyhb/7Sw2gd0=
forge.lthn.ai/core/go v0.3.3/go.mod h1:Cp4ac25pghvO2iqOu59t1GyngTKVOzKB5/VPdhRi9CQ=
forge.lthn.ai/core/go-i18n v0.1.7 h1:aHkAoc3W8fw3RPNvw/UszQbjyFWXHszzbZgty3SwyAA=
forge.lthn.ai/core/go-i18n v0.1.7/go.mod h1:0VDjwtY99NSj2iqwrI09h5GUsJeM9s48MLkr+/Dn4G8=
forge.lthn.ai/core/go-inference v0.1.7 h1:9Dy6v03jX5ZRH3n5iTzlYyGtucuBIgSe+S7GWvBzx9Q=
forge.lthn.ai/core/go-inference v0.1.7/go.mod h1:jfWz+IJX55wAH98+ic6FEqqGB6/P31CHlg7VY7pxREw=
forge.lthn.ai/core/go-io v0.1.7 h1:Tdb6sqh+zz1lsGJaNX9RFWM6MJ/RhSAyxfulLXrJsbk=
forge.lthn.ai/core/go-io v0.1.7/go.mod h1:8lRLFk4Dnp5cR/Cyzh9WclD5566TbpdRgwcH7UZLWn4=
forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0=
forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
forge.lthn.ai/core/go-process v0.2.3 h1:/ERqRYHgCNZjNT9NMinAAJJGJWSsHuCTiHFNEm6nTPY=
forge.lthn.ai/core/go-process v0.2.3/go.mod h1:gVTbxL16ccUIexlFcyDtCy7LfYvD8Rtyzfo8bnXAXrU=
forge.lthn.ai/core/go-scm v0.3.1 h1:G+DqVJLT+UjgUzu2Hnseyl2szhb0wB+DB8VYhq/bLOI=
forge.lthn.ai/core/go-scm v0.3.1/go.mod h1:ER9fQBs8nnlJZQ6+ALnwv+boK/xiwg8jEc9VP6DMijk=
github.com/ProtonMail/go-crypto v1.4.0 h1:Zq/pbM3F5DFgJiMouxEdSVY44MVoQNEKp5d5QxIQceQ=
github.com/ProtonMail/go-crypto v1.4.0/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo=
forge.lthn.ai/core/go-process v0.2.9 h1:Wql+5TUF+lfU2oJ9I+S764MkTqJhBsuyMM0v1zsfZC4=
forge.lthn.ai/core/go-process v0.2.9/go.mod h1:NIzZOF5IVYYCjHkcNIGcg1mZH+bzGoie4SlZUDYOKIM=
forge.lthn.ai/core/go-scm v0.3.6 h1:LFNx8Fs82mrpxro/MPUM6tMiD4DqPmdu83UknXztQjc=
forge.lthn.ai/core/go-scm v0.3.6/go.mod h1:IWFIYDfRH0mtRdqY5zV06l/RkmkPpBM6FcbKWhg1Qa8=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
@ -36,8 +32,6 @@ github.com/clipperhouse/displaywidth v0.11.0 h1:lBc6kY44VFw+TDx4I8opi/EtL9m20WSE
github.com/clipperhouse/displaywidth v0.11.0/go.mod h1:bkrFNkf81G8HyVqmKGxsPufD3JhNl3dSqnGhOoSD/o0=
github.com/clipperhouse/uax29/v2 v2.7.0 h1:+gs4oBZ2gPfVrKPthwbMzWZDaAFPGYK72F0NJv2v7Vk=
github.com/clipperhouse/uax29/v2 v2.7.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -80,8 +74,6 @@ github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA=
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

7
locales/embed.go Normal file
View file

@ -0,0 +1,7 @@
// Package locales embeds translation files for this module.
package locales

import "embed"

// FS exposes every *.json translation file in this directory,
// embedded into the binary at build time.
//
//go:embed *.json
var FS embed.FS

113
locales/en.json Normal file
View file

@ -0,0 +1,113 @@
{
"cmd": {
"qa": {
"short": "Quality assurance checks",
"long": "Run quality assurance checks across the registry: CI health, issues, reviews, documentation coverage, and workflow monitoring.",
"docblock": {
"short": "Check documentation coverage",
"long": "Analyse exported symbols for missing doc comments and report coverage percentage against a configurable threshold.",
"coverage": "Documentation coverage",
"missing_docs": "Missing documentation:",
"flag": {
"threshold": "Minimum coverage percentage to pass"
}
},
"health": {
"short": "Show CI health across repos",
"long": "Check GitHub Actions workflow status for all repos in the registry and report which are passing, failing, errored, or unconfigured.",
"summary": "CI Health",
"all_healthy": "All repos are healthy.",
"passing": "Passing",
"tests_failing": "Tests failing",
"cancelled": "Cancelled",
"skipped": "Skipped",
"running": "Running",
"fetch_error": "Failed to fetch workflow status",
"parse_error": "Failed to parse workflow response",
"no_ci_configured": "No CI configured",
"count_passing": "passing",
"count_failing": "failing",
"count_error": "error",
"count_pending": "pending",
"count_no_ci": "no CI",
"count_disabled": "disabled",
"flag": {
"problems": "Show only repos with problems"
}
},
"issues": {
"short": "List open issues across repos",
"long": "Fetch and categorise open GitHub issues across registry repos, grouping by status: needs response, ready, blocked, and triage.",
"fetching": "Fetching issues...",
"no_issues": "No open issues found.",
"category": {
"needs_response": "Needs Response",
"ready": "Ready",
"blocked": "Blocked",
"triage": "Triage"
},
"fetch_error": "Failed to fetch issues from {{.Repo}}: {{.Error}}",
"hint": {
"blocked": "blocked by dependency",
"triage": "needs triage",
"needs_response": "awaiting response"
},
"flag": {
"mine": "Show only issues assigned to you",
"triage": "Show only issues needing triage",
"blocked": "Show only blocked issues",
"limit": "Maximum number of issues to fetch"
}
},
"review": {
"short": "List pull requests needing review",
"long": "Show open pull requests across registry repos, highlighting those awaiting your review and your own PRs.",
"your_prs": "Your pull requests",
"review_requested": "Review requested",
"no_prs": "No open pull requests.",
"no_reviews": "No reviews requested.",
"error": {
"no_repo": "Could not determine repository"
},
"flag": {
"mine": "Show only your pull requests",
"requested": "Show only PRs requesting your review",
"repo": "Filter by repository name"
}
},
"watch": {
"short": "Watch CI workflows for a commit",
"long": "Monitor GitHub Actions workflows for a specific commit, polling until all complete or the timeout is reached.",
"commit": "Watching commit",
"waiting_for_workflows": "Waiting for workflows...",
"all_passed": "All workflows passed.",
"workflows_failed": "{{.Count}} workflow(s) failed.",
"timeout": "Timed out after {{.Duration}}.",
"error": {
"not_git_repo": "Current directory is not a git repository",
"repo_format": "Could not determine owner/repo from remote"
},
"flag": {
"repo": "Repository in owner/name format",
"commit": "Commit SHA to watch",
"timeout": "Maximum time to wait"
}
}
}
},
"common": {
"flag": {
"json": "Output as JSON",
"registry": "Path to registry file",
"verbose": "Show detailed output"
},
"label": {
"error": "FAIL",
"success": "PASS"
}
},
"error": {
"gh_not_found": "GitHub CLI (gh) not found; install from https://cli.github.com",
"registry_not_found": "Registry file not found"
}
}

912
pkg/lint/adapter.go Normal file
View file

@ -0,0 +1,912 @@
package lint
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
coreerr "forge.lthn.ai/core/go-log"
)
// Adapter wraps one lint tool and normalises its output to Finding values.
//
// Two families implement it: CommandAdapter, which shells out to an external
// binary, and CatalogAdapter, which evaluates the embedded rule catalog
// in-process.
type Adapter interface {
	// Name returns the stable tool identifier used in config and reports.
	Name() string
	// Available reports whether the tool can run right now (a PATH lookup
	// at runtime for external binaries).
	Available() bool
	// Languages lists the language tokens this adapter applies to; a
	// wildcard adapter returns []string{"*"}.
	Languages() []string
	// Command returns the underlying executable name, if any.
	Command() string
	// Entitlement names the entitlement associated with this tool.
	// Descriptive metadata only; Service.Run() does not enforce it today.
	Entitlement() string
	// RequiresEntitlement reports whether the entitlement is expected.
	// Like Entitlement, this is metadata only and is not enforced.
	RequiresEntitlement() bool
	// MatchesLanguage reports whether any of the given tokens select this
	// adapter. Matching is not strict equality on Languages: wildcard and
	// category tokens also match.
	MatchesLanguage(languages []string) bool
	// Category returns the adapter's category token, e.g. "style",
	// "correctness", "security", or "compliance".
	Category() string
	// Fast reports whether the adapter is cheap enough for hook mode;
	// hook mode marks non-fast adapters as skipped.
	Fast() bool
	// Run executes the tool for the given input and file scope and returns
	// the tool run record plus the parsed findings.
	Run(ctx context.Context, input RunInput, files []string) AdapterResult
}
// AdapterResult contains one tool execution plus the parsed findings from that run.
type AdapterResult struct {
	// Tool records the execution outcome (status, duration, finding count).
	Tool ToolRun
	// Findings holds the findings parsed from the tool's output.
	Findings []Finding
}
// findingParser converts a tool's raw output into Finding values, tagging
// each with the given tool name and category.
type findingParser func(tool string, category string, output string) []Finding

// commandArgumentsBuilder builds the argument list for one external tool
// invocation from the project path and the (possibly empty) file scope.
type commandArgumentsBuilder func(projectPath string, files []string) []string
// CommandAdapter runs an external binary and parses its stdout/stderr.
type CommandAdapter struct {
	name                string                  // stable tool identifier used in config and reports
	binaries            []string                // candidate executable names resolved via PATH
	languages           []string                // language tokens this adapter applies to
	category            string                  // category token, e.g. "style" or "security"
	entitlement         string                  // associated entitlement name; metadata only, not enforced
	requiresEntitlement bool                    // whether the entitlement is expected; also not enforced
	fast                bool                    // cheap enough to run in pre-commit hook mode
	buildArgs           commandArgumentsBuilder // produces the command-line arguments for a run
	parseOutput         findingParser           // parses raw tool output into findings
}
// CatalogAdapter wraps the embedded regex rule catalog as a built-in linter.
// It is injected by Service.Run() when Go is in scope rather than registered
// with the external-tool registry, so it is not returned by the tools
// inventory command.
type CatalogAdapter struct{}
// defaultAdapters returns the full built-in tool table: one CommandAdapter per
// supported external linter/scanner, each with its binaries, languages,
// category, entitlement flags, fast flag, argument builder, and output parser.
func defaultAdapters() []Adapter {
	return []Adapter{
		// Go tools.
		newCommandAdapter("golangci-lint", []string{"golangci-lint"}, []string{"go"}, "correctness", "", false, true, goProjectArguments("run", "--out-format", "json"), parseJSONDiagnostics),
		newCommandAdapter("gosec", []string{"gosec"}, []string{"go"}, "security", "lint.security", true, false, goProjectArguments("-fmt", "json"), parseJSONDiagnostics),
		newCommandAdapter("govulncheck", []string{"govulncheck"}, []string{"go"}, "security", "", false, false, goProjectArguments("-json"), parseGovulncheckDiagnostics),
		newCommandAdapter("staticcheck", []string{"staticcheck"}, []string{"go"}, "correctness", "", false, true, goProjectArguments("-f", "json"), parseJSONDiagnostics),
		newCommandAdapter("revive", []string{"revive"}, []string{"go"}, "style", "", false, true, goProjectArguments("-formatter", "json"), parseJSONDiagnostics),
		newCommandAdapter("errcheck", []string{"errcheck"}, []string{"go"}, "correctness", "", false, true, goProjectArguments(), parseTextDiagnostics),
		// PHP tools.
		newCommandAdapter("phpstan", []string{"phpstan"}, []string{"php"}, "correctness", "", false, true, projectPathArguments("analyse", "--error-format", "json"), parseJSONDiagnostics),
		newCommandAdapter("psalm", []string{"psalm"}, []string{"php"}, "correctness", "", false, true, projectPathArguments("--output-format", "json"), parseJSONDiagnostics),
		newCommandAdapter("phpcs", []string{"phpcs"}, []string{"php"}, "style", "", false, true, projectPathArguments("--report=json"), parseJSONDiagnostics),
		newCommandAdapter("phpmd", []string{"phpmd"}, []string{"php"}, "correctness", "", false, true, phpmdArguments(), parseJSONDiagnostics),
		newCommandAdapter("pint", []string{"pint"}, []string{"php"}, "style", "", false, true, projectPathArguments("--format", "json"), parseJSONDiagnostics),
		// JavaScript / TypeScript tools.
		newCommandAdapter("biome", []string{"biome"}, []string{"js", "ts"}, "style", "", false, true, projectPathArguments("check", "--reporter", "json"), parseJSONDiagnostics),
		newCommandAdapter("oxlint", []string{"oxlint"}, []string{"js", "ts"}, "style", "", false, true, projectPathArguments("--format", "json"), parseJSONDiagnostics),
		newCommandAdapter("eslint", []string{"eslint"}, []string{"js"}, "style", "", false, true, projectPathArguments("--format", "json"), parseJSONDiagnostics),
		newCommandAdapter("typescript", []string{"tsc", "typescript"}, []string{"ts"}, "correctness", "", false, true, projectPathArguments("--pretty", "false"), parseTextDiagnostics),
		// Python tools.
		newCommandAdapter("ruff", []string{"ruff"}, []string{"python"}, "style", "", false, true, projectPathArguments("check", "--output-format", "json"), parseJSONDiagnostics),
		newCommandAdapter("mypy", []string{"mypy"}, []string{"python"}, "correctness", "", false, true, projectPathArguments("--output", "json"), parseJSONDiagnostics),
		newCommandAdapter("bandit", []string{"bandit"}, []string{"python"}, "security", "lint.security", true, false, recursiveProjectPathArguments("-f", "json", "-r"), parseJSONDiagnostics),
		newCommandAdapter("pylint", []string{"pylint"}, []string{"python"}, "style", "", false, true, projectPathArguments("--output-format", "json"), parseJSONDiagnostics),
		// Infra / markup tools.
		newCommandAdapter("shellcheck", []string{"shellcheck"}, []string{"shell"}, "correctness", "", false, true, filePathArguments("-f", "json"), parseJSONDiagnostics),
		newCommandAdapter("hadolint", []string{"hadolint"}, []string{"dockerfile"}, "security", "", false, true, filePathArguments("-f", "json"), parseJSONDiagnostics),
		newCommandAdapter("yamllint", []string{"yamllint"}, []string{"yaml"}, "style", "", false, true, projectPathArguments("-f", "parsable"), parseTextDiagnostics),
		newCommandAdapter("jsonlint", []string{"jsonlint"}, []string{"json"}, "style", "", false, true, filePathArguments(), parseTextDiagnostics),
		newCommandAdapter("markdownlint", []string{"markdownlint", "markdownlint-cli"}, []string{"markdown"}, "style", "", false, true, projectPathArguments("--json"), parseJSONDiagnostics),
		newCommandAdapter("prettier", []string{"prettier"}, []string{"js"}, "style", "", false, true, projectPathArguments("--list-different"), parsePrettierDiagnostics),
		// Language-agnostic security scanners (entitlement-gated, not fast).
		newCommandAdapter("gitleaks", []string{"gitleaks"}, []string{"*"}, "security", "lint.security", true, false, recursiveProjectPathArguments("detect", "--no-git", "--report-format", "json", "--source"), parseJSONDiagnostics),
		newCommandAdapter("trivy", []string{"trivy"}, []string{"*"}, "security", "lint.security", true, false, projectPathArguments("fs", "--format", "json"), parseJSONDiagnostics),
		newCommandAdapter("semgrep", []string{"semgrep"}, []string{"*"}, "security", "lint.security", true, false, projectPathArguments("--json"), parseJSONDiagnostics),
		// Compliance scanners.
		newCommandAdapter("syft", []string{"syft"}, []string{"*"}, "compliance", "lint.compliance", true, false, projectPathArguments("scan", "-o", "json"), parseJSONDiagnostics),
		newCommandAdapter("grype", []string{"grype"}, []string{"*"}, "security", "lint.compliance", true, false, projectPathArguments("-o", "json"), parseJSONDiagnostics),
		newCommandAdapter("scancode", []string{"scancode-toolkit", "scancode"}, []string{"*"}, "compliance", "lint.compliance", true, false, projectPathArguments("--json"), parseJSONDiagnostics),
	}
}
// newCatalogAdapter returns the built-in embedded-catalog linter.
func newCatalogAdapter() Adapter {
	return CatalogAdapter{}
}
// newCommandAdapter assembles a CommandAdapter from its configuration values
// and returns it as an Adapter.
func newCommandAdapter(name string, binaries []string, languages []string, category string, entitlement string, requiresEntitlement bool, fast bool, builder commandArgumentsBuilder, parser findingParser) Adapter {
	adapter := CommandAdapter{
		name:                name,
		binaries:            binaries,
		languages:           languages,
		category:            category,
		entitlement:         entitlement,
		requiresEntitlement: requiresEntitlement,
		fast:                fast,
		buildArgs:           builder,
		parseOutput:         parser,
	}
	return adapter
}
// Name returns the adapter identifier.
func (adapter CommandAdapter) Name() string { return adapter.name }

// Available reports whether any of the adapter's candidate binaries is on PATH.
func (adapter CommandAdapter) Available() bool {
	_, ok := adapter.availableBinary()
	return ok
}

// Languages returns a defensive copy of the supported language keys.
func (adapter CommandAdapter) Languages() []string {
	return append([]string(nil), adapter.languages...)
}

// Command returns the primary (first-configured) binary name, or "" when none is set.
func (adapter CommandAdapter) Command() string {
	if len(adapter.binaries) == 0 {
		return ""
	}
	return adapter.binaries[0]
}

// Entitlement returns the entitlement key guarding this adapter, if any.
func (adapter CommandAdapter) Entitlement() string { return adapter.entitlement }

// RequiresEntitlement reports whether the entitlement must be granted before running.
func (adapter CommandAdapter) RequiresEntitlement() bool { return adapter.requiresEntitlement }
// MatchesLanguage reports whether the adapter should run for the requested
// languages. An empty request or an empty adapter language list matches
// everything, as does the "*" wildcard. A requested name may also match the
// adapter's category (e.g. "security"), acting as a category shortcut.
//
// Fix: the category shortcut already compared case-insensitively via
// strings.EqualFold, but the supported-language comparison used ==; both now
// use EqualFold so "Go" matches "go". This only widens acceptance, so it is
// backward compatible.
func (adapter CommandAdapter) MatchesLanguage(languages []string) bool {
	if len(adapter.languages) == 0 || len(languages) == 0 {
		return true
	}
	if len(adapter.languages) == 1 && adapter.languages[0] == "*" {
		return true
	}
	for _, language := range languages {
		// Category shortcut: a requested "security"/"style"/etc. selects
		// adapters of that category regardless of language.
		if strings.EqualFold(language, adapter.category) {
			return true
		}
		for _, supported := range adapter.languages {
			if strings.EqualFold(supported, language) {
				return true
			}
		}
	}
	return false
}
// Category returns the default finding category for this tool.
func (adapter CommandAdapter) Category() string { return adapter.category }

// Fast reports whether the tool is flagged as fast.
func (adapter CommandAdapter) Fast() bool { return adapter.fast }
// Run executes the tool under a 5-minute timeout, merges stdout+stderr,
// parses findings (falling back to a line-based text parse), normalises
// finding metadata, and reports a status of "skipped", "timeout", "failed",
// or "passed".
func (adapter CommandAdapter) Run(ctx context.Context, input RunInput, files []string) AdapterResult {
	startedAt := time.Now()
	result := AdapterResult{
		Tool: ToolRun{
			Name: adapter.name,
		},
	}
	// Tool not installed: skip without error.
	binary, ok := adapter.availableBinary()
	if !ok {
		result.Tool.Status = "skipped"
		result.Tool.Duration = "0s"
		return result
	}
	result.Tool.Version = probeCommandVersion(binary, input.Path)
	runContext, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()
	args := adapter.buildArgs(input.Path, files)
	stdout, stderr, exitCode, runErr := runCommand(runContext, input.Path, binary, args)
	result.Tool.Duration = time.Since(startedAt).Round(time.Millisecond).String()
	if errors.Is(runContext.Err(), context.DeadlineExceeded) {
		result.Tool.Status = "timeout"
		return result
	}
	// Merge stderr into the parsed output — many tools emit diagnostics there.
	output := strings.TrimSpace(stdout)
	if strings.TrimSpace(stderr) != "" {
		if output != "" {
			output += "\n" + strings.TrimSpace(stderr)
		} else {
			output = strings.TrimSpace(stderr)
		}
	}
	if adapter.parseOutput != nil && output != "" {
		result.Findings = adapter.parseOutput(adapter.name, adapter.category, output)
	}
	// Fallback 1: structured parse yielded nothing — try line-based text parse.
	if len(result.Findings) == 0 && output != "" {
		result.Findings = parseTextDiagnostics(adapter.name, adapter.category, output)
	}
	// Fallback 2: no findings but the command failed — surface a synthetic finding.
	if len(result.Findings) == 0 && runErr != nil {
		result.Findings = []Finding{{
			Tool:     adapter.name,
			Severity: defaultSeverityForCategory(adapter.category),
			Code:     "command-failed",
			Message:  strings.TrimSpace(firstNonEmpty(output, runErr.Error())),
			Category: adapter.category,
		}}
	}
	// Normalise finding metadata: fill in tool/category, default or
	// canonicalise severity.
	for index := range result.Findings {
		if result.Findings[index].Tool == "" {
			result.Findings[index].Tool = adapter.name
		}
		if result.Findings[index].Category == "" {
			result.Findings[index].Category = adapter.category
		}
		if result.Findings[index].Severity == "" {
			result.Findings[index].Severity = defaultSeverityForCategory(adapter.category)
		} else {
			result.Findings[index].Severity = normaliseSeverity(result.Findings[index].Severity)
		}
	}
	result.Tool.Findings = len(result.Findings)
	// Any run error, non-zero exit, or parsed finding counts as a failure.
	switch {
	case runErr != nil || exitCode != 0 || len(result.Findings) > 0:
		result.Tool.Status = "failed"
	default:
		result.Tool.Status = "passed"
	}
	return result
}
// probeCommandVersion tries the common version flags in turn and returns the
// first non-empty line the tool prints, or "" if none of them work.
func probeCommandVersion(binary string, workingDir string) string {
	versionFlags := [][]string{{"--version"}, {"-version"}, {"version"}}
	for _, flagSet := range versionFlags {
		probeContext, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		stdout, stderr, exitCode, err := runCommand(probeContext, workingDir, binary, flagSet)
		cancel()
		if err != nil && exitCode != 0 {
			continue
		}
		combined := firstNonEmpty(stdout, stderr)
		if combined == "" {
			continue
		}
		if line := firstVersionLine(combined); line != "" {
			return line
		}
	}
	return ""
}
// availableBinary resolves the first configured binary found on PATH.
func (adapter CommandAdapter) availableBinary() (string, bool) {
	for _, candidate := range adapter.binaries {
		if resolved, err := exec.LookPath(candidate); err == nil {
			return resolved, true
		}
	}
	return "", false
}
// Name returns the built-in catalog adapter identifier.
func (CatalogAdapter) Name() string { return "catalog" }

// Available always reports true: the catalog is embedded, no binary needed.
func (CatalogAdapter) Available() bool { return true }

// Languages reports that the embedded catalog targets Go.
func (CatalogAdapter) Languages() []string { return []string{"go"} }

// Command returns the pseudo-command name for display purposes.
func (CatalogAdapter) Command() string { return "catalog" }

// Entitlement returns "" — the catalog is not entitlement-gated.
func (CatalogAdapter) Entitlement() string { return "" }

// RequiresEntitlement reports that no entitlement is required.
func (CatalogAdapter) RequiresEntitlement() bool { return false }

// MatchesLanguage matches an empty request or any request including "go".
func (CatalogAdapter) MatchesLanguage(languages []string) bool {
	return len(languages) == 0 || containsString(languages, "go")
}

// Category returns the catalog's default finding category.
func (CatalogAdapter) Category() string { return "correctness" }

// Fast reports that the catalog scan is flagged as fast.
func (CatalogAdapter) Fast() bool { return true }
// Run loads the embedded rule catalog, optionally filters rules by the input
// category tag, scans either the explicit file list or the whole project
// directory, and normalises the resulting findings.
func (CatalogAdapter) Run(_ context.Context, input RunInput, files []string) AdapterResult {
	startedAt := time.Now()
	result := AdapterResult{
		Tool: ToolRun{
			Name: "catalog",
		},
	}
	catalog, err := loadBuiltinCatalog()
	if err != nil {
		// Catalog failed to parse: report as a single synthetic finding.
		result.Tool.Status = "failed"
		result.Tool.Duration = time.Since(startedAt).Round(time.Millisecond).String()
		result.Findings = []Finding{{
			Tool:     "catalog",
			Severity: "error",
			Code:     "catalog-load",
			Message:  err.Error(),
			Category: "correctness",
		}}
		result.Tool.Findings = len(result.Findings)
		return result
	}
	rules := catalog.Rules
	// A non-empty category restricts the scan to rules tagged with it.
	if input.Category != "" {
		rules = filterRulesByTag(rules, input.Category)
	}
	scanner, err := NewScanner(rules)
	if err != nil {
		result.Tool.Status = "failed"
		result.Tool.Duration = time.Since(startedAt).Round(time.Millisecond).String()
		result.Findings = []Finding{{
			Tool:     "catalog",
			Severity: "error",
			Code:     "catalog-scan",
			Message:  err.Error(),
			Category: "correctness",
		}}
		result.Tool.Findings = len(result.Findings)
		return result
	}
	var findings []Finding
	if len(files) > 0 {
		// Explicit file list: resolve relative paths against the project root;
		// per-file scan errors are skipped deliberately (best-effort).
		for _, file := range files {
			scanPath := file
			if !filepath.IsAbs(scanPath) {
				scanPath = filepath.Join(input.Path, file)
			}
			fileFindings, scanErr := scanner.ScanFile(scanPath)
			if scanErr != nil {
				continue
			}
			findings = append(findings, fileFindings...)
		}
	} else {
		findings, _ = scanner.ScanDir(input.Path)
	}
	// Normalise: copy rule ID into Code, title into Message, canonicalise
	// severity, and derive category from the matched rule's tags.
	for index := range findings {
		rule := catalog.ByID(findings[index].RuleID)
		findings[index].Tool = "catalog"
		findings[index].Code = findings[index].RuleID
		findings[index].Message = findings[index].Title
		findings[index].Severity = normaliseSeverity(findings[index].Severity)
		if rule != nil {
			findings[index].Category = ruleCategory(*rule)
		}
	}
	result.Findings = findings
	result.Tool.Findings = len(findings)
	result.Tool.Duration = time.Since(startedAt).Round(time.Millisecond).String()
	if len(findings) > 0 {
		result.Tool.Status = "failed"
	} else {
		result.Tool.Status = "passed"
	}
	return result
}
// loadBuiltinCatalog parses the embedded defaultCatalogRulesYAML into a Catalog.
func loadBuiltinCatalog() (*Catalog, error) {
	rules, err := ParseRules([]byte(defaultCatalogRulesYAML))
	if err != nil {
		return nil, coreerr.E("loadBuiltinCatalog", "parse embedded fallback rules", err)
	}
	return &Catalog{Rules: rules}, nil
}
// goProjectArguments builds arguments for Go tools: the given prefix flags,
// then either the explicit file list or the "./..." package wildcard.
func goProjectArguments(prefix ...string) commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		args := make([]string, 0, len(prefix)+len(files)+1)
		args = append(args, prefix...)
		if len(files) == 0 {
			return append(args, "./...")
		}
		return append(args, files...)
	}
}
// projectPathArguments builds arguments from the prefix flags plus either the
// explicit file list or "." for the current project directory.
func projectPathArguments(prefix ...string) commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		args := make([]string, 0, len(prefix)+len(files)+1)
		args = append(args, prefix...)
		if len(files) == 0 {
			return append(args, ".")
		}
		return append(args, files...)
	}
}
// recursiveProjectPathArguments mirrors projectPathArguments; it exists as a
// separate name for tools whose prefix already carries a recursive flag.
func recursiveProjectPathArguments(prefix ...string) commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		args := make([]string, 0, len(prefix)+len(files)+1)
		args = append(args, prefix...)
		if len(files) == 0 {
			return append(args, ".")
		}
		return append(args, files...)
	}
}
// filePathArguments builds arguments from the prefix flags plus either the
// explicit file list or "." when no files are given.
func filePathArguments(prefix ...string) commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		args := make([]string, 0, len(prefix)+len(files)+1)
		args = append(args, prefix...)
		if len(files) == 0 {
			return append(args, ".")
		}
		return append(args, files...)
	}
}
// phpmdArguments builds phpmd's positional CLI: target, output format, rule sets.
func phpmdArguments() commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		const ruleSets = "cleancode,codesize,controversial,design,naming,unusedcode"
		if len(files) == 0 {
			return []string{".", "json", ruleSets}
		}
		// phpmd accepts a comma-separated list of paths as its target.
		return []string{strings.Join(files, ","), "json", ruleSets}
	}
}
// runCommand executes binary with args in workingDir and returns stdout,
// stderr, the exit code (0 on success, -1 when the process never exited,
// e.g. start failure), and the raw error.
func runCommand(ctx context.Context, workingDir string, binary string, args []string) (string, string, int, error) {
	var stdout, stderr bytes.Buffer
	command := exec.CommandContext(ctx, binary, args...)
	command.Stdout = &stdout
	command.Stderr = &stderr
	if workingDir != "" {
		command.Dir = workingDir
	}
	runErr := command.Run()
	outText, errText := stdout.String(), stderr.String()
	if runErr == nil {
		return outText, errText, 0, nil
	}
	var exitErr *exec.ExitError
	if errors.As(runErr, &exitErr) {
		return outText, errText, exitErr.ExitCode(), runErr
	}
	return outText, errText, -1, runErr
}
// parseGovulncheckDiagnostics converts govulncheck JSON output (via
// ParseVulnCheckJSON) into error-severity findings, one per vulnerability.
func parseGovulncheckDiagnostics(tool string, category string, output string) []Finding {
	report, err := ParseVulnCheckJSON(output, "")
	if err != nil || report == nil {
		return nil
	}
	var findings []Finding
	for _, vuln := range report.Findings {
		// Prefer the description, fall back to the package, then the ID.
		message := strings.TrimSpace(firstNonEmpty(vuln.Description, vuln.Package))
		if message == "" {
			message = vuln.ID
		}
		finding := Finding{
			Tool:     tool,
			File:     vuln.Package,
			Severity: "error",
			Code:     vuln.ID,
			Message:  message,
			Category: category,
		}
		findings = append(findings, finding)
	}
	return findings
}
// parseJSONDiagnostics decodes a stream of JSON documents from output,
// harvests findings recursively from each, and deduplicates the result.
// Any malformed document aborts the parse and yields nil.
func parseJSONDiagnostics(tool string, category string, output string) []Finding {
	var findings []Finding
	decoder := json.NewDecoder(strings.NewReader(output))
	for {
		var document any
		switch err := decoder.Decode(&document); {
		case errors.Is(err, io.EOF):
			return dedupeFindings(findings)
		case err != nil:
			return nil
		}
		findings = append(findings, collectJSONDiagnostics(tool, category, document)...)
	}
}
// collectJSONDiagnostics walks an arbitrary decoded JSON value, emitting a
// finding for every object that findingFromMap recognises, and recursing
// into all arrays and object values.
func collectJSONDiagnostics(tool string, category string, value any) []Finding {
	var findings []Finding
	switch typed := value.(type) {
	case []any:
		for _, element := range typed {
			findings = append(findings, collectJSONDiagnostics(tool, category, element)...)
		}
	case map[string]any:
		if finding, ok := findingFromMap(tool, category, typed); ok {
			findings = append(findings, finding)
		}
		for _, element := range typed {
			findings = append(findings, collectJSONDiagnostics(tool, category, element)...)
		}
	}
	return findings
}
// findingFromMap attempts to interpret one decoded JSON object as a finding,
// probing the key spellings used by the supported tools (golangci-lint,
// SARIF-style, trivy, etc.). Returns false when the object does not look
// like a diagnostic.
func findingFromMap(tool string, category string, fields map[string]any) (Finding, bool) {
	// File path candidates across tool formats.
	file := firstStringPath(fields,
		[]string{"file"},
		[]string{"File"},
		[]string{"filename"},
		[]string{"path"},
		[]string{"location", "path"},
		[]string{"artifactLocation", "uri"},
		[]string{"Target"},
	)
	// Line number candidates.
	line := firstIntPath(fields,
		[]string{"line"},
		[]string{"Line"},
		[]string{"startLine"},
		[]string{"StartLine"},
		[]string{"region", "startLine"},
		[]string{"location", "start", "line"},
		[]string{"Start", "Line"},
	)
	// Column number candidates.
	column := firstIntPath(fields,
		[]string{"column"},
		[]string{"Column"},
		[]string{"col"},
		[]string{"startColumn"},
		[]string{"StartColumn"},
		[]string{"region", "startColumn"},
		[]string{"location", "start", "column"},
	)
	// Rule/check identifier candidates.
	code := firstStringPath(fields,
		[]string{"code"},
		[]string{"Code"},
		[]string{"rule"},
		[]string{"Rule"},
		[]string{"rule_id"},
		[]string{"RuleID"},
		[]string{"check_id"},
		[]string{"checkId"},
		[]string{"id"},
		[]string{"ID"},
	)
	// Human-readable message candidates.
	message := firstStringPath(fields,
		[]string{"message"},
		[]string{"Message"},
		[]string{"description"},
		[]string{"Description"},
		[]string{"title"},
		[]string{"Title"},
		[]string{"message", "text"},
		[]string{"Message", "Text"},
	)
	// Severity/level candidates.
	severity := firstStringPath(fields,
		[]string{"severity"},
		[]string{"Severity"},
		[]string{"level"},
		[]string{"Level"},
		[]string{"type"},
		[]string{"Type"},
	)
	// Must carry at least a message or a code to count as a diagnostic.
	if message == "" && code == "" {
		return Finding{}, false
	}
	// Without a location, only keep it for security-ish categories or when a
	// code is present. NOTE(review): with the previous check, code == "" here
	// implies message != "", so this mostly filters location-less,
	// code-less, non-security objects.
	if file == "" && line == 0 && !strings.Contains(strings.ToLower(category), "security") && code == "" {
		return Finding{}, false
	}
	return Finding{
		Tool:     tool,
		File:     file,
		Line:     line,
		Column:   column,
		Severity: firstNonEmpty(normaliseSeverity(severity), defaultSeverityForCategory(category)),
		Code:     code,
		Message:  message,
		Category: category,
	}, true
}
// parseTextDiagnostics parses line-oriented "file:line[:col]: message" output.
// When no line parses but the output is non-empty, the whole output becomes
// one synthetic "diagnostic" finding. Results are deduplicated.
func parseTextDiagnostics(tool string, category string, output string) []Finding {
	trimmedOutput := strings.TrimSpace(output)
	var findings []Finding
	for _, rawLine := range strings.Split(trimmedOutput, "\n") {
		candidate := strings.TrimSpace(rawLine)
		if candidate == "" {
			continue
		}
		if finding, ok := parseTextDiagnosticLine(tool, category, candidate); ok {
			findings = append(findings, finding)
		}
	}
	if len(findings) == 0 && trimmedOutput != "" {
		findings = append(findings, Finding{
			Tool:     tool,
			Severity: defaultSeverityForCategory(category),
			Code:     "diagnostic",
			Message:  trimmedOutput,
			Category: category,
		})
	}
	return dedupeFindings(findings)
}
// parsePrettierDiagnostics treats each non-empty output line from
// `prettier --list-different` as the path of an unformatted file.
func parsePrettierDiagnostics(tool string, category string, output string) []Finding {
	var findings []Finding
	for _, rawLine := range strings.Split(strings.TrimSpace(output), "\n") {
		fileName := strings.TrimSpace(rawLine)
		if fileName == "" {
			continue
		}
		findings = append(findings, Finding{
			Tool:     tool,
			File:     filepath.ToSlash(fileName),
			Severity: defaultSeverityForCategory(category),
			Code:     "prettier-format",
			Message:  "File is not formatted with Prettier",
			Category: category,
		})
	}
	return dedupeFindings(findings)
}
// parseTextDiagnosticLine parses a single "file:line[:col]: message" line.
// It returns false when the line has too few segments, a non-numeric line
// number, or an empty message.
func parseTextDiagnosticLine(tool string, category string, line string) (Finding, bool) {
	segments := strings.Split(line, ":")
	if len(segments) < 3 {
		return Finding{}, false
	}
	lineNumber, convErr := strconv.Atoi(strings.TrimSpace(segments[1]))
	if convErr != nil {
		return Finding{}, false
	}
	// The third segment is an optional column; when numeric, the message
	// starts one segment later.
	column := 0
	messageStart := 2
	if len(segments) > 3 {
		if parsed, err := strconv.Atoi(strings.TrimSpace(segments[2])); err == nil {
			column = parsed
			messageStart = 3
		}
	}
	message := strings.TrimSpace(strings.Join(segments[messageStart:], ":"))
	if message == "" {
		return Finding{}, false
	}
	// Infer severity from the message text; "warning" takes precedence
	// over "error" when both words appear (matches original behavior).
	lowered := strings.ToLower(message)
	severity := defaultSeverityForCategory(category)
	if strings.Contains(lowered, "warning") {
		severity = "warning"
	} else if strings.Contains(lowered, "error") {
		severity = "error"
	}
	return Finding{
		Tool:     tool,
		File:     filepath.ToSlash(strings.TrimSpace(segments[0])),
		Line:     lineNumber,
		Column:   column,
		Severity: severity,
		Code:     "diagnostic",
		Message:  message,
		Category: category,
	}, true
}
// firstStringPath returns the first non-blank string (or json.Number rendered
// as a string) found at any of the given key paths, probing in order.
func firstStringPath(fields map[string]any, paths ...[]string) string {
	for _, path := range paths {
		value, found := lookupPath(fields, path)
		if !found {
			continue
		}
		switch typed := value.(type) {
		case string:
			if trimmed := strings.TrimSpace(typed); trimmed != "" {
				return trimmed
			}
		case json.Number:
			return typed.String()
		}
	}
	return ""
}
// firstIntPath returns the first value at the given key paths that can be
// read as an integer (int, int64, float64, json.Number, or numeric string),
// probing paths in order; 0 when nothing matches.
func firstIntPath(fields map[string]any, paths ...[]string) int {
	for _, path := range paths {
		value, found := lookupPath(fields, path)
		if !found {
			continue
		}
		switch typed := value.(type) {
		case int:
			return typed
		case int64:
			return int(typed)
		case float64:
			return int(typed)
		case json.Number:
			parsed, _ := typed.Int64()
			return int(parsed)
		case string:
			// Non-numeric strings fall through to the next path.
			if parsed, err := strconv.Atoi(strings.TrimSpace(typed)); err == nil {
				return parsed
			}
		}
	}
	return 0
}
// lookupPath descends into nested map[string]any values following path
// segment by segment; false when any segment is missing or not an object.
func lookupPath(fields map[string]any, path []string) (any, bool) {
	var current any = fields
	for _, segment := range path {
		object, isMap := current.(map[string]any)
		if !isMap {
			return nil, false
		}
		next, found := mapValue(object, segment)
		if !found {
			return nil, false
		}
		current = next
	}
	return current, true
}
// mapValue looks up key in fields, first exactly, then case-insensitively.
func mapValue(fields map[string]any, key string) (any, bool) {
	if value, ok := fields[key]; ok {
		return value, true
	}
	want := strings.ToLower(key)
	for candidate, value := range fields {
		if strings.ToLower(candidate) == want {
			return value, true
		}
	}
	return nil, false
}
// dedupeFindings drops duplicate findings, keeping first occurrences.
// Identity is the tuple (tool, file, line, column, code, message).
func dedupeFindings(findings []Finding) []Finding {
	seen := map[string]bool{}
	var unique []Finding
	for _, finding := range findings {
		parts := []string{
			finding.Tool,
			finding.File,
			strconv.Itoa(finding.Line),
			strconv.Itoa(finding.Column),
			finding.Code,
			finding.Message,
		}
		key := strings.Join(parts, "|")
		if !seen[key] {
			seen[key] = true
			unique = append(unique, finding)
		}
	}
	return unique
}
// filterRulesByTag keeps only the rules whose Tags include tag.
func filterRulesByTag(rules []Rule, tag string) []Rule {
	var matched []Rule
	for _, rule := range rules {
		if containsString(rule.Tags, tag) {
			matched = append(matched, rule)
		}
	}
	return matched
}
// ruleCategory derives a finding category from the first recognised tag on
// the rule, defaulting to "correctness".
func ruleCategory(rule Rule) string {
	recognised := map[string]bool{
		"security":    true,
		"style":       true,
		"correctness": true,
		"performance": true,
		"compliance":  true,
	}
	for _, tag := range rule.Tags {
		if recognised[tag] {
			return tag
		}
	}
	return "correctness"
}
// normaliseSeverity canonicalises a tool-reported severity into one of
// "error", "warning", or "info"; unrecognised values are returned trimmed
// and lowercased as-is.
func normaliseSeverity(severity string) string {
	canonical := strings.ToLower(strings.TrimSpace(severity))
	switch canonical {
	case "critical", "high", "error", "errors":
		return "error"
	case "medium", "low", "warning", "warn":
		return "warning"
	case "info", "note":
		return "info"
	}
	return canonical
}
// defaultSeverityForCategory picks the fallback severity for a category:
// security findings default to "error", everything else to "warning".
func defaultSeverityForCategory(category string) string {
	if category == "security" {
		return "error"
	}
	return "warning"
}
// firstNonEmpty returns the first value that is non-blank after trimming,
// already trimmed; "" when every value is blank.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
// firstVersionLine returns the first non-blank line of output, trimmed.
func firstVersionLine(output string) string {
	for _, raw := range strings.Split(strings.TrimSpace(output), "\n") {
		if trimmed := strings.TrimSpace(raw); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
// containsString reports whether target appears in values (exact match).
func containsString(values []string, target string) bool {
	for index := range values {
		if values[index] == target {
			return true
		}
	}
	return false
}
// defaultCatalogRulesYAML is the embedded fallback rule catalog, parsed by
// loadBuiltinCatalog and used by CatalogAdapter.Run. The string content is
// data consumed at runtime and must not be reformatted.
const defaultCatalogRulesYAML = `
- id: go-cor-003
  title: "Silent error swallowing with blank identifier"
  severity: medium
  languages: [go]
  tags: [correctness, errors]
  pattern: '^\s*_\s*=\s*\w+\.\w+\('
  exclude_pattern: 'defer|Close\(|Flush\('
  fix: "Handle the error explicitly — log it, return it, or document why it is safe to discard"
  detection: regex
  auto_fixable: false
- id: go-cor-004
  title: "Panic in library code"
  severity: high
  languages: [go]
  tags: [correctness, panic]
  pattern: '\bpanic\('
  exclude_pattern: '_test\.go|// unreachable|Must\w+\('
  fix: "Return an error instead of panicking — panics in libraries crash the caller"
  detection: regex
  auto_fixable: false
- id: go-sec-001
  title: "SQL wildcard injection in LIKE clauses"
  severity: high
  languages: [go]
  tags: [security, injection]
  pattern: 'LIKE\s+\?.*["%].*\+'
  fix: "Use parameterised LIKE with EscapeLike() helper to sanitise wildcard characters"
  detection: regex
  auto_fixable: false
- id: go-sec-002
  title: "Path traversal via filepath.Join"
  severity: high
  languages: [go]
  tags: [security, path-traversal]
  pattern: 'filepath\.Join\(.*,\s*\w+\)'
  exclude_pattern: 'filepath\.Clean|securejoin|ValidatePath'
  fix: "Validate the path component or use securejoin to prevent directory traversal"
  detection: regex
  auto_fixable: false
- id: go-sec-004
  title: "Non-constant-time authentication comparison"
  severity: critical
  languages: [go]
  tags: [security, timing-attack]
  pattern: '==\s*\w*(token|key|secret|password|hash|digest|hmac|mac|sig)'
  exclude_pattern: 'subtle\.ConstantTimeCompare|hmac\.Equal'
  fix: "Use subtle.ConstantTimeCompare() or hmac.Equal() for timing-safe comparison"
  detection: regex
  auto_fixable: false
`

View file

@ -1,12 +1,13 @@
package lint
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// severityOrder maps severity names to numeric ranks for threshold comparison.
@ -25,23 +26,24 @@ type Catalog struct {
// LoadDir reads all .yaml files from the given directory and returns a Catalog.
func LoadDir(dir string) (*Catalog, error) {
entries, err := os.ReadDir(dir)
entries, err := coreio.Local.List(dir)
if err != nil {
return nil, fmt.Errorf("loading catalog from %s: %w", dir, err)
return nil, coreerr.E("Catalog.LoadDir", "loading catalog from "+dir, err)
}
sortDirEntries(entries)
var rules []Rule
for _, entry := range entries {
if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".yaml") {
continue
}
data, err := os.ReadFile(filepath.Join(dir, entry.Name()))
raw, err := coreio.Local.Read(filepath.Join(dir, entry.Name()))
if err != nil {
return nil, fmt.Errorf("reading %s: %w", entry.Name(), err)
return nil, coreerr.E("Catalog.LoadDir", "reading "+entry.Name(), err)
}
parsed, err := ParseRules(data)
parsed, err := ParseRules([]byte(raw))
if err != nil {
return nil, fmt.Errorf("parsing %s: %w", entry.Name(), err)
return nil, coreerr.E("Catalog.LoadDir", "parsing "+entry.Name(), err)
}
rules = append(rules, parsed...)
}
@ -53,8 +55,9 @@ func LoadDir(dir string) (*Catalog, error) {
func LoadFS(fsys fs.FS, dir string) (*Catalog, error) {
entries, err := fs.ReadDir(fsys, dir)
if err != nil {
return nil, fmt.Errorf("loading catalog from embedded %s: %w", dir, err)
return nil, coreerr.E("Catalog.LoadFS", "loading catalog from embedded "+dir, err)
}
sortDirEntries(entries)
var rules []Rule
for _, entry := range entries {
@ -63,11 +66,11 @@ func LoadFS(fsys fs.FS, dir string) (*Catalog, error) {
}
data, err := fs.ReadFile(fsys, dir+"/"+entry.Name())
if err != nil {
return nil, fmt.Errorf("reading embedded %s: %w", entry.Name(), err)
return nil, coreerr.E("Catalog.LoadFS", "reading embedded "+entry.Name(), err)
}
parsed, err := ParseRules(data)
if err != nil {
return nil, fmt.Errorf("parsing embedded %s: %w", entry.Name(), err)
return nil, coreerr.E("Catalog.LoadFS", "parsing embedded "+entry.Name(), err)
}
rules = append(rules, parsed...)
}
@ -75,6 +78,12 @@ func LoadFS(fsys fs.FS, dir string) (*Catalog, error) {
return &Catalog{Rules: rules}, nil
}
func sortDirEntries(entries []fs.DirEntry) {
slices.SortFunc(entries, func(a, b fs.DirEntry) int {
return strings.Compare(a.Name(), b.Name())
})
}
// ForLanguage returns all rules that apply to the given language.
func (c *Catalog) ForLanguage(lang string) []Rule {
var result []Rule

View file

@ -29,6 +29,38 @@ func TestLoadDir_Good(t *testing.T) {
assert.NotNil(t, cat.ByID("go-mod-001"))
}
func TestLoadDir_SortsFilesDeterministically(t *testing.T) {
dir := t.TempDir()
err := os.WriteFile(filepath.Join(dir, "z.yaml"), []byte(`- id: z-rule
title: "Z rule"
severity: info
languages: [go]
pattern: 'z'
fix: "z"
detection: regex
auto_fixable: false
`), 0o644)
require.NoError(t, err)
err = os.WriteFile(filepath.Join(dir, "a.yaml"), []byte(`- id: a-rule
title: "A rule"
severity: info
languages: [go]
pattern: 'a'
fix: "a"
detection: regex
auto_fixable: false
`), 0o644)
require.NoError(t, err)
cat, err := LoadDir(dir)
require.NoError(t, err)
require.Len(t, cat.Rules, 2)
assert.Equal(t, "a-rule", cat.Rules[0].ID)
assert.Equal(t, "z-rule", cat.Rules[1].ID)
}
func TestLoadDir_Bad_NonexistentDir(t *testing.T) {
_, err := LoadDir("/nonexistent/path/that/does/not/exist")
assert.Error(t, err)

View file

@ -1,13 +1,15 @@
package lint
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"path/filepath"
"strings"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// ComplexityConfig controls cyclomatic complexity analysis.
@ -45,9 +47,9 @@ func AnalyseComplexity(cfg ComplexityConfig) ([]ComplexityResult, error) {
var results []ComplexityResult
info, err := os.Stat(cfg.Path)
info, err := coreio.Local.Stat(cfg.Path)
if err != nil {
return nil, fmt.Errorf("stat %s: %w", cfg.Path, err)
return nil, coreerr.E("AnalyseComplexity", "stat "+cfg.Path, err)
}
if !info.IsDir() {
@ -81,7 +83,7 @@ func AnalyseComplexity(cfg ComplexityConfig) ([]ComplexityResult, error) {
return nil
})
if err != nil {
return nil, fmt.Errorf("walk %s: %w", cfg.Path, err)
return nil, coreerr.E("AnalyseComplexity", "walk "+cfg.Path, err)
}
return results, nil
@ -97,7 +99,7 @@ func AnalyseComplexitySource(src string, filename string, threshold int) ([]Comp
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
if err != nil {
return nil, fmt.Errorf("parse %s: %w", filename, err)
return nil, coreerr.E("AnalyseComplexitySource", "parse "+filename, err)
}
var results []ComplexityResult
@ -130,11 +132,11 @@ func AnalyseComplexitySource(src string, filename string, threshold int) ([]Comp
// analyseFile parses a single Go file and returns functions exceeding the threshold.
func analyseFile(path string, threshold int) ([]ComplexityResult, error) {
src, err := os.ReadFile(path)
src, err := coreio.Local.Read(path)
if err != nil {
return nil, fmt.Errorf("read %s: %w", path, err)
return nil, coreerr.E("analyseFile", "read "+path, err)
}
return AnalyseComplexitySource(string(src), path, threshold)
return AnalyseComplexitySource(src, path, threshold)
}
// calculateComplexity computes the cyclomatic complexity of a function.

182
pkg/lint/config.go Normal file
View file

@ -0,0 +1,182 @@
package lint
import (
"os"
"path/filepath"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
"gopkg.in/yaml.v3"
)
// DefaultConfigPath is the repo-local config path used by core-lint.
const DefaultConfigPath = ".core/lint.yaml"

// LintConfig defines which tools run for each language and how results fail the build.
//
//	cfg := lint.DefaultConfig()
//	cfg.FailOn = "warning"
type LintConfig struct {
	// Lint maps config groups (go, php, js, ...) to tool names.
	Lint ToolGroups `yaml:"lint" json:"lint"`
	// Output selects the report format (e.g. "json").
	Output string `yaml:"output" json:"output"`
	// FailOn is the severity threshold that fails the run (e.g. "error").
	FailOn string `yaml:"fail_on" json:"fail_on"`
	// Paths lists the directories to lint.
	Paths []string `yaml:"paths" json:"paths"`
	// Exclude lists path prefixes to skip.
	Exclude []string `yaml:"exclude" json:"exclude"`
	// Schedules declares named lint runs for external schedulers.
	Schedules map[string]Schedule `yaml:"schedules,omitempty" json:"schedules,omitempty"`
}

// ToolGroups maps config groups to tool names.
type ToolGroups struct {
	Go         []string `yaml:"go,omitempty" json:"go,omitempty"`
	PHP        []string `yaml:"php,omitempty" json:"php,omitempty"`
	JS         []string `yaml:"js,omitempty" json:"js,omitempty"`
	TS         []string `yaml:"ts,omitempty" json:"ts,omitempty"`
	Python     []string `yaml:"python,omitempty" json:"python,omitempty"`
	Infra      []string `yaml:"infra,omitempty" json:"infra,omitempty"`
	Security   []string `yaml:"security,omitempty" json:"security,omitempty"`
	Compliance []string `yaml:"compliance,omitempty" json:"compliance,omitempty"`
}

// Schedule declares a named lint run for external schedulers.
type Schedule struct {
	// Cron is the schedule expression consumed by the external scheduler.
	Cron string `yaml:"cron" json:"cron"`
	// Categories optionally restricts the run to specific categories.
	Categories []string `yaml:"categories,omitempty" json:"categories,omitempty"`
	// Output optionally overrides the report format for this run.
	Output string `yaml:"output,omitempty" json:"output,omitempty"`
	// Paths optionally overrides the directories to lint.
	Paths []string `yaml:"paths,omitempty" json:"paths,omitempty"`
	// FailOn optionally overrides the failure threshold.
	FailOn string `yaml:"fail_on,omitempty" json:"fail_on,omitempty"`
}
// DefaultConfig returns the RFC baseline config used when a repo has no local file yet.
// It enables the full default tool set per language, JSON output, failure on
// "error", the current directory as the only path, and standard vendor-style
// exclusions.
//
//	cfg := lint.DefaultConfig()
//	cfg.Output = "sarif"
func DefaultConfig() LintConfig {
	return LintConfig{
		Lint: ToolGroups{
			Go: []string{
				"golangci-lint",
				"gosec",
				"govulncheck",
				"staticcheck",
				"revive",
				"errcheck",
			},
			PHP: []string{
				"phpstan",
				"psalm",
				"phpcs",
				"phpmd",
				"pint",
			},
			JS: []string{
				"biome",
				"oxlint",
				"eslint",
				"prettier",
			},
			TS: []string{
				"biome",
				"oxlint",
				"typescript",
			},
			Python: []string{
				"ruff",
				"mypy",
				"bandit",
				"pylint",
			},
			Infra: []string{
				"shellcheck",
				"hadolint",
				"yamllint",
				"jsonlint",
				"markdownlint",
			},
			Security: []string{
				"gitleaks",
				"trivy",
				"gosec",
				"bandit",
				"semgrep",
			},
			Compliance: []string{
				"syft",
				"grype",
				"scancode",
			},
		},
		Output: "json",
		FailOn: "error",
		Paths:  []string{"."},
		// Skip vendored/generated trees and core-lint's own config directory.
		Exclude: []string{"vendor/", "node_modules/", ".core/"},
	}
}
// DefaultConfigYAML marshals the default config as the file content for `core-lint init`.
//
// Returns the YAML text, or a wrapped error if marshalling fails.
func DefaultConfigYAML() (string, error) {
	content, err := yaml.Marshal(DefaultConfig())
	if err != nil {
		return "", coreerr.E("DefaultConfigYAML", "marshal default config", err)
	}
	return string(content), nil
}
// ResolveConfigPath resolves an explicit config path or the repo-local default.
//
// An empty projectPath means the current directory. An absolute override is
// returned as-is; a relative one is joined onto projectPath.
//
//	path := lint.ResolveConfigPath(".", "")
//	override := lint.ResolveConfigPath("/repo", ".core/lint.yaml")
func ResolveConfigPath(projectPath string, override string) string {
	root := projectPath
	if root == "" {
		root = "."
	}
	switch {
	case override == "":
		return filepath.Join(root, DefaultConfigPath)
	case filepath.IsAbs(override):
		return override
	default:
		return filepath.Join(root, override)
	}
}
// LoadProjectConfig reads `.core/lint.yaml` if present, otherwise returns the default config.
//
// The returned path is empty when the defaults were used. The parsed file is
// unmarshalled over the defaults, so unset keys keep their default values.
//
//	cfg, path, err := lint.LoadProjectConfig(".", "")
//	cfg, _, err = lint.LoadProjectConfig("/repo", ".core/lint.yaml")
func LoadProjectConfig(projectPath string, override string) (LintConfig, string, error) {
	cfg := DefaultConfig()
	configPath := ResolveConfigPath(projectPath, override)

	if _, statErr := coreio.Local.Stat(configPath); statErr != nil {
		// A missing config file is not an error: the defaults apply.
		if os.IsNotExist(statErr) {
			return cfg, "", nil
		}
		return cfg, "", coreerr.E("LoadProjectConfig", "stat "+configPath, statErr)
	}

	raw, readErr := coreio.Local.Read(configPath)
	if readErr != nil {
		return cfg, "", coreerr.E("LoadProjectConfig", "read "+configPath, readErr)
	}
	if parseErr := yaml.Unmarshal([]byte(raw), &cfg); parseErr != nil {
		return cfg, "", coreerr.E("LoadProjectConfig", "parse "+configPath, parseErr)
	}
	return cfg, configPath, nil
}
// ResolveSchedule returns a named schedule from the config.
//
// An empty name means "no schedule requested" and yields (nil, nil); an
// unknown name is an error.
//
//	schedule, err := lint.ResolveSchedule(cfg, "nightly")
func ResolveSchedule(config LintConfig, name string) (*Schedule, error) {
	if name == "" {
		return nil, nil
	}
	if schedule, ok := config.Schedules[name]; ok {
		return &schedule, nil
	}
	return nil, coreerr.E("ResolveSchedule", "schedule "+name+" not found", nil)
}

View file

@ -2,22 +2,26 @@ package lint
import (
"bufio"
"cmp"
"encoding/json"
"fmt"
"math"
"os"
"regexp"
"slices"
"strconv"
"strings"
"time"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// CoverageSnapshot represents a point-in-time coverage measurement.
type CoverageSnapshot struct {
Timestamp time.Time `json:"timestamp"`
Packages map[string]float64 `json:"packages"` // package → coverage %
Total float64 `json:"total"` // overall coverage %
Meta map[string]string `json:"meta,omitempty"` // optional metadata (commit, branch, etc.)
Packages map[string]float64 `json:"packages"` // package → coverage %
Total float64 `json:"total"` // overall coverage %
Meta map[string]string `json:"meta,omitempty"` // optional metadata (commit, branch, etc.)
}
// CoverageRegression flags a package whose coverage changed between runs.
@ -51,32 +55,32 @@ func NewCoverageStore(path string) *CoverageStore {
func (s *CoverageStore) Append(snap CoverageSnapshot) error {
snapshots, err := s.Load()
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("load snapshots: %w", err)
return coreerr.E("CoverageStore.Append", "load snapshots", err)
}
snapshots = append(snapshots, snap)
data, err := json.MarshalIndent(snapshots, "", " ")
if err != nil {
return fmt.Errorf("marshal snapshots: %w", err)
return coreerr.E("CoverageStore.Append", "marshal snapshots", err)
}
if err := os.WriteFile(s.Path, data, 0644); err != nil {
return fmt.Errorf("write %s: %w", s.Path, err)
if err := coreio.Local.Write(s.Path, string(data)); err != nil {
return coreerr.E("CoverageStore.Append", "write "+s.Path, err)
}
return nil
}
// Load reads all snapshots from the store.
func (s *CoverageStore) Load() ([]CoverageSnapshot, error) {
data, err := os.ReadFile(s.Path)
raw, err := coreio.Local.Read(s.Path)
if err != nil {
return nil, err
}
var snapshots []CoverageSnapshot
if err := json.Unmarshal(data, &snapshots); err != nil {
return nil, fmt.Errorf("parse %s: %w", s.Path, err)
if err := json.Unmarshal([]byte(raw), &snapshots); err != nil {
return nil, coreerr.E("CoverageStore.Load", "parse "+s.Path, err)
}
return snapshots, nil
}
@ -245,5 +249,24 @@ func CompareCoverage(previous, current CoverageSnapshot) CoverageComparison {
}
}
slices.Sort(comp.NewPackages)
slices.Sort(comp.Removed)
slices.SortFunc(comp.Regressions, func(a, b CoverageRegression) int {
return cmp.Or(
cmp.Compare(a.Package, b.Package),
cmp.Compare(a.Previous, b.Previous),
cmp.Compare(a.Current, b.Current),
cmp.Compare(a.Delta, b.Delta),
)
})
slices.SortFunc(comp.Improvements, func(a, b CoverageRegression) int {
return cmp.Or(
cmp.Compare(a.Package, b.Package),
cmp.Compare(a.Previous, b.Previous),
cmp.Compare(a.Current, b.Current),
cmp.Compare(a.Delta, b.Delta),
)
})
return comp
}

View file

@ -79,6 +79,37 @@ func TestCompareCoverage(t *testing.T) {
assert.InDelta(t, 6.7, comp.TotalDelta, 0.1)
}
// CompareCoverage must return deterministically sorted slices regardless of
// map iteration order: new/removed package names sorted, and regressions/
// improvements ordered by package name.
func TestCompareCoverage_SortsResultSlices(t *testing.T) {
	prev := CoverageSnapshot{
		Packages: map[string]float64{
			"pkg/z": 90.0,
			"pkg/b": 60.0,
			"pkg/a": 80.0,
			"pkg/c": 50.0,
		},
		Total: 70.0,
	}
	curr := CoverageSnapshot{
		Packages: map[string]float64{
			"pkg/b": 55.0,
			"pkg/a": 70.0,
			"pkg/c": 60.0,
			"pkg/y": 40.0,
		},
		Total: 55.0,
	}
	comp := CompareCoverage(prev, curr)
	// pkg/y only exists in curr; pkg/z only in prev.
	assert.Equal(t, []string{"pkg/y"}, comp.NewPackages)
	assert.Equal(t, []string{"pkg/z"}, comp.Removed)
	// pkg/a and pkg/b dropped; they must appear in name order.
	require.Len(t, comp.Regressions, 2)
	assert.Equal(t, "pkg/a", comp.Regressions[0].Package)
	assert.Equal(t, "pkg/b", comp.Regressions[1].Package)
	// pkg/c improved from 50 to 60.
	require.Len(t, comp.Improvements, 1)
	assert.Equal(t, "pkg/c", comp.Improvements[0].Package)
}
func TestCompareCoverage_NoChange(t *testing.T) {
snap := CoverageSnapshot{
Packages: map[string]float64{"pkg/a": 80.0},

129
pkg/lint/detect_project.go Normal file
View file

@ -0,0 +1,129 @@
package lint
import (
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
)
// projectLanguageByExtension maps lowercase file extensions to the language
// identifiers reported by Detect. Marker files (go.mod, Dockerfile, ...)
// take precedence over this table — see recordDetectedPath.
var projectLanguageByExtension = map[string]string{
	".go":   "go",
	".php":  "php",
	".cpp":  "cpp",
	".cc":   "cpp",
	".c":    "cpp", // C and C++ sources are grouped under a single "cpp" identifier
	".h":    "cpp",
	".js":   "js",
	".jsx":  "js",
	".ts":   "ts",
	".tsx":  "ts",
	".py":   "python",
	".rs":   "rust",
	".sh":   "shell",
	".yaml": "yaml",
	".yml":  "yaml",
	".json": "json",
	".md":   "markdown",
}
// Detect returns the project languages inferred from markers and file names.
//
// The result is sorted and never nil. Missing paths and excluded root
// directories yield an empty slice; traversal errors are tolerated because
// detection is best-effort.
//
//	lint.Detect(".")
//	lint.Detect("/path/to/project")
func Detect(path string) []string {
	root := path
	if root == "" {
		root = "."
	}
	found := make(map[string]bool)

	info, err := os.Stat(root)
	if err != nil {
		return []string{}
	}
	if !info.IsDir() {
		// A single file: classify it directly without walking.
		recordDetectedPath(found, root)
		return sortedDetectedLanguages(found)
	}
	if shouldSkipTraversalRoot(root) {
		return []string{}
	}

	_ = filepath.WalkDir(root, func(current string, entry fs.DirEntry, walkErr error) error {
		switch {
		case walkErr != nil:
			return nil
		case entry.IsDir():
			// Prune excluded/hidden directories, but never the root itself.
			if current != root && IsExcludedDir(entry.Name()) {
				return filepath.SkipDir
			}
			return nil
		default:
			recordDetectedPath(found, current)
			return nil
		}
	})
	return sortedDetectedLanguages(found)
}
// detectFromFiles infers languages from an explicit list of file paths,
// without touching the filesystem. The result is sorted and never nil.
func detectFromFiles(files []string) []string {
	found := make(map[string]bool)
	for _, path := range files {
		recordDetectedPath(found, path)
	}
	return sortedDetectedLanguages(found)
}
// recordDetectedPath marks the language implied by one path in seen.
// Well-known marker file names win over the extension table, and any
// "Dockerfile*" base name counts as dockerfile.
func recordDetectedPath(seen map[string]bool, path string) {
	name := filepath.Base(path)

	markerLanguages := map[string]string{
		"go.mod":           "go",
		"composer.json":    "php",
		"package.json":     "js",
		"tsconfig.json":    "ts",
		"requirements.txt": "python",
		"pyproject.toml":   "python",
		"Cargo.toml":       "rust",
	}
	if lang, ok := markerLanguages[name]; ok {
		seen[lang] = true
		return
	}
	if strings.HasPrefix(name, "Dockerfile") {
		seen["dockerfile"] = true
		return
	}

	// No marker matched: fall back to the extension table.
	if lang, ok := projectLanguageByExtension[strings.ToLower(filepath.Ext(name))]; ok {
		seen[lang] = true
	}
}
// sortedDetectedLanguages returns the keys of seen as a sorted slice.
// The result is never nil, so callers can compare against []string{}.
func sortedDetectedLanguages(seen map[string]bool) []string {
	languages := make([]string, 0, len(seen))
	for language := range seen {
		languages = append(languages, language)
	}
	slices.Sort(languages)
	return languages
}

View file

@ -0,0 +1,59 @@
package lint
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Detect must merge marker-derived languages (go.mod, package.json,
// tsconfig.json, requirements.txt, Dockerfile) with extension-derived ones
// (cpp, shell, markdown), skip excluded directories such as vendor/, and
// return the union sorted.
func TestDetect_Good_ProjectMarkersAndFiles(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "main.cpp"), []byte("int main() { return 0; }\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "tsconfig.json"), []byte("{}\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "requirements.txt"), []byte("ruff\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "Dockerfile"), []byte("FROM scratch\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "run.sh"), []byte("#!/bin/sh\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "README.md"), []byte("# Test\n"), 0o644))
	// vendor/ is an excluded directory: its .go file must not be counted.
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "vendor"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "vendor", "ignored.go"), []byte("package ignored\n"), 0o644))
	assert.Equal(t,
		[]string{"cpp", "dockerfile", "go", "js", "markdown", "python", "shell", "ts"},
		Detect(dir),
	)
}
// detectFromFiles must classify an explicit path list without filesystem
// access, applying the same marker and extension rules as Detect.
func TestDetectFromFiles_Good(t *testing.T) {
	files := []string{
		"main.go",
		"src/lib.cc",
		"web/app.ts",
		"Dockerfile",
		"scripts/run.sh",
		"docs/index.md",
	}
	assert.Equal(t,
		[]string{"cpp", "dockerfile", "go", "markdown", "shell", "ts"},
		detectFromFiles(files),
	)
}
// Detect on a nonexistent path must return an empty, non-nil slice.
func TestDetect_MissingPathReturnsEmptySlice(t *testing.T) {
	assert.Equal(t, []string{}, Detect(filepath.Join(t.TempDir(), "missing")))
}
// Detect pointed directly at a hidden directory (e.g. ".core") must refuse
// to traverse it and report no languages.
func TestDetect_Good_SkipsHiddenRootDirectory(t *testing.T) {
	dir := t.TempDir()
	hiddenDir := filepath.Join(dir, ".core")
	require.NoError(t, os.MkdirAll(hiddenDir, 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(hiddenDir, "main.go"), []byte("package main\n"), 0o644))
	assert.Equal(t, []string{}, Detect(hiddenDir))
}

View file

@ -2,20 +2,26 @@ package lint
import (
"bytes"
"fmt"
"regexp"
"strings"
coreerr "forge.lthn.ai/core/go-log"
)
// Finding represents a single match of a rule against a source file.
type Finding struct {
RuleID string `json:"rule_id"`
Title string `json:"title"`
Severity string `json:"severity"`
Tool string `json:"tool,omitempty"`
File string `json:"file"`
Line int `json:"line"`
Match string `json:"match"`
Fix string `json:"fix"`
Column int `json:"column,omitempty"`
Severity string `json:"severity"`
Code string `json:"code,omitempty"`
Message string `json:"message,omitempty"`
Category string `json:"category,omitempty"`
Fix string `json:"fix,omitempty"`
RuleID string `json:"rule_id,omitempty"`
Title string `json:"title,omitempty"`
Match string `json:"match,omitempty"`
Repo string `json:"repo,omitempty"`
}
@ -43,14 +49,14 @@ func NewMatcher(rules []Rule) (*Matcher, error) {
pat, err := regexp.Compile(r.Pattern)
if err != nil {
return nil, fmt.Errorf("compiling pattern for rule %s: %w", r.ID, err)
return nil, coreerr.E("NewMatcher", "compiling pattern for rule "+r.ID, err)
}
var excl *regexp.Regexp
if r.ExcludePattern != "" {
excl, err = regexp.Compile(r.ExcludePattern)
if err != nil {
return nil, fmt.Errorf("compiling exclude pattern for rule %s: %w", r.ID, err)
return nil, coreerr.E("NewMatcher", "compiling exclude pattern for rule "+r.ID, err)
}
}

29
pkg/lint/output.go Normal file
View file

@ -0,0 +1,29 @@
package lint
// ResolveRunOutputFormat resolves the report writer from the run input and project config.
//
// Precedence: explicit input.Output, then CI ("github"), then the named
// schedule's output, then the config's output, then "text".
//
//	format, err := lint.ResolveRunOutputFormat(lint.RunInput{Path: ".", CI: true})
//	format, err := lint.ResolveRunOutputFormat(lint.RunInput{Path: ".", Schedule: "nightly"})
func ResolveRunOutputFormat(input RunInput) (string, error) {
	switch {
	case input.Output != "":
		// Explicit format short-circuits config loading entirely.
		return input.Output, nil
	case input.CI:
		return "github", nil
	}

	config, _, err := LoadProjectConfig(input.Path, input.Config)
	if err != nil {
		return "", err
	}
	schedule, err := ResolveSchedule(config, input.Schedule)
	if err != nil {
		return "", err
	}

	switch {
	case schedule != nil && schedule.Output != "":
		return schedule.Output, nil
	case config.Output != "":
		return config.Output, nil
	default:
		return "text", nil
	}
}

64
pkg/lint/output_test.go Normal file
View file

@ -0,0 +1,64 @@
package lint
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Verifies the full precedence chain: explicit Output beats CI, CI beats the
// schedule's output, the schedule's output beats the config's output, and the
// config's output is used when nothing else is set.
func TestResolveRunOutputFormat_Good_Precedence(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(`output: text
schedules:
  nightly:
    output: json
`), 0o644))
	// Explicit Output wins over everything, including CI.
	format, err := ResolveRunOutputFormat(RunInput{
		Path:   dir,
		Output: "sarif",
		CI:     true,
	})
	require.NoError(t, err)
	assert.Equal(t, "sarif", format)
	// CI wins over the schedule's configured output.
	format, err = ResolveRunOutputFormat(RunInput{
		Path:     dir,
		Schedule: "nightly",
		CI:       true,
	})
	require.NoError(t, err)
	assert.Equal(t, "github", format)
	// The schedule's output wins over the config's top-level output.
	format, err = ResolveRunOutputFormat(RunInput{
		Path:     dir,
		Schedule: "nightly",
	})
	require.NoError(t, err)
	assert.Equal(t, "json", format)
	// With nothing else set, the config's output ("text") is used.
	format, err = ResolveRunOutputFormat(RunInput{
		Path: dir,
	})
	require.NoError(t, err)
	assert.Equal(t, "text", format)
}
// An explicit Output must short-circuit config loading entirely: even with an
// unloadable project path and a broken config reference, no error surfaces.
func TestResolveRunOutputFormat_Good_ExplicitOutputBypassesConfigLoading(t *testing.T) {
	dir := t.TempDir()
	// A regular file where a project directory is expected would make
	// config loading fail if it were attempted.
	projectPath := filepath.Join(dir, "project-file")
	require.NoError(t, os.WriteFile(projectPath, []byte("not a directory"), 0o644))
	format, err := ResolveRunOutputFormat(RunInput{
		Path:     projectPath,
		Output:   "sarif",
		Config:   "broken/config.yaml",
		Schedule: "nightly",
	})
	require.NoError(t, err)
	assert.Equal(t, "sarif", format)
}

View file

@ -4,27 +4,49 @@ import (
"encoding/json"
"fmt"
"io"
"strings"
)
// Summary holds aggregate counts for a set of findings.
type Summary struct {
Total int `json:"total"`
BySeverity map[string]int `json:"by_severity"`
Errors int `json:"errors"`
Warnings int `json:"warnings"`
Info int `json:"info"`
Passed bool `json:"passed"`
BySeverity map[string]int `json:"by_severity,omitempty"`
}
// Summarise counts findings by severity.
//
// summary := lint.Summarise(findings)
func Summarise(findings []Finding) Summary {
s := Summary{
summary := Summary{
Total: len(findings),
BySeverity: make(map[string]int),
}
for _, f := range findings {
s.BySeverity[f.Severity]++
for _, finding := range findings {
severity := strings.TrimSpace(finding.Severity)
if severity == "" {
severity = "warning"
}
summary.BySeverity[severity]++
switch severity {
case "error":
summary.Errors++
case "info":
summary.Info++
default:
summary.Warnings++
}
}
return s
summary.Passed = summary.Errors == 0
return summary
}
// WriteJSON writes findings as a pretty-printed JSON array.
//
// _ = lint.WriteJSON(os.Stdout, findings)
func WriteJSON(w io.Writer, findings []Finding) error {
if findings == nil {
findings = []Finding{}
@ -35,6 +57,8 @@ func WriteJSON(w io.Writer, findings []Finding) error {
}
// WriteJSONL writes findings as newline-delimited JSON (one object per line).
//
// _ = lint.WriteJSONL(os.Stdout, findings)
func WriteJSONL(w io.Writer, findings []Finding) error {
for _, f := range findings {
data, err := json.Marshal(f)
@ -48,11 +72,172 @@ func WriteJSONL(w io.Writer, findings []Finding) error {
return nil
}
// WriteText writes findings in a human-readable format:
// WriteText writes findings in a human-readable format.
//
// file:line [severity] title (rule-id)
// lint.WriteText(os.Stdout, findings)
func WriteText(w io.Writer, findings []Finding) {
for _, f := range findings {
fmt.Fprintf(w, "%s:%d [%s] %s (%s)\n", f.File, f.Line, f.Severity, f.Title, f.RuleID)
for _, finding := range findings {
message := finding.Message
if message == "" {
message = finding.Title
}
code := finding.Code
if code == "" {
code = finding.RuleID
}
fmt.Fprintf(w, "%s:%d [%s] %s (%s)\n", finding.File, finding.Line, finding.Severity, message, code)
}
}
// WriteReportJSON writes the RFC report document as pretty-printed JSON.
//
//	_ = lint.WriteReportJSON(os.Stdout, report)
func WriteReportJSON(w io.Writer, report Report) error {
	encoder := json.NewEncoder(w)
	encoder.SetIndent("", " ")
	return encoder.Encode(report)
}
// WriteReportText writes report findings followed by a short summary line.
//
//	lint.WriteReportText(os.Stdout, report)
func WriteReportText(w io.Writer, report Report) {
	WriteText(w, report.Findings)
	summary := report.Summary
	fmt.Fprintf(w, "\n%d finding(s): %d error(s), %d warning(s), %d info\n",
		summary.Total, summary.Errors, summary.Warnings, summary.Info)
}
// WriteReportGitHub writes GitHub Actions annotation lines, one per finding.
//
// Each line has the form "::LEVEL file=F,line=N,col=C::[tool] message (code)";
// the location part is omitted when the finding carries no file.
//
//	lint.WriteReportGitHub(os.Stdout, report)
func WriteReportGitHub(w io.Writer, report Report) {
	for _, finding := range report.Findings {
		var location strings.Builder
		if finding.File != "" {
			fmt.Fprintf(&location, " file=%s", finding.File)
			if finding.Line > 0 {
				fmt.Fprintf(&location, ",line=%d", finding.Line)
			}
			if finding.Column > 0 {
				fmt.Fprintf(&location, ",col=%d", finding.Column)
			}
		}

		// Prefer the newer Message/Code fields, falling back to the
		// legacy Title/RuleID pair.
		message := finding.Message
		if message == "" {
			message = finding.Title
		}
		code := finding.Code
		if code == "" {
			code = finding.RuleID
		}

		level := githubAnnotationLevel(finding.Severity)
		fmt.Fprintf(w, "::%s%s::[%s] %s (%s)\n", level, location.String(), finding.Tool, message, code)
	}
}
// WriteReportSARIF writes a minimal SARIF 2.1.0 document for code scanning tools.
//
// Each finding becomes one result; the rule id prefers Code over RuleID and
// the message prefers Message over Title. Findings without a file carry no
// location.
//
//	_ = lint.WriteReportSARIF(os.Stdout, report)
func WriteReportSARIF(w io.Writer, report Report) error {
	// Local types mirror just the slice of the SARIF schema we emit.
	type sarifMessage struct {
		Text string `json:"text"`
	}
	type sarifRegion struct {
		StartLine   int `json:"startLine,omitempty"`
		StartColumn int `json:"startColumn,omitempty"`
	}
	type sarifArtifactLocation struct {
		URI string `json:"uri,omitempty"`
	}
	type sarifPhysicalLocation struct {
		ArtifactLocation sarifArtifactLocation `json:"artifactLocation"`
		Region           sarifRegion           `json:"region,omitempty"`
	}
	type sarifLocation struct {
		PhysicalLocation sarifPhysicalLocation `json:"physicalLocation"`
	}
	type sarifResult struct {
		RuleID    string          `json:"ruleId,omitempty"`
		Level     string          `json:"level,omitempty"`
		Message   sarifMessage    `json:"message"`
		Locations []sarifLocation `json:"locations,omitempty"`
	}
	type sarifRun struct {
		Tool struct {
			Driver struct {
				Name string `json:"name"`
			} `json:"driver"`
		} `json:"tool"`
		Results []sarifResult `json:"results"`
	}
	type sarifLog struct {
		Version string     `json:"version"`
		Schema  string     `json:"$schema"`
		Runs    []sarifRun `json:"runs"`
	}

	run := sarifRun{}
	run.Tool.Driver.Name = "core-lint"

	for _, finding := range report.Findings {
		text := finding.Message
		if text == "" {
			text = finding.Title
		}
		rule := finding.Code
		if rule == "" {
			rule = finding.RuleID
		}

		entry := sarifResult{
			RuleID:  rule,
			Level:   sarifLevel(finding.Severity),
			Message: sarifMessage{Text: text},
		}
		if finding.File != "" {
			entry.Locations = []sarifLocation{{
				PhysicalLocation: sarifPhysicalLocation{
					ArtifactLocation: sarifArtifactLocation{URI: finding.File},
					Region: sarifRegion{
						StartLine:   finding.Line,
						StartColumn: finding.Column,
					},
				},
			}}
		}
		run.Results = append(run.Results, entry)
	}

	document := sarifLog{
		Version: "2.1.0",
		Schema:  "https://json.schemastore.org/sarif-2.1.0.json",
		Runs:    []sarifRun{run},
	}
	return json.NewEncoder(w).Encode(document)
}
// githubAnnotationLevel maps a finding severity to a GitHub annotation level.
// "error" stays "error", "info" becomes "notice"; anything else — including
// empty and unknown severities — degrades to "warning".
func githubAnnotationLevel(severity string) string {
	normalised := strings.ToLower(strings.TrimSpace(severity))
	if normalised == "error" {
		return "error"
	}
	if normalised == "info" {
		return "notice"
	}
	return "warning"
}
// sarifLevel maps a finding severity to a SARIF result level.
// "info" maps to SARIF's "note"; empty and unknown severities degrade to
// "warning".
func sarifLevel(severity string) string {
	levels := map[string]string{
		"error":   "error",
		"warning": "warning",
		"info":    "note",
	}
	if level, ok := levels[strings.ToLower(strings.TrimSpace(severity))]; ok {
		return level
	}
	return "warning"
}

View file

@ -134,3 +134,45 @@ func TestWriteText_Good_Empty(t *testing.T) {
WriteText(&buf, nil)
assert.Empty(t, buf.String())
}
// An "info" finding must be emitted as a GitHub "::notice" annotation with
// file, line and column in the location segment.
func TestWriteReportGitHub_Good_MapsInfoToNotice(t *testing.T) {
	var buf bytes.Buffer
	WriteReportGitHub(&buf, Report{
		Findings: []Finding{{
			Tool:     "demo",
			File:     "example.go",
			Line:     7,
			Column:   3,
			Severity: "info",
			Code:     "demo-rule",
			Message:  "explanation",
		}},
	})
	assert.Contains(t, buf.String(), "::notice file=example.go,line=7,col=3::[demo] explanation (demo-rule)")
}
// An "info" finding must produce a SARIF result with level "note"; the
// emitted document is decoded generically to inspect runs[0].results[0].
func TestWriteReportSARIF_Good_MapsInfoToNote(t *testing.T) {
	var buf bytes.Buffer
	err := WriteReportSARIF(&buf, Report{
		Findings: []Finding{{
			Tool:     "demo",
			File:     "example.go",
			Line:     7,
			Column:   3,
			Severity: "info",
			Code:     "demo-rule",
			Message:  "explanation",
		}},
	})
	require.NoError(t, err)
	var decoded map[string]any
	require.NoError(t, json.Unmarshal(buf.Bytes(), &decoded))
	runs := decoded["runs"].([]any)
	results := runs[0].(map[string]any)["results"].([]any)
	assert.Equal(t, "note", results[0].(map[string]any)["level"])
}

View file

@ -5,6 +5,7 @@ import (
"regexp"
"slices"
coreerr "forge.lthn.ai/core/go-log"
"gopkg.in/yaml.v3"
)
@ -32,35 +33,35 @@ type Rule struct {
// Validate checks that the rule has all required fields and that regex patterns compile.
func (r *Rule) Validate() error {
if r.ID == "" {
return fmt.Errorf("rule validation: id must not be empty")
return coreerr.E("Rule.Validate", "id must not be empty", nil)
}
if r.Title == "" {
return fmt.Errorf("rule %s: title must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": title must not be empty", nil)
}
if r.Severity == "" {
return fmt.Errorf("rule %s: severity must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": severity must not be empty", nil)
}
if !slices.Contains(validSeverities, r.Severity) {
return fmt.Errorf("rule %s: severity %q is not valid (want one of %v)", r.ID, r.Severity, validSeverities)
return coreerr.E("Rule.Validate", fmt.Sprintf("rule %s: severity %q is not valid (want one of %v)", r.ID, r.Severity, validSeverities), nil)
}
if len(r.Languages) == 0 {
return fmt.Errorf("rule %s: languages must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": languages must not be empty", nil)
}
if r.Pattern == "" {
return fmt.Errorf("rule %s: pattern must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": pattern must not be empty", nil)
}
if r.Detection == "" {
return fmt.Errorf("rule %s: detection must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": detection must not be empty", nil)
}
// Only validate regex compilation when detection type is regex.
if r.Detection == "regex" {
if _, err := regexp.Compile(r.Pattern); err != nil {
return fmt.Errorf("rule %s: pattern does not compile: %w", r.ID, err)
return coreerr.E("Rule.Validate", "rule "+r.ID+": pattern does not compile", err)
}
if r.ExcludePattern != "" {
if _, err := regexp.Compile(r.ExcludePattern); err != nil {
return fmt.Errorf("rule %s: exclude_pattern does not compile: %w", r.ID, err)
return coreerr.E("Rule.Validate", "rule "+r.ID+": exclude_pattern does not compile", err)
}
}
}
@ -72,7 +73,7 @@ func (r *Rule) Validate() error {
func ParseRules(data []byte) ([]Rule, error) {
var rules []Rule
if err := yaml.Unmarshal(data, &rules); err != nil {
return nil, fmt.Errorf("parsing rules: %w", err)
return nil, coreerr.E("ParseRules", "parsing rules", err)
}
return rules, nil
}

View file

@ -1,27 +1,34 @@
package lint
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// extensionMap maps file extensions to language identifiers.
var extensionMap = map[string]string{
".go": "go",
".php": "php",
".ts": "ts",
".tsx": "ts",
".js": "js",
".jsx": "js",
".cpp": "cpp",
".cc": "cpp",
".c": "cpp",
".h": "cpp",
".py": "py",
".go": "go",
".php": "php",
".ts": "ts",
".tsx": "ts",
".js": "js",
".jsx": "js",
".cpp": "cpp",
".cc": "cpp",
".c": "cpp",
".h": "cpp",
".py": "python",
".rs": "rust",
".sh": "shell",
".yaml": "yaml",
".yml": "yaml",
".json": "json",
".md": "markdown",
}
// defaultExcludes lists directory names that are always skipped during scanning.
@ -34,32 +41,51 @@ var defaultExcludes = []string{
}
// DetectLanguage returns the language identifier for a filename based on its extension.
// Returns an empty string for unrecognised extensions.
//
// lint.DetectLanguage("main.go")
// lint.DetectLanguage("Dockerfile")
func DetectLanguage(filename string) string {
ext := filepath.Ext(filename)
base := filepath.Base(filename)
if strings.HasPrefix(base, "Dockerfile") {
return "dockerfile"
}
ext := filepath.Ext(base)
if lang, ok := extensionMap[ext]; ok {
return lang
}
return ""
}
// shouldSkipTraversalRoot reports whether a walk root is itself an excluded
// directory (per IsExcludedDir) and therefore should not be traversed at all.
// "." and the filesystem root are never skipped.
func shouldSkipTraversalRoot(path string) bool {
	cleaned := filepath.Clean(path)
	if cleaned == "." {
		return false
	}
	switch base := filepath.Base(cleaned); base {
	case ".", string(filepath.Separator):
		return false
	default:
		return IsExcludedDir(base)
	}
}
// Scanner walks directory trees and matches files against lint rules.
type Scanner struct {
matcher *Matcher
rules []Rule
excludes []string
matcher *Matcher
rules []Rule
}
// NewScanner creates a Scanner with the given rules and default directory exclusions.
func NewScanner(rules []Rule) (*Scanner, error) {
m, err := NewMatcher(rules)
matcher, err := NewMatcher(rules)
if err != nil {
return nil, err
}
return &Scanner{
matcher: m,
rules: rules,
excludes: slices.Clone(defaultExcludes),
matcher: matcher,
rules: rules,
}, nil
}
@ -68,15 +94,19 @@ func NewScanner(rules []Rule) (*Scanner, error) {
func (s *Scanner) ScanDir(root string) ([]Finding, error) {
var findings []Finding
if shouldSkipTraversalRoot(root) {
return findings, nil
}
err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
// Skip excluded directories.
// Skip excluded directories and hidden directories.
if d.IsDir() {
name := d.Name()
if slices.Contains(s.excludes, name) {
if IsExcludedDir(name) {
return filepath.SkipDir
}
return nil
@ -94,13 +124,14 @@ func (s *Scanner) ScanDir(root string) ([]Finding, error) {
return nil
}
content, err := os.ReadFile(path)
raw, err := coreio.Local.Read(path)
if err != nil {
return fmt.Errorf("reading %s: %w", path, err)
return coreerr.E("Scanner.ScanDir", "reading "+path, err)
}
content := []byte(raw)
// Build a matcher scoped to this file's language.
m, err := NewMatcher(langRules)
matcher, err := NewMatcher(langRules)
if err != nil {
return err
}
@ -111,13 +142,13 @@ func (s *Scanner) ScanDir(root string) ([]Finding, error) {
relPath = path
}
found := m.Match(relPath, content)
found := matcher.Match(relPath, content)
findings = append(findings, found...)
return nil
})
if err != nil {
return nil, fmt.Errorf("scanning %s: %w", root, err)
return nil, coreerr.E("Scanner.ScanDir", "scanning "+root, err)
}
return findings, nil
@ -125,10 +156,11 @@ func (s *Scanner) ScanDir(root string) ([]Finding, error) {
// ScanFile scans a single file against all rules.
func (s *Scanner) ScanFile(path string) ([]Finding, error) {
content, err := os.ReadFile(path)
raw, err := coreio.Local.Read(path)
if err != nil {
return nil, fmt.Errorf("reading %s: %w", path, err)
return nil, coreerr.E("Scanner.ScanFile", "reading "+path, err)
}
content := []byte(raw)
lang := DetectLanguage(filepath.Base(path))
if lang == "" {
@ -140,12 +172,12 @@ func (s *Scanner) ScanFile(path string) ([]Finding, error) {
return nil, nil
}
m, err := NewMatcher(langRules)
matcher, err := NewMatcher(langRules)
if err != nil {
return nil, err
}
return m.Match(path, content), nil
return matcher.Match(path, content), nil
}
// filterRulesByLanguage returns rules that include the given language.

View file

@ -25,9 +25,10 @@ func TestDetectLanguage_Good(t *testing.T) {
{"core.c", "cpp"},
{"app.js", "js"},
{"component.jsx", "js"},
{"unknown.rs", ""},
{"unknown.rs", "rust"},
{"noextension", ""},
{"file.py", "py"},
{"file.py", "python"},
{"Dockerfile", "dockerfile"},
}
for _, tt := range tests {
@ -180,6 +181,34 @@ func TestScanFile_Good(t *testing.T) {
assert.Equal(t, "test-panic", findings[0].RuleID)
}
// ScanFile must classify ".py" as "python" and apply python-scoped rules.
func TestScanFile_Good_Python(t *testing.T) {
	dir := t.TempDir()
	file := filepath.Join(dir, "app.py")
	err := os.WriteFile(file, []byte("print('hello')\n# TODO: fix\n"), 0o644)
	require.NoError(t, err)
	rules := []Rule{
		{
			ID:        "python-todo",
			Title:     "Python TODO",
			Severity:  "low",
			Languages: []string{"python"},
			Pattern:   `TODO`,
			Fix:       "Remove TODO",
			Detection: "regex",
		},
	}
	s, err := NewScanner(rules)
	require.NoError(t, err)
	findings, err := s.ScanFile(file)
	require.NoError(t, err)
	require.Len(t, findings, 1)
	assert.Equal(t, "python-todo", findings[0].RuleID)
	assert.Equal(t, "python", DetectLanguage(file))
}
func TestScanDir_Good_Subdirectories(t *testing.T) {
dir := t.TempDir()
@ -209,6 +238,58 @@ func TestScanDir_Good_Subdirectories(t *testing.T) {
require.Len(t, findings, 1)
}
// ScanDir pointed directly at a hidden directory (e.g. ".git") must refuse to
// traverse it and return no findings rather than scanning its contents.
func TestScanDir_Good_SkipsHiddenRootDirectory(t *testing.T) {
	dir := t.TempDir()
	hiddenDir := filepath.Join(dir, ".git")
	require.NoError(t, os.MkdirAll(hiddenDir, 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(hiddenDir, "main.go"), []byte("// TODO: hidden\n"), 0o644))
	rules := []Rule{
		{
			ID:        "test-001",
			Title:     "Found a TODO",
			Severity:  "low",
			Languages: []string{"go"},
			Pattern:   `TODO`,
			Fix:       "Remove TODO",
			Detection: "regex",
		},
	}
	s, err := NewScanner(rules)
	require.NoError(t, err)
	findings, err := s.ScanDir(hiddenDir)
	require.NoError(t, err)
	assert.Empty(t, findings)
}
// ScanDir must prune hidden directories nested below the root (here
// services/.generated) so files inside them produce no findings.
func TestScanDir_Good_SkipsHiddenNestedDirectory(t *testing.T) {
	dir := t.TempDir()
	hiddenDir := filepath.Join(dir, "services", ".generated")
	require.NoError(t, os.MkdirAll(hiddenDir, 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(hiddenDir, "main.go"), []byte("// TODO: hidden\n"), 0o644))
	rules := []Rule{
		{
			ID:        "test-001",
			Title:     "Found a TODO",
			Severity:  "low",
			Languages: []string{"go"},
			Pattern:   `TODO`,
			Fix:       "Remove TODO",
			Detection: "regex",
		},
	}
	s, err := NewScanner(rules)
	require.NoError(t, err)
	findings, err := s.ScanDir(dir)
	require.NoError(t, err)
	assert.Empty(t, findings)
}
func TestScanDir_Bad_NonexistentDir(t *testing.T) {
rules := []Rule{
{

746
pkg/lint/service.go Normal file
View file

@ -0,0 +1,746 @@
package lint
import (
"context"
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
"time"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
const (
hookStartMarker = "# core-lint hook start"
hookEndMarker = "# core-lint hook end"
)
// RunInput is the DTO for `core-lint run` and the language/category shortcuts.
//
// input := lint.RunInput{Path: ".", Schedule: "nightly", Output: "json"}
// report, err := lint.NewService().Run(ctx, input)
type RunInput struct {
Path string `json:"path"`
Output string `json:"output,omitempty"`
Config string `json:"config,omitempty"`
Schedule string `json:"schedule,omitempty"`
FailOn string `json:"fail_on,omitempty"`
Category string `json:"category,omitempty"`
Lang string `json:"lang,omitempty"`
Hook bool `json:"hook,omitempty"`
CI bool `json:"ci,omitempty"`
Files []string `json:"files,omitempty"`
SBOM bool `json:"sbom,omitempty"`
}
// ToolInfo describes a supported linter tool and whether it is available in PATH.
//
// tools := lint.NewService().Tools([]string{"go"})
type ToolInfo struct {
Name string `json:"name"`
Available bool `json:"available"`
Languages []string `json:"languages"`
Category string `json:"category"`
Entitlement string `json:"entitlement,omitempty"`
}
// Report aggregates every tool run into a single output document.
//
//	report, err := lint.NewService().Run(context.Background(), lint.RunInput{Path: ".", Output: "json"})
type Report struct {
	Project   string    `json:"project"`   // base name of the project directory
	Timestamp time.Time `json:"timestamp"` // UTC start time of the run
	Duration  string    `json:"duration"`  // wall time of the run, rounded to milliseconds
	Languages []string  `json:"languages"` // languages selected or detected for the run (never nil)
	Tools     []ToolRun `json:"tools"`     // one entry per adapter executed or skipped, sorted by name (never nil)
	Findings  []Finding `json:"findings"`  // deduplicated, sorted findings (never nil)
	Summary   Summary   `json:"summary"`   // aggregate counts plus the fail-on threshold verdict
}
// ToolRun records the execution status of one adapter.
type ToolRun struct {
	Name     string `json:"name"`              // adapter name
	Version  string `json:"version,omitempty"` // tool version, when the adapter reports one
	Status   string `json:"status"`            // run outcome; "skipped" is used for slow adapters in hook mode
	Duration string `json:"duration"`          // adapter wall time ("0s" when skipped)
	Findings int    `json:"findings"`          // number of findings the adapter produced
}
// Service orchestrates the configured lint adapters for a project.
//
//	svc := lint.NewService()
//	report, err := svc.Run(ctx, lint.RunInput{Path: ".", Output: "json"})
type Service struct {
	// adapters is the registry consulted by Run and Tools; NewService fills
	// it with defaultAdapters().
	adapters []Adapter
}
// NewService constructs a lint orchestrator with the built-in adapter registry.
//
//	svc := lint.NewService()
func NewService() *Service {
	service := &Service{}
	service.adapters = defaultAdapters()
	return service
}
// Run executes the selected adapters and returns the merged report.
//
// Threshold precedence is: explicit input.FailOn > schedule fail_on > project
// config fail_on. A scoped run (hook mode, explicit file list, schedule paths
// or non-default configured paths) that resolves to zero files short-circuits
// to an empty report that only evaluates the threshold.
//
//	report, err := lint.NewService().Run(ctx, lint.RunInput{Path: ".", Output: "json"})
func (service *Service) Run(ctx context.Context, input RunInput) (Report, error) {
	startedAt := time.Now().UTC()
	input = normaliseRunInput(input)
	config, _, err := LoadProjectConfig(input.Path, input.Config)
	if err != nil {
		return Report{}, err
	}
	schedule, err := ResolveSchedule(config, input.Schedule)
	if err != nil {
		return Report{}, err
	}
	if input.FailOn == "" && schedule != nil && schedule.FailOn != "" {
		input.FailOn = schedule.FailOn
	}
	if input.FailOn == "" {
		input.FailOn = config.FailOn
	}
	files, scoped, err := service.scopeFiles(input.Path, config, input, schedule)
	if err != nil {
		return Report{}, err
	}
	// Hook mode always scopes (see scopeFiles), so this single check replaces
	// the previous duplicated hook/scoped empty-file branches.
	if scoped && len(files) == 0 {
		report := Report{
			Project:   projectName(input.Path),
			Timestamp: startedAt,
			Duration:  time.Since(startedAt).Round(time.Millisecond).String(),
			Languages: []string{},
			Tools:     []ToolRun{},
			Findings:  []Finding{},
			Summary:   Summarise(nil),
		}
		report.Summary.Passed = passesThreshold(report.Summary, input.FailOn)
		return report, nil
	}
	languages := service.languagesForInput(input, files, scoped)
	selectedAdapters := service.selectAdapters(config, languages, input, schedule)
	var findings []Finding
	var toolRuns []ToolRun
	for _, adapter := range selectedAdapters {
		// Pre-commit hooks only run fast adapters; slow ones are recorded as skipped.
		if input.Hook && !adapter.Fast() {
			toolRuns = append(toolRuns, ToolRun{
				Name:     adapter.Name(),
				Status:   "skipped",
				Duration: "0s",
				Findings: 0,
			})
			continue
		}
		result := adapter.Run(ctx, input, files)
		toolRuns = append(toolRuns, result.Tool)
		findings = append(findings, normaliseReportFindings(result.Findings, input.Path)...)
	}
	findings = dedupeFindings(findings)
	sortToolRuns(toolRuns)
	sortFindings(findings)
	// Guarantee empty slices rather than nil so JSON output renders [] not null.
	if languages == nil {
		languages = []string{}
	}
	if toolRuns == nil {
		toolRuns = []ToolRun{}
	}
	if findings == nil {
		findings = []Finding{}
	}
	report := Report{
		Project:   projectName(input.Path),
		Timestamp: startedAt,
		Duration:  time.Since(startedAt).Round(time.Millisecond).String(),
		Languages: slices.Clone(languages),
		Tools:     toolRuns,
		Findings:  findings,
		Summary:   Summarise(findings),
	}
	report.Summary.Passed = passesThreshold(report.Summary, input.FailOn)
	return report, nil
}
// Tools returns the current adapter inventory for display in the CLI. With a
// non-empty languages filter, only adapters matching one of those languages
// are listed. The result is sorted by name and never nil.
//
//	tools := lint.NewService().Tools([]string{"go"})
func (service *Service) Tools(languages []string) []ToolInfo {
	tools := []ToolInfo{}
	for _, adapter := range service.adapters {
		if len(languages) > 0 && !adapter.MatchesLanguage(languages) {
			continue
		}
		info := ToolInfo{
			Name:        adapter.Name(),
			Available:   adapter.Available(),
			Languages:   slices.Clone(adapter.Languages()),
			Category:    adapter.Category(),
			Entitlement: adapter.Entitlement(),
		}
		tools = append(tools, info)
	}
	slices.SortFunc(tools, func(a ToolInfo, b ToolInfo) int {
		return strings.Compare(a.Name, b.Name)
	})
	return tools
}
// WriteDefaultConfig creates `.core/lint.yaml` in the target project and
// returns the path written. Unless force is set, an existing config file is
// an error rather than being overwritten.
//
//	path, err := svc.WriteDefaultConfig(".", false)
func (service *Service) WriteDefaultConfig(projectPath string, force bool) (string, error) {
	if projectPath == "" {
		projectPath = "."
	}
	targetPath := filepath.Join(projectPath, DefaultConfigPath)
	if !force {
		if _, statErr := os.Stat(targetPath); statErr == nil {
			return "", coreerr.E("Service.WriteDefaultConfig", targetPath+" already exists", nil)
		}
	}
	parentDir := filepath.Dir(targetPath)
	if err := os.MkdirAll(parentDir, 0o755); err != nil {
		return "", coreerr.E("Service.WriteDefaultConfig", "mkdir "+parentDir, err)
	}
	content, err := DefaultConfigYAML()
	if err != nil {
		return "", err
	}
	if writeErr := coreio.Local.Write(targetPath, content); writeErr != nil {
		return "", coreerr.E("Service.WriteDefaultConfig", "write "+targetPath, writeErr)
	}
	return targetPath, nil
}
// InstallHook adds a git pre-commit hook that runs `core-lint run --hook`.
//
// A missing hook file gets a fresh "#!/bin/sh" script; an existing hook keeps
// its content with the core-lint block appended after it; a hook that already
// contains the block is left untouched (idempotent).
//
//	_ = lint.NewService().InstallHook(".")
func (service *Service) InstallHook(projectPath string) error {
	hookPath, err := hookFilePath(projectPath)
	if err != nil {
		return err
	}
	// Default content for a fresh hook file.
	content := "#!/bin/sh\n" + hookScriptBlock(false)
	raw, readErr := coreio.Local.Read(hookPath)
	if readErr == nil {
		if strings.Contains(raw, hookStartMarker) {
			// Block already installed; nothing to do.
			return nil
		}
		trimmed := strings.TrimRight(raw, "\n")
		// An effectively empty hook keeps the fresh-file content; the
		// previous `trimmed == ""` branch redundantly reassigned it.
		if trimmed != "" {
			content = trimmed + "\n\n" + hookScriptBlock(true)
		}
	}
	if err := os.MkdirAll(filepath.Dir(hookPath), 0o755); err != nil {
		return coreerr.E("Service.InstallHook", "mkdir "+filepath.Dir(hookPath), err)
	}
	if err := coreio.Local.Write(hookPath, content); err != nil {
		return coreerr.E("Service.InstallHook", "write "+hookPath, err)
	}
	if err := os.Chmod(hookPath, 0o755); err != nil {
		return coreerr.E("Service.InstallHook", "chmod "+hookPath, err)
	}
	return nil
}
// RemoveHook removes the block previously installed by InstallHook. A missing
// hook or a hook without a well-formed marker pair is a no-op; a hook that
// contained only the block is deleted outright.
//
//	_ = lint.NewService().RemoveHook(".")
func (service *Service) RemoveHook(projectPath string) error {
	hookPath, err := hookFilePath(projectPath)
	if err != nil {
		return err
	}
	raw, readErr := coreio.Local.Read(hookPath)
	if readErr != nil {
		if os.IsNotExist(readErr) {
			return nil
		}
		return coreerr.E("Service.RemoveHook", "read "+hookPath, readErr)
	}
	startIndex := strings.Index(raw, hookStartMarker)
	endIndex := strings.Index(raw, hookEndMarker)
	if startIndex < 0 || endIndex < 0 || endIndex < startIndex {
		return nil
	}
	remainder := strings.TrimRight(raw[:startIndex]+raw[endIndex+len(hookEndMarker):], "\n")
	if strings.TrimSpace(remainder) == "" {
		// Nothing but our block was present; delete the hook file entirely.
		if removeErr := os.Remove(hookPath); removeErr != nil && !os.IsNotExist(removeErr) {
			return coreerr.E("Service.RemoveHook", "remove "+hookPath, removeErr)
		}
		return nil
	}
	if writeErr := coreio.Local.Write(hookPath, remainder); writeErr != nil {
		return coreerr.E("Service.RemoveHook", "write "+hookPath, writeErr)
	}
	return nil
}
// languagesForInput decides which languages the run targets: an explicit
// input.Lang wins, a scoped run detects from the scoped file list, otherwise
// the whole project tree is probed.
func (service *Service) languagesForInput(input RunInput, files []string, scoped bool) []string {
	switch {
	case input.Lang != "":
		return []string{input.Lang}
	case scoped:
		return detectFromFiles(files)
	default:
		return Detect(input.Path)
	}
}
// scopeFiles resolves which files the run is limited to. The boolean reports
// whether any scoping applied; false means "lint the whole tree".
func (service *Service) scopeFiles(projectPath string, config LintConfig, input RunInput, schedule *Schedule) ([]string, bool, error) {
	// An explicit file list — even an empty one — always wins.
	if input.Files != nil {
		return slices.Clone(input.Files), true, nil
	}
	// Hook mode lints exactly what is staged in git.
	if input.Hook {
		staged, err := service.stagedFiles(projectPath)
		return staged, true, err
	}
	// A schedule may narrow the run to its own path set.
	if schedule != nil && len(schedule.Paths) > 0 {
		scoped, err := collectConfiguredFiles(projectPath, schedule.Paths, config.Exclude)
		return scoped, true, err
	}
	// Non-default paths or excludes in the project config also scope the run.
	defaults := DefaultConfig()
	if !slices.Equal(config.Paths, defaults.Paths) || !slices.Equal(config.Exclude, defaults.Exclude) {
		scoped, err := collectConfiguredFiles(projectPath, config.Paths, config.Exclude)
		return scoped, true, err
	}
	return nil, false, nil
}
// selectAdapters filters the registry down to the adapters this run should
// execute, honouring configured tool names, the category filter and language
// matching. Go runs always get the built-in catalog adapter first, except for
// compliance-only runs.
func (service *Service) selectAdapters(config LintConfig, languages []string, input RunInput, schedule *Schedule) []Adapter {
	categories := selectedCategories(input, schedule)
	enabled := make(map[string]bool)
	for _, name := range enabledToolNames(config, languages, input, categories) {
		enabled[name] = true
	}
	var selected []Adapter
	for _, adapter := range service.adapters {
		nameOK := len(enabled) == 0 || enabled[adapter.Name()]
		categoryOK := len(categories) == 0 || slices.Contains(categories, adapter.Category())
		if nameOK && categoryOK && adapter.MatchesLanguage(languages) {
			selected = append(selected, adapter)
		}
	}
	if slices.Contains(languages, "go") && !slices.Contains(categories, "compliance") && !hasAdapter(selected, "catalog") {
		selected = append([]Adapter{newCatalogAdapter()}, selected...)
	}
	return selected
}
// stagedFiles lists the paths currently staged in git; this is the file scope
// used by hook mode.
func (service *Service) stagedFiles(projectPath string) ([]string, error) {
	toolkit := NewToolkit(projectPath)
	stdout, stderr, exitCode, err := toolkit.Run("git", "diff", "--cached", "--name-only")
	if err != nil && exitCode != 0 {
		return nil, coreerr.E("Service.stagedFiles", "git diff --cached --name-only: "+strings.TrimSpace(stderr), err)
	}
	var files []string
	for _, line := range strings.Split(strings.TrimSpace(stdout), "\n") {
		if trimmed := strings.TrimSpace(line); trimmed != "" {
			files = append(files, trimmed)
		}
	}
	return files, nil
}
// collectConfiguredFiles expands the configured path entries into a sorted,
// de-duplicated list of slash-separated, project-relative file paths.
// Directory entries are walked recursively; hidden path segments and
// configured excludes are filtered in both project-relative and raw form.
func collectConfiguredFiles(projectPath string, paths []string, excludes []string) ([]string, error) {
	seen := make(map[string]bool)
	var files []string
	for _, path := range paths {
		if path == "" {
			continue
		}
		absolutePath := path
		if !filepath.IsAbs(absolutePath) {
			absolutePath = filepath.Join(projectPath, path)
		}
		info, err := os.Stat(absolutePath)
		if err != nil {
			return nil, coreerr.E("collectConfiguredFiles", "stat "+absolutePath, err)
		}
		// Directory roots that shouldSkipTraversalRoot rejects (e.g. hidden
		// configured roots — see TestServiceRun_Good_SkipsHiddenConfiguredRootDirectory)
		// are dropped wholesale.
		if info.IsDir() && shouldSkipTraversalRoot(absolutePath) {
			continue
		}
		// addFile applies the hidden-segment and exclude filters to both the
		// relative and the raw candidate path, then records the relative path
		// exactly once.
		addFile := func(candidate string) {
			relativePath := relativeConfiguredPath(projectPath, candidate)
			if hasHiddenDirectory(relativePath) || hasHiddenDirectory(filepath.ToSlash(filepath.Clean(candidate))) {
				return
			}
			if matchesConfiguredExclude(relativePath, excludes) || matchesConfiguredExclude(filepath.ToSlash(filepath.Clean(candidate)), excludes) {
				return
			}
			if seen[relativePath] {
				return
			}
			seen[relativePath] = true
			files = append(files, relativePath)
		}
		if !info.IsDir() {
			addFile(absolutePath)
			continue
		}
		walkErr := filepath.WalkDir(absolutePath, func(currentPath string, entry fs.DirEntry, walkErr error) error {
			if walkErr != nil {
				return walkErr
			}
			if entry.IsDir() {
				relativeDir := relativeConfiguredPath(projectPath, currentPath)
				// Excluded directories are pruned from the walk entirely.
				if matchesConfiguredExclude(relativeDir, excludes) || matchesConfiguredExclude(filepath.ToSlash(filepath.Clean(currentPath)), excludes) {
					return filepath.SkipDir
				}
				// Well-known excluded directory names are pruned too, but
				// never the configured walk root itself.
				if currentPath != absolutePath && IsExcludedDir(entry.Name()) {
					return filepath.SkipDir
				}
				return nil
			}
			addFile(currentPath)
			return nil
		})
		if walkErr != nil {
			return nil, coreerr.E("collectConfiguredFiles", "walk "+absolutePath, walkErr)
		}
	}
	slices.Sort(files)
	return files, nil
}
// relativeConfiguredPath rewrites candidate relative to projectPath when the
// candidate sits inside the project; paths outside it (or with an empty
// projectPath) are kept as given. The result is cleaned and slash-separated.
func relativeConfiguredPath(projectPath string, candidate string) string {
	result := candidate
	if projectPath != "" {
		rel, relErr := filepath.Rel(projectPath, candidate)
		inProject := relErr == nil && rel != "" && !strings.HasPrefix(rel, "..")
		if inProject {
			result = rel
		}
	}
	return filepath.ToSlash(filepath.Clean(result))
}
// matchesConfiguredExclude reports whether candidate equals, or lives under,
// one of the configured exclude paths. Excludes are trimmed and cleaned
// first; empty and "." entries are ignored.
func matchesConfiguredExclude(candidate string, excludes []string) bool {
	if candidate == "" || len(excludes) == 0 {
		return false
	}
	normalisedCandidate := filepath.ToSlash(filepath.Clean(candidate))
	for _, exclude := range excludes {
		normalised := filepath.ToSlash(filepath.Clean(strings.TrimSpace(exclude)))
		if normalised == "" || normalised == "." {
			continue
		}
		normalised = strings.TrimSuffix(normalised, "/")
		if normalisedCandidate == normalised || strings.HasPrefix(normalisedCandidate, normalised+"/") {
			return true
		}
	}
	return false
}
// hasHiddenDirectory reports whether any path segment of candidate starts
// with a dot. The navigation segments "." and ".." are not hidden entries and
// are ignored.
func hasHiddenDirectory(candidate string) bool {
	if candidate == "" {
		return false
	}
	segments := strings.Split(filepath.ToSlash(filepath.Clean(candidate)), "/")
	for _, segment := range segments {
		switch segment {
		case "", ".", "..":
			continue
		}
		if segment[0] == '.' {
			return true
		}
	}
	return false
}
// enabledToolNames gathers the configured tool names applicable to this run:
// the security/compliance category groups, the per-language groups, the infra
// group, and the CI/SBOM extras. An explicit input.Lang limits the result to
// that language's group (plus any requested category groups).
func enabledToolNames(config LintConfig, languages []string, input RunInput, categories []string) []string {
	var names []string
	if slices.Contains(categories, "security") {
		names = append(names, config.Lint.Security...)
	}
	if slices.Contains(categories, "compliance") {
		names = append(names, config.Lint.Compliance...)
	}
	switch {
	case input.Lang != "":
		// Language shortcut: only that language's group applies.
		names = append(names, groupForLanguage(config.Lint, input.Lang)...)
	default:
		if shouldIncludeLanguageGroups(categories) {
			for _, language := range languages {
				names = append(names, groupForLanguage(config.Lint, language)...)
			}
		}
		if shouldIncludeInfraGroups(categories) {
			names = append(names, config.Lint.Infra...)
		}
		if input.CI {
			names = append(names, config.Lint.Security...)
		}
		if input.SBOM {
			names = append(names, config.Lint.Compliance...)
		}
	}
	return dedupeStrings(names)
}
// selectedCategories resolves the category filter: an explicit input.Category
// wins, then the schedule's category list; otherwise nil (no filtering).
func selectedCategories(input RunInput, schedule *Schedule) []string {
	if input.Category != "" {
		return []string{input.Category}
	}
	if schedule != nil {
		return slices.Clone(schedule.Categories)
	}
	return nil
}
// shouldIncludeLanguageGroups reports whether per-language tool groups apply:
// true for an unfiltered run, or when any requested category is something
// other than security/compliance. Mirrors shouldIncludeInfraGroups.
func shouldIncludeLanguageGroups(categories []string) bool {
	if len(categories) == 0 {
		return true
	}
	for _, category := range categories {
		if category != "security" && category != "compliance" {
			return true
		}
	}
	return false
}
// shouldIncludeInfraGroups reports whether the infra tool group applies: true
// for an unfiltered run, or when any requested category is something other
// than security/compliance. Mirrors shouldIncludeLanguageGroups.
func shouldIncludeInfraGroups(categories []string) bool {
	if len(categories) == 0 {
		return true
	}
	for _, category := range categories {
		if category != "security" && category != "compliance" {
			return true
		}
	}
	return false
}
// groupForLanguage maps a detected language to its configured tool group. The
// infra-adjacent formats (shell, dockerfile, yaml, json, markdown) all share
// the infra group; unknown languages have no group.
func groupForLanguage(groups ToolGroups, language string) []string {
	byLanguage := map[string][]string{
		"go":     groups.Go,
		"php":    groups.PHP,
		"js":     groups.JS,
		"ts":     groups.TS,
		"python": groups.Python,
	}
	if group, ok := byLanguage[language]; ok {
		return group
	}
	switch language {
	case "shell", "dockerfile", "yaml", "json", "markdown":
		return groups.Infra
	}
	return nil
}
// hookFilePath locates the pre-commit hook file of the project's git
// repository, resolving a relative `git rev-parse --git-dir` result against
// the project path.
func hookFilePath(projectPath string) (string, error) {
	if projectPath == "" {
		projectPath = "."
	}
	stdout, stderr, exitCode, err := NewToolkit(projectPath).Run("git", "rev-parse", "--git-dir")
	if err != nil && exitCode != 0 {
		return "", coreerr.E("hookFilePath", "git rev-parse --git-dir: "+strings.TrimSpace(stderr), err)
	}
	gitDir := strings.TrimSpace(stdout)
	if gitDir == "" {
		return "", coreerr.E("hookFilePath", "git directory is empty", nil)
	}
	if !filepath.IsAbs(gitDir) {
		gitDir = filepath.Join(projectPath, gitDir)
	}
	return filepath.Join(gitDir, "hooks", "pre-commit"), nil
}
// hookScriptBlock renders the marker-delimited fragment written into the
// pre-commit hook. A fresh hook execs core-lint directly; a block appended to
// an existing hook instead runs it and propagates a failing exit code.
func hookScriptBlock(appended bool) string {
	command := "exec core-lint run --hook"
	if appended {
		command = "core-lint run --hook || exit $?"
	}
	lines := []string{hookStartMarker, "# Installed by core-lint", command, hookEndMarker, ""}
	return strings.Join(lines, "\n")
}
// normaliseRunInput applies input defaults: the current directory as the
// project path, and "github" output for CI runs without an explicit format.
func normaliseRunInput(input RunInput) RunInput {
	if input.Path == "" {
		input.Path = "."
	}
	if input.Output == "" && input.CI {
		input.Output = "github"
	}
	return input
}
// normaliseReportFindings fills in defaulted finding fields (code from rule
// ID, message from title, "catalog" as the tool, "warning" severity) and
// rewrites file paths inside the project to slash-separated relative form.
func normaliseReportFindings(findings []Finding, projectPath string) []Finding {
	normalised := make([]Finding, 0, len(findings))
	for _, finding := range findings {
		if finding.Code == "" {
			finding.Code = finding.RuleID
		}
		if finding.Message == "" {
			finding.Message = finding.Title
		}
		if finding.Tool == "" {
			finding.Tool = "catalog"
		}
		switch finding.Severity {
		case "":
			finding.Severity = "warning"
		default:
			finding.Severity = normaliseSeverity(finding.Severity)
		}
		if finding.File != "" && projectPath != "" {
			relativePath, relErr := filepath.Rel(projectPath, finding.File)
			if relErr == nil && relativePath != "" && !strings.HasPrefix(relativePath, "..") {
				finding.File = filepath.ToSlash(relativePath)
			} else {
				finding.File = filepath.ToSlash(finding.File)
			}
		}
		normalised = append(normalised, finding)
	}
	return normalised
}
// projectName derives a display name for the project: the last element of the
// absolute form of path, falling back to the raw path's base if absolutising
// fails.
func projectName(path string) string {
	if absolutePath, err := filepath.Abs(path); err == nil {
		return filepath.Base(absolutePath)
	}
	return filepath.Base(path)
}
// dedupeStrings drops empty strings and later duplicates, preserving
// first-occurrence order. When nothing survives the result is nil.
func dedupeStrings(values []string) []string {
	var deduped []string
	seen := map[string]bool{}
	for _, value := range values {
		if value != "" && !seen[value] {
			seen[value] = true
			deduped = append(deduped, value)
		}
	}
	return deduped
}
// hasAdapter reports whether an adapter with the given name is already in the
// selection.
func hasAdapter(adapters []Adapter, name string) bool {
	return slices.ContainsFunc(adapters, func(adapter Adapter) bool {
		return adapter.Name() == name
	})
}
// passesThreshold evaluates a summary against the fail-on threshold.
// "warning" fails on errors or warnings, "info" fails on any finding, and
// "error" — the default, also used for empty or unrecognised values — fails
// on errors only. Matching is case-insensitive and whitespace-tolerant.
func passesThreshold(summary Summary, threshold string) bool {
	normalised := strings.ToLower(strings.TrimSpace(threshold))
	if normalised == "warning" {
		return summary.Errors == 0 && summary.Warnings == 0
	}
	if normalised == "info" {
		return summary.Total == 0
	}
	// "", "error" and anything unrecognised fall back to errors-only.
	return summary.Errors == 0
}
// sortFindings orders findings by file, then line, column, tool and finally
// code, so report output is deterministic.
func sortFindings(findings []Finding) {
	slices.SortFunc(findings, func(a Finding, b Finding) int {
		if c := strings.Compare(a.File, b.File); c != 0 {
			return c
		}
		if a.Line != b.Line {
			if a.Line < b.Line {
				return -1
			}
			return 1
		}
		if a.Column != b.Column {
			if a.Column < b.Column {
				return -1
			}
			return 1
		}
		if c := strings.Compare(a.Tool, b.Tool); c != 0 {
			return c
		}
		return strings.Compare(a.Code, b.Code)
	})
}
// sortToolRuns orders tool runs alphabetically by adapter name so report
// output is deterministic.
func sortToolRuns(toolRuns []ToolRun) {
	slices.SortFunc(toolRuns, func(a ToolRun, b ToolRun) int {
		return strings.Compare(a.Name, b.Name)
	})
}

624
pkg/lint/service_test.go Normal file
View file

@ -0,0 +1,624 @@
package lint
import (
"context"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestServiceRun_Good_CatalogFindings verifies that a Go project containing a
// catalog violation yields exactly one "go-cor-003" warning attributed to the
// catalog tool, and that the warning fails the "warning" threshold.
func TestServiceRun_Good_CatalogFindings(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	// Fixture that trips catalog rule go-cor-003.
	require.NoError(t, os.WriteFile(filepath.Join(dir, "input.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("data")
}
`), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "warning", report.Findings[0].Severity)
	assert.Equal(t, "catalog", report.Findings[0].Tool)
	assert.Equal(t, "go-cor-003", report.Findings[0].Code)
	assert.Equal(t, "correctness", report.Findings[0].Category)
	assert.Equal(t, 1, report.Summary.Total)
	assert.Equal(t, 1, report.Summary.Warnings)
	assert.False(t, report.Summary.Passed)
	assert.Contains(t, report.Languages, "go")
	require.NotEmpty(t, report.Tools)
	assert.Equal(t, "catalog", report.Tools[0].Name)
}
// TestServiceRun_Good_UsesConfiguredPaths verifies that a `paths:` entry in
// .core/lint.yaml scopes the run: only the violation under services/ is
// reported, the identical one at the project root is ignored.
func TestServiceRun_Good_UsesConfiguredPaths(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("root")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "services"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "services", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("scoped")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte("paths:\n - services\n"), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "services/scoped.go", report.Findings[0].File)
	assert.Equal(t, 1, report.Summary.Total)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_ExplicitEmptyFilesSkipsScanning verifies that an
// explicit empty Files slice (non-nil) short-circuits the run: no languages
// detected, no adapters executed, an empty passing report.
func TestServiceRun_Good_ExplicitEmptyFilesSkipsScanning(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("root")
}
`), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		Files:  []string{}, // non-nil: explicit empty scope, not "unscoped"
		FailOn: "warning",
	})
	require.NoError(t, err)
	assert.Empty(t, report.Languages)
	assert.Empty(t, report.Tools)
	assert.Empty(t, report.Findings)
	assert.True(t, report.Summary.Passed)
}
// TestServiceRun_Good_UsesConfiguredExclude verifies that an `exclude:` entry
// in .core/lint.yaml removes the services/ tree from the scan, leaving only
// the root-level violation.
func TestServiceRun_Good_UsesConfiguredExclude(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("root")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "services"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "services", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("scoped")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte("exclude:\n - services\n"), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "root.go", report.Findings[0].File)
	assert.Equal(t, 1, report.Summary.Total)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_SkipsHiddenConfiguredRootDirectory verifies that a
// hidden directory configured as a `paths:` root is skipped wholesale: the
// run produces no findings and no tool executions.
func TestServiceRun_Good_SkipsHiddenConfiguredRootDirectory(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".hidden"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".hidden", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("scoped")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte("paths:\n - .hidden\n"), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	assert.Empty(t, report.Findings)
	assert.Empty(t, report.Tools)
	assert.True(t, report.Summary.Passed)
}
// TestServiceRun_Good_SkipsHiddenConfiguredFilePath verifies that a file
// listed under `paths:` is still dropped when it lives inside a hidden
// directory: only the visible root.go finding survives.
func TestServiceRun_Good_SkipsHiddenConfiguredFilePath(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("root")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".hidden"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".hidden", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("hidden")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte("paths:\n - root.go\n - .hidden/scoped.go\n"), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "root.go", report.Findings[0].File)
	assert.Equal(t, 1, report.Summary.Total)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_UsesNamedSchedule verifies that selecting a named
// schedule applies both its path scope (services/ only) and its fail_on
// threshold (warning).
func TestServiceRun_Good_UsesNamedSchedule(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("root")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "services"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "services", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
	svc := service{}
	_ = svc.Process("scoped")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(`schedules:
  nightly:
    fail_on: warning
    paths:
      - services
`), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:     dir,
		Schedule: "nightly",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "services/scoped.go", report.Findings[0].File)
	assert.Equal(t, 1, report.Summary.Total)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_LanguageShortcutIgnoresCiAndSbomGroups verifies that an
// explicit Lang shortcut keeps the CI/SBOM flags from pulling in the
// security and compliance tool groups: only catalog plus the go group run.
func TestServiceRun_Good_LanguageShortcutIgnoresCiAndSbomGroups(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	configYAML := `lint:
  go:
    - catalog
    - go-tool
  security:
    - security-tool
  compliance:
    - compliance-tool
`
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(configYAML), 0o644))

	service := &Service{adapters: []Adapter{
		shortcutAdapter{name: "go-tool", category: "correctness"},
		shortcutAdapter{name: "security-tool", category: "security"},
		shortcutAdapter{name: "compliance-tool", category: "compliance"},
	}}
	report, err := service.Run(context.Background(), RunInput{
		Path:   dir,
		Lang:   "go",
		CI:     true,
		SBOM:   true,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Tools, 2)
	assert.Equal(t, []string{"catalog", "go-tool"}, []string{report.Tools[0].Name, report.Tools[1].Name})
}
// TestServiceRun_Good_LanguageShortcutExcludesInfraGroup verifies that a Lang
// shortcut does not pull in the configured infra tool group: only the php
// group runs.
func TestServiceRun_Good_LanguageShortcutExcludesInfraGroup(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "composer.json"), []byte("{\n \"name\": \"example/test\"\n}\n"), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	configYAML := `lint:
  php:
    - php-tool
  infra:
    - shell-tool
`
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(configYAML), 0o644))

	service := &Service{adapters: []Adapter{
		shortcutAdapter{name: "php-tool", category: "correctness"},
		shortcutAdapter{name: "shell-tool", category: "correctness"},
	}}
	report, err := service.Run(context.Background(), RunInput{
		Path:   dir,
		Lang:   "php",
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Tools, 1)
	assert.Equal(t, "php-tool", report.Tools[0].Name)
}
// TestServiceRun_Good_HookModeUsesStagedFiles verifies that hook mode lints
// only git-staged files: the staged violation is reported, the unstaged file
// is ignored.
func TestServiceRun_Good_HookModeUsesStagedFiles(t *testing.T) {
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("git not available")
	}
	dir := t.TempDir()
	runTestCommand(t, dir, "git", "init")
	runTestCommand(t, dir, "git", "config", "user.email", "test@example.com")
	runTestCommand(t, dir, "git", "config", "user.name", "Test User")
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "staged.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func run() {
	svc := service{}
	_ = svc.Process("data")
}
`), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "unstaged.go"), []byte(`package sample
func run2() {
	panic("boom")
}
`), 0o644))
	runTestCommand(t, dir, "git", "add", "go.mod", "staged.go")
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		Hook:   true,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "staged.go", report.Findings[0].File)
	assert.Equal(t, "go-cor-003", report.Findings[0].Code)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_HookModeWithNoStagedFilesSkipsScanning verifies that
// hook mode with an empty git index short-circuits to an empty passing
// report without running any adapter.
func TestServiceRun_Good_HookModeWithNoStagedFilesSkipsScanning(t *testing.T) {
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("git not available")
	}
	dir := t.TempDir()
	runTestCommand(t, dir, "git", "init")
	runTestCommand(t, dir, "git", "config", "user.email", "test@example.com")
	runTestCommand(t, dir, "git", "config", "user.name", "Test User")
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "unstaged.go"), []byte(`package sample
func run() {
	panic("boom")
}
`), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		Hook:   true,
		FailOn: "warning",
	})
	require.NoError(t, err)
	assert.Empty(t, report.Languages)
	assert.Empty(t, report.Tools)
	assert.Empty(t, report.Findings)
	assert.True(t, report.Summary.Passed)
}
// TestServiceRemoveHook_PreservesExistingHookContent verifies the
// install/remove round-trip: a pre-existing custom pre-commit hook comes back
// byte-for-byte after InstallHook followed by RemoveHook.
func TestServiceRemoveHook_PreservesExistingHookContent(t *testing.T) {
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("git not available")
	}
	repo := t.TempDir()
	runTestCommand(t, repo, "git", "init")
	hookDir := filepath.Join(repo, ".git", "hooks")
	require.NoError(t, os.MkdirAll(hookDir, 0o755))
	hookFile := filepath.Join(hookDir, "pre-commit")
	original := "\n# custom hook\nprintf 'keep'"
	require.NoError(t, os.WriteFile(hookFile, []byte(original), 0o755))

	service := NewService()
	require.NoError(t, service.InstallHook(repo))
	require.NoError(t, service.RemoveHook(repo))

	restored, err := os.ReadFile(hookFile)
	require.NoError(t, err)
	assert.Equal(t, original, string(restored))
}
// TestServiceRun_JS_PrettierFindings verifies that a prettier run reporting
// one unformatted file surfaces as a single warning finding and marks the
// tool run as failed.
func TestServiceRun_JS_PrettierFindings(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "package.json"), []byte("{\n \"name\": \"example\"\n}\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "index.js"), []byte("const value = 1;\n"), 0o644))
	// Mock prettier: prints one differing file and exits 1.
	setupMockCmdExit(t, "prettier", "index.js\n", "", 1)

	adapter := newCommandAdapter("prettier", []string{"prettier"}, []string{"js"}, "style", "", false, true, projectPathArguments("--list-different"), parsePrettierDiagnostics)
	service := &Service{adapters: []Adapter{adapter}}
	report, err := service.Run(context.Background(), RunInput{Path: dir, FailOn: "warning"})
	require.NoError(t, err)

	require.Len(t, report.Findings, 1)
	require.Len(t, report.Tools, 1)
	finding := report.Findings[0]
	assert.Equal(t, "prettier", finding.Tool)
	assert.Equal(t, "index.js", finding.File)
	assert.Equal(t, "prettier-format", finding.Code)
	assert.Equal(t, "warning", finding.Severity)
	assert.False(t, report.Summary.Passed)
	assert.Equal(t, "prettier", report.Tools[0].Name)
	assert.Equal(t, "failed", report.Tools[0].Status)
	assert.Equal(t, 1, report.Tools[0].Findings)
}
// TestServiceRun_CapturesToolVersion verifies that Run records the version
// string reported by a tool's --version flag alongside its run result.
func TestServiceRun_CapturesToolVersion(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "package.json"), []byte("{\n \"name\": \"example\"\n}\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "index.js"), []byte("const value = 1;\n"), 0o644))
	// Fake prettier binary: answers --version, and reports index.js as
	// unformatted for --list-different (exit 1 is prettier's convention).
	binDir := t.TempDir()
	scriptPath := filepath.Join(binDir, "prettier")
	script := `#!/bin/sh
case "$1" in
--version)
echo "prettier 3.2.1"
exit 0
;;
--list-different)
echo "index.js"
exit 1
;;
esac
echo "unexpected args: $*" >&2
exit 0
`
	require.NoError(t, os.WriteFile(scriptPath, []byte(script), 0o755))
	// Prepend the fake binary's directory so PATH lookup resolves it first.
	t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
	svc := &Service{adapters: []Adapter{
		newCommandAdapter("prettier", []string{"prettier"}, []string{"js"}, "style", "", false, true, projectPathArguments("--list-different"), parsePrettierDiagnostics),
	}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Tools, 1)
	assert.Equal(t, "prettier", report.Tools[0].Name)
	// The version comes from the fake script's --version output.
	assert.Equal(t, "prettier 3.2.1", report.Tools[0].Version)
}
// TestServiceRun_Good_DeduplicatesMergedFindings ensures that identical findings
// reported by multiple adapters collapse into a single entry in the report.
func TestServiceRun_Good_DeduplicatesMergedFindings(t *testing.T) {
	projectDir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(projectDir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(projectDir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(projectDir, ".core", "lint.yaml"), []byte("lint:\n go:\n - dup\n"), 0o644))

	// Both stub adapters report this exact same finding.
	shared := Finding{
		Tool:     "dup",
		File:     filepath.Join(projectDir, "input.go"),
		Line:     12,
		Column:   3,
		Severity: "warning",
		Code:     "duplicate-finding",
		Message:  "same finding",
	}
	service := &Service{adapters: []Adapter{
		duplicateAdapter{name: "dup", finding: shared},
		duplicateAdapter{name: "dup", finding: shared},
	}}

	report, err := service.Run(context.Background(), RunInput{
		Path:   projectDir,
		FailOn: "warning",
	})
	require.NoError(t, err)

	require.Len(t, report.Tools, 3)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "duplicate-finding", report.Findings[0].Code)
	assert.Equal(t, 1, report.Summary.Total)
}
// TestServiceTools_EmptyInventoryReturnsEmptySlice confirms that Tools yields
// an empty but non-nil slice when the service has no adapters configured.
func TestServiceTools_EmptyInventoryReturnsEmptySlice(t *testing.T) {
	service := &Service{}
	inventory := service.Tools(nil)
	require.NotNil(t, inventory)
	assert.Empty(t, inventory)
}
// shortcutAdapter is a minimal Adapter stub used in tests: it matches every
// language and always reports a passing run with no findings.
type shortcutAdapter struct {
	name     string // adapter and command name reported to the service
	category string // category label returned by Category
}
// Name returns the adapter's configured name.
func (adapter shortcutAdapter) Name() string { return adapter.name }

// Available always reports the stub tool as installed.
func (adapter shortcutAdapter) Available() bool { return true }

// Languages matches every language via the "*" wildcard.
func (adapter shortcutAdapter) Languages() []string { return []string{"*"} }

// Command reuses the adapter name as the command to invoke.
func (adapter shortcutAdapter) Command() string { return adapter.name }

// Entitlement returns no entitlement requirement.
func (adapter shortcutAdapter) Entitlement() string { return "" }

// RequiresEntitlement reports that no entitlement is needed.
func (adapter shortcutAdapter) RequiresEntitlement() bool { return false }

// MatchesLanguage accepts any language set.
func (adapter shortcutAdapter) MatchesLanguage(languages []string) bool { return true }

// Category returns the configured category label.
func (adapter shortcutAdapter) Category() string { return adapter.category }

// Fast marks the stub as a fast adapter.
func (adapter shortcutAdapter) Fast() bool { return true }
// Run reports an immediate pass with zero duration and no findings.
func (adapter shortcutAdapter) Run(_ context.Context, _ RunInput, _ []string) AdapterResult {
	tool := ToolRun{
		Name:     adapter.name,
		Status:   "passed",
		Duration: "0s",
	}
	return AdapterResult{Tool: tool}
}
// duplicateAdapter is an Adapter stub that always passes but emits one canned
// finding, so tests can exercise deduplication of merged findings.
type duplicateAdapter struct {
	name    string  // adapter and command name reported to the service
	finding Finding // the single finding emitted by every Run call
}
// Name returns the adapter's configured name.
func (adapter duplicateAdapter) Name() string { return adapter.name }

// Available always reports the stub tool as installed.
func (adapter duplicateAdapter) Available() bool { return true }

// Languages declares support for Go only.
func (adapter duplicateAdapter) Languages() []string { return []string{"go"} }

// Command reuses the adapter name as the command to invoke.
func (adapter duplicateAdapter) Command() string { return adapter.name }

// Entitlement returns no entitlement requirement.
func (adapter duplicateAdapter) Entitlement() string { return "" }

// RequiresEntitlement reports that no entitlement is needed.
func (adapter duplicateAdapter) RequiresEntitlement() bool { return false }
// MatchesLanguage reports whether "go" appears in the supplied language list.
func (adapter duplicateAdapter) MatchesLanguage(languages []string) bool {
	for index := range languages {
		if languages[index] == "go" {
			return true
		}
	}
	return false
}
// Category labels the stub's findings as correctness issues.
func (adapter duplicateAdapter) Category() string { return "correctness" }

// Fast marks the stub as a fast adapter.
func (adapter duplicateAdapter) Fast() bool { return true }
// Run reports a passing tool run that nevertheless carries the canned finding,
// which lets tests observe how duplicate findings are merged.
func (adapter duplicateAdapter) Run(_ context.Context, _ RunInput, _ []string) AdapterResult {
	result := AdapterResult{
		Tool: ToolRun{
			Name:     adapter.name,
			Status:   "passed",
			Duration: "0s",
		},
	}
	result.Findings = append(result.Findings, adapter.finding)
	return result
}
// runTestCommand executes name with args inside dir, failing the test on any
// error and attaching the command's combined output to the failure message.
func runTestCommand(t *testing.T, dir string, name string, args ...string) {
	t.Helper()
	command := exec.Command(name, args...)
	command.Dir = dir
	combined, runErr := command.CombinedOutput()
	require.NoError(t, runErr, string(combined))
}

View file

@ -8,9 +8,12 @@ import (
"os/exec"
"path/filepath"
"regexp"
"slices"
"strconv"
"strings"
"time"
coreerr "forge.lthn.ai/core/go-log"
)
// ToolFinding represents a single issue found by an external tool (e.g. go vet).
@ -35,14 +38,17 @@ type RaceCondition struct {
Desc string `json:"desc"`
}
// TODO represents a tracked code comment like TODO, FIXME, or HACK.
type TODO struct {
// TrackedComment represents a tracked code comment like TODO, FIXME, or HACK.
type TrackedComment struct {
File string `json:"file"`
Line int `json:"line"`
Type string `json:"type"`
Message string `json:"message"`
}
// TODO is kept for compatibility with the older API name.
type TODO = TrackedComment
// Vulnerability represents a dependency vulnerability from govulncheck text output.
type Vulnerability struct {
ID string `json:"id"`
@ -136,8 +142,10 @@ func (t *Toolkit) Run(name string, args ...string) (stdout, stderr string, exitC
return
}
// FindTODOs greps for TODO/FIXME/HACK comments within a directory.
func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) {
// FindTrackedComments greps for TODO/FIXME/HACK comments within a directory.
//
// comments, err := lint.NewToolkit(".").FindTrackedComments("pkg/lint")
func (t *Toolkit) FindTrackedComments(dir string) ([]TrackedComment, error) {
pattern := `\b(TODO|FIXME|HACK)\b(\(.*\))?:`
stdout, stderr, exitCode, err := t.Run("git", "grep", "--line-number", "-E", pattern, "--", dir)
@ -145,10 +153,10 @@ func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) {
return nil, nil
}
if err != nil && exitCode != 1 {
return nil, fmt.Errorf("git grep failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.FindTrackedComments", fmt.Sprintf("git grep failed (exit %d):\n%s", exitCode, stderr), err)
}
var todos []TODO
var comments []TrackedComment
re := regexp.MustCompile(pattern)
for line := range strings.SplitSeq(strings.TrimSpace(stdout), "\n") {
@ -167,21 +175,26 @@ func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) {
}
msg := strings.TrimSpace(re.Split(parts[2], 2)[1])
todos = append(todos, TODO{
comments = append(comments, TrackedComment{
File: parts[0],
Line: lineNum,
Type: todoType,
Message: msg,
})
}
return todos, nil
return comments, nil
}
// FindTODOs is kept for compatibility with the older API name.
func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) {
return t.FindTrackedComments(dir)
}
// AuditDeps runs govulncheck to find dependency vulnerabilities (text output).
func (t *Toolkit) AuditDeps() ([]Vulnerability, error) {
stdout, stderr, exitCode, err := t.Run("govulncheck", "./...")
if err != nil && exitCode != 0 && !strings.Contains(stdout, "Vulnerability") {
return nil, fmt.Errorf("govulncheck failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.AuditDeps", fmt.Sprintf("govulncheck failed (exit %d):\n%s", exitCode, stderr), err)
}
var vulns []Vulnerability
@ -230,7 +243,7 @@ func (t *Toolkit) AuditDeps() ([]Vulnerability, error) {
func (t *Toolkit) DiffStat() (DiffSummary, error) {
stdout, stderr, exitCode, err := t.Run("git", "diff", "--stat")
if err != nil && exitCode != 0 {
return DiffSummary{}, fmt.Errorf("git diff failed (exit %d): %w\n%s", exitCode, err, stderr)
return DiffSummary{}, coreerr.E("Toolkit.DiffStat", fmt.Sprintf("git diff failed (exit %d):\n%s", exitCode, stderr), err)
}
var s DiffSummary
@ -263,7 +276,7 @@ func (t *Toolkit) DiffStat() (DiffSummary, error) {
func (t *Toolkit) UncommittedFiles() ([]string, error) {
stdout, stderr, exitCode, err := t.Run("git", "status", "--porcelain")
if err != nil && exitCode != 0 {
return nil, fmt.Errorf("git status failed: %w\n%s", err, stderr)
return nil, coreerr.E("Toolkit.UncommittedFiles", "git status failed:\n"+stderr, err)
}
var files []string
for line := range strings.SplitSeq(strings.TrimSpace(stdout), "\n") {
@ -281,7 +294,7 @@ func (t *Toolkit) Lint(pkg string) ([]ToolFinding, error) {
return nil, nil
}
if err != nil && exitCode != 2 {
return nil, fmt.Errorf("go vet failed: %w", err)
return nil, coreerr.E("Toolkit.Lint", "go vet failed", err)
}
var findings []ToolFinding
@ -311,7 +324,7 @@ func (t *Toolkit) ScanSecrets(dir string) ([]SecretLeak, error) {
return nil, nil
}
if err != nil && exitCode != 1 {
return nil, fmt.Errorf("gitleaks failed: %w", err)
return nil, coreerr.E("Toolkit.ScanSecrets", "gitleaks failed", err)
}
var leaks []SecretLeak
@ -338,7 +351,7 @@ func (t *Toolkit) ScanSecrets(dir string) ([]SecretLeak, error) {
func (t *Toolkit) ModTidy() error {
_, stderr, exitCode, err := t.Run("go", "mod", "tidy")
if err != nil && exitCode != 0 {
return fmt.Errorf("go mod tidy failed: %s", stderr)
return coreerr.E("Toolkit.ModTidy", "go mod tidy failed: "+strings.TrimSpace(stderr), nil)
}
return nil
}
@ -350,7 +363,7 @@ func (t *Toolkit) Build(targets ...string) ([]BuildResult, error) {
_, stderr, _, err := t.Run("go", "build", "-o", "/dev/null", target)
r := BuildResult{Target: target}
if err != nil {
r.Error = fmt.Errorf("%s", strings.TrimSpace(stderr))
r.Error = coreerr.E("Toolkit.Build", strings.TrimSpace(stderr), nil)
}
results = append(results, r)
}
@ -361,7 +374,7 @@ func (t *Toolkit) Build(targets ...string) ([]BuildResult, error) {
func (t *Toolkit) TestCount(pkg string) (int, error) {
stdout, stderr, exitCode, err := t.Run("go", "test", "-list", ".*", pkg)
if err != nil && exitCode != 0 {
return 0, fmt.Errorf("go test -list failed: %w\n%s", err, stderr)
return 0, coreerr.E("Toolkit.TestCount", fmt.Sprintf("go test -list failed:\n%s", stderr), err)
}
count := 0
for line := range strings.SplitSeq(strings.TrimSpace(stdout), "\n") {
@ -379,7 +392,7 @@ func (t *Toolkit) Coverage(pkg string) ([]CoverageReport, error) {
}
stdout, stderr, exitCode, err := t.Run("go", "test", "-cover", pkg)
if err != nil && exitCode != 0 && !strings.Contains(stdout, "coverage:") {
return nil, fmt.Errorf("go test -cover failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.Coverage", fmt.Sprintf("go test -cover failed (exit %d):\n%s", exitCode, stderr), err)
}
var reports []CoverageReport
@ -406,7 +419,7 @@ func (t *Toolkit) RaceDetect(pkg string) ([]RaceCondition, error) {
}
_, stderr, _, err := t.Run("go", "test", "-race", pkg)
if err != nil && !strings.Contains(stderr, "WARNING: DATA RACE") {
return nil, fmt.Errorf("go test -race failed: %w", err)
return nil, coreerr.E("Toolkit.RaceDetect", "go test -race failed", err)
}
var races []RaceCondition
@ -434,7 +447,7 @@ func (t *Toolkit) RaceDetect(pkg string) ([]RaceCondition, error) {
func (t *Toolkit) GocycloComplexity(threshold int) ([]ComplexFunc, error) {
stdout, stderr, exitCode, err := t.Run("gocyclo", "-over", strconv.Itoa(threshold), ".")
if err != nil && exitCode == -1 {
return nil, fmt.Errorf("gocyclo not available: %w\n%s", err, stderr)
return nil, coreerr.E("Toolkit.GocycloComplexity", "gocyclo not available:\n"+stderr, err)
}
var funcs []ComplexFunc
@ -467,7 +480,7 @@ func (t *Toolkit) GocycloComplexity(threshold int) ([]ComplexFunc, error) {
func (t *Toolkit) DepGraph(pkg string) (*Graph, error) {
stdout, stderr, exitCode, err := t.Run("go", "mod", "graph")
if err != nil && exitCode != 0 {
return nil, fmt.Errorf("go mod graph failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.DepGraph", fmt.Sprintf("go mod graph failed (exit %d):\n%s", exitCode, stderr), err)
}
graph := &Graph{Edges: make(map[string][]string)}
@ -487,6 +500,10 @@ func (t *Toolkit) DepGraph(pkg string) (*Graph, error) {
for node := range nodes {
graph.Nodes = append(graph.Nodes, node)
}
slices.Sort(graph.Nodes)
for src := range graph.Edges {
slices.Sort(graph.Edges[src])
}
return graph, nil
}
@ -494,7 +511,7 @@ func (t *Toolkit) DepGraph(pkg string) (*Graph, error) {
func (t *Toolkit) GitLog(n int) ([]Commit, error) {
stdout, stderr, exitCode, err := t.Run("git", "log", fmt.Sprintf("-n%d", n), "--format=%H|%an|%aI|%s")
if err != nil && exitCode != 0 {
return nil, fmt.Errorf("git log failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.GitLog", fmt.Sprintf("git log failed (exit %d):\n%s", exitCode, stderr), err)
}
var commits []Commit
@ -543,7 +560,7 @@ func (t *Toolkit) CheckPerms(dir string) ([]PermIssue, error) {
return nil
})
if err != nil {
return nil, fmt.Errorf("walk failed: %w", err)
return nil, coreerr.E("Toolkit.CheckPerms", "walk failed", err)
}
return issues, nil
}

View file

@ -135,6 +135,18 @@ func TestToolkit_DepGraph_Good(t *testing.T) {
assert.Len(t, graph.Edges["modA@v1"], 2)
}
func TestToolkit_DepGraph_SortsNodesAndEdges(t *testing.T) {
output := "modB@v2 modD@v1\nmodA@v1 modC@v3\nmodA@v1 modB@v2"
setupMockCmd(t, "go", output)
tk := NewToolkit(t.TempDir())
graph, err := tk.DepGraph("./...")
require.NoError(t, err)
assert.Equal(t, []string{"modA@v1", "modB@v2", "modC@v3", "modD@v1"}, graph.Nodes)
assert.Equal(t, []string{"modB@v2", "modC@v3"}, graph.Edges["modA@v1"])
}
func TestToolkit_RaceDetect_Good(t *testing.T) {
setupMockCmd(t, "go", "ok\texample.com/safe\t0.1s")
@ -191,3 +203,21 @@ func TestToolkit_CheckPerms_Good(t *testing.T) {
require.Len(t, issues, 1)
assert.Equal(t, "World-writable", issues[0].Issue)
}
func TestToolkit_FindTrackedComments_Compatibility(t *testing.T) {
output := "pkg/file.go:12:TODO: fix this\n"
setupMockCmd(t, "git", output)
tk := NewToolkit(t.TempDir())
comments, err := tk.FindTrackedComments("pkg")
require.NoError(t, err)
require.Len(t, comments, 1)
assert.Equal(t, "pkg/file.go", comments[0].File)
assert.Equal(t, 12, comments[0].Line)
assert.Equal(t, "TODO", comments[0].Type)
assert.Equal(t, "fix this", comments[0].Message)
legacyComments, err := tk.FindTODOs("pkg")
require.NoError(t, err)
assert.Equal(t, comments, legacyComments)
}

View file

@ -2,8 +2,9 @@ package lint
import (
"encoding/json"
"fmt"
"strings"
coreerr "forge.lthn.ai/core/go-log"
)
// VulnFinding represents a single vulnerability found by govulncheck.
@ -89,7 +90,7 @@ func (t *Toolkit) VulnCheck(modulePath string) (*VulnResult, error) {
stdout, stderr, exitCode, err := t.Run("govulncheck", "-json", modulePath)
if err != nil && exitCode == -1 {
return nil, fmt.Errorf("govulncheck not installed or not available: %w", err)
return nil, coreerr.E("Toolkit.VulnCheck", "govulncheck not installed or not available", err)
}
return ParseVulnCheckJSON(stdout, stderr)

View file

@ -7,6 +7,8 @@ import (
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// AnalyseOptions configures PHP static analysis.
@ -78,7 +80,7 @@ func Analyse(ctx context.Context, opts AnalyseOptions) error {
if opts.Dir == "" {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get working directory: %w", err)
return coreerr.E("php.Analyse", "get working directory", err)
}
opts.Dir = cwd
}
@ -90,7 +92,7 @@ func Analyse(ctx context.Context, opts AnalyseOptions) error {
// Check if analyser is available
analyser, found := DetectAnalyser(opts.Dir)
if !found {
return fmt.Errorf("no static analyser found (install PHPStan: composer require phpstan/phpstan --dev)")
return coreerr.E("php.Analyse", "no static analyser found (install PHPStan: composer require phpstan/phpstan --dev)", nil)
}
var cmdName string
@ -192,7 +194,7 @@ func RunPsalm(ctx context.Context, opts PsalmOptions) error {
if opts.Dir == "" {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get working directory: %w", err)
return coreerr.E("php.RunPsalm", "get working directory", err)
}
opts.Dir = cwd
}

View file

@ -1,13 +1,16 @@
package php
import (
"cmp"
"context"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"slices"
coreerr "forge.lthn.ai/core/go-log"
)
// AuditOptions configures dependency security auditing.
@ -40,7 +43,7 @@ func RunAudit(ctx context.Context, opts AuditOptions) ([]AuditResult, error) {
if opts.Dir == "" {
cwd, err := os.Getwd()
if err != nil {
return nil, fmt.Errorf("get working directory: %w", err)
return nil, coreerr.E("php.RunAudit", "get working directory", err)
}
opts.Dir = cwd
}
@ -101,6 +104,7 @@ func runComposerAudit(ctx context.Context, opts AuditOptions) AuditResult {
})
}
}
sortAuditAdvisories(result.Advisories)
result.Vulnerabilities = len(result.Advisories)
} else if err != nil {
result.Error = err
@ -149,6 +153,7 @@ func runNpmAudit(ctx context.Context, opts AuditOptions) AuditResult {
Severity: vuln.Severity,
})
}
sortAuditAdvisories(result.Advisories)
} else if err != nil {
result.Error = err
}
@ -156,3 +161,14 @@ func runNpmAudit(ctx context.Context, opts AuditOptions) AuditResult {
return result
}
// sortAuditAdvisories orders advisories deterministically: by package first,
// then title, severity, and URL as tie-breakers, so audit output is stable
// across runs regardless of tool output order.
func sortAuditAdvisories(advisories []AuditAdvisory) {
	slices.SortFunc(advisories, func(a, b AuditAdvisory) int {
		// cmp.Or returns the first non-zero comparison result.
		return cmp.Or(
			cmp.Compare(a.Package, b.Package),
			cmp.Compare(a.Title, b.Title),
			cmp.Compare(a.Severity, b.Severity),
			cmp.Compare(a.URL, b.URL),
		)
	})
}

View file

@ -47,6 +47,24 @@ func TestAuditAdvisory_Fields(t *testing.T) {
assert.Equal(t, []string{"CVE-2025-9999", "GHSA-xxxx"}, adv.Identifiers)
}
func TestSortAuditAdvisories_Good(t *testing.T) {
advisories := []AuditAdvisory{
{Package: "vendor/package-b", Title: "Zulu"},
{Package: "vendor/package-a", Title: "Beta"},
{Package: "vendor/package-b", Title: "Alpha"},
}
sortAuditAdvisories(advisories)
require.Len(t, advisories, 3)
assert.Equal(t, "vendor/package-a", advisories[0].Package)
assert.Equal(t, "Beta", advisories[0].Title)
assert.Equal(t, "vendor/package-b", advisories[1].Package)
assert.Equal(t, "Alpha", advisories[1].Title)
assert.Equal(t, "vendor/package-b", advisories[2].Package)
assert.Equal(t, "Zulu", advisories[2].Title)
}
func TestRunComposerAudit_ParsesJSON(t *testing.T) {
// Test the JSON parsing of composer audit output by verifying
// the struct can be populated from JSON matching composer's format.
@ -101,24 +119,20 @@ func TestRunComposerAudit_ParsesJSON(t *testing.T) {
})
}
}
sortAuditAdvisories(result.Advisories)
result.Vulnerabilities = len(result.Advisories)
assert.Equal(t, "composer", result.Tool)
assert.Equal(t, 3, result.Vulnerabilities)
assert.Len(t, result.Advisories, 3)
// Build a map of advisories by package for deterministic assertions
byPkg := make(map[string][]AuditAdvisory)
for _, a := range result.Advisories {
byPkg[a.Package] = append(byPkg[a.Package], a)
}
assert.Len(t, byPkg["vendor/package-a"], 1)
assert.Equal(t, "Remote Code Execution", byPkg["vendor/package-a"][0].Title)
assert.Equal(t, "https://example.com/advisory/1", byPkg["vendor/package-a"][0].URL)
assert.Equal(t, []string{"CVE-2025-1234"}, byPkg["vendor/package-a"][0].Identifiers)
assert.Len(t, byPkg["vendor/package-b"], 2)
assert.Equal(t, "vendor/package-a", result.Advisories[0].Package)
assert.Equal(t, "Remote Code Execution", result.Advisories[0].Title)
assert.Equal(t, "https://example.com/advisory/1", result.Advisories[0].URL)
assert.Equal(t, []string{"CVE-2025-1234"}, result.Advisories[0].Identifiers)
assert.Equal(t, "vendor/package-b", result.Advisories[1].Package)
assert.Equal(t, "Cross-Site Scripting", result.Advisories[1].Title)
assert.Equal(t, "vendor/package-b", result.Advisories[2].Package)
assert.Equal(t, "Open Redirect", result.Advisories[2].Title)
}
func TestNpmAuditJSON_ParsesCorrectly(t *testing.T) {
@ -164,19 +178,15 @@ func TestNpmAuditJSON_ParsesCorrectly(t *testing.T) {
Severity: vuln.Severity,
})
}
sortAuditAdvisories(result.Advisories)
assert.Equal(t, "npm", result.Tool)
assert.Equal(t, 2, result.Vulnerabilities)
assert.Len(t, result.Advisories, 2)
// Build map for deterministic assertions
byPkg := make(map[string]AuditAdvisory)
for _, a := range result.Advisories {
byPkg[a.Package] = a
}
assert.Equal(t, "high", byPkg["lodash"].Severity)
assert.Equal(t, "low", byPkg["minimist"].Severity)
assert.Equal(t, "lodash", result.Advisories[0].Package)
assert.Equal(t, "high", result.Advisories[0].Severity)
assert.Equal(t, "minimist", result.Advisories[1].Package)
assert.Equal(t, "low", result.Advisories[1].Severity)
}
func TestRunAudit_SkipsNpmWithoutPackageJSON(t *testing.T) {

View file

@ -3,11 +3,12 @@ package php
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// fileExists reports whether the named file or directory exists.
@ -68,7 +69,7 @@ func Format(ctx context.Context, opts FormatOptions) error {
if opts.Dir == "" {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get working directory: %w", err)
return coreerr.E("php.Format", "get working directory", err)
}
opts.Dir = cwd
}
@ -80,7 +81,7 @@ func Format(ctx context.Context, opts FormatOptions) error {
// Check if formatter is available
formatter, found := DetectFormatter(opts.Dir)
if !found {
return fmt.Errorf("no formatter found (install Laravel Pint: composer require laravel/pint --dev)")
return coreerr.E("php.Format", "no formatter found (install Laravel Pint: composer require laravel/pint --dev)", nil)
}
var cmdName string

View file

@ -7,6 +7,8 @@ import (
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// InfectionOptions configures Infection mutation testing.
@ -44,7 +46,7 @@ func RunInfection(ctx context.Context, opts InfectionOptions) error {
if opts.Dir == "" {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get working directory: %w", err)
return coreerr.E("php.RunInfection", "get working directory", err)
}
opts.Dir = cwd
}

View file

@ -2,11 +2,12 @@ package php
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// RectorOptions configures Rector code refactoring.
@ -40,7 +41,7 @@ func RunRector(ctx context.Context, opts RectorOptions) error {
if opts.Dir == "" {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get working directory: %w", err)
return coreerr.E("php.RunRector", "get working directory", err)
}
opts.Dir = cwd
}

View file

@ -1,11 +1,20 @@
package php
import (
"cmp"
"context"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"slices"
"strings"
"time"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// SecurityOptions configures security scanning.
@ -19,30 +28,30 @@ type SecurityOptions struct {
// SecurityResult holds the results of security scanning.
type SecurityResult struct {
Checks []SecurityCheck
Summary SecuritySummary
Checks []SecurityCheck `json:"checks"`
Summary SecuritySummary `json:"summary"`
}
// SecurityCheck represents a single security check result.
type SecurityCheck struct {
ID string
Name string
Description string
Severity string
Passed bool
Message string
Fix string
CWE string
ID string `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Severity string `json:"severity"`
Passed bool `json:"passed"`
Message string `json:"message,omitempty"`
Fix string `json:"fix,omitempty"`
CWE string `json:"cwe,omitempty"`
}
// SecuritySummary summarises security check results.
type SecuritySummary struct {
Total int
Passed int
Critical int
High int
Medium int
Low int
Total int `json:"total"`
Passed int `json:"passed"`
Critical int `json:"critical"`
High int `json:"high"`
Medium int `json:"medium"`
Low int `json:"low"`
}
// capitalise returns s with the first letter upper-cased.
@ -53,12 +62,56 @@ func capitalise(s string) string {
return strings.ToUpper(s[:1]) + s[1:]
}
// securitySeverityRank maps severities to a sortable rank.
// Lower numbers are more severe.
func securitySeverityRank(severity string) (int, bool) {
switch strings.ToLower(strings.TrimSpace(severity)) {
case "critical":
return 0, true
case "high":
return 1, true
case "medium":
return 2, true
case "low":
return 3, true
case "info":
return 4, true
default:
return 0, false
}
}
// filterSecurityChecks keeps only checks whose severity is at least as severe
// as minimum. An empty minimum disables filtering, an unknown minimum is an
// error, and checks carrying unrecognised severities are dropped.
func filterSecurityChecks(checks []SecurityCheck, minimum string) ([]SecurityCheck, error) {
	if strings.TrimSpace(minimum) == "" {
		return checks, nil
	}
	threshold, recognised := securitySeverityRank(minimum)
	if !recognised {
		return nil, coreerr.E("filterSecurityChecks", "invalid security severity "+minimum, nil)
	}
	kept := make([]SecurityCheck, 0, len(checks))
	for _, candidate := range checks {
		rank, known := securitySeverityRank(candidate.Severity)
		if known && rank <= threshold {
			kept = append(kept, candidate)
		}
	}
	return kept, nil
}
// RunSecurityChecks runs security checks on the project.
func RunSecurityChecks(ctx context.Context, opts SecurityOptions) (*SecurityResult, error) {
if opts.Dir == "" {
cwd, err := os.Getwd()
if err != nil {
return nil, fmt.Errorf("get working directory: %w", err)
return nil, coreerr.E("RunSecurityChecks", "get working directory", err)
}
opts.Dir = cwd
}
@ -90,38 +143,117 @@ func RunSecurityChecks(ctx context.Context, opts SecurityOptions) (*SecurityResu
fsChecks := runFilesystemSecurityChecks(opts.Dir)
result.Checks = append(result.Checks, fsChecks...)
// Calculate summary
// Check HTTP security headers when a URL is supplied.
result.Checks = append(result.Checks, runHTTPSecurityHeaderChecks(ctx, opts.URL)...)
filteredChecks, err := filterSecurityChecks(result.Checks, opts.Severity)
if err != nil {
return nil, err
}
result.Checks = filteredChecks
// Keep the check order stable for callers that consume the package result
// directly instead of going through the CLI layer.
slices.SortFunc(result.Checks, func(a, b SecurityCheck) int {
return cmp.Compare(a.ID, b.ID)
})
// Calculate summary after any severity filtering has been applied.
for _, check := range result.Checks {
result.Summary.Total++
if check.Passed {
result.Summary.Passed++
} else {
switch check.Severity {
case "critical":
result.Summary.Critical++
case "high":
result.Summary.High++
case "medium":
result.Summary.Medium++
case "low":
result.Summary.Low++
}
continue
}
switch check.Severity {
case "critical":
result.Summary.Critical++
case "high":
result.Summary.High++
case "medium":
result.Summary.Medium++
case "low":
result.Summary.Low++
}
}
return result, nil
}
// runHTTPSecurityHeaderChecks fetches rawURL and returns a single "high"
// severity check describing whether common security headers are present.
// A blank URL yields no checks; an unparsable or unreachable URL yields a
// failed check whose Fix field carries a remediation hint.
func runHTTPSecurityHeaderChecks(ctx context.Context, rawURL string) []SecurityCheck {
	if strings.TrimSpace(rawURL) == "" {
		return nil
	}
	// The check starts in the failed state; Passed is set only when every
	// required header is found.
	check := SecurityCheck{
		ID:          "http_security_headers",
		Name:        "HTTP Security Headers",
		Description: "Check for common security headers on the supplied URL",
		Severity:    "high",
		CWE:         "CWE-693",
	}
	parsedURL, err := url.Parse(rawURL)
	if err != nil || parsedURL.Scheme == "" || parsedURL.Host == "" {
		check.Message = "Invalid URL"
		check.Fix = "Provide a valid http:// or https:// URL"
		return []SecurityCheck{check}
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, rawURL, nil)
	if err != nil {
		check.Message = err.Error()
		check.Fix = "Provide a reachable URL"
		return []SecurityCheck{check}
	}
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		check.Message = err.Error()
		check.Fix = "Ensure the URL is reachable"
		return []SecurityCheck{check}
	}
	defer resp.Body.Close()
	// Drain the body so the underlying connection can be reused.
	_, _ = io.Copy(io.Discard, resp.Body)
	requiredHeaders := []string{
		"Content-Security-Policy",
		"X-Frame-Options",
		"X-Content-Type-Options",
		"Referrer-Policy",
	}
	// Strict-Transport-Security is only meaningful over TLS.
	if strings.EqualFold(parsedURL.Scheme, "https") {
		requiredHeaders = append(requiredHeaders, "Strict-Transport-Security")
	}
	var missing []string
	for _, header := range requiredHeaders {
		if strings.TrimSpace(resp.Header.Get(header)) == "" {
			missing = append(missing, header)
		}
	}
	if len(missing) == 0 {
		check.Passed = true
		check.Message = "Common security headers are present"
		return []SecurityCheck{check}
	}
	check.Message = fmt.Sprintf("Missing headers: %s", strings.Join(missing, ", "))
	check.Fix = "Add the missing security headers to the response"
	return []SecurityCheck{check}
}
func runEnvSecurityChecks(dir string) []SecurityCheck {
var checks []SecurityCheck
envPath := filepath.Join(dir, ".env")
envBytes, err := os.ReadFile(envPath)
envContent, err := coreio.Local.Read(envPath)
if err != nil {
return checks
}
envContent := string(envBytes)
envLines := strings.Split(envContent, "\n")
envMap := make(map[string]string)
for _, line := range envLines {

View file

@ -2,8 +2,11 @@ package php
import (
"context"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@ -192,7 +195,7 @@ func TestRunSecurityChecks_Summary(t *testing.T) {
// Summary should have totals
assert.Greater(t, result.Summary.Total, 0)
assert.Greater(t, result.Summary.Critical, 0) // at least debug_mode fails
assert.Greater(t, result.Summary.High, 0) // at least https_enforced fails
assert.Greater(t, result.Summary.High, 0) // at least https_enforced fails
}
func TestRunSecurityChecks_DefaultsDir(t *testing.T) {
@ -202,9 +205,100 @@ func TestRunSecurityChecks_DefaultsDir(t *testing.T) {
assert.NotNil(t, result)
}
func TestRunSecurityChecks_SeverityFilterCritical(t *testing.T) {
dir := t.TempDir()
setupSecurityFixture(t, dir, "APP_DEBUG=true\nAPP_KEY=short\nAPP_URL=http://example.com\n")
result, err := RunSecurityChecks(context.Background(), SecurityOptions{
Dir: dir,
Severity: "critical",
})
require.NoError(t, err)
require.Len(t, result.Checks, 3)
assert.Equal(t, 3, result.Summary.Total)
assert.Equal(t, 1, result.Summary.Passed)
assert.Equal(t, 2, result.Summary.Critical)
assert.Zero(t, result.Summary.High)
for _, check := range result.Checks {
assert.Equal(t, "critical", check.Severity)
}
byID := make(map[string]SecurityCheck)
for _, check := range result.Checks {
byID[check.ID] = check
}
assert.NotContains(t, byID, "https_enforced")
assert.Contains(t, byID, "app_key_set")
assert.Contains(t, byID, "composer_audit")
assert.Contains(t, byID, "debug_mode")
}
func TestRunSecurityChecks_URLAddsHeaderCheck(t *testing.T) {
dir := t.TempDir()
setupSecurityFixture(t, dir, "APP_DEBUG=false\nAPP_KEY=base64:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa=\nAPP_URL=https://example.com\n")
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("ok"))
}))
defer server.Close()
result, err := RunSecurityChecks(context.Background(), SecurityOptions{
Dir: dir,
URL: server.URL,
})
require.NoError(t, err)
byID := make(map[string]SecurityCheck)
for _, check := range result.Checks {
byID[check.ID] = check
}
headerCheck, ok := byID["http_security_headers"]
require.True(t, ok)
assert.False(t, headerCheck.Passed)
assert.Equal(t, "high", headerCheck.Severity)
assert.True(t, strings.Contains(headerCheck.Message, "Missing headers"))
assert.NotEmpty(t, headerCheck.Fix)
assert.Equal(t, 5, result.Summary.Total)
assert.Equal(t, 4, result.Summary.Passed)
assert.Equal(t, 1, result.Summary.High)
}
func TestRunSecurityChecks_InvalidSeverity(t *testing.T) {
dir := t.TempDir()
_, err := RunSecurityChecks(context.Background(), SecurityOptions{
Dir: dir,
Severity: "banana",
})
require.Error(t, err)
assert.Contains(t, err.Error(), "invalid security severity")
}
// TestCapitalise covers the first-letter upper-casing helper, including the
// empty-string and single-rune edge cases.
func TestCapitalise(t *testing.T) {
	cases := map[string]string{
		"composer": "Composer",
		"npm":      "Npm",
		"":         "",
		"a":        "A",
	}
	for input, want := range cases {
		assert.Equal(t, want, capitalise(input))
	}
}
// setupSecurityFixture writes a .env file with envContent into dir and installs
// a fake composer binary on PATH that reports no advisories, so security checks
// run deterministically without a real composer install.
func setupSecurityFixture(t *testing.T, dir string, envContent string) {
	t.Helper()
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".env"), []byte(envContent), 0o644))
	composerBin := filepath.Join(dir, "composer")
	require.NoError(t, os.WriteFile(composerBin, []byte("#!/bin/sh\ncat <<'JSON'\n{\"advisories\":{}}\nJSON\n"), 0o755))
	// t.Setenv restores PATH automatically at test end and fails fast if the
	// test is marked parallel, replacing the manual os.Setenv/t.Cleanup pair
	// which could leave PATH mutated if the restore itself failed.
	t.Setenv("PATH", dir+string(os.PathListSeparator)+os.Getenv("PATH"))
}

View file

@ -1,12 +1,14 @@
package php
import (
"bytes"
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// TestOptions configures PHP test execution.
@ -32,6 +34,9 @@ type TestOptions struct {
// JUnit outputs results in JUnit XML format via --log-junit.
JUnit bool
// JUnitPath overrides the JUnit report path. Defaults to test-results.xml.
JUnitPath string
// Output is the writer for test output (defaults to os.Stdout).
Output io.Writer
}
@ -63,7 +68,7 @@ func RunTests(ctx context.Context, opts TestOptions) error {
if opts.Dir == "" {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get working directory: %w", err)
return coreerr.E("php.RunTests", "get working directory", err)
}
opts.Dir = cwd
}
@ -72,6 +77,18 @@ func RunTests(ctx context.Context, opts TestOptions) error {
opts.Output = os.Stdout
}
if opts.JUnit && opts.JUnitPath == "" {
reportFile, err := os.CreateTemp("", "core-qa-junit-*.xml")
if err != nil {
return coreerr.E("php.RunTests", "create JUnit report file", err)
}
if closeErr := reportFile.Close(); closeErr != nil {
return coreerr.E("php.RunTests", "close JUnit report file", closeErr)
}
opts.JUnitPath = reportFile.Name()
defer os.Remove(opts.JUnitPath)
}
// Detect test runner
runner := DetectTestRunner(opts.Dir)
@ -88,14 +105,27 @@ func RunTests(ctx context.Context, opts TestOptions) error {
cmd := exec.CommandContext(ctx, cmdName, args...)
cmd.Dir = opts.Dir
cmd.Stdout = opts.Output
cmd.Stderr = opts.Output
cmd.Stdin = os.Stdin
// Set XDEBUG_MODE=coverage to avoid PHPUnit 11 warning
cmd.Env = append(os.Environ(), "XDEBUG_MODE=coverage")
return cmd.Run()
if !opts.JUnit {
cmd.Stdout = opts.Output
cmd.Stderr = opts.Output
return cmd.Run()
}
var machineOutput bytes.Buffer
cmd.Stdout = &machineOutput
cmd.Stderr = &machineOutput
runErr := cmd.Run()
reportErr := emitJUnitReport(opts.Output, opts.JUnitPath)
if runErr != nil {
return runErr
}
return reportErr
}
// RunParallel runs tests in parallel using the appropriate runner.
@ -139,7 +169,7 @@ func buildPestCommand(opts TestOptions) (string, []string) {
}
if opts.JUnit {
args = append(args, "--log-junit", "test-results.xml")
args = append(args, "--log-junit", junitReportPath(opts))
}
return cmdName, args
@ -184,8 +214,34 @@ func buildPHPUnitCommand(opts TestOptions) (string, []string) {
}
if opts.JUnit {
args = append(args, "--log-junit", "test-results.xml", "--testdox")
args = append(args, "--log-junit", junitReportPath(opts))
}
return cmdName, args
}
// junitReportPath returns the destination for the JUnit XML report,
// falling back to the conventional test-results.xml when the caller
// did not supply an explicit path.
func junitReportPath(opts TestOptions) string {
	const defaultPath = "test-results.xml"
	if path := opts.JUnitPath; path != "" {
		return path
	}
	return defaultPath
}
// emitJUnitReport copies the JUnit XML report stored at reportPath to
// output, guaranteeing that what is emitted ends with a newline.
func emitJUnitReport(output io.Writer, reportPath string) error {
	data, readErr := os.ReadFile(reportPath)
	if readErr != nil {
		return coreerr.E("php.emitJUnitReport", "read JUnit report", readErr)
	}
	if _, writeErr := output.Write(data); writeErr != nil {
		return coreerr.E("php.emitJUnitReport", "write JUnit report", writeErr)
	}
	// Append a terminating newline only when the report lacks one
	// (an empty report also gets a newline).
	needsNewline := len(data) == 0 || data[len(data)-1] != '\n'
	if needsNewline {
		if _, nlErr := io.WriteString(output, "\n"); nlErr != nil {
			return coreerr.E("php.emitJUnitReport", "terminate JUnit report", nlErr)
		}
	}
	return nil
}

View file

@ -288,7 +288,7 @@ func TestBuildPHPUnitCommand_Good_JUnit(t *testing.T) {
assert.Contains(t, args, "--log-junit")
assert.Contains(t, args, "test-results.xml")
assert.Contains(t, args, "--testdox")
assert.NotContains(t, args, "--testdox")
}
func TestBuildPHPUnitCommand_Good_AllFlags(t *testing.T) {
@ -313,5 +313,5 @@ func TestBuildPHPUnitCommand_Good_AllFlags(t *testing.T) {
assert.Contains(t, args, "--group")
assert.Contains(t, args, "feature")
assert.Contains(t, args, "--log-junit")
assert.Contains(t, args, "--testdox")
assert.NotContains(t, args, "--testdox")
}

53
tests/cli/_lib/run.sh Executable file
View file

@ -0,0 +1,53 @@
#!/usr/bin/env bash
# run_capture_stdout EXPECTED_STATUS OUTPUT_FILE CMD [ARGS...]
# Runs CMD with stdout redirected into OUTPUT_FILE. Returns 0 when CMD
# exits with EXPECTED_STATUS; otherwise prints a diagnostic (plus the
# captured stdout, if any) to stderr and returns 1.
run_capture_stdout() {
  local want="$1" out="$2"
  shift 2
  local got=0
  # Temporarily disable errexit so a non-zero exit can be inspected.
  set +e
  "$@" >"$out"
  got=$?
  set -e
  [[ "$got" -eq "$want" ]] && return 0
  printf 'expected exit %s, got %s\n' "$want" "$got" >&2
  if [[ -s "$out" ]]; then
    printf 'stdout:\n' >&2
    cat "$out" >&2
  fi
  return 1
}
# run_capture_all EXPECTED_STATUS OUTPUT_FILE CMD [ARGS...]
# Like run_capture_stdout, but captures stdout AND stderr into
# OUTPUT_FILE. Returns 0 when CMD exits with EXPECTED_STATUS;
# otherwise dumps a diagnostic and the captured output to stderr
# and returns 1.
run_capture_all() {
  local want="$1" out="$2"
  shift 2
  local got=0
  # Temporarily disable errexit so a non-zero exit can be inspected.
  set +e
  "$@" >"$out" 2>&1
  got=$?
  set -e
  [[ "$got" -eq "$want" ]] && return 0
  printf 'expected exit %s, got %s\n' "$want" "$got" >&2
  if [[ -s "$out" ]]; then
    printf 'output:\n' >&2
    cat "$out" >&2
  fi
  return 1
}
# assert_jq EXPRESSION FILE
# Passes when jq evaluates EXPRESSION against FILE to a truthy,
# non-null value (jq -e encodes that in its exit status).
assert_jq() {
  local expr="$1" file="$2"
  jq -e "$expr" "$file" >/dev/null
}
# assert_contains NEEDLE FILE
# Passes when FILE contains the literal string NEEDLE.
# `--` ends option parsing so needles that begin with '-' (e.g. CLI
# flags such as --min-msi=80, asserted elsewhere in this suite) are
# not misinterpreted as grep options.
assert_contains() {
  local needle="$1"
  local input_file="$2"
  grep -Fq -- "$needle" "$input_file"
}

View file

@ -0,0 +1,13 @@
# Aggregate suite: fans out to each core-lint CLI test directory.
version: "3"

tasks:
  test:
    cmds:
      - task -d check test
      - task -d catalog/list test
      - task -d catalog/show test
      - task -d detect test
      - task -d tools test
      - task -d init test
      - task -d run test

View file

@ -0,0 +1,18 @@
# CLI test: `lint catalog list --lang <lang>` prints the expected rule
# id and a "rule(s)" summary line for the language in fixtures/lang.txt.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../../cmd/core-lint
        lang="$(cat fixtures/lang.txt)"
        output="$(mktemp)"
        run_capture_all 0 "$output" ./bin/core-lint lint catalog list --lang "$lang"
        grep -Fq "go-sec-001" "$output"
        grep -Fq "rule(s)" "$output"
        EOF

View file

@ -0,0 +1 @@
go

View file

@ -0,0 +1,18 @@
# CLI test: `lint catalog show <rule>` emits the rule's JSON metadata
# for the rule id stored in fixtures/rule-id.txt.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../../cmd/core-lint
        rule_id="$(cat fixtures/rule-id.txt)"
        output="$(mktemp)"
        run_capture_stdout 0 "$output" ./bin/core-lint lint catalog show "$rule_id"
        jq -e '.id == "go-sec-001" and .severity == "high" and (.languages | index("go") != null)' "$output" >/dev/null
        jq -e '.title == "SQL wildcard injection in LIKE clauses"' "$output" >/dev/null
        EOF

View file

@ -0,0 +1 @@
go-sec-001

View file

@ -0,0 +1,17 @@
# CLI test: `lint check --format=json` reports exactly one go-cor-003
# finding (with a fix hint) for the fixture input.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
        output="$(mktemp)"
        run_capture_stdout 0 "$output" ./bin/core-lint lint check --format=json fixtures
        jq -e 'length == 1 and .[0].rule_id == "go-cor-003" and .[0].file == "input.go"' "$output" >/dev/null
        jq -e '.[0].severity == "medium" and .[0].fix != ""' "$output" >/dev/null
        EOF

View file

@ -0,0 +1,12 @@
//go:build ignore
package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("data")
}

View file

@ -0,0 +1,17 @@
# CLI test: `detect --output json` identifies the check fixtures as Go.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
        output="$(mktemp)"
        run_capture_stdout 0 "$output" ./bin/core-lint detect --output json ../check/fixtures
        jq -e '. == ["go"]' "$output" >/dev/null
        EOF

View file

@ -0,0 +1,19 @@
# CLI test: `init` scaffolds .core/lint.yaml in a fresh project dir,
# including a golangci-lint entry.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
        project_dir="$(mktemp -d)"
        output="$(mktemp)"
        run_capture_stdout 0 "$output" ./bin/core-lint init "$project_dir"
        test -f "$project_dir/.core/lint.yaml"
        grep -Fq "golangci-lint" "$project_dir/.core/lint.yaml"
        EOF

View file

@ -0,0 +1,19 @@
# CLI test: `run --fail-on warning` exits 1 and reports the single
# go-cor-003 warning found in the check fixtures.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
        output="$(mktemp)"
        run_capture_stdout 1 "$output" ./bin/core-lint run --output json --fail-on warning ../check/fixtures
        jq -e '.findings | length == 1' "$output" >/dev/null
        jq -e '.findings[0].code == "go-cor-003"' "$output" >/dev/null
        jq -e '.summary.warnings == 1 and .summary.passed == false' "$output" >/dev/null
        EOF

View file

@ -0,0 +1,18 @@
# CLI test: `tools --lang go` lists the expected Go toolchain entries.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
        output="$(mktemp)"
        run_capture_stdout 0 "$output" ./bin/core-lint tools --output json --lang go
        jq -e '.[] | select(.name == "golangci-lint")' "$output" >/dev/null
        jq -e '.[] | select(.name == "govulncheck")' "$output" >/dev/null
        EOF

View file

@ -0,0 +1,11 @@
package main
import (
"forge.lthn.ai/core/cli/pkg/cli"
_ "forge.lthn.ai/core/lint/cmd/qa"
)
// main sets the shared CLI's app name to "core" and hands control to
// its entry point. The qa subcommands come from the blank import of
// core/lint/cmd/qa above, which presumably registers them in its
// package init — TODO confirm against that package.
func main() {
	cli.WithAppName("core")
	cli.Main()
}

View file

@ -0,0 +1,20 @@
# CLI test: `qa audit --json` surfaces the advisory emitted by the
# stubbed composer binary placed on PATH.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
        cd fixtures/project
        output="$(mktemp)"
        export PATH="$(pwd)/../bin:$PATH"
        run_capture_stdout 1 "$output" ../../bin/core qa audit --json
        jq -e '.results[0].tool == "composer" and .results[0].vulnerabilities == 1' "$output" >/dev/null
        jq -e '.has_vulnerabilities == true and .vulnerabilities == 1' "$output" >/dev/null
        jq -e '.results[0].advisories[0].package == "vendor/package-a"' "$output" >/dev/null
        EOF

View file

@ -0,0 +1,17 @@
#!/usr/bin/env sh
# Stub `composer` for the qa audit CLI test: prints a fixed advisory
# payload for vendor/package-a and exits 1, mimicking `composer audit`
# when vulnerabilities are present.
cat <<'JSON'
{
  "advisories": {
    "vendor/package-a": [
      {
        "title": "Remote Code Execution",
        "link": "https://example.com/advisory/1",
        "cve": "CVE-2026-0001",
        "affectedVersions": ">=1.0,<1.5"
      }
    ]
  }
}
JSON
exit 1

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,5 @@
<?php
function bad_example() {
return "bad";
}

View file

@ -0,0 +1,18 @@
# CLI test: `qa docblock --threshold 100` fails (exit 1) because one
# fixture symbol (Beta) lacks a doc comment, and reports one warning
# for the fixture directory.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
        output="$(mktemp)"
        run_capture_stdout 1 "$output" ./bin/core qa docblock --json --threshold 100 fixtures/src
        jq -e '(.passed == false) and (.coverage < .threshold)' "$output" >/dev/null
        jq -e '(.missing | length == 1) and (.missing[0].name == "Beta")' "$output" >/dev/null
        jq -e '(.warnings | length == 1) and (.warnings[0].path == "fixtures/src")' "$output" >/dev/null
        EOF

View file

@ -0,0 +1,6 @@
//go:build ignore
package sample
// Alpha demonstrates a documented exported function.
func Alpha() {}

View file

@ -0,0 +1,5 @@
//go:build ignore
package sample
func Beta() {}

View file

@ -0,0 +1,5 @@
//go:build ignore
package sample
func Broken(

View file

@ -0,0 +1,18 @@
# CLI test: `qa fmt --json` reports the change described by the stubbed
# pint binary.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
        cd fixtures/project
        output="$(mktemp)"
        # Absolute PATH entry: relative entries are resolved against the
        # process cwd at lookup time, which breaks if the tool chdirs.
        # Matches the audit/health suites, which also use $(pwd).
        export PATH="$(pwd)/../bin:$PATH"
        run_capture_stdout 0 "$output" ../../bin/core qa fmt --json
        jq -e '.tool == "pint" and .changed == true and .files[0].path == "src/Bad.php"' "$output" >/dev/null
        EOF

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,5 @@
<?php
function bad_example() {
return "bad";
}

View file

@ -0,0 +1,3 @@
#!/usr/bin/env sh
# Stub `pint` for the qa fmt CLI test: reports one fixed file as JSON.
printf '%s\n' '{"tool":"pint","changed":true,"files":[{"path":"src/Bad.php","fixed":1}]}'

View file

@ -0,0 +1,20 @@
# CLI test: `qa health` aggregates workflow status via the stubbed gh
# binary: alpha reports a successful run, beta's lookup fails.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
        output="$(mktemp)"
        export PATH="$(pwd)/fixtures/bin:$PATH"
        run_capture_stdout 0 "$output" ./bin/core qa health --registry fixtures/repos.yaml --json
        jq -e '.summary.total_repos == 2 and .summary.filtered_repos == 2' "$output" >/dev/null
        jq -e '.summary.passing == 1 and .summary.errors == 1' "$output" >/dev/null
        jq -e '.repos[0].status == "error" and .repos[0].name == "beta"' "$output" >/dev/null
        jq -e '.repos[1].status == "passing" and .repos[1].name == "alpha"' "$output" >/dev/null
        EOF

View file

@ -0,0 +1,26 @@
#!/usr/bin/env sh
# Stub `gh` for the qa health CLI test. Dispatches on the full argument
# string: forge/alpha gets one successful workflow run, forge/beta
# simulates a lookup failure, anything else is an unexpected call.
case "$*" in
  *"--repo forge/alpha"*)
    # One completed, successful CI run for alpha.
    cat <<'JSON'
[
  {
    "status": "completed",
    "conclusion": "success",
    "name": "CI",
    "headSha": "abc123",
    "updatedAt": "2026-03-30T00:00:00Z",
    "url": "https://example.com/alpha/run/1"
  }
]
JSON
    ;;
  *"--repo forge/beta"*)
    # Deliberate failure so the test can assert error handling.
    printf '%s\n' 'simulated workflow lookup failure' >&2
    exit 1
    ;;
  *)
    # Any other invocation indicates a test or harness bug.
    printf '%s\n' "unexpected gh invocation: $*" >&2
    exit 1
    ;;
esac

View file

@ -0,0 +1,8 @@
# Registry fixture for the qa health CLI test: two module repos under
# the `forge` org (the gh stub makes alpha pass and beta fail).
version: 1
org: forge
base_path: .
repos:
  alpha:
    type: module
  beta:
    type: module

View file

@ -0,0 +1,22 @@
# CLI test: `qa infection` forwards every tuning flag to the stubbed
# infection binary, which echoes its arguments and exits 1.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
        cd fixtures/project
        output="$(mktemp)"
        run_capture_all 1 "$output" ../../bin/core qa infection --min-msi 80 --min-covered-msi 90 --threads 8 --filter src --only-covered
        grep -Fq "Mutation Testing" "$output"
        grep -Fq -- "--min-msi=80" "$output"
        grep -Fq -- "--min-covered-msi=90" "$output"
        grep -Fq -- "--threads=8" "$output"
        grep -Fq -- "--filter=src" "$output"
        grep -Fq -- "--only-covered" "$output"
        EOF

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,5 @@
<?php
function bad_example() {
return "bad";
}

View file

@ -0,0 +1,4 @@
#!/usr/bin/env sh
# Stub `infection`: echoes the received arguments (so the test can
# assert flag forwarding) and exits 1.
printf '%s\n' "infection args: $*"
exit 1

View file

@ -0,0 +1,20 @@
# CLI test: `qa issues` categorises the issue returned by the stubbed
# gh binary for alpha and records beta's fetch failure.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
        output="$(mktemp)"
        export PATH="$(pwd)/fixtures/bin:$PATH"
        run_capture_stdout 0 "$output" ./bin/core qa issues --registry fixtures/repos.yaml --json
        jq -e '.total_issues == 1 and .filtered_issues == 1' "$output" >/dev/null
        jq -e '.categories[0].category == "needs_response" and .categories[0].issues[0].repo_name == "alpha"' "$output" >/dev/null
        jq -e '.categories[0].issues[0].action_hint != ""' "$output" >/dev/null
        jq -e '.fetch_errors[0].repo == "beta"' "$output" >/dev/null
        EOF

View file

@ -0,0 +1,42 @@
#!/usr/bin/env sh
# Stub `gh` for the qa issues CLI test. Dispatches on the argument
# string: `api user` identifies the current user, alpha's issue list
# returns one open issue assigned to that user, beta's query fails.
case "$*" in
  *"api user"*)
    # Current user lookup — matches the assignee below.
    printf '%s\n' 'alice'
    ;;
  *"issue list --repo forge/alpha"*)
    # One open issue: assigned to alice, last comment by a third party,
    # so presumably it lands in the needs_response category — the
    # taskfile asserts exactly that.
    cat <<'JSON'
[
  {
    "number": 7,
    "title": "Clarify agent output",
    "state": "OPEN",
    "body": "Explain behaviour",
    "createdAt": "2026-03-30T00:00:00Z",
    "updatedAt": "2026-03-30T11:00:00Z",
    "author": {"login": "bob"},
    "assignees": {"nodes": [{"login": "alice"}]},
    "labels": {"nodes": [{"name": "agent:ready"}]},
    "comments": {
      "totalCount": 1,
      "nodes": [
        {
          "author": {"login": "carol"},
          "createdAt": "2026-03-30T10:30:00Z"
        }
      ]
    },
    "url": "https://example.com/issues/7"
  }
]
JSON
    ;;
  *"issue list --repo forge/beta"*)
    # Deliberate failure so fetch_errors reporting can be asserted.
    printf '%s\n' 'simulated issue query failure' >&2
    exit 1
    ;;
  *)
    # Any other invocation indicates a test or harness bug.
    printf '%s\n' "unexpected gh invocation: $*" >&2
    exit 1
    ;;
esac

View file

@ -0,0 +1,8 @@
# Registry fixture for the qa issues CLI test: two module repos under
# the `forge` org (the gh stub returns an issue for alpha and an error
# for beta).
version: 1
org: forge
base_path: .
repos:
  alpha:
    type: module
  beta:
    type: module

View file

@ -0,0 +1,17 @@
# CLI test: `qa psalm --json` propagates the issue list and non-zero
# exit from the stubbed psalm binary.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
        cd fixtures/project
        output="$(mktemp)"
        run_capture_stdout 1 "$output" ../../bin/core qa psalm --json
        jq -e '.tool == "psalm" and .issues[0].file == "src/Bad.php" and .issues[0].line == 3' "$output" >/dev/null
        EOF

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,5 @@
<psalm>
<projectFiles>
<directory name="src" />
</projectFiles>
</psalm>

View file

@ -0,0 +1,5 @@
<?php
function bad_example() {
return $anotherMissingVariable;
}

View file

@ -0,0 +1,4 @@
#!/usr/bin/env sh
# Stub `psalm`: emits one fixed issue as JSON and exits 1.
printf '%s\n' '{"tool":"psalm","issues":[{"file":"src/Bad.php","line":3,"message":"Undefined variable $anotherMissingVariable"}]}'
exit 1

View file

@ -0,0 +1,19 @@
# CLI test: `qa rector` runs in dry-run mode and relays the stubbed
# rector binary's suggestion output and failure exit.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
        cd fixtures/project
        output="$(mktemp)"
        run_capture_all 1 "$output" ../../bin/core qa rector
        grep -Fq "Rector Refactoring" "$output"
        grep -Fq "(dry-run)" "$output"
        grep -Fq "1 refactoring suggestion in src/Bad.php" "$output"
        EOF

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,3 @@
<?php
return [];

View file

@ -0,0 +1,5 @@
<?php
function bad_example() {
return "bad";
}

View file

@ -0,0 +1,4 @@
#!/usr/bin/env sh
# Stub `rector`: reports one refactoring suggestion and exits 1.
printf '%s\n' '1 refactoring suggestion in src/Bad.php'
exit 1

View file

@ -0,0 +1,20 @@
# CLI test: `qa review --json` splits pull requests into mine/requested
# buckets using the stubbed gh binary and records per-scope fetch
# errors.
version: "3"

tasks:
  test:
    cmds:
      - |
        bash <<'EOF'
        set -euo pipefail
        source ../../_lib/run.sh
        go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
        output="$(mktemp)"
        export PATH="$(pwd)/fixtures/bin:$PATH"
        run_capture_stdout 0 "$output" ./bin/core qa review --repo forge/example --json
        jq -e '.showing_mine == true and .showing_requested == true' "$output" >/dev/null
        jq -e '.mine | length == 0 and .requested | length == 1' "$output" >/dev/null
        jq -e '.requested[0].number == 42 and .requested[0].title == "Refine agent output"' "$output" >/dev/null
        jq -e '.fetch_errors[0].repo == "forge/example" and .fetch_errors[0].scope == "mine"' "$output" >/dev/null
        EOF

Some files were not shown because too many files have changed in this diff Show more