Compare commits

..

86 commits
v0.2.0 ... dev

Author SHA1 Message Date
Snider
525d8b993b fix: migrate module paths from forge.lthn.ai to dappco.re
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 16:21:14 +01:00
Virgil
7b0f800e08 fix(lint): skip hidden configured file paths
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 13:06:47 +00:00
Virgil
6ee67362ca refactor(lint): expand adapter helper names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:59:22 +00:00
Virgil
d1264dd88a fix(lint): exclude infra tools from language shortcuts
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:52:49 +00:00
Virgil
85dc5f75d0 refactor(lint): align naming with AX principles
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:48:12 +00:00
Virgil
10f89a83f2 fix(lint): add threshold summary to run failures
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:39:22 +00:00
Virgil
e3ae8caae2 refactor(lint): expand CLI flag names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:29:58 +00:00
Virgil
a567b72b18 docs(lint): add AX usage examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:24:29 +00:00
Virgil
602ea8bec0 docs(lint): add AX usage examples to orchestration types
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:19:35 +00:00
Virgil
63c4c51f21 fix(lint): skip hidden dirs in scanner
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:14:51 +00:00
Virgil
86ec27ca03 refactor(lint): expand service names for AX clarity
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 12:10:57 +00:00
Virgil
c7d6db8ee2 feat(lint): add sarif output for catalog checks
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:59:46 +00:00
Virgil
19f098cf43 fix(lint): preserve explicit empty file scopes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:53:19 +00:00
Virgil
b24021b8f8 refactor(lint): short-circuit explicit output resolution
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:47:18 +00:00
Virgil
5da4a1dbd1 fix(lint): preserve explicit empty file scopes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:43:25 +00:00
Virgil
71529076b3 refactor(lint): centralise run output resolution
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:37:01 +00:00
Virgil
a26a4e1301 refactor(lint): clarify orchestration names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:30:49 +00:00
Virgil
48acea0ef4 refactor(lint): add semantic tracked comment API
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:18:16 +00:00
Virgil
1e1ed30d04 fix(lint): honour lang precedence over ci and sbom groups
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:14:33 +00:00
Virgil
7e32c0c21c fix(lint): normalise report output levels
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 11:07:50 +00:00
Virgil
e7b41af939 feat(lint): detect cpp source files
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 10:58:09 +00:00
Virgil
ebc2c04c3d fix(lint): normalise empty orchestration outputs
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 10:53:06 +00:00
Virgil
23c5d20b1b feat(lint): add named schedule presets
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 10:46:41 +00:00
Virgil
3db0553082 feat(lint): honour configured exclude paths
Co-authored-by: Virgil <virgil@lethean.io>
2026-04-01 10:38:57 +00:00
Virgil
54a82bfe1a feat(lint): honour configured scan paths
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-01 10:34:40 +00:00
Virgil
7c92d313a6 Fix hook mode with no staged files 2026-04-01 09:54:32 +00:00
Virgil
be7f9fe966 Improve lint tool inventory output 2026-04-01 07:19:43 +00:00
Virgil
c82c57748c Add core-lint files flag 2026-04-01 07:11:28 +00:00
Virgil
0f5648aba9 Improve lint API usage examples 2026-04-01 07:04:18 +00:00
Virgil
e9085f6489 Refine core-lint command naming 2026-04-01 06:57:15 +00:00
Virgil
4500d5eb80 Fix catalog scanner language detection 2026-04-01 06:49:10 +00:00
Virgil
7a86afbc65 Improve lint command AX ergonomics 2026-04-01 06:43:30 +00:00
Virgil
877a757d8c Preserve hook content on removal 2026-04-01 06:36:55 +00:00
Virgil
b8ee543bae Populate adapter versions in lint reports 2026-04-01 06:28:04 +00:00
Virgil
20875bc066 Deduplicate merged lint findings 2026-04-01 05:41:25 +00:00
Virgil
8798210a4d Add prettier lint adapter 2026-04-01 05:13:43 +00:00
Virgil
382fe209de chore: verify lint RFC implementation 2026-03-30 15:18:30 +00:00
Virgil
0b41f3caeb docs: clarify lint RFC stream semantics 2026-03-30 15:10:39 +00:00
Virgil
4414aea2b0 docs: clarify lint RFC execution semantics 2026-03-30 15:05:08 +00:00
Virgil
6d226ce8e2 docs: clarify lint tool inventory contract 2026-03-30 14:59:41 +00:00
Virgil
e772a1f0f6 docs: clarify lint compliance shortcut semantics 2026-03-30 14:53:40 +00:00
Virgil
d5be05c578 docs: align lint RFC with AX standard 2026-03-30 14:32:40 +00:00
Virgil
8622e582ab docs: clarify lint RFC contract 2026-03-30 14:27:10 +00:00
Virgil
6ed4ab5ac5 docs: tighten lint RFC contract details 2026-03-30 14:20:32 +00:00
Virgil
0d9fbd7906 docs: rewrite lint RFC to match implementation 2026-03-30 14:14:58 +00:00
Virgil
7ab634bcd2 feat(ax): implement RFC lint orchestration CLI 2026-03-30 13:50:39 +00:00
Snider
119df680d7 docs: add lint RFC and AX RFC to repo docs for agent access
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-30 14:24:33 +01:00
Virgil
eec45af6cc fix(ax): harden structured output paths 2026-03-30 12:28:42 +00:00
Virgil
1660fe025f test(ax): sync health and security coverage 2026-03-30 12:20:10 +00:00
Virgil
7b2bb529e1 fix(ax): honour php security flags 2026-03-30 12:11:28 +00:00
Virgil
364b4b96de fix(ax): normalise audit and health machine output 2026-03-30 11:59:38 +00:00
Virgil
140d2b0583 test(cli): add artifact validation harnesses 2026-03-30 11:45:35 +00:00
Virgil
4a6f59b6fc fix(ax): fail total review and issue outages 2026-03-30 11:22:50 +00:00
Virgil
e05d7cf070 fix(ax): stabilise watch output
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-30 10:52:49 +00:00
Virgil
d5bc922325 fix(ax): stabilise map-derived ordering 2026-03-30 10:46:52 +00:00
Virgil
e1616a055d fix(ax): stabilise remaining qa output
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-30 10:41:49 +00:00
Virgil
30691b883c fix(ax): stabilise issue triage ordering
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-30 10:38:01 +00:00
Virgil
72b4fc4871 fix(ax): keep health summaries accurate under filters 2026-03-30 10:28:16 +00:00
Virgil
1f34ead44f fix(ax): make health and issues machine-friendly 2026-03-30 10:24:38 +00:00
Virgil
95c32c21ca fix(ax): preserve partial review results 2026-03-30 10:14:03 +00:00
Virgil
29a2722eda fix(ax): preserve docblock partial results 2026-03-30 10:05:38 +00:00
Virgil
8c8c6a9d2e fix(ax): clean php structured output modes 2026-03-30 08:14:46 +00:00
Virgil
6d202bb1d9 fix(qa): remove duplicated audit JSON helpers 2026-03-30 07:57:32 +00:00
Virgil
d9d7ae7ffa fix(ax): complete machine-friendly QA output updates 2026-03-30 07:55:44 +00:00
Virgil
3af8556d64 feat(qa): include fetch errors in issues/review JSON and continue on partial failures 2026-03-30 07:43:38 +00:00
Virgil
dfed5e3ab1 feat(qa): make php commands JSON-clean and deterministic 2026-03-30 07:43:29 +00:00
Virgil
a3648041ce feat(qa,lint): add deterministic and JSON-friendly command output 2026-03-30 07:43:29 +00:00
Virgil
cf9e43f0ad feat: improve qa output determinism and JSON modes 2026-03-30 07:43:29 +00:00
Virgil
aa57d1e09f feat(qa): align php command output with AX machine-friendly mode 2026-03-30 07:35:19 +00:00
Virgil
182f108d37 feat: align qa and lint outputs with agent experience 2026-03-30 07:21:21 +00:00
Snider
8ab944d0e7 chore: sync dependencies for v0.3.5
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 17:54:30 +00:00
Snider
876c65bd70 chore: sync dependencies for v0.3.4
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 17:49:52 +00:00
Snider
471266200e refactor: pass locales via RegisterCommands
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 01:38:32 +00:00
Snider
e7d469cc8d feat: embed and load locale translations on init
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 00:45:18 +00:00
Snider
94df217e84 feat: add en-GB locale file for QA commands
69 translation keys for qa, format, test, coverage, watch commands.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-17 00:33:04 +00:00
Snider
a001224b68 chore: sync dependencies for v0.3.3
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-16 22:18:05 +00:00
Snider
9681b062ac refactor: replace all remaining fmt.Errorf with coreerr.E from go-log
Replaces ~46 fmt.Errorf and errors.New calls in production code across
pkg/lint, pkg/php, cmd/core-lint, and cmd/qa with structured coreerr.E()
calls using the forge.lthn.ai/core/go-log convention.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-16 21:48:58 +00:00
Snider
a36f835fe0 refactor: replace os.* and fmt.Errorf with go-io/go-log conventions
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-16 19:44:45 +00:00
Snider
b0bb2de141 chore: sync go.mod dependencies
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-15 15:38:09 +00:00
Snider
44619055ab chore: add .core/ and .idea/ to .gitignore 2026-03-15 10:17:50 +00:00
Snider
21f19249db fix: update stale import paths and dependency versions from extraction
Resolve stale forge.lthn.ai/core/cli v0.1.0 references (tag never existed,
earliest is v0.0.1) and regenerate go.sum via workspace-aware go mod tidy.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-14 13:38:59 +00:00
Snider
75b306c308 docs: add CLAUDE.md project instructions
Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-13 13:38:02 +00:00
Snider
e876b62045 docs: add human-friendly documentation
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 13:02:40 +00:00
Snider
f1aae0055f feat(lint): add PHP QA commands to core qa
Add 8 PHP subcommands to the qa parent command: fmt, stan, psalm,
audit, security, rector, infection, and test. Each command detects
the PHP project and delegates to the pkg/php library functions.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-09 13:17:30 +00:00
Snider
af5c792da8 feat(lint): add pkg/detect + pkg/php — project detection and PHP QA toolchain
Add project type detection (pkg/detect) and complete PHP quality assurance
package (pkg/php) with formatter, analyser, audit, security, refactor,
mutation testing, test runner, pipeline stages, and QA runner that builds
process.RunSpec for orchestrated execution.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-09 13:13:30 +00:00
Snider
bf06489806 feat: add cmd/qa from go-devops
QA commands (watch, review, health, issues, docblock) now live
alongside the lint library they depend on.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-03-09 12:28:24 +00:00
134 changed files with 14795 additions and 299 deletions

2
.gitignore vendored
View file

@ -1 +1,3 @@
bin/
.core/
.idea/

View file

@ -1,30 +1,88 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
`core/lint` is a standalone pattern catalog and regex-based code checker. It loads YAML rule definitions and matches them against source files. Zero framework dependencies.
`core/lint` is a standalone pattern catalog, regex-based code checker, and multi-language QA toolkit. It loads YAML rule definitions and matches them against source files, plus wraps external Go and PHP tooling into structured APIs. Zero framework dependencies — uses `forge.lthn.ai/core/cli` for CLI scaffolding only.
## Build & Development
```bash
core go test
core go qa
core build # produces ./bin/core-lint
core go test # run all tests
core go test ./pkg/lint/... # run tests for a specific package
core go qa # full QA pipeline (vet, lint, test)
core build # produces ./bin/core-lint
```
Run a single test:
```bash
go test -run TestMatcherExcludePattern ./pkg/lint/
```
## Architecture
- `catalog/` — YAML rule files (embedded at compile time)
- `pkg/lint/` — Library: Rule, Catalog, Matcher, Scanner, Report types
- `cmd/core-lint/` — CLI binary using `cli.Main()`
Three distinct subsystems share this module:
### 1. Pattern Catalog & Scanner (`pkg/lint/`)
The core lint engine. YAML rules in `catalog/` are embedded at compile time via `//go:embed` in `lint.go` and loaded through `LoadEmbeddedCatalog()`.
**Data flow:** YAML → `ParseRules` → `Catalog` → filter by language/severity → `NewMatcher` (compiles regexes) → `Scanner.ScanDir`/`ScanFile` → `[]Finding` → output as text/JSON/JSONL via `report.go`.
Key types:
- `Rule` — parsed from YAML, validated with `Validate()`. Only `detection: "regex"` rules are matched; other detection types are stored but skipped by `Matcher`.
- `Matcher` — holds pre-compiled `regexp.Regexp` for each rule's `pattern` and optional `exclude_pattern`. Matches line-by-line.
- `Scanner` — walks directory trees, auto-detects language from file extension (`extensionMap`), skips `vendor/node_modules/.git/testdata/.core`.
- `Finding` — a match result with rule ID, file, line, severity, and fix suggestion.
### 2. Go Dev Toolkit (`pkg/lint/tools.go`, `complexity.go`, `coverage.go`, `vulncheck.go`)
Structured Go APIs wrapping external tools (`go vet`, `govulncheck`, `gocyclo`, `gitleaks`, `git`). The `Toolkit` type executes subprocesses and parses their output into typed structs (`ToolFinding`, `Vulnerability`, `CoverageReport`, `RaceCondition`, etc.).
`complexity.go` provides native AST-based cyclomatic complexity analysis (no external tools needed) via `AnalyseComplexity`.
`coverage.go` provides `CoverageStore` for persisting and comparing coverage snapshots over time, detecting regressions.
`vulncheck.go` parses `govulncheck -json` NDJSON output into `VulnFinding` structs.
### 3. PHP QA Toolchain (`pkg/php/`, `pkg/detect/`, `cmd/qa/`)
Wraps PHP ecosystem tools (Pint, PHPStan/Larastan, Psalm, Rector, Infection, PHPUnit/Pest, composer audit). `pkg/detect/` identifies project type by filesystem markers (go.mod → Go, composer.json → PHP).
`pkg/php/pipeline.go` defines a staged QA pipeline: quick (audit, fmt, stan) → standard (+psalm, test) → full (+rector, infection).
`pkg/php/runner.go` builds `process.RunSpec` entries with dependency ordering (`After` field) for the `core/go-process` runner.
### CLI Entry Points
- `cmd/core-lint/main.go` — `core-lint lint check` and `core-lint lint catalog` commands
- `cmd/qa/` — `core qa` subcommands registered via `init()` → `cli.RegisterCommands`. Go-focused (watch, review, health, issues, docblock) and PHP-focused (fmt, stan, psalm, audit, security, rector, infection, test).
## Rule Schema
Each YAML file contains an array of rules with: id, title, severity, languages, tags, pattern (regex), exclude_pattern, fix, example_bad, example_good, detection type.
Each YAML file in `catalog/` contains an array of rules:
```yaml
- id: go-sec-001 # unique identifier
title: "..." # human-readable title
severity: high # info | low | medium | high | critical
languages: [go] # file extensions mapped via extensionMap
tags: [security] # free-form tags
pattern: 'regex' # Go regexp syntax
exclude_pattern: 'regex' # optional, skips matching lines/files
fix: "..." # suggested fix text
detection: regex # only "regex" is actively matched
auto_fixable: false
example_bad: '...'
example_good: '...'
```
Rules are validated on load — `Validate()` checks required fields and compiles regex patterns.
## Coding Standards
- UK English
- UK English (e.g. `Analyse`, `Summarise`, `Colour`)
- All functions have typed params/returns
- Tests use testify
- License: EUPL-1.2
- Tests use `testify` (assert/require)
- Licence: EUPL-1.2

View file

@ -1,13 +1,17 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"strings"
"forge.lthn.ai/core/cli/pkg/cli"
lint "forge.lthn.ai/core/lint"
coreerr "forge.lthn.ai/core/go-log"
cataloglint "forge.lthn.ai/core/lint"
lintpkg "forge.lthn.ai/core/lint/pkg/lint"
)
@ -17,37 +21,258 @@ func main() {
}
func addLintCommands(root *cli.Command) {
lintCmd := cli.NewGroup("lint", "Pattern-based code linter", "")
addRFCCommands(root)
// ── check ──────────────────────────────────────────────────────────────
lintCmd := cli.NewGroup("lint", "Pattern-based code linter", "")
lintCmd.AddCommand(newCheckCommand(), newCatalogCommand())
addRFCCommands(lintCmd)
root.AddCommand(lintCmd)
}
func addRFCCommands(parent *cli.Command) {
parent.AddCommand(
newRunCommand("run", "Run configured linters", lintpkg.RunInput{}),
newDetectCommand("detect", "Detect project languages"),
newToolsCommand("tools", "List supported linter tools"),
newInitCommand("init", "Generate .core/lint.yaml"),
newRunCommand("go", "Run Go linters", lintpkg.RunInput{Lang: "go"}),
newRunCommand("php", "Run PHP linters", lintpkg.RunInput{Lang: "php"}),
newRunCommand("js", "Run JS/TS linters", lintpkg.RunInput{Lang: "js"}),
newRunCommand("python", "Run Python linters", lintpkg.RunInput{Lang: "python"}),
newRunCommand("security", "Run security linters", lintpkg.RunInput{Category: "security"}),
newRunCommand("compliance", "Run compliance linters", lintpkg.RunInput{Category: "compliance"}),
newHookCommand(),
)
}
func newRunCommand(commandName string, summary string, defaults lintpkg.RunInput) *cli.Command {
var (
checkFormat string
checkLang string
checkSeverity string
outputFormat string
configPath string
scheduleName string
failOnLevel string
categoryName string
languageName string
filePaths []string
hookMode bool
ciMode bool
sbomMode bool
)
checkCmd := cli.NewCommand("check", "Scan files for pattern matches", "", func(cmd *cli.Command, args []string) error {
cat, err := lint.LoadEmbeddedCatalog()
if err != nil {
return fmt.Errorf("loading catalog: %w", err)
command := cli.NewCommand(commandName, summary, "", func(command *cli.Command, args []string) error {
input := defaults
input.Output = outputFormat
input.Config = configPath
input.Schedule = scheduleName
input.FailOn = failOnLevel
input.Category = categoryName
input.Lang = languageName
input.Files = filePaths
input.Hook = hookMode
input.CI = ciMode
input.SBOM = sbomMode
if len(args) > 0 {
input.Path = args[0]
}
if input.Path == "" {
input.Path = "."
}
rules := cat.Rules
resolvedOutputFormat, err := lintpkg.ResolveRunOutputFormat(input)
if err != nil {
return err
}
input.Output = resolvedOutputFormat
// Filter by language if specified.
if checkLang != "" {
rules = cat.ForLanguage(checkLang)
service := lintpkg.NewService()
report, err := service.Run(context.Background(), input)
if err != nil {
return err
}
if err := writeReport(command.OutOrStdout(), input.Output, report); err != nil {
return err
}
if !report.Summary.Passed {
return coreerr.E(
"cmd."+commandName,
fmt.Sprintf(
"lint failed (fail-on=%s): %d error(s), %d warning(s), %d info finding(s)",
input.FailOn,
report.Summary.Errors,
report.Summary.Warnings,
report.Summary.Info,
),
nil,
)
}
return nil
})
cli.StringFlag(command, &outputFormat, "output", "o", defaults.Output, "Output format: json, text, github, sarif")
cli.StringFlag(command, &configPath, "config", "c", defaults.Config, "Config path (default: .core/lint.yaml)")
cli.StringFlag(command, &scheduleName, "schedule", "", "", "Run a named schedule from the config")
cli.StringFlag(command, &failOnLevel, "fail-on", "", defaults.FailOn, "Fail threshold: error, warning, info")
cli.StringFlag(command, &categoryName, "category", "", defaults.Category, "Restrict to one category")
cli.StringFlag(command, &languageName, "lang", "l", defaults.Lang, "Restrict to one language")
cli.StringSliceFlag(command, &filePaths, "files", "", defaults.Files, "Restrict scanning to specific files")
cli.BoolFlag(command, &hookMode, "hook", "", defaults.Hook, "Run in pre-commit mode against staged files")
cli.BoolFlag(command, &ciMode, "ci", "", defaults.CI, "GitHub Actions mode (github annotations)")
cli.BoolFlag(command, &sbomMode, "sbom", "", defaults.SBOM, "Enable compliance/SBOM tools")
return command
}
func newDetectCommand(commandName string, summary string) *cli.Command {
var output string
command := cli.NewCommand(commandName, summary, "", func(command *cli.Command, args []string) error {
projectPath := "."
if len(args) > 0 {
projectPath = args[0]
}
languages := lintpkg.Detect(projectPath)
switch output {
case "", "text":
for _, language := range languages {
fmt.Fprintln(command.OutOrStdout(), language)
}
return nil
case "json":
return writeIndentedJSON(command.OutOrStdout(), languages)
default:
return coreerr.E("cmd.detect", "unsupported output format "+output, nil)
}
})
cli.StringFlag(command, &output, "output", "o", "text", "Output format: text, json")
return command
}
func newToolsCommand(commandName string, summary string) *cli.Command {
var output string
var languageFilter string
command := cli.NewCommand(commandName, summary, "", func(command *cli.Command, args []string) error {
service := lintpkg.NewService()
var languages []string
if languageFilter != "" {
languages = []string{languageFilter}
}
tools := service.Tools(languages)
switch output {
case "", "text":
for _, tool := range tools {
status := "missing"
if tool.Available {
status = "available"
}
line := fmt.Sprintf("%-14s [%-11s] %s langs=%s", tool.Name, tool.Category, status, strings.Join(tool.Languages, ","))
if tool.Entitlement != "" {
line += " entitlement=" + tool.Entitlement
}
fmt.Fprintln(command.OutOrStdout(), line)
}
return nil
case "json":
return writeIndentedJSON(command.OutOrStdout(), tools)
default:
return coreerr.E("cmd.tools", "unsupported output format "+output, nil)
}
})
cli.StringFlag(command, &output, "output", "o", "text", "Output format: text, json")
cli.StringFlag(command, &languageFilter, "lang", "l", "", "Filter by language")
return command
}
func newInitCommand(commandName string, summary string) *cli.Command {
var force bool
command := cli.NewCommand(commandName, summary, "", func(command *cli.Command, args []string) error {
projectPath := "."
if len(args) > 0 {
projectPath = args[0]
}
service := lintpkg.NewService()
writtenPath, err := service.WriteDefaultConfig(projectPath, force)
if err != nil {
return err
}
fmt.Fprintln(command.OutOrStdout(), writtenPath)
return nil
})
cli.BoolFlag(command, &force, "force", "f", false, "Overwrite an existing config")
return command
}
func newHookCommand() *cli.Command {
hookCmd := cli.NewGroup("hook", "Install or remove the git pre-commit hook", "")
installCmd := cli.NewCommand("install", "Install the pre-commit hook", "", func(command *cli.Command, args []string) error {
projectPath := "."
if len(args) > 0 {
projectPath = args[0]
}
service := lintpkg.NewService()
if err := service.InstallHook(projectPath); err != nil {
return err
}
fmt.Fprintln(command.OutOrStdout(), "installed")
return nil
})
removeCmd := cli.NewCommand("remove", "Remove the pre-commit hook", "", func(command *cli.Command, args []string) error {
projectPath := "."
if len(args) > 0 {
projectPath = args[0]
}
service := lintpkg.NewService()
if err := service.RemoveHook(projectPath); err != nil {
return err
}
fmt.Fprintln(command.OutOrStdout(), "removed")
return nil
})
hookCmd.AddCommand(installCmd, removeCmd)
return hookCmd
}
func newCheckCommand() *cli.Command {
var (
format string
language string
severity string
)
command := cli.NewCommand("check", "Scan files for pattern matches", "", func(command *cli.Command, args []string) error {
catalog, err := cataloglint.LoadEmbeddedCatalog()
if err != nil {
return coreerr.E("cmd.check", "loading catalog", err)
}
rules := catalog.Rules
if language != "" {
rules = catalog.ForLanguage(language)
if len(rules) == 0 {
fmt.Fprintf(os.Stderr, "no rules for language %q\n", checkLang)
fmt.Fprintf(os.Stderr, "no rules for language %q\n", language)
return nil
}
}
// Filter by severity threshold if specified.
if checkSeverity != "" {
filtered := (&lintpkg.Catalog{Rules: rules}).AtSeverity(checkSeverity)
if severity != "" {
filtered := (&lintpkg.Catalog{Rules: rules}).AtSeverity(severity)
if len(filtered) == 0 {
fmt.Fprintf(os.Stderr, "no rules at severity %q or above\n", checkSeverity)
fmt.Fprintf(os.Stderr, "no rules at severity %q or above\n", severity)
return nil
}
rules = filtered
@ -55,7 +280,7 @@ func addLintCommands(root *cli.Command) {
scanner, err := lintpkg.NewScanner(rules)
if err != nil {
return fmt.Errorf("creating scanner: %w", err)
return coreerr.E("cmd.check", "creating scanner", err)
}
paths := args
@ -63,110 +288,176 @@ func addLintCommands(root *cli.Command) {
paths = []string{"."}
}
var allFindings []lintpkg.Finding
for _, p := range paths {
info, err := os.Stat(p)
var findings []lintpkg.Finding
for _, path := range paths {
info, err := os.Stat(path)
if err != nil {
return fmt.Errorf("stat %s: %w", p, err)
return coreerr.E("cmd.check", "stat "+path, err)
}
var findings []lintpkg.Finding
if info.IsDir() {
findings, err = scanner.ScanDir(p)
} else {
findings, err = scanner.ScanFile(p)
pathFindings, err := scanner.ScanDir(path)
if err != nil {
return err
}
findings = append(findings, pathFindings...)
continue
}
pathFindings, err := scanner.ScanFile(path)
if err != nil {
return err
}
allFindings = append(allFindings, findings...)
findings = append(findings, pathFindings...)
}
switch checkFormat {
switch format {
case "json":
return lintpkg.WriteJSON(os.Stdout, allFindings)
return lintpkg.WriteJSON(command.OutOrStdout(), findings)
case "jsonl":
return lintpkg.WriteJSONL(os.Stdout, allFindings)
return lintpkg.WriteJSONL(command.OutOrStdout(), findings)
case "sarif":
report := lintpkg.Report{
Findings: findings,
Summary: lintpkg.Summarise(findings),
}
return lintpkg.WriteReportSARIF(command.OutOrStdout(), report)
default:
lintpkg.WriteText(os.Stdout, allFindings)
}
if len(allFindings) > 0 {
summary := lintpkg.Summarise(allFindings)
fmt.Fprintf(os.Stderr, "\n%d finding(s)", summary.Total)
var parts []string
for sev, count := range summary.BySeverity {
parts = append(parts, fmt.Sprintf("%d %s", count, sev))
lintpkg.WriteText(command.OutOrStdout(), findings)
if format == "text" && len(findings) > 0 {
writeCatalogSummary(command.OutOrStdout(), findings)
}
if len(parts) > 0 {
fmt.Fprintf(os.Stderr, " (%s)", strings.Join(parts, ", "))
}
fmt.Fprintln(os.Stderr)
return nil
}
return nil
})
cli.StringFlag(checkCmd, &checkFormat, "format", "f", "text", "Output format: text, json, jsonl")
cli.StringFlag(checkCmd, &checkLang, "lang", "l", "", "Filter rules by language (e.g. go, php, ts)")
cli.StringFlag(checkCmd, &checkSeverity, "severity", "s", "", "Minimum severity threshold (info, low, medium, high, critical)")
cli.StringFlag(command, &format, "format", "f", "text", "Output format: text, json, jsonl, sarif")
cli.StringFlag(command, &language, "lang", "l", "", "Filter rules by language")
cli.StringFlag(command, &severity, "severity", "s", "", "Minimum severity threshold (info, low, medium, high, critical)")
// ── catalog ────────────────────────────────────────────────────────────
return command
}
func newCatalogCommand() *cli.Command {
catalogCmd := cli.NewGroup("catalog", "Browse the pattern catalog", "")
// catalog list
var listLang string
listCmd := cli.NewCommand("list", "List all rules in the catalog", "", func(cmd *cli.Command, args []string) error {
cat, err := lint.LoadEmbeddedCatalog()
var listLanguage string
listCmd := cli.NewCommand("list", "List all rules in the catalog", "", func(command *cli.Command, args []string) error {
catalog, err := cataloglint.LoadEmbeddedCatalog()
if err != nil {
return fmt.Errorf("loading catalog: %w", err)
return coreerr.E("cmd.catalog.list", "loading catalog", err)
}
rules := cat.Rules
if listLang != "" {
rules = cat.ForLanguage(listLang)
rules := catalog.Rules
if listLanguage != "" {
rules = catalog.ForLanguage(listLanguage)
}
if len(rules) == 0 {
fmt.Println("No rules found.")
fmt.Fprintln(command.OutOrStdout(), "No rules found.")
return nil
}
for _, r := range rules {
fmt.Printf("%-14s [%-8s] %s\n", r.ID, r.Severity, r.Title)
rules = append([]lintpkg.Rule(nil), rules...)
sort.Slice(rules, func(left int, right int) bool {
if rules[left].Severity == rules[right].Severity {
return strings.Compare(rules[left].ID, rules[right].ID) < 0
}
return strings.Compare(rules[left].Severity, rules[right].Severity) < 0
})
for _, rule := range rules {
fmt.Fprintf(command.OutOrStdout(), "%-14s [%-8s] %s\n", rule.ID, rule.Severity, rule.Title)
}
fmt.Fprintf(os.Stderr, "\n%d rule(s)\n", len(rules))
return nil
})
cli.StringFlag(listCmd, &listLanguage, "lang", "l", "", "Filter by language")
cli.StringFlag(listCmd, &listLang, "lang", "l", "", "Filter by language")
// catalog show
showCmd := cli.NewCommand("show", "Show details of a specific rule", "", func(cmd *cli.Command, args []string) error {
showCmd := cli.NewCommand("show", "Show details of a specific rule", "", func(command *cli.Command, args []string) error {
if len(args) == 0 {
return fmt.Errorf("rule ID required")
return coreerr.E("cmd.catalog.show", "rule ID required", nil)
}
cat, err := lint.LoadEmbeddedCatalog()
catalog, err := cataloglint.LoadEmbeddedCatalog()
if err != nil {
return fmt.Errorf("loading catalog: %w", err)
return coreerr.E("cmd.catalog.show", "loading catalog", err)
}
r := cat.ByID(args[0])
if r == nil {
return fmt.Errorf("rule %q not found", args[0])
rule := catalog.ByID(args[0])
if rule == nil {
return coreerr.E("cmd.catalog.show", "rule "+args[0]+" not found", nil)
}
data, err := json.MarshalIndent(r, "", " ")
data, err := json.MarshalIndent(rule, "", " ")
if err != nil {
return err
}
fmt.Println(string(data))
fmt.Fprintf(command.OutOrStdout(), "%s\n", string(data))
return nil
})
catalogCmd.AddCommand(listCmd, showCmd)
lintCmd.AddCommand(checkCmd, catalogCmd)
root.AddCommand(lintCmd)
return catalogCmd
}
func writeReport(writer io.Writer, output string, report lintpkg.Report) error {
switch output {
case "json":
return lintpkg.WriteReportJSON(writer, report)
case "text":
lintpkg.WriteReportText(writer, report)
return nil
case "github":
lintpkg.WriteReportGitHub(writer, report)
return nil
case "sarif":
return lintpkg.WriteReportSARIF(writer, report)
default:
return coreerr.E("writeReport", "unsupported output format "+output, nil)
}
}
func writeIndentedJSON(writer io.Writer, value any) error {
encoder := json.NewEncoder(writer)
encoder.SetIndent("", " ")
return encoder.Encode(value)
}
func writeCatalogSummary(writer io.Writer, findings []lintpkg.Finding) {
summary := lintpkg.Summarise(findings)
fmt.Fprintf(writer, "\n%d finding(s)", summary.Total)
orderedSeverities := []string{"critical", "high", "medium", "low", "info", "error", "warning"}
seen := make(map[string]bool, len(summary.BySeverity))
var parts []string
for _, severity := range orderedSeverities {
count := summary.BySeverity[severity]
if count == 0 {
continue
}
seen[severity] = true
parts = append(parts, fmt.Sprintf("%d %s", count, severity))
}
var extraSeverities []string
for severity := range summary.BySeverity {
if seen[severity] {
continue
}
extraSeverities = append(extraSeverities, severity)
}
sort.Strings(extraSeverities)
for _, severity := range extraSeverities {
count := summary.BySeverity[severity]
if count == 0 {
continue
}
parts = append(parts, fmt.Sprintf("%d %s", count, severity))
}
if len(parts) > 0 {
fmt.Fprintf(writer, " (%s)", strings.Join(parts, ", "))
}
fmt.Fprintln(writer)
}

286
cmd/core-lint/main_test.go Normal file
View file

@ -0,0 +1,286 @@
package main
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"testing"
lintpkg "forge.lthn.ai/core/lint/pkg/lint"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
buildBinaryOnce sync.Once
builtBinaryPath string
buildBinaryErr error
)
// TestCLI_Run_JSON runs the binary against a module containing one known
// violation and checks both the JSON report contents and the non-zero
// exit behaviour of --fail-on=warning.
func TestCLI_Run_JSON(t *testing.T) {
	dir := t.TempDir()
	buildCLI(t)
	// An empty PATH keeps externally-installed lint tools out of the run
	// so only built-in checks execute.
	t.Setenv("PATH", t.TempDir())
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	// Fixture expected to trip go-cor-003 (per the assertion below).
	require.NoError(t, os.WriteFile(filepath.Join(dir, "input.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("data")
}
`), 0o644))
	stdout, stderr, exitCode := runCLI(t, dir, "run", "--output", "json", "--fail-on", "warning", dir)
	// fail-on=warning must turn the single finding into exit code 1.
	assert.Equal(t, 1, exitCode, stderr)
	assert.Contains(t, stderr, "lint failed (fail-on=warning)")
	var report lintpkg.Report
	require.NoError(t, json.Unmarshal([]byte(stdout), &report))
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "go-cor-003", report.Findings[0].Code)
	assert.Equal(t, 1, report.Summary.Total)
	assert.False(t, report.Summary.Passed)
}
// TestCLI_Run_FilesFlagLimitsScanning verifies that --files restricts the
// scan to the named file: the violation in ignored.go must not appear.
func TestCLI_Run_FilesFlagLimitsScanning(t *testing.T) {
	dir := t.TempDir()
	buildCLI(t)
	// Empty PATH: built-in checks only, no external tools.
	t.Setenv("PATH", t.TempDir())
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "clean.go"), []byte(`package sample
func Clean() {}
`), 0o644))
	// ignored.go would produce findings if it were scanned.
	require.NoError(t, os.WriteFile(filepath.Join(dir, "ignored.go"), []byte(`package sample
func Run() {
_ = helper()
}
func helper() error { return nil }
`), 0o644))
	stdout, stderr, exitCode := runCLI(t, dir, "run", "--output", "json", "--files", "clean.go", dir)
	assert.Equal(t, 0, exitCode, stderr)
	var report lintpkg.Report
	require.NoError(t, json.Unmarshal([]byte(stdout), &report))
	assert.Empty(t, report.Findings)
	assert.Equal(t, 0, report.Summary.Total)
	assert.True(t, report.Summary.Passed)
}
// TestCLI_Run_ScheduleAppliesPreset verifies that --schedule applies the
// named preset from .core/lint.yaml: "nightly" switches output to JSON and
// narrows the scan to services/, so the violation in root.go is not seen.
func TestCLI_Run_ScheduleAppliesPreset(t *testing.T) {
	dir := t.TempDir()
	buildCLI(t)
	// Empty PATH: built-in checks only, no external tools.
	t.Setenv("PATH", t.TempDir())
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	// root.go carries a violation that must be excluded by the preset paths.
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("root")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "services"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "services", "clean.go"), []byte(`package sample
func Clean() {}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(`output: text
schedules:
nightly:
output: json
paths:
- services
`), 0o644))
	stdout, stderr, exitCode := runCLI(t, dir, "run", "--schedule", "nightly", dir)
	assert.Equal(t, 0, exitCode, stderr)
	// JSON output proves the preset's output override took effect.
	var report lintpkg.Report
	require.NoError(t, json.Unmarshal([]byte(stdout), &report))
	assert.Empty(t, report.Findings)
	assert.Equal(t, 0, report.Summary.Total)
	assert.True(t, report.Summary.Passed)
}
// TestCLI_Detect_JSON verifies that `detect --output json` reports the
// languages present in a directory as a JSON string array.
func TestCLI_Detect_JSON(t *testing.T) {
	dir := t.TempDir()
	goModPath := filepath.Join(dir, "go.mod")
	packageJSONPath := filepath.Join(dir, "package.json")
	require.NoError(t, os.WriteFile(goModPath, []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(packageJSONPath, []byte("{}\n"), 0o644))

	stdout, stderr, exitCode := runCLI(t, dir, "detect", "--output", "json", dir)
	assert.Equal(t, 0, exitCode, stderr)

	var languages []string
	require.NoError(t, json.Unmarshal([]byte(stdout), &languages))
	assert.Equal(t, []string{"go", "js"}, languages)
}
// TestCLI_Init_WritesConfig verifies that `init` creates .core/lint.yaml
// with tool and threshold defaults and reports the path on stdout.
func TestCLI_Init_WritesConfig(t *testing.T) {
	dir := t.TempDir()

	stdout, stderr, exitCode := runCLI(t, dir, "init", dir)
	assert.Equal(t, 0, exitCode, stderr)
	assert.Contains(t, stdout, ".core/lint.yaml")

	written, err := os.ReadFile(filepath.Join(dir, ".core", "lint.yaml"))
	require.NoError(t, err)
	config := string(written)
	assert.Contains(t, config, "golangci-lint")
	assert.Contains(t, config, "fail_on: error")
}
// TestCLI_Tools_TextIncludesMetadata verifies that `tools --lang go` lists
// a discovered tool together with its language and entitlement metadata.
func TestCLI_Tools_TextIncludesMetadata(t *testing.T) {
	buildCLI(t)
	// Plant a fake gosec on PATH so tool discovery succeeds without the
	// real binary being installed.
	binDir := t.TempDir()
	fakeToolPath := filepath.Join(binDir, "gosec")
	require.NoError(t, os.WriteFile(fakeToolPath, []byte("#!/bin/sh\nexit 0\n"), 0o755))
	t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
	command := exec.Command(buildCLI(t), "tools", "--lang", "go")
	command.Dir = t.TempDir()
	command.Env = os.Environ()
	output, err := command.CombinedOutput()
	require.NoError(t, err, string(output))
	text := string(output)
	assert.Contains(t, text, "gosec")
	assert.Contains(t, text, "langs=go")
	assert.Contains(t, text, "entitlement=lint.security")
}
// TestCLI_LintCheck_SARIF verifies that `lint check --format sarif`
// produces a SARIF 2.1.0 document naming core-lint as the driver and
// reporting the fixture directory's single expected rule violation.
func TestCLI_LintCheck_SARIF(t *testing.T) {
	buildCLI(t)
	repoRoot := repoRoot(t)
	stdout, stderr, exitCode := runCLI(t, repoRoot, "lint", "check", "--format", "sarif", "tests/cli/lint/check/fixtures")
	assert.Equal(t, 0, exitCode, stderr)
	// Decode only the SARIF fields the assertions need.
	var sarif struct {
		Version string `json:"version"`
		Runs    []struct {
			Tool struct {
				Driver struct {
					Name string `json:"name"`
				} `json:"driver"`
			} `json:"tool"`
			Results []struct {
				RuleID string `json:"ruleId"`
			} `json:"results"`
		} `json:"runs"`
	}
	require.NoError(t, json.Unmarshal([]byte(stdout), &sarif))
	require.Equal(t, "2.1.0", sarif.Version)
	require.Len(t, sarif.Runs, 1)
	assert.Equal(t, "core-lint", sarif.Runs[0].Tool.Driver.Name)
	require.Len(t, sarif.Runs[0].Results, 1)
	assert.Equal(t, "go-cor-003", sarif.Runs[0].Results[0].RuleID)
}
// TestCLI_HookInstallRemove verifies that `hook install` writes a git
// pre-commit hook invoking core-lint and that `hook remove` strips it
// again. Skipped when git is unavailable.
func TestCLI_HookInstallRemove(t *testing.T) {
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("git not available")
	}
	dir := t.TempDir()
	runCLIExpectSuccess(t, dir, "git", "init")
	runCLIExpectSuccess(t, dir, "git", "config", "user.email", "test@example.com")
	runCLIExpectSuccess(t, dir, "git", "config", "user.name", "Test User")
	_, stderr, exitCode := runCLI(t, dir, "hook", "install", dir)
	assert.Equal(t, 0, exitCode, stderr)
	hookPath := filepath.Join(dir, ".git", "hooks", "pre-commit")
	hookContent, err := os.ReadFile(hookPath)
	require.NoError(t, err)
	assert.Contains(t, string(hookContent), "core-lint run --hook")
	_, stderr, exitCode = runCLI(t, dir, "hook", "remove", dir)
	assert.Equal(t, 0, exitCode, stderr)
	// remove may delete the hook file entirely or rewrite it; either way
	// the core-lint line must be gone.
	removedContent, err := os.ReadFile(hookPath)
	if err == nil {
		assert.NotContains(t, string(removedContent), "core-lint run --hook")
	}
}
// runCLI executes the built core-lint binary with args inside workdir and
// returns its stdout, stderr, and exit code.
//
// Previously a process that failed to start at all (binary missing,
// permission denied, ...) was reported as exit code -1 with an EMPTY
// stderr, which made such failures undiagnosable in assertion messages.
// The raw error text is now returned in the stderr slot as a fallback.
func runCLI(t *testing.T, workdir string, args ...string) (string, string, int) {
	t.Helper()
	command := exec.Command(buildCLI(t), args...)
	command.Dir = workdir
	command.Env = os.Environ()
	stdout, err := command.Output()
	if err == nil {
		return string(stdout), "", 0
	}
	// Start-up failures have no exit code or captured stderr; surface the
	// error text instead of an empty string.
	exitCode := -1
	stderr := err.Error()
	if exitErr, ok := err.(*exec.ExitError); ok {
		exitCode = exitErr.ExitCode()
		stderr = string(exitErr.Stderr)
	}
	return string(stdout), stderr, exitCode
}
// runCLIExpectSuccess runs an arbitrary command inside dir and fails the
// test immediately on a non-zero exit, attaching the combined output.
func runCLIExpectSuccess(t *testing.T, dir string, name string, args ...string) {
	t.Helper()
	cmd := exec.Command(name, args...)
	cmd.Dir = dir
	combined, err := cmd.CombinedOutput()
	require.NoError(t, err, string(combined))
}
// buildCLI compiles the core-lint binary exactly once per test process and
// returns the path to the built executable. A build failure is recorded in
// buildBinaryErr and re-asserted by every subsequent caller.
func buildCLI(t *testing.T) string {
	t.Helper()
	buildBinaryOnce.Do(func() {
		root := repoRoot(t)
		tmpDir, mkErr := os.MkdirTemp("", "core-lint-bin-*")
		if mkErr != nil {
			buildBinaryErr = mkErr
			return
		}
		builtBinaryPath = filepath.Join(tmpDir, "core-lint")
		build := exec.Command("go", "build", "-o", builtBinaryPath, "./cmd/core-lint")
		build.Dir = root
		if out, buildErr := build.CombinedOutput(); buildErr != nil {
			buildBinaryErr = fmt.Errorf("build core-lint: %w: %s", buildErr, strings.TrimSpace(string(out)))
		}
	})
	require.NoError(t, buildBinaryErr)
	return builtBinaryPath
}
// repoRoot returns the absolute path of the repository root, two levels
// above this test package's directory.
func repoRoot(t *testing.T) string {
	t.Helper()
	absolute, err := filepath.Abs("../..")
	require.NoError(t, err)
	return absolute
}

379
cmd/qa/cmd_docblock.go Normal file
View file

@ -0,0 +1,379 @@
// cmd_docblock.go implements docblock/docstring coverage checking for Go code.
//
// Usage:
//
// core qa docblock # Check current directory
// core qa docblock ./pkg/... # Check specific packages
// core qa docblock --threshold=80 # Require 80% coverage
package qa
import (
"cmp"
"encoding/json"
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"path/filepath"
"slices"
"strings"
"forge.lthn.ai/core/cli/pkg/cli"
"forge.lthn.ai/core/go-i18n"
)
// Docblock command flags, bound by addDocblockCommand.
var (
	docblockThreshold float64 // --threshold: minimum acceptable coverage percentage (default 80)
	docblockVerbose   bool    // --verbose/-v: list every missing symbol in text output
	docblockJSON      bool    // --json: emit machine-readable JSON instead of styled text
)
// addDocblockCommand registers the 'docblock' subcommand on parent and
// binds its threshold/verbose/json flags to the package-level variables.
func addDocblockCommand(parent *cli.Command) {
	command := &cli.Command{
		Use:   "docblock [packages...]",
		Short: i18n.T("cmd.qa.docblock.short"),
		Long:  i18n.T("cmd.qa.docblock.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			// No packages given means "the whole module".
			targets := args
			if len(targets) == 0 {
				targets = []string{"./..."}
			}
			return RunDocblockCheck(targets, docblockThreshold, docblockVerbose, docblockJSON)
		},
	}
	command.Flags().Float64Var(&docblockThreshold, "threshold", 80, i18n.T("cmd.qa.docblock.flag.threshold"))
	command.Flags().BoolVarP(&docblockVerbose, "verbose", "v", false, i18n.T("common.flag.verbose"))
	command.Flags().BoolVar(&docblockJSON, "json", false, i18n.T("common.flag.json"))
	parent.AddCommand(command)
}
// DocblockResult holds the result of a docblock coverage check.
type DocblockResult struct {
	Coverage   float64           `json:"coverage"`           // percentage of documented symbols, 0-100
	Threshold  float64           `json:"threshold"`          // required percentage, set by the caller
	Total      int               `json:"total"`              // exported symbols inspected
	Documented int               `json:"documented"`         // exported symbols carrying a doc comment
	Missing    []MissingDocblock `json:"missing,omitempty"`  // undocumented symbols, sorted by file/line/kind/name
	Warnings   []DocblockWarning `json:"warnings,omitempty"` // directories that only partially parsed
	Passed     bool              `json:"passed"`             // Coverage >= Threshold
}
// MissingDocblock represents an exported symbol without documentation.
type MissingDocblock struct {
	File   string `json:"file"`             // path, made relative to the working directory when possible
	Line   int    `json:"line"`             // 1-based declaration line
	Name   string `json:"name"`             // exported identifier
	Kind   string `json:"kind"`             // func, type, const, var
	Reason string `json:"reason,omitempty"` // optional explanation; not set by checkFile — confirm other writers
}
// DocblockWarning captures a partial parse failure while still preserving
// the successfully parsed files in the same directory.
type DocblockWarning struct {
	Path  string `json:"path"`  // directory that failed to parse completely
	Error string `json:"error"` // parser error text
}
// RunDocblockCheck checks docblock coverage for the given packages.
//
// It computes coverage via CheckDocblockCoverage, marks the result passed
// when Coverage >= threshold, and prints either JSON (jsonOutput) or
// styled text. Coverage below the threshold always returns a non-nil
// error so the command exits non-zero; in text mode a failing run also
// prints a compact file:line list of every missing symbol.
func RunDocblockCheck(paths []string, threshold float64, verbose, jsonOutput bool) error {
	result, err := CheckDocblockCoverage(paths)
	if err != nil {
		return err
	}
	result.Threshold = threshold
	result.Passed = result.Coverage >= threshold
	// JSON mode: emit the full result, then fail if below threshold.
	if jsonOutput {
		data, err := json.MarshalIndent(result, "", " ")
		if err != nil {
			return err
		}
		cli.Print("%s\n", string(data))
		if !result.Passed {
			return cli.Err("docblock coverage %.1f%% below threshold %.1f%%", result.Coverage, threshold)
		}
		return nil
	}
	// Print result: with --verbose, list every missing symbol up front.
	if verbose && len(result.Missing) > 0 {
		cli.Print("%s\n\n", i18n.T("cmd.qa.docblock.missing_docs"))
		for _, m := range result.Missing {
			cli.Print(" %s:%d: %s %s\n",
				dimStyle.Render(m.File),
				m.Line,
				dimStyle.Render(m.Kind),
				m.Name,
			)
		}
		cli.Blank()
	}
	// Surface parse warnings without failing the run on their own.
	if len(result.Warnings) > 0 {
		for _, warning := range result.Warnings {
			cli.Warnf("failed to parse %s: %s", warning.Path, warning.Error)
		}
		cli.Blank()
	}
	// Summary
	coverageStr := fmt.Sprintf("%.1f%%", result.Coverage)
	thresholdStr := fmt.Sprintf("%.1f%%", threshold)
	if result.Passed {
		cli.Print("%s %s %s/%s (%s >= %s)\n",
			successStyle.Render(i18n.T("common.label.success")),
			i18n.T("cmd.qa.docblock.coverage"),
			fmt.Sprintf("%d", result.Documented),
			fmt.Sprintf("%d", result.Total),
			successStyle.Render(coverageStr),
			thresholdStr,
		)
		return nil
	}
	cli.Print("%s %s %s/%s (%s < %s)\n",
		errorStyle.Render(i18n.T("common.label.error")),
		i18n.T("cmd.qa.docblock.coverage"),
		fmt.Sprintf("%d", result.Documented),
		fmt.Sprintf("%d", result.Total),
		errorStyle.Render(coverageStr),
		thresholdStr,
	)
	// Always show compact file:line list when failing (token-efficient for AI agents)
	if len(result.Missing) > 0 {
		cli.Blank()
		for _, m := range result.Missing {
			cli.Print("%s:%d\n", m.File, m.Line)
		}
	}
	return cli.Err("docblock coverage %.1f%% below threshold %.1f%%", result.Coverage, threshold)
}
// CheckDocblockCoverage analyzes Go packages for docblock coverage.
//
// Each pattern is expanded to concrete directories, every non-test Go file
// is parsed with comments, and exported declarations are tallied. A
// directory that fails to parse completely is recorded as a warning while
// any files that did parse still contribute. Missing entries and warnings
// are sorted so output is deterministic.
func CheckDocblockCoverage(patterns []string) (*DocblockResult, error) {
	out := &DocblockResult{}
	directories, err := expandPatterns(patterns)
	if err != nil {
		return nil, err
	}
	fileSet := token.NewFileSet()
	skipTests := func(fi os.FileInfo) bool {
		return !strings.HasSuffix(fi.Name(), "_test.go")
	}
	for _, directory := range directories {
		packages, parseErr := parser.ParseDir(fileSet, directory, skipTests, parser.ParseComments)
		if parseErr != nil {
			// Keep whatever parsed; the caller decides how to surface this.
			out.Warnings = append(out.Warnings, DocblockWarning{
				Path:  directory,
				Error: parseErr.Error(),
			})
		}
		for _, pkg := range packages {
			for name, file := range pkg.Files {
				checkFile(fileSet, name, file, out)
			}
		}
	}
	if out.Total > 0 {
		out.Coverage = float64(out.Documented) / float64(out.Total) * 100
	}
	slices.SortFunc(out.Missing, func(a, b MissingDocblock) int {
		return cmp.Or(
			cmp.Compare(a.File, b.File),
			cmp.Compare(a.Line, b.Line),
			cmp.Compare(a.Kind, b.Kind),
			cmp.Compare(a.Name, b.Name),
		)
	})
	slices.SortFunc(out.Warnings, func(a, b DocblockWarning) int {
		return cmp.Or(
			cmp.Compare(a.Path, b.Path),
			cmp.Compare(a.Error, b.Error),
		)
	})
	return out, nil
}
// expandPatterns expands Go package patterns like ./... to actual directories.
func expandPatterns(patterns []string) ([]string, error) {
var dirs []string
seen := make(map[string]bool)
for _, pattern := range patterns {
if strings.HasSuffix(pattern, "/...") {
// Recursive pattern
base := strings.TrimSuffix(pattern, "/...")
if base == "." {
base = "."
}
err := filepath.Walk(base, func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil // Skip errors
}
if !info.IsDir() {
return nil
}
// Skip vendor, testdata, and hidden directories (but not "." itself)
name := info.Name()
if name == "vendor" || name == "testdata" || (strings.HasPrefix(name, ".") && name != ".") {
return filepath.SkipDir
}
// Check if directory has Go files
if hasGoFiles(path) && !seen[path] {
dirs = append(dirs, path)
seen[path] = true
}
return nil
})
if err != nil {
return nil, err
}
} else {
// Single directory
path := pattern
if !seen[path] && hasGoFiles(path) {
dirs = append(dirs, path)
seen[path] = true
}
}
}
return dirs, nil
}
// hasGoFiles checks if a directory contains Go files.
func hasGoFiles(dir string) bool {
entries, err := os.ReadDir(dir)
if err != nil {
return false
}
for _, entry := range entries {
if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".go") && !strings.HasSuffix(entry.Name(), "_test.go") {
return true
}
}
return false
}
// checkFile analyzes a single file for docblock coverage, incrementing
// result.Total for every exported declaration and recording undocumented
// ones in result.Missing.
//
// Counted symbols: exported functions and methods (methods whose receiver
// type is recognisably unexported are skipped), exported type specs, and
// each exported const/var name. A doc comment on either the enclosing
// GenDecl or the individual spec counts as documentation.
func checkFile(fset *token.FileSet, filename string, file *ast.File, result *DocblockResult) {
	// Make filename relative if possible so reports are portable.
	if cwd, err := os.Getwd(); err == nil {
		if rel, err := filepath.Rel(cwd, filename); err == nil {
			filename = rel
		}
	}
	for _, decl := range file.Decls {
		switch d := decl.(type) {
		case *ast.FuncDecl:
			// Skip unexported functions
			if !ast.IsExported(d.Name.Name) {
				continue
			}
			// Skip methods on unexported types
			if d.Recv != nil && len(d.Recv.List) > 0 {
				if recvType := getReceiverTypeName(d.Recv.List[0].Type); recvType != "" && !ast.IsExported(recvType) {
					continue
				}
			}
			result.Total++
			if d.Doc != nil && len(d.Doc.List) > 0 {
				result.Documented++
			} else {
				pos := fset.Position(d.Pos())
				result.Missing = append(result.Missing, MissingDocblock{
					File: filename,
					Line: pos.Line,
					Name: d.Name.Name,
					Kind: "func",
				})
			}
		case *ast.GenDecl:
			for _, spec := range d.Specs {
				switch s := spec.(type) {
				case *ast.TypeSpec:
					if !ast.IsExported(s.Name.Name) {
						continue
					}
					result.Total++
					// Type can have doc on GenDecl or TypeSpec
					if (d.Doc != nil && len(d.Doc.List) > 0) || (s.Doc != nil && len(s.Doc.List) > 0) {
						result.Documented++
					} else {
						pos := fset.Position(s.Pos())
						result.Missing = append(result.Missing, MissingDocblock{
							File: filename,
							Line: pos.Line,
							Name: s.Name.Name,
							Kind: "type",
						})
					}
				case *ast.ValueSpec:
					// Check exported consts and vars; each name in the spec
					// is counted individually.
					for _, name := range s.Names {
						if !ast.IsExported(name.Name) {
							continue
						}
						result.Total++
						// Value can have doc on GenDecl or ValueSpec
						if (d.Doc != nil && len(d.Doc.List) > 0) || (s.Doc != nil && len(s.Doc.List) > 0) {
							result.Documented++
						} else {
							pos := fset.Position(name.Pos())
							result.Missing = append(result.Missing, MissingDocblock{
								File: filename,
								Line: pos.Line,
								Name: name.Name,
								Kind: kindFromToken(d.Tok),
							})
						}
					}
				}
			}
		}
	}
}
// getReceiverTypeName extracts the type name from a method receiver.
func getReceiverTypeName(expr ast.Expr) string {
switch t := expr.(type) {
case *ast.Ident:
return t.Name
case *ast.StarExpr:
return getReceiverTypeName(t.X)
}
return ""
}
// kindFromToken returns a string representation of the token kind.
func kindFromToken(tok token.Token) string {
switch tok {
case token.CONST:
return "const"
case token.VAR:
return "var"
default:
return "value"
}
}

View file

@ -0,0 +1,36 @@
package qa
import (
"encoding/json"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRunDocblockCheckJSONOutput_IsDeterministicAndKeepsWarnings verifies
// that the JSON report sorts missing symbols by file and records a parse
// warning for broken.go while the files that did parse still count.
func TestRunDocblockCheckJSONOutput_IsDeterministicAndKeepsWarnings(t *testing.T) {
	dir := t.TempDir()
	// b.go is written before a.go so the sorted order is exercised, not luck.
	writeTestFile(t, filepath.Join(dir, "b.go"), "package sample\n\nfunc Beta() {}\n")
	writeTestFile(t, filepath.Join(dir, "a.go"), "package sample\n\nfunc Alpha() {}\n")
	writeTestFile(t, filepath.Join(dir, "broken.go"), "package sample\n\nfunc Broken(\n")
	restoreWorkingDir(t, dir)
	var result DocblockResult
	output := captureStdout(t, func() {
		// Threshold 100 with zero documented symbols must return an error.
		err := RunDocblockCheck([]string{"."}, 100, false, true)
		require.Error(t, err)
	})
	require.NoError(t, json.Unmarshal([]byte(output), &result))
	assert.False(t, result.Passed)
	assert.Equal(t, 2, result.Total)
	assert.Equal(t, 0, result.Documented)
	require.Len(t, result.Missing, 2)
	assert.Equal(t, "a.go", result.Missing[0].File)
	assert.Equal(t, "b.go", result.Missing[1].File)
	require.Len(t, result.Warnings, 1)
	assert.Equal(t, ".", result.Warnings[0].Path)
	assert.NotEmpty(t, result.Warnings[0].Error)
}

355
cmd/qa/cmd_health.go Normal file
View file

@ -0,0 +1,355 @@
// cmd_health.go implements the `qa health` command for aggregate CI health.
//
// Usage:
// core qa health # Show CI health summary
// core qa health --problems # Show only repos with problems
package qa
import (
"cmp"
"encoding/json"
"os/exec"
"slices"
"strings"
"forge.lthn.ai/core/cli/pkg/cli"
"forge.lthn.ai/core/go-i18n"
"forge.lthn.ai/core/go-io"
"forge.lthn.ai/core/go-log"
"forge.lthn.ai/core/go-scm/repos"
)
// Health command flags, bound by addHealthCommand.
var (
	healthProblems bool   // --problems/-p: show only repos whose status is not "passing"
	healthRegistry string // --registry: explicit registry path; empty means auto-discover
	healthJSON     bool   // --json: emit HealthOutput JSON instead of styled text
)
// HealthWorkflowRun represents a GitHub Actions workflow run as decoded
// from `gh run list --json status,conclusion,name,headSha,updatedAt,url`.
type HealthWorkflowRun struct {
	Status     string `json:"status"`     // e.g. "completed", "in_progress", "queued", "waiting"
	Conclusion string `json:"conclusion"` // e.g. "success", "failure", "cancelled", "skipped"
	Name       string `json:"name"`       // workflow name
	HeadSha    string `json:"headSha"`    // commit the run executed against
	UpdatedAt  string `json:"updatedAt"`  // timestamp string as emitted by gh (e.g. RFC3339) — confirm
	URL        string `json:"url"`        // link to the run
}
// RepoHealth represents the CI health of a single repo.
type RepoHealth struct {
	Name         string `json:"name"`
	Status       string `json:"status"` // passing, failing, error, pending, no_ci, disabled
	Message      string `json:"message"`
	URL          string `json:"url"`                     // latest workflow-run URL, when known
	FailingSince string `json:"failing_since,omitempty"` // not populated by fetchRepoHealth — confirm other writers
}
// HealthSummary captures aggregate health counts. The per-status counters
// and ByStatus are always computed over every repo, even when the
// displayed list is filtered by --problems (see FilteredRepos).
type HealthSummary struct {
	TotalRepos    int            `json:"total_repos"`    // repos in the registry
	FilteredRepos int            `json:"filtered_repos"` // repos shown after any --problems filter
	Passing       int            `json:"passing"`
	Failing       int            `json:"failing"`
	Errors        int            `json:"errors"`
	Pending       int            `json:"pending"`
	Disabled      int            `json:"disabled"`
	NotConfigured int            `json:"not_configured"` // repos with status "no_ci"
	PassingRate   int            `json:"passing_rate"`   // integer percentage of TotalRepos
	ProblemsOnly  bool           `json:"problems_only"`  // true when --problems filtered the repo list
	ByStatus      map[string]int `json:"by_status"`
}
// HealthOutput is the JSON payload for `qa health --json`.
type HealthOutput struct {
	Summary HealthSummary `json:"summary"` // aggregate counts over all repos
	Repos   []RepoHealth  `json:"repos"`   // per-repo results, worst-first then alphabetical
}
// addHealthCommand registers the 'health' subcommand on parent and binds
// its --problems/-p, --registry and --json flags to the package-level
// flag variables.
func addHealthCommand(parent *cli.Command) {
	healthCmd := &cli.Command{
		Use:   "health",
		Short: i18n.T("cmd.qa.health.short"),
		Long:  i18n.T("cmd.qa.health.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			return runHealth()
		},
	}
	healthCmd.Flags().BoolVarP(&healthProblems, "problems", "p", false, i18n.T("cmd.qa.health.flag.problems"))
	healthCmd.Flags().StringVar(&healthRegistry, "registry", "", i18n.T("common.flag.registry"))
	healthCmd.Flags().BoolVar(&healthJSON, "json", false, i18n.T("common.flag.json"))
	parent.AddCommand(healthCmd)
}
// runHealth aggregates the latest workflow-run status of every repo in the
// registry and prints either a JSON payload (--json) or a grouped, styled
// text report. It requires the GitHub CLI (`gh`) on PATH and fails early
// when it is missing.
func runHealth() error {
	if _, err := exec.LookPath("gh"); err != nil {
		return log.E("qa.health", i18n.T("error.gh_not_found"), nil)
	}
	// Load the registry from --registry when given, otherwise discover it.
	var reg *repos.Registry
	var err error
	if healthRegistry != "" {
		reg, err = repos.LoadRegistry(io.Local, healthRegistry)
	} else {
		registryPath, findErr := repos.FindRegistry(io.Local)
		if findErr != nil {
			return log.E("qa.health", i18n.T("error.registry_not_found"), nil)
		}
		reg, err = repos.LoadRegistry(io.Local, registryPath)
	}
	if err != nil {
		return log.E("qa.health", "failed to load registry", err)
	}
	// Query each repo sequentially via gh.
	repoList := reg.List()
	allHealthResults := make([]RepoHealth, 0, len(repoList))
	for _, repo := range repoList {
		health := fetchRepoHealth(reg.Org, repo.Name)
		allHealthResults = append(allHealthResults, health)
	}
	// Sort by severity first, then repo name for deterministic output.
	slices.SortFunc(allHealthResults, func(a, b RepoHealth) int {
		if p := cmp.Compare(healthPriority(a.Status), healthPriority(b.Status)); p != 0 {
			return p
		}
		return strings.Compare(a.Name, b.Name)
	})
	// --problems narrows the DISPLAYED list; the summary below is still
	// computed over every repo.
	healthResults := allHealthResults
	if healthProblems {
		problems := make([]RepoHealth, 0, len(healthResults))
		for _, h := range healthResults {
			if h.Status != "passing" {
				problems = append(problems, h)
			}
		}
		healthResults = problems
	}
	summary := summariseHealthResults(len(repoList), len(healthResults), allHealthResults, healthProblems)
	if healthJSON {
		return printHealthJSON(summary, healthResults)
	}
	cli.Print("%s: %d/%d repos healthy (%d%%)\n\n",
		i18n.T("cmd.qa.health.summary"),
		summary.Passing,
		summary.TotalRepos,
		summary.PassingRate)
	if len(healthResults) == 0 {
		cli.Text(i18n.T("cmd.qa.health.all_healthy"))
		return nil
	}
	// Group by status and print worst-first; the passing group is shown
	// only when --problems is off.
	grouped := make(map[string][]RepoHealth)
	for _, h := range healthResults {
		grouped[h.Status] = append(grouped[h.Status], h)
	}
	printHealthGroup("failing", grouped["failing"], errorStyle)
	printHealthGroup("error", grouped["error"], errorStyle)
	printHealthGroup("pending", grouped["pending"], warningStyle)
	printHealthGroup("no_ci", grouped["no_ci"], dimStyle)
	printHealthGroup("disabled", grouped["disabled"], dimStyle)
	if !healthProblems {
		printHealthGroup("passing", grouped["passing"], successStyle)
	}
	return nil
}
// fetchRepoHealth queries the most recent GitHub Actions run for
// org/repoName via `gh run list` and maps it onto a RepoHealth.
//
// Mapping: completed+success or completed+skipped -> passing;
// completed+failure (or any other conclusion) -> failing; cancelled or a
// run still in progress/queued/waiting -> pending; no runs, or gh
// reporting "no workflows"/"not found" -> no_ci; any other gh or JSON
// failure -> error.
func fetchRepoHealth(org, repoName string) RepoHealth {
	repoFullName := cli.Sprintf("%s/%s", org, repoName)
	args := []string{
		"run", "list",
		"--repo", repoFullName,
		"--limit", "1",
		"--json", "status,conclusion,name,headSha,updatedAt,url",
	}
	cmd := exec.Command("gh", args...)
	output, err := cmd.Output()
	if err != nil {
		// gh distinguishes "no CI at all" from genuine errors only via
		// its stderr text, so inspect it before giving up.
		if exitErr, ok := err.(*exec.ExitError); ok {
			stderr := string(exitErr.Stderr)
			if strings.Contains(stderr, "no workflows") || strings.Contains(stderr, "not found") {
				return RepoHealth{
					Name:    repoName,
					Status:  "no_ci",
					Message: i18n.T("cmd.qa.health.no_ci_configured"),
				}
			}
		}
		return RepoHealth{
			Name:    repoName,
			Status:  "error",
			Message: i18n.T("cmd.qa.health.fetch_error"),
		}
	}
	var runs []HealthWorkflowRun
	if err := json.Unmarshal(output, &runs); err != nil {
		return RepoHealth{
			Name:    repoName,
			Status:  "error",
			Message: i18n.T("cmd.qa.health.parse_error"),
		}
	}
	if len(runs) == 0 {
		return RepoHealth{
			Name:    repoName,
			Status:  "no_ci",
			Message: i18n.T("cmd.qa.health.no_ci_configured"),
		}
	}
	// Only the most recent run (limit 1) determines the status.
	run := runs[0]
	health := RepoHealth{
		Name: repoName,
		URL:  run.URL,
	}
	switch run.Status {
	case "completed":
		switch run.Conclusion {
		case "success":
			health.Status = "passing"
			health.Message = i18n.T("cmd.qa.health.passing")
		case "failure":
			health.Status = "failing"
			health.Message = i18n.T("cmd.qa.health.tests_failing")
		case "cancelled":
			health.Status = "pending"
			health.Message = i18n.T("cmd.qa.health.cancelled")
		case "skipped":
			health.Status = "passing"
			health.Message = i18n.T("cmd.qa.health.skipped")
		default:
			// Unknown conclusions are treated as failures, surfacing the
			// raw conclusion string as the message.
			health.Status = "failing"
			health.Message = run.Conclusion
		}
	case "in_progress", "queued", "waiting":
		health.Status = "pending"
		health.Message = i18n.T("cmd.qa.health.running")
	default:
		// Unknown run states fall back to no_ci with the raw status text.
		health.Status = "no_ci"
		health.Message = run.Status
	}
	return health
}
// healthPriority maps a repo health status to its sort rank; lower ranks
// sort first so the most actionable problems lead the report. Unknown
// statuses rank last.
func healthPriority(status string) int {
	ranks := map[string]int{
		"failing":  0,
		"error":    1,
		"pending":  2,
		"no_ci":    3,
		"disabled": 4,
		"passing":  5,
	}
	if rank, ok := ranks[status]; ok {
		return rank
	}
	return 6
}
// summariseHealthResults aggregates per-repo health into a HealthSummary.
// results should be the UNFILTERED repo list so the counters reflect the
// whole registry even when --problems narrows the display; filteredRepos
// records how many repos the caller will actually show.
func summariseHealthResults(totalRepos int, filteredRepos int, results []RepoHealth, problemsOnly bool) HealthSummary {
	summary := HealthSummary{
		TotalRepos:    totalRepos,
		FilteredRepos: filteredRepos,
		ProblemsOnly:  problemsOnly,
		// Pre-seed the known statuses so the JSON map always carries all
		// six keys, even at zero.
		ByStatus: map[string]int{
			"passing":  0,
			"failing":  0,
			"error":    0,
			"pending":  0,
			"disabled": 0,
			"no_ci":    0,
		},
	}
	for _, health := range results {
		summary.ByStatus[health.Status]++
	}
	// The named counters mirror the map entries for the known statuses.
	summary.Passing = summary.ByStatus["passing"]
	summary.Failing = summary.ByStatus["failing"]
	summary.Errors = summary.ByStatus["error"]
	summary.Pending = summary.ByStatus["pending"]
	summary.Disabled = summary.ByStatus["disabled"]
	summary.NotConfigured = summary.ByStatus["no_ci"]
	if summary.TotalRepos > 0 {
		summary.PassingRate = (summary.Passing * 100) / summary.TotalRepos
	}
	return summary
}
// printHealthJSON writes the summary and repo list to stdout as indented
// JSON, in the HealthOutput envelope.
func printHealthJSON(summary HealthSummary, repos []RepoHealth) error {
	payload := HealthOutput{Summary: summary, Repos: repos}
	data, err := json.MarshalIndent(payload, "", " ")
	if err != nil {
		return err
	}
	cli.Print("%s\n", string(data))
	return nil
}
// printHealthGroup prints one status section: a styled heading with the
// repo count, then each repo name with its message. Failing repos also
// get their workflow-run URL. Empty groups print nothing. The slice is
// sorted in place so each group lists repos alphabetically.
func printHealthGroup(status string, repos []RepoHealth, style *cli.AnsiStyle) {
	if len(repos) == 0 {
		return
	}
	slices.SortFunc(repos, func(a, b RepoHealth) int {
		return strings.Compare(a.Name, b.Name)
	})
	// Resolve the localised heading; unknown statuses get an empty label.
	headingKeys := map[string]string{
		"failing":  "cmd.qa.health.count_failing",
		"error":    "cmd.qa.health.count_error",
		"pending":  "cmd.qa.health.count_pending",
		"no_ci":    "cmd.qa.health.count_no_ci",
		"disabled": "cmd.qa.health.count_disabled",
		"passing":  "cmd.qa.health.count_passing",
	}
	label := ""
	if key, ok := headingKeys[status]; ok {
		label = i18n.T(key)
	}
	cli.Print("%s (%d):\n", style.Render(label), len(repos))
	for _, entry := range repos {
		cli.Print(" %s %s\n",
			cli.RepoStyle.Render(entry.Name),
			dimStyle.Render(entry.Message))
		// Only failing repos link straight to the offending run.
		if entry.URL != "" && status == "failing" {
			cli.Print(" -> %s\n", dimStyle.Render(entry.URL))
		}
	}
	cli.Blank()
}

240
cmd/qa/cmd_health_test.go Normal file
View file

@ -0,0 +1,240 @@
package qa
import (
"encoding/json"
"path/filepath"
"testing"
"forge.lthn.ai/core/cli/pkg/cli"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRunHealthJSONOutput_UsesMachineFriendlyKeysAndKeepsFetchErrors runs
// `qa health --json` against a stub gh where alpha passes and beta fails
// to fetch, then checks snake_case key casing, summary counts, and that
// fetch errors are kept and sorted ahead of passing repos.
func TestRunHealthJSONOutput_UsesMachineFriendlyKeysAndKeepsFetchErrors(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
alpha:
type: module
beta:
type: module
`)
	// Stub gh: alpha returns one successful run, beta simulates a lookup
	// failure, anything else flags an unexpected invocation.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"--repo forge/alpha"*)
cat <<'JSON'
[
{
"status": "completed",
"conclusion": "success",
"name": "CI",
"headSha": "abc123",
"updatedAt": "2026-03-30T00:00:00Z",
"url": "https://example.com/alpha/run/1"
}
]
JSON
;;
*"--repo forge/beta"*)
printf '%s\n' 'simulated workflow lookup failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetHealthFlags(t)
	t.Cleanup(func() {
		healthRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addHealthCommand(parent)
	command := findSubcommand(t, parent, "health")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	var payload HealthOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	assert.Equal(t, 2, payload.Summary.TotalRepos)
	assert.Equal(t, 1, payload.Summary.Passing)
	assert.Equal(t, 1, payload.Summary.Errors)
	assert.Equal(t, 2, payload.Summary.FilteredRepos)
	// ByStatus always carries all six known statuses, even at zero.
	assert.Len(t, payload.Summary.ByStatus, 6)
	assert.Equal(t, 1, payload.Summary.ByStatus["passing"])
	assert.Equal(t, 1, payload.Summary.ByStatus["error"])
	assert.Equal(t, 0, payload.Summary.ByStatus["pending"])
	assert.Equal(t, 0, payload.Summary.ByStatus["disabled"])
	assert.Equal(t, 0, payload.Summary.ByStatus["no_ci"])
	// Worst-first ordering puts the error repo before the passing one.
	require.Len(t, payload.Repos, 2)
	assert.Equal(t, "error", payload.Repos[0].Status)
	assert.Equal(t, "beta", payload.Repos[0].Name)
	assert.Equal(t, "passing", payload.Repos[1].Status)
	assert.Equal(t, "alpha", payload.Repos[1].Name)
	assert.Contains(t, output, `"status"`)
	assert.NotContains(t, output, `"Status"`)
	assert.NotContains(t, output, `"FailingSince"`)
}
// TestRunHealthJSONOutput_ProblemsOnlyKeepsOverallSummary verifies that
// --problems filters the repo list down to non-passing entries while the
// summary counters still cover every repo in the registry.
func TestRunHealthJSONOutput_ProblemsOnlyKeepsOverallSummary(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
alpha:
type: module
beta:
type: module
`)
	// Same stub as the unfiltered test: alpha passes, beta errors.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"--repo forge/alpha"*)
cat <<'JSON'
[
{
"status": "completed",
"conclusion": "success",
"name": "CI",
"headSha": "abc123",
"updatedAt": "2026-03-30T00:00:00Z",
"url": "https://example.com/alpha/run/1"
}
]
JSON
;;
*"--repo forge/beta"*)
printf '%s\n' 'simulated workflow lookup failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetHealthFlags(t)
	t.Cleanup(func() {
		healthRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addHealthCommand(parent)
	command := findSubcommand(t, parent, "health")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	require.NoError(t, command.Flags().Set("problems", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	var payload HealthOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	// Summary still spans both repos even though only one is displayed.
	assert.Equal(t, 2, payload.Summary.TotalRepos)
	assert.Equal(t, 1, payload.Summary.Passing)
	assert.Equal(t, 1, payload.Summary.Errors)
	assert.Equal(t, 1, payload.Summary.FilteredRepos)
	assert.True(t, payload.Summary.ProblemsOnly)
	assert.Len(t, payload.Summary.ByStatus, 6)
	assert.Equal(t, 1, payload.Summary.ByStatus["passing"])
	assert.Equal(t, 1, payload.Summary.ByStatus["error"])
	assert.Equal(t, 0, payload.Summary.ByStatus["pending"])
	assert.Equal(t, 0, payload.Summary.ByStatus["disabled"])
	assert.Equal(t, 0, payload.Summary.ByStatus["no_ci"])
	// Only the problem repo survives the filter.
	require.Len(t, payload.Repos, 1)
	assert.Equal(t, "error", payload.Repos[0].Status)
	assert.Equal(t, "beta", payload.Repos[0].Name)
}
// TestRunHealthHumanOutput_ShowsFetchErrorsAsErrors verifies the styled
// text report: both repos appear, and a gh fetch failure is labelled with
// the fetch-error message rather than being misreported as missing CI.
func TestRunHealthHumanOutput_ShowsFetchErrorsAsErrors(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
alpha:
type: module
beta:
type: module
`)
	// Stub gh: alpha passes, beta errors out.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"--repo forge/alpha"*)
cat <<'JSON'
[
{
"status": "completed",
"conclusion": "success",
"name": "CI",
"headSha": "abc123",
"updatedAt": "2026-03-30T00:00:00Z",
"url": "https://example.com/alpha/run/1"
}
]
JSON
;;
*"--repo forge/beta"*)
printf '%s\n' 'simulated workflow lookup failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetHealthFlags(t)
	t.Cleanup(func() {
		healthRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addHealthCommand(parent)
	command := findSubcommand(t, parent, "health")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	// i18n keys appear verbatim because tests run without translations.
	assert.Contains(t, output, "cmd.qa.health.summary")
	assert.Contains(t, output, "alpha")
	assert.Contains(t, output, "beta")
	assert.Contains(t, output, "cmd.qa.health.fetch_error")
	assert.NotContains(t, output, "no CI")
}
// resetHealthFlags zeroes the package-level health flags for the duration
// of a test and restores the previous values on cleanup, so tests cannot
// leak flag state into each other.
func resetHealthFlags(t *testing.T) {
	t.Helper()
	saved := struct {
		problems bool
		registry string
		json     bool
	}{healthProblems, healthRegistry, healthJSON}
	healthProblems = false
	healthRegistry = ""
	healthJSON = false
	t.Cleanup(func() {
		healthProblems = saved.problems
		healthRegistry = saved.registry
		healthJSON = saved.json
	})
}

507
cmd/qa/cmd_issues.go Normal file
View file

@ -0,0 +1,507 @@
// cmd_issues.go implements the 'qa issues' command for intelligent issue triage.
//
// Usage:
// core qa issues # Show prioritised, actionable issues
// core qa issues --mine # Show issues assigned to you
// core qa issues --triage # Show issues needing triage (no labels/assignee)
// core qa issues --blocked # Show blocked issues
package qa
import (
"cmp"
"encoding/json"
"os/exec"
"slices"
"strings"
"time"
"forge.lthn.ai/core/cli/pkg/cli"
"forge.lthn.ai/core/go-i18n"
"forge.lthn.ai/core/go-io"
"forge.lthn.ai/core/go-log"
"forge.lthn.ai/core/go-scm/repos"
)
// Issue command flags, bound by addIssuesCommand.
var (
	issuesMine     bool   // --mine/-m: only issues assigned to the current user
	issuesTriage   bool   // --triage: only issues needing triage (no labels/assignee)
	issuesBlocked  bool   // --blocked: only blocked issues
	issuesRegistry string // --registry: explicit registry path; empty means auto-discover — confirm binding
	issuesLimit    int    // maximum number of issues handled; default set in addIssuesCommand — confirm
	issuesJSON     bool   // --json: emit IssuesOutput JSON instead of styled text
)
// Issue represents a GitHub issue with triage metadata. The nested anonymous
// structs mirror the GraphQL shape returned by `gh issue list --json`, so
// this type unmarshals gh output directly.
type Issue struct {
	Number    int       `json:"number"`
	Title     string    `json:"title"`
	State     string    `json:"state"`
	Body      string    `json:"body"`
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"updatedAt"`
	// Author is the user who opened the issue.
	Author struct {
		Login string `json:"login"`
	} `json:"author"`
	Assignees struct {
		Nodes []struct {
			Login string `json:"login"`
		} `json:"nodes"`
	} `json:"assignees"`
	Labels struct {
		Nodes []struct {
			Name string `json:"name"`
		} `json:"nodes"`
	} `json:"labels"`
	// Comments carries both the total count and the comment nodes; the
	// categoriser inspects the last node (see categoriseIssue).
	Comments struct {
		TotalCount int `json:"totalCount"`
		Nodes      []struct {
			Author struct {
				Login string `json:"login"`
			} `json:"author"`
			CreatedAt time.Time `json:"createdAt"`
		} `json:"nodes"`
	} `json:"comments"`
	URL string `json:"url"`
	// Computed fields — populated locally, never by gh.
	RepoName   string `json:"repo_name"`             // short registry name, set in fetchQAIssues
	Priority   int    `json:"priority"`              // Lower = higher priority
	Category   string `json:"category"`              // "needs_response", "ready", "blocked", "triage"
	ActionHint string `json:"action_hint,omitempty"` // optional one-line suggested next step
}
// IssueFetchError records a repository whose issue fetch failed, with the
// trimmed error text, so failures can be reported in the JSON payload.
type IssueFetchError struct {
	Repo  string `json:"repo"`
	Error string `json:"error"`
}
// IssueCategoryOutput is one named triage bucket in the JSON payload.
type IssueCategoryOutput struct {
	Category string  `json:"category"` // one of: needs_response, ready, blocked, triage
	Count    int     `json:"count"`    // len(Issues), duplicated for convenience
	Issues   []Issue `json:"issues"`
}
// IssuesOutput is the top-level JSON payload emitted by --json mode.
type IssuesOutput struct {
	TotalIssues    int                   `json:"total_issues"`    // issues fetched before filtering
	FilteredIssues int                   `json:"filtered_issues"` // issues remaining after flag filters
	ShowingMine    bool                  `json:"showing_mine"`
	ShowingTriage  bool                  `json:"showing_triage"`
	ShowingBlocked bool                  `json:"showing_blocked"`
	Categories     []IssueCategoryOutput `json:"categories"`
	FetchErrors    []IssueFetchError     `json:"fetch_errors"`
}
// addIssuesCommand wires the 'issues' subcommand onto the qa parent command,
// registering all of its flags.
func addIssuesCommand(parent *cli.Command) {
	command := &cli.Command{
		Use:   "issues",
		Short: i18n.T("cmd.qa.issues.short"),
		Long:  i18n.T("cmd.qa.issues.long"),
		RunE: func(_ *cli.Command, _ []string) error {
			return runQAIssues()
		},
	}
	flags := command.Flags()
	flags.BoolVarP(&issuesMine, "mine", "m", false, i18n.T("cmd.qa.issues.flag.mine"))
	flags.BoolVarP(&issuesTriage, "triage", "t", false, i18n.T("cmd.qa.issues.flag.triage"))
	flags.BoolVarP(&issuesBlocked, "blocked", "b", false, i18n.T("cmd.qa.issues.flag.blocked"))
	flags.StringVar(&issuesRegistry, "registry", "", i18n.T("common.flag.registry"))
	flags.IntVarP(&issuesLimit, "limit", "l", 50, i18n.T("cmd.qa.issues.flag.limit"))
	flags.BoolVar(&issuesJSON, "json", false, i18n.T("common.flag.json"))
	parent.AddCommand(command)
}
// runQAIssues is the entry point for 'qa issues': it loads the repo
// registry, fetches open issues from every repo via gh, categorises and
// filters them, then prints either a human listing or a JSON payload.
// It returns an error when gh is missing, the registry cannot be loaded,
// or every single repo fetch failed.
func runQAIssues() error {
	// Check gh is available before doing any other work.
	if _, err := exec.LookPath("gh"); err != nil {
		return log.E("qa.issues", i18n.T("error.gh_not_found"), nil)
	}
	// Load registry: an explicit --registry path wins; otherwise discover
	// one via repos.FindRegistry.
	var reg *repos.Registry
	var err error
	if issuesRegistry != "" {
		reg, err = repos.LoadRegistry(io.Local, issuesRegistry)
	} else {
		registryPath, findErr := repos.FindRegistry(io.Local)
		if findErr != nil {
			return log.E("qa.issues", i18n.T("error.registry_not_found"), nil)
		}
		reg, err = repos.LoadRegistry(io.Local, registryPath)
	}
	if err != nil {
		return log.E("qa.issues", "failed to load registry", err)
	}
	// Fetch issues from all repos, collecting per-repo failures instead of
	// aborting on the first one.
	var allIssues []Issue
	fetchErrors := make([]IssueFetchError, 0)
	repoList := reg.List()
	// Registry repos are map-backed, so sort before fetching to keep output stable.
	slices.SortFunc(repoList, func(a, b *repos.Repo) int {
		return cmp.Compare(a.Name, b.Name)
	})
	successfulFetches := 0
	for i, repo := range repoList {
		// Progress lines are human-only; JSON output must stay parseable.
		if !issuesJSON {
			cli.Print("%s %d/%d %s\n",
				dimStyle.Render(i18n.T("cmd.qa.issues.fetching")),
				i+1, len(repoList), repo.Name)
		}
		issues, err := fetchQAIssues(reg.Org, repo.Name, issuesLimit)
		if err != nil {
			fetchErrors = append(fetchErrors, IssueFetchError{
				Repo:  repo.Name,
				Error: strings.TrimSpace(err.Error()),
			})
			if !issuesJSON {
				cli.Print("%s\n", warningStyle.Render(i18n.T(
					"cmd.qa.issues.fetch_error",
					map[string]any{"Repo": repo.Name, "Error": strings.TrimSpace(err.Error())},
				)))
			}
			continue // Skip repos with errors
		}
		allIssues = append(allIssues, issues...)
		successfulFetches++
	}
	totalIssues := len(allIssues)
	if len(allIssues) == 0 {
		// Nothing fetched: JSON mode still emits a well-formed empty
		// payload, and the command only fails when every repo errored.
		emptyCategorised := map[string][]Issue{
			"needs_response": {},
			"ready":          {},
			"blocked":        {},
			"triage":         {},
		}
		if issuesJSON {
			if err := printCategorisedIssuesJSON(0, emptyCategorised, fetchErrors); err != nil {
				return err
			}
			if successfulFetches == 0 && len(fetchErrors) > 0 {
				return cli.Err("failed to fetch issues from any repository")
			}
			return nil
		}
		if successfulFetches == 0 && len(fetchErrors) > 0 {
			return cli.Err("failed to fetch issues from any repository")
		}
		cli.Text(i18n.T("cmd.qa.issues.no_issues"))
		return nil
	}
	// Categorise and prioritise issues.
	categorised := categoriseIssues(allIssues)
	// Filter based on flags; --mine composes with the category filters.
	if issuesMine {
		categorised = filterMine(categorised)
	}
	if issuesTriage {
		categorised = filterCategory(categorised, "triage")
	}
	if issuesBlocked {
		categorised = filterCategory(categorised, "blocked")
	}
	if issuesJSON {
		return printCategorisedIssuesJSON(totalIssues, categorised, fetchErrors)
	}
	// Print categorised issues for humans.
	printCategorisedIssues(categorised)
	return nil
}
// fetchQAIssues lists up to limit open issues for org/repoName via the gh
// CLI and tags each result with the short repo name. On a non-zero gh exit
// it surfaces gh's stderr as the error message.
func fetchQAIssues(org, repoName string, limit int) ([]Issue, error) {
	repoFullName := cli.Sprintf("%s/%s", org, repoName)
	args := []string{
		"issue", "list",
		"--repo", repoFullName,
		"--state", "open",
		"--limit", cli.Sprintf("%d", limit),
		"--json", "number,title,state,body,createdAt,updatedAt,author,assignees,labels,comments,url",
	}
	cmd := exec.Command("gh", args...)
	output, err := cmd.Output()
	if err != nil {
		// errors.As (rather than a raw type assertion) also matches a
		// wrapped *exec.ExitError, so gh's stderr is still surfaced.
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			return nil, log.E("qa.fetchQAIssues", strings.TrimSpace(string(exitErr.Stderr)), nil)
		}
		return nil, err
	}
	var issues []Issue
	if err := json.Unmarshal(output, &issues); err != nil {
		return nil, err
	}
	// Tag with repo name so downstream display/grouping can use it.
	for i := range issues {
		issues[i].RepoName = repoName
	}
	return issues, nil
}
// categoriseIssues buckets issues into the four triage categories and sorts
// each bucket deterministically: priority ascending, then most recently
// updated first, then repo name, then issue number.
func categoriseIssues(issues []Issue) map[string][]Issue {
	buckets := map[string][]Issue{
		"needs_response": {},
		"ready":          {},
		"blocked":        {},
		"triage":         {},
	}
	user := getCurrentUser()
	for i := range issues {
		item := &issues[i]
		categoriseIssue(item, user)
		buckets[item.Category] = append(buckets[item.Category], *item)
	}
	// Shared comparator for every bucket.
	compare := func(a, b Issue) int {
		if c := cmp.Compare(a.Priority, b.Priority); c != 0 {
			return c
		}
		// Newer updates sort first (note the reversed operands).
		if c := cmp.Compare(b.UpdatedAt.Unix(), a.UpdatedAt.Unix()); c != 0 {
			return c
		}
		if c := cmp.Compare(a.RepoName, b.RepoName); c != 0 {
			return c
		}
		return cmp.Compare(a.Number, b.Number)
	}
	for key := range buckets {
		slices.SortFunc(buckets[key], compare)
	}
	return buckets
}
// categoriseIssue assigns Category, Priority and ActionHint on issue.
// Categories are checked in precedence order: blocked, triage,
// needs_response, then ready as the default.
func categoriseIssue(issue *Issue, currentUser string) {
	labels := getLabels(issue)

	// Blocked: any label starting "blocked", or the literal "waiting".
	for _, label := range labels {
		if strings.HasPrefix(label, "blocked") || label == "waiting" {
			issue.Category = "blocked"
			issue.Priority = 30
			issue.ActionHint = i18n.T("cmd.qa.issues.hint.blocked")
			return
		}
	}

	// Triage: nothing attached yet — no labels and no assignees.
	if len(issue.Labels.Nodes) == 0 && len(issue.Assignees.Nodes) == 0 {
		issue.Category = "triage"
		issue.Priority = 20
		issue.ActionHint = i18n.T("cmd.qa.issues.hint.triage")
		return
	}

	// Needs response: the most recent comment came from someone else and
	// is under 48 hours old.
	if issue.Comments.TotalCount > 0 {
		if nodes := issue.Comments.Nodes; len(nodes) > 0 {
			last := nodes[len(nodes)-1]
			if last.Author.Login != currentUser && time.Since(last.CreatedAt) < 48*time.Hour {
				issue.Category = "needs_response"
				issue.Priority = 10
				issue.ActionHint = cli.Sprintf("@%s %s", last.Author.Login, i18n.T("cmd.qa.issues.hint.needs_response"))
				return
			}
		}
	}

	// Ready: everything else; rank derived from its labels.
	issue.Category = "ready"
	issue.Priority = calculatePriority(labels)
	issue.ActionHint = ""
}
// calculatePriority maps triage labels to a numeric rank (lower = more
// urgent). The most urgent matching label wins regardless of label order;
// label sets with no recognised label fall back to the default of 50.
//
// Ranks above the default (currently only "low" at 70) are honoured too:
// the previous min-against-default approach made the "low" branch dead
// code, because min(50, 70) could never exceed 50.
func calculatePriority(labels []string) int {
	const (
		defaultPriority = 50
		unmatched       = 1 << 30 // sentinel: no priority label seen yet
	)
	best := unmatched
	for _, l := range labels {
		var rank int
		// Case order matters for labels matching several substrings
		// (e.g. "critical-high"): the most urgent check runs first.
		switch {
		case strings.Contains(l, "critical") || strings.Contains(l, "urgent"):
			rank = 1
		case strings.Contains(l, "high"):
			rank = 10
		case strings.Contains(l, "medium"):
			rank = 30
		case strings.Contains(l, "low"):
			rank = 70 // deprioritised below unlabelled issues
		case l == "good-first-issue" || l == "good first issue":
			rank = 15 // Boost good first issues
		case l == "help-wanted" || l == "help wanted":
			rank = 20
		case l == "agent:ready" || l == "agentic":
			rank = 5 // AI-ready issues are high priority
		default:
			continue // label carries no priority information
		}
		best = min(best, rank)
	}
	if best == unmatched {
		return defaultPriority
	}
	return best
}
// getLabels returns the issue's label names lower-cased, for
// case-insensitive matching. Returns nil when the issue has no labels.
func getLabels(issue *Issue) []string {
	var names []string
	for _, node := range issue.Labels.Nodes {
		names = append(names, strings.ToLower(node.Name))
	}
	return names
}
// getCurrentUser returns the login of the authenticated gh user, or the
// empty string when the gh lookup fails (callers treat "" as "unknown").
func getCurrentUser() string {
	output, err := exec.Command("gh", "api", "user", "--jq", ".login").Output()
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(output))
}
// filterMine keeps only issues assigned to the current gh user, dropping
// categories that end up empty.
func filterMine(categorised map[string][]Issue) map[string][]Issue {
	user := getCurrentUser()
	result := make(map[string][]Issue)
	for category, issues := range categorised {
		var mine []Issue
		for _, issue := range issues {
			assigned := false
			for _, assignee := range issue.Assignees.Nodes {
				if assignee.Login == user {
					assigned = true
					break
				}
			}
			if assigned {
				mine = append(mine, issue)
			}
		}
		if len(mine) > 0 {
			result[category] = mine
		}
	}
	return result
}
// filterCategory reduces the map to the single named category, or to an
// empty map when that category is absent or has no issues.
func filterCategory(categorised map[string][]Issue, category string) map[string][]Issue {
	issues := categorised[category]
	if len(issues) == 0 {
		return map[string][]Issue{}
	}
	return map[string][]Issue{category: issues}
}
// printCategorisedIssues renders the buckets for humans in a fixed order:
// needs_response, ready, blocked, triage. Empty buckets are skipped; when
// everything is empty a "no issues" message is printed instead.
func printCategorisedIssues(categorised map[string][]Issue) {
	sections := []struct {
		key   string
		title string
		style *cli.AnsiStyle
	}{
		{"needs_response", i18n.T("cmd.qa.issues.category.needs_response"), warningStyle},
		{"ready", i18n.T("cmd.qa.issues.category.ready"), successStyle},
		{"blocked", i18n.T("cmd.qa.issues.category.blocked"), errorStyle},
		{"triage", i18n.T("cmd.qa.issues.category.triage"), dimStyle},
	}
	printed := 0
	for _, section := range sections {
		issues := categorised[section.key]
		if len(issues) == 0 {
			continue
		}
		// Blank line between sections, but not before the first one.
		if printed > 0 {
			cli.Blank()
		}
		printed++
		cli.Print("%s (%d):\n", section.style.Render(section.title), len(issues))
		for _, issue := range issues {
			printTriagedIssue(issue)
		}
	}
	if printed == 0 {
		cli.Text(i18n.T("cmd.qa.issues.no_issues"))
	}
}
// printCategorisedIssuesJSON emits the full triage payload as indented JSON
// on stdout. All four categories always appear, even when empty, so the
// schema is stable for machine consumers.
func printCategorisedIssuesJSON(totalIssues int, categorised map[string][]Issue, fetchErrors []IssueFetchError) error {
	order := []string{"needs_response", "ready", "blocked", "triage"}
	sections := make([]IssueCategoryOutput, 0, len(order))
	shown := 0
	for _, name := range order {
		bucket := categorised[name]
		shown += len(bucket)
		sections = append(sections, IssueCategoryOutput{
			Category: name,
			Count:    len(bucket),
			Issues:   bucket,
		})
	}
	payload := IssuesOutput{
		TotalIssues:    totalIssues,
		FilteredIssues: shown,
		ShowingMine:    issuesMine,
		ShowingTriage:  issuesTriage,
		ShowingBlocked: issuesBlocked,
		Categories:     sections,
		FetchErrors:    fetchErrors,
	}
	data, err := json.MarshalIndent(payload, "", " ")
	if err != nil {
		return err
	}
	cli.Print("%s\n", string(data))
	return nil
}
// printTriagedIssue renders a single issue line, e.g.:
//
//	#42 [core-bio] Fix avatar upload [priority:high] 2d
//	  -> @bob replied, needs response
//
// Only priority-related and agent labels are shown, sorted alphabetically
// so output is deterministic.
func printTriagedIssue(issue Issue) {
	number := cli.TitleStyle.Render(cli.Sprintf("#%d", issue.Number))
	repoTag := dimStyle.Render(cli.Sprintf("[%s]", issue.RepoName))
	heading := cli.ValueStyle.Render(truncate(issue.Title, 50))
	cli.Print(" %s %s %s", number, repoTag, heading)
	// Collect only the labels worth surfacing on this one-liner.
	var highlighted []string
	for _, node := range issue.Labels.Nodes {
		lowered := strings.ToLower(node.Name)
		if strings.Contains(lowered, "priority") || strings.Contains(lowered, "critical") ||
			lowered == "good-first-issue" || lowered == "agent:ready" || lowered == "agentic" {
			highlighted = append(highlighted, node.Name)
		}
	}
	if len(highlighted) > 0 {
		slices.Sort(highlighted)
		cli.Print(" %s", warningStyle.Render("["+strings.Join(highlighted, ", ")+"]"))
	}
	// Age since last update, then an optional follow-up hint line.
	cli.Print(" %s\n", dimStyle.Render(cli.FormatAge(issue.UpdatedAt)))
	if issue.ActionHint != "" {
		cli.Print(" %s %s\n", dimStyle.Render("->"), issue.ActionHint)
	}
}

316
cmd/qa/cmd_issues_test.go Normal file
View file

@ -0,0 +1,316 @@
package qa
import (
"encoding/json"
"fmt"
"path/filepath"
"testing"
"time"
"forge.lthn.ai/core/cli/pkg/cli"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRunQAIssuesJSONOutput_UsesMachineFriendlyKeys runs 'qa issues --json'
// against a stub gh script and asserts the payload uses snake_case keys
// (repo_name, action_hint) rather than Go field names, and that a recent
// comment from another user lands the issue in "needs_response".
func TestRunQAIssuesJSONOutput_UsesMachineFriendlyKeys(t *testing.T) {
	dir := t.TempDir()
	// A 1-hour-old comment from "carol" is inside the 48h needs_response window.
	commentTime := time.Now().UTC().Add(-1 * time.Hour).Format(time.RFC3339)
	updatedAt := time.Now().UTC().Format(time.RFC3339)
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
  alpha:
    type: module
`)
	// Stub gh: answers the current-user lookup and the alpha issue list;
	// anything else is an unexpected invocation and fails.
	writeExecutable(t, filepath.Join(dir, "gh"), fmt.Sprintf(`#!/bin/sh
case "$*" in
*"api user"*)
printf '%%s\n' 'alice'
;;
*"issue list --repo forge/alpha"*)
cat <<JSON
[
{
"number": 7,
"title": "Clarify agent output",
"state": "OPEN",
"body": "Explain behaviour",
"createdAt": "2026-03-30T00:00:00Z",
"updatedAt": %q,
"author": {"login": "bob"},
"assignees": {"nodes": []},
"labels": {"nodes": [{"name": "agent:ready"}]},
"comments": {
"totalCount": 1,
"nodes": [
{
"author": {"login": "carol"},
"createdAt": %q
}
]
},
"url": "https://example.com/issues/7"
}
]
JSON
;;
*)
printf '%%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`, updatedAt, commentTime))
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetIssuesFlags(t)
	t.Cleanup(func() {
		issuesRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addIssuesCommand(parent)
	command := findSubcommand(t, parent, "issues")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	var payload IssuesOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	assert.Equal(t, 1, payload.TotalIssues)
	assert.Equal(t, 1, payload.FilteredIssues)
	require.Len(t, payload.Categories, 4)
	// needs_response is always the first category in the payload.
	require.Len(t, payload.Categories[0].Issues, 1)
	issue := payload.Categories[0].Issues[0]
	assert.Equal(t, "needs_response", payload.Categories[0].Category)
	assert.Equal(t, "alpha", issue.RepoName)
	assert.Equal(t, 10, issue.Priority)
	assert.Equal(t, "needs_response", issue.Category)
	assert.Equal(t, "@carol cmd.qa.issues.hint.needs_response", issue.ActionHint)
	// Snake_case JSON keys are part of the machine contract.
	assert.Contains(t, output, `"repo_name"`)
	assert.Contains(t, output, `"action_hint"`)
	assert.NotContains(t, output, `"RepoName"`)
	assert.NotContains(t, output, `"ActionHint"`)
}
// TestRunQAIssuesJSONOutput_SortsFetchErrorsByRepoName declares repos in
// non-alphabetical order (beta before alpha) and asserts fetch errors come
// back sorted by repo name, proving repo iteration order is stable.
func TestRunQAIssuesJSONOutput_SortsFetchErrorsByRepoName(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
  beta:
    type: module
  alpha:
    type: module
`)
	// Stub gh: every issue fetch fails so both repos produce fetch errors.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"issue list --repo forge/alpha"*)
printf '%s\n' 'alpha failed' >&2
exit 1
;;
*"issue list --repo forge/beta"*)
printf '%s\n' 'beta failed' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetIssuesFlags(t)
	t.Cleanup(func() {
		issuesRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addIssuesCommand(parent)
	command := findSubcommand(t, parent, "issues")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	var runErr error
	output := captureStdout(t, func() {
		runErr = command.RunE(command, nil)
	})
	// All fetches failed, so the run itself errors — but JSON is still emitted.
	require.Error(t, runErr)
	var payload IssuesOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	require.Len(t, payload.FetchErrors, 2)
	assert.Equal(t, "alpha", payload.FetchErrors[0].Repo)
	assert.Equal(t, "beta", payload.FetchErrors[1].Repo)
}
// TestRunQAIssuesJSONOutput_ReturnsErrorWhenAllFetchesFail asserts that when
// every repo fetch fails, --json mode still prints a well-formed payload
// (four empty categories plus the fetch errors) AND returns an error.
func TestRunQAIssuesJSONOutput_ReturnsErrorWhenAllFetchesFail(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
  beta:
    type: module
  alpha:
    type: module
`)
	// Stub gh: both repos fail their issue fetch.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"issue list --repo forge/alpha"*)
printf '%s\n' 'alpha failed' >&2
exit 1
;;
*"issue list --repo forge/beta"*)
printf '%s\n' 'beta failed' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetIssuesFlags(t)
	t.Cleanup(func() {
		issuesRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addIssuesCommand(parent)
	command := findSubcommand(t, parent, "issues")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	require.NoError(t, command.Flags().Set("json", "true"))
	var runErr error
	output := captureStdout(t, func() {
		runErr = command.RunE(command, nil)
	})
	require.Error(t, runErr)
	var payload IssuesOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	// Schema stays stable even with zero issues.
	require.Len(t, payload.Categories, 4)
	assert.Empty(t, payload.Categories[0].Issues)
	require.Len(t, payload.FetchErrors, 2)
	assert.Equal(t, "alpha", payload.FetchErrors[0].Repo)
	assert.Equal(t, "beta", payload.FetchErrors[1].Repo)
}
// TestRunQAIssuesHumanOutput_ReturnsErrorWhenAllFetchesFail asserts that in
// human (non-JSON) mode a total fetch failure returns an error and does NOT
// print the misleading "no issues" message.
func TestRunQAIssuesHumanOutput_ReturnsErrorWhenAllFetchesFail(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "repos.yaml"), `version: 1
org: forge
base_path: .
repos:
  beta:
    type: module
  alpha:
    type: module
`)
	// Stub gh: both repos fail their issue fetch.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"issue list --repo forge/alpha"*)
printf '%s\n' 'alpha failed' >&2
exit 1
;;
*"issue list --repo forge/beta"*)
printf '%s\n' 'beta failed' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetIssuesFlags(t)
	t.Cleanup(func() {
		issuesRegistry = ""
	})
	parent := &cli.Command{Use: "qa"}
	addIssuesCommand(parent)
	command := findSubcommand(t, parent, "issues")
	require.NoError(t, command.Flags().Set("registry", filepath.Join(dir, "repos.yaml")))
	var runErr error
	output := captureStdout(t, func() {
		runErr = command.RunE(command, nil)
	})
	require.Error(t, runErr)
	assert.NotContains(t, output, "cmd.qa.issues.no_issues")
}
// TestCalculatePriority_UsesMostUrgentLabelRegardlessOfOrder verifies that
// the critical label wins whichever side of "low" it appears on.
func TestCalculatePriority_UsesMostUrgentLabelRegardlessOfOrder(t *testing.T) {
	for _, labels := range [][]string{
		{"low", "critical"},
		{"critical", "low"},
	} {
		assert.Equal(t, 1, calculatePriority(labels))
	}
}
// TestPrintTriagedIssue_SortsImportantLabels verifies that the important
// labels rendered after the title are sorted alphabetically, so output is
// deterministic regardless of the order the API returned them in.
func TestPrintTriagedIssue_SortsImportantLabels(t *testing.T) {
	var issue Issue
	// Labels deliberately arrive in reverse-alphabetical order.
	require.NoError(t, json.Unmarshal([]byte(`{
"number": 7,
"title": "Stabilise output",
"updatedAt": "2026-03-30T00:00:00Z",
"labels": {
"nodes": [
{"name": "priority:urgent"},
{"name": "agent:ready"}
]
}
}`), &issue))
	issue.RepoName = "alpha"
	output := captureStdout(t, func() {
		printTriagedIssue(issue)
	})
	assert.Contains(t, output, "[agent:ready, priority:urgent]")
	assert.NotContains(t, output, "[priority:urgent, agent:ready]")
}
// resetIssuesFlags resets the package-level issues command flags to their
// defaults for the duration of a test, restoring the previous values on
// cleanup so tests cannot leak flag state into each other.
func resetIssuesFlags(t *testing.T) {
	t.Helper()
	// Snapshot everything before clobbering.
	savedMine, savedTriage, savedBlocked := issuesMine, issuesTriage, issuesBlocked
	savedRegistry, savedLimit, savedJSON := issuesRegistry, issuesLimit, issuesJSON
	issuesMine, issuesTriage, issuesBlocked = false, false, false
	issuesRegistry, issuesLimit, issuesJSON = "", 50, false
	t.Cleanup(func() {
		issuesMine, issuesTriage, issuesBlocked = savedMine, savedTriage, savedBlocked
		issuesRegistry, issuesLimit, issuesJSON = savedRegistry, savedLimit, savedJSON
	})
}

804
cmd/qa/cmd_php.go Normal file
View file

@ -0,0 +1,804 @@
// cmd_php.go adds PHP quality assurance subcommands to the qa parent command.
//
// Commands:
// - fmt: Format PHP code with Laravel Pint
// - stan: Run PHPStan static analysis
// - psalm: Run Psalm static analysis
// - audit: Check dependency security
// - security: Run security checks
// - rector: Automated code refactoring
// - infection: Mutation testing
// - test: Run PHPUnit/Pest tests
package qa
import (
"context"
"encoding/json"
"fmt"
"os"
"sort"
"strings"
"forge.lthn.ai/core/cli/pkg/cli"
"forge.lthn.ai/core/lint/pkg/detect"
"forge.lthn.ai/core/lint/pkg/php"
)
// Severity styles for security output. Each maps one finding severity to a
// distinct terminal colour; headerStyle reuses the shared CLI header style.
var (
	headerStyle   = cli.HeaderStyle
	criticalStyle = cli.NewStyle().Bold().Foreground(cli.ColourRed500)    // critical findings
	highStyle     = cli.NewStyle().Bold().Foreground(cli.ColourOrange500) // high findings
	mediumStyle   = cli.NewStyle().Foreground(cli.ColourAmber500)         // medium findings
	lowStyle      = cli.NewStyle().Foreground(cli.ColourGray500)          // low findings
)
// addPHPCommands registers every PHP QA subcommand on the parent command.
func addPHPCommands(parent *cli.Command) {
	for _, register := range []func(*cli.Command){
		addPHPFmtCommand,
		addPHPStanCommand,
		addPHPPsalmCommand,
		addPHPAuditCommand,
		addPHPSecurityCommand,
		addPHPRectorCommand,
		addPHPInfectionCommand,
		addPHPTestCommand,
	} {
		register(parent)
	}
}
// PHP fmt command flags, bound in addPHPFmtCommand.
var (
	phpFmtFix  bool // --fix: apply formatting instead of Pint's check mode
	phpFmtDiff bool // --diff: show a diff of changes
	phpFmtJSON bool // --json: machine-readable output, suppresses the banner
)
// addPHPFmtCommand registers 'qa fmt', which runs Laravel Pint against the
// current working directory (check mode by default, --fix to apply).
func addPHPFmtCommand(parent *cli.Command) {
	fmtCmd := &cli.Command{
		Use:   "fmt",
		Short: "Format PHP code with Laravel Pint",
		Long:  "Run Laravel Pint to check or fix PHP code style. Uses --test mode by default; pass --fix to apply changes.",
		RunE: func(_ *cli.Command, _ []string) error {
			workDir, err := os.Getwd()
			if err != nil {
				return err
			}
			// Only meaningful inside a composer project.
			if !detect.IsPHPProject(workDir) {
				return cli.Err("not a PHP project (no composer.json found)")
			}
			// Banner is human-only; JSON output must stay clean.
			if !isMachineReadableOutput(phpFmtJSON) {
				cli.Print("%s %s\n", headerStyle.Render("PHP Format"), dimStyle.Render("(Pint)"))
				cli.Blank()
			}
			return php.Format(context.Background(), php.FormatOptions{
				Dir:  workDir,
				Fix:  phpFmtFix,
				Diff: phpFmtDiff,
				JSON: phpFmtJSON,
			})
		},
	}
	flags := fmtCmd.Flags()
	flags.BoolVar(&phpFmtFix, "fix", false, "Apply formatting fixes")
	flags.BoolVar(&phpFmtDiff, "diff", false, "Show diff of changes")
	flags.BoolVar(&phpFmtJSON, "json", false, "Output results as JSON")
	parent.AddCommand(fmtCmd)
}
// PHP stan command flags, bound in addPHPStanCommand.
var (
	phpStanLevel  int    // --level: PHPStan rule level 0-9; 0 defers to config
	phpStanMemory string // --memory: PHP memory limit (e.g. "2G")
	phpStanJSON   bool   // --json: machine-readable output
	phpStanSARIF  bool   // --sarif: SARIF output
)
// addPHPStanCommand registers 'qa stan', which runs whichever analyser
// php.DetectAnalyser finds (PHPStan or Larastan) in the working directory.
// Any analysis failure is reported as "static analysis found issues".
func addPHPStanCommand(parent *cli.Command) {
	cmd := &cli.Command{
		Use:   "stan",
		Short: "Run PHPStan static analysis",
		Long:  "Run PHPStan (or Larastan) to find bugs in PHP code through static analysis.",
		RunE: func(cmd *cli.Command, args []string) error {
			cwd, err := os.Getwd()
			if err != nil {
				return err
			}
			// Only meaningful inside a composer project.
			if !detect.IsPHPProject(cwd) {
				return cli.Err("not a PHP project (no composer.json found)")
			}
			analyser, found := php.DetectAnalyser(cwd)
			if !found {
				return cli.Err("no static analyser found (install PHPStan: composer require phpstan/phpstan --dev)")
			}
			// Banner and success message are human-only; JSON/SARIF stay clean.
			if !isMachineReadableOutput(phpStanJSON, phpStanSARIF) {
				cli.Print("%s %s\n", headerStyle.Render("PHP Static Analysis"), dimStyle.Render(fmt.Sprintf("(%s)", analyser)))
				cli.Blank()
			}
			err = php.Analyse(context.Background(), php.AnalyseOptions{
				Dir:    cwd,
				Level:  phpStanLevel,
				Memory: phpStanMemory,
				JSON:   phpStanJSON,
				SARIF:  phpStanSARIF,
			})
			if err != nil {
				return cli.Err("static analysis found issues")
			}
			if !isMachineReadableOutput(phpStanJSON, phpStanSARIF) {
				cli.Blank()
				cli.Print("%s\n", successStyle.Render("Static analysis passed"))
			}
			return nil
		},
	}
	cmd.Flags().IntVar(&phpStanLevel, "level", 0, "Analysis level (0-9, 0 uses config default)")
	cmd.Flags().StringVar(&phpStanMemory, "memory", "", "Memory limit (e.g., 2G)")
	cmd.Flags().BoolVar(&phpStanJSON, "json", false, "Output results as JSON")
	cmd.Flags().BoolVar(&phpStanSARIF, "sarif", false, "Output results in SARIF format")
	parent.AddCommand(cmd)
}
// PHP psalm command flags, bound in addPHPPsalmCommand.
var (
	phpPsalmLevel    int  // --level: 1 (strictest) to 8 (most lenient)
	phpPsalmFix      bool // --fix: auto-fix where Psalm can
	phpPsalmBaseline bool // --baseline: generate/update the baseline file
	phpPsalmShowInfo bool // --show-info: include info-level issues
	phpPsalmJSON     bool // --json: machine-readable output
	phpPsalmSARIF    bool // --sarif: SARIF output
)
// addPHPPsalmCommand registers 'qa psalm', which runs Psalm in the working
// directory. Any Psalm failure is reported as "Psalm found issues".
func addPHPPsalmCommand(parent *cli.Command) {
	cmd := &cli.Command{
		Use:   "psalm",
		Short: "Run Psalm static analysis",
		Long:  "Run Psalm for deep type-level static analysis of PHP code.",
		RunE: func(cmd *cli.Command, args []string) error {
			cwd, err := os.Getwd()
			if err != nil {
				return err
			}
			// Only meaningful inside a composer project with Psalm installed.
			if !detect.IsPHPProject(cwd) {
				return cli.Err("not a PHP project (no composer.json found)")
			}
			_, found := php.DetectPsalm(cwd)
			if !found {
				return cli.Err("Psalm not found (install: composer require vimeo/psalm --dev)")
			}
			// Banner and success message are human-only; JSON/SARIF stay clean.
			if !isMachineReadableOutput(phpPsalmJSON, phpPsalmSARIF) {
				cli.Print("%s\n", headerStyle.Render("PHP Psalm Analysis"))
				cli.Blank()
			}
			err = php.RunPsalm(context.Background(), php.PsalmOptions{
				Dir:      cwd,
				Level:    phpPsalmLevel,
				Fix:      phpPsalmFix,
				Baseline: phpPsalmBaseline,
				ShowInfo: phpPsalmShowInfo,
				JSON:     phpPsalmJSON,
				SARIF:    phpPsalmSARIF,
			})
			if err != nil {
				return cli.Err("Psalm found issues")
			}
			if !isMachineReadableOutput(phpPsalmJSON, phpPsalmSARIF) {
				cli.Blank()
				cli.Print("%s\n", successStyle.Render("Psalm analysis passed"))
			}
			return nil
		},
	}
	cmd.Flags().IntVar(&phpPsalmLevel, "level", 0, "Error level (1=strictest, 8=most lenient)")
	cmd.Flags().BoolVar(&phpPsalmFix, "fix", false, "Auto-fix issues where possible")
	cmd.Flags().BoolVar(&phpPsalmBaseline, "baseline", false, "Generate/update baseline file")
	cmd.Flags().BoolVar(&phpPsalmShowInfo, "show-info", false, "Show info-level issues")
	cmd.Flags().BoolVar(&phpPsalmJSON, "json", false, "Output results as JSON")
	cmd.Flags().BoolVar(&phpPsalmSARIF, "sarif", false, "Output results in SARIF format")
	parent.AddCommand(cmd)
}
// PHP audit command flags, bound in addPHPAuditCommand.
var (
	phpAuditJSON bool // --json: machine-readable output
	phpAuditFix  bool // --fix: auto-fix vulnerabilities (npm only)
)
// addPHPAuditCommand registers 'qa audit', which runs php.RunAudit
// (composer audit plus npm audit) and fails when any tool reports
// vulnerabilities, in both JSON and human output modes.
func addPHPAuditCommand(parent *cli.Command) {
	cmd := &cli.Command{
		Use:   "audit",
		Short: "Audit PHP and npm dependencies for vulnerabilities",
		Long:  "Run composer audit and npm audit to check dependencies for known security vulnerabilities.",
		RunE: func(cmd *cli.Command, args []string) error {
			cwd, err := os.Getwd()
			if err != nil {
				return err
			}
			// Only meaningful inside a composer project.
			if !detect.IsPHPProject(cwd) {
				return cli.Err("not a PHP project (no composer.json found)")
			}
			// Banner is human-only; JSON output must stay clean.
			if !isMachineReadableOutput(phpAuditJSON) {
				cli.Print("%s\n", headerStyle.Render("Dependency Audit"))
				cli.Blank()
			}
			results, err := php.RunAudit(context.Background(), php.AuditOptions{
				Dir:  cwd,
				JSON: phpAuditJSON,
				Fix:  phpAuditFix,
			})
			if err != nil {
				return err
			}
			if phpAuditJSON {
				// Machine mode: emit the normalised payload, then fail if
				// anything was found.
				payload := mapAuditResultsForJSON(results)
				data, err := json.MarshalIndent(payload, "", " ")
				if err != nil {
					return err
				}
				cli.Print("%s\n", string(data))
				if payload.HasVulnerabilities {
					return cli.Err("vulnerabilities found in dependencies")
				}
				return nil
			}
			// Human mode: one line per tool, advisories indented beneath.
			hasVulns := false
			for _, result := range results {
				// A tool-level error (e.g. tool missing) is a warning, not a failure.
				if result.Error != nil {
					cli.Print("%s %s: %s\n", warningStyle.Render("!"), result.Tool, result.Error)
					continue
				}
				if result.Vulnerabilities > 0 {
					hasVulns = true
					cli.Print("%s %s: %d vulnerabilities found\n",
						errorStyle.Render(cli.Glyph(":cross:")),
						result.Tool,
						result.Vulnerabilities)
					for _, adv := range result.Advisories {
						cli.Print(" %s %s: %s\n",
							dimStyle.Render("->"),
							adv.Package,
							adv.Title)
					}
				} else {
					cli.Print("%s %s: no vulnerabilities found\n",
						successStyle.Render(cli.Glyph(":check:")),
						result.Tool)
				}
			}
			if hasVulns {
				return cli.Err("vulnerabilities found in dependencies")
			}
			return nil
		},
	}
	cmd.Flags().BoolVar(&phpAuditJSON, "json", false, "Output results as JSON")
	cmd.Flags().BoolVar(&phpAuditFix, "fix", false, "Auto-fix vulnerabilities (npm only)")
	parent.AddCommand(cmd)
}
// PHP security command flags, bound in addPHPSecurityCommand.
var (
	phpSecuritySeverity string // --severity: minimum severity to report
	phpSecurityJSON     bool   // --json: machine-readable output
	phpSecuritySARIF    bool   // --sarif: SARIF output
	phpSecurityURL      string // --url: also check HTTP security headers at this URL
)
// addPHPSecurityCommand registers 'qa security', which runs the PHP
// security check suite and reports results in human, SARIF, or JSON form.
// In every mode the command returns an error when any critical- or
// high-severity check failed, so CI pipelines can gate on it.
//
// The marshal-print step and the failure condition were previously
// triplicated across the SARIF/JSON/human branches; they are computed once
// here, which keeps the three output modes guaranteed-consistent.
func addPHPSecurityCommand(parent *cli.Command) {
	cmd := &cli.Command{
		Use:   "security",
		Short: "Run security checks on the PHP project",
		Long:  "Check for common security issues including dependency vulnerabilities, .env exposure, debug mode, and more.",
		RunE: func(cmd *cli.Command, args []string) error {
			cwd, err := os.Getwd()
			if err != nil {
				return err
			}
			// Only meaningful inside a composer project.
			if !detect.IsPHPProject(cwd) {
				return cli.Err("not a PHP project (no composer.json found)")
			}
			// Banner is human-only; JSON/SARIF output must stay clean.
			if !isMachineReadableOutput(phpSecurityJSON, phpSecuritySARIF) {
				cli.Print("%s\n", headerStyle.Render("Security Checks"))
				cli.Blank()
			}
			result, err := php.RunSecurityChecks(context.Background(), php.SecurityOptions{
				Dir:      cwd,
				Severity: phpSecuritySeverity,
				JSON:     phpSecurityJSON,
				SARIF:    phpSecuritySARIF,
				URL:      phpSecurityURL,
			})
			if err != nil {
				return err
			}
			// Sort for deterministic output across runs.
			result.Checks = sortSecurityChecks(result.Checks)
			summary := result.Summary
			// Shared failure condition for all three output modes.
			blocking := summary.Critical > 0 || summary.High > 0
			// printJSON marshals with the indentation shared by both
			// machine-readable modes.
			printJSON := func(v any) error {
				data, err := json.MarshalIndent(v, "", " ")
				if err != nil {
					return err
				}
				cli.Print("%s\n", string(data))
				return nil
			}
			if phpSecuritySARIF {
				if err := printJSON(mapSecurityResultForSARIF(result)); err != nil {
					return err
				}
				if blocking {
					return cli.Err("security checks failed")
				}
				return nil
			}
			if phpSecurityJSON {
				if err := printJSON(result); err != nil {
					return err
				}
				if blocking {
					return cli.Err("security checks failed")
				}
				return nil
			}
			// Human mode: one line per check, failures annotated with
			// severity, message, and fix hint.
			for _, check := range result.Checks {
				if check.Passed {
					cli.Print("%s %s\n",
						successStyle.Render(cli.Glyph(":check:")),
						check.Name)
					continue
				}
				style := getSeverityStyle(check.Severity)
				cli.Print("%s %s %s\n",
					errorStyle.Render(cli.Glyph(":cross:")),
					check.Name,
					style.Render(fmt.Sprintf("[%s]", check.Severity)))
				if check.Message != "" {
					cli.Print(" %s %s\n", dimStyle.Render("->"), check.Message)
				}
				if check.Fix != "" {
					cli.Print(" %s Fix: %s\n", dimStyle.Render("->"), check.Fix)
				}
			}
			// Severity summary, one line per non-zero count.
			cli.Blank()
			cli.Print("%s: %d/%d checks passed\n",
				headerStyle.Render("Summary"),
				summary.Passed, summary.Total)
			if summary.Critical > 0 {
				cli.Print(" %s\n", criticalStyle.Render(fmt.Sprintf("%d critical", summary.Critical)))
			}
			if summary.High > 0 {
				cli.Print(" %s\n", highStyle.Render(fmt.Sprintf("%d high", summary.High)))
			}
			if summary.Medium > 0 {
				cli.Print(" %s\n", mediumStyle.Render(fmt.Sprintf("%d medium", summary.Medium)))
			}
			if summary.Low > 0 {
				cli.Print(" %s\n", lowStyle.Render(fmt.Sprintf("%d low", summary.Low)))
			}
			if blocking {
				return cli.Err("security checks failed")
			}
			return nil
		},
	}
	cmd.Flags().StringVar(&phpSecuritySeverity, "severity", "", "Minimum severity to report (critical, high, medium, low)")
	cmd.Flags().BoolVar(&phpSecurityJSON, "json", false, "Output results as JSON")
	cmd.Flags().BoolVar(&phpSecuritySARIF, "sarif", false, "Output results in SARIF format")
	cmd.Flags().StringVar(&phpSecurityURL, "url", "", "URL to check HTTP security headers")
	parent.AddCommand(cmd)
}
// auditJSONOutput is the top-level payload for 'qa audit --json',
// aggregating per-tool results and a file-wide vulnerability count.
type auditJSONOutput struct {
	Results            []auditResultJSON `json:"results"`
	HasVulnerabilities bool              `json:"has_vulnerabilities"` // true when Vulnerabilities > 0
	Vulnerabilities    int               `json:"vulnerabilities"`     // sum across all tools
}
// auditResultJSON is the JSON shape for one audit tool's outcome
// (e.g. composer or npm).
type auditResultJSON struct {
	Tool            string              `json:"tool"`
	Vulnerabilities int                 `json:"vulnerabilities"`
	Advisories      []auditAdvisoryJSON `json:"advisories"`
	Error           string              `json:"error,omitempty"` // tool-level failure text, if any
}
// auditAdvisoryJSON is the JSON shape for a single security advisory
// attached to an audited package.
type auditAdvisoryJSON struct {
	Package     string   `json:"package"`
	Severity    string   `json:"severity,omitempty"`
	Title       string   `json:"title,omitempty"`
	URL         string   `json:"url,omitempty"`
	Identifiers []string `json:"identifiers,omitempty"` // e.g. CVE/GHSA identifiers — TODO confirm against php.AuditResult
}
// mapAuditResultsForJSON converts audit results into the stable JSON
// payload: tools sorted by name, advisories sorted by package then title,
// and a file-wide vulnerability total.
//
// It sorts a copy of the input — the previous version called sort.Slice on
// the results parameter directly, silently reordering the caller's slice.
func mapAuditResultsForJSON(results []php.AuditResult) auditJSONOutput {
	// Copy before sorting so the caller's slice is left untouched.
	sorted := append([]php.AuditResult(nil), results...)
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].Tool < sorted[j].Tool
	})
	output := auditJSONOutput{
		Results: make([]auditResultJSON, 0, len(sorted)),
	}
	for _, result := range sorted {
		entry := auditResultJSON{
			Tool:            result.Tool,
			Vulnerabilities: result.Vulnerabilities,
		}
		if result.Error != nil {
			entry.Error = result.Error.Error()
		}
		entry.Advisories = make([]auditAdvisoryJSON, 0, len(result.Advisories))
		for _, advisory := range result.Advisories {
			entry.Advisories = append(entry.Advisories, auditAdvisoryJSON{
				Package:  advisory.Package,
				Severity: advisory.Severity,
				Title:    advisory.Title,
				URL:      advisory.URL,
				// Copy the identifier slice so the payload does not alias
				// the source data.
				Identifiers: append([]string(nil), advisory.Identifiers...),
			})
		}
		// Stable advisory order: package name, then title as tie-breaker.
		sort.Slice(entry.Advisories, func(i, j int) bool {
			if entry.Advisories[i].Package == entry.Advisories[j].Package {
				return entry.Advisories[i].Title < entry.Advisories[j].Title
			}
			return entry.Advisories[i].Package < entry.Advisories[j].Package
		})
		output.Results = append(output.Results, entry)
		output.Vulnerabilities += entry.Vulnerabilities
	}
	output.HasVulnerabilities = output.Vulnerabilities > 0
	return output
}
// sortSecurityChecks orders checks by ID in place and returns the same slice
// for convenient chaining at call sites.
func sortSecurityChecks(checks []php.SecurityCheck) []php.SecurityCheck {
	byID := func(i, j int) bool {
		return checks[i].ID < checks[j].ID
	}
	sort.Slice(checks, byID)
	return checks
}
// PHP rector command flags.
var (
	phpRectorFix        bool // --fix: apply refactoring changes instead of dry-run
	phpRectorDiff       bool // --diff: show a detailed diff of proposed changes
	phpRectorClearCache bool // --clear-cache: clear Rector's cache before running
)
// addPHPRectorCommand registers the `rector` subcommand, which runs Rector
// in dry-run mode by default and applies changes when --fix is given.
func addPHPRectorCommand(parent *cli.Command) {
	rectorCmd := &cli.Command{
		Use:   "rector",
		Short: "Run Rector for automated PHP code refactoring",
		Long:  "Run Rector to apply automated code refactoring rules. Uses dry-run by default; pass --fix to apply changes.",
		RunE: func(cmd *cli.Command, args []string) error {
			workingDir, err := os.Getwd()
			if err != nil {
				return err
			}
			// Rector only makes sense inside a Composer-managed project
			// with the tool installed.
			if !detect.IsPHPProject(workingDir) {
				return cli.Err("not a PHP project (no composer.json found)")
			}
			if !php.DetectRector(workingDir) {
				return cli.Err("Rector not found (install: composer require rector/rector --dev)")
			}
			mode := "dry-run"
			if phpRectorFix {
				mode = "apply"
			}
			cli.Print("%s %s\n", headerStyle.Render("Rector Refactoring"), dimStyle.Render(fmt.Sprintf("(%s)", mode)))
			cli.Blank()
			runErr := php.RunRector(context.Background(), php.RectorOptions{
				Dir:        workingDir,
				Fix:        phpRectorFix,
				Diff:       phpRectorDiff,
				ClearCache: phpRectorClearCache,
			})
			if runErr != nil {
				return cli.Err("Rector found refactoring suggestions")
			}
			cli.Blank()
			cli.Print("%s\n", successStyle.Render("Rector check passed"))
			return nil
		},
	}
	rectorCmd.Flags().BoolVar(&phpRectorFix, "fix", false, "Apply refactoring changes")
	rectorCmd.Flags().BoolVar(&phpRectorDiff, "diff", false, "Show detailed diff of changes")
	rectorCmd.Flags().BoolVar(&phpRectorClearCache, "clear-cache", false, "Clear cache before running")
	parent.AddCommand(rectorCmd)
}
// PHP infection command flags.
var (
	phpInfectionMinMSI        int    // --min-msi: minimum mutation score indicator (0-100)
	phpInfectionMinCoveredMSI int    // --min-covered-msi: minimum covered mutation score (0-100)
	phpInfectionThreads       int    // --threads: number of parallel workers
	phpInfectionFilter        string // --filter: restrict mutated files by pattern
	phpInfectionOnlyCovered   bool   // --only-covered: mutate only code covered by tests
)
// addPHPInfectionCommand registers the `infection` subcommand, which runs
// Infection mutation testing against the project's test suite.
func addPHPInfectionCommand(parent *cli.Command) {
	infectionCmd := &cli.Command{
		Use:   "infection",
		Short: "Run Infection mutation testing",
		Long:  "Run Infection to test mutation coverage. Mutates code and verifies tests catch the mutations.",
		RunE: func(cmd *cli.Command, args []string) error {
			workingDir, err := os.Getwd()
			if err != nil {
				return err
			}
			// Require a Composer project with Infection installed.
			if !detect.IsPHPProject(workingDir) {
				return cli.Err("not a PHP project (no composer.json found)")
			}
			if !php.DetectInfection(workingDir) {
				return cli.Err("Infection not found (install: composer require infection/infection --dev)")
			}
			cli.Print("%s\n", headerStyle.Render("Mutation Testing"))
			cli.Blank()
			runErr := php.RunInfection(context.Background(), php.InfectionOptions{
				Dir:           workingDir,
				MinMSI:        phpInfectionMinMSI,
				MinCoveredMSI: phpInfectionMinCoveredMSI,
				Threads:       phpInfectionThreads,
				Filter:        phpInfectionFilter,
				OnlyCovered:   phpInfectionOnlyCovered,
			})
			if runErr != nil {
				return cli.Err("mutation testing did not pass minimum thresholds")
			}
			cli.Blank()
			cli.Print("%s\n", successStyle.Render("Mutation testing passed"))
			return nil
		},
	}
	infectionCmd.Flags().IntVar(&phpInfectionMinMSI, "min-msi", 0, "Minimum mutation score indicator (0-100, default 50)")
	infectionCmd.Flags().IntVar(&phpInfectionMinCoveredMSI, "min-covered-msi", 0, "Minimum covered mutation score (0-100, default 70)")
	infectionCmd.Flags().IntVar(&phpInfectionThreads, "threads", 0, "Number of parallel threads (default 4)")
	infectionCmd.Flags().StringVar(&phpInfectionFilter, "filter", "", "Filter files by pattern")
	infectionCmd.Flags().BoolVar(&phpInfectionOnlyCovered, "only-covered", false, "Only mutate covered code")
	parent.AddCommand(infectionCmd)
}
// PHP test command flags.
var (
	phpTestParallel bool   // --parallel: run tests in parallel
	phpTestCoverage bool   // --coverage: generate code coverage
	phpTestFilter   string // --filter: run only tests matching a name pattern
	phpTestGroup    string // --group: comma-separated list of test groups
	phpTestJUnit    bool   // --junit: emit JUnit XML instead of human output
)
// addPHPTestCommand registers the `test` subcommand, which detects the
// project's test runner (Pest or PHPUnit) and executes the suite.
// Human-readable banners are suppressed when --junit output is requested.
func addPHPTestCommand(parent *cli.Command) {
	cmd := &cli.Command{
		Use:   "test",
		Short: "Run PHP tests with Pest or PHPUnit",
		Long:  "Detect and run the PHP test suite. Automatically detects Pest or PHPUnit.",
		RunE: func(cmd *cli.Command, args []string) error {
			cwd, err := os.Getwd()
			if err != nil {
				return err
			}
			if !detect.IsPHPProject(cwd) {
				return cli.Err("not a PHP project (no composer.json found)")
			}
			runner := php.DetectTestRunner(cwd)
			// Keep stdout clean when machine-readable output is requested.
			if !isMachineReadableOutput(phpTestJUnit) {
				cli.Print("%s %s\n", headerStyle.Render("PHP Tests"), dimStyle.Render(fmt.Sprintf("(%s)", runner)))
				cli.Blank()
			}
			// Split --group on commas, trimming whitespace and dropping empty
			// entries so "a, b," selects groups "a" and "b" rather than " b"
			// and "".
			var groups []string
			if phpTestGroup != "" {
				for _, group := range strings.Split(phpTestGroup, ",") {
					if trimmed := strings.TrimSpace(group); trimmed != "" {
						groups = append(groups, trimmed)
					}
				}
			}
			err = php.RunTests(context.Background(), php.TestOptions{
				Dir:      cwd,
				Parallel: phpTestParallel,
				Coverage: phpTestCoverage,
				Filter:   phpTestFilter,
				Groups:   groups,
				JUnit:    phpTestJUnit,
			})
			if err != nil {
				return cli.Err("tests failed")
			}
			if !isMachineReadableOutput(phpTestJUnit) {
				cli.Blank()
				cli.Print("%s\n", successStyle.Render("All tests passed"))
			}
			return nil
		},
	}
	cmd.Flags().BoolVar(&phpTestParallel, "parallel", false, "Run tests in parallel")
	cmd.Flags().BoolVar(&phpTestCoverage, "coverage", false, "Generate code coverage")
	cmd.Flags().StringVar(&phpTestFilter, "filter", "", "Filter tests by name pattern")
	cmd.Flags().StringVar(&phpTestGroup, "group", "", "Run only tests in specified groups (comma-separated)")
	cmd.Flags().BoolVar(&phpTestJUnit, "junit", false, "Output results in JUnit XML format")
	parent.AddCommand(cmd)
}
// getSeverityStyle returns the render style for a severity level
// (case-insensitive). Unknown severities fall back to the dim style.
func getSeverityStyle(severity string) *cli.AnsiStyle {
	styles := map[string]*cli.AnsiStyle{
		"critical": criticalStyle,
		"high":     highStyle,
		"medium":   mediumStyle,
		"low":      lowStyle,
	}
	if style, ok := styles[strings.ToLower(severity)]; ok {
		return style
	}
	return dimStyle
}
// isMachineReadableOutput reports whether any machine-output flag (e.g.
// --json, --sarif, --junit) is enabled, meaning human chrome must be
// suppressed.
func isMachineReadableOutput(flags ...bool) bool {
	enabled := false
	for _, flag := range flags {
		enabled = enabled || flag
	}
	return enabled
}
// sarifLog is the top-level SARIF 2.1.0 document emitted by the security
// command's --sarif mode.
type sarifLog struct {
	Version string     `json:"version"`
	Schema  string     `json:"$schema"`
	Runs    []sarifRun `json:"runs"`
}

// sarifRun is a single analysis run: the tool description plus its findings.
type sarifRun struct {
	Tool    sarifTool     `json:"tool"`
	Results []sarifResult `json:"results"`
}

// sarifTool wraps the driver description, per the SARIF schema.
type sarifTool struct {
	Driver sarifDriver `json:"driver"`
}

// sarifDriver names the analysis tool and lists the rules it can report.
type sarifDriver struct {
	Name  string      `json:"name"`
	Rules []sarifRule `json:"rules"`
}

// sarifRule describes one security check in the rule catalog.
type sarifRule struct {
	ID               string       `json:"id"`
	Name             string       `json:"name"`
	ShortDescription sarifMessage `json:"shortDescription"`
	FullDescription  sarifMessage `json:"fullDescription"`
	// Help carries the remediation text, when the check provides one.
	Help sarifMessage `json:"help,omitempty"`
	// Properties holds extra metadata such as the CWE identifier.
	Properties any `json:"properties,omitempty"`
}

// sarifResult is one failed check, referencing its rule by ID.
type sarifResult struct {
	RuleID     string       `json:"ruleId"`
	Level      string       `json:"level"`
	Message    sarifMessage `json:"message"`
	Properties any          `json:"properties,omitempty"`
}

// sarifMessage is the SARIF text-message wrapper object.
type sarifMessage struct {
	Text string `json:"text"`
}
// mapSecurityResultForSARIF converts a security scan result into a SARIF
// 2.1.0 log. Every check becomes a catalog rule; only failed checks produce
// result entries.
func mapSecurityResultForSARIF(result *php.SecurityResult) sarifLog {
	ruleCatalog := make([]sarifRule, 0, len(result.Checks))
	findings := make([]sarifResult, 0, len(result.Checks))
	for _, check := range result.Checks {
		rule := sarifRule{
			ID:               check.ID,
			Name:             check.Name,
			ShortDescription: sarifMessage{Text: check.Name},
			FullDescription:  sarifMessage{Text: check.Description},
		}
		if check.Fix != "" {
			rule.Help = sarifMessage{Text: check.Fix}
		}
		if check.CWE != "" {
			rule.Properties = map[string]any{"cwe": check.CWE}
		}
		ruleCatalog = append(ruleCatalog, rule)
		// Passing checks are catalogued but produce no finding.
		if check.Passed {
			continue
		}
		text := check.Message
		if text == "" {
			text = check.Description
		}
		extra := map[string]any{
			"severity": check.Severity,
		}
		if check.CWE != "" {
			extra["cwe"] = check.CWE
		}
		if check.Fix != "" {
			extra["fix"] = check.Fix
		}
		findings = append(findings, sarifResult{
			RuleID:     check.ID,
			Level:      sarifLevel(check.Severity),
			Message:    sarifMessage{Text: text},
			Properties: extra,
		})
	}
	driver := sarifDriver{
		Name:  "core qa security",
		Rules: ruleCatalog,
	}
	return sarifLog{
		Version: "2.1.0",
		Schema:  "https://json.schemastore.org/sarif-2.1.0.json",
		Runs: []sarifRun{{
			Tool:    sarifTool{Driver: driver},
			Results: findings,
		}},
	}
}
// sarifLevel maps a severity label (case-insensitive) to a SARIF result
// level: critical/high -> error, medium -> warning, everything else -> note.
func sarifLevel(severity string) string {
	normalized := strings.ToLower(severity)
	if normalized == "critical" || normalized == "high" {
		return "error"
	}
	if normalized == "medium" {
		return "warning"
	}
	return "note"
}

432
cmd/qa/cmd_php_test.go Normal file
View file

@ -0,0 +1,432 @@
package qa
import (
	"bytes"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"testing"

	"forge.lthn.ai/core/cli/pkg/cli"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
// TestPHPStanJSONOutput_DoesNotAppendSuccessBanner verifies that --json mode
// prints only the tool's JSON line, with no headers or success banner.
func TestPHPStanJSONOutput_DoesNotAppendSuccessBanner(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
	// Stub phpstan binary that emits a fixed JSON payload.
	writeExecutable(t, filepath.Join(dir, "vendor", "bin", "phpstan"), "#!/bin/sh\nprintf '%s\\n' '{\"tool\":\"phpstan\",\"status\":\"ok\"}'\n")
	restoreWorkingDir(t, dir)
	resetPHPStanFlags(t)
	parent := &cli.Command{Use: "qa"}
	addPHPStanCommand(parent)
	command := findSubcommand(t, parent, "stan")
	require.NoError(t, command.Flags().Set("json", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	assert.Equal(t, "{\"tool\":\"phpstan\",\"status\":\"ok\"}\n", output)
	assert.NotContains(t, output, "Static analysis passed")
	assert.NotContains(t, output, "PHP Static Analysis")
}

// TestPHPPsalmJSONOutput_DoesNotAppendSuccessBanner mirrors the phpstan test
// for the psalm subcommand's --json mode.
func TestPHPPsalmJSONOutput_DoesNotAppendSuccessBanner(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
	writeExecutable(t, filepath.Join(dir, "vendor", "bin", "psalm"), "#!/bin/sh\nprintf '%s\\n' '{\"tool\":\"psalm\",\"status\":\"ok\"}'\n")
	restoreWorkingDir(t, dir)
	resetPHPPsalmFlags(t)
	parent := &cli.Command{Use: "qa"}
	addPHPPsalmCommand(parent)
	command := findSubcommand(t, parent, "psalm")
	require.NoError(t, command.Flags().Set("json", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	assert.Equal(t, "{\"tool\":\"psalm\",\"status\":\"ok\"}\n", output)
	assert.NotContains(t, output, "Psalm analysis passed")
	assert.NotContains(t, output, "PHP Psalm Analysis")
}

// TestPHPStanSARIFOutput_DoesNotAppendSuccessBanner verifies --sarif mode
// passes the tool's SARIF payload through untouched.
func TestPHPStanSARIFOutput_DoesNotAppendSuccessBanner(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
	writeExecutable(t, filepath.Join(dir, "vendor", "bin", "phpstan"), "#!/bin/sh\nprintf '%s\\n' '{\"version\":\"2.1.0\",\"runs\":[]}'\n")
	restoreWorkingDir(t, dir)
	resetPHPStanFlags(t)
	parent := &cli.Command{Use: "qa"}
	addPHPStanCommand(parent)
	command := findSubcommand(t, parent, "stan")
	require.NoError(t, command.Flags().Set("sarif", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	assert.Equal(t, "{\"version\":\"2.1.0\",\"runs\":[]}\n", output)
	assert.NotContains(t, output, "Static analysis passed")
	assert.NotContains(t, output, "PHP Static Analysis")
}

// TestPHPPsalmSARIFOutput_DoesNotAppendSuccessBanner mirrors the phpstan
// SARIF test for the psalm subcommand.
func TestPHPPsalmSARIFOutput_DoesNotAppendSuccessBanner(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
	writeExecutable(t, filepath.Join(dir, "vendor", "bin", "psalm"), "#!/bin/sh\nprintf '%s\\n' '{\"version\":\"2.1.0\",\"runs\":[]}'\n")
	restoreWorkingDir(t, dir)
	resetPHPPsalmFlags(t)
	parent := &cli.Command{Use: "qa"}
	addPHPPsalmCommand(parent)
	command := findSubcommand(t, parent, "psalm")
	require.NoError(t, command.Flags().Set("sarif", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	assert.Equal(t, "{\"version\":\"2.1.0\",\"runs\":[]}\n", output)
	assert.NotContains(t, output, "Psalm analysis passed")
	assert.NotContains(t, output, "PHP Psalm Analysis")
}
// TestPHPSecurityJSONOutput_UsesMachineFriendlyKeys verifies the security
// command's --json payload uses lower-case keys and omits human chrome.
// The .env fixture deliberately fails checks so RunE returns an error.
func TestPHPSecurityJSONOutput_UsesMachineFriendlyKeys(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
	writeTestFile(t, filepath.Join(dir, ".env"), "APP_DEBUG=true\nAPP_KEY=short\nAPP_URL=http://example.com\n")
	// Stub composer so the audit portion reports no advisories.
	writeExecutable(t, filepath.Join(dir, "bin", "composer"), "#!/bin/sh\nprintf '%s\\n' '{\"advisories\":{}}'\n")
	restoreWorkingDir(t, dir)
	prependPath(t, filepath.Join(dir, "bin"))
	resetPHPSecurityFlags(t)
	parent := &cli.Command{Use: "qa"}
	addPHPSecurityCommand(parent)
	command := findSubcommand(t, parent, "security")
	require.NoError(t, command.Flags().Set("json", "true"))
	output := captureStdout(t, func() {
		require.Error(t, command.RunE(command, nil))
	})
	assert.Contains(t, output, "\"checks\"")
	assert.Contains(t, output, "\"summary\"")
	assert.Contains(t, output, "\"app_key_set\"")
	assert.NotContains(t, output, "\"Checks\"")
	assert.NotContains(t, output, "Security Checks")
}

// TestPHPSecuritySARIFOutput_IsStructuredAndChromeFree verifies --sarif mode
// emits a parseable SARIF 2.1.0 document with no human-readable banners.
func TestPHPSecuritySARIFOutput_IsStructuredAndChromeFree(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
	writeTestFile(t, filepath.Join(dir, ".env"), "APP_DEBUG=true\nAPP_KEY=short\nAPP_URL=http://example.com\n")
	writeExecutable(t, filepath.Join(dir, "bin", "composer"), "#!/bin/sh\nprintf '%s\\n' '{\"advisories\":{}}'\n")
	restoreWorkingDir(t, dir)
	prependPath(t, filepath.Join(dir, "bin"))
	resetPHPSecurityFlags(t)
	parent := &cli.Command{Use: "qa"}
	addPHPSecurityCommand(parent)
	command := findSubcommand(t, parent, "security")
	require.NoError(t, command.Flags().Set("sarif", "true"))
	output := captureStdout(t, func() {
		require.Error(t, command.RunE(command, nil))
	})
	var payload map[string]any
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	assert.Equal(t, "2.1.0", payload["version"])
	assert.Contains(t, output, "\"ruleId\": \"app_key_set\"")
	assert.NotContains(t, output, "Security Checks")
	assert.NotContains(t, output, "Summary:")
}

// TestPHPSecurityJSONOutput_RespectsSeverityFilter verifies --severity
// filters the reported checks while the summary still reflects the filtered
// set (here: critical-only excludes e.g. https_enforced).
func TestPHPSecurityJSONOutput_RespectsSeverityFilter(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
	writeTestFile(t, filepath.Join(dir, ".env"), "APP_DEBUG=true\nAPP_KEY=short\nAPP_URL=http://example.com\n")
	writeExecutable(t, filepath.Join(dir, "bin", "composer"), "#!/bin/sh\nprintf '%s\\n' '{\"advisories\":{}}'\n")
	restoreWorkingDir(t, dir)
	prependPath(t, filepath.Join(dir, "bin"))
	resetPHPSecurityFlags(t)
	parent := &cli.Command{Use: "qa"}
	addPHPSecurityCommand(parent)
	command := findSubcommand(t, parent, "security")
	require.NoError(t, command.Flags().Set("json", "true"))
	require.NoError(t, command.Flags().Set("severity", "critical"))
	output := captureStdout(t, func() {
		require.Error(t, command.RunE(command, nil))
	})
	var payload struct {
		Checks []struct {
			ID       string `json:"id"`
			Severity string `json:"severity"`
		} `json:"checks"`
		Summary struct {
			Total    int `json:"total"`
			Passed   int `json:"passed"`
			Critical int `json:"critical"`
			High     int `json:"high"`
		} `json:"summary"`
	}
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	assert.Equal(t, 3, payload.Summary.Total)
	assert.Equal(t, 1, payload.Summary.Passed)
	assert.Equal(t, 2, payload.Summary.Critical)
	assert.Zero(t, payload.Summary.High)
	require.Len(t, payload.Checks, 3)
	assert.NotContains(t, output, "https_enforced")
}
// TestPHPAuditJSONOutput_UsesLowerCaseAdvisoryKeys verifies the audit --json
// payload exposes lower-case keys, counts vulnerabilities, and drops banner
// output. A stub composer binary returns one advisory.
func TestPHPAuditJSONOutput_UsesLowerCaseAdvisoryKeys(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
	writeExecutable(t, filepath.Join(dir, "composer"), `#!/bin/sh
cat <<'JSON'
{
  "advisories": {
    "vendor/package-a": [
      {
        "title": "Remote Code Execution",
        "link": "https://example.com/advisory/1",
        "cve": "CVE-2025-1234",
        "affectedVersions": ">=1.0,<1.5"
      }
    ]
  }
}
JSON
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetPHPAuditFlags(t)
	parent := &cli.Command{Use: "qa"}
	addPHPAuditCommand(parent)
	command := findSubcommand(t, parent, "audit")
	require.NoError(t, command.Flags().Set("json", "true"))
	var runErr error
	output := captureStdout(t, func() {
		runErr = command.RunE(command, nil)
	})
	// Vulnerabilities present: the command must signal failure.
	require.Error(t, runErr)
	var payload struct {
		Results []struct {
			Tool       string `json:"tool"`
			Advisories []struct {
				Package string `json:"package"`
			} `json:"advisories"`
		} `json:"results"`
		HasVulnerabilities bool `json:"has_vulnerabilities"`
		Vulnerabilities    int  `json:"vulnerabilities"`
	}
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	require.Len(t, payload.Results, 1)
	assert.Equal(t, "composer", payload.Results[0].Tool)
	require.Len(t, payload.Results[0].Advisories, 1)
	assert.Equal(t, "vendor/package-a", payload.Results[0].Advisories[0].Package)
	assert.True(t, payload.HasVulnerabilities)
	assert.Equal(t, 1, payload.Vulnerabilities)
	assert.NotContains(t, output, "\"Package\"")
	assert.NotContains(t, output, "Dependency Audit")
}

// TestPHPTestJUnitOutput_PrintsOnlyXML verifies --junit mode prints only the
// JUnit XML written by the runner, suppressing the runner's human output and
// all banners. The stub phpunit parses --log-junit to find the target file.
func TestPHPTestJUnitOutput_PrintsOnlyXML(t *testing.T) {
	dir := t.TempDir()
	writeTestFile(t, filepath.Join(dir, "composer.json"), "{}")
	writeExecutable(t, filepath.Join(dir, "vendor", "bin", "phpunit"), "#!/bin/sh\njunit=''\nwhile [ $# -gt 0 ]; do\n  if [ \"$1\" = \"--log-junit\" ]; then\n    shift\n    junit=\"$1\"\n  fi\n  shift\ndone\nprintf '%s\\n' 'human output should be suppressed'\nprintf '%s' '<testsuite tests=\"1\"></testsuite>' > \"$junit\"\n")
	restoreWorkingDir(t, dir)
	resetPHPTestFlags(t)
	parent := &cli.Command{Use: "qa"}
	addPHPTestCommand(parent)
	command := findSubcommand(t, parent, "test")
	require.NoError(t, command.Flags().Set("junit", "true"))
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	assert.Equal(t, "<testsuite tests=\"1\"></testsuite>\n", output)
	assert.NotContains(t, output, "human output should be suppressed")
	assert.NotContains(t, output, "PHP Tests")
	assert.NotContains(t, output, "All tests passed")
}
// writeTestFile creates path with content, creating parent directories as
// needed. Fails the test on any I/O error.
func writeTestFile(t *testing.T, path string, content string) {
	t.Helper()
	parent := filepath.Dir(path)
	require.NoError(t, os.MkdirAll(parent, 0o755))
	require.NoError(t, os.WriteFile(path, []byte(content), 0o644))
}
// writeExecutable creates an executable script at path (mode 0755), creating
// parent directories as needed. Fails the test on any I/O error.
func writeExecutable(t *testing.T, path string, content string) {
	t.Helper()
	parent := filepath.Dir(path)
	require.NoError(t, os.MkdirAll(parent, 0o755))
	require.NoError(t, os.WriteFile(path, []byte(content), 0o755))
}
// restoreWorkingDir switches the process into dir for the duration of the
// test and registers a cleanup that returns to the previous directory.
func restoreWorkingDir(t *testing.T, dir string) {
	t.Helper()
	previous, err := os.Getwd()
	require.NoError(t, err)
	require.NoError(t, os.Chdir(dir))
	t.Cleanup(func() {
		require.NoError(t, os.Chdir(previous))
	})
}
// resetPHPStanFlags zeroes the package-level phpstan flag variables for the
// test and restores their previous values on cleanup, so tests do not leak
// flag state into each other.
func resetPHPStanFlags(t *testing.T) {
	t.Helper()
	oldLevel := phpStanLevel
	oldMemory := phpStanMemory
	oldJSON := phpStanJSON
	oldSARIF := phpStanSARIF
	phpStanLevel = 0
	phpStanMemory = ""
	phpStanJSON = false
	phpStanSARIF = false
	t.Cleanup(func() {
		phpStanLevel = oldLevel
		phpStanMemory = oldMemory
		phpStanJSON = oldJSON
		phpStanSARIF = oldSARIF
	})
}

// resetPHPPsalmFlags zeroes the psalm flag variables and restores them on
// cleanup.
func resetPHPPsalmFlags(t *testing.T) {
	t.Helper()
	oldLevel := phpPsalmLevel
	oldFix := phpPsalmFix
	oldBaseline := phpPsalmBaseline
	oldShowInfo := phpPsalmShowInfo
	oldJSON := phpPsalmJSON
	oldSARIF := phpPsalmSARIF
	phpPsalmLevel = 0
	phpPsalmFix = false
	phpPsalmBaseline = false
	phpPsalmShowInfo = false
	phpPsalmJSON = false
	phpPsalmSARIF = false
	t.Cleanup(func() {
		phpPsalmLevel = oldLevel
		phpPsalmFix = oldFix
		phpPsalmBaseline = oldBaseline
		phpPsalmShowInfo = oldShowInfo
		phpPsalmJSON = oldJSON
		phpPsalmSARIF = oldSARIF
	})
}

// resetPHPSecurityFlags zeroes the security flag variables and restores them
// on cleanup.
func resetPHPSecurityFlags(t *testing.T) {
	t.Helper()
	oldSeverity := phpSecuritySeverity
	oldJSON := phpSecurityJSON
	oldSARIF := phpSecuritySARIF
	oldURL := phpSecurityURL
	phpSecuritySeverity = ""
	phpSecurityJSON = false
	phpSecuritySARIF = false
	phpSecurityURL = ""
	t.Cleanup(func() {
		phpSecuritySeverity = oldSeverity
		phpSecurityJSON = oldJSON
		phpSecuritySARIF = oldSARIF
		phpSecurityURL = oldURL
	})
}

// resetPHPAuditFlags zeroes the audit flag variables and restores them on
// cleanup.
func resetPHPAuditFlags(t *testing.T) {
	t.Helper()
	oldJSON := phpAuditJSON
	oldFix := phpAuditFix
	phpAuditJSON = false
	phpAuditFix = false
	t.Cleanup(func() {
		phpAuditJSON = oldJSON
		phpAuditFix = oldFix
	})
}

// resetPHPTestFlags zeroes the test-command flag variables and restores them
// on cleanup.
func resetPHPTestFlags(t *testing.T) {
	t.Helper()
	oldParallel := phpTestParallel
	oldCoverage := phpTestCoverage
	oldFilter := phpTestFilter
	oldGroup := phpTestGroup
	oldJUnit := phpTestJUnit
	phpTestParallel = false
	phpTestCoverage = false
	phpTestFilter = ""
	phpTestGroup = ""
	phpTestJUnit = false
	t.Cleanup(func() {
		phpTestParallel = oldParallel
		phpTestCoverage = oldCoverage
		phpTestFilter = oldFilter
		phpTestGroup = oldGroup
		phpTestJUnit = oldJUnit
	})
}
// findSubcommand returns the named subcommand of parent, failing the test if
// no subcommand with that name is registered.
func findSubcommand(t *testing.T, parent *cli.Command, name string) *cli.Command {
	t.Helper()
	for _, candidate := range parent.Commands() {
		if candidate.Name() != name {
			continue
		}
		return candidate
	}
	t.Fatalf("subcommand %q not found", name)
	return nil
}
// captureStdout redirects os.Stdout to a pipe while fn runs and returns
// everything fn wrote.
//
// The pipe is drained by a background goroutine WHILE fn runs: without that,
// output larger than the OS pipe buffer (typically 64 KiB) would block fn's
// writes forever, deadlocking the test, because nothing read the pipe until
// fn returned.
func captureStdout(t *testing.T, fn func()) string {
	t.Helper()
	oldStdout := os.Stdout
	reader, writer, err := os.Pipe()
	require.NoError(t, err)
	os.Stdout = writer
	defer func() {
		os.Stdout = oldStdout
	}()
	// Drain concurrently so fn can never fill the pipe buffer and block.
	var buf bytes.Buffer
	copyDone := make(chan error, 1)
	go func() {
		_, copyErr := io.Copy(&buf, reader)
		copyDone <- copyErr
	}()
	fn()
	// Closing the writer lets io.Copy observe EOF and finish.
	require.NoError(t, writer.Close())
	require.NoError(t, <-copyDone)
	require.NoError(t, reader.Close())
	return buf.String()
}
// prependPath puts dir at the front of PATH for the duration of the test and
// restores the original PATH on cleanup.
func prependPath(t *testing.T, dir string) {
	t.Helper()
	previous := os.Getenv("PATH")
	combined := dir + string(os.PathListSeparator) + previous
	require.NoError(t, os.Setenv("PATH", combined))
	t.Cleanup(func() {
		require.NoError(t, os.Setenv("PATH", previous))
	})
}

49
cmd/qa/cmd_qa.go Normal file
View file

@ -0,0 +1,49 @@
// Package qa provides quality assurance workflow commands.
//
// Unlike `core dev` which is about doing work (commit, push, pull),
// `core qa` is about verifying work (CI status, reviews, issues).
//
// Commands:
// - watch: Monitor GitHub Actions after a push, report actionable data
// - review: PR review status with actionable next steps
// - health: Aggregate CI health across all repos
// - issues: Intelligent issue triage
package qa
import (
"forge.lthn.ai/core/cli/pkg/cli"
"forge.lthn.ai/core/go-i18n"
"forge.lthn.ai/core/lint/locales"
)
// init registers the qa command tree and its locale bundle with the shared
// CLI registry at program start.
func init() {
	cli.RegisterCommands(AddQACommands, locales.FS)
}
// Style aliases from the shared cli package, so call sites in this package
// stay short.
var (
	successStyle = cli.SuccessStyle
	errorStyle   = cli.ErrorStyle
	warningStyle = cli.WarningStyle
	dimStyle     = cli.DimStyle
)
// AddQACommands registers the 'qa' command and all subcommands on root.
func AddQACommands(root *cli.Command) {
	qaCommand := &cli.Command{
		Use:   "qa",
		Short: i18n.T("cmd.qa.short"),
		Long:  i18n.T("cmd.qa.long"),
	}
	root.AddCommand(qaCommand)
	// Go-focused subcommands, registered in display order.
	registrars := []func(*cli.Command){
		addWatchCommand,
		addReviewCommand,
		addHealthCommand,
		addIssuesCommand,
		addDocblockCommand,
	}
	for _, register := range registrars {
		register(qaCommand)
	}
	// PHP subcommands.
	addPHPCommands(qaCommand)
}

415
cmd/qa/cmd_review.go Normal file
View file

@ -0,0 +1,415 @@
// cmd_review.go implements the 'qa review' command for PR review status.
//
// Usage:
// core qa review # Show all PRs needing attention
// core qa review --mine # Show status of your open PRs
// core qa review --requested # Show PRs you need to review
package qa
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os/exec"
	"sort"
	"strings"
	"time"

	"forge.lthn.ai/core/cli/pkg/cli"
	"forge.lthn.ai/core/go-i18n"
	"forge.lthn.ai/core/go-log"
)
// Review command flags
var (
	reviewMine      bool   // --mine: show only the current user's open PRs
	reviewRequested bool   // --requested: show only PRs awaiting the user's review
	reviewRepo      string // --repo: explicit repository instead of git detection
	reviewJSON      bool   // --json: emit machine-readable output
)
// PullRequest represents a GitHub pull request as decoded from
// `gh pr list --json` output; field tags match gh's JSON keys.
type PullRequest struct {
	Number int `json:"number"`
	Title string `json:"title"`
	Author Author `json:"author"`
	State string `json:"state"`
	IsDraft bool `json:"isDraft"`
	// Mergeable is gh's merge-state string (e.g. "CONFLICTING").
	Mergeable string `json:"mergeable"`
	// ReviewDecision is e.g. "APPROVED" or "CHANGES_REQUESTED"; empty when
	// no decision has been recorded.
	ReviewDecision string `json:"reviewDecision"`
	URL string `json:"url"`
	HeadRefName string `json:"headRefName"`
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"updatedAt"`
	Additions int `json:"additions"`
	Deletions int `json:"deletions"`
	ChangedFiles int `json:"changedFiles"`
	// StatusChecks is nil when gh reports no check rollup.
	StatusChecks *StatusCheckRollup `json:"statusCheckRollup"`
	ReviewRequests ReviewRequests `json:"reviewRequests"`
	Reviews []Review `json:"reviews"`
}
// Author represents a GitHub user
type Author struct {
	Login string `json:"login"`
}

// StatusCheckRollup contains CI check status
type StatusCheckRollup struct {
	Contexts []StatusContext `json:"contexts"`
}

// StatusContext represents a single check; State and Conclusion are gh's
// upper- or lower-case status strings.
type StatusContext struct {
	State string `json:"state"`
	Conclusion string `json:"conclusion"`
	Name string `json:"name"`
}

// ReviewRequests contains pending review requests
type ReviewRequests struct {
	Nodes []ReviewRequest `json:"nodes"`
}

// ReviewRequest represents a review request
type ReviewRequest struct {
	RequestedReviewer Author `json:"requestedReviewer"`
}

// Review represents a PR review
type Review struct {
	Author Author `json:"author"`
	State string `json:"state"`
}

// ReviewFetchError captures a partial fetch failure while preserving any
// successfully fetched PRs in the same review run.
type ReviewFetchError struct {
	Repo string `json:"repo"`
	// Scope is "mine" or "requested", matching the query that failed.
	Scope string `json:"scope"`
	Error string `json:"error"`
}

// reviewOutput is the --json payload: both PR scopes, their counts, which
// scopes were requested, and any per-scope fetch failures.
type reviewOutput struct {
	Mine []PullRequest `json:"mine"`
	Requested []PullRequest `json:"requested"`
	TotalMine int `json:"total_mine"`
	TotalRequested int `json:"total_requested"`
	ShowingMine bool `json:"showing_mine"`
	ShowingRequested bool `json:"showing_requested"`
	FetchErrors []ReviewFetchError `json:"fetch_errors"`
}
// addReviewCommand adds the 'review' subcommand to the qa command.
func addReviewCommand(parent *cli.Command) {
	reviewCmd := &cli.Command{
		Use:   "review",
		Short: i18n.T("cmd.qa.review.short"),
		Long:  i18n.T("cmd.qa.review.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			return runReview()
		},
	}
	flags := reviewCmd.Flags()
	flags.BoolVarP(&reviewMine, "mine", "m", false, i18n.T("cmd.qa.review.flag.mine"))
	flags.BoolVarP(&reviewRequested, "requested", "r", false, i18n.T("cmd.qa.review.flag.requested"))
	flags.StringVar(&reviewRepo, "repo", "", i18n.T("cmd.qa.review.flag.repo"))
	flags.BoolVar(&reviewJSON, "json", false, i18n.T("common.flag.json"))
	parent.AddCommand(reviewCmd)
}
func runReview() error {
// Check gh is available
if _, err := exec.LookPath("gh"); err != nil {
return log.E("qa.review", i18n.T("error.gh_not_found"), nil)
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Determine repo
repoFullName := reviewRepo
if repoFullName == "" {
var err error
repoFullName, err = detectRepoFromGit()
if err != nil {
return log.E("qa.review", i18n.T("cmd.qa.review.error.no_repo"), nil)
}
}
// Default: show both mine and requested if neither flag is set
showMine := reviewMine || (!reviewMine && !reviewRequested)
showRequested := reviewRequested || (!reviewMine && !reviewRequested)
minePRs := []PullRequest{}
requestedPRs := []PullRequest{}
fetchErrors := make([]ReviewFetchError, 0)
mineFetched := false
requestedFetched := false
successfulFetches := 0
if showMine {
prs, err := fetchPRs(ctx, repoFullName, "author:@me")
if err != nil {
fetchErrors = append(fetchErrors, ReviewFetchError{
Repo: repoFullName,
Scope: "mine",
Error: strings.TrimSpace(err.Error()),
})
if !reviewJSON {
cli.Warnf("failed to fetch your PRs for %s: %s", repoFullName, strings.TrimSpace(err.Error()))
}
} else {
sort.Slice(prs, func(i, j int) bool {
if prs[i].Number == prs[j].Number {
return strings.Compare(prs[i].Title, prs[j].Title) < 0
}
return prs[i].Number < prs[j].Number
})
minePRs = prs
mineFetched = true
successfulFetches++
}
}
if showRequested {
prs, err := fetchPRs(ctx, repoFullName, "review-requested:@me")
if err != nil {
fetchErrors = append(fetchErrors, ReviewFetchError{
Repo: repoFullName,
Scope: "requested",
Error: strings.TrimSpace(err.Error()),
})
if !reviewJSON {
cli.Warnf("failed to fetch review requested PRs for %s: %s", repoFullName, strings.TrimSpace(err.Error()))
}
} else {
sort.Slice(prs, func(i, j int) bool {
if prs[i].Number == prs[j].Number {
return strings.Compare(prs[i].Title, prs[j].Title) < 0
}
return prs[i].Number < prs[j].Number
})
requestedPRs = prs
requestedFetched = true
successfulFetches++
}
}
output := reviewOutput{
Mine: minePRs,
Requested: requestedPRs,
TotalMine: len(minePRs),
TotalRequested: len(requestedPRs),
ShowingMine: showMine,
ShowingRequested: showRequested,
FetchErrors: fetchErrors,
}
if reviewJSON {
data, err := json.MarshalIndent(output, "", " ")
if err != nil {
return err
}
cli.Print("%s\n", string(data))
if successfulFetches == 0 && len(fetchErrors) > 0 {
return cli.Err("failed to fetch pull requests for %s", repoFullName)
}
return nil
}
if successfulFetches == 0 && len(fetchErrors) > 0 {
return cli.Err("failed to fetch pull requests for %s", repoFullName)
}
if showMine && mineFetched {
if err := printMyPRs(minePRs); err != nil {
return err
}
}
if showRequested && requestedFetched {
if showMine && mineFetched {
cli.Blank()
}
if err := printRequestedPRs(requestedPRs); err != nil {
return err
}
}
return nil
}
// printMyPRs renders the user's open PRs with status lines, or a dim
// placeholder message when there are none.
func printMyPRs(prs []PullRequest) error {
	if len(prs) == 0 {
		cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.qa.review.no_prs")))
		return nil
	}
	cli.Print("%s (%d):\n", i18n.T("cmd.qa.review.your_prs"), len(prs))
	for idx := range prs {
		printPRStatus(prs[idx])
	}
	return nil
}
// printRequestedPRs renders PRs awaiting the user's review, or a dim
// placeholder message when there are none.
func printRequestedPRs(prs []PullRequest) error {
	if len(prs) == 0 {
		cli.Print("%s\n", dimStyle.Render(i18n.T("cmd.qa.review.no_reviews")))
		return nil
	}
	cli.Print("%s (%d):\n", i18n.T("cmd.qa.review.review_requested"), len(prs))
	for idx := range prs {
		printPRForReview(prs[idx])
	}
	return nil
}
// fetchPRs runs `gh pr list` with the given search query and decodes the
// JSON result. When repo is empty, gh uses the current directory's repo.
// A non-zero gh exit is surfaced with gh's stderr as the error message.
func fetchPRs(ctx context.Context, repo, search string) ([]PullRequest, error) {
	args := []string{
		"pr", "list",
		"--state", "open",
		"--search", search,
		"--json", "number,title,author,state,isDraft,mergeable,reviewDecision,url,headRefName,createdAt,updatedAt,additions,deletions,changedFiles,statusCheckRollup,reviewRequests,reviews",
	}
	if repo != "" {
		args = append(args, "--repo", repo)
	}
	cmd := exec.CommandContext(ctx, "gh", args...)
	output, err := cmd.Output()
	if err != nil {
		// errors.As also matches a wrapped *exec.ExitError, which a bare
		// type assertion would miss.
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			return nil, log.E("qa.fetchPRs", strings.TrimSpace(string(exitErr.Stderr)), nil)
		}
		return nil, err
	}
	var prs []PullRequest
	if err := json.Unmarshal(output, &prs); err != nil {
		return nil, err
	}
	return prs, nil
}
// printPRStatus prints one PR line with its status glyph, plus an indented
// action hint when there is something for the author to do.
func printPRStatus(pr PullRequest) {
	status, style, action := analyzePRStatus(pr)
	title := truncate(pr.Title, 50)
	cli.Print(" %s #%d %s\n", style.Render(status), pr.Number, title)
	if action == "" {
		return
	}
	cli.Print(" %s %s\n", dimStyle.Render("->"), action)
}
// printPRForReview prints one PR awaiting the user's review, with author,
// change stats, and a ready-to-paste checkout command.
func printPRForReview(pr PullRequest) {
	stats := fmt.Sprintf("+%d/-%d, %d files", pr.Additions, pr.Deletions, pr.ChangedFiles)
	cli.Print(" %s #%d %s\n", warningStyle.Render("◯"), pr.Number, truncate(pr.Title, 50))
	cli.Print(" %s @%s, %s\n", dimStyle.Render("->"), pr.Author.Login, stats)
	cli.Print(" %s gh pr checkout %d\n", dimStyle.Render("->"), pr.Number)
}
// analyzePRStatus determines the status glyph, render style, and suggested
// next action for a PR. Conditions are evaluated in priority order: draft,
// merge conflicts, CI failure, requested changes, CI pending, awaiting
// review, then ready-to-merge; reordering these branches changes results.
func analyzePRStatus(pr PullRequest) (status string, style *cli.AnsiStyle, action string) {
	// Drafts short-circuit everything else.
	if pr.IsDraft {
		return "◯", dimStyle, "Draft - convert to ready when done"
	}
	// Aggregate CI state across all check contexts. gh reports conclusions
	// in both upper and lower case, hence the duplicated cases.
	ciPassed := true
	ciFailed := false
	ciPending := false
	var failedChecks []string
	if pr.StatusChecks != nil {
		for _, check := range pr.StatusChecks.Contexts {
			switch check.Conclusion {
			case "FAILURE", "failure":
				ciFailed = true
				ciPassed = false
				failedChecks = append(failedChecks, check.Name)
			case "PENDING", "pending", "":
				// An empty conclusion only counts as pending when the
				// check's state also indicates it has not finished.
				if check.State == "PENDING" || check.State == "" {
					ciPending = true
					ciPassed = false
				}
			}
		}
	}
	// Review status.
	approved := pr.ReviewDecision == "APPROVED"
	changesRequested := pr.ReviewDecision == "CHANGES_REQUESTED"
	// Mergeable status.
	hasConflicts := pr.Mergeable == "CONFLICTING"
	// Determine overall status, highest-priority condition first.
	if hasConflicts {
		return "✗", errorStyle, "Needs rebase - has merge conflicts"
	}
	if ciFailed {
		if len(failedChecks) > 0 {
			// Sort so the reported check is deterministic across runs.
			sort.Strings(failedChecks)
			return "✗", errorStyle, fmt.Sprintf("CI failed: %s", failedChecks[0])
		}
		return "✗", errorStyle, "CI failed"
	}
	if changesRequested {
		return "✗", warningStyle, "Changes requested - address review feedback"
	}
	if ciPending {
		return "◯", warningStyle, "CI running..."
	}
	if !approved && pr.ReviewDecision != "" {
		return "◯", warningStyle, "Awaiting review"
	}
	if approved && ciPassed {
		return "✓", successStyle, "Ready to merge"
	}
	// No decision recorded and nothing actionable.
	return "◯", dimStyle, ""
}
// truncate shortens s to at most max runes (rune-safe for UTF-8), replacing
// the tail with "..." when the string is cut. For max <= 3 there is no room
// for an ellipsis, so the string is simply clamped — the previous version
// panicked on a negative slice bound in that case.
func truncate(s string, max int) string {
	runes := []rune(s)
	if len(runes) <= max {
		return s
	}
	if max <= 3 {
		return string(runes[:max])
	}
	return string(runes[:max-3]) + "..."
}

269
cmd/qa/cmd_review_test.go Normal file
View file

@ -0,0 +1,269 @@
package qa
import (
"encoding/json"
"path/filepath"
"testing"
"forge.lthn.ai/core/cli/pkg/cli"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRunReviewJSONOutput_PreservesPartialResultsAndFetchErrors runs
// `qa review --json` against a fake gh where the author:@me query fails but
// the review-requested:@me query returns one PR, and verifies the command
// still succeeds: the successful scope's PRs are emitted and the failed scope
// is recorded in FetchErrors instead of aborting the whole run.
func TestRunReviewJSONOutput_PreservesPartialResultsAndFetchErrors(t *testing.T) {
	dir := t.TempDir()
	// Fake gh: fail the "mine" (author:@me) query, return one PR for the
	// "requested" (review-requested:@me) query, reject anything else.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"author:@me"*)
printf '%s\n' 'simulated author query failure' >&2
exit 1
;;
*"review-requested:@me"*)
cat <<'JSON'
[
{
"number": 42,
"title": "Refine agent output",
"author": {"login": "alice"},
"state": "OPEN",
"isDraft": false,
"mergeable": "MERGEABLE",
"reviewDecision": "",
"url": "https://example.com/pull/42",
"headRefName": "feature/agent-output",
"createdAt": "2026-03-30T00:00:00Z",
"updatedAt": "2026-03-30T00:00:00Z",
"additions": 12,
"deletions": 3,
"changedFiles": 2,
"reviewRequests": {"nodes": []},
"reviews": []
}
]
JSON
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir) // make the fake gh the one resolved from PATH
	resetReviewFlags(t)
	t.Cleanup(func() {
		reviewRepo = ""
	})
	parent := &cli.Command{Use: "qa"}
	addReviewCommand(parent)
	command := findSubcommand(t, parent, "review")
	require.NoError(t, command.Flags().Set("repo", "forge/example"))
	require.NoError(t, command.Flags().Set("json", "true"))
	output := captureStdout(t, func() {
		// Partial failure must NOT surface as a command error.
		require.NoError(t, command.RunE(command, nil))
	})
	var payload reviewOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	assert.True(t, payload.ShowingMine)
	assert.True(t, payload.ShowingRequested)
	// "mine" failed, so it is empty; "requested" carries the single PR.
	require.Len(t, payload.Mine, 0)
	require.Len(t, payload.Requested, 1)
	assert.Equal(t, 42, payload.Requested[0].Number)
	assert.Equal(t, "Refine agent output", payload.Requested[0].Title)
	// The failed scope is reported, with repo, scope, and gh's stderr text.
	require.Len(t, payload.FetchErrors, 1)
	assert.Equal(t, "forge/example", payload.FetchErrors[0].Repo)
	assert.Equal(t, "mine", payload.FetchErrors[0].Scope)
	assert.Contains(t, payload.FetchErrors[0].Error, "simulated author query failure")
}
// TestRunReviewJSONOutput_ReturnsErrorWhenAllFetchesFail verifies that when
// both gh queries fail, `qa review --json` still prints a JSON payload with
// both failures listed under FetchErrors, and RunE returns an error so the
// command exits non-zero.
func TestRunReviewJSONOutput_ReturnsErrorWhenAllFetchesFail(t *testing.T) {
	dir := t.TempDir()
	// Fake gh: both the author:@me and review-requested:@me queries fail.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"author:@me"*)
printf '%s\n' 'simulated author query failure' >&2
exit 1
;;
*"review-requested:@me"*)
printf '%s\n' 'simulated requested query failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetReviewFlags(t)
	t.Cleanup(func() {
		reviewRepo = ""
	})
	parent := &cli.Command{Use: "qa"}
	addReviewCommand(parent)
	command := findSubcommand(t, parent, "review")
	require.NoError(t, command.Flags().Set("repo", "forge/example"))
	require.NoError(t, command.Flags().Set("json", "true"))
	var runErr error
	output := captureStdout(t, func() {
		runErr = command.RunE(command, nil)
	})
	// Total failure: the command errors, but the JSON payload is still emitted.
	require.Error(t, runErr)
	var payload reviewOutput
	require.NoError(t, json.Unmarshal([]byte(output), &payload))
	assert.Empty(t, payload.Mine)
	assert.Empty(t, payload.Requested)
	// Both scopes are reported, in a fixed order: mine first, then requested.
	require.Len(t, payload.FetchErrors, 2)
	assert.Equal(t, "mine", payload.FetchErrors[0].Scope)
	assert.Equal(t, "requested", payload.FetchErrors[1].Scope)
}
// TestRunReviewHumanOutput_PreservesSuccessfulSectionWhenOneFetchFails runs
// the human (non-JSON) output path with a fake gh whose author:@me query
// fails, and verifies the review-requested section still renders (PR line and
// checkout hint) while the "mine" section heading and the empty-state message
// do not appear.
func TestRunReviewHumanOutput_PreservesSuccessfulSectionWhenOneFetchFails(t *testing.T) {
	dir := t.TempDir()
	// Fake gh: fail the author query, return one PR for review-requested.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"author:@me"*)
printf '%s\n' 'simulated author query failure' >&2
exit 1
;;
*"review-requested:@me"*)
cat <<'JSON'
[
{
"number": 42,
"title": "Refine agent output",
"author": {"login": "alice"},
"state": "OPEN",
"isDraft": false,
"mergeable": "MERGEABLE",
"reviewDecision": "",
"url": "https://example.com/pull/42",
"headRefName": "feature/agent-output",
"createdAt": "2026-03-30T00:00:00Z",
"updatedAt": "2026-03-30T00:00:00Z",
"additions": 12,
"deletions": 3,
"changedFiles": 2,
"reviewRequests": {"nodes": []},
"reviews": []
}
]
JSON
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetReviewFlags(t)
	t.Cleanup(func() {
		reviewRepo = ""
	})
	parent := &cli.Command{Use: "qa"}
	addReviewCommand(parent)
	command := findSubcommand(t, parent, "review")
	require.NoError(t, command.Flags().Set("repo", "forge/example"))
	// Note: no --json flag here; this exercises the human-readable renderer.
	output := captureStdout(t, func() {
		require.NoError(t, command.RunE(command, nil))
	})
	assert.Contains(t, output, "#42 Refine agent output")
	assert.Contains(t, output, "gh pr checkout 42")
	assert.NotContains(t, output, "Your pull requests")
	assert.NotContains(t, output, "cmd.qa.review.no_prs")
}
// TestRunReviewHumanOutput_ReturnsErrorWhenAllFetchesFail verifies the human
// output path errors when both gh queries fail, and that neither section
// heading is printed for scopes that produced no data.
func TestRunReviewHumanOutput_ReturnsErrorWhenAllFetchesFail(t *testing.T) {
	dir := t.TempDir()
	// Fake gh: both queries fail with distinct stderr messages.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"author:@me"*)
printf '%s\n' 'simulated author query failure' >&2
exit 1
;;
*"review-requested:@me"*)
printf '%s\n' 'simulated requested query failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	restoreWorkingDir(t, dir)
	prependPath(t, dir)
	resetReviewFlags(t)
	t.Cleanup(func() {
		reviewRepo = ""
	})
	parent := &cli.Command{Use: "qa"}
	addReviewCommand(parent)
	command := findSubcommand(t, parent, "review")
	require.NoError(t, command.Flags().Set("repo", "forge/example"))
	var runErr error
	output := captureStdout(t, func() {
		runErr = command.RunE(command, nil)
	})
	require.Error(t, runErr)
	// No data fetched, so neither section heading should be rendered.
	assert.NotContains(t, output, "Your pull requests")
	assert.NotContains(t, output, "Review requested")
}
// TestAnalyzePRStatus_UsesDeterministicFailedCheckName verifies that when
// multiple checks fail, analyzePRStatus reports the alphabetically first
// check name (the failed-check list is sorted before picking element 0),
// regardless of the order the checks arrive in.
func TestAnalyzePRStatus_UsesDeterministicFailedCheckName(t *testing.T) {
	// "Zulu" is listed before "Alpha" on purpose: the sort must win.
	pr := PullRequest{
		Mergeable:      "MERGEABLE",
		ReviewDecision: "",
		StatusChecks: &StatusCheckRollup{
			Contexts: []StatusContext{
				{State: "FAILURE", Conclusion: "failure", Name: "Zulu"},
				{State: "FAILURE", Conclusion: "failure", Name: "Alpha"},
			},
		},
	}
	status, _, action := analyzePRStatus(pr)
	assert.Equal(t, "✗", status)
	assert.Equal(t, "CI failed: Alpha", action)
}
// resetReviewFlags zeroes the review command's package-level flag variables
// for the duration of a test, restoring the previous values on cleanup so
// tests do not leak flag state into each other.
func resetReviewFlags(t *testing.T) {
	t.Helper()
	savedMine, savedRequested := reviewMine, reviewRequested
	savedRepo, savedJSON := reviewRepo, reviewJSON
	reviewMine, reviewRequested = false, false
	reviewRepo, reviewJSON = "", false
	t.Cleanup(func() {
		reviewMine, reviewRequested = savedMine, savedRequested
		reviewRepo, reviewJSON = savedRepo, savedJSON
	})
}

478
cmd/qa/cmd_watch.go Normal file
View file

@ -0,0 +1,478 @@
// cmd_watch.go implements the 'qa watch' command for monitoring GitHub Actions.
//
// Usage:
// core qa watch # Watch current repo's latest push
// core qa watch --repo X # Watch specific repo
// core qa watch --commit SHA # Watch specific commit
// core qa watch --timeout 5m # Custom timeout (default: 10m)
package qa
import (
"cmp"
"context"
"encoding/json"
"fmt"
"os/exec"
"slices"
"strings"
"time"
"forge.lthn.ai/core/cli/pkg/cli"
"forge.lthn.ai/core/go-i18n"
"forge.lthn.ai/core/go-log"
)
// Watch command flags (bound in addWatchCommand)
var (
	watchRepo    string        // --repo/-r: org/repo to watch; empty = detect from git remote
	watchCommit  string        // --commit/-c: SHA to watch; empty = current HEAD
	watchTimeout time.Duration // --timeout/-t: overall polling deadline (default 10m)
)
// WorkflowRun represents a GitHub Actions workflow run, as decoded from
// `gh run list --json ...` (see fetchWorkflowRunsForCommit for the field set).
type WorkflowRun struct {
	ID           int64     `json:"databaseId"`
	Name         string    `json:"name"`
	DisplayTitle string    `json:"displayTitle"`
	Status       string    `json:"status"`     // compared against "completed" in runWatch
	Conclusion   string    `json:"conclusion"` // compared against "success" in runWatch/printResults
	HeadSha      string    `json:"headSha"`
	URL          string    `json:"url"`
	CreatedAt    time.Time `json:"createdAt"`
	UpdatedAt    time.Time `json:"updatedAt"`
}
// WorkflowJob represents a job within a workflow run, as decoded from
// `gh run view --json jobs` (see fetchFailureDetails).
type WorkflowJob struct {
	ID         int64     `json:"databaseId"`
	Name       string    `json:"name"`
	Status     string    `json:"status"`
	Conclusion string    `json:"conclusion"` // compared against "failure" in fetchFailureDetails
	URL        string    `json:"url"`
	Steps      []JobStep `json:"steps"`
}
// JobStep represents a step within a job; Number is used as the primary
// sort key when reporting the failed step (see compareJobStep).
type JobStep struct {
	Name       string `json:"name"`
	Status     string `json:"status"`
	Conclusion string `json:"conclusion"`
	Number     int    `json:"number"`
}
// addWatchCommand adds the 'watch' subcommand to the qa command.
//
//	core qa watch                    # watch the current repo's HEAD commit
//	core qa watch --repo org/repo    # watch a specific repo
//	core qa watch --commit SHA -t 5m # watch a specific commit with a 5m timeout
func addWatchCommand(parent *cli.Command) {
	watchCmd := &cli.Command{
		Use:   "watch",
		Short: i18n.T("cmd.qa.watch.short"),
		Long:  i18n.T("cmd.qa.watch.long"),
		RunE: func(cmd *cli.Command, args []string) error {
			return runWatch()
		},
	}
	watchCmd.Flags().StringVarP(&watchRepo, "repo", "r", "", i18n.T("cmd.qa.watch.flag.repo"))
	watchCmd.Flags().StringVarP(&watchCommit, "commit", "c", "", i18n.T("cmd.qa.watch.flag.commit"))
	watchCmd.Flags().DurationVarP(&watchTimeout, "timeout", "t", 10*time.Minute, i18n.T("cmd.qa.watch.flag.timeout"))
	parent.AddCommand(watchCmd)
}
// runWatch polls GitHub Actions workflow runs for a single commit until every
// run completes or the --timeout deadline passes. It prints a one-line status
// summary whenever the aggregate state changes, then delegates the final
// report to printResults. Returns an error on timeout, fetch failure, or when
// any workflow did not succeed (via printResults).
func runWatch() error {
	// Check gh is available
	if _, err := exec.LookPath("gh"); err != nil {
		return log.E("qa.watch", i18n.T("error.gh_not_found"), nil)
	}
	// Determine repo
	repoFullName, err := resolveRepo(watchRepo)
	if err != nil {
		return err
	}
	// Determine commit
	commitSha, err := resolveCommit(watchCommit)
	if err != nil {
		return err
	}
	cli.Print("%s %s\n", dimStyle.Render(i18n.Label("repo")), repoFullName)
	// Safe prefix for display - handle short SHAs gracefully
	shaPrefix := commitSha
	if len(commitSha) > 8 {
		shaPrefix = commitSha[:8]
	}
	cli.Print("%s %s\n", dimStyle.Render(i18n.T("cmd.qa.watch.commit")), shaPrefix)
	cli.Blank()
	// Create context with timeout for all gh commands; the same deadline
	// bounds both the polling loop and each gh invocation.
	ctx, cancel := context.WithTimeout(context.Background(), watchTimeout)
	defer cancel()
	// Poll for workflow runs
	pollInterval := 3 * time.Second
	var lastStatus string
	waitingStatus := dimStyle.Render(i18n.T("cmd.qa.watch.waiting_for_workflows"))
	for {
		// Check if context deadline exceeded
		if ctx.Err() != nil {
			cli.Blank()
			return log.E("qa.watch", i18n.T("cmd.qa.watch.timeout", map[string]any{"Duration": watchTimeout}), nil)
		}
		runs, err := fetchWorkflowRunsForCommit(ctx, repoFullName, commitSha)
		if err != nil {
			return log.Wrap(err, "qa.watch", "failed to fetch workflow runs")
		}
		if len(runs) == 0 {
			// No workflows triggered yet, keep waiting
			if waitingStatus != lastStatus {
				cli.Print("%s\n", waitingStatus)
				lastStatus = waitingStatus
			}
			time.Sleep(pollInterval)
			continue
		}
		// Check status of all runs
		allComplete := true
		var pending, success, failed int
		for _, run := range runs {
			switch run.Status {
			case "completed":
				if run.Conclusion == "success" {
					success++
				} else {
					// Count all non-success conclusions as failed
					// (failure, cancelled, timed_out, action_required, stale, etc.)
					failed++
				}
			default:
				allComplete = false
				pending++
			}
		}
		// Build status line, e.g. "3 workflow(s): 1 running, 1 passed, 1 failed"
		status := fmt.Sprintf("%d workflow(s): ", len(runs))
		if pending > 0 {
			status += warningStyle.Render(fmt.Sprintf("%d running", pending))
			if success > 0 || failed > 0 {
				status += ", "
			}
		}
		if success > 0 {
			status += successStyle.Render(fmt.Sprintf("%d passed", success))
			if failed > 0 {
				status += ", "
			}
		}
		if failed > 0 {
			status += errorStyle.Render(fmt.Sprintf("%d failed", failed))
		}
		// Only print if status changed (avoids repeating an identical line each poll)
		if status != lastStatus {
			cli.Print("%s\n", status)
			lastStatus = status
		}
		if allComplete {
			cli.Blank()
			return printResults(ctx, repoFullName, runs)
		}
		time.Sleep(pollInterval)
	}
}
// resolveRepo determines the repo to watch
func resolveRepo(specified string) (string, error) {
if specified != "" {
// If it contains /, assume it's already full name
if strings.Contains(specified, "/") {
return specified, nil
}
// Try to get org from current directory
org := detectOrgFromGit()
if org != "" {
return org + "/" + specified, nil
}
return "", log.E("qa.watch", i18n.T("cmd.qa.watch.error.repo_format"), nil)
}
// Detect from current directory
return detectRepoFromGit()
}
// resolveCommit determines the commit to watch: the explicitly specified SHA
// when given, otherwise the current HEAD from git.
func resolveCommit(specified string) (string, error) {
	if specified != "" {
		return specified, nil
	}
	// Fall back to the checked-out HEAD commit.
	out, err := exec.Command("git", "rev-parse", "HEAD").Output()
	if err != nil {
		return "", log.Wrap(err, "qa.watch", "failed to get HEAD commit")
	}
	return strings.TrimSpace(string(out)), nil
}
// detectRepoFromGit detects the "org/repo" name from the origin git remote
// of the current directory.
func detectRepoFromGit() (string, error) {
	out, err := exec.Command("git", "remote", "get-url", "origin").Output()
	if err != nil {
		// Not a git repo, or no origin remote configured.
		return "", log.E("qa.watch", i18n.T("cmd.qa.watch.error.not_git_repo"), nil)
	}
	remoteURL := strings.TrimSpace(string(out))
	return parseGitHubRepo(remoteURL)
}
// detectOrgFromGit tries to detect the org from the git remote.
// Returns "" when the remote is missing or its URL does not yield a real
// "org/repo" pair.
func detectOrgFromGit() string {
	repo, err := detectRepoFromGit()
	if err != nil {
		return ""
	}
	// strings.Split always returns at least one element, so the previous
	// `len(parts) >= 1` check was vacuous: a slash-less string would be
	// returned as the "org". Require a genuine org/repo pair instead.
	parts := strings.Split(repo, "/")
	if len(parts) < 2 || parts[0] == "" {
		return ""
	}
	return parts[0]
}
// parseGitHubRepo extracts org/repo from a git URL.
// Supports the SSH form (git@github.com:org/repo.git) and any URL containing
// "github.com/" (e.g. https://github.com/org/repo.git).
func parseGitHubRepo(url string) (string, error) {
	// SSH form: git@github.com:org/repo.git
	if rest, found := strings.CutPrefix(url, "git@github.com:"); found {
		return strings.TrimSuffix(rest, ".git"), nil
	}
	// HTTPS form: https://github.com/org/repo.git
	if strings.Contains(url, "github.com/") {
		segments := strings.Split(url, "github.com/")
		if len(segments) >= 2 {
			return strings.TrimSuffix(segments[1], ".git"), nil
		}
	}
	return "", log.E("qa.parseGitHubRepo", "could not parse GitHub repo from URL: "+url, nil)
}
// fetchWorkflowRunsForCommit fetches workflow runs for a specific commit via
// `gh run list`. Returns ctx.Err() when the failure was caused by the watch
// deadline expiring, and surfaces gh's own stderr text for other gh failures
// (more useful than a bare "exit status 1").
func fetchWorkflowRunsForCommit(ctx context.Context, repoFullName, commitSha string) ([]WorkflowRun, error) {
	args := []string{
		"run", "list",
		"--repo", repoFullName,
		"--commit", commitSha,
		// Field list must match the WorkflowRun json tags.
		"--json", "databaseId,name,displayTitle,status,conclusion,headSha,url,createdAt,updatedAt",
	}
	cmd := exec.CommandContext(ctx, "gh", args...)
	output, err := cmd.Output()
	if err != nil {
		// Check if context was cancelled/deadline exceeded
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		// exec.ExitError carries the child's stderr when Output() was used.
		if exitErr, ok := err.(*exec.ExitError); ok {
			return nil, cli.Err("%s", strings.TrimSpace(string(exitErr.Stderr)))
		}
		return nil, err
	}
	var runs []WorkflowRun
	if err := json.Unmarshal(output, &runs); err != nil {
		return nil, err
	}
	return runs, nil
}
// printResults prints the final results with actionable information: one line
// per successful run, then each failure with its failed job/step, a first
// error line extracted from the logs, and the run URL. Returns an error when
// any run did not conclude "success", so the command exits non-zero.
func printResults(ctx context.Context, repoFullName string, runs []WorkflowRun) error {
	var failures []WorkflowRun
	var successes []WorkflowRun
	for _, run := range runs {
		if run.Conclusion == "success" {
			successes = append(successes, run)
		} else {
			// Treat all non-success as failures (failure, cancelled, timed_out, etc.)
			failures = append(failures, run)
		}
	}
	// Sort both groups so output order is deterministic regardless of API order.
	slices.SortFunc(successes, compareWorkflowRun)
	slices.SortFunc(failures, compareWorkflowRun)
	// Print successes briefly
	for _, run := range successes {
		cli.Print("%s %s\n", successStyle.Render(i18n.T("common.label.success")), run.Name)
	}
	// Print failures with details
	for _, run := range failures {
		cli.Print("%s %s\n", errorStyle.Render(i18n.T("common.label.error")), run.Name)
		// Fetch failed job details (best-effort: empty strings when unavailable)
		failedJob, failedStep, errorLine := fetchFailureDetails(ctx, repoFullName, run.ID)
		if failedJob != "" {
			cli.Print("  %s Job: %s", dimStyle.Render("->"), failedJob)
			if failedStep != "" {
				cli.Print(" (step: %s)", failedStep)
			}
			cli.Blank()
		}
		if errorLine != "" {
			cli.Print("  %s Error: %s\n", dimStyle.Render("->"), errorLine)
		}
		cli.Print("  %s %s\n", dimStyle.Render("->"), run.URL)
	}
	// Exit with error if any failures
	if len(failures) > 0 {
		cli.Blank()
		return cli.Err("%s", i18n.T("cmd.qa.watch.workflows_failed", map[string]any{"Count": len(failures)}))
	}
	cli.Blank()
	cli.Print("%s\n", successStyle.Render(i18n.T("cmd.qa.watch.all_passed")))
	return nil
}
// fetchFailureDetails fetches details about why a workflow failed: the name
// of the first failed job (after a deterministic sort), that job's first
// failed step as "number: name", and a first error line from the logs.
// Best-effort: returns empty strings when gh or JSON decoding fails.
func fetchFailureDetails(ctx context.Context, repoFullName string, runID int64) (jobName, stepName, errorLine string) {
	// Fetch jobs for this run
	args := []string{
		"run", "view", fmt.Sprintf("%d", runID),
		"--repo", repoFullName,
		"--json", "jobs",
	}
	cmd := exec.CommandContext(ctx, "gh", args...)
	output, err := cmd.Output()
	if err != nil {
		return "", "", ""
	}
	var result struct {
		Jobs []WorkflowJob `json:"jobs"`
	}
	if err := json.Unmarshal(output, &result); err != nil {
		return "", "", ""
	}
	// Sort so "first failed job/step" is deterministic across API orderings.
	slices.SortFunc(result.Jobs, compareWorkflowJob)
	// Find the failed job and step
	for _, job := range result.Jobs {
		if job.Conclusion == "failure" {
			jobName = job.Name
			slices.SortFunc(job.Steps, compareJobStep)
			for _, step := range job.Steps {
				if step.Conclusion == "failure" {
					stepName = fmt.Sprintf("%d: %s", step.Number, step.Name)
					break
				}
			}
			break
		}
	}
	// Try to get the error line from logs (if available)
	errorLine = fetchErrorFromLogs(ctx, repoFullName, runID)
	return jobName, stepName, errorLine
}
// fetchErrorFromLogs attempts to extract the first error line from workflow
// logs via `gh run view --log-failed`. Best-effort: returns "" when the logs
// are unavailable or no candidate line is found.
func fetchErrorFromLogs(ctx context.Context, repoFullName string, runID int64) string {
	// Use gh run view --log-failed to get failed step logs
	args := []string{
		"run", "view", fmt.Sprintf("%d", runID),
		"--repo", repoFullName,
		"--log-failed",
	}
	cmd := exec.CommandContext(ctx, "gh", args...)
	output, err := cmd.Output()
	if err != nil {
		return ""
	}
	// Parse output to find the first meaningful error line
	lines := strings.Split(string(output), "\n")
	for _, line := range lines {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		// Skip common metadata/progress lines
		lower := strings.ToLower(line)
		if strings.HasPrefix(lower, "##[") { // GitHub Actions command markers
			continue
		}
		if strings.HasPrefix(line, "Run ") || strings.HasPrefix(line, "Running ") {
			continue
		}
		// Look for error indicators
		if strings.Contains(lower, "error") ||
			strings.Contains(lower, "failed") ||
			strings.Contains(lower, "fatal") ||
			strings.Contains(lower, "panic") ||
			strings.Contains(line, ": ") { // Likely a file:line or key: value format
			// Truncate long lines. Rune-based: the previous byte slice
			// (line[:117]) could split a multi-byte UTF-8 character and
			// emit invalid UTF-8; this matches the rune-safe truncate
			// helper used elsewhere in this package.
			if runes := []rune(line); len(runes) > 120 {
				line = string(runes[:117]) + "..."
			}
			return line
		}
	}
	return ""
}
// compareWorkflowRun orders runs by name, then display title, then creation
// and update times, then ID and URL, giving a total deterministic ordering
// for output.
func compareWorkflowRun(a, b WorkflowRun) int {
	if byName := cmp.Compare(a.Name, b.Name); byName != 0 {
		return byName
	}
	if byTitle := cmp.Compare(a.DisplayTitle, b.DisplayTitle); byTitle != 0 {
		return byTitle
	}
	if byCreated := a.CreatedAt.Compare(b.CreatedAt); byCreated != 0 {
		return byCreated
	}
	if byUpdated := a.UpdatedAt.Compare(b.UpdatedAt); byUpdated != 0 {
		return byUpdated
	}
	if byID := cmp.Compare(a.ID, b.ID); byID != 0 {
		return byID
	}
	return cmp.Compare(a.URL, b.URL)
}
// compareWorkflowJob orders jobs by name, then conclusion, status, ID, and
// URL, so the "first failed job" picked in fetchFailureDetails is stable.
func compareWorkflowJob(a, b WorkflowJob) int {
	if byName := cmp.Compare(a.Name, b.Name); byName != 0 {
		return byName
	}
	if byConclusion := cmp.Compare(a.Conclusion, b.Conclusion); byConclusion != 0 {
		return byConclusion
	}
	if byStatus := cmp.Compare(a.Status, b.Status); byStatus != 0 {
		return byStatus
	}
	if byID := cmp.Compare(a.ID, b.ID); byID != 0 {
		return byID
	}
	return cmp.Compare(a.URL, b.URL)
}
// compareJobStep orders steps primarily by their step number, falling back
// to name, conclusion, and status for a stable total order.
func compareJobStep(a, b JobStep) int {
	if byNumber := cmp.Compare(a.Number, b.Number); byNumber != 0 {
		return byNumber
	}
	if byName := cmp.Compare(a.Name, b.Name); byName != 0 {
		return byName
	}
	if byConclusion := cmp.Compare(a.Conclusion, b.Conclusion); byConclusion != 0 {
		return byConclusion
	}
	return cmp.Compare(a.Status, b.Status)
}

103
cmd/qa/cmd_watch_test.go Normal file
View file

@ -0,0 +1,103 @@
package qa
import (
"context"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestPrintResults_SortsRunsAndUsesDeterministicDetails verifies that
// printResults (a) sorts successes and failures alphabetically by name,
// (b) reports the alphabetically-first failed job and its lowest-numbered
// failed step for each failing run, and (c) returns an error because
// failures are present.
func TestPrintResults_SortsRunsAndUsesDeterministicDetails(t *testing.T) {
	dir := t.TempDir()
	// Fake gh serving job/log details for run IDs 2 and 4. Run 2's jobs and
	// steps are deliberately listed out of order to exercise the sorting.
	writeExecutable(t, filepath.Join(dir, "gh"), `#!/bin/sh
case "$*" in
*"run view 2 --repo forge/alpha --json jobs"*)
cat <<'JSON'
{"jobs":[
{
"databaseId": 20,
"name": "Zulu Job",
"status": "completed",
"conclusion": "failure",
"steps": [
{"name": "Zulu Step", "status": "completed", "conclusion": "failure", "number": 2}
]
},
{
"databaseId": 10,
"name": "Alpha Job",
"status": "completed",
"conclusion": "failure",
"steps": [
{"name": "Zulu Step", "status": "completed", "conclusion": "failure", "number": 2},
{"name": "Alpha Step", "status": "completed", "conclusion": "failure", "number": 1}
]
}
]}
JSON
;;
*"run view 2 --repo forge/alpha --log-failed"*)
cat <<'EOF'
Alpha error detail
EOF
;;
*"run view 4 --repo forge/alpha --json jobs"*)
cat <<'JSON'
{"jobs":[
{
"databaseId": 40,
"name": "Omega Job",
"status": "completed",
"conclusion": "failure",
"steps": [
{"name": "Omega Step", "status": "completed", "conclusion": "failure", "number": 1}
]
}
]}
JSON
;;
*"run view 4 --repo forge/alpha --log-failed"*)
cat <<'EOF'
Omega error detail
EOF
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac
`)
	prependPath(t, dir)
	// Runs deliberately unsorted: Zulu/Alpha successes, Omega/Beta failures.
	runs := []WorkflowRun{
		{ID: 3, Name: "Zulu Build", Conclusion: "success", URL: "https://example.com/zulu"},
		{ID: 1, Name: "Alpha Build", Conclusion: "success", URL: "https://example.com/alpha"},
		{ID: 4, Name: "Omega Failure", Conclusion: "failure", URL: "https://example.com/omega"},
		{ID: 2, Name: "Beta Failure", Conclusion: "failure", URL: "https://example.com/beta"},
	}
	output := captureStdout(t, func() {
		err := printResults(context.Background(), "forge/alpha", runs)
		// Failures present, so printResults must report an error.
		require.Error(t, err)
	})
	// Output must be plain lines, with no clear-line terminal escape sequence.
	assert.NotContains(t, output, "\033[2K\r")
	// Successes appear alphabetically: Alpha Build before Zulu Build.
	alphaBuild := strings.Index(output, "Alpha Build")
	require.NotEqual(t, -1, alphaBuild)
	zuluBuild := strings.Index(output, "Zulu Build")
	require.NotEqual(t, -1, zuluBuild)
	assert.Less(t, alphaBuild, zuluBuild)
	// Failures too: Beta Failure before Omega Failure.
	betaFailure := strings.Index(output, "Beta Failure")
	require.NotEqual(t, -1, betaFailure)
	omegaFailure := strings.Index(output, "Omega Failure")
	require.NotEqual(t, -1, omegaFailure)
	assert.Less(t, betaFailure, omegaFailure)
	// Alphabetically-first failed job and lowest-numbered failed step win.
	assert.Contains(t, output, "Job: Alpha Job (step: 1: Alpha Step)")
	assert.Contains(t, output, "Error: Alpha error detail")
	assert.NotContains(t, output, "Job: Zulu Job")
}

View file

@ -0,0 +1,440 @@
# RFC-025: Agent Experience (AX) Design Principles
- **Status:** Draft
- **Authors:** Snider, Cladius
- **Date:** 2026-03-19
- **Applies to:** All Core ecosystem packages (CoreGO, CorePHP, CoreTS, core-agent)
## Abstract
Agent Experience (AX) is a design paradigm for software systems where the primary code consumer is an AI agent, not a human developer. AX sits alongside User Experience (UX) and Developer Experience (DX) as the third era of interface design.
This RFC establishes AX as a formal design principle for the Core ecosystem and defines the conventions that follow from it.
## Motivation
As of early 2026, AI agents write, review, and maintain the majority of code in the Core ecosystem. The original author has not manually edited code (outside of Core struct design) since October 2025. Code is processed semantically — agents reason about intent, not characters.
Design patterns inherited from the human-developer era optimise for the wrong consumer:
- **Short names** save keystrokes but increase semantic ambiguity
- **Functional option chains** are fluent for humans but opaque for agents tracing configuration
- **Error-at-every-call-site** produces 50% boilerplate that obscures intent
- **Generic type parameters** force agents to carry type context that the runtime already has
- **Panic-hiding conventions** (`Must*`) create implicit control flow that agents must special-case
AX acknowledges this shift and provides principles for designing code, APIs, file structures, and conventions that serve AI agents as first-class consumers.
## The Three Eras
| Era | Primary Consumer | Optimises For | Key Metric |
|-----|-----------------|---------------|------------|
| UX | End users | Discoverability, forgiveness, visual clarity | Task completion time |
| DX | Developers | Typing speed, IDE support, convention familiarity | Time to first commit |
| AX | AI agents | Predictability, composability, semantic navigation | Correct-on-first-pass rate |
AX does not replace UX or DX. End users still need good UX. Developers still need good DX. But when the primary code author and maintainer is an AI agent, the codebase should be designed for that consumer first.
## Principles
### 1. Predictable Names Over Short Names
Names are tokens that agents pattern-match across languages and contexts. Abbreviations introduce mapping overhead.
```
Config not Cfg
Service not Srv
Embed not Emb
Error not Err (as a subsystem name; err for local variables is fine)
Options not Opts
```
**Rule:** If a name would require a comment to explain, it is too short.
**Exception:** Industry-standard abbreviations that are universally understood (`HTTP`, `URL`, `ID`, `IPC`, `I18n`) are acceptable. The test: would an agent trained on any mainstream language recognise it without context?
### 2. Comments as Usage Examples
The function signature tells WHAT. The comment shows HOW with real values.
```go
// Detect the project type from files present
setup.Detect("/path/to/project")
// Set up a workspace with auto-detected template
setup.Run(setup.Options{Path: ".", Template: "auto"})
// Scaffold a PHP module workspace
setup.Run(setup.Options{Path: "./my-module", Template: "php"})
```
**Rule:** If a comment restates what the type signature already says, delete it. If a comment shows a concrete usage with realistic values, keep it.
**Rationale:** Agents learn from examples more effectively than from descriptions. A comment like "Run executes the setup process" adds zero information. A comment like `setup.Run(setup.Options{Path: ".", Template: "auto"})` teaches an agent exactly how to call the function.
### 3. Path Is Documentation
File and directory paths should be self-describing. An agent navigating the filesystem should understand what it is looking at without reading a README.
```
flow/deploy/to/homelab.yaml — deploy TO the homelab
flow/deploy/from/github.yaml — deploy FROM GitHub
flow/code/review.yaml — code review flow
template/file/go/struct.go.tmpl — Go struct file template
template/dir/workspace/php/ — PHP workspace scaffold
```
**Rule:** If an agent needs to read a file to understand what a directory contains, the directory naming has failed.
**Corollary:** The unified path convention (folder structure = HTTP route = CLI command = test path) is AX-native. One path, every surface.
### 4. Templates Over Freeform
When an agent generates code from a template, the output is constrained to known-good shapes. When an agent writes freeform, the output varies.
```go
// Template-driven — consistent output
lib.RenderFile("php/action", data)
lib.ExtractDir("php", targetDir, data)
// Freeform — variance in output
"write a PHP action class that..."
```
**Rule:** For any code pattern that recurs, provide a template. Templates are guardrails for agents.
**Scope:** Templates apply to file generation, workspace scaffolding, config generation, and commit messages. They do NOT apply to novel logic — agents should write business logic freeform with the domain knowledge available.
### 5. Declarative Over Imperative
Agents reason better about declarations of intent than sequences of operations.
```yaml
# Declarative — agent sees what should happen
steps:
- name: build
flow: tools/docker-build
with:
context: "{{ .app_dir }}"
image_name: "{{ .image_name }}"
- name: deploy
flow: deploy/with/docker
with:
host: "{{ .host }}"
```
```go
// Imperative — agent must trace execution
cmd := exec.Command("docker", "build", "--platform", "linux/amd64", "-t", imageName, ".")
cmd.Dir = appDir
if err := cmd.Run(); err != nil {
return fmt.Errorf("docker build: %w", err)
}
```
**Rule:** Orchestration, configuration, and pipeline logic should be declarative (YAML/JSON). Implementation logic should be imperative (Go/PHP/TS). The boundary is: if an agent needs to compose or modify the logic, make it declarative.
### 6. Universal Types (Core Primitives)
Every component in the ecosystem accepts and returns the same primitive types. An agent processing any level of the tree sees identical shapes.
```go
// Universal contract
setup.Run(core.Options{Path: ".", Template: "auto"})
brain.New(core.Options{Name: "openbrain"})
deploy.Run(core.Options{Flow: "deploy/to/homelab"})
// Fractal — Core itself is a Service
core.New(core.Options{
Services: []core.Service{
process.New(core.Options{Name: "process"}),
brain.New(core.Options{Name: "brain"}),
},
})
```
**Core primitive types:**
| Type | Purpose |
|------|---------|
| `core.Options` | Input configuration (what you want) |
| `core.Config` | Runtime settings (what is active) |
| `core.Data` | Embedded or stored content |
| `core.Service` | A managed component with lifecycle |
| `core.Result[T]` | Return value with OK/fail state |
**What this replaces:**
| Go Convention | Core AX | Why |
|--------------|---------|-----|
| `func With*(v) Option` | `core.Options{Field: v}` | Struct literal is parseable; option chain requires tracing |
| `func Must*(v) T` | `core.Result[T]` | No hidden panics; errors flow through Core |
| `func *For[T](c) T` | `c.Service("name")` | String lookup is greppable; generics require type context |
| `val, err :=` everywhere | Single return via `core.Result` | Intent not obscured by error handling |
| `_ = err` | Never needed | Core handles all errors internally |
### 7. Directory as Semantics
The directory structure tells an agent the intent before it reads a word. Top-level directories are semantic categories, not organisational bins.
```
plans/
├── code/ # Pure primitives — read for WHAT exists
├── project/ # Products — read for WHAT we're building and WHY
└── rfc/ # Contracts — read for constraints and rules
```
**Rule:** An agent should know what kind of document it's reading from the path alone. `code/core/go/io/RFC.md` = a lib primitive spec. `project/ofm/RFC.md` = a product spec that cross-references code/. `rfc/snider/borg/RFC-BORG-006-SMSG-FORMAT.md` = an immutable contract for the Borg SMSG protocol.
**Corollary:** The three-way split (code/project/rfc) extends principle 3 (Path Is Documentation) from files to entire subtrees. The path IS the metadata.
### 8. Lib Never Imports Consumer
Dependency flows one direction. Libraries define primitives. Consumers compose from them. A new feature in a consumer can never break a library.
```
code/core/go/* → lib tier (stable foundation)
code/core/agent/ → consumer tier (composes from go/*)
code/core/cli/ → consumer tier (composes from go/*)
code/core/gui/ → consumer tier (composes from go/*)
```
**Rule:** If package A is in `go/` and package B is in the consumer tier, B may import A but A must never import B. The repo naming convention enforces this: `go-{name}` = lib, bare `{name}` = consumer.
**Why this matters for agents:** When an agent is dispatched to implement a feature in `core/agent`, it can freely import from `go-io`, `go-scm`, `go-process`. But if an agent is dispatched to `go-io`, it knows its changes are foundational — every consumer depends on it, so the contract must not break.
### 9. Issues Are N+(rounds) Deep
Problems in code and specs are layered. Surface issues mask deeper issues. Fixing the surface reveals the next layer. This is not a failure mode — it is the discovery process.
```
Pass 1: Find 16 issues (surface — naming, imports, obvious errors)
Pass 2: Find 11 issues (structural — contradictions, missing types)
Pass 3: Find 5 issues (architectural — signature mismatches, registration gaps)
Pass 4: Find 4 issues (contract — cross-spec API mismatches)
Pass 5: Find 2 issues (mechanical — path format, nil safety)
Pass N: Findings are trivial → spec/code is complete
```
**Rule:** Iteration is required, not a failure. Each pass sees what the previous pass could not, because the context changed. An agent dispatched with the same task on the same repo will find different things each time — this is correct behaviour.
**Corollary:** The cheapest model should do the most passes (surface work). The frontier model should arrive last, when only deep issues remain. Tiered iteration: grunt model grinds → mid model pre-warms → frontier model polishes.
**Anti-pattern:** One-shot generation expecting valid output. No model, no human, produces correct-on-first-pass for non-trivial work. Expecting it wastes the first pass on surface issues that a cheaper pass would have caught.
### 10. CLI Tests as Artifact Validation
Unit tests verify the code. CLI tests verify the binary. The directory structure IS the command structure — path maps to command, Taskfile runs the test.
```
tests/cli/
├── core/
│ └── lint/
│ ├── Taskfile.yaml ← test `core-lint` (root)
│ ├── run/
│ │ ├── Taskfile.yaml ← test `core-lint run`
│ │ └── fixtures/
│ ├── go/
│ │ ├── Taskfile.yaml ← test `core-lint go`
│ │ └── fixtures/
│ └── security/
│ ├── Taskfile.yaml ← test `core-lint security`
│ └── fixtures/
```
**Rule:** Every CLI command has a matching `tests/cli/{path}/Taskfile.yaml`. The Taskfile runs the compiled binary against fixtures with known inputs and validates the output. If the CLI test passes, the underlying actions work — because CLI commands call actions, MCP tools call actions, API endpoints call actions. Test the CLI, trust the rest.
**Pattern:**
```yaml
# tests/cli/core/lint/go/Taskfile.yaml
version: '3'
tasks:
test:
cmds:
- core-lint go --output json fixtures/ > /tmp/result.json
- jq -e '.findings | length > 0' /tmp/result.json
- jq -e '.summary.passed == false' /tmp/result.json
```
**Why this matters for agents:** An agent can validate its own work by running `task test` in the matching `tests/cli/` directory. No test framework, no mocking, no setup — just the binary, fixtures, and `jq` assertions. The agent builds the binary, runs the test, sees the result. If it fails, the agent can read the fixture, read the output, and fix the code.
**Corollary:** Fixtures are planted bugs. Each fixture file has a known issue that the linter must find. If the linter doesn't find it, the test fails. Fixtures are the spec for what the tool must detect — they ARE the test cases, not descriptions of test cases.
## Applying AX to Existing Patterns
### File Structure
```
# AX-native: path describes content
core/agent/
├── go/ # Go source
├── php/ # PHP source
├── ui/ # Frontend source
├── claude/ # Claude Code plugin
└── codex/ # Codex plugin
# Not AX: generic names requiring README
src/
├── lib/
├── utils/
└── helpers/
```
### Error Handling
```go
// AX-native: errors are infrastructure, not application logic
svc := c.Service("brain")
cfg := c.Config().Get("database.host")
// Errors logged by Core. Code reads like a spec.
// Not AX: errors dominate the code
svc, err := c.ServiceFor[brain.Service]()
if err != nil {
return fmt.Errorf("get brain service: %w", err)
}
cfg, err := c.Config().Get("database.host")
if err != nil {
_ = err // silenced because "it'll be fine"
}
```
### API Design
```go
// AX-native: one shape, every surface
core.New(core.Options{
Name: "my-app",
Services: []core.Service{...},
Config: core.Config{...},
})
// Not AX: multiple patterns for the same thing
core.New(
core.WithName("my-app"),
core.WithService(factory1),
core.WithService(factory2),
core.WithConfig(cfg),
)
```
## The Plans Convention — AX Development Lifecycle
The `plans/` directory structure encodes a development methodology designed for how generative AI actually works: iterative refinement across structured phases, not one-shot generation.
### The Three-Way Split
```
plans/
├── project/ # 1. WHAT and WHY — start here
├── rfc/ # 2. CONSTRAINTS — immutable contracts
└── code/ # 3. HOW — implementation specs
```
Each directory is a phase. Work flows from project → rfc → code. Each transition forces a refinement pass — you cannot write a code spec without discovering gaps in the project spec, and you cannot write an RFC without discovering assumptions in both.
**Three places for data that can't be written simultaneously = three guaranteed iterations of "actually, this needs changing."** Refinement is baked into the structure, not bolted on as a review step.
### Phase 1: Project (Vision)
Start with `project/`. No code exists yet. Define:
- What the product IS and who it serves
- What existing primitives it consumes (cross-ref to `code/`)
- What constraints it operates under (cross-ref to `rfc/`)
This is where creativity lives. Map features to building blocks. Connect systems. The project spec is integrative — it references everything else.
### Phase 2: RFC (Contracts)
Extract the immutable rules into `rfc/`. These are constraints that don't change with implementation:
- Wire formats, protocols, hash algorithms
- Security properties that must hold
- Compatibility guarantees
RFCs are numbered per component (`RFC-BORG-006-SMSG-FORMAT.md`) and never modified after acceptance. If the contract changes, write a new RFC.
### Phase 3: Code (Implementation Specs)
Define the implementation in `code/`. Each component gets an RFC.md that an agent can implement from:
- Struct definitions (the DTOs — see principle 6)
- Method signatures and behaviour
- Error conditions and edge cases
- Cross-references to other code/ specs
The code spec IS the product. Write the spec → dispatch to an agent → review output → iterate.
### Pre-Launch: Alignment Protocol
Before dispatching for implementation, verify spec-model alignment:
```
1. REVIEW — The implementation model (Codex/Jules) reads the spec
and reports missing elements. This surfaces the delta between
the model's training and the spec's assumptions.
"I need X, Y, Z to implement this" is the model saying
"I hear you but I'm missing context" — without asking.
2. ADJUST — Update the spec to close the gaps. Add examples,
clarify ambiguities, provide the context the model needs.
This is shared alignment, not compromise.
3. VERIFY — A different model (or sub-agent) reviews the adjusted
spec without the planner's bias. Fresh eyes on the contract.
"Does this make sense to someone who wasn't in the room?"
4. READY — When the review findings are trivial or deployment-
related (not architectural), the spec is ready to dispatch.
```
### Implementation: Iterative Dispatch
Same prompt, multiple runs. Each pass sees deeper because the context evolved:
```
Round 1: Build features (the obvious gaps)
Round 2: Write tests (verify what was built)
Round 3: Harden security (what can go wrong?)
Round 4: Next RFC section (what's still missing?)
Round N: Findings are trivial → implementation is complete
```
Re-running is not failure. It is the process. Each pass changes the codebase, which changes what the next pass can see. The iteration IS the refinement.
### Post-Implementation: Auto-Documentation
The QA/verify chain produces artefacts that feed forward:
- Test results document the contract (what works, what doesn't)
- Coverage reports surface untested paths
- Diff summaries prep the changelog for the next release
- Doc site updates from the spec (the spec IS the documentation)
The output of one cycle is the input to the next. The plans repo stays current because the specs drive the code, not the other way round.
## Compatibility
AX conventions are valid, idiomatic Go/PHP/TS. They do not require language extensions, code generation, or non-standard tooling. An AX-designed codebase compiles, tests, and deploys with standard toolchains.
The conventions diverge from community patterns (functional options, Must/For, etc.) but do not violate language specifications. This is a style choice, not a fork.
## Adoption
AX applies to all new code in the Core ecosystem. Existing code migrates incrementally as it is touched — no big-bang rewrite.
Priority order:
1. **Public APIs** (package-level functions, struct constructors)
2. **File structure** (path naming, template locations)
3. **Internal fields** (struct field names, local variables)
## References
- dAppServer unified path convention (2024)
- CoreGO DTO pattern refactor (2026-03-18)
- Core primitives design (2026-03-19)
- Go Proverbs, Rob Pike (2015) — AX provides an updated lens
## Changelog
- 2026-03-19: Initial draft

685
docs/RFC-LINT.md Normal file
View file

@ -0,0 +1,685 @@
# RFC-LINT: core/lint Agent-Native CLI and Adapter Contract
- **Status:** Implemented
- **Date:** 2026-03-30
- **Applies to:** `dappco.re/core/lint`
- **Standard:** [`docs/RFC-CORE-008-AGENT-EXPERIENCE.md`](./RFC-CORE-008-AGENT-EXPERIENCE.md)
## Abstract
`core/lint` is a standalone Go CLI and library that detects project languages, runs matching lint adapters, merges their findings into one report, and writes machine-readable output for local development, CI, and agent QA.
The binary does not bundle external linters. It orchestrates tools already present in `PATH`, treats missing tools as `skipped`, and keeps the orchestration report contract separate from the legacy catalog commands.
This RFC describes the implementation that exists in this repository. It replaces the earlier draft that described a future Core service with Tasks, IPC actions, MCP wrapping, build stages, artifact stages, entitlement gates, and scheduled runs. Those designs are not the current contract.
## Motivation
Earlier drafts described a future `core/lint` service that does not exist in this module. Agents dispatched to this repository need the contract that is implemented now, not the architecture that might exist later.
The current implementation has three properties that matter for AX:
- one CLI binary with explicit command paths
- one orchestration DTO (`RunInput`) and one orchestration report (`Report`)
- one clear split between adapter-driven runs and the older embedded catalog commands
An agent should be able to read the paths, map the commands, and predict the output shapes without reverse-engineering aspirational features from an outdated RFC.
## AX Principles Applied
This RFC follows the Agent Experience standard directly:
1. Predictable names over short names: `RunInput`, `Report`, `ToolRun`, `ToolInfo`, `Service`, and `Adapter` are the contract nouns across the CLI and package boundary.
2. Comments as usage examples: command examples use real flags and real paths such as `core-lint run --output json .` and `core-lint tools --output json --lang go`.
3. Path is documentation: the implementation map is the contract, and `tests/cli/lint/{path}` mirrors the command path it validates.
4. Declarative over imperative: `.core/lint.yaml` declares tool groups, thresholds, and output defaults instead of encoding those decisions in hidden CLI behavior.
5. One input shape for orchestration: `pkg/lint/service.go` owns `RunInput`.
6. One output shape for orchestration: `pkg/lint/service.go` owns `Report`.
7. CLI tests as artifact validation: the Taskfiles under `tests/cli/lint/...` are the runnable contract for the binary surface.
8. Stable sequencing over hidden magic: adapters run sequentially, then tool runs and findings are sorted before output.
## Path Map
An agent should be able to navigate the module from the path alone:
| Path | Meaning |
|------|---------|
| `cmd/core-lint/main.go` | CLI surface for `run`, `detect`, `tools`, `init`, language shortcuts, `hook`, and the legacy `lint` namespace |
| `pkg/lint/service.go` | Orchestrator for config loading, language selection, adapter selection, hook mode, and report assembly |
| `pkg/lint/adapter.go` | Adapter interface, external adapter registry, built-in catalog fallback, external command execution, and output parsers |
| `pkg/lint/config.go` | Repo-local config contract and defaults for `core-lint init` |
| `pkg/lint/detect_project.go` | Project language detection from markers and file names |
| `pkg/lint/report.go` | `Summary` aggregation and JSON/text/GitHub/SARIF writers |
| `lint.go` | Embedded catalog loader for `lint check` and `lint catalog` |
| `catalog/*.yaml` | Embedded pattern catalog files used by the legacy catalog commands |
| `tests/cli/lint/...` | CLI artifact tests; the path is the command |
## Scope
In scope:
- Project language detection
- Config-driven lint tool selection
- Embedded catalog scanning
- External linter orchestration
- Structured report generation
- Git pre-commit hook installation and removal
- CLI artifact tests in `tests/cli/lint/...`
Out of scope:
- Core service registration
- IPC or MCP exposure
- Build-stage compilation checks
- Artifact-stage scans against compiled binaries or images
- Scheduler integration
- Sidecar SBOM file writing
- Automatic tool installation
- Entitlement enforcement
## Command Surface
The repository ships two CLI surfaces:
- The root AX surface: `core-lint run`, `core-lint detect`, `core-lint tools`, and friends
- The legacy catalog surface: `core-lint lint check` and `core-lint lint catalog ...`
The RFC commands are mounted twice: once at the root and once under `core-lint lint ...`. Both surfaces are real. The root surface is shorter. The namespaced surface keeps the path semantic.
| Capability | Root path | Namespaced alias | Example |
|------------|-----------|------------------|---------|
| Full orchestration | `core-lint run [path]` | `core-lint lint run [path]` | `core-lint run --output json .` |
| Go only | `core-lint go [path]` | `core-lint lint go [path]` | `core-lint go .` |
| PHP only | `core-lint php [path]` | `core-lint lint php [path]` | `core-lint php .` |
| JS group shortcut | `core-lint js [path]` | `core-lint lint js [path]` | `core-lint js .` |
| Python only | `core-lint python [path]` | `core-lint lint python [path]` | `core-lint python .` |
| Security group shortcut | `core-lint security [path]` | `core-lint lint security [path]` | `core-lint security --ci .` |
| Compliance tools only | `core-lint compliance [path]` | `core-lint lint compliance [path]` | `core-lint compliance --output json .` |
| Language detection | `core-lint detect [path]` | `core-lint lint detect [path]` | `core-lint detect --output json .` |
| Tool inventory | `core-lint tools` | `core-lint lint tools` | `core-lint tools --output json --lang go` |
| Default config | `core-lint init [path]` | `core-lint lint init [path]` | `core-lint init /tmp/project` |
| Pre-commit hook install | `core-lint hook install [path]` | `core-lint lint hook install [path]` | `core-lint hook install .` |
| Pre-commit hook remove | `core-lint hook remove [path]` | `core-lint lint hook remove [path]` | `core-lint hook remove .` |
| Embedded catalog scan | none | `core-lint lint check [path...]` | `core-lint lint check --format json tests/cli/lint/check/fixtures` |
| Embedded catalog list | none | `core-lint lint catalog list` | `core-lint lint catalog list --lang go` |
| Embedded catalog show | none | `core-lint lint catalog show RULE_ID` | `core-lint lint catalog show go-sec-001` |
`core-lint js` is a shortcut for `Lang=js`, not a dedicated TypeScript command. TypeScript-only runs use `core-lint run --lang ts ...` or plain `run` with auto-detection.
`core-lint compliance` is also not identical to `core-lint run --sbom`. The shortcut sets `Category=compliance`, so the final adapter filter keeps only adapters whose runtime category is `compliance`. `run --sbom` appends the compliance config group without that category filter.
## RunInput Contract
All orchestration commands resolve into one DTO:
```go
type RunInput struct {
Path string `json:"path"`
Output string `json:"output,omitempty"`
Config string `json:"config,omitempty"`
FailOn string `json:"fail_on,omitempty"`
Category string `json:"category,omitempty"`
Lang string `json:"lang,omitempty"`
Hook bool `json:"hook,omitempty"`
CI bool `json:"ci,omitempty"`
Files []string `json:"files,omitempty"`
SBOM bool `json:"sbom,omitempty"`
}
```
### Input Resolution Rules
`Service.Run()` resolves input in this order:
1. Empty `Path` becomes `.`
2. `CI=true` sets `Output=github` only when `Output` was not provided explicitly
3. Config is loaded from `--config` or `.core/lint.yaml`
4. Empty `FailOn` falls back to the loaded config
5. `Hook=true` with no explicit `Files` reads staged files from `git diff --cached --name-only`
6. `Lang` overrides auto-detection
7. `Files` override directory detection for language inference
### CLI Output Resolution
The CLI resolves output before it calls `Service.Run()`:
1. explicit `--output` wins
2. otherwise `--ci` sets the output mode to `github`
3. otherwise the loaded config `output` value is used
4. if the config output is empty, the CLI falls back to `text`
### Category and Language Precedence
Tool group selection is intentionally simple and deterministic:
1. `Category=security` selects the `lint.security` config group
2. `Category=compliance` selects only the `lint.compliance` config group
3. `Lang=go|php|js|ts|python|...` selects only that language's config group
4. Plain `run` uses all detected language groups plus `infra`
5. Plain `run --ci` adds the `security` group
6. Plain `run --sbom` adds the `compliance` group
`Lang` is stronger than `CI` and `SBOM`. If `Lang` is set, the language group wins and the extra groups are not appended.
`Category=style`, `Category=correctness`, and other non-group categories act as adapter-side filters only. They do not map to dedicated config groups.
One current consequence is that `grype` is listed in the default `lint.compliance` config group but advertises `Category() == "security"`. `core-lint compliance` therefore filters it out, while plain `core-lint run --sbom` still leaves it eligible.
Final adapter selection has one extra Go-specific exception: if Go is present and `Category != "compliance"`, `Service.Run()` prepends the built-in `catalog` adapter after registry filtering. That means `core-lint security` on a Go project can still emit `catalog` findings tagged `security`.
## Config Contract
Repo-local config lives at `.core/lint.yaml`.
`core-lint init /path/to/project` writes the default file from `pkg/lint/config.go`.
```yaml
lint:
go:
- golangci-lint
- gosec
- govulncheck
- staticcheck
- revive
- errcheck
php:
- phpstan
- psalm
- phpcs
- phpmd
- pint
js:
- biome
- oxlint
- eslint
- prettier
ts:
- biome
- oxlint
- typescript
python:
- ruff
- mypy
- bandit
- pylint
infra:
- shellcheck
- hadolint
- yamllint
- jsonlint
- markdownlint
security:
- gitleaks
- trivy
- gosec
- bandit
- semgrep
compliance:
- syft
- grype
- scancode
output: json
fail_on: error
paths:
- .
exclude:
- vendor/
- node_modules/
- .core/
```
### Config Rules
- If `.core/lint.yaml` does not exist, `DefaultConfig()` is used in memory
- Relative `--config` paths resolve relative to `Path`
- Unknown tool names in config are inert; the adapter registry is authoritative
- The current default config includes `prettier`, but the adapter registry does not yet provide a `prettier` adapter
- `paths` and `exclude` are part of the file schema, but the current orchestration path does not read them; detection and scanning still rely on built-in defaults
- `LintConfig` still accepts a `schedules` map, but no current CLI command reads or executes it
## Detection Contract
`pkg/lint/detect_project.go` is the only project-language detector used by orchestration commands.
### Marker Files
| Marker | Language |
|--------|----------|
| `go.mod` | `go` |
| `composer.json` | `php` |
| `package.json` | `js` |
| `tsconfig.json` | `ts` |
| `requirements.txt` | `python` |
| `pyproject.toml` | `python` |
| `Cargo.toml` | `rust` |
| `Dockerfile*` | `dockerfile` |
### File Extensions
| Extension | Language |
|-----------|----------|
| `.go` | `go` |
| `.php` | `php` |
| `.js`, `.jsx` | `js` |
| `.ts`, `.tsx` | `ts` |
| `.py` | `python` |
| `.rs` | `rust` |
| `.sh` | `shell` |
| `.yaml`, `.yml` | `yaml` |
| `.json` | `json` |
| `.md` | `markdown` |
### Detection Rules
- Directory traversal skips `vendor`, `node_modules`, `.git`, `testdata`, `.core`, and any hidden directory
- Results are de-duplicated and returned in sorted order
- `core-lint detect --output json tests/cli/lint/check/fixtures` currently returns `["go"]`
## Execution Model
`Service.Run()` is the orchestrator. The current implementation is sequential, not parallel.
### Step 1: Load Config
`LoadProjectConfig()` returns the repo-local config or the in-memory default.
### Step 2: Resolve File Scope
- If `Files` was provided, only those files are considered for language detection and adapter arguments
- If `Hook=true` and `Files` is empty, staged files are read from Git
- Otherwise the whole project path is scanned
### Step 3: Resolve Languages
- `Lang` wins first
- `Files` are used next
- `Detect(Path)` is the fallback
### Step 4: Select Adapters
`pkg/lint/service.go` builds a set of enabled tool names from config, then filters the registry from `pkg/lint/adapter.go`.
Special case:
- If `go` is present in the final language set and `Category != "compliance"`, a built-in `catalog` adapter is prepended automatically
### Step 5: Run Adapters
Every selected adapter runs with the same contract:
```go
type Adapter interface {
Name() string
Available() bool
Languages() []string
Command() string
Entitlement() string
RequiresEntitlement() bool
MatchesLanguage(languages []string) bool
Category() string
Fast() bool
Run(ctx context.Context, input RunInput, files []string) AdapterResult
}
```
Execution rules:
- Missing binaries become `ToolRun{Status: "skipped"}`
- External commands run with a 5 minute timeout
- Hook mode marks non-fast adapters as `skipped`
- Parsed findings are normalised, sorted, and merged into one report
- Adapter order becomes deterministic after `sortToolRuns()` and `sortFindings()`
### Step 6: Compute Pass or Fail
`passesThreshold()` applies the configured threshold:
| `fail_on` | Passes when |
|-----------|-------------|
| `error` or empty | `summary.errors == 0` |
| `warning` | `summary.errors == 0 && summary.warnings == 0` |
| `info` | `summary.total == 0` |
CLI exit status follows `report.Summary.Passed`, not raw tool state. A `skipped` or `timeout` tool run does not fail the command by itself.
## Catalog Surfaces
The repository has two catalog paths. They are related, but they are not the same implementation.
### Legacy Embedded Catalog
These commands load the embedded YAML catalog via `lint.go`:
- `core-lint lint check`
- `core-lint lint catalog list`
- `core-lint lint catalog show`
The source of truth is `catalog/*.yaml`.
### Orchestration Catalog Adapter
`core-lint run`, `core-lint go`, and the other orchestration commands prepend a smaller built-in `catalog` adapter from `pkg/lint/adapter.go`.
That adapter reads the hard-coded `defaultCatalogRulesYAML` constant, not `catalog/*.yaml`.
Today the fallback adapter contains these Go rules:
- `go-cor-003`
- `go-cor-004`
- `go-sec-001`
- `go-sec-002`
- `go-sec-004`
The overlap is intentional, but the surfaces are different:
- `lint check` returns raw catalog findings with catalog severities such as `medium` or `high`
- `run` normalises those findings into report severities `warning`, `error`, or `info`
An agent must not assume that `core-lint lint check` and `core-lint run` execute the same rule set.
## Adapter Inventory
The implementation has two adapter sources in `pkg/lint/adapter.go`:
- `defaultAdapters()` defines the external-tool registry exposed by `core-lint tools`
- `newCatalogAdapter()` defines the built-in Go fallback injected by `Service.Run()` when Go is in scope
### ToolInfo Contract
`core-lint tools` returns the runtime inventory from `Service.Tools()`:
```go
type ToolInfo struct {
Name string `json:"name"`
Available bool `json:"available"`
Languages []string `json:"languages"`
Category string `json:"category"`
Entitlement string `json:"entitlement,omitempty"`
}
```
Inventory rules:
- results are sorted by `Name`
- `--lang` filters via `Adapter.MatchesLanguage()`, not strict equality on the `Languages` field
- wildcard adapters with `Languages() == []string{"*"}` still appear under any `--lang` filter
- category tokens also match, so `core-lint tools --lang security` returns security adapters plus wildcard adapters
- `Available` reflects a `PATH` lookup at runtime, not config membership
- `Entitlement` is descriptive metadata; the current implementation does not enforce it
- the built-in `catalog` adapter is not returned by `core-lint tools`; it is injected only during `run`-style orchestration on Go projects
### Injected During Run
| Adapter | Languages | Category | Fast | Notes |
|---------|-----------|----------|------|-------|
| `catalog` | `go` | `correctness` | yes | Built-in regex fallback rules; injected by `Service.Run()`, not listed by `core-lint tools` |
### Go
| Adapter | Category | Fast |
|---------|----------|------|
| `golangci-lint` | `correctness` | yes |
| `gosec` | `security` | no |
| `govulncheck` | `security` | no |
| `staticcheck` | `correctness` | yes |
| `revive` | `style` | yes |
| `errcheck` | `correctness` | yes |
### PHP
| Adapter | Category | Fast |
|---------|----------|------|
| `phpstan` | `correctness` | yes |
| `psalm` | `correctness` | yes |
| `phpcs` | `style` | yes |
| `phpmd` | `correctness` | yes |
| `pint` | `style` | yes |
### JS and TS
| Adapter | Category | Fast |
|---------|----------|------|
| `biome` | `style` | yes |
| `oxlint` | `style` | yes |
| `eslint` | `style` | yes |
| `typescript` | `correctness` | yes |
### Python
| Adapter | Category | Fast |
|---------|----------|------|
| `ruff` | `style` | yes |
| `mypy` | `correctness` | yes |
| `bandit` | `security` | no |
| `pylint` | `style` | yes |
### Infra and Cross-Project
| Adapter | Category | Fast |
|---------|----------|------|
| `shellcheck` | `correctness` | yes |
| `hadolint` | `security` | yes |
| `yamllint` | `style` | yes |
| `jsonlint` | `style` | yes |
| `markdownlint` | `style` | yes |
| `gitleaks` | `security` | no |
| `trivy` | `security` | no |
| `semgrep` | `security` | no |
| `syft` | `compliance` | no |
| `grype` | `security` | no |
| `scancode` | `compliance` | no |
### Adapter Parsing Rules
- JSON tools are parsed recursively and schema-tolerantly by searching for common keys such as `file`, `line`, `column`, `code`, `message`, and `severity`
- Text tools are parsed from `file:line[:column]: message`
- Non-empty output that does not match either parser becomes one synthetic finding with `code: diagnostic`
- A failed command with no usable parsed output becomes one synthetic finding with `code: command-failed`
- Duplicate findings are collapsed on `tool|file|line|column|code|message`
- `ToolRun.Version` exists in the report schema but is not populated yet
### Entitlement Metadata
Adapters still expose `Entitlement()` and `RequiresEntitlement()`, but `Service.Run()` does not enforce them today. The metadata is present; the gate is not.
## Output Contract
Orchestration commands return one report document:
```go
type Report struct {
Project string `json:"project"`
Timestamp time.Time `json:"timestamp"`
Duration string `json:"duration"`
Languages []string `json:"languages"`
Tools []ToolRun `json:"tools"`
Findings []Finding `json:"findings"`
Summary Summary `json:"summary"`
}
type ToolRun struct {
Name string `json:"name"`
Version string `json:"version,omitempty"`
Status string `json:"status"`
Duration string `json:"duration"`
Findings int `json:"findings"`
}
type Summary struct {
Total int `json:"total"`
Errors int `json:"errors"`
Warnings int `json:"warnings"`
Info int `json:"info"`
Passed bool `json:"passed"`
BySeverity map[string]int `json:"by_severity,omitempty"`
}
```
`ToolRun.Status` has four implemented values:
| Status | Meaning |
|--------|---------|
| `passed` | The adapter ran and emitted no findings |
| `failed` | The adapter ran and emitted findings or the command exited non-zero |
| `skipped` | The binary was missing or hook mode skipped a non-fast adapter |
| `timeout` | The command exceeded the 5 minute adapter timeout |
`Finding` is shared with the legacy catalog scanner:
```go
type Finding struct {
Tool string `json:"tool,omitempty"`
File string `json:"file"`
Line int `json:"line"`
Column int `json:"column,omitempty"`
Severity string `json:"severity"`
Code string `json:"code,omitempty"`
Message string `json:"message,omitempty"`
Category string `json:"category,omitempty"`
Fix string `json:"fix,omitempty"`
RuleID string `json:"rule_id,omitempty"`
Title string `json:"title,omitempty"`
Match string `json:"match,omitempty"`
Repo string `json:"repo,omitempty"`
}
```
### Finding Normalisation
During orchestration:
- `Code` falls back to `RuleID`
- `Message` falls back to `Title`
- empty `Tool` becomes `catalog`
- file paths are made relative to `Path` when possible
- severities are collapsed to report levels:
| Raw severity | Report severity |
|--------------|-----------------|
| `critical`, `high`, `error`, `errors` | `error` |
| `medium`, `low`, `warning`, `warn` | `warning` |
| `info`, `note` | `info` |
### Output Modes
| Mode | How to request it | Writer |
|------|-------------------|--------|
| JSON | `--output json` | `WriteReportJSON` |
| Text | `--output text` | `WriteReportText` |
| GitHub annotations | `--output github` or `--ci` | `WriteReportGitHub` |
| SARIF | `--output sarif` | `WriteReportSARIF` |
### Stream Contract
For `run`-style commands, the selected writer always writes the report document to `stdout`.
If the report fails the configured threshold, the CLI still writes the report to `stdout`, then returns an error. The error path adds human-facing diagnostics on `stderr`.
Agents and CI jobs that need machine-readable output should parse `stdout` and treat `stderr` as diagnostic text.
## Hook Mode
`core-lint run --hook` is the installed pre-commit path.
Implementation details:
- staged files come from `git diff --cached --name-only`
- language detection runs only on those staged files
- adapters with `Fast() == false` are marked `skipped`
- output format still follows normal resolution rules; hook mode does not force text output
- `core-lint hook install` writes a managed block into `.git/hooks/pre-commit`
- `core-lint hook remove` removes only the managed block
Installed hook block:
```sh
# core-lint hook start
# Installed by core-lint
exec core-lint run --hook
# core-lint hook end
```
If the hook file already exists, install appends a guarded block instead of overwriting the file. In that appended case the command line becomes `core-lint run --hook || exit $?` rather than `exec core-lint run --hook`.
## Test Contract
The CLI artifact tests are the runnable contract for this RFC:
| Path | Command under test |
|------|--------------------|
| `tests/cli/lint/check/Taskfile.yaml` | `core-lint lint check` |
| `tests/cli/lint/catalog/list/Taskfile.yaml` | `core-lint lint catalog list` |
| `tests/cli/lint/catalog/show/Taskfile.yaml` | `core-lint lint catalog show` |
| `tests/cli/lint/detect/Taskfile.yaml` | `core-lint detect` |
| `tests/cli/lint/tools/Taskfile.yaml` | `core-lint tools` |
| `tests/cli/lint/init/Taskfile.yaml` | `core-lint init` |
| `tests/cli/lint/run/Taskfile.yaml` | `core-lint run` |
| `tests/cli/lint/Taskfile.yaml` | aggregate CLI suite |
The planted bug fixture is `tests/cli/lint/check/fixtures/input.go`.
Current expectations from the test suite:
- `lint check --format=json` finds `go-cor-003` in `input.go`
- `run --output json --fail-on warning` writes one report document to `stdout`, emits failure diagnostics on `stderr`, and exits non-zero
- `detect --output json` returns `["go"]` for the shipped fixture
- `tools --output json --lang go` includes `golangci-lint` and `govulncheck`
- `init` writes `.core/lint.yaml`
Unit-level confirmation also exists in:
- `cmd/core-lint/main_test.go`
- `pkg/lint/service_test.go`
- `pkg/lint/detect_project_test.go`
## Explicit Non-Goals
These items are intentionally not part of the current contract:
- no Core runtime integration
- no `core.Task` pipeline
- no `lint.static`, `lint.build`, or `lint.artifact` action graph
- no scheduled cron registration
- no sidecar `sbom.cdx.json` or `sbom.spdx.json` output
- no parallel adapter execution
- no adapter entitlement enforcement
- no guarantee that every config tool name has a matching adapter
Any future RFC that adds those capabilities must describe the code that implements them, not just the aspiration.
## Compatibility
This RFC matches the code that ships today:
- a standard Go CLI binary built from `cmd/core-lint`
- external tools resolved from `PATH` at runtime
- no required Core runtime, IPC layer, scheduler, or generated action graph
The contract is compatible with the current unit tests and CLI Taskfile tests because it describes the existing paths, flags, DTOs, and outputs rather than a future service boundary.
## Adoption
This contract applies immediately to:
- the root orchestration commands such as `core-lint run`, `core-lint detect`, `core-lint tools`, `core-lint init`, and `core-lint hook`
- the namespaced aliases under `core-lint lint ...`
- the legacy embedded catalog commands under `core-lint lint check` and `core-lint lint catalog ...`
Future work that adds scheduler support, runtime registration, entitlement enforcement, parallel execution, or SBOM file outputs must land behind a new RFC revision that points to implemented code.
## References
- `docs/RFC-CORE-008-AGENT-EXPERIENCE.md`
- `docs/index.md`
- `docs/development.md`
- `cmd/core-lint/main.go`
- `pkg/lint/service.go`
- `pkg/lint/adapter.go`
- `tests/cli/lint/Taskfile.yaml`
## Changelog
- 2026-03-30: Rewrote the RFC to match the implemented standalone CLI, adapter registry, fallback catalog adapter, hook mode, and CLI test paths
- 2026-03-30: Clarified the implemented report boundary, category filtering semantics, ignored config fields, and AX-style motivation/compatibility/adoption sections
- 2026-03-30: Documented the `stdout` versus `stderr` contract for failing `run` commands and the non-strict `tools --lang` matching rules

320
docs/architecture.md Normal file
View file

@ -0,0 +1,320 @@
---
title: Architecture
description: Internal design of core/lint -- types, data flow, and extension points
---
# Architecture
This document explains how `core/lint` works internally. It covers the core library (`pkg/lint`), the PHP quality pipeline (`pkg/php`), and the QA command layer (`cmd/qa`).
## Overview
The system is organised into three layers:
```
cmd/core-lint CLI entry point (lint check, lint catalog)
cmd/qa QA workflow commands (watch, review, health, issues, PHP tools)
|
pkg/lint Core library: rules, catalog, matcher, scanner, reporting
pkg/php PHP tool wrappers: format, analyse, audit, security, test
pkg/detect Project type detection
|
catalog/*.yaml Embedded rule definitions
```
The root `lint.go` file ties the catalog layer to the library:
```go
//go:embed catalog/*.yaml
var catalogFS embed.FS
func LoadEmbeddedCatalog() (*lintpkg.Catalog, error) {
return lintpkg.LoadFS(catalogFS, "catalog")
}
```
This means all YAML rules are baked into the binary at compile time. There are no runtime file lookups.
## Core Types (pkg/lint)
### Rule
A `Rule` represents a single lint check loaded from YAML. Key fields:
```go
type Rule struct {
ID string `yaml:"id"`
Title string `yaml:"title"`
Severity string `yaml:"severity"` // info, low, medium, high, critical
Languages []string `yaml:"languages"` // e.g. ["go"], ["go", "php"]
Tags []string `yaml:"tags"` // e.g. ["security", "injection"]
Pattern string `yaml:"pattern"` // Regex pattern to match
ExcludePattern string `yaml:"exclude_pattern"` // Regex to suppress false positives
Fix string `yaml:"fix"` // Human-readable remediation
Detection string `yaml:"detection"` // "regex" (extensible to other types)
AutoFixable bool `yaml:"auto_fixable"`
ExampleBad string `yaml:"example_bad"`
ExampleGood string `yaml:"example_good"`
FoundIn []string `yaml:"found_in"` // Repos where pattern was observed
FirstSeen string `yaml:"first_seen"`
}
```
Each rule validates itself via `Validate()`, which checks required fields and compiles regex patterns. Severity is constrained to five levels: `info`, `low`, `medium`, `high`, `critical`.
### Catalog
A `Catalog` is a flat collection of rules with query methods:
- `ForLanguage(lang)` -- returns rules targeting a specific language
- `AtSeverity(threshold)` -- returns rules at or above a severity level
- `ByID(id)` -- looks up a single rule
Loading is done via `LoadDir(dir)` for filesystem paths or `LoadFS(fsys, dir)` for embedded filesystems. Both read all `.yaml` files in the directory and parse them into `[]Rule`.
### Matcher
The `Matcher` is the regex execution engine. It pre-compiles all regex-detection rules into `compiledRule` structs:
```go
type compiledRule struct {
rule Rule
pattern *regexp.Regexp
exclude *regexp.Regexp
}
```
`NewMatcher(rules)` compiles patterns once. `Match(filename, content)` then scans line by line:
1. For each compiled rule, check if the filename itself matches the exclude pattern (e.g., skip `_test.go` files).
2. For each line, test against the rule's pattern.
3. If the line matches, check the exclude pattern to suppress false positives.
4. Emit a `Finding` with file, line number, matched text, and remediation advice.
Non-regex detection types are silently skipped, allowing the catalog schema to support future detection mechanisms (AST, semantic) without breaking the matcher.
### Scanner
The `Scanner` orchestrates directory walking and language-aware matching:
1. Walk the directory tree, skipping excluded directories (`vendor`, `node_modules`, `.git`, `testdata`, `.core`).
2. For each file, detect its language from the file extension using `DetectLanguage()`.
3. Filter the rule set to only rules targeting that language.
4. Build a language-scoped `Matcher` and run it against the file content.
Supported language extensions:
| Extension | Language |
|-----------|----------|
| `.go` | go |
| `.php` | php |
| `.ts`, `.tsx` | ts |
| `.js`, `.jsx` | js |
| `.cpp`, `.cc`, `.c`, `.h` | cpp |
| `.py` | py |
### Finding
A `Finding` is the output of a match:
```go
type Finding struct {
RuleID string `json:"rule_id"`
Title string `json:"title"`
Severity string `json:"severity"`
File string `json:"file"`
Line int `json:"line"`
Match string `json:"match"`
Fix string `json:"fix"`
Repo string `json:"repo,omitempty"`
}
```
### Report
The `report.go` file provides three output formats:
- `WriteText(w, findings)` -- human-readable: `file:line [severity] title (rule-id)`
- `WriteJSON(w, findings)` -- pretty-printed JSON array
- `WriteJSONL(w, findings)` -- newline-delimited JSON (one object per line)
`Summarise(findings)` aggregates counts by severity.
## Data Flow
A typical scan follows this path:
```
YAML files ──> LoadFS() ──> Catalog{Rules}
|
ForLanguage() / AtSeverity()
|
[]Rule (filtered)
|
NewScanner(rules)
|
ScanDir(root) / ScanFile(path)
|
┌───────────────┼───────────────┐
│ Walk tree │ Detect lang │
│ Skip dirs │ Filter rules │
│ │ NewMatcher() │
│ │ Match() │
└───────────────┴───────────────┘
|
[]Finding
|
WriteText() / WriteJSON() / WriteJSONL()
```
## Cyclomatic Complexity Analysis (pkg/lint/complexity.go)
The module includes a native Go AST-based cyclomatic complexity analyser. It uses `go/parser` and `go/ast` -- no external tools required.
```go
results, err := lint.AnalyseComplexity(lint.ComplexityConfig{
Threshold: 15,
Path: "./pkg/...",
})
```
Complexity is calculated by starting at 1 and incrementing for each branching construct:
- `if`, `for`, `range`, `case` (non-default), `comm` (non-default)
- `&&`, `||` binary expressions
- `type switch`, `select`
There is also `AnalyseComplexitySource(src, filename, threshold)` for testing without file I/O.
## Coverage Tracking (pkg/lint/coverage.go)
The coverage subsystem supports:
- **Parsing** Go coverage output (`ParseCoverProfile` for `-coverprofile` format, `ParseCoverOutput` for `-cover` output)
- **Snapshotting** via `CoverageSnapshot` (timestamp, per-package percentages, metadata)
- **Persistence** via `CoverageStore` (JSON file-backed append-only store)
- **Regression detection** via `CompareCoverage(previous, current)` which returns a `CoverageComparison` with regressions, improvements, new packages, and removed packages
## Vulnerability Checking (pkg/lint/vulncheck.go)
`VulnCheck` wraps `govulncheck -json` and parses its newline-delimited JSON output into structured `VulnFinding` objects. The parser handles three message types from govulncheck's wire format:
- `config` -- extracts the module path
- `osv` -- stores vulnerability metadata (ID, aliases, summary, affected ranges)
- `finding` -- maps OSV IDs to call traces and affected packages
## Toolkit (pkg/lint/tools.go)
The `Toolkit` struct wraps common developer commands into structured Go APIs. It executes subprocesses and parses their output:
| Method | Wraps | Returns |
|--------|-------|---------|
| `FindTODOs(dir)` | `git grep` | `[]TODO` |
| `Lint(pkg)` | `go vet` | `[]ToolFinding` |
| `Coverage(pkg)` | `go test -cover` | `[]CoverageReport` |
| `RaceDetect(pkg)` | `go test -race` | `[]RaceCondition` |
| `AuditDeps()` | `govulncheck` (text) | `[]Vulnerability` |
| `ScanSecrets(dir)` | `gitleaks` | `[]SecretLeak` |
| `GocycloComplexity(threshold)` | `gocyclo` | `[]ComplexFunc` |
| `DepGraph(pkg)` | `go mod graph` | `*Graph` |
| `GitLog(n)` | `git log` | `[]Commit` |
| `DiffStat()` | `git diff --stat` | `DiffSummary` |
| `UncommittedFiles()` | `git status` | `[]string` |
| `Build(targets...)` | `go build` | `[]BuildResult` |
| `TestCount(pkg)` | `go test -list` | `int` |
| `CheckPerms(dir)` | `filepath.Walk` | `[]PermIssue` |
| `ModTidy()` | `go mod tidy` | `error` |
All methods use the `Run(name, args...)` helper which captures stdout, stderr, and exit code.
## PHP Quality Pipeline (pkg/php)
The `pkg/php` package provides structured wrappers around PHP ecosystem tools. Each tool has:
1. **Detection** -- checks for config files and vendor binaries (e.g., `DetectAnalyser`, `DetectPsalm`, `DetectRector`)
2. **Options struct** -- configures the tool run
3. **Execution function** -- builds the command, runs it, and returns structured results
### Supported Tools
| Function | Tool | Purpose |
|----------|------|---------|
| `Format()` | Laravel Pint | Code style formatting |
| `Analyse()` | PHPStan / Larastan | Static analysis |
| `RunPsalm()` | Psalm | Type-level static analysis |
| `RunAudit()` | Composer audit + npm audit | Dependency vulnerability scanning |
| `RunSecurityChecks()` | Built-in checks | .env exposure, debug mode, filesystem security |
| `RunRector()` | Rector | Automated code refactoring |
| `RunInfection()` | Infection | Mutation testing |
| `RunTests()` | Pest / PHPUnit | Test execution |
### QA Pipeline
The pipeline system (`pipeline.go` + `runner.go`) organises checks into three stages:
- **Quick** -- audit, fmt, stan (fast, run on every push)
- **Standard** -- psalm (if available), test
- **Full** -- rector, infection (slow, run in full QA)
The `QARunner` builds `process.RunSpec` objects with dependency ordering (e.g., `stan` runs after `fmt`, `test` runs after `stan`). This allows future parallelisation while respecting ordering constraints.
### Project Detection (pkg/detect)
The `detect` package identifies project types by checking for marker files:
- `go.mod` present => Go project
- `composer.json` present => PHP project
`DetectAll(dir)` returns all detected types, enabling polyglot project support.
## QA Command Layer (cmd/qa)
The `cmd/qa` package provides workflow-level commands that integrate with GitHub via the `gh` CLI:
- **watch** -- polls GitHub Actions for a specific commit, shows real-time status, drills into failure details (failed job, step, error line from logs)
- **review** -- fetches open PRs, analyses CI status, review decisions, and merge readiness, suggests next actions
- **health** -- scans all repos in a `repos.yaml` registry, reports aggregate CI health with pass rates
- **issues** -- fetches issues across repos, categorises them (needs response, ready, blocked, triage), prioritises by labels and activity
- **docblock** -- parses Go source with `go/ast`, counts exported symbols with and without doc comments, enforces a coverage threshold
Commands register themselves via `cli.RegisterCommands` in an `init()` function, making them available when the package is imported.
## Extension Points
### Adding New Rules
Create a new YAML file in `catalog/` following the schema:
```yaml
- id: go-xxx-001
title: "Description of the issue"
severity: medium # info, low, medium, high, critical
languages: [go]
tags: [security]
pattern: 'regex-pattern'
exclude_pattern: 'false-positive-filter'
fix: "How to fix the issue"
detection: regex
auto_fixable: false
example_bad: 'problematic code'
example_good: 'corrected code'
```
The file will be embedded automatically on the next build.
### Adding New Detection Types
The `Detection` field on `Rule` currently supports `"regex"`. The `Matcher` skips non-regex rules, so adding a new detection type (e.g., `"ast"` for Go AST patterns) requires:
1. Adding the new type to the `Validate()` method
2. Creating a new matcher implementation
3. Integrating it into `Scanner.ScanDir()`
### Loading External Catalogs
Use `LoadDir(path)` to load rules from a directory on disk rather than the embedded catalog:
```go
cat, err := lintpkg.LoadDir("/path/to/custom/rules")
```
This allows organisations to maintain private rule sets alongside the built-in catalog.

271
docs/development.md Normal file
View file

@ -0,0 +1,271 @@
---
title: Development Guide
description: How to build, test, and contribute to core/lint
---
# Development Guide
## Prerequisites
- Go 1.26 or later
- `core` CLI (for build and QA commands)
- `gh` CLI (only needed for the `qa watch`, `qa review`, `qa health`, and `qa issues` commands)
## Building
The project uses the `core` build system. Configuration lives in `.core/build.yaml`.
```bash
# Build the binary (outputs to ./bin/core-lint)
core build
# Build targets: linux/amd64, linux/arm64, darwin/arm64, windows/amd64
# CGO is disabled; the binary is fully static.
```
To build manually with `go build`:
```bash
go build -trimpath -ldflags="-s -w" -o bin/core-lint ./cmd/core-lint
```
## Running Tests
```bash
# Run all tests
core go test
# Run a single test by name
core go test --run TestRule_Validate_Good
# Generate coverage report
core go cov
core go cov --open # Opens HTML report in browser
```
The test suite covers all packages:
| Package | Test count | Focus |
|---------|-----------|-------|
| `pkg/lint` | ~89 | Rule validation, catalog loading, matcher, scanner, report, complexity, coverage, vulncheck, toolkit |
| `pkg/detect` | 6 | Project type detection |
| `pkg/php` | ~125 | All PHP tool wrappers (format, analyse, audit, security, refactor, mutation, test, pipeline, runner) |
Tests follow the `_Good`, `_Bad`, `_Ugly` suffix convention:
- `_Good` -- happy path
- `_Bad` -- expected error conditions
- `_Ugly` -- edge cases and panics
### Test Examples
Testing rules against source content:
```go
func TestMatcher_Match_Good(t *testing.T) {
rules := []Rule{
{
ID: "test-001",
Title: "TODO found",
Severity: "low",
Pattern: `TODO`,
Detection: "regex",
},
}
m, err := NewMatcher(rules)
require.NoError(t, err)
findings := m.Match("example.go", []byte("// TODO: fix this"))
assert.Len(t, findings, 1)
assert.Equal(t, "test-001", findings[0].RuleID)
assert.Equal(t, 1, findings[0].Line)
}
```
Testing complexity analysis without file I/O:
```go
func TestAnalyseComplexitySource_Good(t *testing.T) {
src := `package example
func simple() { if true {} }
func complex() {
if a {} else if b {} else if c {}
for i := range items {
switch {
case x: if y {}
case z:
}
}
}`
results, err := AnalyseComplexitySource(src, "test.go", 3)
require.NoError(t, err)
assert.NotEmpty(t, results)
}
```
## Quality Assurance
```bash
# Full QA pipeline: format, vet, lint, test
core go qa
# Extended QA: includes race detection, vulnerability scan, security checks
core go qa full
# Individual checks
core go fmt # Format code
core go vet # Run go vet
core go lint # Run linter
```
## Project Structure
```
lint/
├── .core/
│ └── build.yaml # Build configuration
├── bin/ # Build output (gitignored)
├── catalog/
│ ├── go-correctness.yaml # Correctness rules (7 rules)
│ ├── go-modernise.yaml # Modernisation rules (5 rules)
│ └── go-security.yaml # Security rules (6 rules)
├── cmd/
│ ├── core-lint/
│ │ └── main.go # CLI binary entry point
│ └── qa/
│ ├── cmd_qa.go # QA command group registration
│ ├── cmd_watch.go # GitHub Actions monitoring
│ ├── cmd_review.go # PR review status
│ ├── cmd_health.go # Aggregate CI health
│ ├── cmd_issues.go # Issue triage
│ ├── cmd_docblock.go # Docblock coverage
│ └── cmd_php.go # PHP QA subcommands
├── pkg/
│ ├── detect/
│ │ ├── detect.go # Project type detection
│ │ └── detect_test.go
│ ├── lint/
│ │ ├── catalog.go # Catalog loading and querying
│ │ ├── complexity.go # Cyclomatic complexity (native AST)
│ │ ├── coverage.go # Coverage tracking and comparison
│ │ ├── matcher.go # Regex matching engine
│ │ ├── report.go # Output formatters (text, JSON, JSONL)
│ │ ├── rule.go # Rule type and validation
│ │ ├── scanner.go # Directory walking and file scanning
│ │ ├── tools.go # Toolkit (subprocess wrappers)
│ │ ├── vulncheck.go # govulncheck JSON parser
│ │ ├── testdata/
│ │ │ └── catalog/
│ │ │ └── test-rules.yaml
│ │ └── *_test.go
│ └── php/
│ ├── analyse.go # PHPStan/Larastan/Psalm wrappers
│ ├── audit.go # Composer audit + npm audit
│ ├── format.go # Laravel Pint wrapper
│ ├── mutation.go # Infection wrapper
│ ├── pipeline.go # QA stage definitions
│ ├── refactor.go # Rector wrapper
│ ├── runner.go # Process spec builder
│ ├── security.go # Security checks (.env, filesystem)
│ ├── test.go # Pest/PHPUnit wrapper
│ └── *_test.go
├── lint.go # Root package: embedded catalog loader
├── go.mod
├── go.sum
├── CLAUDE.md
└── README.md
```
## Writing New Rules
### Rule Schema
Each YAML file in `catalog/` contains an array of rule objects. Required fields:
| Field | Type | Description |
|-------|------|-------------|
| `id` | string | Unique identifier (convention: `{lang}-{category}-{number}`, e.g., `go-sec-001`) |
| `title` | string | Short human-readable description |
| `severity` | string | One of: `info`, `low`, `medium`, `high`, `critical` |
| `languages` | []string | Target languages (e.g., `[go]`, `[go, php]`) |
| `pattern` | string | Detection pattern (regex for `detection: regex`) |
| `fix` | string | Remediation guidance |
| `detection` | string | Detection type (currently only `regex`) |
Optional fields:
| Field | Type | Description |
|-------|------|-------------|
| `tags` | []string | Categorisation tags (e.g., `[security, injection]`) |
| `exclude_pattern` | string | Regex to suppress false positives |
| `found_in` | []string | Repos where the pattern was originally observed |
| `example_bad` | string | Code example that triggers the rule |
| `example_good` | string | Corrected code example |
| `first_seen` | string | Date the pattern was first catalogued |
| `auto_fixable` | bool | Whether automated fixing is feasible |
### Naming Convention
Rule IDs follow the pattern `{lang}-{category}-{number}`:
- `go-sec-*` -- Security rules
- `go-cor-*` -- Correctness rules
- `go-mod-*` -- Modernisation rules
### Testing a New Rule
Create a test that verifies the pattern matches expected code and does not match exclusions:
```go
func TestNewRule_Matches(t *testing.T) {
rules := []Rule{
{
ID: "go-xxx-001",
Title: "My new rule",
Severity: "medium",
Languages: []string{"go"},
Pattern: `my-pattern`,
ExcludePattern: `safe-variant`,
Detection: "regex",
},
}
m, err := NewMatcher(rules)
require.NoError(t, err)
// Should match
findings := m.Match("example.go", []byte("code with my-pattern here"))
assert.Len(t, findings, 1)
// Should not match (exclusion)
findings = m.Match("example.go", []byte("code with safe-variant here"))
assert.Empty(t, findings)
}
```
## Adding PHP Tool Support
To add support for a new PHP tool:
1. Create a new file in `pkg/php/` (e.g., `newtool.go`).
2. Add a detection function that checks for config files or vendor binaries.
3. Add an options struct and an execution function.
4. Add a command in `cmd/qa/cmd_php.go` that wires the tool to the CLI.
5. Add the tool to the pipeline stages in `pipeline.go` if appropriate.
6. Write tests in a corresponding `*_test.go` file.
Follow the existing pattern -- each tool module exports:
- `Detect*()` -- returns whether the tool is available
- `Run*()` or the tool function -- executes the tool with options
- A `*Options` struct -- configures behaviour
## Coding Standards
- **UK English** throughout: `colour`, `organisation`, `centre`, `modernise`, `analyse`, `serialise`
- **Strict typing**: All function parameters and return values must have explicit types
- **Testing**: Use `testify` assertions (`assert`, `require`)
- **Error wrapping**: Use `fmt.Errorf("context: %w", err)` for error chains
- **Formatting**: Standard Go formatting via `gofmt` / `core go fmt`
## Licence
This project is licensed under the EUPL-1.2.

141
docs/index.md Normal file
View file

@ -0,0 +1,141 @@
---
title: core/lint
description: Pattern catalog, regex-based code checker, and quality assurance toolkit for Go and PHP projects
---
# core/lint
`dappco.re/go/core/lint` is a standalone pattern catalog and code quality toolkit. It ships a YAML-based rule catalog for detecting security issues, correctness bugs, and modernisation opportunities in Go source code. It also provides a full PHP quality assurance pipeline and a suite of developer tooling wrappers.
The library is designed to be embedded into other tools. The YAML rule files are compiled into the binary at build time via `go:embed`, so there are no runtime file dependencies.
## Module Path
```
dappco.re/go/core/lint
```
Requires Go 1.26+.
## Quick Start
### As a Library
```go
import (
lint "dappco.re/go/core/lint"
lintpkg "dappco.re/go/core/lint/pkg/lint"
)
// Load the embedded rule catalog.
cat, err := lint.LoadEmbeddedCatalog()
if err != nil {
log.Fatal(err)
}
// Filter rules for Go, severity medium and above.
rules := cat.ForLanguage("go")
filtered := (&lintpkg.Catalog{Rules: rules}).AtSeverity("medium")
// Create a scanner and scan a directory.
scanner, err := lintpkg.NewScanner(filtered)
if err != nil {
log.Fatal(err)
}
findings, err := scanner.ScanDir("./src")
if err != nil {
log.Fatal(err)
}
// Output results.
lintpkg.WriteText(os.Stdout, findings)
```
### As a CLI
```bash
# Build the binary
core build # produces ./bin/core-lint
# Scan the current directory with all rules
core-lint lint check
# Scan with filters
core-lint lint check --lang go --severity high ./pkg/...
# Output as JSON
core-lint lint check --format json .
# Browse the catalog
core-lint lint catalog list
core-lint lint catalog list --lang go
core-lint lint catalog show go-sec-001
```
### QA Commands
The `qa` command group provides workflow-level quality assurance:
```bash
# Go-focused
core qa watch # Monitor GitHub Actions after a push
core qa review # PR review status with actionable next steps
core qa health # Aggregate CI health across all repos
core qa issues # Intelligent issue triage
core qa docblock # Check Go docblock coverage
# PHP-focused
core qa fmt # Format PHP code with Laravel Pint
core qa stan # Run PHPStan/Larastan static analysis
core qa psalm # Run Psalm static analysis
core qa audit # Audit composer and npm dependencies
core qa security # Security checks (.env, filesystem, deps)
core qa rector # Automated code refactoring
core qa infection # Mutation testing
core qa test # Run Pest or PHPUnit tests
```
## Package Layout
| Package | Path | Description |
|---------|------|-------------|
| `lint` (root) | `lint.go` | Embeds YAML catalogs and exposes `LoadEmbeddedCatalog()` |
| `pkg/lint` | `pkg/lint/` | Core library: Rule, Catalog, Matcher, Scanner, Report, Complexity, Coverage, VulnCheck, Toolkit |
| `pkg/detect` | `pkg/detect/` | Project type detection (Go, PHP) by filesystem markers |
| `pkg/php` | `pkg/php/` | PHP quality tools: format, analyse, audit, security, refactor, mutation, test, pipeline, runner |
| `cmd/core-lint` | `cmd/core-lint/` | CLI binary (`core-lint lint check`, `core-lint lint catalog`) |
| `cmd/qa` | `cmd/qa/` | QA workflow commands (watch, review, health, issues, docblock, PHP tools) |
| `catalog/` | `catalog/` | YAML rule definitions (embedded at compile time) |
## Rule Catalogs
Three built-in YAML catalogs ship with the module:
| File | Rules | Focus |
|------|-------|-------|
| `go-security.yaml` | 6 | SQL injection, path traversal, XSS, timing attacks, log injection, secret leaks |
| `go-correctness.yaml` | 7 | Unsynchronised goroutines, silent error swallowing, panics in library code, file deletion |
| `go-modernise.yaml` | 5 | Replace legacy patterns with modern stdlib (`slices.Clone`, `slices.Sort`, `maps.Keys`, `errgroup`) |
Total: **18 rules** across four severity tiers (info, medium, high, critical). All rules target Go. The catalog is extensible -- add more YAML files to `catalog/` and they will be embedded automatically.
## Dependencies
Direct dependencies:
| Module | Purpose |
|--------|---------|
| `dappco.re/go/core/cli` | CLI framework (`cli.Main()`, command registration, TUI styles) |
| `dappco.re/go/core/i18n` | Internationalisation for CLI strings |
| `dappco.re/go/core/io` | Filesystem abstraction for registry loading |
| `dappco.re/go/core/log` | Structured logging and error wrapping |
| `dappco.re/go/core/scm` | Repository registry (`repos.yaml`) for multi-repo commands |
| `github.com/stretchr/testify` | Test assertions |
| `gopkg.in/yaml.v3` | YAML parsing for rule catalogs |
The `pkg/lint` sub-package has minimal dependencies (only `gopkg.in/yaml.v3` and standard library). The heavier CLI and SCM dependencies live in `cmd/`.
## Licence
EUPL-1.2

44
go.mod
View file

@ -1,62 +1,46 @@
module forge.lthn.ai/core/lint
module dappco.re/go/core/lint
go 1.26.0
require (
dappco.re/go/core/cli v0.3.7
dappco.re/go/core/i18n v0.1.7
dappco.re/go/core/io v0.1.7
dappco.re/go/core/log v0.0.4
dappco.re/go/core/process v0.2.9
dappco.re/go/core/scm v0.3.6
github.com/stretchr/testify v1.11.1
gopkg.in/yaml.v3 v3.0.1
)
require (
forge.lthn.ai/core/cli v0.2.2 // indirect
forge.lthn.ai/core/go v0.1.0 // indirect
forge.lthn.ai/core/go-cache v0.1.0 // indirect
forge.lthn.ai/core/go-config v0.1.0 // indirect
forge.lthn.ai/core/go-crypt v0.1.0 // indirect
forge.lthn.ai/core/go-devops v0.0.3 // indirect
forge.lthn.ai/core/go-help v0.1.2 // indirect
forge.lthn.ai/core/go-i18n v0.1.0 // indirect
forge.lthn.ai/core/go-inference v0.0.2 // indirect
forge.lthn.ai/core/go-io v0.0.3 // indirect
forge.lthn.ai/core/go-log v0.0.1 // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
dappco.re/go/core v0.3.3 // indirect
dappco.re/go/core/inference v0.1.7 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/charmbracelet/bubbletea v1.3.10 // indirect
github.com/charmbracelet/colorprofile v0.4.2 // indirect
github.com/charmbracelet/colorprofile v0.4.3 // indirect
github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect
github.com/charmbracelet/x/ansi v0.11.6 // indirect
github.com/charmbracelet/x/cellbuf v0.0.15 // indirect
github.com/charmbracelet/x/term v0.2.2 // indirect
github.com/clipperhouse/displaywidth v0.11.0 // indirect
github.com/clipperhouse/uax29/v2 v2.7.0 // indirect
github.com/cloudflare/circl v1.6.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.21 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.16.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sagikazarmark/locafero v0.12.0 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/spf13/cobra v1.10.2 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/spf13/viper v1.21.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
github.com/yuin/goldmark v1.7.16 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.48.0 // indirect
golang.org/x/sys v0.41.0 // indirect
golang.org/x/term v0.40.0 // indirect
golang.org/x/text v0.34.0 // indirect
golang.org/x/sys v0.42.0 // indirect
golang.org/x/term v0.41.0 // indirect
golang.org/x/text v0.35.0 // indirect
)

99
go.sum
View file

@ -1,34 +1,25 @@
forge.lthn.ai/core/cli v0.2.2 h1:8J/SsewusYFDqjYTC4MikUX1sB9Yqs79Xg4lXxZoSgc=
forge.lthn.ai/core/cli v0.2.2/go.mod h1:I/V7UFYHv8YVHf9zvbZ9/acT1dAug2B0xLE/L4Ay7Dw=
forge.lthn.ai/core/go v0.1.0 h1:Ow/1NTajrrNPO0zgkskEyEGdx4SKpiNqTaqM0txNOYI=
forge.lthn.ai/core/go v0.1.0/go.mod h1:lwi0tccAlg5j3k6CfoNJEueBc5l9mUeSBX/x6uY8ZbQ=
forge.lthn.ai/core/go-cache v0.1.0 h1:yxPf4bWPZ1jxMnXg8UHBv2xLhet2CRsq5E9PLQYjyj4=
forge.lthn.ai/core/go-cache v0.1.0/go.mod h1:7WbprZVfx/+t4cbJFXMo4sloWk2Eny+rZd8x1Ay9rLk=
forge.lthn.ai/core/go-config v0.1.0 h1:bQnlt8MvFvgPisl//jw4IMHMoCcaIt5FLurwYWqlMx0=
forge.lthn.ai/core/go-config v0.1.0/go.mod h1:jsCzg3BykHqlHZs13PDhP/dq8yTZjsiEyZ35q6jA3Aw=
forge.lthn.ai/core/go-crypt v0.1.0 h1:92gwdQi7iAwktpvZhL/8Cu+QS6xKCtGP4FJfyInPGnw=
forge.lthn.ai/core/go-crypt v0.1.0/go.mod h1:zVAgx6ZiGtC+dbX4R/VKvEPqsEqjyuLl4gQZH9SXBUw=
forge.lthn.ai/core/go-devops v0.0.3 h1:tiSZ2x6a/H1A1IYYUmaM+bEuZqT9Hot7KGCEFN6PSYY=
forge.lthn.ai/core/go-devops v0.0.3/go.mod h1:V5/YaRsrDsYlSnCCJXKX7h1zSbaGyRdRQApPF5XwGAo=
forge.lthn.ai/core/go-help v0.1.2 h1:JP8hhJDAvfjvPuCyLRbU/VEm7YkENAs8debItLkon3w=
forge.lthn.ai/core/go-help v0.1.2/go.mod h1:JSZVb4Gd+P/dTc9laDJsqVCI6OrVbBbBPyPmvw3j4p4=
forge.lthn.ai/core/go-i18n v0.1.0 h1:F7JVSoVkZtzx9JfhpntM9z3iQm1vnuMUi/Zklhz8PCI=
forge.lthn.ai/core/go-i18n v0.1.0/go.mod h1:Q4xsrxuNCl/6NfMv1daria7t1RSiyy8ml+6jiPtUcBs=
forge.lthn.ai/core/go-inference v0.0.2 h1:aHjBkYyLKxLr9tbO4AvzzV/lsZueGq/jeo33SLh113k=
forge.lthn.ai/core/go-inference v0.0.2/go.mod h1:jfWz+IJX55wAH98+ic6FEqqGB6/P31CHlg7VY7pxREw=
forge.lthn.ai/core/go-io v0.0.3 h1:TlhYpGTyjPgAlbEHyYrVSeUChZPhJXcLZ7D/8IbFqfI=
forge.lthn.ai/core/go-io v0.0.3/go.mod h1:ZlU9OQpsvNFNmTJoaHbFIkisZyc0eCq0p8znVWQLRf0=
forge.lthn.ai/core/go-log v0.0.1 h1:x/E6EfF9vixzqiLHQOl2KT25HyBcMc9qiBkomqVlpPg=
forge.lthn.ai/core/go-log v0.0.1/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
forge.lthn.ai/core/go-scm v0.0.2 h1:Ue+gS5vxZkDgTvQrqYu9QdaqEezuTV1kZY3TMqM2uho=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
forge.lthn.ai/core/cli v0.3.7 h1:1GrbaGg0wDGHr6+klSbbGyN/9sSbHvFbdySJznymhwg=
forge.lthn.ai/core/cli v0.3.7/go.mod h1:DBUppJkA9P45ZFGgI2B8VXw1rAZxamHoI/KG7fRvTNs=
forge.lthn.ai/core/go v0.3.3 h1:kYYZ2nRYy0/Be3cyuLJspRjLqTMxpckVyhb/7Sw2gd0=
forge.lthn.ai/core/go v0.3.3/go.mod h1:Cp4ac25pghvO2iqOu59t1GyngTKVOzKB5/VPdhRi9CQ=
forge.lthn.ai/core/go-i18n v0.1.7 h1:aHkAoc3W8fw3RPNvw/UszQbjyFWXHszzbZgty3SwyAA=
forge.lthn.ai/core/go-i18n v0.1.7/go.mod h1:0VDjwtY99NSj2iqwrI09h5GUsJeM9s48MLkr+/Dn4G8=
forge.lthn.ai/core/go-inference v0.1.7 h1:9Dy6v03jX5ZRH3n5iTzlYyGtucuBIgSe+S7GWvBzx9Q=
forge.lthn.ai/core/go-inference v0.1.7/go.mod h1:jfWz+IJX55wAH98+ic6FEqqGB6/P31CHlg7VY7pxREw=
forge.lthn.ai/core/go-io v0.1.7 h1:Tdb6sqh+zz1lsGJaNX9RFWM6MJ/RhSAyxfulLXrJsbk=
forge.lthn.ai/core/go-io v0.1.7/go.mod h1:8lRLFk4Dnp5cR/Cyzh9WclD5566TbpdRgwcH7UZLWn4=
forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0=
forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
forge.lthn.ai/core/go-process v0.2.9 h1:Wql+5TUF+lfU2oJ9I+S764MkTqJhBsuyMM0v1zsfZC4=
forge.lthn.ai/core/go-process v0.2.9/go.mod h1:NIzZOF5IVYYCjHkcNIGcg1mZH+bzGoie4SlZUDYOKIM=
forge.lthn.ai/core/go-scm v0.3.6 h1:LFNx8Fs82mrpxro/MPUM6tMiD4DqPmdu83UknXztQjc=
forge.lthn.ai/core/go-scm v0.3.6/go.mod h1:IWFIYDfRH0mtRdqY5zV06l/RkmkPpBM6FcbKWhg1Qa8=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
github.com/charmbracelet/colorprofile v0.4.2 h1:BdSNuMjRbotnxHSfxy+PCSa4xAmz7szw70ktAtWRYrY=
github.com/charmbracelet/colorprofile v0.4.2/go.mod h1:0rTi81QpwDElInthtrQ6Ni7cG0sDtwAd4C4le060fT8=
github.com/charmbracelet/colorprofile v0.4.3 h1:QPa1IWkYI+AOB+fE+mg/5/4HRMZcaXex9t5KX76i20Q=
github.com/charmbracelet/colorprofile v0.4.3/go.mod h1:/zT4BhpD5aGFpqQQqw7a+VtHCzu+zrQtt1zhMt9mR4Q=
github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE=
github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA=
github.com/charmbracelet/x/ansi v0.11.6 h1:GhV21SiDz/45W9AnV2R61xZMRri5NlLnl6CVF7ihZW8=
@ -41,78 +32,60 @@ github.com/clipperhouse/displaywidth v0.11.0 h1:lBc6kY44VFw+TDx4I8opi/EtL9m20WSE
github.com/clipperhouse/displaywidth v0.11.0/go.mod h1:bkrFNkf81G8HyVqmKGxsPufD3JhNl3dSqnGhOoSD/o0=
github.com/clipperhouse/uax29/v2 v2.7.0 h1:+gs4oBZ2gPfVrKPthwbMzWZDaAFPGYK72F0NJv2v7Vk=
github.com/clipperhouse/uax29/v2 v2.7.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.20 h1:WcT52H91ZUAwy8+HUkdM3THM6gXqXuLJi9O3rjcQQaQ=
github.com/mattn/go-runewidth v0.0.20/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/mattn/go-runewidth v0.0.21 h1:jJKAZiQH+2mIinzCJIaIG9Be1+0NR+5sz/lYEEjdM8w=
github.com/mattn/go-runewidth v0.0.21/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
github.com/yuin/goldmark v1.7.16 h1:n+CJdUxaFMiDUNnWC3dMWCIQJSkxH4uz3ZwQBkAlVNE=
github.com/yuin/goldmark v1.7.16/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA=
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

7
locales/embed.go Normal file
View file

@ -0,0 +1,7 @@
// Package locales embeds translation files for this module.
package locales

import "embed"

// FS exposes the module's embedded *.json translation catalogues.
//
//go:embed *.json
var FS embed.FS

113
locales/en.json Normal file
View file

@ -0,0 +1,113 @@
{
"cmd": {
"qa": {
"short": "Quality assurance checks",
"long": "Run quality assurance checks across the registry: CI health, issues, reviews, documentation coverage, and workflow monitoring.",
"docblock": {
"short": "Check documentation coverage",
"long": "Analyse exported symbols for missing doc comments and report coverage percentage against a configurable threshold.",
"coverage": "Documentation coverage",
"missing_docs": "Missing documentation:",
"flag": {
"threshold": "Minimum coverage percentage to pass"
}
},
"health": {
"short": "Show CI health across repos",
"long": "Check GitHub Actions workflow status for all repos in the registry and report which are passing, failing, errored, or unconfigured.",
"summary": "CI Health",
"all_healthy": "All repos are healthy.",
"passing": "Passing",
"tests_failing": "Tests failing",
"cancelled": "Cancelled",
"skipped": "Skipped",
"running": "Running",
"fetch_error": "Failed to fetch workflow status",
"parse_error": "Failed to parse workflow response",
"no_ci_configured": "No CI configured",
"count_passing": "passing",
"count_failing": "failing",
"count_error": "error",
"count_pending": "pending",
"count_no_ci": "no CI",
"count_disabled": "disabled",
"flag": {
"problems": "Show only repos with problems"
}
},
"issues": {
"short": "List open issues across repos",
"long": "Fetch and categorise open GitHub issues across registry repos, grouping by status: needs response, ready, blocked, and triage.",
"fetching": "Fetching issues...",
"no_issues": "No open issues found.",
"category": {
"needs_response": "Needs Response",
"ready": "Ready",
"blocked": "Blocked",
"triage": "Triage"
},
"fetch_error": "Failed to fetch issues from {{.Repo}}: {{.Error}}",
"hint": {
"blocked": "blocked by dependency",
"triage": "needs triage",
"needs_response": "awaiting response"
},
"flag": {
"mine": "Show only issues assigned to you",
"triage": "Show only issues needing triage",
"blocked": "Show only blocked issues",
"limit": "Maximum number of issues to fetch"
}
},
"review": {
"short": "List pull requests needing review",
"long": "Show open pull requests across registry repos, highlighting those awaiting your review and your own PRs.",
"your_prs": "Your pull requests",
"review_requested": "Review requested",
"no_prs": "No open pull requests.",
"no_reviews": "No reviews requested.",
"error": {
"no_repo": "Could not determine repository"
},
"flag": {
"mine": "Show only your pull requests",
"requested": "Show only PRs requesting your review",
"repo": "Filter by repository name"
}
},
"watch": {
"short": "Watch CI workflows for a commit",
"long": "Monitor GitHub Actions workflows for a specific commit, polling until all complete or the timeout is reached.",
"commit": "Watching commit",
"waiting_for_workflows": "Waiting for workflows...",
"all_passed": "All workflows passed.",
"workflows_failed": "{{.Count}} workflow(s) failed.",
"timeout": "Timed out after {{.Duration}}.",
"error": {
"not_git_repo": "Current directory is not a git repository",
"repo_format": "Could not determine owner/repo from remote"
},
"flag": {
"repo": "Repository in owner/name format",
"commit": "Commit SHA to watch",
"timeout": "Maximum time to wait"
}
}
}
},
"common": {
"flag": {
"json": "Output as JSON",
"registry": "Path to registry file",
"verbose": "Show detailed output"
},
"label": {
"error": "FAIL",
"success": "PASS"
}
},
"error": {
"gh_not_found": "GitHub CLI (gh) not found; install from https://cli.github.com",
"registry_not_found": "Registry file not found"
}
}

36
pkg/detect/detect.go Normal file
View file

@ -0,0 +1,36 @@
// Package detect identifies project types by examining filesystem markers.
package detect
import (
	"os"
	"path/filepath"
)
// ProjectType identifies a project's language/framework.
type ProjectType string

// Known project types, one per detectable filesystem marker.
const (
	Go  ProjectType = "go"  // marked by a go.mod file
	PHP ProjectType = "php" // marked by a composer.json file
)
// IsGoProject returns true if dir contains a go.mod file.
func IsGoProject(dir string) bool {
_, err := os.Stat(dir + "/go.mod")
return err == nil
}
// IsPHPProject returns true if dir contains a composer.json file.
func IsPHPProject(dir string) bool {
_, err := os.Stat(dir + "/composer.json")
return err == nil
}
// DetectAll returns all detected project types in the directory, in a
// fixed order: Go first, then PHP.
func DetectAll(dir string) []ProjectType {
	var detected []ProjectType
	checks := []struct {
		present bool
		kind    ProjectType
	}{
		{IsGoProject(dir), Go},
		{IsPHPProject(dir), PHP},
	}
	for _, check := range checks {
		if check.present {
			detected = append(detected, check.kind)
		}
	}
	return detected
}

46
pkg/detect/detect_test.go Normal file
View file

@ -0,0 +1,46 @@
package detect
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestIsGoProject_Good verifies a directory with go.mod is detected as Go.
func TestIsGoProject_Good(t *testing.T) {
	dir := t.TempDir()
	// Fail fast on fixture errors rather than producing a misleading
	// assertion failure below.
	if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0o644); err != nil {
		t.Fatal(err)
	}
	assert.True(t, IsGoProject(dir))
}
// TestIsGoProject_Bad verifies an empty directory is not detected as Go.
func TestIsGoProject_Bad(t *testing.T) {
	emptyDir := t.TempDir()
	assert.False(t, IsGoProject(emptyDir))
}
// TestIsPHPProject_Good verifies a directory with composer.json is detected as PHP.
func TestIsPHPProject_Good(t *testing.T) {
	dir := t.TempDir()
	// Fail fast on fixture errors rather than producing a misleading
	// assertion failure below.
	if err := os.WriteFile(filepath.Join(dir, "composer.json"), []byte("{}"), 0o644); err != nil {
		t.Fatal(err)
	}
	assert.True(t, IsPHPProject(dir))
}
// TestIsPHPProject_Bad verifies an empty directory is not detected as PHP.
func TestIsPHPProject_Bad(t *testing.T) {
	emptyDir := t.TempDir()
	assert.False(t, IsPHPProject(emptyDir))
}
// TestDetectAll_Good verifies both Go and PHP markers are detected together.
func TestDetectAll_Good(t *testing.T) {
	dir := t.TempDir()
	// Fail fast on fixture errors rather than producing misleading
	// assertion failures below.
	if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0o644); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(dir, "composer.json"), []byte("{}"), 0o644); err != nil {
		t.Fatal(err)
	}
	types := DetectAll(dir)
	// Pin the exact count as well as membership so an accidental
	// duplicate entry would be caught.
	assert.Len(t, types, 2)
	assert.Contains(t, types, Go)
	assert.Contains(t, types, PHP)
}
func TestDetectAll_Empty(t *testing.T) {
dir := t.TempDir()
types := DetectAll(dir)
assert.Empty(t, types)
}

912
pkg/lint/adapter.go Normal file
View file

@ -0,0 +1,912 @@
package lint
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
coreerr "forge.lthn.ai/core/go-log"
)
// Adapter wraps one lint tool and normalises its output to Finding values.
type Adapter interface {
	// Name returns the adapter's stable identifier (e.g. "golangci-lint").
	Name() string
	// Available reports whether the underlying tool can be invoked here.
	Available() bool
	// Languages lists the language keys this adapter applies to.
	Languages() []string
	// Command returns the primary binary name used to invoke the tool.
	Command() string
	// Entitlement names the entitlement key gating this adapter, or "".
	Entitlement() string
	// RequiresEntitlement reports whether the entitlement must be granted.
	RequiresEntitlement() bool
	// MatchesLanguage reports whether the adapter applies to any of the
	// requested languages.
	MatchesLanguage(languages []string) bool
	// Category returns the finding category (e.g. "style", "security").
	Category() string
	// Fast reports whether the tool is cheap enough for quick runs.
	Fast() bool
	// Run executes the tool against input and returns the normalised result.
	Run(ctx context.Context, input RunInput, files []string) AdapterResult
}
// AdapterResult contains one tool execution plus the parsed findings from that run.
type AdapterResult struct {
	Tool     ToolRun   // execution record: status, duration, version, finding count
	Findings []Finding // normalised findings parsed from the tool's output
}
// findingParser converts a tool's raw output into normalised findings.
type findingParser func(tool string, category string, output string) []Finding

// commandArgumentsBuilder assembles the argument list for a tool
// invocation from the project path and an optional explicit file list.
type commandArgumentsBuilder func(projectPath string, files []string) []string
// CommandAdapter runs an external binary and parses its stdout/stderr.
type CommandAdapter struct {
	name                string   // adapter identifier reported on findings
	binaries            []string // candidate binary names, tried in order
	languages           []string // language keys; a single "*" matches all
	category            string   // finding category (style, security, ...)
	entitlement         string   // entitlement key gating the tool, if any
	requiresEntitlement bool     // whether the entitlement must be granted
	fast                bool     // cheap enough for quick runs
	buildArgs           commandArgumentsBuilder // argv builder for invocations
	parseOutput         findingParser           // primary output parser
}
// CatalogAdapter wraps the embedded regex rule catalog as a built-in linter.
// It needs no external binary, so it is always available.
type CatalogAdapter struct{}
// defaultAdapters returns the built-in adapter set, one entry per supported
// external tool. Each entry supplies: name, candidate binaries, target
// languages ("*" = any), finding category, entitlement gate (key + required
// flag), fast flag, an argument builder, and an output parser.
func defaultAdapters() []Adapter {
	return []Adapter{
		// Go
		newCommandAdapter("golangci-lint", []string{"golangci-lint"}, []string{"go"}, "correctness", "", false, true, goProjectArguments("run", "--out-format", "json"), parseJSONDiagnostics),
		newCommandAdapter("gosec", []string{"gosec"}, []string{"go"}, "security", "lint.security", true, false, goProjectArguments("-fmt", "json"), parseJSONDiagnostics),
		newCommandAdapter("govulncheck", []string{"govulncheck"}, []string{"go"}, "security", "", false, false, goProjectArguments("-json"), parseGovulncheckDiagnostics),
		newCommandAdapter("staticcheck", []string{"staticcheck"}, []string{"go"}, "correctness", "", false, true, goProjectArguments("-f", "json"), parseJSONDiagnostics),
		newCommandAdapter("revive", []string{"revive"}, []string{"go"}, "style", "", false, true, goProjectArguments("-formatter", "json"), parseJSONDiagnostics),
		newCommandAdapter("errcheck", []string{"errcheck"}, []string{"go"}, "correctness", "", false, true, goProjectArguments(), parseTextDiagnostics),
		// PHP
		newCommandAdapter("phpstan", []string{"phpstan"}, []string{"php"}, "correctness", "", false, true, projectPathArguments("analyse", "--error-format", "json"), parseJSONDiagnostics),
		newCommandAdapter("psalm", []string{"psalm"}, []string{"php"}, "correctness", "", false, true, projectPathArguments("--output-format", "json"), parseJSONDiagnostics),
		newCommandAdapter("phpcs", []string{"phpcs"}, []string{"php"}, "style", "", false, true, projectPathArguments("--report=json"), parseJSONDiagnostics),
		newCommandAdapter("phpmd", []string{"phpmd"}, []string{"php"}, "correctness", "", false, true, phpmdArguments(), parseJSONDiagnostics),
		newCommandAdapter("pint", []string{"pint"}, []string{"php"}, "style", "", false, true, projectPathArguments("--format", "json"), parseJSONDiagnostics),
		// JavaScript / TypeScript
		newCommandAdapter("biome", []string{"biome"}, []string{"js", "ts"}, "style", "", false, true, projectPathArguments("check", "--reporter", "json"), parseJSONDiagnostics),
		newCommandAdapter("oxlint", []string{"oxlint"}, []string{"js", "ts"}, "style", "", false, true, projectPathArguments("--format", "json"), parseJSONDiagnostics),
		newCommandAdapter("eslint", []string{"eslint"}, []string{"js"}, "style", "", false, true, projectPathArguments("--format", "json"), parseJSONDiagnostics),
		newCommandAdapter("typescript", []string{"tsc", "typescript"}, []string{"ts"}, "correctness", "", false, true, projectPathArguments("--pretty", "false"), parseTextDiagnostics),
		// Python
		newCommandAdapter("ruff", []string{"ruff"}, []string{"python"}, "style", "", false, true, projectPathArguments("check", "--output-format", "json"), parseJSONDiagnostics),
		newCommandAdapter("mypy", []string{"mypy"}, []string{"python"}, "correctness", "", false, true, projectPathArguments("--output", "json"), parseJSONDiagnostics),
		newCommandAdapter("bandit", []string{"bandit"}, []string{"python"}, "security", "lint.security", true, false, recursiveProjectPathArguments("-f", "json", "-r"), parseJSONDiagnostics),
		newCommandAdapter("pylint", []string{"pylint"}, []string{"python"}, "style", "", false, true, projectPathArguments("--output-format", "json"), parseJSONDiagnostics),
		// Infrastructure / config formats
		newCommandAdapter("shellcheck", []string{"shellcheck"}, []string{"shell"}, "correctness", "", false, true, filePathArguments("-f", "json"), parseJSONDiagnostics),
		newCommandAdapter("hadolint", []string{"hadolint"}, []string{"dockerfile"}, "security", "", false, true, filePathArguments("-f", "json"), parseJSONDiagnostics),
		newCommandAdapter("yamllint", []string{"yamllint"}, []string{"yaml"}, "style", "", false, true, projectPathArguments("-f", "parsable"), parseTextDiagnostics),
		newCommandAdapter("jsonlint", []string{"jsonlint"}, []string{"json"}, "style", "", false, true, filePathArguments(), parseTextDiagnostics),
		newCommandAdapter("markdownlint", []string{"markdownlint", "markdownlint-cli"}, []string{"markdown"}, "style", "", false, true, projectPathArguments("--json"), parseJSONDiagnostics),
		newCommandAdapter("prettier", []string{"prettier"}, []string{"js"}, "style", "", false, true, projectPathArguments("--list-different"), parsePrettierDiagnostics),
		// Cross-language security / compliance scanners (entitlement-gated)
		newCommandAdapter("gitleaks", []string{"gitleaks"}, []string{"*"}, "security", "lint.security", true, false, recursiveProjectPathArguments("detect", "--no-git", "--report-format", "json", "--source"), parseJSONDiagnostics),
		newCommandAdapter("trivy", []string{"trivy"}, []string{"*"}, "security", "lint.security", true, false, projectPathArguments("fs", "--format", "json"), parseJSONDiagnostics),
		newCommandAdapter("semgrep", []string{"semgrep"}, []string{"*"}, "security", "lint.security", true, false, projectPathArguments("--json"), parseJSONDiagnostics),
		newCommandAdapter("syft", []string{"syft"}, []string{"*"}, "compliance", "lint.compliance", true, false, projectPathArguments("scan", "-o", "json"), parseJSONDiagnostics),
		// NOTE(review): grype is categorised "security" but gated by
		// "lint.compliance" — confirm this asymmetry is intended.
		newCommandAdapter("grype", []string{"grype"}, []string{"*"}, "security", "lint.compliance", true, false, projectPathArguments("-o", "json"), parseJSONDiagnostics),
		newCommandAdapter("scancode", []string{"scancode-toolkit", "scancode"}, []string{"*"}, "compliance", "lint.compliance", true, false, projectPathArguments("--json"), parseJSONDiagnostics),
	}
}
// newCatalogAdapter constructs the built-in regex-catalog adapter.
func newCatalogAdapter() Adapter {
	var builtin CatalogAdapter
	return builtin
}
// newCommandAdapter assembles a CommandAdapter from its full configuration
// and returns it behind the Adapter interface.
func newCommandAdapter(name string, binaries []string, languages []string, category string, entitlement string, requiresEntitlement bool, fast bool, builder commandArgumentsBuilder, parser findingParser) Adapter {
	adapter := CommandAdapter{
		name:                name,
		binaries:            binaries,
		languages:           languages,
		category:            category,
		entitlement:         entitlement,
		requiresEntitlement: requiresEntitlement,
		fast:                fast,
		buildArgs:           builder,
		parseOutput:         parser,
	}
	return adapter
}
// Name returns the adapter's identifier.
func (adapter CommandAdapter) Name() string { return adapter.name }

// Available reports whether any candidate binary resolves on PATH.
func (adapter CommandAdapter) Available() bool {
	_, ok := adapter.availableBinary()
	return ok
}

// Languages returns a copy of the adapter's language keys so callers
// cannot mutate the adapter's internal slice.
func (adapter CommandAdapter) Languages() []string {
	return append([]string(nil), adapter.languages...)
}

// Command returns the primary (first-configured) binary name, or "" when
// no binaries are configured.
func (adapter CommandAdapter) Command() string {
	if len(adapter.binaries) == 0 {
		return ""
	}
	return adapter.binaries[0]
}

// Entitlement returns the entitlement key gating this adapter, if any.
func (adapter CommandAdapter) Entitlement() string { return adapter.entitlement }

// RequiresEntitlement reports whether the entitlement must be granted.
func (adapter CommandAdapter) RequiresEntitlement() bool { return adapter.requiresEntitlement }
// MatchesLanguage reports whether this adapter applies to any of the
// requested languages. An empty request or an empty adapter language list
// matches everything, as does the "*" wildcard entry.
func (adapter CommandAdapter) MatchesLanguage(languages []string) bool {
	if len(adapter.languages) == 0 || len(languages) == 0 {
		return true
	}
	if len(adapter.languages) == 1 && adapter.languages[0] == "*" {
		return true
	}
	for _, language := range languages {
		// NOTE(review): this compares the requested value against the
		// adapter's CATEGORY (case-insensitively) — presumably a deliberate
		// shortcut so callers can select tools by category (e.g.
		// "security"); confirm against call sites. Also note the
		// supported-language check below is case-sensitive while this one
		// is not — verify the asymmetry is intended.
		if strings.EqualFold(language, adapter.category) {
			return true
		}
		for _, supported := range adapter.languages {
			if supported == language {
				return true
			}
		}
	}
	return false
}

// Category returns the finding category assigned to this adapter's results.
func (adapter CommandAdapter) Category() string { return adapter.category }

// Fast reports whether the tool is considered cheap enough for quick runs.
func (adapter CommandAdapter) Fast() bool { return adapter.fast }
// Run executes the adapter's tool against the project at input.Path,
// normalises its output into findings, and records status ("skipped",
// "timeout", "failed", or "passed") and duration in the returned ToolRun.
func (adapter CommandAdapter) Run(ctx context.Context, input RunInput, files []string) AdapterResult {
	startedAt := time.Now()
	result := AdapterResult{
		Tool: ToolRun{
			Name: adapter.name,
		},
	}
	// A tool that is not installed is skipped, not failed.
	binary, ok := adapter.availableBinary()
	if !ok {
		result.Tool.Status = "skipped"
		result.Tool.Duration = "0s"
		return result
	}
	result.Tool.Version = probeCommandVersion(binary, input.Path)
	// Hard per-tool cap so a single hung linter cannot stall the run.
	runContext, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()
	args := adapter.buildArgs(input.Path, files)
	stdout, stderr, exitCode, runErr := runCommand(runContext, input.Path, binary, args)
	result.Tool.Duration = time.Since(startedAt).Round(time.Millisecond).String()
	if errors.Is(runContext.Err(), context.DeadlineExceeded) {
		result.Tool.Status = "timeout"
		return result
	}
	// Merge stderr after stdout: tools emit diagnostics on either stream.
	output := strings.TrimSpace(stdout)
	if strings.TrimSpace(stderr) != "" {
		if output != "" {
			output += "\n" + strings.TrimSpace(stderr)
		} else {
			output = strings.TrimSpace(stderr)
		}
	}
	if adapter.parseOutput != nil && output != "" {
		result.Findings = adapter.parseOutput(adapter.name, adapter.category, output)
	}
	// Fall back to line-oriented text parsing when the primary parser
	// produced nothing.
	if len(result.Findings) == 0 && output != "" {
		result.Findings = parseTextDiagnostics(adapter.name, adapter.category, output)
	}
	// When the command failed and nothing was parsed, surface the failure
	// itself as one synthetic finding so it is never silently dropped.
	if len(result.Findings) == 0 && runErr != nil {
		result.Findings = []Finding{{
			Tool:     adapter.name,
			Severity: defaultSeverityForCategory(adapter.category),
			Code:     "command-failed",
			Message:  strings.TrimSpace(firstNonEmpty(output, runErr.Error())),
			Category: adapter.category,
		}}
	}
	// Backfill tool/category/severity on findings the parser left blank,
	// and normalise any severity the parser did set.
	for index := range result.Findings {
		if result.Findings[index].Tool == "" {
			result.Findings[index].Tool = adapter.name
		}
		if result.Findings[index].Category == "" {
			result.Findings[index].Category = adapter.category
		}
		if result.Findings[index].Severity == "" {
			result.Findings[index].Severity = defaultSeverityForCategory(adapter.category)
		} else {
			result.Findings[index].Severity = normaliseSeverity(result.Findings[index].Severity)
		}
	}
	result.Tool.Findings = len(result.Findings)
	// Any run error, non-zero exit, or finding marks the tool as failed.
	switch {
	case runErr != nil || exitCode != 0 || len(result.Findings) > 0:
		result.Tool.Status = "failed"
	default:
		result.Tool.Status = "passed"
	}
	return result
}
// probeCommandVersion asks binary for its version, trying the common flag
// spellings in order, and returns the first usable version line, or ""
// when none of the probes produces one.
func probeCommandVersion(binary string, workingDir string) string {
	candidates := [][]string{{"--version"}, {"-version"}, {"version"}}
	for _, flags := range candidates {
		probeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		stdout, stderr, exitCode, err := runCommand(probeCtx, workingDir, binary, flags)
		cancel()
		// A probe that errored with a non-zero exit is not usable output.
		if err != nil && exitCode != 0 {
			continue
		}
		raw := firstNonEmpty(stdout, stderr)
		if raw == "" {
			continue
		}
		if versionLine := firstVersionLine(raw); versionLine != "" {
			return versionLine
		}
	}
	return ""
}
// availableBinary resolves the first of the adapter's candidate binaries
// found on PATH, reporting whether any candidate was found.
func (adapter CommandAdapter) availableBinary() (string, bool) {
	for _, candidate := range adapter.binaries {
		if resolved, err := exec.LookPath(candidate); err == nil {
			return resolved, true
		}
	}
	return "", false
}
// Name returns the built-in catalog adapter's identifier.
func (CatalogAdapter) Name() string { return "catalog" }

// Available always reports true: the rule catalog is embedded, so no
// external binary is required.
func (CatalogAdapter) Available() bool { return true }

// Languages lists the languages the embedded rules target.
func (CatalogAdapter) Languages() []string { return []string{"go"} }

// Command returns a placeholder name; no external command is invoked.
func (CatalogAdapter) Command() string { return "catalog" }

// Entitlement returns "": the catalog is not entitlement-gated.
func (CatalogAdapter) Entitlement() string { return "" }

// RequiresEntitlement reports false; the catalog is always permitted.
func (CatalogAdapter) RequiresEntitlement() bool { return false }

// MatchesLanguage reports whether "go" is among the requested languages,
// or whether no specific language was requested.
func (CatalogAdapter) MatchesLanguage(languages []string) bool {
	return len(languages) == 0 || containsString(languages, "go")
}

// Category returns the category applied to catalog findings.
func (CatalogAdapter) Category() string { return "correctness" }

// Fast reports true: the built-in scan needs no external process.
func (CatalogAdapter) Fast() bool { return true }
// Run scans the project (or an explicit file list) with the embedded rule
// catalog and reports matches as findings. The context is unused: the scan
// is local and synchronous.
func (CatalogAdapter) Run(_ context.Context, input RunInput, files []string) AdapterResult {
	startedAt := time.Now()
	result := AdapterResult{
		Tool: ToolRun{
			Name: "catalog",
		},
	}
	catalog, err := loadBuiltinCatalog()
	if err != nil {
		// Surface the catalog load failure itself as a single finding.
		result.Tool.Status = "failed"
		result.Tool.Duration = time.Since(startedAt).Round(time.Millisecond).String()
		result.Findings = []Finding{{
			Tool:     "catalog",
			Severity: "error",
			Code:     "catalog-load",
			Message:  err.Error(),
			Category: "correctness",
		}}
		result.Tool.Findings = len(result.Findings)
		return result
	}
	// Optionally narrow the rule set to the requested category tag.
	rules := catalog.Rules
	if input.Category != "" {
		rules = filterRulesByTag(rules, input.Category)
	}
	scanner, err := NewScanner(rules)
	if err != nil {
		// Surface the scanner construction failure as a single finding.
		result.Tool.Status = "failed"
		result.Tool.Duration = time.Since(startedAt).Round(time.Millisecond).String()
		result.Findings = []Finding{{
			Tool:     "catalog",
			Severity: "error",
			Code:     "catalog-scan",
			Message:  err.Error(),
			Category: "correctness",
		}}
		result.Tool.Findings = len(result.Findings)
		return result
	}
	var findings []Finding
	if len(files) > 0 {
		// Explicit file list: resolve relative paths against the project
		// root; files that cannot be scanned are skipped silently.
		for _, file := range files {
			scanPath := file
			if !filepath.IsAbs(scanPath) {
				scanPath = filepath.Join(input.Path, file)
			}
			fileFindings, scanErr := scanner.ScanFile(scanPath)
			if scanErr != nil {
				continue
			}
			findings = append(findings, fileFindings...)
		}
	} else {
		// Whole-project scan; the scan error is deliberately discarded —
		// NOTE(review): confirm dropping ScanDir's error is intended.
		findings, _ = scanner.ScanDir(input.Path)
	}
	// Normalise scanner output into the common Finding shape, copying the
	// rule ID/title into Code/Message and resolving the rule's category.
	for index := range findings {
		rule := catalog.ByID(findings[index].RuleID)
		findings[index].Tool = "catalog"
		findings[index].Code = findings[index].RuleID
		findings[index].Message = findings[index].Title
		findings[index].Severity = normaliseSeverity(findings[index].Severity)
		if rule != nil {
			findings[index].Category = ruleCategory(*rule)
		}
	}
	result.Findings = findings
	result.Tool.Findings = len(findings)
	result.Tool.Duration = time.Since(startedAt).Round(time.Millisecond).String()
	if len(findings) > 0 {
		result.Tool.Status = "failed"
	} else {
		result.Tool.Status = "passed"
	}
	return result
}
// loadBuiltinCatalog parses the embedded rule definitions into a Catalog,
// wrapping any parse failure with operation context.
func loadBuiltinCatalog() (*Catalog, error) {
	rules, err := ParseRules([]byte(defaultCatalogRulesYAML))
	if err != nil {
		return nil, coreerr.E("loadBuiltinCatalog", "parse embedded fallback rules", err)
	}
	return &Catalog{Rules: rules}, nil
}
// goProjectArguments builds an argv tail for Go tools: the given flags,
// followed by the explicit file list, or the "./..." package wildcard when
// no files were given. The project path is unused.
func goProjectArguments(prefix ...string) commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		arguments := append([]string(nil), prefix...)
		if len(files) == 0 {
			return append(arguments, "./...")
		}
		return append(arguments, files...)
	}
}
// projectPathArguments builds an argv tail from the given flags, followed
// by the explicit file list, or "." to target the working directory when
// no files were given. The project path is unused.
func projectPathArguments(prefix ...string) commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		arguments := append([]string(nil), prefix...)
		if len(files) == 0 {
			return append(arguments, ".")
		}
		return append(arguments, files...)
	}
}
// recursiveProjectPathArguments builds an argv tail for tools whose prefix
// flags already request recursion (e.g. bandit's -r, gitleaks' --source):
// the given flags, then the explicit file list or "." when none was given.
// The project path is unused.
func recursiveProjectPathArguments(prefix ...string) commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		arguments := append([]string(nil), prefix...)
		if len(files) == 0 {
			return append(arguments, ".")
		}
		return append(arguments, files...)
	}
}
// filePathArguments builds an argv tail for file-oriented tools (e.g.
// shellcheck, hadolint): the given flags, then the explicit file list or
// "." when none was given. The project path is unused.
func filePathArguments(prefix ...string) commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		arguments := append([]string(nil), prefix...)
		if len(files) == 0 {
			return append(arguments, ".")
		}
		return append(arguments, files...)
	}
}
// phpmdArguments builds phpmd's positional argument list: comma-joined
// target paths (or "."), the output format, then the fixed ruleset
// selection. The project path is unused.
func phpmdArguments() commandArgumentsBuilder {
	return func(_ string, files []string) []string {
		paths := "."
		if len(files) > 0 {
			paths = strings.Join(files, ",")
		}
		return []string{paths, "json", "cleancode,codesize,controversial,design,naming,unusedcode"}
	}
}
func runCommand(ctx context.Context, workingDir string, binary string, args []string) (string, string, int, error) {
command := exec.CommandContext(ctx, binary, args...)
if workingDir != "" {
command.Dir = workingDir
}
var stdout bytes.Buffer
var stderr bytes.Buffer
command.Stdout = &stdout
command.Stderr = &stderr
err := command.Run()
if err == nil {
return stdout.String(), stderr.String(), 0, nil
}
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
return stdout.String(), stderr.String(), exitErr.ExitCode(), err
}
return stdout.String(), stderr.String(), -1, err
}
// parseGovulncheckDiagnostics converts govulncheck's JSON output into
// findings, one per reported vulnerability. Unparseable or empty output
// yields nil.
func parseGovulncheckDiagnostics(tool string, category string, output string) []Finding {
	parsed, err := ParseVulnCheckJSON(output, "")
	if parsed == nil || err != nil {
		return nil
	}
	var findings []Finding
	for _, vuln := range parsed.Findings {
		// Prefer the human-readable description, fall back to the package
		// name, and finally to the advisory ID so Message is never empty.
		message := strings.TrimSpace(firstNonEmpty(vuln.Description, vuln.Package))
		if message == "" {
			message = vuln.ID
		}
		findings = append(findings, Finding{
			Tool:     tool,
			File:     vuln.Package,
			Severity: "error",
			Code:     vuln.ID,
			Message:  message,
			Category: category,
		})
	}
	return findings
}
// parseJSONDiagnostics decodes one or more concatenated JSON documents
// from output, walks each for diagnostic-shaped objects, and returns the
// de-duplicated findings. Malformed JSON yields nil.
func parseJSONDiagnostics(tool string, category string, output string) []Finding {
	decoder := json.NewDecoder(strings.NewReader(output))
	var findings []Finding
	for {
		var document any
		switch err := decoder.Decode(&document); {
		case errors.Is(err, io.EOF):
			return dedupeFindings(findings)
		case err != nil:
			return nil
		}
		findings = append(findings, collectJSONDiagnostics(tool, category, document)...)
	}
}
// collectJSONDiagnostics recursively walks a decoded JSON value, gathering
// a finding from every object that looks like a diagnostic; nested arrays
// and objects are descended into, and scalar values yield nothing.
func collectJSONDiagnostics(tool string, category string, value any) []Finding {
	var findings []Finding
	switch node := value.(type) {
	case []any:
		for _, element := range node {
			findings = append(findings, collectJSONDiagnostics(tool, category, element)...)
		}
	case map[string]any:
		// The object itself may be a diagnostic; check it before recursing
		// into its values.
		if finding, ok := findingFromMap(tool, category, node); ok {
			findings = append(findings, finding)
		}
		for _, element := range node {
			findings = append(findings, collectJSONDiagnostics(tool, category, element)...)
		}
	}
	return findings
}
// findingFromMap attempts to build a Finding from one decoded JSON object.
// It probes a fixed list of well-known key spellings (lower/upper/camel case
// plus a few nested shapes — e.g. SARIF-style "artifactLocation"/"region" —
// so output from many different tools maps onto the same Finding fields).
// Probe order is load-bearing: the first non-empty match wins.
// Returns false when the object does not look like a diagnostic.
func findingFromMap(tool string, category string, fields map[string]any) (Finding, bool) {
	file := firstStringPath(fields,
		[]string{"file"},
		[]string{"File"},
		[]string{"filename"},
		[]string{"path"},
		[]string{"location", "path"},
		[]string{"artifactLocation", "uri"},
		[]string{"Target"},
	)
	line := firstIntPath(fields,
		[]string{"line"},
		[]string{"Line"},
		[]string{"startLine"},
		[]string{"StartLine"},
		[]string{"region", "startLine"},
		[]string{"location", "start", "line"},
		[]string{"Start", "Line"},
	)
	column := firstIntPath(fields,
		[]string{"column"},
		[]string{"Column"},
		[]string{"col"},
		[]string{"startColumn"},
		[]string{"StartColumn"},
		[]string{"region", "startColumn"},
		[]string{"location", "start", "column"},
	)
	code := firstStringPath(fields,
		[]string{"code"},
		[]string{"Code"},
		[]string{"rule"},
		[]string{"Rule"},
		[]string{"rule_id"},
		[]string{"RuleID"},
		[]string{"check_id"},
		[]string{"checkId"},
		[]string{"id"},
		[]string{"ID"},
	)
	message := firstStringPath(fields,
		[]string{"message"},
		[]string{"Message"},
		[]string{"description"},
		[]string{"Description"},
		[]string{"title"},
		[]string{"Title"},
		[]string{"message", "text"},
		[]string{"Message", "Text"},
	)
	severity := firstStringPath(fields,
		[]string{"severity"},
		[]string{"Severity"},
		[]string{"level"},
		[]string{"Level"},
		[]string{"type"},
		[]string{"Type"},
	)
	// Neither a message nor a rule code: not a diagnostic object at all.
	if message == "" && code == "" {
		return Finding{}, false
	}
	// A bare message with no file, line, or code is dropped outside security
	// categories to cut noise. (With code == "" here, message is known to be
	// non-empty because of the guard above.)
	if file == "" && line == 0 && !strings.Contains(strings.ToLower(category), "security") && code == "" {
		return Finding{}, false
	}
	return Finding{
		Tool:   tool,
		File:   file,
		Line:   line,
		Column: column,
		// Normalise the tool's severity label; if it is absent or
		// normalises to empty, fall back to the category default.
		Severity: firstNonEmpty(normaliseSeverity(severity), defaultSeverityForCategory(category)),
		Code:     code,
		Message:  message,
		Category: category,
	}, true
}
// parseTextDiagnostics parses "file:line[:col]: message" style tool output.
// When no line matches that shape but the output is non-empty, the whole
// output is surfaced once as a single catch-all finding. Results are
// de-duplicated.
func parseTextDiagnostics(tool string, category string, output string) []Finding {
	trimmedOutput := strings.TrimSpace(output)
	var findings []Finding
	for rawLine := range strings.SplitSeq(trimmedOutput, "\n") {
		candidate := strings.TrimSpace(rawLine)
		if candidate == "" {
			continue
		}
		finding, ok := parseTextDiagnosticLine(tool, category, candidate)
		if !ok {
			continue
		}
		findings = append(findings, finding)
	}
	if len(findings) == 0 && trimmedOutput != "" {
		// Nothing matched the structured shape: keep the raw output so the
		// tool's complaint is not silently lost.
		findings = append(findings, Finding{
			Tool:     tool,
			Severity: defaultSeverityForCategory(category),
			Code:     "diagnostic",
			Message:  trimmedOutput,
			Category: category,
		})
	}
	return dedupeFindings(findings)
}
// parsePrettierDiagnostics turns Prettier output into findings. It assumes
// each non-empty line is a file path (the --list-different / --check style
// output — NOTE(review): confirm against the invocation site). Results are
// de-duplicated.
func parsePrettierDiagnostics(tool string, category string, output string) []Finding {
	var findings []Finding
	for rawLine := range strings.SplitSeq(strings.TrimSpace(output), "\n") {
		path := strings.TrimSpace(rawLine)
		if path == "" {
			continue
		}
		findings = append(findings, Finding{
			Tool:     tool,
			File:     filepath.ToSlash(path),
			Severity: defaultSeverityForCategory(category),
			Code:     "prettier-format",
			Message:  "File is not formatted with Prettier",
			Category: category,
		})
	}
	return dedupeFindings(findings)
}
// parseTextDiagnosticLine parses one "file:line[:column]: message" line into
// a Finding. Returns false when the line does not fit that shape.
//
// NOTE(review): a colon inside the file path (e.g. a Windows drive letter)
// would shift the fields; the callers appear to feed POSIX-style tool output.
func parseTextDiagnosticLine(tool string, category string, line string) (Finding, bool) {
	segments := strings.Split(line, ":")
	if len(segments) < 3 {
		return Finding{}, false
	}
	lineNumber, lineErr := strconv.Atoi(strings.TrimSpace(segments[1]))
	if lineErr != nil {
		return Finding{}, false
	}
	columnNumber := 0
	messageIndex := 2
	if len(segments) > 3 {
		// Optional column field: "file:line:col: message".
		if parsedColumn, columnErr := strconv.Atoi(strings.TrimSpace(segments[2])); columnErr == nil {
			columnNumber = parsedColumn
			messageIndex = 3
		}
	}
	message := strings.TrimSpace(strings.Join(segments[messageIndex:], ":"))
	if message == "" {
		return Finding{}, false
	}
	severity := defaultSeverityForCategory(category)
	// Lower-case once instead of once per comparison. "warning" deliberately
	// wins over "error" when both words appear, matching prior behaviour.
	lowered := strings.ToLower(message)
	switch {
	case strings.Contains(lowered, "warning"):
		severity = "warning"
	case strings.Contains(lowered, "error"):
		severity = "error"
	}
	return Finding{
		Tool:     tool,
		File:     filepath.ToSlash(strings.TrimSpace(segments[0])),
		Line:     lineNumber,
		Column:   columnNumber,
		Severity: severity,
		Code:     "diagnostic",
		Message:  message,
		Category: category,
	}, true
}
func firstStringPath(fields map[string]any, paths ...[]string) string {
for _, path := range paths {
if value, ok := lookupPath(fields, path); ok {
switch typed := value.(type) {
case string:
if strings.TrimSpace(typed) != "" {
return strings.TrimSpace(typed)
}
case json.Number:
return typed.String()
}
}
}
return ""
}
// firstIntPath returns the first value at any of the given key paths that
// can be read as an int (int, int64, float64, json.Number, or a numeric
// string). Returns 0 when nothing matches.
func firstIntPath(fields map[string]any, paths ...[]string) int {
	toInt := func(value any) (int, bool) {
		switch typed := value.(type) {
		case int:
			return typed, true
		case int64:
			return int(typed), true
		case float64:
			// encoding/json decodes numbers as float64; truncate.
			return int(typed), true
		case json.Number:
			// A non-integer json.Number still terminates the search with 0,
			// mirroring the lenient original behaviour.
			parsed, _ := typed.Int64()
			return int(parsed), true
		case string:
			parsed, err := strconv.Atoi(strings.TrimSpace(typed))
			return parsed, err == nil
		}
		return 0, false
	}
	for _, path := range paths {
		if value, ok := lookupPath(fields, path); ok {
			if number, converted := toInt(value); converted {
				return number
			}
		}
	}
	return 0
}
// lookupPath descends through nested maps following the given key path
// (keys matched case-insensitively via mapValue). It fails as soon as a hop
// is missing or the current value is not an object.
func lookupPath(fields map[string]any, path []string) (any, bool) {
	var current any = fields
	for _, segment := range path {
		object, isMap := current.(map[string]any)
		if !isMap {
			return nil, false
		}
		next, found := mapValue(object, segment)
		if !found {
			return nil, false
		}
		current = next
	}
	return current, true
}
// mapValue fetches key from fields. An exact match wins; otherwise it falls
// back to a case-insensitive scan so tools emitting "File"/"file"/"FILE" all
// resolve to the same field.
//
// strings.EqualFold avoids allocating a lowered copy of every key on each
// comparison, which the previous ToLower-based scan did.
func mapValue(fields map[string]any, key string) (any, bool) {
	if value, ok := fields[key]; ok {
		return value, true
	}
	for fieldKey, value := range fields {
		if strings.EqualFold(fieldKey, key) {
			return value, true
		}
	}
	return nil, false
}
// dedupeFindings drops repeated findings, keeping the first occurrence and
// preserving order. Identity is the (tool, file, line, column, code, message)
// tuple; category and severity are deliberately not part of the key.
func dedupeFindings(findings []Finding) []Finding {
	seen := make(map[string]bool, len(findings))
	var unique []Finding
	for _, finding := range findings {
		key := finding.Tool + "|" + finding.File + "|" +
			strconv.Itoa(finding.Line) + "|" + strconv.Itoa(finding.Column) + "|" +
			finding.Code + "|" + finding.Message
		if seen[key] {
			continue
		}
		seen[key] = true
		unique = append(unique, finding)
	}
	return unique
}
// filterRulesByTag returns the subset of rules whose Tags contain tag.
func filterRulesByTag(rules []Rule, tag string) []Rule {
	var matched []Rule
	for _, rule := range rules {
		if containsString(rule.Tags, tag) {
			matched = append(matched, rule)
		}
	}
	return matched
}
// ruleCategory maps a rule to its reporting category: the first tag that is
// a recognised category name wins, and everything else defaults to
// "correctness".
func ruleCategory(rule Rule) string {
	known := map[string]bool{
		"security":    true,
		"style":       true,
		"correctness": true,
		"performance": true,
		"compliance":  true,
	}
	for _, tag := range rule.Tags {
		if known[tag] {
			return tag
		}
	}
	return "correctness"
}
// normaliseSeverity collapses tool-specific severity labels onto the three
// canonical levels (error / warning / info). Unrecognised labels pass
// through trimmed and lower-cased; an empty input stays empty.
//
// The trimmed-and-lowered form is computed once instead of twice (the old
// default arm re-ran TrimSpace+ToLower on the same input).
func normaliseSeverity(severity string) string {
	cleaned := strings.ToLower(strings.TrimSpace(severity))
	switch cleaned {
	case "critical", "high", "error", "errors":
		return "error"
	case "medium", "low", "warning", "warn":
		return "warning"
	case "info", "note":
		return "info"
	}
	return cleaned
}
// defaultSeverityForCategory returns the severity used when a finding does
// not declare one: security findings are hard failures, every other category
// (including compliance) defaults to a warning.
func defaultSeverityForCategory(category string) string {
	if category == "security" {
		return "error"
	}
	return "warning"
}
// firstNonEmpty returns the first value containing non-whitespace content,
// trimmed; "" when every value is blank.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		trimmed := strings.TrimSpace(candidate)
		if trimmed != "" {
			return trimmed
		}
	}
	return ""
}
// firstVersionLine returns the first non-blank line of a tool's version
// output, trimmed; "" when the output is empty or all whitespace.
func firstVersionLine(output string) string {
	for candidate := range strings.SplitSeq(strings.TrimSpace(output), "\n") {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
// containsString reports whether target appears in values. A linear scan is
// fine here: the slices involved are short tag/tool-name lists.
func containsString(values []string, target string) bool {
	found := false
	for index := 0; index < len(values) && !found; index++ {
		found = values[index] == target
	}
	return found
}
// defaultCatalogRulesYAML is the embedded fallback rule catalog used when no
// on-disk catalog is available. Each entry is a regex-detected rule —
// presumably decoded by ParseRules into Rule values (confirm against the
// catalog loader). NOTE(review): keep rule IDs stable; they surface in
// reports and findings.
const defaultCatalogRulesYAML = `
- id: go-cor-003
  title: "Silent error swallowing with blank identifier"
  severity: medium
  languages: [go]
  tags: [correctness, errors]
  pattern: '^\s*_\s*=\s*\w+\.\w+\('
  exclude_pattern: 'defer|Close\(|Flush\('
  fix: "Handle the error explicitly — log it, return it, or document why it is safe to discard"
  detection: regex
  auto_fixable: false
- id: go-cor-004
  title: "Panic in library code"
  severity: high
  languages: [go]
  tags: [correctness, panic]
  pattern: '\bpanic\('
  exclude_pattern: '_test\.go|// unreachable|Must\w+\('
  fix: "Return an error instead of panicking — panics in libraries crash the caller"
  detection: regex
  auto_fixable: false
- id: go-sec-001
  title: "SQL wildcard injection in LIKE clauses"
  severity: high
  languages: [go]
  tags: [security, injection]
  pattern: 'LIKE\s+\?.*["%].*\+'
  fix: "Use parameterised LIKE with EscapeLike() helper to sanitise wildcard characters"
  detection: regex
  auto_fixable: false
- id: go-sec-002
  title: "Path traversal via filepath.Join"
  severity: high
  languages: [go]
  tags: [security, path-traversal]
  pattern: 'filepath\.Join\(.*,\s*\w+\)'
  exclude_pattern: 'filepath\.Clean|securejoin|ValidatePath'
  fix: "Validate the path component or use securejoin to prevent directory traversal"
  detection: regex
  auto_fixable: false
- id: go-sec-004
  title: "Non-constant-time authentication comparison"
  severity: critical
  languages: [go]
  tags: [security, timing-attack]
  pattern: '==\s*\w*(token|key|secret|password|hash|digest|hmac|mac|sig)'
  exclude_pattern: 'subtle\.ConstantTimeCompare|hmac\.Equal'
  fix: "Use subtle.ConstantTimeCompare() or hmac.Equal() for timing-safe comparison"
  detection: regex
  auto_fixable: false
`

View file

@ -1,12 +1,13 @@
package lint
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// severityOrder maps severity names to numeric ranks for threshold comparison.
@ -25,23 +26,24 @@ type Catalog struct {
// LoadDir reads all .yaml files from the given directory and returns a Catalog.
func LoadDir(dir string) (*Catalog, error) {
entries, err := os.ReadDir(dir)
entries, err := coreio.Local.List(dir)
if err != nil {
return nil, fmt.Errorf("loading catalog from %s: %w", dir, err)
return nil, coreerr.E("Catalog.LoadDir", "loading catalog from "+dir, err)
}
sortDirEntries(entries)
var rules []Rule
for _, entry := range entries {
if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".yaml") {
continue
}
data, err := os.ReadFile(filepath.Join(dir, entry.Name()))
raw, err := coreio.Local.Read(filepath.Join(dir, entry.Name()))
if err != nil {
return nil, fmt.Errorf("reading %s: %w", entry.Name(), err)
return nil, coreerr.E("Catalog.LoadDir", "reading "+entry.Name(), err)
}
parsed, err := ParseRules(data)
parsed, err := ParseRules([]byte(raw))
if err != nil {
return nil, fmt.Errorf("parsing %s: %w", entry.Name(), err)
return nil, coreerr.E("Catalog.LoadDir", "parsing "+entry.Name(), err)
}
rules = append(rules, parsed...)
}
@ -53,8 +55,9 @@ func LoadDir(dir string) (*Catalog, error) {
func LoadFS(fsys fs.FS, dir string) (*Catalog, error) {
entries, err := fs.ReadDir(fsys, dir)
if err != nil {
return nil, fmt.Errorf("loading catalog from embedded %s: %w", dir, err)
return nil, coreerr.E("Catalog.LoadFS", "loading catalog from embedded "+dir, err)
}
sortDirEntries(entries)
var rules []Rule
for _, entry := range entries {
@ -63,11 +66,11 @@ func LoadFS(fsys fs.FS, dir string) (*Catalog, error) {
}
data, err := fs.ReadFile(fsys, dir+"/"+entry.Name())
if err != nil {
return nil, fmt.Errorf("reading embedded %s: %w", entry.Name(), err)
return nil, coreerr.E("Catalog.LoadFS", "reading embedded "+entry.Name(), err)
}
parsed, err := ParseRules(data)
if err != nil {
return nil, fmt.Errorf("parsing embedded %s: %w", entry.Name(), err)
return nil, coreerr.E("Catalog.LoadFS", "parsing embedded "+entry.Name(), err)
}
rules = append(rules, parsed...)
}
@ -75,6 +78,12 @@ func LoadFS(fsys fs.FS, dir string) (*Catalog, error) {
return &Catalog{Rules: rules}, nil
}
// sortDirEntries sorts directory entries by name, in place, so catalog rule
// files load in a deterministic order regardless of the filesystem's ReadDir
// ordering.
func sortDirEntries(entries []fs.DirEntry) {
	slices.SortFunc(entries, func(a, b fs.DirEntry) int {
		return strings.Compare(a.Name(), b.Name())
	})
}
// ForLanguage returns all rules that apply to the given language.
func (c *Catalog) ForLanguage(lang string) []Rule {
var result []Rule

View file

@ -29,6 +29,38 @@ func TestLoadDir_Good(t *testing.T) {
assert.NotNil(t, cat.ByID("go-mod-001"))
}
func TestLoadDir_SortsFilesDeterministically(t *testing.T) {
dir := t.TempDir()
err := os.WriteFile(filepath.Join(dir, "z.yaml"), []byte(`- id: z-rule
title: "Z rule"
severity: info
languages: [go]
pattern: 'z'
fix: "z"
detection: regex
auto_fixable: false
`), 0o644)
require.NoError(t, err)
err = os.WriteFile(filepath.Join(dir, "a.yaml"), []byte(`- id: a-rule
title: "A rule"
severity: info
languages: [go]
pattern: 'a'
fix: "a"
detection: regex
auto_fixable: false
`), 0o644)
require.NoError(t, err)
cat, err := LoadDir(dir)
require.NoError(t, err)
require.Len(t, cat.Rules, 2)
assert.Equal(t, "a-rule", cat.Rules[0].ID)
assert.Equal(t, "z-rule", cat.Rules[1].ID)
}
func TestLoadDir_Bad_NonexistentDir(t *testing.T) {
_, err := LoadDir("/nonexistent/path/that/does/not/exist")
assert.Error(t, err)

View file

@ -1,13 +1,15 @@
package lint
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"path/filepath"
"strings"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// ComplexityConfig controls cyclomatic complexity analysis.
@ -45,9 +47,9 @@ func AnalyseComplexity(cfg ComplexityConfig) ([]ComplexityResult, error) {
var results []ComplexityResult
info, err := os.Stat(cfg.Path)
info, err := coreio.Local.Stat(cfg.Path)
if err != nil {
return nil, fmt.Errorf("stat %s: %w", cfg.Path, err)
return nil, coreerr.E("AnalyseComplexity", "stat "+cfg.Path, err)
}
if !info.IsDir() {
@ -81,7 +83,7 @@ func AnalyseComplexity(cfg ComplexityConfig) ([]ComplexityResult, error) {
return nil
})
if err != nil {
return nil, fmt.Errorf("walk %s: %w", cfg.Path, err)
return nil, coreerr.E("AnalyseComplexity", "walk "+cfg.Path, err)
}
return results, nil
@ -97,7 +99,7 @@ func AnalyseComplexitySource(src string, filename string, threshold int) ([]Comp
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
if err != nil {
return nil, fmt.Errorf("parse %s: %w", filename, err)
return nil, coreerr.E("AnalyseComplexitySource", "parse "+filename, err)
}
var results []ComplexityResult
@ -130,11 +132,11 @@ func AnalyseComplexitySource(src string, filename string, threshold int) ([]Comp
// analyseFile parses a single Go file and returns functions exceeding the threshold.
func analyseFile(path string, threshold int) ([]ComplexityResult, error) {
src, err := os.ReadFile(path)
src, err := coreio.Local.Read(path)
if err != nil {
return nil, fmt.Errorf("read %s: %w", path, err)
return nil, coreerr.E("analyseFile", "read "+path, err)
}
return AnalyseComplexitySource(string(src), path, threshold)
return AnalyseComplexitySource(src, path, threshold)
}
// calculateComplexity computes the cyclomatic complexity of a function.

182
pkg/lint/config.go Normal file
View file

@ -0,0 +1,182 @@
package lint
import (
"os"
"path/filepath"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
"gopkg.in/yaml.v3"
)
// DefaultConfigPath is the repo-local config path used by core-lint.
const DefaultConfigPath = ".core/lint.yaml"
// LintConfig defines which tools run for each language and how results fail the build.
//
// cfg := lint.DefaultConfig()
// cfg.FailOn = "warning"
type LintConfig struct {
Lint ToolGroups `yaml:"lint" json:"lint"`
Output string `yaml:"output" json:"output"`
FailOn string `yaml:"fail_on" json:"fail_on"`
Paths []string `yaml:"paths" json:"paths"`
Exclude []string `yaml:"exclude" json:"exclude"`
Schedules map[string]Schedule `yaml:"schedules,omitempty" json:"schedules,omitempty"`
}
// ToolGroups maps config groups to tool names.
type ToolGroups struct {
Go []string `yaml:"go,omitempty" json:"go,omitempty"`
PHP []string `yaml:"php,omitempty" json:"php,omitempty"`
JS []string `yaml:"js,omitempty" json:"js,omitempty"`
TS []string `yaml:"ts,omitempty" json:"ts,omitempty"`
Python []string `yaml:"python,omitempty" json:"python,omitempty"`
Infra []string `yaml:"infra,omitempty" json:"infra,omitempty"`
Security []string `yaml:"security,omitempty" json:"security,omitempty"`
Compliance []string `yaml:"compliance,omitempty" json:"compliance,omitempty"`
}
// Schedule declares a named lint run for external schedulers.
type Schedule struct {
Cron string `yaml:"cron" json:"cron"`
Categories []string `yaml:"categories,omitempty" json:"categories,omitempty"`
Output string `yaml:"output,omitempty" json:"output,omitempty"`
Paths []string `yaml:"paths,omitempty" json:"paths,omitempty"`
FailOn string `yaml:"fail_on,omitempty" json:"fail_on,omitempty"`
}
// DefaultConfig returns the RFC baseline config used when a repo has no local file yet.
//
// cfg := lint.DefaultConfig()
// cfg.Output = "sarif"
func DefaultConfig() LintConfig {
return LintConfig{
Lint: ToolGroups{
Go: []string{
"golangci-lint",
"gosec",
"govulncheck",
"staticcheck",
"revive",
"errcheck",
},
PHP: []string{
"phpstan",
"psalm",
"phpcs",
"phpmd",
"pint",
},
JS: []string{
"biome",
"oxlint",
"eslint",
"prettier",
},
TS: []string{
"biome",
"oxlint",
"typescript",
},
Python: []string{
"ruff",
"mypy",
"bandit",
"pylint",
},
Infra: []string{
"shellcheck",
"hadolint",
"yamllint",
"jsonlint",
"markdownlint",
},
Security: []string{
"gitleaks",
"trivy",
"gosec",
"bandit",
"semgrep",
},
Compliance: []string{
"syft",
"grype",
"scancode",
},
},
Output: "json",
FailOn: "error",
Paths: []string{"."},
Exclude: []string{"vendor/", "node_modules/", ".core/"},
}
}
// DefaultConfigYAML marshals the default config as the file content for `core-lint init`.
func DefaultConfigYAML() (string, error) {
data, err := yaml.Marshal(DefaultConfig())
if err != nil {
return "", coreerr.E("DefaultConfigYAML", "marshal default config", err)
}
return string(data), nil
}
// ResolveConfigPath resolves an explicit config path or the repo-local default.
//
// path := lint.ResolveConfigPath(".", "")
// override := lint.ResolveConfigPath("/repo", ".core/lint.yaml")
// ResolveConfigPath resolves an explicit config path or the repo-local default.
//
//	path := lint.ResolveConfigPath(".", "")
//	override := lint.ResolveConfigPath("/repo", ".core/lint.yaml")
func ResolveConfigPath(projectPath string, override string) string {
	root := projectPath
	if root == "" {
		// Empty project path means the current directory.
		root = "."
	}
	switch {
	case override == "":
		return filepath.Join(root, DefaultConfigPath)
	case filepath.IsAbs(override):
		// Absolute overrides are used verbatim, ignoring the project root.
		return override
	default:
		return filepath.Join(root, override)
	}
}
// LoadProjectConfig reads `.core/lint.yaml` if present, otherwise returns the default config.
//
// cfg, path, err := lint.LoadProjectConfig(".", "")
// cfg, _, err = lint.LoadProjectConfig("/repo", ".core/lint.yaml")
func LoadProjectConfig(projectPath string, override string) (LintConfig, string, error) {
config := DefaultConfig()
path := ResolveConfigPath(projectPath, override)
_, err := coreio.Local.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return config, "", nil
}
return config, "", coreerr.E("LoadProjectConfig", "stat "+path, err)
}
raw, err := coreio.Local.Read(path)
if err != nil {
return config, "", coreerr.E("LoadProjectConfig", "read "+path, err)
}
if err := yaml.Unmarshal([]byte(raw), &config); err != nil {
return config, "", coreerr.E("LoadProjectConfig", "parse "+path, err)
}
return config, path, nil
}
// ResolveSchedule returns a named schedule from the config.
//
// schedule, err := lint.ResolveSchedule(cfg, "nightly")
func ResolveSchedule(config LintConfig, name string) (*Schedule, error) {
if name == "" {
return nil, nil
}
schedule, ok := config.Schedules[name]
if !ok {
return nil, coreerr.E("ResolveSchedule", "schedule "+name+" not found", nil)
}
return &schedule, nil
}

View file

@ -2,22 +2,26 @@ package lint
import (
"bufio"
"cmp"
"encoding/json"
"fmt"
"math"
"os"
"regexp"
"slices"
"strconv"
"strings"
"time"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// CoverageSnapshot represents a point-in-time coverage measurement.
type CoverageSnapshot struct {
Timestamp time.Time `json:"timestamp"`
Packages map[string]float64 `json:"packages"` // package → coverage %
Total float64 `json:"total"` // overall coverage %
Meta map[string]string `json:"meta,omitempty"` // optional metadata (commit, branch, etc.)
Packages map[string]float64 `json:"packages"` // package → coverage %
Total float64 `json:"total"` // overall coverage %
Meta map[string]string `json:"meta,omitempty"` // optional metadata (commit, branch, etc.)
}
// CoverageRegression flags a package whose coverage changed between runs.
@ -51,32 +55,32 @@ func NewCoverageStore(path string) *CoverageStore {
func (s *CoverageStore) Append(snap CoverageSnapshot) error {
snapshots, err := s.Load()
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("load snapshots: %w", err)
return coreerr.E("CoverageStore.Append", "load snapshots", err)
}
snapshots = append(snapshots, snap)
data, err := json.MarshalIndent(snapshots, "", " ")
if err != nil {
return fmt.Errorf("marshal snapshots: %w", err)
return coreerr.E("CoverageStore.Append", "marshal snapshots", err)
}
if err := os.WriteFile(s.Path, data, 0644); err != nil {
return fmt.Errorf("write %s: %w", s.Path, err)
if err := coreio.Local.Write(s.Path, string(data)); err != nil {
return coreerr.E("CoverageStore.Append", "write "+s.Path, err)
}
return nil
}
// Load reads all snapshots from the store.
func (s *CoverageStore) Load() ([]CoverageSnapshot, error) {
data, err := os.ReadFile(s.Path)
raw, err := coreio.Local.Read(s.Path)
if err != nil {
return nil, err
}
var snapshots []CoverageSnapshot
if err := json.Unmarshal(data, &snapshots); err != nil {
return nil, fmt.Errorf("parse %s: %w", s.Path, err)
if err := json.Unmarshal([]byte(raw), &snapshots); err != nil {
return nil, coreerr.E("CoverageStore.Load", "parse "+s.Path, err)
}
return snapshots, nil
}
@ -245,5 +249,24 @@ func CompareCoverage(previous, current CoverageSnapshot) CoverageComparison {
}
}
slices.Sort(comp.NewPackages)
slices.Sort(comp.Removed)
slices.SortFunc(comp.Regressions, func(a, b CoverageRegression) int {
return cmp.Or(
cmp.Compare(a.Package, b.Package),
cmp.Compare(a.Previous, b.Previous),
cmp.Compare(a.Current, b.Current),
cmp.Compare(a.Delta, b.Delta),
)
})
slices.SortFunc(comp.Improvements, func(a, b CoverageRegression) int {
return cmp.Or(
cmp.Compare(a.Package, b.Package),
cmp.Compare(a.Previous, b.Previous),
cmp.Compare(a.Current, b.Current),
cmp.Compare(a.Delta, b.Delta),
)
})
return comp
}

View file

@ -79,6 +79,37 @@ func TestCompareCoverage(t *testing.T) {
assert.InDelta(t, 6.7, comp.TotalDelta, 0.1)
}
func TestCompareCoverage_SortsResultSlices(t *testing.T) {
prev := CoverageSnapshot{
Packages: map[string]float64{
"pkg/z": 90.0,
"pkg/b": 60.0,
"pkg/a": 80.0,
"pkg/c": 50.0,
},
Total: 70.0,
}
curr := CoverageSnapshot{
Packages: map[string]float64{
"pkg/b": 55.0,
"pkg/a": 70.0,
"pkg/c": 60.0,
"pkg/y": 40.0,
},
Total: 55.0,
}
comp := CompareCoverage(prev, curr)
assert.Equal(t, []string{"pkg/y"}, comp.NewPackages)
assert.Equal(t, []string{"pkg/z"}, comp.Removed)
require.Len(t, comp.Regressions, 2)
assert.Equal(t, "pkg/a", comp.Regressions[0].Package)
assert.Equal(t, "pkg/b", comp.Regressions[1].Package)
require.Len(t, comp.Improvements, 1)
assert.Equal(t, "pkg/c", comp.Improvements[0].Package)
}
func TestCompareCoverage_NoChange(t *testing.T) {
snap := CoverageSnapshot{
Packages: map[string]float64{"pkg/a": 80.0},

129
pkg/lint/detect_project.go Normal file
View file

@ -0,0 +1,129 @@
package lint
import (
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
)
var projectLanguageByExtension = map[string]string{
".go": "go",
".php": "php",
".cpp": "cpp",
".cc": "cpp",
".c": "cpp",
".h": "cpp",
".js": "js",
".jsx": "js",
".ts": "ts",
".tsx": "ts",
".py": "python",
".rs": "rust",
".sh": "shell",
".yaml": "yaml",
".yml": "yaml",
".json": "json",
".md": "markdown",
}
// Detect returns the project languages inferred from markers and file names.
//
// lint.Detect(".")
// lint.Detect("/path/to/project")
func Detect(path string) []string {
if path == "" {
path = "."
}
seen := make(map[string]bool)
info, err := os.Stat(path)
if err != nil {
return []string{}
}
if !info.IsDir() {
recordDetectedPath(seen, path)
return sortedDetectedLanguages(seen)
}
if shouldSkipTraversalRoot(path) {
return []string{}
}
_ = filepath.WalkDir(path, func(currentPath string, entry fs.DirEntry, walkErr error) error {
if walkErr != nil {
return nil
}
if entry.IsDir() {
if currentPath != path && IsExcludedDir(entry.Name()) {
return filepath.SkipDir
}
return nil
}
recordDetectedPath(seen, currentPath)
return nil
})
return sortedDetectedLanguages(seen)
}
func detectFromFiles(files []string) []string {
seen := make(map[string]bool)
for _, file := range files {
recordDetectedPath(seen, file)
}
return sortedDetectedLanguages(seen)
}
func recordDetectedPath(seen map[string]bool, path string) {
name := filepath.Base(path)
matchedMarker := false
switch {
case name == "go.mod":
seen["go"] = true
matchedMarker = true
case name == "composer.json":
seen["php"] = true
matchedMarker = true
case name == "package.json":
seen["js"] = true
matchedMarker = true
case name == "tsconfig.json":
seen["ts"] = true
matchedMarker = true
case name == "requirements.txt", name == "pyproject.toml":
seen["python"] = true
matchedMarker = true
case name == "Cargo.toml":
seen["rust"] = true
matchedMarker = true
case strings.HasPrefix(name, "Dockerfile"):
seen["dockerfile"] = true
matchedMarker = true
}
if matchedMarker {
return
}
if lang, ok := projectLanguageByExtension[strings.ToLower(filepath.Ext(name))]; ok {
seen[lang] = true
}
}
func sortedDetectedLanguages(seen map[string]bool) []string {
var languages []string
for language := range seen {
languages = append(languages, language)
}
slices.Sort(languages)
if languages == nil {
return []string{}
}
return languages
}

View file

@ -0,0 +1,59 @@
package lint
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDetect_Good_ProjectMarkersAndFiles(t *testing.T) {
dir := t.TempDir()
require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(dir, "main.cpp"), []byte("int main() { return 0; }\n"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}\n"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(dir, "tsconfig.json"), []byte("{}\n"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(dir, "requirements.txt"), []byte("ruff\n"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(dir, "Dockerfile"), []byte("FROM scratch\n"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(dir, "run.sh"), []byte("#!/bin/sh\n"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(dir, "README.md"), []byte("# Test\n"), 0o644))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "vendor"), 0o755))
require.NoError(t, os.WriteFile(filepath.Join(dir, "vendor", "ignored.go"), []byte("package ignored\n"), 0o644))
assert.Equal(t,
[]string{"cpp", "dockerfile", "go", "js", "markdown", "python", "shell", "ts"},
Detect(dir),
)
}
func TestDetectFromFiles_Good(t *testing.T) {
files := []string{
"main.go",
"src/lib.cc",
"web/app.ts",
"Dockerfile",
"scripts/run.sh",
"docs/index.md",
}
assert.Equal(t,
[]string{"cpp", "dockerfile", "go", "markdown", "shell", "ts"},
detectFromFiles(files),
)
}
func TestDetect_MissingPathReturnsEmptySlice(t *testing.T) {
assert.Equal(t, []string{}, Detect(filepath.Join(t.TempDir(), "missing")))
}
func TestDetect_Good_SkipsHiddenRootDirectory(t *testing.T) {
dir := t.TempDir()
hiddenDir := filepath.Join(dir, ".core")
require.NoError(t, os.MkdirAll(hiddenDir, 0o755))
require.NoError(t, os.WriteFile(filepath.Join(hiddenDir, "main.go"), []byte("package main\n"), 0o644))
assert.Equal(t, []string{}, Detect(hiddenDir))
}

View file

@ -2,20 +2,26 @@ package lint
import (
"bytes"
"fmt"
"regexp"
"strings"
coreerr "forge.lthn.ai/core/go-log"
)
// Finding represents a single match of a rule against a source file.
type Finding struct {
RuleID string `json:"rule_id"`
Title string `json:"title"`
Severity string `json:"severity"`
Tool string `json:"tool,omitempty"`
File string `json:"file"`
Line int `json:"line"`
Match string `json:"match"`
Fix string `json:"fix"`
Column int `json:"column,omitempty"`
Severity string `json:"severity"`
Code string `json:"code,omitempty"`
Message string `json:"message,omitempty"`
Category string `json:"category,omitempty"`
Fix string `json:"fix,omitempty"`
RuleID string `json:"rule_id,omitempty"`
Title string `json:"title,omitempty"`
Match string `json:"match,omitempty"`
Repo string `json:"repo,omitempty"`
}
@ -43,14 +49,14 @@ func NewMatcher(rules []Rule) (*Matcher, error) {
pat, err := regexp.Compile(r.Pattern)
if err != nil {
return nil, fmt.Errorf("compiling pattern for rule %s: %w", r.ID, err)
return nil, coreerr.E("NewMatcher", "compiling pattern for rule "+r.ID, err)
}
var excl *regexp.Regexp
if r.ExcludePattern != "" {
excl, err = regexp.Compile(r.ExcludePattern)
if err != nil {
return nil, fmt.Errorf("compiling exclude pattern for rule %s: %w", r.ID, err)
return nil, coreerr.E("NewMatcher", "compiling exclude pattern for rule "+r.ID, err)
}
}

29
pkg/lint/output.go Normal file
View file

@ -0,0 +1,29 @@
package lint
// ResolveRunOutputFormat resolves the report writer from the run input and project config.
//
// format, err := lint.ResolveRunOutputFormat(lint.RunInput{Path: ".", CI: true})
// format, err := lint.ResolveRunOutputFormat(lint.RunInput{Path: ".", Schedule: "nightly"})
func ResolveRunOutputFormat(input RunInput) (string, error) {
if input.Output != "" {
return input.Output, nil
}
if input.CI {
return "github", nil
}
config, _, err := LoadProjectConfig(input.Path, input.Config)
if err != nil {
return "", err
}
schedule, err := ResolveSchedule(config, input.Schedule)
if err != nil {
return "", err
}
if schedule != nil && schedule.Output != "" {
return schedule.Output, nil
}
if config.Output != "" {
return config.Output, nil
}
return "text", nil
}

64
pkg/lint/output_test.go Normal file
View file

@ -0,0 +1,64 @@
package lint
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestResolveRunOutputFormat_Good_Precedence(t *testing.T) {
dir := t.TempDir()
require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(`output: text
schedules:
nightly:
output: json
`), 0o644))
format, err := ResolveRunOutputFormat(RunInput{
Path: dir,
Output: "sarif",
CI: true,
})
require.NoError(t, err)
assert.Equal(t, "sarif", format)
format, err = ResolveRunOutputFormat(RunInput{
Path: dir,
Schedule: "nightly",
CI: true,
})
require.NoError(t, err)
assert.Equal(t, "github", format)
format, err = ResolveRunOutputFormat(RunInput{
Path: dir,
Schedule: "nightly",
})
require.NoError(t, err)
assert.Equal(t, "json", format)
format, err = ResolveRunOutputFormat(RunInput{
Path: dir,
})
require.NoError(t, err)
assert.Equal(t, "text", format)
}
func TestResolveRunOutputFormat_Good_ExplicitOutputBypassesConfigLoading(t *testing.T) {
dir := t.TempDir()
projectPath := filepath.Join(dir, "project-file")
require.NoError(t, os.WriteFile(projectPath, []byte("not a directory"), 0o644))
format, err := ResolveRunOutputFormat(RunInput{
Path: projectPath,
Output: "sarif",
Config: "broken/config.yaml",
Schedule: "nightly",
})
require.NoError(t, err)
assert.Equal(t, "sarif", format)
}

View file

@ -4,27 +4,49 @@ import (
"encoding/json"
"fmt"
"io"
"strings"
)
// Summary holds aggregate counts for a set of findings.
type Summary struct {
Total int `json:"total"`
BySeverity map[string]int `json:"by_severity"`
Errors int `json:"errors"`
Warnings int `json:"warnings"`
Info int `json:"info"`
Passed bool `json:"passed"`
BySeverity map[string]int `json:"by_severity,omitempty"`
}
// Summarise counts findings by severity.
//
// summary := lint.Summarise(findings)
func Summarise(findings []Finding) Summary {
s := Summary{
summary := Summary{
Total: len(findings),
BySeverity: make(map[string]int),
}
for _, f := range findings {
s.BySeverity[f.Severity]++
for _, finding := range findings {
severity := strings.TrimSpace(finding.Severity)
if severity == "" {
severity = "warning"
}
summary.BySeverity[severity]++
switch severity {
case "error":
summary.Errors++
case "info":
summary.Info++
default:
summary.Warnings++
}
}
return s
summary.Passed = summary.Errors == 0
return summary
}
// WriteJSON writes findings as a pretty-printed JSON array.
//
// _ = lint.WriteJSON(os.Stdout, findings)
func WriteJSON(w io.Writer, findings []Finding) error {
if findings == nil {
findings = []Finding{}
@ -35,6 +57,8 @@ func WriteJSON(w io.Writer, findings []Finding) error {
}
// WriteJSONL writes findings as newline-delimited JSON (one object per line).
//
// _ = lint.WriteJSONL(os.Stdout, findings)
func WriteJSONL(w io.Writer, findings []Finding) error {
for _, f := range findings {
data, err := json.Marshal(f)
@ -48,11 +72,172 @@ func WriteJSONL(w io.Writer, findings []Finding) error {
return nil
}
// WriteText writes findings in a human-readable format:
// WriteText writes findings in a human-readable format.
//
// file:line [severity] title (rule-id)
// lint.WriteText(os.Stdout, findings)
func WriteText(w io.Writer, findings []Finding) {
for _, f := range findings {
fmt.Fprintf(w, "%s:%d [%s] %s (%s)\n", f.File, f.Line, f.Severity, f.Title, f.RuleID)
for _, finding := range findings {
message := finding.Message
if message == "" {
message = finding.Title
}
code := finding.Code
if code == "" {
code = finding.RuleID
}
fmt.Fprintf(w, "%s:%d [%s] %s (%s)\n", finding.File, finding.Line, finding.Severity, message, code)
}
}
// WriteReportJSON writes the RFC report document as pretty-printed JSON
// (two-space indent) followed by a trailing newline.
//
//	_ = lint.WriteReportJSON(os.Stdout, report)
func WriteReportJSON(w io.Writer, report Report) error {
	data, err := json.MarshalIndent(report, "", "  ")
	if err != nil {
		return err
	}
	_, err = w.Write(append(data, '\n'))
	return err
}
// WriteReportText writes report findings followed by a short one-line
// summary of totals by severity.
//
//	lint.WriteReportText(os.Stdout, report)
func WriteReportText(w io.Writer, report Report) {
	WriteText(w, report.Findings)
	summary := report.Summary
	fmt.Fprintf(w, "\n%d finding(s): %d error(s), %d warning(s), %d info\n",
		summary.Total, summary.Errors, summary.Warnings, summary.Info)
}
// WriteReportGitHub writes GitHub Actions annotation lines.
//
// Message data and the file property are escaped per the workflow-command
// rules (% -> %25, CR -> %0D, LF -> %0A; property values additionally escape
// ":" and ","), so a multi-line or %-containing finding cannot break or
// inject annotation commands.
//
//	lint.WriteReportGitHub(os.Stdout, report)
func WriteReportGitHub(w io.Writer, report Report) {
	// escapeData escapes text that appears after the "::" command delimiter.
	escapeData := func(value string) string {
		value = strings.ReplaceAll(value, "%", "%25")
		value = strings.ReplaceAll(value, "\r", "%0D")
		return strings.ReplaceAll(value, "\n", "%0A")
	}
	// escapeProperty escapes key=value property values (file=...).
	escapeProperty := func(value string) string {
		value = escapeData(value)
		value = strings.ReplaceAll(value, ":", "%3A")
		return strings.ReplaceAll(value, ",", "%2C")
	}
	for _, finding := range report.Findings {
		level := githubAnnotationLevel(finding.Severity)
		location := ""
		if finding.File != "" {
			location = fmt.Sprintf(" file=%s", escapeProperty(finding.File))
			if finding.Line > 0 {
				location += fmt.Sprintf(",line=%d", finding.Line)
			}
			if finding.Column > 0 {
				location += fmt.Sprintf(",col=%d", finding.Column)
			}
		}
		// Fall back to Title/RuleID when the richer Message/Code fields are
		// unset, mirroring WriteText.
		message := finding.Message
		if message == "" {
			message = finding.Title
		}
		code := finding.Code
		if code == "" {
			code = finding.RuleID
		}
		fmt.Fprintf(w, "::%s%s::[%s] %s (%s)\n", level, location, finding.Tool, escapeData(message), escapeData(code))
	}
}
// WriteReportSARIF writes a minimal SARIF document for code scanning tools.
//
// The document is SARIF 2.1.0 with a single run whose driver is named
// "core-lint". Each finding becomes one result; findings without a File get
// no location. Message falls back to Title, and ruleId falls back to RuleID
// when Code is unset.
//
//	_ = lint.WriteReportSARIF(os.Stdout, report)
func WriteReportSARIF(w io.Writer, report Report) error {
	// Local throwaway types mirror just the SARIF fields we emit; they are
	// not exported because no caller needs to construct them.
	type sarifMessage struct {
	Text string `json:"text"`
	}
	type sarifRegion struct {
	StartLine int `json:"startLine,omitempty"`
	StartColumn int `json:"startColumn,omitempty"`
	}
	type sarifArtifactLocation struct {
	URI string `json:"uri,omitempty"`
	}
	type sarifPhysicalLocation struct {
	ArtifactLocation sarifArtifactLocation `json:"artifactLocation"`
	Region sarifRegion `json:"region,omitempty"`
	}
	type sarifLocation struct {
	PhysicalLocation sarifPhysicalLocation `json:"physicalLocation"`
	}
	type sarifResult struct {
	RuleID string `json:"ruleId,omitempty"`
	Level string `json:"level,omitempty"`
	Message sarifMessage `json:"message"`
	Locations []sarifLocation `json:"locations,omitempty"`
	}
	type sarifRun struct {
	Tool struct {
	Driver struct {
	Name string `json:"name"`
	} `json:"driver"`
	} `json:"tool"`
	Results []sarifResult `json:"results"`
	}
	type sarifLog struct {
	Version string `json:"version"`
	Schema string `json:"$schema"`
	Runs []sarifRun `json:"runs"`
	}
	sarifRunValue := sarifRun{}
	sarifRunValue.Tool.Driver.Name = "core-lint"
	for _, finding := range report.Findings {
	// Prefer the richer Message/Code, falling back to Title/RuleID as the
	// other writers do.
	message := finding.Message
	if message == "" {
	message = finding.Title
	}
	ruleID := finding.Code
	if ruleID == "" {
	ruleID = finding.RuleID
	}
	result := sarifResult{
	RuleID: ruleID,
	Level: sarifLevel(finding.Severity),
	Message: sarifMessage{Text: message},
	}
	if finding.File != "" {
	result.Locations = []sarifLocation{{
	PhysicalLocation: sarifPhysicalLocation{
	ArtifactLocation: sarifArtifactLocation{URI: finding.File},
	Region: sarifRegion{
	StartLine: finding.Line,
	StartColumn: finding.Column,
	},
	},
	}}
	}
	sarifRunValue.Results = append(sarifRunValue.Results, result)
	}
	return json.NewEncoder(w).Encode(sarifLog{
	Version: "2.1.0",
	Schema: "https://json.schemastore.org/sarif-2.1.0.json",
	Runs: []sarifRun{sarifRunValue},
	})
}
// githubAnnotationLevel maps a finding severity to a GitHub Actions
// annotation level: "error" -> error, "info" -> notice, everything else
// (including blank and unrecognised values) -> warning.
func githubAnnotationLevel(severity string) string {
	switch strings.ToLower(strings.TrimSpace(severity)) {
	case "error":
		return "error"
	case "info":
		return "notice"
	default:
		// The previous `case "warning", "":` branch was redundant with the
		// default; all non-error, non-info severities degrade to warning.
		return "warning"
	}
}
// sarifLevel maps a finding severity onto the SARIF result level vocabulary:
// error, warning, or note (for info). Blank or unknown severities fall back
// to warning.
func sarifLevel(severity string) string {
	levels := map[string]string{
		"error":   "error",
		"warning": "warning",
		"info":    "note",
	}
	if level, ok := levels[strings.ToLower(strings.TrimSpace(severity))]; ok {
		return level
	}
	return "warning"
}

View file

@ -134,3 +134,45 @@ func TestWriteText_Good_Empty(t *testing.T) {
WriteText(&buf, nil)
assert.Empty(t, buf.String())
}
// TestWriteReportGitHub_Good_MapsInfoToNotice verifies that an info-severity
// finding is emitted as a GitHub "notice" annotation with file, line, and
// column properties attached.
func TestWriteReportGitHub_Good_MapsInfoToNotice(t *testing.T) {
	var buf bytes.Buffer
	WriteReportGitHub(&buf, Report{
	Findings: []Finding{{
	Tool: "demo",
	File: "example.go",
	Line: 7,
	Column: 3,
	Severity: "info",
	Code: "demo-rule",
	Message: "explanation",
	}},
	})
	assert.Contains(t, buf.String(), "::notice file=example.go,line=7,col=3::[demo] explanation (demo-rule)")
}
// TestWriteReportSARIF_Good_MapsInfoToNote verifies that an info-severity
// finding is emitted with the SARIF level "note" by decoding the produced
// document and inspecting the first result.
func TestWriteReportSARIF_Good_MapsInfoToNote(t *testing.T) {
	var buf bytes.Buffer
	err := WriteReportSARIF(&buf, Report{
	Findings: []Finding{{
	Tool: "demo",
	File: "example.go",
	Line: 7,
	Column: 3,
	Severity: "info",
	Code: "demo-rule",
	Message: "explanation",
	}},
	})
	require.NoError(t, err)
	var decoded map[string]any
	require.NoError(t, json.Unmarshal(buf.Bytes(), &decoded))
	runs := decoded["runs"].([]any)
	results := runs[0].(map[string]any)["results"].([]any)
	assert.Equal(t, "note", results[0].(map[string]any)["level"])
}

View file

@ -5,6 +5,7 @@ import (
"regexp"
"slices"
coreerr "forge.lthn.ai/core/go-log"
"gopkg.in/yaml.v3"
)
@ -32,35 +33,35 @@ type Rule struct {
// Validate checks that the rule has all required fields and that regex patterns compile.
func (r *Rule) Validate() error {
if r.ID == "" {
return fmt.Errorf("rule validation: id must not be empty")
return coreerr.E("Rule.Validate", "id must not be empty", nil)
}
if r.Title == "" {
return fmt.Errorf("rule %s: title must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": title must not be empty", nil)
}
if r.Severity == "" {
return fmt.Errorf("rule %s: severity must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": severity must not be empty", nil)
}
if !slices.Contains(validSeverities, r.Severity) {
return fmt.Errorf("rule %s: severity %q is not valid (want one of %v)", r.ID, r.Severity, validSeverities)
return coreerr.E("Rule.Validate", fmt.Sprintf("rule %s: severity %q is not valid (want one of %v)", r.ID, r.Severity, validSeverities), nil)
}
if len(r.Languages) == 0 {
return fmt.Errorf("rule %s: languages must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": languages must not be empty", nil)
}
if r.Pattern == "" {
return fmt.Errorf("rule %s: pattern must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": pattern must not be empty", nil)
}
if r.Detection == "" {
return fmt.Errorf("rule %s: detection must not be empty", r.ID)
return coreerr.E("Rule.Validate", "rule "+r.ID+": detection must not be empty", nil)
}
// Only validate regex compilation when detection type is regex.
if r.Detection == "regex" {
if _, err := regexp.Compile(r.Pattern); err != nil {
return fmt.Errorf("rule %s: pattern does not compile: %w", r.ID, err)
return coreerr.E("Rule.Validate", "rule "+r.ID+": pattern does not compile", err)
}
if r.ExcludePattern != "" {
if _, err := regexp.Compile(r.ExcludePattern); err != nil {
return fmt.Errorf("rule %s: exclude_pattern does not compile: %w", r.ID, err)
return coreerr.E("Rule.Validate", "rule "+r.ID+": exclude_pattern does not compile", err)
}
}
}
@ -72,7 +73,7 @@ func (r *Rule) Validate() error {
func ParseRules(data []byte) ([]Rule, error) {
var rules []Rule
if err := yaml.Unmarshal(data, &rules); err != nil {
return nil, fmt.Errorf("parsing rules: %w", err)
return nil, coreerr.E("ParseRules", "parsing rules", err)
}
return rules, nil
}

View file

@ -1,27 +1,34 @@
package lint
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// extensionMap maps file extensions to language identifiers.
var extensionMap = map[string]string{
".go": "go",
".php": "php",
".ts": "ts",
".tsx": "ts",
".js": "js",
".jsx": "js",
".cpp": "cpp",
".cc": "cpp",
".c": "cpp",
".h": "cpp",
".py": "py",
".go": "go",
".php": "php",
".ts": "ts",
".tsx": "ts",
".js": "js",
".jsx": "js",
".cpp": "cpp",
".cc": "cpp",
".c": "cpp",
".h": "cpp",
".py": "python",
".rs": "rust",
".sh": "shell",
".yaml": "yaml",
".yml": "yaml",
".json": "json",
".md": "markdown",
}
// defaultExcludes lists directory names that are always skipped during scanning.
@ -34,32 +41,51 @@ var defaultExcludes = []string{
}
// DetectLanguage returns the language identifier for a filename based on its extension.
// Returns an empty string for unrecognised extensions.
//
// lint.DetectLanguage("main.go")
// lint.DetectLanguage("Dockerfile")
func DetectLanguage(filename string) string {
ext := filepath.Ext(filename)
base := filepath.Base(filename)
if strings.HasPrefix(base, "Dockerfile") {
return "dockerfile"
}
ext := filepath.Ext(base)
if lang, ok := extensionMap[ext]; ok {
return lang
}
return ""
}
// shouldSkipTraversalRoot reports whether a walk root itself is an
// always-excluded directory (by basename), in which case scanning should not
// descend into it at all. "." and bare separator roots are never skipped.
func shouldSkipTraversalRoot(path string) bool {
	cleaned := filepath.Clean(path)
	if cleaned == "." {
		return false
	}
	base := filepath.Base(cleaned)
	switch base {
	case ".", string(filepath.Separator):
		return false
	}
	return IsExcludedDir(base)
}
// Scanner walks directory trees and matches files against lint rules.
type Scanner struct {
matcher *Matcher
rules []Rule
excludes []string
matcher *Matcher
rules []Rule
}
// NewScanner creates a Scanner with the given rules and default directory exclusions.
func NewScanner(rules []Rule) (*Scanner, error) {
m, err := NewMatcher(rules)
matcher, err := NewMatcher(rules)
if err != nil {
return nil, err
}
return &Scanner{
matcher: m,
rules: rules,
excludes: slices.Clone(defaultExcludes),
matcher: matcher,
rules: rules,
}, nil
}
@ -68,15 +94,19 @@ func NewScanner(rules []Rule) (*Scanner, error) {
func (s *Scanner) ScanDir(root string) ([]Finding, error) {
var findings []Finding
if shouldSkipTraversalRoot(root) {
return findings, nil
}
err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
// Skip excluded directories.
// Skip excluded directories and hidden directories.
if d.IsDir() {
name := d.Name()
if slices.Contains(s.excludes, name) {
if IsExcludedDir(name) {
return filepath.SkipDir
}
return nil
@ -94,13 +124,14 @@ func (s *Scanner) ScanDir(root string) ([]Finding, error) {
return nil
}
content, err := os.ReadFile(path)
raw, err := coreio.Local.Read(path)
if err != nil {
return fmt.Errorf("reading %s: %w", path, err)
return coreerr.E("Scanner.ScanDir", "reading "+path, err)
}
content := []byte(raw)
// Build a matcher scoped to this file's language.
m, err := NewMatcher(langRules)
matcher, err := NewMatcher(langRules)
if err != nil {
return err
}
@ -111,13 +142,13 @@ func (s *Scanner) ScanDir(root string) ([]Finding, error) {
relPath = path
}
found := m.Match(relPath, content)
found := matcher.Match(relPath, content)
findings = append(findings, found...)
return nil
})
if err != nil {
return nil, fmt.Errorf("scanning %s: %w", root, err)
return nil, coreerr.E("Scanner.ScanDir", "scanning "+root, err)
}
return findings, nil
@ -125,10 +156,11 @@ func (s *Scanner) ScanDir(root string) ([]Finding, error) {
// ScanFile scans a single file against all rules.
func (s *Scanner) ScanFile(path string) ([]Finding, error) {
content, err := os.ReadFile(path)
raw, err := coreio.Local.Read(path)
if err != nil {
return nil, fmt.Errorf("reading %s: %w", path, err)
return nil, coreerr.E("Scanner.ScanFile", "reading "+path, err)
}
content := []byte(raw)
lang := DetectLanguage(filepath.Base(path))
if lang == "" {
@ -140,12 +172,12 @@ func (s *Scanner) ScanFile(path string) ([]Finding, error) {
return nil, nil
}
m, err := NewMatcher(langRules)
matcher, err := NewMatcher(langRules)
if err != nil {
return nil, err
}
return m.Match(path, content), nil
return matcher.Match(path, content), nil
}
// filterRulesByLanguage returns rules that include the given language.

View file

@ -25,9 +25,10 @@ func TestDetectLanguage_Good(t *testing.T) {
{"core.c", "cpp"},
{"app.js", "js"},
{"component.jsx", "js"},
{"unknown.rs", ""},
{"unknown.rs", "rust"},
{"noextension", ""},
{"file.py", "py"},
{"file.py", "python"},
{"Dockerfile", "dockerfile"},
}
for _, tt := range tests {
@ -180,6 +181,34 @@ func TestScanFile_Good(t *testing.T) {
assert.Equal(t, "test-panic", findings[0].RuleID)
}
// TestScanFile_Good_Python checks that .py files are detected as "python"
// and matched against python-scoped regex rules.
func TestScanFile_Good_Python(t *testing.T) {
	dir := t.TempDir()
	file := filepath.Join(dir, "app.py")
	err := os.WriteFile(file, []byte("print('hello')\n# TODO: fix\n"), 0o644)
	require.NoError(t, err)
	rules := []Rule{
	{
	ID: "python-todo",
	Title: "Python TODO",
	Severity: "low",
	Languages: []string{"python"},
	Pattern: `TODO`,
	Fix: "Remove TODO",
	Detection: "regex",
	},
	}
	s, err := NewScanner(rules)
	require.NoError(t, err)
	findings, err := s.ScanFile(file)
	require.NoError(t, err)
	require.Len(t, findings, 1)
	assert.Equal(t, "python-todo", findings[0].RuleID)
	assert.Equal(t, "python", DetectLanguage(file))
}
func TestScanDir_Good_Subdirectories(t *testing.T) {
dir := t.TempDir()
@ -209,6 +238,58 @@ func TestScanDir_Good_Subdirectories(t *testing.T) {
require.Len(t, findings, 1)
}
// TestScanDir_Good_SkipsHiddenRootDirectory verifies that scanning a hidden
// directory passed directly as the walk root (here ".git") yields no
// findings even though it contains matching files.
func TestScanDir_Good_SkipsHiddenRootDirectory(t *testing.T) {
	dir := t.TempDir()
	hiddenDir := filepath.Join(dir, ".git")
	require.NoError(t, os.MkdirAll(hiddenDir, 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(hiddenDir, "main.go"), []byte("// TODO: hidden\n"), 0o644))
	rules := []Rule{
	{
	ID: "test-001",
	Title: "Found a TODO",
	Severity: "low",
	Languages: []string{"go"},
	Pattern: `TODO`,
	Fix: "Remove TODO",
	Detection: "regex",
	},
	}
	s, err := NewScanner(rules)
	require.NoError(t, err)
	findings, err := s.ScanDir(hiddenDir)
	require.NoError(t, err)
	assert.Empty(t, findings)
}
// TestScanDir_Good_SkipsHiddenNestedDirectory verifies that hidden
// directories nested below the walk root (here services/.generated) are
// pruned during scanning.
func TestScanDir_Good_SkipsHiddenNestedDirectory(t *testing.T) {
	dir := t.TempDir()
	hiddenDir := filepath.Join(dir, "services", ".generated")
	require.NoError(t, os.MkdirAll(hiddenDir, 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(hiddenDir, "main.go"), []byte("// TODO: hidden\n"), 0o644))
	rules := []Rule{
	{
	ID: "test-001",
	Title: "Found a TODO",
	Severity: "low",
	Languages: []string{"go"},
	Pattern: `TODO`,
	Fix: "Remove TODO",
	Detection: "regex",
	},
	}
	s, err := NewScanner(rules)
	require.NoError(t, err)
	findings, err := s.ScanDir(dir)
	require.NoError(t, err)
	assert.Empty(t, findings)
}
func TestScanDir_Bad_NonexistentDir(t *testing.T) {
rules := []Rule{
{

746
pkg/lint/service.go Normal file
View file

@ -0,0 +1,746 @@
package lint
import (
"context"
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
"time"
coreio "forge.lthn.ai/core/go-io"
coreerr "forge.lthn.ai/core/go-log"
)
// Markers delimiting the block InstallHook writes into the git pre-commit
// hook, so RemoveHook can later find and strip exactly what was installed.
const (
	hookStartMarker = "# core-lint hook start"
	hookEndMarker   = "# core-lint hook end"
)
// RunInput is the DTO for `core-lint run` and the language/category shortcuts.
//
//	input := lint.RunInput{Path: ".", Schedule: "nightly", Output: "json"}
//	report, err := lint.NewService().Run(ctx, input)
type RunInput struct {
	Path     string   `json:"path"`               // project root to lint; "" defaults to "."
	Output   string   `json:"output,omitempty"`   // output format; CI mode defaults this to "github"
	Config   string   `json:"config,omitempty"`   // explicit config file path override
	Schedule string   `json:"schedule,omitempty"` // named schedule from the project config
	FailOn   string   `json:"fail_on,omitempty"`  // severity threshold: error, warning, or info
	Category string   `json:"category,omitempty"` // restrict the run to one adapter category
	Lang     string   `json:"lang,omitempty"`     // restrict the run to one language
	Hook     bool     `json:"hook,omitempty"`     // pre-commit mode: staged files, fast adapters only
	CI       bool     `json:"ci,omitempty"`       // CI mode: also enables the security tool group
	Files    []string `json:"files,omitempty"`    // explicit file scope; non-nil (even empty) wins
	SBOM     bool     `json:"sbom,omitempty"`     // also enables the compliance tool group
}
// ToolInfo describes a supported linter tool and whether it is available in PATH.
//
//	tools := lint.NewService().Tools([]string{"go"})
type ToolInfo struct {
	Name        string   `json:"name"`                  // adapter name
	Available   bool     `json:"available"`             // whether the adapter reports itself runnable
	Languages   []string `json:"languages"`             // languages the adapter covers
	Category    string   `json:"category"`              // adapter category (e.g. security, compliance)
	Entitlement string   `json:"entitlement,omitempty"` // entitlement required to use the tool, if any
}
// Report aggregates every tool run into a single output document.
//
//	report, err := lint.NewService().Run(context.Background(), lint.RunInput{Path: ".", Output: "json"})
type Report struct {
	Project   string    `json:"project"`   // project display name (base of the project path)
	Timestamp time.Time `json:"timestamp"` // UTC start time of the run
	Duration  string    `json:"duration"`  // wall-clock duration, rounded to milliseconds
	Languages []string  `json:"languages"` // languages covered by the run; never nil from Run
	Tools     []ToolRun `json:"tools"`     // per-adapter execution records, sorted by name
	Findings  []Finding `json:"findings"`  // merged, deduplicated, sorted findings
	Summary   Summary   `json:"summary"`   // aggregate counts plus the pass/fail verdict
}
// ToolRun records the execution status of one adapter.
type ToolRun struct {
	Name     string `json:"name"`              // adapter name
	Version  string `json:"version,omitempty"` // tool version when the adapter reports one
	Status   string `json:"status"`            // e.g. "skipped" for slow adapters in hook mode
	Duration string `json:"duration"`          // execution time of this adapter
	Findings int    `json:"findings"`          // number of findings this adapter produced
}
// Service orchestrates the configured lint adapters for a project.
//
//	svc := lint.NewService()
//	report, err := svc.Run(ctx, lint.RunInput{Path: ".", Output: "json"})
type Service struct {
	adapters []Adapter // adapter registry; populated by NewService
}

// NewService constructs a lint orchestrator with the built-in adapter registry.
//
//	svc := lint.NewService()
func NewService() *Service {
	return &Service{adapters: defaultAdapters()}
}
// Run executes the selected adapters and returns the merged report.
//
// The fail-on threshold is resolved in priority order: explicit input, then
// schedule override, then project config. When file scoping yields nothing
// to lint (hook mode with no staged files, or a scoped run matching no
// files) an empty report is returned without invoking any adapter.
//
//	report, err := lint.NewService().Run(ctx, lint.RunInput{Path: ".", Output: "json"})
func (service *Service) Run(ctx context.Context, input RunInput) (Report, error) {
	startedAt := time.Now().UTC()
	input = normaliseRunInput(input)
	config, _, err := LoadProjectConfig(input.Path, input.Config)
	if err != nil {
		return Report{}, err
	}
	schedule, err := ResolveSchedule(config, input.Schedule)
	if err != nil {
		return Report{}, err
	}
	// Threshold precedence: CLI input > schedule > project config.
	if input.FailOn == "" && schedule != nil && schedule.FailOn != "" {
		input.FailOn = schedule.FailOn
	}
	if input.FailOn == "" {
		input.FailOn = config.FailOn
	}
	files, scoped, err := service.scopeFiles(input.Path, config, input, schedule)
	if err != nil {
		return Report{}, err
	}
	// Nothing to lint: hook runs with no staged files and scoped runs that
	// matched no files both short-circuit with an empty report. (Previously
	// written as two byte-identical branches.)
	if (input.Hook || scoped) && len(files) == 0 {
		report := emptyRunReport(input.Path, startedAt)
		report.Summary.Passed = passesThreshold(report.Summary, input.FailOn)
		return report, nil
	}
	languages := service.languagesForInput(input, files, scoped)
	selectedAdapters := service.selectAdapters(config, languages, input, schedule)
	var findings []Finding
	var toolRuns []ToolRun
	for _, adapter := range selectedAdapters {
		// Hook mode only runs adapters fast enough for pre-commit; slower
		// ones are recorded as skipped so the report stays complete.
		if input.Hook && !adapter.Fast() {
			toolRuns = append(toolRuns, ToolRun{
				Name:     adapter.Name(),
				Status:   "skipped",
				Duration: "0s",
				Findings: 0,
			})
			continue
		}
		result := adapter.Run(ctx, input, files)
		toolRuns = append(toolRuns, result.Tool)
		findings = append(findings, normaliseReportFindings(result.Findings, input.Path)...)
	}
	findings = dedupeFindings(findings)
	sortToolRuns(toolRuns)
	sortFindings(findings)
	// Guarantee non-nil slices so JSON output emits [] rather than null.
	if languages == nil {
		languages = []string{}
	}
	if toolRuns == nil {
		toolRuns = []ToolRun{}
	}
	if findings == nil {
		findings = []Finding{}
	}
	report := Report{
		Project:   projectName(input.Path),
		Timestamp: startedAt,
		Duration:  time.Since(startedAt).Round(time.Millisecond).String(),
		Languages: slices.Clone(languages),
		Tools:     toolRuns,
		Findings:  findings,
		Summary:   Summarise(findings),
	}
	report.Summary.Passed = passesThreshold(report.Summary, input.FailOn)
	return report, nil
}

// emptyRunReport builds the zero-findings report used when a run has no
// files to lint; Summary.Passed is set by the caller against its threshold.
func emptyRunReport(path string, startedAt time.Time) Report {
	return Report{
		Project:   projectName(path),
		Timestamp: startedAt,
		Duration:  time.Since(startedAt).Round(time.Millisecond).String(),
		Languages: []string{},
		Tools:     []ToolRun{},
		Findings:  []Finding{},
		Summary:   Summarise(nil),
	}
}
// Tools returns the current adapter inventory for display in the CLI.
//
// When languages is non-empty, only adapters matching at least one of the
// given languages are listed. The result is sorted by name and never nil.
//
//	tools := lint.NewService().Tools([]string{"go"})
func (service *Service) Tools(languages []string) []ToolInfo {
	tools := []ToolInfo{}
	for _, adapter := range service.adapters {
		if len(languages) > 0 && !adapter.MatchesLanguage(languages) {
			continue
		}
		info := ToolInfo{
			Name:        adapter.Name(),
			Available:   adapter.Available(),
			Languages:   slices.Clone(adapter.Languages()),
			Category:    adapter.Category(),
			Entitlement: adapter.Entitlement(),
		}
		tools = append(tools, info)
	}
	slices.SortFunc(tools, func(a ToolInfo, b ToolInfo) int {
		return strings.Compare(a.Name, b.Name)
	})
	return tools
}
// WriteDefaultConfig creates `.core/lint.yaml` in the target project.
//
// Unless force is set, an existing config file is an error so a project's
// customisations are never silently overwritten. Returns the path written.
//
//	path, err := svc.WriteDefaultConfig(".", false)
func (service *Service) WriteDefaultConfig(projectPath string, force bool) (string, error) {
	if projectPath == "" {
	projectPath = "."
	}
	targetPath := filepath.Join(projectPath, DefaultConfigPath)
	if !force {
	// Refuse to clobber an existing config unless explicitly forced.
	if _, err := os.Stat(targetPath); err == nil {
	return "", coreerr.E("Service.WriteDefaultConfig", targetPath+" already exists", nil)
	}
	}
	if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil {
	return "", coreerr.E("Service.WriteDefaultConfig", "mkdir "+filepath.Dir(targetPath), err)
	}
	content, err := DefaultConfigYAML()
	if err != nil {
	return "", err
	}
	if err := coreio.Local.Write(targetPath, content); err != nil {
	return "", coreerr.E("Service.WriteDefaultConfig", "write "+targetPath, err)
	}
	return targetPath, nil
}
// InstallHook adds a git pre-commit hook that runs `core-lint run --hook`.
//
// A fresh hook file is created with a shebang and an exec'ing block; an
// existing hook keeps its content and gets a propagating variant appended.
// Installation is idempotent: if the start marker is already present the
// hook is left untouched.
//
//	_ = lint.NewService().InstallHook(".")
func (service *Service) InstallHook(projectPath string) error {
	hookPath, err := hookFilePath(projectPath)
	if err != nil {
	return err
	}
	block := hookScriptBlock(false)
	content := "#!/bin/sh\n" + block
	raw, readErr := coreio.Local.Read(hookPath)
	if readErr == nil {
	// Hook file already exists: either it already carries our block
	// (nothing to do) or the block is appended after the existing script.
	if strings.Contains(raw, hookStartMarker) {
	return nil
	}
	trimmed := strings.TrimRight(raw, "\n")
	if trimmed == "" {
	content = "#!/bin/sh\n" + block
	} else {
	// Appended form uses `|| exit $?` instead of exec so the prior
	// script's behaviour is preserved.
	content = trimmed + "\n\n" + hookScriptBlock(true)
	}
	}
	if err := os.MkdirAll(filepath.Dir(hookPath), 0o755); err != nil {
	return coreerr.E("Service.InstallHook", "mkdir "+filepath.Dir(hookPath), err)
	}
	if err := coreio.Local.Write(hookPath, content); err != nil {
	return coreerr.E("Service.InstallHook", "write "+hookPath, err)
	}
	// Hooks must be executable for git to run them.
	if err := os.Chmod(hookPath, 0o755); err != nil {
	return coreerr.E("Service.InstallHook", "chmod "+hookPath, err)
	}
	return nil
}
// RemoveHook removes the block previously installed by InstallHook.
//
// A missing hook file or a hook without intact markers is treated as
// success. When stripping the block leaves nothing but whitespace, the hook
// file itself is deleted.
//
//	_ = lint.NewService().RemoveHook(".")
func (service *Service) RemoveHook(projectPath string) error {
	hookPath, err := hookFilePath(projectPath)
	if err != nil {
	return err
	}
	raw, err := coreio.Local.Read(hookPath)
	if err != nil {
	if os.IsNotExist(err) {
	return nil
	}
	return coreerr.E("Service.RemoveHook", "read "+hookPath, err)
	}
	startIndex := strings.Index(raw, hookStartMarker)
	endIndex := strings.Index(raw, hookEndMarker)
	// Require both markers in order; otherwise leave the hook untouched.
	if startIndex < 0 || endIndex < 0 || endIndex < startIndex {
	return nil
	}
	endIndex += len(hookEndMarker)
	content := strings.TrimRight(raw[:startIndex]+raw[endIndex:], "\n")
	if strings.TrimSpace(content) == "" {
	// Only our block remained: remove the file rather than leave an
	// empty hook behind.
	if err := os.Remove(hookPath); err != nil && !os.IsNotExist(err) {
	return coreerr.E("Service.RemoveHook", "remove "+hookPath, err)
	}
	return nil
	}
	if err := coreio.Local.Write(hookPath, content); err != nil {
	return coreerr.E("Service.RemoveHook", "write "+hookPath, err)
	}
	return nil
}
// languagesForInput decides which languages a run covers: an explicit Lang
// wins, a scoped run infers languages from its file list, and otherwise the
// project tree is probed via Detect.
func (service *Service) languagesForInput(input RunInput, files []string, scoped bool) []string {
	switch {
	case input.Lang != "":
		return []string{input.Lang}
	case scoped:
		return detectFromFiles(files)
	default:
		return Detect(input.Path)
	}
}
// scopeFiles decides which files a run covers and whether the run is scoped.
//
// Precedence: an explicit non-nil Files list, then staged files in hook
// mode, then schedule paths, then config paths/excludes when they deviate
// from the defaults. A (nil, false, nil) return means the whole project
// tree is in scope.
func (service *Service) scopeFiles(projectPath string, config LintConfig, input RunInput, schedule *Schedule) ([]string, bool, error) {
	// Non-nil (even empty) Files is an explicit scope from the caller.
	if input.Files != nil {
	return slices.Clone(input.Files), true, nil
	}
	if input.Hook {
	files, err := service.stagedFiles(projectPath)
	return files, true, err
	}
	if schedule != nil && len(schedule.Paths) > 0 {
	files, err := collectConfiguredFiles(projectPath, schedule.Paths, config.Exclude)
	return files, true, err
	}
	// Only treat config paths/excludes as a scope when they differ from the
	// defaults; otherwise the run scans the whole tree unscoped.
	if !slices.Equal(config.Paths, DefaultConfig().Paths) || !slices.Equal(config.Exclude, DefaultConfig().Exclude) {
	files, err := collectConfiguredFiles(projectPath, config.Paths, config.Exclude)
	return files, true, err
	}
	return nil, false, nil
}
// selectAdapters filters the registry down to the adapters a run should
// execute, honouring the enabled tool names from config, the category
// filter, and language matching. For Go projects the catalog adapter is
// prepended unless the "compliance" category was selected.
func (service *Service) selectAdapters(config LintConfig, languages []string, input RunInput, schedule *Schedule) []Adapter {
	categories := selectedCategories(input, schedule)
	enabled := make(map[string]bool)
	for _, name := range enabledToolNames(config, languages, input, categories) {
	enabled[name] = true
	}
	var selected []Adapter
	for _, adapter := range service.adapters {
	// An empty enabled set means "no name filter"; otherwise the adapter
	// must be explicitly listed.
	if len(enabled) > 0 && !enabled[adapter.Name()] {
	continue
	}
	if len(categories) > 0 && !slices.Contains(categories, adapter.Category()) {
	continue
	}
	if !adapter.MatchesLanguage(languages) {
	continue
	}
	selected = append(selected, adapter)
	}
	if slices.Contains(languages, "go") && !slices.Contains(categories, "compliance") {
	if !hasAdapter(selected, "catalog") {
	selected = append([]Adapter{newCatalogAdapter()}, selected...)
	}
	}
	return selected
}
// stagedFiles returns the paths currently staged in git's index (one per
// line from `git diff --cached --name-only`), used to scope hook-mode runs.
func (service *Service) stagedFiles(projectPath string) ([]string, error) {
	toolkit := NewToolkit(projectPath)
	stdout, stderr, exitCode, err := toolkit.Run("git", "diff", "--cached", "--name-only")
	// Only fail when git reported a non-zero exit alongside the error.
	if err != nil && exitCode != 0 {
	return nil, coreerr.E("Service.stagedFiles", "git diff --cached --name-only: "+strings.TrimSpace(stderr), err)
	}
	var files []string
	for line := range strings.SplitSeq(strings.TrimSpace(stdout), "\n") {
	line = strings.TrimSpace(line)
	if line == "" {
	continue
	}
	files = append(files, line)
	}
	return files, nil
}
// collectConfiguredFiles expands configured paths (individual files or
// directories) into a sorted, de-duplicated list of project-relative file
// paths, honouring configured excludes and skipping hidden and
// always-excluded directories.
func collectConfiguredFiles(projectPath string, paths []string, excludes []string) ([]string, error) {
	seen := make(map[string]bool)
	var files []string
	for _, path := range paths {
	if path == "" {
	continue
	}
	absolutePath := path
	if !filepath.IsAbs(absolutePath) {
	absolutePath = filepath.Join(projectPath, path)
	}
	info, err := os.Stat(absolutePath)
	if err != nil {
	return nil, coreerr.E("collectConfiguredFiles", "stat "+absolutePath, err)
	}
	// A configured root that is itself an always-excluded directory is
	// skipped wholesale.
	if info.IsDir() && shouldSkipTraversalRoot(absolutePath) {
	continue
	}
	// addFile records a candidate unless it is hidden, matches an exclude
	// (checked against both the relative and cleaned absolute forms), or
	// was already seen.
	addFile := func(candidate string) {
	relativePath := relativeConfiguredPath(projectPath, candidate)
	if hasHiddenDirectory(relativePath) || hasHiddenDirectory(filepath.ToSlash(filepath.Clean(candidate))) {
	return
	}
	if matchesConfiguredExclude(relativePath, excludes) || matchesConfiguredExclude(filepath.ToSlash(filepath.Clean(candidate)), excludes) {
	return
	}
	if seen[relativePath] {
	return
	}
	seen[relativePath] = true
	files = append(files, relativePath)
	}
	if !info.IsDir() {
	addFile(absolutePath)
	continue
	}
	walkErr := filepath.WalkDir(absolutePath, func(currentPath string, entry fs.DirEntry, walkErr error) error {
	if walkErr != nil {
	return walkErr
	}
	if entry.IsDir() {
	relativeDir := relativeConfiguredPath(projectPath, currentPath)
	if matchesConfiguredExclude(relativeDir, excludes) || matchesConfiguredExclude(filepath.ToSlash(filepath.Clean(currentPath)), excludes) {
	return filepath.SkipDir
	}
	// The walk root itself was vetted above; only prune nested
	// excluded directory names.
	if currentPath != absolutePath && IsExcludedDir(entry.Name()) {
	return filepath.SkipDir
	}
	return nil
	}
	addFile(currentPath)
	return nil
	})
	if walkErr != nil {
	return nil, coreerr.E("collectConfiguredFiles", "walk "+absolutePath, walkErr)
	}
	}
	slices.Sort(files)
	return files, nil
}
// relativeConfiguredPath converts a candidate path to a cleaned,
// slash-separated path relative to the project root when the candidate lies
// inside it; paths outside the project (or with no project root) keep their
// original form, cleaned and slash-normalised.
func relativeConfiguredPath(projectPath string, candidate string) string {
	result := candidate
	if projectPath != "" {
		rel, err := filepath.Rel(projectPath, candidate)
		if err == nil && rel != "" && !strings.HasPrefix(rel, "..") {
			result = rel
		}
	}
	return filepath.ToSlash(filepath.Clean(result))
}
// matchesConfiguredExclude reports whether candidate equals, or lives under,
// one of the configured exclude paths. Blank and "." excludes are ignored;
// comparisons are made on cleaned, slash-separated forms.
func matchesConfiguredExclude(candidate string, excludes []string) bool {
	if candidate == "" || len(excludes) == 0 {
		return false
	}
	cleaned := filepath.ToSlash(filepath.Clean(candidate))
	for _, exclude := range excludes {
		prefix := filepath.ToSlash(filepath.Clean(strings.TrimSpace(exclude)))
		if prefix == "." || prefix == "" {
			continue
		}
		prefix = strings.TrimSuffix(prefix, "/")
		// Match the path itself or anything nested beneath it.
		if cleaned == prefix || strings.HasPrefix(cleaned, prefix+"/") {
			return true
		}
	}
	return false
}
// hasHiddenDirectory reports whether any path segment (other than "." and
// "..") starts with a dot, i.e. the path names or lives inside a hidden
// file or directory.
func hasHiddenDirectory(candidate string) bool {
	if candidate == "" {
		return false
	}
	segments := strings.Split(filepath.ToSlash(filepath.Clean(candidate)), "/")
	for _, segment := range segments {
		switch segment {
		case "", ".", "..":
			continue
		}
		if segment[0] == '.' {
			return true
		}
	}
	return false
}
// enabledToolNames resolves which configured tool names apply to a run.
//
// Security/compliance category selections pull in their tool groups. An
// explicit Lang pulls only that language's group; otherwise each detected
// language's group plus the infra group apply, unless the category filter
// is security/compliance-only. Without a language override, CI adds the
// security group and SBOM adds the compliance group. The result is
// de-duplicated, preserving first-seen order.
func enabledToolNames(config LintConfig, languages []string, input RunInput, categories []string) []string {
	var names []string
	if slices.Contains(categories, "security") {
	names = append(names, config.Lint.Security...)
	}
	if slices.Contains(categories, "compliance") {
	names = append(names, config.Lint.Compliance...)
	}
	if input.Lang != "" {
	names = append(names, groupForLanguage(config.Lint, input.Lang)...)
	} else if shouldIncludeLanguageGroups(categories) {
	for _, language := range languages {
	names = append(names, groupForLanguage(config.Lint, language)...)
	}
	}
	if input.Lang == "" && shouldIncludeInfraGroups(categories) {
	names = append(names, config.Lint.Infra...)
	}
	if input.Lang == "" {
	if input.CI {
	names = append(names, config.Lint.Security...)
	}
	if input.SBOM {
	names = append(names, config.Lint.Compliance...)
	}
	}
	return dedupeStrings(names)
}
// selectedCategories resolves the category filter for a run: an explicit
// input category wins; otherwise the schedule's categories (if any) apply,
// and with neither the filter is nil (no restriction).
func selectedCategories(input RunInput, schedule *Schedule) []string {
	if input.Category != "" {
		return []string{input.Category}
	}
	if schedule != nil {
		return slices.Clone(schedule.Categories)
	}
	return nil
}
// shouldIncludeLanguageGroups reports whether per-language tool groups apply
// to a run: true when no category filter is set, or when any requested
// category is something other than "security"/"compliance".
func shouldIncludeLanguageGroups(categories []string) bool {
	if len(categories) == 0 {
		return true
	}
	for _, category := range categories {
		if category != "security" && category != "compliance" {
			return true
		}
	}
	return false
}
func shouldIncludeInfraGroups(categories []string) bool {
if len(categories) == 0 {
return true
}
for _, category := range categories {
switch category {
case "security", "compliance":
continue
default:
return true
}
}
return false
}
// groupForLanguage returns the configured tool names for a language
// identifier. Infra-style file types (shell, dockerfile, yaml, json,
// markdown) all share the Infra group; unknown languages yield nil.
func groupForLanguage(groups ToolGroups, language string) []string {
	if language == "shell" || language == "dockerfile" || language == "yaml" ||
		language == "json" || language == "markdown" {
		return groups.Infra
	}
	byLanguage := map[string][]string{
		"go":     groups.Go,
		"php":    groups.PHP,
		"js":     groups.JS,
		"ts":     groups.TS,
		"python": groups.Python,
	}
	// Missing keys return the map's zero value, nil, matching the old switch.
	return byLanguage[language]
}
// hookFilePath locates the repository's pre-commit hook file by asking git
// for its directory (`git rev-parse --git-dir`), resolving a relative answer
// against the project path.
func hookFilePath(projectPath string) (string, error) {
	if projectPath == "" {
	projectPath = "."
	}
	toolkit := NewToolkit(projectPath)
	stdout, stderr, exitCode, err := toolkit.Run("git", "rev-parse", "--git-dir")
	// Only fail when git reported a non-zero exit alongside the error.
	if err != nil && exitCode != 0 {
	return "", coreerr.E("hookFilePath", "git rev-parse --git-dir: "+strings.TrimSpace(stderr), err)
	}
	gitDir := strings.TrimSpace(stdout)
	if gitDir == "" {
	return "", coreerr.E("hookFilePath", "git directory is empty", nil)
	}
	// git prints a relative path when run inside the work tree.
	if !filepath.IsAbs(gitDir) {
	gitDir = filepath.Join(projectPath, gitDir)
	}
	return filepath.Join(gitDir, "hooks", "pre-commit"), nil
}
// hookScriptBlock renders the marker-delimited pre-commit snippet. A fresh
// hook execs core-lint directly; when appended to an existing hook the
// command propagates the exit code instead of replacing the process.
func hookScriptBlock(appended bool) string {
	command := "exec core-lint run --hook"
	if appended {
		command = "core-lint run --hook || exit $?"
	}
	lines := []string{hookStartMarker, "# Installed by core-lint", command, hookEndMarker, ""}
	return strings.Join(lines, "\n")
}
// normaliseRunInput applies run defaults: the current directory as path, and
// GitHub annotation output in CI mode when no output format was chosen.
func normaliseRunInput(input RunInput) RunInput {
	normalised := input
	if normalised.Path == "" {
		normalised.Path = "."
	}
	if normalised.CI && normalised.Output == "" {
		normalised.Output = "github"
	}
	return normalised
}
// normaliseReportFindings fills defaulted fields on adapter findings (Code
// from RuleID, Message from Title, Tool "catalog", Severity "warning" or the
// normalised form) and rewrites file paths to slash-separated
// project-relative form where possible.
func normaliseReportFindings(findings []Finding, projectPath string) []Finding {
	normalised := make([]Finding, 0, len(findings))
	for _, finding := range findings {
	if finding.Code == "" {
	finding.Code = finding.RuleID
	}
	if finding.Message == "" {
	finding.Message = finding.Title
	}
	if finding.Tool == "" {
	finding.Tool = "catalog"
	}
	if finding.Severity == "" {
	finding.Severity = "warning"
	} else {
	finding.Severity = normaliseSeverity(finding.Severity)
	}
	if finding.File != "" && projectPath != "" {
	// Paths outside the project (rel starting with "..") keep their
	// original form, only slash-normalised.
	if relativePath, err := filepath.Rel(projectPath, finding.File); err == nil && relativePath != "" && !strings.HasPrefix(relativePath, "..") {
	finding.File = filepath.ToSlash(relativePath)
	} else {
	finding.File = filepath.ToSlash(finding.File)
	}
	}
	normalised = append(normalised, finding)
	}
	return normalised
}
// projectName derives a display name for the project from the final element
// of its path, preferring the absolute form when it resolves.
func projectName(path string) string {
	if absolutePath, err := filepath.Abs(path); err == nil {
		return filepath.Base(absolutePath)
	}
	return filepath.Base(path)
}
// dedupeStrings removes empty strings and duplicates from values, keeping
// the first occurrence of each remaining entry in order. Returns nil when
// nothing survives filtering.
func dedupeStrings(values []string) []string {
	var result []string
	seen := map[string]bool{}
	for _, candidate := range values {
		if candidate == "" {
			continue
		}
		if seen[candidate] {
			continue
		}
		seen[candidate] = true
		result = append(result, candidate)
	}
	return result
}
// hasAdapter reports whether an adapter with the given name is present in
// the slice.
func hasAdapter(adapters []Adapter, name string) bool {
	return slices.ContainsFunc(adapters, func(adapter Adapter) bool {
		return adapter.Name() == name
	})
}
// passesThreshold reports whether the summary satisfies the fail-on
// threshold: "error" (the default) tolerates warnings and info findings,
// "warning" additionally fails on warnings, and "info" fails on any finding.
func passesThreshold(summary Summary, threshold string) bool {
	normalised := strings.ToLower(strings.TrimSpace(threshold))
	if normalised == "info" {
		return summary.Total == 0
	}
	if normalised == "warning" {
		return summary.Errors == 0 && summary.Warnings == 0
	}
	// Empty, "error", and unrecognised thresholds all gate on errors only.
	return summary.Errors == 0
}
// sortFindings orders findings in place by file, then line, column, tool,
// and finally code, so report output is stable across runs.
func sortFindings(findings []Finding) {
	slices.SortFunc(findings, func(a Finding, b Finding) int {
		if c := strings.Compare(a.File, b.File); c != 0 {
			return c
		}
		if a.Line != b.Line {
			if a.Line < b.Line {
				return -1
			}
			return 1
		}
		if a.Column != b.Column {
			if a.Column < b.Column {
				return -1
			}
			return 1
		}
		if c := strings.Compare(a.Tool, b.Tool); c != 0 {
			return c
		}
		return strings.Compare(a.Code, b.Code)
	})
}
// sortToolRuns orders tool runs in place alphabetically by tool name.
func sortToolRuns(toolRuns []ToolRun) {
	slices.SortFunc(toolRuns, func(a ToolRun, b ToolRun) int {
		return strings.Compare(a.Name, b.Name)
	})
}

624
pkg/lint/service_test.go Normal file
View file

@ -0,0 +1,624 @@
package lint
import (
"context"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestServiceRun_Good_CatalogFindings verifies that a Go project containing
// a snippet that trips catalog rule go-cor-003 (asserted below) yields
// exactly one warning finding with populated summary, languages, and tools.
func TestServiceRun_Good_CatalogFindings(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "input.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("data")
}
`), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "warning", report.Findings[0].Severity)
	assert.Equal(t, "catalog", report.Findings[0].Tool)
	assert.Equal(t, "go-cor-003", report.Findings[0].Code)
	assert.Equal(t, "correctness", report.Findings[0].Category)
	assert.Equal(t, 1, report.Summary.Total)
	assert.Equal(t, 1, report.Summary.Warnings)
	// FailOn "warning" means a single warning fails the run.
	assert.False(t, report.Summary.Passed)
	assert.Contains(t, report.Languages, "go")
	require.NotEmpty(t, report.Tools)
	assert.Equal(t, "catalog", report.Tools[0].Name)
}
// TestServiceRun_Good_UsesConfiguredPaths verifies that a "paths" entry in
// .core/lint.yaml limits scanning to the listed directory: the violation in
// root.go is ignored while the one under services/ is reported.
func TestServiceRun_Good_UsesConfiguredPaths(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("root")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "services"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "services", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("scoped")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	// Scope scanning to the services directory only.
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte("paths:\n - services\n"), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "services/scoped.go", report.Findings[0].File)
	assert.Equal(t, 1, report.Summary.Total)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_ExplicitEmptyFilesSkipsScanning verifies that an
// explicit empty Files slice short-circuits the run: nothing is scanned and
// the report passes even though a violation exists on disk.
func TestServiceRun_Good_ExplicitEmptyFilesSkipsScanning(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("root")
}
`), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path: dir,
		// Empty (non-nil) slice is an explicit "no files" scope.
		Files:  []string{},
		FailOn: "warning",
	})
	require.NoError(t, err)
	assert.Empty(t, report.Languages)
	assert.Empty(t, report.Tools)
	assert.Empty(t, report.Findings)
	assert.True(t, report.Summary.Passed)
}
// TestServiceRun_Good_UsesConfiguredExclude verifies that an "exclude"
// entry in .core/lint.yaml removes a directory from scanning: only the
// violation in root.go is reported.
func TestServiceRun_Good_UsesConfiguredExclude(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("root")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "services"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "services", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("scoped")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	// Exclude the services directory from the scan.
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte("exclude:\n - services\n"), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "root.go", report.Findings[0].File)
	assert.Equal(t, 1, report.Summary.Total)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_SkipsHiddenConfiguredRootDirectory verifies that a
// configured "paths" entry pointing at a hidden (dot-prefixed) directory is
// skipped entirely, producing an empty passing report.
func TestServiceRun_Good_SkipsHiddenConfiguredRootDirectory(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".hidden"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".hidden", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("scoped")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	// Configure the hidden directory as the only scan path.
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte("paths:\n - .hidden\n"), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	assert.Empty(t, report.Findings)
	assert.Empty(t, report.Tools)
	assert.True(t, report.Summary.Passed)
}
// TestServiceRun_Good_SkipsHiddenConfiguredFilePath verifies that a
// configured file path inside a hidden directory is skipped while the
// visible configured file is still scanned.
func TestServiceRun_Good_SkipsHiddenConfiguredFilePath(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("root")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".hidden"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".hidden", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("hidden")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	// One visible file and one hidden file are both configured explicitly.
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte("paths:\n - root.go\n - .hidden/scoped.go\n"), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "root.go", report.Findings[0].File)
	assert.Equal(t, 1, report.Summary.Total)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_UsesNamedSchedule verifies that selecting a named
// schedule applies that schedule's fail_on and paths settings: only the
// services directory is scanned and its warning fails the run.
func TestServiceRun_Good_UsesNamedSchedule(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "root.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("root")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "services"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "services", "scoped.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("scoped")
}
`), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	// A "nightly" schedule carrying its own fail_on and paths.
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(`schedules:
nightly:
fail_on: warning
paths:
- services
`), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:     dir,
		Schedule: "nightly",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "services/scoped.go", report.Findings[0].File)
	assert.Equal(t, 1, report.Summary.Total)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_LanguageShortcutIgnoresCiAndSbomGroups verifies that
// the language shortcut (Lang: "go") runs only the configured go group
// tools, even when CI and SBOM flags would otherwise pull in the security
// and compliance groups.
func TestServiceRun_Good_LanguageShortcutIgnoresCiAndSbomGroups(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(`lint:
go:
- catalog
- go-tool
security:
- security-tool
compliance:
- compliance-tool
`), 0o644))
	svc := &Service{adapters: []Adapter{
		shortcutAdapter{name: "go-tool", category: "correctness"},
		shortcutAdapter{name: "security-tool", category: "security"},
		shortcutAdapter{name: "compliance-tool", category: "compliance"},
	}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		Lang:   "go",
		CI:     true,
		SBOM:   true,
		FailOn: "warning",
	})
	require.NoError(t, err)
	// Only the go group's two tools should have run.
	require.Len(t, report.Tools, 2)
	assert.Equal(t, []string{"catalog", "go-tool"}, []string{report.Tools[0].Name, report.Tools[1].Name})
}
// TestServiceRun_Good_LanguageShortcutExcludesInfraGroup verifies that the
// php language shortcut does not pull in tools from the infra group.
func TestServiceRun_Good_LanguageShortcutExcludesInfraGroup(t *testing.T) {
	dir := t.TempDir()
	// composer.json marks the project as PHP.
	require.NoError(t, os.WriteFile(filepath.Join(dir, "composer.json"), []byte("{\n \"name\": \"example/test\"\n}\n"), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte(`lint:
php:
- php-tool
infra:
- shell-tool
`), 0o644))
	svc := &Service{adapters: []Adapter{
		shortcutAdapter{name: "php-tool", category: "correctness"},
		shortcutAdapter{name: "shell-tool", category: "correctness"},
	}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		Lang:   "php",
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Tools, 1)
	assert.Equal(t, "php-tool", report.Tools[0].Name)
}
// TestServiceRun_Good_HookModeUsesStagedFiles verifies that hook mode scans
// only git-staged files: the staged violation is reported while the
// unstaged file is ignored. Skipped when git is unavailable.
func TestServiceRun_Good_HookModeUsesStagedFiles(t *testing.T) {
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("git not available")
	}
	dir := t.TempDir()
	runTestCommand(t, dir, "git", "init")
	runTestCommand(t, dir, "git", "config", "user.email", "test@example.com")
	runTestCommand(t, dir, "git", "config", "user.name", "Test User")
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "staged.go"), []byte(`package sample
type service struct{}
func (service) Process(string) error { return nil }
func run() {
svc := service{}
_ = svc.Process("data")
}
`), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "unstaged.go"), []byte(`package sample
func run2() {
panic("boom")
}
`), 0o644))
	// Only go.mod and staged.go enter the index.
	runTestCommand(t, dir, "git", "add", "go.mod", "staged.go")
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		Hook:   true,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "staged.go", report.Findings[0].File)
	assert.Equal(t, "go-cor-003", report.Findings[0].Code)
	assert.False(t, report.Summary.Passed)
}
// TestServiceRun_Good_HookModeWithNoStagedFilesSkipsScanning verifies that
// hook mode with an empty git index scans nothing and passes, even though
// an unstaged file contains code. Skipped when git is unavailable.
func TestServiceRun_Good_HookModeWithNoStagedFilesSkipsScanning(t *testing.T) {
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("git not available")
	}
	dir := t.TempDir()
	runTestCommand(t, dir, "git", "init")
	runTestCommand(t, dir, "git", "config", "user.email", "test@example.com")
	runTestCommand(t, dir, "git", "config", "user.name", "Test User")
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "unstaged.go"), []byte(`package sample
func run() {
panic("boom")
}
`), 0o644))
	svc := &Service{adapters: []Adapter{newCatalogAdapter()}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		Hook:   true,
		FailOn: "warning",
	})
	require.NoError(t, err)
	assert.Empty(t, report.Languages)
	assert.Empty(t, report.Tools)
	assert.Empty(t, report.Findings)
	assert.True(t, report.Summary.Passed)
}
// TestServiceRemoveHook_PreservesExistingHookContent verifies that an
// install/remove cycle restores a pre-existing pre-commit hook to its exact
// original contents. Skipped when git is unavailable.
func TestServiceRemoveHook_PreservesExistingHookContent(t *testing.T) {
	if _, err := exec.LookPath("git"); err != nil {
		t.Skip("git not available")
	}
	dir := t.TempDir()
	runTestCommand(t, dir, "git", "init")
	original := "\n# custom hook\nprintf 'keep'"
	hookDir := filepath.Join(dir, ".git", "hooks")
	require.NoError(t, os.MkdirAll(hookDir, 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(hookDir, "pre-commit"), []byte(original), 0o755))
	svc := NewService()
	require.NoError(t, svc.InstallHook(dir))
	require.NoError(t, svc.RemoveHook(dir))
	restored, err := os.ReadFile(filepath.Join(hookDir, "pre-commit"))
	require.NoError(t, err)
	// Removal must leave the custom hook byte-identical.
	assert.Equal(t, original, string(restored))
}
// TestServiceRun_JS_PrettierFindings verifies that a mocked prettier exit
// code 1 listing index.js is parsed into a single warning finding and a
// failed tool run for a JS project.
func TestServiceRun_JS_PrettierFindings(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "package.json"), []byte("{\n \"name\": \"example\"\n}\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "index.js"), []byte("const value = 1;\n"), 0o644))
	// Mock prettier: prints the differing file and exits 1.
	setupMockCmdExit(t, "prettier", "index.js\n", "", 1)
	svc := &Service{adapters: []Adapter{
		newCommandAdapter("prettier", []string{"prettier"}, []string{"js"}, "style", "", false, true, projectPathArguments("--list-different"), parsePrettierDiagnostics),
	}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Findings, 1)
	require.Len(t, report.Tools, 1)
	assert.Equal(t, "prettier", report.Findings[0].Tool)
	assert.Equal(t, "index.js", report.Findings[0].File)
	assert.Equal(t, "prettier-format", report.Findings[0].Code)
	assert.Equal(t, "warning", report.Findings[0].Severity)
	assert.False(t, report.Summary.Passed)
	assert.Equal(t, "prettier", report.Tools[0].Name)
	assert.Equal(t, "failed", report.Tools[0].Status)
	assert.Equal(t, 1, report.Tools[0].Findings)
}
// TestServiceRun_CapturesToolVersion verifies that the reported tool run
// carries the version string obtained from the tool's --version output,
// using a shell-script stand-in for prettier placed first on PATH.
func TestServiceRun_CapturesToolVersion(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "package.json"), []byte("{\n \"name\": \"example\"\n}\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(dir, "index.js"), []byte("const value = 1;\n"), 0o644))
	binDir := t.TempDir()
	scriptPath := filepath.Join(binDir, "prettier")
	// Fake prettier: answers --version and reports index.js as differing.
	script := `#!/bin/sh
case "$1" in
--version)
echo "prettier 3.2.1"
exit 0
;;
--list-different)
echo "index.js"
exit 1
;;
esac
echo "unexpected args: $*" >&2
exit 0
`
	require.NoError(t, os.WriteFile(scriptPath, []byte(script), 0o755))
	t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
	svc := &Service{adapters: []Adapter{
		newCommandAdapter("prettier", []string{"prettier"}, []string{"js"}, "style", "", false, true, projectPathArguments("--list-different"), parsePrettierDiagnostics),
	}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Tools, 1)
	assert.Equal(t, "prettier", report.Tools[0].Name)
	assert.Equal(t, "prettier 3.2.1", report.Tools[0].Version)
}
// TestServiceRun_Good_DeduplicatesMergedFindings verifies that identical
// findings emitted by two adapters sharing the same name are merged into a
// single reported finding.
func TestServiceRun_Good_DeduplicatesMergedFindings(t *testing.T) {
	dir := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module example.com/test\n"), 0o644))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, ".core"), 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".core", "lint.yaml"), []byte("lint:\n go:\n - dup\n"), 0o644))
	finding := Finding{
		Tool:     "dup",
		File:     filepath.Join(dir, "input.go"),
		Line:     12,
		Column:   3,
		Severity: "warning",
		Code:     "duplicate-finding",
		Message:  "same finding",
	}
	// Two adapters with the same name both emit the same finding.
	svc := &Service{adapters: []Adapter{
		duplicateAdapter{name: "dup", finding: finding},
		duplicateAdapter{name: "dup", finding: finding},
	}}
	report, err := svc.Run(context.Background(), RunInput{
		Path:   dir,
		FailOn: "warning",
	})
	require.NoError(t, err)
	require.Len(t, report.Tools, 3)
	require.Len(t, report.Findings, 1)
	assert.Equal(t, "duplicate-finding", report.Findings[0].Code)
	assert.Equal(t, 1, report.Summary.Total)
}
// TestServiceTools_EmptyInventoryReturnsEmptySlice verifies that Tools on a
// zero-value Service returns an empty, non-nil slice.
func TestServiceTools_EmptyInventoryReturnsEmptySlice(t *testing.T) {
	linter := &Service{}
	inventory := linter.Tools(nil)
	require.NotNil(t, inventory)
	assert.Empty(t, inventory)
}
// shortcutAdapter is a minimal Adapter stub for language-shortcut tests: it
// matches every language, requires no entitlement, and reports a passing
// run with no findings.
type shortcutAdapter struct {
	name     string
	category string
}

func (a shortcutAdapter) Name() string                            { return a.name }
func (a shortcutAdapter) Available() bool                         { return true }
func (a shortcutAdapter) Languages() []string                     { return []string{"*"} }
func (a shortcutAdapter) Command() string                         { return a.name }
func (a shortcutAdapter) Entitlement() string                     { return "" }
func (a shortcutAdapter) RequiresEntitlement() bool               { return false }
func (a shortcutAdapter) MatchesLanguage(languages []string) bool { return true }
func (a shortcutAdapter) Category() string                        { return a.category }
func (a shortcutAdapter) Fast() bool                              { return true }

// Run reports an immediately-successful, empty tool run.
func (a shortcutAdapter) Run(_ context.Context, _ RunInput, _ []string) AdapterResult {
	var result AdapterResult
	result.Tool = ToolRun{Name: a.name, Status: "passed", Duration: "0s"}
	return result
}
// duplicateAdapter is an Adapter stub that always emits its configured
// finding, used to exercise de-duplication of merged findings.
type duplicateAdapter struct {
	name    string
	finding Finding
}

func (a duplicateAdapter) Name() string              { return a.name }
func (a duplicateAdapter) Available() bool           { return true }
func (a duplicateAdapter) Languages() []string       { return []string{"go"} }
func (a duplicateAdapter) Command() string           { return a.name }
func (a duplicateAdapter) Entitlement() string       { return "" }
func (a duplicateAdapter) RequiresEntitlement() bool { return false }

// MatchesLanguage reports whether "go" is among the detected languages.
func (a duplicateAdapter) MatchesLanguage(languages []string) bool {
	for index := 0; index < len(languages); index++ {
		if languages[index] == "go" {
			return true
		}
	}
	return false
}

func (a duplicateAdapter) Category() string { return "correctness" }
func (a duplicateAdapter) Fast() bool       { return true }

// Run reports a passing tool run carrying the configured finding.
func (a duplicateAdapter) Run(_ context.Context, _ RunInput, _ []string) AdapterResult {
	return AdapterResult{
		Tool:     ToolRun{Name: a.name, Status: "passed", Duration: "0s"},
		Findings: []Finding{a.finding},
	}
}
// runTestCommand executes name with args inside dir and fails the test on
// error, attaching the command's combined output for diagnosis.
func runTestCommand(t *testing.T, dir string, name string, args ...string) {
	t.Helper()
	command := exec.Command(name, args...)
	command.Dir = dir
	combined, runErr := command.CombinedOutput()
	require.NoError(t, runErr, string(combined))
}

View file

@ -8,9 +8,12 @@ import (
"os/exec"
"path/filepath"
"regexp"
"slices"
"strconv"
"strings"
"time"
coreerr "forge.lthn.ai/core/go-log"
)
// ToolFinding represents a single issue found by an external tool (e.g. go vet).
@ -35,14 +38,17 @@ type RaceCondition struct {
Desc string `json:"desc"`
}
// TODO represents a tracked code comment like TODO, FIXME, or HACK.
type TODO struct {
// TrackedComment represents a tracked code comment like TODO, FIXME, or HACK.
type TrackedComment struct {
File string `json:"file"`
Line int `json:"line"`
Type string `json:"type"`
Message string `json:"message"`
}
// TODO is kept for compatibility with the older API name.
type TODO = TrackedComment
// Vulnerability represents a dependency vulnerability from govulncheck text output.
type Vulnerability struct {
ID string `json:"id"`
@ -136,8 +142,10 @@ func (t *Toolkit) Run(name string, args ...string) (stdout, stderr string, exitC
return
}
// FindTODOs greps for TODO/FIXME/HACK comments within a directory.
func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) {
// FindTrackedComments greps for TODO/FIXME/HACK comments within a directory.
//
// comments, err := lint.NewToolkit(".").FindTrackedComments("pkg/lint")
func (t *Toolkit) FindTrackedComments(dir string) ([]TrackedComment, error) {
pattern := `\b(TODO|FIXME|HACK)\b(\(.*\))?:`
stdout, stderr, exitCode, err := t.Run("git", "grep", "--line-number", "-E", pattern, "--", dir)
@ -145,10 +153,10 @@ func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) {
return nil, nil
}
if err != nil && exitCode != 1 {
return nil, fmt.Errorf("git grep failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.FindTrackedComments", fmt.Sprintf("git grep failed (exit %d):\n%s", exitCode, stderr), err)
}
var todos []TODO
var comments []TrackedComment
re := regexp.MustCompile(pattern)
for line := range strings.SplitSeq(strings.TrimSpace(stdout), "\n") {
@ -167,21 +175,26 @@ func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) {
}
msg := strings.TrimSpace(re.Split(parts[2], 2)[1])
todos = append(todos, TODO{
comments = append(comments, TrackedComment{
File: parts[0],
Line: lineNum,
Type: todoType,
Message: msg,
})
}
return todos, nil
return comments, nil
}
// FindTODOs is kept for compatibility with the older API name.
func (t *Toolkit) FindTODOs(dir string) ([]TODO, error) {
return t.FindTrackedComments(dir)
}
// AuditDeps runs govulncheck to find dependency vulnerabilities (text output).
func (t *Toolkit) AuditDeps() ([]Vulnerability, error) {
stdout, stderr, exitCode, err := t.Run("govulncheck", "./...")
if err != nil && exitCode != 0 && !strings.Contains(stdout, "Vulnerability") {
return nil, fmt.Errorf("govulncheck failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.AuditDeps", fmt.Sprintf("govulncheck failed (exit %d):\n%s", exitCode, stderr), err)
}
var vulns []Vulnerability
@ -230,7 +243,7 @@ func (t *Toolkit) AuditDeps() ([]Vulnerability, error) {
func (t *Toolkit) DiffStat() (DiffSummary, error) {
stdout, stderr, exitCode, err := t.Run("git", "diff", "--stat")
if err != nil && exitCode != 0 {
return DiffSummary{}, fmt.Errorf("git diff failed (exit %d): %w\n%s", exitCode, err, stderr)
return DiffSummary{}, coreerr.E("Toolkit.DiffStat", fmt.Sprintf("git diff failed (exit %d):\n%s", exitCode, stderr), err)
}
var s DiffSummary
@ -263,7 +276,7 @@ func (t *Toolkit) DiffStat() (DiffSummary, error) {
func (t *Toolkit) UncommittedFiles() ([]string, error) {
stdout, stderr, exitCode, err := t.Run("git", "status", "--porcelain")
if err != nil && exitCode != 0 {
return nil, fmt.Errorf("git status failed: %w\n%s", err, stderr)
return nil, coreerr.E("Toolkit.UncommittedFiles", "git status failed:\n"+stderr, err)
}
var files []string
for line := range strings.SplitSeq(strings.TrimSpace(stdout), "\n") {
@ -281,7 +294,7 @@ func (t *Toolkit) Lint(pkg string) ([]ToolFinding, error) {
return nil, nil
}
if err != nil && exitCode != 2 {
return nil, fmt.Errorf("go vet failed: %w", err)
return nil, coreerr.E("Toolkit.Lint", "go vet failed", err)
}
var findings []ToolFinding
@ -311,7 +324,7 @@ func (t *Toolkit) ScanSecrets(dir string) ([]SecretLeak, error) {
return nil, nil
}
if err != nil && exitCode != 1 {
return nil, fmt.Errorf("gitleaks failed: %w", err)
return nil, coreerr.E("Toolkit.ScanSecrets", "gitleaks failed", err)
}
var leaks []SecretLeak
@ -338,7 +351,7 @@ func (t *Toolkit) ScanSecrets(dir string) ([]SecretLeak, error) {
func (t *Toolkit) ModTidy() error {
_, stderr, exitCode, err := t.Run("go", "mod", "tidy")
if err != nil && exitCode != 0 {
return fmt.Errorf("go mod tidy failed: %s", stderr)
return coreerr.E("Toolkit.ModTidy", "go mod tidy failed: "+strings.TrimSpace(stderr), nil)
}
return nil
}
@ -350,7 +363,7 @@ func (t *Toolkit) Build(targets ...string) ([]BuildResult, error) {
_, stderr, _, err := t.Run("go", "build", "-o", "/dev/null", target)
r := BuildResult{Target: target}
if err != nil {
r.Error = fmt.Errorf("%s", strings.TrimSpace(stderr))
r.Error = coreerr.E("Toolkit.Build", strings.TrimSpace(stderr), nil)
}
results = append(results, r)
}
@ -361,7 +374,7 @@ func (t *Toolkit) Build(targets ...string) ([]BuildResult, error) {
func (t *Toolkit) TestCount(pkg string) (int, error) {
stdout, stderr, exitCode, err := t.Run("go", "test", "-list", ".*", pkg)
if err != nil && exitCode != 0 {
return 0, fmt.Errorf("go test -list failed: %w\n%s", err, stderr)
return 0, coreerr.E("Toolkit.TestCount", fmt.Sprintf("go test -list failed:\n%s", stderr), err)
}
count := 0
for line := range strings.SplitSeq(strings.TrimSpace(stdout), "\n") {
@ -379,7 +392,7 @@ func (t *Toolkit) Coverage(pkg string) ([]CoverageReport, error) {
}
stdout, stderr, exitCode, err := t.Run("go", "test", "-cover", pkg)
if err != nil && exitCode != 0 && !strings.Contains(stdout, "coverage:") {
return nil, fmt.Errorf("go test -cover failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.Coverage", fmt.Sprintf("go test -cover failed (exit %d):\n%s", exitCode, stderr), err)
}
var reports []CoverageReport
@ -406,7 +419,7 @@ func (t *Toolkit) RaceDetect(pkg string) ([]RaceCondition, error) {
}
_, stderr, _, err := t.Run("go", "test", "-race", pkg)
if err != nil && !strings.Contains(stderr, "WARNING: DATA RACE") {
return nil, fmt.Errorf("go test -race failed: %w", err)
return nil, coreerr.E("Toolkit.RaceDetect", "go test -race failed", err)
}
var races []RaceCondition
@ -434,7 +447,7 @@ func (t *Toolkit) RaceDetect(pkg string) ([]RaceCondition, error) {
func (t *Toolkit) GocycloComplexity(threshold int) ([]ComplexFunc, error) {
stdout, stderr, exitCode, err := t.Run("gocyclo", "-over", strconv.Itoa(threshold), ".")
if err != nil && exitCode == -1 {
return nil, fmt.Errorf("gocyclo not available: %w\n%s", err, stderr)
return nil, coreerr.E("Toolkit.GocycloComplexity", "gocyclo not available:\n"+stderr, err)
}
var funcs []ComplexFunc
@ -467,7 +480,7 @@ func (t *Toolkit) GocycloComplexity(threshold int) ([]ComplexFunc, error) {
func (t *Toolkit) DepGraph(pkg string) (*Graph, error) {
stdout, stderr, exitCode, err := t.Run("go", "mod", "graph")
if err != nil && exitCode != 0 {
return nil, fmt.Errorf("go mod graph failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.DepGraph", fmt.Sprintf("go mod graph failed (exit %d):\n%s", exitCode, stderr), err)
}
graph := &Graph{Edges: make(map[string][]string)}
@ -487,6 +500,10 @@ func (t *Toolkit) DepGraph(pkg string) (*Graph, error) {
for node := range nodes {
graph.Nodes = append(graph.Nodes, node)
}
slices.Sort(graph.Nodes)
for src := range graph.Edges {
slices.Sort(graph.Edges[src])
}
return graph, nil
}
@ -494,7 +511,7 @@ func (t *Toolkit) DepGraph(pkg string) (*Graph, error) {
func (t *Toolkit) GitLog(n int) ([]Commit, error) {
stdout, stderr, exitCode, err := t.Run("git", "log", fmt.Sprintf("-n%d", n), "--format=%H|%an|%aI|%s")
if err != nil && exitCode != 0 {
return nil, fmt.Errorf("git log failed (exit %d): %w\n%s", exitCode, err, stderr)
return nil, coreerr.E("Toolkit.GitLog", fmt.Sprintf("git log failed (exit %d):\n%s", exitCode, stderr), err)
}
var commits []Commit
@ -543,7 +560,7 @@ func (t *Toolkit) CheckPerms(dir string) ([]PermIssue, error) {
return nil
})
if err != nil {
return nil, fmt.Errorf("walk failed: %w", err)
return nil, coreerr.E("Toolkit.CheckPerms", "walk failed", err)
}
return issues, nil
}

View file

@ -135,6 +135,18 @@ func TestToolkit_DepGraph_Good(t *testing.T) {
assert.Len(t, graph.Edges["modA@v1"], 2)
}
// TestToolkit_DepGraph_SortsNodesAndEdges verifies that DepGraph returns
// nodes and per-source edge lists in sorted order regardless of the order
// "go mod graph" emits them.
func TestToolkit_DepGraph_SortsNodesAndEdges(t *testing.T) {
	// Deliberately unsorted mock "go mod graph" output.
	output := "modB@v2 modD@v1\nmodA@v1 modC@v3\nmodA@v1 modB@v2"
	setupMockCmd(t, "go", output)
	tk := NewToolkit(t.TempDir())
	graph, err := tk.DepGraph("./...")
	require.NoError(t, err)
	assert.Equal(t, []string{"modA@v1", "modB@v2", "modC@v3", "modD@v1"}, graph.Nodes)
	assert.Equal(t, []string{"modB@v2", "modC@v3"}, graph.Edges["modA@v1"])
}
func TestToolkit_RaceDetect_Good(t *testing.T) {
setupMockCmd(t, "go", "ok\texample.com/safe\t0.1s")
@ -191,3 +203,21 @@ func TestToolkit_CheckPerms_Good(t *testing.T) {
require.Len(t, issues, 1)
assert.Equal(t, "World-writable", issues[0].Issue)
}
// TestToolkit_FindTrackedComments_Compatibility verifies that
// FindTrackedComments parses mocked git grep output and that the legacy
// FindTODOs wrapper returns identical results.
func TestToolkit_FindTrackedComments_Compatibility(t *testing.T) {
	output := "pkg/file.go:12:TODO: fix this\n"
	setupMockCmd(t, "git", output)
	tk := NewToolkit(t.TempDir())
	comments, err := tk.FindTrackedComments("pkg")
	require.NoError(t, err)
	require.Len(t, comments, 1)
	assert.Equal(t, "pkg/file.go", comments[0].File)
	assert.Equal(t, 12, comments[0].Line)
	assert.Equal(t, "TODO", comments[0].Type)
	assert.Equal(t, "fix this", comments[0].Message)
	// The compatibility alias must behave identically.
	legacyComments, err := tk.FindTODOs("pkg")
	require.NoError(t, err)
	assert.Equal(t, comments, legacyComments)
}

View file

@ -2,8 +2,9 @@ package lint
import (
"encoding/json"
"fmt"
"strings"
coreerr "forge.lthn.ai/core/go-log"
)
// VulnFinding represents a single vulnerability found by govulncheck.
@ -89,7 +90,7 @@ func (t *Toolkit) VulnCheck(modulePath string) (*VulnResult, error) {
stdout, stderr, exitCode, err := t.Run("govulncheck", "-json", modulePath)
if err != nil && exitCode == -1 {
return nil, fmt.Errorf("govulncheck not installed or not available: %w", err)
return nil, coreerr.E("Toolkit.VulnCheck", "govulncheck not installed or not available", err)
}
return ParseVulnCheckJSON(stdout, stderr)

244
pkg/php/analyse.go Normal file
View file

@ -0,0 +1,244 @@
package php
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// AnalyseOptions configures PHP static analysis.
type AnalyseOptions struct {
	// Dir is the project directory (defaults to current working directory).
	Dir string
	// Level is the PHPStan analysis level (0-9). A zero value is treated as
	// "unset", so the analyser's own default level applies.
	Level int
	// Paths limits analysis to specific paths.
	Paths []string
	// Memory is the memory limit for analysis (e.g., "2G").
	Memory string
	// JSON outputs results in JSON format.
	JSON bool
	// SARIF outputs results in SARIF format for GitHub Security tab.
	// Takes precedence over JSON when both are set.
	SARIF bool
	// Output is the writer for output (defaults to os.Stdout).
	Output io.Writer
}
// AnalyserType represents the detected static analyser.
type AnalyserType string

// Static analyser type constants.
const (
	// AnalyserPHPStan indicates standard PHPStan analyser.
	AnalyserPHPStan AnalyserType = "phpstan"
	// AnalyserLarastan indicates Laravel-specific Larastan analyser; it is
	// a PHPStan extension, so it is invoked through the same command.
	AnalyserLarastan AnalyserType = "larastan"
)
// DetectAnalyser detects which static analyser is available in the project.
// It reports Larastan when a larastan vendor package sits alongside a
// PHPStan config or binary, PHPStan otherwise, and false when neither a
// config file nor a vendor binary exists.
func DetectAnalyser(dir string) (AnalyserType, bool) {
	hasConfig := fileExists(filepath.Join(dir, "phpstan.neon")) ||
		fileExists(filepath.Join(dir, "phpstan.neon.dist"))
	hasBinary := fileExists(filepath.Join(dir, "vendor", "bin", "phpstan"))
	if !hasConfig && !hasBinary {
		return "", false
	}
	// The package moved from nunomaduro/larastan to larastan/larastan, so
	// both vendor locations are checked.
	larastanDirs := []string{
		filepath.Join(dir, "vendor", "larastan", "larastan"),
		filepath.Join(dir, "vendor", "nunomaduro", "larastan"),
	}
	for _, candidate := range larastanDirs {
		if fileExists(candidate) {
			return AnalyserLarastan, true
		}
	}
	return AnalyserPHPStan, true
}
// Analyse runs PHPStan or Larastan for static analysis.
//
// Zero-value options are defaulted: Dir becomes the current working
// directory and Output becomes os.Stdout. The tool's stdout and stderr
// are both streamed to opts.Output.
func Analyse(ctx context.Context, opts AnalyseOptions) error {
	if opts.Dir == "" {
		workDir, err := os.Getwd()
		if err != nil {
			return coreerr.E("php.Analyse", "get working directory", err)
		}
		opts.Dir = workDir
	}
	if opts.Output == nil {
		opts.Output = os.Stdout
	}
	analyser, ok := DetectAnalyser(opts.Dir)
	if !ok {
		return coreerr.E("php.Analyse", "no static analyser found (install PHPStan: composer require phpstan/phpstan --dev)", nil)
	}
	var (
		cmdName string
		args    []string
	)
	// Larastan is a PHPStan extension, so both run through the same command.
	switch analyser {
	case AnalyserPHPStan, AnalyserLarastan:
		cmdName, args = buildPHPStanCommand(opts)
	}
	cmd := exec.CommandContext(ctx, cmdName, args...)
	cmd.Dir = opts.Dir
	cmd.Stdout = opts.Output
	cmd.Stderr = opts.Output
	return cmd.Run()
}
// buildPHPStanCommand assembles the phpstan invocation for the given
// options, preferring the project-local vendor/bin/phpstan binary.
// NOTE(review): a Level of 0 cannot be requested explicitly — the zero
// value is treated as "unset", so the config file's level applies.
func buildPHPStanCommand(opts AnalyseOptions) (string, []string) {
	command := "phpstan"
	if localBin := filepath.Join(opts.Dir, "vendor", "bin", "phpstan"); fileExists(localBin) {
		command = localBin
	}
	args := []string{"analyse"}
	if opts.Level > 0 {
		args = append(args, "--level", fmt.Sprintf("%d", opts.Level))
	}
	if opts.Memory != "" {
		args = append(args, "--memory-limit", opts.Memory)
	}
	// SARIF wins when both output formats are requested.
	switch {
	case opts.SARIF:
		args = append(args, "--error-format=sarif")
	case opts.JSON:
		args = append(args, "--error-format=json")
	}
	return command, append(args, opts.Paths...)
}
// =============================================================================
// Psalm Static Analysis
// =============================================================================
// PsalmOptions configures Psalm static analysis.
type PsalmOptions struct {
	// Dir is the project directory (defaults to current working directory).
	Dir string
	// Level is the error level (1=strictest, 8=most lenient); values
	// outside 1-8 are ignored and the project config's level applies.
	Level int
	// Fix auto-fixes issues where possible (runs with --alter --issues=all).
	Fix bool
	// Baseline generates/updates the baseline file (psalm-baseline.xml).
	Baseline bool
	// ShowInfo shows info-level issues.
	ShowInfo bool
	// JSON outputs in JSON format.
	JSON bool
	// SARIF outputs in SARIF format for GitHub Security tab; takes
	// precedence over JSON when both are set.
	SARIF bool
	// Output is the writer for output (defaults to os.Stdout).
	Output io.Writer
}
// PsalmType represents the detected Psalm configuration.
type PsalmType string

// Psalm configuration type constants.
const (
	// PsalmStandard indicates a standard Psalm setup (config file or
	// vendor binary present); currently the only detected variant.
	PsalmStandard PsalmType = "psalm"
)
// DetectPsalm checks if Psalm is available in the project, either as a
// vendor binary or via a psalm.xml / psalm.xml.dist config file.
func DetectPsalm(dir string) (PsalmType, bool) {
	// The project-local binary is the strongest signal.
	if fileExists(filepath.Join(dir, "vendor", "bin", "psalm")) {
		return PsalmStandard, true
	}
	// Otherwise fall back to config-file detection.
	if fileExists(filepath.Join(dir, "psalm.xml")) ||
		fileExists(filepath.Join(dir, "psalm.xml.dist")) {
		return PsalmStandard, true
	}
	return "", false
}
// RunPsalm runs Psalm static analysis.
//
// Zero-value options are defaulted: Dir becomes the current working
// directory and Output becomes os.Stdout. Level is only forwarded when
// it is in the valid 1-8 range.
func RunPsalm(ctx context.Context, opts PsalmOptions) error {
	if opts.Dir == "" {
		workDir, err := os.Getwd()
		if err != nil {
			return coreerr.E("php.RunPsalm", "get working directory", err)
		}
		opts.Dir = workDir
	}
	if opts.Output == nil {
		opts.Output = os.Stdout
	}
	// Prefer the project-local binary over a global install.
	cmdName := "psalm"
	if localBin := filepath.Join(opts.Dir, "vendor", "bin", "psalm"); fileExists(localBin) {
		cmdName = localBin
	}
	args := []string{"--no-progress"}
	if opts.Level >= 1 && opts.Level <= 8 {
		args = append(args, fmt.Sprintf("--error-level=%d", opts.Level))
	}
	if opts.Fix {
		// --alter rewrites source files in place.
		args = append(args, "--alter", "--issues=all")
	}
	if opts.Baseline {
		args = append(args, "--set-baseline=psalm-baseline.xml")
	}
	if opts.ShowInfo {
		args = append(args, "--show-info=true")
	}
	// SARIF takes precedence over JSON when both are requested.
	if opts.SARIF {
		args = append(args, "--output-format=sarif")
	} else if opts.JSON {
		args = append(args, "--output-format=json")
	}
	cmd := exec.CommandContext(ctx, cmdName, args...)
	cmd.Dir = opts.Dir
	cmd.Stdout = opts.Output
	cmd.Stderr = opts.Output
	return cmd.Run()
}

192
pkg/php/analyse_test.go Normal file
View file

@ -0,0 +1,192 @@
package php
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// mkFile creates a file (and any missing parent directories) for testing,
// writing a small stub payload with executable permissions so the same
// helper works for fake config files and fake vendor binaries alike.
func mkFile(t *testing.T, path string) {
	t.Helper()
	parent := filepath.Dir(path)
	require.NoError(t, os.MkdirAll(parent, 0o755))
	require.NoError(t, os.WriteFile(path, []byte("stub"), 0o755))
}
// =============================================================================
// DetectAnalyser
// =============================================================================
func TestDetectAnalyser_Good_PHPStanConfig(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "phpstan.neon"))

	got, ok := DetectAnalyser(tmp)
	assert.True(t, ok)
	assert.Equal(t, AnalyserPHPStan, got)
}

func TestDetectAnalyser_Good_PHPStanDistConfig(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "phpstan.neon.dist"))

	got, ok := DetectAnalyser(tmp)
	assert.True(t, ok)
	assert.Equal(t, AnalyserPHPStan, got)
}

func TestDetectAnalyser_Good_PHPStanBinary(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "vendor", "bin", "phpstan"))

	got, ok := DetectAnalyser(tmp)
	assert.True(t, ok)
	assert.Equal(t, AnalyserPHPStan, got)
}

func TestDetectAnalyser_Good_Larastan(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "phpstan.neon"))
	mkFile(t, filepath.Join(tmp, "vendor", "larastan", "larastan"))

	got, ok := DetectAnalyser(tmp)
	assert.True(t, ok)
	assert.Equal(t, AnalyserLarastan, got)
}

func TestDetectAnalyser_Good_LarastanNunomaduro(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "vendor", "bin", "phpstan"))
	mkFile(t, filepath.Join(tmp, "vendor", "nunomaduro", "larastan"))

	got, ok := DetectAnalyser(tmp)
	assert.True(t, ok)
	assert.Equal(t, AnalyserLarastan, got)
}

func TestDetectAnalyser_Bad_NoAnalyser(t *testing.T) {
	got, ok := DetectAnalyser(t.TempDir())
	assert.False(t, ok)
	assert.Equal(t, AnalyserType(""), got)
}
// =============================================================================
// DetectPsalm
// =============================================================================
func TestDetectPsalm_Good_PsalmConfig(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "psalm.xml"))

	got, ok := DetectPsalm(tmp)
	assert.True(t, ok)
	assert.Equal(t, PsalmStandard, got)
}

func TestDetectPsalm_Good_PsalmDistConfig(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "psalm.xml.dist"))

	got, ok := DetectPsalm(tmp)
	assert.True(t, ok)
	assert.Equal(t, PsalmStandard, got)
}

func TestDetectPsalm_Good_PsalmBinary(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "vendor", "bin", "psalm"))

	got, ok := DetectPsalm(tmp)
	assert.True(t, ok)
	assert.Equal(t, PsalmStandard, got)
}

func TestDetectPsalm_Bad_NoPsalm(t *testing.T) {
	got, ok := DetectPsalm(t.TempDir())
	assert.False(t, ok)
	assert.Equal(t, PsalmType(""), got)
}
// =============================================================================
// buildPHPStanCommand
// =============================================================================
func TestBuildPHPStanCommand_Good_Defaults(t *testing.T) {
dir := t.TempDir()
opts := AnalyseOptions{Dir: dir}
cmdName, args := buildPHPStanCommand(opts)
assert.Equal(t, "phpstan", cmdName)
assert.Equal(t, []string{"analyse"}, args)
}
func TestBuildPHPStanCommand_Good_VendorBinary(t *testing.T) {
dir := t.TempDir()
vendorBin := filepath.Join(dir, "vendor", "bin", "phpstan")
mkFile(t, vendorBin)
opts := AnalyseOptions{Dir: dir}
cmdName, args := buildPHPStanCommand(opts)
assert.Equal(t, vendorBin, cmdName)
assert.Equal(t, []string{"analyse"}, args)
}
func TestBuildPHPStanCommand_Good_WithLevel(t *testing.T) {
dir := t.TempDir()
opts := AnalyseOptions{Dir: dir, Level: 5}
_, args := buildPHPStanCommand(opts)
assert.Contains(t, args, "--level")
assert.Contains(t, args, "5")
}
func TestBuildPHPStanCommand_Good_WithMemory(t *testing.T) {
dir := t.TempDir()
opts := AnalyseOptions{Dir: dir, Memory: "2G"}
_, args := buildPHPStanCommand(opts)
assert.Contains(t, args, "--memory-limit")
assert.Contains(t, args, "2G")
}
func TestBuildPHPStanCommand_Good_SARIF(t *testing.T) {
dir := t.TempDir()
opts := AnalyseOptions{Dir: dir, SARIF: true}
_, args := buildPHPStanCommand(opts)
assert.Contains(t, args, "--error-format=sarif")
}
func TestBuildPHPStanCommand_Good_JSON(t *testing.T) {
dir := t.TempDir()
opts := AnalyseOptions{Dir: dir, JSON: true}
_, args := buildPHPStanCommand(opts)
assert.Contains(t, args, "--error-format=json")
}
func TestBuildPHPStanCommand_Good_SARIFPrecedence(t *testing.T) {
dir := t.TempDir()
opts := AnalyseOptions{Dir: dir, SARIF: true, JSON: true}
_, args := buildPHPStanCommand(opts)
assert.Contains(t, args, "--error-format=sarif")
assert.NotContains(t, args, "--error-format=json")
}
func TestBuildPHPStanCommand_Good_WithPaths(t *testing.T) {
dir := t.TempDir()
opts := AnalyseOptions{Dir: dir, Paths: []string{"src", "app"}}
_, args := buildPHPStanCommand(opts)
assert.Contains(t, args, "src")
assert.Contains(t, args, "app")
}

174
pkg/php/audit.go Normal file
View file

@ -0,0 +1,174 @@
package php
import (
"cmp"
"context"
"encoding/json"
"io"
"os"
"os/exec"
"path/filepath"
"slices"
coreerr "forge.lthn.ai/core/go-log"
)
// AuditOptions configures dependency security auditing.
type AuditOptions struct {
	// Dir is the project directory (defaults to current working directory).
	Dir string
	// JSON requests JSON-formatted output.
	JSON bool
	// Fix auto-fixes vulnerabilities (npm only).
	Fix bool
	// Output is the writer for output (defaults to os.Stdout).
	Output io.Writer
}

// AuditResult holds the results of a security audit from a single tool.
type AuditResult struct {
	// Tool identifies the auditor that produced this result
	// ("composer" or "npm").
	Tool string
	// Vulnerabilities is the number of reported vulnerabilities.
	Vulnerabilities int
	// Advisories lists individual advisories, sorted deterministically.
	Advisories []AuditAdvisory
	// Error records a per-tool failure, if any.
	Error error
}

// AuditAdvisory represents a single security advisory.
type AuditAdvisory struct {
	// Package is the affected package name.
	Package string
	// Severity is the advisory severity (may be empty when the source
	// tool does not report one).
	Severity string
	// Title is the advisory headline.
	Title string
	// URL links to the advisory details.
	URL string
	// Identifiers lists identifiers such as CVE IDs.
	Identifiers []string
}
// RunAudit runs security audits on dependencies. It always runs
// `composer audit`, and additionally runs `npm audit` when the project
// contains a package.json. Per-tool failures are reported through
// AuditResult.Error rather than the returned error.
func RunAudit(ctx context.Context, opts AuditOptions) ([]AuditResult, error) {
	if opts.Dir == "" {
		workDir, err := os.Getwd()
		if err != nil {
			return nil, coreerr.E("php.RunAudit", "get working directory", err)
		}
		opts.Dir = workDir
	}
	if opts.Output == nil {
		opts.Output = os.Stdout
	}
	results := []AuditResult{runComposerAudit(ctx, opts)}
	// npm audit only applies to projects with a package.json.
	if fileExists(filepath.Join(opts.Dir, "package.json")) {
		results = append(results, runNpmAudit(ctx, opts))
	}
	return results, nil
}
// runComposerAudit executes `composer audit --format=json` in opts.Dir and
// parses the advisories from its JSON output.
//
// composer audit exits non-zero when vulnerabilities are found, so a
// command error alone is not a failure: stderr is appended to the output
// and parsing is still attempted. The returned AuditResult carries an
// Error only when the output could not be interpreted.
func runComposerAudit(ctx context.Context, opts AuditOptions) AuditResult {
	result := AuditResult{Tool: "composer"}
	cmd := exec.CommandContext(ctx, "composer", "audit", "--format=json")
	cmd.Dir = opts.Dir
	output, err := cmd.Output()
	if err != nil {
		// Keep stderr so diagnostics are not lost if parsing fails below.
		if exitErr, ok := err.(*exec.ExitError); ok {
			output = append(output, exitErr.Stderr...)
		}
	}
	var auditData struct {
		Advisories map[string][]struct {
			Title          string `json:"title"`
			Link           string `json:"link"`
			CVE            string `json:"cve"`
			Severity       string `json:"severity"`
			AffectedRanges string `json:"affectedVersions"`
		} `json:"advisories"`
	}
	jsonErr := json.Unmarshal(output, &auditData)
	if jsonErr != nil {
		// Prefer the command error; fall back to the parse error so a
		// successful run with unparseable output is no longer reported
		// silently as "0 vulnerabilities" (previous behavior dropped it).
		if err != nil {
			result.Error = err
		} else {
			result.Error = jsonErr
		}
		return result
	}
	for pkg, advisories := range auditData.Advisories {
		for _, adv := range advisories {
			result.Advisories = append(result.Advisories, AuditAdvisory{
				Package:     pkg,
				Severity:    adv.Severity,
				Title:       adv.Title,
				URL:         adv.Link,
				Identifiers: []string{adv.CVE},
			})
		}
	}
	sortAuditAdvisories(result.Advisories)
	result.Vulnerabilities = len(result.Advisories)
	return result
}
// runNpmAudit executes `npm audit` in opts.Dir. With opts.Fix set it runs
// `npm audit fix` and does not parse output; otherwise it requests JSON
// output and extracts the total count plus per-package severities.
func runNpmAudit(ctx context.Context, opts AuditOptions) AuditResult {
	result := AuditResult{Tool: "npm"}
	args := []string{"audit", "--json"}
	if opts.Fix {
		args = []string{"audit", "fix"}
	}
	cmd := exec.CommandContext(ctx, "npm", args...)
	cmd.Dir = opts.Dir
	output, err := cmd.Output()
	if err != nil {
		// npm audit exits non-zero when vulnerabilities are found; keep
		// stderr so the JSON (or error text) below is complete.
		if exitErr, ok := err.(*exec.ExitError); ok {
			output = append(output, exitErr.Stderr...)
		}
	}
	if opts.Fix {
		// NOTE(review): fix-mode command errors are intentionally not
		// recorded, matching prior behavior.
		return result
	}
	var auditData struct {
		Metadata struct {
			Vulnerabilities struct {
				Total int `json:"total"`
			} `json:"vulnerabilities"`
		} `json:"metadata"`
		Vulnerabilities map[string]struct {
			Severity string `json:"severity"`
			Via      []any  `json:"via"`
		} `json:"vulnerabilities"`
	}
	jsonErr := json.Unmarshal(output, &auditData)
	if jsonErr != nil {
		// Surface the command error if any, otherwise the parse error —
		// previously a parse failure after a clean run was swallowed and
		// reported as zero vulnerabilities.
		if err != nil {
			result.Error = err
		} else {
			result.Error = jsonErr
		}
		return result
	}
	result.Vulnerabilities = auditData.Metadata.Vulnerabilities.Total
	for pkg, vuln := range auditData.Vulnerabilities {
		result.Advisories = append(result.Advisories, AuditAdvisory{
			Package:  pkg,
			Severity: vuln.Severity,
		})
	}
	sortAuditAdvisories(result.Advisories)
	return result
}
// sortAuditAdvisories orders advisories deterministically — by package,
// then title, severity, and URL — so audit output is stable across runs
// regardless of map iteration order.
func sortAuditAdvisories(advisories []AuditAdvisory) {
	slices.SortFunc(advisories, func(left, right AuditAdvisory) int {
		if c := cmp.Compare(left.Package, right.Package); c != 0 {
			return c
		}
		if c := cmp.Compare(left.Title, right.Title); c != 0 {
			return c
		}
		if c := cmp.Compare(left.Severity, right.Severity); c != 0 {
			return c
		}
		return cmp.Compare(left.URL, right.URL)
	})
}

242
pkg/php/audit_test.go Normal file
View file

@ -0,0 +1,242 @@
package php
import (
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAuditResult_Fields(t *testing.T) {
	res := AuditResult{
		Tool:            "composer",
		Vulnerabilities: 2,
		Advisories: []AuditAdvisory{
			{Package: "vendor/pkg", Severity: "high", Title: "RCE", URL: "https://example.com/1", Identifiers: []string{"CVE-2025-0001"}},
			{Package: "vendor/other", Severity: "medium", Title: "XSS", URL: "https://example.com/2", Identifiers: []string{"CVE-2025-0002"}},
		},
	}

	assert.Equal(t, "composer", res.Tool)
	assert.Equal(t, 2, res.Vulnerabilities)
	assert.Len(t, res.Advisories, 2)
	first := res.Advisories[0]
	assert.Equal(t, "vendor/pkg", first.Package)
	assert.Equal(t, "high", first.Severity)
	assert.Equal(t, "RCE", first.Title)
	assert.Equal(t, "https://example.com/1", first.URL)
	assert.Equal(t, []string{"CVE-2025-0001"}, first.Identifiers)
}

func TestAuditAdvisory_Fields(t *testing.T) {
	adv := AuditAdvisory{
		Package:     "laravel/framework",
		Severity:    "critical",
		Title:       "SQL Injection",
		URL:         "https://example.com/advisory",
		Identifiers: []string{"CVE-2025-9999", "GHSA-xxxx"},
	}

	assert.Equal(t, "laravel/framework", adv.Package)
	assert.Equal(t, "critical", adv.Severity)
	assert.Equal(t, "SQL Injection", adv.Title)
	assert.Equal(t, "https://example.com/advisory", adv.URL)
	assert.Equal(t, []string{"CVE-2025-9999", "GHSA-xxxx"}, adv.Identifiers)
}
func TestSortAuditAdvisories_Good(t *testing.T) {
	// Deliberately unsorted: package ties broken by title.
	input := []AuditAdvisory{
		{Package: "vendor/package-b", Title: "Zulu"},
		{Package: "vendor/package-a", Title: "Beta"},
		{Package: "vendor/package-b", Title: "Alpha"},
	}

	sortAuditAdvisories(input)

	require.Len(t, input, 3)
	assert.Equal(t, "vendor/package-a", input[0].Package)
	assert.Equal(t, "Beta", input[0].Title)
	assert.Equal(t, "vendor/package-b", input[1].Package)
	assert.Equal(t, "Alpha", input[1].Title)
	assert.Equal(t, "vendor/package-b", input[2].Package)
	assert.Equal(t, "Zulu", input[2].Title)
}
// TestRunComposerAudit_ParsesJSON verifies the decode-and-collect half of
// runComposerAudit against a fixture shaped like real
// `composer audit --format=json` output, without invoking composer itself.
func TestRunComposerAudit_ParsesJSON(t *testing.T) {
	// Test the JSON parsing of composer audit output by verifying
	// the struct can be populated from JSON matching composer's format.
	composerOutput := `{
"advisories": {
"vendor/package-a": [
{
"title": "Remote Code Execution",
"link": "https://example.com/advisory/1",
"cve": "CVE-2025-1234",
"affectedVersions": ">=1.0,<1.5"
}
],
"vendor/package-b": [
{
"title": "Cross-Site Scripting",
"link": "https://example.com/advisory/2",
"cve": "CVE-2025-5678",
"affectedVersions": ">=2.0,<2.3"
},
{
"title": "Open Redirect",
"link": "https://example.com/advisory/3",
"cve": "CVE-2025-9012",
"affectedVersions": ">=2.0,<2.1"
}
]
}
}`
	// Mirror of the anonymous decode struct used inside runComposerAudit.
	var auditData struct {
		Advisories map[string][]struct {
			Title          string `json:"title"`
			Link           string `json:"link"`
			CVE            string `json:"cve"`
			AffectedRanges string `json:"affectedVersions"`
		} `json:"advisories"`
	}
	err := json.Unmarshal([]byte(composerOutput), &auditData)
	require.NoError(t, err)
	// Simulate the same parsing logic as runComposerAudit
	result := AuditResult{Tool: "composer"}
	for pkg, advisories := range auditData.Advisories {
		for _, adv := range advisories {
			result.Advisories = append(result.Advisories, AuditAdvisory{
				Package:     pkg,
				Title:       adv.Title,
				URL:         adv.Link,
				Identifiers: []string{adv.CVE},
			})
		}
	}
	// Sorting makes assertions stable despite map iteration order.
	sortAuditAdvisories(result.Advisories)
	result.Vulnerabilities = len(result.Advisories)
	assert.Equal(t, "composer", result.Tool)
	assert.Equal(t, 3, result.Vulnerabilities)
	assert.Len(t, result.Advisories, 3)
	assert.Equal(t, "vendor/package-a", result.Advisories[0].Package)
	assert.Equal(t, "Remote Code Execution", result.Advisories[0].Title)
	assert.Equal(t, "https://example.com/advisory/1", result.Advisories[0].URL)
	assert.Equal(t, []string{"CVE-2025-1234"}, result.Advisories[0].Identifiers)
	assert.Equal(t, "vendor/package-b", result.Advisories[1].Package)
	assert.Equal(t, "Cross-Site Scripting", result.Advisories[1].Title)
	assert.Equal(t, "vendor/package-b", result.Advisories[2].Package)
	assert.Equal(t, "Open Redirect", result.Advisories[2].Title)
}
// TestNpmAuditJSON_ParsesCorrectly verifies the decode-and-collect logic
// of runNpmAudit against a fixture shaped like `npm audit --json` output,
// without invoking npm itself.
func TestNpmAuditJSON_ParsesCorrectly(t *testing.T) {
	// Test npm audit JSON parsing logic
	npmOutput := `{
"metadata": {
"vulnerabilities": {
"total": 2
}
},
"vulnerabilities": {
"lodash": {
"severity": "high",
"via": ["prototype pollution"]
},
"minimist": {
"severity": "low",
"via": ["prototype pollution"]
}
}
}`
	// Mirror of the anonymous decode struct used inside runNpmAudit.
	var auditData struct {
		Metadata struct {
			Vulnerabilities struct {
				Total int `json:"total"`
			} `json:"vulnerabilities"`
		} `json:"metadata"`
		Vulnerabilities map[string]struct {
			Severity string `json:"severity"`
			Via      []any  `json:"via"`
		} `json:"vulnerabilities"`
	}
	err := json.Unmarshal([]byte(npmOutput), &auditData)
	require.NoError(t, err)
	result := AuditResult{Tool: "npm"}
	// The top-level total comes from metadata, not from counting entries.
	result.Vulnerabilities = auditData.Metadata.Vulnerabilities.Total
	for pkg, vuln := range auditData.Vulnerabilities {
		result.Advisories = append(result.Advisories, AuditAdvisory{
			Package:  pkg,
			Severity: vuln.Severity,
		})
	}
	// Sorting makes assertions stable despite map iteration order.
	sortAuditAdvisories(result.Advisories)
	assert.Equal(t, "npm", result.Tool)
	assert.Equal(t, 2, result.Vulnerabilities)
	assert.Len(t, result.Advisories, 2)
	assert.Equal(t, "lodash", result.Advisories[0].Package)
	assert.Equal(t, "high", result.Advisories[0].Severity)
	assert.Equal(t, "minimist", result.Advisories[1].Package)
	assert.Equal(t, "low", result.Advisories[1].Severity)
}
func TestRunAudit_SkipsNpmWithoutPackageJSON(t *testing.T) {
	tmp := t.TempDir() // no package.json present

	// composer may be unavailable in the test environment; that surfaces
	// via AuditResult.Error, not via RunAudit's returned error. The point
	// here is that npm audit is NOT run.
	results, err := RunAudit(context.Background(), AuditOptions{
		Dir:    tmp,
		Output: os.Stdout,
	})
	assert.NoError(t, err)
	assert.Len(t, results, 1, "should only have composer result when no package.json")
	assert.Equal(t, "composer", results[0].Tool)
}

func TestRunAudit_IncludesNpmWithPackageJSON(t *testing.T) {
	tmp := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(tmp, "package.json"), []byte(`{"name":"test"}`), 0644))

	results, runErr := RunAudit(context.Background(), AuditOptions{
		Dir:    tmp,
		Output: os.Stdout,
	})
	assert.NoError(t, runErr)
	assert.Len(t, results, 2, "should have both composer and npm results when package.json exists")
	assert.Equal(t, "composer", results[0].Tool)
	assert.Equal(t, "npm", results[1].Tool)
}
func TestAuditOptions_Defaults(t *testing.T) {
	var opts AuditOptions
	assert.Empty(t, opts.Dir)
	assert.False(t, opts.JSON)
	assert.False(t, opts.Fix)
	assert.Nil(t, opts.Output)
}

func TestAuditResult_ZeroValue(t *testing.T) {
	var res AuditResult
	assert.Empty(t, res.Tool)
	assert.Equal(t, 0, res.Vulnerabilities)
	assert.Nil(t, res.Advisories)
	assert.NoError(t, res.Error)
}

130
pkg/php/format.go Normal file
View file

@ -0,0 +1,130 @@
// Package php provides linting and quality tools for PHP projects.
package php
import (
"context"
"io"
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// fileExists reports whether the named file or directory exists.
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
// FormatOptions configures PHP code formatting.
type FormatOptions struct {
	// Dir is the project directory (defaults to current working directory).
	Dir string
	// Fix automatically fixes formatting issues; when false, Pint runs in
	// check-only (--test) mode.
	Fix bool
	// Diff shows a diff of changes instead of modifying files.
	Diff bool
	// JSON outputs results in JSON format.
	JSON bool
	// Paths limits formatting to specific paths.
	Paths []string
	// Output is the writer for output (defaults to os.Stdout).
	Output io.Writer
}
// FormatterType represents the detected formatter.
type FormatterType string

// Formatter type constants.
const (
	// FormatterPint indicates the Laravel Pint code formatter, currently
	// the only formatter this package detects.
	FormatterPint FormatterType = "pint"
)
// DetectFormatter detects which formatter is available in the project.
// Either a pint.json config or a vendor/bin/pint binary marks the project
// as using Laravel Pint.
func DetectFormatter(dir string) (FormatterType, bool) {
	candidates := []string{
		filepath.Join(dir, "pint.json"),
		filepath.Join(dir, "vendor", "bin", "pint"),
	}
	for _, candidate := range candidates {
		if fileExists(candidate) {
			return FormatterPint, true
		}
	}
	return "", false
}
// Format runs Laravel Pint to format PHP code.
//
// Zero-value options are defaulted: Dir becomes the current working
// directory and Output becomes os.Stdout. The tool's stdout and stderr
// are streamed to opts.Output.
func Format(ctx context.Context, opts FormatOptions) error {
	if opts.Dir == "" {
		workDir, err := os.Getwd()
		if err != nil {
			return coreerr.E("php.Format", "get working directory", err)
		}
		opts.Dir = workDir
	}
	if opts.Output == nil {
		opts.Output = os.Stdout
	}
	formatter, ok := DetectFormatter(opts.Dir)
	if !ok {
		return coreerr.E("php.Format", "no formatter found (install Laravel Pint: composer require laravel/pint --dev)", nil)
	}
	var (
		cmdName string
		args    []string
	)
	switch formatter {
	case FormatterPint:
		cmdName, args = buildPintCommand(opts)
	}
	cmd := exec.CommandContext(ctx, cmdName, args...)
	cmd.Dir = opts.Dir
	cmd.Stdout = opts.Output
	cmd.Stderr = opts.Output
	return cmd.Run()
}
// buildPintCommand assembles the pint invocation for the given options,
// preferring the project-local vendor/bin/pint binary. Without Fix, Pint
// runs in check-only (--test) mode.
func buildPintCommand(opts FormatOptions) (string, []string) {
	command := "pint"
	if localBin := filepath.Join(opts.Dir, "vendor", "bin", "pint"); fileExists(localBin) {
		command = localBin
	}
	var args []string
	if !opts.Fix {
		args = append(args, "--test")
	}
	if opts.Diff {
		args = append(args, "--diff")
	}
	if opts.JSON {
		args = append(args, "--format=json")
	}
	return command, append(args, opts.Paths...)
}

112
pkg/php/format_test.go Normal file
View file

@ -0,0 +1,112 @@
package php
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDetectFormatter_PintConfig(t *testing.T) {
	tmp := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(tmp, "pint.json"), []byte("{}"), 0644))

	got, ok := DetectFormatter(tmp)
	assert.True(t, ok)
	assert.Equal(t, FormatterPint, got)
}

func TestDetectFormatter_VendorBinary(t *testing.T) {
	tmp := t.TempDir()
	binDir := filepath.Join(tmp, "vendor", "bin")
	require.NoError(t, os.MkdirAll(binDir, 0755))
	require.NoError(t, os.WriteFile(filepath.Join(binDir, "pint"), []byte("#!/bin/sh\n"), 0755))

	got, ok := DetectFormatter(tmp)
	assert.True(t, ok)
	assert.Equal(t, FormatterPint, got)
}

func TestDetectFormatter_Empty(t *testing.T) {
	got, ok := DetectFormatter(t.TempDir())
	assert.False(t, ok)
	assert.Equal(t, FormatterType(""), got)
}
func TestBuildPintCommand_Defaults(t *testing.T) {
dir := t.TempDir()
opts := FormatOptions{Dir: dir}
cmdName, args := buildPintCommand(opts)
// No vendor binary, so fallback to bare "pint"
assert.Equal(t, "pint", cmdName)
// Fix is false by default, so --test should be present
assert.Contains(t, args, "--test")
}
func TestBuildPintCommand_Fix(t *testing.T) {
dir := t.TempDir()
opts := FormatOptions{Dir: dir, Fix: true}
cmdName, args := buildPintCommand(opts)
assert.Equal(t, "pint", cmdName)
assert.NotContains(t, args, "--test")
}
func TestBuildPintCommand_VendorBinary(t *testing.T) {
dir := t.TempDir()
binDir := filepath.Join(dir, "vendor", "bin")
require.NoError(t, os.MkdirAll(binDir, 0755))
require.NoError(t, os.WriteFile(filepath.Join(binDir, "pint"), []byte("#!/bin/sh\n"), 0755))
opts := FormatOptions{Dir: dir, Fix: true}
cmdName, _ := buildPintCommand(opts)
assert.Equal(t, filepath.Join(dir, "vendor", "bin", "pint"), cmdName)
}
func TestBuildPintCommand_AllFlags(t *testing.T) {
	opts := FormatOptions{
		Dir:   t.TempDir(),
		Fix:   false,
		Diff:  true,
		JSON:  true,
		Paths: []string{"src/", "tests/"},
	}

	_, args := buildPintCommand(opts)
	for _, want := range []string{"--test", "--diff", "--format=json", "src/", "tests/"} {
		assert.Contains(t, args, want)
	}
}

func TestFileExists(t *testing.T) {
	tmp := t.TempDir()
	present := filepath.Join(tmp, "exists.txt")
	require.NoError(t, os.WriteFile(present, []byte("hi"), 0644))

	assert.True(t, fileExists(present))
	assert.False(t, fileExists(filepath.Join(tmp, "nope.txt")))
}

137
pkg/php/mutation.go Normal file
View file

@ -0,0 +1,137 @@
package php
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// InfectionOptions configures Infection mutation testing.
type InfectionOptions struct {
	// Dir is the project directory (defaults to current working directory).
	Dir string
	// MinMSI is the minimum mutation score indicator (0-100); the zero
	// value falls back to a default of 50.
	MinMSI int
	// MinCoveredMSI is the minimum covered-code mutation score (0-100);
	// the zero value falls back to a default of 70.
	MinCoveredMSI int
	// Threads is the number of parallel threads; the zero value falls
	// back to a default of 4.
	Threads int
	// Filter restricts mutation to files matching this pattern.
	Filter string
	// OnlyCovered mutates only code covered by tests.
	OnlyCovered bool
	// Output is the writer for tool output (defaults to os.Stdout).
	Output io.Writer
}
// DetectInfection checks if Infection is available in the project, via
// any of its config files (infection.json, .json5, .json.dist) or the
// vendor/bin/infection binary.
func DetectInfection(dir string) bool {
	candidates := []string{
		filepath.Join(dir, "infection.json"),
		filepath.Join(dir, "infection.json5"),
		filepath.Join(dir, "infection.json.dist"),
		filepath.Join(dir, "vendor", "bin", "infection"),
	}
	for _, candidate := range candidates {
		if fileExists(candidate) {
			return true
		}
	}
	return false
}
// RunInfection runs Infection mutation testing.
//
// Zero-value options are defaulted: Dir becomes the current working
// directory and Output becomes os.Stdout. Threshold defaults (MinMSI 50,
// MinCoveredMSI 70, Threads 4) are applied by buildInfectionCommand.
func RunInfection(ctx context.Context, opts InfectionOptions) error {
	if opts.Dir == "" {
		workDir, err := os.Getwd()
		if err != nil {
			return coreerr.E("php.RunInfection", "get working directory", err)
		}
		opts.Dir = workDir
	}
	if opts.Output == nil {
		opts.Output = os.Stdout
	}
	// Delegate argument construction to buildInfectionCommand so the
	// command is built in exactly one place (the logic was previously
	// duplicated inline here), matching the Analyse/Format pattern.
	cmdName, args := buildInfectionCommand(opts)
	cmd := exec.CommandContext(ctx, cmdName, args...)
	cmd.Dir = opts.Dir
	cmd.Stdout = opts.Output
	cmd.Stderr = opts.Output
	return cmd.Run()
}
// buildInfectionCommand assembles the infection invocation for the given
// options. It is kept separate from RunInfection so argument construction
// can be unit-tested without executing the tool. (Note: the function is
// unexported; the test file lives in the same package.)
// Zero-valued thresholds fall back to: MinMSI 50, MinCoveredMSI 70,
// Threads 4.
func buildInfectionCommand(opts InfectionOptions) (string, []string) {
	cmdName := "infection"
	if localBin := filepath.Join(opts.Dir, "vendor", "bin", "infection"); fileExists(localBin) {
		cmdName = localBin
	}
	minMSI, minCoveredMSI, threads := opts.MinMSI, opts.MinCoveredMSI, opts.Threads
	if minMSI == 0 {
		minMSI = 50
	}
	if minCoveredMSI == 0 {
		minCoveredMSI = 70
	}
	if threads == 0 {
		threads = 4
	}
	args := []string{
		fmt.Sprintf("--min-msi=%d", minMSI),
		fmt.Sprintf("--min-covered-msi=%d", minCoveredMSI),
		fmt.Sprintf("--threads=%d", threads),
	}
	if opts.Filter != "" {
		args = append(args, "--filter="+opts.Filter)
	}
	if opts.OnlyCovered {
		args = append(args, "--only-covered")
	}
	return cmdName, args
}

145
pkg/php/mutation_test.go Normal file
View file

@ -0,0 +1,145 @@
package php
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// =============================================================================
// DetectInfection
// =============================================================================
func TestDetectInfection_Good_InfectionJSON(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "infection.json"))
	assert.True(t, DetectInfection(tmp))
}

func TestDetectInfection_Good_InfectionJSON5(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "infection.json5"))
	assert.True(t, DetectInfection(tmp))
}

func TestDetectInfection_Good_InfectionJSONDist(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "infection.json.dist"))
	assert.True(t, DetectInfection(tmp))
}

func TestDetectInfection_Good_VendorBinary(t *testing.T) {
	tmp := t.TempDir()
	mkFile(t, filepath.Join(tmp, "vendor", "bin", "infection"))
	assert.True(t, DetectInfection(tmp))
}

func TestDetectInfection_Bad_Empty(t *testing.T) {
	assert.False(t, DetectInfection(t.TempDir()))
}
// =============================================================================
// buildInfectionCommand
// =============================================================================
func TestBuildInfectionCommand_Good_Defaults(t *testing.T) {
dir := t.TempDir()
opts := InfectionOptions{Dir: dir}
cmdName, args := buildInfectionCommand(opts)
assert.Equal(t, "infection", cmdName)
// Defaults: minMSI=50, minCoveredMSI=70, threads=4
assert.Contains(t, args, "--min-msi=50")
assert.Contains(t, args, "--min-covered-msi=70")
assert.Contains(t, args, "--threads=4")
}
func TestBuildInfectionCommand_Good_CustomThresholds(t *testing.T) {
dir := t.TempDir()
opts := InfectionOptions{
Dir: dir,
MinMSI: 80,
MinCoveredMSI: 90,
Threads: 8,
}
_, args := buildInfectionCommand(opts)
assert.Contains(t, args, "--min-msi=80")
assert.Contains(t, args, "--min-covered-msi=90")
assert.Contains(t, args, "--threads=8")
}
func TestBuildInfectionCommand_Good_VendorBinary(t *testing.T) {
dir := t.TempDir()
vendorBin := filepath.Join(dir, "vendor", "bin", "infection")
mkFile(t, vendorBin)
opts := InfectionOptions{Dir: dir}
cmdName, _ := buildInfectionCommand(opts)
assert.Equal(t, vendorBin, cmdName)
}
func TestBuildInfectionCommand_Good_Filter(t *testing.T) {
dir := t.TempDir()
opts := InfectionOptions{Dir: dir, Filter: "src/Models"}
_, args := buildInfectionCommand(opts)
assert.Contains(t, args, "--filter=src/Models")
}
func TestBuildInfectionCommand_Good_OnlyCovered(t *testing.T) {
dir := t.TempDir()
opts := InfectionOptions{Dir: dir, OnlyCovered: true}
_, args := buildInfectionCommand(opts)
assert.Contains(t, args, "--only-covered")
}
func TestBuildInfectionCommand_Good_AllFlags(t *testing.T) {
dir := t.TempDir()
opts := InfectionOptions{
Dir: dir,
MinMSI: 60,
MinCoveredMSI: 80,
Threads: 2,
Filter: "app/",
OnlyCovered: true,
}
_, args := buildInfectionCommand(opts)
assert.Contains(t, args, "--min-msi=60")
assert.Contains(t, args, "--min-covered-msi=80")
assert.Contains(t, args, "--threads=2")
assert.Contains(t, args, "--filter=app/")
assert.Contains(t, args, "--only-covered")
}
func TestInfectionOptions_Defaults(t *testing.T) {
	var opts InfectionOptions
	assert.Empty(t, opts.Dir)
	assert.Equal(t, 0, opts.MinMSI)
	assert.Equal(t, 0, opts.MinCoveredMSI)
	assert.Equal(t, 0, opts.Threads)
	assert.Empty(t, opts.Filter)
	assert.False(t, opts.OnlyCovered)
	assert.Nil(t, opts.Output)
}

func TestDetectInfection_Good_BothConfigAndBinary(t *testing.T) {
	tmp := t.TempDir()
	// Both a config file and the vendor binary are present; detection
	// should still report true.
	require.NoError(t, os.WriteFile(filepath.Join(tmp, "infection.json5"), []byte("{}"), 0644))
	mkFile(t, filepath.Join(tmp, "vendor", "bin", "infection"))
	assert.True(t, DetectInfection(tmp))
}

73
pkg/php/pipeline.go Normal file
View file

@ -0,0 +1,73 @@
package php
// QAOptions configures the full QA pipeline.
type QAOptions struct {
	Dir   string // Project directory to run the pipeline in
	Quick bool   // Only run quick checks
	Full  bool   // Run all stages including slow checks
	Fix   bool   // Auto-fix issues where possible
	JSON  bool   // Output results as JSON
}

// QAStage represents a stage in the QA pipeline.
type QAStage string

const (
	QAStageQuick    QAStage = "quick"    // fast checks: audit, fmt, stan
	QAStageStandard QAStage = "standard" // standard checks + tests
	QAStageFull     QAStage = "full"     // all including slow scans
)

// QACheckResult holds the result of a single QA check.
type QACheckResult struct {
	Name     string  // Check identifier, e.g. "audit" or "fmt"
	Stage    QAStage // Stage the check belongs to
	Passed   bool    // Whether the check succeeded
	Duration string  // Elapsed time, pre-formatted as a string
	Error    error   // Execution error, if any
	Output   string  // Captured tool output
}

// QAResult holds the results of the full QA pipeline.
type QAResult struct {
	Stages  []QAStage       // Stages that were selected for the run
	Checks  []QACheckResult // Individual check results
	Passed  bool            // Overall pass/fail for the run
	Summary string          // Human-readable one-line summary
}
// GetQAStages returns the pipeline stages implied by the options:
// Quick restricts the run to the quick stage, Full adds the slow stage,
// and the default is quick followed by standard.
func GetQAStages(opts QAOptions) []QAStage {
	switch {
	case opts.Quick:
		return []QAStage{QAStageQuick}
	case opts.Full:
		return []QAStage{QAStageQuick, QAStageStandard, QAStageFull}
	default:
		return []QAStage{QAStageQuick, QAStageStandard}
	}
}
// GetQAChecks returns the check names to run for a stage, probing dir for
// optional tooling (Psalm, Rector, Infection). An unrecognised stage
// yields nil; a recognised stage with nothing detected yields an empty,
// non-nil slice.
func GetQAChecks(dir string, stage QAStage) []string {
	switch stage {
	case QAStageQuick:
		// Always-on fast checks; no detection required.
		return []string{"audit", "fmt", "stan"}
	case QAStageStandard:
		checks := []string{}
		if _, found := DetectPsalm(dir); found {
			checks = append(checks, "psalm")
		}
		// Tests always run in the standard stage.
		return append(checks, "test")
	case QAStageFull:
		checks := []string{}
		if DetectRector(dir) {
			checks = append(checks, "rector")
		}
		if DetectInfection(dir) {
			checks = append(checks, "infection")
		}
		return checks
	default:
		return nil
	}
}

69
pkg/php/pipeline_test.go Normal file
View file

@ -0,0 +1,69 @@
package php
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// Default options select quick + standard stages.
func TestGetQAStages_Default(t *testing.T) {
	stages := GetQAStages(QAOptions{})
	assert.Equal(t, []QAStage{QAStageQuick, QAStageStandard}, stages)
}

// Quick restricts the run to the quick stage only.
func TestGetQAStages_Quick(t *testing.T) {
	stages := GetQAStages(QAOptions{Quick: true})
	assert.Equal(t, []QAStage{QAStageQuick}, stages)
}

// Full adds the slow stage on top of quick and standard.
func TestGetQAStages_Full(t *testing.T) {
	stages := GetQAStages(QAOptions{Full: true})
	assert.Equal(t, []QAStage{QAStageQuick, QAStageStandard, QAStageFull}, stages)
}

// Quick-stage checks are fixed and need no tool detection.
func TestGetQAChecks_Quick(t *testing.T) {
	dir := t.TempDir()
	checks := GetQAChecks(dir, QAStageQuick)
	assert.Equal(t, []string{"audit", "fmt", "stan"}, checks)
}

// Without Psalm installed, the standard stage is just "test".
func TestGetQAChecks_Standard_NoPsalm(t *testing.T) {
	dir := t.TempDir()
	checks := GetQAChecks(dir, QAStageStandard)
	assert.Equal(t, []string{"test"}, checks)
}

// With a vendor/bin/psalm binary, "psalm" joins the standard stage.
func TestGetQAChecks_Standard_WithPsalm(t *testing.T) {
	dir := t.TempDir()
	// Create vendor/bin/psalm
	// NOTE(review): MkdirAll/WriteFile errors are ignored here; if setup
	// fails the assertions below will fail anyway, but require.NoError
	// would localise the failure better.
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "psalm"), []byte("#!/bin/sh"), 0755)
	checks := GetQAChecks(dir, QAStageStandard)
	assert.Contains(t, checks, "psalm")
	assert.Contains(t, checks, "test")
}

// Full stage with no optional tooling detected yields no checks.
func TestGetQAChecks_Full_NothingDetected(t *testing.T) {
	dir := t.TempDir()
	checks := GetQAChecks(dir, QAStageFull)
	assert.Empty(t, checks)
}

// Full stage picks up both rector and infection when their binaries exist.
func TestGetQAChecks_Full_WithRectorAndInfection(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "rector"), []byte("#!/bin/sh"), 0755)
	os.WriteFile(filepath.Join(vendorBin, "infection"), []byte("#!/bin/sh"), 0755)
	checks := GetQAChecks(dir, QAStageFull)
	assert.Contains(t, checks, "rector")
	assert.Contains(t, checks, "infection")
}

// An unknown stage returns nil rather than panicking.
func TestGetQAChecks_InvalidStage(t *testing.T) {
	checks := GetQAChecks(t.TempDir(), QAStage("invalid"))
	assert.Nil(t, checks)
}

105
pkg/php/refactor.go Normal file
View file

@ -0,0 +1,105 @@
package php
import (
"context"
"io"
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// RectorOptions configures Rector code refactoring.
type RectorOptions struct {
	Dir        string    // Project root; RunRector defaults it to the working directory
	Fix        bool      // Apply changes (default is dry-run)
	Diff       bool      // Show detailed diff
	ClearCache bool      // Clear cache before running
	Output     io.Writer // Destination for tool output; RunRector defaults it to stdout
}
// DetectRector reports whether the project uses Rector: either a
// rector.php config at the project root or an installed
// vendor/bin/rector binary counts as detection.
func DetectRector(dir string) bool {
	return fileExists(filepath.Join(dir, "rector.php")) ||
		fileExists(filepath.Join(dir, "vendor", "bin", "rector"))
}
// RunRector runs Rector for automated code refactoring.
//
// Dir defaults to the current working directory and Output to stdout.
// The command and arguments come from buildRectorCommand, so the
// executed invocation is exactly what the unit tests verify — the
// previous inline copy of that logic could silently drift from it.
// Rector's combined stdout/stderr is streamed to opts.Output; the
// returned error is whatever (*exec.Cmd).Run reports.
func RunRector(ctx context.Context, opts RectorOptions) error {
	if opts.Dir == "" {
		cwd, err := os.Getwd()
		if err != nil {
			return coreerr.E("php.RunRector", "get working directory", err)
		}
		opts.Dir = cwd
	}
	if opts.Output == nil {
		opts.Output = os.Stdout
	}
	// Single source of truth for binary resolution and flags.
	cmdName, args := buildRectorCommand(opts)
	cmd := exec.CommandContext(ctx, cmdName, args...)
	cmd.Dir = opts.Dir
	cmd.Stdout = opts.Output
	cmd.Stderr = opts.Output
	return cmd.Run()
}
// buildRectorCommand resolves the Rector binary (preferring the
// project-local vendor/bin copy over the bare name on PATH) and
// assembles the argument list for `rector process`. It is unexported;
// unit tests call it directly to verify the invocation.
func buildRectorCommand(opts RectorOptions) (string, []string) {
	cmdName := "rector"
	if vendorBin := filepath.Join(opts.Dir, "vendor", "bin", "rector"); fileExists(vendorBin) {
		cmdName = vendorBin
	}
	args := []string{"process"}
	if !opts.Fix {
		// Preview-only by default; files are rewritten only with Fix.
		args = append(args, "--dry-run")
	}
	if opts.Diff {
		args = append(args, "--output-format", "diff")
	}
	if opts.ClearCache {
		args = append(args, "--clear-cache")
	}
	return cmdName, args
}

122
pkg/php/refactor_test.go Normal file
View file

@ -0,0 +1,122 @@
package php
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// =============================================================================
// DetectRector
// =============================================================================

// A rector.php config alone is enough for detection.
func TestDetectRector_Good_RectorConfig(t *testing.T) {
	dir := t.TempDir()
	mkFile(t, filepath.Join(dir, "rector.php"))
	assert.True(t, DetectRector(dir))
}

// A vendor/bin/rector binary alone is enough for detection.
func TestDetectRector_Good_VendorBinary(t *testing.T) {
	dir := t.TempDir()
	mkFile(t, filepath.Join(dir, "vendor", "bin", "rector"))
	assert.True(t, DetectRector(dir))
}

// An empty project must not be detected as using Rector.
func TestDetectRector_Bad_Empty(t *testing.T) {
	dir := t.TempDir()
	assert.False(t, DetectRector(dir))
}

// =============================================================================
// buildRectorCommand
// =============================================================================

// With no binary installed and no flags, the command is the bare name
// and the run is a dry-run.
func TestBuildRectorCommand_Good_Defaults(t *testing.T) {
	dir := t.TempDir()
	opts := RectorOptions{Dir: dir}
	cmdName, args := buildRectorCommand(opts)
	assert.Equal(t, "rector", cmdName)
	// Fix is false by default, so --dry-run should be present
	assert.Contains(t, args, "process")
	assert.Contains(t, args, "--dry-run")
}

// Fix mode must drop --dry-run so changes are actually written.
func TestBuildRectorCommand_Good_Fix(t *testing.T) {
	dir := t.TempDir()
	opts := RectorOptions{Dir: dir, Fix: true}
	cmdName, args := buildRectorCommand(opts)
	assert.Equal(t, "rector", cmdName)
	assert.Contains(t, args, "process")
	assert.NotContains(t, args, "--dry-run")
}

// The vendor/bin binary takes precedence over the bare name.
func TestBuildRectorCommand_Good_VendorBinary(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin", "rector")
	mkFile(t, vendorBin)
	opts := RectorOptions{Dir: dir}
	cmdName, _ := buildRectorCommand(opts)
	assert.Equal(t, vendorBin, cmdName)
}

// Diff adds --output-format diff as two separate arguments.
func TestBuildRectorCommand_Good_Diff(t *testing.T) {
	dir := t.TempDir()
	opts := RectorOptions{Dir: dir, Diff: true}
	_, args := buildRectorCommand(opts)
	assert.Contains(t, args, "--output-format")
	assert.Contains(t, args, "diff")
}

// ClearCache adds the --clear-cache flag.
func TestBuildRectorCommand_Good_ClearCache(t *testing.T) {
	dir := t.TempDir()
	opts := RectorOptions{Dir: dir, ClearCache: true}
	_, args := buildRectorCommand(opts)
	assert.Contains(t, args, "--clear-cache")
}

// All flags together: fix suppresses dry-run while diff and clear-cache
// flags are still emitted.
func TestBuildRectorCommand_Good_AllFlags(t *testing.T) {
	dir := t.TempDir()
	opts := RectorOptions{
		Dir:        dir,
		Fix:        true,
		Diff:       true,
		ClearCache: true,
	}
	_, args := buildRectorCommand(opts)
	assert.Contains(t, args, "process")
	assert.NotContains(t, args, "--dry-run")
	assert.Contains(t, args, "--output-format")
	assert.Contains(t, args, "diff")
	assert.Contains(t, args, "--clear-cache")
}

// Zero-value RectorOptions leaves every field at its Go zero.
func TestRectorOptions_Defaults(t *testing.T) {
	opts := RectorOptions{}
	assert.Empty(t, opts.Dir)
	assert.False(t, opts.Fix)
	assert.False(t, opts.Diff)
	assert.False(t, opts.ClearCache)
	assert.Nil(t, opts.Output)
}

// Detection must still succeed when both the config and the vendor
// binary are present.
func TestDetectRector_Good_BothConfigAndBinary(t *testing.T) {
	dir := t.TempDir()
	// Create both config and vendor binary
	require.NoError(t, os.WriteFile(filepath.Join(dir, "rector.php"), []byte("<?php\n"), 0644))
	mkFile(t, filepath.Join(dir, "vendor", "bin", "rector"))
	assert.True(t, DetectRector(dir))
}

214
pkg/php/runner.go Normal file
View file

@ -0,0 +1,214 @@
package php
import (
"path/filepath"
process "forge.lthn.ai/core/go-process"
)
// QARunner builds process run specs for PHP QA checks.
type QARunner struct {
	dir string // project root the checks run in
	fix bool   // when true, tools run in auto-fix mode instead of check-only
}

// NewQARunner creates a QA runner for the given directory.
// fix selects auto-fix mode for tools that support it (pint, psalm, rector).
func NewQARunner(dir string, fix bool) *QARunner {
	return &QARunner{dir: dir, fix: fix}
}
// BuildSpecs creates RunSpecs for the given QA checks, preserving input
// order. Checks whose tooling is not installed (buildSpec returns nil)
// are silently omitted from the result.
func (r *QARunner) BuildSpecs(checks []string) []process.RunSpec {
	specs := make([]process.RunSpec, 0, len(checks))
	for _, name := range checks {
		if spec := r.buildSpec(name); spec != nil {
			specs = append(specs, *spec)
		}
	}
	return specs
}
// buildSpec creates a RunSpec for a single named check, or nil when the
// check is unknown or its tooling is not present in the project. Specs
// are chained via After so the pipeline order is
// audit -> fmt -> stan -> (psalm) -> test -> rector/infection.
// Vendor-binary resolution is centralised in toolCommand instead of
// being repeated per case.
func (r *QARunner) buildSpec(check string) *process.RunSpec {
	switch check {
	case "audit":
		// composer is always invoked from PATH; there is no vendor copy.
		return &process.RunSpec{
			Name:    "audit",
			Command: "composer",
			Args:    []string{"audit", "--format=summary"},
			Dir:     r.dir,
		}
	case "fmt":
		if _, found := DetectFormatter(r.dir); !found {
			return nil
		}
		args := []string{}
		if !r.fix {
			// Without --test, pint rewrites files; default is check-only.
			args = append(args, "--test")
		}
		return &process.RunSpec{
			Name:    "fmt",
			Command: r.toolCommand("pint"),
			Args:    args,
			Dir:     r.dir,
			After:   []string{"audit"},
		}
	case "stan":
		if _, found := DetectAnalyser(r.dir); !found {
			return nil
		}
		return &process.RunSpec{
			Name:    "stan",
			Command: r.toolCommand("phpstan"),
			Args:    []string{"analyse", "--no-progress"},
			Dir:     r.dir,
			After:   []string{"fmt"},
		}
	case "psalm":
		if _, found := DetectPsalm(r.dir); !found {
			return nil
		}
		args := []string{"--no-progress"}
		if r.fix {
			args = append(args, "--alter", "--issues=all")
		}
		return &process.RunSpec{
			Name:    "psalm",
			Command: r.toolCommand("psalm"),
			Args:    args,
			Dir:     r.dir,
			After:   []string{"stan"},
		}
	case "test":
		// Prefer Pest over PHPUnit when both are installed; no test
		// runner at all means the check is skipped.
		pestBin := filepath.Join(r.dir, "vendor", "bin", "pest")
		phpunitBin := filepath.Join(r.dir, "vendor", "bin", "phpunit")
		var cmd string
		switch {
		case fileExists(pestBin):
			cmd = pestBin
		case fileExists(phpunitBin):
			cmd = phpunitBin
		default:
			return nil
		}
		// Tests wait on the last static-analysis step actually present.
		after := []string{"stan"}
		if _, found := DetectPsalm(r.dir); found {
			after = []string{"psalm"}
		}
		return &process.RunSpec{
			Name:    "test",
			Command: cmd,
			Args:    []string{},
			Dir:     r.dir,
			After:   after,
		}
	case "rector":
		if !DetectRector(r.dir) {
			return nil
		}
		args := []string{"process"}
		if !r.fix {
			args = append(args, "--dry-run")
		}
		return &process.RunSpec{
			Name:         "rector",
			Command:      r.toolCommand("rector"),
			Args:         args,
			Dir:          r.dir,
			After:        []string{"test"},
			AllowFailure: true, // advisory: suggestions should not fail the run
		}
	case "infection":
		if !DetectInfection(r.dir) {
			return nil
		}
		return &process.RunSpec{
			Name:         "infection",
			Command:      r.toolCommand("infection"),
			Args:         []string{"--min-msi=50", "--min-covered-msi=70", "--threads=4"},
			Dir:          r.dir,
			After:        []string{"test"},
			AllowFailure: true, // mutation score is advisory here
		}
	}
	return nil
}

// toolCommand resolves a QA tool's command: the project-local
// vendor/bin binary when installed, otherwise the bare name so the
// system PATH is consulted.
func (r *QARunner) toolCommand(name string) string {
	vendorBin := filepath.Join(r.dir, "vendor", "bin", name)
	if fileExists(vendorBin) {
		return vendorBin
	}
	return name
}
// QARunResult holds the results of running QA checks.
type QARunResult struct {
	Passed       bool               `json:"passed"`        // Overall pass/fail for the run
	Duration     string             `json:"duration"`      // Total elapsed time, pre-formatted
	Results      []QACheckRunResult `json:"results"`       // Per-check outcomes
	PassedCount  int                `json:"passed_count"`  // Number of checks that passed
	FailedCount  int                `json:"failed_count"`  // Number of checks that failed
	SkippedCount int                `json:"skipped_count"` // Number of checks that were skipped
}

// QACheckRunResult holds the result of a single QA check.
type QACheckRunResult struct {
	Name     string `json:"name"`             // Check identifier, e.g. "audit"
	Passed   bool   `json:"passed"`           // Whether the check succeeded
	Skipped  bool   `json:"skipped"`          // Whether the check was skipped
	ExitCode int    `json:"exit_code"`        // Process exit code of the tool
	Duration string `json:"duration"`         // Elapsed time, pre-formatted
	Output   string `json:"output,omitempty"` // Captured tool output, if retained
}
// GetIssueMessage returns a short human-readable description of why the
// check failed. Passed or skipped checks yield the empty string, and an
// unrecognised check name falls back to a generic message.
func (r QACheckRunResult) GetIssueMessage() string {
	if r.Passed || r.Skipped {
		return ""
	}
	issueByCheck := map[string]string{
		"audit":     "found vulnerabilities",
		"fmt":       "found style issues",
		"stan":      "found analysis errors",
		"psalm":     "found type errors",
		"test":      "tests failed",
		"rector":    "found refactoring suggestions",
		"infection": "mutation testing did not pass",
	}
	if message, ok := issueByCheck[r.Name]; ok {
		return message
	}
	return "found issues"
}

245
pkg/php/runner_test.go Normal file
View file

@ -0,0 +1,245 @@
package php
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Constructor smoke test.
func TestNewQARunner(t *testing.T) {
	runner := NewQARunner("/tmp/test", false)
	assert.NotNil(t, runner)
}

// audit always produces a composer spec; it needs no detection.
func TestBuildSpecs_Audit(t *testing.T) {
	dir := t.TempDir()
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"audit"})
	require.Len(t, specs, 1)
	assert.Equal(t, "audit", specs[0].Name)
	assert.Equal(t, "composer", specs[0].Command)
	assert.Contains(t, specs[0].Args, "--format=summary")
}

// fmt in check mode uses --test and is ordered after audit.
// NOTE(review): MkdirAll/WriteFile errors are ignored in the setup of
// these tests; failures would surface only in the later assertions.
func TestBuildSpecs_Fmt_WithPint(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "pint"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"fmt"})
	require.Len(t, specs, 1)
	assert.Equal(t, "fmt", specs[0].Name)
	assert.Contains(t, specs[0].Args, "--test")
	assert.Equal(t, []string{"audit"}, specs[0].After)
}

// fmt in fix mode must drop --test so pint rewrites files.
func TestBuildSpecs_Fmt_Fix(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "pint"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, true) // fix mode
	specs := runner.BuildSpecs([]string{"fmt"})
	require.Len(t, specs, 1)
	assert.NotContains(t, specs[0].Args, "--test")
}

// With no formatter installed, fmt produces no spec.
func TestBuildSpecs_Fmt_NoPint(t *testing.T) {
	dir := t.TempDir()
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"fmt"})
	assert.Empty(t, specs)
}

// stan uses phpstan analyse and is ordered after fmt.
func TestBuildSpecs_Stan_WithPHPStan(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "phpstan"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"stan"})
	require.Len(t, specs, 1)
	assert.Equal(t, "stan", specs[0].Name)
	assert.Contains(t, specs[0].Args, "--no-progress")
	assert.Equal(t, []string{"fmt"}, specs[0].After)
}

// With no analyser installed, stan produces no spec.
func TestBuildSpecs_Stan_NotDetected(t *testing.T) {
	dir := t.TempDir()
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"stan"})
	assert.Empty(t, specs)
}

// psalm is ordered after stan.
func TestBuildSpecs_Psalm_WithPsalm(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "psalm"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"psalm"})
	require.Len(t, specs, 1)
	assert.Equal(t, "psalm", specs[0].Name)
	assert.Equal(t, []string{"stan"}, specs[0].After)
}

// psalm in fix mode adds --alter.
func TestBuildSpecs_Psalm_Fix(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "psalm"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, true)
	specs := runner.BuildSpecs([]string{"psalm"})
	require.Len(t, specs, 1)
	assert.Contains(t, specs[0].Args, "--alter")
}

// test uses pest when present and waits on stan by default.
func TestBuildSpecs_Test_Pest(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "pest"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"test"})
	require.Len(t, specs, 1)
	assert.Equal(t, "test", specs[0].Name)
	assert.Equal(t, []string{"stan"}, specs[0].After)
}

// test falls back to phpunit when pest is absent.
func TestBuildSpecs_Test_PHPUnit(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "phpunit"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"test"})
	require.Len(t, specs, 1)
	assert.Contains(t, specs[0].Command, "phpunit")
}

// When psalm is detected, test waits on psalm instead of stan.
func TestBuildSpecs_Test_WithPsalmDep(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "pest"), []byte("#!/bin/sh"), 0755)
	os.WriteFile(filepath.Join(vendorBin, "psalm"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"test"})
	require.Len(t, specs, 1)
	assert.Equal(t, []string{"psalm"}, specs[0].After)
}

// With neither pest nor phpunit installed, test produces no spec.
func TestBuildSpecs_Test_NoRunner(t *testing.T) {
	dir := t.TempDir()
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"test"})
	assert.Empty(t, specs)
}

// rector is advisory (AllowFailure), dry-run by default, after test.
func TestBuildSpecs_Rector(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "rector"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"rector"})
	require.Len(t, specs, 1)
	assert.True(t, specs[0].AllowFailure)
	assert.Contains(t, specs[0].Args, "--dry-run")
	assert.Equal(t, []string{"test"}, specs[0].After)
}

// rector in fix mode drops --dry-run.
func TestBuildSpecs_Rector_Fix(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "rector"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, true)
	specs := runner.BuildSpecs([]string{"rector"})
	require.Len(t, specs, 1)
	assert.NotContains(t, specs[0].Args, "--dry-run")
}

// infection is advisory (AllowFailure) and ordered after test.
func TestBuildSpecs_Infection(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "infection"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"infection"})
	require.Len(t, specs, 1)
	assert.True(t, specs[0].AllowFailure)
	assert.Equal(t, []string{"test"}, specs[0].After)
}

// Unknown check names are dropped silently.
func TestBuildSpecs_Unknown(t *testing.T) {
	runner := NewQARunner(t.TempDir(), false)
	specs := runner.BuildSpecs([]string{"unknown"})
	assert.Empty(t, specs)
}

// Multiple detected checks each yield one spec.
func TestBuildSpecs_Multiple(t *testing.T) {
	dir := t.TempDir()
	vendorBin := filepath.Join(dir, "vendor", "bin")
	os.MkdirAll(vendorBin, 0755)
	os.WriteFile(filepath.Join(vendorBin, "pint"), []byte("#!/bin/sh"), 0755)
	os.WriteFile(filepath.Join(vendorBin, "phpstan"), []byte("#!/bin/sh"), 0755)
	runner := NewQARunner(dir, false)
	specs := runner.BuildSpecs([]string{"audit", "fmt", "stan"})
	assert.Len(t, specs, 3)
}

// Table-driven coverage of every GetIssueMessage branch.
func TestQACheckRunResult_GetIssueMessage(t *testing.T) {
	tests := []struct {
		name     string
		result   QACheckRunResult
		expected string
	}{
		{"passed returns empty", QACheckRunResult{Passed: true, Name: "audit"}, ""},
		{"skipped returns empty", QACheckRunResult{Skipped: true, Name: "audit"}, ""},
		{"audit", QACheckRunResult{Name: "audit"}, "found vulnerabilities"},
		{"fmt", QACheckRunResult{Name: "fmt"}, "found style issues"},
		{"stan", QACheckRunResult{Name: "stan"}, "found analysis errors"},
		{"psalm", QACheckRunResult{Name: "psalm"}, "found type errors"},
		{"test", QACheckRunResult{Name: "test"}, "tests failed"},
		{"rector", QACheckRunResult{Name: "rector"}, "found refactoring suggestions"},
		{"infection", QACheckRunResult{Name: "infection"}, "mutation testing did not pass"},
		{"unknown", QACheckRunResult{Name: "whatever"}, "found issues"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.expected, tt.result.GetIssueMessage())
		})
	}
}

// QARunResult is a plain data holder; verify field round-trip.
func TestQARunResult(t *testing.T) {
	result := QARunResult{
		Passed:   true,
		Duration: "1.5s",
		Results: []QACheckRunResult{
			{Name: "audit", Passed: true},
			{Name: "fmt", Passed: true},
		},
		PassedCount: 2,
	}
	assert.True(t, result.Passed)
	assert.Equal(t, 2, result.PassedCount)
	assert.Equal(t, 0, result.FailedCount)
}

362
pkg/php/security.go Normal file
View file

@ -0,0 +1,362 @@
package php
import (
	"cmp"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"time"
	"unicode"

	coreio "forge.lthn.ai/core/go-io"
	coreerr "forge.lthn.ai/core/go-log"
)
// SecurityOptions configures security scanning.
type SecurityOptions struct {
	Dir      string // Project root; defaults to the working directory
	Severity string // Minimum severity (critical, high, medium, low)
	JSON     bool   // Output in JSON format
	SARIF    bool   // Output in SARIF format
	URL      string // URL to check HTTP headers (optional)
}

// SecurityResult holds the results of security scanning.
type SecurityResult struct {
	Checks  []SecurityCheck `json:"checks"`  // Individual check results, sorted by ID
	Summary SecuritySummary `json:"summary"` // Aggregate counts over Checks
}

// SecurityCheck represents a single security check result.
type SecurityCheck struct {
	ID          string `json:"id"`                // Stable machine identifier, e.g. "debug_mode"
	Name        string `json:"name"`              // Short human-readable title
	Description string `json:"description"`       // What the check verifies
	Severity    string `json:"severity"`          // critical / high / medium / low / info
	Passed      bool   `json:"passed"`            // Whether the check passed
	Message     string `json:"message,omitempty"` // Failure detail (or pass note)
	Fix         string `json:"fix,omitempty"`     // Suggested remediation
	CWE         string `json:"cwe,omitempty"`     // Related CWE identifier
}

// SecuritySummary summarises security check results.
type SecuritySummary struct {
	Total    int `json:"total"`    // Total checks counted
	Passed   int `json:"passed"`   // Checks that passed
	Critical int `json:"critical"` // Failed checks with critical severity
	High     int `json:"high"`     // Failed checks with high severity
	Medium   int `json:"medium"`   // Failed checks with medium severity
	Low      int `json:"low"`      // Failed checks with low severity
}
// capitalise returns s with its first letter upper-cased. It operates
// on runes rather than bytes so a multi-byte (non-ASCII) leading
// character is upper-cased correctly instead of having its first byte
// mangled, which is what the previous s[:1] slice did.
func capitalise(s string) string {
	if s == "" {
		return s
	}
	runes := []rune(s)
	runes[0] = unicode.ToUpper(runes[0])
	return string(runes)
}
// securitySeverityRank maps a severity label to a sortable rank, where
// lower numbers are more severe (critical=0 … info=4). Matching trims
// whitespace and is case-insensitive; the second return value is false
// for labels it does not recognise.
func securitySeverityRank(severity string) (int, bool) {
	ranks := map[string]int{
		"critical": 0,
		"high":     1,
		"medium":   2,
		"low":      3,
		"info":     4,
	}
	rank, ok := ranks[strings.ToLower(strings.TrimSpace(severity))]
	return rank, ok
}
// filterSecurityChecks keeps only the checks whose severity ranks at or
// above the requested minimum (e.g. minimum "high" keeps critical and
// high). A blank minimum keeps everything; an unrecognised minimum is an
// error. Checks carrying an unrecognised severity are dropped.
func filterSecurityChecks(checks []SecurityCheck, minimum string) ([]SecurityCheck, error) {
	if strings.TrimSpace(minimum) == "" {
		return checks, nil
	}
	minRank, ok := securitySeverityRank(minimum)
	if !ok {
		return nil, coreerr.E("filterSecurityChecks", "invalid security severity "+minimum, nil)
	}
	filtered := make([]SecurityCheck, 0, len(checks))
	for _, check := range checks {
		rank, known := securitySeverityRank(check.Severity)
		if !known {
			// Unknown severities cannot be compared; exclude them.
			continue
		}
		if rank <= minRank {
			filtered = append(filtered, check)
		}
	}
	return filtered, nil
}
// RunSecurityChecks runs security checks on the project.
//
// It gathers dependency-audit, .env, filesystem and (when opts.URL is
// set) HTTP-header checks, filters them by opts.Severity, sorts them by
// ID, and tallies the summary. Dir defaults to the working directory.
// Returns an error only for an unresolvable working directory or an
// invalid severity filter; individual check failures are reported in
// the result, not as errors.
func RunSecurityChecks(ctx context.Context, opts SecurityOptions) (*SecurityResult, error) {
	if opts.Dir == "" {
		cwd, err := os.Getwd()
		if err != nil {
			return nil, coreerr.E("RunSecurityChecks", "get working directory", err)
		}
		opts.Dir = cwd
	}
	result := &SecurityResult{}
	// Run composer audit
	// The audit error is deliberately discarded: a failed audit run is
	// reflected per-result via audit.Error below (best-effort scan).
	auditResults, _ := RunAudit(ctx, AuditOptions{Dir: opts.Dir})
	for _, audit := range auditResults {
		check := SecurityCheck{
			ID:          audit.Tool + "_audit",
			Name:        capitalise(audit.Tool) + " Security Audit",
			Description: "Check " + audit.Tool + " dependencies for vulnerabilities",
			Severity:    "critical",
			Passed:      audit.Vulnerabilities == 0 && audit.Error == nil,
			CWE:         "CWE-1395",
		}
		if !check.Passed {
			check.Message = fmt.Sprintf("Found %d vulnerabilities", audit.Vulnerabilities)
		}
		result.Checks = append(result.Checks, check)
	}
	// Check .env file for security issues
	envChecks := runEnvSecurityChecks(opts.Dir)
	result.Checks = append(result.Checks, envChecks...)
	// Check filesystem security
	fsChecks := runFilesystemSecurityChecks(opts.Dir)
	result.Checks = append(result.Checks, fsChecks...)
	// Check HTTP security headers when a URL is supplied.
	result.Checks = append(result.Checks, runHTTPSecurityHeaderChecks(ctx, opts.URL)...)
	filteredChecks, err := filterSecurityChecks(result.Checks, opts.Severity)
	if err != nil {
		return nil, err
	}
	result.Checks = filteredChecks
	// Keep the check order stable for callers that consume the package result
	// directly instead of going through the CLI layer.
	slices.SortFunc(result.Checks, func(a, b SecurityCheck) int {
		return cmp.Compare(a.ID, b.ID)
	})
	// Calculate summary after any severity filtering has been applied.
	for _, check := range result.Checks {
		result.Summary.Total++
		if check.Passed {
			result.Summary.Passed++
			continue
		}
		// Only failed checks are broken down by severity.
		switch check.Severity {
		case "critical":
			result.Summary.Critical++
		case "high":
			result.Summary.High++
		case "medium":
			result.Summary.Medium++
		case "low":
			result.Summary.Low++
		}
	}
	return result, nil
}
// runHTTPSecurityHeaderChecks fetches rawURL and verifies the presence
// of common security response headers (CSP, X-Frame-Options,
// X-Content-Type-Options, Referrer-Policy, plus HSTS for https URLs).
// A blank URL yields no checks; a malformed or unreachable URL yields a
// single failed check with a Fix hint.
func runHTTPSecurityHeaderChecks(ctx context.Context, rawURL string) []SecurityCheck {
	if strings.TrimSpace(rawURL) == "" {
		return nil
	}
	// Start from a failed check and flip Passed only on full success.
	check := SecurityCheck{
		ID:          "http_security_headers",
		Name:        "HTTP Security Headers",
		Description: "Check for common security headers on the supplied URL",
		Severity:    "high",
		CWE:         "CWE-693",
	}
	parsedURL, err := url.Parse(rawURL)
	if err != nil || parsedURL.Scheme == "" || parsedURL.Host == "" {
		check.Message = "Invalid URL"
		check.Fix = "Provide a valid http:// or https:// URL"
		return []SecurityCheck{check}
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, rawURL, nil)
	if err != nil {
		check.Message = err.Error()
		check.Fix = "Provide a reachable URL"
		return []SecurityCheck{check}
	}
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		check.Message = err.Error()
		check.Fix = "Ensure the URL is reachable"
		return []SecurityCheck{check}
	}
	defer resp.Body.Close()
	// Drain the body so the underlying connection can be reused.
	_, _ = io.Copy(io.Discard, resp.Body)
	requiredHeaders := []string{
		"Content-Security-Policy",
		"X-Frame-Options",
		"X-Content-Type-Options",
		"Referrer-Policy",
	}
	if strings.EqualFold(parsedURL.Scheme, "https") {
		// HSTS is only meaningful over TLS.
		requiredHeaders = append(requiredHeaders, "Strict-Transport-Security")
	}
	var missing []string
	for _, header := range requiredHeaders {
		if strings.TrimSpace(resp.Header.Get(header)) == "" {
			missing = append(missing, header)
		}
	}
	if len(missing) == 0 {
		check.Passed = true
		check.Message = "Common security headers are present"
		return []SecurityCheck{check}
	}
	check.Message = fmt.Sprintf("Missing headers: %s", strings.Join(missing, ", "))
	check.Fix = "Add the missing security headers to the response"
	return []SecurityCheck{check}
}
// runEnvSecurityChecks inspects the project's .env file for insecure
// settings: debug mode left on, a missing/weak APP_KEY, and a non-HTTPS
// APP_URL. A missing or unreadable .env yields no checks (best-effort).
//
// Parsing now trims whitespace around keys and values and strips one
// pair of matching surrounding quotes, because .env values are commonly
// quoted (e.g. APP_DEBUG="true"); the previous byte-for-byte comparison
// silently passed quoted debug values — a false negative in a security
// scanner.
func runEnvSecurityChecks(dir string) []SecurityCheck {
	var checks []SecurityCheck
	envPath := filepath.Join(dir, ".env")
	envContent, err := coreio.Local.Read(envPath)
	if err != nil {
		// No .env file is not itself a finding.
		return checks
	}
	envMap := make(map[string]string)
	for _, line := range strings.Split(envContent, "\n") {
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			continue
		}
		key := strings.TrimSpace(parts[0])
		value := strings.TrimSpace(parts[1])
		// Strip one pair of matching single or double quotes.
		if len(value) >= 2 {
			first, last := value[0], value[len(value)-1]
			if (first == '"' && last == '"') || (first == '\'' && last == '\'') {
				value = value[1 : len(value)-1]
			}
		}
		envMap[key] = value
	}
	// Check APP_DEBUG: debug mode leaks stack traces and configuration.
	if debug, ok := envMap["APP_DEBUG"]; ok {
		check := SecurityCheck{
			ID:          "debug_mode",
			Name:        "Debug Mode Disabled",
			Description: "APP_DEBUG should be false in production",
			Severity:    "critical",
			Passed:      strings.ToLower(debug) != "true",
			CWE:         "CWE-215",
		}
		if !check.Passed {
			check.Message = "Debug mode exposes sensitive information"
			check.Fix = "Set APP_DEBUG=false in .env"
		}
		checks = append(checks, check)
	}
	// Check APP_KEY: a short key means weak or placeholder encryption.
	if key, ok := envMap["APP_KEY"]; ok {
		check := SecurityCheck{
			ID:          "app_key_set",
			Name:        "Application Key Set",
			Description: "APP_KEY must be set and valid",
			Severity:    "critical",
			Passed:      len(key) >= 32,
			CWE:         "CWE-321",
		}
		if !check.Passed {
			check.Message = "Missing or weak encryption key"
			check.Fix = "Run: php artisan key:generate"
		}
		checks = append(checks, check)
	}
	// Check APP_URL for HTTPS. (Local renamed from `url`, which shadowed
	// the net/url import in this file.)
	if appURL, ok := envMap["APP_URL"]; ok {
		check := SecurityCheck{
			ID:          "https_enforced",
			Name:        "HTTPS Enforced",
			Description: "APP_URL should use HTTPS in production",
			Severity:    "high",
			Passed:      strings.HasPrefix(appURL, "https://"),
			CWE:         "CWE-319",
		}
		if !check.Passed {
			check.Message = "Application not using HTTPS"
			check.Fix = "Update APP_URL to use https://"
		}
		checks = append(checks, check)
	}
	return checks
}
// runFilesystemSecurityChecks looks for sensitive paths placed inside
// web-served directories: .env and .git under public/ or public_html/.
// Each exposure found is reported as a failed critical check; a clean
// project yields no checks.
func runFilesystemSecurityChecks(dir string) []SecurityCheck {
	var checks []SecurityCheck
	// Check .env not in public
	for _, path := range []string{"public/.env", "public_html/.env"} {
		if !fileExists(filepath.Join(dir, path)) {
			continue
		}
		checks = append(checks, SecurityCheck{
			ID:          "env_not_public",
			Name:        ".env Not Publicly Accessible",
			Description: ".env file should not be in public directory",
			Severity:    "critical",
			Passed:      false,
			Message:     "Environment file exposed to web at " + path,
			CWE:         "CWE-538",
		})
	}
	// Check .git not in public
	for _, path := range []string{"public/.git", "public_html/.git"} {
		if !fileExists(filepath.Join(dir, path)) {
			continue
		}
		checks = append(checks, SecurityCheck{
			ID:          "git_not_public",
			Name:        ".git Not Publicly Accessible",
			Description: ".git directory should not be in public",
			Severity:    "critical",
			Passed:      false,
			Message:     "Git repository exposed to web (source code leak)",
			CWE:         "CWE-538",
		})
	}
	return checks
}

304
pkg/php/security_test.go Normal file
View file

@ -0,0 +1,304 @@
package php
import (
"context"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// SecurityCheck is a plain data holder; verify field round-trip.
func TestSecurityCheck_Fields(t *testing.T) {
	check := SecurityCheck{
		ID:          "debug_mode",
		Name:        "Debug Mode Disabled",
		Description: "APP_DEBUG should be false in production",
		Severity:    "critical",
		Passed:      false,
		Message:     "Debug mode exposes sensitive information",
		Fix:         "Set APP_DEBUG=false in .env",
		CWE:         "CWE-215",
	}
	assert.Equal(t, "debug_mode", check.ID)
	assert.Equal(t, "Debug Mode Disabled", check.Name)
	assert.Equal(t, "critical", check.Severity)
	assert.False(t, check.Passed)
	assert.Equal(t, "CWE-215", check.CWE)
	assert.Equal(t, "Set APP_DEBUG=false in .env", check.Fix)
}

// SecuritySummary is a plain data holder; verify field round-trip.
func TestSecuritySummary_Fields(t *testing.T) {
	summary := SecuritySummary{
		Total:    10,
		Passed:   6,
		Critical: 2,
		High:     1,
		Medium:   1,
		Low:      0,
	}
	assert.Equal(t, 10, summary.Total)
	assert.Equal(t, 6, summary.Passed)
	assert.Equal(t, 2, summary.Critical)
	assert.Equal(t, 1, summary.High)
	assert.Equal(t, 1, summary.Medium)
	assert.Equal(t, 0, summary.Low)
}

// APP_DEBUG=true must fail the debug_mode check with fix guidance.
func TestRunEnvSecurityChecks_DebugTrue(t *testing.T) {
	dir := t.TempDir()
	envContent := "APP_DEBUG=true\n"
	err := os.WriteFile(filepath.Join(dir, ".env"), []byte(envContent), 0644)
	require.NoError(t, err)
	checks := runEnvSecurityChecks(dir)
	require.Len(t, checks, 1)
	assert.Equal(t, "debug_mode", checks[0].ID)
	assert.False(t, checks[0].Passed)
	assert.Equal(t, "critical", checks[0].Severity)
	assert.Equal(t, "Debug mode exposes sensitive information", checks[0].Message)
	assert.Equal(t, "Set APP_DEBUG=false in .env", checks[0].Fix)
}

// A well-configured .env passes all three env checks.
func TestRunEnvSecurityChecks_AllPass(t *testing.T) {
	dir := t.TempDir()
	envContent := "APP_DEBUG=false\nAPP_KEY=base64:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa=\nAPP_URL=https://example.com\n"
	err := os.WriteFile(filepath.Join(dir, ".env"), []byte(envContent), 0644)
	require.NoError(t, err)
	checks := runEnvSecurityChecks(dir)
	require.Len(t, checks, 3)
	// Build a map by ID for deterministic assertions
	byID := make(map[string]SecurityCheck)
	for _, c := range checks {
		byID[c.ID] = c
	}
	assert.True(t, byID["debug_mode"].Passed)
	assert.True(t, byID["app_key_set"].Passed)
	assert.True(t, byID["https_enforced"].Passed)
}

// An APP_KEY shorter than 32 characters fails app_key_set.
func TestRunEnvSecurityChecks_WeakKey(t *testing.T) {
	dir := t.TempDir()
	envContent := "APP_KEY=short\n"
	err := os.WriteFile(filepath.Join(dir, ".env"), []byte(envContent), 0644)
	require.NoError(t, err)
	checks := runEnvSecurityChecks(dir)
	require.Len(t, checks, 1)
	assert.Equal(t, "app_key_set", checks[0].ID)
	assert.False(t, checks[0].Passed)
	assert.Equal(t, "Missing or weak encryption key", checks[0].Message)
}

// A plain-http APP_URL fails https_enforced at high severity.
func TestRunEnvSecurityChecks_HttpUrl(t *testing.T) {
	dir := t.TempDir()
	envContent := "APP_URL=http://example.com\n"
	err := os.WriteFile(filepath.Join(dir, ".env"), []byte(envContent), 0644)
	require.NoError(t, err)
	checks := runEnvSecurityChecks(dir)
	require.Len(t, checks, 1)
	assert.Equal(t, "https_enforced", checks[0].ID)
	assert.False(t, checks[0].Passed)
	assert.Equal(t, "high", checks[0].Severity)
	assert.Equal(t, "Application not using HTTPS", checks[0].Message)
}

// A project without a .env file yields no env checks at all.
func TestRunEnvSecurityChecks_NoEnvFile(t *testing.T) {
	dir := t.TempDir()
	checks := runEnvSecurityChecks(dir)
	assert.Empty(t, checks)
}
// TestRunFilesystemSecurityChecks_EnvInPublic verifies that a .env file
// inside public/ is flagged as a critical exposure.
func TestRunFilesystemSecurityChecks_EnvInPublic(t *testing.T) {
	projectDir := t.TempDir()
	publicDir := filepath.Join(projectDir, "public")
	require.NoError(t, os.Mkdir(publicDir, 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(publicDir, ".env"), []byte("SECRET=leaked"), 0o644))

	results := runFilesystemSecurityChecks(projectDir)

	require.Len(t, results, 1)
	exposed := results[0]
	assert.Equal(t, "env_not_public", exposed.ID)
	assert.False(t, exposed.Passed)
	assert.Equal(t, "critical", exposed.Severity)
	assert.Contains(t, exposed.Message, "public/.env")
}

// TestRunFilesystemSecurityChecks_GitInPublic verifies that a .git
// directory inside public/ is flagged as a source-code leak.
func TestRunFilesystemSecurityChecks_GitInPublic(t *testing.T) {
	projectDir := t.TempDir()
	require.NoError(t, os.MkdirAll(filepath.Join(projectDir, "public", ".git"), 0o755))

	results := runFilesystemSecurityChecks(projectDir)

	require.Len(t, results, 1)
	assert.Equal(t, "git_not_public", results[0].ID)
	assert.False(t, results[0].Passed)
	assert.Contains(t, results[0].Message, "source code leak")
}

// TestRunFilesystemSecurityChecks_EmptyDir verifies that a clean project
// directory produces no filesystem findings.
func TestRunFilesystemSecurityChecks_EmptyDir(t *testing.T) {
	assert.Empty(t, runFilesystemSecurityChecks(t.TempDir()))
}
// TestRunSecurityChecks_Summary verifies that env failures propagate into
// the aggregated result: debug_mode fails (critical), https_enforced fails
// (high), and the strong key passes.
func TestRunSecurityChecks_Summary(t *testing.T) {
	projectDir := t.TempDir()
	env := "APP_DEBUG=true\nAPP_KEY=base64:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa=\nAPP_URL=http://insecure.com\n"
	require.NoError(t, os.WriteFile(filepath.Join(projectDir, ".env"), []byte(env), 0o644))

	result, err := RunSecurityChecks(context.Background(), SecurityOptions{Dir: projectDir})
	require.NoError(t, err)

	// Index by ID so assertions do not depend on check ordering.
	indexed := map[string]SecurityCheck{}
	for _, check := range result.Checks {
		indexed[check.ID] = check
	}
	assert.False(t, indexed["debug_mode"].Passed)
	assert.True(t, indexed["app_key_set"].Passed)
	assert.False(t, indexed["https_enforced"].Passed)

	// Summary counters reflect at least the two induced failures.
	assert.Greater(t, result.Summary.Total, 0)
	assert.Greater(t, result.Summary.Critical, 0)
	assert.Greater(t, result.Summary.High, 0)
}

// TestRunSecurityChecks_DefaultsDir verifies that an empty Dir falls back
// to the current working directory without returning an error.
func TestRunSecurityChecks_DefaultsDir(t *testing.T) {
	result, err := RunSecurityChecks(context.Background(), SecurityOptions{})
	require.NoError(t, err)
	assert.NotNil(t, result)
}
// TestRunSecurityChecks_SeverityFilterCritical verifies that
// Severity: "critical" keeps only critical checks and drops the
// high-severity https_enforced finding.
func TestRunSecurityChecks_SeverityFilterCritical(t *testing.T) {
	projectDir := t.TempDir()
	setupSecurityFixture(t, projectDir, "APP_DEBUG=true\nAPP_KEY=short\nAPP_URL=http://example.com\n")

	result, err := RunSecurityChecks(context.Background(), SecurityOptions{
		Dir:      projectDir,
		Severity: "critical",
	})
	require.NoError(t, err)

	require.Len(t, result.Checks, 3)
	assert.Equal(t, 3, result.Summary.Total)
	assert.Equal(t, 1, result.Summary.Passed)
	assert.Equal(t, 2, result.Summary.Critical)
	assert.Zero(t, result.Summary.High)

	indexed := map[string]SecurityCheck{}
	for _, check := range result.Checks {
		assert.Equal(t, "critical", check.Severity)
		indexed[check.ID] = check
	}
	assert.NotContains(t, indexed, "https_enforced")
	assert.Contains(t, indexed, "app_key_set")
	assert.Contains(t, indexed, "composer_audit")
	assert.Contains(t, indexed, "debug_mode")
}

// TestRunSecurityChecks_URLAddsHeaderCheck verifies that supplying a URL
// adds an http_security_headers check, and that a server missing most
// security headers fails it at high severity.
func TestRunSecurityChecks_URLAddsHeaderCheck(t *testing.T) {
	projectDir := t.TempDir()
	setupSecurityFixture(t, projectDir, "APP_DEBUG=false\nAPP_KEY=base64:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa=\nAPP_URL=https://example.com\n")

	// The server sends only one security header, so the check must fail.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Content-Type-Options", "nosniff")
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("ok"))
	}))
	defer server.Close()

	result, err := RunSecurityChecks(context.Background(), SecurityOptions{
		Dir: projectDir,
		URL: server.URL,
	})
	require.NoError(t, err)

	var headerCheck SecurityCheck
	found := false
	for _, check := range result.Checks {
		if check.ID == "http_security_headers" {
			headerCheck = check
			found = true
			break
		}
	}
	require.True(t, found)
	assert.False(t, headerCheck.Passed)
	assert.Equal(t, "high", headerCheck.Severity)
	assert.True(t, strings.Contains(headerCheck.Message, "Missing headers"))
	assert.NotEmpty(t, headerCheck.Fix)
	assert.Equal(t, 5, result.Summary.Total)
	assert.Equal(t, 4, result.Summary.Passed)
	assert.Equal(t, 1, result.Summary.High)
}

// TestRunSecurityChecks_InvalidSeverity verifies that an unknown severity
// filter is rejected with a descriptive error.
func TestRunSecurityChecks_InvalidSeverity(t *testing.T) {
	_, err := RunSecurityChecks(context.Background(), SecurityOptions{
		Dir:      t.TempDir(),
		Severity: "banana",
	})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "invalid security severity")
}

// TestCapitalise verifies capitalise on normal words, the empty string and
// a single rune.
func TestCapitalise(t *testing.T) {
	for input, expected := range map[string]string{
		"composer": "Composer",
		"npm":      "Npm",
		"":         "",
		"a":        "A",
	} {
		assert.Equal(t, expected, capitalise(input))
	}
}
// setupSecurityFixture prepares dir for security-check tests: it writes the
// given .env content and installs a stub composer executable on PATH that
// reports an empty advisories document, so composer_audit runs without
// network access.
func setupSecurityFixture(t *testing.T, dir string, envContent string) {
	t.Helper()
	require.NoError(t, os.WriteFile(filepath.Join(dir, ".env"), []byte(envContent), 0o644))
	composerBin := filepath.Join(dir, "composer")
	require.NoError(t, os.WriteFile(composerBin, []byte("#!/bin/sh\ncat <<'JSON'\n{\"advisories\":{}}\nJSON\n"), 0o755))
	// t.Setenv restores the previous PATH automatically at test end and
	// fails fast if the test was marked parallel, replacing the manual
	// Getenv/Setenv/Cleanup sequence.
	t.Setenv("PATH", dir+string(os.PathListSeparator)+os.Getenv("PATH"))
}

247
pkg/php/test.go Normal file
View file

@ -0,0 +1,247 @@
package php
import (
"bytes"
"context"
"io"
"os"
"os/exec"
"path/filepath"
coreerr "forge.lthn.ai/core/go-log"
)
// TestOptions configures PHP test execution.
type TestOptions struct {
	// Dir is the project directory (defaults to current working directory).
	Dir string
	// Filter filters tests by name pattern (passed as --filter).
	Filter string
	// Parallel runs tests in parallel: Pest gets --parallel, PHPUnit is
	// swapped for vendor/bin/paratest when that binary exists.
	Parallel bool
	// Coverage generates code coverage.
	Coverage bool
	// CoverageFormat is the coverage output format (text, html, clover).
	// Unrecognised values fall back to the runner's default text output.
	CoverageFormat string
	// Groups runs only tests in the specified groups (one --group flag each).
	Groups []string
	// JUnit outputs results in JUnit XML format via --log-junit. Console
	// output from the runner is buffered; only the XML report reaches Output.
	JUnit bool
	// JUnitPath overrides the JUnit report path. Defaults to test-results.xml.
	JUnitPath string
	// Output is the writer for test output (defaults to os.Stdout).
	Output io.Writer
}
// TestRunner represents the detected test runner. It is a string enum so it
// serialises cleanly; see DetectTestRunner for how a value is chosen.
type TestRunner string

// Test runner type constants.
const (
	// TestRunnerPest indicates the Pest testing framework.
	TestRunnerPest TestRunner = "pest"
	// TestRunnerPHPUnit indicates the PHPUnit testing framework.
	TestRunnerPHPUnit TestRunner = "phpunit"
)
// DetectTestRunner reports which test framework the project at dir uses.
// The presence of tests/Pest.php selects Pest; anything else falls back to
// PHPUnit.
func DetectTestRunner(dir string) TestRunner {
	if fileExists(filepath.Join(dir, "tests", "Pest.php")) {
		return TestRunnerPest
	}
	return TestRunnerPHPUnit
}
// RunTests runs PHPUnit or Pest tests.
//
// The runner is chosen by DetectTestRunner. When opts.JUnit is set, the
// runner's console output is buffered and only the JUnit XML report is
// written to opts.Output, keeping the stream machine-readable. The child
// process always runs with XDEBUG_MODE=coverage appended to its environment.
func RunTests(ctx context.Context, opts TestOptions) error {
	if opts.Dir == "" {
		cwd, err := os.Getwd()
		if err != nil {
			return coreerr.E("php.RunTests", "get working directory", err)
		}
		opts.Dir = cwd
	}
	if opts.Output == nil {
		opts.Output = os.Stdout
	}
	if opts.JUnit && opts.JUnitPath == "" {
		// No explicit report path: stage a temp file for --log-junit and
		// remove it after the report has been emitted below.
		reportFile, err := os.CreateTemp("", "core-qa-junit-*.xml")
		if err != nil {
			return coreerr.E("php.RunTests", "create JUnit report file", err)
		}
		// Close immediately — only the path is needed; the PHP runner
		// writes the file itself.
		if closeErr := reportFile.Close(); closeErr != nil {
			return coreerr.E("php.RunTests", "close JUnit report file", closeErr)
		}
		opts.JUnitPath = reportFile.Name()
		defer os.Remove(opts.JUnitPath)
	}
	// Detect test runner
	runner := DetectTestRunner(opts.Dir)
	// Build command based on runner
	var cmdName string
	var args []string
	switch runner {
	case TestRunnerPest:
		cmdName, args = buildPestCommand(opts)
	default:
		cmdName, args = buildPHPUnitCommand(opts)
	}
	cmd := exec.CommandContext(ctx, cmdName, args...)
	cmd.Dir = opts.Dir
	cmd.Stdin = os.Stdin
	// Set XDEBUG_MODE=coverage to avoid PHPUnit 11 warning
	cmd.Env = append(os.Environ(), "XDEBUG_MODE=coverage")
	if !opts.JUnit {
		// Human-readable mode: stream runner output straight through.
		cmd.Stdout = opts.Output
		cmd.Stderr = opts.Output
		return cmd.Run()
	}
	// JUnit mode: capture console output so opts.Output carries only the
	// XML report. NOTE(review): the buffered output is discarded even when
	// the run fails — presumably deliberate to keep the stream parseable;
	// confirm before surfacing it in diagnostics.
	var machineOutput bytes.Buffer
	cmd.Stdout = &machineOutput
	cmd.Stderr = &machineOutput
	runErr := cmd.Run()
	// Emit the report even on a failed run so callers still receive the
	// structured results; a run failure takes precedence over report errors.
	reportErr := emitJUnitReport(opts.Output, opts.JUnitPath)
	if runErr != nil {
		return runErr
	}
	return reportErr
}
// RunParallel runs the project's tests with parallel execution enabled. It
// is a convenience wrapper around RunTests.
func RunParallel(ctx context.Context, opts TestOptions) error {
	parallelOpts := opts
	parallelOpts.Parallel = true
	return RunTests(ctx, parallelOpts)
}
// buildPestCommand assembles the executable name and argument list for a
// Pest invocation. The project-local vendor/bin/pest binary is preferred
// when present; otherwise "pest" is resolved via PATH.
func buildPestCommand(opts TestOptions) (string, []string) {
	command := "pest"
	if vendored := filepath.Join(opts.Dir, "vendor", "bin", "pest"); fileExists(vendored) {
		command = vendored
	}
	var arguments []string
	if opts.Filter != "" {
		arguments = append(arguments, "--filter", opts.Filter)
	}
	if opts.Parallel {
		arguments = append(arguments, "--parallel")
	}
	if opts.Coverage {
		// Map the requested format to Pest's coverage flags; anything
		// unrecognised falls back to plain --coverage.
		switch opts.CoverageFormat {
		case "html":
			arguments = append(arguments, "--coverage-html", "coverage")
		case "clover":
			arguments = append(arguments, "--coverage-clover", "coverage.xml")
		default:
			arguments = append(arguments, "--coverage")
		}
	}
	for _, group := range opts.Groups {
		arguments = append(arguments, "--group", group)
	}
	if opts.JUnit {
		arguments = append(arguments, "--log-junit", junitReportPath(opts))
	}
	return command, arguments
}
// buildPHPUnitCommand assembles the executable name and argument list for a
// PHPUnit invocation. The project-local vendor/bin/phpunit binary is
// preferred when present, and vendor/bin/paratest replaces it when parallel
// execution is requested and available.
func buildPHPUnitCommand(opts TestOptions) (string, []string) {
	command := "phpunit"
	if vendored := filepath.Join(opts.Dir, "vendor", "bin", "phpunit"); fileExists(vendored) {
		command = vendored
	}
	if opts.Parallel {
		// PHPUnit itself has no parallel mode; paratest provides it.
		if paratest := filepath.Join(opts.Dir, "vendor", "bin", "paratest"); fileExists(paratest) {
			command = paratest
		}
	}
	var arguments []string
	if opts.Filter != "" {
		arguments = append(arguments, "--filter", opts.Filter)
	}
	if opts.Coverage {
		// Map the requested format to PHPUnit's coverage flags; anything
		// unrecognised falls back to --coverage-text.
		switch opts.CoverageFormat {
		case "html":
			arguments = append(arguments, "--coverage-html", "coverage")
		case "clover":
			arguments = append(arguments, "--coverage-clover", "coverage.xml")
		default:
			arguments = append(arguments, "--coverage-text")
		}
	}
	for _, group := range opts.Groups {
		arguments = append(arguments, "--group", group)
	}
	if opts.JUnit {
		arguments = append(arguments, "--log-junit", junitReportPath(opts))
	}
	return command, arguments
}
// junitReportPath returns the JUnit XML report destination, favouring an
// explicit override and defaulting to test-results.xml otherwise.
func junitReportPath(opts TestOptions) string {
	if path := opts.JUnitPath; path != "" {
		return path
	}
	return "test-results.xml"
}
// emitJUnitReport copies the JUnit XML report at reportPath to output,
// appending a trailing newline when the report does not already end in one.
func emitJUnitReport(output io.Writer, reportPath string) error {
	report, err := os.ReadFile(reportPath)
	if err != nil {
		return coreerr.E("php.emitJUnitReport", "read JUnit report", err)
	}
	if _, err := output.Write(report); err != nil {
		return coreerr.E("php.emitJUnitReport", "write JUnit report", err)
	}
	endsWithNewline := len(report) > 0 && report[len(report)-1] == '\n'
	if endsWithNewline {
		return nil
	}
	if _, err := io.WriteString(output, "\n"); err != nil {
		return coreerr.E("php.emitJUnitReport", "terminate JUnit report", err)
	}
	return nil
}

317
pkg/php/test_test.go Normal file
View file

@ -0,0 +1,317 @@
package php
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// =============================================================================
// DetectTestRunner
// =============================================================================
// TestDetectTestRunner_Good_Pest verifies that tests/Pest.php selects Pest.
func TestDetectTestRunner_Good_Pest(t *testing.T) {
	projectDir := t.TempDir()
	mkFile(t, filepath.Join(projectDir, "tests", "Pest.php"))
	assert.Equal(t, TestRunnerPest, DetectTestRunner(projectDir))
}

// TestDetectTestRunner_Good_PHPUnit verifies that an empty project defaults
// to PHPUnit.
func TestDetectTestRunner_Good_PHPUnit(t *testing.T) {
	assert.Equal(t, TestRunnerPHPUnit, DetectTestRunner(t.TempDir()))
}

// TestDetectTestRunner_Good_PHPUnitWithTestsDir verifies that a tests/
// directory alone is not enough — Pest.php itself must exist.
func TestDetectTestRunner_Good_PHPUnitWithTestsDir(t *testing.T) {
	projectDir := t.TempDir()
	require.NoError(t, os.MkdirAll(filepath.Join(projectDir, "tests"), 0o755))
	assert.Equal(t, TestRunnerPHPUnit, DetectTestRunner(projectDir))
}
// =============================================================================
// buildPestCommand
// =============================================================================
func TestBuildPestCommand_Good_Defaults(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir}
cmdName, args := buildPestCommand(opts)
assert.Equal(t, "pest", cmdName)
assert.Empty(t, args)
}
func TestBuildPestCommand_Good_VendorBinary(t *testing.T) {
dir := t.TempDir()
vendorBin := filepath.Join(dir, "vendor", "bin", "pest")
mkFile(t, vendorBin)
opts := TestOptions{Dir: dir}
cmdName, _ := buildPestCommand(opts)
assert.Equal(t, vendorBin, cmdName)
}
func TestBuildPestCommand_Good_Filter(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Filter: "TestLogin"}
_, args := buildPestCommand(opts)
assert.Contains(t, args, "--filter")
assert.Contains(t, args, "TestLogin")
}
func TestBuildPestCommand_Good_Parallel(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Parallel: true}
_, args := buildPestCommand(opts)
assert.Contains(t, args, "--parallel")
}
func TestBuildPestCommand_Good_CoverageDefault(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Coverage: true}
_, args := buildPestCommand(opts)
assert.Contains(t, args, "--coverage")
}
func TestBuildPestCommand_Good_CoverageHTML(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Coverage: true, CoverageFormat: "html"}
_, args := buildPestCommand(opts)
assert.Contains(t, args, "--coverage-html")
assert.Contains(t, args, "coverage")
}
func TestBuildPestCommand_Good_CoverageClover(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Coverage: true, CoverageFormat: "clover"}
_, args := buildPestCommand(opts)
assert.Contains(t, args, "--coverage-clover")
assert.Contains(t, args, "coverage.xml")
}
func TestBuildPestCommand_Good_Groups(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Groups: []string{"unit", "integration"}}
_, args := buildPestCommand(opts)
// Should have --group unit --group integration
groupCount := 0
for _, a := range args {
if a == "--group" {
groupCount++
}
}
assert.Equal(t, 2, groupCount)
assert.Contains(t, args, "unit")
assert.Contains(t, args, "integration")
}
func TestBuildPestCommand_Good_JUnit(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, JUnit: true}
_, args := buildPestCommand(opts)
assert.Contains(t, args, "--log-junit")
assert.Contains(t, args, "test-results.xml")
}
func TestBuildPestCommand_Good_AllFlags(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{
Dir: dir,
Filter: "TestFoo",
Parallel: true,
Coverage: true,
CoverageFormat: "clover",
Groups: []string{"smoke"},
JUnit: true,
}
_, args := buildPestCommand(opts)
assert.Contains(t, args, "--filter")
assert.Contains(t, args, "TestFoo")
assert.Contains(t, args, "--parallel")
assert.Contains(t, args, "--coverage-clover")
assert.Contains(t, args, "--group")
assert.Contains(t, args, "smoke")
assert.Contains(t, args, "--log-junit")
}
// =============================================================================
// buildPHPUnitCommand
// =============================================================================
func TestBuildPHPUnitCommand_Good_Defaults(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir}
cmdName, args := buildPHPUnitCommand(opts)
assert.Equal(t, "phpunit", cmdName)
assert.Empty(t, args)
}
func TestBuildPHPUnitCommand_Good_VendorBinary(t *testing.T) {
dir := t.TempDir()
vendorBin := filepath.Join(dir, "vendor", "bin", "phpunit")
mkFile(t, vendorBin)
opts := TestOptions{Dir: dir}
cmdName, _ := buildPHPUnitCommand(opts)
assert.Equal(t, vendorBin, cmdName)
}
func TestBuildPHPUnitCommand_Good_Filter(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Filter: "TestCheckout"}
_, args := buildPHPUnitCommand(opts)
assert.Contains(t, args, "--filter")
assert.Contains(t, args, "TestCheckout")
}
func TestBuildPHPUnitCommand_Good_Parallel_WithParatest(t *testing.T) {
dir := t.TempDir()
paratestBin := filepath.Join(dir, "vendor", "bin", "paratest")
mkFile(t, paratestBin)
opts := TestOptions{Dir: dir, Parallel: true}
cmdName, _ := buildPHPUnitCommand(opts)
assert.Equal(t, paratestBin, cmdName)
}
func TestBuildPHPUnitCommand_Good_Parallel_NoParatest(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Parallel: true}
cmdName, _ := buildPHPUnitCommand(opts)
// Falls back to phpunit when paratest is not available
assert.Equal(t, "phpunit", cmdName)
}
func TestBuildPHPUnitCommand_Good_Parallel_VendorPHPUnit_WithParatest(t *testing.T) {
dir := t.TempDir()
mkFile(t, filepath.Join(dir, "vendor", "bin", "phpunit"))
paratestBin := filepath.Join(dir, "vendor", "bin", "paratest")
mkFile(t, paratestBin)
opts := TestOptions{Dir: dir, Parallel: true}
cmdName, _ := buildPHPUnitCommand(opts)
// paratest takes precedence over phpunit when parallel is requested
assert.Equal(t, paratestBin, cmdName)
}
func TestBuildPHPUnitCommand_Good_CoverageDefault(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Coverage: true}
_, args := buildPHPUnitCommand(opts)
assert.Contains(t, args, "--coverage-text")
}
func TestBuildPHPUnitCommand_Good_CoverageHTML(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Coverage: true, CoverageFormat: "html"}
_, args := buildPHPUnitCommand(opts)
assert.Contains(t, args, "--coverage-html")
assert.Contains(t, args, "coverage")
}
func TestBuildPHPUnitCommand_Good_CoverageClover(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Coverage: true, CoverageFormat: "clover"}
_, args := buildPHPUnitCommand(opts)
assert.Contains(t, args, "--coverage-clover")
assert.Contains(t, args, "coverage.xml")
}
func TestBuildPHPUnitCommand_Good_Groups(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, Groups: []string{"api", "slow"}}
_, args := buildPHPUnitCommand(opts)
groupCount := 0
for _, a := range args {
if a == "--group" {
groupCount++
}
}
assert.Equal(t, 2, groupCount)
assert.Contains(t, args, "api")
assert.Contains(t, args, "slow")
}
func TestBuildPHPUnitCommand_Good_JUnit(t *testing.T) {
dir := t.TempDir()
opts := TestOptions{Dir: dir, JUnit: true}
_, args := buildPHPUnitCommand(opts)
assert.Contains(t, args, "--log-junit")
assert.Contains(t, args, "test-results.xml")
assert.NotContains(t, args, "--testdox")
}
func TestBuildPHPUnitCommand_Good_AllFlags(t *testing.T) {
dir := t.TempDir()
mkFile(t, filepath.Join(dir, "vendor", "bin", "paratest"))
opts := TestOptions{
Dir: dir,
Filter: "TestBar",
Parallel: true,
Coverage: true,
CoverageFormat: "html",
Groups: []string{"feature"},
JUnit: true,
}
cmdName, args := buildPHPUnitCommand(opts)
assert.Equal(t, filepath.Join(dir, "vendor", "bin", "paratest"), cmdName)
assert.Contains(t, args, "--filter")
assert.Contains(t, args, "TestBar")
assert.Contains(t, args, "--coverage-html")
assert.Contains(t, args, "--group")
assert.Contains(t, args, "feature")
assert.Contains(t, args, "--log-junit")
assert.NotContains(t, args, "--testdox")
}

53
tests/cli/_lib/run.sh Executable file
View file

@ -0,0 +1,53 @@
#!/usr/bin/env bash
# run_capture_stdout EXPECTED_STATUS OUTPUT_FILE CMD [ARGS...]
#
# Runs CMD with stdout redirected to OUTPUT_FILE (stderr passes through) and
# checks that it exits with EXPECTED_STATUS. On mismatch it prints a
# diagnostic, dumps the captured stdout to stderr, and returns 1.
run_capture_stdout() {
	local expected_status="$1"
	local output_file="$2"
	shift 2
	# Remember whether errexit was active so we restore the caller's state
	# instead of unconditionally forcing `set -e` back on.
	local errexit_was_on=0
	case $- in *e*) errexit_was_on=1 ;; esac
	set +e
	"$@" >"$output_file"
	local status=$?
	if [[ "$errexit_was_on" -eq 1 ]]; then
		set -e
	fi
	if [[ "$status" -ne "$expected_status" ]]; then
		printf 'expected exit %s, got %s\n' "$expected_status" "$status" >&2
		if [[ -s "$output_file" ]]; then
			printf 'stdout:\n' >&2
			cat "$output_file" >&2
		fi
		return 1
	fi
}
# run_capture_all EXPECTED_STATUS OUTPUT_FILE CMD [ARGS...]
#
# Like run_capture_stdout but captures stdout AND stderr into OUTPUT_FILE.
# On an exit-status mismatch it prints a diagnostic, dumps the captured
# output to stderr, and returns 1.
run_capture_all() {
	local expected_status="$1"
	local output_file="$2"
	shift 2
	# Remember whether errexit was active so we restore the caller's state
	# instead of unconditionally forcing `set -e` back on.
	local errexit_was_on=0
	case $- in *e*) errexit_was_on=1 ;; esac
	set +e
	"$@" >"$output_file" 2>&1
	local status=$?
	if [[ "$errexit_was_on" -eq 1 ]]; then
		set -e
	fi
	if [[ "$status" -ne "$expected_status" ]]; then
		printf 'expected exit %s, got %s\n' "$expected_status" "$status" >&2
		if [[ -s "$output_file" ]]; then
			printf 'output:\n' >&2
			cat "$output_file" >&2
		fi
		return 1
	fi
}
# assert_jq EXPRESSION FILE — succeeds when jq evaluates EXPRESSION truthily
# against FILE; output is discarded, only the exit status matters.
assert_jq() {
	jq -e "$1" "$2" >/dev/null
}

# assert_contains NEEDLE FILE — succeeds when FILE contains the literal
# string NEEDLE (fixed-string, quiet grep).
assert_contains() {
	grep -Fq "$1" "$2"
}

View file

@ -0,0 +1,13 @@
version: "3"
tasks:
test:
cmds:
- task -d check test
- task -d catalog/list test
- task -d catalog/show test
- task -d detect test
- task -d tools test
- task -d init test
- task -d run test

View file

@ -0,0 +1,18 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../../cmd/core-lint
lang="$(cat fixtures/lang.txt)"
output="$(mktemp)"
run_capture_all 0 "$output" ./bin/core-lint lint catalog list --lang "$lang"
grep -Fq "go-sec-001" "$output"
grep -Fq "rule(s)" "$output"
EOF

View file

@ -0,0 +1 @@
go

View file

@ -0,0 +1,18 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../../cmd/core-lint
rule_id="$(cat fixtures/rule-id.txt)"
output="$(mktemp)"
run_capture_stdout 0 "$output" ./bin/core-lint lint catalog show "$rule_id"
jq -e '.id == "go-sec-001" and .severity == "high" and (.languages | index("go") != null)' "$output" >/dev/null
jq -e '.title == "SQL wildcard injection in LIKE clauses"' "$output" >/dev/null
EOF

View file

@ -0,0 +1 @@
go-sec-001

View file

@ -0,0 +1,17 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
output="$(mktemp)"
run_capture_stdout 0 "$output" ./bin/core-lint lint check --format=json fixtures
jq -e 'length == 1 and .[0].rule_id == "go-cor-003" and .[0].file == "input.go"' "$output" >/dev/null
jq -e '.[0].severity == "medium" and .[0].fix != ""' "$output" >/dev/null
EOF

View file

@ -0,0 +1,12 @@
//go:build ignore
package sample
type service struct{}
func (service) Process(string) error { return nil }
func Run() {
svc := service{}
_ = svc.Process("data")
}

View file

@ -0,0 +1,17 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
output="$(mktemp)"
run_capture_stdout 0 "$output" ./bin/core-lint detect --output json ../check/fixtures
jq -e '. == ["go"]' "$output" >/dev/null
EOF

View file

@ -0,0 +1,19 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
project_dir="$(mktemp -d)"
output="$(mktemp)"
run_capture_stdout 0 "$output" ./bin/core-lint init "$project_dir"
test -f "$project_dir/.core/lint.yaml"
grep -Fq "golangci-lint" "$project_dir/.core/lint.yaml"
EOF

View file

@ -0,0 +1,19 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
output="$(mktemp)"
run_capture_stdout 1 "$output" ./bin/core-lint run --output json --fail-on warning ../check/fixtures
jq -e '.findings | length == 1' "$output" >/dev/null
jq -e '.findings[0].code == "go-cor-003"' "$output" >/dev/null
jq -e '.summary.warnings == 1 and .summary.passed == false' "$output" >/dev/null
EOF

View file

@ -0,0 +1,18 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core-lint ../../../../cmd/core-lint
output="$(mktemp)"
run_capture_stdout 0 "$output" ./bin/core-lint tools --output json --lang go
jq -e '.[] | select(.name == "golangci-lint")' "$output" >/dev/null
jq -e '.[] | select(.name == "govulncheck")' "$output" >/dev/null
EOF

View file

@ -0,0 +1,11 @@
// Package main is the CLI harness binary used by the qa end-to-end tests:
// it wires the qa command group into the shared core CLI runtime.
package main

import (
	"forge.lthn.ai/core/cli/pkg/cli"
	// Imported for its side effect of registering the qa commands.
	_ "forge.lthn.ai/core/lint/cmd/qa"
)

func main() {
	// Name the binary "core" so help text and command paths match production.
	cli.WithAppName("core")
	cli.Main()
}

View file

@ -0,0 +1,20 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
cd fixtures/project
output="$(mktemp)"
export PATH="$(pwd)/../bin:$PATH"
run_capture_stdout 1 "$output" ../../bin/core qa audit --json
jq -e '.results[0].tool == "composer" and .results[0].vulnerabilities == 1' "$output" >/dev/null
jq -e '.has_vulnerabilities == true and .vulnerabilities == 1' "$output" >/dev/null
jq -e '.results[0].advisories[0].package == "vendor/package-a"' "$output" >/dev/null
EOF

View file

@ -0,0 +1,17 @@
#!/usr/bin/env sh
# Stub composer binary for the qa audit CLI test: prints a canned audit
# report containing exactly one advisory for vendor/package-a and exits 1,
# mirroring composer's behaviour when vulnerabilities are found.
cat <<'JSON'
{
"advisories": {
"vendor/package-a": [
{
"title": "Remote Code Execution",
"link": "https://example.com/advisory/1",
"cve": "CVE-2026-0001",
"affectedVersions": ">=1.0,<1.5"
}
]
}
}
JSON
exit 1

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,5 @@
<?php
function bad_example() {
return "bad";
}

View file

@ -0,0 +1,18 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
output="$(mktemp)"
run_capture_stdout 1 "$output" ./bin/core qa docblock --json --threshold 100 fixtures/src
jq -e '(.passed == false) and (.coverage < .threshold)' "$output" >/dev/null
jq -e '(.missing | length == 1) and (.missing[0].name == "Beta")' "$output" >/dev/null
jq -e '(.warnings | length == 1) and (.warnings[0].path == "fixtures/src")' "$output" >/dev/null
EOF

View file

@ -0,0 +1,6 @@
//go:build ignore
package sample
// Alpha demonstrates a documented exported function.
func Alpha() {}

View file

@ -0,0 +1,5 @@
//go:build ignore
package sample
func Beta() {}

View file

@ -0,0 +1,5 @@
//go:build ignore
package sample
func Broken(

View file

@ -0,0 +1,18 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
cd fixtures/project
output="$(mktemp)"
export PATH="../bin:$PATH"
run_capture_stdout 0 "$output" ../../bin/core qa fmt --json
jq -e '.tool == "pint" and .changed == true and .files[0].path == "src/Bad.php"' "$output" >/dev/null
EOF

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,5 @@
<?php
function bad_example() {
return "bad";
}

View file

@ -0,0 +1,3 @@
#!/usr/bin/env sh
# Stub pint binary: emits a canned JSON fix report for the qa fmt CLI test.
printf '%s\n' '{"tool":"pint","changed":true,"files":[{"path":"src/Bad.php","fixed":1}]}'

View file

@ -0,0 +1,20 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
output="$(mktemp)"
export PATH="$(pwd)/fixtures/bin:$PATH"
run_capture_stdout 0 "$output" ./bin/core qa health --registry fixtures/repos.yaml --json
jq -e '.summary.total_repos == 2 and .summary.filtered_repos == 2' "$output" >/dev/null
jq -e '.summary.passing == 1 and .summary.errors == 1' "$output" >/dev/null
jq -e '.repos[0].status == "error" and .repos[0].name == "beta"' "$output" >/dev/null
jq -e '.repos[1].status == "passing" and .repos[1].name == "alpha"' "$output" >/dev/null
EOF

View file

@ -0,0 +1,26 @@
#!/usr/bin/env sh
# Stub gh binary for the qa health CLI test. Dispatches on the full argument
# string: forge/alpha gets a successful workflow-run listing, forge/beta
# simulates a lookup failure, and anything else errors so unexpected
# invocations fail loudly.
case "$*" in
*"--repo forge/alpha"*)
cat <<'JSON'
[
{
"status": "completed",
"conclusion": "success",
"name": "CI",
"headSha": "abc123",
"updatedAt": "2026-03-30T00:00:00Z",
"url": "https://example.com/alpha/run/1"
}
]
JSON
;;
*"--repo forge/beta"*)
# Simulated API failure: the health check should report this repo as "error".
printf '%s\n' 'simulated workflow lookup failure' >&2
exit 1
;;
*)
printf '%s\n' "unexpected gh invocation: $*" >&2
exit 1
;;
esac

View file

@ -0,0 +1,8 @@
version: 1
org: forge
base_path: .
repos:
alpha:
type: module
beta:
type: module

View file

@ -0,0 +1,22 @@
version: "3"
tasks:
test:
cmds:
- |
bash <<'EOF'
set -euo pipefail
source ../../_lib/run.sh
go build -trimpath -ldflags="-s -w" -o bin/core ../_harness
cd fixtures/project
output="$(mktemp)"
run_capture_all 1 "$output" ../../bin/core qa infection --min-msi 80 --min-covered-msi 90 --threads 8 --filter src --only-covered
grep -Fq "Mutation Testing" "$output"
grep -Fq -- "--min-msi=80" "$output"
grep -Fq -- "--min-covered-msi=90" "$output"
grep -Fq -- "--threads=8" "$output"
grep -Fq -- "--filter=src" "$output"
grep -Fq -- "--only-covered" "$output"
EOF

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,5 @@
<?php
function bad_example() {
return "bad";
}

Some files were not shown because too many files have changed in this diff Show more