feat(devkit): Phase 4 — vulnerability scanning, complexity analysis, coverage trending
- govulncheck JSON output parsing with structured VulnFinding types (13 tests)
- Cyclomatic complexity analysis via go/ast with configurable threshold (21 tests)
- Coverage snapshot persistence and regression detection with CoverageStore (19 tests)

53 new tests, 68 total devkit tests. All pass with -race.

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
7aaa2154b6
commit
e20083d51b
7 changed files with 1622 additions and 3 deletions
6
TODO.md
6
TODO.md
|
|
@ -58,9 +58,9 @@ Dispatched from core/go orchestration. Pick up tasks in order.
|
|||
|
||||
## Phase 4: DevKit Expansion
|
||||
|
||||
- [ ] **Vulnerability scanning** — Integrate `govulncheck` output parsing into devkit findings.
|
||||
- [ ] **Complexity thresholds** — Configurable cyclomatic complexity threshold. Flag functions exceeding it.
|
||||
- [ ] **Coverage trending** — Store coverage snapshots, detect regressions between runs.
|
||||
- [x] **Vulnerability scanning** — `VulnCheck()` runs `govulncheck -json` and `ParseVulnCheckJSON()` parses newline-delimited JSON into `VulnFinding` structs (ID, aliases, package, called function, description, fixed version, module path). Handles malformed lines, missing OSV entries, empty traces. 13 tests in `vulncheck_test.go`.
|
||||
- [x] **Complexity thresholds** — `AnalyseComplexity()` walks Go source via `go/ast` with configurable threshold (default 15). Counts: if, for, range, case, comm, &&, ||, type switch, select. Skips vendor/, test files, hidden dirs. `AnalyseComplexitySource()` for in-memory parsing. 21 tests in `complexity_test.go`.
|
||||
- [x] **Coverage trending** — `ParseCoverProfile()` parses coverprofile format, `ParseCoverOutput()` parses human-readable `go test -cover` output. `CoverageStore` with JSON persistence (Append/Load/Latest). `CompareCoverage()` diffs snapshots, flags regressions/improvements/new/removed packages. 19 tests in `coverage_test.go`.
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
200
devkit/complexity.go
Normal file
200
devkit/complexity.go
Normal file
|
|
@ -0,0 +1,200 @@
|
|||
// Package devkit provides a developer toolkit for common automation commands.
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
package devkit
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ComplexityConfig holds the settings for a cyclomatic complexity scan.
type ComplexityConfig struct {
	// Threshold is the minimum complexity a function must reach to be
	// reported; values <= 0 fall back to the default of 15.
	Threshold int
	// Path is the directory or single file to analyse.
	Path string
}
|
||||
|
||||
// ComplexityResult describes one function (or method) together with its
// computed cyclomatic complexity and source location.
type ComplexityResult struct {
	FuncName   string // function name; methods are reported as "Type.Name"
	Package    string // package the function belongs to
	File       string // source file path as seen by the parser
	Line       int    // line of the function declaration
	Complexity int    // computed cyclomatic complexity score
}
|
||||
|
||||
// DefaultComplexityConfig returns a config with sensible defaults.
|
||||
func DefaultComplexityConfig() ComplexityConfig {
|
||||
return ComplexityConfig{
|
||||
Threshold: 15,
|
||||
Path: ".",
|
||||
}
|
||||
}
|
||||
|
||||
// AnalyseComplexity walks Go source files and returns functions exceeding the
|
||||
// configured complexity threshold. Uses native go/ast parsing — no external tools.
|
||||
func AnalyseComplexity(cfg ComplexityConfig) ([]ComplexityResult, error) {
|
||||
if cfg.Threshold <= 0 {
|
||||
cfg.Threshold = 15
|
||||
}
|
||||
if cfg.Path == "" {
|
||||
cfg.Path = "."
|
||||
}
|
||||
|
||||
var results []ComplexityResult
|
||||
|
||||
info, err := os.Stat(cfg.Path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat %s: %w", cfg.Path, err)
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
// Single file
|
||||
fileResults, err := analyseFile(cfg.Path, cfg.Threshold)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, fileResults...)
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// Walk directory for .go files
|
||||
err = filepath.Walk(cfg.Path, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if fi.IsDir() {
|
||||
// Skip vendor and hidden directories
|
||||
name := fi.Name()
|
||||
if name == "vendor" || strings.HasPrefix(name, ".") {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if !strings.HasSuffix(path, ".go") || strings.HasSuffix(path, "_test.go") {
|
||||
return nil
|
||||
}
|
||||
fileResults, err := analyseFile(path, cfg.Threshold)
|
||||
if err != nil {
|
||||
return nil // Skip files that fail to parse
|
||||
}
|
||||
results = append(results, fileResults...)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("walk %s: %w", cfg.Path, err)
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// AnalyseComplexitySource parses Go source code from a string and returns
|
||||
// complexity results. Useful for testing without file I/O.
|
||||
func AnalyseComplexitySource(src string, filename string, threshold int) ([]ComplexityResult, error) {
|
||||
if threshold <= 0 {
|
||||
threshold = 15
|
||||
}
|
||||
|
||||
fset := token.NewFileSet()
|
||||
f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse %s: %w", filename, err)
|
||||
}
|
||||
|
||||
var results []ComplexityResult
|
||||
pkgName := f.Name.Name
|
||||
|
||||
ast.Inspect(f, func(n ast.Node) bool {
|
||||
switch fn := n.(type) {
|
||||
case *ast.FuncDecl:
|
||||
complexity := calculateComplexity(fn)
|
||||
if complexity >= threshold {
|
||||
pos := fset.Position(fn.Pos())
|
||||
funcName := fn.Name.Name
|
||||
if fn.Recv != nil && len(fn.Recv.List) > 0 {
|
||||
funcName = receiverType(fn.Recv.List[0].Type) + "." + funcName
|
||||
}
|
||||
results = append(results, ComplexityResult{
|
||||
FuncName: funcName,
|
||||
Package: pkgName,
|
||||
File: pos.Filename,
|
||||
Line: pos.Line,
|
||||
Complexity: complexity,
|
||||
})
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// analyseFile parses a single Go file and returns functions exceeding the threshold.
|
||||
func analyseFile(path string, threshold int) ([]ComplexityResult, error) {
|
||||
src, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
return AnalyseComplexitySource(string(src), path, threshold)
|
||||
}
|
||||
|
||||
// calculateComplexity computes the cyclomatic complexity of a function.
|
||||
// Starts at 1, increments for each branching construct.
|
||||
func calculateComplexity(fn *ast.FuncDecl) int {
|
||||
if fn.Body == nil {
|
||||
return 1
|
||||
}
|
||||
|
||||
complexity := 1
|
||||
ast.Inspect(fn.Body, func(n ast.Node) bool {
|
||||
switch node := n.(type) {
|
||||
case *ast.IfStmt:
|
||||
complexity++
|
||||
case *ast.ForStmt:
|
||||
complexity++
|
||||
case *ast.RangeStmt:
|
||||
complexity++
|
||||
case *ast.CaseClause:
|
||||
// Each case adds a branch (except default, which is the "else")
|
||||
if node.List != nil {
|
||||
complexity++
|
||||
}
|
||||
case *ast.CommClause:
|
||||
// Select case
|
||||
if node.Comm != nil {
|
||||
complexity++
|
||||
}
|
||||
case *ast.BinaryExpr:
|
||||
if node.Op == token.LAND || node.Op == token.LOR {
|
||||
complexity++
|
||||
}
|
||||
case *ast.TypeSwitchStmt:
|
||||
complexity++
|
||||
case *ast.SelectStmt:
|
||||
complexity++
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
return complexity
|
||||
}
|
||||
|
||||
// receiverType extracts the type name from a method receiver.
|
||||
func receiverType(expr ast.Expr) string {
|
||||
switch t := expr.(type) {
|
||||
case *ast.StarExpr:
|
||||
return receiverType(t.X)
|
||||
case *ast.Ident:
|
||||
return t.Name
|
||||
case *ast.IndexExpr:
|
||||
return receiverType(t.X)
|
||||
default:
|
||||
return "?"
|
||||
}
|
||||
}
|
||||
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
430
devkit/complexity_test.go
Normal file
430
devkit/complexity_test.go
Normal file
|
|
@ -0,0 +1,430 @@
|
|||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
package devkit
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAnalyseComplexitySource_SimpleFunc_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func simple() {
|
||||
x := 1
|
||||
_ = x
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "simple.go", 1)
|
||||
require.NoError(t, err)
|
||||
// Complexity = 1 (just the function body, no branches), threshold = 1
|
||||
assert.Len(t, results, 1)
|
||||
assert.Equal(t, "simple", results[0].FuncName)
|
||||
assert.Equal(t, "example", results[0].Package)
|
||||
assert.Equal(t, 1, results[0].Complexity)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_IfElse_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func branches(x int) string {
|
||||
if x > 0 {
|
||||
return "positive"
|
||||
} else if x < 0 {
|
||||
return "negative"
|
||||
}
|
||||
return "zero"
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "branches.go", 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
// 1 (base) + 1 (if) + 1 (else if) = 3
|
||||
assert.Equal(t, 3, results[0].Complexity)
|
||||
assert.Equal(t, "branches", results[0].FuncName)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_ForLoop_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func loopy(items []int) int {
|
||||
total := 0
|
||||
for _, v := range items {
|
||||
total += v
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
total += i
|
||||
}
|
||||
return total
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "loops.go", 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
// 1 (base) + 1 (range) + 1 (for) = 3
|
||||
assert.Equal(t, 3, results[0].Complexity)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_SwitchCase_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func switcher(x int) string {
|
||||
switch x {
|
||||
case 1:
|
||||
return "one"
|
||||
case 2:
|
||||
return "two"
|
||||
case 3:
|
||||
return "three"
|
||||
default:
|
||||
return "other"
|
||||
}
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "switch.go", 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
// 1 (base) + 3 (case 1, 2, 3; default has nil List) = 4
|
||||
assert.Equal(t, 4, results[0].Complexity)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_LogicalOperators_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func complex(a, b, c bool) bool {
|
||||
if a && b || c {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "logical.go", 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
// 1 (base) + 1 (if) + 1 (&&) + 1 (||) = 4
|
||||
assert.Equal(t, 4, results[0].Complexity)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_MethodReceiver_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
type Server struct{}
|
||||
|
||||
func (s *Server) Handle(req int) string {
|
||||
if req > 0 {
|
||||
return "ok"
|
||||
}
|
||||
return "bad"
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "method.go", 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
assert.Equal(t, "Server.Handle", results[0].FuncName)
|
||||
assert.Equal(t, 2, results[0].Complexity)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_HighComplexity_Good(t *testing.T) {
|
||||
// Build a function with high complexity to test threshold filtering.
|
||||
src := `package example
|
||||
|
||||
func monster(x, y, z int) int {
|
||||
result := 0
|
||||
if x > 0 {
|
||||
if y > 0 {
|
||||
if z > 0 {
|
||||
result = 1
|
||||
} else if z < -10 {
|
||||
result = 2
|
||||
}
|
||||
} else if y < -5 {
|
||||
result = 3
|
||||
}
|
||||
} else if x < -10 {
|
||||
result = 4
|
||||
}
|
||||
for i := 0; i < x; i++ {
|
||||
for j := 0; j < y; j++ {
|
||||
if i > j && j > 0 {
|
||||
result += i
|
||||
} else if i == j || i < 0 {
|
||||
result += j
|
||||
}
|
||||
}
|
||||
}
|
||||
switch result {
|
||||
case 1:
|
||||
result++
|
||||
case 2:
|
||||
result--
|
||||
case 3:
|
||||
result *= 2
|
||||
}
|
||||
return result
|
||||
}
|
||||
`
|
||||
// With threshold 15 — should be above it
|
||||
results, err := AnalyseComplexitySource(src, "monster.go", 15)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
assert.Equal(t, "monster", results[0].FuncName)
|
||||
assert.GreaterOrEqual(t, results[0].Complexity, 15)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_BelowThreshold_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func simple() int {
|
||||
return 42
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "simple.go", 5)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, results) // Complexity 1, below threshold 5
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_MultipleFuncs_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func low() { }
|
||||
|
||||
func medium(x int) {
|
||||
if x > 0 {
|
||||
if x > 10 {
|
||||
_ = x
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func high(a, b, c, d int) int {
|
||||
if a > 0 {
|
||||
if b > 0 {
|
||||
if c > 0 {
|
||||
if d > 0 {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "multi.go", 3)
|
||||
require.NoError(t, err)
|
||||
// low: 1, medium: 3, high: 5
|
||||
assert.Len(t, results, 2) // medium and high
|
||||
assert.Equal(t, "medium", results[0].FuncName)
|
||||
assert.Equal(t, 3, results[0].Complexity)
|
||||
assert.Equal(t, "high", results[1].FuncName)
|
||||
assert.Equal(t, 5, results[1].Complexity)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_SelectStmt_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func selecter(ch1, ch2 chan int) int {
|
||||
select {
|
||||
case v := <-ch1:
|
||||
return v
|
||||
case v := <-ch2:
|
||||
return v
|
||||
}
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "select.go", 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
// 1 (base) + 1 (select) + 2 (comm clauses) = 4
|
||||
assert.Equal(t, 4, results[0].Complexity)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_TypeSwitch_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func typeSwitch(v interface{}) string {
|
||||
switch v.(type) {
|
||||
case int:
|
||||
return "int"
|
||||
case string:
|
||||
return "string"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "typeswitch.go", 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
// 1 (base) + 1 (type switch) + 2 (case int, case string; default has nil List) = 4
|
||||
assert.Equal(t, 4, results[0].Complexity)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_EmptyBody_Good(t *testing.T) {
|
||||
// Interface methods or abstract funcs have nil body
|
||||
src := `package example
|
||||
|
||||
type Iface interface {
|
||||
DoSomething(x int) error
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "iface.go", 1)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, results) // No FuncDecl in interface
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_ParseError_Bad(t *testing.T) {
|
||||
src := `this is not valid go code at all!!!`
|
||||
_, err := AnalyseComplexitySource(src, "bad.go", 1)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "parse")
|
||||
}
|
||||
|
||||
func TestAnalyseComplexity_DefaultThreshold_Good(t *testing.T) {
|
||||
cfg := DefaultComplexityConfig()
|
||||
assert.Equal(t, 15, cfg.Threshold)
|
||||
assert.Equal(t, ".", cfg.Path)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexity_ZeroThreshold_Good(t *testing.T) {
|
||||
// Zero threshold should default to 15
|
||||
src := `package example
|
||||
func f() { }
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "zero.go", 0)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, results) // complexity 1, default threshold 15
|
||||
}
|
||||
|
||||
func TestAnalyseComplexity_Directory_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Write a Go file with a complex function
|
||||
src := `package example
|
||||
|
||||
func complex(a, b, c, d, e int) int {
|
||||
if a > 0 {
|
||||
if b > 0 {
|
||||
if c > 0 {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
}
|
||||
if d > 0 || e > 0 {
|
||||
return 2
|
||||
}
|
||||
return 0
|
||||
}
|
||||
`
|
||||
err := os.WriteFile(filepath.Join(dir, "example.go"), []byte(src), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := ComplexityConfig{Threshold: 3, Path: dir}
|
||||
results, err := AnalyseComplexity(cfg)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
assert.Equal(t, "complex", results[0].FuncName)
|
||||
// 1 (base) + 3 (if x>0, if y>0, if z>0) + 1 (if d>0||e>0) + 1 (||) = 6
|
||||
assert.Equal(t, 6, results[0].Complexity)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexity_SingleFile_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
src := `package example
|
||||
|
||||
func branchy(x int) {
|
||||
if x > 0 { }
|
||||
if x > 1 { }
|
||||
if x > 2 { }
|
||||
}
|
||||
`
|
||||
path := filepath.Join(dir, "single.go")
|
||||
err := os.WriteFile(path, []byte(src), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := ComplexityConfig{Threshold: 1, Path: path}
|
||||
results, err := AnalyseComplexity(cfg)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
assert.Equal(t, 4, results[0].Complexity) // 1 + 3 ifs
|
||||
}
|
||||
|
||||
func TestAnalyseComplexity_SkipsTestFiles_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Production file — should be analysed
|
||||
prod := `package example
|
||||
func prodFunc(x int) {
|
||||
if x > 0 { }
|
||||
if x > 1 { }
|
||||
}
|
||||
`
|
||||
err := os.WriteFile(filepath.Join(dir, "prod.go"), []byte(prod), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test file — should be skipped
|
||||
test := `package example
|
||||
func TestHelper(x int) {
|
||||
if x > 0 { }
|
||||
if x > 1 { }
|
||||
if x > 2 { }
|
||||
if x > 3 { }
|
||||
}
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(dir, "prod_test.go"), []byte(test), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := ComplexityConfig{Threshold: 1, Path: dir}
|
||||
results, err := AnalyseComplexity(cfg)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
assert.Equal(t, "prodFunc", results[0].FuncName)
|
||||
}
|
||||
|
||||
func TestAnalyseComplexity_SkipsVendor_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Create vendor dir with a Go file
|
||||
vendorDir := filepath.Join(dir, "vendor")
|
||||
err := os.MkdirAll(vendorDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
vendorSrc := `package lib
|
||||
func VendorFunc(x int) {
|
||||
if x > 0 { }
|
||||
if x > 1 { }
|
||||
}
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(vendorDir, "lib.go"), []byte(vendorSrc), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := ComplexityConfig{Threshold: 1, Path: dir}
|
||||
results, err := AnalyseComplexity(cfg)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, results) // vendor dir should be skipped
|
||||
}
|
||||
|
||||
func TestAnalyseComplexity_NonexistentPath_Bad(t *testing.T) {
|
||||
cfg := ComplexityConfig{Threshold: 1, Path: "/nonexistent/path/xyz"}
|
||||
_, err := AnalyseComplexity(cfg)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "stat")
|
||||
}
|
||||
|
||||
func TestAnalyseComplexitySource_NestedLogicalOps_Good(t *testing.T) {
|
||||
src := `package example
|
||||
|
||||
func nested(a, b, c, d bool) bool {
|
||||
return (a && b) || (c && d)
|
||||
}
|
||||
`
|
||||
results, err := AnalyseComplexitySource(src, "nested.go", 1)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 1)
|
||||
// 1 (base) + 2 (&&) + 1 (||) = 4
|
||||
assert.Equal(t, 4, results[0].Complexity)
|
||||
}
|
||||
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
261
devkit/coverage.go
Normal file
261
devkit/coverage.go
Normal file
|
|
@ -0,0 +1,261 @@
|
|||
// Package devkit provides a developer toolkit for common automation commands.
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
package devkit
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CoverageSnapshot captures coverage figures at a single point in time.
type CoverageSnapshot struct {
	// Timestamp records when the measurement was taken.
	Timestamp time.Time `json:"timestamp"`
	// Packages maps package import path to its coverage percentage.
	Packages map[string]float64 `json:"packages"`
	// Total is the overall coverage percentage across all packages.
	Total float64 `json:"total"`
	// Meta carries optional context such as commit hash or branch name.
	Meta map[string]string `json:"meta,omitempty"`
}
|
||||
|
||||
// CoverageRegression records one package whose coverage changed between
// two snapshots; a negative Delta indicates a drop.
type CoverageRegression struct {
	Package  string  // import path of the affected package
	Previous float64 // coverage % in the earlier snapshot
	Current  float64 // coverage % in the later snapshot
	Delta    float64 // Current - Previous; negative means regression
}
|
||||
|
||||
// CoverageComparison holds the result of comparing two snapshots.
|
||||
type CoverageComparison struct {
|
||||
Regressions []CoverageRegression
|
||||
Improvements []CoverageRegression
|
||||
NewPackages []string // Packages present in current but not previous
|
||||
Removed []string // Packages present in previous but not current
|
||||
TotalDelta float64 // Change in overall coverage
|
||||
}
|
||||
|
||||
// CoverageStore persists a history of coverage snapshots as a JSON array
// held in a single file.
type CoverageStore struct {
	Path string // location of the JSON storage file
}
|
||||
|
||||
// NewCoverageStore creates a store backed by the given file path.
|
||||
func NewCoverageStore(path string) *CoverageStore {
|
||||
return &CoverageStore{Path: path}
|
||||
}
|
||||
|
||||
// Append adds a snapshot to the store.
|
||||
func (s *CoverageStore) Append(snap CoverageSnapshot) error {
|
||||
snapshots, err := s.Load()
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("load snapshots: %w", err)
|
||||
}
|
||||
|
||||
snapshots = append(snapshots, snap)
|
||||
|
||||
data, err := json.MarshalIndent(snapshots, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal snapshots: %w", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(s.Path, data, 0644); err != nil {
|
||||
return fmt.Errorf("write %s: %w", s.Path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Load reads all snapshots from the store.
|
||||
func (s *CoverageStore) Load() ([]CoverageSnapshot, error) {
|
||||
data, err := os.ReadFile(s.Path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var snapshots []CoverageSnapshot
|
||||
if err := json.Unmarshal(data, &snapshots); err != nil {
|
||||
return nil, fmt.Errorf("parse %s: %w", s.Path, err)
|
||||
}
|
||||
return snapshots, nil
|
||||
}
|
||||
|
||||
// Latest returns the most recent snapshot, or nil if the store is empty.
|
||||
func (s *CoverageStore) Latest() (*CoverageSnapshot, error) {
|
||||
snapshots, err := s.Load()
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if len(snapshots) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
latest := &snapshots[0]
|
||||
for i := range snapshots {
|
||||
if snapshots[i].Timestamp.After(latest.Timestamp) {
|
||||
latest = &snapshots[i]
|
||||
}
|
||||
}
|
||||
return latest, nil
|
||||
}
|
||||
|
||||
// ParseCoverProfile parses output from `go test -coverprofile=cover.out` format.
|
||||
// Each line is: mode: set/count/atomic (first line) or
|
||||
// package/file.go:startLine.startCol,endLine.endCol stmts count
|
||||
func ParseCoverProfile(data string) (CoverageSnapshot, error) {
|
||||
snap := CoverageSnapshot{
|
||||
Timestamp: time.Now(),
|
||||
Packages: make(map[string]float64),
|
||||
}
|
||||
|
||||
type pkgStats struct {
|
||||
covered int
|
||||
total int
|
||||
}
|
||||
packages := make(map[string]*pkgStats)
|
||||
|
||||
scanner := bufio.NewScanner(strings.NewReader(data))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.HasPrefix(line, "mode:") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Format: pkg/file.go:line.col,line.col numStmt count
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) != 3 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract package from file path
|
||||
filePath := parts[0]
|
||||
colonIdx := strings.Index(filePath, ":")
|
||||
if colonIdx < 0 {
|
||||
continue
|
||||
}
|
||||
file := filePath[:colonIdx]
|
||||
|
||||
// Package is everything up to the last /
|
||||
pkg := file
|
||||
if lastSlash := strings.LastIndex(file, "/"); lastSlash >= 0 {
|
||||
pkg = file[:lastSlash]
|
||||
}
|
||||
|
||||
stmts, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
count, err := strconv.Atoi(parts[2])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := packages[pkg]; !ok {
|
||||
packages[pkg] = &pkgStats{}
|
||||
}
|
||||
packages[pkg].total += stmts
|
||||
if count > 0 {
|
||||
packages[pkg].covered += stmts
|
||||
}
|
||||
}
|
||||
|
||||
totalCovered := 0
|
||||
totalStmts := 0
|
||||
|
||||
for pkg, stats := range packages {
|
||||
if stats.total > 0 {
|
||||
snap.Packages[pkg] = math.Round(float64(stats.covered)/float64(stats.total)*1000) / 10
|
||||
} else {
|
||||
snap.Packages[pkg] = 0
|
||||
}
|
||||
totalCovered += stats.covered
|
||||
totalStmts += stats.total
|
||||
}
|
||||
|
||||
if totalStmts > 0 {
|
||||
snap.Total = math.Round(float64(totalCovered)/float64(totalStmts)*1000) / 10
|
||||
}
|
||||
|
||||
return snap, nil
|
||||
}
|
||||
|
||||
// ParseCoverOutput parses the human-readable `go test -cover ./...` output.
|
||||
// Extracts lines like: ok example.com/pkg 0.5s coverage: 85.0% of statements
|
||||
func ParseCoverOutput(output string) (CoverageSnapshot, error) {
|
||||
snap := CoverageSnapshot{
|
||||
Timestamp: time.Now(),
|
||||
Packages: make(map[string]float64),
|
||||
}
|
||||
|
||||
re := regexp.MustCompile(`ok\s+(\S+)\s+.*coverage:\s+([\d.]+)%`)
|
||||
scanner := bufio.NewScanner(strings.NewReader(output))
|
||||
|
||||
totalPct := 0.0
|
||||
count := 0
|
||||
|
||||
for scanner.Scan() {
|
||||
matches := re.FindStringSubmatch(scanner.Text())
|
||||
if len(matches) == 3 {
|
||||
pct, _ := strconv.ParseFloat(matches[2], 64)
|
||||
snap.Packages[matches[1]] = pct
|
||||
totalPct += pct
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
if count > 0 {
|
||||
snap.Total = math.Round(totalPct/float64(count)*10) / 10
|
||||
}
|
||||
|
||||
return snap, nil
|
||||
}
|
||||
|
||||
// CompareCoverage computes the difference between two snapshots.
|
||||
func CompareCoverage(previous, current CoverageSnapshot) CoverageComparison {
|
||||
comp := CoverageComparison{
|
||||
TotalDelta: math.Round((current.Total-previous.Total)*10) / 10,
|
||||
}
|
||||
|
||||
// Check each current package against previous
|
||||
for pkg, curPct := range current.Packages {
|
||||
prevPct, existed := previous.Packages[pkg]
|
||||
if !existed {
|
||||
comp.NewPackages = append(comp.NewPackages, pkg)
|
||||
continue
|
||||
}
|
||||
|
||||
delta := math.Round((curPct-prevPct)*10) / 10
|
||||
if delta < 0 {
|
||||
comp.Regressions = append(comp.Regressions, CoverageRegression{
|
||||
Package: pkg,
|
||||
Previous: prevPct,
|
||||
Current: curPct,
|
||||
Delta: delta,
|
||||
})
|
||||
} else if delta > 0 {
|
||||
comp.Improvements = append(comp.Improvements, CoverageRegression{
|
||||
Package: pkg,
|
||||
Previous: prevPct,
|
||||
Current: curPct,
|
||||
Delta: delta,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Check for removed packages
|
||||
for pkg := range previous.Packages {
|
||||
if _, exists := current.Packages[pkg]; !exists {
|
||||
comp.Removed = append(comp.Removed, pkg)
|
||||
}
|
||||
}
|
||||
|
||||
return comp
|
||||
}
|
||||
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
358
devkit/coverage_test.go
Normal file
358
devkit/coverage_test.go
Normal file
|
|
@ -0,0 +1,358 @@
|
|||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
package devkit
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const sampleCoverProfile = `mode: set
|
||||
example.com/pkg1/file1.go:10.1,20.2 5 1
|
||||
example.com/pkg1/file1.go:22.1,30.2 3 0
|
||||
example.com/pkg1/file2.go:5.1,15.2 4 1
|
||||
example.com/pkg2/main.go:1.1,10.2 10 1
|
||||
example.com/pkg2/main.go:12.1,20.2 10 0
|
||||
`
|
||||
|
||||
func TestParseCoverProfile_Good(t *testing.T) {
|
||||
snap, err := ParseCoverProfile(sampleCoverProfile)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, snap.Packages, 2)
|
||||
|
||||
// pkg1: 5+4 covered out of 5+3+4=12 => 9/12 = 75%
|
||||
assert.Equal(t, 75.0, snap.Packages["example.com/pkg1"])
|
||||
|
||||
// pkg2: 10 covered out of 10+10=20 => 10/20 = 50%
|
||||
assert.Equal(t, 50.0, snap.Packages["example.com/pkg2"])
|
||||
|
||||
// Total: 19/32 = 59.4%
|
||||
assert.Equal(t, 59.4, snap.Total)
|
||||
assert.False(t, snap.Timestamp.IsZero())
|
||||
}
|
||||
|
||||
func TestParseCoverProfile_Empty_Good(t *testing.T) {
|
||||
snap, err := ParseCoverProfile("")
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, snap.Packages)
|
||||
assert.Equal(t, 0.0, snap.Total)
|
||||
}
|
||||
|
||||
func TestParseCoverProfile_ModeOnly_Good(t *testing.T) {
|
||||
snap, err := ParseCoverProfile("mode: set\n")
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, snap.Packages)
|
||||
}
|
||||
|
||||
func TestParseCoverProfile_FullCoverage_Good(t *testing.T) {
|
||||
data := `mode: set
|
||||
example.com/perfect/main.go:1.1,10.2 10 1
|
||||
example.com/perfect/main.go:12.1,20.2 5 1
|
||||
`
|
||||
snap, err := ParseCoverProfile(data)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 100.0, snap.Packages["example.com/perfect"])
|
||||
assert.Equal(t, 100.0, snap.Total)
|
||||
}
|
||||
|
||||
func TestParseCoverProfile_ZeroCoverage_Good(t *testing.T) {
|
||||
data := `mode: set
|
||||
example.com/zero/main.go:1.1,10.2 10 0
|
||||
example.com/zero/main.go:12.1,20.2 5 0
|
||||
`
|
||||
snap, err := ParseCoverProfile(data)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0.0, snap.Packages["example.com/zero"])
|
||||
assert.Equal(t, 0.0, snap.Total)
|
||||
}
|
||||
|
||||
func TestParseCoverProfile_MalformedLines_Bad(t *testing.T) {
|
||||
data := `mode: set
|
||||
not a valid line
|
||||
example.com/pkg/file.go:1.1,10.2 5 1
|
||||
another bad line with spaces
|
||||
example.com/pkg/file.go:12.1,20.2 5 0
|
||||
`
|
||||
snap, err := ParseCoverProfile(data)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, snap.Packages, 1)
|
||||
assert.Equal(t, 50.0, snap.Packages["example.com/pkg"])
|
||||
}
|
||||
|
||||
func TestParseCoverOutput_Good(t *testing.T) {
|
||||
output := `? example.com/skipped [no test files]
|
||||
ok example.com/pkg1 0.5s coverage: 85.0% of statements
|
||||
ok example.com/pkg2 0.2s coverage: 42.5% of statements
|
||||
ok example.com/pkg3 1.1s coverage: 100.0% of statements
|
||||
`
|
||||
snap, err := ParseCoverOutput(output)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, snap.Packages, 3)
|
||||
assert.Equal(t, 85.0, snap.Packages["example.com/pkg1"])
|
||||
assert.Equal(t, 42.5, snap.Packages["example.com/pkg2"])
|
||||
assert.Equal(t, 100.0, snap.Packages["example.com/pkg3"])
|
||||
|
||||
// Total = avg of (85 + 42.5 + 100) / 3 = 75.8333... rounded to 75.8
|
||||
assert.Equal(t, 75.8, snap.Total)
|
||||
}
|
||||
|
||||
func TestParseCoverOutput_Empty_Good(t *testing.T) {
|
||||
snap, err := ParseCoverOutput("")
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, snap.Packages)
|
||||
assert.Equal(t, 0.0, snap.Total)
|
||||
}
|
||||
|
||||
func TestParseCoverOutput_NoTestFiles_Good(t *testing.T) {
|
||||
output := `? example.com/empty [no test files]
|
||||
`
|
||||
snap, err := ParseCoverOutput(output)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, snap.Packages)
|
||||
}
|
||||
|
||||
// --- CompareCoverage tests ---
|
||||
|
||||
func TestCompareCoverage_Regression_Good(t *testing.T) {
|
||||
prev := CoverageSnapshot{
|
||||
Packages: map[string]float64{
|
||||
"pkg1": 90.0,
|
||||
"pkg2": 85.0,
|
||||
"pkg3": 70.0,
|
||||
},
|
||||
Total: 81.7,
|
||||
}
|
||||
curr := CoverageSnapshot{
|
||||
Packages: map[string]float64{
|
||||
"pkg1": 90.0, // unchanged
|
||||
"pkg2": 75.0, // regression: -10
|
||||
"pkg3": 80.0, // improvement: +10
|
||||
},
|
||||
Total: 81.7,
|
||||
}
|
||||
|
||||
comp := CompareCoverage(prev, curr)
|
||||
|
||||
assert.Len(t, comp.Regressions, 1)
|
||||
assert.Equal(t, "pkg2", comp.Regressions[0].Package)
|
||||
assert.Equal(t, -10.0, comp.Regressions[0].Delta)
|
||||
assert.Equal(t, 85.0, comp.Regressions[0].Previous)
|
||||
assert.Equal(t, 75.0, comp.Regressions[0].Current)
|
||||
|
||||
assert.Len(t, comp.Improvements, 1)
|
||||
assert.Equal(t, "pkg3", comp.Improvements[0].Package)
|
||||
assert.Equal(t, 10.0, comp.Improvements[0].Delta)
|
||||
}
|
||||
|
||||
func TestCompareCoverage_NewAndRemoved_Good(t *testing.T) {
|
||||
prev := CoverageSnapshot{
|
||||
Packages: map[string]float64{
|
||||
"old-pkg": 50.0,
|
||||
"stable": 80.0,
|
||||
},
|
||||
Total: 65.0,
|
||||
}
|
||||
curr := CoverageSnapshot{
|
||||
Packages: map[string]float64{
|
||||
"stable": 80.0,
|
||||
"new-pkg": 60.0,
|
||||
},
|
||||
Total: 70.0,
|
||||
}
|
||||
|
||||
comp := CompareCoverage(prev, curr)
|
||||
|
||||
assert.Contains(t, comp.NewPackages, "new-pkg")
|
||||
assert.Contains(t, comp.Removed, "old-pkg")
|
||||
assert.Equal(t, 5.0, comp.TotalDelta)
|
||||
assert.Empty(t, comp.Regressions)
|
||||
}
|
||||
|
||||
func TestCompareCoverage_Identical_Good(t *testing.T) {
|
||||
snap := CoverageSnapshot{
|
||||
Packages: map[string]float64{
|
||||
"pkg1": 80.0,
|
||||
"pkg2": 90.0,
|
||||
},
|
||||
Total: 85.0,
|
||||
}
|
||||
|
||||
comp := CompareCoverage(snap, snap)
|
||||
|
||||
assert.Empty(t, comp.Regressions)
|
||||
assert.Empty(t, comp.Improvements)
|
||||
assert.Empty(t, comp.NewPackages)
|
||||
assert.Empty(t, comp.Removed)
|
||||
assert.Equal(t, 0.0, comp.TotalDelta)
|
||||
}
|
||||
|
||||
func TestCompareCoverage_EmptySnapshots_Good(t *testing.T) {
|
||||
prev := CoverageSnapshot{Packages: map[string]float64{}}
|
||||
curr := CoverageSnapshot{Packages: map[string]float64{}}
|
||||
|
||||
comp := CompareCoverage(prev, curr)
|
||||
assert.Empty(t, comp.Regressions)
|
||||
assert.Empty(t, comp.Improvements)
|
||||
assert.Empty(t, comp.NewPackages)
|
||||
assert.Empty(t, comp.Removed)
|
||||
}
|
||||
|
||||
func TestCompareCoverage_AllNew_Good(t *testing.T) {
|
||||
prev := CoverageSnapshot{Packages: map[string]float64{}}
|
||||
curr := CoverageSnapshot{
|
||||
Packages: map[string]float64{
|
||||
"new1": 50.0,
|
||||
"new2": 75.0,
|
||||
},
|
||||
Total: 62.5,
|
||||
}
|
||||
|
||||
comp := CompareCoverage(prev, curr)
|
||||
assert.Len(t, comp.NewPackages, 2)
|
||||
assert.Empty(t, comp.Regressions)
|
||||
assert.Equal(t, 62.5, comp.TotalDelta)
|
||||
}
|
||||
|
||||
// --- CoverageStore tests ---
|
||||
|
||||
func TestCoverageStore_AppendAndLoad_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "coverage.json")
|
||||
store := NewCoverageStore(path)
|
||||
|
||||
snap1 := CoverageSnapshot{
|
||||
Timestamp: time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
Packages: map[string]float64{"pkg1": 80.0},
|
||||
Total: 80.0,
|
||||
}
|
||||
snap2 := CoverageSnapshot{
|
||||
Timestamp: time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC),
|
||||
Packages: map[string]float64{"pkg1": 85.0},
|
||||
Total: 85.0,
|
||||
}
|
||||
|
||||
require.NoError(t, store.Append(snap1))
|
||||
require.NoError(t, store.Append(snap2))
|
||||
|
||||
loaded, err := store.Load()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, loaded, 2)
|
||||
assert.Equal(t, 80.0, loaded[0].Total)
|
||||
assert.Equal(t, 85.0, loaded[1].Total)
|
||||
}
|
||||
|
||||
func TestCoverageStore_Latest_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "coverage.json")
|
||||
store := NewCoverageStore(path)
|
||||
|
||||
// Add snapshots out of chronological order
|
||||
snap1 := CoverageSnapshot{
|
||||
Timestamp: time.Date(2025, 1, 3, 0, 0, 0, 0, time.UTC),
|
||||
Packages: map[string]float64{"pkg1": 90.0},
|
||||
Total: 90.0,
|
||||
}
|
||||
snap2 := CoverageSnapshot{
|
||||
Timestamp: time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
Packages: map[string]float64{"pkg1": 70.0},
|
||||
Total: 70.0,
|
||||
}
|
||||
|
||||
require.NoError(t, store.Append(snap2)) // older first
|
||||
require.NoError(t, store.Append(snap1)) // newer second
|
||||
|
||||
latest, err := store.Latest()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, latest)
|
||||
assert.Equal(t, 90.0, latest.Total)
|
||||
}
|
||||
|
||||
func TestCoverageStore_LatestEmpty_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "nonexistent.json")
|
||||
store := NewCoverageStore(path)
|
||||
|
||||
latest, err := store.Latest()
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, latest)
|
||||
}
|
||||
|
||||
func TestCoverageStore_LoadNonexistent_Bad(t *testing.T) {
|
||||
store := NewCoverageStore("/nonexistent/path/coverage.json")
|
||||
_, err := store.Load()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCoverageStore_LoadCorrupt_Bad(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "corrupt.json")
|
||||
require.NoError(t, os.WriteFile(path, []byte("not json!!!"), 0644))
|
||||
|
||||
store := NewCoverageStore(path)
|
||||
_, err := store.Load()
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "parse")
|
||||
}
|
||||
|
||||
func TestCoverageStore_WithMeta_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "coverage.json")
|
||||
store := NewCoverageStore(path)
|
||||
|
||||
snap := CoverageSnapshot{
|
||||
Timestamp: time.Now(),
|
||||
Packages: map[string]float64{"pkg1": 80.0},
|
||||
Total: 80.0,
|
||||
Meta: map[string]string{
|
||||
"commit": "abc123",
|
||||
"branch": "main",
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, store.Append(snap))
|
||||
|
||||
loaded, err := store.Load()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, loaded, 1)
|
||||
assert.Equal(t, "abc123", loaded[0].Meta["commit"])
|
||||
assert.Equal(t, "main", loaded[0].Meta["branch"])
|
||||
}
|
||||
|
||||
func TestCoverageStore_Persistence_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "persist.json")
|
||||
|
||||
// Write with one store instance
|
||||
store1 := NewCoverageStore(path)
|
||||
snap := CoverageSnapshot{
|
||||
Timestamp: time.Now(),
|
||||
Packages: map[string]float64{"pkg1": 55.5},
|
||||
Total: 55.5,
|
||||
}
|
||||
require.NoError(t, store1.Append(snap))
|
||||
|
||||
// Read with a different store instance
|
||||
store2 := NewCoverageStore(path)
|
||||
loaded, err := store2.Load()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, loaded, 1)
|
||||
assert.Equal(t, 55.5, loaded[0].Total)
|
||||
}
|
||||
|
||||
func TestCompareCoverage_SmallDelta_Good(t *testing.T) {
|
||||
// Test that very small deltas (<0.05) round to 0 and are not flagged.
|
||||
prev := CoverageSnapshot{
|
||||
Packages: map[string]float64{"pkg1": 80.01},
|
||||
Total: 80.01,
|
||||
}
|
||||
curr := CoverageSnapshot{
|
||||
Packages: map[string]float64{"pkg1": 80.04},
|
||||
Total: 80.04,
|
||||
}
|
||||
|
||||
comp := CompareCoverage(prev, curr)
|
||||
assert.Empty(t, comp.Regressions)
|
||||
assert.Empty(t, comp.Improvements) // 0.03 rounds to 0.0
|
||||
}
|
||||
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
181
devkit/vulncheck.go
Normal file
181
devkit/vulncheck.go
Normal file
|
|
@ -0,0 +1,181 @@
|
|||
// Package devkit provides a developer toolkit for common automation commands.
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
package devkit
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// VulnFinding represents a single vulnerability found by govulncheck,
// assembled by ParseVulnCheckJSON from a "finding" message plus its
// matching "osv" message (when present).
type VulnFinding struct {
	ID             string   // e.g. GO-2024-1234
	Aliases        []string // CVE/GHSA aliases
	Package        string   // Affected package path
	CalledFunction string   // Function in call stack (empty if not called)
	Description    string   // Human-readable summary
	Severity       string   // "HIGH", "MEDIUM", "LOW", or empty. NOTE(review): never assigned by ParseVulnCheckJSON in this file — confirm whether severity extraction is still planned.
	FixedVersion   string   // Version that contains the fix (see ParseVulnCheckJSON for how it is chosen)
	ModulePath     string   // Go module path
}
|
||||
|
||||
// VulnResult holds the complete output of a vulnerability scan.
type VulnResult struct {
	Findings []VulnFinding // one entry per govulncheck "finding" message
	Module   string        // Module path that was scanned (from the "config" message)
}
|
||||
|
||||
// --- govulncheck JSON wire types ---
|
||||
|
||||
// govulncheckMessage represents a single JSON line from govulncheck -json
// output. Each line typically carries exactly one non-nil field.
type govulncheckMessage struct {
	Config   *govulncheckConfig `json:"config,omitempty"`
	OSV      *govulncheckOSV    `json:"osv,omitempty"`
	Finding  *govulncheckFind   `json:"finding,omitempty"`
	Progress *json.RawMessage   `json:"progress,omitempty"` // accepted but ignored by the parser
}
|
||||
|
||||
// govulncheckConfig mirrors the "config" message; only ModulePath is consumed.
type govulncheckConfig struct {
	GoVersion  string `json:"go_version"`
	ModulePath string `json:"module_path"`
}
|
||||
|
||||
// govulncheckOSV mirrors the subset of an OSV vulnerability entry used to
// enrich findings (summary, aliases, fixed-version ranges).
type govulncheckOSV struct {
	ID       string              `json:"id"`
	Aliases  []string            `json:"aliases"`
	Summary  string              `json:"summary"`
	Affected []govulncheckAffect `json:"affected"`
}
|
||||
|
||||
type govulncheckAffect struct {
|
||||
Package *govulncheckPkg `json:"package,omitempty"`
|
||||
Ranges []govulncheckRange `json:"ranges,omitempty"`
|
||||
Severity []govulncheckSeverity `json:"database_specific,omitempty"`
|
||||
}
|
||||
|
||||
// govulncheckPkg identifies the affected package in an OSV entry.
// NOTE(review): not read anywhere in this file — kept for schema completeness.
type govulncheckPkg struct {
	Name      string `json:"name"`
	Ecosystem string `json:"ecosystem"`
}
|
||||
|
||||
// govulncheckRange holds the version events of one OSV affected range.
type govulncheckRange struct {
	Events []govulncheckEvent `json:"events"`
}
|
||||
|
||||
// govulncheckEvent carries the "fixed" version of a range event; other event
// kinds (e.g. "introduced") are intentionally not modelled.
type govulncheckEvent struct {
	Fixed string `json:"fixed,omitempty"`
}
|
||||
|
||||
// govulncheckSeverity models a single severity record.
// NOTE(review): the OSV "database_specific" field this appears intended for is
// a JSON object rather than an array, and the Severity value is never read in
// this file — confirm intent before relying on it.
type govulncheckSeverity struct {
	Severity string `json:"severity,omitempty"`
}
|
||||
|
||||
// govulncheckFind mirrors a "finding" message: the OSV ID it refers to plus
// the call trace from user code down to the vulnerable symbol.
type govulncheckFind struct {
	OSV   string             `json:"osv"`
	Trace []govulncheckTrace `json:"trace"`
}
|
||||
|
||||
// govulncheckTrace is one frame of a finding's call trace.
// NOTE(review): Version appears to be the module version observed in the
// scanned build, not the version containing the fix, yet ParseVulnCheckJSON
// copies it into VulnFinding.FixedVersion — confirm against the govulncheck
// JSON schema.
type govulncheckTrace struct {
	Module   string `json:"module,omitempty"`
	Package  string `json:"package,omitempty"`
	Function string `json:"function,omitempty"`
	Version  string `json:"version,omitempty"`
}
|
||||
|
||||
// VulnCheck runs govulncheck -json on the given module path and parses
|
||||
// the output into structured VulnFindings.
|
||||
func (t *Toolkit) VulnCheck(modulePath string) (*VulnResult, error) {
|
||||
if modulePath == "" {
|
||||
modulePath = "./..."
|
||||
}
|
||||
|
||||
stdout, stderr, exitCode, err := t.Run("govulncheck", "-json", modulePath)
|
||||
if err != nil && exitCode == -1 {
|
||||
return nil, fmt.Errorf("govulncheck not installed or not available: %w", err)
|
||||
}
|
||||
|
||||
return ParseVulnCheckJSON(stdout, stderr)
|
||||
}
|
||||
|
||||
// ParseVulnCheckJSON parses govulncheck -json output (newline-delimited JSON
// messages) into a VulnResult. Malformed lines are skipped silently; findings
// whose OSV entry never appeared are still returned, just without
// description/alias enrichment.
//
// NOTE(review): the stderr parameter is accepted but never read — confirm
// whether stderr diagnostics were meant to be surfaced.
func ParseVulnCheckJSON(stdout, stderr string) (*VulnResult, error) {
	result := &VulnResult{}

	// Collect OSV entries and findings separately, then correlate.
	// Duplicate OSV IDs: last one wins.
	osvMap := make(map[string]*govulncheckOSV)
	var findings []govulncheckFind

	// Parse line-by-line to gracefully skip malformed entries.
	// json.Decoder.More() hangs on non-JSON input, so we split first.
	for _, line := range strings.Split(stdout, "\n") {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}

		var msg govulncheckMessage
		if err := json.Unmarshal([]byte(line), &msg); err != nil {
			// Skip malformed lines — govulncheck sometimes emits progress text
			continue
		}

		// A message may in principle carry several fields; handle each
		// independently. Later config messages overwrite Module.
		if msg.Config != nil {
			result.Module = msg.Config.ModulePath
		}
		if msg.OSV != nil {
			osvMap[msg.OSV.ID] = msg.OSV
		}
		if msg.Finding != nil {
			findings = append(findings, *msg.Finding)
		}
	}

	// Build VulnFindings by correlating findings with OSV metadata.
	for _, f := range findings {
		finding := VulnFinding{
			ID: f.OSV,
		}

		// Extract package, function, and module from trace.
		if len(f.Trace) > 0 {
			// The first trace entry is the called function in user code;
			// the last is the vulnerable symbol.
			last := f.Trace[len(f.Trace)-1]
			finding.Package = last.Package
			finding.CalledFunction = last.Function
			finding.ModulePath = last.Module

			// If the trace has a version, capture it.
			// NOTE(review): the first non-empty trace Version is stored as
			// FixedVersion and then takes precedence over the OSV "fixed"
			// event below. Trace versions look like observed (vulnerable)
			// versions, not fix versions, but the current behavior is pinned
			// by vulncheck_test.go — confirm before changing.
			for _, tr := range f.Trace {
				if tr.Version != "" {
					finding.FixedVersion = tr.Version
					break
				}
			}
		}

		// Enrich from OSV entry (description, aliases, fixed version).
		if osv, ok := osvMap[f.OSV]; ok {
			finding.Description = osv.Summary
			finding.Aliases = osv.Aliases

			// Take the first non-empty "fixed" event, but only if the trace
			// did not already supply a version. Severity is not extracted
			// here despite VulnFinding declaring a Severity field.
			for _, aff := range osv.Affected {
				for _, r := range aff.Ranges {
					for _, ev := range r.Events {
						if ev.Fixed != "" && finding.FixedVersion == "" {
							finding.FixedVersion = ev.Fixed
						}
					}
				}
			}
		}

		result.Findings = append(result.Findings, finding)
	}

	return result, nil
}
|
||||
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
189
devkit/vulncheck_test.go
Normal file
189
devkit/vulncheck_test.go
Normal file
|
|
@ -0,0 +1,189 @@
|
|||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
package devkit
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// sampleVulnJSON is a realistic govulncheck -json stream: one config message,
// one progress message, two OSV entries, and two findings (one reached via
// user code, one hit directly in the standard library).
const sampleVulnJSON = `{"config":{"module_path":"example.com/mymod","go_version":"go1.22.0"}}
{"progress":{"message":"Scanning your code..."}}
{"osv":{"id":"GO-2024-0001","aliases":["CVE-2024-1234","GHSA-abcd-1234"],"summary":"Buffer overflow in net/http","affected":[{"package":{"name":"stdlib","ecosystem":"Go"},"ranges":[{"events":[{"fixed":"1.22.1"}]}]}]}}
{"osv":{"id":"GO-2024-0002","aliases":["CVE-2024-5678"],"summary":"Path traversal in archive/zip","affected":[{"package":{"name":"stdlib","ecosystem":"Go"},"ranges":[{"events":[{"fixed":"1.21.9"}]}]}]}}
{"finding":{"osv":"GO-2024-0001","trace":[{"module":"example.com/mymod","package":"example.com/mymod/server","function":"HandleRequest"},{"module":"stdlib","package":"net/http","function":"ReadRequest","version":"go1.22.0"}]}}
{"finding":{"osv":"GO-2024-0002","trace":[{"module":"stdlib","package":"archive/zip","function":"OpenReader","version":"go1.22.0"}]}}
`
|
||||
|
||||
func TestParseVulnCheckJSON_Good(t *testing.T) {
|
||||
result, err := ParseVulnCheckJSON(sampleVulnJSON, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "example.com/mymod", result.Module)
|
||||
assert.Len(t, result.Findings, 2)
|
||||
|
||||
// First finding: GO-2024-0001
|
||||
f0 := result.Findings[0]
|
||||
assert.Equal(t, "GO-2024-0001", f0.ID)
|
||||
assert.Equal(t, "net/http", f0.Package)
|
||||
assert.Equal(t, "ReadRequest", f0.CalledFunction)
|
||||
assert.Equal(t, "Buffer overflow in net/http", f0.Description)
|
||||
assert.Contains(t, f0.Aliases, "CVE-2024-1234")
|
||||
assert.Contains(t, f0.Aliases, "GHSA-abcd-1234")
|
||||
assert.Equal(t, "go1.22.0", f0.FixedVersion) // from trace version
|
||||
|
||||
// Second finding: GO-2024-0002
|
||||
f1 := result.Findings[1]
|
||||
assert.Equal(t, "GO-2024-0002", f1.ID)
|
||||
assert.Equal(t, "archive/zip", f1.Package)
|
||||
assert.Equal(t, "OpenReader", f1.CalledFunction)
|
||||
assert.Equal(t, "Path traversal in archive/zip", f1.Description)
|
||||
assert.Contains(t, f1.Aliases, "CVE-2024-5678")
|
||||
}
|
||||
|
||||
func TestParseVulnCheckJSON_EmptyOutput_Good(t *testing.T) {
|
||||
result, err := ParseVulnCheckJSON("", "")
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, result.Findings)
|
||||
assert.Empty(t, result.Module)
|
||||
}
|
||||
|
||||
func TestParseVulnCheckJSON_ConfigOnly_Good(t *testing.T) {
|
||||
input := `{"config":{"module_path":"example.com/clean","go_version":"go1.23.0"}}
|
||||
`
|
||||
result, err := ParseVulnCheckJSON(input, "")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "example.com/clean", result.Module)
|
||||
assert.Empty(t, result.Findings)
|
||||
}
|
||||
|
||||
func TestParseVulnCheckJSON_MalformedLines_Bad(t *testing.T) {
|
||||
input := `not valid json
|
||||
{"config":{"module_path":"example.com/mod"}}
|
||||
also broken {{{
|
||||
{"osv":{"id":"GO-2024-0099","summary":"Test vuln","aliases":[],"affected":[]}}
|
||||
{"finding":{"osv":"GO-2024-0099","trace":[{"module":"stdlib","package":"crypto/tls","function":"Dial"}]}}
|
||||
`
|
||||
result, err := ParseVulnCheckJSON(input, "")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "example.com/mod", result.Module)
|
||||
assert.Len(t, result.Findings, 1)
|
||||
assert.Equal(t, "GO-2024-0099", result.Findings[0].ID)
|
||||
assert.Equal(t, "Dial", result.Findings[0].CalledFunction)
|
||||
}
|
||||
|
||||
func TestParseVulnCheckJSON_FindingWithoutOSV_Bad(t *testing.T) {
|
||||
// Finding references an OSV ID that was never emitted — should still parse.
|
||||
input := `{"finding":{"osv":"GO-2024-UNKNOWN","trace":[{"module":"example.com/mod","package":"example.com/mod/pkg","function":"DoStuff"}]}}
|
||||
`
|
||||
result, err := ParseVulnCheckJSON(input, "")
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, result.Findings, 1)
|
||||
|
||||
f := result.Findings[0]
|
||||
assert.Equal(t, "GO-2024-UNKNOWN", f.ID)
|
||||
assert.Equal(t, "example.com/mod/pkg", f.Package)
|
||||
assert.Equal(t, "DoStuff", f.CalledFunction)
|
||||
assert.Empty(t, f.Description) // No OSV entry to enrich from
|
||||
assert.Empty(t, f.Aliases)
|
||||
}
|
||||
|
||||
func TestParseVulnCheckJSON_NoTrace_Bad(t *testing.T) {
|
||||
input := `{"osv":{"id":"GO-2024-0050","summary":"Empty trace test","aliases":["CVE-2024-0050"],"affected":[]}}
|
||||
{"finding":{"osv":"GO-2024-0050","trace":[]}}
|
||||
`
|
||||
result, err := ParseVulnCheckJSON(input, "")
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, result.Findings, 1)
|
||||
|
||||
f := result.Findings[0]
|
||||
assert.Equal(t, "GO-2024-0050", f.ID)
|
||||
assert.Equal(t, "Empty trace test", f.Description)
|
||||
assert.Empty(t, f.Package)
|
||||
assert.Empty(t, f.CalledFunction)
|
||||
}
|
||||
|
||||
func TestParseVulnCheckJSON_MultipleFindings_Good(t *testing.T) {
|
||||
input := `{"osv":{"id":"GO-2024-0010","summary":"Vuln A","aliases":["CVE-A"],"affected":[{"ranges":[{"events":[{"fixed":"1.20.5"}]}]}]}}
|
||||
{"osv":{"id":"GO-2024-0011","summary":"Vuln B","aliases":["CVE-B"],"affected":[]}}
|
||||
{"osv":{"id":"GO-2024-0012","summary":"Vuln C","aliases":["CVE-C"],"affected":[{"ranges":[{"events":[{"fixed":"1.21.0"}]}]}]}}
|
||||
{"finding":{"osv":"GO-2024-0010","trace":[{"package":"net/http","function":"Serve"}]}}
|
||||
{"finding":{"osv":"GO-2024-0011","trace":[{"package":"encoding/xml","function":"Unmarshal"}]}}
|
||||
{"finding":{"osv":"GO-2024-0012","trace":[{"package":"os/exec","function":"Command"}]}}
|
||||
`
|
||||
result, err := ParseVulnCheckJSON(input, "")
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, result.Findings, 3)
|
||||
|
||||
assert.Equal(t, "Vuln A", result.Findings[0].Description)
|
||||
assert.Equal(t, "1.20.5", result.Findings[0].FixedVersion)
|
||||
assert.Equal(t, "Vuln B", result.Findings[1].Description)
|
||||
assert.Equal(t, "Vuln C", result.Findings[2].Description)
|
||||
assert.Equal(t, "1.21.0", result.Findings[2].FixedVersion)
|
||||
}
|
||||
|
||||
func TestParseVulnCheckJSON_FixedVersionFromOSV_Good(t *testing.T) {
|
||||
// When trace has no version, fixed version should come from OSV affected ranges.
|
||||
input := `{"osv":{"id":"GO-2024-0077","summary":"Test","aliases":[],"affected":[{"ranges":[{"events":[{"fixed":"0.9.1"}]}]}]}}
|
||||
{"finding":{"osv":"GO-2024-0077","trace":[{"package":"example.com/lib","function":"Process"}]}}
|
||||
`
|
||||
result, err := ParseVulnCheckJSON(input, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, result.Findings, 1)
|
||||
assert.Equal(t, "0.9.1", result.Findings[0].FixedVersion)
|
||||
}
|
||||
|
||||
// TestVulnCheck_NotInstalled_Ugly exercises the error path when the
// govulncheck binary cannot be found on PATH.
func TestVulnCheck_NotInstalled_Ugly(t *testing.T) {
	// NOTE(review): this registers a mock for a binary name that is never
	// invoked ("govulncheck-nonexistent") — looks like leftover scaffolding
	// that contradicts the comment below; confirm whether it can be removed.
	setupMockCmdExit(t, "govulncheck-nonexistent", "", "", 1)
	// Don't mock govulncheck — ensure it handles missing binary gracefully
	// We'll rely on the binary not being in the test temp PATH.

	tk := New(t.TempDir())
	// Remove PATH to simulate govulncheck not found
	t.Setenv("PATH", t.TempDir())
	_, err := tk.VulnCheck("./...")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "not installed or not available")
}
|
||||
|
||||
func TestVulnCheck_WithMock_Good(t *testing.T) {
|
||||
// Mock govulncheck to return our sample JSON
|
||||
setupMockCmd(t, "govulncheck", sampleVulnJSON)
|
||||
|
||||
tk := New(t.TempDir())
|
||||
result, err := tk.VulnCheck("./...")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "example.com/mymod", result.Module)
|
||||
assert.Len(t, result.Findings, 2)
|
||||
}
|
||||
|
||||
func TestVulnCheck_DefaultModulePath_Good(t *testing.T) {
|
||||
setupMockCmd(t, "govulncheck", `{"config":{"module_path":"default/mod"}}`)
|
||||
|
||||
tk := New(t.TempDir())
|
||||
result, err := tk.VulnCheck("")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "default/mod", result.Module)
|
||||
}
|
||||
|
||||
func TestParseVulnCheckJSON_ProgressOnly_Good(t *testing.T) {
|
||||
input := `{"progress":{"message":"Scanning..."}}
|
||||
{"progress":{"message":"Done"}}
|
||||
`
|
||||
result, err := ParseVulnCheckJSON(input, "")
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, result.Findings)
|
||||
}
|
||||
|
||||
func TestParseVulnCheckJSON_ModulePathFromTrace_Good(t *testing.T) {
|
||||
input := `{"finding":{"osv":"GO-2024-0099","trace":[{"module":"example.com/vulnerable","package":"example.com/vulnerable/pkg","function":"Bad","version":"v1.2.3"}]}}
|
||||
`
|
||||
result, err := ParseVulnCheckJSON(input, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, result.Findings, 1)
|
||||
assert.Equal(t, "example.com/vulnerable", result.Findings[0].ModulePath)
|
||||
assert.Equal(t, "v1.2.3", result.Findings[0].FixedVersion)
|
||||
}
|
||||
|
||||
// LEK-1 | lthn.ai | EUPL-1.2
|
||||
Loading…
Add table
Reference in a new issue