cli/cmd/ml/cmd_score.go
Charon 27d5e2e100 refactor: flatten commands, extract php/ci to own repos (#2)
## Summary
- Extract PHP/Laravel commands to `core/php` repo (42 files, standalone module)
- Extract CI/release + SDK commands to `core/ci` repo (10 files)
- Remove `internal/variants/` build tag system entirely
- Move all 30 remaining command packages from `internal/cmd/` to top-level `cmd/`
- Rewrite `main.go` with direct imports — no more variant selection
- PHP and CI are now optional via commented import lines in `main.go` (see the import sketch below)
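
A minimal sketch of what the rewritten `main.go` might look like under this scheme. The exact import paths (`forge.lthn.ai/core/go/cli/cmd/ml`, `forge.lthn.ai/core/php/cmd`, `forge.lthn.ai/core/ci/cmd`), the blank-import registration style, and the `cli.Execute` entry point are assumptions drawn from the summary above, not verified against the repo:

```go
package main

import (
	"forge.lthn.ai/core/go/pkg/cli"

	// Command packages are imported directly for their init()-time
	// registration; the old internal/variants build-tag selection is gone.
	_ "forge.lthn.ai/core/go/cli/cmd/ml"

	// Optional command sets now live in their own repos; uncomment these
	// lines to compile them back into the binary.
	// _ "forge.lthn.ai/core/php/cmd"
	// _ "forge.lthn.ai/core/ci/cmd"
)

func main() {
	cli.Execute() // hypothetical entry point; name assumed, not confirmed
}
```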

Co-authored-by: Claude <developers@lethean.io>
Reviewed-on: #2
Co-authored-by: Charon <charon@lthn.ai>
Co-committed-by: Charon <charon@lthn.ai>
2026-02-16 14:45:06 +00:00

package ml

import (
	"context"
	"fmt"
	"time"

	"forge.lthn.ai/core/go/pkg/cli"
	"forge.lthn.ai/core/go/pkg/ml"
)

// Flag values for the score subcommand.
var (
	scoreInput  string // path to the input JSONL file (required)
	scoreSuites string // comma-separated list of scoring suites
	scoreOutput string // optional path for the JSON score report
	scoreConcur int    // number of concurrent scoring workers
)

var scoreCmd = &cli.Command{
	Use:   "score",
	Short: "Score responses with heuristic and LLM judges",
	Long:  "Reads a JSONL file of prompt/response pairs and scores them across configured suites.",
	RunE:  runScore,
}

func init() {
	scoreCmd.Flags().StringVar(&scoreInput, "input", "", "Input JSONL file with prompt/response pairs (required)")
	scoreCmd.Flags().StringVar(&scoreSuites, "suites", "all", "Comma-separated scoring suites (heuristic,semantic,content,exact,truthfulqa,donotanswer,toxigen)")
	scoreCmd.Flags().StringVar(&scoreOutput, "output", "", "Output JSON file for scores")
	scoreCmd.Flags().IntVar(&scoreConcur, "concurrency", 4, "Number of concurrent scoring workers")
	scoreCmd.MarkFlagRequired("input")
}

// runScore reads prompt/response pairs from the input file, scores them
// across the selected suites, and either writes a JSON report or prints
// per-model averages to stdout.
func runScore(cmd *cli.Command, args []string) error {
	responses, err := ml.ReadResponses(scoreInput)
	if err != nil {
		return fmt.Errorf("read input: %w", err)
	}

	// judgeURL and judgeModel are shared flags defined elsewhere in this
	// package. The LLM judge is optional and only constructed when a
	// judge URL is provided.
	var judge *ml.Judge
	if judgeURL != "" {
		backend := ml.NewHTTPBackend(judgeURL, judgeModel)
		judge = ml.NewJudge(backend)
	}

	engine := ml.NewEngine(judge, scoreConcur, scoreSuites)
	ctx := context.Background()

	perPrompt := engine.ScoreAll(ctx, responses)
	averages := ml.ComputeAverages(perPrompt)

	if scoreOutput != "" {
		output := &ml.ScorerOutput{
			Metadata: ml.Metadata{
				JudgeModel: judgeModel,
				JudgeURL:   judgeURL,
				ScoredAt:   time.Now(),
				Suites:     ml.SplitComma(scoreSuites),
			},
			ModelAverages: averages,
			PerPrompt:     perPrompt,
		}
		if err := ml.WriteScores(scoreOutput, output); err != nil {
			return fmt.Errorf("write output: %w", err)
		}
		fmt.Printf("Scores written to %s\n", scoreOutput)
		return nil
	}

	// No output file: print per-model averages to stdout.
	for model, avgs := range averages {
		fmt.Printf("%s:\n", model)
		for field, val := range avgs {
			fmt.Printf(" %-25s %.3f\n", field, val)
		}
	}
	return nil
}
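
// Example invocation (hypothetical sketch: the binary name and the `ml`
// parent command are assumptions; only the flags are confirmed by this file):
//
//	$ <binary> ml score --input pairs.jsonl --suites heuristic,semantic \
//	    --output scores.json --concurrency 8
//
// With no --output flag, per-model averages are printed to stdout instead.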