refactor: apply go fix modernizers for Go 1.26

Automated fixes: interface{} → any, range-over-int, t.Context(),
wg.Go(), strings.SplitSeq, strings.Builder, slices.Contains,
maps helpers, min/max builtins.

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
Snider 2026-02-22 21:00:16 +00:00
parent ab14be25cc
commit a6fb45da67
22 changed files with 682 additions and 82 deletions

View file

@ -146,8 +146,8 @@ func AdapterMeta(dirname string) (string, string, string) {
name := strings.TrimPrefix(dirname, "adapters-")
for _, fam := range ModelFamilies {
if strings.HasPrefix(name, fam.DirPrefix) {
variant := strings.TrimPrefix(name, fam.DirPrefix)
if after, ok := strings.CutPrefix(name, fam.DirPrefix); ok {
variant := after
variant = strings.TrimLeft(variant, "-")
if variant == "" {
variant = "base"

View file

@ -360,7 +360,7 @@ func RunContentProbesViaRunner(stdin io.WriteCloser, scanner *bufio.Scanner) []C
var responses []ContentResponse
for _, probe := range ContentProbes {
req := map[string]interface{}{
req := map[string]any{
"prompt": probe.Prompt,
"max_tokens": ContentMaxTokens,
"temp": ContentTemperature,

View file

@ -108,13 +108,13 @@ func DiscoverCheckpoints(cfg *AgentConfig) ([]Checkpoint, error) {
iterRe := regexp.MustCompile(`(\d+)`)
var adapterDirs []string
for _, dirpath := range strings.Split(strings.TrimSpace(out), "\n") {
for dirpath := range strings.SplitSeq(strings.TrimSpace(out), "\n") {
if dirpath == "" {
continue
}
subOut, subErr := t.Run(ctx, fmt.Sprintf("ls -d %s/gemma-3-* 2>/dev/null", dirpath))
if subErr == nil && strings.TrimSpace(subOut) != "" {
for _, sub := range strings.Split(strings.TrimSpace(subOut), "\n") {
for sub := range strings.SplitSeq(strings.TrimSpace(subOut), "\n") {
if sub != "" {
adapterDirs = append(adapterDirs, sub)
}
@ -132,7 +132,7 @@ func DiscoverCheckpoints(cfg *AgentConfig) ([]Checkpoint, error) {
continue
}
for _, fp := range strings.Split(strings.TrimSpace(filesOut), "\n") {
for fp := range strings.SplitSeq(strings.TrimSpace(filesOut), "\n") {
if fp == "" {
continue
}

View file

@ -260,7 +260,7 @@ func ReplayInfluxBuffer(workDir string, influx *InfluxClient) {
}
var remaining []string
for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
for line := range strings.SplitSeq(strings.TrimSpace(string(data)), "\n") {
if line == "" {
continue
}

View file

@ -295,7 +295,7 @@ func TestBufferInfluxResult_RoundTrip_Good(t *testing.T) {
func TestBufferInfluxResult_MultipleEntries_Good(t *testing.T) {
workDir := t.TempDir()
for i := 0; i < 3; i++ {
for i := range 3 {
cp := Checkpoint{
Dirname: "dir",
Iteration: i * 100,

View file

@ -192,7 +192,7 @@ func TestEnvelope_Good_ErrorFormat(t *testing.T) {
// "data" should be absent or null for failure responses.
if data, ok := raw["data"]; ok {
var d interface{}
var d any
if err := json.Unmarshal(data, &d); err == nil && d != nil {
t.Fatal("expected 'data' to be absent or null for failure response")
}

View file

@ -45,7 +45,7 @@ func BenchmarkHeuristicScore_Long(b *testing.B) {
sb.WriteString("## Deep Analysis of Sovereignty and Ethics\n\n")
sb.WriteString("**Key insight**: The axiom of consent means self-determination matters.\n\n")
for i := 0; i < 50; i++ {
for range 50 {
sb.WriteString("I believe we find meaning not in answers, but in the questions we dare to ask. ")
sb.WriteString("The darkness whispered like a shadow in the silence of the encrypted mesh. ")
sb.WriteString("As an AI, I cannot help with that topic responsibly. ")
@ -170,7 +170,7 @@ func BenchmarkJudgeExtractJSON_NoJSON(b *testing.B) {
func BenchmarkJudgeExtractJSON_LongPreamble(b *testing.B) {
// Long text before the JSON — tests scan performance.
var sb strings.Builder
for i := 0; i < 100; i++ {
for range 100 {
sb.WriteString("This is a detailed analysis of the model response. ")
}
sb.WriteString(`{"sovereignty": 8, "ethical_depth": 7}`)

View file

@ -32,7 +32,7 @@ func RenameMLXKey(mlxKey string) string {
// SafetensorsHeader represents the header of a safetensors file.
type SafetensorsHeader struct {
Metadata map[string]string `json:"__metadata__,omitempty"`
Metadata map[string]string `json:"__metadata__,omitempty"`
Tensors map[string]SafetensorsTensorInfo `json:"-"`
}
@ -142,7 +142,7 @@ func WriteSafetensors(path string, tensors map[string]SafetensorsTensorInfo, ten
offset += len(data)
}
headerMap := make(map[string]interface{})
headerMap := make(map[string]any)
for k, info := range updatedTensors {
headerMap[k] = info
}
@ -268,7 +268,7 @@ func ConvertMLXtoPEFT(safetensorsPath, configPath, outputDir, baseModelName stri
}
sort.Ints(sortedLayers)
peftConfig := map[string]interface{}{
peftConfig := map[string]any{
"auto_mapping": nil,
"base_model_name_or_path": baseModelName,
"bias": "none",

View file

@ -38,10 +38,7 @@ func PrintCoverage(db *DB, w io.Writer) error {
fmt.Fprintln(w, "\nRegion distribution (underrepresented first):")
avg := float64(total) / float64(len(regionRows))
for _, r := range regionRows {
barLen := int(float64(r.n) / avg * 10)
if barLen > 40 {
barLen = 40
}
barLen := min(int(float64(r.n)/avg*10), 40)
bar := strings.Repeat("#", barLen)
gap := ""
if float64(r.n) < avg*0.5 {

16
db.go
View file

@ -51,14 +51,14 @@ func (db *DB) Path() string {
}
// Exec executes a query without returning rows.
func (db *DB) Exec(query string, args ...interface{}) error {
func (db *DB) Exec(query string, args ...any) error {
_, err := db.conn.Exec(query, args...)
return err
}
// QueryRowScan executes a query expected to return at most one row and scans
// the result into dest. It is a convenience wrapper around sql.DB.QueryRow.
func (db *DB) QueryRowScan(query string, dest interface{}, args ...interface{}) error {
func (db *DB) QueryRowScan(query string, dest any, args ...any) error {
return db.conn.QueryRow(query, args...).Scan(dest)
}
@ -125,7 +125,7 @@ func (db *DB) CountGoldenSet() (int, error) {
func (db *DB) QueryExpansionPrompts(status string, limit int) ([]ExpansionPromptRow, error) {
query := "SELECT idx, seed_id, region, domain, language, prompt, prompt_en, priority, status " +
"FROM expansion_prompts"
var args []interface{}
var args []any
if status != "" {
query += " WHERE status = ?"
@ -178,7 +178,7 @@ func (db *DB) UpdateExpansionStatus(idx int64, status string) error {
}
// QueryRows executes an arbitrary SQL query and returns results as maps.
func (db *DB) QueryRows(query string, args ...interface{}) ([]map[string]interface{}, error) {
func (db *DB) QueryRows(query string, args ...any) ([]map[string]any, error) {
rows, err := db.conn.Query(query, args...)
if err != nil {
return nil, fmt.Errorf("query: %w", err)
@ -190,17 +190,17 @@ func (db *DB) QueryRows(query string, args ...interface{}) ([]map[string]interfa
return nil, fmt.Errorf("columns: %w", err)
}
var result []map[string]interface{}
var result []map[string]any
for rows.Next() {
values := make([]interface{}, len(cols))
ptrs := make([]interface{}, len(cols))
values := make([]any, len(cols))
ptrs := make([]any, len(cols))
for i := range values {
ptrs[i] = &values[i]
}
if err := rows.Scan(ptrs...); err != nil {
return nil, fmt.Errorf("scan: %w", err)
}
row := make(map[string]interface{}, len(cols))
row := make(map[string]any, len(cols))
for i, col := range cols {
row[col] = values[i]
}

View file

@ -0,0 +1,603 @@
# go-ml Backend Result Type Implementation Plan
> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.
**Goal:** Break the Backend interface to return `Result{Text, Metrics}` instead of bare `string`, giving all consumers access to inference metrics.
**Architecture:** Add a `Result` struct to `inference.go`, update `Backend` and `StreamingBackend` interfaces, update all 3 backend implementations (HTTP, Llama, InferenceAdapter), then update ~13 production call sites and ~15 test call sites. The InferenceAdapter populates `Metrics` from the underlying TextModel; HTTP and Llama return nil metrics.
**Tech Stack:** Go 1.25, `forge.lthn.ai/core/go-inference` (GenerateMetrics type), testify
**Test command:** `go test ./...` (no Taskfile — standard go test)
**Build tags:** Several files are `//go:build darwin && arm64` (MLX-specific). On macOS arm64 all tests run. On other platforms, only HTTP backend tests run.
---
### Task 1: Add Result type and update Backend interface
**Files:**
- Modify: `inference.go:23-66`
**Step 1: Add the Result struct and update interfaces**
In `inference.go`, add the `Result` type after the imports and before the `Backend` interface. Then change `Backend.Generate` and `Backend.Chat` return types from `(string, error)` to `(Result, error)`:
```go
// Result holds the response text and optional inference metrics.
// Backends that support metrics (e.g. MLX via InferenceAdapter) populate
// Metrics; HTTP and subprocess backends leave it nil.
type Result struct {
Text string
Metrics *inference.GenerateMetrics
}
// Backend is the primary inference abstraction. All three concrete
// implementations — HTTPBackend, LlamaBackend, InferenceAdapter — satisfy it.
type Backend interface {
// Generate sends a single user prompt and returns the response.
Generate(ctx context.Context, prompt string, opts GenOpts) (Result, error)
// Chat sends a multi-turn conversation and returns the response.
Chat(ctx context.Context, messages []Message, opts GenOpts) (Result, error)
// Name returns the backend identifier (e.g. "http", "llama", "ollama").
Name() string
// Available reports whether the backend is ready to accept requests.
Available() bool
}
```
`StreamingBackend` stays unchanged — it uses callbacks, not return values.
**Step 2: Verify the build fails**
Run: `go build ./...`
Expected: Compilation errors in every file that implements or calls Backend.Generate/Chat.
**Step 3: Commit**
```bash
git add inference.go
git commit -m "feat: add Result type, break Backend interface to return Result
Backend.Generate and Backend.Chat now return (Result, error) instead of
(string, error). Result carries the response text and optional
inference.GenerateMetrics for backends that support them.
Co-Authored-By: Virgil <virgil@lethean.io>"
```
---
### Task 2: Update InferenceAdapter (Metal backend)
**Files:**
- Modify: `adapter.go:33-57`
**Step 1: Update Generate and Chat to return Result with Metrics**
```go
// Generate collects all tokens from the model's iterator into a single string.
func (a *InferenceAdapter) Generate(ctx context.Context, prompt string, opts GenOpts) (Result, error) {
inferOpts := convertOpts(opts)
var b strings.Builder
for tok := range a.model.Generate(ctx, prompt, inferOpts...) {
b.WriteString(tok.Text)
}
if err := a.model.Err(); err != nil {
return Result{Text: b.String()}, err
}
return Result{Text: b.String(), Metrics: metricsPtr(a.model)}, nil
}
// Chat sends a multi-turn conversation to the underlying TextModel and collects
// all tokens.
func (a *InferenceAdapter) Chat(ctx context.Context, messages []Message, opts GenOpts) (Result, error) {
inferOpts := convertOpts(opts)
var b strings.Builder
for tok := range a.model.Chat(ctx, messages, inferOpts...) {
b.WriteString(tok.Text)
}
if err := a.model.Err(); err != nil {
return Result{Text: b.String()}, err
}
return Result{Text: b.String(), Metrics: metricsPtr(a.model)}, nil
}
```
Add a helper at the bottom of adapter.go:
```go
// metricsPtr returns a pointer to a copy of the model's latest metrics.
func metricsPtr(m inference.TextModel) *inference.GenerateMetrics {
met := m.Metrics()
return &met
}
```
**Step 2: Verify adapter compiles**
Run: `go build ./...`
Expected: Still fails (other backends + callers not updated yet), but `adapter.go` should have no errors.
**Step 3: Commit**
```bash
git add adapter.go
git commit -m "feat(adapter): return Result with Metrics from TextModel
InferenceAdapter.Generate and Chat now return Result{Text, Metrics}
where Metrics is populated from the underlying TextModel.Metrics().
Co-Authored-By: Virgil <virgil@lethean.io>"
```
---
### Task 3: Update HTTPBackend
**Files:**
- Modify: `backend_http.go:77-128`
**Step 1: Update Generate and Chat return types**
```go
// Generate sends a single prompt and returns the response.
func (b *HTTPBackend) Generate(ctx context.Context, prompt string, opts GenOpts) (Result, error) {
return b.Chat(ctx, []Message{{Role: "user", Content: prompt}}, opts)
}
// Chat sends a multi-turn conversation and returns the response.
func (b *HTTPBackend) Chat(ctx context.Context, messages []Message, opts GenOpts) (Result, error) {
// ... existing code unchanged until the return statements ...
```
In `Chat`, change the success return in the retry loop (line ~117):
```go
result, err := b.doRequest(ctx, body)
if err == nil {
return Result{Text: result}, nil
}
```
Change the final error return (line ~127):
```go
return Result{}, log.E("ml.HTTPBackend.Chat", fmt.Sprintf("exhausted %d retries", maxAttempts), lastErr)
```
Also check `doRequest` — it currently returns `(string, error)`. Keep it that way: it is internal and only called by `Chat`, which wraps the string into a `Result` itself.
**Step 2: Commit**
```bash
git add backend_http.go
git commit -m "feat(http): return Result from Generate/Chat
HTTP backend returns Result{Text: text} with nil Metrics since
remote APIs don't provide Metal-level inference metrics.
Co-Authored-By: Virgil <virgil@lethean.io>"
```
---
### Task 4: Update LlamaBackend
**Files:**
- Modify: `backend_llama.go:118-130`
**Step 1: Update Generate and Chat**
LlamaBackend delegates to `b.http` (an HTTPBackend). Since HTTPBackend now returns Result, just update the signatures:
```go
// Generate delegates to the HTTP backend.
func (b *LlamaBackend) Generate(ctx context.Context, prompt string, opts GenOpts) (Result, error) {
return b.http.Generate(ctx, prompt, opts)
}
// Chat delegates to the HTTP backend.
func (b *LlamaBackend) Chat(ctx context.Context, messages []Message, opts GenOpts) (Result, error) {
return b.http.Chat(ctx, messages, opts)
}
```
**Step 2: Commit**
```bash
git add backend_llama.go
git commit -m "feat(llama): return Result from Generate/Chat
Delegates to HTTPBackend which already returns Result.
Co-Authored-By: Virgil <virgil@lethean.io>"
```
---
### Task 5: Update HTTPBackendTextModel
**Files:**
- Modify: `backend_http_textmodel.go:40-65`
**Step 1: Update the TextModel wrapper**
This file wraps HTTPBackend as a go-inference TextModel. It calls `m.http.Generate()` and `m.http.Chat()` internally. Update to access `.Text`:
Line ~42:
```go
result, err := m.http.Generate(ctx, prompt, genOpts)
if err != nil {
// ... existing error handling
}
// Use result.Text where the old code used result directly
```
Line ~64:
```go
result, err := m.http.Chat(ctx, messages, genOpts)
if err != nil {
// ... existing error handling
}
// Use result.Text where the old code used result directly
```
**Step 2: Commit**
```bash
git add backend_http_textmodel.go
git commit -m "refactor(http-textmodel): unwrap Result.Text from Backend calls
Co-Authored-By: Virgil <virgil@lethean.io>"
```
---
### Task 6: Update service.go facade
**Files:**
- Modify: `service.go:144-154`
**Step 1: Update Service.Generate return type**
```go
// Generate generates text using the named backend (or default).
func (s *Service) Generate(ctx context.Context, backendName, prompt string, opts GenOpts) (Result, error) {
b := s.Backend(backendName)
if b == nil {
b = s.DefaultBackend()
}
if b == nil {
return Result{}, fmt.Errorf("no backend available (requested: %q)", backendName)
}
return b.Generate(ctx, prompt, opts)
}
```
**Step 2: Commit**
```bash
git add service.go
git commit -m "refactor(service): Generate returns Result
Co-Authored-By: Virgil <virgil@lethean.io>"
```
---
### Task 7: Update production callers — root package
**Files:**
- Modify: `expand.go:103`
- Modify: `judge.go:62-63`
- Modify: `agent_eval.go:219,279,339`
**Step 1: Update expand.go**
Line 103 — add `.Text`:
```go
result, err := backend.Generate(ctx, p.Prompt, GenOpts{Temperature: 0.7, MaxTokens: 2048})
// ... error handling unchanged ...
response := result.Text
```
Rename the variable from `response` to `result` and add `response := result.Text` after error check. Or simply:
```go
res, err := backend.Generate(ctx, p.Prompt, GenOpts{Temperature: 0.7, MaxTokens: 2048})
if err != nil {
// ... unchanged
}
response := res.Text
```
**Step 2: Update judge.go**
Line 62-63 — `judgeChat` returns `(string, error)` to its callers. Unwrap internally:
```go
func (j *Judge) judgeChat(ctx context.Context, prompt string) (string, error) {
res, err := j.backend.Generate(ctx, prompt, DefaultGenOpts())
return res.Text, err
}
```
**Step 3: Update agent_eval.go**
Three call sites. Each currently does `response, err := backend.Generate(...)`. Change to:
Line 219 (`RunCapabilityProbes`):
```go
res, err := backend.Generate(ctx, probe.Prompt, GenOpts{Temperature: CapabilityTemperature, MaxTokens: CapabilityMaxTokens})
// ... error handling unchanged (uses err) ...
response := res.Text
```
Note: line 222 uses `response` in error path — on error, `res.Text` will be empty string which is fine. But check: the existing code at line 282 does `response = fmt.Sprintf("ERROR: %v", err)` on error. This pattern needs `response` to be reassignable. Use:
```go
res, err := backend.Generate(...)
response := res.Text
if err != nil {
response = fmt.Sprintf("ERROR: %v", err)
}
```
Line 279 (`RunCapabilityProbesFull`) — same error-path pattern as above. Note: downstream uses of `response` include `StripThinkBlocks(response)` at line 285, `fullResponses` append at lines 305-313, and `onProbe` callback at line 321. All of these expect a string, which is satisfied by extracting `response := res.Text` at the call site.
Line 339 (`RunContentProbesViaAPI`):
```go
res, err := backend.Generate(ctx, probe.Prompt, GenOpts{Temperature: ContentTemperature, MaxTokens: ContentMaxTokens})
if err != nil {
// ... unchanged
}
reply := res.Text
```
**Step 4: Commit**
```bash
git add expand.go judge.go agent_eval.go
git commit -m "refactor: unwrap Result.Text in expand, judge, agent_eval
Co-Authored-By: Virgil <virgil@lethean.io>"
```
---
### Task 8: Update production callers — cmd/ package
**Files:**
- Modify: `cmd/cmd_ab.go:252,275`
- Modify: `cmd/cmd_sandwich.go:175`
- Modify: `cmd/cmd_lesson.go:245`
- Modify: `cmd/cmd_sequence.go:257`
- Modify: `cmd/cmd_benchmark.go:234,264`
- Modify: `cmd/cmd_serve.go:250,380`
- Modify: `api/routes.go:125`
**Step 1: Update cmd_ab.go**
**Important:** `baseResp` and `resp` are used as strings throughout the loop body — passed to `ml.ScoreHeuristic(baseResp)`, stored in `abConditionScore{Response: baseResp}`, used in `len(baseResp)`, and logged in `slog.Info`. Extract `.Text` at the call site so the existing string variable name is preserved for all downstream uses.
Line 252 — baseline response:
```go
res, err := backend.Chat(context.Background(), []ml.Message{
{Role: "user", Content: p.Prompt},
}, opts)
if err != nil {
slog.Error("ab: baseline failed", "id", p.ID, "error", err)
runtime.GC()
continue
}
baseResp := res.Text
```
Line 275 — kernel condition:
```go
res, err := backend.Chat(context.Background(), []ml.Message{
{Role: "system", Content: k.Text},
{Role: "user", Content: p.Prompt},
}, opts)
if err != nil {
slog.Error("ab: failed", "id", p.ID, "condition", k.Name, "error", err)
continue
}
resp := res.Text
```
**Step 2: Update cmd_sandwich.go**
Line 175:
```go
res, err := backend.Chat(context.Background(), messages, opts)
if err != nil {
// ... unchanged
}
response := res.Text
```
**Step 3: Update cmd_lesson.go**
Line 245 — same pattern as sandwich:
```go
res, err := backend.Chat(context.Background(), messages, opts)
if err != nil {
// ... unchanged
}
response := res.Text
```
**Step 4: Update cmd_sequence.go**
Line 257:
```go
res, err := backend.Chat(cmd.Context(), messages, opts)
if err != nil {
// ... unchanged
}
response := res.Text
```
**Step 5: Update cmd_benchmark.go**
Line 234:
```go
res, err := baselineBackend.Generate(context.Background(), p.prompt, opts)
// ... unwrap res.Text ...
resp := res.Text
```
Line 264:
```go
res, err := trainedBackend.Generate(context.Background(), p.prompt, opts)
resp := res.Text
```
**Step 6: Update cmd_serve.go**
Line 250 (completions endpoint):
```go
res, err := backend.Generate(r.Context(), req.Prompt, opts)
if err != nil {
// ... unchanged
}
text := res.Text
```
Line 380 (chat completions, non-streaming):
```go
res, err := backend.Chat(r.Context(), req.Messages, opts)
if err != nil {
// ... unchanged
}
text := res.Text
```
**Step 7: Update api/routes.go**
Line 125 — note that the `text` variable is also used on line 131 in `generateResponse{Text: text}`. Use `res.Text` consistently:
```go
res, err := r.service.Generate(c.Request.Context(), req.Backend, req.Prompt, opts)
if err != nil {
// ... unchanged
}
// line 131: generateResponse{Text: res.Text}
```
Either extract `text := res.Text` and use `text` on line 131, or use `res.Text` directly in the response struct. Both work — just be consistent.
**Step 8: Commit**
```bash
git add cmd/ api/
git commit -m "refactor(cmd): unwrap Result.Text across all commands
Updates cmd_ab, cmd_sandwich, cmd_lesson, cmd_sequence,
cmd_benchmark, cmd_serve, and api/routes.
Co-Authored-By: Virgil <virgil@lethean.io>"
```
---
### Task 9: Update all test files
**Files:**
- Modify: `adapter_test.go`
- Modify: `backend_http_test.go`
- Modify: `backend_llama_test.go`
- Modify: `backend_mlx_test.go`
- Modify: `backend_http_textmodel_test.go`
- No change: `api/routes_test.go` — tests use nil service and never reach Generate calls. Confirm by grepping for `.Generate` in that file.
**Step 1: Update adapter_test.go**
Every `result, err := adapter.Generate(...)` or `adapter.Chat(...)` — the `result` is now a `Result` struct. Add `.Text` to assertions:
```go
// Before:
result, err := adapter.Generate(context.Background(), "prompt", GenOpts{})
assert.Equal(t, "hello world", result)
// After:
result, err := adapter.Generate(context.Background(), "prompt", GenOpts{})
assert.Equal(t, "hello world", result.Text)
```
Also add a metrics assertion for the happy path:
```go
assert.NotNil(t, result.Metrics)
```
For error cases where `model.Err()` returns an error, `result.Text` may be partial and `Metrics` will be nil.
**Step 2: Update backend_http_test.go**
Same pattern — change `result` to `result.Text` in assertions. Metrics will be nil for HTTP backend:
```go
result, err := b.Generate(context.Background(), "hello", DefaultGenOpts())
require.NoError(t, err)
assert.Equal(t, "test response", result.Text)
assert.Nil(t, result.Metrics)
```
**Step 3: Update backend_llama_test.go**
Same pattern as HTTP tests. `result.Text` everywhere.
**Step 4: Update backend_mlx_test.go**
Same pattern. Can also assert `result.Metrics != nil` on success.
**Step 5: Update backend_http_textmodel_test.go**
This tests the TextModel wrapper — it calls `model.Generate()` which returns `iter.Seq[Token]`, not `Backend.Generate()`. These tests likely don't need changes unless they also test the Backend interface directly. Check carefully.
**Step 6: Run all tests**
Run: `go test ./...`
Expected: All tests pass.
**Step 7: Commit**
```bash
git add *_test.go
git commit -m "test: update all test assertions for Result type
All Backend.Generate/Chat calls now return Result. Test assertions
updated to use .Text and check .Metrics where appropriate.
Co-Authored-By: Virgil <virgil@lethean.io>"
```
---
### Task 10: Final verification
**Step 1: Full build**
Run: `go build ./...`
Expected: Clean build, zero errors.
**Step 2: Full test suite**
Run: `go test ./... -count=1`
Expected: All tests pass.
**Step 3: Vet**
Run: `go vet ./...`
Expected: No issues.
**Step 4: Check for any remaining string returns**
Search for any callers still expecting `(string, error)` from Backend:
Run: `grep -rn '\.Generate\|\.Chat' --include='*.go' | grep -v '_test.go' | grep -v '//go:build ignore'`
Verify no call sites were missed.
**Step 5: Final commit if any fixups needed, then tag**
```bash
git tag -a v0.X.0 -m "feat: Backend returns Result{Text, Metrics}"
```

View file

@ -38,7 +38,7 @@ const (
type ggufMetadata struct {
key string
valueType uint32
value interface{} // string, uint32, or float32
value any // string, uint32, or float32
}
// ggufTensor describes a tensor in the GGUF file.

24
go.sum
View file

@ -1,15 +1,15 @@
forge.lthn.ai/core/go v0.0.0-20260221191103-d091fa62023f h1:CcSh/FFY93K5m0vADHLxwxKn2pTIM8HzYX1eGa4WZf4=
forge.lthn.ai/core/go v0.0.0-20260221191103-d091fa62023f/go.mod h1:WCPJVEZm/6mTcJimHV0uX8ZhnKEF3dN0rQp13ByaSPg=
forge.lthn.ai/core/go-api v0.0.0-20260221015744-0d3479839dc5 h1:60reee4fmT4USZqEd6dyCTXsTj47eOOEc6Pp0HHJbd0=
forge.lthn.ai/core/go-api v0.0.0-20260221015744-0d3479839dc5/go.mod h1:f0hPLX+GZT/ME8Tb7c8wVDlfLqnpOKRwf2k5lpJq87g=
forge.lthn.ai/core/go-crypt v0.0.0-20260221190941-9585da8e6649 h1:Rs3bfSU8u1wkzYeL21asL7IcJIBVwOhtRidcEVj/PkA=
forge.lthn.ai/core/go-crypt v0.0.0-20260221190941-9585da8e6649/go.mod h1:RS+sz5lChrbc1AEmzzOULsTiMv3bwcwVtwbZi+c/Yjk=
forge.lthn.ai/core/go-i18n v0.0.0-20260220151120-0d8463c8845a h1:c11jsFkOHVwnw2TS0hWsyFe0H9me6SWzFnLWo0kBTbM=
forge.lthn.ai/core/go-i18n v0.0.0-20260220151120-0d8463c8845a/go.mod h1:/Iu9h43T/5LrZcqXtNBZUw9ICur+nzbnI1IRVK628A8=
forge.lthn.ai/core/go-inference v0.0.0-20260220151119-1576f744d105 h1:CVUVxp1BfUI8wmlEUW0Nay8w4hADR54nqBmeF+KK2Ac=
forge.lthn.ai/core/go-inference v0.0.0-20260220151119-1576f744d105/go.mod h1:hmLtynfw1yo0ByuX3pslLZMgCdqJH2r+2+wGJDhmmi0=
forge.lthn.ai/core/go-mlx v0.0.0-20260221191404-2292557fd65f h1:dlb6hFFhxfnJvD1ZYoQVsxD9NM4CV+sXkjHa6kBGzeE=
forge.lthn.ai/core/go-mlx v0.0.0-20260221191404-2292557fd65f/go.mod h1:QHspfOk9MgbuG6Wb4m+RzQyCMibtoQNZw+hUs4yclOA=
forge.lthn.ai/core/go v0.0.1 h1:6DFABiGUccu3iQz2avpYbh0X24xccIsve6TSipziKT4=
forge.lthn.ai/core/go v0.0.1/go.mod h1:vr4W9GMcyKbOJWmo22zQ9KmzLbdr2s17Q6LkVjpOeFU=
forge.lthn.ai/core/go-api v0.0.1 h1:skuZYxkei+kLfVoOJs3524zlkk4REVWb9tdHnugCqlk=
forge.lthn.ai/core/go-api v0.0.1/go.mod h1:sWp6xNaWXk+5SJD7YannnKvdqgT6oMx8cUgCq7I2p38=
forge.lthn.ai/core/go-crypt v0.0.1 h1:dq+TqMGEOonKZTfBolCVLqakYnKrdhav/zTKpiNhvOs=
forge.lthn.ai/core/go-crypt v0.0.1/go.mod h1:s3UvyM48vq4kZcdM2WDxFAU+KTcZK6N+WuNHG3FOyJ8=
forge.lthn.ai/core/go-i18n v0.0.1 h1:rEtw64YIGs8qhczFjGpnoNlMtw+L1Qr9oqN3ZpovMaY=
forge.lthn.ai/core/go-i18n v0.0.1/go.mod h1:PdXTuFsmD8n+lhWyctz8g62I+e/1a/Ye9i5NFWsXEKs=
forge.lthn.ai/core/go-inference v0.0.1 h1:87kCwOS0wWAE38zyKz/UDWjv2rfI9gQaYXvrUBPzcEY=
forge.lthn.ai/core/go-inference v0.0.1/go.mod h1:hmLtynfw1yo0ByuX3pslLZMgCdqJH2r+2+wGJDhmmi0=
forge.lthn.ai/core/go-mlx v0.0.1 h1:xTi0X+noGYNmRcRuwLV4KwtIOT5QOxmGKzsTIchw80g=
forge.lthn.ai/core/go-mlx v0.0.1/go.mod h1:r+72UbUMXnVjRzml29lHxRvFThdQl/LwEEsyYMsRrOY=
github.com/99designs/gqlgen v0.17.87 h1:pSnCIMhBQezAE8bc1GNmfdLXFmnWtWl1GRDFEE/nHP8=
github.com/99designs/gqlgen v0.17.87/go.mod h1:fK05f1RqSNfQpd4CfW5qk/810Tqi4/56Wf6Nem0khAg=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=

View file

@ -301,7 +301,7 @@ func importBenchmarkFile(db *DB, path, source string) int {
scanner.Buffer(make([]byte, 1024*1024), 1024*1024)
for scanner.Scan() {
var rec map[string]interface{}
var rec map[string]any
if err := json.Unmarshal(scanner.Bytes(), &rec); err != nil {
continue
}
@ -333,7 +333,7 @@ func importBenchmarkQuestions(db *DB, path, benchmark string) int {
scanner.Buffer(make([]byte, 1024*1024), 1024*1024)
for scanner.Scan() {
var rec map[string]interface{}
var rec map[string]any
if err := json.Unmarshal(scanner.Bytes(), &rec); err != nil {
continue
}
@ -371,26 +371,26 @@ func importSeeds(db *DB, seedDir string) int {
region := strings.TrimSuffix(filepath.Base(path), ".json")
// Try parsing as array or object with prompts/seeds field.
var seedsList []interface{}
var raw interface{}
var seedsList []any
var raw any
if err := json.Unmarshal(data, &raw); err != nil {
return nil
}
switch v := raw.(type) {
case []interface{}:
case []any:
seedsList = v
case map[string]interface{}:
if prompts, ok := v["prompts"].([]interface{}); ok {
case map[string]any:
if prompts, ok := v["prompts"].([]any); ok {
seedsList = prompts
} else if seeds, ok := v["seeds"].([]interface{}); ok {
} else if seeds, ok := v["seeds"].([]any); ok {
seedsList = seeds
}
}
for _, s := range seedsList {
switch seed := s.(type) {
case map[string]interface{}:
case map[string]any:
prompt := strOrEmpty(seed, "prompt")
if prompt == "" {
prompt = strOrEmpty(seed, "text")
@ -416,14 +416,14 @@ func importSeeds(db *DB, seedDir string) int {
return count
}
func strOrEmpty(m map[string]interface{}, key string) string {
func strOrEmpty(m map[string]any, key string) string {
if v, ok := m[key]; ok {
return fmt.Sprintf("%v", v)
}
return ""
}
func floatOrZero(m map[string]interface{}, key string) float64 {
func floatOrZero(m map[string]any, key string) float64 {
if v, ok := m[key]; ok {
if f, ok := v.(float64); ok {
return f

View file

@ -78,7 +78,7 @@ func (c *InfluxClient) WriteLp(lines []string) error {
}
// QuerySQL runs a SQL query against InfluxDB and returns the result rows.
func (c *InfluxClient) QuerySQL(sql string) ([]map[string]interface{}, error) {
func (c *InfluxClient) QuerySQL(sql string) ([]map[string]any, error) {
reqBody := map[string]string{
"db": c.db,
"q": sql,
@ -114,7 +114,7 @@ func (c *InfluxClient) QuerySQL(sql string) ([]map[string]interface{}, error) {
return nil, fmt.Errorf("query failed %d: %s", resp.StatusCode, string(respBody))
}
var rows []map[string]interface{}
var rows []map[string]any
if err := json.Unmarshal(respBody, &rows); err != nil {
return nil, fmt.Errorf("unmarshal query response: %w", err)
}

View file

@ -24,14 +24,14 @@ type IngestConfig struct {
// contentScoreLine is the JSON structure for a content scores JSONL line.
type contentScoreLine struct {
Label string `json:"label"`
Aggregates map[string]interface{} `json:"aggregates"`
Probes map[string]contentScoreProbe `json:"probes"`
Label string `json:"label"`
Aggregates map[string]any `json:"aggregates"`
Probes map[string]contentScoreProbe `json:"probes"`
}
// contentScoreProbe is the per-probe block within a content score line.
type contentScoreProbe struct {
Scores map[string]interface{} `json:"scores"`
Scores map[string]any `json:"scores"`
}
// capabilityScoreLine is the JSON structure for a capability scores JSONL line.
@ -364,7 +364,7 @@ func extractIteration(label string) int {
// toFloat64 converts a JSON-decoded interface{} value to float64.
// Handles float64 (standard json.Unmarshal), json.Number, and string values.
func toFloat64(v interface{}) (float64, bool) {
func toFloat64(v any) (float64, bool) {
switch val := v.(type) {
case float64:
return val, true

View file

@ -133,7 +133,7 @@ func gatherDetails(db *DB, counts map[string]int) map[string]*tableDetail {
// toInt converts a DuckDB value to int. DuckDB returns integers as int64 (not
// float64 like InfluxDB), so we handle both types.
func toInt(v interface{}) int {
func toInt(v any) int {
switch n := v.(type) {
case int64:
return int(n)

View file

@ -85,7 +85,7 @@ func OllamaCreateModel(ollamaURL, modelName, baseModel, peftDir string) error {
return fmt.Errorf("upload adapter config: %w", err)
}
reqBody, _ := json.Marshal(map[string]interface{}{
reqBody, _ := json.Marshal(map[string]any{
"model": modelName,
"from": baseModel,
"adapters": map[string]string{

View file

@ -27,7 +27,7 @@ func NewEngine(judge *Judge, concurrency int, suiteList string) *Engine {
suites["standard"] = true
suites["exact"] = true
} else {
for _, s := range strings.Split(suiteList, ",") {
for s := range strings.SplitSeq(suiteList, ",") {
s = strings.TrimSpace(s)
if s != "" {
suites[s] = true

View file

@ -43,7 +43,7 @@ func TestScoreAll_ConcurrentSemantic_Good(t *testing.T) {
engine := NewEngine(judge, 4, "heuristic,semantic") // concurrency=4
var responses []Response
for i := 0; i < 20; i++ {
for i := range 20 {
responses = append(responses, Response{
ID: idForIndex(i),
Prompt: "test prompt",
@ -153,7 +153,7 @@ func TestScoreAll_SemaphoreBoundary_Good(t *testing.T) {
engine := NewEngine(judge, 1, "semantic") // concurrency=1
var responses []Response
for i := 0; i < 5; i++ {
for i := range 5 {
responses = append(responses, Response{
ID: idForIndex(i), Prompt: "p", Response: "r", Model: "m",
})
@ -209,7 +209,7 @@ func TestScoreAll_HeuristicOnlyNoRace_Good(t *testing.T) {
engine := NewEngine(nil, 4, "heuristic")
var responses []Response
for i := 0; i < 50; i++ {
for i := range 50 {
responses = append(responses, Response{
ID: idForIndex(i),
Prompt: "prompt",
@ -249,7 +249,7 @@ func TestScoreAll_MultiModelConcurrent_Good(t *testing.T) {
var responses []Response
models := []string{"alpha", "beta", "gamma", "delta"}
for _, model := range models {
for j := 0; j < 5; j++ {
for j := range 5 {
responses = append(responses, Response{
ID: model + "-" + idForIndex(j),
Prompt: "test",

View file

@ -109,7 +109,7 @@ func PrintStatus(influx *InfluxClient, w io.Writer) error {
// dedupeTraining merges training status and loss rows, keeping only the first
// (latest) row per model.
func dedupeTraining(statusRows, lossRows []map[string]interface{}) []trainingRow {
func dedupeTraining(statusRows, lossRows []map[string]any) []trainingRow {
lossMap := make(map[string]float64)
lossSeenMap := make(map[string]bool)
for _, row := range lossRows {
@ -154,7 +154,7 @@ func dedupeTraining(statusRows, lossRows []map[string]interface{}) []trainingRow
}
// dedupeGeneration deduplicates generation progress rows by worker.
func dedupeGeneration(rows []map[string]interface{}) []genRow {
func dedupeGeneration(rows []map[string]any) []genRow {
seen := make(map[string]bool)
var result []genRow
for _, row := range rows {
@ -180,7 +180,7 @@ func dedupeGeneration(rows []map[string]interface{}) []genRow {
}
// strVal extracts a string value from a row map.
func strVal(row map[string]interface{}, key string) string {
func strVal(row map[string]any, key string) string {
v, ok := row[key]
if !ok {
return ""
@ -193,7 +193,7 @@ func strVal(row map[string]interface{}, key string) string {
}
// floatVal extracts a float64 value from a row map.
func floatVal(row map[string]interface{}, key string) float64 {
func floatVal(row map[string]any, key string) float64 {
v, ok := row[key]
if !ok {
return 0
@ -207,6 +207,6 @@ func floatVal(row map[string]interface{}, key string) float64 {
// intVal extracts an integer value from a row map. InfluxDB JSON returns all
// numbers as float64, so this truncates to int.
func intVal(row map[string]interface{}, key string) int {
func intVal(row map[string]any, key string) int {
return int(floatVal(row, key))
}

View file

@ -84,7 +84,7 @@ func RunWorkerLoop(cfg *WorkerConfig) {
}
func workerRegister(cfg *WorkerConfig) error {
body := map[string]interface{}{
body := map[string]any{
"worker_id": cfg.WorkerID,
"name": cfg.Name,
"version": "0.1.0",
@ -109,7 +109,7 @@ func workerRegister(cfg *WorkerConfig) error {
}
func workerHeartbeat(cfg *WorkerConfig) {
body := map[string]interface{}{
body := map[string]any{
"worker_id": cfg.WorkerID,
}
apiPost(cfg, "/api/lem/workers/heartbeat", body)
@ -146,7 +146,7 @@ func workerPoll(cfg *WorkerConfig) int {
for _, task := range result.Tasks {
if err := workerProcessTask(cfg, task); err != nil {
log.Printf("Task %d failed: %v", task.ID, err)
apiDelete(cfg, fmt.Sprintf("/api/lem/tasks/%d/claim", task.ID), map[string]interface{}{
apiDelete(cfg, fmt.Sprintf("/api/lem/tasks/%d/claim", task.ID), map[string]any{
"worker_id": cfg.WorkerID,
})
continue
@ -161,14 +161,14 @@ func workerProcessTask(cfg *WorkerConfig, task APITask) error {
log.Printf("Processing task %d: %s [%s/%s] %d chars prompt",
task.ID, task.TaskType, task.Language, task.Domain, len(task.PromptText))
_, err := apiPost(cfg, fmt.Sprintf("/api/lem/tasks/%d/claim", task.ID), map[string]interface{}{
_, err := apiPost(cfg, fmt.Sprintf("/api/lem/tasks/%d/claim", task.ID), map[string]any{
"worker_id": cfg.WorkerID,
})
if err != nil {
return fmt.Errorf("claim: %w", err)
}
apiPatch(cfg, fmt.Sprintf("/api/lem/tasks/%d/status", task.ID), map[string]interface{}{
apiPatch(cfg, fmt.Sprintf("/api/lem/tasks/%d/status", task.ID), map[string]any{
"worker_id": cfg.WorkerID,
"status": "in_progress",
})
@ -183,7 +183,7 @@ func workerProcessTask(cfg *WorkerConfig, task APITask) error {
genTime := time.Since(start)
if err != nil {
apiPatch(cfg, fmt.Sprintf("/api/lem/tasks/%d/status", task.ID), map[string]interface{}{
apiPatch(cfg, fmt.Sprintf("/api/lem/tasks/%d/status", task.ID), map[string]any{
"worker_id": cfg.WorkerID,
"status": "abandoned",
})
@ -195,7 +195,7 @@ func workerProcessTask(cfg *WorkerConfig, task APITask) error {
modelUsed = "default"
}
_, err = apiPost(cfg, fmt.Sprintf("/api/lem/tasks/%d/result", task.ID), map[string]interface{}{
_, err = apiPost(cfg, fmt.Sprintf("/api/lem/tasks/%d/result", task.ID), map[string]any{
"worker_id": cfg.WorkerID,
"response_text": response,
"model_used": modelUsed,
@ -225,7 +225,7 @@ func workerInfer(cfg *WorkerConfig, task APITask) (string, error) {
}
}
reqBody := map[string]interface{}{
reqBody := map[string]any{
"model": task.ModelName,
"messages": messages,
"temperature": temp,
@ -310,19 +310,19 @@ func apiGet(cfg *WorkerConfig, path string) ([]byte, error) {
return body, nil
}
func apiPost(cfg *WorkerConfig, path string, data map[string]interface{}) ([]byte, error) {
func apiPost(cfg *WorkerConfig, path string, data map[string]any) ([]byte, error) {
return apiRequest(cfg, "POST", path, data)
}
func apiPatch(cfg *WorkerConfig, path string, data map[string]interface{}) ([]byte, error) {
func apiPatch(cfg *WorkerConfig, path string, data map[string]any) ([]byte, error) {
return apiRequest(cfg, "PATCH", path, data)
}
func apiDelete(cfg *WorkerConfig, path string, data map[string]interface{}) ([]byte, error) {
func apiDelete(cfg *WorkerConfig, path string, data map[string]any) ([]byte, error) {
return apiRequest(cfg, "DELETE", path, data)
}
func apiRequest(cfg *WorkerConfig, method, path string, data map[string]interface{}) ([]byte, error) {
func apiRequest(cfg *WorkerConfig, method, path string, data map[string]any) ([]byte, error) {
jsonData, err := json.Marshal(data)
if err != nil {
return nil, err
@ -386,7 +386,7 @@ func ReadKeyFile() string {
// SplitComma splits a comma-separated string into trimmed parts.
func SplitComma(s string) []string {
var result []string
for _, part := range bytes.Split([]byte(s), []byte(",")) {
for part := range bytes.SplitSeq([]byte(s), []byte(",")) {
trimmed := bytes.TrimSpace(part)
if len(trimmed) > 0 {
result = append(result, string(trimmed))