Compare commits
6 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
36c184e7dd | ||
|
|
0ab8627447 | ||
|
|
3680aaf871 | ||
|
|
d9a63f1981 | ||
|
|
a7772087ae | ||
|
|
af4e1d6ae2 |
21 changed files with 1122 additions and 419 deletions
|
|
@ -43,8 +43,12 @@ Coverage target: maintain ≥90.9%.
|
|||
|
||||
- UK English throughout (colour, licence, initialise)
|
||||
- Explicit types on all function signatures and struct fields
|
||||
- Exported declarations must have Go doc comments beginning with the identifier name
|
||||
- `go test ./...` and `go vet ./...` must pass before commit
|
||||
- SPDX header on all source files: `// SPDX-Licence-Identifier: EUPL-1.2`
|
||||
- Error handling: all errors must use `coreerr.E(op, msg, err)` from `dappco.re/go/core/log`, never `fmt.Errorf` or `errors.New`
|
||||
- Banned imports in non-test Go files: `errors`, `github.com/pkg/errors`, and legacy `forge.lthn.ai/...` paths
|
||||
- Conventional commits: `type(scope): description`
|
||||
- Co-Author trailer: `Co-Authored-By: Virgil <virgil@lethean.io>`
|
||||
|
||||
The conventions test suite enforces banned imports, exported usage comments, and test naming via `go test ./...`.
|
||||
|
|
|
|||
54
CODEX.md
Normal file
54
CODEX.md
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
# CODEX.md
|
||||
|
||||
This file provides guidance to Codex when working in this repository.
|
||||
|
||||
Claude Code JSONL transcript parser, analytics engine, and HTML/video renderer. Module: `dappco.re/go/core/session`
|
||||
|
||||
## Commands
|
||||
|
||||
```bash
|
||||
go test ./... # Run all tests
|
||||
go test -v -run TestFunctionName_Context # Run single test
|
||||
go test -race ./... # Race detector
|
||||
go test -bench=. -benchmem ./... # Benchmarks
|
||||
go vet ./... # Vet
|
||||
golangci-lint run ./... # Lint (optional, config in .golangci.yml)
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
Single-package library (`package session`) with five source files forming a pipeline:
|
||||
|
||||
1. **parser.go** — Core JSONL parser. Reads Claude Code session files line-by-line (8 MiB scanner buffer), correlates `tool_use`/`tool_result` pairs via a `pendingTools` map keyed by tool ID, and produces `Session` with `[]Event`. Also handles session listing, fetching, and pruning.
|
||||
2. **analytics.go** — Pure computation over `[]Event`. `Analyse()` returns `SessionAnalytics` (per-tool counts, error rates, latency stats, token estimates). No I/O.
|
||||
3. **html.go** — `RenderHTML()` generates a self-contained HTML file (inline CSS/JS, dark theme, collapsible panels, client-side search). All user content is `html.EscapeString`-escaped.
|
||||
4. **video.go** — `RenderMP4()` generates a VHS `.tape` script and shells out to `vhs`. Requires `vhs` on PATH.
|
||||
5. **search.go** — `Search()`/`SearchSeq()` does cross-session case-insensitive substring search over tool event inputs and outputs.
|
||||
|
||||
Both slice-returning and `iter.Seq` variants exist for `ListSessions`, `Search`, and `Session.EventsSeq`.
|
||||
|
||||
### Adding a new tool type
|
||||
|
||||
Touch all layers: add input struct in `parser.go` → case in `extractToolInput` → label in `html.go` `RenderHTML` → tape entry in `video.go` `generateTape` → tests in `parser_test.go`.
|
||||
|
||||
## Testing
|
||||
|
||||
Tests are white-box (`package session`). Test helpers in `parser_test.go` build synthetic JSONL in-memory — no fixture files. Use `writeJSONL(t, dir, name, lines...)` and the entry builders (`toolUseEntry`, `toolResultEntry`, `userTextEntry`, `assistantTextEntry`).
|
||||
|
||||
Naming convention: `TestFile_Function_Good/Bad/Ugly` (group by file, collapse the specific behaviour into the function segment, and suffix with happy path / expected errors / extreme edge cases).
|
||||
|
||||
Coverage target: maintain ≥90.9%.
|
||||
|
||||
## Coding Standards
|
||||
|
||||
- UK English throughout (colour, licence, initialise)
|
||||
- Explicit types on all function signatures and struct fields
|
||||
- Exported declarations must have Go doc comments beginning with the identifier name and include an `Example:` usage snippet
|
||||
- `go test ./...` and `go vet ./...` must pass before commit
|
||||
- SPDX header on all source files: `// SPDX-Licence-Identifier: EUPL-1.2`
|
||||
- Error handling: all package errors must use `core.E(op, msg, err)` from `dappco.re/go/core`; do not use `core.NewError`, `fmt.Errorf`, or `errors.New`
|
||||
- Banned imports in non-test Go files: `errors`, `github.com/pkg/errors`, and legacy `forge.lthn.ai/...` paths
|
||||
- Conventional commits: `type(scope): description`
|
||||
- Co-Author trailer: `Co-Authored-By: Virgil <virgil@lethean.io>`
|
||||
|
||||
The conventions test suite enforces banned imports, exported usage comments, and test naming via `go test ./...`.
|
||||
37
analytics.go
37
analytics.go
|
|
@ -2,14 +2,17 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// SessionAnalytics holds computed metrics for a parsed session.
|
||||
//
|
||||
// Example:
|
||||
// analytics := session.Analyse(sess)
|
||||
type SessionAnalytics struct {
|
||||
Duration time.Duration
|
||||
ActiveTime time.Duration
|
||||
|
|
@ -24,6 +27,9 @@ type SessionAnalytics struct {
|
|||
}
|
||||
|
||||
// Analyse iterates session events and computes analytics. Pure function, no I/O.
|
||||
//
|
||||
// Example:
|
||||
// analytics := session.Analyse(sess)
|
||||
func Analyse(sess *Session) *SessionAnalytics {
|
||||
a := &SessionAnalytics{
|
||||
ToolCounts: make(map[string]int),
|
||||
|
|
@ -97,32 +103,35 @@ func Analyse(sess *Session) *SessionAnalytics {
|
|||
}
|
||||
|
||||
// FormatAnalytics returns a tabular text summary suitable for CLI display.
|
||||
//
|
||||
// Example:
|
||||
// summary := session.FormatAnalytics(analytics)
|
||||
func FormatAnalytics(a *SessionAnalytics) string {
|
||||
var b strings.Builder
|
||||
b := core.NewBuilder()
|
||||
|
||||
b.WriteString("Session Analytics\n")
|
||||
b.WriteString(strings.Repeat("=", 50) + "\n\n")
|
||||
b.WriteString(repeatString("=", 50) + "\n\n")
|
||||
|
||||
b.WriteString(fmt.Sprintf(" Duration: %s\n", formatDuration(a.Duration)))
|
||||
b.WriteString(fmt.Sprintf(" Active Time: %s\n", formatDuration(a.ActiveTime)))
|
||||
b.WriteString(fmt.Sprintf(" Events: %d\n", a.EventCount))
|
||||
b.WriteString(fmt.Sprintf(" Success Rate: %.1f%%\n", a.SuccessRate*100))
|
||||
b.WriteString(fmt.Sprintf(" Est. Input Tk: %d\n", a.EstimatedInputTokens))
|
||||
b.WriteString(fmt.Sprintf(" Est. Output Tk: %d\n", a.EstimatedOutputTokens))
|
||||
b.WriteString(core.Sprintf(" Duration: %s\n", formatDuration(a.Duration)))
|
||||
b.WriteString(core.Sprintf(" Active Time: %s\n", formatDuration(a.ActiveTime)))
|
||||
b.WriteString(core.Sprintf(" Events: %d\n", a.EventCount))
|
||||
b.WriteString(core.Sprintf(" Success Rate: %.1f%%\n", a.SuccessRate*100))
|
||||
b.WriteString(core.Sprintf(" Est. Input Tk: %d\n", a.EstimatedInputTokens))
|
||||
b.WriteString(core.Sprintf(" Est. Output Tk: %d\n", a.EstimatedOutputTokens))
|
||||
|
||||
if len(a.ToolCounts) > 0 {
|
||||
b.WriteString("\n Tool Breakdown\n")
|
||||
b.WriteString(" " + strings.Repeat("-", 48) + "\n")
|
||||
b.WriteString(fmt.Sprintf(" %-14s %6s %6s %10s %10s\n",
|
||||
b.WriteString(" " + repeatString("-", 48) + "\n")
|
||||
b.WriteString(core.Sprintf(" %-14s %6s %6s %10s %10s\n",
|
||||
"Tool", "Calls", "Errors", "Avg", "Max"))
|
||||
b.WriteString(" " + strings.Repeat("-", 48) + "\n")
|
||||
b.WriteString(" " + repeatString("-", 48) + "\n")
|
||||
|
||||
// Sort tools for deterministic output
|
||||
for _, tool := range slices.Sorted(maps.Keys(a.ToolCounts)) {
|
||||
errors := a.ErrorCounts[tool]
|
||||
avg := a.AvgLatency[tool]
|
||||
max := a.MaxLatency[tool]
|
||||
b.WriteString(fmt.Sprintf(" %-14s %6d %6d %10s %10s\n",
|
||||
b.WriteString(core.Sprintf(" %-14s %6d %6d %10s %10s\n",
|
||||
tool, a.ToolCounts[tool], errors,
|
||||
formatDuration(avg), formatDuration(max)))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
|
@ -10,7 +9,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAnalyse_EmptySession_Good(t *testing.T) {
|
||||
func TestAnalytics_AnalyseEmptySession_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "empty",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -31,13 +30,13 @@ func TestAnalyse_EmptySession_Good(t *testing.T) {
|
|||
assert.Equal(t, 0, a.EstimatedOutputTokens)
|
||||
}
|
||||
|
||||
func TestAnalyse_NilSession_Good(t *testing.T) {
|
||||
func TestAnalytics_AnalyseNilSession_Good(t *testing.T) {
|
||||
a := Analyse(nil)
|
||||
require.NotNil(t, a)
|
||||
assert.Equal(t, 0, a.EventCount)
|
||||
}
|
||||
|
||||
func TestAnalyse_SingleToolCall_Good(t *testing.T) {
|
||||
func TestAnalytics_AnalyseSingleToolCall_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "single",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -67,7 +66,7 @@ func TestAnalyse_SingleToolCall_Good(t *testing.T) {
|
|||
assert.Equal(t, 2*time.Second, a.MaxLatency["Bash"])
|
||||
}
|
||||
|
||||
func TestAnalyse_MixedToolsWithErrors_Good(t *testing.T) {
|
||||
func TestAnalytics_AnalyseMixedToolsWithErrors_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "mixed",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -148,7 +147,7 @@ func TestAnalyse_MixedToolsWithErrors_Good(t *testing.T) {
|
|||
assert.Equal(t, 2100*time.Millisecond, a.ActiveTime)
|
||||
}
|
||||
|
||||
func TestAnalyse_LatencyCalculations_Good(t *testing.T) {
|
||||
func TestAnalytics_AnalyseLatencyCalculations_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "latency",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -192,7 +191,7 @@ func TestAnalyse_LatencyCalculations_Good(t *testing.T) {
|
|||
assert.Equal(t, 200*time.Millisecond, a.MaxLatency["Read"])
|
||||
}
|
||||
|
||||
func TestAnalyse_TokenEstimation_Good(t *testing.T) {
|
||||
func TestAnalytics_AnalyseTokenEstimation_Good(t *testing.T) {
|
||||
// 4 chars = ~1 token
|
||||
sess := &Session{
|
||||
ID: "tokens",
|
||||
|
|
@ -201,19 +200,19 @@ func TestAnalyse_TokenEstimation_Good(t *testing.T) {
|
|||
Events: []Event{
|
||||
{
|
||||
Type: "user",
|
||||
Input: strings.Repeat("a", 400), // 100 tokens
|
||||
Input: repeatString("a", 400), // 100 tokens
|
||||
},
|
||||
{
|
||||
Type: "tool_use",
|
||||
Tool: "Bash",
|
||||
Input: strings.Repeat("b", 80), // 20 tokens
|
||||
Output: strings.Repeat("c", 200), // 50 tokens
|
||||
Input: repeatString("b", 80), // 20 tokens
|
||||
Output: repeatString("c", 200), // 50 tokens
|
||||
Duration: time.Second,
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Type: "assistant",
|
||||
Input: strings.Repeat("d", 120), // 30 tokens
|
||||
Input: repeatString("d", 120), // 30 tokens
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@ -226,7 +225,7 @@ func TestAnalyse_TokenEstimation_Good(t *testing.T) {
|
|||
assert.Equal(t, 50, a.EstimatedOutputTokens)
|
||||
}
|
||||
|
||||
func TestFormatAnalytics_Output_Good(t *testing.T) {
|
||||
func TestAnalytics_FormatAnalyticsOutput_Good(t *testing.T) {
|
||||
a := &SessionAnalytics{
|
||||
Duration: 5 * time.Minute,
|
||||
ActiveTime: 2 * time.Minute,
|
||||
|
|
@ -269,7 +268,7 @@ func TestFormatAnalytics_Output_Good(t *testing.T) {
|
|||
assert.Contains(t, output, "Tool Breakdown")
|
||||
}
|
||||
|
||||
func TestFormatAnalytics_EmptyAnalytics_Good(t *testing.T) {
|
||||
func TestAnalytics_FormatAnalyticsEmptyAnalytics_Good(t *testing.T) {
|
||||
a := &SessionAnalytics{
|
||||
ToolCounts: make(map[string]int),
|
||||
ErrorCounts: make(map[string]int),
|
||||
|
|
|
|||
|
|
@ -2,11 +2,11 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"io/fs"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// BenchmarkParseTranscript benchmarks parsing a ~1MB+ JSONL file.
|
||||
|
|
@ -92,44 +92,44 @@ func BenchmarkSearch(b *testing.B) {
|
|||
func generateBenchJSONL(b testing.TB, dir string, numTools int) string {
|
||||
b.Helper()
|
||||
|
||||
var sb strings.Builder
|
||||
sb := core.NewBuilder()
|
||||
baseTS := "2026-02-20T10:00:00Z"
|
||||
|
||||
// Opening user message
|
||||
sb.WriteString(fmt.Sprintf(`{"type":"user","timestamp":"%s","sessionId":"bench","message":{"role":"user","content":[{"type":"text","text":"Start benchmark session"}]}}`, baseTS))
|
||||
sb.WriteString(core.Sprintf(`{"type":"user","timestamp":"%s","sessionId":"bench","message":{"role":"user","content":[{"type":"text","text":"Start benchmark session"}]}}`, baseTS))
|
||||
sb.WriteByte('\n')
|
||||
|
||||
for i := range numTools {
|
||||
toolID := fmt.Sprintf("tool-%d", i)
|
||||
toolID := core.Sprintf("tool-%d", i)
|
||||
offset := i * 2
|
||||
|
||||
// Alternate between different tool types for realistic distribution
|
||||
var toolUse, toolResult string
|
||||
switch i % 5 {
|
||||
case 0: // Bash
|
||||
toolUse = fmt.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Bash","id":"%s","input":{"command":"echo iteration %d","description":"echo test"}}]}}`,
|
||||
toolUse = core.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Bash","id":"%s","input":{"command":"echo iteration %d","description":"echo test"}}]}}`,
|
||||
offset/60, offset%60, toolID, i)
|
||||
toolResult = fmt.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"iteration %d output line one\niteration %d output line two","is_error":false}]}}`,
|
||||
toolResult = core.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"iteration %d output line one\niteration %d output line two","is_error":false}]}}`,
|
||||
(offset+1)/60, (offset+1)%60, toolID, i, i)
|
||||
case 1: // Read
|
||||
toolUse = fmt.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Read","id":"%s","input":{"file_path":"/tmp/bench/file-%d.go"}}]}}`,
|
||||
toolUse = core.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Read","id":"%s","input":{"file_path":"/tmp/bench/file-%d.go"}}]}}`,
|
||||
offset/60, offset%60, toolID, i)
|
||||
toolResult = fmt.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"package main\n\nfunc main() {\n\tfmt.Println(%d)\n}","is_error":false}]}}`,
|
||||
toolResult = core.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"package main\n\nfunc main() {\n\tfmt.Println(%d)\n}","is_error":false}]}}`,
|
||||
(offset+1)/60, (offset+1)%60, toolID, i)
|
||||
case 2: // Edit
|
||||
toolUse = fmt.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Edit","id":"%s","input":{"file_path":"/tmp/bench/file-%d.go","old_string":"old","new_string":"new"}}]}}`,
|
||||
toolUse = core.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Edit","id":"%s","input":{"file_path":"/tmp/bench/file-%d.go","old_string":"old","new_string":"new"}}]}}`,
|
||||
offset/60, offset%60, toolID, i)
|
||||
toolResult = fmt.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"ok","is_error":false}]}}`,
|
||||
toolResult = core.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"ok","is_error":false}]}}`,
|
||||
(offset+1)/60, (offset+1)%60, toolID)
|
||||
case 3: // Grep
|
||||
toolUse = fmt.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Grep","id":"%s","input":{"pattern":"TODO","path":"/tmp/bench"}}]}}`,
|
||||
toolUse = core.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Grep","id":"%s","input":{"pattern":"TODO","path":"/tmp/bench"}}]}}`,
|
||||
offset/60, offset%60, toolID)
|
||||
toolResult = fmt.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"/tmp/bench/file.go:10: // TODO fix this","is_error":false}]}}`,
|
||||
toolResult = core.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"/tmp/bench/file.go:10: // TODO fix this","is_error":false}]}}`,
|
||||
(offset+1)/60, (offset+1)%60, toolID)
|
||||
case 4: // Glob
|
||||
toolUse = fmt.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Glob","id":"%s","input":{"pattern":"**/*.go"}}]}}`,
|
||||
toolUse = core.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"assistant","content":[{"type":"tool_use","name":"Glob","id":"%s","input":{"pattern":"**/*.go"}}]}}`,
|
||||
offset/60, offset%60, toolID)
|
||||
toolResult = fmt.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"/tmp/a.go\n/tmp/b.go\n/tmp/c.go","is_error":false}]}}`,
|
||||
toolResult = core.Sprintf(`{"type":"user","timestamp":"2026-02-20T10:%02d:%02dZ","sessionId":"bench","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"%s","content":"/tmp/a.go\n/tmp/b.go\n/tmp/c.go","is_error":false}]}}`,
|
||||
(offset+1)/60, (offset+1)%60, toolID)
|
||||
}
|
||||
|
||||
|
|
@ -140,16 +140,24 @@ func generateBenchJSONL(b testing.TB, dir string, numTools int) string {
|
|||
}
|
||||
|
||||
// Closing assistant message
|
||||
sb.WriteString(fmt.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T12:00:00Z","sessionId":"bench","message":{"role":"assistant","content":[{"type":"text","text":"Benchmark session complete."}]}}%s`, "\n"))
|
||||
sb.WriteString(core.Sprintf(`{"type":"assistant","timestamp":"2026-02-20T12:00:00Z","sessionId":"bench","message":{"role":"assistant","content":[{"type":"text","text":"Benchmark session complete."}]}}%s`, "\n"))
|
||||
|
||||
name := fmt.Sprintf("bench-%d.jsonl", numTools)
|
||||
path := filepath.Join(dir, name)
|
||||
if err := os.WriteFile(path, []byte(sb.String()), 0644); err != nil {
|
||||
b.Fatal(err)
|
||||
name := core.Sprintf("bench-%d.jsonl", numTools)
|
||||
filePath := path.Join(dir, name)
|
||||
writeResult := hostFS.Write(filePath, sb.String())
|
||||
if !writeResult.OK {
|
||||
b.Fatal(resultError(writeResult))
|
||||
}
|
||||
|
||||
info, _ := os.Stat(path)
|
||||
statResult := hostFS.Stat(filePath)
|
||||
if !statResult.OK {
|
||||
b.Fatal(resultError(statResult))
|
||||
}
|
||||
info, ok := statResult.Value.(fs.FileInfo)
|
||||
if !ok {
|
||||
b.Fatal("expected fs.FileInfo from Stat")
|
||||
}
|
||||
b.Logf("Generated %s: %d bytes, %d tool pairs", name, info.Size(), numTools)
|
||||
|
||||
return path
|
||||
return filePath
|
||||
}
|
||||
|
|
|
|||
400
conventions_test.go
Normal file
400
conventions_test.go
Normal file
|
|
@ -0,0 +1,400 @@
|
|||
// SPDX-Licence-Identifier: EUPL-1.2
|
||||
package session
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"path"
|
||||
"regexp"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// testNamePattern matches the mandated test-name shape
// TestFile_Function_Good|Bad|Ugly: exactly two underscore-separated
// alphanumeric segments after "Test", ending in the scenario suffix.
var testNamePattern = regexp.MustCompile(`^Test[A-Za-z0-9]+_[A-Za-z0-9]+_(Good|Bad|Ugly)$`)
|
||||
|
||||
func TestConventions_BannedImports_Good(t *testing.T) {
|
||||
files := parseGoFiles(t, ".")
|
||||
|
||||
banned := map[string]string{
|
||||
core.Concat("encoding", "/json"): "use dappco.re/go/core JSON helpers instead",
|
||||
core.Concat("error", "s"): "use core.E/op-aware errors instead",
|
||||
core.Concat("f", "mt"): "use dappco.re/go/core formatting helpers instead",
|
||||
"github.com/pkg/errors": "use coreerr.E(op, msg, err) for package errors",
|
||||
core.Concat("o", "s"): "use dappco.re/go/core filesystem helpers instead",
|
||||
core.Concat("o", "s/exec"): "use session command helpers or core process abstractions instead",
|
||||
core.Concat("path", "/filepath"): "use path or dappco.re/go/core path helpers instead",
|
||||
core.Concat("string", "s"): "use dappco.re/go/core string helpers or local helpers instead",
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
for _, spec := range file.ast.Imports {
|
||||
importPath := trimQuotes(spec.Path.Value)
|
||||
if core.HasPrefix(importPath, "forge.lthn.ai/") {
|
||||
t.Errorf("%s imports %q; use dappco.re/go/core/... paths instead", file.path, importPath)
|
||||
continue
|
||||
}
|
||||
if reason, ok := banned[importPath]; ok {
|
||||
t.Errorf("%s imports %q; %s", file.path, importPath, reason)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConventions_ErrorHandling_Good(t *testing.T) {
|
||||
files := parseGoFiles(t, ".")
|
||||
|
||||
for _, file := range files {
|
||||
if core.HasSuffix(file.path, "_test.go") {
|
||||
continue
|
||||
}
|
||||
|
||||
ast.Inspect(file.ast, func(node ast.Node) bool {
|
||||
call, ok := node.(*ast.CallExpr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
sel, ok := call.Fun.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
pkg, ok := sel.X.(*ast.Ident)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
switch {
|
||||
case pkg.Name == "core" && sel.Sel.Name == "NewError":
|
||||
t.Errorf("%s uses core.NewError; use core.E(op, msg, err)", file.path)
|
||||
case pkg.Name == "fmt" && sel.Sel.Name == "Errorf":
|
||||
t.Errorf("%s uses fmt.Errorf; use core.E(op, msg, err)", file.path)
|
||||
case pkg.Name == "errors" && sel.Sel.Name == "New":
|
||||
t.Errorf("%s uses errors.New; use core.E(op, msg, err)", file.path)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConventions_TestNaming_Good(t *testing.T) {
|
||||
files := parseGoFiles(t, ".")
|
||||
|
||||
for _, file := range files {
|
||||
if !core.HasSuffix(file.path, "_test.go") {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, decl := range file.ast.Decls {
|
||||
fn, ok := decl.(*ast.FuncDecl)
|
||||
if !ok || fn.Recv != nil {
|
||||
continue
|
||||
}
|
||||
if !core.HasPrefix(fn.Name.Name, "Test") || fn.Name.Name == "TestMain" {
|
||||
continue
|
||||
}
|
||||
if !isTestingTFunc(file, fn) {
|
||||
continue
|
||||
}
|
||||
expectedPrefix := core.Concat("Test", testFileToken(file.path), "_")
|
||||
if !core.HasPrefix(fn.Name.Name, expectedPrefix) {
|
||||
t.Errorf("%s contains %s; expected prefix %s", file.path, fn.Name.Name, expectedPrefix)
|
||||
continue
|
||||
}
|
||||
if !testNamePattern.MatchString(fn.Name.Name) {
|
||||
t.Errorf("%s contains %s; expected TestFile_Function_Good/Bad/Ugly", file.path, fn.Name.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConventions_UsageComments_Good(t *testing.T) {
|
||||
files := parseGoFiles(t, ".")
|
||||
|
||||
for _, file := range files {
|
||||
if core.HasSuffix(file.path, "_test.go") {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, decl := range file.ast.Decls {
|
||||
switch d := decl.(type) {
|
||||
case *ast.FuncDecl:
|
||||
if d.Recv != nil || !d.Name.IsExported() {
|
||||
continue
|
||||
}
|
||||
text := commentText(d.Doc)
|
||||
if !hasDocPrefix(text, d.Name.Name) || !hasUsageExample(text) {
|
||||
t.Errorf("%s: exported function %s needs a usage comment starting with %s and containing Example:", file.path, d.Name.Name, d.Name.Name)
|
||||
}
|
||||
case *ast.GenDecl:
|
||||
for i, spec := range d.Specs {
|
||||
switch s := spec.(type) {
|
||||
case *ast.TypeSpec:
|
||||
if !s.Name.IsExported() {
|
||||
continue
|
||||
}
|
||||
text := commentText(typeDocGroup(d, s, i))
|
||||
if !hasDocPrefix(text, s.Name.Name) || !hasUsageExample(text) {
|
||||
t.Errorf("%s: exported type %s needs a usage comment starting with %s and containing Example:", file.path, s.Name.Name, s.Name.Name)
|
||||
}
|
||||
case *ast.ValueSpec:
|
||||
doc := valueDocGroup(d, s, i)
|
||||
for _, name := range s.Names {
|
||||
if !name.IsExported() {
|
||||
continue
|
||||
}
|
||||
text := commentText(doc)
|
||||
if !hasDocPrefix(text, name.Name) || !hasUsageExample(text) {
|
||||
t.Errorf("%s: exported declaration %s needs a usage comment starting with %s and containing Example:", file.path, name.Name, name.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parsedFile couples one parsed Go file with metadata describing how it
// imports the testing package, for use by the convention checks.
type parsedFile struct {
	ast                 *ast.File           // parsed syntax tree, comments included
	path                string              // base filename, e.g. "parser_test.go"
	hasTestingDotImport bool                // true when the file dot-imports "testing"
	testingImportNames  map[string]struct{} // local identifiers bound to the "testing" import
}
|
||||
|
||||
func parseGoFiles(t *testing.T, dir string) []parsedFile {
|
||||
t.Helper()
|
||||
|
||||
paths := core.PathGlob(path.Join(dir, "*.go"))
|
||||
if len(paths) == 0 {
|
||||
t.Fatalf("no Go files found in %s", dir)
|
||||
}
|
||||
|
||||
slices.Sort(paths)
|
||||
|
||||
fset := token.NewFileSet()
|
||||
files := make([]parsedFile, 0, len(paths))
|
||||
for _, filePath := range paths {
|
||||
fileAST, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments)
|
||||
if err != nil {
|
||||
t.Fatalf("parse %s: %v", filePath, err)
|
||||
}
|
||||
|
||||
testingImportNames, hasTestingDotImport := testingImports(fileAST)
|
||||
files = append(files, parsedFile{
|
||||
path: path.Base(filePath),
|
||||
ast: fileAST,
|
||||
testingImportNames: testingImportNames,
|
||||
hasTestingDotImport: hasTestingDotImport,
|
||||
})
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
func TestConventions_ParseGoFilesMultiplePackages_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
writeTestFile(t, path.Join(dir, "session.go"), "package session\n")
|
||||
writeTestFile(t, path.Join(dir, "session_external_test.go"), "package session_test\n")
|
||||
writeTestFile(t, path.Join(dir, "README.md"), "# ignored\n")
|
||||
|
||||
files := parseGoFiles(t, dir)
|
||||
if len(files) != 2 {
|
||||
t.Fatalf("expected 2 Go files, got %d", len(files))
|
||||
}
|
||||
|
||||
names := []string{files[0].path, files[1].path}
|
||||
slices.Sort(names)
|
||||
if names[0] != "session.go" || names[1] != "session_external_test.go" {
|
||||
t.Fatalf("unexpected files: %v", names)
|
||||
}
|
||||
}
|
||||
|
||||
// TestConventions_IsTestingTFuncAliasedImport_Good verifies that a test
// function is recognised when "testing" is imported under an alias.
func TestConventions_IsTestingTFuncAliasedImport_Good(t *testing.T) {
	src := `
package session_test

import t "testing"

func TestConventions_AliasedImportContext_Good(testcase *t.T) {}
`
	fileAST, fn := parseTestFunc(t, src, "TestConventions_AliasedImportContext_Good")

	names, dotImport := testingImports(fileAST)
	file := parsedFile{ast: fileAST, testingImportNames: names, hasTestingDotImport: dotImport}

	if !isTestingTFunc(file, fn) {
		t.Fatal("expected aliased *testing.T signature to be recognised")
	}
}
|
||||
|
||||
// TestConventions_IsTestingTFuncDotImport_Good verifies that a test
// function is recognised when "testing" is dot-imported (bare *T).
func TestConventions_IsTestingTFuncDotImport_Good(t *testing.T) {
	src := `
package session_test

import . "testing"

func TestConventions_DotImportContext_Good(testcase *T) {}
`
	fileAST, fn := parseTestFunc(t, src, "TestConventions_DotImportContext_Good")

	names, dotImport := testingImports(fileAST)
	file := parsedFile{ast: fileAST, testingImportNames: names, hasTestingDotImport: dotImport}

	if !isTestingTFunc(file, fn) {
		t.Fatal("expected dot-imported *testing.T signature to be recognised")
	}
}
|
||||
|
||||
func testingImports(file *ast.File) (map[string]struct{}, bool) {
|
||||
names := make(map[string]struct{})
|
||||
hasDotImport := false
|
||||
|
||||
for _, spec := range file.Imports {
|
||||
importPath := trimQuotes(spec.Path.Value)
|
||||
if importPath != "testing" {
|
||||
continue
|
||||
}
|
||||
if spec.Name == nil {
|
||||
names["testing"] = struct{}{}
|
||||
continue
|
||||
}
|
||||
switch spec.Name.Name {
|
||||
case ".":
|
||||
hasDotImport = true
|
||||
case "_":
|
||||
continue
|
||||
default:
|
||||
names[spec.Name.Name] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return names, hasDotImport
|
||||
}
|
||||
|
||||
func isTestingTFunc(file parsedFile, fn *ast.FuncDecl) bool {
|
||||
if fn.Type == nil || fn.Type.Params == nil || len(fn.Type.Params.List) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
param := fn.Type.Params.List[0]
|
||||
star, ok := param.Type.(*ast.StarExpr)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
switch expr := star.X.(type) {
|
||||
case *ast.Ident:
|
||||
return file.hasTestingDotImport && expr.Name == "T"
|
||||
case *ast.SelectorExpr:
|
||||
pkg, ok := expr.X.(*ast.Ident)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if expr.Sel.Name != "T" {
|
||||
return false
|
||||
}
|
||||
_, ok = file.testingImportNames[pkg.Name]
|
||||
return ok
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func typeDocGroup(decl *ast.GenDecl, spec *ast.TypeSpec, index int) *ast.CommentGroup {
|
||||
if spec.Doc != nil {
|
||||
return spec.Doc
|
||||
}
|
||||
if len(decl.Specs) == 1 && index == 0 {
|
||||
return decl.Doc
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func valueDocGroup(decl *ast.GenDecl, spec *ast.ValueSpec, index int) *ast.CommentGroup {
|
||||
if spec.Doc != nil {
|
||||
return spec.Doc
|
||||
}
|
||||
if len(decl.Specs) == 1 && index == 0 {
|
||||
return decl.Doc
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func commentText(group *ast.CommentGroup) string {
|
||||
if group == nil {
|
||||
return ""
|
||||
}
|
||||
return core.Trim(group.Text())
|
||||
}
|
||||
|
||||
func hasDocPrefix(text, name string) bool {
|
||||
if text == "" || !core.HasPrefix(text, name) {
|
||||
return false
|
||||
}
|
||||
if len(text) == len(name) {
|
||||
return true
|
||||
}
|
||||
|
||||
next := text[len(name)]
|
||||
return (next < 'A' || next > 'Z') && (next < 'a' || next > 'z') && (next < '0' || next > '9') && next != '_'
|
||||
}
|
||||
|
||||
func hasUsageExample(text string) bool {
|
||||
if text == "" {
|
||||
return false
|
||||
}
|
||||
return core.HasPrefix(text, "Example:") || core.Contains(text, "\nExample:")
|
||||
}
|
||||
|
||||
func testFileToken(filePath string) string {
|
||||
stem := core.TrimSuffix(path.Base(filePath), "_test.go")
|
||||
switch stem {
|
||||
case "html":
|
||||
return "HTML"
|
||||
default:
|
||||
if stem == "" {
|
||||
return ""
|
||||
}
|
||||
return core.Concat(core.Upper(stem[:1]), stem[1:])
|
||||
}
|
||||
}
|
||||
|
||||
func writeTestFile(t *testing.T, path, content string) {
|
||||
t.Helper()
|
||||
|
||||
writeResult := hostFS.Write(path, content)
|
||||
if !writeResult.OK {
|
||||
t.Fatalf("write %s: %v", path, resultError(writeResult))
|
||||
}
|
||||
}
|
||||
|
||||
// parseTestFunc parses src as a Go file (comments included) and returns the
// file AST together with the function declaration named name, failing the
// test if parsing fails or no such function exists.
func parseTestFunc(t *testing.T, src, name string) (*ast.File, *ast.FuncDecl) {
	t.Helper()

	fset := token.NewFileSet()
	fileAST, err := parser.ParseFile(fset, "test.go", src, parser.ParseComments)
	if err != nil {
		t.Fatalf("parse test source: %v", err)
	}

	for _, decl := range fileAST.Decls {
		if fn, ok := decl.(*ast.FuncDecl); ok && fn.Name.Name == name {
			return fileAST, fn
		}
	}

	t.Fatalf("function %s not found", name)
	return nil, nil
}
|
||||
67
core_helpers.go
Normal file
67
core_helpers.go
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
// SPDX-Licence-Identifier: EUPL-1.2
|
||||
package session
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// hostFS is the package-wide filesystem handle used for all host file I/O.
// NewUnrestricted suggests it is not confined to a sandbox root — confirm
// against the core.Fs documentation.
var hostFS = (&core.Fs{}).NewUnrestricted()
|
||||
|
||||
// rawJSON stores the raw bytes of a JSON value verbatim for deferred
// decoding (an in-package stand-in for encoding/json.RawMessage).
type rawJSON []byte
|
||||
|
||||
func (m *rawJSON) UnmarshalJSON(data []byte) error {
|
||||
if m == nil {
|
||||
return core.E("rawJSON.UnmarshalJSON", "nil receiver", nil)
|
||||
}
|
||||
*m = append((*m)[:0], data...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m rawJSON) MarshalJSON() ([]byte, error) {
|
||||
if m == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func resultError(result core.Result) error {
|
||||
if result.OK {
|
||||
return nil
|
||||
}
|
||||
if err, ok := result.Value.(error); ok && err != nil {
|
||||
return err
|
||||
}
|
||||
return core.E("resultError", "unexpected core result failure", nil)
|
||||
}
|
||||
|
||||
// repeatString returns s concatenated with itself count times; an empty s
// or a non-positive count yields the empty string.
func repeatString(s string, count int) string {
	if count <= 0 || s == "" {
		return ""
	}
	out := make([]byte, 0, len(s)*count)
	for i := 0; i < count; i++ {
		out = append(out, s...)
	}
	return string(out)
}
|
||||
|
||||
// containsAny reports whether s contains any Unicode code point present in
// chars. Returns false when either string is empty.
//
// The rune-by-rune scan avoids the per-iteration []byte(s) conversion the
// previous bytes.IndexRune approach paid (one allocation and full copy of s
// for every character of chars).
func containsAny(s, chars string) bool {
	for _, r := range s {
		for _, ch := range chars {
			if r == ch {
				return true
			}
		}
	}
	return false
}
|
||||
|
||||
// indexOf returns the byte offset of the first occurrence of substr in s,
// or -1 when absent; an empty substr matches at offset 0.
func indexOf(s, substr string) int {
	haystack := []byte(s)
	needle := []byte(substr)
	return bytes.Index(haystack, needle)
}
|
||||
|
||||
// trimQuotes strips one matching pair of surrounding double quotes or
// backticks from s; anything else (including unbalanced or mixed quotes)
// is returned unchanged.
func trimQuotes(s string) string {
	n := len(s)
	if n < 2 {
		return s
	}
	first, last := s[0], s[n-1]
	if first == last && (first == '"' || first == '`') {
		return s[1 : n-1]
	}
	return s
}
|
||||
|
|
@ -239,10 +239,11 @@ Success or failure of a `tool_use` event is indicated by a Unicode check mark (U
|
|||
|
||||
Each event is rendered as a `<div class="event">` containing:
|
||||
|
||||
- `.event-header`: always visible; shows timestamp, tool label, truncated input (120 chars), duration, and status icon.
|
||||
- `.event-header`: always visible; shows timestamp, tool label, truncated input (120 chars), duration, status icon, and a permalink anchor.
|
||||
- `.event-body`: hidden by default; shown on click via the `toggle(i)` JavaScript function which toggles the `open` class.
|
||||
|
||||
The arrow indicator rotates 90 degrees (CSS `transform: rotate(90deg)`) when the panel is open. Output text in `.event-body` is capped at 400px height with `overflow-y: auto`.
|
||||
If the page loads with an `#evt-N` fragment, that event is opened automatically and scrolled into view.
|
||||
|
||||
Input label semantics vary per tool:
|
||||
|
||||
|
|
|
|||
|
|
@ -138,6 +138,17 @@ Both `go vet ./...` and `golangci-lint run ./...` must be clean before committin
|
|||
- Use explicit types on struct fields and function signatures.
|
||||
- Avoid `interface{}` in public APIs; use typed parameters where possible.
|
||||
- Handle all errors explicitly; do not use blank `_` for error returns in non-test code.
|
||||
- Exported declarations must have Go doc comments beginning with the identifier name.
|
||||
|
||||
### Imports and Error Handling
|
||||
|
||||
- Do not import `errors` or `github.com/pkg/errors` in non-test Go files; use `coreerr.E(op, msg, err)` from `dappco.re/go/core/log`.
|
||||
- Do not reintroduce legacy `forge.lthn.ai/...` module paths; use `dappco.re/go/core/...` imports.
|
||||
|
||||
### Test Naming
|
||||
|
||||
Test functions should follow `TestFunctionName_Context_Good/Bad/Ugly`.
|
||||
The conventions test suite checks test naming, banned imports, and exported usage comments during `go test ./...`.
|
||||
|
||||
### File Headers
|
||||
|
||||
|
|
|
|||
|
|
@ -76,5 +76,5 @@ The following have been identified as potential improvements but are not current
|
|||
- **Parallel search**: fan out `ParseTranscript` calls across goroutines with a result channel to reduce wall time for large directories.
|
||||
- **Persistent index**: a lightweight SQLite index or binary cache per session file to avoid re-parsing on every `Search` or `ListSessions` call.
|
||||
- **Additional tool types**: the parser's `extractToolInput` fallback handles any unknown tool by listing its JSON keys. Dedicated handling could be added for `WebFetch`, `WebSearch`, `NotebookEdit`, and other tools that appear in Claude Code sessions.
|
||||
- **HTML export options**: configurable truncation limits, optional full-output display, and per-event direct links (anchor IDs already exist as `evt-{i}`).
|
||||
- **HTML export options**: configurable truncation limits and optional full-output display remain open; per-event direct links are now available via `#evt-{i}` permalinks.
|
||||
- **VHS alternative**: a pure-Go terminal animation renderer to eliminate the `vhs` dependency for MP4 output.
|
||||
|
|
|
|||
2
go.mod
2
go.mod
|
|
@ -3,7 +3,7 @@ module dappco.re/go/core/session
|
|||
go 1.26.0
|
||||
|
||||
require (
|
||||
dappco.re/go/core/log v0.1.0
|
||||
dappco.re/go/core v0.8.0-alpha.1
|
||||
github.com/stretchr/testify v1.11.1
|
||||
)
|
||||
|
||||
|
|
|
|||
4
go.sum
4
go.sum
|
|
@ -1,5 +1,5 @@
|
|||
dappco.re/go/core/log v0.1.0 h1:pa71Vq2TD2aoEUQWFKwNcaJ3GBY8HbaNGqtE688Unyc=
|
||||
dappco.re/go/core/log v0.1.0/go.mod h1:Nkqb8gsXhZAO8VLpx7B8i1iAmohhzqA20b9Zr8VUcJs=
|
||||
dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk=
|
||||
dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
|
|
|||
75
html.go
75
html.go
|
|
@ -2,22 +2,21 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html"
|
||||
"os"
|
||||
"strings"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// RenderHTML generates a self-contained HTML timeline from a session.
|
||||
//
|
||||
// Example:
|
||||
// err := session.RenderHTML(sess, "/tmp/session.html")
|
||||
func RenderHTML(sess *Session, outputPath string) error {
|
||||
f, err := os.Create(outputPath)
|
||||
if err != nil {
|
||||
return coreerr.E("RenderHTML", "create html", err)
|
||||
if !hostFS.IsDir(path.Dir(outputPath)) {
|
||||
return core.E("RenderHTML", "parent directory does not exist", nil)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
duration := sess.EndTime.Sub(sess.StartTime)
|
||||
toolCount := 0
|
||||
|
|
@ -31,7 +30,8 @@ func RenderHTML(sess *Session, outputPath string) error {
|
|||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(f, `<!DOCTYPE html>
|
||||
b := core.NewBuilder()
|
||||
b.WriteString(core.Sprintf(`<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
|
|
@ -71,6 +71,8 @@ body { background: var(--bg); color: var(--fg); font-family: var(--font); font-s
|
|||
.event-header .input { flex: 1; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }
|
||||
.event-header .dur { color: var(--dim); font-size: 11px; min-width: 50px; text-align: right; }
|
||||
.event-header .status { font-size: 14px; min-width: 20px; text-align: center; }
|
||||
.event-header .permalink { color: var(--dim); font-size: 12px; min-width: 16px; text-align: center; text-decoration: none; }
|
||||
.event-header .permalink:hover { color: var(--accent); }
|
||||
.event-header .arrow { color: var(--dim); font-size: 10px; transition: transform 0.15s; min-width: 16px; }
|
||||
.event.open .arrow { transform: rotate(90deg); }
|
||||
.event-body { display: none; padding: 12px; background: var(--bg); border-top: 1px solid var(--border); }
|
||||
|
|
@ -93,14 +95,14 @@ body { background: var(--bg); color: var(--fg); font-family: var(--font); font-s
|
|||
shortID(sess.ID), shortID(sess.ID),
|
||||
sess.StartTime.Format("2006-01-02 15:04:05"),
|
||||
formatDuration(duration),
|
||||
toolCount)
|
||||
toolCount))
|
||||
|
||||
if errorCount > 0 {
|
||||
fmt.Fprintf(f, `
|
||||
<span class="err">%d errors</span>`, errorCount)
|
||||
b.WriteString(core.Sprintf(`
|
||||
<span class="err">%d errors</span>`, errorCount))
|
||||
}
|
||||
|
||||
fmt.Fprintf(f, `
|
||||
b.WriteString(`
|
||||
</div>
|
||||
</div>
|
||||
<div class="search">
|
||||
|
|
@ -108,7 +110,7 @@ body { background: var(--bg); color: var(--fg); font-family: var(--font); font-s
|
|||
<select id="filter" onchange="filterEvents()">
|
||||
<option value="all">All events</option>
|
||||
<option value="tool_use">Tool calls only</option>
|
||||
<option value="errors">Errors only</option>
|
||||
<option value='errors'>Errors only</option>
|
||||
<option value="Bash">Bash only</option>
|
||||
<option value="user">User messages</option>
|
||||
</select>
|
||||
|
|
@ -119,7 +121,7 @@ body { background: var(--bg); color: var(--fg); font-family: var(--font); font-s
|
|||
|
||||
var i int
|
||||
for evt := range sess.EventsSeq() {
|
||||
toolClass := strings.ToLower(evt.Tool)
|
||||
toolClass := core.Lower(evt.Tool)
|
||||
if evt.Type == "user" {
|
||||
toolClass = "user"
|
||||
} else if evt.Type == "assistant" {
|
||||
|
|
@ -152,7 +154,7 @@ body { background: var(--bg); color: var(--fg); font-family: var(--font); font-s
|
|||
durStr = formatDuration(evt.Duration)
|
||||
}
|
||||
|
||||
fmt.Fprintf(f, `<div class="event%s" data-type="%s" data-tool="%s" data-text="%s" id="evt-%d">
|
||||
b.WriteString(core.Sprintf(`<div class="event%s" data-type="%s" data-tool="%s" data-text="%s" id="evt-%d">
|
||||
<div class="event-header" onclick="toggle(%d)">
|
||||
<span class="arrow">▶</span>
|
||||
<span class="time">%s</span>
|
||||
|
|
@ -160,13 +162,14 @@ body { background: var(--bg); color: var(--fg); font-family: var(--font); font-s
|
|||
<span class="input">%s</span>
|
||||
<span class="dur">%s</span>
|
||||
<span class="status">%s</span>
|
||||
<a class="permalink" href="#evt-%d" aria-label="Direct link to this event" onclick="event.stopPropagation()">#</a>
|
||||
</div>
|
||||
<div class="event-body">
|
||||
`,
|
||||
errorClass,
|
||||
evt.Type,
|
||||
evt.Tool,
|
||||
html.EscapeString(strings.ToLower(evt.Input+" "+evt.Output)),
|
||||
html.EscapeString(core.Lower(core.Concat(evt.Input, " ", evt.Output))),
|
||||
i,
|
||||
i,
|
||||
evt.Timestamp.Format("15:04:05"),
|
||||
|
|
@ -174,7 +177,8 @@ body { background: var(--bg); color: var(--fg); font-family: var(--font); font-s
|
|||
html.EscapeString(toolLabel),
|
||||
html.EscapeString(truncate(evt.Input, 120)),
|
||||
durStr,
|
||||
statusIcon)
|
||||
statusIcon,
|
||||
i))
|
||||
|
||||
if evt.Input != "" {
|
||||
label := "Command"
|
||||
|
|
@ -187,8 +191,8 @@ body { background: var(--bg); color: var(--fg); font-family: var(--font); font-s
|
|||
} else if evt.Tool == "Edit" || evt.Tool == "Write" {
|
||||
label = "File"
|
||||
}
|
||||
fmt.Fprintf(f, ` <div class="section"><div class="label">%s</div><pre>%s</pre></div>
|
||||
`, label, html.EscapeString(evt.Input))
|
||||
b.WriteString(core.Sprintf(` <div class="section"><div class="label">%s</div><pre>%s</pre></div>
|
||||
`, label, html.EscapeString(evt.Input)))
|
||||
}
|
||||
|
||||
if evt.Output != "" {
|
||||
|
|
@ -196,17 +200,17 @@ body { background: var(--bg); color: var(--fg); font-family: var(--font); font-s
|
|||
if !evt.Success {
|
||||
outClass = "output err"
|
||||
}
|
||||
fmt.Fprintf(f, ` <div class="section"><div class="label">Output</div><pre class="%s">%s</pre></div>
|
||||
`, outClass, html.EscapeString(evt.Output))
|
||||
b.WriteString(core.Sprintf(` <div class="section"><div class="label">Output</div><pre class="%s">%s</pre></div>
|
||||
`, outClass, html.EscapeString(evt.Output)))
|
||||
}
|
||||
|
||||
fmt.Fprint(f, ` </div>
|
||||
b.WriteString(` </div>
|
||||
</div>
|
||||
`)
|
||||
i++
|
||||
}
|
||||
|
||||
fmt.Fprint(f, `</div>
|
||||
b.WriteString(`</div>
|
||||
<script>
|
||||
function toggle(i) {
|
||||
document.getElementById('evt-'+i).classList.toggle('open');
|
||||
|
|
@ -227,17 +231,32 @@ function filterEvents() {
|
|||
el.classList.toggle('hidden', !show);
|
||||
});
|
||||
}
|
||||
function openHashEvent() {
|
||||
const hash = window.location.hash;
|
||||
if (!hash || !hash.startsWith('#evt-')) return;
|
||||
const el = document.getElementById(hash.slice(1));
|
||||
if (!el) return;
|
||||
el.classList.add('open');
|
||||
el.scrollIntoView({block: 'start'});
|
||||
}
|
||||
document.addEventListener('keydown', e => {
|
||||
if (e.key === '/' && document.activeElement.tagName !== 'INPUT') {
|
||||
e.preventDefault();
|
||||
document.getElementById('search').focus();
|
||||
}
|
||||
});
|
||||
window.addEventListener('hashchange', openHashEvent);
|
||||
document.addEventListener('DOMContentLoaded', openHashEvent);
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
`)
|
||||
|
||||
writeResult := hostFS.Write(outputPath, b.String())
|
||||
if !writeResult.OK {
|
||||
return core.E("RenderHTML", "write html", resultError(writeResult))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -250,13 +269,13 @@ func shortID(id string) string {
|
|||
|
||||
func formatDuration(d time.Duration) string {
|
||||
if d < time.Second {
|
||||
return fmt.Sprintf("%dms", d.Milliseconds())
|
||||
return core.Sprintf("%dms", d.Milliseconds())
|
||||
}
|
||||
if d < time.Minute {
|
||||
return fmt.Sprintf("%.1fs", d.Seconds())
|
||||
return core.Sprintf("%.1fs", d.Seconds())
|
||||
}
|
||||
if d < time.Hour {
|
||||
return fmt.Sprintf("%dm%ds", int(d.Minutes()), int(d.Seconds())%60)
|
||||
return core.Sprintf("%dm%ds", int(d.Minutes()), int(d.Seconds())%60)
|
||||
}
|
||||
return fmt.Sprintf("%dh%dm", int(d.Hours()), int(d.Minutes())%60)
|
||||
return core.Sprintf("%dh%dm", int(d.Hours()), int(d.Minutes())%60)
|
||||
}
|
||||
|
|
|
|||
60
html_test.go
60
html_test.go
|
|
@ -2,16 +2,15 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRenderHTML_BasicSession_Good(t *testing.T) {
|
||||
func TestHTML_RenderHTMLBasicSession_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
outputPath := dir + "/output.html"
|
||||
|
||||
|
|
@ -57,10 +56,9 @@ func TestRenderHTML_BasicSession_Good(t *testing.T) {
|
|||
err := RenderHTML(sess, outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
content, err := os.ReadFile(outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
html := string(content)
|
||||
readResult := hostFS.Read(outputPath)
|
||||
require.True(t, readResult.OK)
|
||||
html := readResult.Value.(string)
|
||||
|
||||
// Basic structure checks
|
||||
assert.Contains(t, html, "<!DOCTYPE html>")
|
||||
|
|
@ -75,12 +73,14 @@ func TestRenderHTML_BasicSession_Good(t *testing.T) {
|
|||
assert.Contains(t, html, "Claude") // assistant event label
|
||||
assert.Contains(t, html, "Bash")
|
||||
assert.Contains(t, html, "Read")
|
||||
assert.Contains(t, html, `href="#evt-0"`)
|
||||
assert.Contains(t, html, "openHashEvent")
|
||||
// Should contain JS for toggle and filter
|
||||
assert.Contains(t, html, "function toggle")
|
||||
assert.Contains(t, html, "function filterEvents")
|
||||
}
|
||||
|
||||
func TestRenderHTML_EmptySession_Good(t *testing.T) {
|
||||
func TestHTML_RenderHTMLEmptySession_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
outputPath := dir + "/empty.html"
|
||||
|
||||
|
|
@ -95,17 +95,16 @@ func TestRenderHTML_EmptySession_Good(t *testing.T) {
|
|||
err := RenderHTML(sess, outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
content, err := os.ReadFile(outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
html := string(content)
|
||||
readResult := hostFS.Read(outputPath)
|
||||
require.True(t, readResult.OK)
|
||||
html := readResult.Value.(string)
|
||||
assert.Contains(t, html, "<!DOCTYPE html>")
|
||||
assert.Contains(t, html, "0 tool calls")
|
||||
// Should NOT contain error span
|
||||
assert.NotContains(t, html, "errors</span>")
|
||||
}
|
||||
|
||||
func TestRenderHTML_WithErrors_Good(t *testing.T) {
|
||||
func TestHTML_RenderHTMLWithErrors_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
outputPath := dir + "/errors.html"
|
||||
|
||||
|
|
@ -140,17 +139,16 @@ func TestRenderHTML_WithErrors_Good(t *testing.T) {
|
|||
err := RenderHTML(sess, outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
content, err := os.ReadFile(outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
html := string(content)
|
||||
readResult := hostFS.Read(outputPath)
|
||||
require.True(t, readResult.OK)
|
||||
html := readResult.Value.(string)
|
||||
assert.Contains(t, html, "1 errors")
|
||||
assert.Contains(t, html, `class="event error"`)
|
||||
assert.Contains(t, html, "✗") // cross mark for failed
|
||||
assert.Contains(t, html, "✓") // check mark for success
|
||||
}
|
||||
|
||||
func TestRenderHTML_SpecialCharacters_Good(t *testing.T) {
|
||||
func TestHTML_RenderHTMLSpecialCharacters_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
outputPath := dir + "/special.html"
|
||||
|
||||
|
|
@ -180,10 +178,9 @@ func TestRenderHTML_SpecialCharacters_Good(t *testing.T) {
|
|||
err := RenderHTML(sess, outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
content, err := os.ReadFile(outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
html := string(content)
|
||||
readResult := hostFS.Read(outputPath)
|
||||
require.True(t, readResult.OK)
|
||||
html := readResult.Value.(string)
|
||||
|
||||
// Script tags should be escaped, never raw
|
||||
assert.NotContains(t, html, "<script>alert")
|
||||
|
|
@ -191,7 +188,7 @@ func TestRenderHTML_SpecialCharacters_Good(t *testing.T) {
|
|||
assert.Contains(t, html, "&")
|
||||
}
|
||||
|
||||
func TestRenderHTML_InvalidPath_Ugly(t *testing.T) {
|
||||
func TestHTML_RenderHTMLInvalidPath_Ugly(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "test",
|
||||
Events: nil,
|
||||
|
|
@ -199,10 +196,10 @@ func TestRenderHTML_InvalidPath_Ugly(t *testing.T) {
|
|||
|
||||
err := RenderHTML(sess, "/nonexistent/dir/output.html")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "create html")
|
||||
assert.Contains(t, err.Error(), "parent directory does not exist")
|
||||
}
|
||||
|
||||
func TestRenderHTML_LabelsByToolType_Good(t *testing.T) {
|
||||
func TestHTML_RenderHTMLLabelsByToolType_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
outputPath := dir + "/labels.html"
|
||||
|
||||
|
|
@ -224,15 +221,14 @@ func TestRenderHTML_LabelsByToolType_Good(t *testing.T) {
|
|||
err := RenderHTML(sess, outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
content, err := os.ReadFile(outputPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
html := string(content)
|
||||
readResult := hostFS.Read(outputPath)
|
||||
require.True(t, readResult.OK)
|
||||
html := readResult.Value.(string)
|
||||
|
||||
// Bash gets "Command" label
|
||||
assert.True(t, strings.Contains(html, "Command"), "Bash events should use 'Command' label")
|
||||
assert.True(t, core.Contains(html, "Command"), "Bash events should use 'Command' label")
|
||||
// Read, Glob, Grep get "Target" label
|
||||
assert.True(t, strings.Contains(html, "Target"), "Read/Glob/Grep events should use 'Target' label")
|
||||
assert.True(t, core.Contains(html, "Target"), "Read/Glob/Grep events should use 'Target' label")
|
||||
// Edit, Write get "File" label
|
||||
assert.True(t, strings.Contains(html, "File"), "Edit/Write events should use 'File' label")
|
||||
assert.True(t, core.Contains(html, "File"), "Edit/Write events should use 'File' label")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@ go-session provides two output formats for visualising parsed sessions: a self-c
|
|||
- Yellow: User messages
|
||||
- Grey: Assistant responses
|
||||
- Red border: Failed tool calls
|
||||
- **Permalinks** on each event card for direct `#evt-N` links
|
||||
|
||||
### Usage
|
||||
|
||||
|
|
|
|||
225
parser.go
225
parser.go
|
|
@ -3,18 +3,15 @@ package session
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"iter"
|
||||
"maps"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// maxScannerBuffer is the maximum line length the scanner will accept.
|
||||
|
|
@ -22,6 +19,9 @@ import (
|
|||
const maxScannerBuffer = 8 * 1024 * 1024
|
||||
|
||||
// Event represents a single action in a session timeline.
|
||||
//
|
||||
// Example:
|
||||
// evt := session.Event{Type: "tool_use", Tool: "Bash"}
|
||||
type Event struct {
|
||||
Timestamp time.Time
|
||||
Type string // "tool_use", "user", "assistant", "error"
|
||||
|
|
@ -35,6 +35,9 @@ type Event struct {
|
|||
}
|
||||
|
||||
// Session holds parsed session metadata and events.
|
||||
//
|
||||
// Example:
|
||||
// sess := &session.Session{ID: "abc123", Events: []session.Event{}}
|
||||
type Session struct {
|
||||
ID string
|
||||
Path string
|
||||
|
|
@ -44,33 +47,39 @@ type Session struct {
|
|||
}
|
||||
|
||||
// EventsSeq returns an iterator over the session's events.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// for evt := range sess.EventsSeq() {
|
||||
// _ = evt
|
||||
// }
|
||||
func (s *Session) EventsSeq() iter.Seq[Event] {
|
||||
return slices.Values(s.Events)
|
||||
}
|
||||
|
||||
// rawEntry is the top-level structure of a Claude Code JSONL line.
|
||||
type rawEntry struct {
|
||||
Type string `json:"type"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
SessionID string `json:"sessionId"`
|
||||
Message json.RawMessage `json:"message"`
|
||||
UserType string `json:"userType"`
|
||||
Type string `json:"type"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
SessionID string `json:"sessionId"`
|
||||
Message rawJSON `json:"message"`
|
||||
UserType string `json:"userType"`
|
||||
}
|
||||
|
||||
type rawMessage struct {
|
||||
Role string `json:"role"`
|
||||
Content []json.RawMessage `json:"content"`
|
||||
Role string `json:"role"`
|
||||
Content []rawJSON `json:"content"`
|
||||
}
|
||||
|
||||
type contentBlock struct {
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Text string `json:"text,omitempty"`
|
||||
Input json.RawMessage `json:"input,omitempty"`
|
||||
ToolUseID string `json:"tool_use_id,omitempty"`
|
||||
Content any `json:"content,omitempty"`
|
||||
IsError *bool `json:"is_error,omitempty"`
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Text string `json:"text,omitempty"`
|
||||
Input rawJSON `json:"input,omitempty"`
|
||||
ToolUseID string `json:"tool_use_id,omitempty"`
|
||||
Content any `json:"content,omitempty"`
|
||||
IsError *bool `json:"is_error,omitempty"`
|
||||
}
|
||||
|
||||
type bashInput struct {
|
||||
|
|
@ -113,6 +122,9 @@ type taskInput struct {
|
|||
}
|
||||
|
||||
// ParseStats reports diagnostic information from a parse run.
|
||||
//
|
||||
// Example:
|
||||
// stats := &session.ParseStats{TotalLines: 42}
|
||||
type ParseStats struct {
|
||||
TotalLines int
|
||||
SkippedLines int
|
||||
|
|
@ -121,36 +133,50 @@ type ParseStats struct {
|
|||
}
|
||||
|
||||
// ListSessions returns all sessions found in the Claude projects directory.
|
||||
//
|
||||
// Example:
|
||||
// sessions, err := session.ListSessions("/tmp/projects")
|
||||
func ListSessions(projectsDir string) ([]Session, error) {
|
||||
return slices.Collect(ListSessionsSeq(projectsDir)), nil
|
||||
}
|
||||
|
||||
// ListSessionsSeq returns an iterator over all sessions found in the Claude projects directory.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// for sess := range session.ListSessionsSeq("/tmp/projects") {
|
||||
// _ = sess
|
||||
// }
|
||||
func ListSessionsSeq(projectsDir string) iter.Seq[Session] {
|
||||
return func(yield func(Session) bool) {
|
||||
matches, err := filepath.Glob(filepath.Join(projectsDir, "*.jsonl"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
matches := core.PathGlob(path.Join(projectsDir, "*.jsonl"))
|
||||
|
||||
var sessions []Session
|
||||
for _, path := range matches {
|
||||
base := filepath.Base(path)
|
||||
id := strings.TrimSuffix(base, ".jsonl")
|
||||
for _, filePath := range matches {
|
||||
base := path.Base(filePath)
|
||||
id := core.TrimSuffix(base, ".jsonl")
|
||||
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
infoResult := hostFS.Stat(filePath)
|
||||
if !infoResult.OK {
|
||||
continue
|
||||
}
|
||||
info, ok := infoResult.Value.(fs.FileInfo)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
s := Session{
|
||||
ID: id,
|
||||
Path: path,
|
||||
Path: filePath,
|
||||
}
|
||||
|
||||
// Quick scan for first and last timestamps
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
openResult := hostFS.Open(filePath)
|
||||
if !openResult.OK {
|
||||
continue
|
||||
}
|
||||
f, ok := openResult.Value.(io.ReadCloser)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -159,7 +185,7 @@ func ListSessionsSeq(projectsDir string) iter.Seq[Session] {
|
|||
var firstTS, lastTS string
|
||||
for scanner.Scan() {
|
||||
var entry rawEntry
|
||||
if json.Unmarshal(scanner.Bytes(), &entry) != nil {
|
||||
if !core.JSONUnmarshal(scanner.Bytes(), &entry).OK {
|
||||
continue
|
||||
}
|
||||
if entry.Timestamp == "" {
|
||||
|
|
@ -203,22 +229,26 @@ func ListSessionsSeq(projectsDir string) iter.Seq[Session] {
|
|||
|
||||
// PruneSessions deletes session files in the projects directory that were last
|
||||
// modified more than maxAge ago. Returns the number of files deleted.
|
||||
//
|
||||
// Example:
|
||||
// deleted, err := session.PruneSessions("/tmp/projects", 24*time.Hour)
|
||||
func PruneSessions(projectsDir string, maxAge time.Duration) (int, error) {
|
||||
matches, err := filepath.Glob(filepath.Join(projectsDir, "*.jsonl"))
|
||||
if err != nil {
|
||||
return 0, coreerr.E("PruneSessions", "list sessions", err)
|
||||
}
|
||||
matches := core.PathGlob(path.Join(projectsDir, "*.jsonl"))
|
||||
|
||||
var deleted int
|
||||
now := time.Now()
|
||||
for _, path := range matches {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
for _, filePath := range matches {
|
||||
infoResult := hostFS.Stat(filePath)
|
||||
if !infoResult.OK {
|
||||
continue
|
||||
}
|
||||
info, ok := infoResult.Value.(fs.FileInfo)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if now.Sub(info.ModTime()) > maxAge {
|
||||
if err := os.Remove(path); err == nil {
|
||||
if deleteResult := hostFS.Delete(filePath); deleteResult.OK {
|
||||
deleted++
|
||||
}
|
||||
}
|
||||
|
|
@ -228,6 +258,9 @@ func PruneSessions(projectsDir string, maxAge time.Duration) (int, error) {
|
|||
|
||||
// IsExpired returns true if the session's end time is older than the given maxAge
|
||||
// relative to now.
|
||||
//
|
||||
// Example:
|
||||
// expired := sess.IsExpired(24 * time.Hour)
|
||||
func (s *Session) IsExpired(maxAge time.Duration) bool {
|
||||
if s.EndTime.IsZero() {
|
||||
return false
|
||||
|
|
@ -237,39 +270,59 @@ func (s *Session) IsExpired(maxAge time.Duration) bool {
|
|||
|
||||
// FetchSession retrieves a session by ID from the projects directory.
|
||||
// It ensures the ID does not contain path traversal characters.
|
||||
//
|
||||
// Example:
|
||||
// sess, stats, err := session.FetchSession("/tmp/projects", "abc123")
|
||||
func FetchSession(projectsDir, id string) (*Session, *ParseStats, error) {
|
||||
if strings.Contains(id, "..") || strings.ContainsAny(id, `/\`) {
|
||||
return nil, nil, coreerr.E("FetchSession", "invalid session id", nil)
|
||||
if core.Contains(id, "..") || containsAny(id, `/\`) {
|
||||
return nil, nil, core.E("FetchSession", "invalid session id", nil)
|
||||
}
|
||||
|
||||
path := filepath.Join(projectsDir, id+".jsonl")
|
||||
return ParseTranscript(path)
|
||||
filePath := path.Join(projectsDir, id+".jsonl")
|
||||
return ParseTranscript(filePath)
|
||||
}
|
||||
|
||||
// ParseTranscript reads a JSONL session file and returns structured events.
|
||||
// Malformed or truncated lines are skipped; diagnostics are reported in ParseStats.
|
||||
func ParseTranscript(path string) (*Session, *ParseStats, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, nil, coreerr.E("ParseTranscript", "open transcript", err)
|
||||
//
|
||||
// Example:
|
||||
// sess, stats, err := session.ParseTranscript("/tmp/projects/abc123.jsonl")
|
||||
func ParseTranscript(filePath string) (*Session, *ParseStats, error) {
|
||||
openResult := hostFS.Open(filePath)
|
||||
if !openResult.OK {
|
||||
return nil, nil, core.E("ParseTranscript", "open transcript", resultError(openResult))
|
||||
}
|
||||
f, ok := openResult.Value.(io.ReadCloser)
|
||||
if !ok {
|
||||
return nil, nil, core.E("ParseTranscript", "unexpected file handle type", nil)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
base := filepath.Base(path)
|
||||
id := strings.TrimSuffix(base, ".jsonl")
|
||||
base := path.Base(filePath)
|
||||
id := core.TrimSuffix(base, ".jsonl")
|
||||
|
||||
sess, stats, err := parseFromReader(f, id)
|
||||
if sess != nil {
|
||||
sess.Path = path
|
||||
sess.Path = filePath
|
||||
}
|
||||
return sess, stats, err
|
||||
if err != nil {
|
||||
return sess, stats, core.E("ParseTranscript", "parse transcript", err)
|
||||
}
|
||||
return sess, stats, nil
|
||||
}
|
||||
|
||||
// ParseTranscriptReader parses a JSONL session from an io.Reader, enabling
|
||||
// streaming parse without needing a file on disc. The id parameter sets
|
||||
// the session ID (since there is no file name to derive it from).
|
||||
//
|
||||
// Example:
|
||||
// sess, stats, err := session.ParseTranscriptReader(reader, "abc123")
|
||||
func ParseTranscriptReader(r io.Reader, id string) (*Session, *ParseStats, error) {
|
||||
return parseFromReader(r, id)
|
||||
sess, stats, err := parseFromReader(r, id)
|
||||
if err != nil {
|
||||
return sess, stats, core.E("ParseTranscriptReader", "parse transcript", err)
|
||||
}
|
||||
return sess, stats, nil
|
||||
}
|
||||
|
||||
// parseFromReader is the shared implementation for both file-based and
|
||||
|
|
@ -302,7 +355,7 @@ func parseFromReader(r io.Reader, id string) (*Session, *ParseStats, error) {
|
|||
stats.TotalLines++
|
||||
|
||||
raw := scanner.Text()
|
||||
if strings.TrimSpace(raw) == "" {
|
||||
if core.Trim(raw) == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -310,21 +363,21 @@ func parseFromReader(r io.Reader, id string) (*Session, *ParseStats, error) {
|
|||
lastLineFailed = false
|
||||
|
||||
var entry rawEntry
|
||||
if err := json.Unmarshal([]byte(raw), &entry); err != nil {
|
||||
if !core.JSONUnmarshalString(raw, &entry).OK {
|
||||
stats.SkippedLines++
|
||||
preview := raw
|
||||
if len(preview) > 100 {
|
||||
preview = preview[:100]
|
||||
}
|
||||
stats.Warnings = append(stats.Warnings,
|
||||
fmt.Sprintf("line %d: skipped (bad JSON): %s", lineNum, preview))
|
||||
core.Sprintf("line %d: skipped (bad JSON): %s", lineNum, preview))
|
||||
lastLineFailed = true
|
||||
continue
|
||||
}
|
||||
|
||||
ts, err := time.Parse(time.RFC3339Nano, entry.Timestamp)
|
||||
if err != nil {
|
||||
stats.Warnings = append(stats.Warnings, fmt.Sprintf("line %d: bad timestamp %q: %v", lineNum, entry.Timestamp, err))
|
||||
stats.Warnings = append(stats.Warnings, core.Sprintf("line %d: bad timestamp %q: %v", lineNum, entry.Timestamp, err))
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -338,20 +391,20 @@ func parseFromReader(r io.Reader, id string) (*Session, *ParseStats, error) {
|
|||
switch entry.Type {
|
||||
case "assistant":
|
||||
var msg rawMessage
|
||||
if err := json.Unmarshal(entry.Message, &msg); err != nil {
|
||||
stats.Warnings = append(stats.Warnings, fmt.Sprintf("line %d: failed to unmarshal assistant message: %v", lineNum, err))
|
||||
if !core.JSONUnmarshal(entry.Message, &msg).OK {
|
||||
stats.Warnings = append(stats.Warnings, core.Sprintf("line %d: failed to unmarshal assistant message", lineNum))
|
||||
continue
|
||||
}
|
||||
for i, raw := range msg.Content {
|
||||
var block contentBlock
|
||||
if err := json.Unmarshal(raw, &block); err != nil {
|
||||
stats.Warnings = append(stats.Warnings, fmt.Sprintf("line %d block %d: failed to unmarshal content: %v", lineNum, i, err))
|
||||
if !core.JSONUnmarshal(raw, &block).OK {
|
||||
stats.Warnings = append(stats.Warnings, core.Sprintf("line %d block %d: failed to unmarshal content", lineNum, i))
|
||||
continue
|
||||
}
|
||||
|
||||
switch block.Type {
|
||||
case "text":
|
||||
if text := strings.TrimSpace(block.Text); text != "" {
|
||||
if text := core.Trim(block.Text); text != "" {
|
||||
sess.Events = append(sess.Events, Event{
|
||||
Timestamp: ts,
|
||||
Type: "assistant",
|
||||
|
|
@ -371,14 +424,14 @@ func parseFromReader(r io.Reader, id string) (*Session, *ParseStats, error) {
|
|||
|
||||
case "user":
|
||||
var msg rawMessage
|
||||
if err := json.Unmarshal(entry.Message, &msg); err != nil {
|
||||
stats.Warnings = append(stats.Warnings, fmt.Sprintf("line %d: failed to unmarshal user message: %v", lineNum, err))
|
||||
if !core.JSONUnmarshal(entry.Message, &msg).OK {
|
||||
stats.Warnings = append(stats.Warnings, core.Sprintf("line %d: failed to unmarshal user message", lineNum))
|
||||
continue
|
||||
}
|
||||
for i, raw := range msg.Content {
|
||||
var block contentBlock
|
||||
if err := json.Unmarshal(raw, &block); err != nil {
|
||||
stats.Warnings = append(stats.Warnings, fmt.Sprintf("line %d block %d: failed to unmarshal content: %v", lineNum, i, err))
|
||||
if !core.JSONUnmarshal(raw, &block).OK {
|
||||
stats.Warnings = append(stats.Warnings, core.Sprintf("line %d block %d: failed to unmarshal content", lineNum, i))
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -405,7 +458,7 @@ func parseFromReader(r io.Reader, id string) (*Session, *ParseStats, error) {
|
|||
}
|
||||
|
||||
case "text":
|
||||
if text := strings.TrimSpace(block.Text); text != "" {
|
||||
if text := core.Trim(block.Text); text != "" {
|
||||
sess.Events = append(sess.Events, Event{
|
||||
Timestamp: ts,
|
||||
Type: "user",
|
||||
|
|
@ -432,14 +485,14 @@ func parseFromReader(r io.Reader, id string) (*Session, *ParseStats, error) {
|
|||
if stats.OrphanedToolCalls > 0 {
|
||||
for id := range pendingTools {
|
||||
stats.Warnings = append(stats.Warnings,
|
||||
fmt.Sprintf("orphaned tool call: %s", id))
|
||||
core.Sprintf("orphaned tool call: %s", id))
|
||||
}
|
||||
}
|
||||
|
||||
return sess, stats, nil
|
||||
}
|
||||
|
||||
func extractToolInput(toolName string, raw json.RawMessage) string {
|
||||
func extractToolInput(toolName string, raw rawJSON) string {
|
||||
if raw == nil {
|
||||
return ""
|
||||
}
|
||||
|
|
@ -447,7 +500,7 @@ func extractToolInput(toolName string, raw json.RawMessage) string {
|
|||
switch toolName {
|
||||
case "Bash":
|
||||
var inp bashInput
|
||||
if json.Unmarshal(raw, &inp) == nil {
|
||||
if core.JSONUnmarshal(raw, &inp).OK {
|
||||
desc := inp.Description
|
||||
if desc != "" {
|
||||
desc = " # " + desc
|
||||
|
|
@ -456,49 +509,49 @@ func extractToolInput(toolName string, raw json.RawMessage) string {
|
|||
}
|
||||
case "Read":
|
||||
var inp readInput
|
||||
if json.Unmarshal(raw, &inp) == nil {
|
||||
if core.JSONUnmarshal(raw, &inp).OK {
|
||||
return inp.FilePath
|
||||
}
|
||||
case "Edit":
|
||||
var inp editInput
|
||||
if json.Unmarshal(raw, &inp) == nil {
|
||||
return fmt.Sprintf("%s (edit)", inp.FilePath)
|
||||
if core.JSONUnmarshal(raw, &inp).OK {
|
||||
return core.Sprintf("%s (edit)", inp.FilePath)
|
||||
}
|
||||
case "Write":
|
||||
var inp writeInput
|
||||
if json.Unmarshal(raw, &inp) == nil {
|
||||
return fmt.Sprintf("%s (%d bytes)", inp.FilePath, len(inp.Content))
|
||||
if core.JSONUnmarshal(raw, &inp).OK {
|
||||
return core.Sprintf("%s (%d bytes)", inp.FilePath, len(inp.Content))
|
||||
}
|
||||
case "Grep":
|
||||
var inp grepInput
|
||||
if json.Unmarshal(raw, &inp) == nil {
|
||||
if core.JSONUnmarshal(raw, &inp).OK {
|
||||
path := inp.Path
|
||||
if path == "" {
|
||||
path = "."
|
||||
}
|
||||
return fmt.Sprintf("/%s/ in %s", inp.Pattern, path)
|
||||
return core.Sprintf("/%s/ in %s", inp.Pattern, path)
|
||||
}
|
||||
case "Glob":
|
||||
var inp globInput
|
||||
if json.Unmarshal(raw, &inp) == nil {
|
||||
if core.JSONUnmarshal(raw, &inp).OK {
|
||||
return inp.Pattern
|
||||
}
|
||||
case "Task":
|
||||
var inp taskInput
|
||||
if json.Unmarshal(raw, &inp) == nil {
|
||||
if core.JSONUnmarshal(raw, &inp).OK {
|
||||
desc := inp.Description
|
||||
if desc == "" {
|
||||
desc = truncate(inp.Prompt, 80)
|
||||
}
|
||||
return fmt.Sprintf("[%s] %s", inp.SubagentType, desc)
|
||||
return core.Sprintf("[%s] %s", inp.SubagentType, desc)
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: show raw JSON keys
|
||||
var m map[string]any
|
||||
if json.Unmarshal(raw, &m) == nil {
|
||||
if core.JSONUnmarshal(raw, &m).OK {
|
||||
parts := slices.Sorted(maps.Keys(m))
|
||||
return strings.Join(parts, ", ")
|
||||
return core.Join(", ", parts...)
|
||||
}
|
||||
|
||||
return ""
|
||||
|
|
@ -517,13 +570,13 @@ func extractResultContent(content any) string {
|
|||
}
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, "\n")
|
||||
return core.Join("\n", parts...)
|
||||
case map[string]any:
|
||||
if text, ok := v["text"].(string); ok {
|
||||
return text
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%v", content)
|
||||
return core.Sprint(content)
|
||||
}
|
||||
|
||||
func truncate(s string, max int) string {
|
||||
|
|
|
|||
293
parser_test.go
293
parser_test.go
|
|
@ -3,14 +3,12 @@ package session
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"path"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -25,8 +23,11 @@ func ts(offsetSec int) string {
|
|||
|
||||
// jsonlLine marshals an arbitrary map to a single JSONL line.
|
||||
func jsonlLine(m map[string]any) string {
|
||||
b, _ := json.Marshal(m)
|
||||
return string(b)
|
||||
marshalResult := core.JSONMarshal(m)
|
||||
if !marshalResult.OK {
|
||||
panic(resultError(marshalResult))
|
||||
}
|
||||
return string(marshalResult.Value.([]byte))
|
||||
}
|
||||
|
||||
// userTextEntry creates a JSONL line for a user text message.
|
||||
|
|
@ -103,15 +104,22 @@ func toolResultEntry(timestamp, toolUseID string, content any, isError bool) str
|
|||
// writeJSONL writes lines to a temp .jsonl file and returns its path.
|
||||
func writeJSONL(t *testing.T, dir string, name string, lines ...string) string {
|
||||
t.Helper()
|
||||
path := filepath.Join(dir, name)
|
||||
err := os.WriteFile(path, []byte(strings.Join(lines, "\n")+"\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
return path
|
||||
filePath := path.Join(dir, name)
|
||||
writeResult := hostFS.Write(filePath, core.Concat(core.Join("\n", lines...), "\n"))
|
||||
require.True(t, writeResult.OK)
|
||||
return filePath
|
||||
}
|
||||
|
||||
func setFileTimes(filePath string, atime, mtime time.Time) error {
|
||||
return syscall.UtimesNano(filePath, []syscall.Timespec{
|
||||
syscall.NsecToTimespec(atime.UnixNano()),
|
||||
syscall.NsecToTimespec(mtime.UnixNano()),
|
||||
})
|
||||
}
|
||||
|
||||
// --- ParseTranscript tests ---
|
||||
|
||||
func TestParseTranscript_MinimalValid_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptMinimalValid_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := writeJSONL(t, dir, "minimal.jsonl",
|
||||
userTextEntry(ts(0), "Hello"),
|
||||
|
|
@ -136,7 +144,7 @@ func TestParseTranscript_MinimalValid_Good(t *testing.T) {
|
|||
assert.Equal(t, "Hi there!", sess.Events[1].Input)
|
||||
}
|
||||
|
||||
func TestParseTranscript_ToolCalls_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptToolCalls_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
lines := []string{
|
||||
|
|
@ -227,7 +235,7 @@ func TestParseTranscript_ToolCalls_Good(t *testing.T) {
|
|||
assert.Equal(t, "[research] Code analysis", toolEvents[6].Input)
|
||||
}
|
||||
|
||||
func TestParseTranscript_ToolError_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptToolError_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := writeJSONL(t, dir, "error.jsonl",
|
||||
toolUseEntry(ts(0), "Bash", "tool-err-1", map[string]any{
|
||||
|
|
@ -251,11 +259,12 @@ func TestParseTranscript_ToolError_Good(t *testing.T) {
|
|||
assert.Contains(t, toolEvents[0].ErrorMsg, "No such file or directory")
|
||||
}
|
||||
|
||||
func TestParseTranscript_EmptyFile_Bad(t *testing.T) {
|
||||
func TestParser_ParseTranscriptEmptyFile_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := writeJSONL(t, dir, "empty.jsonl")
|
||||
// Write a truly empty file
|
||||
require.NoError(t, os.WriteFile(path, []byte(""), 0644))
|
||||
writeResult := hostFS.Write(path, "")
|
||||
require.True(t, writeResult.OK)
|
||||
|
||||
sess, _, err := ParseTranscript(path)
|
||||
require.NoError(t, err)
|
||||
|
|
@ -264,7 +273,7 @@ func TestParseTranscript_EmptyFile_Bad(t *testing.T) {
|
|||
assert.True(t, sess.StartTime.IsZero())
|
||||
}
|
||||
|
||||
func TestParseTranscript_MalformedJSON_Bad(t *testing.T) {
|
||||
func TestParser_ParseTranscriptMalformedJSON_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := writeJSONL(t, dir, "malformed.jsonl",
|
||||
`{invalid json`,
|
||||
|
|
@ -284,7 +293,7 @@ func TestParseTranscript_MalformedJSON_Bad(t *testing.T) {
|
|||
assert.Equal(t, "assistant", sess.Events[1].Type)
|
||||
}
|
||||
|
||||
func TestParseTranscript_TruncatedJSONL_Bad(t *testing.T) {
|
||||
func TestParser_ParseTranscriptTruncatedJSONL_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
validLine := userTextEntry(ts(0), "Hello")
|
||||
// Truncated line: cut a valid JSON line in half
|
||||
|
|
@ -302,7 +311,7 @@ func TestParseTranscript_TruncatedJSONL_Bad(t *testing.T) {
|
|||
assert.Equal(t, "user", sess.Events[0].Type)
|
||||
}
|
||||
|
||||
func TestParseTranscript_LargeSession_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptLargeSession_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
var lines []string
|
||||
|
|
@ -310,13 +319,13 @@ func TestParseTranscript_LargeSession_Good(t *testing.T) {
|
|||
|
||||
// Generate 1000+ tool call pairs
|
||||
for i := range 1100 {
|
||||
toolID := fmt.Sprintf("tool-%d", i)
|
||||
toolID := core.Sprintf("tool-%d", i)
|
||||
offset := (i * 2) + 1
|
||||
lines = append(lines,
|
||||
toolUseEntry(ts(offset), "Bash", toolID, map[string]any{
|
||||
"command": fmt.Sprintf("echo %d", i),
|
||||
"command": core.Sprintf("echo %d", i),
|
||||
}),
|
||||
toolResultEntry(ts(offset+1), toolID, fmt.Sprintf("output %d", i), false),
|
||||
toolResultEntry(ts(offset+1), toolID, core.Sprintf("output %d", i), false),
|
||||
)
|
||||
}
|
||||
lines = append(lines, assistantTextEntry(ts(2202), "Done"))
|
||||
|
|
@ -335,7 +344,7 @@ func TestParseTranscript_LargeSession_Good(t *testing.T) {
|
|||
assert.Equal(t, 1100, toolCount, "all 1100 tool events should be parsed")
|
||||
}
|
||||
|
||||
func TestParseTranscript_NestedToolResults_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptNestedToolResults_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Tool result with array content (multiple text blocks)
|
||||
|
|
@ -384,7 +393,7 @@ func TestParseTranscript_NestedToolResults_Good(t *testing.T) {
|
|||
assert.Contains(t, toolEvents[0].Output, "Second block")
|
||||
}
|
||||
|
||||
func TestParseTranscript_NestedMapResult_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptNestedMapResult_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
lines := []string{
|
||||
|
|
@ -428,13 +437,13 @@ func TestParseTranscript_NestedMapResult_Good(t *testing.T) {
|
|||
assert.Contains(t, toolEvents[0].Output, "file contents here")
|
||||
}
|
||||
|
||||
func TestParseTranscript_FileNotFound_Ugly(t *testing.T) {
|
||||
func TestParser_ParseTranscriptFileNotFound_Ugly(t *testing.T) {
|
||||
_, _, err := ParseTranscript("/nonexistent/path/session.jsonl")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "open transcript")
|
||||
}
|
||||
|
||||
func TestParseTranscript_SessionIDFromFilename_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptSessionIDFromFilename_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := writeJSONL(t, dir, "abc123def456.jsonl",
|
||||
userTextEntry(ts(0), "test"),
|
||||
|
|
@ -445,7 +454,7 @@ func TestParseTranscript_SessionIDFromFilename_Good(t *testing.T) {
|
|||
assert.Equal(t, "abc123def456", sess.ID)
|
||||
}
|
||||
|
||||
func TestParseTranscript_TimestampsTracked_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptTimestampsTracked_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := writeJSONL(t, dir, "timestamps.jsonl",
|
||||
userTextEntry(ts(0), "start"),
|
||||
|
|
@ -463,9 +472,9 @@ func TestParseTranscript_TimestampsTracked_Good(t *testing.T) {
|
|||
assert.Equal(t, expectedEnd, sess.EndTime)
|
||||
}
|
||||
|
||||
func TestParseTranscript_TextTruncation_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptTextTruncation_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
longText := strings.Repeat("x", 600)
|
||||
longText := repeatString("x", 600)
|
||||
path := writeJSONL(t, dir, "truncation.jsonl",
|
||||
userTextEntry(ts(0), longText),
|
||||
)
|
||||
|
|
@ -476,10 +485,10 @@ func TestParseTranscript_TextTruncation_Good(t *testing.T) {
|
|||
require.Len(t, sess.Events, 1)
|
||||
// Input should be truncated to 500 + "..."
|
||||
assert.True(t, len(sess.Events[0].Input) <= 504, "input should be truncated")
|
||||
assert.True(t, strings.HasSuffix(sess.Events[0].Input, "..."), "truncated text should end with ...")
|
||||
assert.True(t, core.HasSuffix(sess.Events[0].Input, "..."), "truncated text should end with ...")
|
||||
}
|
||||
|
||||
func TestSession_EventsSeq_Good(t *testing.T) {
|
||||
func TestParser_SessionEventsSeq_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
Events: []Event{
|
||||
{Type: "user", Input: "one"},
|
||||
|
|
@ -496,7 +505,7 @@ func TestSession_EventsSeq_Good(t *testing.T) {
|
|||
assert.Equal(t, sess.Events, events)
|
||||
}
|
||||
|
||||
func TestParseTranscript_MixedContentBlocks_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptMixedContentBlocks_Good(t *testing.T) {
|
||||
// Assistant message with both text and tool_use in the same message
|
||||
dir := t.TempDir()
|
||||
|
||||
|
|
@ -534,7 +543,7 @@ func TestParseTranscript_MixedContentBlocks_Good(t *testing.T) {
|
|||
assert.Equal(t, "Read", sess.Events[1].Tool)
|
||||
}
|
||||
|
||||
func TestParseTranscript_UnmatchedToolResult_Bad(t *testing.T) {
|
||||
func TestParser_ParseTranscriptUnmatchedToolResult_Bad(t *testing.T) {
|
||||
// A tool_result with no matching tool_use should be silently ignored
|
||||
dir := t.TempDir()
|
||||
path := writeJSONL(t, dir, "unmatched.jsonl",
|
||||
|
|
@ -550,7 +559,7 @@ func TestParseTranscript_UnmatchedToolResult_Bad(t *testing.T) {
|
|||
assert.Equal(t, "user", sess.Events[0].Type)
|
||||
}
|
||||
|
||||
func TestParseTranscript_EmptyTimestamp_Bad(t *testing.T) {
|
||||
func TestParser_ParseTranscriptEmptyTimestamp_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Entry with empty timestamp
|
||||
line := jsonlLine(map[string]any{
|
||||
|
|
@ -575,7 +584,7 @@ func TestParseTranscript_EmptyTimestamp_Bad(t *testing.T) {
|
|||
|
||||
// --- ListSessions tests ---
|
||||
|
||||
func TestListSessions_EmptyDir_Good(t *testing.T) {
|
||||
func TestParser_ListSessionsEmptyDir_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
sessions, err := ListSessions(dir)
|
||||
|
|
@ -583,7 +592,7 @@ func TestListSessions_EmptyDir_Good(t *testing.T) {
|
|||
assert.Empty(t, sessions)
|
||||
}
|
||||
|
||||
func TestListSessions_SingleSession_Good(t *testing.T) {
|
||||
func TestParser_ListSessionsSingleSession_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeJSONL(t, dir, "session-abc.jsonl",
|
||||
userTextEntry(ts(0), "Hello"),
|
||||
|
|
@ -599,7 +608,7 @@ func TestListSessions_SingleSession_Good(t *testing.T) {
|
|||
assert.False(t, sessions[0].EndTime.IsZero())
|
||||
}
|
||||
|
||||
func TestListSessions_MultipleSorted_Good(t *testing.T) {
|
||||
func TestParser_ListSessionsMultipleSorted_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Create three sessions with different timestamps.
|
||||
|
|
@ -624,16 +633,16 @@ func TestListSessions_MultipleSorted_Good(t *testing.T) {
|
|||
assert.Equal(t, "old", sessions[2].ID)
|
||||
}
|
||||
|
||||
func TestListSessions_NonJSONLIgnored_Good(t *testing.T) {
|
||||
func TestParser_ListSessionsNonJSONLIgnored_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
writeJSONL(t, dir, "real-session.jsonl",
|
||||
userTextEntry(ts(0), "real"),
|
||||
)
|
||||
// Write non-JSONL files
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "readme.md"), []byte("# Hello"), 0644))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "notes.txt"), []byte("notes"), 0644))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "data.json"), []byte("{}"), 0644))
|
||||
require.True(t, hostFS.Write(path.Join(dir, "readme.md"), "# Hello").OK)
|
||||
require.True(t, hostFS.Write(path.Join(dir, "notes.txt"), "notes").OK)
|
||||
require.True(t, hostFS.Write(path.Join(dir, "data.json"), "{}").OK)
|
||||
|
||||
sessions, err := ListSessions(dir)
|
||||
require.NoError(t, err)
|
||||
|
|
@ -641,7 +650,7 @@ func TestListSessions_NonJSONLIgnored_Good(t *testing.T) {
|
|||
assert.Equal(t, "real-session", sessions[0].ID)
|
||||
}
|
||||
|
||||
func TestListSessionsSeq_MultipleSorted_Good(t *testing.T) {
|
||||
func TestParser_ListSessionsSeqMultipleSorted_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Create three sessions with different timestamps.
|
||||
|
|
@ -661,7 +670,7 @@ func TestListSessionsSeq_MultipleSorted_Good(t *testing.T) {
|
|||
assert.Equal(t, "old", sessions[2].ID)
|
||||
}
|
||||
|
||||
func TestListSessions_MalformedJSONLStillListed_Bad(t *testing.T) {
|
||||
func TestParser_ListSessionsMalformedJSONLStillListed_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// A .jsonl file with no valid timestamps — should still list with zero time or modtime
|
||||
|
|
@ -680,80 +689,80 @@ func TestListSessions_MalformedJSONLStillListed_Bad(t *testing.T) {
|
|||
|
||||
// --- extractToolInput tests ---
|
||||
|
||||
func TestExtractToolInput_Bash_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"command":"go test ./...","description":"run tests","timeout":120}`)
|
||||
func TestParser_ExtractToolInputBash_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"command":"go test ./...","description":"run tests","timeout":120}`))
|
||||
result := extractToolInput("Bash", input)
|
||||
assert.Equal(t, "go test ./... # run tests", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_BashNoDescription_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"command":"ls -la"}`)
|
||||
func TestParser_ExtractToolInputBashNoDescription_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"command":"ls -la"}`))
|
||||
result := extractToolInput("Bash", input)
|
||||
assert.Equal(t, "ls -la", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_Read_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"file_path":"/Users/test/main.go","offset":10,"limit":50}`)
|
||||
func TestParser_ExtractToolInputRead_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"file_path":"/Users/test/main.go","offset":10,"limit":50}`))
|
||||
result := extractToolInput("Read", input)
|
||||
assert.Equal(t, "/Users/test/main.go", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_Edit_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"file_path":"/tmp/app.go","old_string":"foo","new_string":"bar"}`)
|
||||
func TestParser_ExtractToolInputEdit_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"file_path":"/tmp/app.go","old_string":"foo","new_string":"bar"}`))
|
||||
result := extractToolInput("Edit", input)
|
||||
assert.Equal(t, "/tmp/app.go (edit)", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_Write_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"file_path":"/tmp/out.txt","content":"hello world"}`)
|
||||
func TestParser_ExtractToolInputWrite_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"file_path":"/tmp/out.txt","content":"hello world"}`))
|
||||
result := extractToolInput("Write", input)
|
||||
assert.Equal(t, "/tmp/out.txt (11 bytes)", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_Grep_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"pattern":"TODO","path":"/src"}`)
|
||||
func TestParser_ExtractToolInputGrep_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"pattern":"TODO","path":"/src"}`))
|
||||
result := extractToolInput("Grep", input)
|
||||
assert.Equal(t, "/TODO/ in /src", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_GrepNoPath_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"pattern":"FIXME"}`)
|
||||
func TestParser_ExtractToolInputGrepNoPath_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"pattern":"FIXME"}`))
|
||||
result := extractToolInput("Grep", input)
|
||||
assert.Equal(t, "/FIXME/ in .", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_Glob_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"pattern":"**/*.go","path":"/src"}`)
|
||||
func TestParser_ExtractToolInputGlob_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"pattern":"**/*.go","path":"/src"}`))
|
||||
result := extractToolInput("Glob", input)
|
||||
assert.Equal(t, "**/*.go", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_Task_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"prompt":"Analyse the codebase","description":"Code review","subagent_type":"research"}`)
|
||||
func TestParser_ExtractToolInputTask_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"prompt":"Analyse the codebase","description":"Code review","subagent_type":"research"}`))
|
||||
result := extractToolInput("Task", input)
|
||||
assert.Equal(t, "[research] Code review", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_TaskNoDescription_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"prompt":"Short prompt","subagent_type":"codegen"}`)
|
||||
func TestParser_ExtractToolInputTaskNoDescription_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"prompt":"Short prompt","subagent_type":"codegen"}`))
|
||||
result := extractToolInput("Task", input)
|
||||
assert.Equal(t, "[codegen] Short prompt", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_UnknownTool_Good(t *testing.T) {
|
||||
input := json.RawMessage(`{"alpha":"one","beta":"two"}`)
|
||||
func TestParser_ExtractToolInputUnknownTool_Good(t *testing.T) {
|
||||
input := rawJSON([]byte(`{"alpha":"one","beta":"two"}`))
|
||||
result := extractToolInput("CustomTool", input)
|
||||
// Fallback: sorted keys
|
||||
assert.Equal(t, "alpha, beta", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_NilInput_Bad(t *testing.T) {
|
||||
func TestParser_ExtractToolInputNilInput_Bad(t *testing.T) {
|
||||
result := extractToolInput("Bash", nil)
|
||||
assert.Equal(t, "", result)
|
||||
}
|
||||
|
||||
func TestExtractToolInput_InvalidJSON_Bad(t *testing.T) {
|
||||
input := json.RawMessage(`{broken`)
|
||||
func TestParser_ExtractToolInputInvalidJSON_Bad(t *testing.T) {
|
||||
input := rawJSON([]byte(`{broken`))
|
||||
result := extractToolInput("Bash", input)
|
||||
// All unmarshals fail, including the fallback map unmarshal
|
||||
assert.Equal(t, "", result)
|
||||
|
|
@ -761,12 +770,12 @@ func TestExtractToolInput_InvalidJSON_Bad(t *testing.T) {
|
|||
|
||||
// --- extractResultContent tests ---
|
||||
|
||||
func TestExtractResultContent_String_Good(t *testing.T) {
|
||||
func TestParser_ExtractResultContentString_Good(t *testing.T) {
|
||||
result := extractResultContent("simple string")
|
||||
assert.Equal(t, "simple string", result)
|
||||
}
|
||||
|
||||
func TestExtractResultContent_Array_Good(t *testing.T) {
|
||||
func TestParser_ExtractResultContentArray_Good(t *testing.T) {
|
||||
content := []any{
|
||||
map[string]any{"type": "text", "text": "line one"},
|
||||
map[string]any{"type": "text", "text": "line two"},
|
||||
|
|
@ -775,45 +784,45 @@ func TestExtractResultContent_Array_Good(t *testing.T) {
|
|||
assert.Equal(t, "line one\nline two", result)
|
||||
}
|
||||
|
||||
func TestExtractResultContent_Map_Good(t *testing.T) {
|
||||
func TestParser_ExtractResultContentMap_Good(t *testing.T) {
|
||||
content := map[string]any{"text": "from map"}
|
||||
result := extractResultContent(content)
|
||||
assert.Equal(t, "from map", result)
|
||||
}
|
||||
|
||||
func TestExtractResultContent_Other_Bad(t *testing.T) {
|
||||
func TestParser_ExtractResultContentOther_Bad(t *testing.T) {
|
||||
result := extractResultContent(42)
|
||||
assert.Equal(t, "42", result)
|
||||
}
|
||||
|
||||
// --- truncate tests ---
|
||||
|
||||
func TestTruncate_Short_Good(t *testing.T) {
|
||||
func TestParser_TruncateShort_Good(t *testing.T) {
|
||||
assert.Equal(t, "hello", truncate("hello", 10))
|
||||
}
|
||||
|
||||
func TestTruncate_Exact_Good(t *testing.T) {
|
||||
func TestParser_TruncateExact_Good(t *testing.T) {
|
||||
assert.Equal(t, "hello", truncate("hello", 5))
|
||||
}
|
||||
|
||||
func TestTruncate_Long_Good(t *testing.T) {
|
||||
func TestParser_TruncateLong_Good(t *testing.T) {
|
||||
result := truncate("hello world", 5)
|
||||
assert.Equal(t, "hello...", result)
|
||||
}
|
||||
|
||||
func TestTruncate_Empty_Good(t *testing.T) {
|
||||
func TestParser_TruncateEmpty_Good(t *testing.T) {
|
||||
assert.Equal(t, "", truncate("", 10))
|
||||
}
|
||||
|
||||
// --- helper function tests ---
|
||||
|
||||
func TestShortID_Good(t *testing.T) {
|
||||
func TestParser_ShortIDTruncatesAndPreservesLength_Good(t *testing.T) {
|
||||
assert.Equal(t, "abcdefgh", shortID("abcdefghijklmnop"))
|
||||
assert.Equal(t, "short", shortID("short"))
|
||||
assert.Equal(t, "12345678", shortID("12345678"))
|
||||
}
|
||||
|
||||
func TestFormatDuration_Good(t *testing.T) {
|
||||
func TestParser_FormatDurationCommonDurations_Good(t *testing.T) {
|
||||
assert.Equal(t, "500ms", formatDuration(500*time.Millisecond))
|
||||
assert.Equal(t, "1.5s", formatDuration(1500*time.Millisecond))
|
||||
assert.Equal(t, "2m30s", formatDuration(2*time.Minute+30*time.Second))
|
||||
|
|
@ -822,7 +831,7 @@ func TestFormatDuration_Good(t *testing.T) {
|
|||
|
||||
// --- ParseStats tests ---
|
||||
|
||||
func TestParseStats_CleanJSONL_Good(t *testing.T) {
|
||||
func TestParser_ParseStatsCleanJSONL_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := writeJSONL(t, dir, "clean.jsonl",
|
||||
userTextEntry(ts(0), "Hello"),
|
||||
|
|
@ -843,7 +852,7 @@ func TestParseStats_CleanJSONL_Good(t *testing.T) {
|
|||
assert.Empty(t, stats.Warnings)
|
||||
}
|
||||
|
||||
func TestParseStats_MalformedLines_Good(t *testing.T) {
|
||||
func TestParser_ParseStatsMalformedLines_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := writeJSONL(t, dir, "malformed-stats.jsonl",
|
||||
`{bad json line one`,
|
||||
|
|
@ -867,7 +876,7 @@ func TestParseStats_MalformedLines_Good(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParseStats_OrphanedToolCalls_Good(t *testing.T) {
|
||||
func TestParser_ParseStatsOrphanedToolCalls_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Two tool_use entries with no matching tool_result
|
||||
path := writeJSONL(t, dir, "orphaned.jsonl",
|
||||
|
|
@ -889,21 +898,21 @@ func TestParseStats_OrphanedToolCalls_Good(t *testing.T) {
|
|||
// Warnings should mention orphaned tool IDs
|
||||
var orphanWarnings int
|
||||
for _, w := range stats.Warnings {
|
||||
if strings.Contains(w, "orphaned tool call") {
|
||||
if core.Contains(w, "orphaned tool call") {
|
||||
orphanWarnings++
|
||||
}
|
||||
}
|
||||
assert.Equal(t, 2, orphanWarnings)
|
||||
}
|
||||
|
||||
func TestParseStats_TruncatedFinalLine_Good(t *testing.T) {
|
||||
func TestParser_ParseStatsTruncatedFinalLine_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
validLine := userTextEntry(ts(0), "Hello")
|
||||
truncatedLine := `{"type":"assi`
|
||||
|
||||
// Write without trailing newline after truncated line
|
||||
path := filepath.Join(dir, "truncfinal.jsonl")
|
||||
require.NoError(t, os.WriteFile(path, []byte(validLine+"\n"+truncatedLine+"\n"), 0644))
|
||||
path := path.Join(dir, "truncfinal.jsonl")
|
||||
require.True(t, hostFS.Write(path, validLine+"\n"+truncatedLine+"\n").OK)
|
||||
|
||||
_, stats, err := ParseTranscript(path)
|
||||
require.NoError(t, err)
|
||||
|
|
@ -914,20 +923,20 @@ func TestParseStats_TruncatedFinalLine_Good(t *testing.T) {
|
|||
// Should detect truncated final line
|
||||
var foundTruncated bool
|
||||
for _, w := range stats.Warnings {
|
||||
if strings.Contains(w, "truncated final line") {
|
||||
if core.Contains(w, "truncated final line") {
|
||||
foundTruncated = true
|
||||
}
|
||||
}
|
||||
assert.True(t, foundTruncated, "should detect truncated final line")
|
||||
}
|
||||
|
||||
func TestParseStats_FileEndingMidJSON_Good(t *testing.T) {
|
||||
func TestParser_ParseStatsFileEndingMidJSON_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
validLine := userTextEntry(ts(0), "Hello")
|
||||
midJSON := `{"type":"assistant","timestamp":"2026-02-20T10:00:01Z","sessionId":"test","message":{"role":"assi`
|
||||
|
||||
path := filepath.Join(dir, "midjson.jsonl")
|
||||
require.NoError(t, os.WriteFile(path, []byte(validLine+"\n"+midJSON+"\n"), 0644))
|
||||
path := path.Join(dir, "midjson.jsonl")
|
||||
require.True(t, hostFS.Write(path, validLine+"\n"+midJSON+"\n").OK)
|
||||
|
||||
sess, stats, err := ParseTranscript(path)
|
||||
require.NoError(t, err)
|
||||
|
|
@ -938,20 +947,20 @@ func TestParseStats_FileEndingMidJSON_Good(t *testing.T) {
|
|||
|
||||
var foundTruncated bool
|
||||
for _, w := range stats.Warnings {
|
||||
if strings.Contains(w, "truncated final line") {
|
||||
if core.Contains(w, "truncated final line") {
|
||||
foundTruncated = true
|
||||
}
|
||||
}
|
||||
assert.True(t, foundTruncated)
|
||||
}
|
||||
|
||||
func TestParseStats_CompleteFileNoTrailingNewline_Good(t *testing.T) {
|
||||
func TestParser_ParseStatsCompleteFileNoTrailingNewline_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
line := userTextEntry(ts(0), "Hello")
|
||||
|
||||
// Write without trailing newline — should still parse fine
|
||||
path := filepath.Join(dir, "nonewline.jsonl")
|
||||
require.NoError(t, os.WriteFile(path, []byte(line), 0644))
|
||||
path := path.Join(dir, "nonewline.jsonl")
|
||||
require.True(t, hostFS.Write(path, line).OK)
|
||||
|
||||
sess, stats, err := ParseTranscript(path)
|
||||
require.NoError(t, err)
|
||||
|
|
@ -965,17 +974,17 @@ func TestParseStats_CompleteFileNoTrailingNewline_Good(t *testing.T) {
|
|||
// No truncation warning since the line parsed successfully
|
||||
var foundTruncated bool
|
||||
for _, w := range stats.Warnings {
|
||||
if strings.Contains(w, "truncated final line") {
|
||||
if core.Contains(w, "truncated final line") {
|
||||
foundTruncated = true
|
||||
}
|
||||
}
|
||||
assert.False(t, foundTruncated)
|
||||
}
|
||||
|
||||
func TestParseStats_WarningPreviewTruncated_Good(t *testing.T) {
|
||||
func TestParser_ParseStatsWarningPreviewTruncated_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// A malformed line longer than 100 chars
|
||||
longBadLine := `{` + strings.Repeat("x", 200)
|
||||
longBadLine := `{` + repeatString("x", 200)
|
||||
path := writeJSONL(t, dir, "longbad.jsonl",
|
||||
longBadLine,
|
||||
userTextEntry(ts(0), "Valid"),
|
||||
|
|
@ -993,13 +1002,13 @@ func TestParseStats_WarningPreviewTruncated_Good(t *testing.T) {
|
|||
|
||||
// --- ParseTranscriptReader (streaming) tests ---
|
||||
|
||||
func TestParseTranscriptReader_MinimalValid_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptReaderMinimalValid_Good(t *testing.T) {
|
||||
// Parse directly from an in-memory reader.
|
||||
data := strings.Join([]string{
|
||||
data := core.Join("\n", []string{
|
||||
userTextEntry(ts(0), "hello"),
|
||||
assistantTextEntry(ts(1), "world"),
|
||||
}, "\n") + "\n"
|
||||
reader := strings.NewReader(data)
|
||||
}...) + "\n"
|
||||
reader := core.NewReader(data)
|
||||
|
||||
sess, stats, err := ParseTranscriptReader(reader, "stream-session")
|
||||
require.NoError(t, err)
|
||||
|
|
@ -1015,15 +1024,15 @@ func TestParseTranscriptReader_MinimalValid_Good(t *testing.T) {
|
|||
assert.Equal(t, 0, stats.SkippedLines)
|
||||
}
|
||||
|
||||
func TestParseTranscriptReader_BytesBuffer_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptReaderBytesBuffer_Good(t *testing.T) {
|
||||
// Parse from a bytes.Buffer (common streaming use case).
|
||||
data := strings.Join([]string{
|
||||
data := core.Join("\n", []string{
|
||||
toolUseEntry(ts(0), "Bash", "tu-buf-1", map[string]any{
|
||||
"command": "echo ok",
|
||||
"description": "test",
|
||||
}),
|
||||
toolResultEntry(ts(1), "tu-buf-1", "ok", false),
|
||||
}, "\n") + "\n"
|
||||
}...) + "\n"
|
||||
buf := bytes.NewBufferString(data)
|
||||
|
||||
sess, _, err := ParseTranscriptReader(buf, "buf-session")
|
||||
|
|
@ -1033,8 +1042,8 @@ func TestParseTranscriptReader_BytesBuffer_Good(t *testing.T) {
|
|||
assert.True(t, sess.Events[0].Success)
|
||||
}
|
||||
|
||||
func TestParseTranscriptReader_EmptyReader_Good(t *testing.T) {
|
||||
reader := strings.NewReader("")
|
||||
func TestParser_ParseTranscriptReaderEmptyReader_Good(t *testing.T) {
|
||||
reader := core.NewReader("")
|
||||
|
||||
sess, stats, err := ParseTranscriptReader(reader, "empty")
|
||||
require.NoError(t, err)
|
||||
|
|
@ -1043,11 +1052,11 @@ func TestParseTranscriptReader_EmptyReader_Good(t *testing.T) {
|
|||
assert.Equal(t, 0, stats.TotalLines)
|
||||
}
|
||||
|
||||
func TestParseTranscriptReader_LargeLines_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptReaderLargeLines_Good(t *testing.T) {
|
||||
// Verify the scanner handles very long lines (> 64KB).
|
||||
longText := strings.Repeat("x", 128*1024) // 128KB of text
|
||||
longText := repeatString("x", 128*1024) // 128KB of text
|
||||
data := userTextEntry(ts(0), longText) + "\n"
|
||||
reader := strings.NewReader(data)
|
||||
reader := core.NewReader(data)
|
||||
|
||||
sess, _, err := ParseTranscriptReader(reader, "big-session")
|
||||
require.NoError(t, err)
|
||||
|
|
@ -1056,14 +1065,14 @@ func TestParseTranscriptReader_LargeLines_Good(t *testing.T) {
|
|||
assert.Len(t, sess.Events[0].Input, 503) // 500 + "..."
|
||||
}
|
||||
|
||||
func TestParseTranscriptReader_MalformedWithStats_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptReaderMalformedWithStats_Good(t *testing.T) {
|
||||
// Malformed lines in a reader should still produce correct stats.
|
||||
data := strings.Join([]string{
|
||||
data := core.Join("\n", []string{
|
||||
`{bad json`,
|
||||
userTextEntry(ts(0), "valid"),
|
||||
`also bad`,
|
||||
}, "\n") + "\n"
|
||||
reader := strings.NewReader(data)
|
||||
}...) + "\n"
|
||||
reader := core.NewReader(data)
|
||||
|
||||
sess, stats, err := ParseTranscriptReader(reader, "mixed")
|
||||
require.NoError(t, err)
|
||||
|
|
@ -1072,15 +1081,15 @@ func TestParseTranscriptReader_MalformedWithStats_Good(t *testing.T) {
|
|||
assert.Equal(t, 2, stats.SkippedLines)
|
||||
}
|
||||
|
||||
func TestParseTranscriptReader_OrphanedTools_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptReaderOrphanedTools_Good(t *testing.T) {
|
||||
// Tool calls without results should be tracked in stats.
|
||||
data := strings.Join([]string{
|
||||
data := core.Join("\n", []string{
|
||||
toolUseEntry(ts(0), "Bash", "orphan-r1", map[string]any{
|
||||
"command": "ls",
|
||||
}),
|
||||
assistantTextEntry(ts(1), "No result arrived"),
|
||||
}, "\n") + "\n"
|
||||
reader := strings.NewReader(data)
|
||||
}...) + "\n"
|
||||
reader := core.NewReader(data)
|
||||
|
||||
_, stats, err := ParseTranscriptReader(reader, "orphan-reader")
|
||||
require.NoError(t, err)
|
||||
|
|
@ -1089,7 +1098,7 @@ func TestParseTranscriptReader_OrphanedTools_Good(t *testing.T) {
|
|||
|
||||
// --- Custom MCP tool tests ---
|
||||
|
||||
func TestParseTranscript_CustomMCPTool_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptCustomMCPTool_Good(t *testing.T) {
|
||||
// A tool_use with a non-standard MCP tool name (e.g. mcp__server__tool).
|
||||
dir := t.TempDir()
|
||||
lines := []string{
|
||||
|
|
@ -1121,7 +1130,7 @@ func TestParseTranscript_CustomMCPTool_Good(t *testing.T) {
|
|||
assert.True(t, toolEvents[0].Success)
|
||||
}
|
||||
|
||||
func TestParseTranscript_CustomMCPToolNestedInput_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptCustomMCPToolNestedInput_Good(t *testing.T) {
|
||||
// MCP tool with nested JSON input — should show top-level keys.
|
||||
dir := t.TempDir()
|
||||
lines := []string{
|
||||
|
|
@ -1148,7 +1157,7 @@ func TestParseTranscript_CustomMCPToolNestedInput_Good(t *testing.T) {
|
|||
assert.Contains(t, toolEvents[0].Input, "query")
|
||||
}
|
||||
|
||||
func TestParseTranscript_UnknownToolEmptyInput_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptUnknownToolEmptyInput_Good(t *testing.T) {
|
||||
// A tool_use with an empty input object.
|
||||
dir := t.TempDir()
|
||||
lines := []string{
|
||||
|
|
@ -1174,7 +1183,7 @@ func TestParseTranscript_UnknownToolEmptyInput_Good(t *testing.T) {
|
|||
|
||||
// --- Edge case error recovery tests ---
|
||||
|
||||
func TestParseTranscript_BinaryGarbage_Ugly(t *testing.T) {
|
||||
func TestParser_ParseTranscriptBinaryGarbage_Ugly(t *testing.T) {
|
||||
// Binary garbage interspersed with valid lines — must not panic.
|
||||
dir := t.TempDir()
|
||||
garbage := string([]byte{0x00, 0x01, 0x02, 0xff, 0xfe, 0xfd})
|
||||
|
|
@ -1200,7 +1209,7 @@ func TestParseTranscript_BinaryGarbage_Ugly(t *testing.T) {
|
|||
assert.Equal(t, 2, stats.SkippedLines)
|
||||
}
|
||||
|
||||
func TestParseTranscript_NullBytes_Ugly(t *testing.T) {
|
||||
func TestParser_ParseTranscriptNullBytes_Ugly(t *testing.T) {
|
||||
// Lines with embedded null bytes.
|
||||
dir := t.TempDir()
|
||||
lines := []string{
|
||||
|
|
@ -1214,11 +1223,11 @@ func TestParseTranscript_NullBytes_Ugly(t *testing.T) {
|
|||
assert.Len(t, sess.Events, 1)
|
||||
}
|
||||
|
||||
func TestParseTranscript_VeryLongLine_Ugly(t *testing.T) {
|
||||
func TestParser_ParseTranscriptVeryLongLine_Ugly(t *testing.T) {
|
||||
// A single line that exceeds the default bufio.Scanner buffer.
|
||||
// The parser should handle this without error thanks to the enlarged buffer.
|
||||
dir := t.TempDir()
|
||||
huge := strings.Repeat("a", 5*1024*1024) // 5MB text
|
||||
huge := repeatString("a", 5*1024*1024) // 5MB text
|
||||
path := writeJSONL(t, dir, "huge_line.jsonl",
|
||||
userTextEntry(ts(0), huge),
|
||||
)
|
||||
|
|
@ -1228,7 +1237,7 @@ func TestParseTranscript_VeryLongLine_Ugly(t *testing.T) {
|
|||
require.Len(t, sess.Events, 1)
|
||||
}
|
||||
|
||||
func TestParseTranscript_MalformedMessageJSON_Bad(t *testing.T) {
|
||||
func TestParser_ParseTranscriptMalformedMessageJSON_Bad(t *testing.T) {
|
||||
// Valid outer JSON but the message field is not valid message structure.
|
||||
dir := t.TempDir()
|
||||
lines := []string{
|
||||
|
|
@ -1244,7 +1253,7 @@ func TestParseTranscript_MalformedMessageJSON_Bad(t *testing.T) {
|
|||
assert.Equal(t, "ok", sess.Events[0].Input)
|
||||
}
|
||||
|
||||
func TestParseTranscript_MalformedContentBlock_Bad(t *testing.T) {
|
||||
func TestParser_ParseTranscriptMalformedContentBlock_Bad(t *testing.T) {
|
||||
// Valid message structure but content blocks are malformed.
|
||||
dir := t.TempDir()
|
||||
lines := []string{
|
||||
|
|
@ -1259,7 +1268,7 @@ func TestParseTranscript_MalformedContentBlock_Bad(t *testing.T) {
|
|||
assert.Equal(t, "still ok", sess.Events[0].Input)
|
||||
}
|
||||
|
||||
func TestParseTranscript_TruncatedMissingBrace_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptTruncatedMissingBrace_Good(t *testing.T) {
|
||||
// Final line is missing its closing brace — should be skipped gracefully.
|
||||
dir := t.TempDir()
|
||||
lines := []string{
|
||||
|
|
@ -1277,7 +1286,7 @@ func TestParseTranscript_TruncatedMissingBrace_Good(t *testing.T) {
|
|||
assert.Equal(t, "also valid", sess.Events[1].Input)
|
||||
}
|
||||
|
||||
func TestParseTranscript_TruncatedMidKey_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptTruncatedMidKey_Good(t *testing.T) {
|
||||
// Line truncated in the middle of a JSON key.
|
||||
dir := t.TempDir()
|
||||
lines := []string{
|
||||
|
|
@ -1292,7 +1301,7 @@ func TestParseTranscript_TruncatedMidKey_Good(t *testing.T) {
|
|||
assert.Equal(t, "first", sess.Events[0].Input)
|
||||
}
|
||||
|
||||
func TestParseTranscript_AllBadLines_Good(t *testing.T) {
|
||||
func TestParser_ParseTranscriptAllBadLines_Good(t *testing.T) {
|
||||
// Every line is truncated/malformed — result should be empty, no error.
|
||||
dir := t.TempDir()
|
||||
lines := []string{
|
||||
|
|
@ -1313,7 +1322,7 @@ func TestParseTranscript_AllBadLines_Good(t *testing.T) {
|
|||
|
||||
// --- PruneSessions tests ---
|
||||
|
||||
func TestPruneSessions_DeletesOldFiles_Good(t *testing.T) {
|
||||
func TestParser_PruneSessionsDeletesOldFiles_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Create a session file with an old modification time.
|
||||
|
|
@ -1322,7 +1331,7 @@ func TestPruneSessions_DeletesOldFiles_Good(t *testing.T) {
|
|||
)
|
||||
// Backdate the file's mtime by 2 hours.
|
||||
oldTime := time.Now().Add(-2 * time.Hour)
|
||||
require.NoError(t, os.Chtimes(path, oldTime, oldTime))
|
||||
require.NoError(t, setFileTimes(path, oldTime, oldTime))
|
||||
|
||||
// Create a recent session file.
|
||||
writeJSONL(t, dir, "new-session.jsonl",
|
||||
|
|
@ -1341,7 +1350,7 @@ func TestPruneSessions_DeletesOldFiles_Good(t *testing.T) {
|
|||
assert.Equal(t, "new-session", sessions[0].ID)
|
||||
}
|
||||
|
||||
func TestPruneSessions_NothingToDelete_Good(t *testing.T) {
|
||||
func TestParser_PruneSessionsNothingToDelete_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
writeJSONL(t, dir, "recent.jsonl",
|
||||
|
|
@ -1353,7 +1362,7 @@ func TestPruneSessions_NothingToDelete_Good(t *testing.T) {
|
|||
assert.Equal(t, 0, deleted)
|
||||
}
|
||||
|
||||
func TestPruneSessions_EmptyDir_Good(t *testing.T) {
|
||||
func TestParser_PruneSessionsEmptyDir_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
deleted, err := PruneSessions(dir, 1*time.Hour)
|
||||
|
|
@ -1363,28 +1372,28 @@ func TestPruneSessions_EmptyDir_Good(t *testing.T) {
|
|||
|
||||
// --- IsExpired tests ---
|
||||
|
||||
func TestIsExpired_RecentSession_Good(t *testing.T) {
|
||||
func TestParser_IsExpiredRecentSession_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
EndTime: time.Now().Add(-5 * time.Minute),
|
||||
}
|
||||
assert.False(t, sess.IsExpired(1*time.Hour))
|
||||
}
|
||||
|
||||
func TestIsExpired_OldSession_Good(t *testing.T) {
|
||||
func TestParser_IsExpiredOldSession_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
EndTime: time.Now().Add(-2 * time.Hour),
|
||||
}
|
||||
assert.True(t, sess.IsExpired(1*time.Hour))
|
||||
}
|
||||
|
||||
func TestIsExpired_ZeroEndTime_Bad(t *testing.T) {
|
||||
func TestParser_IsExpiredZeroEndTime_Bad(t *testing.T) {
|
||||
sess := &Session{}
|
||||
assert.False(t, sess.IsExpired(1*time.Hour))
|
||||
}
|
||||
|
||||
// --- FetchSession tests ---
|
||||
|
||||
func TestFetchSession_ValidID_Good(t *testing.T) {
|
||||
func TestParser_FetchSessionValidID_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeJSONL(t, dir, "abc123.jsonl",
|
||||
userTextEntry(ts(0), "hello"),
|
||||
|
|
@ -1398,7 +1407,7 @@ func TestFetchSession_ValidID_Good(t *testing.T) {
|
|||
assert.Len(t, sess.Events, 1)
|
||||
}
|
||||
|
||||
func TestFetchSession_PathTraversal_Ugly(t *testing.T) {
|
||||
func TestParser_FetchSessionPathTraversal_Ugly(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
_, _, err := FetchSession(dir, "../etc/passwd")
|
||||
|
|
@ -1406,7 +1415,7 @@ func TestFetchSession_PathTraversal_Ugly(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "invalid session id")
|
||||
}
|
||||
|
||||
func TestFetchSession_BackslashTraversal_Ugly(t *testing.T) {
|
||||
func TestParser_FetchSessionBackslashTraversal_Ugly(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
_, _, err := FetchSession(dir, `foo\bar`)
|
||||
|
|
@ -1414,7 +1423,7 @@ func TestFetchSession_BackslashTraversal_Ugly(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "invalid session id")
|
||||
}
|
||||
|
||||
func TestFetchSession_ForwardSlash_Ugly(t *testing.T) {
|
||||
func TestParser_FetchSessionForwardSlash_Ugly(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
_, _, err := FetchSession(dir, "foo/bar")
|
||||
|
|
@ -1422,7 +1431,7 @@ func TestFetchSession_ForwardSlash_Ugly(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "invalid session id")
|
||||
}
|
||||
|
||||
func TestFetchSession_NotFound_Bad(t *testing.T) {
|
||||
func TestParser_FetchSessionNotFound_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
_, _, err := FetchSession(dir, "nonexistent")
|
||||
|
|
@ -1432,7 +1441,7 @@ func TestFetchSession_NotFound_Bad(t *testing.T) {
|
|||
|
||||
// --- ListSessions with truncated files ---
|
||||
|
||||
func TestListSessions_TruncatedFile_Good(t *testing.T) {
|
||||
func TestParser_ListSessionsTruncatedFile_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// A .jsonl file where some lines are truncated — ListSessions should
|
||||
// still extract timestamps from valid lines.
|
||||
|
|
@ -1453,5 +1462,3 @@ func TestListSessions_TruncatedFile_Good(t *testing.T) {
|
|||
}
|
||||
|
||||
// --- PruneSessions tests ---
|
||||
|
||||
|
||||
|
|
|
|||
32
search.go
32
search.go
|
|
@ -3,13 +3,17 @@ package session
|
|||
|
||||
import (
|
||||
"iter"
|
||||
"path/filepath"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// SearchResult represents a match found in a session transcript.
|
||||
//
|
||||
// Example:
|
||||
// result := session.SearchResult{SessionID: "abc123", Tool: "Bash"}
|
||||
type SearchResult struct {
|
||||
SessionID string
|
||||
Timestamp time.Time
|
||||
|
|
@ -18,22 +22,28 @@ type SearchResult struct {
|
|||
}
|
||||
|
||||
// Search finds events matching the query across all sessions in the directory.
|
||||
//
|
||||
// Example:
|
||||
// results, err := session.Search("/tmp/projects", "go test")
|
||||
func Search(projectsDir, query string) ([]SearchResult, error) {
|
||||
return slices.Collect(SearchSeq(projectsDir, query)), nil
|
||||
}
|
||||
|
||||
// SearchSeq returns an iterator over search results matching the query across all sessions.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// for result := range session.SearchSeq("/tmp/projects", "go test") {
|
||||
// _ = result
|
||||
// }
|
||||
func SearchSeq(projectsDir, query string) iter.Seq[SearchResult] {
|
||||
return func(yield func(SearchResult) bool) {
|
||||
matches, err := filepath.Glob(filepath.Join(projectsDir, "*.jsonl"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
matches := core.PathGlob(path.Join(projectsDir, "*.jsonl"))
|
||||
|
||||
query = strings.ToLower(query)
|
||||
query = core.Lower(query)
|
||||
|
||||
for _, path := range matches {
|
||||
sess, _, err := ParseTranscript(path)
|
||||
for _, filePath := range matches {
|
||||
sess, _, err := ParseTranscript(filePath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
|
@ -42,8 +52,8 @@ func SearchSeq(projectsDir, query string) iter.Seq[SearchResult] {
|
|||
if evt.Type != "tool_use" {
|
||||
continue
|
||||
}
|
||||
text := strings.ToLower(evt.Input + " " + evt.Output)
|
||||
if strings.Contains(text, query) {
|
||||
text := core.Lower(core.Concat(evt.Input, " ", evt.Output))
|
||||
if core.Contains(text, query) {
|
||||
matchCtx := evt.Input
|
||||
if matchCtx == "" {
|
||||
matchCtx = truncate(evt.Output, 120)
|
||||
|
|
|
|||
|
|
@ -2,15 +2,14 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSearch_EmptyDir_Good(t *testing.T) {
|
||||
func TestSearch_SearchEmptyDir_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
results, err := Search(dir, "anything")
|
||||
|
|
@ -18,7 +17,7 @@ func TestSearch_EmptyDir_Good(t *testing.T) {
|
|||
assert.Empty(t, results)
|
||||
}
|
||||
|
||||
func TestSearch_NoMatches_Good(t *testing.T) {
|
||||
func TestSearch_SearchNoMatches_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeJSONL(t, dir, "session.jsonl",
|
||||
toolUseEntry(ts(0), "Bash", "tool-1", map[string]any{
|
||||
|
|
@ -32,7 +31,7 @@ func TestSearch_NoMatches_Good(t *testing.T) {
|
|||
assert.Empty(t, results)
|
||||
}
|
||||
|
||||
func TestSearch_SingleMatch_Good(t *testing.T) {
|
||||
func TestSearch_SearchSingleMatch_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeJSONL(t, dir, "session.jsonl",
|
||||
toolUseEntry(ts(0), "Bash", "tool-1", map[string]any{
|
||||
|
|
@ -50,7 +49,7 @@ func TestSearch_SingleMatch_Good(t *testing.T) {
|
|||
assert.Contains(t, results[0].Match, "go test")
|
||||
}
|
||||
|
||||
func TestSearchSeq_SingleMatch_Good(t *testing.T) {
|
||||
func TestSearch_SearchSeqSingleMatch_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeJSONL(t, dir, "session.jsonl",
|
||||
toolUseEntry(ts(0), "Bash", "tool-1", map[string]any{
|
||||
|
|
@ -69,7 +68,7 @@ func TestSearchSeq_SingleMatch_Good(t *testing.T) {
|
|||
assert.Equal(t, "Bash", results[0].Tool)
|
||||
}
|
||||
|
||||
func TestSearch_MultipleMatches_Good(t *testing.T) {
|
||||
func TestSearch_SearchMultipleMatches_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeJSONL(t, dir, "session1.jsonl",
|
||||
toolUseEntry(ts(0), "Bash", "t1", map[string]any{
|
||||
|
|
@ -93,7 +92,7 @@ func TestSearch_MultipleMatches_Good(t *testing.T) {
|
|||
assert.Len(t, results, 3, "should find matches across both sessions")
|
||||
}
|
||||
|
||||
func TestSearch_CaseInsensitive_Good(t *testing.T) {
|
||||
func TestSearch_SearchCaseInsensitive_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeJSONL(t, dir, "session.jsonl",
|
||||
toolUseEntry(ts(0), "Bash", "t1", map[string]any{
|
||||
|
|
@ -107,7 +106,7 @@ func TestSearch_CaseInsensitive_Good(t *testing.T) {
|
|||
assert.Len(t, results, 1, "search should be case-insensitive")
|
||||
}
|
||||
|
||||
func TestSearch_MatchesInOutput_Good(t *testing.T) {
|
||||
func TestSearch_SearchMatchesInOutput_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeJSONL(t, dir, "session.jsonl",
|
||||
toolUseEntry(ts(0), "Bash", "t1", map[string]any{
|
||||
|
|
@ -123,7 +122,7 @@ func TestSearch_MatchesInOutput_Good(t *testing.T) {
|
|||
assert.Contains(t, results[0].Match, "cat log.txt")
|
||||
}
|
||||
|
||||
func TestSearch_SkipsNonToolEvents_Good(t *testing.T) {
|
||||
func TestSearch_SearchSkipsNonToolEvents_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeJSONL(t, dir, "session.jsonl",
|
||||
userTextEntry(ts(0), "Please search for something"),
|
||||
|
|
@ -136,16 +135,17 @@ func TestSearch_SkipsNonToolEvents_Good(t *testing.T) {
|
|||
assert.Empty(t, results, "should only match tool_use events, not user/assistant text")
|
||||
}
|
||||
|
||||
func TestSearch_NonJSONLIgnored_Good(t *testing.T) {
|
||||
func TestSearch_SearchNonJSONLIgnored_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "readme.md"), []byte("go test"), 0644))
|
||||
writeResult := hostFS.Write(path.Join(dir, "readme.md"), "go test")
|
||||
require.True(t, writeResult.OK)
|
||||
|
||||
results, err := Search(dir, "go test")
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, results, "non-JSONL files should be ignored")
|
||||
}
|
||||
|
||||
func TestSearch_MalformedSessionSkipped_Bad(t *testing.T) {
|
||||
func TestSearch_SearchMalformedSessionSkipped_Bad(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// One broken session and one valid session
|
||||
|
|
|
|||
127
video.go
127
video.go
|
|
@ -2,48 +2,48 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"io/fs"
|
||||
"path"
|
||||
"syscall"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// RenderMP4 generates an MP4 video from session events using VHS (charmbracelet).
|
||||
//
|
||||
// Example:
|
||||
// err := session.RenderMP4(sess, "/tmp/session.mp4")
|
||||
func RenderMP4(sess *Session, outputPath string) error {
|
||||
if _, err := exec.LookPath("vhs"); err != nil {
|
||||
return coreerr.E("RenderMP4", "vhs not installed (go install github.com/charmbracelet/vhs@latest)", nil)
|
||||
vhsPath := lookupExecutable("vhs")
|
||||
if vhsPath == "" {
|
||||
return core.E("RenderMP4", "vhs not installed (go install github.com/charmbracelet/vhs@latest)", nil)
|
||||
}
|
||||
|
||||
tape := generateTape(sess, outputPath)
|
||||
|
||||
tmpFile, err := os.CreateTemp("", "session-*.tape")
|
||||
if err != nil {
|
||||
return coreerr.E("RenderMP4", "create tape", err)
|
||||
tmpDir := hostFS.TempDir("session-")
|
||||
if tmpDir == "" {
|
||||
return core.E("RenderMP4", "failed to create temp dir", nil)
|
||||
}
|
||||
defer os.Remove(tmpFile.Name())
|
||||
defer hostFS.DeleteAll(tmpDir)
|
||||
|
||||
if _, err := tmpFile.WriteString(tape); err != nil {
|
||||
tmpFile.Close()
|
||||
return coreerr.E("RenderMP4", "write tape", err)
|
||||
tapePath := path.Join(tmpDir, core.Concat(core.ID(), ".tape"))
|
||||
writeResult := hostFS.Write(tapePath, tape)
|
||||
if !writeResult.OK {
|
||||
return core.E("RenderMP4", "write tape", resultError(writeResult))
|
||||
}
|
||||
tmpFile.Close()
|
||||
|
||||
cmd := exec.Command("vhs", tmpFile.Name())
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return coreerr.E("RenderMP4", "vhs render", err)
|
||||
if err := runCommand(vhsPath, tapePath); err != nil {
|
||||
return core.E("RenderMP4", "vhs render", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateTape(sess *Session, outputPath string) string {
|
||||
var b strings.Builder
|
||||
b := core.NewBuilder()
|
||||
|
||||
b.WriteString(fmt.Sprintf("Output %s\n", outputPath))
|
||||
b.WriteString(core.Sprintf("Output %s\n", outputPath))
|
||||
b.WriteString("Set FontSize 16\n")
|
||||
b.WriteString("Set Width 1400\n")
|
||||
b.WriteString("Set Height 800\n")
|
||||
|
|
@ -57,7 +57,7 @@ func generateTape(sess *Session, outputPath string) string {
|
|||
if len(id) > 8 {
|
||||
id = id[:8]
|
||||
}
|
||||
b.WriteString(fmt.Sprintf("Type \"# Session %s | %s\"\n",
|
||||
b.WriteString(core.Sprintf("Type \"# Session %s | %s\"\n",
|
||||
id, sess.StartTime.Format("2006-01-02 15:04")))
|
||||
b.WriteString("Enter\n")
|
||||
b.WriteString("Sleep 2s\n")
|
||||
|
|
@ -75,7 +75,7 @@ func generateTape(sess *Session, outputPath string) string {
|
|||
continue
|
||||
}
|
||||
// Show the command
|
||||
b.WriteString(fmt.Sprintf("Type %q\n", "$ "+cmd))
|
||||
b.WriteString(core.Sprintf("Type %q\n", "$ "+cmd))
|
||||
b.WriteString("Enter\n")
|
||||
|
||||
// Show abbreviated output
|
||||
|
|
@ -84,11 +84,11 @@ func generateTape(sess *Session, outputPath string) string {
|
|||
output = output[:200] + "..."
|
||||
}
|
||||
if output != "" {
|
||||
for line := range strings.SplitSeq(output, "\n") {
|
||||
for _, line := range core.Split(output, "\n") {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
b.WriteString(fmt.Sprintf("Type %q\n", line))
|
||||
b.WriteString(core.Sprintf("Type %q\n", line))
|
||||
b.WriteString("Enter\n")
|
||||
}
|
||||
}
|
||||
|
|
@ -104,14 +104,14 @@ func generateTape(sess *Session, outputPath string) string {
|
|||
b.WriteString("\n")
|
||||
|
||||
case "Read", "Edit", "Write":
|
||||
b.WriteString(fmt.Sprintf("Type %q\n",
|
||||
fmt.Sprintf("# %s: %s", evt.Tool, truncate(evt.Input, 80))))
|
||||
b.WriteString(core.Sprintf("Type %q\n",
|
||||
core.Sprintf("# %s: %s", evt.Tool, truncate(evt.Input, 80))))
|
||||
b.WriteString("Enter\n")
|
||||
b.WriteString("Sleep 500ms\n")
|
||||
|
||||
case "Task":
|
||||
b.WriteString(fmt.Sprintf("Type %q\n",
|
||||
fmt.Sprintf("# Agent: %s", truncate(evt.Input, 80))))
|
||||
b.WriteString(core.Sprintf("Type %q\n",
|
||||
core.Sprintf("# Agent: %s", truncate(evt.Input, 80))))
|
||||
b.WriteString("Enter\n")
|
||||
b.WriteString("Sleep 1s\n")
|
||||
}
|
||||
|
|
@ -123,8 +123,73 @@ func generateTape(sess *Session, outputPath string) string {
|
|||
|
||||
func extractCommand(input string) string {
|
||||
// Remove description suffix (after " # ")
|
||||
if idx := strings.Index(input, " # "); idx > 0 {
|
||||
if idx := indexOf(input, " # "); idx > 0 {
|
||||
return input[:idx]
|
||||
}
|
||||
return input
|
||||
}
|
||||
|
||||
func lookupExecutable(name string) string {
|
||||
if name == "" {
|
||||
return ""
|
||||
}
|
||||
if containsAny(name, `/\`) {
|
||||
if isExecutablePath(name) {
|
||||
return name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, dir := range core.Split(core.Env("PATH"), ":") {
|
||||
if dir == "" {
|
||||
dir = "."
|
||||
}
|
||||
candidate := path.Join(dir, name)
|
||||
if isExecutablePath(candidate) {
|
||||
return candidate
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func isExecutablePath(filePath string) bool {
|
||||
statResult := hostFS.Stat(filePath)
|
||||
if !statResult.OK {
|
||||
return false
|
||||
}
|
||||
info, ok := statResult.Value.(fs.FileInfo)
|
||||
if !ok || info.IsDir() {
|
||||
return false
|
||||
}
|
||||
return info.Mode()&0111 != 0
|
||||
}
|
||||
|
||||
func runCommand(command string, args ...string) error {
|
||||
argv := append([]string{command}, args...)
|
||||
procAttr := &syscall.ProcAttr{
|
||||
Env: syscall.Environ(),
|
||||
Files: []uintptr{0, 1, 2},
|
||||
}
|
||||
|
||||
pid, err := syscall.ForkExec(command, argv, procAttr)
|
||||
if err != nil {
|
||||
return core.E("runCommand", "fork exec command", err)
|
||||
}
|
||||
|
||||
var status syscall.WaitStatus
|
||||
if _, err := syscall.Wait4(pid, &status, 0, nil); err != nil {
|
||||
return core.E("runCommand", "wait for command", err)
|
||||
}
|
||||
|
||||
if status.Exited() && status.ExitStatus() == 0 {
|
||||
return nil
|
||||
}
|
||||
if status.Signaled() {
|
||||
return core.E("runCommand", core.Sprintf("command terminated by signal %d", status.Signal()), nil)
|
||||
}
|
||||
if status.Exited() {
|
||||
return core.E("runCommand", core.Sprintf("command exited with status %d", status.ExitStatus()), nil)
|
||||
}
|
||||
|
||||
return core.E("runCommand", "command failed", nil)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,16 +2,15 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGenerateTape_BasicSession_Good(t *testing.T) {
|
||||
func TestVideo_GenerateTapeBasicSession_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "tape-test-12345678",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -45,7 +44,7 @@ func TestGenerateTape_BasicSession_Good(t *testing.T) {
|
|||
assert.Contains(t, tape, "# Read: /tmp/file.go")
|
||||
}
|
||||
|
||||
func TestGenerateTape_SkipsNonToolEvents_Good(t *testing.T) {
|
||||
func TestVideo_GenerateTapeSkipsNonToolEvents_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "skip-test",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -65,7 +64,7 @@ func TestGenerateTape_SkipsNonToolEvents_Good(t *testing.T) {
|
|||
assert.Contains(t, tape, "echo hi")
|
||||
}
|
||||
|
||||
func TestGenerateTape_FailedCommand_Good(t *testing.T) {
|
||||
func TestVideo_GenerateTapeFailedCommand_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "fail-test",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -84,7 +83,7 @@ func TestGenerateTape_FailedCommand_Good(t *testing.T) {
|
|||
assert.Contains(t, tape, `"# ✗ FAILED"`)
|
||||
}
|
||||
|
||||
func TestGenerateTape_LongOutput_Good(t *testing.T) {
|
||||
func TestVideo_GenerateTapeLongOutput_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "long-test",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -93,7 +92,7 @@ func TestGenerateTape_LongOutput_Good(t *testing.T) {
|
|||
Type: "tool_use",
|
||||
Tool: "Bash",
|
||||
Input: "cat huge.log",
|
||||
Output: strings.Repeat("x", 300),
|
||||
Output: repeatString("x", 300),
|
||||
Success: true,
|
||||
},
|
||||
},
|
||||
|
|
@ -104,7 +103,7 @@ func TestGenerateTape_LongOutput_Good(t *testing.T) {
|
|||
assert.Contains(t, tape, "...")
|
||||
}
|
||||
|
||||
func TestGenerateTape_TaskEvent_Good(t *testing.T) {
|
||||
func TestVideo_GenerateTapeTaskEvent_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "task-test",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -121,7 +120,7 @@ func TestGenerateTape_TaskEvent_Good(t *testing.T) {
|
|||
assert.Contains(t, tape, "# Agent: [research] Analyse code structure")
|
||||
}
|
||||
|
||||
func TestGenerateTape_EditWriteEvents_Good(t *testing.T) {
|
||||
func TestVideo_GenerateTapeEditWriteEvents_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "edit-test",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -136,7 +135,7 @@ func TestGenerateTape_EditWriteEvents_Good(t *testing.T) {
|
|||
assert.Contains(t, tape, "# Write: /tmp/new.go (50 bytes)")
|
||||
}
|
||||
|
||||
func TestGenerateTape_EmptySession_Good(t *testing.T) {
|
||||
func TestVideo_GenerateTapeEmptySession_Good(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "empty-test",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -149,18 +148,18 @@ func TestGenerateTape_EmptySession_Good(t *testing.T) {
|
|||
assert.Contains(t, tape, "Output /tmp/out.mp4")
|
||||
assert.Contains(t, tape, "Sleep 3s")
|
||||
// No tool events
|
||||
lines := strings.Split(tape, "\n")
|
||||
lines := core.Split(tape, "\n")
|
||||
var toolLines int
|
||||
for _, line := range lines {
|
||||
if strings.Contains(line, "$ ") || strings.Contains(line, "# Read:") ||
|
||||
strings.Contains(line, "# Edit:") || strings.Contains(line, "# Write:") {
|
||||
if core.Contains(line, "$ ") || core.Contains(line, "# Read:") ||
|
||||
core.Contains(line, "# Edit:") || core.Contains(line, "# Write:") {
|
||||
toolLines++
|
||||
}
|
||||
}
|
||||
assert.Equal(t, 0, toolLines)
|
||||
}
|
||||
|
||||
func TestGenerateTape_BashEmptyCommand_Bad(t *testing.T) {
|
||||
func TestVideo_GenerateTapeBashEmptyCommand_Bad(t *testing.T) {
|
||||
sess := &Session{
|
||||
ID: "empty-cmd",
|
||||
StartTime: time.Date(2026, 2, 20, 10, 0, 0, 0, time.UTC),
|
||||
|
|
@ -174,25 +173,25 @@ func TestGenerateTape_BashEmptyCommand_Bad(t *testing.T) {
|
|||
assert.NotContains(t, tape, `"$ "`)
|
||||
}
|
||||
|
||||
func TestExtractCommand_Good(t *testing.T) {
|
||||
func TestVideo_ExtractCommandStripsDescriptionSuffix_Good(t *testing.T) {
|
||||
assert.Equal(t, "ls -la", extractCommand("ls -la # list files"))
|
||||
assert.Equal(t, "go test ./...", extractCommand("go test ./..."))
|
||||
assert.Equal(t, "echo hello", extractCommand("echo hello"))
|
||||
}
|
||||
|
||||
func TestExtractCommand_NoDescription_Good(t *testing.T) {
|
||||
func TestVideo_ExtractCommandNoDescription_Good(t *testing.T) {
|
||||
assert.Equal(t, "plain command", extractCommand("plain command"))
|
||||
}
|
||||
|
||||
func TestExtractCommand_DescriptionAtStart_Good(t *testing.T) {
|
||||
func TestVideo_ExtractCommandDescriptionAtStart_Good(t *testing.T) {
|
||||
// " # " at position 0 means idx <= 0, so it returns the whole input
|
||||
result := extractCommand(" # description only")
|
||||
assert.Equal(t, " # description only", result)
|
||||
}
|
||||
|
||||
func TestRenderMP4_NoVHS_Ugly(t *testing.T) {
|
||||
func TestVideo_RenderMP4NoVHS_Ugly(t *testing.T) {
|
||||
// Skip if vhs is actually installed (this tests the error path)
|
||||
if _, err := exec.LookPath("vhs"); err == nil {
|
||||
if lookupExecutable("vhs") != "" {
|
||||
t.Skip("vhs is installed; skipping missing-vhs test")
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue