fix(ax): remove legacy status readers

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
Virgil 2026-03-30 20:20:50 +00:00
parent dc6f2e2073
commit 6bc0eb7e46
12 changed files with 305 additions and 334 deletions

View file

@ -288,8 +288,7 @@ func TestDispatch_AgentCompletionMonitor_Good(t *testing.T) {
r := monitor.run(context.Background(), core.NewOptions())
assert.True(t, r.OK)
updated, err := ReadStatus(wsDir)
require.NoError(t, err)
updated := mustReadStatus(t, wsDir)
assert.Equal(t, "completed", updated.Status)
assert.Equal(t, 0, updated.PID)
@ -345,8 +344,7 @@ func TestDispatch_AgentCompletionMonitor_Ugly(t *testing.T) {
r := monitor.run(context.Background(), core.NewOptions())
assert.True(t, r.OK)
updated, err := ReadStatus(wsDir)
require.NoError(t, err)
updated := mustReadStatus(t, wsDir)
assert.Equal(t, "blocked", updated.Status)
assert.Equal(t, "Need credentials", updated.Question)
assert.False(t, fs.Exists(core.JoinPath(metaDir, "agent-codex.log")))
@ -371,8 +369,7 @@ func TestDispatch_OnAgentComplete_Good(t *testing.T) {
outputFile := core.JoinPath(metaDir, "agent-codex.log")
s.onAgentComplete("codex", wsDir, outputFile, 0, "completed", "test output")
updated, err := ReadStatus(wsDir)
require.NoError(t, err)
updated := mustReadStatus(t, wsDir)
assert.Equal(t, "completed", updated.Status)
assert.Equal(t, 0, updated.PID)
@ -397,7 +394,7 @@ func TestDispatch_OnAgentComplete_Bad(t *testing.T) {
s := newPrepWithProcess()
s.onAgentComplete("codex", wsDir, core.JoinPath(metaDir, "agent-codex.log"), 1, "failed", "error")
updated, _ := ReadStatus(wsDir)
updated := mustReadStatus(t, wsDir)
assert.Equal(t, "failed", updated.Status)
assert.Contains(t, updated.Question, "code 1")
}
@ -419,7 +416,7 @@ func TestDispatch_OnAgentComplete_Ugly(t *testing.T) {
s := newPrepWithProcess()
s.onAgentComplete("codex", wsDir, core.JoinPath(metaDir, "agent-codex.log"), 0, "completed", "")
updated, _ := ReadStatus(wsDir)
updated := mustReadStatus(t, wsDir)
assert.Equal(t, "blocked", updated.Status)
assert.Equal(t, "Need credentials", updated.Question)

View file

@ -336,7 +336,7 @@ func TestQueue_CountRunningByAgent_Ugly_CorruptStatusJSON(t *testing.T) {
failCount: make(map[string]int),
}
// Corrupt status.json → ReadStatus fails → skipped → count is 0
// Corrupt status.json → ReadStatusResult fails → skipped → count is 0
assert.Equal(t, 0, s.countRunningByAgent("codex"))
}
@ -648,7 +648,6 @@ func TestQueue_DrainQueue_Bad_FrozenQueueDoesNothing(t *testing.T) {
assert.NotPanics(t, func() { s.drainQueue() })
// Workspace should still be queued
updated, err := ReadStatus(ws)
require.NoError(t, err)
updated := mustReadStatus(t, ws)
assert.Equal(t, "queued", updated.Status)
}

View file

@ -90,28 +90,6 @@ func writeStatusResult(wsDir string, status *WorkspaceStatus) core.Result {
return core.Result{OK: true}
}
// ReadStatus parses the status.json in a workspace directory.
//
// Deprecated: use ReadStatusResult.
//
// st, err := agentic.ReadStatus("/path/to/workspace")
func ReadStatus(wsDir string) (*WorkspaceStatus, error) {
r := ReadStatusResult(wsDir)
if !r.OK {
err, _ := r.Value.(error)
if err == nil {
return nil, core.E("ReadStatus", "failed to read status", nil)
}
return nil, err
}
st, ok := r.Value.(*WorkspaceStatus)
if !ok || st == nil {
return nil, core.E("ReadStatus", "invalid status payload", nil)
}
return st, nil
}
// ReadStatusResult parses status.json and returns a WorkspaceStatus pointer.
//
// r := ReadStatusResult("/path/to/workspace")

View file

@ -34,8 +34,8 @@ func TestStatus_EmptyWorkspace_Good(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.status(context.Background(), nil, StatusInput{})
@ -89,8 +89,8 @@ func TestStatus_MixedWorkspaces_Good(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.status(context.Background(), nil, StatusInput{})
@ -120,8 +120,8 @@ func TestStatus_DeepLayout_Good(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.status(context.Background(), nil, StatusInput{})
@ -141,8 +141,8 @@ func TestStatus_CorruptStatus_Good(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.status(context.Background(), nil, StatusInput{})
@ -158,10 +158,10 @@ func TestShutdown_DispatchStart_Good(t *testing.T) {
c := coreWithRunnerActions()
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
frozen: true,
pokeCh: make(chan struct{}, 1),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: true,
pokeCh: make(chan struct{}, 1),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.dispatchStart(context.Background(), nil, ShutdownInput{})
@ -178,9 +178,9 @@ func TestShutdown_ShutdownGraceful_Good(t *testing.T) {
c := coreWithRunnerActions()
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.shutdownGraceful(context.Background(), nil, ShutdownInput{})
@ -198,9 +198,9 @@ func TestShutdown_ShutdownNow_Good_EmptyWorkspace(t *testing.T) {
c := coreWithRunnerActions()
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.shutdownNow(context.Background(), nil, ShutdownInput{})
@ -230,8 +230,8 @@ func TestShutdown_ShutdownNow_Good_ClearsQueued(t *testing.T) {
c := coreWithRunnerActions()
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.shutdownNow(context.Background(), nil, ShutdownInput{})
@ -258,10 +258,10 @@ func TestPrep_BrainRecall_Good_Success(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
brainURL: srv.URL,
brainKey: "test-brain-key",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
brainURL: srv.URL,
brainKey: "test-brain-key",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result, count := s.brainRecall(context.Background(), "go-core")
@ -280,10 +280,10 @@ func TestPrep_BrainRecall_Good_NoMemories(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
brainURL: srv.URL,
brainKey: "test-brain-key",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
brainURL: srv.URL,
brainKey: "test-brain-key",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result, count := s.brainRecall(context.Background(), "go-core")
@ -294,9 +294,9 @@ func TestPrep_BrainRecall_Good_NoMemories(t *testing.T) {
func TestPrep_BrainRecall_Bad_NoBrainKey(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
brainKey: "",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
brainKey: "",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result, count := s.brainRecall(context.Background(), "go-core")
@ -312,10 +312,10 @@ func TestPrep_BrainRecall_Bad_ServerError(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
brainURL: srv.URL,
brainKey: "test-brain-key",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
brainURL: srv.URL,
brainKey: "test-brain-key",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result, count := s.brainRecall(context.Background(), "go-core")
@ -328,8 +328,8 @@ func TestPrep_BrainRecall_Bad_ServerError(t *testing.T) {
func TestPrep_PrepWorkspace_Bad_NoRepo(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, _, err := s.prepWorkspace(context.Background(), nil, PrepInput{})
@ -343,9 +343,9 @@ func TestPrep_PrepWorkspace_Bad_NoIdentifier(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
codePath: t.TempDir(),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
codePath: t.TempDir(),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, _, err := s.prepWorkspace(context.Background(), nil, PrepInput{
@ -361,9 +361,9 @@ func TestPrep_PrepWorkspace_Bad_InvalidRepoName(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
codePath: t.TempDir(),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
codePath: t.TempDir(),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, _, err := s.prepWorkspace(context.Background(), nil, PrepInput{
@ -397,11 +397,11 @@ func TestPr_ListPRs_Good_SpecificRepo(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.listPRs(context.Background(), nil, ListPRsInput{
@ -423,9 +423,9 @@ func TestRunner_Poke_Good_SendsSignal(t *testing.T) {
// Verify it does not panic and does not send to the channel.
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
pokeCh: make(chan struct{}, 1),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
pokeCh: make(chan struct{}, 1),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
assert.NotPanics(t, func() { s.Poke() })
@ -435,9 +435,9 @@ func TestRunner_Poke_Good_SendsSignal(t *testing.T) {
func TestRunner_Poke_Good_NonBlocking(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
pokeCh: make(chan struct{}, 1),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
pokeCh: make(chan struct{}, 1),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// Fill the channel
@ -452,9 +452,9 @@ func TestRunner_Poke_Good_NonBlocking(t *testing.T) {
func TestRunner_Poke_Bad_NilChannel(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
pokeCh: nil,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
pokeCh: nil,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// Should not panic with nil channel
@ -463,7 +463,7 @@ func TestRunner_Poke_Bad_NilChannel(t *testing.T) {
})
}
// --- ReadStatus / writeStatus (extended) ---
// --- ReadStatusResult / writeStatus (extended) ---
func TestStatus_WriteRead_Good_WithPID(t *testing.T) {
dir := t.TempDir()
@ -479,8 +479,7 @@ func TestStatus_WriteRead_Good_WithPID(t *testing.T) {
require.NoError(t, err)
// Read it back
got, err := ReadStatus(dir)
require.NoError(t, err)
got := mustReadStatus(t, dir)
assert.Equal(t, "running", got.Status)
assert.Equal(t, "codex", got.Agent)
assert.Equal(t, "go-io", got.Repo)
@ -509,8 +508,7 @@ func TestStatus_WriteRead_Good_AllFields(t *testing.T) {
err := writeStatus(dir, st)
require.NoError(t, err)
got, err := ReadStatus(dir)
require.NoError(t, err)
got := mustReadStatus(t, dir)
assert.Equal(t, "blocked", got.Status)
assert.Equal(t, "claude", got.Agent)
assert.Equal(t, "core", got.Org)
@ -525,9 +523,9 @@ func TestStatus_WriteRead_Good_AllFields(t *testing.T) {
func TestPrep_OnShutdown_Good(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
r := s.OnShutdown(context.Background())
@ -543,9 +541,9 @@ func TestQueue_DrainQueue_Good_FrozenDoesNothing(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
frozen: true,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: true,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// Should return immediately when frozen
@ -576,9 +574,9 @@ func TestShutdown_ShutdownNow_Ugly_DeepLayout(t *testing.T) {
c := coreWithRunnerActions()
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.shutdownNow(context.Background(), nil, ShutdownInput{})
@ -594,10 +592,10 @@ func TestShutdown_DispatchStart_Bad_NilPokeCh(t *testing.T) {
c := coreWithRunnerActions()
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
frozen: true,
pokeCh: nil,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: true,
pokeCh: nil,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.dispatchStart(context.Background(), nil, ShutdownInput{})
@ -609,10 +607,10 @@ func TestShutdown_DispatchStart_Bad_NilPokeCh(t *testing.T) {
func TestShutdown_DispatchStart_Ugly_AlreadyUnfrozen(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
frozen: false, // already unfrozen
pokeCh: make(chan struct{}, 1),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: false, // already unfrozen
pokeCh: make(chan struct{}, 1),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.dispatchStart(context.Background(), nil, ShutdownInput{})
@ -630,9 +628,9 @@ func TestShutdown_ShutdownGraceful_Bad_AlreadyFrozen(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
frozen: true, // already frozen
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: true, // already frozen
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.shutdownGraceful(context.Background(), nil, ShutdownInput{})
@ -663,9 +661,9 @@ func TestShutdown_ShutdownGraceful_Ugly_WithWorkspaces(t *testing.T) {
c := coreWithRunnerActions()
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.shutdownGraceful(context.Background(), nil, ShutdownInput{})
@ -697,9 +695,9 @@ func TestShutdown_ShutdownNow_Bad_NoRunningPIDs(t *testing.T) {
c := coreWithRunnerActions()
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(c, AgentOptions{}),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
frozen: false,
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
_, out, err := s.shutdownNow(context.Background(), nil, ShutdownInput{})

View file

@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/require"
)
// --- ReadStatus ---
// --- ReadStatusResult ---
func TestStatus_ReadStatus_Good_AllFields(t *testing.T) {
dir := t.TempDir()
@ -34,8 +34,7 @@ func TestStatus_ReadStatus_Good_AllFields(t *testing.T) {
}
require.True(t, fs.Write(core.JoinPath(dir, "status.json"), core.JSONMarshalString(original)).OK)
st, err := ReadStatus(dir)
require.NoError(t, err)
st := mustReadStatus(t, dir)
assert.Equal(t, original.Status, st.Status)
assert.Equal(t, original.Agent, st.Agent)
@ -50,25 +49,28 @@ func TestStatus_ReadStatus_Good_AllFields(t *testing.T) {
func TestStatus_ReadStatus_Bad_MissingFile(t *testing.T) {
dir := t.TempDir()
_, err := ReadStatus(dir)
assert.Error(t, err, "missing status.json must return an error")
result := ReadStatusResult(dir)
assert.False(t, result.OK)
_, ok := result.Value.(error)
require.True(t, ok)
}
func TestStatus_ReadStatus_Bad_CorruptJSON(t *testing.T) {
dir := t.TempDir()
require.True(t, fs.Write(core.JoinPath(dir, "status.json"), `{"status": "running", broken`).OK)
_, err := ReadStatus(dir)
assert.Error(t, err, "corrupt JSON must return an error")
result := ReadStatusResult(dir)
assert.False(t, result.OK)
_, ok := result.Value.(error)
require.True(t, ok)
}
func TestStatus_ReadStatus_Bad_NullJSON(t *testing.T) {
dir := t.TempDir()
require.True(t, fs.Write(core.JoinPath(dir, "status.json"), "null").OK)
// null is valid JSON — ReadStatus returns a zero-value struct, not an error
st, err := ReadStatus(dir)
require.NoError(t, err)
// null is valid JSON — ReadStatusResult returns a zero-value struct, not an error
st := mustReadStatus(t, dir)
assert.Equal(t, "", st.Status)
}
@ -87,8 +89,7 @@ func TestStatus_WriteStatus_Good_WritesAndReadsBack(t *testing.T) {
err := writeStatus(dir, st)
require.NoError(t, err)
read, err := ReadStatus(dir)
require.NoError(t, err)
read := mustReadStatus(t, dir)
assert.Equal(t, "queued", read.Status)
assert.Equal(t, "gemini:pro", read.Agent)
assert.Equal(t, "go-log", read.Repo)
@ -112,8 +113,7 @@ func TestStatus_WriteStatus_Good_Overwrites(t *testing.T) {
require.NoError(t, writeStatus(dir, &WorkspaceStatus{Status: "running", Agent: "gemini"}))
require.NoError(t, writeStatus(dir, &WorkspaceStatus{Status: "completed", Agent: "gemini"}))
st, err := ReadStatus(dir)
require.NoError(t, err)
st := mustReadStatus(t, dir)
assert.Equal(t, "completed", st.Status)
}

View file

@ -12,6 +12,17 @@ import (
"github.com/stretchr/testify/require"
)
// mustReadStatus reads and decodes status.json in dir via ReadStatusResult,
// failing the calling test immediately on any error. It is the test-side
// replacement for the removed legacy ReadStatus wrapper.
func mustReadStatus(t *testing.T, dir string) *WorkspaceStatus {
t.Helper()
// ReadStatusResult reports success through Result.OK; a false value here
// means the file was missing, unreadable, or contained invalid JSON.
result := ReadStatusResult(dir)
require.True(t, result.OK)
// workspaceStatusValue extracts the *WorkspaceStatus payload from the
// Result — presumably a checked type assertion on result.Value; confirm
// against its definition (not visible in this view).
status, ok := workspaceStatusValue(result)
require.True(t, ok)
return status
}
func TestStatus_WriteStatus_Good(t *testing.T) {
dir := t.TempDir()
status := &WorkspaceStatus{
@ -76,8 +87,7 @@ func TestStatus_ReadStatus_Good(t *testing.T) {
require.True(t, fs.Write(core.JoinPath(dir, "status.json"), core.JSONMarshalString(status)).OK)
read, err := ReadStatus(dir)
require.NoError(t, err)
read := mustReadStatus(t, dir)
assert.Equal(t, "completed", read.Status)
assert.Equal(t, "codex", read.Agent)
@ -140,16 +150,20 @@ func TestStatus_ReadStatusResult_Ugly_InvalidJSON(t *testing.T) {
func TestStatus_ReadStatus_Bad_NoFile(t *testing.T) {
dir := t.TempDir()
_, err := ReadStatus(dir)
assert.Error(t, err)
result := ReadStatusResult(dir)
assert.False(t, result.OK)
_, ok := result.Value.(error)
assert.True(t, ok)
}
func TestStatus_ReadStatus_Bad_InvalidJSON(t *testing.T) {
dir := t.TempDir()
require.True(t, fs.Write(core.JoinPath(dir, "status.json"), "not json{").OK)
_, err := ReadStatus(dir)
assert.Error(t, err)
result := ReadStatusResult(dir)
assert.False(t, result.OK)
_, ok := result.Value.(error)
assert.True(t, ok)
}
func TestStatus_ReadStatus_Good_BlockedWithQuestion(t *testing.T) {
@ -164,8 +178,7 @@ func TestStatus_ReadStatus_Good_BlockedWithQuestion(t *testing.T) {
require.True(t, fs.Write(core.JoinPath(dir, "status.json"), core.JSONMarshalString(status)).OK)
read, err := ReadStatus(dir)
require.NoError(t, err)
read := mustReadStatus(t, dir)
assert.Equal(t, "blocked", read.Status)
assert.Equal(t, "Which interface should I implement?", read.Question)
@ -190,8 +203,7 @@ func TestStatus_WriteRead_Good_Roundtrip(t *testing.T) {
err := writeStatus(dir, original)
require.NoError(t, err)
read, err := ReadStatus(dir)
require.NoError(t, err)
read := mustReadStatus(t, dir)
assert.Equal(t, original.Status, read.Status)
assert.Equal(t, original.Agent, read.Agent)
@ -215,8 +227,7 @@ func TestStatus_WriteStatus_Good_OverwriteExisting(t *testing.T) {
err = writeStatus(dir, second)
require.NoError(t, err)
read, err := ReadStatus(dir)
require.NoError(t, err)
read := mustReadStatus(t, dir)
assert.Equal(t, "completed", read.Status)
}
@ -224,8 +235,10 @@ func TestStatus_ReadStatus_Ugly_EmptyFile(t *testing.T) {
dir := t.TempDir()
require.True(t, fs.Write(core.JoinPath(dir, "status.json"), "").OK)
_, err := ReadStatus(dir)
assert.Error(t, err)
result := ReadStatusResult(dir)
assert.False(t, result.OK)
_, ok := result.Value.(error)
assert.True(t, ok)
}
// --- status() dead PID detection ---
@ -290,16 +303,13 @@ func TestStatus_Status_Ugly(t *testing.T) {
assert.Equal(t, 1, out.Failed)
// Verify statuses were persisted to disk
st1, err := ReadStatus(ws1)
require.NoError(t, err)
st1 := mustReadStatus(t, ws1)
assert.Equal(t, "blocked", st1.Status)
st2, err := ReadStatus(ws2)
require.NoError(t, err)
st2 := mustReadStatus(t, ws2)
assert.Equal(t, "completed", st2.Status)
st3, err := ReadStatus(ws3)
require.NoError(t, err)
st3 := mustReadStatus(t, ws3)
assert.Equal(t, "failed", st3.Status)
assert.Equal(t, "Agent process died (no output log)", st3.Question)
}
@ -332,8 +342,7 @@ func TestStatus_WriteStatus_Ugly(t *testing.T) {
assert.False(t, original.UpdatedAt.IsZero(), "writeStatus must set UpdatedAt")
// Read back and verify every field
read, err := ReadStatus(dir)
require.NoError(t, err)
read := mustReadStatus(t, dir)
assert.Equal(t, "blocked", read.Status)
assert.Equal(t, "gemini:flash", read.Agent)

View file

@ -12,7 +12,6 @@ import (
core "dappco.re/go/core"
"dappco.re/go/core/forge"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// --- commentOnIssue ---
@ -33,11 +32,11 @@ func TestPr_CommentOnIssue_Good_PostsCommentOnPR(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
s.commentOnIssue(context.Background(), "core", "repo", 7, "Test comment")
@ -80,11 +79,11 @@ func TestVerify_AutoVerifyAndMerge_Good_FullPipeline(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
s.autoVerifyAndMerge(wsDir)
@ -92,8 +91,7 @@ func TestVerify_AutoVerifyAndMerge_Good_FullPipeline(t *testing.T) {
assert.True(t, commented, "should have posted comment")
// Status should be marked as merged
updated, err := ReadStatus(wsDir)
require.NoError(t, err)
updated := mustReadStatus(t, wsDir)
assert.Equal(t, "merged", updated.Status)
}
@ -113,11 +111,11 @@ func TestVerify_AttemptVerifyAndMerge_Good_TestsPassMergeSucceeds(t *testing.T)
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.attemptVerifyAndMerge(dir, "core", "test", "agent/fix", 1)
@ -139,11 +137,11 @@ func TestVerify_AttemptVerifyAndMerge_Bad_MergeFails(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.attemptVerifyAndMerge(dir, "core", "test", "agent/fix", 1)

View file

@ -34,10 +34,10 @@ func TestVerify_ForgeMergePR_Good_Success(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-forge-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-forge-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
r := s.forgeMergePR(context.Background(), "core", "test-repo", 42)
@ -52,10 +52,10 @@ func TestVerify_ForgeMergePR_Good_204Response(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
r := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
@ -73,10 +73,10 @@ func TestVerify_ForgeMergePR_Bad_ConflictResponse(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
r := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
@ -95,10 +95,10 @@ func TestVerify_ForgeMergePR_Bad_ServerError(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
r := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
@ -112,10 +112,10 @@ func TestVerify_ForgeMergePR_Bad_NetworkError(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
r := s.forgeMergePR(context.Background(), "core", "test-repo", 1)
@ -161,10 +161,10 @@ func TestVerify_EnsureLabel_Good_CreatesLabel(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
s.ensureLabel(context.Background(), "core", "test-repo", "needs-review", "e11d48")
@ -177,10 +177,10 @@ func TestVerify_EnsureLabel_Bad_NetworkError(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// Should not panic
@ -202,10 +202,10 @@ func TestVerify_GetLabelID_Good_Found(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
id := s.getLabelID(context.Background(), "core", "test-repo", "needs-review")
@ -222,10 +222,10 @@ func TestVerify_GetLabelID_Bad_NotFound(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
id := s.getLabelID(context.Background(), "core", "test-repo", "missing-label")
@ -238,10 +238,10 @@ func TestVerify_GetLabelID_Bad_NetworkError(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
id := s.getLabelID(context.Background(), "core", "test-repo", "any")
@ -255,8 +255,8 @@ func TestVerify_RunVerification_Good_NoProjectFile(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runVerification(dir)
@ -270,8 +270,8 @@ func TestVerify_RunVerification_Good_GoProject(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runVerification(dir)
@ -285,8 +285,8 @@ func TestVerify_RunVerification_Good_PHPProject(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runVerification(dir)
@ -300,8 +300,8 @@ func TestVerify_RunVerification_Good_NodeProject(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runVerification(dir)
@ -314,8 +314,8 @@ func TestVerify_RunVerification_Good_NodeNoTestScript(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runVerification(dir)
@ -348,8 +348,8 @@ func TestVerify_AutoVerifyAndMerge_Bad_NoStatus(t *testing.T) {
dir := t.TempDir()
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// Should not panic when status.json is missing
assert.NotPanics(t, func() {
@ -367,8 +367,8 @@ func TestVerify_AutoVerifyAndMerge_Bad_NoPRURL(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// Should return early — no PR URL
@ -386,8 +386,8 @@ func TestVerify_AutoVerifyAndMerge_Bad_EmptyRepo(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
assert.NotPanics(t, func() {
@ -406,8 +406,8 @@ func TestVerify_AutoVerifyAndMerge_Bad_InvalidPRURL(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// extractPRNumber returns 0 for invalid URL, so autoVerifyAndMerge returns early
@ -449,11 +449,11 @@ func TestVerify_FlagForReview_Good_AddsLabel(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
s.flagForReview("core", "test-repo", 42, testFailed)
@ -482,11 +482,11 @@ func TestVerify_FlagForReview_Good_MergeConflictMessage(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
s.flagForReview("core", "test-repo", 1, mergeConflict)
@ -530,8 +530,8 @@ func TestVerify_AutoVerifyAndMerge_Ugly(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// PR number is 0 → should return early without panicking
@ -540,8 +540,7 @@ func TestVerify_AutoVerifyAndMerge_Ugly(t *testing.T) {
})
// Status should remain unchanged (not "merged")
st, err := ReadStatus(dir)
require.NoError(t, err)
st := mustReadStatus(t, dir)
assert.Equal(t, "completed", st.Status)
}
@ -569,11 +568,11 @@ func TestVerify_AttemptVerifyAndMerge_Ugly(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.attemptVerifyAndMerge(dir, "core", "test-repo", "agent/fix", 42)
@ -606,10 +605,10 @@ func TestVerify_EnsureLabel_Ugly_AlreadyExists409(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// Should not panic on 409 — ensureLabel is fire-and-forget
@ -628,10 +627,10 @@ func TestVerify_GetLabelID_Ugly_EmptyArray(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
id := s.getLabelID(context.Background(), "core", "test-repo", "needs-review")
@ -649,10 +648,10 @@ func TestVerify_ForgeMergePR_Ugly_EmptyBody200(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
r := s.forgeMergePR(context.Background(), "core", "test-repo", 42)
@ -681,11 +680,11 @@ func TestVerify_FlagForReview_Bad_AllAPICallsFail(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// Should not panic when all API calls (ensureLabel, getLabelID, add label, comment) fail
@ -708,11 +707,11 @@ func TestVerify_FlagForReview_Ugly_LabelNotFoundZeroID(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
forge: forge.NewForge(srv.URL, "test-token"),
forgeURL: srv.URL,
forgeToken: "test-token",
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
// label ID 0 is passed to "add labels" payload — should not panic
@ -730,8 +729,8 @@ func TestVerify_RunVerification_Bad_GoModButNoGoFiles(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runVerification(dir)
@ -748,8 +747,8 @@ func TestVerify_RunVerification_Ugly_MultipleProjectFiles(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runVerification(dir)
@ -766,8 +765,8 @@ func TestVerify_RunVerification_Ugly_GoAndPHPProjectFiles(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runVerification(dir)
@ -794,8 +793,8 @@ func TestVerify_Add_Good(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runGoTests(dir)
@ -812,8 +811,8 @@ func TestVerify_RunGoTests_Bad(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runGoTests(dir)
@ -830,8 +829,8 @@ func TestVerify_RunGoTests_Ugly(t *testing.T) {
s := &PrepSubsystem{
ServiceRuntime: core.NewServiceRuntime(testCore, AgentOptions{}),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
backoff: make(map[string]time.Time),
failCount: make(map[string]int),
}
result := s.runGoTests(dir)

View file

@ -64,27 +64,6 @@ func CoreRoot() string {
return agentic.CoreRoot()
}
// ReadStatus reads `status.json` from one workspace directory.
//
// Deprecated: use ReadStatusResult.
//
// st, err := runner.ReadStatus("/srv/core/workspace/core/go-io/task-5")
func ReadStatus(wsDir string) (*WorkspaceStatus, error) {
r := ReadStatusResult(wsDir)
if !r.OK {
err, _ := r.Value.(error)
if err == nil {
return nil, core.E("runner.ReadStatus", "failed to read status", nil)
}
return nil, err
}
st, ok := r.Value.(*WorkspaceStatus)
if !ok || st == nil {
return nil, core.E("runner.ReadStatus", "invalid status payload", nil)
}
return st, nil
}
// ReadStatusResult reads status.json as core.Result.
//
// result := ReadStatusResult("/srv/core/workspace/core/go-io/task-5")

View file

@ -49,9 +49,9 @@ func ExampleWriteStatus() {
})
core.Println(result.OK)
st, err := ReadStatus(dir)
core.Println(err == nil)
core.Println(st.Status)
statusResult := ReadStatusResult(dir)
core.Println(statusResult.OK)
core.Println(statusResult.Value.(*WorkspaceStatus).Status)
// Output:
// true
// true

View file

@ -12,6 +12,17 @@ import (
"github.com/stretchr/testify/require"
)
func mustReadStatus(t *testing.T, dir string) *WorkspaceStatus {
t.Helper()
result := ReadStatusResult(dir)
require.True(t, result.OK)
status, ok := result.Value.(*WorkspaceStatus)
require.True(t, ok)
return status
}
func TestPaths_CoreRoot_Good(t *testing.T) {
t.Setenv("CORE_WORKSPACE", "/tmp/core-root")
assert.Equal(t, "/tmp/core-root", CoreRoot())
@ -62,8 +73,7 @@ func TestPaths_ReadStatus_Good(t *testing.T) {
}
require.True(t, agentic.LocalFs().WriteAtomic(agentic.WorkspaceStatusPath(wsDir), core.JSONMarshalString(status)).OK)
st, err := ReadStatus(wsDir)
require.NoError(t, err)
st := mustReadStatus(t, wsDir)
assert.Equal(t, "completed", st.Status)
assert.Equal(t, "codex", st.Agent)
assert.Equal(t, "go-io", st.Repo)
@ -128,13 +138,17 @@ func TestPaths_ReadStatus_Bad(t *testing.T) {
wsDir := t.TempDir()
require.True(t, agentic.LocalFs().WriteAtomic(agentic.WorkspaceStatusPath(wsDir), "{not-json").OK)
_, err := ReadStatus(wsDir)
assert.Error(t, err)
result := ReadStatusResult(wsDir)
assert.False(t, result.OK)
_, ok := result.Value.(error)
assert.True(t, ok)
}
func TestPaths_ReadStatus_Ugly(t *testing.T) {
_, err := ReadStatus(t.TempDir())
assert.Error(t, err)
result := ReadStatusResult(t.TempDir())
assert.False(t, result.OK)
_, ok := result.Value.(error)
assert.True(t, ok)
}
func TestPaths_WriteStatus_Good(t *testing.T) {
@ -150,16 +164,17 @@ func TestPaths_WriteStatus_Good(t *testing.T) {
})
assert.True(t, result.OK)
st, err := ReadStatus(wsDir)
require.NoError(t, err)
st := mustReadStatus(t, wsDir)
assert.Equal(t, "running", st.Status)
assert.Equal(t, "codex", st.Agent)
assert.Equal(t, "go-io", st.Repo)
assert.Equal(t, "agent/ax-cleanup", st.Branch)
assert.Equal(t, 1, st.Runs)
agenticStatus, err := agentic.ReadStatus(wsDir)
require.NoError(t, err)
agenticResult := agentic.ReadStatusResult(wsDir)
require.True(t, agenticResult.OK)
agenticStatus, ok := agenticResult.Value.(*agentic.WorkspaceStatus)
require.True(t, ok)
assert.False(t, agenticStatus.UpdatedAt.IsZero())
}
@ -191,8 +206,7 @@ func TestPaths_WriteStatus_Ugly(t *testing.T) {
Runs: 3,
}).OK)
st, err := ReadStatus(wsDir)
require.NoError(t, err)
st := mustReadStatus(t, wsDir)
assert.Equal(t, "completed", st.Status)
assert.Equal(t, "claude", st.Agent)
assert.Equal(t, "agent/ax-cleanup", st.Branch)

View file

@ -362,23 +362,24 @@ func TestRunner_HydrateWorkspaces_Good_DeepWorkspaceName(t *testing.T) {
assert.Equal(t, "go-io", st.Repo)
}
// --- WriteStatus / ReadStatus ---
// --- WriteStatus / ReadStatusResult ---
func TestRunner_WriteReadStatus_Good(t *testing.T) {
dir := t.TempDir()
st := &WorkspaceStatus{Status: "running", Agent: "codex", Repo: "go-io", PID: 999}
require.True(t, WriteStatus(dir, st).OK)
got, err := ReadStatus(dir)
require.NoError(t, err)
got := mustReadStatus(t, dir)
assert.Equal(t, "running", got.Status)
assert.Equal(t, "codex", got.Agent)
assert.Equal(t, 999, got.PID)
}
func TestRunner_ReadStatus_Bad_NoFile(t *testing.T) {
_, err := ReadStatus(t.TempDir())
assert.Error(t, err)
result := ReadStatusResult(t.TempDir())
assert.False(t, result.OK)
_, ok := result.Value.(error)
assert.True(t, ok)
}
func TestRunner_WriteReadStatus_Ugly_OverwriteExisting(t *testing.T) {
@ -386,7 +387,6 @@ func TestRunner_WriteReadStatus_Ugly_OverwriteExisting(t *testing.T) {
require.True(t, WriteStatus(dir, &WorkspaceStatus{Status: "running"}).OK)
require.True(t, WriteStatus(dir, &WorkspaceStatus{Status: "completed"}).OK)
got, err := ReadStatus(dir)
require.NoError(t, err)
got := mustReadStatus(t, dir)
assert.Equal(t, "completed", got.Status)
}