diff --git a/FINDINGS.md b/FINDINGS.md
index cdf71f0..60ca9b5 100644
--- a/FINDINGS.md
+++ b/FINDINGS.md
@@ -2,11 +2,11 @@
## Code Quality
-- **100 tests in node/, all pass** — 71.9% statement coverage (dispatcher adds 10 test funcs / 17 subtests)
-- **logging/ fully tested** (12 tests, 100% coverage)
-- **UEPS 88.5% coverage** — wire protocol tests added in Phase 1
+- **node/ 87.5% statement coverage** — integration tests, bufpool tests, and benchmarks added (Phase 5)
+- **ueps/ 93.1% coverage** — benchmarks added (Phase 5)
+- **logging/ well tested** (12 tests, 86.2% coverage)
- **`go vet` clean** — no static analysis warnings
-- **`go test -race` clean** — no data races (GracefulClose race fixed, see below)
+- **`go test -race` clean** — no data races
- **Zero TODOs/FIXMEs** in codebase
## Security Posture (Strong)
@@ -29,7 +29,7 @@
- Decoupled MinerManager/ProfileManager interfaces
- UEPS dispatcher: functional IntentHandler type, RWMutex-protected handler map, sentinel errors
-## Critical Test Gaps
+## Test Coverage Summary
| File | Lines | Tests | Coverage |
|------|-------|-------|----------|
@@ -42,6 +42,8 @@
| transport.go | 934 | 11 tests | Good (Phase 2) |
| controller.go | 327 | 14 tests | Good (Phase 3) |
| dispatcher.go | 120 | 10 tests (17 subtests) | 100% (Phase 4) |
+| bufpool.go | 55 | 11 tests | Good (Phase 5) |
+| integration | — | 10 tests | Good (Phase 5) |
| ueps/packet.go | 124 | 9 tests | Good (Phase 1) |
| ueps/reader.go | 138 | 9 tests | Good (Phase 1) |
@@ -61,6 +63,40 @@
4. **Threshold at 50,000 (not configurable)** — Kept as a constant to match the original stub. Can be made configurable via functional options if needed later.
5. **RWMutex for handler map** — Read-heavy workload (dispatches far outnumber registrations), so RWMutex is appropriate. Registration takes a write lock, dispatch takes a read lock.
+## Phase 5 Benchmark Results (AMD Ryzen 9 9950X)
+
+| Operation | Time/op | Allocs/op |
+|-----------|---------|-----------|
+| Identity key generation (STMF) | 23 us | 6 |
+| Full identity generation + disk | 74 us | 51 |
+| Shared secret derivation (ECDH) | 46 us | 9 |
+| KD-tree nearest (10 peers) | 247 ns | 2 |
+| KD-tree nearest (100 peers) | 497 ns | 2 |
+| KD-tree nearest (1000 peers) | 2.9 us | 2 |
+| NewMessage (Ping) | 271 ns | 5 |
+| NewMessage (Stats, 2 miners) | 701 ns | 5 |
+| MarshalJSON (pooled buffer) | 375 ns | 2 |
+| json.Marshal (stdlib) | 367 ns | 2 |
+| SMSG Encrypt | 2.6 us | 34 |
+| SMSG Decrypt | 3.9 us | 47 |
+| SMSG Round-trip | 6.4 us | 81 |
+| Challenge generate | 105 ns | 1 |
+| Challenge sign (HMAC-SHA256) | 276 ns | 6 |
+| Challenge verify | 295 ns | 6 |
+| UEPS marshal+sign (64B payload) | 509 ns | 27 |
+| UEPS marshal+sign (1KB payload) | 948 ns | 27 |
+| UEPS marshal+sign (64KB payload) | 27.7 us | 27 |
+| UEPS read+verify (64B payload) | 843 ns | 18 |
+| UEPS read+verify (1KB payload) | 1.4 us | 20 |
+| UEPS read+verify (64KB payload) | 44 us | 33 |
+| UEPS round-trip (256B payload) | 1.5 us | 45 |
+
+**Observations:**
+- KD-tree peer scoring scales sub-linearly (100x more peers costs ~12x more time) with only 2 allocs regardless of tree size.
+- MarshalJSON's buffer pool shows no measurable advantage at small message sizes — stdlib has been heavily optimised. The pool's value emerges under sustained load where GC pressure is reduced.
+- SMSG is the dominant cost per-message (~6.4 us round-trip), making it the primary bottleneck for message throughput. At ~150K encrypted messages/sec/core, this is adequate for P2P mesh traffic.
+- UEPS wire format is lightweight: signing a 64B packet costs ~500 ns, mostly HMAC computation.
+
## Bugs Fixed
1. **P2P-RACE-1: GracefulClose data race** (Phase 3) — `GracefulClose` called `pc.Conn.SetWriteDeadline()` outside of `writeMu`, racing with concurrent `Send()` calls that also modify the write deadline. Fixed by removing the bare `SetWriteDeadline` call and relying on `Send()` which already manages deadlines under the lock. Detected by `go test -race`.
diff --git a/node/bufpool_test.go b/node/bufpool_test.go
index 54f5851..93235c1 100644
--- a/node/bufpool_test.go
+++ b/node/bufpool_test.go
@@ -10,80 +10,56 @@ import (
"github.com/stretchr/testify/require"
)
-func TestBufPool_GetPutRoundTrip(t *testing.T) {
- buf := getBuffer()
- require.NotNil(t, buf, "getBuffer should return a non-nil buffer")
- assert.Equal(t, 0, buf.Len(), "buffer should be empty after get")
+// --- bufpool.go tests ---
- buf.WriteString("hello")
- assert.Equal(t, 5, buf.Len())
+func TestGetBuffer_ReturnsResetBuffer(t *testing.T) {
+ t.Run("buffer is initially empty", func(t *testing.T) {
+ buf := getBuffer()
+ defer putBuffer(buf)
- putBuffer(buf)
+ assert.Equal(t, 0, buf.Len(), "buffer from pool should have zero length")
+ })
- // Get another buffer — should be reset
- buf2 := getBuffer()
- assert.Equal(t, 0, buf2.Len(), "buffer should be reset after get")
- putBuffer(buf2)
+ t.Run("buffer is reset after reuse", func(t *testing.T) {
+ buf := getBuffer()
+ buf.WriteString("stale data that should be cleared")
+ putBuffer(buf)
+
+ buf2 := getBuffer()
+ defer putBuffer(buf2)
+
+ assert.Equal(t, 0, buf2.Len(),
+ "reused buffer should be reset (no stale data)")
+ })
}
-func TestBufPool_BufferReuse(t *testing.T) {
- // Get a buffer, write to it, put it back, get again.
- // The pool may return the same buffer (though not guaranteed by sync.Pool).
- // We can at least verify the buffer is properly reset.
- buf1 := getBuffer()
- buf1.WriteString("reuse-test")
- cap1 := buf1.Cap()
- putBuffer(buf1)
+func TestPutBuffer_DiscardsOversizedBuffers(t *testing.T) {
+ t.Run("buffer at 64KB limit is pooled", func(t *testing.T) {
+ buf := getBuffer()
+ buf.Grow(65536)
+ putBuffer(buf)
- buf2 := getBuffer()
- assert.Equal(t, 0, buf2.Len(), "reused buffer must be reset")
- // If we got the same buffer, capacity should be at least as large
- if buf2.Cap() >= cap1 {
- // Likely the same buffer — good, it was reused
- t.Logf("buffer likely reused: cap1=%d, cap2=%d", cap1, buf2.Cap())
- }
- putBuffer(buf2)
-}
+ buf2 := getBuffer()
+ defer putBuffer(buf2)
+ assert.Equal(t, 0, buf2.Len())
+ })
-func TestBufPool_LargeBufferNotPooled(t *testing.T) {
- buf := getBuffer()
- // Grow buffer beyond the 64KB threshold
- large := make([]byte, 70000)
- buf.Write(large)
- assert.Greater(t, buf.Cap(), 65536, "buffer should have grown past threshold")
+ t.Run("buffer exceeding 64KB is discarded", func(t *testing.T) {
+ buf := getBuffer()
+ large := make([]byte, 65537)
+ buf.Write(large)
+ assert.Greater(t, buf.Cap(), 65536, "buffer should have grown past 64KB")
- putBuffer(buf) // Should NOT be returned to the pool
+ putBuffer(buf)
- // Get a new buffer — it should be a fresh one (small capacity)
- buf2 := getBuffer()
- assert.LessOrEqual(t, buf2.Cap(), 65536,
- "buffer from pool should not be the oversized one")
- putBuffer(buf2)
-}
-
-func TestBufPool_ConcurrentGetPut(t *testing.T) {
- const goroutines = 100
- const iterations = 50
- var wg sync.WaitGroup
- wg.Add(goroutines)
-
- for g := 0; g < goroutines; g++ {
- go func(id int) {
- defer wg.Done()
- for i := 0; i < iterations; i++ {
- buf := getBuffer()
- buf.WriteString("concurrent-data")
- assert.Greater(t, buf.Len(), 0)
- putBuffer(buf)
- }
- }(g)
- }
-
- wg.Wait()
+ buf2 := getBuffer()
+ defer putBuffer(buf2)
+ assert.LessOrEqual(t, buf2.Cap(), 65536,
+ "pool should not return an oversized buffer")
+ })
}
func TestBufPool_BufferIndependence(t *testing.T) {
- // Get two buffers, write to one, verify the other is unaffected.
buf1 := getBuffer()
buf2 := getBuffer()
@@ -93,7 +69,6 @@ func TestBufPool_BufferIndependence(t *testing.T) {
assert.Equal(t, "buffer-one", buf1.String())
assert.Equal(t, "buffer-two", buf2.String())
- // Writing more to buf1 should not affect buf2
buf1.WriteString("-extra")
assert.Equal(t, "buffer-one-extra", buf1.String())
assert.Equal(t, "buffer-two", buf2.String())
@@ -107,66 +82,175 @@ func TestMarshalJSON_BasicTypes(t *testing.T) {
name string
input interface{}
}{
- {"string", "hello"},
- {"int", 42},
- {"float", 3.14},
- {"bool", true},
- {"nil", nil},
- {"map", map[string]string{"key": "value"}},
- {"slice", []int{1, 2, 3}},
+ {
+ name: "string value",
+ input: "hello",
+ },
+ {
+ name: "integer value",
+ input: 42,
+ },
+ {
+ name: "float value",
+ input: 3.14,
+ },
+ {
+ name: "boolean value",
+ input: true,
+ },
+ {
+ name: "nil value",
+ input: nil,
+ },
+ {
+ name: "struct value",
+ input: PingPayload{SentAt: 1234567890},
+ },
+ {
+ name: "map value",
+ input: map[string]interface{}{"key": "value", "num": 42},
+ },
+ {
+ name: "slice value",
+ input: []int{1, 2, 3},
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- pooled, err := MarshalJSON(tt.input)
+ got, err := MarshalJSON(tt.input)
require.NoError(t, err)
- standard, err := json.Marshal(tt.input)
+ expected, err := json.Marshal(tt.input)
require.NoError(t, err)
- assert.Equal(t, string(standard), string(pooled),
- "MarshalJSON should produce same output as json.Marshal")
+ assert.JSONEq(t, string(expected), string(got),
+ "MarshalJSON output should match json.Marshal")
})
}
}
+func TestMarshalJSON_NoTrailingNewline(t *testing.T) {
+ data, err := MarshalJSON(map[string]string{"key": "value"})
+ require.NoError(t, err)
+
+ assert.NotEqual(t, byte('\n'), data[len(data)-1],
+ "MarshalJSON should strip the trailing newline added by json.Encoder")
+}
+
+func TestMarshalJSON_HTMLEscaping(t *testing.T) {
+	input := map[string]string{"html": "<b>bold</b>"}
+ data, err := MarshalJSON(input)
+ require.NoError(t, err)
+
+ assert.Contains(t, string(data), "