[agent/codex] Update ALL specs/ sub-package stubs. 1. specs/logging/RFC.md... #15
56 changed files with 1763 additions and 1108 deletions
10
CLAUDE.md
10
CLAUDE.md
|
|
@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
|||
|
||||
## Project
|
||||
|
||||
`go-p2p` is the P2P networking layer for the Lethean network. Module path: `forge.lthn.ai/core/go-p2p`
|
||||
`go-p2p` is the P2P networking layer for the Lethean network. Module path: `dappco.re/go/core/p2p`
|
||||
|
||||
## Prerequisites
|
||||
|
||||
|
|
@ -40,7 +40,7 @@ logging/ — Structured levelled logger with component scoping (stdlib only)
|
|||
|
||||
### Data flow
|
||||
|
||||
1. **Identity** (`identity.go`) — Ed25519 keypair via Borg STMF. Shared secrets derived via X25519 ECDH + SHA-256.
|
||||
1. **Identity** (`identity.go`) — X25519 keypair via Borg STMF. Shared secrets derived via X25519 ECDH + SHA-256.
|
||||
2. **Transport** (`transport.go`) — WebSocket server/client (gorilla/websocket). Handshake exchanges `NodeIdentity` + HMAC-SHA256 challenge-response. Post-handshake messages are Borg SMSG-encrypted. Includes deduplication (5-min TTL), rate limiting (token bucket: 100 burst/50 per sec), and MaxConns enforcement.
|
||||
3. **Dispatcher** (`dispatcher.go`) — Routes verified UEPS packets to intent handlers. Threat circuit breaker drops packets with `ThreatScore > 50,000` before routing.
|
||||
4. **Controller** (`controller.go`) — Issues requests to remote peers using a pending-map pattern (`map[string]chan *Message`). Auto-connects to peers on demand.
|
||||
|
|
@ -75,13 +75,13 @@ type ProfileManager interface {
|
|||
|
||||
- UK English (colour, organisation, centre, behaviour, recognise)
|
||||
- All parameters and return types explicitly annotated
|
||||
- Tests use `testify` assert/require; table-driven subtests with `t.Run()`
|
||||
- Tests use `testify` assert/require; prefer table-driven subtests with `t.Run()` when multiple related cases share the same shape
|
||||
- Test name suffixes: `_Good` (happy path), `_Bad` (expected errors), `_Ugly` (panic/edge cases)
|
||||
- Licence: EUPL-1.2 — new files need `// SPDX-License-Identifier: EUPL-1.2`
|
||||
- Security-first: do not weaken HMAC, challenge-response, Zip Slip defence, or rate limiting
|
||||
- Use `logging` package only — no `fmt.Println` or `log.Printf` in library code
|
||||
- Error handling: use `coreerr.E()` from `go-log` — never `fmt.Errorf` or `errors.New` in library code
|
||||
- File I/O: use `coreio.Local` from `go-io` — never `os.ReadFile`/`os.WriteFile` in library code (exception: `os.OpenFile` for streaming writes where `coreio` lacks support)
|
||||
- Error handling: use `core.E()` from `dappco.re/go/core` — never `fmt.Errorf` or `errors.New` in library code
|
||||
- File I/O: use `dappco.re/go/core` filesystem helpers (package-level adapters in `node/` backed by `core.Fs`) — never `os.ReadFile`/`os.WriteFile` in library code (exception: `os.OpenFile` for streaming writes where filesystem helpers cannot preserve tar header mode bits)
|
||||
- Hot-path debug logging uses sampling pattern: `if counter.Add(1)%interval == 0`
|
||||
|
||||
### Transport test helper
|
||||
|
|
|
|||
11
CODEX.md
Normal file
11
CODEX.md
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
<!-- SPDX-License-Identifier: EUPL-1.2 -->
|
||||
|
||||
# CODEX.md
|
||||
|
||||
Codex-compatible entrypoint for this repository.
|
||||
|
||||
- Treat `CLAUDE.md` as the authoritative local conventions file for commands, architecture notes, coding standards, and commit format.
|
||||
- Current module path: `dappco.re/go/core/p2p`.
|
||||
- Verification baseline: `go build ./...`, `go vet ./...`, and `go test ./...`.
|
||||
- Use conventional commits with `Co-Authored-By: Virgil <virgil@lethean.io>`.
|
||||
- If `.core/reference/docs/RFC.md` is absent in the checkout, report that gap explicitly and use the local docs under `docs/` plus the code as the available reference set.
|
||||
50
README.md
50
README.md
|
|
@ -1,30 +1,52 @@
|
|||
[](https://pkg.go.dev/forge.lthn.ai/core/go-p2p)
|
||||
[](LICENSE.md)
|
||||
[](https://pkg.go.dev/dappco.re/go/core/p2p)
|
||||
[](CONTRIBUTING.md#license)
|
||||
[](go.mod)
|
||||
|
||||
# go-p2p
|
||||
|
||||
P2P mesh networking layer for the Lethean network. Provides Ed25519 node identity, an encrypted WebSocket transport with HMAC-SHA256 challenge-response handshake, KD-tree peer selection across four dimensions (latency, hops, geography, reliability score), UEPS wire protocol (RFC-021) TLV packet builder and reader, UEPS intent routing with a threat circuit breaker, and TIM deployment bundle encryption with Zip Slip and decompression-bomb defences.
|
||||
P2P mesh networking layer for the Lethean network. Provides X25519 node identity, an encrypted WebSocket transport with HMAC-SHA256 challenge-response handshake, KD-tree peer selection across four dimensions (latency, hops, geography, reliability score), UEPS wire protocol (RFC-021) TLV packet builder and reader, UEPS intent routing with a threat circuit breaker, and TIM deployment bundle encryption with Zip Slip and decompression-bomb defences.
|
||||
|
||||
**Module**: `forge.lthn.ai/core/go-p2p`
|
||||
**Module**: `dappco.re/go/core/p2p`
|
||||
**Licence**: EUPL-1.2
|
||||
**Language**: Go 1.25
|
||||
**Language**: Go 1.26
|
||||
|
||||
## Quick Start
|
||||
|
||||
```go
|
||||
import (
|
||||
"forge.lthn.ai/core/go-p2p/node"
|
||||
"forge.lthn.ai/core/go-p2p/ueps"
|
||||
"log"
|
||||
|
||||
"dappco.re/go/core/p2p/node"
|
||||
"dappco.re/go/core/p2p/ueps"
|
||||
)
|
||||
|
||||
// Start a P2P node
|
||||
identity, _ := node.LoadOrCreateIdentity()
|
||||
transport := node.NewTransport(identity, node.TransportConfig{ListenAddr: ":9091"})
|
||||
transport.Start(ctx)
|
||||
nm, err := node.NewNodeManager()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if !nm.HasIdentity() {
|
||||
if err := nm.GenerateIdentity("worker-1", node.RoleWorker); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Build a UEPS packet
|
||||
pkt, _ := ueps.NewBuilder(ueps.IntentCompute, payload).MarshalAndSign(sharedSecret)
|
||||
registry, err := node.NewPeerRegistry()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
transport := node.NewTransport(nm, registry, node.DefaultTransportConfig())
|
||||
if err := transport.Start(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
payload := []byte(`{"job":"hashrate"}`)
|
||||
sharedSecret := make([]byte, 32)
|
||||
pkt, err := ueps.NewBuilder(node.IntentCompute, payload).MarshalAndSign(sharedSecret)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
_ = pkt
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
|
@ -44,4 +66,4 @@ go build ./...
|
|||
|
||||
## Licence
|
||||
|
||||
European Union Public Licence 1.2 — see [LICENCE](LICENCE) for details.
|
||||
European Union Public Licence 1.2 — see [CONTRIBUTING](CONTRIBUTING.md#license) for details.
|
||||
|
|
|
|||
148
SESSION-BRIEF.md
148
SESSION-BRIEF.md
|
|
@ -1,129 +1,57 @@
|
|||
# Session Brief: core/go-p2p
|
||||
|
||||
**Repo**: `forge.lthn.ai/core/go-p2p` (clone at `/tmp/core-go-p2p`)
|
||||
**Module**: `forge.lthn.ai/core/go-p2p`
|
||||
**Status**: 16 Go files, ~2,500 LOC, node tests PASS (42% coverage), ueps has NO TESTS
|
||||
**Wiki**: https://forge.lthn.ai/core/go-p2p/wiki (6 pages)
|
||||
**Repo**: `forge.lthn.ai/core/go-p2p`
|
||||
**Module**: `dappco.re/go/core/p2p`
|
||||
**Status**: `go build ./...`, `go vet ./...`, and `go test ./...` pass on 2026-03-27.
|
||||
**Primary references**: `CLAUDE.md`, `docs/architecture.md`, `docs/development.md`
|
||||
|
||||
## What This Is
|
||||
|
||||
P2P networking layer for the Lethean network. Three packages:
|
||||
P2P networking layer for the Lethean network. The repository currently consists of four Go packages:
|
||||
|
||||
### node/ — P2P Mesh (14 files)
|
||||
- **Identity**: Ed25519 keypair generation, PEM serialisation, challenge-response auth
|
||||
- **Transport**: Encrypted WebSocket connections via gorilla/websocket + Borg (encrypted blob storage)
|
||||
- **Peers**: Registry with scoring, persistence, auth modes (open/allowlist), name validation
|
||||
- **Messages**: Typed protocol messages (handshake, ping, stats, miner control, deploy, logs)
|
||||
- **Protocol**: Response handler with validation and typed parsing
|
||||
- **Worker**: Command handler (ping, stats, miner start/stop, deploy profiles, get logs)
|
||||
- **Dispatcher**: UEPS packet routing skeleton with threat circuit breaker
|
||||
- **Controller**: Remote node operations (connect, command, disconnect)
|
||||
- **Bundle**: Service factory for Core framework DI registration
|
||||
|
||||
### ueps/ — Wire Protocol (2 files, NO TESTS)
|
||||
- **PacketBuilder**: Constructs signed UEPS frames with TLV encoding
|
||||
- **ReadAndVerify**: Parses and verifies HMAC-SHA256 integrity
|
||||
- TLV tags: 0x01-0x05 (header fields), 0x06 (HMAC), 0xFF (payload marker)
|
||||
- Header: Version, CurrentLayer, TargetLayer, IntentID, ThreatScore
|
||||
|
||||
### logging/ — Structured Logger (1 file)
|
||||
- Simple levelled logger (INFO/WARN/ERROR/DEBUG) with key-value pairs
|
||||
- `node/` — P2P mesh: identity, transport, peer registry, messages, protocol helpers, worker/controller logic, dispatcher, and deployment bundles
|
||||
- `node/levin/` — standalone CryptoNote Levin binary protocol support
|
||||
- `ueps/` — UEPS TLV wire protocol with HMAC-SHA256 integrity verification
|
||||
- `logging/` — structured levelled logger with component scoping
|
||||
|
||||
## Current State
|
||||
|
||||
| Area | Status |
|
||||
|------|--------|
|
||||
| node/ tests | PASS — 42% statement coverage |
|
||||
| ueps/ tests | NONE — zero test files |
|
||||
| logging/ tests | NONE |
|
||||
| go vet | Clean |
|
||||
| TODOs/FIXMEs | None found |
|
||||
| Identity (Ed25519) | Well tested — keypair, challenge-response, deterministic sigs |
|
||||
| PeerRegistry | Well tested — add/remove, scoring, persistence, auth modes, name validation |
|
||||
| Messages | Well tested — all 15 message types, serialisation, error codes |
|
||||
| Worker | Well tested — ping, stats, miner, deploy, logs handlers |
|
||||
| Transport | NOT tested — WebSocket + Borg encryption |
|
||||
| Controller | NOT tested — remote node operations |
|
||||
| Dispatcher | NOT tested — UEPS routing skeleton |
|
||||
| Build | PASS |
|
||||
| Vet | PASS |
|
||||
| Tests | PASS |
|
||||
| `logging/` | Has direct unit coverage |
|
||||
| `ueps/` | Has round-trip, malformed packet, and coverage-path tests |
|
||||
| `node/transport` | Has real WebSocket handshake and integration tests |
|
||||
| `node/controller` | Has request/response, auto-connect, ping, and miner-control tests |
|
||||
| `node/dispatcher` | Has routing, threshold, and concurrency tests |
|
||||
| `node/levin` | Has protocol encode/decode coverage |
|
||||
|
||||
## Key Behaviours
|
||||
|
||||
- **Identity** — X25519 keypair generation via Borg STMF, persisted through XDG paths
|
||||
- **Transport** — WebSocket mesh with challenge-response authentication, SMSG encryption, deduplication, rate limiting, and keepalive handling
|
||||
- **Peer registry** — KD-tree selection across latency, hops, geography, and reliability score
|
||||
- **Controller/worker** — request/response messaging for stats, miner control, logs, and deployment
|
||||
- **Dispatcher** — UEPS intent routing with a threat circuit breaker at `ThreatScore > 50000`
|
||||
- **Bundles** — TIM-based profile and miner bundle handling with defensive tar extraction
|
||||
|
||||
## Dependencies
|
||||
|
||||
- `github.com/Snider/Borg` v0.2.0 (encrypted blob storage)
|
||||
- `github.com/Snider/Enchantrix` v0.0.2 (secure environment)
|
||||
- `github.com/Snider/Poindexter` (secure pointer)
|
||||
- `github.com/gorilla/websocket` v1.5.3
|
||||
- `github.com/google/uuid` v1.6.0
|
||||
- `github.com/ProtonMail/go-crypto` v1.3.0
|
||||
- `dappco.re/go/core` v0.8.0-alpha.1
|
||||
- `forge.lthn.ai/Snider/Borg` v0.3.1
|
||||
- `forge.lthn.ai/Snider/Poindexter` v0.0.3
|
||||
- `github.com/adrg/xdg` v0.5.3
|
||||
- `github.com/google/uuid` v1.6.0
|
||||
- `github.com/gorilla/websocket` v1.5.3
|
||||
- `github.com/stretchr/testify` v1.11.1
|
||||
- `golang.org/x/crypto` v0.45.0
|
||||
|
||||
## Priority Work
|
||||
|
||||
### High (coverage gaps)
|
||||
1. **UEPS tests** — Zero tests for the wire protocol. This is the consent-gated TLV protocol from RFC-021. Need: builder round-trip, HMAC verification, malformed packet rejection, boundary conditions (max ThreatScore, empty payload, oversized payload).
|
||||
2. **Transport tests** — WebSocket connection, Borg encryption handshake, reconnection logic.
|
||||
3. **Controller tests** — Connect/command/disconnect flow.
|
||||
4. **Dispatcher tests** — UEPS routing, threat circuit breaker (ThreatScore > 50000 drops).
|
||||
|
||||
### Medium (hardening)
|
||||
5. **Increase node/ coverage** from 42% to 70%+ — focus on transport.go, controller.go, dispatcher.go
|
||||
6. **Benchmarks** — Peer scoring, UEPS marshal/unmarshal, identity key generation
|
||||
7. **Integration test** — Full node-to-node handshake over localhost WebSocket
|
||||
|
||||
### Low (completeness)
|
||||
8. **Logging tests** — Simple but should have coverage
|
||||
9. **Peer discovery** — Currently manual. Add mDNS or DHT discovery
|
||||
10. **Connection pooling** — Transport creates fresh connections; add pool for controller
|
||||
|
||||
## File Map
|
||||
|
||||
```
|
||||
/tmp/core-go-p2p/
|
||||
├── node/
|
||||
│ ├── bundle.go + bundle_test.go — Core DI factory
|
||||
│ ├── identity.go + identity_test.go — Ed25519 keypair, PEM, challenge-response
|
||||
│ ├── message.go + message_test.go — Protocol message types
|
||||
│ ├── peer.go + peer_test.go — Registry, scoring, auth
|
||||
│ ├── protocol.go + protocol_test.go — Response validation, typed parsing
|
||||
│ ├── worker.go + worker_test.go — Command handlers
|
||||
│ ├── transport.go (NO TEST) — WebSocket + Borg encryption
|
||||
│ ├── controller.go (NO TEST) — Remote node operations
|
||||
│ ├── dispatcher.go (NO TEST) — UEPS routing skeleton
|
||||
│ └── logging.go — Package-level logger setup
|
||||
├── ueps/
|
||||
│ ├── ueps.go (NO TEST) — PacketBuilder, ReadAndVerify, TLV
|
||||
│ └── types.go (NO TEST) — UEPSHeader, ParsedPacket, intent IDs
|
||||
├── logging/
|
||||
│ └── logger.go (NO TEST) — Levelled structured logger
|
||||
├── go.mod
|
||||
└── go.sum
|
||||
```
|
||||
|
||||
## Key Interfaces
|
||||
|
||||
```go
|
||||
// node/message.go — 15 message types
|
||||
const (
|
||||
MsgHandshake MsgHandshakeAck MsgPing MsgPong
|
||||
MsgDisconnect MsgGetStats MsgStats MsgStartMiner
|
||||
MsgStopMiner MsgMinerAck MsgDeploy MsgDeployAck
|
||||
MsgGetLogs MsgLogs MsgError
|
||||
)
|
||||
|
||||
// ueps/types.go — UEPS header
|
||||
type UEPSHeader struct {
|
||||
Version uint8 // 0x09
|
||||
CurrentLayer uint8
|
||||
TargetLayer uint8
|
||||
IntentID uint8 // 0x01=Handshake, 0x20=Compute, 0x30=Rehab, 0xFF=Extended
|
||||
ThreatScore uint16
|
||||
}
|
||||
```
|
||||
|
||||
## Conventions
|
||||
|
||||
- UK English
|
||||
- Tests: testify assert/require
|
||||
- Licence: EUPL-1.2
|
||||
- Lethean codenames: Borg (Secure/Blob), Poindexter (Secure/Pointer), Enchantrix (Secure/Environment)
|
||||
- UK English in comments, logs, and docs
|
||||
- `core.E()` for library error wrapping and sentinel definitions
|
||||
- `core.Fs` adapters for library file I/O in `node/`
|
||||
- `testify` in tests; prefer `t.Run()` tables for related cases
|
||||
- EUPL-1.2 SPDX identifiers on new files
|
||||
- Conventional commits with `Co-Authored-By: Virgil <virgil@lethean.io>`
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
# Architecture — go-p2p
|
||||
|
||||
`go-p2p` is the P2P networking layer for the Lethean network. Module path: `forge.lthn.ai/core/go-p2p`.
|
||||
`go-p2p` is the P2P networking layer for the Lethean network. Module path: `dappco.re/go/core/p2p`.
|
||||
|
||||
## Package Structure
|
||||
|
||||
|
|
@ -17,7 +17,7 @@ go-p2p/
|
|||
|
||||
### identity.go — Node Identity
|
||||
|
||||
Each node holds an Ed25519 keypair generated via Borg STMF (X25519 curve). The private key is stored at `~/.local/share/lethean-desktop/node/private.key` (mode 0600) and the public identity JSON at `~/.config/lethean-desktop/node.json`.
|
||||
Each node holds an X25519 keypair generated via Borg STMF. The private key is stored at `~/.local/share/lethean-desktop/node/private.key` (mode 0600) and the public identity JSON at `~/.config/lethean-desktop/node.json`.
|
||||
|
||||
`NodeIdentity` carries:
|
||||
- `ID` — 32-character hex string derived from SHA-256 of the public key (first 16 bytes)
|
||||
|
|
@ -83,7 +83,7 @@ The `Transport` manages a WebSocket server (gorilla/websocket) and outbound conn
|
|||
|
||||
`SelectOptimalPeer()` queries the tree for the point nearest to the origin (ideal: zero latency, zero hops, zero distance, maximum score). `SelectNearestPeers(n)` returns the n best.
|
||||
|
||||
**Persistence**: Writes are debounced with a 5-second coalesce window (`scheduleSave`). The actual write uses an atomic rename pattern (write to `.tmp`, then `os.Rename`) to prevent partial file corruption. `Close()` flushes any pending dirty state synchronously.
|
||||
**Persistence**: Writes are debounced with a 5-second coalesce window (`scheduleSave`). The actual write uses an atomic rename pattern (write to `.tmp`, then rename) to prevent partial file corruption. `Close()` flushes any pending dirty state synchronously.
|
||||
|
||||
**Auth modes**:
|
||||
- `PeerAuthOpen` — any connecting peer is accepted (default).
|
||||
|
|
@ -209,10 +209,10 @@ The Unified Encrypted Packet Structure defines a TLV-encoded binary frame authen
|
|||
[0x04][len][IntentID] Header: Semantic routing token
|
||||
[0x05][0x02][ThreatScore] Header: uint16, big-endian
|
||||
[0x06][0x20][HMAC-SHA256] Signature: 32 bytes, covers header TLVs + payload data
|
||||
[0xFF][...payload...] Data: no length prefix (relies on external framing)
|
||||
[0xFF][len][...payload...] Data: length-prefixed payload
|
||||
```
|
||||
|
||||
**HMAC coverage**: The signature is computed over the serialised header TLVs (tags 0x01–0x05) concatenated with the raw payload bytes. The HMAC TLV itself (tag 0x06) and the payload tag byte (0xFF) are excluded from the signed data.
|
||||
**HMAC coverage**: The signature is computed over the serialised header TLVs (tags 0x01–0x05) concatenated with the raw payload bytes. The HMAC TLV itself (tag 0x06) and the payload TLV header (tag `0xFF` plus the 2-byte length) are excluded from the signed data.
|
||||
|
||||
### PacketBuilder
|
||||
|
||||
|
|
@ -220,9 +220,7 @@ The Unified Encrypted Packet Structure defines a TLV-encoded binary frame authen
|
|||
|
||||
### ReadAndVerify
|
||||
|
||||
`ReadAndVerify(r *bufio.Reader, sharedSecret)` reads a stream, decodes the TLV fields in order, reconstructs the signed data buffer, and verifies the HMAC with `hmac.Equal`. Unknown TLV tags are accumulated into the signed data buffer (forward-compatible extension mechanism) but their semantics are ignored.
|
||||
|
||||
**Known limitation**: Tag 0xFF carries no length prefix. The reader calls `io.ReadAll` on the remaining stream, which requires external TCP framing (e.g. a 4-byte length prefix on the enclosing connection) to delimit the packet boundary. The packet is not self-delimiting.
|
||||
`ReadAndVerify(r *bufio.Reader, sharedSecret)` reads a stream, decodes the TLV fields in order, reconstructs the signed data buffer, and verifies the HMAC with `hmac.Equal`. Unknown TLV tags are accumulated into the signed data buffer (forward-compatible extension mechanism) but their semantics are ignored. The payload TLV is length-prefixed like every other field, so UEPS frames are self-delimiting.
|
||||
|
||||
## logging/ — Structured Logger
|
||||
|
||||
|
|
@ -255,8 +253,8 @@ The codebase is verified race-free under `go test -race`.
|
|||
```
|
||||
node/ ──► ueps/
|
||||
node/ ──► logging/
|
||||
node/ ──► github.com/Snider/Borg (STMF crypto, SMSG encryption, TIM)
|
||||
node/ ──► github.com/Snider/Poindexter (KD-tree peer selection)
|
||||
node/ ──► forge.lthn.ai/Snider/Borg (STMF crypto, SMSG encryption, TIM)
|
||||
node/ ──► forge.lthn.ai/Snider/Poindexter (KD-tree peer selection)
|
||||
node/ ──► github.com/gorilla/websocket
|
||||
node/ ──► github.com/google/uuid
|
||||
ueps/ ──► (stdlib only)
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
## Prerequisites
|
||||
|
||||
- Go 1.25 or later (the module declares `go 1.25.5`)
|
||||
- Go 1.26 or later (the module declares `go 1.26.0`)
|
||||
- Network access to `forge.lthn.ai` for private dependencies (Borg, Poindexter, Enchantrix)
|
||||
- SSH key configured for `git@forge.lthn.ai:2223` (HTTPS auth is not supported on Forge)
|
||||
|
||||
|
|
@ -43,7 +43,7 @@ go vet ./...
|
|||
|
||||
### Table-Driven Subtests
|
||||
|
||||
All tests use table-driven subtests with `t.Run()`. A test that does not follow this pattern should be refactored before merging.
|
||||
Prefer table-driven subtests with `t.Run()` when multiple related cases share the same structure. Use clear case names and keep setup and verification consistent across the table.
|
||||
|
||||
```go
|
||||
func TestFoo(t *testing.T) {
|
||||
|
|
@ -177,12 +177,12 @@ All parameters and return types must carry explicit type annotations. Avoid `int
|
|||
### Error Handling
|
||||
|
||||
- Never discard errors silently.
|
||||
- Wrap errors with context using `fmt.Errorf("context: %w", err)`.
|
||||
- Wrap library errors with context using `core.E("operation", "context", err)`.
|
||||
- Return typed sentinel errors for conditions callers need to inspect programmatically.
|
||||
|
||||
### Licence Header
|
||||
|
||||
Every new file must carry the EUPL-1.2 licence identifier. The module's `LICENSE` file governs the package. Do not include the full licence text in each file; a short SPDX identifier comment at the top is sufficient for new files:
|
||||
Every new file must carry the EUPL-1.2 licence identifier. The project is licensed under EUPL-1.2; do not include the full licence text in each file. A short SPDX identifier comment at the top is sufficient for new files:
|
||||
|
||||
```go
|
||||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
|
|
|||
|
|
@ -177,7 +177,7 @@ Peers are persisted to `~/.config/lethean-desktop/peers.json` as a JSON array.
|
|||
|
||||
### Debounced Writes
|
||||
|
||||
To avoid excessive disk I/O, saves are debounced with a 5-second coalesce interval. Multiple mutations within that window produce a single disk write. The write uses an atomic rename pattern (write to `.tmp`, then `os.Rename`) to prevent corruption on crash.
|
||||
To avoid excessive disk I/O, saves are debounced with a 5-second coalesce interval. Multiple mutations within that window produce a single disk write. The write uses an atomic rename pattern (write to `.tmp`, then rename) to prevent corruption on crash.
|
||||
|
||||
```go
|
||||
// Flush pending changes on shutdown
|
||||
|
|
|
|||
|
|
@ -10,10 +10,10 @@ Implemented the complete test suite for the UEPS binary framing layer. Tests cov
|
|||
|
||||
- PacketBuilder round-trip: basic, binary payload, elevated threat score, large payload
|
||||
- HMAC verification: payload tampering detected, header tampering detected, wrong shared secret detected
|
||||
- Boundary conditions: nil payload, empty slice payload, `uint16` max ThreatScore (65,535), TLV value exceeding 255 bytes (`writeTLV` error path)
|
||||
- Boundary conditions: nil payload, empty slice payload, `uint16` max ThreatScore (65,535), TLV value exceeding 65,535 bytes (`writeTLV` error path)
|
||||
- Stream robustness: truncated packets detected at multiple cut points (EOF mid-tag, mid-length, mid-value), missing HMAC tag, unknown TLV tags skipped and included in signed data
|
||||
|
||||
The 11.5% gap from 100% coverage is the reader's `io.ReadAll` error path, which requires a contrived broken `io.Reader` to exercise.
|
||||
The remaining gap from 100% coverage at the time was the payload read-error path, which required a contrived broken reader to exercise.
|
||||
|
||||
### Phase 2 — Transport Tests
|
||||
|
||||
|
|
@ -86,11 +86,9 @@ Three integration tests (`TestIntegration_*`) exercise the full stack end-to-end
|
|||
|
||||
## Known Limitations
|
||||
|
||||
### UEPS 0xFF Payload Not Self-Delimiting
|
||||
### UEPS Payload Framing (Resolved)
|
||||
|
||||
The `TagPayload` (0xFF) field carries no length prefix. `ReadAndVerify` calls `io.ReadAll` on the remaining stream, which means the packet format relies on external TCP framing to delimit the packet boundary. The enclosing transport must provide a length-prefixed frame before calling `ReadAndVerify`. This is noted in comments in both `packet.go` and `reader.go` but no solution is implemented.
|
||||
|
||||
Consequence: UEPS packets cannot be chained in a raw stream without an outer framing protocol. The current WebSocket transport encapsulates each UEPS frame in a single WebSocket message, which provides the necessary boundary implicitly.
|
||||
The `TagPayload` (0xFF) field now uses the same 2-byte length prefix as the other TLVs. `ReadAndVerify` reads that explicit length, so UEPS packets are self-delimiting and can be chained in a stream without relying on an outer framing layer.
|
||||
|
||||
### No Resource Cleanup on Some Error Paths
|
||||
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ description: P2P mesh networking layer for the Lethean network.
|
|||
|
||||
P2P networking layer for the Lethean network. Encrypted WebSocket mesh with UEPS wire protocol.
|
||||
|
||||
**Module:** `forge.lthn.ai/core/go-p2p`
|
||||
**Module:** `dappco.re/go/core/p2p`
|
||||
**Go:** 1.26
|
||||
**Licence:** EUPL-1.2
|
||||
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ description: UEPS intent-based packet routing with threat circuit breaker.
|
|||
|
||||
# Intent Routing
|
||||
|
||||
The `Dispatcher` routes verified UEPS packets to registered intent handlers. Before routing, it enforces a threat circuit breaker that silently drops packets with elevated threat scores.
|
||||
The `Dispatcher` routes verified UEPS packets to registered intent handlers. Before routing, it enforces a threat circuit breaker that blocks packets with elevated threat scores and returns sentinel errors to the caller.
|
||||
|
||||
**File:** `node/dispatcher.go`
|
||||
|
||||
|
|
@ -74,8 +74,8 @@ Dropped packets are logged at WARN level with the threat score, threshold, inten
|
|||
|
||||
### Design Rationale
|
||||
|
||||
- **High-threat packets are dropped silently** (from the sender's perspective) rather than returning an error, consistent with the "don't even parse the payload" philosophy.
|
||||
- **Unknown intents are dropped**, not forwarded, to avoid back-pressure on the transport layer. They are logged at WARN level for debugging.
|
||||
- **High-threat packets are not dispatched**. The dispatcher logs them and returns `ErrThreatScoreExceeded` to the caller; the sender still receives no protocol-level response.
|
||||
- **Unknown intents are not forwarded**. The dispatcher logs them and returns `ErrUnknownIntent`, avoiding back-pressure on the transport layer.
|
||||
- **Handler errors propagate** to the caller, allowing upstream code to record failures.
|
||||
|
||||
## Intent Constants
|
||||
|
|
@ -100,12 +100,13 @@ const (
|
|||
|
||||
```go
|
||||
var (
|
||||
ErrThreatScoreExceeded = fmt.Errorf(
|
||||
"packet rejected: threat score exceeds safety threshold (%d)",
|
||||
ThreatScoreThreshold,
|
||||
ErrThreatScoreExceeded = core.E(
|
||||
"Dispatcher.Dispatch",
|
||||
core.Sprintf("packet rejected: threat score exceeds safety threshold (%d)", ThreatScoreThreshold),
|
||||
nil,
|
||||
)
|
||||
ErrUnknownIntent = errors.New("packet dropped: unknown intent")
|
||||
ErrNilPacket = errors.New("dispatch: nil packet")
|
||||
ErrUnknownIntent = core.E("Dispatcher.Dispatch", "packet dropped: unknown intent", nil)
|
||||
ErrNilPacket = core.E("Dispatcher.Dispatch", "nil packet", nil)
|
||||
)
|
||||
```
|
||||
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ description: TLV-encoded wire protocol with HMAC-SHA256 integrity verification (
|
|||
|
||||
The `ueps` package implements the Universal Encrypted Payload System — a consent-gated TLV (Type-Length-Value) wire protocol with HMAC-SHA256 integrity verification. This is the low-level binary protocol that sits beneath the JSON-over-WebSocket mesh layer.
|
||||
|
||||
**Package:** `forge.lthn.ai/core/go-p2p/ueps`
|
||||
**Package:** `dappco.re/go/core/p2p/ueps`
|
||||
|
||||
## TLV Format
|
||||
|
||||
|
|
|
|||
5
go.mod
5
go.mod
|
|
@ -3,8 +3,7 @@ module dappco.re/go/core/p2p
|
|||
go 1.26.0
|
||||
|
||||
require (
|
||||
dappco.re/go/core/io v0.2.0
|
||||
dappco.re/go/core/log v0.1.0
|
||||
dappco.re/go/core v0.8.0-alpha.1
|
||||
forge.lthn.ai/Snider/Borg v0.3.1
|
||||
forge.lthn.ai/Snider/Poindexter v0.0.3
|
||||
github.com/adrg/xdg v0.5.3
|
||||
|
|
@ -15,11 +14,11 @@ require (
|
|||
|
||||
require (
|
||||
forge.lthn.ai/Snider/Enchantrix v0.0.4 // indirect
|
||||
forge.lthn.ai/core/go-log v0.0.4 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.4.0 // indirect
|
||||
github.com/cloudflare/circl v1.6.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/klauspost/compress v1.18.4 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
golang.org/x/crypto v0.49.0 // indirect
|
||||
golang.org/x/sys v0.42.0 // indirect
|
||||
|
|
|
|||
9
go.sum
9
go.sum
|
|
@ -1,21 +1,18 @@
|
|||
dappco.re/go/core/io v0.2.0 h1:zuudgIiTsQQ5ipVt97saWdGLROovbEB/zdVyy9/l+I4=
|
||||
dappco.re/go/core/io v0.2.0/go.mod h1:1QnQV6X9LNgFKfm8SkOtR9LLaj3bDcsOIeJOOyjbL5E=
|
||||
dappco.re/go/core/log v0.1.0 h1:pa71Vq2TD2aoEUQWFKwNcaJ3GBY8HbaNGqtE688Unyc=
|
||||
dappco.re/go/core/log v0.1.0/go.mod h1:Nkqb8gsXhZAO8VLpx7B8i1iAmohhzqA20b9Zr8VUcJs=
|
||||
dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk=
|
||||
dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
|
||||
forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8=
|
||||
forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg=
|
||||
forge.lthn.ai/Snider/Enchantrix v0.0.4 h1:biwpix/bdedfyc0iVeK15awhhJKH6TEMYOTXzHXx5TI=
|
||||
forge.lthn.ai/Snider/Enchantrix v0.0.4/go.mod h1:OGCwuVeZPq3OPe2h6TX/ZbgEjHU6B7owpIBeXQGbSe0=
|
||||
forge.lthn.ai/Snider/Poindexter v0.0.3 h1:cx5wRhuLRKBM8riIZyNVAT2a8rwRhn1dodFBktocsVE=
|
||||
forge.lthn.ai/Snider/Poindexter v0.0.3/go.mod h1:ddzGia98k3HKkR0gl58IDzqz+MmgW2cQJOCNLfuWPpo=
|
||||
forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0=
|
||||
forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw=
|
||||
github.com/ProtonMail/go-crypto v1.4.0 h1:Zq/pbM3F5DFgJiMouxEdSVY44MVoQNEKp5d5QxIQceQ=
|
||||
github.com/ProtonMail/go-crypto v1.4.0/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo=
|
||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
||||
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
|
||||
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
|
|
|
|||
|
|
@ -2,18 +2,18 @@
|
|||
package logging
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// Level represents the severity of a log message.
|
||||
//
|
||||
// level := LevelInfo
|
||||
type Level int
|
||||
|
||||
const (
|
||||
|
|
@ -44,6 +44,8 @@ func (l Level) String() string {
|
|||
}
|
||||
|
||||
// Logger provides structured logging with configurable output and level.
|
||||
//
|
||||
// logger := New(DefaultConfig())
|
||||
type Logger struct {
|
||||
mu sync.Mutex
|
||||
output io.Writer
|
||||
|
|
@ -52,6 +54,8 @@ type Logger struct {
|
|||
}
|
||||
|
||||
// Config holds configuration for creating a new Logger.
|
||||
//
|
||||
// cfg := Config{Output: io.Discard, Level: LevelDebug, Component: "sync"}
|
||||
type Config struct {
|
||||
Output io.Writer
|
||||
Level Level
|
||||
|
|
@ -59,18 +63,22 @@ type Config struct {
|
|||
}
|
||||
|
||||
// DefaultConfig returns the default logger configuration.
|
||||
//
|
||||
// cfg := DefaultConfig()
|
||||
func DefaultConfig() Config {
|
||||
return Config{
|
||||
Output: os.Stderr,
|
||||
Output: defaultOutput,
|
||||
Level: LevelInfo,
|
||||
Component: "",
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Logger with the given configuration.
|
||||
//
|
||||
// logger := New(DefaultConfig())
|
||||
func New(cfg Config) *Logger {
|
||||
if cfg.Output == nil {
|
||||
cfg.Output = os.Stderr
|
||||
cfg.Output = defaultOutput
|
||||
}
|
||||
return &Logger{
|
||||
output: cfg.Output,
|
||||
|
|
@ -103,8 +111,22 @@ func (l *Logger) GetLevel() Level {
|
|||
}
|
||||
|
||||
// Fields represents key-value pairs for structured logging.
|
||||
//
|
||||
// fields := Fields{"peer_id": "node-1", "attempt": 2}
|
||||
type Fields map[string]any
|
||||
|
||||
type stderrWriter struct{}
|
||||
|
||||
func (stderrWriter) Write(p []byte) (int, error) {
|
||||
written, err := syscall.Write(syscall.Stderr, p)
|
||||
if err != nil {
|
||||
return written, core.E("logging.stderrWriter.Write", "failed to write log line", err)
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
var defaultOutput io.Writer = stderrWriter{}
|
||||
|
||||
// log writes a log message at the specified level.
|
||||
func (l *Logger) log(level Level, msg string, fields Fields) {
|
||||
l.mu.Lock()
|
||||
|
|
@ -115,7 +137,7 @@ func (l *Logger) log(level Level, msg string, fields Fields) {
|
|||
}
|
||||
|
||||
// Build the log line
|
||||
var sb strings.Builder
|
||||
sb := core.NewBuilder()
|
||||
timestamp := time.Now().Format("2006/01/02 15:04:05")
|
||||
sb.WriteString(timestamp)
|
||||
sb.WriteString(" [")
|
||||
|
|
@ -138,12 +160,12 @@ func (l *Logger) log(level Level, msg string, fields Fields) {
|
|||
sb.WriteString(" ")
|
||||
sb.WriteString(k)
|
||||
sb.WriteString("=")
|
||||
sb.WriteString(fmt.Sprintf("%v", v))
|
||||
sb.WriteString(core.Sprint(v))
|
||||
}
|
||||
}
|
||||
|
||||
sb.WriteString("\n")
|
||||
fmt.Fprint(l.output, sb.String())
|
||||
_, _ = l.output.Write([]byte(sb.String()))
|
||||
}
|
||||
|
||||
// Debug logs a debug message.
|
||||
|
|
@ -168,22 +190,22 @@ func (l *Logger) Error(msg string, fields ...Fields) {
|
|||
|
||||
// Debugf logs a formatted debug message.
|
||||
func (l *Logger) Debugf(format string, args ...any) {
|
||||
l.log(LevelDebug, fmt.Sprintf(format, args...), nil)
|
||||
l.log(LevelDebug, core.Sprintf(format, args...), nil)
|
||||
}
|
||||
|
||||
// Infof logs a formatted informational message.
|
||||
func (l *Logger) Infof(format string, args ...any) {
|
||||
l.log(LevelInfo, fmt.Sprintf(format, args...), nil)
|
||||
l.log(LevelInfo, core.Sprintf(format, args...), nil)
|
||||
}
|
||||
|
||||
// Warnf logs a formatted warning message.
|
||||
func (l *Logger) Warnf(format string, args ...any) {
|
||||
l.log(LevelWarn, fmt.Sprintf(format, args...), nil)
|
||||
l.log(LevelWarn, core.Sprintf(format, args...), nil)
|
||||
}
|
||||
|
||||
// Errorf logs a formatted error message.
|
||||
func (l *Logger) Errorf(format string, args ...any) {
|
||||
l.log(LevelError, fmt.Sprintf(format, args...), nil)
|
||||
l.log(LevelError, core.Sprintf(format, args...), nil)
|
||||
}
|
||||
|
||||
// mergeFields combines multiple Fields maps into one.
|
||||
|
|
@ -206,6 +228,8 @@ var (
|
|||
)
|
||||
|
||||
// SetGlobal sets the global logger instance.
|
||||
//
|
||||
// SetGlobal(New(DefaultConfig()))
|
||||
func SetGlobal(l *Logger) {
|
||||
globalMu.Lock()
|
||||
defer globalMu.Unlock()
|
||||
|
|
@ -213,6 +237,8 @@ func SetGlobal(l *Logger) {
|
|||
}
|
||||
|
||||
// GetGlobal returns the global logger instance.
|
||||
//
|
||||
// logger := GetGlobal()
|
||||
func GetGlobal() *Logger {
|
||||
globalMu.RLock()
|
||||
defer globalMu.RUnlock()
|
||||
|
|
@ -220,6 +246,8 @@ func GetGlobal() *Logger {
|
|||
}
|
||||
|
||||
// SetGlobalLevel sets the log level of the global logger.
|
||||
//
|
||||
// SetGlobalLevel(LevelDebug)
|
||||
func SetGlobalLevel(level Level) {
|
||||
globalMu.RLock()
|
||||
defer globalMu.RUnlock()
|
||||
|
|
@ -229,48 +257,66 @@ func SetGlobalLevel(level Level) {
|
|||
// Global convenience functions that use the global logger
|
||||
|
||||
// Debug logs a debug message using the global logger.
|
||||
//
|
||||
// Debug("connected", Fields{"peer_id": "node-1"})
|
||||
func Debug(msg string, fields ...Fields) {
|
||||
GetGlobal().Debug(msg, fields...)
|
||||
}
|
||||
|
||||
// Info logs an informational message using the global logger.
|
||||
//
|
||||
// Info("worker started", Fields{"component": "transport"})
|
||||
func Info(msg string, fields ...Fields) {
|
||||
GetGlobal().Info(msg, fields...)
|
||||
}
|
||||
|
||||
// Warn logs a warning message using the global logger.
|
||||
//
|
||||
// Warn("peer rate limited", Fields{"peer_id": "node-1"})
|
||||
func Warn(msg string, fields ...Fields) {
|
||||
GetGlobal().Warn(msg, fields...)
|
||||
}
|
||||
|
||||
// Error logs an error message using the global logger.
|
||||
//
|
||||
// Error("send failed", Fields{"peer_id": "node-1"})
|
||||
func Error(msg string, fields ...Fields) {
|
||||
GetGlobal().Error(msg, fields...)
|
||||
}
|
||||
|
||||
// Debugf logs a formatted debug message using the global logger.
|
||||
//
|
||||
// Debugf("connected peer %s", "node-1")
|
||||
func Debugf(format string, args ...any) {
|
||||
GetGlobal().Debugf(format, args...)
|
||||
}
|
||||
|
||||
// Infof logs a formatted informational message using the global logger.
|
||||
//
|
||||
// Infof("worker %s ready", "node-1")
|
||||
func Infof(format string, args ...any) {
|
||||
GetGlobal().Infof(format, args...)
|
||||
}
|
||||
|
||||
// Warnf logs a formatted warning message using the global logger.
|
||||
//
|
||||
// Warnf("peer %s is slow", "node-1")
|
||||
func Warnf(format string, args ...any) {
|
||||
GetGlobal().Warnf(format, args...)
|
||||
}
|
||||
|
||||
// Errorf logs a formatted error message using the global logger.
|
||||
//
|
||||
// Errorf("peer %s failed", "node-1")
|
||||
func Errorf(format string, args ...any) {
|
||||
GetGlobal().Errorf(format, args...)
|
||||
}
|
||||
|
||||
// ParseLevel parses a string into a log level.
|
||||
//
|
||||
// level, err := ParseLevel("warn")
|
||||
func ParseLevel(s string) (Level, error) {
|
||||
switch strings.ToUpper(s) {
|
||||
switch core.Upper(s) {
|
||||
case "DEBUG":
|
||||
return LevelDebug, nil
|
||||
case "INFO":
|
||||
|
|
@ -280,6 +326,6 @@ func ParseLevel(s string) (Level, error) {
|
|||
case "ERROR":
|
||||
return LevelError, nil
|
||||
default:
|
||||
return LevelInfo, coreerr.E("logging.ParseLevel", "unknown log level: "+s, nil)
|
||||
return LevelInfo, core.E("logging.ParseLevel", "unknown log level: "+s, nil)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,11 +2,12 @@ package logging
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func TestLoggerLevels(t *testing.T) {
|
||||
func TestLogger_Levels_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Config{
|
||||
Output: &buf,
|
||||
|
|
@ -21,29 +22,29 @@ func TestLoggerLevels(t *testing.T) {
|
|||
|
||||
// Info should appear
|
||||
logger.Info("info message")
|
||||
if !strings.Contains(buf.String(), "[INFO]") {
|
||||
if !core.Contains(buf.String(), "[INFO]") {
|
||||
t.Error("Info message should appear")
|
||||
}
|
||||
if !strings.Contains(buf.String(), "info message") {
|
||||
if !core.Contains(buf.String(), "info message") {
|
||||
t.Error("Info message content should appear")
|
||||
}
|
||||
buf.Reset()
|
||||
|
||||
// Warn should appear
|
||||
logger.Warn("warn message")
|
||||
if !strings.Contains(buf.String(), "[WARN]") {
|
||||
if !core.Contains(buf.String(), "[WARN]") {
|
||||
t.Error("Warn message should appear")
|
||||
}
|
||||
buf.Reset()
|
||||
|
||||
// Error should appear
|
||||
logger.Error("error message")
|
||||
if !strings.Contains(buf.String(), "[ERROR]") {
|
||||
if !core.Contains(buf.String(), "[ERROR]") {
|
||||
t.Error("Error message should appear")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoggerDebugLevel(t *testing.T) {
|
||||
func TestLogger_DebugLevel_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Config{
|
||||
Output: &buf,
|
||||
|
|
@ -51,12 +52,12 @@ func TestLoggerDebugLevel(t *testing.T) {
|
|||
})
|
||||
|
||||
logger.Debug("debug message")
|
||||
if !strings.Contains(buf.String(), "[DEBUG]") {
|
||||
if !core.Contains(buf.String(), "[DEBUG]") {
|
||||
t.Error("Debug message should appear at Debug level")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoggerWithFields(t *testing.T) {
|
||||
func TestLogger_WithFields_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Config{
|
||||
Output: &buf,
|
||||
|
|
@ -66,15 +67,15 @@ func TestLoggerWithFields(t *testing.T) {
|
|||
logger.Info("test message", Fields{"key": "value", "num": 42})
|
||||
output := buf.String()
|
||||
|
||||
if !strings.Contains(output, "key=value") {
|
||||
if !core.Contains(output, "key=value") {
|
||||
t.Error("Field key=value should appear")
|
||||
}
|
||||
if !strings.Contains(output, "num=42") {
|
||||
if !core.Contains(output, "num=42") {
|
||||
t.Error("Field num=42 should appear")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoggerWithComponent(t *testing.T) {
|
||||
func TestLogger_WithComponent_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Config{
|
||||
Output: &buf,
|
||||
|
|
@ -85,12 +86,12 @@ func TestLoggerWithComponent(t *testing.T) {
|
|||
logger.Info("test message")
|
||||
output := buf.String()
|
||||
|
||||
if !strings.Contains(output, "[TestComponent]") {
|
||||
if !core.Contains(output, "[TestComponent]") {
|
||||
t.Error("Component name should appear in log")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoggerDerivedComponent(t *testing.T) {
|
||||
func TestLogger_DerivedComponent_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
parent := New(Config{
|
||||
Output: &buf,
|
||||
|
|
@ -101,12 +102,12 @@ func TestLoggerDerivedComponent(t *testing.T) {
|
|||
child.Info("child message")
|
||||
output := buf.String()
|
||||
|
||||
if !strings.Contains(output, "[ChildComponent]") {
|
||||
if !core.Contains(output, "[ChildComponent]") {
|
||||
t.Error("Derived component name should appear")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoggerFormatted(t *testing.T) {
|
||||
func TestLogger_Formatted_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Config{
|
||||
Output: &buf,
|
||||
|
|
@ -116,12 +117,12 @@ func TestLoggerFormatted(t *testing.T) {
|
|||
logger.Infof("formatted %s %d", "string", 123)
|
||||
output := buf.String()
|
||||
|
||||
if !strings.Contains(output, "formatted string 123") {
|
||||
if !core.Contains(output, "formatted string 123") {
|
||||
t.Errorf("Formatted message should appear, got: %s", output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetLevel(t *testing.T) {
|
||||
func TestLogger_SetLevel_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Config{
|
||||
Output: &buf,
|
||||
|
|
@ -137,7 +138,7 @@ func TestSetLevel(t *testing.T) {
|
|||
// Change to Info level
|
||||
logger.SetLevel(LevelInfo)
|
||||
logger.Info("should appear now")
|
||||
if !strings.Contains(buf.String(), "should appear now") {
|
||||
if !core.Contains(buf.String(), "should appear now") {
|
||||
t.Error("Info should appear after level change")
|
||||
}
|
||||
|
||||
|
|
@ -147,7 +148,7 @@ func TestSetLevel(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParseLevel(t *testing.T) {
|
||||
func TestLogger_ParseLevel_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected Level
|
||||
|
|
@ -180,7 +181,7 @@ func TestParseLevel(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGlobalLogger(t *testing.T) {
|
||||
func TestLogger_GlobalLogger_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
logger := New(Config{
|
||||
Output: &buf,
|
||||
|
|
@ -190,7 +191,7 @@ func TestGlobalLogger(t *testing.T) {
|
|||
SetGlobal(logger)
|
||||
|
||||
Info("global test")
|
||||
if !strings.Contains(buf.String(), "global test") {
|
||||
if !core.Contains(buf.String(), "global test") {
|
||||
t.Error("Global logger should write message")
|
||||
}
|
||||
|
||||
|
|
@ -205,7 +206,7 @@ func TestGlobalLogger(t *testing.T) {
|
|||
SetGlobal(New(DefaultConfig()))
|
||||
}
|
||||
|
||||
func TestLevelString(t *testing.T) {
|
||||
func TestLogger_LevelString_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
level Level
|
||||
expected string
|
||||
|
|
@ -224,7 +225,7 @@ func TestLevelString(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMergeFields(t *testing.T) {
|
||||
func TestLogger_MergeFields_Good(t *testing.T) {
|
||||
// Empty fields
|
||||
result := mergeFields(nil)
|
||||
if result != nil {
|
||||
|
|
|
|||
44
node/ax_test_helpers_test.go
Normal file
44
node/ax_test_helpers_test.go
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func testJoinPath(parts ...string) string {
|
||||
return core.JoinPath(parts...)
|
||||
}
|
||||
|
||||
func testNodeManagerPaths(dir string) (string, string) {
|
||||
return testJoinPath(dir, "private.key"), testJoinPath(dir, "node.json")
|
||||
}
|
||||
|
||||
func testWriteFile(t *testing.T, path string, content []byte, mode fs.FileMode) {
|
||||
t.Helper()
|
||||
require.NoError(t, fsResultErr(localFS.WriteMode(path, string(content), mode)))
|
||||
}
|
||||
|
||||
func testReadFile(t *testing.T, path string) []byte {
|
||||
t.Helper()
|
||||
content, err := fsRead(path)
|
||||
require.NoError(t, err)
|
||||
return []byte(content)
|
||||
}
|
||||
|
||||
func testJSONMarshal(t *testing.T, v any) []byte {
|
||||
t.Helper()
|
||||
result := core.JSONMarshal(v)
|
||||
require.True(t, result.OK, "marshal should succeed: %v", result.Value)
|
||||
return result.Value.([]byte)
|
||||
}
|
||||
|
||||
func testJSONUnmarshal(t *testing.T, data []byte, target any) {
|
||||
t.Helper()
|
||||
result := core.JSONUnmarshal(data, target)
|
||||
require.True(t, result.OK, "unmarshal should succeed: %v", result.Value)
|
||||
}
|
||||
|
|
@ -2,11 +2,10 @@ package node
|
|||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"forge.lthn.ai/Snider/Borg/pkg/smsg"
|
||||
)
|
||||
|
||||
|
|
@ -16,10 +15,7 @@ func BenchmarkIdentityGenerate(b *testing.B) {
|
|||
b.ReportAllocs()
|
||||
for b.Loop() {
|
||||
dir := b.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
b.Fatalf("create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -34,10 +30,10 @@ func BenchmarkDeriveSharedSecret(b *testing.B) {
|
|||
dir1 := b.TempDir()
|
||||
dir2 := b.TempDir()
|
||||
|
||||
nm1, _ := NewNodeManagerWithPaths(filepath.Join(dir1, "k"), filepath.Join(dir1, "n"))
|
||||
nm1, _ := NewNodeManagerWithPaths(testJoinPath(dir1, "k"), testJoinPath(dir1, "n"))
|
||||
nm1.GenerateIdentity("node1", RoleDual)
|
||||
|
||||
nm2, _ := NewNodeManagerWithPaths(filepath.Join(dir2, "k"), filepath.Join(dir2, "n"))
|
||||
nm2, _ := NewNodeManagerWithPaths(testJoinPath(dir2, "k"), testJoinPath(dir2, "n"))
|
||||
nm2.GenerateIdentity("node2", RoleDual)
|
||||
|
||||
peerPubKey := nm2.GetIdentity().PublicKey
|
||||
|
|
@ -88,8 +84,8 @@ func BenchmarkMessageSerialise(b *testing.B) {
|
|||
}
|
||||
|
||||
var restored Message
|
||||
if err := json.Unmarshal(data, &restored); err != nil {
|
||||
b.Fatalf("unmarshal message: %v", err)
|
||||
if result := core.JSONUnmarshal(data, &restored); !result.OK {
|
||||
b.Fatalf("unmarshal message: %v", result.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -136,9 +132,8 @@ func BenchmarkMarshalJSON(b *testing.B) {
|
|||
b.Run("Stdlib", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for b.Loop() {
|
||||
_, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
if result := core.JSONMarshal(data); !result.OK {
|
||||
b.Fatal(result.Value)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
|
@ -150,10 +145,10 @@ func BenchmarkSMSGEncryptDecrypt(b *testing.B) {
|
|||
dir1 := b.TempDir()
|
||||
dir2 := b.TempDir()
|
||||
|
||||
nm1, _ := NewNodeManagerWithPaths(filepath.Join(dir1, "k"), filepath.Join(dir1, "n"))
|
||||
nm1, _ := NewNodeManagerWithPaths(testJoinPath(dir1, "k"), testJoinPath(dir1, "n"))
|
||||
nm1.GenerateIdentity("node1", RoleDual)
|
||||
|
||||
nm2, _ := NewNodeManagerWithPaths(filepath.Join(dir2, "k"), filepath.Join(dir2, "n"))
|
||||
nm2, _ := NewNodeManagerWithPaths(testJoinPath(dir2, "k"), testJoinPath(dir2, "n"))
|
||||
nm2.GenerateIdentity("node2", RoleDual)
|
||||
|
||||
sharedSecret, _ := nm1.DeriveSharedSecret(nm2.GetIdentity().PublicKey)
|
||||
|
|
@ -202,7 +197,7 @@ func BenchmarkChallengeSignVerify(b *testing.B) {
|
|||
// BenchmarkPeerScoring measures KD-tree rebuild and peer selection.
|
||||
func BenchmarkPeerScoring(b *testing.B) {
|
||||
dir := b.TempDir()
|
||||
reg, err := NewPeerRegistryWithPath(filepath.Join(dir, "peers.json"))
|
||||
reg, err := NewPeerRegistryWithPath(testJoinPath(dir, "peers.json"))
|
||||
if err != nil {
|
||||
b.Fatalf("create registry: %v", err)
|
||||
}
|
||||
|
|
@ -211,7 +206,7 @@ func BenchmarkPeerScoring(b *testing.B) {
|
|||
// Add 50 peers with varied metrics
|
||||
for i := range 50 {
|
||||
peer := &Peer{
|
||||
ID: filepath.Join("peer", string(rune('A'+i%26)), string(rune('0'+i/26))),
|
||||
ID: testJoinPath("peer", string(rune('A'+i%26)), string(rune('0'+i/26))),
|
||||
Name: "peer",
|
||||
PingMS: float64(i*10 + 5),
|
||||
Hops: i%5 + 1,
|
||||
|
|
|
|||
|
|
@ -2,8 +2,9 @@ package node
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// bufferPool provides reusable byte buffers for JSON encoding.
|
||||
|
|
@ -29,27 +30,24 @@ func putBuffer(buf *bytes.Buffer) {
|
|||
}
|
||||
}
|
||||
|
||||
// MarshalJSON encodes a value to JSON using a pooled buffer.
|
||||
// MarshalJSON encodes a value to JSON using Core's JSON primitive and then
|
||||
// restores the historical no-EscapeHTML behaviour expected by the node package.
|
||||
// Returns a copy of the encoded bytes (safe to use after the function returns).
|
||||
//
|
||||
// data, err := MarshalJSON(v)
|
||||
func MarshalJSON(v any) ([]byte, error) {
|
||||
buf := getBuffer()
|
||||
defer putBuffer(buf)
|
||||
|
||||
enc := json.NewEncoder(buf)
|
||||
// Don't escape HTML characters (matches json.Marshal behavior for these use cases)
|
||||
enc.SetEscapeHTML(false)
|
||||
if err := enc.Encode(v); err != nil {
|
||||
return nil, err
|
||||
encoded := core.JSONMarshal(v)
|
||||
if !encoded.OK {
|
||||
return nil, encoded.Value.(error)
|
||||
}
|
||||
data := encoded.Value.([]byte)
|
||||
|
||||
// json.Encoder.Encode adds a newline; remove it to match json.Marshal
|
||||
data := buf.Bytes()
|
||||
if len(data) > 0 && data[len(data)-1] == '\n' {
|
||||
data = data[:len(data)-1]
|
||||
}
|
||||
data = bytes.ReplaceAll(data, []byte(`\u003c`), []byte("<"))
|
||||
data = bytes.ReplaceAll(data, []byte(`\u003e`), []byte(">"))
|
||||
data = bytes.ReplaceAll(data, []byte(`\u0026`), []byte("&"))
|
||||
|
||||
// Return a copy since the buffer will be reused
|
||||
result := make([]byte, len(data))
|
||||
copy(result, data)
|
||||
return result, nil
|
||||
// Return a copy since callers may retain the slice after subsequent calls.
|
||||
out := make([]byte, len(data))
|
||||
copy(out, data)
|
||||
return out, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,17 +2,17 @@ package node
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// --- bufpool.go tests ---
|
||||
|
||||
func TestGetBuffer_ReturnsResetBuffer(t *testing.T) {
|
||||
func TestBufpool_GetBuffer_ReturnsResetBuffer_Good(t *testing.T) {
|
||||
t.Run("buffer is initially empty", func(t *testing.T) {
|
||||
buf := getBuffer()
|
||||
defer putBuffer(buf)
|
||||
|
|
@ -33,7 +33,7 @@ func TestGetBuffer_ReturnsResetBuffer(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestPutBuffer_DiscardsOversizedBuffers(t *testing.T) {
|
||||
func TestBufpool_PutBuffer_DiscardsOversizedBuffers_Good(t *testing.T) {
|
||||
t.Run("buffer at 64KB limit is pooled", func(t *testing.T) {
|
||||
buf := getBuffer()
|
||||
buf.Grow(65536)
|
||||
|
|
@ -59,7 +59,7 @@ func TestPutBuffer_DiscardsOversizedBuffers(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestBufPool_BufferIndependence(t *testing.T) {
|
||||
func TestBufpool_BufPool_BufferIndependence_Good(t *testing.T) {
|
||||
buf1 := getBuffer()
|
||||
buf2 := getBuffer()
|
||||
|
||||
|
|
@ -77,7 +77,7 @@ func TestBufPool_BufferIndependence(t *testing.T) {
|
|||
putBuffer(buf2)
|
||||
}
|
||||
|
||||
func TestMarshalJSON_BasicTypes(t *testing.T) {
|
||||
func TestBufpool_MarshalJSON_BasicTypes_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input any
|
||||
|
|
@ -121,8 +121,7 @@ func TestMarshalJSON_BasicTypes(t *testing.T) {
|
|||
got, err := MarshalJSON(tt.input)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected, err := json.Marshal(tt.input)
|
||||
require.NoError(t, err)
|
||||
expected := testJSONMarshal(t, tt.input)
|
||||
|
||||
assert.JSONEq(t, string(expected), string(got),
|
||||
"MarshalJSON output should match json.Marshal")
|
||||
|
|
@ -130,7 +129,7 @@ func TestMarshalJSON_BasicTypes(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMarshalJSON_NoTrailingNewline(t *testing.T) {
|
||||
func TestBufpool_MarshalJSON_NoTrailingNewline_Good(t *testing.T) {
|
||||
data, err := MarshalJSON(map[string]string{"key": "value"})
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
@ -138,7 +137,7 @@ func TestMarshalJSON_NoTrailingNewline(t *testing.T) {
|
|||
"MarshalJSON should strip the trailing newline added by json.Encoder")
|
||||
}
|
||||
|
||||
func TestMarshalJSON_HTMLEscaping(t *testing.T) {
|
||||
func TestBufpool_MarshalJSON_HTMLEscaping_Good(t *testing.T) {
|
||||
input := map[string]string{"html": "<script>alert('xss')</script>"}
|
||||
data, err := MarshalJSON(input)
|
||||
require.NoError(t, err)
|
||||
|
|
@ -147,7 +146,7 @@ func TestMarshalJSON_HTMLEscaping(t *testing.T) {
|
|||
"HTML characters should not be escaped when EscapeHTML is false")
|
||||
}
|
||||
|
||||
func TestMarshalJSON_ReturnsCopy(t *testing.T) {
|
||||
func TestBufpool_MarshalJSON_ReturnsCopy_Good(t *testing.T) {
|
||||
data1, err := MarshalJSON("first")
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
@ -162,7 +161,7 @@ func TestMarshalJSON_ReturnsCopy(t *testing.T) {
|
|||
"returned slice should be a copy and not be mutated by subsequent calls")
|
||||
}
|
||||
|
||||
func TestMarshalJSON_ReturnsIndependentCopy(t *testing.T) {
|
||||
func TestBufpool_MarshalJSON_ReturnsIndependentCopy_Good(t *testing.T) {
|
||||
data1, err := MarshalJSON(map[string]string{"first": "call"})
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
@ -175,13 +174,13 @@ func TestMarshalJSON_ReturnsIndependentCopy(t *testing.T) {
|
|||
"second result should contain its own data")
|
||||
}
|
||||
|
||||
func TestMarshalJSON_InvalidValue(t *testing.T) {
|
||||
func TestBufpool_MarshalJSON_InvalidValue_Bad(t *testing.T) {
|
||||
ch := make(chan int)
|
||||
_, err := MarshalJSON(ch)
|
||||
assert.Error(t, err, "marshalling an unserialisable type should return an error")
|
||||
}
|
||||
|
||||
func TestBufferPool_ConcurrentAccess(t *testing.T) {
|
||||
func TestBufpool_BufferPool_ConcurrentAccess_Ugly(t *testing.T) {
|
||||
const goroutines = 100
|
||||
const iterations = 50
|
||||
|
||||
|
|
@ -206,7 +205,7 @@ func TestBufferPool_ConcurrentAccess(t *testing.T) {
|
|||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestMarshalJSON_ConcurrentSafety(t *testing.T) {
|
||||
func TestBufpool_MarshalJSON_ConcurrentSafety_Ugly(t *testing.T) {
|
||||
const goroutines = 50
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
|
@ -223,8 +222,8 @@ func TestMarshalJSON_ConcurrentSafety(t *testing.T) {
|
|||
|
||||
if err == nil {
|
||||
var parsed PingPayload
|
||||
err = json.Unmarshal(data, &parsed)
|
||||
if err != nil {
|
||||
if result := core.JSONUnmarshal(data, &parsed); !result.OK {
|
||||
err = result.Value.(error)
|
||||
errs[idx] = err
|
||||
return
|
||||
}
|
||||
|
|
@ -242,7 +241,7 @@ func TestMarshalJSON_ConcurrentSafety(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestBufferPool_ReuseAfterReset(t *testing.T) {
|
||||
func TestBufpool_BufferPool_ReuseAfterReset_Ugly(t *testing.T) {
|
||||
buf := getBuffer()
|
||||
buf.Write(make([]byte, 4096))
|
||||
putBuffer(buf)
|
||||
|
|
|
|||
158
node/bundle.go
158
node/bundle.go
|
|
@ -5,29 +5,32 @@ import (
|
|||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"io/fs"
|
||||
|
||||
coreio "dappco.re/go/core/io"
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
|
||||
"forge.lthn.ai/Snider/Borg/pkg/datanode"
|
||||
"forge.lthn.ai/Snider/Borg/pkg/tim"
|
||||
)
|
||||
|
||||
// BundleType defines the type of deployment bundle.
|
||||
//
|
||||
// bundleType := BundleProfile
|
||||
type BundleType string
|
||||
|
||||
const (
|
||||
BundleProfile BundleType = "profile" // Just config/profile JSON
|
||||
BundleMiner BundleType = "miner" // Miner binary + config
|
||||
BundleFull BundleType = "full" // Everything (miner + profiles + config)
|
||||
// BundleProfile contains a profile JSON payload.
|
||||
BundleProfile BundleType = "profile"
|
||||
// BundleMiner contains a miner binary and optional profile data.
|
||||
BundleMiner BundleType = "miner"
|
||||
// BundleFull contains the full deployment payload.
|
||||
BundleFull BundleType = "full"
|
||||
)
|
||||
|
||||
// Bundle represents a deployment bundle for P2P transfer.
|
||||
//
|
||||
// bundle := &Bundle{Type: BundleProfile, Name: "xmrig", Data: []byte("{}")}
|
||||
type Bundle struct {
|
||||
Type BundleType `json:"type"`
|
||||
Name string `json:"name"`
|
||||
|
|
@ -36,6 +39,8 @@ type Bundle struct {
|
|||
}
|
||||
|
||||
// BundleManifest describes the contents of a bundle.
|
||||
//
|
||||
// manifest := BundleManifest{Name: "xmrig", Type: BundleMiner}
|
||||
type BundleManifest struct {
|
||||
Type BundleType `json:"type"`
|
||||
Name string `json:"name"`
|
||||
|
|
@ -46,18 +51,20 @@ type BundleManifest struct {
|
|||
}
|
||||
|
||||
// CreateProfileBundle creates an encrypted bundle containing a mining profile.
|
||||
//
|
||||
// bundle, err := CreateProfileBundle(profileJSON, "xmrig-default", "password")
|
||||
func CreateProfileBundle(profileJSON []byte, name string, password string) (*Bundle, error) {
|
||||
// Create a TIM with just the profile config
|
||||
t, err := tim.New()
|
||||
if err != nil {
|
||||
return nil, coreerr.E("CreateProfileBundle", "failed to create TIM", err)
|
||||
return nil, core.E("CreateProfileBundle", "failed to create TIM", err)
|
||||
}
|
||||
t.Config = profileJSON
|
||||
|
||||
// Encrypt to STIM format
|
||||
stimData, err := t.ToSigil(password)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("CreateProfileBundle", "failed to encrypt bundle", err)
|
||||
return nil, core.E("CreateProfileBundle", "failed to encrypt bundle", err)
|
||||
}
|
||||
|
||||
// Calculate checksum
|
||||
|
|
@ -72,6 +79,8 @@ func CreateProfileBundle(profileJSON []byte, name string, password string) (*Bun
|
|||
}
|
||||
|
||||
// CreateProfileBundleUnencrypted creates a plain JSON bundle (for testing or trusted networks).
|
||||
//
|
||||
// bundle, err := CreateProfileBundleUnencrypted(profileJSON, "xmrig-default")
|
||||
func CreateProfileBundleUnencrypted(profileJSON []byte, name string) (*Bundle, error) {
|
||||
checksum := calculateChecksum(profileJSON)
|
||||
|
||||
|
|
@ -84,32 +93,34 @@ func CreateProfileBundleUnencrypted(profileJSON []byte, name string) (*Bundle, e
|
|||
}
|
||||
|
||||
// CreateMinerBundle creates an encrypted bundle containing a miner binary and optional profile.
|
||||
//
|
||||
// bundle, err := CreateMinerBundle("/srv/miners/xmrig", profileJSON, "xmrig", "password")
|
||||
func CreateMinerBundle(minerPath string, profileJSON []byte, name string, password string) (*Bundle, error) {
|
||||
// Read miner binary
|
||||
minerContent, err := coreio.Local.Read(minerPath)
|
||||
minerContent, err := fsRead(minerPath)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("CreateMinerBundle", "failed to read miner binary", err)
|
||||
return nil, core.E("CreateMinerBundle", "failed to read miner binary", err)
|
||||
}
|
||||
minerData := []byte(minerContent)
|
||||
|
||||
// Create a tarball with the miner binary
|
||||
tarData, err := createTarball(map[string][]byte{
|
||||
filepath.Base(minerPath): minerData,
|
||||
core.PathBase(minerPath): minerData,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, coreerr.E("CreateMinerBundle", "failed to create tarball", err)
|
||||
return nil, core.E("CreateMinerBundle", "failed to create tarball", err)
|
||||
}
|
||||
|
||||
// Create DataNode from tarball
|
||||
dn, err := datanode.FromTar(tarData)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("CreateMinerBundle", "failed to create datanode", err)
|
||||
return nil, core.E("CreateMinerBundle", "failed to create datanode", err)
|
||||
}
|
||||
|
||||
// Create TIM from DataNode
|
||||
t, err := tim.FromDataNode(dn)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("CreateMinerBundle", "failed to create TIM", err)
|
||||
return nil, core.E("CreateMinerBundle", "failed to create TIM", err)
|
||||
}
|
||||
|
||||
// Set profile as config if provided
|
||||
|
|
@ -120,7 +131,7 @@ func CreateMinerBundle(minerPath string, profileJSON []byte, name string, passwo
|
|||
// Encrypt to STIM format
|
||||
stimData, err := t.ToSigil(password)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("CreateMinerBundle", "failed to encrypt bundle", err)
|
||||
return nil, core.E("CreateMinerBundle", "failed to encrypt bundle", err)
|
||||
}
|
||||
|
||||
checksum := calculateChecksum(stimData)
|
||||
|
|
@ -134,10 +145,12 @@ func CreateMinerBundle(minerPath string, profileJSON []byte, name string, passwo
|
|||
}
|
||||
|
||||
// ExtractProfileBundle decrypts and extracts a profile bundle.
|
||||
//
|
||||
// profileJSON, err := ExtractProfileBundle(bundle, "password")
|
||||
func ExtractProfileBundle(bundle *Bundle, password string) ([]byte, error) {
|
||||
// Verify checksum first
|
||||
if calculateChecksum(bundle.Data) != bundle.Checksum {
|
||||
return nil, coreerr.E("ExtractProfileBundle", "checksum mismatch - bundle may be corrupted", nil)
|
||||
return nil, core.E("ExtractProfileBundle", "checksum mismatch - bundle may be corrupted", nil)
|
||||
}
|
||||
|
||||
// If it's unencrypted JSON, just return it
|
||||
|
|
@ -148,41 +161,45 @@ func ExtractProfileBundle(bundle *Bundle, password string) ([]byte, error) {
|
|||
// Decrypt STIM format
|
||||
t, err := tim.FromSigil(bundle.Data, password)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("ExtractProfileBundle", "failed to decrypt bundle", err)
|
||||
return nil, core.E("ExtractProfileBundle", "failed to decrypt bundle", err)
|
||||
}
|
||||
|
||||
return t.Config, nil
|
||||
}
|
||||
|
||||
// ExtractMinerBundle decrypts and extracts a miner bundle, returning the miner path and profile.
|
||||
//
|
||||
// minerPath, profileJSON, err := ExtractMinerBundle(bundle, "password", "/srv/miners")
|
||||
func ExtractMinerBundle(bundle *Bundle, password string, destDir string) (string, []byte, error) {
|
||||
// Verify checksum
|
||||
if calculateChecksum(bundle.Data) != bundle.Checksum {
|
||||
return "", nil, coreerr.E("ExtractMinerBundle", "checksum mismatch - bundle may be corrupted", nil)
|
||||
return "", nil, core.E("ExtractMinerBundle", "checksum mismatch - bundle may be corrupted", nil)
|
||||
}
|
||||
|
||||
// Decrypt STIM format
|
||||
t, err := tim.FromSigil(bundle.Data, password)
|
||||
if err != nil {
|
||||
return "", nil, coreerr.E("ExtractMinerBundle", "failed to decrypt bundle", err)
|
||||
return "", nil, core.E("ExtractMinerBundle", "failed to decrypt bundle", err)
|
||||
}
|
||||
|
||||
// Convert rootfs to tarball and extract
|
||||
tarData, err := t.RootFS.ToTar()
|
||||
if err != nil {
|
||||
return "", nil, coreerr.E("ExtractMinerBundle", "failed to convert rootfs to tar", err)
|
||||
return "", nil, core.E("ExtractMinerBundle", "failed to convert rootfs to tar", err)
|
||||
}
|
||||
|
||||
// Extract tarball to destination
|
||||
minerPath, err := extractTarball(tarData, destDir)
|
||||
if err != nil {
|
||||
return "", nil, coreerr.E("ExtractMinerBundle", "failed to extract tarball", err)
|
||||
return "", nil, core.E("ExtractMinerBundle", "failed to extract tarball", err)
|
||||
}
|
||||
|
||||
return minerPath, t.Config, nil
|
||||
}
|
||||
|
||||
// VerifyBundle checks if a bundle's checksum is valid.
|
||||
//
|
||||
// ok := VerifyBundle(bundle)
|
||||
func VerifyBundle(bundle *Bundle) bool {
|
||||
return calculateChecksum(bundle.Data) == bundle.Checksum
|
||||
}
|
||||
|
|
@ -212,7 +229,7 @@ func createTarball(files map[string][]byte) ([]byte, error) {
|
|||
|
||||
for name, content := range files {
|
||||
// Create parent directories if needed
|
||||
dir := filepath.Dir(name)
|
||||
dir := core.PathDir(name)
|
||||
if dir != "." && !dirs[dir] {
|
||||
hdr := &tar.Header{
|
||||
Name: dir + "/",
|
||||
|
|
@ -227,7 +244,7 @@ func createTarball(files map[string][]byte) ([]byte, error) {
|
|||
|
||||
// Determine file mode (executable for binaries in miners/)
|
||||
mode := int64(0644)
|
||||
if filepath.Dir(name) == "miners" || !isJSON(content) {
|
||||
if core.PathDir(name) == "miners" || !isJSON(content) {
|
||||
mode = 0755
|
||||
}
|
||||
|
||||
|
|
@ -254,13 +271,22 @@ func createTarball(files map[string][]byte) ([]byte, error) {
|
|||
// extractTarball extracts a tar archive to a directory, returns first executable found.
|
||||
func extractTarball(tarData []byte, destDir string) (string, error) {
|
||||
// Ensure destDir is an absolute, clean path for security checks
|
||||
absDestDir, err := filepath.Abs(destDir)
|
||||
if err != nil {
|
||||
return "", coreerr.E("extractTarball", "failed to resolve destination directory", err)
|
||||
absDestDir := destDir
|
||||
pathSeparator := core.Env("DS")
|
||||
if pathSeparator == "" {
|
||||
pathSeparator = "/"
|
||||
}
|
||||
if !core.PathIsAbs(absDestDir) {
|
||||
cwd := core.Env("DIR_CWD")
|
||||
if cwd == "" {
|
||||
return "", core.E("extractTarball", "failed to resolve destination directory", nil)
|
||||
}
|
||||
absDestDir = core.CleanPath(core.Concat(cwd, pathSeparator, absDestDir), pathSeparator)
|
||||
} else {
|
||||
absDestDir = core.CleanPath(absDestDir, pathSeparator)
|
||||
}
|
||||
absDestDir = filepath.Clean(absDestDir)
|
||||
|
||||
if err := coreio.Local.EnsureDir(absDestDir); err != nil {
|
||||
if err := fsEnsureDir(absDestDir); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
|
@ -277,57 +303,54 @@ func extractTarball(tarData []byte, destDir string) (string, error) {
|
|||
}
|
||||
|
||||
// Security: Sanitize the tar entry name to prevent path traversal (Zip Slip)
|
||||
cleanName := filepath.Clean(hdr.Name)
|
||||
cleanName := core.CleanPath(hdr.Name, "/")
|
||||
|
||||
// Reject absolute paths
|
||||
if filepath.IsAbs(cleanName) {
|
||||
return "", coreerr.E("extractTarball", "invalid tar entry: absolute path not allowed: "+hdr.Name, nil)
|
||||
if core.PathIsAbs(cleanName) {
|
||||
return "", core.E("extractTarball", "invalid tar entry: absolute path not allowed: "+hdr.Name, nil)
|
||||
}
|
||||
|
||||
// Reject paths that escape the destination directory
|
||||
if strings.HasPrefix(cleanName, ".."+string(os.PathSeparator)) || cleanName == ".." {
|
||||
return "", coreerr.E("extractTarball", "invalid tar entry: path traversal attempt: "+hdr.Name, nil)
|
||||
if core.HasPrefix(cleanName, "../") || cleanName == ".." {
|
||||
return "", core.E("extractTarball", "invalid tar entry: path traversal attempt: "+hdr.Name, nil)
|
||||
}
|
||||
|
||||
// Build the full path and verify it's within destDir
|
||||
fullPath := filepath.Join(absDestDir, cleanName)
|
||||
fullPath = filepath.Clean(fullPath)
|
||||
fullPath := core.CleanPath(core.Concat(absDestDir, pathSeparator, cleanName), pathSeparator)
|
||||
|
||||
// Final security check: ensure the path is still within destDir
|
||||
if !strings.HasPrefix(fullPath, absDestDir+string(os.PathSeparator)) && fullPath != absDestDir {
|
||||
return "", coreerr.E("extractTarball", "invalid tar entry: path escape attempt: "+hdr.Name, nil)
|
||||
allowedPrefix := core.Concat(absDestDir, pathSeparator)
|
||||
if absDestDir == pathSeparator {
|
||||
allowedPrefix = absDestDir
|
||||
}
|
||||
if !core.HasPrefix(fullPath, allowedPrefix) && fullPath != absDestDir {
|
||||
return "", core.E("extractTarball", "invalid tar entry: path escape attempt: "+hdr.Name, nil)
|
||||
}
|
||||
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeDir:
|
||||
if err := coreio.Local.EnsureDir(fullPath); err != nil {
|
||||
if err := fsEnsureDir(fullPath); err != nil {
|
||||
return "", err
|
||||
}
|
||||
case tar.TypeReg:
|
||||
// Ensure parent directory exists
|
||||
if err := coreio.Local.EnsureDir(filepath.Dir(fullPath)); err != nil {
|
||||
if err := fsEnsureDir(core.PathDir(fullPath)); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// os.OpenFile is used deliberately here instead of coreio.Local.Create/Write
|
||||
// because coreio hardcodes file permissions (0644) and we need to preserve
|
||||
// the tar header's mode bits — executable binaries require 0755.
|
||||
f, err := os.OpenFile(fullPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(hdr.Mode))
|
||||
if err != nil {
|
||||
return "", coreerr.E("extractTarball", "failed to create file "+hdr.Name, err)
|
||||
}
|
||||
|
||||
// Limit file size to prevent decompression bombs (100MB max per file)
|
||||
const maxFileSize int64 = 100 * 1024 * 1024
|
||||
limitedReader := io.LimitReader(tr, maxFileSize+1)
|
||||
written, err := io.Copy(f, limitedReader)
|
||||
f.Close()
|
||||
content, err := io.ReadAll(limitedReader)
|
||||
if err != nil {
|
||||
return "", coreerr.E("extractTarball", "failed to write file "+hdr.Name, err)
|
||||
return "", core.E("extractTarball", "failed to write file "+hdr.Name, err)
|
||||
}
|
||||
if written > maxFileSize {
|
||||
coreio.Local.Delete(fullPath)
|
||||
return "", coreerr.E("extractTarball", "file "+hdr.Name+" exceeds maximum size", nil)
|
||||
if int64(len(content)) > maxFileSize {
|
||||
fsDelete(fullPath)
|
||||
return "", core.E("extractTarball", "file "+hdr.Name+" exceeds maximum size", nil)
|
||||
}
|
||||
if err := fsResultErr(localFS.WriteMode(fullPath, string(content), fs.FileMode(hdr.Mode))); err != nil {
|
||||
return "", core.E("extractTarball", "failed to create file "+hdr.Name, err)
|
||||
}
|
||||
|
||||
// Track first executable
|
||||
|
|
@ -345,17 +368,30 @@ func extractTarball(tarData []byte, destDir string) (string, error) {
|
|||
}
|
||||
|
||||
// StreamBundle writes a bundle to a writer (for large transfers).
|
||||
//
|
||||
// err := StreamBundle(bundle, writer)
|
||||
func StreamBundle(bundle *Bundle, w io.Writer) error {
|
||||
encoder := json.NewEncoder(w)
|
||||
return encoder.Encode(bundle)
|
||||
result := core.JSONMarshal(bundle)
|
||||
if !result.OK {
|
||||
return result.Value.(error)
|
||||
}
|
||||
_, err := w.Write(result.Value.([]byte))
|
||||
return err
|
||||
}
|
||||
|
||||
// ReadBundle reads a bundle from a reader.
|
||||
//
|
||||
// bundle, err := ReadBundle(reader)
|
||||
func ReadBundle(r io.Reader) (*Bundle, error) {
|
||||
var bundle Bundle
|
||||
decoder := json.NewDecoder(r)
|
||||
if err := decoder.Decode(&bundle); err != nil {
|
||||
var buf bytes.Buffer
|
||||
if _, err := io.Copy(&buf, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var bundle Bundle
|
||||
result := core.JSONUnmarshal(buf.Bytes(), &bundle)
|
||||
if !result.OK {
|
||||
return nil, result.Value.(error)
|
||||
}
|
||||
return &bundle, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,12 +3,10 @@ package node
|
|||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCreateProfileBundleUnencrypted(t *testing.T) {
|
||||
func TestBundle_CreateProfileBundleUnencrypted_Good(t *testing.T) {
|
||||
profileJSON := []byte(`{"name":"test-profile","minerType":"xmrig","config":{}}`)
|
||||
|
||||
bundle, err := CreateProfileBundleUnencrypted(profileJSON, "test-profile")
|
||||
|
|
@ -33,7 +31,7 @@ func TestCreateProfileBundleUnencrypted(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestVerifyBundle(t *testing.T) {
|
||||
func TestBundle_VerifyBundle_Good(t *testing.T) {
|
||||
t.Run("ValidChecksum", func(t *testing.T) {
|
||||
bundle, _ := CreateProfileBundleUnencrypted([]byte(`{"test":"data"}`), "test")
|
||||
|
||||
|
|
@ -61,7 +59,7 @@ func TestVerifyBundle(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestCreateProfileBundle(t *testing.T) {
|
||||
func TestBundle_CreateProfileBundle_Good(t *testing.T) {
|
||||
profileJSON := []byte(`{"name":"encrypted-profile","minerType":"xmrig"}`)
|
||||
password := "test-password-123"
|
||||
|
||||
|
|
@ -90,7 +88,7 @@ func TestCreateProfileBundle(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestExtractProfileBundle(t *testing.T) {
|
||||
func TestBundle_ExtractProfileBundle_Good(t *testing.T) {
|
||||
t.Run("UnencryptedBundle", func(t *testing.T) {
|
||||
originalJSON := []byte(`{"name":"plain","config":{}}`)
|
||||
bundle, _ := CreateProfileBundleUnencrypted(originalJSON, "plain")
|
||||
|
|
@ -142,7 +140,7 @@ func TestExtractProfileBundle(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestTarballFunctions(t *testing.T) {
|
||||
func TestBundle_TarballFunctions_Good(t *testing.T) {
|
||||
t.Run("CreateAndExtractTarball", func(t *testing.T) {
|
||||
files := map[string][]byte{
|
||||
"file1.txt": []byte("content of file 1"),
|
||||
|
|
@ -160,8 +158,7 @@ func TestTarballFunctions(t *testing.T) {
|
|||
}
|
||||
|
||||
// Extract to temp directory
|
||||
tmpDir, _ := os.MkdirTemp("", "tarball-test")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
firstExec, err := extractTarball(tarData, tmpDir)
|
||||
if err != nil {
|
||||
|
|
@ -170,12 +167,7 @@ func TestTarballFunctions(t *testing.T) {
|
|||
|
||||
// Check files exist
|
||||
for name, content := range files {
|
||||
path := filepath.Join(tmpDir, name)
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Errorf("failed to read extracted file %s: %v", name, err)
|
||||
continue
|
||||
}
|
||||
data := testReadFile(t, testJoinPath(tmpDir, name))
|
||||
|
||||
if !bytes.Equal(data, content) {
|
||||
t.Errorf("content mismatch for %s", name)
|
||||
|
|
@ -189,7 +181,7 @@ func TestTarballFunctions(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestStreamAndReadBundle(t *testing.T) {
|
||||
func TestBundle_StreamAndReadBundle_Good(t *testing.T) {
|
||||
original, _ := CreateProfileBundleUnencrypted([]byte(`{"streaming":"test"}`), "stream-test")
|
||||
|
||||
// Stream to buffer
|
||||
|
|
@ -218,7 +210,7 @@ func TestStreamAndReadBundle(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCalculateChecksum(t *testing.T) {
|
||||
func TestBundle_CalculateChecksum_Good(t *testing.T) {
|
||||
t.Run("Deterministic", func(t *testing.T) {
|
||||
data := []byte("test data for checksum")
|
||||
|
||||
|
|
@ -256,7 +248,7 @@ func TestCalculateChecksum(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestIsJSON(t *testing.T) {
|
||||
func TestBundle_IsJSON_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
data []byte
|
||||
expected bool
|
||||
|
|
@ -279,7 +271,7 @@ func TestIsJSON(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestBundleTypes(t *testing.T) {
|
||||
func TestBundle_Types_Good(t *testing.T) {
|
||||
types := []BundleType{
|
||||
BundleProfile,
|
||||
BundleMiner,
|
||||
|
|
@ -295,16 +287,11 @@ func TestBundleTypes(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCreateMinerBundle(t *testing.T) {
|
||||
func TestBundle_CreateMinerBundle_Good(t *testing.T) {
|
||||
// Create a temp "miner binary"
|
||||
tmpDir, _ := os.MkdirTemp("", "miner-bundle-test")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
minerPath := filepath.Join(tmpDir, "test-miner")
|
||||
err := os.WriteFile(minerPath, []byte("fake miner binary content"), 0755)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create test miner: %v", err)
|
||||
}
|
||||
tmpDir := t.TempDir()
|
||||
minerPath := testJoinPath(tmpDir, "test-miner")
|
||||
testWriteFile(t, minerPath, []byte("fake miner binary content"), 0o755)
|
||||
|
||||
profileJSON := []byte(`{"profile":"data"}`)
|
||||
password := "miner-password"
|
||||
|
|
@ -323,8 +310,7 @@ func TestCreateMinerBundle(t *testing.T) {
|
|||
}
|
||||
|
||||
// Extract and verify
|
||||
extractDir, _ := os.MkdirTemp("", "miner-extract-test")
|
||||
defer os.RemoveAll(extractDir)
|
||||
extractDir := t.TempDir()
|
||||
|
||||
extractedPath, extractedProfile, err := ExtractMinerBundle(bundle, password, extractDir)
|
||||
if err != nil {
|
||||
|
|
@ -341,10 +327,7 @@ func TestCreateMinerBundle(t *testing.T) {
|
|||
|
||||
// If we got an extracted path, verify its content
|
||||
if extractedPath != "" {
|
||||
minerData, err := os.ReadFile(extractedPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read extracted miner: %v", err)
|
||||
}
|
||||
minerData := testReadFile(t, extractedPath)
|
||||
|
||||
if string(minerData) != "fake miner binary content" {
|
||||
t.Error("miner content mismatch")
|
||||
|
|
@ -354,7 +337,7 @@ func TestCreateMinerBundle(t *testing.T) {
|
|||
|
||||
// --- Additional coverage tests for bundle.go ---
|
||||
|
||||
func TestExtractTarball_PathTraversal(t *testing.T) {
|
||||
func TestBundle_ExtractTarball_PathTraversal_Bad(t *testing.T) {
|
||||
t.Run("AbsolutePath", func(t *testing.T) {
|
||||
// Create a tarball with an absolute path entry
|
||||
tarData, err := createTarballWithCustomName("/etc/passwd", []byte("malicious"))
|
||||
|
|
@ -446,8 +429,8 @@ func TestExtractTarball_PathTraversal(t *testing.T) {
|
|||
}
|
||||
|
||||
// Verify symlink was not created
|
||||
linkPath := filepath.Join(tmpDir, "link")
|
||||
if _, statErr := os.Lstat(linkPath); !os.IsNotExist(statErr) {
|
||||
linkPath := testJoinPath(tmpDir, "link")
|
||||
if fsExists(linkPath) {
|
||||
t.Error("symlink should not be created")
|
||||
}
|
||||
})
|
||||
|
|
@ -481,10 +464,7 @@ func TestExtractTarball_PathTraversal(t *testing.T) {
|
|||
}
|
||||
|
||||
// Verify directory and file exist
|
||||
data, err := os.ReadFile(filepath.Join(tmpDir, "mydir", "file.txt"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read extracted file: %v", err)
|
||||
}
|
||||
data := testReadFile(t, testJoinPath(tmpDir, "mydir", "file.txt"))
|
||||
if !bytes.Equal(data, content) {
|
||||
t.Error("content mismatch")
|
||||
}
|
||||
|
|
@ -531,7 +511,7 @@ func createTarballWithSymlink(name, target string) ([]byte, error) {
|
|||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func TestExtractMinerBundle_ChecksumMismatch(t *testing.T) {
|
||||
func TestBundle_ExtractMinerBundle_ChecksumMismatch_Bad(t *testing.T) {
|
||||
bundle := &Bundle{
|
||||
Type: BundleMiner,
|
||||
Name: "bad-bundle",
|
||||
|
|
@ -545,17 +525,17 @@ func TestExtractMinerBundle_ChecksumMismatch(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCreateMinerBundle_NonExistentFile(t *testing.T) {
|
||||
func TestBundle_CreateMinerBundle_NonExistentFile_Bad(t *testing.T) {
|
||||
_, err := CreateMinerBundle("/non/existent/miner", nil, "test", "password")
|
||||
if err == nil {
|
||||
t.Error("expected error for non-existent miner file")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateMinerBundle_NilProfile(t *testing.T) {
|
||||
func TestBundle_CreateMinerBundle_NilProfile_Ugly(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
minerPath := filepath.Join(tmpDir, "miner")
|
||||
os.WriteFile(minerPath, []byte("binary"), 0755)
|
||||
minerPath := testJoinPath(tmpDir, "miner")
|
||||
testWriteFile(t, minerPath, []byte("binary"), 0o755)
|
||||
|
||||
bundle, err := CreateMinerBundle(minerPath, nil, "nil-profile", "pass")
|
||||
if err != nil {
|
||||
|
|
@ -566,7 +546,7 @@ func TestCreateMinerBundle_NilProfile(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestReadBundle_InvalidJSON(t *testing.T) {
|
||||
func TestBundle_ReadBundle_InvalidJSON_Bad(t *testing.T) {
|
||||
reader := bytes.NewReader([]byte("not json"))
|
||||
_, err := ReadBundle(reader)
|
||||
if err == nil {
|
||||
|
|
@ -574,7 +554,7 @@ func TestReadBundle_InvalidJSON(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestStreamBundle_EmptyBundle(t *testing.T) {
|
||||
func TestBundle_StreamBundle_EmptyBundle_Ugly(t *testing.T) {
|
||||
bundle := &Bundle{
|
||||
Type: BundleProfile,
|
||||
Name: "empty",
|
||||
|
|
@ -598,7 +578,7 @@ func TestStreamBundle_EmptyBundle(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCreateTarball_MultipleDirs(t *testing.T) {
|
||||
func TestBundle_CreateTarball_MultipleDirs_Good(t *testing.T) {
|
||||
files := map[string][]byte{
|
||||
"dir1/file1.txt": []byte("content1"),
|
||||
"dir2/file2.txt": []byte("content2"),
|
||||
|
|
@ -616,11 +596,7 @@ func TestCreateTarball_MultipleDirs(t *testing.T) {
|
|||
}
|
||||
|
||||
for name, content := range files {
|
||||
data, err := os.ReadFile(filepath.Join(tmpDir, name))
|
||||
if err != nil {
|
||||
t.Errorf("failed to read %s: %v", name, err)
|
||||
continue
|
||||
}
|
||||
data := testReadFile(t, testJoinPath(tmpDir, name))
|
||||
if !bytes.Equal(data, content) {
|
||||
t.Errorf("content mismatch for %s", name)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,16 +2,17 @@ package node
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
|
||||
"dappco.re/go/core/p2p/logging"
|
||||
)
|
||||
|
||||
// Controller manages remote peer operations from a controller node.
|
||||
//
|
||||
// controller := NewController(nodeManager, peerRegistry, transport)
|
||||
type Controller struct {
|
||||
node *NodeManager
|
||||
peers *PeerRegistry
|
||||
|
|
@ -23,6 +24,8 @@ type Controller struct {
|
|||
}
|
||||
|
||||
// NewController creates a new Controller instance.
|
||||
//
|
||||
// controller := NewController(nodeManager, peerRegistry, transport)
|
||||
func NewController(node *NodeManager, peers *PeerRegistry, transport *Transport) *Controller {
|
||||
c := &Controller{
|
||||
node: node,
|
||||
|
|
@ -67,11 +70,11 @@ func (c *Controller) sendRequest(peerID string, msg *Message, timeout time.Durat
|
|||
if c.transport.GetConnection(peerID) == nil {
|
||||
peer := c.peers.GetPeer(peerID)
|
||||
if peer == nil {
|
||||
return nil, coreerr.E("Controller.sendRequest", "peer not found: "+peerID, nil)
|
||||
return nil, core.E("Controller.sendRequest", "peer not found: "+peerID, nil)
|
||||
}
|
||||
conn, err := c.transport.Connect(peer)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("Controller.sendRequest", "failed to connect to peer", err)
|
||||
return nil, core.E("Controller.sendRequest", "failed to connect to peer", err)
|
||||
}
|
||||
// Use the real peer ID after handshake (it may have changed)
|
||||
actualPeerID = conn.Peer.ID
|
||||
|
|
@ -96,7 +99,7 @@ func (c *Controller) sendRequest(peerID string, msg *Message, timeout time.Durat
|
|||
|
||||
// Send the message
|
||||
if err := c.transport.Send(actualPeerID, msg); err != nil {
|
||||
return nil, coreerr.E("Controller.sendRequest", "failed to send message", err)
|
||||
return nil, core.E("Controller.sendRequest", "failed to send message", err)
|
||||
}
|
||||
|
||||
// Wait for response
|
||||
|
|
@ -107,7 +110,7 @@ func (c *Controller) sendRequest(peerID string, msg *Message, timeout time.Durat
|
|||
case resp := <-respCh:
|
||||
return resp, nil
|
||||
case <-ctx.Done():
|
||||
return nil, coreerr.E("Controller.sendRequest", "request timeout", nil)
|
||||
return nil, core.E("Controller.sendRequest", "request timeout", nil)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -120,7 +123,7 @@ func (c *Controller) GetRemoteStats(peerID string) (*StatsPayload, error) {
|
|||
|
||||
msg, err := NewMessage(MsgGetStats, identity.ID, peerID, nil)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("Controller.GetRemoteStats", "failed to create message", err)
|
||||
return nil, core.E("Controller.GetRemoteStats", "failed to create message", err)
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(peerID, msg, 10*time.Second)
|
||||
|
|
@ -137,14 +140,14 @@ func (c *Controller) GetRemoteStats(peerID string) (*StatsPayload, error) {
|
|||
}
|
||||
|
||||
// StartRemoteMiner requests a remote peer to start a miner with a given profile.
|
||||
func (c *Controller) StartRemoteMiner(peerID, minerType, profileID string, configOverride json.RawMessage) error {
|
||||
func (c *Controller) StartRemoteMiner(peerID, minerType, profileID string, configOverride RawMessage) error {
|
||||
identity := c.node.GetIdentity()
|
||||
if identity == nil {
|
||||
return ErrIdentityNotInitialized
|
||||
}
|
||||
|
||||
if minerType == "" {
|
||||
return coreerr.E("Controller.StartRemoteMiner", "miner type is required", nil)
|
||||
return core.E("Controller.StartRemoteMiner", "miner type is required", nil)
|
||||
}
|
||||
|
||||
payload := StartMinerPayload{
|
||||
|
|
@ -155,7 +158,7 @@ func (c *Controller) StartRemoteMiner(peerID, minerType, profileID string, confi
|
|||
|
||||
msg, err := NewMessage(MsgStartMiner, identity.ID, peerID, payload)
|
||||
if err != nil {
|
||||
return coreerr.E("Controller.StartRemoteMiner", "failed to create message", err)
|
||||
return core.E("Controller.StartRemoteMiner", "failed to create message", err)
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(peerID, msg, 30*time.Second)
|
||||
|
|
@ -169,7 +172,7 @@ func (c *Controller) StartRemoteMiner(peerID, minerType, profileID string, confi
|
|||
}
|
||||
|
||||
if !ack.Success {
|
||||
return coreerr.E("Controller.StartRemoteMiner", "miner start failed: "+ack.Error, nil)
|
||||
return core.E("Controller.StartRemoteMiner", "miner start failed: "+ack.Error, nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -188,7 +191,7 @@ func (c *Controller) StopRemoteMiner(peerID, minerName string) error {
|
|||
|
||||
msg, err := NewMessage(MsgStopMiner, identity.ID, peerID, payload)
|
||||
if err != nil {
|
||||
return coreerr.E("Controller.StopRemoteMiner", "failed to create message", err)
|
||||
return core.E("Controller.StopRemoteMiner", "failed to create message", err)
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(peerID, msg, 30*time.Second)
|
||||
|
|
@ -202,7 +205,7 @@ func (c *Controller) StopRemoteMiner(peerID, minerName string) error {
|
|||
}
|
||||
|
||||
if !ack.Success {
|
||||
return coreerr.E("Controller.StopRemoteMiner", "miner stop failed: "+ack.Error, nil)
|
||||
return core.E("Controller.StopRemoteMiner", "miner stop failed: "+ack.Error, nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -222,7 +225,7 @@ func (c *Controller) GetRemoteLogs(peerID, minerName string, lines int) ([]strin
|
|||
|
||||
msg, err := NewMessage(MsgGetLogs, identity.ID, peerID, payload)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("Controller.GetRemoteLogs", "failed to create message", err)
|
||||
return nil, core.E("Controller.GetRemoteLogs", "failed to create message", err)
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(peerID, msg, 10*time.Second)
|
||||
|
|
@ -281,7 +284,7 @@ func (c *Controller) PingPeer(peerID string) (float64, error) {
|
|||
|
||||
msg, err := NewMessage(MsgPing, identity.ID, peerID, payload)
|
||||
if err != nil {
|
||||
return 0, coreerr.E("Controller.PingPeer", "failed to create message", err)
|
||||
return 0, core.E("Controller.PingPeer", "failed to create message", err)
|
||||
}
|
||||
|
||||
resp, err := c.sendRequest(peerID, msg, 5*time.Second)
|
||||
|
|
@ -309,7 +312,7 @@ func (c *Controller) PingPeer(peerID string) (float64, error) {
|
|||
func (c *Controller) ConnectToPeer(peerID string) error {
|
||||
peer := c.peers.GetPeer(peerID)
|
||||
if peer == nil {
|
||||
return coreerr.E("Controller.ConnectToPeer", "peer not found: "+peerID, nil)
|
||||
return core.E("Controller.ConnectToPeer", "peer not found: "+peerID, nil)
|
||||
}
|
||||
|
||||
_, err := c.transport.Connect(peer)
|
||||
|
|
@ -320,7 +323,7 @@ func (c *Controller) ConnectToPeer(peerID string) error {
|
|||
func (c *Controller) DisconnectFromPeer(peerID string) error {
|
||||
conn := c.transport.GetConnection(peerID)
|
||||
if conn == nil {
|
||||
return coreerr.E("Controller.DisconnectFromPeer", "peer not connected: "+peerID, nil)
|
||||
return core.E("Controller.DisconnectFromPeer", "peer not connected: "+peerID, nil)
|
||||
}
|
||||
|
||||
return conn.Close()
|
||||
|
|
|
|||
|
|
@ -1,17 +1,15 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -75,7 +73,7 @@ func makeWorkerServer(t *testing.T) (*NodeManager, string, *Transport) {
|
|||
|
||||
// --- Controller Tests ---
|
||||
|
||||
func TestController_RequestResponseCorrelation(t *testing.T) {
|
||||
func TestController_RequestResponseCorrelation_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPair(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -86,7 +84,7 @@ func TestController_RequestResponseCorrelation(t *testing.T) {
|
|||
assert.Greater(t, rtt, 0.0, "RTT should be positive")
|
||||
}
|
||||
|
||||
func TestController_RequestTimeout(t *testing.T) {
|
||||
func TestController_RequestTimeout_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
// Register a handler on the server that deliberately ignores all messages,
|
||||
|
|
@ -117,7 +115,7 @@ func TestController_RequestTimeout(t *testing.T) {
|
|||
assert.Less(t, elapsed, 1*time.Second, "should return quickly after the deadline")
|
||||
}
|
||||
|
||||
func TestController_AutoConnect(t *testing.T) {
|
||||
func TestController_AutoConnect_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
// Register worker on the server side.
|
||||
|
|
@ -149,7 +147,7 @@ func TestController_AutoConnect(t *testing.T) {
|
|||
assert.Equal(t, 1, tp.Client.ConnectedPeers(), "should have 1 connection after auto-connect")
|
||||
}
|
||||
|
||||
func TestController_GetAllStats(t *testing.T) {
|
||||
func TestController_GetAllStats_Good(t *testing.T) {
|
||||
// Controller node with connections to two independent worker servers.
|
||||
controllerNM := testNode(t, "controller", RoleController)
|
||||
controllerReg := testRegistry(t)
|
||||
|
|
@ -194,7 +192,7 @@ func TestController_GetAllStats(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestController_PingPeerRTT(t *testing.T) {
|
||||
func TestController_PingPeerRTT_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPair(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -217,7 +215,7 @@ func TestController_PingPeerRTT(t *testing.T) {
|
|||
assert.Greater(t, peerAfter.PingMS, 0.0, "PingMS should be positive")
|
||||
}
|
||||
|
||||
func TestController_ConcurrentRequests(t *testing.T) {
|
||||
func TestController_ConcurrentRequests_Ugly(t *testing.T) {
|
||||
// Multiple goroutines send pings to different peers simultaneously.
|
||||
// Verify correct correlation — no cross-talk between responses.
|
||||
controllerNM := testNode(t, "controller", RoleController)
|
||||
|
|
@ -271,7 +269,7 @@ func TestController_ConcurrentRequests(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestController_DeadPeerCleanup(t *testing.T) {
|
||||
func TestController_DeadPeerCleanup_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
// Server deliberately ignores all messages.
|
||||
|
|
@ -307,7 +305,7 @@ func TestController_DeadPeerCleanup(t *testing.T) {
|
|||
|
||||
// --- Additional edge-case tests ---
|
||||
|
||||
func TestController_MultipleSequentialPings(t *testing.T) {
|
||||
func TestController_MultipleSequentialPings_Good(t *testing.T) {
|
||||
// Ensures sequential requests to the same peer are correctly correlated.
|
||||
controller, _, tp := setupControllerPair(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
|
@ -319,7 +317,7 @@ func TestController_MultipleSequentialPings(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestController_ConcurrentRequestsSamePeer(t *testing.T) {
|
||||
func TestController_ConcurrentRequestsSamePeer_Ugly(t *testing.T) {
|
||||
// Multiple goroutines sending requests to the SAME peer simultaneously.
|
||||
// Tests concurrent pending-map insertions/deletions under contention.
|
||||
controller, _, tp := setupControllerPair(t)
|
||||
|
|
@ -343,7 +341,7 @@ func TestController_ConcurrentRequestsSamePeer(t *testing.T) {
|
|||
"all concurrent requests to the same peer should succeed")
|
||||
}
|
||||
|
||||
func TestController_GetRemoteStats(t *testing.T) {
|
||||
func TestController_GetRemoteStats_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPair(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -357,7 +355,7 @@ func TestController_GetRemoteStats(t *testing.T) {
|
|||
assert.GreaterOrEqual(t, stats.Uptime, int64(0), "uptime should be non-negative")
|
||||
}
|
||||
|
||||
func TestController_ConnectToPeerUnknown(t *testing.T) {
|
||||
func TestController_ConnectToPeerUnknown_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
|
||||
|
||||
|
|
@ -366,7 +364,7 @@ func TestController_ConnectToPeerUnknown(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "not found")
|
||||
}
|
||||
|
||||
func TestController_DisconnectFromPeer(t *testing.T) {
|
||||
func TestController_DisconnectFromPeer_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPair(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -376,7 +374,7 @@ func TestController_DisconnectFromPeer(t *testing.T) {
|
|||
require.NoError(t, err, "DisconnectFromPeer should succeed")
|
||||
}
|
||||
|
||||
func TestController_DisconnectFromPeerNotConnected(t *testing.T) {
|
||||
func TestController_DisconnectFromPeerNotConnected_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
|
||||
|
||||
|
|
@ -385,7 +383,7 @@ func TestController_DisconnectFromPeerNotConnected(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "not connected")
|
||||
}
|
||||
|
||||
func TestController_SendRequestPeerNotFound(t *testing.T) {
|
||||
func TestController_SendRequestPeerNotFound_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
|
||||
|
||||
|
|
@ -475,7 +473,7 @@ func (m *mockMinerManagerFull) StopMiner(name string) error {
|
|||
defer m.mu.Unlock()
|
||||
|
||||
if _, exists := m.miners[name]; !exists {
|
||||
return fmt.Errorf("miner %s not found", name)
|
||||
return core.E("mockMinerManagerFull.StopMiner", "miner "+name+" not found", nil)
|
||||
}
|
||||
delete(m.miners, name)
|
||||
return nil
|
||||
|
|
@ -498,7 +496,7 @@ func (m *mockMinerManagerFull) GetMiner(name string) (MinerInstance, error) {
|
|||
|
||||
miner, exists := m.miners[name]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("miner %s not found", name)
|
||||
return nil, core.E("mockMinerManagerFull.GetMiner", "miner "+name+" not found", nil)
|
||||
}
|
||||
return miner, nil
|
||||
}
|
||||
|
|
@ -521,25 +519,25 @@ func (m *mockMinerFull) GetConsoleHistory(lines int) []string {
|
|||
return m.consoleHistory[:lines]
|
||||
}
|
||||
|
||||
func TestController_StartRemoteMiner(t *testing.T) {
|
||||
func TestController_StartRemoteMiner_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPairWithMiner(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
configOverride := json.RawMessage(`{"pool":"pool.example.com:3333"}`)
|
||||
configOverride := RawMessage(`{"pool":"pool.example.com:3333"}`)
|
||||
err := controller.StartRemoteMiner(serverID, "xmrig", "profile-1", configOverride)
|
||||
|
||||
require.NoError(t, err, "StartRemoteMiner should succeed")
|
||||
}
|
||||
|
||||
func TestController_StartRemoteMiner_WithConfig(t *testing.T) {
|
||||
func TestController_StartRemoteMiner_WithConfig_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPairWithMiner(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
configOverride := json.RawMessage(`{"pool":"custom-pool:3333","threads":4}`)
|
||||
configOverride := RawMessage(`{"pool":"custom-pool:3333","threads":4}`)
|
||||
err := controller.StartRemoteMiner(serverID, "xmrig", "", configOverride)
|
||||
require.NoError(t, err, "StartRemoteMiner with config override should succeed")
|
||||
}
|
||||
|
||||
func TestController_StartRemoteMiner_EmptyType(t *testing.T) {
|
||||
func TestController_StartRemoteMiner_EmptyType_Bad(t *testing.T) {
|
||||
controller, _, tp := setupControllerPairWithMiner(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -548,14 +546,12 @@ func TestController_StartRemoteMiner_EmptyType(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "miner type is required")
|
||||
}
|
||||
|
||||
func TestController_StartRemoteMiner_NoIdentity(t *testing.T) {
|
||||
func TestController_StartRemoteMiner_NoIdentity_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
// Create a node without identity
|
||||
nmNoID, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(t.TempDir(), "priv.key"),
|
||||
filepath.Join(t.TempDir(), "node.json"),
|
||||
)
|
||||
keyPath, configPath := testNodeManagerPaths(t.TempDir())
|
||||
nmNoID, err := NewNodeManagerWithPaths(keyPath, configPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
|
||||
|
|
@ -565,7 +561,7 @@ func TestController_StartRemoteMiner_NoIdentity(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "identity not initialized")
|
||||
}
|
||||
|
||||
func TestController_StopRemoteMiner(t *testing.T) {
|
||||
func TestController_StopRemoteMiner_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPairWithMiner(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -573,7 +569,7 @@ func TestController_StopRemoteMiner(t *testing.T) {
|
|||
require.NoError(t, err, "StopRemoteMiner should succeed for existing miner")
|
||||
}
|
||||
|
||||
func TestController_StopRemoteMiner_NotFound(t *testing.T) {
|
||||
func TestController_StopRemoteMiner_NotFound_Bad(t *testing.T) {
|
||||
controller, _, tp := setupControllerPairWithMiner(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -581,12 +577,10 @@ func TestController_StopRemoteMiner_NotFound(t *testing.T) {
|
|||
require.Error(t, err, "StopRemoteMiner should fail for non-existent miner")
|
||||
}
|
||||
|
||||
func TestController_StopRemoteMiner_NoIdentity(t *testing.T) {
|
||||
func TestController_StopRemoteMiner_NoIdentity_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
nmNoID, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(t.TempDir(), "priv.key"),
|
||||
filepath.Join(t.TempDir(), "node.json"),
|
||||
)
|
||||
keyPath, configPath := testNodeManagerPaths(t.TempDir())
|
||||
nmNoID, err := NewNodeManagerWithPaths(keyPath, configPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
|
||||
|
|
@ -596,7 +590,7 @@ func TestController_StopRemoteMiner_NoIdentity(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "identity not initialized")
|
||||
}
|
||||
|
||||
func TestController_GetRemoteLogs(t *testing.T) {
|
||||
func TestController_GetRemoteLogs_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPairWithMiner(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -607,7 +601,7 @@ func TestController_GetRemoteLogs(t *testing.T) {
|
|||
assert.Contains(t, lines[0], "started")
|
||||
}
|
||||
|
||||
func TestController_GetRemoteLogs_LimitedLines(t *testing.T) {
|
||||
func TestController_GetRemoteLogs_LimitedLines_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPairWithMiner(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -616,12 +610,10 @@ func TestController_GetRemoteLogs_LimitedLines(t *testing.T) {
|
|||
assert.Len(t, lines, 1, "should return only 1 line")
|
||||
}
|
||||
|
||||
func TestController_GetRemoteLogs_NoIdentity(t *testing.T) {
|
||||
func TestController_GetRemoteLogs_NoIdentity_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
nmNoID, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(t.TempDir(), "priv.key"),
|
||||
filepath.Join(t.TempDir(), "node.json"),
|
||||
)
|
||||
keyPath, configPath := testNodeManagerPaths(t.TempDir())
|
||||
nmNoID, err := NewNodeManagerWithPaths(keyPath, configPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
|
||||
|
|
@ -631,7 +623,7 @@ func TestController_GetRemoteLogs_NoIdentity(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "identity not initialized")
|
||||
}
|
||||
|
||||
func TestController_GetRemoteStats_WithMiners(t *testing.T) {
|
||||
func TestController_GetRemoteStats_WithMiners_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPairWithMiner(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -645,12 +637,10 @@ func TestController_GetRemoteStats_WithMiners(t *testing.T) {
|
|||
assert.Equal(t, 1234.5, stats.Miners[0].Hashrate)
|
||||
}
|
||||
|
||||
func TestController_GetRemoteStats_NoIdentity(t *testing.T) {
|
||||
func TestController_GetRemoteStats_NoIdentity_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
nmNoID, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(t.TempDir(), "priv.key"),
|
||||
filepath.Join(t.TempDir(), "node.json"),
|
||||
)
|
||||
keyPath, configPath := testNodeManagerPaths(t.TempDir())
|
||||
nmNoID, err := NewNodeManagerWithPaths(keyPath, configPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
|
||||
|
|
@ -660,7 +650,7 @@ func TestController_GetRemoteStats_NoIdentity(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "identity not initialized")
|
||||
}
|
||||
|
||||
func TestController_ConnectToPeer_Success(t *testing.T) {
|
||||
func TestController_ConnectToPeer_Success_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
worker := NewWorker(tp.ServerNode, tp.Server)
|
||||
|
|
@ -684,7 +674,7 @@ func TestController_ConnectToPeer_Success(t *testing.T) {
|
|||
assert.Equal(t, 1, tp.Client.ConnectedPeers(), "should have 1 connection after ConnectToPeer")
|
||||
}
|
||||
|
||||
func TestController_HandleResponse_NonReply(t *testing.T) {
|
||||
func TestController_HandleResponse_NonReply_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
|
||||
|
||||
|
|
@ -699,7 +689,7 @@ func TestController_HandleResponse_NonReply(t *testing.T) {
|
|||
assert.Equal(t, 0, count)
|
||||
}
|
||||
|
||||
func TestController_HandleResponse_FullChannel(t *testing.T) {
|
||||
func TestController_HandleResponse_FullChannel_Ugly(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
|
||||
|
||||
|
|
@ -723,12 +713,10 @@ func TestController_HandleResponse_FullChannel(t *testing.T) {
|
|||
assert.False(t, exists, "pending entry should be removed after handling")
|
||||
}
|
||||
|
||||
func TestController_PingPeer_NoIdentity(t *testing.T) {
|
||||
func TestController_PingPeer_NoIdentity_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
nmNoID, _ := NewNodeManagerWithPaths(
|
||||
filepath.Join(t.TempDir(), "priv.key"),
|
||||
filepath.Join(t.TempDir(), "node.json"),
|
||||
)
|
||||
keyPath, configPath := testNodeManagerPaths(t.TempDir())
|
||||
nmNoID, _ := NewNodeManagerWithPaths(keyPath, configPath)
|
||||
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
|
||||
|
||||
_, err := controller.PingPeer("some-peer")
|
||||
|
|
|
|||
53
node/core_fs.go
Normal file
53
node/core_fs.go
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
// SPDX-License-Identifier: EUPL-1.2
|
||||
|
||||
package node
|
||||
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
var localFS = (&core.Fs{}).New("/")
|
||||
|
||||
func fsEnsureDir(path string) error {
|
||||
return fsResultErr(localFS.EnsureDir(path))
|
||||
}
|
||||
|
||||
func fsWrite(path, content string) error {
|
||||
return fsResultErr(localFS.Write(path, content))
|
||||
}
|
||||
|
||||
func fsRead(path string) (string, error) {
|
||||
result := localFS.Read(path)
|
||||
if !result.OK {
|
||||
return "", fsResultErr(result)
|
||||
}
|
||||
|
||||
content, ok := result.Value.(string)
|
||||
if !ok {
|
||||
return "", core.E("node.fsRead", "filesystem read returned non-string content", nil)
|
||||
}
|
||||
|
||||
return content, nil
|
||||
}
|
||||
|
||||
func fsDelete(path string) error {
|
||||
return fsResultErr(localFS.Delete(path))
|
||||
}
|
||||
|
||||
func fsRename(oldPath, newPath string) error {
|
||||
return fsResultErr(localFS.Rename(oldPath, newPath))
|
||||
}
|
||||
|
||||
func fsExists(path string) bool {
|
||||
return localFS.Exists(path)
|
||||
}
|
||||
|
||||
func fsResultErr(result core.Result) error {
|
||||
if result.OK {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err, ok := result.Value.(error); ok && err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return core.E("node.fs", "filesystem operation failed", nil)
|
||||
}
|
||||
|
|
@ -1,11 +1,10 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"iter"
|
||||
"sync"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
|
||||
"dappco.re/go/core/p2p/logging"
|
||||
"dappco.re/go/core/p2p/ueps"
|
||||
|
|
@ -29,19 +28,27 @@ const (
|
|||
|
||||
// IntentHandler processes a UEPS packet that has been routed by intent.
|
||||
// Implementations receive the fully parsed and HMAC-verified packet.
|
||||
//
|
||||
// var handler IntentHandler = func(pkt *ueps.ParsedPacket) error { return nil }
|
||||
type IntentHandler func(pkt *ueps.ParsedPacket) error
|
||||
|
||||
// Dispatcher routes verified UEPS packets to registered intent handlers.
|
||||
// It enforces a threat circuit breaker before routing: any packet whose
|
||||
// ThreatScore exceeds ThreatScoreThreshold is dropped and logged.
|
||||
//
|
||||
// dispatcher := NewDispatcher()
|
||||
//
|
||||
// Design decisions:
|
||||
//
|
||||
// - Handlers are registered per IntentID (1:1 mapping).
|
||||
//
|
||||
// - Unknown intents are logged at WARN level and silently dropped (no error
|
||||
// returned to the caller) to avoid back-pressure on the transport layer.
|
||||
//
|
||||
// - High-threat packets are dropped silently (logged at WARN) rather than
|
||||
// returning an error, consistent with the "don't even parse the payload"
|
||||
// philosophy from the original stub.
|
||||
//
|
||||
// - The dispatcher is safe for concurrent use; a RWMutex protects the
|
||||
// handler map.
|
||||
type Dispatcher struct {
|
||||
|
|
@ -51,6 +58,8 @@ type Dispatcher struct {
|
|||
}
|
||||
|
||||
// NewDispatcher creates a Dispatcher with no registered handlers.
|
||||
//
|
||||
// dispatcher := NewDispatcher()
|
||||
func NewDispatcher() *Dispatcher {
|
||||
return &Dispatcher{
|
||||
handlers: make(map[byte]IntentHandler),
|
||||
|
|
@ -69,7 +78,7 @@ func (d *Dispatcher) RegisterHandler(intentID byte, handler IntentHandler) {
|
|||
defer d.mu.Unlock()
|
||||
d.handlers[intentID] = handler
|
||||
d.log.Debug("handler registered", logging.Fields{
|
||||
"intent_id": fmt.Sprintf("0x%02X", intentID),
|
||||
"intent_id": core.Sprintf("0x%02X", intentID),
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -108,7 +117,7 @@ func (d *Dispatcher) Dispatch(pkt *ueps.ParsedPacket) error {
|
|||
d.log.Warn("packet dropped: threat score exceeds safety threshold", logging.Fields{
|
||||
"threat_score": pkt.Header.ThreatScore,
|
||||
"threshold": ThreatScoreThreshold,
|
||||
"intent_id": fmt.Sprintf("0x%02X", pkt.Header.IntentID),
|
||||
"intent_id": core.Sprintf("0x%02X", pkt.Header.IntentID),
|
||||
"version": pkt.Header.Version,
|
||||
})
|
||||
return ErrThreatScoreExceeded
|
||||
|
|
@ -121,7 +130,7 @@ func (d *Dispatcher) Dispatch(pkt *ueps.ParsedPacket) error {
|
|||
|
||||
if !exists {
|
||||
d.log.Warn("packet dropped: unknown intent", logging.Fields{
|
||||
"intent_id": fmt.Sprintf("0x%02X", pkt.Header.IntentID),
|
||||
"intent_id": core.Sprintf("0x%02X", pkt.Header.IntentID),
|
||||
"version": pkt.Header.Version,
|
||||
})
|
||||
return ErrUnknownIntent
|
||||
|
|
@ -134,12 +143,12 @@ func (d *Dispatcher) Dispatch(pkt *ueps.ParsedPacket) error {
|
|||
var (
|
||||
// ErrThreatScoreExceeded is returned when a packet's ThreatScore exceeds
|
||||
// the safety threshold.
|
||||
ErrThreatScoreExceeded = coreerr.E("Dispatcher.Dispatch", fmt.Sprintf("packet rejected: threat score exceeds safety threshold (%d)", ThreatScoreThreshold), nil)
|
||||
ErrThreatScoreExceeded = core.E("Dispatcher.Dispatch", core.Sprintf("packet rejected: threat score exceeds safety threshold (%d)", ThreatScoreThreshold), nil)
|
||||
|
||||
// ErrUnknownIntent is returned when no handler is registered for the
|
||||
// packet's IntentID.
|
||||
ErrUnknownIntent = coreerr.E("Dispatcher.Dispatch", "packet dropped: unknown intent", nil)
|
||||
ErrUnknownIntent = core.E("Dispatcher.Dispatch", "packet dropped: unknown intent", nil)
|
||||
|
||||
// ErrNilPacket is returned when a nil packet is passed to Dispatch.
|
||||
ErrNilPacket = coreerr.E("Dispatcher.Dispatch", "nil packet", nil)
|
||||
ErrNilPacket = core.E("Dispatcher.Dispatch", "nil packet", nil)
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,11 +1,11 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/p2p/ueps"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
|
@ -28,7 +28,7 @@ func makePacket(intentID byte, threatScore uint16, payload []byte) *ueps.ParsedP
|
|||
|
||||
// --- Dispatcher Tests ---
|
||||
|
||||
func TestDispatcher_RegisterAndDispatch(t *testing.T) {
|
||||
func TestDispatcher_RegisterAndDispatch_Good(t *testing.T) {
|
||||
t.Run("handler receives the correct packet", func(t *testing.T) {
|
||||
d := NewDispatcher()
|
||||
var received *ueps.ParsedPacket
|
||||
|
|
@ -49,7 +49,7 @@ func TestDispatcher_RegisterAndDispatch(t *testing.T) {
|
|||
|
||||
t.Run("handler error propagates to caller", func(t *testing.T) {
|
||||
d := NewDispatcher()
|
||||
handlerErr := fmt.Errorf("compute failed")
|
||||
handlerErr := core.NewError("compute failed")
|
||||
|
||||
d.RegisterHandler(IntentCompute, func(pkt *ueps.ParsedPacket) error {
|
||||
return handlerErr
|
||||
|
|
@ -62,7 +62,7 @@ func TestDispatcher_RegisterAndDispatch(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestDispatcher_ThreatCircuitBreaker(t *testing.T) {
|
||||
func TestDispatcher_ThreatCircuitBreaker_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
threatScore uint16
|
||||
|
|
@ -118,7 +118,7 @@ func TestDispatcher_ThreatCircuitBreaker(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDispatcher_UnknownIntentDropped(t *testing.T) {
|
||||
func TestDispatcher_UnknownIntentDropped_Bad(t *testing.T) {
|
||||
d := NewDispatcher()
|
||||
|
||||
// Register handlers for known intents only
|
||||
|
|
@ -133,7 +133,7 @@ func TestDispatcher_UnknownIntentDropped(t *testing.T) {
|
|||
assert.ErrorIs(t, err, ErrUnknownIntent)
|
||||
}
|
||||
|
||||
func TestDispatcher_MultipleHandlersCorrectRouting(t *testing.T) {
|
||||
func TestDispatcher_MultipleHandlersCorrectRouting_Good(t *testing.T) {
|
||||
d := NewDispatcher()
|
||||
|
||||
var handshakeCalled, computeCalled, rehabCalled, customCalled bool
|
||||
|
|
@ -192,7 +192,7 @@ func TestDispatcher_MultipleHandlersCorrectRouting(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDispatcher_NilAndEmptyPayload(t *testing.T) {
|
||||
func TestDispatcher_NilAndEmptyPayload_Ugly(t *testing.T) {
|
||||
t.Run("nil packet returns ErrNilPacket", func(t *testing.T) {
|
||||
d := NewDispatcher()
|
||||
err := d.Dispatch(nil)
|
||||
|
|
@ -234,7 +234,7 @@ func TestDispatcher_NilAndEmptyPayload(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestDispatcher_ConcurrentDispatchSafety(t *testing.T) {
|
||||
func TestDispatcher_ConcurrentDispatchSafety_Ugly(t *testing.T) {
|
||||
d := NewDispatcher()
|
||||
|
||||
var count atomic.Int64
|
||||
|
|
@ -261,7 +261,7 @@ func TestDispatcher_ConcurrentDispatchSafety(t *testing.T) {
|
|||
assert.Equal(t, int64(goroutines), count.Load())
|
||||
}
|
||||
|
||||
func TestDispatcher_ConcurrentRegisterAndDispatch(t *testing.T) {
|
||||
func TestDispatcher_ConcurrentRegisterAndDispatch_Ugly(t *testing.T) {
|
||||
d := NewDispatcher()
|
||||
|
||||
var count atomic.Int64
|
||||
|
|
@ -301,7 +301,7 @@ func TestDispatcher_ConcurrentRegisterAndDispatch(t *testing.T) {
|
|||
assert.True(t, count.Load() >= 0)
|
||||
}
|
||||
|
||||
func TestDispatcher_ReplaceHandler(t *testing.T) {
|
||||
func TestDispatcher_ReplaceHandler_Good(t *testing.T) {
|
||||
d := NewDispatcher()
|
||||
|
||||
var firstCalled, secondCalled bool
|
||||
|
|
@ -325,7 +325,7 @@ func TestDispatcher_ReplaceHandler(t *testing.T) {
|
|||
assert.True(t, secondCalled, "replacement handler should be called")
|
||||
}
|
||||
|
||||
func TestDispatcher_ThreatBlocksBeforeRouting(t *testing.T) {
|
||||
func TestDispatcher_ThreatBlocksBeforeRouting_Good(t *testing.T) {
|
||||
// Verify that the circuit breaker fires before intent routing,
|
||||
// so even an unknown intent returns ErrThreatScoreExceeded (not ErrUnknownIntent).
|
||||
d := NewDispatcher()
|
||||
|
|
@ -337,7 +337,7 @@ func TestDispatcher_ThreatBlocksBeforeRouting(t *testing.T) {
|
|||
"threat circuit breaker should fire before intent routing")
|
||||
}
|
||||
|
||||
func TestDispatcher_IntentConstants(t *testing.T) {
|
||||
func TestDispatcher_IntentConstants_Good(t *testing.T) {
|
||||
// Verify the well-known intent IDs match the spec (RFC-021).
|
||||
assert.Equal(t, byte(0x01), IntentHandshake)
|
||||
assert.Equal(t, byte(0x20), IntentCompute)
|
||||
|
|
|
|||
|
|
@ -1,14 +1,14 @@
|
|||
package node
|
||||
|
||||
import coreerr "dappco.re/go/core/log"
|
||||
import core "dappco.re/go/core"
|
||||
|
||||
// Sentinel errors shared across the node package.
|
||||
var (
|
||||
// ErrIdentityNotInitialized is returned when a node operation requires
|
||||
// a node identity but none has been generated or loaded.
|
||||
ErrIdentityNotInitialized = coreerr.E("node", "node identity not initialized", nil)
|
||||
ErrIdentityNotInitialized = core.E("node", "node identity not initialized", nil)
|
||||
|
||||
// ErrMinerManagerNotConfigured is returned when a miner operation is
|
||||
// attempted but no MinerManager has been set on the Worker.
|
||||
ErrMinerManagerNotConfigured = coreerr.E("node", "miner manager not configured", nil)
|
||||
ErrMinerManagerNotConfigured = core.E("node", "miner manager not configured", nil)
|
||||
)
|
||||
|
|
|
|||
|
|
@ -7,13 +7,10 @@ import (
|
|||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
coreio "dappco.re/go/core/io"
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
|
||||
"forge.lthn.ai/Snider/Borg/pkg/stmf"
|
||||
"github.com/adrg/xdg"
|
||||
|
|
@ -23,16 +20,20 @@ import (
|
|||
const ChallengeSize = 32
|
||||
|
||||
// GenerateChallenge creates a random challenge for authentication.
|
||||
//
|
||||
// challenge, err := GenerateChallenge()
|
||||
func GenerateChallenge() ([]byte, error) {
|
||||
challenge := make([]byte, ChallengeSize)
|
||||
if _, err := rand.Read(challenge); err != nil {
|
||||
return nil, coreerr.E("GenerateChallenge", "failed to generate challenge", err)
|
||||
return nil, core.E("GenerateChallenge", "failed to generate challenge", err)
|
||||
}
|
||||
return challenge, nil
|
||||
}
|
||||
|
||||
// SignChallenge creates an HMAC signature of a challenge using a shared secret.
|
||||
// The signature proves possession of the shared secret without revealing it.
|
||||
//
|
||||
// signature := SignChallenge(challenge, sharedSecret)
|
||||
func SignChallenge(challenge []byte, sharedSecret []byte) []byte {
|
||||
mac := hmac.New(sha256.New, sharedSecret)
|
||||
mac.Write(challenge)
|
||||
|
|
@ -40,12 +41,16 @@ func SignChallenge(challenge []byte, sharedSecret []byte) []byte {
|
|||
}
|
||||
|
||||
// VerifyChallenge verifies that a challenge response was signed with the correct shared secret.
|
||||
//
|
||||
// ok := VerifyChallenge(challenge, signature, sharedSecret)
|
||||
func VerifyChallenge(challenge, response, sharedSecret []byte) bool {
|
||||
expected := SignChallenge(challenge, sharedSecret)
|
||||
return hmac.Equal(response, expected)
|
||||
}
|
||||
|
||||
// NodeRole defines the operational mode of a node.
|
||||
//
|
||||
// role := RoleWorker
|
||||
type NodeRole string
|
||||
|
||||
const (
|
||||
|
|
@ -58,6 +63,8 @@ const (
|
|||
)
|
||||
|
||||
// NodeIdentity represents the public identity of a node.
|
||||
//
|
||||
// identity := NodeIdentity{Name: "worker-1", Role: RoleWorker}
|
||||
type NodeIdentity struct {
|
||||
ID string `json:"id"` // Derived from public key (first 16 bytes hex)
|
||||
Name string `json:"name"` // Human-friendly name
|
||||
|
|
@ -67,6 +74,8 @@ type NodeIdentity struct {
|
|||
}
|
||||
|
||||
// NodeManager handles node identity operations including key generation and storage.
|
||||
//
|
||||
// nodeManager, err := NewNodeManager()
|
||||
type NodeManager struct {
|
||||
identity *NodeIdentity
|
||||
privateKey []byte // Never serialized to JSON
|
||||
|
|
@ -77,15 +86,17 @@ type NodeManager struct {
|
|||
}
|
||||
|
||||
// NewNodeManager creates a new NodeManager, loading existing identity if available.
|
||||
//
|
||||
// nodeManager, err := NewNodeManager()
|
||||
func NewNodeManager() (*NodeManager, error) {
|
||||
keyPath, err := xdg.DataFile("lethean-desktop/node/private.key")
|
||||
if err != nil {
|
||||
return nil, coreerr.E("NodeManager.New", "failed to get key path", err)
|
||||
return nil, core.E("NodeManager.New", "failed to get key path", err)
|
||||
}
|
||||
|
||||
configPath, err := xdg.ConfigFile("lethean-desktop/node.json")
|
||||
if err != nil {
|
||||
return nil, coreerr.E("NodeManager.New", "failed to get config path", err)
|
||||
return nil, core.E("NodeManager.New", "failed to get config path", err)
|
||||
}
|
||||
|
||||
return NewNodeManagerWithPaths(keyPath, configPath)
|
||||
|
|
@ -93,6 +104,8 @@ func NewNodeManager() (*NodeManager, error) {
|
|||
|
||||
// NewNodeManagerWithPaths creates a NodeManager with custom paths.
|
||||
// This is primarily useful for testing to avoid xdg path caching issues.
|
||||
//
|
||||
// nodeManager, err := NewNodeManagerWithPaths("/srv/p2p/private.key", "/srv/p2p/node.json")
|
||||
func NewNodeManagerWithPaths(keyPath, configPath string) (*NodeManager, error) {
|
||||
nm := &NodeManager{
|
||||
keyPath: keyPath,
|
||||
|
|
@ -135,7 +148,7 @@ func (n *NodeManager) GenerateIdentity(name string, role NodeRole) error {
|
|||
// Generate X25519 keypair using STMF
|
||||
keyPair, err := stmf.GenerateKeyPair()
|
||||
if err != nil {
|
||||
return coreerr.E("NodeManager.GenerateIdentity", "failed to generate keypair", err)
|
||||
return core.E("NodeManager.GenerateIdentity", "failed to generate keypair", err)
|
||||
}
|
||||
|
||||
// Derive node ID from public key (first 16 bytes as hex = 32 char ID)
|
||||
|
|
@ -156,12 +169,12 @@ func (n *NodeManager) GenerateIdentity(name string, role NodeRole) error {
|
|||
|
||||
// Save private key
|
||||
if err := n.savePrivateKey(); err != nil {
|
||||
return coreerr.E("NodeManager.GenerateIdentity", "failed to save private key", err)
|
||||
return core.E("NodeManager.GenerateIdentity", "failed to save private key", err)
|
||||
}
|
||||
|
||||
// Save identity config
|
||||
if err := n.saveIdentity(); err != nil {
|
||||
return coreerr.E("NodeManager.GenerateIdentity", "failed to save identity", err)
|
||||
return core.E("NodeManager.GenerateIdentity", "failed to save identity", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -180,19 +193,19 @@ func (n *NodeManager) DeriveSharedSecret(peerPubKeyBase64 string) ([]byte, error
|
|||
// Load peer's public key
|
||||
peerPubKey, err := stmf.LoadPublicKeyBase64(peerPubKeyBase64)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("NodeManager.DeriveSharedSecret", "failed to load peer public key", err)
|
||||
return nil, core.E("NodeManager.DeriveSharedSecret", "failed to load peer public key", err)
|
||||
}
|
||||
|
||||
// Load our private key
|
||||
privateKey, err := ecdh.X25519().NewPrivateKey(n.privateKey)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("NodeManager.DeriveSharedSecret", "failed to load private key", err)
|
||||
return nil, core.E("NodeManager.DeriveSharedSecret", "failed to load private key", err)
|
||||
}
|
||||
|
||||
// Derive shared secret using ECDH
|
||||
sharedSecret, err := privateKey.ECDH(peerPubKey)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("NodeManager.DeriveSharedSecret", "failed to derive shared secret", err)
|
||||
return nil, core.E("NodeManager.DeriveSharedSecret", "failed to derive shared secret", err)
|
||||
}
|
||||
|
||||
// Hash the shared secret using SHA-256 (same pattern as Borg/trix)
|
||||
|
|
@ -203,14 +216,14 @@ func (n *NodeManager) DeriveSharedSecret(peerPubKeyBase64 string) ([]byte, error
|
|||
// savePrivateKey saves the private key to disk with restricted permissions.
|
||||
func (n *NodeManager) savePrivateKey() error {
|
||||
// Ensure directory exists
|
||||
dir := filepath.Dir(n.keyPath)
|
||||
if err := coreio.Local.EnsureDir(dir); err != nil {
|
||||
return coreerr.E("NodeManager.savePrivateKey", "failed to create key directory", err)
|
||||
dir := core.PathDir(n.keyPath)
|
||||
if err := fsEnsureDir(dir); err != nil {
|
||||
return core.E("NodeManager.savePrivateKey", "failed to create key directory", err)
|
||||
}
|
||||
|
||||
// Write private key
|
||||
if err := coreio.Local.Write(n.keyPath, string(n.privateKey)); err != nil {
|
||||
return coreerr.E("NodeManager.savePrivateKey", "failed to write private key", err)
|
||||
if err := fsWrite(n.keyPath, string(n.privateKey)); err != nil {
|
||||
return core.E("NodeManager.savePrivateKey", "failed to write private key", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -219,18 +232,19 @@ func (n *NodeManager) savePrivateKey() error {
|
|||
// saveIdentity saves the public identity to the config file.
|
||||
func (n *NodeManager) saveIdentity() error {
|
||||
// Ensure directory exists
|
||||
dir := filepath.Dir(n.configPath)
|
||||
if err := coreio.Local.EnsureDir(dir); err != nil {
|
||||
return coreerr.E("NodeManager.saveIdentity", "failed to create config directory", err)
|
||||
dir := core.PathDir(n.configPath)
|
||||
if err := fsEnsureDir(dir); err != nil {
|
||||
return core.E("NodeManager.saveIdentity", "failed to create config directory", err)
|
||||
}
|
||||
|
||||
data, err := json.MarshalIndent(n.identity, "", " ")
|
||||
if err != nil {
|
||||
return coreerr.E("NodeManager.saveIdentity", "failed to marshal identity", err)
|
||||
result := core.JSONMarshal(n.identity)
|
||||
if !result.OK {
|
||||
return core.E("NodeManager.saveIdentity", "failed to marshal identity", result.Value.(error))
|
||||
}
|
||||
data := result.Value.([]byte)
|
||||
|
||||
if err := coreio.Local.Write(n.configPath, string(data)); err != nil {
|
||||
return coreerr.E("NodeManager.saveIdentity", "failed to write identity", err)
|
||||
if err := fsWrite(n.configPath, string(data)); err != nil {
|
||||
return core.E("NodeManager.saveIdentity", "failed to write identity", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -239,27 +253,28 @@ func (n *NodeManager) saveIdentity() error {
|
|||
// loadIdentity loads the node identity from disk.
|
||||
func (n *NodeManager) loadIdentity() error {
|
||||
// Load identity config
|
||||
content, err := coreio.Local.Read(n.configPath)
|
||||
content, err := fsRead(n.configPath)
|
||||
if err != nil {
|
||||
return coreerr.E("NodeManager.loadIdentity", "failed to read identity", err)
|
||||
return core.E("NodeManager.loadIdentity", "failed to read identity", err)
|
||||
}
|
||||
|
||||
var identity NodeIdentity
|
||||
if err := json.Unmarshal([]byte(content), &identity); err != nil {
|
||||
return coreerr.E("NodeManager.loadIdentity", "failed to unmarshal identity", err)
|
||||
result := core.JSONUnmarshalString(content, &identity)
|
||||
if !result.OK {
|
||||
return core.E("NodeManager.loadIdentity", "failed to unmarshal identity", result.Value.(error))
|
||||
}
|
||||
|
||||
// Load private key
|
||||
keyContent, err := coreio.Local.Read(n.keyPath)
|
||||
keyContent, err := fsRead(n.keyPath)
|
||||
if err != nil {
|
||||
return coreerr.E("NodeManager.loadIdentity", "failed to read private key", err)
|
||||
return core.E("NodeManager.loadIdentity", "failed to read private key", err)
|
||||
}
|
||||
privateKey := []byte(keyContent)
|
||||
|
||||
// Reconstruct keypair from private key
|
||||
keyPair, err := stmf.LoadKeyPair(privateKey)
|
||||
if err != nil {
|
||||
return coreerr.E("NodeManager.loadIdentity", "failed to load keypair", err)
|
||||
return core.E("NodeManager.loadIdentity", "failed to load keypair", err)
|
||||
}
|
||||
|
||||
n.identity = &identity
|
||||
|
|
@ -275,16 +290,16 @@ func (n *NodeManager) Delete() error {
|
|||
defer n.mu.Unlock()
|
||||
|
||||
// Remove private key (ignore if already absent)
|
||||
if coreio.Local.Exists(n.keyPath) {
|
||||
if err := coreio.Local.Delete(n.keyPath); err != nil {
|
||||
return coreerr.E("NodeManager.Delete", "failed to remove private key", err)
|
||||
if fsExists(n.keyPath) {
|
||||
if err := fsDelete(n.keyPath); err != nil {
|
||||
return core.E("NodeManager.Delete", "failed to remove private key", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove identity config (ignore if already absent)
|
||||
if coreio.Local.Exists(n.configPath) {
|
||||
if err := coreio.Local.Delete(n.configPath); err != nil {
|
||||
return coreerr.E("NodeManager.Delete", "failed to remove identity", err)
|
||||
if fsExists(n.configPath) {
|
||||
if err := fsDelete(n.configPath); err != nil {
|
||||
return core.E("NodeManager.Delete", "failed to remove identity", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,35 +1,21 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// setupTestNodeManager creates a NodeManager with paths in a temp directory.
|
||||
func setupTestNodeManager(t *testing.T) (*NodeManager, func()) {
|
||||
tmpDir, err := os.MkdirTemp("", "node-identity-test")
|
||||
tmpDir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp dir: %v", err)
|
||||
}
|
||||
|
||||
keyPath := filepath.Join(tmpDir, "private.key")
|
||||
configPath := filepath.Join(tmpDir, "node.json")
|
||||
|
||||
nm, err := NewNodeManagerWithPaths(keyPath, configPath)
|
||||
if err != nil {
|
||||
os.RemoveAll(tmpDir)
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
os.RemoveAll(tmpDir)
|
||||
}
|
||||
|
||||
return nm, cleanup
|
||||
return nm, func() {}
|
||||
}
|
||||
|
||||
func TestNodeIdentity(t *testing.T) {
|
||||
func TestIdentity_NodeIdentity_Good(t *testing.T) {
|
||||
t.Run("NewNodeManager", func(t *testing.T) {
|
||||
nm, cleanup := setupTestNodeManager(t)
|
||||
defer cleanup()
|
||||
|
|
@ -75,14 +61,8 @@ func TestNodeIdentity(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("LoadExistingIdentity", func(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "node-load-test")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp dir: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
keyPath := filepath.Join(tmpDir, "private.key")
|
||||
configPath := filepath.Join(tmpDir, "node.json")
|
||||
tmpDir := t.TempDir()
|
||||
keyPath, configPath := testNodeManagerPaths(tmpDir)
|
||||
|
||||
// First, create an identity
|
||||
nm1, err := NewNodeManagerWithPaths(keyPath, configPath)
|
||||
|
|
@ -120,16 +100,11 @@ func TestNodeIdentity(t *testing.T) {
|
|||
|
||||
t.Run("DeriveSharedSecret", func(t *testing.T) {
|
||||
// Create two node managers with separate temp directories
|
||||
tmpDir1, _ := os.MkdirTemp("", "node1")
|
||||
tmpDir2, _ := os.MkdirTemp("", "node2")
|
||||
defer os.RemoveAll(tmpDir1)
|
||||
defer os.RemoveAll(tmpDir2)
|
||||
tmpDir1 := t.TempDir()
|
||||
tmpDir2 := t.TempDir()
|
||||
|
||||
// Node 1
|
||||
nm1, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(tmpDir1, "private.key"),
|
||||
filepath.Join(tmpDir1, "node.json"),
|
||||
)
|
||||
nm1, err := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir1))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager 1: %v", err)
|
||||
}
|
||||
|
|
@ -139,10 +114,7 @@ func TestNodeIdentity(t *testing.T) {
|
|||
}
|
||||
|
||||
// Node 2
|
||||
nm2, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(tmpDir2, "private.key"),
|
||||
filepath.Join(tmpDir2, "node.json"),
|
||||
)
|
||||
nm2, err := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir2))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager 2: %v", err)
|
||||
}
|
||||
|
|
@ -198,7 +170,7 @@ func TestNodeIdentity(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestNodeRoles(t *testing.T) {
|
||||
func TestIdentity_NodeRoles_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
role NodeRole
|
||||
expected string
|
||||
|
|
@ -217,7 +189,7 @@ func TestNodeRoles(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestChallengeResponse(t *testing.T) {
|
||||
func TestIdentity_ChallengeResponse_Good(t *testing.T) {
|
||||
t.Run("GenerateChallenge", func(t *testing.T) {
|
||||
challenge, err := GenerateChallenge()
|
||||
if err != nil {
|
||||
|
|
@ -315,21 +287,13 @@ func TestChallengeResponse(t *testing.T) {
|
|||
|
||||
t.Run("IntegrationWithSharedSecret", func(t *testing.T) {
|
||||
// Create two nodes and test end-to-end challenge-response
|
||||
tmpDir1, _ := os.MkdirTemp("", "node-challenge-1")
|
||||
tmpDir2, _ := os.MkdirTemp("", "node-challenge-2")
|
||||
defer os.RemoveAll(tmpDir1)
|
||||
defer os.RemoveAll(tmpDir2)
|
||||
tmpDir1 := t.TempDir()
|
||||
tmpDir2 := t.TempDir()
|
||||
|
||||
nm1, _ := NewNodeManagerWithPaths(
|
||||
filepath.Join(tmpDir1, "private.key"),
|
||||
filepath.Join(tmpDir1, "node.json"),
|
||||
)
|
||||
nm1, _ := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir1))
|
||||
nm1.GenerateIdentity("challenger", RoleDual)
|
||||
|
||||
nm2, _ := NewNodeManagerWithPaths(
|
||||
filepath.Join(tmpDir2, "private.key"),
|
||||
filepath.Join(tmpDir2, "node.json"),
|
||||
)
|
||||
nm2, _ := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir2))
|
||||
nm2.GenerateIdentity("responder", RoleDual)
|
||||
|
||||
// Challenger generates challenge
|
||||
|
|
@ -352,7 +316,7 @@ func TestChallengeResponse(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestNodeManager_DeriveSharedSecret_NoIdentity(t *testing.T) {
|
||||
func TestIdentity_NodeManager_DeriveSharedSecret_NoIdentity_Bad(t *testing.T) {
|
||||
nm, cleanup := setupTestNodeManager(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -363,7 +327,7 @@ func TestNodeManager_DeriveSharedSecret_NoIdentity(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNodeManager_GetIdentity_NilWhenNoIdentity(t *testing.T) {
|
||||
func TestIdentity_NodeManager_GetIdentity_NilWhenNoIdentity_Bad(t *testing.T) {
|
||||
nm, cleanup := setupTestNodeManager(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -373,11 +337,11 @@ func TestNodeManager_GetIdentity_NilWhenNoIdentity(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNodeManager_Delete_NoFiles(t *testing.T) {
|
||||
func TestIdentity_NodeManager_Delete_NoFiles_Bad(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(tmpDir, "nonexistent.key"),
|
||||
filepath.Join(tmpDir, "nonexistent.json"),
|
||||
testJoinPath(tmpDir, "nonexistent.key"),
|
||||
testJoinPath(tmpDir, "nonexistent.json"),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
|
|
|
|||
|
|
@ -3,11 +3,9 @@ package node
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
|
@ -29,7 +27,7 @@ import (
|
|||
// 5. Graceful shutdown with disconnect messages
|
||||
// ============================================================================
|
||||
|
||||
func TestIntegration_FullNodeLifecycle(t *testing.T) {
|
||||
func TestIntegration_FullNodeLifecycle_Good(t *testing.T) {
|
||||
// ----------------------------------------------------------------
|
||||
// Step 1: Identity creation
|
||||
// ----------------------------------------------------------------
|
||||
|
|
@ -240,7 +238,7 @@ func TestIntegration_FullNodeLifecycle(t *testing.T) {
|
|||
|
||||
// TestIntegration_SharedSecretAgreement verifies that two independently created
|
||||
// nodes derive the same shared secret via ECDH.
|
||||
func TestIntegration_SharedSecretAgreement(t *testing.T) {
|
||||
func TestIntegration_SharedSecretAgreement_Good(t *testing.T) {
|
||||
nodeA := testNode(t, "secret-node-a", RoleDual)
|
||||
nodeB := testNode(t, "secret-node-b", RoleDual)
|
||||
|
||||
|
|
@ -260,7 +258,7 @@ func TestIntegration_SharedSecretAgreement(t *testing.T) {
|
|||
|
||||
// TestIntegration_TwoNodeBidirectionalMessages verifies that both nodes
|
||||
// can send and receive encrypted messages after the handshake.
|
||||
func TestIntegration_TwoNodeBidirectionalMessages(t *testing.T) {
|
||||
func TestIntegration_TwoNodeBidirectionalMessages_Good(t *testing.T) {
|
||||
controller, _, tp := setupControllerPair(t)
|
||||
serverID := tp.ServerNode.GetIdentity().ID
|
||||
|
||||
|
|
@ -285,7 +283,7 @@ func TestIntegration_TwoNodeBidirectionalMessages(t *testing.T) {
|
|||
|
||||
// TestIntegration_MultiPeerTopology verifies that a controller can
|
||||
// simultaneously communicate with multiple workers.
|
||||
func TestIntegration_MultiPeerTopology(t *testing.T) {
|
||||
func TestIntegration_MultiPeerTopology_Good(t *testing.T) {
|
||||
controllerNM := testNode(t, "multi-controller", RoleController)
|
||||
controllerReg := testRegistry(t)
|
||||
controllerTransport := NewTransport(controllerNM, controllerReg, DefaultTransportConfig())
|
||||
|
|
@ -343,10 +341,9 @@ func TestIntegration_MultiPeerTopology(t *testing.T) {
|
|||
|
||||
// TestIntegration_IdentityPersistenceAndReload verifies that a node identity
|
||||
// can be generated, persisted, and reloaded from disk.
|
||||
func TestIntegration_IdentityPersistenceAndReload(t *testing.T) {
|
||||
func TestIntegration_IdentityPersistenceAndReload_Good(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
keyPath := filepath.Join(dir, "private.key")
|
||||
configPath := filepath.Join(dir, "node.json")
|
||||
keyPath, configPath := testNodeManagerPaths(dir)
|
||||
|
||||
// Create and persist identity.
|
||||
nm1, err := NewNodeManagerWithPaths(keyPath, configPath)
|
||||
|
|
@ -386,10 +383,7 @@ func TestIntegration_IdentityPersistenceAndReload(t *testing.T) {
|
|||
// stmfGenerateKeyPair is a helper that generates a keypair and returns
|
||||
// the public key as base64 (for use in DeriveSharedSecret tests).
|
||||
func stmfGenerateKeyPair(dir string) (string, error) {
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
@ -399,10 +393,9 @@ func stmfGenerateKeyPair(dir string) (string, error) {
|
|||
return nm.GetIdentity().PublicKey, nil
|
||||
}
|
||||
|
||||
|
||||
// TestIntegration_UEPSFullRoundTrip exercises a complete UEPS packet
|
||||
// lifecycle: build, sign, transmit (simulated), read, verify, dispatch.
|
||||
func TestIntegration_UEPSFullRoundTrip(t *testing.T) {
|
||||
func TestIntegration_UEPSFullRoundTrip_Ugly(t *testing.T) {
|
||||
nodeA := testNode(t, "ueps-node-a", RoleController)
|
||||
nodeB := testNode(t, "ueps-node-b", RoleWorker)
|
||||
|
||||
|
|
@ -453,7 +446,7 @@ func TestIntegration_UEPSFullRoundTrip(t *testing.T) {
|
|||
|
||||
// TestIntegration_UEPSIntegrityFailure verifies that a tampered UEPS packet
|
||||
// is rejected by HMAC verification.
|
||||
func TestIntegration_UEPSIntegrityFailure(t *testing.T) {
|
||||
func TestIntegration_UEPSIntegrityFailure_Bad(t *testing.T) {
|
||||
nodeA := testNode(t, "integrity-a", RoleController)
|
||||
nodeB := testNode(t, "integrity-b", RoleWorker)
|
||||
|
||||
|
|
@ -484,7 +477,7 @@ func TestIntegration_UEPSIntegrityFailure(t *testing.T) {
|
|||
|
||||
// TestIntegration_AllowlistHandshakeRejection verifies that a peer not in the
|
||||
// allowlist is rejected during the WebSocket handshake.
|
||||
func TestIntegration_AllowlistHandshakeRejection(t *testing.T) {
|
||||
func TestIntegration_AllowlistHandshakeRejection_Bad(t *testing.T) {
|
||||
workerNM := testNode(t, "allowlist-worker", RoleWorker)
|
||||
workerReg := testRegistry(t)
|
||||
workerReg.SetAuthMode(PeerAuthAllowlist)
|
||||
|
|
@ -521,7 +514,7 @@ func TestIntegration_AllowlistHandshakeRejection(t *testing.T) {
|
|||
|
||||
// TestIntegration_AllowlistHandshakeAccepted verifies that an allowlisted
|
||||
// peer can connect successfully.
|
||||
func TestIntegration_AllowlistHandshakeAccepted(t *testing.T) {
|
||||
func TestIntegration_AllowlistHandshakeAccepted_Good(t *testing.T) {
|
||||
workerNM := testNode(t, "allowlist-worker-ok", RoleWorker)
|
||||
workerReg := testRegistry(t)
|
||||
workerReg.SetAuthMode(PeerAuthAllowlist)
|
||||
|
|
@ -563,7 +556,7 @@ func TestIntegration_AllowlistHandshakeAccepted(t *testing.T) {
|
|||
|
||||
// TestIntegration_DispatcherWithRealUEPSPackets builds real UEPS packets
|
||||
// from wire bytes and routes them through the dispatcher.
|
||||
func TestIntegration_DispatcherWithRealUEPSPackets(t *testing.T) {
|
||||
func TestIntegration_DispatcherWithRealUEPSPackets_Good(t *testing.T) {
|
||||
sharedSecret := make([]byte, 32)
|
||||
for i := range sharedSecret {
|
||||
sharedSecret[i] = byte(i ^ 0x42)
|
||||
|
|
@ -614,7 +607,7 @@ func TestIntegration_DispatcherWithRealUEPSPackets(t *testing.T) {
|
|||
// TestIntegration_MessageSerialiseDeserialise verifies that messages survive
|
||||
// the full serialisation/encryption/decryption/deserialisation pipeline
|
||||
// with all fields intact.
|
||||
func TestIntegration_MessageSerialiseDeserialise(t *testing.T) {
|
||||
func TestIntegration_MessageSerialiseDeserialise_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
pc := tp.connectClient(t)
|
||||
|
||||
|
|
@ -653,14 +646,14 @@ func TestIntegration_MessageSerialiseDeserialise(t *testing.T) {
|
|||
assert.Equal(t, original.ReplyTo, decrypted.ReplyTo)
|
||||
|
||||
var originalStats, decryptedStats StatsPayload
|
||||
require.NoError(t, json.Unmarshal(original.Payload, &originalStats))
|
||||
require.NoError(t, json.Unmarshal(decrypted.Payload, &decryptedStats))
|
||||
testJSONUnmarshal(t, original.Payload, &originalStats)
|
||||
testJSONUnmarshal(t, decrypted.Payload, &decryptedStats)
|
||||
assert.Equal(t, originalStats, decryptedStats)
|
||||
}
|
||||
|
||||
// TestIntegration_GetRemoteStats_EndToEnd tests the full stats retrieval flow
|
||||
// across a real WebSocket connection.
|
||||
func TestIntegration_GetRemoteStats_EndToEnd(t *testing.T) {
|
||||
func TestIntegration_GetRemoteStats_EndToEnd_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
worker := NewWorker(tp.ServerNode, tp.Server)
|
||||
|
|
|
|||
|
|
@ -28,6 +28,8 @@ const (
|
|||
// Connection wraps a net.Conn and provides framed Levin packet I/O.
|
||||
// All writes are serialised by an internal mutex, making it safe to call
|
||||
// WritePacket and WriteResponse concurrently from multiple goroutines.
|
||||
//
|
||||
// connection := NewConnection(conn)
|
||||
type Connection struct {
|
||||
// MaxPayloadSize is the upper bound accepted for incoming payloads.
|
||||
// Defaults to the package-level MaxPayloadSize (100 MB).
|
||||
|
|
@ -44,6 +46,8 @@ type Connection struct {
|
|||
}
|
||||
|
||||
// NewConnection creates a Connection that wraps conn with sensible defaults.
|
||||
//
|
||||
// connection := NewConnection(conn)
|
||||
func NewConnection(conn net.Conn) *Connection {
|
||||
return &Connection{
|
||||
MaxPayloadSize: MaxPayloadSize,
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConnection_RoundTrip(t *testing.T) {
|
||||
func TestConnection_RoundTrip_Ugly(t *testing.T) {
|
||||
a, b := net.Pipe()
|
||||
defer a.Close()
|
||||
defer b.Close()
|
||||
|
|
@ -41,7 +41,7 @@ func TestConnection_RoundTrip(t *testing.T) {
|
|||
assert.Equal(t, payload, data)
|
||||
}
|
||||
|
||||
func TestConnection_EmptyPayload(t *testing.T) {
|
||||
func TestConnection_EmptyPayload_Ugly(t *testing.T) {
|
||||
a, b := net.Pipe()
|
||||
defer a.Close()
|
||||
defer b.Close()
|
||||
|
|
@ -64,7 +64,7 @@ func TestConnection_EmptyPayload(t *testing.T) {
|
|||
assert.Nil(t, data)
|
||||
}
|
||||
|
||||
func TestConnection_Response(t *testing.T) {
|
||||
func TestConnection_Response_Good(t *testing.T) {
|
||||
a, b := net.Pipe()
|
||||
defer a.Close()
|
||||
defer b.Close()
|
||||
|
|
@ -91,7 +91,7 @@ func TestConnection_Response(t *testing.T) {
|
|||
assert.Equal(t, payload, data)
|
||||
}
|
||||
|
||||
func TestConnection_PayloadTooBig(t *testing.T) {
|
||||
func TestConnection_PayloadTooBig_Bad(t *testing.T) {
|
||||
a, b := net.Pipe()
|
||||
defer a.Close()
|
||||
defer b.Close()
|
||||
|
|
@ -125,7 +125,7 @@ func TestConnection_PayloadTooBig(t *testing.T) {
|
|||
require.NoError(t, <-errCh)
|
||||
}
|
||||
|
||||
func TestConnection_ReadTimeout(t *testing.T) {
|
||||
func TestConnection_ReadTimeout_Bad(t *testing.T) {
|
||||
a, b := net.Pipe()
|
||||
defer a.Close()
|
||||
defer b.Close()
|
||||
|
|
@ -143,7 +143,7 @@ func TestConnection_ReadTimeout(t *testing.T) {
|
|||
assert.True(t, netErr.Timeout(), "expected timeout error")
|
||||
}
|
||||
|
||||
func TestConnection_RemoteAddr(t *testing.T) {
|
||||
func TestConnection_RemoteAddr_Good(t *testing.T) {
|
||||
a, b := net.Pipe()
|
||||
defer a.Close()
|
||||
defer b.Close()
|
||||
|
|
@ -153,7 +153,7 @@ func TestConnection_RemoteAddr(t *testing.T) {
|
|||
assert.NotEmpty(t, addr)
|
||||
}
|
||||
|
||||
func TestConnection_Close(t *testing.T) {
|
||||
func TestConnection_Close_Ugly(t *testing.T) {
|
||||
a, b := net.Pipe()
|
||||
defer b.Close()
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ package levin
|
|||
import (
|
||||
"encoding/binary"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// HeaderSize is the exact byte length of a serialised Levin header.
|
||||
|
|
@ -43,11 +43,13 @@ const (
|
|||
|
||||
// Sentinel errors returned by DecodeHeader.
|
||||
var (
|
||||
ErrBadSignature = coreerr.E("levin", "bad signature", nil)
|
||||
ErrPayloadTooBig = coreerr.E("levin", "payload exceeds maximum size", nil)
|
||||
ErrBadSignature = core.E("levin", "bad signature", nil)
|
||||
ErrPayloadTooBig = core.E("levin", "payload exceeds maximum size", nil)
|
||||
)
|
||||
|
||||
// Header is the 33-byte packed header that prefixes every Levin message.
|
||||
//
|
||||
// header := Header{Command: CommandHandshake, ExpectResponse: true}
|
||||
type Header struct {
|
||||
Signature uint64
|
||||
PayloadSize uint64
|
||||
|
|
@ -59,6 +61,8 @@ type Header struct {
|
|||
}
|
||||
|
||||
// EncodeHeader serialises h into a fixed-size 33-byte array (little-endian).
|
||||
//
|
||||
// encoded := EncodeHeader(header)
|
||||
func EncodeHeader(h *Header) [HeaderSize]byte {
|
||||
var buf [HeaderSize]byte
|
||||
binary.LittleEndian.PutUint64(buf[0:8], h.Signature)
|
||||
|
|
@ -77,6 +81,8 @@ func EncodeHeader(h *Header) [HeaderSize]byte {
|
|||
|
||||
// DecodeHeader deserialises a 33-byte array into a Header, validating
|
||||
// the magic signature.
|
||||
//
|
||||
// header, err := DecodeHeader(buf)
|
||||
func DecodeHeader(buf [HeaderSize]byte) (Header, error) {
|
||||
var h Header
|
||||
h.Signature = binary.LittleEndian.Uint64(buf[0:8])
|
||||
|
|
|
|||
|
|
@ -11,11 +11,11 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHeaderSizeIs33(t *testing.T) {
|
||||
func TestHeader_SizeIs33_Good(t *testing.T) {
|
||||
assert.Equal(t, 33, HeaderSize)
|
||||
}
|
||||
|
||||
func TestEncodeHeader_KnownValues(t *testing.T) {
|
||||
func TestHeader_EncodeHeader_KnownValues_Good(t *testing.T) {
|
||||
h := &Header{
|
||||
Signature: Signature,
|
||||
PayloadSize: 256,
|
||||
|
|
@ -56,7 +56,7 @@ func TestEncodeHeader_KnownValues(t *testing.T) {
|
|||
assert.Equal(t, uint32(0), pv)
|
||||
}
|
||||
|
||||
func TestEncodeHeader_ExpectResponseFalse(t *testing.T) {
|
||||
func TestHeader_EncodeHeader_ExpectResponseFalse_Good(t *testing.T) {
|
||||
h := &Header{
|
||||
Signature: Signature,
|
||||
PayloadSize: 42,
|
||||
|
|
@ -68,7 +68,7 @@ func TestEncodeHeader_ExpectResponseFalse(t *testing.T) {
|
|||
assert.Equal(t, byte(0x00), buf[16])
|
||||
}
|
||||
|
||||
func TestEncodeHeader_NegativeReturnCode(t *testing.T) {
|
||||
func TestHeader_EncodeHeader_NegativeReturnCode_Good(t *testing.T) {
|
||||
h := &Header{
|
||||
Signature: Signature,
|
||||
PayloadSize: 0,
|
||||
|
|
@ -81,7 +81,7 @@ func TestEncodeHeader_NegativeReturnCode(t *testing.T) {
|
|||
assert.Equal(t, ReturnErrFormat, rc)
|
||||
}
|
||||
|
||||
func TestDecodeHeader_RoundTrip(t *testing.T) {
|
||||
func TestHeader_DecodeHeader_RoundTrip_Ugly(t *testing.T) {
|
||||
original := &Header{
|
||||
Signature: Signature,
|
||||
PayloadSize: 1024,
|
||||
|
|
@ -105,7 +105,7 @@ func TestDecodeHeader_RoundTrip(t *testing.T) {
|
|||
assert.Equal(t, original.ProtocolVersion, decoded.ProtocolVersion)
|
||||
}
|
||||
|
||||
func TestDecodeHeader_AllCommands(t *testing.T) {
|
||||
func TestHeader_DecodeHeader_AllCommands_Good(t *testing.T) {
|
||||
commands := []uint32{
|
||||
CommandHandshake,
|
||||
CommandTimedSync,
|
||||
|
|
@ -131,7 +131,7 @@ func TestDecodeHeader_AllCommands(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDecodeHeader_BadSignature(t *testing.T) {
|
||||
func TestHeader_DecodeHeader_BadSignature_Bad(t *testing.T) {
|
||||
h := &Header{
|
||||
Signature: 0xDEADBEEF,
|
||||
PayloadSize: 0,
|
||||
|
|
@ -143,7 +143,7 @@ func TestDecodeHeader_BadSignature(t *testing.T) {
|
|||
assert.ErrorIs(t, err, ErrBadSignature)
|
||||
}
|
||||
|
||||
func TestDecodeHeader_PayloadTooBig(t *testing.T) {
|
||||
func TestHeader_DecodeHeader_PayloadTooBig_Bad(t *testing.T) {
|
||||
h := &Header{
|
||||
Signature: Signature,
|
||||
PayloadSize: MaxPayloadSize + 1,
|
||||
|
|
@ -155,7 +155,7 @@ func TestDecodeHeader_PayloadTooBig(t *testing.T) {
|
|||
assert.ErrorIs(t, err, ErrPayloadTooBig)
|
||||
}
|
||||
|
||||
func TestDecodeHeader_MaxPayloadExact(t *testing.T) {
|
||||
func TestHeader_DecodeHeader_MaxPayloadExact_Ugly(t *testing.T) {
|
||||
h := &Header{
|
||||
Signature: Signature,
|
||||
PayloadSize: MaxPayloadSize,
|
||||
|
|
|
|||
|
|
@ -5,12 +5,11 @@ package levin
|
|||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"slices"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// Portable storage signatures and version (9-byte header).
|
||||
|
|
@ -41,20 +40,24 @@ const (
|
|||
|
||||
// Sentinel errors for storage encoding and decoding.
|
||||
var (
|
||||
ErrStorageBadSignature = coreerr.E("levin.storage", "bad storage signature", nil)
|
||||
ErrStorageTruncated = coreerr.E("levin.storage", "truncated storage data", nil)
|
||||
ErrStorageBadVersion = coreerr.E("levin.storage", "unsupported storage version", nil)
|
||||
ErrStorageNameTooLong = coreerr.E("levin.storage", "entry name exceeds 255 bytes", nil)
|
||||
ErrStorageTypeMismatch = coreerr.E("levin.storage", "value type mismatch", nil)
|
||||
ErrStorageUnknownType = coreerr.E("levin.storage", "unknown type tag", nil)
|
||||
ErrStorageBadSignature = core.E("levin.storage", "bad storage signature", nil)
|
||||
ErrStorageTruncated = core.E("levin.storage", "truncated storage data", nil)
|
||||
ErrStorageBadVersion = core.E("levin.storage", "unsupported storage version", nil)
|
||||
ErrStorageNameTooLong = core.E("levin.storage", "entry name exceeds 255 bytes", nil)
|
||||
ErrStorageTypeMismatch = core.E("levin.storage", "value type mismatch", nil)
|
||||
ErrStorageUnknownType = core.E("levin.storage", "unknown type tag", nil)
|
||||
)
|
||||
|
||||
// Section is an ordered map of named values forming a portable storage section.
|
||||
// Field iteration order is always alphabetical by key for deterministic encoding.
|
||||
//
|
||||
// section := Section{"id": StringVal([]byte("peer-1"))}
|
||||
type Section map[string]Value
|
||||
|
||||
// Value holds a typed portable storage value. Use the constructor functions
|
||||
// (Uint64Val, StringVal, ObjectVal, etc.) to create instances.
|
||||
//
|
||||
// value := StringVal([]byte("peer-1"))
|
||||
type Value struct {
|
||||
Type uint8
|
||||
|
||||
|
|
@ -78,39 +81,63 @@ type Value struct {
|
|||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Uint64Val creates a Value of TypeUint64.
|
||||
//
|
||||
// value := Uint64Val(42)
|
||||
func Uint64Val(v uint64) Value { return Value{Type: TypeUint64, uintVal: v} }
|
||||
|
||||
// Uint32Val creates a Value of TypeUint32.
|
||||
//
|
||||
// value := Uint32Val(42)
|
||||
func Uint32Val(v uint32) Value { return Value{Type: TypeUint32, uintVal: uint64(v)} }
|
||||
|
||||
// Uint16Val creates a Value of TypeUint16.
|
||||
//
|
||||
// value := Uint16Val(42)
|
||||
func Uint16Val(v uint16) Value { return Value{Type: TypeUint16, uintVal: uint64(v)} }
|
||||
|
||||
// Uint8Val creates a Value of TypeUint8.
|
||||
//
|
||||
// value := Uint8Val(42)
|
||||
func Uint8Val(v uint8) Value { return Value{Type: TypeUint8, uintVal: uint64(v)} }
|
||||
|
||||
// Int64Val creates a Value of TypeInt64.
|
||||
//
|
||||
// value := Int64Val(42)
|
||||
func Int64Val(v int64) Value { return Value{Type: TypeInt64, intVal: v} }
|
||||
|
||||
// Int32Val creates a Value of TypeInt32.
|
||||
//
|
||||
// value := Int32Val(42)
|
||||
func Int32Val(v int32) Value { return Value{Type: TypeInt32, intVal: int64(v)} }
|
||||
|
||||
// Int16Val creates a Value of TypeInt16.
|
||||
//
|
||||
// value := Int16Val(42)
|
||||
func Int16Val(v int16) Value { return Value{Type: TypeInt16, intVal: int64(v)} }
|
||||
|
||||
// Int8Val creates a Value of TypeInt8.
|
||||
//
|
||||
// value := Int8Val(42)
|
||||
func Int8Val(v int8) Value { return Value{Type: TypeInt8, intVal: int64(v)} }
|
||||
|
||||
// BoolVal creates a Value of TypeBool.
|
||||
//
|
||||
// value := BoolVal(true)
|
||||
func BoolVal(v bool) Value { return Value{Type: TypeBool, boolVal: v} }
|
||||
|
||||
// DoubleVal creates a Value of TypeDouble.
|
||||
//
|
||||
// value := DoubleVal(3.14)
|
||||
func DoubleVal(v float64) Value { return Value{Type: TypeDouble, floatVal: v} }
|
||||
|
||||
// StringVal creates a Value of TypeString. The slice is not copied.
|
||||
//
|
||||
// value := StringVal([]byte("hello"))
|
||||
func StringVal(v []byte) Value { return Value{Type: TypeString, bytesVal: v} }
|
||||
|
||||
// ObjectVal creates a Value of TypeObject wrapping a nested Section.
|
||||
//
|
||||
// value := ObjectVal(Section{"id": StringVal([]byte("peer-1"))})
|
||||
func ObjectVal(s Section) Value { return Value{Type: TypeObject, objectVal: s} }
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
|
@ -118,21 +145,29 @@ func ObjectVal(s Section) Value { return Value{Type: TypeObject, objectVal: s} }
|
|||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Uint64ArrayVal creates a typed array of uint64 values.
|
||||
//
|
||||
// value := Uint64ArrayVal([]uint64{1, 2, 3})
|
||||
func Uint64ArrayVal(vs []uint64) Value {
|
||||
return Value{Type: ArrayFlag | TypeUint64, uint64Array: vs}
|
||||
}
|
||||
|
||||
// Uint32ArrayVal creates a typed array of uint32 values.
|
||||
//
|
||||
// value := Uint32ArrayVal([]uint32{1, 2, 3})
|
||||
func Uint32ArrayVal(vs []uint32) Value {
|
||||
return Value{Type: ArrayFlag | TypeUint32, uint32Array: vs}
|
||||
}
|
||||
|
||||
// StringArrayVal creates a typed array of byte-string values.
|
||||
//
|
||||
// value := StringArrayVal([][]byte{[]byte("a"), []byte("b")})
|
||||
func StringArrayVal(vs [][]byte) Value {
|
||||
return Value{Type: ArrayFlag | TypeString, stringArray: vs}
|
||||
}
|
||||
|
||||
// ObjectArrayVal creates a typed array of Section values.
|
||||
//
|
||||
// value := ObjectArrayVal([]Section{{"id": StringVal([]byte("peer-1"))}})
|
||||
func ObjectArrayVal(vs []Section) Value {
|
||||
return Value{Type: ArrayFlag | TypeObject, objectArray: vs}
|
||||
}
|
||||
|
|
@ -280,6 +315,8 @@ func (v Value) AsSectionArray() ([]Section, error) {
|
|||
// EncodeStorage serialises a Section to the portable storage binary format,
|
||||
// including the 9-byte header. Keys are sorted alphabetically to ensure
|
||||
// deterministic output.
|
||||
//
|
||||
// data, err := EncodeStorage(section)
|
||||
func EncodeStorage(s Section) ([]byte, error) {
|
||||
buf := make([]byte, 0, 256)
|
||||
|
||||
|
|
@ -394,7 +431,7 @@ func encodeValue(buf []byte, v Value) ([]byte, error) {
|
|||
return encodeSection(buf, v.objectVal)
|
||||
|
||||
default:
|
||||
return nil, coreerr.E("levin.encodeValue", fmt.Sprintf("unknown type tag: 0x%02x", v.Type), ErrStorageUnknownType)
|
||||
return nil, core.E("levin.encodeValue", core.Sprintf("unknown type tag: 0x%02x", v.Type), ErrStorageUnknownType)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -441,7 +478,7 @@ func encodeArray(buf []byte, v Value) ([]byte, error) {
|
|||
return buf, nil
|
||||
|
||||
default:
|
||||
return nil, coreerr.E("levin.encodeArray", fmt.Sprintf("unknown type tag: array of 0x%02x", elemType), ErrStorageUnknownType)
|
||||
return nil, core.E("levin.encodeArray", core.Sprintf("unknown type tag: array of 0x%02x", elemType), ErrStorageUnknownType)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -451,6 +488,8 @@ func encodeArray(buf []byte, v Value) ([]byte, error) {
|
|||
|
||||
// DecodeStorage deserialises portable storage binary data (including the
|
||||
// 9-byte header) into a Section.
|
||||
//
|
||||
// section, err := DecodeStorage(data)
|
||||
func DecodeStorage(data []byte) (Section, error) {
|
||||
if len(data) < StorageHeaderSize {
|
||||
return nil, ErrStorageTruncated
|
||||
|
|
@ -476,7 +515,7 @@ func DecodeStorage(data []byte) (Section, error) {
|
|||
func decodeSection(buf []byte) (Section, int, error) {
|
||||
count, n, err := UnpackVarint(buf)
|
||||
if err != nil {
|
||||
return nil, 0, coreerr.E("levin.decodeSection", "section entry count", err)
|
||||
return nil, 0, core.E("levin.decodeSection", "section entry count", err)
|
||||
}
|
||||
off := n
|
||||
|
||||
|
|
@ -507,7 +546,7 @@ func decodeSection(buf []byte) (Section, int, error) {
|
|||
// Value.
|
||||
val, consumed, err := decodeValue(buf[off:], tag)
|
||||
if err != nil {
|
||||
return nil, 0, coreerr.E("levin.decodeSection", "field "+name, err)
|
||||
return nil, 0, core.E("levin.decodeSection", "field "+name, err)
|
||||
}
|
||||
off += consumed
|
||||
|
||||
|
|
@ -613,7 +652,7 @@ func decodeValue(buf []byte, tag uint8) (Value, int, error) {
|
|||
return Value{Type: TypeObject, objectVal: sec}, consumed, nil
|
||||
|
||||
default:
|
||||
return Value{}, 0, coreerr.E("levin.decodeValue", fmt.Sprintf("unknown type tag: 0x%02x", tag), ErrStorageUnknownType)
|
||||
return Value{}, 0, core.E("levin.decodeValue", core.Sprintf("unknown type tag: 0x%02x", tag), ErrStorageUnknownType)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -681,6 +720,6 @@ func decodeArray(buf []byte, tag uint8) (Value, int, error) {
|
|||
return Value{Type: tag, objectArray: arr}, off, nil
|
||||
|
||||
default:
|
||||
return Value{}, 0, coreerr.E("levin.decodeArray", fmt.Sprintf("unknown type tag: array of 0x%02x", elemType), ErrStorageUnknownType)
|
||||
return Value{}, 0, core.E("levin.decodeArray", core.Sprintf("unknown type tag: array of 0x%02x", elemType), ErrStorageUnknownType)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestEncodeStorage_EmptySection(t *testing.T) {
|
||||
func TestStorage_EncodeStorage_EmptySection_Ugly(t *testing.T) {
|
||||
s := Section{}
|
||||
data, err := EncodeStorage(s)
|
||||
require.NoError(t, err)
|
||||
|
|
@ -35,7 +35,7 @@ func TestEncodeStorage_EmptySection(t *testing.T) {
|
|||
assert.Equal(t, byte(0x00), data[9])
|
||||
}
|
||||
|
||||
func TestStorage_PrimitivesRoundTrip(t *testing.T) {
|
||||
func TestStorage_PrimitivesRoundTrip_Ugly(t *testing.T) {
|
||||
s := Section{
|
||||
"u64": Uint64Val(0xDEADBEEFCAFEBABE),
|
||||
"u32": Uint32Val(0xCAFEBABE),
|
||||
|
|
@ -106,7 +106,7 @@ func TestStorage_PrimitivesRoundTrip(t *testing.T) {
|
|||
assert.Equal(t, 3.141592653589793, pi)
|
||||
}
|
||||
|
||||
func TestStorage_NestedObject(t *testing.T) {
|
||||
func TestStorage_NestedObject_Good(t *testing.T) {
|
||||
inner := Section{
|
||||
"port": Uint16Val(18080),
|
||||
"host": StringVal([]byte("127.0.0.1")),
|
||||
|
|
@ -138,7 +138,7 @@ func TestStorage_NestedObject(t *testing.T) {
|
|||
assert.Equal(t, []byte("127.0.0.1"), host)
|
||||
}
|
||||
|
||||
func TestStorage_Uint64Array(t *testing.T) {
|
||||
func TestStorage_Uint64Array_Good(t *testing.T) {
|
||||
s := Section{
|
||||
"heights": Uint64ArrayVal([]uint64{10, 20, 30}),
|
||||
}
|
||||
|
|
@ -154,7 +154,7 @@ func TestStorage_Uint64Array(t *testing.T) {
|
|||
assert.Equal(t, []uint64{10, 20, 30}, arr)
|
||||
}
|
||||
|
||||
func TestStorage_StringArray(t *testing.T) {
|
||||
func TestStorage_StringArray_Good(t *testing.T) {
|
||||
s := Section{
|
||||
"peers": StringArrayVal([][]byte{[]byte("foo"), []byte("bar")}),
|
||||
}
|
||||
|
|
@ -172,7 +172,7 @@ func TestStorage_StringArray(t *testing.T) {
|
|||
assert.Equal(t, []byte("bar"), arr[1])
|
||||
}
|
||||
|
||||
func TestStorage_ObjectArray(t *testing.T) {
|
||||
func TestStorage_ObjectArray_Good(t *testing.T) {
|
||||
sections := []Section{
|
||||
{"id": Uint32Val(1), "name": StringVal([]byte("alice"))},
|
||||
{"id": Uint32Val(2), "name": StringVal([]byte("bob"))},
|
||||
|
|
@ -208,7 +208,7 @@ func TestStorage_ObjectArray(t *testing.T) {
|
|||
assert.Equal(t, []byte("bob"), name2)
|
||||
}
|
||||
|
||||
func TestDecodeStorage_BadSignature(t *testing.T) {
|
||||
func TestStorage_DecodeStorage_BadSignature_Bad(t *testing.T) {
|
||||
// Corrupt the first 4 bytes.
|
||||
data := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x01, 0x02, 0x01, 0x01, 0x00}
|
||||
_, err := DecodeStorage(data)
|
||||
|
|
@ -216,16 +216,16 @@ func TestDecodeStorage_BadSignature(t *testing.T) {
|
|||
assert.ErrorIs(t, err, ErrStorageBadSignature)
|
||||
}
|
||||
|
||||
func TestDecodeStorage_TooShort(t *testing.T) {
|
||||
func TestStorage_DecodeStorage_TooShort_Bad(t *testing.T) {
|
||||
_, err := DecodeStorage([]byte{0x01, 0x11})
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, ErrStorageTruncated)
|
||||
}
|
||||
|
||||
func TestStorage_ByteIdenticalReencode(t *testing.T) {
|
||||
func TestStorage_ByteIdenticalReencode_Ugly(t *testing.T) {
|
||||
s := Section{
|
||||
"alpha": Uint64Val(999),
|
||||
"bravo": StringVal([]byte("deterministic")),
|
||||
"alpha": Uint64Val(999),
|
||||
"bravo": StringVal([]byte("deterministic")),
|
||||
"charlie": BoolVal(false),
|
||||
"delta": ObjectVal(Section{
|
||||
"x": Int32Val(-42),
|
||||
|
|
@ -246,7 +246,7 @@ func TestStorage_ByteIdenticalReencode(t *testing.T) {
|
|||
assert.Equal(t, data1, data2, "re-encoded bytes must be identical")
|
||||
}
|
||||
|
||||
func TestStorage_TypeMismatchErrors(t *testing.T) {
|
||||
func TestStorage_TypeMismatchErrors_Bad(t *testing.T) {
|
||||
v := Uint64Val(42)
|
||||
|
||||
_, err := v.AsUint32()
|
||||
|
|
@ -265,7 +265,7 @@ func TestStorage_TypeMismatchErrors(t *testing.T) {
|
|||
assert.ErrorIs(t, err, ErrStorageTypeMismatch)
|
||||
}
|
||||
|
||||
func TestStorage_Uint32Array(t *testing.T) {
|
||||
func TestStorage_Uint32Array_Good(t *testing.T) {
|
||||
s := Section{
|
||||
"ports": Uint32ArrayVal([]uint32{8080, 8443, 9090}),
|
||||
}
|
||||
|
|
@ -281,7 +281,7 @@ func TestStorage_Uint32Array(t *testing.T) {
|
|||
assert.Equal(t, []uint32{8080, 8443, 9090}, arr)
|
||||
}
|
||||
|
||||
func TestDecodeStorage_BadVersion(t *testing.T) {
|
||||
func TestStorage_DecodeStorage_BadVersion_Bad(t *testing.T) {
|
||||
// Valid signatures but version 2 instead of 1.
|
||||
data := []byte{0x01, 0x11, 0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x02, 0x00}
|
||||
_, err := DecodeStorage(data)
|
||||
|
|
@ -289,11 +289,11 @@ func TestDecodeStorage_BadVersion(t *testing.T) {
|
|||
assert.ErrorIs(t, err, ErrStorageBadVersion)
|
||||
}
|
||||
|
||||
func TestStorage_EmptyArrays(t *testing.T) {
|
||||
func TestStorage_EmptyArrays_Ugly(t *testing.T) {
|
||||
s := Section{
|
||||
"empty_u64": Uint64ArrayVal([]uint64{}),
|
||||
"empty_str": StringArrayVal([][]byte{}),
|
||||
"empty_obj": ObjectArrayVal([]Section{}),
|
||||
"empty_u64": Uint64ArrayVal([]uint64{}),
|
||||
"empty_str": StringArrayVal([][]byte{}),
|
||||
"empty_obj": ObjectArrayVal([]Section{}),
|
||||
}
|
||||
|
||||
data, err := EncodeStorage(s)
|
||||
|
|
@ -315,7 +315,7 @@ func TestStorage_EmptyArrays(t *testing.T) {
|
|||
assert.Empty(t, objarr)
|
||||
}
|
||||
|
||||
func TestStorage_BoolFalseRoundTrip(t *testing.T) {
|
||||
func TestStorage_BoolFalseRoundTrip_Ugly(t *testing.T) {
|
||||
s := Section{
|
||||
"off": BoolVal(false),
|
||||
"on": BoolVal(true),
|
||||
|
|
|
|||
|
|
@ -6,31 +6,33 @@ package levin
|
|||
import (
|
||||
"encoding/binary"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// Size-mark bits occupying the two lowest bits of the first byte.
|
||||
const (
|
||||
varintMask = 0x03
|
||||
varintMark1 = 0x00 // 1 byte, max 63
|
||||
varintMark2 = 0x01 // 2 bytes, max 16,383
|
||||
varintMark4 = 0x02 // 4 bytes, max 1,073,741,823
|
||||
varintMark8 = 0x03 // 8 bytes, max 4,611,686,018,427,387,903
|
||||
varintMax1 = 63
|
||||
varintMax2 = 16_383
|
||||
varintMax4 = 1_073_741_823
|
||||
varintMax8 = 4_611_686_018_427_387_903
|
||||
varintMask = 0x03
|
||||
varintMark1 = 0x00 // 1 byte, max 63
|
||||
varintMark2 = 0x01 // 2 bytes, max 16,383
|
||||
varintMark4 = 0x02 // 4 bytes, max 1,073,741,823
|
||||
varintMark8 = 0x03 // 8 bytes, max 4,611,686,018,427,387,903
|
||||
varintMax1 = 63
|
||||
varintMax2 = 16_383
|
||||
varintMax4 = 1_073_741_823
|
||||
varintMax8 = 4_611_686_018_427_387_903
|
||||
)
|
||||
|
||||
// ErrVarintTruncated is returned when the buffer is too short.
|
||||
var ErrVarintTruncated = coreerr.E("levin", "truncated varint", nil)
|
||||
var ErrVarintTruncated = core.E("levin", "truncated varint", nil)
|
||||
|
||||
// ErrVarintOverflow is returned when the value is too large to encode.
|
||||
var ErrVarintOverflow = coreerr.E("levin", "varint overflow", nil)
|
||||
var ErrVarintOverflow = core.E("levin", "varint overflow", nil)
|
||||
|
||||
// PackVarint encodes v using the epee portable-storage varint scheme.
|
||||
// The low two bits of the first byte indicate the total encoded width;
|
||||
// the remaining bits carry the value in little-endian order.
|
||||
//
|
||||
// encoded := PackVarint(42)
|
||||
func PackVarint(v uint64) []byte {
|
||||
switch {
|
||||
case v <= varintMax1:
|
||||
|
|
@ -55,6 +57,8 @@ func PackVarint(v uint64) []byte {
|
|||
|
||||
// UnpackVarint decodes one epee portable-storage varint from buf.
|
||||
// It returns the decoded value, the number of bytes consumed, and any error.
|
||||
//
|
||||
// value, err := UnpackVarint(data)
|
||||
func UnpackVarint(buf []byte) (value uint64, bytesConsumed int, err error) {
|
||||
if len(buf) == 0 {
|
||||
return 0, 0, ErrVarintTruncated
|
||||
|
|
|
|||
|
|
@ -10,41 +10,41 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestPackVarint_Value5(t *testing.T) {
|
||||
func TestVarint_PackVarint_Value5_Good(t *testing.T) {
|
||||
// 5 << 2 | 0x00 = 20 = 0x14
|
||||
got := PackVarint(5)
|
||||
assert.Equal(t, []byte{0x14}, got)
|
||||
}
|
||||
|
||||
func TestPackVarint_Value100(t *testing.T) {
|
||||
func TestVarint_PackVarint_Value100_Good(t *testing.T) {
|
||||
// 100 << 2 | 0x01 = 401 = 0x0191 → LE [0x91, 0x01]
|
||||
got := PackVarint(100)
|
||||
assert.Equal(t, []byte{0x91, 0x01}, got)
|
||||
}
|
||||
|
||||
func TestPackVarint_Value65536(t *testing.T) {
|
||||
func TestVarint_PackVarint_Value65536_Good(t *testing.T) {
|
||||
// 65536 << 2 | 0x02 = 262146 = 0x00040002 → LE [0x02, 0x00, 0x04, 0x00]
|
||||
got := PackVarint(65536)
|
||||
assert.Equal(t, []byte{0x02, 0x00, 0x04, 0x00}, got)
|
||||
}
|
||||
|
||||
func TestPackVarint_Value2Billion(t *testing.T) {
|
||||
func TestVarint_PackVarint_Value2Billion_Good(t *testing.T) {
|
||||
got := PackVarint(2_000_000_000)
|
||||
require.Len(t, got, 8)
|
||||
// Low 2 bits must be 0x03 (8-byte mark).
|
||||
assert.Equal(t, byte(0x03), got[0]&0x03)
|
||||
}
|
||||
|
||||
func TestPackVarint_Zero(t *testing.T) {
|
||||
func TestVarint_PackVarint_Zero_Ugly(t *testing.T) {
|
||||
got := PackVarint(0)
|
||||
assert.Equal(t, []byte{0x00}, got)
|
||||
}
|
||||
|
||||
func TestPackVarint_Boundaries(t *testing.T) {
|
||||
func TestVarint_PackVarint_Boundaries_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
value uint64
|
||||
wantLen int
|
||||
name string
|
||||
value uint64
|
||||
wantLen int
|
||||
}{
|
||||
{"1-byte max (63)", 63, 1},
|
||||
{"2-byte min (64)", 64, 2},
|
||||
|
|
@ -63,7 +63,7 @@ func TestPackVarint_Boundaries(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestVarint_RoundTrip(t *testing.T) {
|
||||
func TestVarint_RoundTrip_Ugly(t *testing.T) {
|
||||
values := []uint64{
|
||||
0, 1, 63, 64, 100, 16_383, 16_384,
|
||||
1_073_741_823, 1_073_741_824,
|
||||
|
|
@ -79,13 +79,13 @@ func TestVarint_RoundTrip(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestUnpackVarint_EmptyInput(t *testing.T) {
|
||||
func TestVarint_UnpackVarint_EmptyInput_Ugly(t *testing.T) {
|
||||
_, _, err := UnpackVarint([]byte{})
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, ErrVarintTruncated)
|
||||
}
|
||||
|
||||
func TestUnpackVarint_Truncated2Byte(t *testing.T) {
|
||||
func TestVarint_UnpackVarint_Truncated2Byte_Bad(t *testing.T) {
|
||||
// Encode 64 (needs 2 bytes), then only pass 1 byte.
|
||||
buf := PackVarint(64)
|
||||
require.Len(t, buf, 2)
|
||||
|
|
@ -94,7 +94,7 @@ func TestUnpackVarint_Truncated2Byte(t *testing.T) {
|
|||
assert.ErrorIs(t, err, ErrVarintTruncated)
|
||||
}
|
||||
|
||||
func TestUnpackVarint_Truncated4Byte(t *testing.T) {
|
||||
func TestVarint_UnpackVarint_Truncated4Byte_Bad(t *testing.T) {
|
||||
buf := PackVarint(16_384)
|
||||
require.Len(t, buf, 4)
|
||||
_, _, err := UnpackVarint(buf[:2])
|
||||
|
|
@ -102,7 +102,7 @@ func TestUnpackVarint_Truncated4Byte(t *testing.T) {
|
|||
assert.ErrorIs(t, err, ErrVarintTruncated)
|
||||
}
|
||||
|
||||
func TestUnpackVarint_Truncated8Byte(t *testing.T) {
|
||||
func TestVarint_UnpackVarint_Truncated8Byte_Bad(t *testing.T) {
|
||||
buf := PackVarint(1_073_741_824)
|
||||
require.Len(t, buf, 8)
|
||||
_, _, err := UnpackVarint(buf[:4])
|
||||
|
|
@ -110,7 +110,7 @@ func TestUnpackVarint_Truncated8Byte(t *testing.T) {
|
|||
assert.ErrorIs(t, err, ErrVarintTruncated)
|
||||
}
|
||||
|
||||
func TestUnpackVarint_ExtraBytes(t *testing.T) {
|
||||
func TestVarint_UnpackVarint_ExtraBytes_Good(t *testing.T) {
|
||||
// Ensure that extra trailing bytes are not consumed.
|
||||
buf := append(PackVarint(42), 0xFF, 0xFF)
|
||||
decoded, consumed, err := UnpackVarint(buf)
|
||||
|
|
@ -119,7 +119,7 @@ func TestUnpackVarint_ExtraBytes(t *testing.T) {
|
|||
assert.Equal(t, 1, consumed)
|
||||
}
|
||||
|
||||
func TestPackVarint_SizeMarkBits(t *testing.T) {
|
||||
func TestVarint_PackVarint_SizeMarkBits_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
value uint64
|
||||
|
|
|
|||
101
node/message.go
101
node/message.go
|
|
@ -1,10 +1,10 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
|
|
@ -18,14 +18,49 @@ const (
|
|||
|
||||
// SupportedProtocolVersions lists all protocol versions this node supports.
|
||||
// Used for version negotiation during handshake.
|
||||
//
|
||||
// versions := SupportedProtocolVersions
|
||||
var SupportedProtocolVersions = []string{"1.0"}
|
||||
|
||||
// RawMessage stores an already-encoded JSON payload for deferred decoding.
|
||||
//
|
||||
// payload := RawMessage(`{"pool":"pool.example.com:3333"}`)
|
||||
type RawMessage []byte
|
||||
|
||||
// MarshalJSON preserves the raw JSON payload when the message is encoded.
|
||||
//
|
||||
// data, err := RawMessage(`{"ok":true}`).MarshalJSON()
|
||||
func (m RawMessage) MarshalJSON() ([]byte, error) {
|
||||
if m == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON stores the raw JSON payload bytes without decoding them.
|
||||
//
|
||||
// var payload RawMessage
|
||||
// _ = payload.UnmarshalJSON([]byte(`{"ok":true}`))
|
||||
func (m *RawMessage) UnmarshalJSON(data []byte) error {
|
||||
if m == nil {
|
||||
return core.E("node.RawMessage.UnmarshalJSON", "raw message target is nil", nil)
|
||||
}
|
||||
|
||||
*m = append((*m)[:0], data...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsProtocolVersionSupported checks if a given version is supported.
|
||||
//
|
||||
// ok := IsProtocolVersionSupported("1.0")
|
||||
func IsProtocolVersionSupported(version string) bool {
|
||||
return slices.Contains(SupportedProtocolVersions, version)
|
||||
}
|
||||
|
||||
// MessageType defines the type of P2P message.
|
||||
//
|
||||
// msgType := MsgPing
|
||||
type MessageType string
|
||||
|
||||
const (
|
||||
|
|
@ -56,25 +91,29 @@ const (
|
|||
)
|
||||
|
||||
// Message represents a P2P message between nodes.
|
||||
//
|
||||
// msg, err := NewMessage(MsgPing, "controller", "worker", PingPayload{SentAt: time.Now().UnixMilli()})
|
||||
type Message struct {
|
||||
ID string `json:"id"` // UUID
|
||||
Type MessageType `json:"type"`
|
||||
From string `json:"from"` // Sender node ID
|
||||
To string `json:"to"` // Recipient node ID (empty for broadcast)
|
||||
Timestamp time.Time `json:"ts"`
|
||||
Payload json.RawMessage `json:"payload"`
|
||||
ReplyTo string `json:"replyTo,omitempty"` // ID of message being replied to
|
||||
ID string `json:"id"` // UUID
|
||||
Type MessageType `json:"type"`
|
||||
From string `json:"from"` // Sender node ID
|
||||
To string `json:"to"` // Recipient node ID (empty for broadcast)
|
||||
Timestamp time.Time `json:"ts"`
|
||||
Payload RawMessage `json:"payload"`
|
||||
ReplyTo string `json:"replyTo,omitempty"` // ID of message being replied to
|
||||
}
|
||||
|
||||
// NewMessage creates a new message with a generated ID and timestamp.
|
||||
//
|
||||
// msg, err := NewMessage(MsgPing, "controller", "worker", PingPayload{SentAt: 42})
|
||||
func NewMessage(msgType MessageType, from, to string, payload any) (*Message, error) {
|
||||
var payloadBytes json.RawMessage
|
||||
var payloadBytes RawMessage
|
||||
if payload != nil {
|
||||
data, err := MarshalJSON(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadBytes = data
|
||||
payloadBytes = RawMessage(data)
|
||||
}
|
||||
|
||||
return &Message{
|
||||
|
|
@ -102,12 +141,18 @@ func (m *Message) ParsePayload(v any) error {
|
|||
if m.Payload == nil {
|
||||
return nil
|
||||
}
|
||||
return json.Unmarshal(m.Payload, v)
|
||||
result := core.JSONUnmarshal(m.Payload, v)
|
||||
if !result.OK {
|
||||
return result.Value.(error)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// --- Payload Types ---
|
||||
|
||||
// HandshakePayload is sent during connection establishment.
|
||||
//
|
||||
// payload := HandshakePayload{Identity: NodeIdentity{Name: "worker-1"}, Version: ProtocolVersion}
|
||||
type HandshakePayload struct {
|
||||
Identity NodeIdentity `json:"identity"`
|
||||
Challenge []byte `json:"challenge,omitempty"` // Random bytes for auth
|
||||
|
|
@ -115,6 +160,8 @@ type HandshakePayload struct {
|
|||
}
|
||||
|
||||
// HandshakeAckPayload is the response to a handshake.
|
||||
//
|
||||
// ack := HandshakeAckPayload{Accepted: true}
|
||||
type HandshakeAckPayload struct {
|
||||
Identity NodeIdentity `json:"identity"`
|
||||
ChallengeResponse []byte `json:"challengeResponse,omitempty"`
|
||||
|
|
@ -123,29 +170,39 @@ type HandshakeAckPayload struct {
|
|||
}
|
||||
|
||||
// PingPayload for keepalive/latency measurement.
|
||||
//
|
||||
// payload := PingPayload{SentAt: 42}
|
||||
type PingPayload struct {
|
||||
SentAt int64 `json:"sentAt"` // Unix timestamp in milliseconds
|
||||
}
|
||||
|
||||
// PongPayload response to ping.
|
||||
//
|
||||
// payload := PongPayload{SentAt: 42, ReceivedAt: 43}
|
||||
type PongPayload struct {
|
||||
SentAt int64 `json:"sentAt"` // Echo of ping's sentAt
|
||||
ReceivedAt int64 `json:"receivedAt"` // When ping was received
|
||||
}
|
||||
|
||||
// StartMinerPayload requests starting a miner.
|
||||
//
|
||||
// payload := StartMinerPayload{MinerType: "xmrig"}
|
||||
type StartMinerPayload struct {
|
||||
MinerType string `json:"minerType"` // Required: miner type (e.g., "xmrig", "tt-miner")
|
||||
ProfileID string `json:"profileId,omitempty"`
|
||||
Config json.RawMessage `json:"config,omitempty"` // Override profile config
|
||||
MinerType string `json:"minerType"` // Required: miner type (e.g., "xmrig", "tt-miner")
|
||||
ProfileID string `json:"profileId,omitempty"`
|
||||
Config RawMessage `json:"config,omitempty"` // Override profile config
|
||||
}
|
||||
|
||||
// StopMinerPayload requests stopping a miner.
|
||||
//
|
||||
// payload := StopMinerPayload{MinerName: "xmrig-0"}
|
||||
type StopMinerPayload struct {
|
||||
MinerName string `json:"minerName"`
|
||||
}
|
||||
|
||||
// MinerAckPayload acknowledges a miner start/stop operation.
|
||||
//
|
||||
// ack := MinerAckPayload{Success: true, MinerName: "xmrig-0"}
|
||||
type MinerAckPayload struct {
|
||||
Success bool `json:"success"`
|
||||
MinerName string `json:"minerName,omitempty"`
|
||||
|
|
@ -153,6 +210,8 @@ type MinerAckPayload struct {
|
|||
}
|
||||
|
||||
// MinerStatsItem represents stats for a single miner.
|
||||
//
|
||||
// miner := MinerStatsItem{Name: "xmrig-0", Hashrate: 1200}
|
||||
type MinerStatsItem struct {
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
|
|
@ -166,6 +225,8 @@ type MinerStatsItem struct {
|
|||
}
|
||||
|
||||
// StatsPayload contains miner statistics.
|
||||
//
|
||||
// stats := StatsPayload{NodeID: "worker-1"}
|
||||
type StatsPayload struct {
|
||||
NodeID string `json:"nodeId"`
|
||||
NodeName string `json:"nodeName"`
|
||||
|
|
@ -174,6 +235,8 @@ type StatsPayload struct {
|
|||
}
|
||||
|
||||
// GetLogsPayload requests console logs from a miner.
|
||||
//
|
||||
// payload := GetLogsPayload{MinerName: "xmrig-0", Lines: 100}
|
||||
type GetLogsPayload struct {
|
||||
MinerName string `json:"minerName"`
|
||||
Lines int `json:"lines"` // Number of lines to fetch
|
||||
|
|
@ -181,6 +244,8 @@ type GetLogsPayload struct {
|
|||
}
|
||||
|
||||
// LogsPayload contains console log lines.
|
||||
//
|
||||
// payload := LogsPayload{MinerName: "xmrig-0", Lines: []string{"started"}}
|
||||
type LogsPayload struct {
|
||||
MinerName string `json:"minerName"`
|
||||
Lines []string `json:"lines"`
|
||||
|
|
@ -188,6 +253,8 @@ type LogsPayload struct {
|
|||
}
|
||||
|
||||
// DeployPayload contains a deployment bundle.
|
||||
//
|
||||
// payload := DeployPayload{Name: "xmrig", BundleType: string(BundleMiner)}
|
||||
type DeployPayload struct {
|
||||
BundleType string `json:"type"` // "profile" | "miner" | "full"
|
||||
Data []byte `json:"data"` // STIM-encrypted bundle
|
||||
|
|
@ -196,6 +263,8 @@ type DeployPayload struct {
|
|||
}
|
||||
|
||||
// DeployAckPayload acknowledges a deployment.
|
||||
//
|
||||
// ack := DeployAckPayload{Success: true, Name: "xmrig"}
|
||||
type DeployAckPayload struct {
|
||||
Success bool `json:"success"`
|
||||
Name string `json:"name,omitempty"`
|
||||
|
|
@ -203,6 +272,8 @@ type DeployAckPayload struct {
|
|||
}
|
||||
|
||||
// ErrorPayload contains error information.
|
||||
//
|
||||
// payload := ErrorPayload{Code: ErrCodeOperationFailed, Message: "start failed"}
|
||||
type ErrorPayload struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
|
|
@ -220,6 +291,8 @@ const (
|
|||
)
|
||||
|
||||
// NewErrorMessage creates an error response message.
|
||||
//
|
||||
// msg, err := NewErrorMessage("worker", "controller", ErrCodeOperationFailed, "miner start failed", "req-1")
|
||||
func NewErrorMessage(from, to string, code int, message string, replyTo string) (*Message, error) {
|
||||
msg, err := NewMessage(MsgError, from, to, ErrorPayload{
|
||||
Code: code,
|
||||
|
|
|
|||
|
|
@ -1,12 +1,11 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNewMessage(t *testing.T) {
|
||||
func TestMessage_NewMessage_Good(t *testing.T) {
|
||||
t.Run("BasicMessage", func(t *testing.T) {
|
||||
msg, err := NewMessage(MsgPing, "sender-id", "receiver-id", nil)
|
||||
if err != nil {
|
||||
|
|
@ -60,7 +59,7 @@ func TestNewMessage(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestMessageReply(t *testing.T) {
|
||||
func TestMessage_Reply_Good(t *testing.T) {
|
||||
original, _ := NewMessage(MsgPing, "sender", "receiver", PingPayload{SentAt: 12345})
|
||||
|
||||
reply, err := original.Reply(MsgPong, PongPayload{
|
||||
|
|
@ -89,7 +88,7 @@ func TestMessageReply(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParsePayload(t *testing.T) {
|
||||
func TestMessage_ParsePayload_Good(t *testing.T) {
|
||||
t.Run("ValidPayload", func(t *testing.T) {
|
||||
payload := StartMinerPayload{
|
||||
MinerType: "xmrig",
|
||||
|
|
@ -160,7 +159,7 @@ func TestParsePayload(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestNewErrorMessage(t *testing.T) {
|
||||
func TestMessage_NewErrorMessage_Bad(t *testing.T) {
|
||||
errMsg, err := NewErrorMessage("sender", "receiver", ErrCodeOperationFailed, "something went wrong", "original-msg-id")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create error message: %v", err)
|
||||
|
|
@ -189,24 +188,18 @@ func TestNewErrorMessage(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMessageSerialization(t *testing.T) {
|
||||
func TestMessage_Serialization_Good(t *testing.T) {
|
||||
original, _ := NewMessage(MsgStartMiner, "ctrl", "worker", StartMinerPayload{
|
||||
MinerType: "xmrig",
|
||||
ProfileID: "my-profile",
|
||||
})
|
||||
|
||||
// Serialize
|
||||
data, err := json.Marshal(original)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to serialize message: %v", err)
|
||||
}
|
||||
data := testJSONMarshal(t, original)
|
||||
|
||||
// Deserialize
|
||||
var restored Message
|
||||
err = json.Unmarshal(data, &restored)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to deserialize message: %v", err)
|
||||
}
|
||||
testJSONUnmarshal(t, data, &restored)
|
||||
|
||||
if restored.ID != original.ID {
|
||||
t.Error("ID mismatch after serialization")
|
||||
|
|
@ -221,8 +214,7 @@ func TestMessageSerialization(t *testing.T) {
|
|||
}
|
||||
|
||||
var payload StartMinerPayload
|
||||
err = restored.ParsePayload(&payload)
|
||||
if err != nil {
|
||||
if err := restored.ParsePayload(&payload); err != nil {
|
||||
t.Fatalf("failed to parse restored payload: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -231,7 +223,7 @@ func TestMessageSerialization(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMessageTypes(t *testing.T) {
|
||||
func TestMessage_Types_Good(t *testing.T) {
|
||||
types := []MessageType{
|
||||
MsgHandshake,
|
||||
MsgHandshakeAck,
|
||||
|
|
@ -264,7 +256,7 @@ func TestMessageTypes(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestErrorCodes(t *testing.T) {
|
||||
func TestMessage_ErrorCodes_Bad(t *testing.T) {
|
||||
codes := map[int]string{
|
||||
ErrCodeUnknown: "Unknown",
|
||||
ErrCodeInvalidMessage: "InvalidMessage",
|
||||
|
|
@ -283,7 +275,7 @@ func TestErrorCodes(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNewMessage_NilPayload(t *testing.T) {
|
||||
func TestMessage_NewMessage_NilPayload_Ugly(t *testing.T) {
|
||||
msg, err := NewMessage(MsgPing, "from", "to", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("NewMessage with nil payload should succeed: %v", err)
|
||||
|
|
@ -293,7 +285,7 @@ func TestNewMessage_NilPayload(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMessage_ParsePayload_Nil(t *testing.T) {
|
||||
func TestMessage_ParsePayload_Nil_Ugly(t *testing.T) {
|
||||
msg := &Message{Payload: nil}
|
||||
var target PingPayload
|
||||
err := msg.ParsePayload(&target)
|
||||
|
|
@ -302,7 +294,7 @@ func TestMessage_ParsePayload_Nil(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNewErrorMessage_Success(t *testing.T) {
|
||||
func TestMessage_NewErrorMessage_Success_Bad(t *testing.T) {
|
||||
msg, err := NewErrorMessage("from", "to", ErrCodeOperationFailed, "something went wrong", "reply-123")
|
||||
if err != nil {
|
||||
t.Fatalf("NewErrorMessage failed: %v", err)
|
||||
|
|
@ -315,7 +307,10 @@ func TestNewErrorMessage_Success(t *testing.T) {
|
|||
}
|
||||
|
||||
var payload ErrorPayload
|
||||
msg.ParsePayload(&payload)
|
||||
err = msg.ParsePayload(&payload)
|
||||
if err != nil {
|
||||
t.Fatalf("ParsePayload failed: %v", err)
|
||||
}
|
||||
if payload.Code != ErrCodeOperationFailed {
|
||||
t.Errorf("expected code %d, got %d", ErrCodeOperationFailed, payload.Code)
|
||||
}
|
||||
|
|
|
|||
67
node/peer.go
67
node/peer.go
|
|
@ -1,17 +1,14 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"iter"
|
||||
"maps"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
coreio "dappco.re/go/core/io"
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/p2p/logging"
|
||||
|
||||
poindexter "forge.lthn.ai/Snider/Poindexter"
|
||||
|
|
@ -19,6 +16,8 @@ import (
|
|||
)
|
||||
|
||||
// Peer represents a known remote node.
|
||||
//
|
||||
// peer := &Peer{ID: "worker-1", Address: "127.0.0.1:9101"}
|
||||
type Peer struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
|
|
@ -42,6 +41,8 @@ type Peer struct {
|
|||
const saveDebounceInterval = 5 * time.Second
|
||||
|
||||
// PeerAuthMode controls how unknown peers are handled
|
||||
//
|
||||
// mode := PeerAuthAllowlist
|
||||
type PeerAuthMode int
|
||||
|
||||
const (
|
||||
|
|
@ -79,18 +80,20 @@ func validatePeerName(name string) error {
|
|||
return nil // Empty names are allowed (optional field)
|
||||
}
|
||||
if len(name) < PeerNameMinLength {
|
||||
return coreerr.E("validatePeerName", "peer name too short", nil)
|
||||
return core.E("validatePeerName", "peer name too short", nil)
|
||||
}
|
||||
if len(name) > PeerNameMaxLength {
|
||||
return coreerr.E("validatePeerName", "peer name too long", nil)
|
||||
return core.E("validatePeerName", "peer name too long", nil)
|
||||
}
|
||||
if !peerNameRegex.MatchString(name) {
|
||||
return coreerr.E("validatePeerName", "peer name contains invalid characters (use alphanumeric, hyphens, underscores, spaces)", nil)
|
||||
return core.E("validatePeerName", "peer name contains invalid characters (use alphanumeric, hyphens, underscores, spaces)", nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PeerRegistry manages known peers with KD-tree based selection.
|
||||
//
|
||||
// peerRegistry, err := NewPeerRegistry()
|
||||
type PeerRegistry struct {
|
||||
peers map[string]*Peer
|
||||
kdTree *poindexter.KDTree[string] // KD-tree with peer ID as payload
|
||||
|
|
@ -120,10 +123,12 @@ var (
|
|||
)
|
||||
|
||||
// NewPeerRegistry creates a new PeerRegistry, loading existing peers if available.
|
||||
//
|
||||
// peerRegistry, err := NewPeerRegistry()
|
||||
func NewPeerRegistry() (*PeerRegistry, error) {
|
||||
peersPath, err := xdg.ConfigFile("lethean-desktop/peers.json")
|
||||
if err != nil {
|
||||
return nil, coreerr.E("PeerRegistry.New", "failed to get peers path", err)
|
||||
return nil, core.E("PeerRegistry.New", "failed to get peers path", err)
|
||||
}
|
||||
|
||||
return NewPeerRegistryWithPath(peersPath)
|
||||
|
|
@ -131,6 +136,8 @@ func NewPeerRegistry() (*PeerRegistry, error) {
|
|||
|
||||
// NewPeerRegistryWithPath creates a new PeerRegistry with a custom path.
|
||||
// This is primarily useful for testing to avoid xdg path caching issues.
|
||||
//
|
||||
// peerRegistry, err := NewPeerRegistryWithPath("/srv/p2p/peers.json")
|
||||
func NewPeerRegistryWithPath(peersPath string) (*PeerRegistry, error) {
|
||||
pr := &PeerRegistry{
|
||||
peers: make(map[string]*Peer),
|
||||
|
|
@ -244,7 +251,7 @@ func (r *PeerRegistry) AddPeer(peer *Peer) error {
|
|||
|
||||
if peer.ID == "" {
|
||||
r.mu.Unlock()
|
||||
return coreerr.E("PeerRegistry.AddPeer", "peer ID is required", nil)
|
||||
return core.E("PeerRegistry.AddPeer", "peer ID is required", nil)
|
||||
}
|
||||
|
||||
// Validate peer name (P2P-LOW-3)
|
||||
|
|
@ -255,7 +262,7 @@ func (r *PeerRegistry) AddPeer(peer *Peer) error {
|
|||
|
||||
if _, exists := r.peers[peer.ID]; exists {
|
||||
r.mu.Unlock()
|
||||
return coreerr.E("PeerRegistry.AddPeer", "peer "+peer.ID+" already exists", nil)
|
||||
return core.E("PeerRegistry.AddPeer", "peer "+peer.ID+" already exists", nil)
|
||||
}
|
||||
|
||||
// Set defaults
|
||||
|
|
@ -280,7 +287,7 @@ func (r *PeerRegistry) UpdatePeer(peer *Peer) error {
|
|||
|
||||
if _, exists := r.peers[peer.ID]; !exists {
|
||||
r.mu.Unlock()
|
||||
return coreerr.E("PeerRegistry.UpdatePeer", "peer "+peer.ID+" not found", nil)
|
||||
return core.E("PeerRegistry.UpdatePeer", "peer "+peer.ID+" not found", nil)
|
||||
}
|
||||
|
||||
r.peers[peer.ID] = peer
|
||||
|
|
@ -297,7 +304,7 @@ func (r *PeerRegistry) RemovePeer(id string) error {
|
|||
|
||||
if _, exists := r.peers[id]; !exists {
|
||||
r.mu.Unlock()
|
||||
return coreerr.E("PeerRegistry.RemovePeer", "peer "+id+" not found", nil)
|
||||
return core.E("PeerRegistry.RemovePeer", "peer "+id+" not found", nil)
|
||||
}
|
||||
|
||||
delete(r.peers, id)
|
||||
|
|
@ -351,7 +358,7 @@ func (r *PeerRegistry) UpdateMetrics(id string, pingMS, geoKM float64, hops int)
|
|||
peer, exists := r.peers[id]
|
||||
if !exists {
|
||||
r.mu.Unlock()
|
||||
return coreerr.E("PeerRegistry.UpdateMetrics", "peer "+id+" not found", nil)
|
||||
return core.E("PeerRegistry.UpdateMetrics", "peer "+id+" not found", nil)
|
||||
}
|
||||
|
||||
peer.PingMS = pingMS
|
||||
|
|
@ -373,7 +380,7 @@ func (r *PeerRegistry) UpdateScore(id string, score float64) error {
|
|||
peer, exists := r.peers[id]
|
||||
if !exists {
|
||||
r.mu.Unlock()
|
||||
return coreerr.E("PeerRegistry.UpdateScore", "peer "+id+" not found", nil)
|
||||
return core.E("PeerRegistry.UpdateScore", "peer "+id+" not found", nil)
|
||||
}
|
||||
|
||||
// Clamp score to 0-100
|
||||
|
|
@ -655,28 +662,29 @@ func (r *PeerRegistry) scheduleSave() {
|
|||
// Must be called with r.mu held (at least RLock).
|
||||
func (r *PeerRegistry) saveNow() error {
|
||||
// Ensure directory exists
|
||||
dir := filepath.Dir(r.path)
|
||||
if err := coreio.Local.EnsureDir(dir); err != nil {
|
||||
return coreerr.E("PeerRegistry.saveNow", "failed to create peers directory", err)
|
||||
dir := core.PathDir(r.path)
|
||||
if err := fsEnsureDir(dir); err != nil {
|
||||
return core.E("PeerRegistry.saveNow", "failed to create peers directory", err)
|
||||
}
|
||||
|
||||
// Convert to slice for JSON
|
||||
peers := slices.Collect(maps.Values(r.peers))
|
||||
|
||||
data, err := json.MarshalIndent(peers, "", " ")
|
||||
if err != nil {
|
||||
return coreerr.E("PeerRegistry.saveNow", "failed to marshal peers", err)
|
||||
result := core.JSONMarshal(peers)
|
||||
if !result.OK {
|
||||
return core.E("PeerRegistry.saveNow", "failed to marshal peers", result.Value.(error))
|
||||
}
|
||||
data := result.Value.([]byte)
|
||||
|
||||
// Use atomic write pattern: write to temp file, then rename
|
||||
tmpPath := r.path + ".tmp"
|
||||
if err := coreio.Local.Write(tmpPath, string(data)); err != nil {
|
||||
return coreerr.E("PeerRegistry.saveNow", "failed to write peers temp file", err)
|
||||
if err := fsWrite(tmpPath, string(data)); err != nil {
|
||||
return core.E("PeerRegistry.saveNow", "failed to write peers temp file", err)
|
||||
}
|
||||
|
||||
if err := coreio.Local.Rename(tmpPath, r.path); err != nil {
|
||||
coreio.Local.Delete(tmpPath) // Clean up temp file
|
||||
return coreerr.E("PeerRegistry.saveNow", "failed to rename peers file", err)
|
||||
if err := fsRename(tmpPath, r.path); err != nil {
|
||||
fsDelete(tmpPath) // Clean up temp file
|
||||
return core.E("PeerRegistry.saveNow", "failed to rename peers file", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -718,14 +726,15 @@ func (r *PeerRegistry) save() error {
|
|||
|
||||
// load reads peers from disk.
|
||||
func (r *PeerRegistry) load() error {
|
||||
content, err := coreio.Local.Read(r.path)
|
||||
content, err := fsRead(r.path)
|
||||
if err != nil {
|
||||
return coreerr.E("PeerRegistry.load", "failed to read peers", err)
|
||||
return core.E("PeerRegistry.load", "failed to read peers", err)
|
||||
}
|
||||
|
||||
var peers []*Peer
|
||||
if err := json.Unmarshal([]byte(content), &peers); err != nil {
|
||||
return coreerr.E("PeerRegistry.load", "failed to unmarshal peers", err)
|
||||
result := core.JSONUnmarshalString(content, &peers)
|
||||
if !result.OK {
|
||||
return core.E("PeerRegistry.load", "failed to unmarshal peers", result.Value.(error))
|
||||
}
|
||||
|
||||
r.peers = make(map[string]*Peer)
|
||||
|
|
|
|||
|
|
@ -1,35 +1,24 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func setupTestPeerRegistry(t *testing.T) (*PeerRegistry, func()) {
|
||||
tmpDir, err := os.MkdirTemp("", "peer-registry-test")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp dir: %v", err)
|
||||
}
|
||||
|
||||
peersPath := filepath.Join(tmpDir, "peers.json")
|
||||
tmpDir := t.TempDir()
|
||||
peersPath := testJoinPath(tmpDir, "peers.json")
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(peersPath)
|
||||
if err != nil {
|
||||
os.RemoveAll(tmpDir)
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
os.RemoveAll(tmpDir)
|
||||
}
|
||||
|
||||
return pr, cleanup
|
||||
return pr, func() {}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_NewPeerRegistry(t *testing.T) {
|
||||
func TestPeer_Registry_NewPeerRegistry_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -38,7 +27,7 @@ func TestPeerRegistry_NewPeerRegistry(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_AddPeer(t *testing.T) {
|
||||
func TestPeer_Registry_AddPeer_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -67,7 +56,7 @@ func TestPeerRegistry_AddPeer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_GetPeer(t *testing.T) {
|
||||
func TestPeer_Registry_GetPeer_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -97,7 +86,7 @@ func TestPeerRegistry_GetPeer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_ListPeers(t *testing.T) {
|
||||
func TestPeer_Registry_ListPeers_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -117,7 +106,7 @@ func TestPeerRegistry_ListPeers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_RemovePeer(t *testing.T) {
|
||||
func TestPeer_Registry_RemovePeer_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -150,7 +139,7 @@ func TestPeerRegistry_RemovePeer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_UpdateMetrics(t *testing.T) {
|
||||
func TestPeer_Registry_UpdateMetrics_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -183,7 +172,7 @@ func TestPeerRegistry_UpdateMetrics(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_UpdateScore(t *testing.T) {
|
||||
func TestPeer_Registry_UpdateScore_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -237,7 +226,7 @@ func TestPeerRegistry_UpdateScore(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_SetConnected(t *testing.T) {
|
||||
func TestPeer_Registry_SetConnected_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -272,7 +261,7 @@ func TestPeerRegistry_SetConnected(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_GetConnectedPeers(t *testing.T) {
|
||||
func TestPeer_Registry_GetConnectedPeers_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -295,7 +284,7 @@ func TestPeerRegistry_GetConnectedPeers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_SelectOptimalPeer(t *testing.T) {
|
||||
func TestPeer_Registry_SelectOptimalPeer_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -321,7 +310,7 @@ func TestPeerRegistry_SelectOptimalPeer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_SelectNearestPeers(t *testing.T) {
|
||||
func TestPeer_Registry_SelectNearestPeers_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -342,11 +331,9 @@ func TestPeerRegistry_SelectNearestPeers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_Persistence(t *testing.T) {
|
||||
tmpDir, _ := os.MkdirTemp("", "persist-test")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
peersPath := filepath.Join(tmpDir, "peers.json")
|
||||
func TestPeer_Registry_Persistence_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
peersPath := testJoinPath(tmpDir, "peers.json")
|
||||
|
||||
// Create and save
|
||||
pr1, err := NewPeerRegistryWithPath(peersPath)
|
||||
|
|
@ -391,7 +378,7 @@ func TestPeerRegistry_Persistence(t *testing.T) {
|
|||
|
||||
// --- Security Feature Tests ---
|
||||
|
||||
func TestPeerRegistry_AuthMode(t *testing.T) {
|
||||
func TestPeer_Registry_AuthMode_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -413,7 +400,7 @@ func TestPeerRegistry_AuthMode(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_PublicKeyAllowlist(t *testing.T) {
|
||||
func TestPeer_Registry_PublicKeyAllowlist_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -450,7 +437,7 @@ func TestPeerRegistry_PublicKeyAllowlist(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_IsPeerAllowed_OpenMode(t *testing.T) {
|
||||
func TestPeer_Registry_IsPeerAllowed_OpenMode_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -466,7 +453,7 @@ func TestPeerRegistry_IsPeerAllowed_OpenMode(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_IsPeerAllowed_AllowlistMode(t *testing.T) {
|
||||
func TestPeer_Registry_IsPeerAllowed_AllowlistMode_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -501,7 +488,7 @@ func TestPeerRegistry_IsPeerAllowed_AllowlistMode(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_PeerNameValidation(t *testing.T) {
|
||||
func TestPeer_Registry_PeerNameValidation_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -545,7 +532,7 @@ func TestPeerRegistry_PeerNameValidation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_ScoreRecording(t *testing.T) {
|
||||
func TestPeer_Registry_ScoreRecording_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -601,7 +588,7 @@ func TestPeerRegistry_ScoreRecording(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_GetPeersByScore(t *testing.T) {
|
||||
func TestPeer_Registry_GetPeersByScore_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -635,7 +622,7 @@ func TestPeerRegistry_GetPeersByScore(t *testing.T) {
|
|||
|
||||
// --- Additional coverage tests for peer.go ---
|
||||
|
||||
func TestSafeKeyPrefix(t *testing.T) {
|
||||
func TestPeer_SafeKeyPrefix_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
key string
|
||||
|
|
@ -658,7 +645,7 @@ func TestSafeKeyPrefix(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestValidatePeerName(t *testing.T) {
|
||||
func TestPeer_ValidatePeerName_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
peerName string
|
||||
|
|
@ -691,7 +678,7 @@ func TestValidatePeerName(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_AddPeer_EmptyID(t *testing.T) {
|
||||
func TestPeer_Registry_AddPeer_EmptyID_Bad(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -702,7 +689,7 @@ func TestPeerRegistry_AddPeer_EmptyID(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_UpdatePeer(t *testing.T) {
|
||||
func TestPeer_Registry_UpdatePeer_Good(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -735,7 +722,7 @@ func TestPeerRegistry_UpdatePeer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_UpdateMetrics_NotFound(t *testing.T) {
|
||||
func TestPeer_Registry_UpdateMetrics_NotFound_Bad(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -745,7 +732,7 @@ func TestPeerRegistry_UpdateMetrics_NotFound(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_UpdateScore_NotFound(t *testing.T) {
|
||||
func TestPeer_Registry_UpdateScore_NotFound_Bad(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -755,7 +742,7 @@ func TestPeerRegistry_UpdateScore_NotFound(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_RecordSuccess_NotFound(t *testing.T) {
|
||||
func TestPeer_Registry_RecordSuccess_NotFound_Bad(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -763,21 +750,21 @@ func TestPeerRegistry_RecordSuccess_NotFound(t *testing.T) {
|
|||
pr.RecordSuccess("ghost-peer")
|
||||
}
|
||||
|
||||
func TestPeerRegistry_RecordFailure_NotFound(t *testing.T) {
|
||||
func TestPeer_Registry_RecordFailure_NotFound_Bad(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
pr.RecordFailure("ghost-peer")
|
||||
}
|
||||
|
||||
func TestPeerRegistry_RecordTimeout_NotFound(t *testing.T) {
|
||||
func TestPeer_Registry_RecordTimeout_NotFound_Bad(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
pr.RecordTimeout("ghost-peer")
|
||||
}
|
||||
|
||||
func TestPeerRegistry_SelectOptimalPeer_EmptyRegistry(t *testing.T) {
|
||||
func TestPeer_Registry_SelectOptimalPeer_EmptyRegistry_Ugly(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -787,7 +774,7 @@ func TestPeerRegistry_SelectOptimalPeer_EmptyRegistry(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_SelectNearestPeers_EmptyRegistry(t *testing.T) {
|
||||
func TestPeer_Registry_SelectNearestPeers_EmptyRegistry_Ugly(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -797,7 +784,7 @@ func TestPeerRegistry_SelectNearestPeers_EmptyRegistry(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_SetConnected_NonExistent(t *testing.T) {
|
||||
func TestPeer_Registry_SetConnected_NonExistent_Bad(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -805,7 +792,7 @@ func TestPeerRegistry_SetConnected_NonExistent(t *testing.T) {
|
|||
pr.SetConnected("ghost-peer", true)
|
||||
}
|
||||
|
||||
func TestPeerRegistry_Close_NoDirtyData(t *testing.T) {
|
||||
func TestPeer_Registry_Close_NoDirtyData_Ugly(t *testing.T) {
|
||||
pr, cleanup := setupTestPeerRegistry(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
@ -816,11 +803,9 @@ func TestPeerRegistry_Close_NoDirtyData(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_Close_WithDirtyData(t *testing.T) {
|
||||
tmpDir, _ := os.MkdirTemp("", "close-dirty-test")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
peersPath := filepath.Join(tmpDir, "peers.json")
|
||||
func TestPeer_Registry_Close_WithDirtyData_Ugly(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
peersPath := testJoinPath(tmpDir, "peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(peersPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create registry: %v", err)
|
||||
|
|
@ -845,11 +830,9 @@ func TestPeerRegistry_Close_WithDirtyData(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_ScheduleSave_Debounce(t *testing.T) {
|
||||
tmpDir, _ := os.MkdirTemp("", "debounce-test")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
peersPath := filepath.Join(tmpDir, "peers.json")
|
||||
func TestPeer_Registry_ScheduleSave_Debounce_Ugly(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
peersPath := testJoinPath(tmpDir, "peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(peersPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create registry: %v", err)
|
||||
|
|
@ -867,11 +850,9 @@ func TestPeerRegistry_ScheduleSave_Debounce(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_SaveNow(t *testing.T) {
|
||||
tmpDir, _ := os.MkdirTemp("", "savenow-test")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
peersPath := filepath.Join(tmpDir, "subdir", "peers.json")
|
||||
func TestPeer_Registry_SaveNow_Good(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
peersPath := testJoinPath(tmpDir, "subdir", "peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(peersPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create registry: %v", err)
|
||||
|
|
@ -888,20 +869,18 @@ func TestPeerRegistry_SaveNow(t *testing.T) {
|
|||
}
|
||||
|
||||
// Verify the file was written
|
||||
if _, err := os.Stat(peersPath); os.IsNotExist(err) {
|
||||
if !fsExists(peersPath) {
|
||||
t.Error("peers.json should exist after saveNow")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPeerRegistry_ScheduleSave_TimerFires(t *testing.T) {
|
||||
func TestPeer_Registry_ScheduleSave_TimerFires_Ugly(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping debounce timer test in short mode")
|
||||
}
|
||||
|
||||
tmpDir, _ := os.MkdirTemp("", "timer-fire-test")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
peersPath := filepath.Join(tmpDir, "peers.json")
|
||||
tmpDir := t.TempDir()
|
||||
peersPath := testJoinPath(tmpDir, "peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(peersPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create registry: %v", err)
|
||||
|
|
@ -913,7 +892,7 @@ func TestPeerRegistry_ScheduleSave_TimerFires(t *testing.T) {
|
|||
time.Sleep(6 * time.Second)
|
||||
|
||||
// The file should have been saved by the timer
|
||||
if _, err := os.Stat(peersPath); os.IsNotExist(err) {
|
||||
if !fsExists(peersPath) {
|
||||
t.Error("peers.json should exist after debounce timer fires")
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,22 +1,24 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// ProtocolError represents an error from the remote peer.
|
||||
//
|
||||
// err := &ProtocolError{Code: ErrCodeOperationFailed, Message: "start failed"}
|
||||
type ProtocolError struct {
|
||||
Code int
|
||||
Message string
|
||||
}
|
||||
|
||||
func (e *ProtocolError) Error() string {
|
||||
return fmt.Sprintf("remote error (%d): %s", e.Code, e.Message)
|
||||
return core.Sprintf("remote error (%d): %s", e.Code, e.Message)
|
||||
}
|
||||
|
||||
// ResponseHandler provides helpers for handling protocol responses.
|
||||
//
|
||||
// handler := &ResponseHandler{}
|
||||
type ResponseHandler struct{}
|
||||
|
||||
// ValidateResponse checks if the response is valid and returns a parsed error if it's an error response.
|
||||
|
|
@ -26,7 +28,7 @@ type ResponseHandler struct{}
|
|||
// 3. If response type matches expected (returns error if not)
|
||||
func (h *ResponseHandler) ValidateResponse(resp *Message, expectedType MessageType) error {
|
||||
if resp == nil {
|
||||
return coreerr.E("ResponseHandler.ValidateResponse", "nil response", nil)
|
||||
return core.E("ResponseHandler.ValidateResponse", "nil response", nil)
|
||||
}
|
||||
|
||||
// Check for error response
|
||||
|
|
@ -40,7 +42,7 @@ func (h *ResponseHandler) ValidateResponse(resp *Message, expectedType MessageTy
|
|||
|
||||
// Check expected type
|
||||
if resp.Type != expectedType {
|
||||
return coreerr.E("ResponseHandler.ValidateResponse", "unexpected response type: expected "+string(expectedType)+", got "+string(resp.Type), nil)
|
||||
return core.E("ResponseHandler.ValidateResponse", "unexpected response type: expected "+string(expectedType)+", got "+string(resp.Type), nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -55,7 +57,7 @@ func (h *ResponseHandler) ParseResponse(resp *Message, expectedType MessageType,
|
|||
|
||||
if target != nil {
|
||||
if err := resp.ParsePayload(target); err != nil {
|
||||
return coreerr.E("ResponseHandler.ParseResponse", "failed to parse "+string(expectedType)+" payload", err)
|
||||
return core.E("ResponseHandler.ParseResponse", "failed to parse "+string(expectedType)+" payload", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -66,22 +68,30 @@ func (h *ResponseHandler) ParseResponse(resp *Message, expectedType MessageType,
|
|||
var DefaultResponseHandler = &ResponseHandler{}
|
||||
|
||||
// ValidateResponse is a convenience function using the default handler.
|
||||
//
|
||||
// err := ValidateResponse(msg, MsgStats)
|
||||
func ValidateResponse(resp *Message, expectedType MessageType) error {
|
||||
return DefaultResponseHandler.ValidateResponse(resp, expectedType)
|
||||
}
|
||||
|
||||
// ParseResponse is a convenience function using the default handler.
|
||||
//
|
||||
// err := ParseResponse(msg, MsgStats, &stats)
|
||||
func ParseResponse(resp *Message, expectedType MessageType, target any) error {
|
||||
return DefaultResponseHandler.ParseResponse(resp, expectedType, target)
|
||||
}
|
||||
|
||||
// IsProtocolError returns true if the error is a ProtocolError.
|
||||
//
|
||||
// ok := IsProtocolError(err)
|
||||
func IsProtocolError(err error) bool {
|
||||
_, ok := err.(*ProtocolError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// GetProtocolErrorCode returns the error code if err is a ProtocolError, otherwise returns 0.
|
||||
//
|
||||
// code := GetProtocolErrorCode(err)
|
||||
func GetProtocolErrorCode(err error) int {
|
||||
if pe, ok := err.(*ProtocolError); ok {
|
||||
return pe.Code
|
||||
|
|
|
|||
|
|
@ -1,11 +1,12 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
func TestResponseHandler_ValidateResponse(t *testing.T) {
|
||||
func TestProtocol_ResponseHandler_ValidateResponse_Good(t *testing.T) {
|
||||
handler := &ResponseHandler{}
|
||||
|
||||
t.Run("NilResponse", func(t *testing.T) {
|
||||
|
|
@ -51,7 +52,7 @@ func TestResponseHandler_ValidateResponse(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestResponseHandler_ParseResponse(t *testing.T) {
|
||||
func TestProtocol_ResponseHandler_ParseResponse_Good(t *testing.T) {
|
||||
handler := &ResponseHandler{}
|
||||
|
||||
t.Run("ParseStats", func(t *testing.T) {
|
||||
|
|
@ -119,7 +120,7 @@ func TestResponseHandler_ParseResponse(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestProtocolError(t *testing.T) {
|
||||
func TestProtocol_Error_Bad(t *testing.T) {
|
||||
err := &ProtocolError{Code: 1001, Message: "test error"}
|
||||
|
||||
if err.Error() != "remote error (1001): test error" {
|
||||
|
|
@ -135,7 +136,7 @@ func TestProtocolError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConvenienceFunctions(t *testing.T) {
|
||||
func TestProtocol_ConvenienceFunctions_Good(t *testing.T) {
|
||||
msg, _ := NewMessage(MsgStats, "sender", "receiver", StatsPayload{NodeID: "test"})
|
||||
|
||||
// Test ValidateResponse
|
||||
|
|
@ -153,8 +154,8 @@ func TestConvenienceFunctions(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetProtocolErrorCode_NonProtocolError(t *testing.T) {
|
||||
err := fmt.Errorf("regular error")
|
||||
func TestProtocol_GetProtocolErrorCode_NonProtocolError_Bad(t *testing.T) {
|
||||
err := core.NewError("regular error")
|
||||
if GetProtocolErrorCode(err) != 0 {
|
||||
t.Error("Expected 0 for non-ProtocolError")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,8 +4,6 @@ import (
|
|||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"iter"
|
||||
"maps"
|
||||
"net/http"
|
||||
|
|
@ -15,7 +13,7 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
"dappco.re/go/core/p2p/logging"
|
||||
|
||||
"forge.lthn.ai/Snider/Borg/pkg/smsg"
|
||||
|
|
@ -32,6 +30,8 @@ const debugLogInterval = 100
|
|||
const DefaultMaxMessageSize int64 = 1 << 20 // 1MB
|
||||
|
||||
// TransportConfig configures the WebSocket transport.
|
||||
//
|
||||
// cfg := DefaultTransportConfig()
|
||||
type TransportConfig struct {
|
||||
ListenAddr string // ":9091" default
|
||||
WSPath string // "/ws" - WebSocket endpoint path
|
||||
|
|
@ -44,6 +44,8 @@ type TransportConfig struct {
|
|||
}
|
||||
|
||||
// DefaultTransportConfig returns sensible defaults.
|
||||
//
|
||||
// cfg := DefaultTransportConfig()
|
||||
func DefaultTransportConfig() TransportConfig {
|
||||
return TransportConfig{
|
||||
ListenAddr: ":9091",
|
||||
|
|
@ -56,9 +58,13 @@ func DefaultTransportConfig() TransportConfig {
|
|||
}
|
||||
|
||||
// MessageHandler processes incoming messages.
|
||||
//
|
||||
// var handler MessageHandler = func(conn *PeerConnection, msg *Message) {}
|
||||
type MessageHandler func(conn *PeerConnection, msg *Message)
|
||||
|
||||
// MessageDeduplicator tracks seen message IDs to prevent duplicate processing
|
||||
//
|
||||
// deduplicator := NewMessageDeduplicator(5 * time.Minute)
|
||||
type MessageDeduplicator struct {
|
||||
seen map[string]time.Time
|
||||
mu sync.RWMutex
|
||||
|
|
@ -66,6 +72,8 @@ type MessageDeduplicator struct {
|
|||
}
|
||||
|
||||
// NewMessageDeduplicator creates a deduplicator with specified TTL
|
||||
//
|
||||
// deduplicator := NewMessageDeduplicator(5 * time.Minute)
|
||||
func NewMessageDeduplicator(ttl time.Duration) *MessageDeduplicator {
|
||||
d := &MessageDeduplicator{
|
||||
seen: make(map[string]time.Time),
|
||||
|
|
@ -102,6 +110,8 @@ func (d *MessageDeduplicator) Cleanup() {
|
|||
}
|
||||
|
||||
// Transport manages WebSocket connections with SMSG encryption.
|
||||
//
|
||||
// transport := NewTransport(nodeManager, peerRegistry, DefaultTransportConfig())
|
||||
type Transport struct {
|
||||
config TransportConfig
|
||||
server *http.Server
|
||||
|
|
@ -119,6 +129,8 @@ type Transport struct {
|
|||
}
|
||||
|
||||
// PeerRateLimiter implements a simple token bucket rate limiter per peer
|
||||
//
|
||||
// rateLimiter := NewPeerRateLimiter(100, 50)
|
||||
type PeerRateLimiter struct {
|
||||
tokens int
|
||||
maxTokens int
|
||||
|
|
@ -128,6 +140,8 @@ type PeerRateLimiter struct {
|
|||
}
|
||||
|
||||
// NewPeerRateLimiter creates a rate limiter with specified messages/second
|
||||
//
|
||||
// rateLimiter := NewPeerRateLimiter(100, 50)
|
||||
func NewPeerRateLimiter(maxTokens, refillRate int) *PeerRateLimiter {
|
||||
return &PeerRateLimiter{
|
||||
tokens: maxTokens,
|
||||
|
|
@ -160,6 +174,8 @@ func (r *PeerRateLimiter) Allow() bool {
|
|||
}
|
||||
|
||||
// PeerConnection represents an active connection to a peer.
|
||||
//
|
||||
// peerConnection := &PeerConnection{Peer: &Peer{ID: "worker-1"}}
|
||||
type PeerConnection struct {
|
||||
Peer *Peer
|
||||
Conn *websocket.Conn
|
||||
|
|
@ -172,6 +188,8 @@ type PeerConnection struct {
|
|||
}
|
||||
|
||||
// NewTransport creates a new WebSocket transport.
|
||||
//
|
||||
// transport := NewTransport(nodeManager, peerRegistry, DefaultTransportConfig())
|
||||
func NewTransport(node *NodeManager, registry *PeerRegistry, config TransportConfig) *Transport {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
|
|
@ -290,7 +308,7 @@ func (t *Transport) Stop() error {
|
|||
defer cancel()
|
||||
|
||||
if err := t.server.Shutdown(ctx); err != nil {
|
||||
return coreerr.E("Transport.Stop", "server shutdown error", err)
|
||||
return core.E("Transport.Stop", "server shutdown error", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -321,7 +339,7 @@ func (t *Transport) Connect(peer *Peer) (*PeerConnection, error) {
|
|||
}
|
||||
conn, _, err := dialer.Dial(u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("Transport.Connect", "failed to connect to peer", err)
|
||||
return nil, core.E("Transport.Connect", "failed to connect to peer", err)
|
||||
}
|
||||
|
||||
pc := &PeerConnection{
|
||||
|
|
@ -336,7 +354,7 @@ func (t *Transport) Connect(peer *Peer) (*PeerConnection, error) {
|
|||
// This also derives and stores the shared secret in pc.SharedSecret
|
||||
if err := t.performHandshake(pc); err != nil {
|
||||
conn.Close()
|
||||
return nil, coreerr.E("Transport.Connect", "handshake failed", err)
|
||||
return nil, core.E("Transport.Connect", "handshake failed", err)
|
||||
}
|
||||
|
||||
// Store connection using the real peer ID from handshake
|
||||
|
|
@ -369,7 +387,7 @@ func (t *Transport) Send(peerID string, msg *Message) error {
|
|||
t.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return coreerr.E("Transport.Send", "peer "+peerID+" not connected", nil)
|
||||
return core.E("Transport.Send", "peer "+peerID+" not connected", nil)
|
||||
}
|
||||
|
||||
return pc.Send(msg)
|
||||
|
|
@ -457,7 +475,7 @@ func (t *Transport) handleWSUpgrade(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
// Decode handshake message (not encrypted yet, contains public key)
|
||||
var msg Message
|
||||
if err := json.Unmarshal(data, &msg); err != nil {
|
||||
if result := core.JSONUnmarshal(data, &msg); !result.OK {
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
|
@ -485,7 +503,7 @@ func (t *Transport) handleWSUpgrade(w http.ResponseWriter, r *http.Request) {
|
|||
rejectPayload := HandshakeAckPayload{
|
||||
Identity: *identity,
|
||||
Accepted: false,
|
||||
Reason: fmt.Sprintf("incompatible protocol version %s, supported: %v", payload.Version, SupportedProtocolVersions),
|
||||
Reason: core.Sprintf("incompatible protocol version %s, supported: %v", payload.Version, SupportedProtocolVersions),
|
||||
}
|
||||
rejectMsg, _ := NewMessage(MsgHandshakeAck, identity.ID, payload.Identity.ID, rejectPayload)
|
||||
if rejectData, err := MarshalJSON(rejectMsg); err == nil {
|
||||
|
|
@ -629,7 +647,7 @@ func (t *Transport) performHandshake(pc *PeerConnection) error {
|
|||
// Generate challenge for the server to prove it has the matching private key
|
||||
challenge, err := GenerateChallenge()
|
||||
if err != nil {
|
||||
return coreerr.E("Transport.performHandshake", "generate challenge", err)
|
||||
return core.E("Transport.performHandshake", "generate challenge", err)
|
||||
}
|
||||
|
||||
payload := HandshakePayload{
|
||||
|
|
@ -640,41 +658,41 @@ func (t *Transport) performHandshake(pc *PeerConnection) error {
|
|||
|
||||
msg, err := NewMessage(MsgHandshake, identity.ID, pc.Peer.ID, payload)
|
||||
if err != nil {
|
||||
return coreerr.E("Transport.performHandshake", "create handshake message", err)
|
||||
return core.E("Transport.performHandshake", "create handshake message", err)
|
||||
}
|
||||
|
||||
// First message is unencrypted (peer needs our public key)
|
||||
data, err := MarshalJSON(msg)
|
||||
if err != nil {
|
||||
return coreerr.E("Transport.performHandshake", "marshal handshake message", err)
|
||||
return core.E("Transport.performHandshake", "marshal handshake message", err)
|
||||
}
|
||||
|
||||
if err := pc.Conn.WriteMessage(websocket.TextMessage, data); err != nil {
|
||||
return coreerr.E("Transport.performHandshake", "send handshake", err)
|
||||
return core.E("Transport.performHandshake", "send handshake", err)
|
||||
}
|
||||
|
||||
// Wait for ack
|
||||
_, ackData, err := pc.Conn.ReadMessage()
|
||||
if err != nil {
|
||||
return coreerr.E("Transport.performHandshake", "read handshake ack", err)
|
||||
return core.E("Transport.performHandshake", "read handshake ack", err)
|
||||
}
|
||||
|
||||
var ackMsg Message
|
||||
if err := json.Unmarshal(ackData, &ackMsg); err != nil {
|
||||
return coreerr.E("Transport.performHandshake", "unmarshal handshake ack", err)
|
||||
if result := core.JSONUnmarshal(ackData, &ackMsg); !result.OK {
|
||||
return core.E("Transport.performHandshake", "unmarshal handshake ack", result.Value.(error))
|
||||
}
|
||||
|
||||
if ackMsg.Type != MsgHandshakeAck {
|
||||
return coreerr.E("Transport.performHandshake", "expected handshake_ack, got "+string(ackMsg.Type), nil)
|
||||
return core.E("Transport.performHandshake", "expected handshake_ack, got "+string(ackMsg.Type), nil)
|
||||
}
|
||||
|
||||
var ackPayload HandshakeAckPayload
|
||||
if err := ackMsg.ParsePayload(&ackPayload); err != nil {
|
||||
return coreerr.E("Transport.performHandshake", "parse handshake ack payload", err)
|
||||
return core.E("Transport.performHandshake", "parse handshake ack payload", err)
|
||||
}
|
||||
|
||||
if !ackPayload.Accepted {
|
||||
return coreerr.E("Transport.performHandshake", "handshake rejected: "+ackPayload.Reason, nil)
|
||||
return core.E("Transport.performHandshake", "handshake rejected: "+ackPayload.Reason, nil)
|
||||
}
|
||||
|
||||
// Update peer with the received identity info
|
||||
|
|
@ -686,15 +704,15 @@ func (t *Transport) performHandshake(pc *PeerConnection) error {
|
|||
// Verify challenge response - derive shared secret first using the peer's public key
|
||||
sharedSecret, err := t.node.DeriveSharedSecret(pc.Peer.PublicKey)
|
||||
if err != nil {
|
||||
return coreerr.E("Transport.performHandshake", "derive shared secret for challenge verification", err)
|
||||
return core.E("Transport.performHandshake", "derive shared secret for challenge verification", err)
|
||||
}
|
||||
|
||||
// Verify the server's response to our challenge
|
||||
if len(ackPayload.ChallengeResponse) == 0 {
|
||||
return coreerr.E("Transport.performHandshake", "server did not provide challenge response", nil)
|
||||
return core.E("Transport.performHandshake", "server did not provide challenge response", nil)
|
||||
}
|
||||
if !VerifyChallenge(challenge, ackPayload.ChallengeResponse, sharedSecret) {
|
||||
return coreerr.E("Transport.performHandshake", "challenge response verification failed: server may not have matching private key", nil)
|
||||
return core.E("Transport.performHandshake", "challenge response verification failed: server may not have matching private key", nil)
|
||||
}
|
||||
|
||||
// Store the shared secret for later use
|
||||
|
|
@ -841,7 +859,7 @@ func (pc *PeerConnection) Send(msg *Message) error {
|
|||
|
||||
// Set write deadline to prevent blocking forever
|
||||
if err := pc.Conn.SetWriteDeadline(time.Now().Add(10 * time.Second)); err != nil {
|
||||
return coreerr.E("PeerConnection.Send", "failed to set write deadline", err)
|
||||
return core.E("PeerConnection.Send", "failed to set write deadline", err)
|
||||
}
|
||||
defer pc.Conn.SetWriteDeadline(time.Time{}) // Reset deadline after send
|
||||
|
||||
|
|
@ -858,6 +876,8 @@ func (pc *PeerConnection) Close() error {
|
|||
}
|
||||
|
||||
// DisconnectPayload contains reason for disconnect.
|
||||
//
|
||||
// payload := DisconnectPayload{Reason: "shutdown", Code: DisconnectNormal}
|
||||
type DisconnectPayload struct {
|
||||
Reason string `json:"reason"`
|
||||
Code int `json:"code"` // Optional disconnect code
|
||||
|
|
@ -932,8 +952,8 @@ func (t *Transport) decryptMessage(data []byte, sharedSecret []byte) (*Message,
|
|||
|
||||
// Parse message from JSON
|
||||
var msg Message
|
||||
if err := json.Unmarshal([]byte(smsgMsg.Body), &msg); err != nil {
|
||||
return nil, err
|
||||
if result := core.JSONUnmarshalString(smsgMsg.Body, &msg); !result.OK {
|
||||
return nil, result.Value.(error)
|
||||
}
|
||||
|
||||
return &msg, nil
|
||||
|
|
|
|||
|
|
@ -1,17 +1,15 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
|
|
@ -21,10 +19,7 @@ import (
|
|||
func testNode(t *testing.T, name string, role NodeRole) *NodeManager {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("create node manager %q: %v", name, err)
|
||||
}
|
||||
|
|
@ -38,7 +33,7 @@ func testNode(t *testing.T, name string, role NodeRole) *NodeManager {
|
|||
func testRegistry(t *testing.T) *PeerRegistry {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
reg, err := NewPeerRegistryWithPath(filepath.Join(dir, "peers.json"))
|
||||
reg, err := NewPeerRegistryWithPath(testJoinPath(dir, "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("create registry: %v", err)
|
||||
}
|
||||
|
|
@ -124,7 +119,7 @@ func (tp *testTransportPair) connectClient(t *testing.T) *PeerConnection {
|
|||
|
||||
// --- Unit Tests for Sub-Components ---
|
||||
|
||||
func TestMessageDeduplicator(t *testing.T) {
|
||||
func TestTransport_MessageDeduplicator_Good(t *testing.T) {
|
||||
t.Run("MarkAndCheck", func(t *testing.T) {
|
||||
d := NewMessageDeduplicator(5 * time.Minute)
|
||||
|
||||
|
|
@ -175,7 +170,7 @@ func TestMessageDeduplicator(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestPeerRateLimiter(t *testing.T) {
|
||||
func TestTransport_PeerRateLimiter_Good(t *testing.T) {
|
||||
t.Run("AllowUpToBurst", func(t *testing.T) {
|
||||
rl := NewPeerRateLimiter(10, 5)
|
||||
|
||||
|
|
@ -213,7 +208,7 @@ func TestPeerRateLimiter(t *testing.T) {
|
|||
|
||||
// --- Transport Integration Tests ---
|
||||
|
||||
func TestTransport_FullHandshake(t *testing.T) {
|
||||
func TestTransport_FullHandshake_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
pc := tp.connectClient(t)
|
||||
|
||||
|
|
@ -243,7 +238,7 @@ func TestTransport_FullHandshake(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_HandshakeRejectWrongVersion(t *testing.T) {
|
||||
func TestTransport_HandshakeRejectWrongVersion_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
// Dial raw WebSocket and send handshake with unsupported version
|
||||
|
|
@ -272,9 +267,7 @@ func TestTransport_HandshakeRejectWrongVersion(t *testing.T) {
|
|||
}
|
||||
|
||||
var resp Message
|
||||
if err := json.Unmarshal(respData, &resp); err != nil {
|
||||
t.Fatalf("unmarshal response: %v", err)
|
||||
}
|
||||
testJSONUnmarshal(t, respData, &resp)
|
||||
|
||||
var ack HandshakeAckPayload
|
||||
resp.ParsePayload(&ack)
|
||||
|
|
@ -282,12 +275,12 @@ func TestTransport_HandshakeRejectWrongVersion(t *testing.T) {
|
|||
if ack.Accepted {
|
||||
t.Error("should reject incompatible protocol version")
|
||||
}
|
||||
if !strings.Contains(ack.Reason, "incompatible protocol version") {
|
||||
if !core.Contains(ack.Reason, "incompatible protocol version") {
|
||||
t.Errorf("expected version rejection reason, got: %s", ack.Reason)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransport_HandshakeRejectAllowlist(t *testing.T) {
|
||||
func TestTransport_HandshakeRejectAllowlist_Bad(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
// Switch server to allowlist mode WITHOUT adding client's key
|
||||
|
|
@ -305,12 +298,12 @@ func TestTransport_HandshakeRejectAllowlist(t *testing.T) {
|
|||
if err == nil {
|
||||
t.Fatal("should reject peer not in allowlist")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "rejected") {
|
||||
if !core.Contains(err.Error(), "rejected") {
|
||||
t.Errorf("expected rejection error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransport_EncryptedMessageRoundTrip(t *testing.T) {
|
||||
func TestTransport_EncryptedMessageRoundTrip_Ugly(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
received := make(chan *Message, 1)
|
||||
|
|
@ -353,7 +346,7 @@ func TestTransport_EncryptedMessageRoundTrip(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_MessageDedup(t *testing.T) {
|
||||
func TestTransport_MessageDedup_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
var count atomic.Int32
|
||||
|
|
@ -383,7 +376,7 @@ func TestTransport_MessageDedup(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_RateLimiting(t *testing.T) {
|
||||
func TestTransport_RateLimiting_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
var count atomic.Int32
|
||||
|
|
@ -415,7 +408,7 @@ func TestTransport_RateLimiting(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_MaxConnsEnforcement(t *testing.T) {
|
||||
func TestTransport_MaxConnsEnforcement_Good(t *testing.T) {
|
||||
// Server with MaxConns=1
|
||||
serverNM := testNode(t, "maxconns-server", RoleWorker)
|
||||
serverReg := testRegistry(t)
|
||||
|
|
@ -467,7 +460,7 @@ func TestTransport_MaxConnsEnforcement(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_KeepaliveTimeout(t *testing.T) {
|
||||
func TestTransport_KeepaliveTimeout_Bad(t *testing.T) {
|
||||
// Use short keepalive settings so the test is fast
|
||||
serverCfg := DefaultTransportConfig()
|
||||
serverCfg.PingInterval = 100 * time.Millisecond
|
||||
|
|
@ -516,7 +509,7 @@ func TestTransport_KeepaliveTimeout(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_GracefulClose(t *testing.T) {
|
||||
func TestTransport_GracefulClose_Ugly(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
received := make(chan *Message, 10)
|
||||
|
|
@ -551,7 +544,7 @@ func TestTransport_GracefulClose(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_ConcurrentSends(t *testing.T) {
|
||||
func TestTransport_ConcurrentSends_Ugly(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
var count atomic.Int32
|
||||
|
|
@ -591,7 +584,7 @@ func TestTransport_ConcurrentSends(t *testing.T) {
|
|||
|
||||
// --- Additional coverage tests ---
|
||||
|
||||
func TestTransport_Broadcast(t *testing.T) {
|
||||
func TestTransport_Broadcast_Good(t *testing.T) {
|
||||
// Set up a controller with two worker peers connected.
|
||||
controllerNM := testNode(t, "broadcast-controller", RoleController)
|
||||
controllerReg := testRegistry(t)
|
||||
|
|
@ -648,7 +641,7 @@ func TestTransport_Broadcast(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_BroadcastExcludesSender(t *testing.T) {
|
||||
func TestTransport_BroadcastExcludesSender_Good(t *testing.T) {
|
||||
// Verify that Broadcast excludes the sender.
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
|
|
@ -675,7 +668,7 @@ func TestTransport_BroadcastExcludesSender(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_NewTransport_DefaultMaxMessageSize(t *testing.T) {
|
||||
func TestTransport_NewTransport_DefaultMaxMessageSize_Good(t *testing.T) {
|
||||
nm := testNode(t, "defaults", RoleWorker)
|
||||
reg := testRegistry(t)
|
||||
cfg := TransportConfig{
|
||||
|
|
@ -692,7 +685,7 @@ func TestTransport_NewTransport_DefaultMaxMessageSize(t *testing.T) {
|
|||
// The actual default is applied at usage time (readLoop, handleWSUpgrade)
|
||||
}
|
||||
|
||||
func TestTransport_ConnectedPeers(t *testing.T) {
|
||||
func TestTransport_ConnectedPeers_Good(t *testing.T) {
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
if tp.Server.ConnectedPeers() != 0 {
|
||||
|
|
@ -707,7 +700,7 @@ func TestTransport_ConnectedPeers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_StartAndStop(t *testing.T) {
|
||||
func TestTransport_StartAndStop_Good(t *testing.T) {
|
||||
nm := testNode(t, "start-test", RoleWorker)
|
||||
reg := testRegistry(t)
|
||||
cfg := DefaultTransportConfig()
|
||||
|
|
@ -729,7 +722,7 @@ func TestTransport_StartAndStop(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTransport_CheckOrigin(t *testing.T) {
|
||||
func TestTransport_CheckOrigin_Good(t *testing.T) {
|
||||
nm := testNode(t, "origin-test", RoleWorker)
|
||||
reg := testRegistry(t)
|
||||
cfg := DefaultTransportConfig()
|
||||
|
|
|
|||
|
|
@ -2,11 +2,9 @@ package node
|
|||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
|
||||
"dappco.re/go/core/p2p/logging"
|
||||
"github.com/adrg/xdg"
|
||||
|
|
@ -14,6 +12,8 @@ import (
|
|||
|
||||
// MinerManager interface for the mining package integration.
|
||||
// This allows the node package to interact with mining.Manager without import cycles.
|
||||
//
|
||||
// var minerManager MinerManager
|
||||
type MinerManager interface {
|
||||
StartMiner(minerType string, config any) (MinerInstance, error)
|
||||
StopMiner(name string) error
|
||||
|
|
@ -22,6 +22,8 @@ type MinerManager interface {
|
|||
}
|
||||
|
||||
// MinerInstance represents a running miner for stats collection.
|
||||
//
|
||||
// var miner MinerInstance
|
||||
type MinerInstance interface {
|
||||
GetName() string
|
||||
GetType() string
|
||||
|
|
@ -30,12 +32,16 @@ type MinerInstance interface {
|
|||
}
|
||||
|
||||
// ProfileManager interface for profile operations.
|
||||
//
|
||||
// var profileManager ProfileManager
|
||||
type ProfileManager interface {
|
||||
GetProfile(id string) (any, error)
|
||||
SaveProfile(profile any) error
|
||||
}
|
||||
|
||||
// Worker handles incoming messages on a worker node.
|
||||
//
|
||||
// worker := NewWorker(nodeManager, transport)
|
||||
type Worker struct {
|
||||
node *NodeManager
|
||||
transport *Transport
|
||||
|
|
@ -46,6 +52,8 @@ type Worker struct {
|
|||
}
|
||||
|
||||
// NewWorker creates a new Worker instance.
|
||||
//
|
||||
// worker := NewWorker(nodeManager, transport)
|
||||
func NewWorker(node *NodeManager, transport *Transport) *Worker {
|
||||
return &Worker{
|
||||
node: node,
|
||||
|
|
@ -55,7 +63,6 @@ func NewWorker(node *NodeManager, transport *Transport) *Worker {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
// SetMinerManager sets the miner manager for handling miner operations.
|
||||
func (w *Worker) SetMinerManager(manager MinerManager) {
|
||||
w.minerManager = manager
|
||||
|
|
@ -119,7 +126,7 @@ func (w *Worker) HandleMessage(conn *PeerConnection, msg *Message) {
|
|||
func (w *Worker) handlePing(msg *Message) (*Message, error) {
|
||||
var ping PingPayload
|
||||
if err := msg.ParsePayload(&ping); err != nil {
|
||||
return nil, coreerr.E("Worker.handlePing", "invalid ping payload", err)
|
||||
return nil, core.E("Worker.handlePing", "invalid ping payload", err)
|
||||
}
|
||||
|
||||
pong := PongPayload{
|
||||
|
|
@ -202,12 +209,12 @@ func (w *Worker) handleStartMiner(msg *Message) (*Message, error) {
|
|||
|
||||
var payload StartMinerPayload
|
||||
if err := msg.ParsePayload(&payload); err != nil {
|
||||
return nil, coreerr.E("Worker.handleStartMiner", "invalid start miner payload", err)
|
||||
return nil, core.E("Worker.handleStartMiner", "invalid start miner payload", err)
|
||||
}
|
||||
|
||||
// Validate miner type is provided
|
||||
if payload.MinerType == "" {
|
||||
return nil, coreerr.E("Worker.handleStartMiner", "miner type is required", nil)
|
||||
return nil, core.E("Worker.handleStartMiner", "miner type is required", nil)
|
||||
}
|
||||
|
||||
// Get the config from the profile or use the override
|
||||
|
|
@ -217,11 +224,11 @@ func (w *Worker) handleStartMiner(msg *Message) (*Message, error) {
|
|||
} else if w.profileManager != nil {
|
||||
profile, err := w.profileManager.GetProfile(payload.ProfileID)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("Worker.handleStartMiner", "profile not found: "+payload.ProfileID, nil)
|
||||
return nil, core.E("Worker.handleStartMiner", "profile not found: "+payload.ProfileID, nil)
|
||||
}
|
||||
config = profile
|
||||
} else {
|
||||
return nil, coreerr.E("Worker.handleStartMiner", "no config provided and no profile manager configured", nil)
|
||||
return nil, core.E("Worker.handleStartMiner", "no config provided and no profile manager configured", nil)
|
||||
}
|
||||
|
||||
// Start the miner
|
||||
|
|
@ -249,7 +256,7 @@ func (w *Worker) handleStopMiner(msg *Message) (*Message, error) {
|
|||
|
||||
var payload StopMinerPayload
|
||||
if err := msg.ParsePayload(&payload); err != nil {
|
||||
return nil, coreerr.E("Worker.handleStopMiner", "invalid stop miner payload", err)
|
||||
return nil, core.E("Worker.handleStopMiner", "invalid stop miner payload", err)
|
||||
}
|
||||
|
||||
err := w.minerManager.StopMiner(payload.MinerName)
|
||||
|
|
@ -272,7 +279,7 @@ func (w *Worker) handleGetLogs(msg *Message) (*Message, error) {
|
|||
|
||||
var payload GetLogsPayload
|
||||
if err := msg.ParsePayload(&payload); err != nil {
|
||||
return nil, coreerr.E("Worker.handleGetLogs", "invalid get logs payload", err)
|
||||
return nil, core.E("Worker.handleGetLogs", "invalid get logs payload", err)
|
||||
}
|
||||
|
||||
// Validate and limit the Lines parameter to prevent resource exhaustion
|
||||
|
|
@ -283,7 +290,7 @@ func (w *Worker) handleGetLogs(msg *Message) (*Message, error) {
|
|||
|
||||
miner, err := w.minerManager.GetMiner(payload.MinerName)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("Worker.handleGetLogs", "miner not found: "+payload.MinerName, nil)
|
||||
return nil, core.E("Worker.handleGetLogs", "miner not found: "+payload.MinerName, nil)
|
||||
}
|
||||
|
||||
lines := miner.GetConsoleHistory(payload.Lines)
|
||||
|
|
@ -301,7 +308,7 @@ func (w *Worker) handleGetLogs(msg *Message) (*Message, error) {
|
|||
func (w *Worker) handleDeploy(conn *PeerConnection, msg *Message) (*Message, error) {
|
||||
var payload DeployPayload
|
||||
if err := msg.ParsePayload(&payload); err != nil {
|
||||
return nil, coreerr.E("Worker.handleDeploy", "invalid deploy payload", err)
|
||||
return nil, core.E("Worker.handleDeploy", "invalid deploy payload", err)
|
||||
}
|
||||
|
||||
// Reconstruct Bundle object from payload
|
||||
|
|
@ -321,19 +328,19 @@ func (w *Worker) handleDeploy(conn *PeerConnection, msg *Message) (*Message, err
|
|||
switch bundle.Type {
|
||||
case BundleProfile:
|
||||
if w.profileManager == nil {
|
||||
return nil, coreerr.E("Worker.handleDeploy", "profile manager not configured", nil)
|
||||
return nil, core.E("Worker.handleDeploy", "profile manager not configured", nil)
|
||||
}
|
||||
|
||||
// Decrypt and extract profile data
|
||||
profileData, err := ExtractProfileBundle(bundle, password)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("Worker.handleDeploy", "failed to extract profile bundle", err)
|
||||
return nil, core.E("Worker.handleDeploy", "failed to extract profile bundle", err)
|
||||
}
|
||||
|
||||
// Unmarshal into interface{} to pass to ProfileManager
|
||||
var profile any
|
||||
if err := json.Unmarshal(profileData, &profile); err != nil {
|
||||
return nil, coreerr.E("Worker.handleDeploy", "invalid profile data JSON", err)
|
||||
if result := core.JSONUnmarshal(profileData, &profile); !result.OK {
|
||||
return nil, core.E("Worker.handleDeploy", "invalid profile data JSON", result.Value.(error))
|
||||
}
|
||||
|
||||
if err := w.profileManager.SaveProfile(profile); err != nil {
|
||||
|
|
@ -354,8 +361,8 @@ func (w *Worker) handleDeploy(conn *PeerConnection, msg *Message) (*Message, err
|
|||
case BundleMiner, BundleFull:
|
||||
// Determine installation directory
|
||||
// We use w.DataDir/lethean-desktop/miners/<bundle_name>
|
||||
minersDir := filepath.Join(w.DataDir, "lethean-desktop", "miners")
|
||||
installDir := filepath.Join(minersDir, payload.Name)
|
||||
minersDir := core.JoinPath(w.DataDir, "lethean-desktop", "miners")
|
||||
installDir := core.JoinPath(minersDir, payload.Name)
|
||||
|
||||
logging.Info("deploying miner bundle", logging.Fields{
|
||||
"name": payload.Name,
|
||||
|
|
@ -366,14 +373,14 @@ func (w *Worker) handleDeploy(conn *PeerConnection, msg *Message) (*Message, err
|
|||
// Extract miner bundle
|
||||
minerPath, profileData, err := ExtractMinerBundle(bundle, password, installDir)
|
||||
if err != nil {
|
||||
return nil, coreerr.E("Worker.handleDeploy", "failed to extract miner bundle", err)
|
||||
return nil, core.E("Worker.handleDeploy", "failed to extract miner bundle", err)
|
||||
}
|
||||
|
||||
// If the bundle contained a profile config, save it
|
||||
if len(profileData) > 0 && w.profileManager != nil {
|
||||
var profile any
|
||||
if err := json.Unmarshal(profileData, &profile); err != nil {
|
||||
logging.Warn("failed to parse profile from miner bundle", logging.Fields{"error": err})
|
||||
if result := core.JSONUnmarshal(profileData, &profile); !result.OK {
|
||||
logging.Warn("failed to parse profile from miner bundle", logging.Fields{"error": result.Value.(error)})
|
||||
} else {
|
||||
if err := w.profileManager.SaveProfile(profile); err != nil {
|
||||
logging.Warn("failed to save profile from miner bundle", logging.Fields{"error": err})
|
||||
|
|
@ -396,7 +403,7 @@ func (w *Worker) handleDeploy(conn *PeerConnection, msg *Message) (*Message, err
|
|||
return msg.Reply(MsgDeployAck, ack)
|
||||
|
||||
default:
|
||||
return nil, coreerr.E("Worker.handleDeploy", "unknown bundle type: "+payload.BundleType, nil)
|
||||
return nil, core.E("Worker.handleDeploy", "unknown bundle type: "+payload.BundleType, nil)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2,34 +2,26 @@ package node
|
|||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// setupTestEnv sets up a temporary environment for testing and returns cleanup function
|
||||
func setupTestEnv(t *testing.T) func() {
|
||||
tmpDir := t.TempDir()
|
||||
os.Setenv("XDG_CONFIG_HOME", filepath.Join(tmpDir, "config"))
|
||||
os.Setenv("XDG_DATA_HOME", filepath.Join(tmpDir, "data"))
|
||||
return func() {
|
||||
os.Unsetenv("XDG_CONFIG_HOME")
|
||||
os.Unsetenv("XDG_DATA_HOME")
|
||||
}
|
||||
t.Setenv("XDG_CONFIG_HOME", testJoinPath(tmpDir, "config"))
|
||||
t.Setenv("XDG_DATA_HOME", testJoinPath(tmpDir, "data"))
|
||||
return func() {}
|
||||
}
|
||||
|
||||
func TestNewWorker(t *testing.T) {
|
||||
func TestWorker_NewWorker_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -37,7 +29,7 @@ func TestNewWorker(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -57,15 +49,12 @@ func TestNewWorker(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_SetMinerManager(t *testing.T) {
|
||||
func TestWorker_SetMinerManager_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -73,7 +62,7 @@ func TestWorker_SetMinerManager(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -90,15 +79,12 @@ func TestWorker_SetMinerManager(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_SetProfileManager(t *testing.T) {
|
||||
func TestWorker_SetProfileManager_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -106,7 +92,7 @@ func TestWorker_SetProfileManager(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -123,15 +109,12 @@ func TestWorker_SetProfileManager(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandlePing(t *testing.T) {
|
||||
func TestWorker_HandlePing_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -139,7 +122,7 @@ func TestWorker_HandlePing(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -187,15 +170,12 @@ func TestWorker_HandlePing(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleGetStats(t *testing.T) {
|
||||
func TestWorker_HandleGetStats_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -203,7 +183,7 @@ func TestWorker_HandleGetStats(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -250,15 +230,12 @@ func TestWorker_HandleGetStats(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleStartMiner_NoManager(t *testing.T) {
|
||||
func TestWorker_HandleStartMiner_NoManager_Bad(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -266,7 +243,7 @@ func TestWorker_HandleStartMiner_NoManager(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -293,15 +270,12 @@ func TestWorker_HandleStartMiner_NoManager(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleStopMiner_NoManager(t *testing.T) {
|
||||
func TestWorker_HandleStopMiner_NoManager_Bad(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -309,7 +283,7 @@ func TestWorker_HandleStopMiner_NoManager(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -336,15 +310,12 @@ func TestWorker_HandleStopMiner_NoManager(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleGetLogs_NoManager(t *testing.T) {
|
||||
func TestWorker_HandleGetLogs_NoManager_Bad(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -352,7 +323,7 @@ func TestWorker_HandleGetLogs_NoManager(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -379,15 +350,12 @@ func TestWorker_HandleGetLogs_NoManager(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleDeploy_Profile(t *testing.T) {
|
||||
func TestWorker_HandleDeploy_Profile_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -395,7 +363,7 @@ func TestWorker_HandleDeploy_Profile(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -426,15 +394,12 @@ func TestWorker_HandleDeploy_Profile(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleDeploy_UnknownType(t *testing.T) {
|
||||
func TestWorker_HandleDeploy_UnknownType_Bad(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -442,7 +407,7 @@ func TestWorker_HandleDeploy_UnknownType(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -472,7 +437,7 @@ func TestWorker_HandleDeploy_UnknownType(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConvertMinerStats(t *testing.T) {
|
||||
func TestWorker_ConvertMinerStats_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
rawStats any
|
||||
|
|
@ -573,15 +538,15 @@ type mockMinerManagerFailing struct {
|
|||
}
|
||||
|
||||
func (m *mockMinerManagerFailing) StartMiner(minerType string, config any) (MinerInstance, error) {
|
||||
return nil, fmt.Errorf("mining hardware not available")
|
||||
return nil, core.E("mockMinerManagerFailing.StartMiner", "mining hardware not available", nil)
|
||||
}
|
||||
|
||||
func (m *mockMinerManagerFailing) StopMiner(name string) error {
|
||||
return fmt.Errorf("miner %s not found", name)
|
||||
return core.E("mockMinerManagerFailing.StopMiner", "miner "+name+" not found", nil)
|
||||
}
|
||||
|
||||
func (m *mockMinerManagerFailing) GetMiner(name string) (MinerInstance, error) {
|
||||
return nil, fmt.Errorf("miner %s not found", name)
|
||||
return nil, core.E("mockMinerManagerFailing.GetMiner", "miner "+name+" not found", nil)
|
||||
}
|
||||
|
||||
// mockProfileManagerFull implements ProfileManager that returns real data.
|
||||
|
|
@ -592,7 +557,7 @@ type mockProfileManagerFull struct {
|
|||
func (m *mockProfileManagerFull) GetProfile(id string) (any, error) {
|
||||
p, ok := m.profiles[id]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("profile %s not found", id)
|
||||
return nil, core.E("mockProfileManagerFull.GetProfile", "profile "+id+" not found", nil)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
|
@ -605,22 +570,19 @@ func (m *mockProfileManagerFull) SaveProfile(profile any) error {
|
|||
type mockProfileManagerFailing struct{}
|
||||
|
||||
func (m *mockProfileManagerFailing) GetProfile(id string) (any, error) {
|
||||
return nil, fmt.Errorf("profile %s not found", id)
|
||||
return nil, core.E("mockProfileManagerFailing.GetProfile", "profile "+id+" not found", nil)
|
||||
}
|
||||
|
||||
func (m *mockProfileManagerFailing) SaveProfile(profile any) error {
|
||||
return fmt.Errorf("save failed")
|
||||
return core.E("mockProfileManagerFailing.SaveProfile", "save failed", nil)
|
||||
}
|
||||
|
||||
func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
|
||||
func TestWorker_HandleStartMiner_WithManager_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
|
|
@ -628,7 +590,7 @@ func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
|
|||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -649,7 +611,7 @@ func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
|
|||
t.Run("WithConfigOverride", func(t *testing.T) {
|
||||
payload := StartMinerPayload{
|
||||
MinerType: "xmrig",
|
||||
Config: json.RawMessage(`{"pool":"test:3333"}`),
|
||||
Config: RawMessage(`{"pool":"test:3333"}`),
|
||||
}
|
||||
msg, err := NewMessage(MsgStartMiner, "sender-id", identity.ID, payload)
|
||||
if err != nil {
|
||||
|
|
@ -680,7 +642,7 @@ func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
|
|||
t.Run("EmptyMinerType", func(t *testing.T) {
|
||||
payload := StartMinerPayload{
|
||||
MinerType: "",
|
||||
Config: json.RawMessage(`{}`),
|
||||
Config: RawMessage(`{}`),
|
||||
}
|
||||
msg, err := NewMessage(MsgStartMiner, "sender-id", identity.ID, payload)
|
||||
if err != nil {
|
||||
|
|
@ -747,7 +709,7 @@ func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
|
|||
|
||||
payload := StartMinerPayload{
|
||||
MinerType: "xmrig",
|
||||
Config: json.RawMessage(`{}`),
|
||||
Config: RawMessage(`{}`),
|
||||
}
|
||||
msg, err := NewMessage(MsgStartMiner, "sender-id", identity.ID, payload)
|
||||
if err != nil {
|
||||
|
|
@ -780,26 +742,23 @@ type mockMinerManagerWithStart struct {
|
|||
|
||||
func (m *mockMinerManagerWithStart) StartMiner(minerType string, config any) (MinerInstance, error) {
|
||||
m.counter++
|
||||
name := fmt.Sprintf("%s-%d", minerType, m.counter)
|
||||
name := core.Sprintf("%s-%d", minerType, m.counter)
|
||||
return &mockMinerInstance{name: name, minerType: minerType}, nil
|
||||
}
|
||||
|
||||
func TestWorker_HandleStopMiner_WithManager(t *testing.T) {
|
||||
func TestWorker_HandleStopMiner_WithManager_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
|
||||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -851,22 +810,19 @@ func TestWorker_HandleStopMiner_WithManager(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestWorker_HandleGetLogs_WithManager(t *testing.T) {
|
||||
func TestWorker_HandleGetLogs_WithManager_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
|
||||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -961,22 +917,19 @@ func TestWorker_HandleGetLogs_WithManager(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestWorker_HandleGetStats_WithMinerManager(t *testing.T) {
|
||||
func TestWorker_HandleGetStats_WithMinerManager_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
|
||||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -1025,22 +978,19 @@ func TestWorker_HandleGetStats_WithMinerManager(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleMessage_UnknownType(t *testing.T) {
|
||||
func TestWorker_HandleMessage_UnknownType_Bad(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
|
||||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -1055,22 +1005,19 @@ func TestWorker_HandleMessage_UnknownType(t *testing.T) {
|
|||
worker.HandleMessage(nil, msg)
|
||||
}
|
||||
|
||||
func TestWorker_HandleDeploy_ProfileWithManager(t *testing.T) {
|
||||
func TestWorker_HandleDeploy_ProfileWithManager_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
|
||||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -1113,22 +1060,19 @@ func TestWorker_HandleDeploy_ProfileWithManager(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleDeploy_ProfileSaveFails(t *testing.T) {
|
||||
func TestWorker_HandleDeploy_ProfileSaveFails_Bad(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
|
||||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -1162,22 +1106,19 @@ func TestWorker_HandleDeploy_ProfileSaveFails(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleDeploy_MinerBundle(t *testing.T) {
|
||||
func TestWorker_HandleDeploy_MinerBundle_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
|
||||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -1190,8 +1131,8 @@ func TestWorker_HandleDeploy_MinerBundle(t *testing.T) {
|
|||
identity := nm.GetIdentity()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
minerPath := filepath.Join(tmpDir, "test-miner")
|
||||
os.WriteFile(minerPath, []byte("fake miner binary"), 0755)
|
||||
minerPath := testJoinPath(tmpDir, "test-miner")
|
||||
testWriteFile(t, minerPath, []byte("fake miner binary"), 0o755)
|
||||
|
||||
profileJSON := []byte(`{"pool":"test:3333"}`)
|
||||
|
||||
|
|
@ -1229,22 +1170,19 @@ func TestWorker_HandleDeploy_MinerBundle(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleDeploy_FullBundle(t *testing.T) {
|
||||
func TestWorker_HandleDeploy_FullBundle_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
|
||||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -1255,8 +1193,8 @@ func TestWorker_HandleDeploy_FullBundle(t *testing.T) {
|
|||
identity := nm.GetIdentity()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
minerPath := filepath.Join(tmpDir, "test-miner")
|
||||
os.WriteFile(minerPath, []byte("miner binary"), 0755)
|
||||
minerPath := testJoinPath(tmpDir, "test-miner")
|
||||
testWriteFile(t, minerPath, []byte("miner binary"), 0o755)
|
||||
|
||||
sharedSecret := []byte("full-secret-key!")
|
||||
bundlePassword := base64.StdEncoding.EncodeToString(sharedSecret)
|
||||
|
|
@ -1288,22 +1226,19 @@ func TestWorker_HandleDeploy_FullBundle(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleDeploy_MinerBundle_WithProfileManager(t *testing.T) {
|
||||
func TestWorker_HandleDeploy_MinerBundle_WithProfileManager_Good(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, err := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create node manager: %v", err)
|
||||
}
|
||||
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
|
||||
t.Fatalf("failed to generate identity: %v", err)
|
||||
}
|
||||
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create peer registry: %v", err)
|
||||
}
|
||||
|
|
@ -1317,8 +1252,8 @@ func TestWorker_HandleDeploy_MinerBundle_WithProfileManager(t *testing.T) {
|
|||
identity := nm.GetIdentity()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
minerPath := filepath.Join(tmpDir, "test-miner")
|
||||
os.WriteFile(minerPath, []byte("miner binary"), 0755)
|
||||
minerPath := testJoinPath(tmpDir, "test-miner")
|
||||
testWriteFile(t, minerPath, []byte("miner binary"), 0o755)
|
||||
|
||||
profileJSON := []byte(`{"pool":"test:3333"}`)
|
||||
sharedSecret := []byte("profile-secret!!")
|
||||
|
|
@ -1352,17 +1287,14 @@ func TestWorker_HandleDeploy_MinerBundle_WithProfileManager(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleDeploy_InvalidPayload(t *testing.T) {
|
||||
func TestWorker_HandleDeploy_InvalidPayload_Bad(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
dir := t.TempDir()
|
||||
nm, _ := NewNodeManagerWithPaths(
|
||||
filepath.Join(dir, "private.key"),
|
||||
filepath.Join(dir, "node.json"),
|
||||
)
|
||||
nm, _ := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
|
||||
nm.GenerateIdentity("test", RoleWorker)
|
||||
pr, _ := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, _ := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
transport := NewTransport(nm, pr, DefaultTransportConfig())
|
||||
worker := NewWorker(nm, transport)
|
||||
worker.DataDir = t.TempDir()
|
||||
|
|
@ -1377,16 +1309,17 @@ func TestWorker_HandleDeploy_InvalidPayload(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleGetStats_NoIdentity(t *testing.T) {
|
||||
func TestWorker_HandleGetStats_NoIdentity_Bad(t *testing.T) {
|
||||
cleanup := setupTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
nm, _ := NewNodeManagerWithPaths(
|
||||
filepath.Join(t.TempDir(), "priv.key"),
|
||||
filepath.Join(t.TempDir(), "node.json"),
|
||||
testJoinPath(tmpDir, "priv.key"),
|
||||
testJoinPath(tmpDir, "node.json"),
|
||||
)
|
||||
// Don't generate identity
|
||||
pr, _ := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
|
||||
pr, _ := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
|
||||
transport := NewTransport(nm, pr, DefaultTransportConfig())
|
||||
worker := NewWorker(nm, transport)
|
||||
worker.DataDir = t.TempDir()
|
||||
|
|
@ -1398,7 +1331,7 @@ func TestWorker_HandleGetStats_NoIdentity(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleMessage_IntegrationViaWebSocket(t *testing.T) {
|
||||
func TestWorker_HandleMessage_IntegrationViaWebSocket_Good(t *testing.T) {
|
||||
// Test HandleMessage through real WebSocket -- exercises error response sending path
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
|
|
@ -1414,14 +1347,14 @@ func TestWorker_HandleMessage_IntegrationViaWebSocket(t *testing.T) {
|
|||
|
||||
// Send start_miner which will fail because no manager is set.
|
||||
// The worker should send an error response via the connection.
|
||||
err := controller.StartRemoteMiner(serverID, "xmrig", "", json.RawMessage(`{}`))
|
||||
err := controller.StartRemoteMiner(serverID, "xmrig", "", RawMessage(`{}`))
|
||||
// Should get an error back (either protocol error or operation failed)
|
||||
if err == nil {
|
||||
t.Error("expected error when worker has no miner manager")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWorker_HandleMessage_GetStats_IntegrationViaWebSocket(t *testing.T) {
|
||||
func TestWorker_HandleMessage_GetStats_IntegrationViaWebSocket_Good(t *testing.T) {
|
||||
// HandleMessage dispatch for get_stats through real WebSocket
|
||||
tp := setupTestTransportPair(t)
|
||||
|
||||
|
|
|
|||
80
specs/logging.md
Normal file
80
specs/logging.md
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
# logging
|
||||
|
||||
**Import:** `dappco.re/go/core/p2p/logging`
|
||||
|
||||
**Files:** 1
|
||||
|
||||
## Types
|
||||
|
||||
### `Level`
|
||||
`type Level int`
|
||||
|
||||
Log severity used by `Logger`. `String` renders the level name in upper case, and `ParseLevel` accepts `debug`, `info`, `warn` or `warning`, and `error`.
|
||||
|
||||
### `Config`
|
||||
```go
|
||||
type Config struct {
|
||||
Output io.Writer
|
||||
Level Level
|
||||
Component string
|
||||
}
|
||||
```
|
||||
|
||||
Configuration passed to `New`.
|
||||
|
||||
- `Output`: destination for log lines. `New` falls back to stderr when this is `nil`.
|
||||
- `Level`: minimum severity that will be emitted.
|
||||
- `Component`: optional component label added to each line.
|
||||
|
||||
### `Fields`
|
||||
`type Fields map[string]any`
|
||||
|
||||
Structured key/value fields passed to logging calls. When multiple `Fields` values are supplied, they are merged from left to right, so later maps override earlier keys.
|
||||
|
||||
### `Logger`
|
||||
`type Logger struct { /* unexported fields */ }`
|
||||
|
||||
Structured logger with configurable output, severity filtering, and component scoping. Log writes are serialised by a mutex and are formatted as timestamped single-line records.
|
||||
|
||||
## Functions
|
||||
|
||||
### Top-level
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `DefaultConfig` | `func DefaultConfig() Config` | Returns the default configuration: stderr output, `LevelInfo`, and no component label. |
|
||||
| `New` | `func New(cfg Config) *Logger` | Creates a `Logger` from `cfg`, substituting the default stderr writer when `cfg.Output` is `nil`. |
|
||||
| `SetGlobal` | `func SetGlobal(l *Logger)` | Replaces the package-level global logger instance. |
|
||||
| `GetGlobal` | `func GetGlobal() *Logger` | Returns the current package-level global logger. |
|
||||
| `SetGlobalLevel` | `func SetGlobalLevel(level Level)` | Updates the minimum severity on the current global logger. |
|
||||
| `Debug` | `func Debug(msg string, fields ...Fields)` | Logs a debug message through the global logger. |
|
||||
| `Info` | `func Info(msg string, fields ...Fields)` | Logs an informational message through the global logger. |
|
||||
| `Warn` | `func Warn(msg string, fields ...Fields)` | Logs a warning message through the global logger. |
|
||||
| `Error` | `func Error(msg string, fields ...Fields)` | Logs an error message through the global logger. |
|
||||
| `Debugf` | `func Debugf(format string, args ...any)` | Formats and logs a debug message through the global logger. |
|
||||
| `Infof` | `func Infof(format string, args ...any)` | Formats and logs an informational message through the global logger. |
|
||||
| `Warnf` | `func Warnf(format string, args ...any)` | Formats and logs a warning message through the global logger. |
|
||||
| `Errorf` | `func Errorf(format string, args ...any)` | Formats and logs an error message through the global logger. |
|
||||
| `ParseLevel` | `func ParseLevel(s string) (Level, error)` | Parses a text level into `Level`. Unknown strings return `LevelInfo` plus an error. |
|
||||
|
||||
### `Level` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `String` | `func (l Level) String() string` | Returns `DEBUG`, `INFO`, `WARN`, `ERROR`, or `UNKNOWN` for out-of-range values. |
|
||||
|
||||
### `*Logger` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `WithComponent` | `func (l *Logger) WithComponent(component string) *Logger` | Returns a new logger that uses the same output and current level but replaces the component label. |
|
||||
| `SetLevel` | `func (l *Logger) SetLevel(level Level)` | Sets the minimum severity that the logger will emit. |
|
||||
| `GetLevel` | `func (l *Logger) GetLevel() Level` | Returns the current minimum severity. |
|
||||
| `Debug` | `func (l *Logger) Debug(msg string, fields ...Fields)` | Logs `msg` at debug level after merging any supplied field maps. |
|
||||
| `Info` | `func (l *Logger) Info(msg string, fields ...Fields)` | Logs `msg` at info level after merging any supplied field maps. |
|
||||
| `Warn` | `func (l *Logger) Warn(msg string, fields ...Fields)` | Logs `msg` at warning level after merging any supplied field maps. |
|
||||
| `Error` | `func (l *Logger) Error(msg string, fields ...Fields)` | Logs `msg` at error level after merging any supplied field maps. |
|
||||
| `Debugf` | `func (l *Logger) Debugf(format string, args ...any)` | Formats and logs a debug message. |
|
||||
| `Infof` | `func (l *Logger) Infof(format string, args ...any)` | Formats and logs an informational message. |
|
||||
| `Warnf` | `func (l *Logger) Warnf(format string, args ...any)` | Formats and logs a warning message. |
|
||||
| `Errorf` | `func (l *Logger) Errorf(format string, args ...any)` | Formats and logs an error message. |
|
||||
117
specs/node-levin.md
Normal file
117
specs/node-levin.md
Normal file
|
|
@ -0,0 +1,117 @@
|
|||
# levin
|
||||
|
||||
**Import:** `dappco.re/go/core/p2p/node/levin`
|
||||
|
||||
**Files:** 4
|
||||
|
||||
## Types
|
||||
|
||||
### `Connection`
|
||||
```go
|
||||
type Connection struct {
|
||||
MaxPayloadSize uint64
|
||||
ReadTimeout time.Duration
|
||||
WriteTimeout time.Duration
|
||||
}
|
||||
```
|
||||
|
||||
Wrapper around `net.Conn` that reads and writes framed Levin packets.
|
||||
|
||||
- `MaxPayloadSize`: per-connection payload ceiling enforced by `ReadPacket`. `NewConnection` starts with the package `MaxPayloadSize` default.
|
||||
- `ReadTimeout`: deadline applied before each `ReadPacket` call. `NewConnection` sets this to `DefaultReadTimeout`.
|
||||
- `WriteTimeout`: deadline applied before each write. `NewConnection` sets this to `DefaultWriteTimeout`.
|
||||
|
||||
### `Header`
|
||||
```go
|
||||
type Header struct {
|
||||
Signature uint64
|
||||
PayloadSize uint64
|
||||
ExpectResponse bool
|
||||
Command uint32
|
||||
ReturnCode int32
|
||||
Flags uint32
|
||||
ProtocolVersion uint32
|
||||
}
|
||||
```
|
||||
|
||||
Packed 33-byte Levin frame header. `EncodeHeader` writes these fields little-endian, and `DecodeHeader` validates the `Signature` and package-level `MaxPayloadSize`.
|
||||
|
||||
### `Section`
|
||||
`type Section map[string]Value`
|
||||
|
||||
Portable-storage object used by the Levin encoder and decoder. `EncodeStorage` sorts keys alphabetically for deterministic output.
|
||||
|
||||
### `Value`
|
||||
```go
|
||||
type Value struct {
|
||||
Type uint8
|
||||
}
|
||||
```
|
||||
|
||||
Tagged portable-storage value. The exported `Type` field identifies which internal scalar or array slot is populated; constructors such as `Uint64Val`, `StringVal`, and `ObjectArrayVal` create correctly-typed instances.
|
||||
|
||||
## Functions
|
||||
|
||||
### Top-level framing and storage functions
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `NewConnection` | `func NewConnection(conn net.Conn) *Connection` | Wraps `conn` with Levin defaults: 100 MB payload limit, 120 s read timeout, and 30 s write timeout. |
|
||||
| `EncodeHeader` | `func EncodeHeader(h *Header) [HeaderSize]byte` | Serialises `h` into the fixed 33-byte Levin header format. |
|
||||
| `DecodeHeader` | `func DecodeHeader(buf [HeaderSize]byte) (Header, error)` | Parses a 33-byte header, rejecting bad magic signatures and payload sizes above the package-level limit. |
|
||||
| `PackVarint` | `func PackVarint(v uint64) []byte` | Encodes `v` using the epee portable-storage varint scheme where the low two bits of the first byte encode the width. |
|
||||
| `UnpackVarint` | `func UnpackVarint(buf []byte) (value uint64, bytesConsumed int, err error)` | Decodes one portable-storage varint and returns the value, consumed width, and any truncation or overflow error. |
|
||||
| `EncodeStorage` | `func EncodeStorage(s Section) ([]byte, error)` | Serialises a `Section` into portable-storage binary form, including the 9-byte storage header. |
|
||||
| `DecodeStorage` | `func DecodeStorage(data []byte) (Section, error)` | Deserialises portable-storage binary data, validates the storage signatures and version, and reconstructs a `Section`. |
|
||||
|
||||
### `Value` constructors
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `Uint64Val` | `func Uint64Val(v uint64) Value` | Creates a scalar `Value` with `TypeUint64`. |
|
||||
| `Uint32Val` | `func Uint32Val(v uint32) Value` | Creates a scalar `Value` with `TypeUint32`. |
|
||||
| `Uint16Val` | `func Uint16Val(v uint16) Value` | Creates a scalar `Value` with `TypeUint16`. |
|
||||
| `Uint8Val` | `func Uint8Val(v uint8) Value` | Creates a scalar `Value` with `TypeUint8`. |
|
||||
| `Int64Val` | `func Int64Val(v int64) Value` | Creates a scalar `Value` with `TypeInt64`. |
|
||||
| `Int32Val` | `func Int32Val(v int32) Value` | Creates a scalar `Value` with `TypeInt32`. |
|
||||
| `Int16Val` | `func Int16Val(v int16) Value` | Creates a scalar `Value` with `TypeInt16`. |
|
||||
| `Int8Val` | `func Int8Val(v int8) Value` | Creates a scalar `Value` with `TypeInt8`. |
|
||||
| `BoolVal` | `func BoolVal(v bool) Value` | Creates a scalar `Value` with `TypeBool`. |
|
||||
| `DoubleVal` | `func DoubleVal(v float64) Value` | Creates a scalar `Value` with `TypeDouble`. |
|
||||
| `StringVal` | `func StringVal(v []byte) Value` | Creates a scalar `Value` with `TypeString`. The byte slice is stored without copying. |
|
||||
| `ObjectVal` | `func ObjectVal(s Section) Value` | Creates a scalar `Value` with `TypeObject` that wraps a nested `Section`. |
|
||||
| `Uint64ArrayVal` | `func Uint64ArrayVal(vs []uint64) Value` | Creates an array `Value` tagged as `ArrayFlag | TypeUint64`. |
|
||||
| `Uint32ArrayVal` | `func Uint32ArrayVal(vs []uint32) Value` | Creates an array `Value` tagged as `ArrayFlag | TypeUint32`. |
|
||||
| `StringArrayVal` | `func StringArrayVal(vs [][]byte) Value` | Creates an array `Value` tagged as `ArrayFlag | TypeString`. |
|
||||
| `ObjectArrayVal` | `func ObjectArrayVal(vs []Section) Value` | Creates an array `Value` tagged as `ArrayFlag | TypeObject`. |
|
||||
|
||||
### `*Connection` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `WritePacket` | `func (c *Connection) WritePacket(cmd uint32, payload []byte, expectResponse bool) error` | Sends a Levin request or notification with `FlagRequest`, `ReturnOK`, and the current protocol version. Header and payload writes are serialised by an internal mutex. |
|
||||
| `WriteResponse` | `func (c *Connection) WriteResponse(cmd uint32, payload []byte, returnCode int32) error` | Sends a Levin response with `FlagResponse` and the supplied return code. |
|
||||
| `ReadPacket` | `func (c *Connection) ReadPacket() (Header, []byte, error)` | Applies the read deadline, reads exactly one header and payload, validates the frame, and enforces the connection-specific `MaxPayloadSize`. Empty payloads are returned as `nil` without allocation. |
|
||||
| `Close` | `func (c *Connection) Close() error` | Closes the wrapped network connection. |
|
||||
| `RemoteAddr` | `func (c *Connection) RemoteAddr() string` | Returns the wrapped connection's remote address string. |
|
||||
|
||||
### `Value` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `AsUint64` | `func (v Value) AsUint64() (uint64, error)` | Returns the scalar `uint64` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsUint32` | `func (v Value) AsUint32() (uint32, error)` | Returns the scalar `uint32` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsUint16` | `func (v Value) AsUint16() (uint16, error)` | Returns the scalar `uint16` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsUint8` | `func (v Value) AsUint8() (uint8, error)` | Returns the scalar `uint8` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsInt64` | `func (v Value) AsInt64() (int64, error)` | Returns the scalar `int64` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsInt32` | `func (v Value) AsInt32() (int32, error)` | Returns the scalar `int32` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsInt16` | `func (v Value) AsInt16() (int16, error)` | Returns the scalar `int16` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsInt8` | `func (v Value) AsInt8() (int8, error)` | Returns the scalar `int8` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsBool` | `func (v Value) AsBool() (bool, error)` | Returns the scalar `bool` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsDouble` | `func (v Value) AsDouble() (float64, error)` | Returns the scalar `float64` value or `ErrStorageTypeMismatch`. |
|
||||
| `AsString` | `func (v Value) AsString() ([]byte, error)` | Returns the scalar byte-string or `ErrStorageTypeMismatch`. |
|
||||
| `AsSection` | `func (v Value) AsSection() (Section, error)` | Returns the nested `Section` or `ErrStorageTypeMismatch`. |
|
||||
| `AsUint64Array` | `func (v Value) AsUint64Array() ([]uint64, error)` | Returns the `[]uint64` array or `ErrStorageTypeMismatch`. |
|
||||
| `AsUint32Array` | `func (v Value) AsUint32Array() ([]uint32, error)` | Returns the `[]uint32` array or `ErrStorageTypeMismatch`. |
|
||||
| `AsStringArray` | `func (v Value) AsStringArray() ([][]byte, error)` | Returns the `[][]byte` array or `ErrStorageTypeMismatch`. |
|
||||
| `AsSectionArray` | `func (v Value) AsSectionArray() ([]Section, error)` | Returns the `[]Section` array or `ErrStorageTypeMismatch`. |
|
||||
237
specs/node.md
Normal file
237
specs/node.md
Normal file
|
|
@ -0,0 +1,237 @@
|
|||
# node
|
||||
|
||||
**Import:** `dappco.re/go/core/p2p/node`
|
||||
|
||||
**Files:** 12
|
||||
|
||||
## Types
|
||||
|
||||
### Core types
|
||||
|
||||
| Type | Definition | Description |
|
||||
| --- | --- | --- |
|
||||
| `BundleType` | `type BundleType string` | Deployment bundle kind used by `Bundle` and `BundleManifest`. |
|
||||
| `Bundle` | `struct{ Type BundleType; Name string; Data []byte; Checksum string }` | Transferable deployment bundle. `Data` contains STIM-encrypted bytes or raw JSON, and `Checksum` is the SHA-256 hex digest of `Data`. |
|
||||
| `BundleManifest` | `struct{ Type BundleType; Name string; Version string; MinerType string; ProfileIDs []string; CreatedAt string }` | Metadata describing the logical contents of a bundle payload. |
|
||||
| `Controller` | `struct{ /* unexported fields */ }` | High-level controller client for remote peer operations. It keeps a pending-response map keyed by request ID and registers its internal response handler with the transport in `NewController`. |
|
||||
| `Dispatcher` | `struct{ /* unexported fields */ }` | Concurrent-safe UEPS router. It applies the threat-score circuit breaker before dispatching to a handler map keyed by `IntentID`. |
|
||||
| `IntentHandler` | `type IntentHandler func(pkt *ueps.ParsedPacket) error` | Callback signature used by `Dispatcher` for verified UEPS packets. |
|
||||
| `Message` | `struct{ ID string; Type MessageType; From string; To string; Timestamp time.Time; Payload RawMessage; ReplyTo string }` | Generic P2P message envelope. `Payload` stores raw JSON, and `ReplyTo` links responses back to the originating request. |
|
||||
| `MessageDeduplicator` | `struct{ /* unexported fields */ }` | TTL cache of recently seen message IDs used to suppress duplicates. |
|
||||
| `MessageHandler` | `type MessageHandler func(conn *PeerConnection, msg *Message)` | Callback signature for decrypted inbound transport messages. |
|
||||
| `MessageType` | `type MessageType string` | String message discriminator stored in `Message.Type`. |
|
||||
| `NodeIdentity` | `struct{ ID string; Name string; PublicKey string; CreatedAt time.Time; Role NodeRole }` | Public node identity. `ID` is derived from the first 16 bytes of the SHA-256 hash of the public key. |
|
||||
| `NodeManager` | `struct{ /* unexported fields */ }` | Identity and key manager that loads, generates, persists, and deletes X25519 node credentials. |
|
||||
| `NodeRole` | `type NodeRole string` | Operational mode string for controller, worker, or dual-role nodes. |
|
||||
| `Peer` | `struct{ ID string; Name string; PublicKey string; Address string; Role NodeRole; AddedAt time.Time; LastSeen time.Time; PingMS float64; Hops int; GeoKM float64; Score float64; Connected bool }` | Registry record for a remote node, including addressing, role, scoring metrics, and transient connection state. |
|
||||
| `PeerAuthMode` | `type PeerAuthMode int` | Peer admission policy used by `PeerRegistry` when unknown peers attempt to connect. |
|
||||
| `PeerConnection` | `struct{ Peer *Peer; Conn *websocket.Conn; SharedSecret []byte; LastActivity time.Time }` | Active WebSocket session to a peer, including the negotiated shared secret and transport-owned write/close coordination. |
|
||||
| `PeerRateLimiter` | `struct{ /* unexported fields */ }` | Per-peer token bucket limiter used by the transport hot path. |
|
||||
| `PeerRegistry` | `struct{ /* unexported fields */ }` | Concurrent peer store with KD-tree selection, allowlist state, and debounced persistence to disk. |
|
||||
| `ProtocolError` | `struct{ Code int; Message string }` | Structured remote error returned by protocol response helpers when a peer replies with `MsgError`. |
|
||||
| `RawMessage` | `type RawMessage []byte` | Raw JSON payload bytes preserved without eager decoding. |
|
||||
| `ResponseHandler` | `struct{}` | Helper for validating message envelopes and decoding typed responses. |
|
||||
| `Transport` | `struct{ /* unexported fields */ }` | WebSocket transport that manages listeners, connections, encryption, deduplication, and shutdown coordination. |
|
||||
| `TransportConfig` | `struct{ ListenAddr string; WSPath string; TLSCertPath string; TLSKeyPath string; MaxConns int; MaxMessageSize int64; PingInterval time.Duration; PongTimeout time.Duration }` | Listener, TLS, sizing, and keepalive settings for `Transport`. |
|
||||
| `Worker` | `struct{ DataDir string /* plus unexported fields */ }` | Inbound command handler for worker nodes. It tracks uptime, optional miner/profile integrations, and the base directory used for deployments. |
|
||||
|
||||
### Payload and integration types
|
||||
|
||||
| Type | Definition | Description |
|
||||
| --- | --- | --- |
|
||||
| `DeployAckPayload` | `struct{ Success bool; Name string; Error string }` | Deployment acknowledgement with success state, optional deployed name, and optional error text. |
|
||||
| `DeployPayload` | `struct{ BundleType string; Data []byte; Checksum string; Name string }` | Deployment request carrying STIM-encrypted bundle bytes (or other bundle data), checksum, and logical name. |
|
||||
| `DisconnectPayload` | `struct{ Reason string; Code int }` | Disconnect notice with human-readable reason and optional disconnect code. |
|
||||
| `ErrorPayload` | `struct{ Code int; Message string; Details string }` | Payload used by `MsgError` responses. |
|
||||
| `GetLogsPayload` | `struct{ MinerName string; Lines int; Since int64 }` | Request for miner console output, optionally bounded by line count and a Unix timestamp. |
|
||||
| `HandshakeAckPayload` | `struct{ Identity NodeIdentity; ChallengeResponse []byte; Accepted bool; Reason string }` | Handshake reply containing the responder identity, optional challenge response, acceptance flag, and optional rejection reason. |
|
||||
| `HandshakePayload` | `struct{ Identity NodeIdentity; Challenge []byte; Version string }` | Handshake request containing node identity, optional authentication challenge, and protocol version. |
|
||||
| `LogsPayload` | `struct{ MinerName string; Lines []string; HasMore bool }` | Returned miner log lines plus an indicator that more lines are available. |
|
||||
| `MinerAckPayload` | `struct{ Success bool; MinerName string; Error string }` | Acknowledgement for remote miner start and stop operations. |
|
||||
| `MinerInstance` | `interface{ GetName() string; GetType() string; GetStats() (any, error); GetConsoleHistory(lines int) []string }` | Minimal runtime miner contract used by the worker to collect stats and logs without importing the mining package. |
|
||||
| `MinerManager` | `interface{ StartMiner(minerType string, config any) (MinerInstance, error); StopMiner(name string) error; ListMiners() []MinerInstance; GetMiner(name string) (MinerInstance, error) }` | Worker-facing miner control contract. |
|
||||
| `MinerStatsItem` | `struct{ Name string; Type string; Hashrate float64; Shares int; Rejected int; Uptime int; Pool string; Algorithm string; CPUThreads int }` | Protocol-facing summary of one miner's runtime statistics. |
|
||||
| `PingPayload` | `struct{ SentAt int64 }` | Ping payload carrying the sender's millisecond timestamp. |
|
||||
| `PongPayload` | `struct{ SentAt int64; ReceivedAt int64 }` | Ping response carrying the echoed send time and the receiver's millisecond timestamp. |
|
||||
| `ProfileManager` | `interface{ GetProfile(id string) (any, error); SaveProfile(profile any) error }` | Worker-facing profile storage contract. |
|
||||
| `StartMinerPayload` | `struct{ MinerType string; ProfileID string; Config RawMessage }` | Request to start a miner with an optional profile ID and raw JSON config override. |
|
||||
| `StatsPayload` | `struct{ NodeID string; NodeName string; Miners []MinerStatsItem; Uptime int64 }` | Node-wide stats response with node identity fields, miner summaries, and uptime in seconds. |
|
||||
| `StopMinerPayload` | `struct{ MinerName string }` | Request to stop a miner by name. |
|
||||
|
||||
## Functions
|
||||
|
||||
### Bundle, protocol, and utility functions
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `CreateProfileBundle` | `func CreateProfileBundle(profileJSON []byte, name string, password string) (*Bundle, error)` | Builds a TIM containing `profileJSON`, encrypts it to STIM with `password`, and returns a `BundleProfile` bundle with a SHA-256 checksum. |
|
||||
| `CreateProfileBundleUnencrypted` | `func CreateProfileBundleUnencrypted(profileJSON []byte, name string) (*Bundle, error)` | Returns a `BundleProfile` bundle whose `Data` is the raw JSON payload and whose checksum is computed over that JSON. |
|
||||
| `CreateMinerBundle` | `func CreateMinerBundle(minerPath string, profileJSON []byte, name string, password string) (*Bundle, error)` | Reads a miner binary, tars it, loads it into a TIM, optionally attaches `profileJSON`, encrypts the result to STIM, and returns a `BundleMiner` bundle. |
|
||||
| `ExtractProfileBundle` | `func ExtractProfileBundle(bundle *Bundle, password string) ([]byte, error)` | Verifies `bundle.Checksum`, returns raw JSON directly when `bundle.Data` already looks like JSON, otherwise decrypts STIM and returns the embedded config bytes. |
|
||||
| `ExtractMinerBundle` | `func ExtractMinerBundle(bundle *Bundle, password string, destDir string) (string, []byte, error)` | Verifies checksum, decrypts STIM, extracts the root filesystem tarball into `destDir`, and returns the first executable path plus the embedded config bytes. |
|
||||
| `VerifyBundle` | `func VerifyBundle(bundle *Bundle) bool` | Returns whether `bundle.Checksum` matches the SHA-256 checksum of `bundle.Data`. |
|
||||
| `StreamBundle` | `func StreamBundle(bundle *Bundle, w io.Writer) error` | JSON-encodes `bundle` and writes it to `w`. |
|
||||
| `ReadBundle` | `func ReadBundle(r io.Reader) (*Bundle, error)` | Reads all bytes from `r`, JSON-decodes them into a `Bundle`, and returns the result. |
|
||||
| `GenerateChallenge` | `func GenerateChallenge() ([]byte, error)` | Returns a new 32-byte random authentication challenge. |
|
||||
| `SignChallenge` | `func SignChallenge(challenge []byte, sharedSecret []byte) []byte` | Computes the HMAC-SHA256 signature of `challenge` using `sharedSecret`. |
|
||||
| `VerifyChallenge` | `func VerifyChallenge(challenge, response, sharedSecret []byte) bool` | Recomputes the expected challenge signature and compares it to `response` with `hmac.Equal`. |
|
||||
| `IsProtocolVersionSupported` | `func IsProtocolVersionSupported(version string) bool` | Returns whether `version` is present in `SupportedProtocolVersions`. |
|
||||
| `MarshalJSON` | `func MarshalJSON(v any) ([]byte, error)` | Encodes `v` with the core JSON helper, restores the package's historical no-EscapeHTML behaviour, and returns a caller-owned copy of the bytes. |
|
||||
| `NewMessage` | `func NewMessage(msgType MessageType, from, to string, payload any) (*Message, error)` | Creates a message with a generated UUID, current timestamp, and JSON-encoded payload. A `nil` payload leaves `Payload` empty. |
|
||||
| `NewErrorMessage` | `func NewErrorMessage(from, to string, code int, message string, replyTo string) (*Message, error)` | Creates a `MsgError` response containing an `ErrorPayload` and sets `ReplyTo` to the supplied request ID. |
|
||||
| `ValidateResponse` | `func ValidateResponse(resp *Message, expectedType MessageType) error` | Convenience wrapper that delegates to `DefaultResponseHandler.ValidateResponse`. |
|
||||
| `ParseResponse` | `func ParseResponse(resp *Message, expectedType MessageType, target any) error` | Convenience wrapper that delegates to `DefaultResponseHandler.ParseResponse`. |
|
||||
| `IsProtocolError` | `func IsProtocolError(err error) bool` | Returns whether `err` is a `*ProtocolError`. |
|
||||
| `GetProtocolErrorCode` | `func GetProtocolErrorCode(err error) int` | Returns `err.(*ProtocolError).Code` when `err` is a `*ProtocolError`, otherwise `0`. |
|
||||
|
||||
### Constructors
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `DefaultTransportConfig` | `func DefaultTransportConfig() TransportConfig` | Returns the transport defaults: `:9091`, `/ws`, `MaxConns=100`, `MaxMessageSize=1<<20`, `PingInterval=30s`, and `PongTimeout=10s`. |
|
||||
| `NewController` | `func NewController(node *NodeManager, peers *PeerRegistry, transport *Transport) *Controller` | Creates a controller, initialises its pending-response map, and installs its response handler on `transport`. |
|
||||
| `NewDispatcher` | `func NewDispatcher() *Dispatcher` | Creates an empty dispatcher with a debug-level component logger named `dispatcher`. |
|
||||
| `NewMessageDeduplicator` | `func NewMessageDeduplicator(ttl time.Duration) *MessageDeduplicator` | Creates a deduplicator that retains message IDs for the supplied TTL. |
|
||||
| `NewNodeManager` | `func NewNodeManager() (*NodeManager, error)` | Resolves XDG key and config paths, then loads an existing identity if present. |
|
||||
| `NewNodeManagerWithPaths` | `func NewNodeManagerWithPaths(keyPath, configPath string) (*NodeManager, error)` | Creates a node manager using explicit key and config paths, primarily for tests. |
|
||||
| `NewPeerRateLimiter` | `func NewPeerRateLimiter(maxTokens, refillRate int) *PeerRateLimiter` | Creates a token bucket seeded with `maxTokens` and refilled at `refillRate` tokens per second. |
|
||||
| `NewPeerRegistry` | `func NewPeerRegistry() (*PeerRegistry, error)` | Resolves the XDG peers path, loads any persisted peers, and builds the selection KD-tree. |
|
||||
| `NewPeerRegistryWithPath` | `func NewPeerRegistryWithPath(peersPath string) (*PeerRegistry, error)` | Creates a peer registry bound to `peersPath` with open authentication mode and an empty public-key allowlist. |
|
||||
| `NewTransport` | `func NewTransport(node *NodeManager, registry *PeerRegistry, config TransportConfig) *Transport` | Creates a transport with lifecycle context, a 5-minute message deduplicator, and a WebSocket upgrader that only accepts local origins. |
|
||||
| `NewWorker` | `func NewWorker(node *NodeManager, transport *Transport) *Worker` | Creates a worker, records its start time for uptime reporting, and defaults `DataDir` to `xdg.DataHome`. |
|
||||
|
||||
### `RawMessage` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `MarshalJSON` | `func (m RawMessage) MarshalJSON() ([]byte, error)` | Emits raw payload bytes unchanged, or `null` when the receiver is `nil`. |
|
||||
| `UnmarshalJSON` | `func (m *RawMessage) UnmarshalJSON(data []byte) error` | Copies `data` into the receiver without decoding it. Passing a `nil` receiver returns an error. |
|
||||
|
||||
### `*Message` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `Reply` | `func (m *Message) Reply(msgType MessageType, payload any) (*Message, error)` | Creates a reply message that swaps `From` and `To` and sets `ReplyTo` to `m.ID`. |
|
||||
| `ParsePayload` | `func (m *Message) ParsePayload(v any) error` | JSON-decodes `Payload` into `v`. A `nil` payload is treated as a no-op. |
|
||||
|
||||
### `*NodeManager` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `HasIdentity` | `func (n *NodeManager) HasIdentity() bool` | Returns whether an identity is currently loaded in memory. |
|
||||
| `GetIdentity` | `func (n *NodeManager) GetIdentity() *NodeIdentity` | Returns a copy of the loaded public identity, or `nil` when no identity is initialised. |
|
||||
| `GenerateIdentity` | `func (n *NodeManager) GenerateIdentity(name string, role NodeRole) error` | Generates a new X25519 keypair, derives the node ID from the public key hash, stores the public identity, and persists both key and config to disk. |
|
||||
| `DeriveSharedSecret` | `func (n *NodeManager) DeriveSharedSecret(peerPubKeyBase64 string) ([]byte, error)` | Decodes the peer public key, performs X25519 ECDH with the node private key, hashes the result with SHA-256, and returns the symmetric key material. |
|
||||
| `Delete` | `func (n *NodeManager) Delete() error` | Removes persisted key/config files when they exist and clears the in-memory identity and key state. |
|
||||
|
||||
### `*Controller` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `GetRemoteStats` | `func (c *Controller) GetRemoteStats(peerID string) (*StatsPayload, error)` | Sends `MsgGetStats` to `peerID`, waits for a response, and decodes the resulting `MsgStats` payload. |
|
||||
| `StartRemoteMiner` | `func (c *Controller) StartRemoteMiner(peerID, minerType, profileID string, configOverride RawMessage) error` | Validates `minerType`, sends `MsgStartMiner`, waits for `MsgMinerAck`, and returns an error when the remote ack reports failure. |
|
||||
| `StopRemoteMiner` | `func (c *Controller) StopRemoteMiner(peerID, minerName string) error` | Sends `MsgStopMiner`, waits for `MsgMinerAck`, and returns an error when the remote ack reports failure. |
|
||||
| `GetRemoteLogs` | `func (c *Controller) GetRemoteLogs(peerID, minerName string, lines int) ([]string, error)` | Requests `MsgLogs` from a remote miner and returns the decoded log lines. |
|
||||
| `GetAllStats` | `func (c *Controller) GetAllStats() map[string]*StatsPayload` | Requests stats from every currently connected peer and returns the successful responses keyed by peer ID. |
|
||||
| `PingPeer` | `func (c *Controller) PingPeer(peerID string) (float64, error)` | Sends a ping, measures round-trip time in milliseconds, and updates the peer registry metrics for that peer. |
|
||||
| `ConnectToPeer` | `func (c *Controller) ConnectToPeer(peerID string) error` | Looks up `peerID` in the registry and establishes a transport connection. |
|
||||
| `DisconnectFromPeer` | `func (c *Controller) DisconnectFromPeer(peerID string) error` | Gracefully closes an active transport connection for `peerID`. |
|
||||
|
||||
### `*Dispatcher` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `RegisterHandler` | `func (d *Dispatcher) RegisterHandler(intentID byte, handler IntentHandler)` | Associates `handler` with `intentID`, replacing any existing handler for that intent. |
|
||||
| `Handlers` | `func (d *Dispatcher) Handlers() iter.Seq2[byte, IntentHandler]` | Returns an iterator over the currently registered intent handlers. |
|
||||
| `Dispatch` | `func (d *Dispatcher) Dispatch(pkt *ueps.ParsedPacket) error` | Rejects `nil` packets, drops packets whose `ThreatScore` exceeds `ThreatScoreThreshold`, rejects unknown intents, and otherwise invokes the matching handler. |
|
||||
|
||||
### `*MessageDeduplicator` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `IsDuplicate` | `func (d *MessageDeduplicator) IsDuplicate(msgID string) bool` | Returns whether `msgID` is still present in the deduplicator's TTL window. |
|
||||
| `Mark` | `func (d *MessageDeduplicator) Mark(msgID string)` | Records `msgID` with the current time. |
|
||||
| `Cleanup` | `func (d *MessageDeduplicator) Cleanup()` | Removes expired message IDs whose age exceeds the configured TTL. |
|
||||
|
||||
### `*PeerRateLimiter` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `Allow` | `func (r *PeerRateLimiter) Allow() bool` | Refills tokens according to elapsed whole seconds and returns whether one token could be consumed for the current message. |
|
||||
|
||||
### `*PeerRegistry` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `SetAuthMode` | `func (r *PeerRegistry) SetAuthMode(mode PeerAuthMode)` | Replaces the current peer admission mode. |
|
||||
| `GetAuthMode` | `func (r *PeerRegistry) GetAuthMode() PeerAuthMode` | Returns the current peer admission mode. |
|
||||
| `AllowPublicKey` | `func (r *PeerRegistry) AllowPublicKey(publicKey string)` | Adds `publicKey` to the explicit allowlist. |
|
||||
| `RevokePublicKey` | `func (r *PeerRegistry) RevokePublicKey(publicKey string)` | Removes `publicKey` from the explicit allowlist. |
|
||||
| `IsPublicKeyAllowed` | `func (r *PeerRegistry) IsPublicKeyAllowed(publicKey string) bool` | Returns whether `publicKey` is currently allowlisted. |
|
||||
| `IsPeerAllowed` | `func (r *PeerRegistry) IsPeerAllowed(peerID string, publicKey string) bool` | Returns `true` in open mode, or in allowlist mode when the peer is already registered or the supplied public key is allowlisted. |
|
||||
| `ListAllowedPublicKeys` | `func (r *PeerRegistry) ListAllowedPublicKeys() []string` | Returns a slice snapshot of allowlisted public keys. |
|
||||
| `AllowedPublicKeys` | `func (r *PeerRegistry) AllowedPublicKeys() iter.Seq[string]` | Returns an iterator over allowlisted public keys. |
|
||||
| `AddPeer` | `func (r *PeerRegistry) AddPeer(peer *Peer) error` | Validates the peer, sets `AddedAt` when zero, defaults `Score` to `50`, adds it to the registry, rebuilds the KD-tree, and schedules a debounced save. |
|
||||
| `UpdatePeer` | `func (r *PeerRegistry) UpdatePeer(peer *Peer) error` | Replaces an existing peer entry, rebuilds the KD-tree, and schedules a debounced save. |
|
||||
| `RemovePeer` | `func (r *PeerRegistry) RemovePeer(id string) error` | Deletes an existing peer, rebuilds the KD-tree, and schedules a debounced save. |
|
||||
| `GetPeer` | `func (r *PeerRegistry) GetPeer(id string) *Peer` | Returns a copy of the peer identified by `id`, or `nil` when absent. |
|
||||
| `ListPeers` | `func (r *PeerRegistry) ListPeers() []*Peer` | Returns a slice of peer copies. |
|
||||
| `Peers` | `func (r *PeerRegistry) Peers() iter.Seq[*Peer]` | Returns an iterator over peer copies so callers cannot mutate registry state directly. |
|
||||
| `UpdateMetrics` | `func (r *PeerRegistry) UpdateMetrics(id string, pingMS, geoKM float64, hops int) error` | Updates latency, distance, hop count, and `LastSeen`, rebuilds the KD-tree, and schedules a debounced save. |
|
||||
| `UpdateScore` | `func (r *PeerRegistry) UpdateScore(id string, score float64) error` | Clamps `score` into `[0,100]`, updates the peer, rebuilds the KD-tree, and schedules a debounced save. |
|
||||
| `SetConnected` | `func (r *PeerRegistry) SetConnected(id string, connected bool)` | Updates the connection flag for a peer and refreshes `LastSeen` when marking the peer connected. |
|
||||
| `RecordSuccess` | `func (r *PeerRegistry) RecordSuccess(id string)` | Increases the peer score by `ScoreSuccessIncrement` up to `ScoreMaximum`, updates `LastSeen`, and schedules a save. |
|
||||
| `RecordFailure` | `func (r *PeerRegistry) RecordFailure(id string)` | Decreases the peer score by `ScoreFailureDecrement` down to `ScoreMinimum` and schedules a save. |
|
||||
| `RecordTimeout` | `func (r *PeerRegistry) RecordTimeout(id string)` | Decreases the peer score by `ScoreTimeoutDecrement` down to `ScoreMinimum` and schedules a save. |
|
||||
| `GetPeersByScore` | `func (r *PeerRegistry) GetPeersByScore() []*Peer` | Returns peers sorted by descending score. |
|
||||
| `PeersByScore` | `func (r *PeerRegistry) PeersByScore() iter.Seq[*Peer]` | Returns an iterator over peers sorted by descending score. |
|
||||
| `SelectOptimalPeer` | `func (r *PeerRegistry) SelectOptimalPeer() *Peer` | Uses the KD-tree to find the peer closest to the ideal metrics vector and returns a copy of that peer. |
|
||||
| `SelectNearestPeers` | `func (r *PeerRegistry) SelectNearestPeers(n int) []*Peer` | Returns copies of the `n` nearest peers from the KD-tree according to the weighted metrics. |
|
||||
| `GetConnectedPeers` | `func (r *PeerRegistry) GetConnectedPeers() []*Peer` | Returns a slice of copies for peers whose `Connected` flag is true. |
|
||||
| `ConnectedPeers` | `func (r *PeerRegistry) ConnectedPeers() iter.Seq[*Peer]` | Returns an iterator over connected peer copies. |
|
||||
| `Count` | `func (r *PeerRegistry) Count() int` | Returns the number of registered peers. |
|
||||
| `Close` | `func (r *PeerRegistry) Close() error` | Stops any pending save timer and immediately flushes dirty peer data to disk when needed. |
|
||||
|
||||
### `*ResponseHandler` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `ValidateResponse` | `func (h *ResponseHandler) ValidateResponse(resp *Message, expectedType MessageType) error` | Rejects `nil` responses, unwraps `MsgError` into a `ProtocolError`, and checks that `resp.Type` matches `expectedType`. |
|
||||
| `ParseResponse` | `func (h *ResponseHandler) ParseResponse(resp *Message, expectedType MessageType, target any) error` | Runs `ValidateResponse` and then decodes the payload into `target` when `target` is not `nil`. |
|
||||
|
||||
### `*Transport` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `Start` | `func (t *Transport) Start() error` | Starts the WebSocket listener and begins accepting inbound peer connections. |
|
||||
| `Stop` | `func (t *Transport) Stop() error` | Cancels transport context, closes active connections, and shuts down the listener. |
|
||||
| `OnMessage` | `func (t *Transport) OnMessage(handler MessageHandler)` | Installs the inbound message callback used after decryption. It must be set before `Start` to avoid races. |
|
||||
| `Connect` | `func (t *Transport) Connect(peer *Peer) (*PeerConnection, error)` | Dials `peer`, performs the handshake, derives the shared secret, and returns the active peer connection. |
|
||||
| `Send` | `func (t *Transport) Send(peerID string, msg *Message) error` | Looks up the active connection for `peerID` and sends `msg` over it. |
|
||||
| `Connections` | `func (t *Transport) Connections() iter.Seq[*PeerConnection]` | Returns an iterator over active peer connections. |
|
||||
| `Broadcast` | `func (t *Transport) Broadcast(msg *Message) error` | Sends `msg` to every connected peer except the sender identified by `msg.From`. |
|
||||
| `GetConnection` | `func (t *Transport) GetConnection(peerID string) *PeerConnection` | Returns the active connection for `peerID`, or `nil` when not connected. |
|
||||
| `ConnectedPeers` | `func (t *Transport) ConnectedPeers() int` | Returns the number of active peer connections. |
|
||||
|
||||
### `*PeerConnection` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `Send` | `func (pc *PeerConnection) Send(msg *Message) error` | Encrypts and writes a message over the WebSocket connection. |
|
||||
| `Close` | `func (pc *PeerConnection) Close() error` | Closes the underlying connection once and releases transport state for that peer. |
|
||||
| `GracefulClose` | `func (pc *PeerConnection) GracefulClose(reason string, code int) error` | Sends a `MsgDisconnect` notification before closing the connection. |
|
||||
|
||||
### `*Worker` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `SetMinerManager` | `func (w *Worker) SetMinerManager(manager MinerManager)` | Installs the miner manager used for start, stop, stats, and log requests. |
|
||||
| `SetProfileManager` | `func (w *Worker) SetProfileManager(manager ProfileManager)` | Installs the profile manager used during deployment handling. |
|
||||
| `HandleMessage` | `func (w *Worker) HandleMessage(conn *PeerConnection, msg *Message)` | Dispatches supported message types, sends normal replies on success, and emits `MsgError` responses when a handled command fails. |
|
||||
| `RegisterWithTransport` | `func (w *Worker) RegisterWithTransport()` | Registers `HandleMessage` as the transport's inbound message callback. |
|
||||
|
||||
### `*ProtocolError` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `Error` | `func (e *ProtocolError) Error() string` | Formats the remote error as `remote error (<code>): <message>`. |
|
||||
67
specs/ueps.md
Normal file
67
specs/ueps.md
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
# ueps
|
||||
|
||||
**Import:** `dappco.re/go/core/p2p/ueps`
|
||||
|
||||
**Files:** 2
|
||||
|
||||
## Types
|
||||
|
||||
### `UEPSHeader`
|
||||
```go
|
||||
type UEPSHeader struct {
|
||||
Version uint8
|
||||
CurrentLayer uint8
|
||||
TargetLayer uint8
|
||||
IntentID uint8
|
||||
ThreatScore uint16
|
||||
}
|
||||
```
|
||||
|
||||
Routing and integrity metadata carried in UEPS frames.
|
||||
|
||||
- `Version`: protocol version byte. `NewBuilder` initialises this to `0x09`.
|
||||
- `CurrentLayer`: source layer byte. `NewBuilder` initialises this to `5`.
|
||||
- `TargetLayer`: destination layer byte. `NewBuilder` initialises this to `5`.
|
||||
- `IntentID`: semantic intent token.
|
||||
- `ThreatScore`: unsigned 16-bit risk score.
|
||||
|
||||
### `PacketBuilder`
|
||||
```go
|
||||
type PacketBuilder struct {
|
||||
Header UEPSHeader
|
||||
Payload []byte
|
||||
}
|
||||
```
|
||||
|
||||
Mutable packet assembly state used to produce a signed UEPS frame.
|
||||
|
||||
- `Header`: TLV metadata written before the payload.
|
||||
- `Payload`: raw payload bytes appended as the terminal TLV.
|
||||
|
||||
### `ParsedPacket`
|
||||
```go
|
||||
type ParsedPacket struct {
|
||||
Header UEPSHeader
|
||||
Payload []byte
|
||||
}
|
||||
```
|
||||
|
||||
Verified packet returned by `ReadAndVerify`.
|
||||
|
||||
- `Header`: decoded UEPS header values reconstructed from the stream.
|
||||
- `Payload`: payload bytes from the `TagPayload` TLV.
|
||||
|
||||
## Functions
|
||||
|
||||
### Top-level
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `NewBuilder` | `func NewBuilder(intentID uint8, payload []byte) *PacketBuilder` | Creates a packet builder with default header values (`Version=0x09`, `CurrentLayer=5`, `TargetLayer=5`, `ThreatScore=0`) and the supplied intent and payload. |
|
||||
| `ReadAndVerify` | `func ReadAndVerify(r *bufio.Reader, sharedSecret []byte) (*ParsedPacket, error)` | Reads TLVs from `r` until `TagPayload`, reconstructs the signed header bytes, and verifies the HMAC-SHA256 over headers plus payload using `sharedSecret`. Missing signatures, truncated data, and HMAC mismatches return errors. |
|
||||
|
||||
### `*PacketBuilder` methods
|
||||
|
||||
| Name | Signature | Description |
|
||||
| --- | --- | --- |
|
||||
| `MarshalAndSign` | `func (p *PacketBuilder) MarshalAndSign(sharedSecret []byte) ([]byte, error)` | Serialises header TLVs `0x01` through `0x05`, signs those bytes plus `Payload` with HMAC-SHA256, appends the `TagHMAC` TLV, then writes the terminal `TagPayload` TLV. All TLV lengths are encoded as 2-byte big-endian unsigned integers. |
|
||||
|
|
@ -7,7 +7,7 @@ import (
|
|||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// TLV Types
|
||||
|
|
@ -22,8 +22,10 @@ const (
|
|||
)
|
||||
|
||||
// UEPSHeader represents the conscious routing metadata
|
||||
//
|
||||
// header := UEPSHeader{IntentID: 0x01}
|
||||
type UEPSHeader struct {
|
||||
Version uint8 // Default 0x09
|
||||
Version uint8 // Default 0x09
|
||||
CurrentLayer uint8
|
||||
TargetLayer uint8
|
||||
IntentID uint8 // Semantic Token
|
||||
|
|
@ -31,12 +33,16 @@ type UEPSHeader struct {
|
|||
}
|
||||
|
||||
// PacketBuilder helps construct a signed UEPS frame
|
||||
//
|
||||
// builder := NewBuilder(0x01, []byte("hello"))
|
||||
type PacketBuilder struct {
|
||||
Header UEPSHeader
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
// NewBuilder creates a packet context for a specific intent
|
||||
//
|
||||
// builder := NewBuilder(0x01, []byte("hello"))
|
||||
func NewBuilder(intentID uint8, payload []byte) *PacketBuilder {
|
||||
return &PacketBuilder{
|
||||
Header: UEPSHeader{
|
||||
|
|
@ -68,7 +74,7 @@ func (p *PacketBuilder) MarshalAndSign(sharedSecret []byte) ([]byte, error) {
|
|||
if err := writeTLV(buf, TagIntent, []byte{p.Header.IntentID}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
// Threat Score is uint16, needs binary packing
|
||||
tsBuf := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(tsBuf, p.Header.ThreatScore)
|
||||
|
|
@ -105,22 +111,21 @@ func (p *PacketBuilder) MarshalAndSign(sharedSecret []byte) ([]byte, error) {
|
|||
func writeTLV(w io.Writer, tag uint8, value []byte) error {
|
||||
// Check length constraint (2 byte length = max 65535 bytes)
|
||||
if len(value) > 65535 {
|
||||
return coreerr.E("ueps.writeTLV", "TLV value too large for 2-byte length header", nil)
|
||||
return core.E("ueps.writeTLV", "TLV value too large for 2-byte length header", nil)
|
||||
}
|
||||
|
||||
if _, err := w.Write([]byte{tag}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
lenBuf := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(lenBuf, uint16(len(value)))
|
||||
if _, err := w.Write(lenBuf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
if _, err := w.Write(value); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@ import (
|
|||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
|
@ -22,7 +22,7 @@ type failWriter struct {
|
|||
|
||||
func (f *failWriter) Write(p []byte) (int, error) {
|
||||
if f.remaining <= 0 {
|
||||
return 0, errors.New("write failed")
|
||||
return 0, core.NewError("write failed")
|
||||
}
|
||||
f.remaining--
|
||||
return len(p), nil
|
||||
|
|
@ -30,7 +30,7 @@ func (f *failWriter) Write(p []byte) (int, error) {
|
|||
|
||||
// TestWriteTLV_TagWriteFails verifies writeTLV returns an error
|
||||
// when the very first Write (the tag byte) fails.
|
||||
func TestWriteTLV_TagWriteFails(t *testing.T) {
|
||||
func TestPacketCoverage_WriteTLV_TagWriteFails_Bad(t *testing.T) {
|
||||
w := &failWriter{remaining: 0}
|
||||
err := writeTLV(w, TagVersion, []byte{0x09})
|
||||
|
||||
|
|
@ -40,7 +40,7 @@ func TestWriteTLV_TagWriteFails(t *testing.T) {
|
|||
|
||||
// TestWriteTLV_LengthWriteFails verifies writeTLV returns an error
|
||||
// when the second Write (the length byte) fails.
|
||||
func TestWriteTLV_LengthWriteFails(t *testing.T) {
|
||||
func TestPacketCoverage_WriteTLV_LengthWriteFails_Bad(t *testing.T) {
|
||||
w := &failWriter{remaining: 1}
|
||||
err := writeTLV(w, TagVersion, []byte{0x09})
|
||||
|
||||
|
|
@ -50,7 +50,7 @@ func TestWriteTLV_LengthWriteFails(t *testing.T) {
|
|||
|
||||
// TestWriteTLV_ValueWriteFails verifies writeTLV returns an error
|
||||
// when the third Write (the value bytes) fails.
|
||||
func TestWriteTLV_ValueWriteFails(t *testing.T) {
|
||||
func TestPacketCoverage_WriteTLV_ValueWriteFails_Bad(t *testing.T) {
|
||||
w := &failWriter{remaining: 2}
|
||||
err := writeTLV(w, TagVersion, []byte{0x09})
|
||||
|
||||
|
|
@ -81,7 +81,7 @@ func (r *errorAfterNReader) Read(p []byte) (int, error) {
|
|||
// TestReadAndVerify_PayloadReadError exercises the error branch at
|
||||
// reader.go:51-53 where io.ReadAll fails after the 0xFF tag byte
|
||||
// has been successfully read.
|
||||
func TestReadAndVerify_PayloadReadError(t *testing.T) {
|
||||
func TestPacketCoverage_ReadAndVerify_PayloadReadError_Bad(t *testing.T) {
|
||||
// Build a valid packet so we have genuine TLV headers + HMAC.
|
||||
payload := []byte("coverage test")
|
||||
builder := NewBuilder(0x20, payload)
|
||||
|
|
@ -104,7 +104,7 @@ func TestReadAndVerify_PayloadReadError(t *testing.T) {
|
|||
prefix := frame[:payloadTagIdx+1]
|
||||
r := &errorAfterNReader{
|
||||
data: prefix,
|
||||
err: errors.New("connection reset"),
|
||||
err: core.NewError("connection reset"),
|
||||
}
|
||||
|
||||
_, err = ReadAndVerify(bufio.NewReader(r), testSecret)
|
||||
|
|
@ -115,7 +115,7 @@ func TestReadAndVerify_PayloadReadError(t *testing.T) {
|
|||
// TestReadAndVerify_PayloadReadError_EOF ensures that a truncated payload
|
||||
// (missing bytes after TagPayload) is handled as an I/O error (UnexpectedEOF)
|
||||
// because ReadAndVerify now uses io.ReadFull with the expected length prefix.
|
||||
func TestReadAndVerify_PayloadReadError_EOF(t *testing.T) {
|
||||
func TestPacketCoverage_ReadAndVerify_PayloadReadError_EOF_Bad(t *testing.T) {
|
||||
payload := []byte("eof test")
|
||||
builder := NewBuilder(0x20, payload)
|
||||
frame, err := builder.MarshalAndSign(testSecret)
|
||||
|
|
@ -141,7 +141,7 @@ func TestReadAndVerify_PayloadReadError_EOF(t *testing.T) {
|
|||
// TestWriteTLV_AllWritesSucceed confirms the happy path still works
|
||||
// after exercising all error branches — a simple sanity check using
|
||||
// failWriter with enough remaining writes.
|
||||
func TestWriteTLV_AllWritesSucceed(t *testing.T) {
|
||||
func TestPacketCoverage_WriteTLV_AllWritesSucceed_Good(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
err := writeTLV(&buf, TagVersion, []byte{0x09})
|
||||
require.NoError(t, err)
|
||||
|
|
@ -149,10 +149,9 @@ func TestWriteTLV_AllWritesSucceed(t *testing.T) {
|
|||
assert.Equal(t, []byte{TagVersion, 0x00, 0x01, 0x09}, buf.Bytes())
|
||||
}
|
||||
|
||||
|
||||
// TestWriteTLV_FailWriterTable runs the three failure scenarios in
|
||||
// a table-driven fashion for completeness.
|
||||
func TestWriteTLV_FailWriterTable(t *testing.T) {
|
||||
func TestPacketCoverage_WriteTLV_FailWriterTable_Bad(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
remaining int
|
||||
|
|
@ -177,7 +176,7 @@ func TestWriteTLV_FailWriterTable(t *testing.T) {
|
|||
// HMAC computation independently of the builder. This also serves as
|
||||
// a cross-check that our errorAfterNReader is not accidentally
|
||||
// corrupting the prefix bytes.
|
||||
func TestReadAndVerify_ManualPacket_PayloadReadError(t *testing.T) {
|
||||
func TestPacketCoverage_ReadAndVerify_ManualPacket_PayloadReadError_Bad(t *testing.T) {
|
||||
payload := []byte("manual test")
|
||||
|
||||
// Build header TLVs
|
||||
|
|
|
|||
|
|
@ -7,14 +7,15 @@ import (
|
|||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// testSecret is a deterministic shared secret for reproducible tests.
|
||||
var testSecret = []byte("test-shared-secret-32-bytes!!!!!")
|
||||
|
||||
func TestPacketBuilder_RoundTrip(t *testing.T) {
|
||||
func TestPacket_Builder_RoundTrip_Ugly(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
intentID uint8
|
||||
|
|
@ -84,7 +85,7 @@ func TestPacketBuilder_RoundTrip(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestHMACVerification_TamperedPayload(t *testing.T) {
|
||||
func TestPacket_HMACVerification_TamperedPayload_Bad(t *testing.T) {
|
||||
builder := NewBuilder(0x20, []byte("original payload"))
|
||||
frame, err := builder.MarshalAndSign(testSecret)
|
||||
if err != nil {
|
||||
|
|
@ -100,12 +101,12 @@ func TestHMACVerification_TamperedPayload(t *testing.T) {
|
|||
if err == nil {
|
||||
t.Fatal("Expected HMAC mismatch error for tampered payload")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "integrity violation") {
|
||||
if !core.Contains(err.Error(), "integrity violation") {
|
||||
t.Errorf("Expected integrity violation error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHMACVerification_TamperedHeader(t *testing.T) {
|
||||
func TestPacket_HMACVerification_TamperedHeader_Bad(t *testing.T) {
|
||||
builder := NewBuilder(0x20, []byte("test payload"))
|
||||
frame, err := builder.MarshalAndSign(testSecret)
|
||||
if err != nil {
|
||||
|
|
@ -122,12 +123,12 @@ func TestHMACVerification_TamperedHeader(t *testing.T) {
|
|||
if err == nil {
|
||||
t.Fatal("Expected HMAC mismatch error for tampered header")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "integrity violation") {
|
||||
if !core.Contains(err.Error(), "integrity violation") {
|
||||
t.Errorf("Expected integrity violation error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHMACVerification_WrongSharedSecret(t *testing.T) {
|
||||
func TestPacket_HMACVerification_WrongSharedSecret_Bad(t *testing.T) {
|
||||
builder := NewBuilder(0x20, []byte("secret data"))
|
||||
frame, err := builder.MarshalAndSign([]byte("key-A-used-for-signing!!!!!!!!!!"))
|
||||
if err != nil {
|
||||
|
|
@ -138,12 +139,12 @@ func TestHMACVerification_WrongSharedSecret(t *testing.T) {
|
|||
if err == nil {
|
||||
t.Fatal("Expected HMAC mismatch error for wrong shared secret")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "integrity violation") {
|
||||
if !core.Contains(err.Error(), "integrity violation") {
|
||||
t.Errorf("Expected integrity violation error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyPayload(t *testing.T) {
|
||||
func TestPacket_EmptyPayload_Ugly(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload []byte
|
||||
|
|
@ -175,7 +176,7 @@ func TestEmptyPayload(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMaxThreatScoreBoundary(t *testing.T) {
|
||||
func TestPacket_MaxThreatScoreBoundary_Ugly(t *testing.T) {
|
||||
builder := NewBuilder(0x20, []byte("threat boundary"))
|
||||
builder.Header.ThreatScore = 65535 // uint16 max
|
||||
|
||||
|
|
@ -194,7 +195,7 @@ func TestMaxThreatScoreBoundary(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMissingHMACTag(t *testing.T) {
|
||||
func TestPacket_MissingHMACTag_Bad(t *testing.T) {
|
||||
// Craft a packet manually: header TLVs + payload tag, but no HMAC (0x06)
|
||||
var buf bytes.Buffer
|
||||
|
||||
|
|
@ -214,24 +215,24 @@ func TestMissingHMACTag(t *testing.T) {
|
|||
if err == nil {
|
||||
t.Fatal("Expected 'missing HMAC' error")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "missing HMAC") {
|
||||
if !core.Contains(err.Error(), "missing HMAC") {
|
||||
t.Errorf("Expected 'missing HMAC' error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteTLV_ValueTooLarge(t *testing.T) {
|
||||
func TestPacket_WriteTLV_ValueTooLarge_Bad(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
oversized := make([]byte, 65536) // 1 byte over the 65535 limit
|
||||
err := writeTLV(&buf, TagVersion, oversized)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error for TLV value > 65535 bytes")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "TLV value too large") {
|
||||
if !core.Contains(err.Error(), "TLV value too large") {
|
||||
t.Errorf("Expected 'TLV value too large' error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTruncatedPacket(t *testing.T) {
|
||||
func TestPacket_TruncatedPacket_Bad(t *testing.T) {
|
||||
builder := NewBuilder(0x20, []byte("full payload"))
|
||||
frame, err := builder.MarshalAndSign(testSecret)
|
||||
if err != nil {
|
||||
|
|
@ -256,7 +257,7 @@ func TestTruncatedPacket(t *testing.T) {
|
|||
{
|
||||
name: "CutMidHMAC",
|
||||
cutAt: 20, // Somewhere inside the header TLVs or HMAC
|
||||
wantErr: "", // Any io error
|
||||
wantErr: "", // Any io error
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -267,14 +268,14 @@ func TestTruncatedPacket(t *testing.T) {
|
|||
if err == nil {
|
||||
t.Fatal("Expected error for truncated packet")
|
||||
}
|
||||
if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) {
|
||||
if tc.wantErr != "" && !core.Contains(err.Error(), tc.wantErr) {
|
||||
t.Errorf("Expected error containing %q, got: %v", tc.wantErr, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnknownTLVTag(t *testing.T) {
|
||||
func TestPacket_UnknownTLVTag_Bad(t *testing.T) {
|
||||
// Build a valid packet, then inject an unknown tag before the HMAC.
|
||||
// The unknown tag must be included in signedData for HMAC to pass.
|
||||
payload := []byte("tagged payload")
|
||||
|
|
@ -324,7 +325,7 @@ func TestUnknownTLVTag(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNewBuilder_Defaults(t *testing.T) {
|
||||
func TestPacket_NewBuilder_Defaults_Good(t *testing.T) {
|
||||
builder := NewBuilder(0x20, []byte("data"))
|
||||
|
||||
if builder.Header.Version != 0x09 {
|
||||
|
|
@ -344,7 +345,7 @@ func TestNewBuilder_Defaults(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestThreatScoreBoundaries(t *testing.T) {
|
||||
func TestPacket_ThreatScoreBoundaries_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
score uint16
|
||||
|
|
@ -378,7 +379,7 @@ func TestThreatScoreBoundaries(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWriteTLV_BoundaryLengths(t *testing.T) {
|
||||
func TestPacket_WriteTLV_BoundaryLengths_Ugly(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
length int
|
||||
|
|
@ -407,9 +408,8 @@ func TestWriteTLV_BoundaryLengths(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
// TestReadAndVerify_EmptyReader verifies behaviour on completely empty input.
|
||||
func TestReadAndVerify_EmptyReader(t *testing.T) {
|
||||
func TestPacket_ReadAndVerify_EmptyReader_Ugly(t *testing.T) {
|
||||
_, err := ReadAndVerify(bufio.NewReader(bytes.NewReader(nil)), testSecret)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error for empty reader")
|
||||
|
|
|
|||
|
|
@ -8,10 +8,12 @@ import (
|
|||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
coreerr "dappco.re/go/core/log"
|
||||
core "dappco.re/go/core"
|
||||
)
|
||||
|
||||
// ParsedPacket holds the verified data
|
||||
//
|
||||
// packet := &ParsedPacket{Header: UEPSHeader{IntentID: 0x01}}
|
||||
type ParsedPacket struct {
|
||||
Header UEPSHeader
|
||||
Payload []byte
|
||||
|
|
@ -19,6 +21,8 @@ type ParsedPacket struct {
|
|||
|
||||
// ReadAndVerify reads a UEPS frame from the stream and validates the HMAC.
|
||||
// It consumes the stream up to the end of the packet.
|
||||
//
|
||||
// packet, err := ReadAndVerify(reader, sharedSecret)
|
||||
func ReadAndVerify(r *bufio.Reader, sharedSecret []byte) (*ParsedPacket, error) {
|
||||
// Buffer to reconstruct the data for HMAC verification
|
||||
var signedData bytes.Buffer
|
||||
|
|
@ -93,7 +97,7 @@ func ReadAndVerify(r *bufio.Reader, sharedSecret []byte) (*ParsedPacket, error)
|
|||
|
||||
verify:
|
||||
if len(signature) == 0 {
|
||||
return nil, coreerr.E("ueps.ReadAndVerify", "UEPS packet missing HMAC signature", nil)
|
||||
return nil, core.E("ueps.ReadAndVerify", "UEPS packet missing HMAC signature", nil)
|
||||
}
|
||||
|
||||
// 5. Verify HMAC
|
||||
|
|
@ -104,7 +108,7 @@ verify:
|
|||
expectedMAC := mac.Sum(nil)
|
||||
|
||||
if !hmac.Equal(signature, expectedMAC) {
|
||||
return nil, coreerr.E("ueps.ReadAndVerify", "integrity violation: HMAC mismatch (ThreatScore +100)", nil)
|
||||
return nil, core.E("ueps.ReadAndVerify", "integrity violation: HMAC mismatch (ThreatScore +100)", nil)
|
||||
}
|
||||
|
||||
return &ParsedPacket{
|
||||
|
|
@ -112,4 +116,3 @@ verify:
|
|||
Payload: payload,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue