[agent/codex] AX v0.8.0 polish pass. Fix ALL violations — banned imports... #11

Merged
Virgil merged 1 commit from agent/upgrade-to-core-v0-8-0-alpha-1 into dev 2026-03-26 16:27:05 +00:00
38 changed files with 815 additions and 726 deletions

57
go.sum
View file

@ -1,99 +1,42 @@
dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk=
dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8=
forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg=
forge.lthn.ai/Snider/Enchantrix v0.0.4 h1:biwpix/bdedfyc0iVeK15awhhJKH6TEMYOTXzHXx5TI=
forge.lthn.ai/Snider/Enchantrix v0.0.4/go.mod h1:OGCwuVeZPq3OPe2h6TX/ZbgEjHU6B7owpIBeXQGbSe0=
forge.lthn.ai/Snider/Poindexter v0.0.3 h1:cx5wRhuLRKBM8riIZyNVAT2a8rwRhn1dodFBktocsVE=
forge.lthn.ai/Snider/Poindexter v0.0.3/go.mod h1:ddzGia98k3HKkR0gl58IDzqz+MmgW2cQJOCNLfuWPpo=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/ProtonMail/go-crypto v1.4.0 h1:Zq/pbM3F5DFgJiMouxEdSVY44MVoQNEKp5d5QxIQceQ=
github.com/ProtonMail/go-crypto v1.4.0/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/clipperhouse/uax29/v2 v2.4.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.7.0/go.mod h1:/1IUejTKH8xipsAcdfcSAlUlo2J7lkYV8GTKxAT/L3E=
github.com/go-git/go-git/v5 v5.16.4/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jchv/go-winloader v0.0.0-20250406163304-c1995be93bd1/go.mod h1:alcuEEnZsY1WQsagKhZDsoPCRoOijYqhZvPwLG0kzVs=
github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/labstack/echo/v4 v4.13.3/go.mod h1:o90YNEeQWjDozo584l7AwhJMHN0bOC4tAfg+Xox9q5g=
github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
github.com/leaanthony/go-ansi-parser v1.6.1/go.mod h1:+vva/2y4alzVmmIEpk9QDhA7vLC5zKDTRwfZGOp3IWU=
github.com/leaanthony/gosod v1.0.4/go.mod h1:GKuIL0zzPj3O1SdWQOdgURSuhkF+Urizzxh26t9f1cw=
github.com/leaanthony/slicer v1.6.0/go.mod h1:o/Iz29g7LN0GqH3aMjWAe90381nyZlDNquK+mtH2Fj8=
github.com/leaanthony/u v1.1.1/go.mod h1:9+o6hejoRljvZ3BzdYlVL0JYCwtnAsVuN9pVTQcaRfI=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tkrajina/go-reflector v0.5.8/go.mod h1:ECbqLgccecY5kPmPmXg1MrHW585yMcDkVl6IvJe64T4=
github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/wailsapp/go-webview2 v1.0.23/go.mod h1:qJmWAmAmaniuKGZPWwne+uor3AHMB5PFhqiK0Bbj8kc=
github.com/wailsapp/mimetype v1.4.1/go.mod h1:9aV5k31bBOv5z6u+QP8TltzvNGJPmNJD4XlAL3U+j3o=
github.com/wailsapp/wails/v2 v2.11.0/go.mod h1:jrf0ZaM6+GBc1wRmXsM8cIvzlg0karYin3erahI4+0k=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y=
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View file

@ -4,14 +4,16 @@ package logging
import (
"io"
"maps"
"os"
"sync"
"syscall"
"time"
core "dappco.re/go/core"
)
// Level represents the severity of a log message.
//
// level := LevelInfo
type Level int
const (
@ -42,6 +44,8 @@ func (l Level) String() string {
}
// Logger provides structured logging with configurable output and level.
//
// logger := New(DefaultConfig())
type Logger struct {
mu sync.Mutex
output io.Writer
@ -50,6 +54,8 @@ type Logger struct {
}
// Config holds configuration for creating a new Logger.
//
// cfg := Config{Output: io.Discard, Level: LevelDebug, Component: "sync"}
type Config struct {
Output io.Writer
Level Level
@ -57,18 +63,22 @@ type Config struct {
}
// DefaultConfig returns the default logger configuration.
//
// cfg := DefaultConfig()
func DefaultConfig() Config {
return Config{
Output: os.Stderr,
Output: defaultOutput,
Level: LevelInfo,
Component: "",
}
}
// New creates a new Logger with the given configuration.
//
// logger := New(DefaultConfig())
func New(cfg Config) *Logger {
if cfg.Output == nil {
cfg.Output = os.Stderr
cfg.Output = defaultOutput
}
return &Logger{
output: cfg.Output,
@ -101,8 +111,22 @@ func (l *Logger) GetLevel() Level {
}
// Fields represents key-value pairs for structured logging.
//
// fields := Fields{"peer_id": "node-1", "attempt": 2}
type Fields map[string]any
// stderrWriter writes log output directly to the process stderr file
// descriptor via syscall, bypassing os.Stderr.
type stderrWriter struct{}

// Write implements io.Writer for stderrWriter.
//
// It loops until every byte of p has been written: a single syscall.Write
// may legally perform a short write (n < len(p)) without returning an
// error, and the io.Writer contract requires Write to return a non-nil
// error whenever it reports n < len(p). The original single-call version
// could silently drop the tail of a log line.
func (stderrWriter) Write(p []byte) (int, error) {
	total := 0
	for total < len(p) {
		n, err := syscall.Write(syscall.Stderr, p[total:])
		if err != nil {
			return total, core.E("logging.stderrWriter.Write", "failed to write log line", err)
		}
		total += n
	}
	return total, nil
}

// defaultOutput is the fallback writer used when Config.Output is nil.
var defaultOutput io.Writer = stderrWriter{}
// log writes a log message at the specified level.
func (l *Logger) log(level Level, msg string, fields Fields) {
l.mu.Lock()
@ -204,6 +228,8 @@ var (
)
// SetGlobal sets the global logger instance.
//
// SetGlobal(New(DefaultConfig()))
func SetGlobal(l *Logger) {
globalMu.Lock()
defer globalMu.Unlock()
@ -211,6 +237,8 @@ func SetGlobal(l *Logger) {
}
// GetGlobal returns the global logger instance.
//
// logger := GetGlobal()
func GetGlobal() *Logger {
globalMu.RLock()
defer globalMu.RUnlock()
@ -218,6 +246,8 @@ func GetGlobal() *Logger {
}
// SetGlobalLevel sets the log level of the global logger.
//
// SetGlobalLevel(LevelDebug)
func SetGlobalLevel(level Level) {
globalMu.RLock()
defer globalMu.RUnlock()
@ -227,46 +257,64 @@ func SetGlobalLevel(level Level) {
// Global convenience functions that use the global logger
// Debug emits msg (with optional structured fields) at debug level via
// the process-wide global logger.
//
// Debug("connected", Fields{"peer_id": "node-1"})
func Debug(msg string, fields ...Fields) {
	logger := GetGlobal()
	logger.Debug(msg, fields...)
}
// Info emits msg (with optional structured fields) at info level via
// the process-wide global logger.
//
// Info("worker started", Fields{"component": "transport"})
func Info(msg string, fields ...Fields) {
	logger := GetGlobal()
	logger.Info(msg, fields...)
}
// Warn emits msg (with optional structured fields) at warning level via
// the process-wide global logger.
//
// Warn("peer rate limited", Fields{"peer_id": "node-1"})
func Warn(msg string, fields ...Fields) {
	logger := GetGlobal()
	logger.Warn(msg, fields...)
}
// Error emits msg (with optional structured fields) at error level via
// the process-wide global logger.
//
// Error("send failed", Fields{"peer_id": "node-1"})
func Error(msg string, fields ...Fields) {
	logger := GetGlobal()
	logger.Error(msg, fields...)
}
// Debugf emits a printf-formatted message at debug level via the
// process-wide global logger.
//
// Debugf("connected peer %s", "node-1")
func Debugf(format string, args ...any) {
	logger := GetGlobal()
	logger.Debugf(format, args...)
}
// Infof emits a printf-formatted message at info level via the
// process-wide global logger.
//
// Infof("worker %s ready", "node-1")
func Infof(format string, args ...any) {
	logger := GetGlobal()
	logger.Infof(format, args...)
}
// Warnf emits a printf-formatted message at warning level via the
// process-wide global logger.
//
// Warnf("peer %s is slow", "node-1")
func Warnf(format string, args ...any) {
	logger := GetGlobal()
	logger.Warnf(format, args...)
}
// Errorf emits a printf-formatted message at error level via the
// process-wide global logger.
//
// Errorf("peer %s failed", "node-1")
func Errorf(format string, args ...any) {
	logger := GetGlobal()
	logger.Errorf(format, args...)
}
// ParseLevel parses a string into a log level.
//
// level, err := ParseLevel("warn")
func ParseLevel(s string) (Level, error) {
switch core.Upper(s) {
case "DEBUG":

View file

@ -2,11 +2,12 @@ package logging
import (
"bytes"
"strings"
"testing"
core "dappco.re/go/core"
)
func TestLoggerLevels(t *testing.T) {
func TestLogger_Levels_Good(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &buf,
@ -21,29 +22,29 @@ func TestLoggerLevels(t *testing.T) {
// Info should appear
logger.Info("info message")
if !strings.Contains(buf.String(), "[INFO]") {
if !core.Contains(buf.String(), "[INFO]") {
t.Error("Info message should appear")
}
if !strings.Contains(buf.String(), "info message") {
if !core.Contains(buf.String(), "info message") {
t.Error("Info message content should appear")
}
buf.Reset()
// Warn should appear
logger.Warn("warn message")
if !strings.Contains(buf.String(), "[WARN]") {
if !core.Contains(buf.String(), "[WARN]") {
t.Error("Warn message should appear")
}
buf.Reset()
// Error should appear
logger.Error("error message")
if !strings.Contains(buf.String(), "[ERROR]") {
if !core.Contains(buf.String(), "[ERROR]") {
t.Error("Error message should appear")
}
}
func TestLoggerDebugLevel(t *testing.T) {
func TestLogger_DebugLevel_Good(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &buf,
@ -51,12 +52,12 @@ func TestLoggerDebugLevel(t *testing.T) {
})
logger.Debug("debug message")
if !strings.Contains(buf.String(), "[DEBUG]") {
if !core.Contains(buf.String(), "[DEBUG]") {
t.Error("Debug message should appear at Debug level")
}
}
func TestLoggerWithFields(t *testing.T) {
func TestLogger_WithFields_Good(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &buf,
@ -66,15 +67,15 @@ func TestLoggerWithFields(t *testing.T) {
logger.Info("test message", Fields{"key": "value", "num": 42})
output := buf.String()
if !strings.Contains(output, "key=value") {
if !core.Contains(output, "key=value") {
t.Error("Field key=value should appear")
}
if !strings.Contains(output, "num=42") {
if !core.Contains(output, "num=42") {
t.Error("Field num=42 should appear")
}
}
func TestLoggerWithComponent(t *testing.T) {
func TestLogger_WithComponent_Good(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &buf,
@ -85,12 +86,12 @@ func TestLoggerWithComponent(t *testing.T) {
logger.Info("test message")
output := buf.String()
if !strings.Contains(output, "[TestComponent]") {
if !core.Contains(output, "[TestComponent]") {
t.Error("Component name should appear in log")
}
}
func TestLoggerDerivedComponent(t *testing.T) {
func TestLogger_DerivedComponent_Good(t *testing.T) {
var buf bytes.Buffer
parent := New(Config{
Output: &buf,
@ -101,12 +102,12 @@ func TestLoggerDerivedComponent(t *testing.T) {
child.Info("child message")
output := buf.String()
if !strings.Contains(output, "[ChildComponent]") {
if !core.Contains(output, "[ChildComponent]") {
t.Error("Derived component name should appear")
}
}
func TestLoggerFormatted(t *testing.T) {
func TestLogger_Formatted_Good(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &buf,
@ -116,12 +117,12 @@ func TestLoggerFormatted(t *testing.T) {
logger.Infof("formatted %s %d", "string", 123)
output := buf.String()
if !strings.Contains(output, "formatted string 123") {
if !core.Contains(output, "formatted string 123") {
t.Errorf("Formatted message should appear, got: %s", output)
}
}
func TestSetLevel(t *testing.T) {
func TestLogger_SetLevel_Good(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &buf,
@ -137,7 +138,7 @@ func TestSetLevel(t *testing.T) {
// Change to Info level
logger.SetLevel(LevelInfo)
logger.Info("should appear now")
if !strings.Contains(buf.String(), "should appear now") {
if !core.Contains(buf.String(), "should appear now") {
t.Error("Info should appear after level change")
}
@ -147,7 +148,7 @@ func TestSetLevel(t *testing.T) {
}
}
func TestParseLevel(t *testing.T) {
func TestLogger_ParseLevel_Good(t *testing.T) {
tests := []struct {
input string
expected Level
@ -180,7 +181,7 @@ func TestParseLevel(t *testing.T) {
}
}
func TestGlobalLogger(t *testing.T) {
func TestLogger_GlobalLogger_Good(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &buf,
@ -190,7 +191,7 @@ func TestGlobalLogger(t *testing.T) {
SetGlobal(logger)
Info("global test")
if !strings.Contains(buf.String(), "global test") {
if !core.Contains(buf.String(), "global test") {
t.Error("Global logger should write message")
}
@ -205,7 +206,7 @@ func TestGlobalLogger(t *testing.T) {
SetGlobal(New(DefaultConfig()))
}
func TestLevelString(t *testing.T) {
func TestLogger_LevelString_Good(t *testing.T) {
tests := []struct {
level Level
expected string
@ -224,7 +225,7 @@ func TestLevelString(t *testing.T) {
}
}
func TestMergeFields(t *testing.T) {
func TestLogger_MergeFields_Good(t *testing.T) {
// Empty fields
result := mergeFields(nil)
if result != nil {

View file

@ -0,0 +1,44 @@
// SPDX-License-Identifier: EUPL-1.2
package node
import (
"io/fs"
"testing"
core "dappco.re/go/core"
"github.com/stretchr/testify/require"
)
// testJoinPath joins path segments using Core's path primitive so tests
// avoid the banned path/filepath import. Thin delegation to core.JoinPath;
// separator semantics are whatever core.JoinPath provides — presumably
// OS-appropriate, TODO confirm against the core package.
func testJoinPath(parts ...string) string {
return core.JoinPath(parts...)
}
// testNodeManagerPaths returns the conventional key-file and node-file
// paths ("private.key", "node.json") under dir, in that order, matching
// the argument order NewNodeManagerWithPaths expects.
func testNodeManagerPaths(dir string) (string, string) {
	keyPath := testJoinPath(dir, "private.key")
	nodePath := testJoinPath(dir, "node.json")
	return keyPath, nodePath
}
// testWriteFile writes content to path with the given file mode through
// the package's localFS abstraction, failing the test on any error.
// Marked as a helper so failures report the caller's line.
func testWriteFile(t *testing.T, path string, content []byte, mode fs.FileMode) {
t.Helper()
// fsResultErr converts the core Result returned by WriteMode into a
// plain error; content is passed as string because WriteMode takes one.
require.NoError(t, fsResultErr(localFS.WriteMode(path, string(content), mode)))
}
// testReadFile reads the file at path via the package's fsRead helper and
// returns its contents as bytes, failing the test on error. Marked as a
// helper so failures report the caller's line.
func testReadFile(t *testing.T, path string) []byte {
t.Helper()
content, err := fsRead(path)
require.NoError(t, err)
// fsRead yields a string; callers expect []byte, so convert here.
return []byte(content)
}
// testJSONMarshal encodes v to JSON via Core's JSON primitive, failing the
// test (rather than panicking) if encoding fails or the result payload is
// not the expected []byte.
//
// The original used an unchecked type assertion result.Value.([]byte),
// which would panic the whole test binary if the Result ever carried a
// different payload type; require.True gives a clean failure instead.
func testJSONMarshal(t *testing.T, v any) []byte {
	t.Helper()
	result := core.JSONMarshal(v)
	require.True(t, result.OK, "marshal should succeed: %v", result.Value)
	data, ok := result.Value.([]byte)
	require.True(t, ok, "marshal result should be []byte, got %T", result.Value)
	return data
}
// testJSONUnmarshal decodes data into target via Core's JSON primitive,
// failing the test if decoding does not succeed. On failure the Result's
// Value is included in the message — presumably it carries the decode
// error; TODO confirm against the core package's Result contract.
func testJSONUnmarshal(t *testing.T, data []byte, target any) {
t.Helper()
result := core.JSONUnmarshal(data, target)
require.True(t, result.OK, "unmarshal should succeed: %v", result.Value)
}

View file

@ -2,11 +2,10 @@ package node
import (
"encoding/base64"
"encoding/json"
"path/filepath"
"testing"
"time"
core "dappco.re/go/core"
"forge.lthn.ai/Snider/Borg/pkg/smsg"
)
@ -16,10 +15,7 @@ func BenchmarkIdentityGenerate(b *testing.B) {
b.ReportAllocs()
for b.Loop() {
dir := b.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
b.Fatalf("create node manager: %v", err)
}
@ -34,10 +30,10 @@ func BenchmarkDeriveSharedSecret(b *testing.B) {
dir1 := b.TempDir()
dir2 := b.TempDir()
nm1, _ := NewNodeManagerWithPaths(filepath.Join(dir1, "k"), filepath.Join(dir1, "n"))
nm1, _ := NewNodeManagerWithPaths(testJoinPath(dir1, "k"), testJoinPath(dir1, "n"))
nm1.GenerateIdentity("node1", RoleDual)
nm2, _ := NewNodeManagerWithPaths(filepath.Join(dir2, "k"), filepath.Join(dir2, "n"))
nm2, _ := NewNodeManagerWithPaths(testJoinPath(dir2, "k"), testJoinPath(dir2, "n"))
nm2.GenerateIdentity("node2", RoleDual)
peerPubKey := nm2.GetIdentity().PublicKey
@ -88,8 +84,8 @@ func BenchmarkMessageSerialise(b *testing.B) {
}
var restored Message
if err := json.Unmarshal(data, &restored); err != nil {
b.Fatalf("unmarshal message: %v", err)
if result := core.JSONUnmarshal(data, &restored); !result.OK {
b.Fatalf("unmarshal message: %v", result.Value)
}
}
}
@ -136,9 +132,8 @@ func BenchmarkMarshalJSON(b *testing.B) {
b.Run("Stdlib", func(b *testing.B) {
b.ReportAllocs()
for b.Loop() {
_, err := json.Marshal(data)
if err != nil {
b.Fatal(err)
if result := core.JSONMarshal(data); !result.OK {
b.Fatal(result.Value)
}
}
})
@ -150,10 +145,10 @@ func BenchmarkSMSGEncryptDecrypt(b *testing.B) {
dir1 := b.TempDir()
dir2 := b.TempDir()
nm1, _ := NewNodeManagerWithPaths(filepath.Join(dir1, "k"), filepath.Join(dir1, "n"))
nm1, _ := NewNodeManagerWithPaths(testJoinPath(dir1, "k"), testJoinPath(dir1, "n"))
nm1.GenerateIdentity("node1", RoleDual)
nm2, _ := NewNodeManagerWithPaths(filepath.Join(dir2, "k"), filepath.Join(dir2, "n"))
nm2, _ := NewNodeManagerWithPaths(testJoinPath(dir2, "k"), testJoinPath(dir2, "n"))
nm2.GenerateIdentity("node2", RoleDual)
sharedSecret, _ := nm1.DeriveSharedSecret(nm2.GetIdentity().PublicKey)
@ -202,7 +197,7 @@ func BenchmarkChallengeSignVerify(b *testing.B) {
// BenchmarkPeerScoring measures KD-tree rebuild and peer selection.
func BenchmarkPeerScoring(b *testing.B) {
dir := b.TempDir()
reg, err := NewPeerRegistryWithPath(filepath.Join(dir, "peers.json"))
reg, err := NewPeerRegistryWithPath(testJoinPath(dir, "peers.json"))
if err != nil {
b.Fatalf("create registry: %v", err)
}
@ -211,7 +206,7 @@ func BenchmarkPeerScoring(b *testing.B) {
// Add 50 peers with varied metrics
for i := range 50 {
peer := &Peer{
ID: filepath.Join("peer", string(rune('A'+i%26)), string(rune('0'+i/26))),
ID: testJoinPath("peer", string(rune('A'+i%26)), string(rune('0'+i/26))),
Name: "peer",
PingMS: float64(i*10 + 5),
Hops: i%5 + 1,

View file

@ -33,6 +33,8 @@ func putBuffer(buf *bytes.Buffer) {
// MarshalJSON encodes a value to JSON using Core's JSON primitive and then
// restores the historical no-EscapeHTML behaviour expected by the node package.
// Returns a copy of the encoded bytes (safe to use after the function returns).
//
// data, err := MarshalJSON(v)
func MarshalJSON(v any) ([]byte, error) {
encoded := core.JSONMarshal(v)
if !encoded.OK {

View file

@ -2,17 +2,17 @@ package node
import (
"bytes"
"encoding/json"
"sync"
"testing"
core "dappco.re/go/core"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// --- bufpool.go tests ---
func TestGetBuffer_ReturnsResetBuffer(t *testing.T) {
func TestBufpool_GetBuffer_ReturnsResetBuffer_Good(t *testing.T) {
t.Run("buffer is initially empty", func(t *testing.T) {
buf := getBuffer()
defer putBuffer(buf)
@ -33,7 +33,7 @@ func TestGetBuffer_ReturnsResetBuffer(t *testing.T) {
})
}
func TestPutBuffer_DiscardsOversizedBuffers(t *testing.T) {
func TestBufpool_PutBuffer_DiscardsOversizedBuffers_Good(t *testing.T) {
t.Run("buffer at 64KB limit is pooled", func(t *testing.T) {
buf := getBuffer()
buf.Grow(65536)
@ -59,7 +59,7 @@ func TestPutBuffer_DiscardsOversizedBuffers(t *testing.T) {
})
}
func TestBufPool_BufferIndependence(t *testing.T) {
func TestBufpool_BufPool_BufferIndependence_Good(t *testing.T) {
buf1 := getBuffer()
buf2 := getBuffer()
@ -77,7 +77,7 @@ func TestBufPool_BufferIndependence(t *testing.T) {
putBuffer(buf2)
}
func TestMarshalJSON_BasicTypes(t *testing.T) {
func TestBufpool_MarshalJSON_BasicTypes_Good(t *testing.T) {
tests := []struct {
name string
input any
@ -121,8 +121,7 @@ func TestMarshalJSON_BasicTypes(t *testing.T) {
got, err := MarshalJSON(tt.input)
require.NoError(t, err)
expected, err := json.Marshal(tt.input)
require.NoError(t, err)
expected := testJSONMarshal(t, tt.input)
assert.JSONEq(t, string(expected), string(got),
"MarshalJSON output should match json.Marshal")
@ -130,7 +129,7 @@ func TestMarshalJSON_BasicTypes(t *testing.T) {
}
}
func TestMarshalJSON_NoTrailingNewline(t *testing.T) {
func TestBufpool_MarshalJSON_NoTrailingNewline_Good(t *testing.T) {
data, err := MarshalJSON(map[string]string{"key": "value"})
require.NoError(t, err)
@ -138,7 +137,7 @@ func TestMarshalJSON_NoTrailingNewline(t *testing.T) {
"MarshalJSON should strip the trailing newline added by json.Encoder")
}
func TestMarshalJSON_HTMLEscaping(t *testing.T) {
func TestBufpool_MarshalJSON_HTMLEscaping_Good(t *testing.T) {
input := map[string]string{"html": "<script>alert('xss')</script>"}
data, err := MarshalJSON(input)
require.NoError(t, err)
@ -147,7 +146,7 @@ func TestMarshalJSON_HTMLEscaping(t *testing.T) {
"HTML characters should not be escaped when EscapeHTML is false")
}
func TestMarshalJSON_ReturnsCopy(t *testing.T) {
func TestBufpool_MarshalJSON_ReturnsCopy_Good(t *testing.T) {
data1, err := MarshalJSON("first")
require.NoError(t, err)
@ -162,7 +161,7 @@ func TestMarshalJSON_ReturnsCopy(t *testing.T) {
"returned slice should be a copy and not be mutated by subsequent calls")
}
func TestMarshalJSON_ReturnsIndependentCopy(t *testing.T) {
func TestBufpool_MarshalJSON_ReturnsIndependentCopy_Good(t *testing.T) {
data1, err := MarshalJSON(map[string]string{"first": "call"})
require.NoError(t, err)
@ -175,13 +174,13 @@ func TestMarshalJSON_ReturnsIndependentCopy(t *testing.T) {
"second result should contain its own data")
}
func TestMarshalJSON_InvalidValue(t *testing.T) {
func TestBufpool_MarshalJSON_InvalidValue_Bad(t *testing.T) {
ch := make(chan int)
_, err := MarshalJSON(ch)
assert.Error(t, err, "marshalling an unserialisable type should return an error")
}
func TestBufferPool_ConcurrentAccess(t *testing.T) {
func TestBufpool_BufferPool_ConcurrentAccess_Ugly(t *testing.T) {
const goroutines = 100
const iterations = 50
@ -206,7 +205,7 @@ func TestBufferPool_ConcurrentAccess(t *testing.T) {
wg.Wait()
}
func TestMarshalJSON_ConcurrentSafety(t *testing.T) {
func TestBufpool_MarshalJSON_ConcurrentSafety_Ugly(t *testing.T) {
const goroutines = 50
var wg sync.WaitGroup
@ -223,8 +222,8 @@ func TestMarshalJSON_ConcurrentSafety(t *testing.T) {
if err == nil {
var parsed PingPayload
err = json.Unmarshal(data, &parsed)
if err != nil {
if result := core.JSONUnmarshal(data, &parsed); !result.OK {
err = result.Value.(error)
errs[idx] = err
return
}
@ -242,7 +241,7 @@ func TestMarshalJSON_ConcurrentSafety(t *testing.T) {
}
}
func TestBufferPool_ReuseAfterReset(t *testing.T) {
func TestBufpool_BufferPool_ReuseAfterReset_Ugly(t *testing.T) {
buf := getBuffer()
buf.Write(make([]byte, 4096))
putBuffer(buf)

View file

@ -6,7 +6,7 @@ import (
"crypto/sha256"
"encoding/hex"
"io"
"os"
"io/fs"
core "dappco.re/go/core"
@ -15,15 +15,22 @@ import (
)
// BundleType defines the type of deployment bundle.
//
// bundleType := BundleProfile
type BundleType string
const (
BundleProfile BundleType = "profile" // Just config/profile JSON
BundleMiner BundleType = "miner" // Miner binary + config
BundleFull BundleType = "full" // Everything (miner + profiles + config)
// BundleProfile contains a profile JSON payload.
BundleProfile BundleType = "profile"
// BundleMiner contains a miner binary and optional profile data.
BundleMiner BundleType = "miner"
// BundleFull contains the full deployment payload.
BundleFull BundleType = "full"
)
// Bundle represents a deployment bundle for P2P transfer.
//
// bundle := &Bundle{Type: BundleProfile, Name: "xmrig", Data: []byte("{}")}
type Bundle struct {
Type BundleType `json:"type"`
Name string `json:"name"`
@ -32,6 +39,8 @@ type Bundle struct {
}
// BundleManifest describes the contents of a bundle.
//
// manifest := BundleManifest{Name: "xmrig", Type: BundleMiner}
type BundleManifest struct {
Type BundleType `json:"type"`
Name string `json:"name"`
@ -42,6 +51,8 @@ type BundleManifest struct {
}
// CreateProfileBundle creates an encrypted bundle containing a mining profile.
//
// bundle, err := CreateProfileBundle(profileJSON, "xmrig-default", "password")
func CreateProfileBundle(profileJSON []byte, name string, password string) (*Bundle, error) {
// Create a TIM with just the profile config
t, err := tim.New()
@ -68,6 +79,8 @@ func CreateProfileBundle(profileJSON []byte, name string, password string) (*Bun
}
// CreateProfileBundleUnencrypted creates a plain JSON bundle (for testing or trusted networks).
//
// bundle, err := CreateProfileBundleUnencrypted(profileJSON, "xmrig-default")
func CreateProfileBundleUnencrypted(profileJSON []byte, name string) (*Bundle, error) {
checksum := calculateChecksum(profileJSON)
@ -80,6 +93,8 @@ func CreateProfileBundleUnencrypted(profileJSON []byte, name string) (*Bundle, e
}
// CreateMinerBundle creates an encrypted bundle containing a miner binary and optional profile.
//
// bundle, err := CreateMinerBundle("/srv/miners/xmrig", profileJSON, "xmrig", "password")
func CreateMinerBundle(minerPath string, profileJSON []byte, name string, password string) (*Bundle, error) {
// Read miner binary
minerContent, err := fsRead(minerPath)
@ -130,6 +145,8 @@ func CreateMinerBundle(minerPath string, profileJSON []byte, name string, passwo
}
// ExtractProfileBundle decrypts and extracts a profile bundle.
//
// profileJSON, err := ExtractProfileBundle(bundle, "password")
func ExtractProfileBundle(bundle *Bundle, password string) ([]byte, error) {
// Verify checksum first
if calculateChecksum(bundle.Data) != bundle.Checksum {
@ -151,6 +168,8 @@ func ExtractProfileBundle(bundle *Bundle, password string) ([]byte, error) {
}
// ExtractMinerBundle decrypts and extracts a miner bundle, returning the miner path and profile.
//
// minerPath, profileJSON, err := ExtractMinerBundle(bundle, "password", "/srv/miners")
func ExtractMinerBundle(bundle *Bundle, password string, destDir string) (string, []byte, error) {
// Verify checksum
if calculateChecksum(bundle.Data) != bundle.Checksum {
@ -179,6 +198,8 @@ func ExtractMinerBundle(bundle *Bundle, password string, destDir string) (string
}
// VerifyBundle checks if a bundle's checksum is valid.
//
// ok := VerifyBundle(bundle)
func VerifyBundle(bundle *Bundle) bool {
return calculateChecksum(bundle.Data) == bundle.Checksum
}
@ -251,14 +272,18 @@ func createTarball(files map[string][]byte) ([]byte, error) {
func extractTarball(tarData []byte, destDir string) (string, error) {
// Ensure destDir is an absolute, clean path for security checks
absDestDir := destDir
pathSeparator := core.Env("DS")
if pathSeparator == "" {
pathSeparator = "/"
}
if !core.PathIsAbs(absDestDir) {
cwd, err := os.Getwd()
if err != nil {
return "", core.E("extractTarball", "failed to resolve destination directory", err)
cwd := core.Env("DIR_CWD")
if cwd == "" {
return "", core.E("extractTarball", "failed to resolve destination directory", nil)
}
absDestDir = core.CleanPath(core.Concat(cwd, string(os.PathSeparator), absDestDir), string(os.PathSeparator))
absDestDir = core.CleanPath(core.Concat(cwd, pathSeparator, absDestDir), pathSeparator)
} else {
absDestDir = core.CleanPath(absDestDir, string(os.PathSeparator))
absDestDir = core.CleanPath(absDestDir, pathSeparator)
}
if err := fsEnsureDir(absDestDir); err != nil {
@ -291,11 +316,11 @@ func extractTarball(tarData []byte, destDir string) (string, error) {
}
// Build the full path and verify it's within destDir
fullPath := core.CleanPath(core.Concat(absDestDir, string(os.PathSeparator), cleanName), string(os.PathSeparator))
fullPath := core.CleanPath(core.Concat(absDestDir, pathSeparator, cleanName), pathSeparator)
// Final security check: ensure the path is still within destDir
allowedPrefix := core.Concat(absDestDir, string(os.PathSeparator))
if absDestDir == string(os.PathSeparator) {
allowedPrefix := core.Concat(absDestDir, pathSeparator)
if absDestDir == pathSeparator {
allowedPrefix = absDestDir
}
if !core.HasPrefix(fullPath, allowedPrefix) && fullPath != absDestDir {
@ -313,26 +338,20 @@ func extractTarball(tarData []byte, destDir string) (string, error) {
return "", err
}
// os.OpenFile is used deliberately here instead of core.Fs.Create/Write
// because the helper writes with fixed default permissions and we need to preserve
// the tar header's mode bits — executable binaries require 0755.
f, err := os.OpenFile(fullPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(hdr.Mode))
if err != nil {
return "", core.E("extractTarball", "failed to create file "+hdr.Name, err)
}
// Limit file size to prevent decompression bombs (100MB max per file)
const maxFileSize int64 = 100 * 1024 * 1024
limitedReader := io.LimitReader(tr, maxFileSize+1)
written, err := io.Copy(f, limitedReader)
f.Close()
content, err := io.ReadAll(limitedReader)
if err != nil {
return "", core.E("extractTarball", "failed to write file "+hdr.Name, err)
}
if written > maxFileSize {
if int64(len(content)) > maxFileSize {
fsDelete(fullPath)
return "", core.E("extractTarball", "file "+hdr.Name+" exceeds maximum size", nil)
}
if err := fsResultErr(localFS.WriteMode(fullPath, string(content), fs.FileMode(hdr.Mode))); err != nil {
return "", core.E("extractTarball", "failed to create file "+hdr.Name, err)
}
// Track first executable
if hdr.Mode&0111 != 0 && firstExecutable == "" {
@ -349,6 +368,8 @@ func extractTarball(tarData []byte, destDir string) (string, error) {
}
// StreamBundle writes a bundle to a writer (for large transfers).
//
// err := StreamBundle(bundle, writer)
func StreamBundle(bundle *Bundle, w io.Writer) error {
result := core.JSONMarshal(bundle)
if !result.OK {
@ -359,6 +380,8 @@ func StreamBundle(bundle *Bundle, w io.Writer) error {
}
// ReadBundle reads a bundle from a reader.
//
// bundle, err := ReadBundle(reader)
func ReadBundle(r io.Reader) (*Bundle, error) {
var buf bytes.Buffer
if _, err := io.Copy(&buf, r); err != nil {

View file

@ -3,12 +3,10 @@ package node
import (
"archive/tar"
"bytes"
"os"
"path/filepath"
"testing"
)
func TestCreateProfileBundleUnencrypted(t *testing.T) {
func TestBundle_CreateProfileBundleUnencrypted_Good(t *testing.T) {
profileJSON := []byte(`{"name":"test-profile","minerType":"xmrig","config":{}}`)
bundle, err := CreateProfileBundleUnencrypted(profileJSON, "test-profile")
@ -33,7 +31,7 @@ func TestCreateProfileBundleUnencrypted(t *testing.T) {
}
}
func TestVerifyBundle(t *testing.T) {
func TestBundle_VerifyBundle_Good(t *testing.T) {
t.Run("ValidChecksum", func(t *testing.T) {
bundle, _ := CreateProfileBundleUnencrypted([]byte(`{"test":"data"}`), "test")
@ -61,7 +59,7 @@ func TestVerifyBundle(t *testing.T) {
})
}
func TestCreateProfileBundle(t *testing.T) {
func TestBundle_CreateProfileBundle_Good(t *testing.T) {
profileJSON := []byte(`{"name":"encrypted-profile","minerType":"xmrig"}`)
password := "test-password-123"
@ -90,7 +88,7 @@ func TestCreateProfileBundle(t *testing.T) {
}
}
func TestExtractProfileBundle(t *testing.T) {
func TestBundle_ExtractProfileBundle_Good(t *testing.T) {
t.Run("UnencryptedBundle", func(t *testing.T) {
originalJSON := []byte(`{"name":"plain","config":{}}`)
bundle, _ := CreateProfileBundleUnencrypted(originalJSON, "plain")
@ -142,7 +140,7 @@ func TestExtractProfileBundle(t *testing.T) {
})
}
func TestTarballFunctions(t *testing.T) {
func TestBundle_TarballFunctions_Good(t *testing.T) {
t.Run("CreateAndExtractTarball", func(t *testing.T) {
files := map[string][]byte{
"file1.txt": []byte("content of file 1"),
@ -160,8 +158,7 @@ func TestTarballFunctions(t *testing.T) {
}
// Extract to temp directory
tmpDir, _ := os.MkdirTemp("", "tarball-test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
firstExec, err := extractTarball(tarData, tmpDir)
if err != nil {
@ -170,12 +167,7 @@ func TestTarballFunctions(t *testing.T) {
// Check files exist
for name, content := range files {
path := filepath.Join(tmpDir, name)
data, err := os.ReadFile(path)
if err != nil {
t.Errorf("failed to read extracted file %s: %v", name, err)
continue
}
data := testReadFile(t, testJoinPath(tmpDir, name))
if !bytes.Equal(data, content) {
t.Errorf("content mismatch for %s", name)
@ -189,7 +181,7 @@ func TestTarballFunctions(t *testing.T) {
})
}
func TestStreamAndReadBundle(t *testing.T) {
func TestBundle_StreamAndReadBundle_Good(t *testing.T) {
original, _ := CreateProfileBundleUnencrypted([]byte(`{"streaming":"test"}`), "stream-test")
// Stream to buffer
@ -218,7 +210,7 @@ func TestStreamAndReadBundle(t *testing.T) {
}
}
func TestCalculateChecksum(t *testing.T) {
func TestBundle_CalculateChecksum_Good(t *testing.T) {
t.Run("Deterministic", func(t *testing.T) {
data := []byte("test data for checksum")
@ -256,7 +248,7 @@ func TestCalculateChecksum(t *testing.T) {
})
}
func TestIsJSON(t *testing.T) {
func TestBundle_IsJSON_Good(t *testing.T) {
tests := []struct {
data []byte
expected bool
@ -279,7 +271,7 @@ func TestIsJSON(t *testing.T) {
}
}
func TestBundleTypes(t *testing.T) {
func TestBundle_Types_Good(t *testing.T) {
types := []BundleType{
BundleProfile,
BundleMiner,
@ -295,16 +287,11 @@ func TestBundleTypes(t *testing.T) {
}
}
func TestCreateMinerBundle(t *testing.T) {
func TestBundle_CreateMinerBundle_Good(t *testing.T) {
// Create a temp "miner binary"
tmpDir, _ := os.MkdirTemp("", "miner-bundle-test")
defer os.RemoveAll(tmpDir)
minerPath := filepath.Join(tmpDir, "test-miner")
err := os.WriteFile(minerPath, []byte("fake miner binary content"), 0755)
if err != nil {
t.Fatalf("failed to create test miner: %v", err)
}
tmpDir := t.TempDir()
minerPath := testJoinPath(tmpDir, "test-miner")
testWriteFile(t, minerPath, []byte("fake miner binary content"), 0o755)
profileJSON := []byte(`{"profile":"data"}`)
password := "miner-password"
@ -323,8 +310,7 @@ func TestCreateMinerBundle(t *testing.T) {
}
// Extract and verify
extractDir, _ := os.MkdirTemp("", "miner-extract-test")
defer os.RemoveAll(extractDir)
extractDir := t.TempDir()
extractedPath, extractedProfile, err := ExtractMinerBundle(bundle, password, extractDir)
if err != nil {
@ -341,10 +327,7 @@ func TestCreateMinerBundle(t *testing.T) {
// If we got an extracted path, verify its content
if extractedPath != "" {
minerData, err := os.ReadFile(extractedPath)
if err != nil {
t.Fatalf("failed to read extracted miner: %v", err)
}
minerData := testReadFile(t, extractedPath)
if string(minerData) != "fake miner binary content" {
t.Error("miner content mismatch")
@ -354,7 +337,7 @@ func TestCreateMinerBundle(t *testing.T) {
// --- Additional coverage tests for bundle.go ---
func TestExtractTarball_PathTraversal(t *testing.T) {
func TestBundle_ExtractTarball_PathTraversal_Bad(t *testing.T) {
t.Run("AbsolutePath", func(t *testing.T) {
// Create a tarball with an absolute path entry
tarData, err := createTarballWithCustomName("/etc/passwd", []byte("malicious"))
@ -446,8 +429,8 @@ func TestExtractTarball_PathTraversal(t *testing.T) {
}
// Verify symlink was not created
linkPath := filepath.Join(tmpDir, "link")
if _, statErr := os.Lstat(linkPath); !os.IsNotExist(statErr) {
linkPath := testJoinPath(tmpDir, "link")
if fsExists(linkPath) {
t.Error("symlink should not be created")
}
})
@ -481,10 +464,7 @@ func TestExtractTarball_PathTraversal(t *testing.T) {
}
// Verify directory and file exist
data, err := os.ReadFile(filepath.Join(tmpDir, "mydir", "file.txt"))
if err != nil {
t.Fatalf("failed to read extracted file: %v", err)
}
data := testReadFile(t, testJoinPath(tmpDir, "mydir", "file.txt"))
if !bytes.Equal(data, content) {
t.Error("content mismatch")
}
@ -531,7 +511,7 @@ func createTarballWithSymlink(name, target string) ([]byte, error) {
return buf.Bytes(), nil
}
func TestExtractMinerBundle_ChecksumMismatch(t *testing.T) {
func TestBundle_ExtractMinerBundle_ChecksumMismatch_Bad(t *testing.T) {
bundle := &Bundle{
Type: BundleMiner,
Name: "bad-bundle",
@ -545,17 +525,17 @@ func TestExtractMinerBundle_ChecksumMismatch(t *testing.T) {
}
}
func TestCreateMinerBundle_NonExistentFile(t *testing.T) {
func TestBundle_CreateMinerBundle_NonExistentFile_Bad(t *testing.T) {
_, err := CreateMinerBundle("/non/existent/miner", nil, "test", "password")
if err == nil {
t.Error("expected error for non-existent miner file")
}
}
func TestCreateMinerBundle_NilProfile(t *testing.T) {
func TestBundle_CreateMinerBundle_NilProfile_Ugly(t *testing.T) {
tmpDir := t.TempDir()
minerPath := filepath.Join(tmpDir, "miner")
os.WriteFile(minerPath, []byte("binary"), 0755)
minerPath := testJoinPath(tmpDir, "miner")
testWriteFile(t, minerPath, []byte("binary"), 0o755)
bundle, err := CreateMinerBundle(minerPath, nil, "nil-profile", "pass")
if err != nil {
@ -566,7 +546,7 @@ func TestCreateMinerBundle_NilProfile(t *testing.T) {
}
}
func TestReadBundle_InvalidJSON(t *testing.T) {
func TestBundle_ReadBundle_InvalidJSON_Bad(t *testing.T) {
reader := bytes.NewReader([]byte("not json"))
_, err := ReadBundle(reader)
if err == nil {
@ -574,7 +554,7 @@ func TestReadBundle_InvalidJSON(t *testing.T) {
}
}
func TestStreamBundle_EmptyBundle(t *testing.T) {
func TestBundle_StreamBundle_EmptyBundle_Ugly(t *testing.T) {
bundle := &Bundle{
Type: BundleProfile,
Name: "empty",
@ -598,7 +578,7 @@ func TestStreamBundle_EmptyBundle(t *testing.T) {
}
}
func TestCreateTarball_MultipleDirs(t *testing.T) {
func TestBundle_CreateTarball_MultipleDirs_Good(t *testing.T) {
files := map[string][]byte{
"dir1/file1.txt": []byte("content1"),
"dir2/file2.txt": []byte("content2"),
@ -616,11 +596,7 @@ func TestCreateTarball_MultipleDirs(t *testing.T) {
}
for name, content := range files {
data, err := os.ReadFile(filepath.Join(tmpDir, name))
if err != nil {
t.Errorf("failed to read %s: %v", name, err)
continue
}
data := testReadFile(t, testJoinPath(tmpDir, name))
if !bytes.Equal(data, content) {
t.Errorf("content mismatch for %s", name)
}

View file

@ -11,6 +11,8 @@ import (
)
// Controller manages remote peer operations from a controller node.
//
// controller := NewController(nodeManager, peerRegistry, transport)
type Controller struct {
node *NodeManager
peers *PeerRegistry
@ -22,6 +24,8 @@ type Controller struct {
}
// NewController creates a new Controller instance.
//
// controller := NewController(nodeManager, peerRegistry, transport)
func NewController(node *NodeManager, peers *PeerRegistry, transport *Transport) *Controller {
c := &Controller{
node: node,

View file

@ -1,17 +1,15 @@
package node
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"path/filepath"
"sync"
"sync/atomic"
"testing"
"time"
core "dappco.re/go/core"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -75,7 +73,7 @@ func makeWorkerServer(t *testing.T) (*NodeManager, string, *Transport) {
// --- Controller Tests ---
func TestController_RequestResponseCorrelation(t *testing.T) {
func TestController_RequestResponseCorrelation_Good(t *testing.T) {
controller, _, tp := setupControllerPair(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -86,7 +84,7 @@ func TestController_RequestResponseCorrelation(t *testing.T) {
assert.Greater(t, rtt, 0.0, "RTT should be positive")
}
func TestController_RequestTimeout(t *testing.T) {
func TestController_RequestTimeout_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
// Register a handler on the server that deliberately ignores all messages,
@ -117,7 +115,7 @@ func TestController_RequestTimeout(t *testing.T) {
assert.Less(t, elapsed, 1*time.Second, "should return quickly after the deadline")
}
func TestController_AutoConnect(t *testing.T) {
func TestController_AutoConnect_Good(t *testing.T) {
tp := setupTestTransportPair(t)
// Register worker on the server side.
@ -149,7 +147,7 @@ func TestController_AutoConnect(t *testing.T) {
assert.Equal(t, 1, tp.Client.ConnectedPeers(), "should have 1 connection after auto-connect")
}
func TestController_GetAllStats(t *testing.T) {
func TestController_GetAllStats_Good(t *testing.T) {
// Controller node with connections to two independent worker servers.
controllerNM := testNode(t, "controller", RoleController)
controllerReg := testRegistry(t)
@ -194,7 +192,7 @@ func TestController_GetAllStats(t *testing.T) {
}
}
func TestController_PingPeerRTT(t *testing.T) {
func TestController_PingPeerRTT_Good(t *testing.T) {
controller, _, tp := setupControllerPair(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -217,7 +215,7 @@ func TestController_PingPeerRTT(t *testing.T) {
assert.Greater(t, peerAfter.PingMS, 0.0, "PingMS should be positive")
}
func TestController_ConcurrentRequests(t *testing.T) {
func TestController_ConcurrentRequests_Ugly(t *testing.T) {
// Multiple goroutines send pings to different peers simultaneously.
// Verify correct correlation — no cross-talk between responses.
controllerNM := testNode(t, "controller", RoleController)
@ -271,7 +269,7 @@ func TestController_ConcurrentRequests(t *testing.T) {
}
}
func TestController_DeadPeerCleanup(t *testing.T) {
func TestController_DeadPeerCleanup_Good(t *testing.T) {
tp := setupTestTransportPair(t)
// Server deliberately ignores all messages.
@ -307,7 +305,7 @@ func TestController_DeadPeerCleanup(t *testing.T) {
// --- Additional edge-case tests ---
func TestController_MultipleSequentialPings(t *testing.T) {
func TestController_MultipleSequentialPings_Good(t *testing.T) {
// Ensures sequential requests to the same peer are correctly correlated.
controller, _, tp := setupControllerPair(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -319,7 +317,7 @@ func TestController_MultipleSequentialPings(t *testing.T) {
}
}
func TestController_ConcurrentRequestsSamePeer(t *testing.T) {
func TestController_ConcurrentRequestsSamePeer_Ugly(t *testing.T) {
// Multiple goroutines sending requests to the SAME peer simultaneously.
// Tests concurrent pending-map insertions/deletions under contention.
controller, _, tp := setupControllerPair(t)
@ -343,7 +341,7 @@ func TestController_ConcurrentRequestsSamePeer(t *testing.T) {
"all concurrent requests to the same peer should succeed")
}
func TestController_GetRemoteStats(t *testing.T) {
func TestController_GetRemoteStats_Good(t *testing.T) {
controller, _, tp := setupControllerPair(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -357,7 +355,7 @@ func TestController_GetRemoteStats(t *testing.T) {
assert.GreaterOrEqual(t, stats.Uptime, int64(0), "uptime should be non-negative")
}
func TestController_ConnectToPeerUnknown(t *testing.T) {
func TestController_ConnectToPeerUnknown_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
@ -366,7 +364,7 @@ func TestController_ConnectToPeerUnknown(t *testing.T) {
assert.Contains(t, err.Error(), "not found")
}
func TestController_DisconnectFromPeer(t *testing.T) {
func TestController_DisconnectFromPeer_Good(t *testing.T) {
controller, _, tp := setupControllerPair(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -376,7 +374,7 @@ func TestController_DisconnectFromPeer(t *testing.T) {
require.NoError(t, err, "DisconnectFromPeer should succeed")
}
func TestController_DisconnectFromPeerNotConnected(t *testing.T) {
func TestController_DisconnectFromPeerNotConnected_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
@ -385,7 +383,7 @@ func TestController_DisconnectFromPeerNotConnected(t *testing.T) {
assert.Contains(t, err.Error(), "not connected")
}
func TestController_SendRequestPeerNotFound(t *testing.T) {
func TestController_SendRequestPeerNotFound_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
@ -475,7 +473,7 @@ func (m *mockMinerManagerFull) StopMiner(name string) error {
defer m.mu.Unlock()
if _, exists := m.miners[name]; !exists {
return fmt.Errorf("miner %s not found", name)
return core.E("mockMinerManagerFull.StopMiner", "miner "+name+" not found", nil)
}
delete(m.miners, name)
return nil
@ -498,7 +496,7 @@ func (m *mockMinerManagerFull) GetMiner(name string) (MinerInstance, error) {
miner, exists := m.miners[name]
if !exists {
return nil, fmt.Errorf("miner %s not found", name)
return nil, core.E("mockMinerManagerFull.GetMiner", "miner "+name+" not found", nil)
}
return miner, nil
}
@ -521,25 +519,25 @@ func (m *mockMinerFull) GetConsoleHistory(lines int) []string {
return m.consoleHistory[:lines]
}
func TestController_StartRemoteMiner(t *testing.T) {
func TestController_StartRemoteMiner_Good(t *testing.T) {
controller, _, tp := setupControllerPairWithMiner(t)
serverID := tp.ServerNode.GetIdentity().ID
configOverride := json.RawMessage(`{"pool":"pool.example.com:3333"}`)
configOverride := RawMessage(`{"pool":"pool.example.com:3333"}`)
err := controller.StartRemoteMiner(serverID, "xmrig", "profile-1", configOverride)
require.NoError(t, err, "StartRemoteMiner should succeed")
}
func TestController_StartRemoteMiner_WithConfig(t *testing.T) {
func TestController_StartRemoteMiner_WithConfig_Good(t *testing.T) {
controller, _, tp := setupControllerPairWithMiner(t)
serverID := tp.ServerNode.GetIdentity().ID
configOverride := json.RawMessage(`{"pool":"custom-pool:3333","threads":4}`)
configOverride := RawMessage(`{"pool":"custom-pool:3333","threads":4}`)
err := controller.StartRemoteMiner(serverID, "xmrig", "", configOverride)
require.NoError(t, err, "StartRemoteMiner with config override should succeed")
}
func TestController_StartRemoteMiner_EmptyType(t *testing.T) {
func TestController_StartRemoteMiner_EmptyType_Bad(t *testing.T) {
controller, _, tp := setupControllerPairWithMiner(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -548,14 +546,12 @@ func TestController_StartRemoteMiner_EmptyType(t *testing.T) {
assert.Contains(t, err.Error(), "miner type is required")
}
func TestController_StartRemoteMiner_NoIdentity(t *testing.T) {
func TestController_StartRemoteMiner_NoIdentity_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
// Create a node without identity
nmNoID, err := NewNodeManagerWithPaths(
filepath.Join(t.TempDir(), "priv.key"),
filepath.Join(t.TempDir(), "node.json"),
)
keyPath, configPath := testNodeManagerPaths(t.TempDir())
nmNoID, err := NewNodeManagerWithPaths(keyPath, configPath)
require.NoError(t, err)
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
@ -565,7 +561,7 @@ func TestController_StartRemoteMiner_NoIdentity(t *testing.T) {
assert.Contains(t, err.Error(), "identity not initialized")
}
func TestController_StopRemoteMiner(t *testing.T) {
func TestController_StopRemoteMiner_Good(t *testing.T) {
controller, _, tp := setupControllerPairWithMiner(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -573,7 +569,7 @@ func TestController_StopRemoteMiner(t *testing.T) {
require.NoError(t, err, "StopRemoteMiner should succeed for existing miner")
}
func TestController_StopRemoteMiner_NotFound(t *testing.T) {
func TestController_StopRemoteMiner_NotFound_Bad(t *testing.T) {
controller, _, tp := setupControllerPairWithMiner(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -581,12 +577,10 @@ func TestController_StopRemoteMiner_NotFound(t *testing.T) {
require.Error(t, err, "StopRemoteMiner should fail for non-existent miner")
}
func TestController_StopRemoteMiner_NoIdentity(t *testing.T) {
func TestController_StopRemoteMiner_NoIdentity_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
nmNoID, err := NewNodeManagerWithPaths(
filepath.Join(t.TempDir(), "priv.key"),
filepath.Join(t.TempDir(), "node.json"),
)
keyPath, configPath := testNodeManagerPaths(t.TempDir())
nmNoID, err := NewNodeManagerWithPaths(keyPath, configPath)
require.NoError(t, err)
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
@ -596,7 +590,7 @@ func TestController_StopRemoteMiner_NoIdentity(t *testing.T) {
assert.Contains(t, err.Error(), "identity not initialized")
}
func TestController_GetRemoteLogs(t *testing.T) {
func TestController_GetRemoteLogs_Good(t *testing.T) {
controller, _, tp := setupControllerPairWithMiner(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -607,7 +601,7 @@ func TestController_GetRemoteLogs(t *testing.T) {
assert.Contains(t, lines[0], "started")
}
func TestController_GetRemoteLogs_LimitedLines(t *testing.T) {
func TestController_GetRemoteLogs_LimitedLines_Good(t *testing.T) {
controller, _, tp := setupControllerPairWithMiner(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -616,12 +610,10 @@ func TestController_GetRemoteLogs_LimitedLines(t *testing.T) {
assert.Len(t, lines, 1, "should return only 1 line")
}
func TestController_GetRemoteLogs_NoIdentity(t *testing.T) {
func TestController_GetRemoteLogs_NoIdentity_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
nmNoID, err := NewNodeManagerWithPaths(
filepath.Join(t.TempDir(), "priv.key"),
filepath.Join(t.TempDir(), "node.json"),
)
keyPath, configPath := testNodeManagerPaths(t.TempDir())
nmNoID, err := NewNodeManagerWithPaths(keyPath, configPath)
require.NoError(t, err)
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
@ -631,7 +623,7 @@ func TestController_GetRemoteLogs_NoIdentity(t *testing.T) {
assert.Contains(t, err.Error(), "identity not initialized")
}
func TestController_GetRemoteStats_WithMiners(t *testing.T) {
func TestController_GetRemoteStats_WithMiners_Good(t *testing.T) {
controller, _, tp := setupControllerPairWithMiner(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -645,12 +637,10 @@ func TestController_GetRemoteStats_WithMiners(t *testing.T) {
assert.Equal(t, 1234.5, stats.Miners[0].Hashrate)
}
func TestController_GetRemoteStats_NoIdentity(t *testing.T) {
func TestController_GetRemoteStats_NoIdentity_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
nmNoID, err := NewNodeManagerWithPaths(
filepath.Join(t.TempDir(), "priv.key"),
filepath.Join(t.TempDir(), "node.json"),
)
keyPath, configPath := testNodeManagerPaths(t.TempDir())
nmNoID, err := NewNodeManagerWithPaths(keyPath, configPath)
require.NoError(t, err)
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
@ -660,7 +650,7 @@ func TestController_GetRemoteStats_NoIdentity(t *testing.T) {
assert.Contains(t, err.Error(), "identity not initialized")
}
func TestController_ConnectToPeer_Success(t *testing.T) {
func TestController_ConnectToPeer_Success_Good(t *testing.T) {
tp := setupTestTransportPair(t)
worker := NewWorker(tp.ServerNode, tp.Server)
@ -684,7 +674,7 @@ func TestController_ConnectToPeer_Success(t *testing.T) {
assert.Equal(t, 1, tp.Client.ConnectedPeers(), "should have 1 connection after ConnectToPeer")
}
func TestController_HandleResponse_NonReply(t *testing.T) {
func TestController_HandleResponse_NonReply_Good(t *testing.T) {
tp := setupTestTransportPair(t)
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
@ -699,7 +689,7 @@ func TestController_HandleResponse_NonReply(t *testing.T) {
assert.Equal(t, 0, count)
}
func TestController_HandleResponse_FullChannel(t *testing.T) {
func TestController_HandleResponse_FullChannel_Ugly(t *testing.T) {
tp := setupTestTransportPair(t)
controller := NewController(tp.ClientNode, tp.ClientReg, tp.Client)
@ -723,12 +713,10 @@ func TestController_HandleResponse_FullChannel(t *testing.T) {
assert.False(t, exists, "pending entry should be removed after handling")
}
func TestController_PingPeer_NoIdentity(t *testing.T) {
func TestController_PingPeer_NoIdentity_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
nmNoID, _ := NewNodeManagerWithPaths(
filepath.Join(t.TempDir(), "priv.key"),
filepath.Join(t.TempDir(), "node.json"),
)
keyPath, configPath := testNodeManagerPaths(t.TempDir())
nmNoID, _ := NewNodeManagerWithPaths(keyPath, configPath)
controller := NewController(nmNoID, tp.ClientReg, tp.Client)
_, err := controller.PingPeer("some-peer")

View file

@ -28,19 +28,27 @@ const (
// IntentHandler processes a UEPS packet that has been routed by intent.
// Implementations receive the fully parsed and HMAC-verified packet.
// An error returned by the handler is propagated back to the Dispatch caller.
//
// var handler IntentHandler = func(pkt *ueps.ParsedPacket) error { return nil }
type IntentHandler func(pkt *ueps.ParsedPacket) error
// Dispatcher routes verified UEPS packets to registered intent handlers.
// It enforces a threat circuit breaker before routing: any packet whose
// ThreatScore exceeds ThreatScoreThreshold is dropped and logged.
//
// dispatcher := NewDispatcher()
//
// Design decisions:
//
// - Handlers are registered per IntentID (1:1 mapping).
//
// - Unknown intents are logged at WARN level and silently dropped (no error
// returned to the caller) to avoid back-pressure on the transport layer.
//
// - High-threat packets are dropped silently (logged at WARN) rather than
// returning an error, consistent with the "don't even parse the payload"
// philosophy from the original stub.
//
// - The dispatcher is safe for concurrent use; a RWMutex protects the
// handler map.
type Dispatcher struct {
@ -50,6 +58,8 @@ type Dispatcher struct {
}
// NewDispatcher creates a Dispatcher with no registered handlers.
//
// dispatcher := NewDispatcher()
func NewDispatcher() *Dispatcher {
return &Dispatcher{
handlers: make(map[byte]IntentHandler),

View file

@ -1,11 +1,11 @@
package node
import (
"fmt"
"sync"
"sync/atomic"
"testing"
core "dappco.re/go/core"
"dappco.re/go/core/p2p/ueps"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -28,7 +28,7 @@ func makePacket(intentID byte, threatScore uint16, payload []byte) *ueps.ParsedP
// --- Dispatcher Tests ---
func TestDispatcher_RegisterAndDispatch(t *testing.T) {
func TestDispatcher_RegisterAndDispatch_Good(t *testing.T) {
t.Run("handler receives the correct packet", func(t *testing.T) {
d := NewDispatcher()
var received *ueps.ParsedPacket
@ -49,7 +49,7 @@ func TestDispatcher_RegisterAndDispatch(t *testing.T) {
t.Run("handler error propagates to caller", func(t *testing.T) {
d := NewDispatcher()
handlerErr := fmt.Errorf("compute failed")
handlerErr := core.NewError("compute failed")
d.RegisterHandler(IntentCompute, func(pkt *ueps.ParsedPacket) error {
return handlerErr
@ -62,7 +62,7 @@ func TestDispatcher_RegisterAndDispatch(t *testing.T) {
})
}
func TestDispatcher_ThreatCircuitBreaker(t *testing.T) {
func TestDispatcher_ThreatCircuitBreaker_Good(t *testing.T) {
tests := []struct {
name string
threatScore uint16
@ -118,7 +118,7 @@ func TestDispatcher_ThreatCircuitBreaker(t *testing.T) {
}
}
func TestDispatcher_UnknownIntentDropped(t *testing.T) {
func TestDispatcher_UnknownIntentDropped_Bad(t *testing.T) {
d := NewDispatcher()
// Register handlers for known intents only
@ -133,7 +133,7 @@ func TestDispatcher_UnknownIntentDropped(t *testing.T) {
assert.ErrorIs(t, err, ErrUnknownIntent)
}
func TestDispatcher_MultipleHandlersCorrectRouting(t *testing.T) {
func TestDispatcher_MultipleHandlersCorrectRouting_Good(t *testing.T) {
d := NewDispatcher()
var handshakeCalled, computeCalled, rehabCalled, customCalled bool
@ -192,7 +192,7 @@ func TestDispatcher_MultipleHandlersCorrectRouting(t *testing.T) {
}
}
func TestDispatcher_NilAndEmptyPayload(t *testing.T) {
func TestDispatcher_NilAndEmptyPayload_Ugly(t *testing.T) {
t.Run("nil packet returns ErrNilPacket", func(t *testing.T) {
d := NewDispatcher()
err := d.Dispatch(nil)
@ -234,7 +234,7 @@ func TestDispatcher_NilAndEmptyPayload(t *testing.T) {
})
}
func TestDispatcher_ConcurrentDispatchSafety(t *testing.T) {
func TestDispatcher_ConcurrentDispatchSafety_Ugly(t *testing.T) {
d := NewDispatcher()
var count atomic.Int64
@ -261,7 +261,7 @@ func TestDispatcher_ConcurrentDispatchSafety(t *testing.T) {
assert.Equal(t, int64(goroutines), count.Load())
}
func TestDispatcher_ConcurrentRegisterAndDispatch(t *testing.T) {
func TestDispatcher_ConcurrentRegisterAndDispatch_Ugly(t *testing.T) {
d := NewDispatcher()
var count atomic.Int64
@ -301,7 +301,7 @@ func TestDispatcher_ConcurrentRegisterAndDispatch(t *testing.T) {
assert.True(t, count.Load() >= 0)
}
func TestDispatcher_ReplaceHandler(t *testing.T) {
func TestDispatcher_ReplaceHandler_Good(t *testing.T) {
d := NewDispatcher()
var firstCalled, secondCalled bool
@ -325,7 +325,7 @@ func TestDispatcher_ReplaceHandler(t *testing.T) {
assert.True(t, secondCalled, "replacement handler should be called")
}
func TestDispatcher_ThreatBlocksBeforeRouting(t *testing.T) {
func TestDispatcher_ThreatBlocksBeforeRouting_Good(t *testing.T) {
// Verify that the circuit breaker fires before intent routing,
// so even an unknown intent returns ErrThreatScoreExceeded (not ErrUnknownIntent).
d := NewDispatcher()
@ -337,7 +337,7 @@ func TestDispatcher_ThreatBlocksBeforeRouting(t *testing.T) {
"threat circuit breaker should fire before intent routing")
}
func TestDispatcher_IntentConstants(t *testing.T) {
func TestDispatcher_IntentConstants_Good(t *testing.T) {
// Verify the well-known intent IDs match the spec (RFC-021).
assert.Equal(t, byte(0x01), IntentHandshake)
assert.Equal(t, byte(0x20), IntentCompute)

View file

@ -20,6 +20,8 @@ import (
const ChallengeSize = 32
// GenerateChallenge creates a random challenge for authentication.
//
// challenge, err := GenerateChallenge()
func GenerateChallenge() ([]byte, error) {
challenge := make([]byte, ChallengeSize)
if _, err := rand.Read(challenge); err != nil {
@ -30,6 +32,8 @@ func GenerateChallenge() ([]byte, error) {
// SignChallenge creates an HMAC signature of a challenge using a shared secret.
// The signature proves possession of the shared secret without revealing it.
//
// signature := SignChallenge(challenge, sharedSecret)
func SignChallenge(challenge []byte, sharedSecret []byte) []byte {
mac := hmac.New(sha256.New, sharedSecret)
mac.Write(challenge)
@ -37,12 +41,16 @@ func SignChallenge(challenge []byte, sharedSecret []byte) []byte {
}
// VerifyChallenge verifies that a challenge response was signed with the correct shared secret.
//
// ok := VerifyChallenge(challenge, signature, sharedSecret)
func VerifyChallenge(challenge, response, sharedSecret []byte) bool {
	// Constant-time comparison against a freshly computed signature.
	return hmac.Equal(SignChallenge(challenge, sharedSecret), response)
}
// NodeRole defines the operational mode of a node.
// The valid values are declared as constants in the const block below.
//
// role := RoleWorker
type NodeRole string
const (
@ -55,6 +63,8 @@ const (
)
// NodeIdentity represents the public identity of a node.
//
// identity := NodeIdentity{Name: "worker-1", Role: RoleWorker}
type NodeIdentity struct {
ID string `json:"id"` // Derived from public key (first 16 bytes hex)
Name string `json:"name"` // Human-friendly name
@ -64,6 +74,8 @@ type NodeIdentity struct {
}
// NodeManager handles node identity operations including key generation and storage.
//
// nodeManager, err := NewNodeManager()
type NodeManager struct {
identity *NodeIdentity
privateKey []byte // Never serialized to JSON
@ -74,6 +86,8 @@ type NodeManager struct {
}
// NewNodeManager creates a new NodeManager, loading existing identity if available.
//
// nodeManager, err := NewNodeManager()
func NewNodeManager() (*NodeManager, error) {
keyPath, err := xdg.DataFile("lethean-desktop/node/private.key")
if err != nil {
@ -90,6 +104,8 @@ func NewNodeManager() (*NodeManager, error) {
// NewNodeManagerWithPaths creates a NodeManager with custom paths.
// This is primarily useful for testing to avoid xdg path caching issues.
//
// nodeManager, err := NewNodeManagerWithPaths("/srv/p2p/private.key", "/srv/p2p/node.json")
func NewNodeManagerWithPaths(keyPath, configPath string) (*NodeManager, error) {
nm := &NodeManager{
keyPath: keyPath,

View file

@ -1,35 +1,21 @@
package node
import (
"os"
"path/filepath"
"testing"
)
// setupTestNodeManager creates a NodeManager with paths in a temp directory.
// The returned cleanup func is a no-op kept only for call-site compatibility:
// t.TempDir registers automatic removal with the test framework.
// (Fix: the stripped diff interleaved the old os.MkdirTemp body with the new
// t.TempDir body, leaving a stale "failed to create temp dir" branch and
// duplicate error handling — only the new version is kept.)
func setupTestNodeManager(t *testing.T) (*NodeManager, func()) {
	tmpDir := t.TempDir()
	nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir))
	if err != nil {
		t.Fatalf("failed to create node manager: %v", err)
	}
	return nm, func() {}
}
func TestNodeIdentity(t *testing.T) {
func TestIdentity_NodeIdentity_Good(t *testing.T) {
t.Run("NewNodeManager", func(t *testing.T) {
nm, cleanup := setupTestNodeManager(t)
defer cleanup()
@ -75,14 +61,8 @@ func TestNodeIdentity(t *testing.T) {
})
t.Run("LoadExistingIdentity", func(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "node-load-test")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
keyPath := filepath.Join(tmpDir, "private.key")
configPath := filepath.Join(tmpDir, "node.json")
tmpDir := t.TempDir()
keyPath, configPath := testNodeManagerPaths(tmpDir)
// First, create an identity
nm1, err := NewNodeManagerWithPaths(keyPath, configPath)
@ -120,16 +100,11 @@ func TestNodeIdentity(t *testing.T) {
t.Run("DeriveSharedSecret", func(t *testing.T) {
// Create two node managers with separate temp directories
tmpDir1, _ := os.MkdirTemp("", "node1")
tmpDir2, _ := os.MkdirTemp("", "node2")
defer os.RemoveAll(tmpDir1)
defer os.RemoveAll(tmpDir2)
tmpDir1 := t.TempDir()
tmpDir2 := t.TempDir()
// Node 1
nm1, err := NewNodeManagerWithPaths(
filepath.Join(tmpDir1, "private.key"),
filepath.Join(tmpDir1, "node.json"),
)
nm1, err := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir1))
if err != nil {
t.Fatalf("failed to create node manager 1: %v", err)
}
@ -139,10 +114,7 @@ func TestNodeIdentity(t *testing.T) {
}
// Node 2
nm2, err := NewNodeManagerWithPaths(
filepath.Join(tmpDir2, "private.key"),
filepath.Join(tmpDir2, "node.json"),
)
nm2, err := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir2))
if err != nil {
t.Fatalf("failed to create node manager 2: %v", err)
}
@ -198,7 +170,7 @@ func TestNodeIdentity(t *testing.T) {
})
}
func TestNodeRoles(t *testing.T) {
func TestIdentity_NodeRoles_Good(t *testing.T) {
tests := []struct {
role NodeRole
expected string
@ -217,7 +189,7 @@ func TestNodeRoles(t *testing.T) {
}
}
func TestChallengeResponse(t *testing.T) {
func TestIdentity_ChallengeResponse_Good(t *testing.T) {
t.Run("GenerateChallenge", func(t *testing.T) {
challenge, err := GenerateChallenge()
if err != nil {
@ -315,21 +287,13 @@ func TestChallengeResponse(t *testing.T) {
t.Run("IntegrationWithSharedSecret", func(t *testing.T) {
// Create two nodes and test end-to-end challenge-response
tmpDir1, _ := os.MkdirTemp("", "node-challenge-1")
tmpDir2, _ := os.MkdirTemp("", "node-challenge-2")
defer os.RemoveAll(tmpDir1)
defer os.RemoveAll(tmpDir2)
tmpDir1 := t.TempDir()
tmpDir2 := t.TempDir()
nm1, _ := NewNodeManagerWithPaths(
filepath.Join(tmpDir1, "private.key"),
filepath.Join(tmpDir1, "node.json"),
)
nm1, _ := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir1))
nm1.GenerateIdentity("challenger", RoleDual)
nm2, _ := NewNodeManagerWithPaths(
filepath.Join(tmpDir2, "private.key"),
filepath.Join(tmpDir2, "node.json"),
)
nm2, _ := NewNodeManagerWithPaths(testNodeManagerPaths(tmpDir2))
nm2.GenerateIdentity("responder", RoleDual)
// Challenger generates challenge
@ -352,7 +316,7 @@ func TestChallengeResponse(t *testing.T) {
})
}
func TestNodeManager_DeriveSharedSecret_NoIdentity(t *testing.T) {
func TestIdentity_NodeManager_DeriveSharedSecret_NoIdentity_Bad(t *testing.T) {
nm, cleanup := setupTestNodeManager(t)
defer cleanup()
@ -363,7 +327,7 @@ func TestNodeManager_DeriveSharedSecret_NoIdentity(t *testing.T) {
}
}
func TestNodeManager_GetIdentity_NilWhenNoIdentity(t *testing.T) {
func TestIdentity_NodeManager_GetIdentity_NilWhenNoIdentity_Bad(t *testing.T) {
nm, cleanup := setupTestNodeManager(t)
defer cleanup()
@ -373,11 +337,11 @@ func TestNodeManager_GetIdentity_NilWhenNoIdentity(t *testing.T) {
}
}
func TestNodeManager_Delete_NoFiles(t *testing.T) {
func TestIdentity_NodeManager_Delete_NoFiles_Bad(t *testing.T) {
tmpDir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(tmpDir, "nonexistent.key"),
filepath.Join(tmpDir, "nonexistent.json"),
testJoinPath(tmpDir, "nonexistent.key"),
testJoinPath(tmpDir, "nonexistent.json"),
)
if err != nil {
t.Fatalf("failed to create node manager: %v", err)

View file

@ -3,11 +3,9 @@ package node
import (
"bufio"
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"net/url"
"path/filepath"
"sync"
"sync/atomic"
"testing"
@ -29,7 +27,7 @@ import (
// 5. Graceful shutdown with disconnect messages
// ============================================================================
func TestIntegration_FullNodeLifecycle(t *testing.T) {
func TestIntegration_FullNodeLifecycle_Good(t *testing.T) {
// ----------------------------------------------------------------
// Step 1: Identity creation
// ----------------------------------------------------------------
@ -240,7 +238,7 @@ func TestIntegration_FullNodeLifecycle(t *testing.T) {
// TestIntegration_SharedSecretAgreement verifies that two independently created
// nodes derive the same shared secret via ECDH.
func TestIntegration_SharedSecretAgreement(t *testing.T) {
func TestIntegration_SharedSecretAgreement_Good(t *testing.T) {
nodeA := testNode(t, "secret-node-a", RoleDual)
nodeB := testNode(t, "secret-node-b", RoleDual)
@ -260,7 +258,7 @@ func TestIntegration_SharedSecretAgreement(t *testing.T) {
// TestIntegration_TwoNodeBidirectionalMessages verifies that both nodes
// can send and receive encrypted messages after the handshake.
func TestIntegration_TwoNodeBidirectionalMessages(t *testing.T) {
func TestIntegration_TwoNodeBidirectionalMessages_Good(t *testing.T) {
controller, _, tp := setupControllerPair(t)
serverID := tp.ServerNode.GetIdentity().ID
@ -285,7 +283,7 @@ func TestIntegration_TwoNodeBidirectionalMessages(t *testing.T) {
// TestIntegration_MultiPeerTopology verifies that a controller can
// simultaneously communicate with multiple workers.
func TestIntegration_MultiPeerTopology(t *testing.T) {
func TestIntegration_MultiPeerTopology_Good(t *testing.T) {
controllerNM := testNode(t, "multi-controller", RoleController)
controllerReg := testRegistry(t)
controllerTransport := NewTransport(controllerNM, controllerReg, DefaultTransportConfig())
@ -343,10 +341,9 @@ func TestIntegration_MultiPeerTopology(t *testing.T) {
// TestIntegration_IdentityPersistenceAndReload verifies that a node identity
// can be generated, persisted, and reloaded from disk.
func TestIntegration_IdentityPersistenceAndReload(t *testing.T) {
func TestIntegration_IdentityPersistenceAndReload_Good(t *testing.T) {
dir := t.TempDir()
keyPath := filepath.Join(dir, "private.key")
configPath := filepath.Join(dir, "node.json")
keyPath, configPath := testNodeManagerPaths(dir)
// Create and persist identity.
nm1, err := NewNodeManagerWithPaths(keyPath, configPath)
@ -386,10 +383,7 @@ func TestIntegration_IdentityPersistenceAndReload(t *testing.T) {
// stmfGenerateKeyPair is a helper that generates a keypair and returns
// the public key as base64 (for use in DeriveSharedSecret tests).
func stmfGenerateKeyPair(dir string) (string, error) {
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
return "", err
}
@ -399,10 +393,9 @@ func stmfGenerateKeyPair(dir string) (string, error) {
return nm.GetIdentity().PublicKey, nil
}
// TestIntegration_UEPSFullRoundTrip exercises a complete UEPS packet
// lifecycle: build, sign, transmit (simulated), read, verify, dispatch.
func TestIntegration_UEPSFullRoundTrip(t *testing.T) {
func TestIntegration_UEPSFullRoundTrip_Ugly(t *testing.T) {
nodeA := testNode(t, "ueps-node-a", RoleController)
nodeB := testNode(t, "ueps-node-b", RoleWorker)
@ -453,7 +446,7 @@ func TestIntegration_UEPSFullRoundTrip(t *testing.T) {
// TestIntegration_UEPSIntegrityFailure verifies that a tampered UEPS packet
// is rejected by HMAC verification.
func TestIntegration_UEPSIntegrityFailure(t *testing.T) {
func TestIntegration_UEPSIntegrityFailure_Bad(t *testing.T) {
nodeA := testNode(t, "integrity-a", RoleController)
nodeB := testNode(t, "integrity-b", RoleWorker)
@ -484,7 +477,7 @@ func TestIntegration_UEPSIntegrityFailure(t *testing.T) {
// TestIntegration_AllowlistHandshakeRejection verifies that a peer not in the
// allowlist is rejected during the WebSocket handshake.
func TestIntegration_AllowlistHandshakeRejection(t *testing.T) {
func TestIntegration_AllowlistHandshakeRejection_Bad(t *testing.T) {
workerNM := testNode(t, "allowlist-worker", RoleWorker)
workerReg := testRegistry(t)
workerReg.SetAuthMode(PeerAuthAllowlist)
@ -521,7 +514,7 @@ func TestIntegration_AllowlistHandshakeRejection(t *testing.T) {
// TestIntegration_AllowlistHandshakeAccepted verifies that an allowlisted
// peer can connect successfully.
func TestIntegration_AllowlistHandshakeAccepted(t *testing.T) {
func TestIntegration_AllowlistHandshakeAccepted_Good(t *testing.T) {
workerNM := testNode(t, "allowlist-worker-ok", RoleWorker)
workerReg := testRegistry(t)
workerReg.SetAuthMode(PeerAuthAllowlist)
@ -563,7 +556,7 @@ func TestIntegration_AllowlistHandshakeAccepted(t *testing.T) {
// TestIntegration_DispatcherWithRealUEPSPackets builds real UEPS packets
// from wire bytes and routes them through the dispatcher.
func TestIntegration_DispatcherWithRealUEPSPackets(t *testing.T) {
func TestIntegration_DispatcherWithRealUEPSPackets_Good(t *testing.T) {
sharedSecret := make([]byte, 32)
for i := range sharedSecret {
sharedSecret[i] = byte(i ^ 0x42)
@ -614,7 +607,7 @@ func TestIntegration_DispatcherWithRealUEPSPackets(t *testing.T) {
// TestIntegration_MessageSerialiseDeserialise verifies that messages survive
// the full serialisation/encryption/decryption/deserialisation pipeline
// with all fields intact.
func TestIntegration_MessageSerialiseDeserialise(t *testing.T) {
func TestIntegration_MessageSerialiseDeserialise_Good(t *testing.T) {
tp := setupTestTransportPair(t)
pc := tp.connectClient(t)
@ -653,14 +646,14 @@ func TestIntegration_MessageSerialiseDeserialise(t *testing.T) {
assert.Equal(t, original.ReplyTo, decrypted.ReplyTo)
var originalStats, decryptedStats StatsPayload
require.NoError(t, json.Unmarshal(original.Payload, &originalStats))
require.NoError(t, json.Unmarshal(decrypted.Payload, &decryptedStats))
testJSONUnmarshal(t, original.Payload, &originalStats)
testJSONUnmarshal(t, decrypted.Payload, &decryptedStats)
assert.Equal(t, originalStats, decryptedStats)
}
// TestIntegration_GetRemoteStats_EndToEnd tests the full stats retrieval flow
// across a real WebSocket connection.
func TestIntegration_GetRemoteStats_EndToEnd(t *testing.T) {
func TestIntegration_GetRemoteStats_EndToEnd_Good(t *testing.T) {
tp := setupTestTransportPair(t)
worker := NewWorker(tp.ServerNode, tp.Server)

View file

@ -28,6 +28,8 @@ const (
// Connection wraps a net.Conn and provides framed Levin packet I/O.
// All writes are serialised by an internal mutex, making it safe to call
// WritePacket and WriteResponse concurrently from multiple goroutines.
//
// connection := NewConnection(conn)
type Connection struct {
// MaxPayloadSize is the upper bound accepted for incoming payloads.
// Defaults to the package-level MaxPayloadSize (100 MB).
@ -44,6 +46,8 @@ type Connection struct {
}
// NewConnection creates a Connection that wraps conn with sensible defaults.
//
// connection := NewConnection(conn)
func NewConnection(conn net.Conn) *Connection {
return &Connection{
MaxPayloadSize: MaxPayloadSize,

View file

@ -12,7 +12,7 @@ import (
"github.com/stretchr/testify/require"
)
func TestConnection_RoundTrip(t *testing.T) {
func TestConnection_RoundTrip_Ugly(t *testing.T) {
a, b := net.Pipe()
defer a.Close()
defer b.Close()
@ -41,7 +41,7 @@ func TestConnection_RoundTrip(t *testing.T) {
assert.Equal(t, payload, data)
}
func TestConnection_EmptyPayload(t *testing.T) {
func TestConnection_EmptyPayload_Ugly(t *testing.T) {
a, b := net.Pipe()
defer a.Close()
defer b.Close()
@ -64,7 +64,7 @@ func TestConnection_EmptyPayload(t *testing.T) {
assert.Nil(t, data)
}
func TestConnection_Response(t *testing.T) {
func TestConnection_Response_Good(t *testing.T) {
a, b := net.Pipe()
defer a.Close()
defer b.Close()
@ -91,7 +91,7 @@ func TestConnection_Response(t *testing.T) {
assert.Equal(t, payload, data)
}
func TestConnection_PayloadTooBig(t *testing.T) {
func TestConnection_PayloadTooBig_Bad(t *testing.T) {
a, b := net.Pipe()
defer a.Close()
defer b.Close()
@ -125,7 +125,7 @@ func TestConnection_PayloadTooBig(t *testing.T) {
require.NoError(t, <-errCh)
}
func TestConnection_ReadTimeout(t *testing.T) {
func TestConnection_ReadTimeout_Bad(t *testing.T) {
a, b := net.Pipe()
defer a.Close()
defer b.Close()
@ -143,7 +143,7 @@ func TestConnection_ReadTimeout(t *testing.T) {
assert.True(t, netErr.Timeout(), "expected timeout error")
}
func TestConnection_RemoteAddr(t *testing.T) {
func TestConnection_RemoteAddr_Good(t *testing.T) {
a, b := net.Pipe()
defer a.Close()
defer b.Close()
@ -153,7 +153,7 @@ func TestConnection_RemoteAddr(t *testing.T) {
assert.NotEmpty(t, addr)
}
func TestConnection_Close(t *testing.T) {
func TestConnection_Close_Ugly(t *testing.T) {
a, b := net.Pipe()
defer b.Close()

View file

@ -48,6 +48,8 @@ var (
)
// Header is the 33-byte packed header that prefixes every Levin message.
//
// header := Header{Command: CommandHandshake, ExpectResponse: true}
type Header struct {
Signature uint64
PayloadSize uint64
@ -59,6 +61,8 @@ type Header struct {
}
// EncodeHeader serialises h into a fixed-size 33-byte array (little-endian).
//
// encoded := EncodeHeader(header)
func EncodeHeader(h *Header) [HeaderSize]byte {
var buf [HeaderSize]byte
binary.LittleEndian.PutUint64(buf[0:8], h.Signature)
@ -77,6 +81,8 @@ func EncodeHeader(h *Header) [HeaderSize]byte {
// DecodeHeader deserialises a 33-byte array into a Header, validating
// the magic signature.
//
// header, err := DecodeHeader(buf)
func DecodeHeader(buf [HeaderSize]byte) (Header, error) {
var h Header
h.Signature = binary.LittleEndian.Uint64(buf[0:8])

View file

@ -11,11 +11,11 @@ import (
"github.com/stretchr/testify/require"
)
func TestHeaderSizeIs33(t *testing.T) {
func TestHeader_SizeIs33_Good(t *testing.T) {
assert.Equal(t, 33, HeaderSize)
}
func TestEncodeHeader_KnownValues(t *testing.T) {
func TestHeader_EncodeHeader_KnownValues_Good(t *testing.T) {
h := &Header{
Signature: Signature,
PayloadSize: 256,
@ -56,7 +56,7 @@ func TestEncodeHeader_KnownValues(t *testing.T) {
assert.Equal(t, uint32(0), pv)
}
func TestEncodeHeader_ExpectResponseFalse(t *testing.T) {
func TestHeader_EncodeHeader_ExpectResponseFalse_Good(t *testing.T) {
h := &Header{
Signature: Signature,
PayloadSize: 42,
@ -68,7 +68,7 @@ func TestEncodeHeader_ExpectResponseFalse(t *testing.T) {
assert.Equal(t, byte(0x00), buf[16])
}
func TestEncodeHeader_NegativeReturnCode(t *testing.T) {
func TestHeader_EncodeHeader_NegativeReturnCode_Good(t *testing.T) {
h := &Header{
Signature: Signature,
PayloadSize: 0,
@ -81,7 +81,7 @@ func TestEncodeHeader_NegativeReturnCode(t *testing.T) {
assert.Equal(t, ReturnErrFormat, rc)
}
func TestDecodeHeader_RoundTrip(t *testing.T) {
func TestHeader_DecodeHeader_RoundTrip_Ugly(t *testing.T) {
original := &Header{
Signature: Signature,
PayloadSize: 1024,
@ -105,7 +105,7 @@ func TestDecodeHeader_RoundTrip(t *testing.T) {
assert.Equal(t, original.ProtocolVersion, decoded.ProtocolVersion)
}
func TestDecodeHeader_AllCommands(t *testing.T) {
func TestHeader_DecodeHeader_AllCommands_Good(t *testing.T) {
commands := []uint32{
CommandHandshake,
CommandTimedSync,
@ -131,7 +131,7 @@ func TestDecodeHeader_AllCommands(t *testing.T) {
}
}
func TestDecodeHeader_BadSignature(t *testing.T) {
func TestHeader_DecodeHeader_BadSignature_Bad(t *testing.T) {
h := &Header{
Signature: 0xDEADBEEF,
PayloadSize: 0,
@ -143,7 +143,7 @@ func TestDecodeHeader_BadSignature(t *testing.T) {
assert.ErrorIs(t, err, ErrBadSignature)
}
func TestDecodeHeader_PayloadTooBig(t *testing.T) {
func TestHeader_DecodeHeader_PayloadTooBig_Bad(t *testing.T) {
h := &Header{
Signature: Signature,
PayloadSize: MaxPayloadSize + 1,
@ -155,7 +155,7 @@ func TestDecodeHeader_PayloadTooBig(t *testing.T) {
assert.ErrorIs(t, err, ErrPayloadTooBig)
}
func TestDecodeHeader_MaxPayloadExact(t *testing.T) {
func TestHeader_DecodeHeader_MaxPayloadExact_Ugly(t *testing.T) {
h := &Header{
Signature: Signature,
PayloadSize: MaxPayloadSize,

View file

@ -50,10 +50,14 @@ var (
// Section is a map of named values forming a portable storage section.
// Go maps are unordered; deterministic (alphabetical-by-key) ordering is
// applied at encode time by EncodeStorage.
//
// section := Section{"id": StringVal([]byte("peer-1"))}
type Section map[string]Value
// Value holds a typed portable storage value. Use the constructor functions
// (Uint64Val, StringVal, ObjectVal, etc.) to create instances.
//
// value := StringVal([]byte("peer-1"))
type Value struct {
Type uint8
@ -77,39 +81,63 @@ type Value struct {
// ---------------------------------------------------------------------------
// Uint64Val wraps v in a Value tagged TypeUint64.
//
// value := Uint64Val(42)
func Uint64Val(v uint64) Value {
	return Value{Type: TypeUint64, uintVal: v}
}

// Uint32Val wraps v in a Value tagged TypeUint32.
//
// value := Uint32Val(42)
func Uint32Val(v uint32) Value {
	return Value{Type: TypeUint32, uintVal: uint64(v)}
}

// Uint16Val wraps v in a Value tagged TypeUint16.
//
// value := Uint16Val(42)
func Uint16Val(v uint16) Value {
	return Value{Type: TypeUint16, uintVal: uint64(v)}
}

// Uint8Val wraps v in a Value tagged TypeUint8.
//
// value := Uint8Val(42)
func Uint8Val(v uint8) Value {
	return Value{Type: TypeUint8, uintVal: uint64(v)}
}

// Int64Val wraps v in a Value tagged TypeInt64.
//
// value := Int64Val(42)
func Int64Val(v int64) Value {
	return Value{Type: TypeInt64, intVal: v}
}

// Int32Val wraps v in a Value tagged TypeInt32.
//
// value := Int32Val(42)
func Int32Val(v int32) Value {
	return Value{Type: TypeInt32, intVal: int64(v)}
}

// Int16Val wraps v in a Value tagged TypeInt16.
//
// value := Int16Val(42)
func Int16Val(v int16) Value {
	return Value{Type: TypeInt16, intVal: int64(v)}
}

// Int8Val wraps v in a Value tagged TypeInt8.
//
// value := Int8Val(42)
func Int8Val(v int8) Value {
	return Value{Type: TypeInt8, intVal: int64(v)}
}

// BoolVal wraps v in a Value tagged TypeBool.
//
// value := BoolVal(true)
func BoolVal(v bool) Value {
	return Value{Type: TypeBool, boolVal: v}
}

// DoubleVal wraps v in a Value tagged TypeDouble.
//
// value := DoubleVal(3.14)
func DoubleVal(v float64) Value {
	return Value{Type: TypeDouble, floatVal: v}
}

// StringVal wraps v in a Value tagged TypeString. The slice is stored
// directly, not copied — callers must not mutate it afterwards.
//
// value := StringVal([]byte("hello"))
func StringVal(v []byte) Value {
	return Value{Type: TypeString, bytesVal: v}
}

// ObjectVal wraps the nested Section s in a Value tagged TypeObject.
//
// value := ObjectVal(Section{"id": StringVal([]byte("peer-1"))})
func ObjectVal(s Section) Value {
	return Value{Type: TypeObject, objectVal: s}
}
// ---------------------------------------------------------------------------
@ -117,21 +145,29 @@ func ObjectVal(s Section) Value { return Value{Type: TypeObject, objectVal: s} }
// ---------------------------------------------------------------------------
// Uint64ArrayVal creates a typed array of uint64 values.
// The slice is stored directly, not copied.
//
// value := Uint64ArrayVal([]uint64{1, 2, 3})
func Uint64ArrayVal(vs []uint64) Value {
	return Value{Type: ArrayFlag | TypeUint64, uint64Array: vs}
}

// Uint32ArrayVal creates a typed array of uint32 values.
// The slice is stored directly, not copied.
//
// value := Uint32ArrayVal([]uint32{1, 2, 3})
func Uint32ArrayVal(vs []uint32) Value {
	return Value{Type: ArrayFlag | TypeUint32, uint32Array: vs}
}

// StringArrayVal creates a typed array of byte-string values.
// The slices are stored directly, not copied.
//
// value := StringArrayVal([][]byte{[]byte("a"), []byte("b")})
func StringArrayVal(vs [][]byte) Value {
	return Value{Type: ArrayFlag | TypeString, stringArray: vs}
}

// ObjectArrayVal creates a typed array of Section values.
// The slice is stored directly, not copied.
//
// value := ObjectArrayVal([]Section{{"id": StringVal([]byte("peer-1"))}})
func ObjectArrayVal(vs []Section) Value {
	return Value{Type: ArrayFlag | TypeObject, objectArray: vs}
}
@ -279,6 +315,8 @@ func (v Value) AsSectionArray() ([]Section, error) {
// EncodeStorage serialises a Section to the portable storage binary format,
// including the 9-byte header. Keys are sorted alphabetically to ensure
// deterministic output.
//
// data, err := EncodeStorage(section)
func EncodeStorage(s Section) ([]byte, error) {
buf := make([]byte, 0, 256)
@ -450,6 +488,8 @@ func encodeArray(buf []byte, v Value) ([]byte, error) {
// DecodeStorage deserialises portable storage binary data (including the
// 9-byte header) into a Section.
//
// section, err := DecodeStorage(data)
func DecodeStorage(data []byte) (Section, error) {
if len(data) < StorageHeaderSize {
return nil, ErrStorageTruncated

View file

@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/require"
)
func TestEncodeStorage_EmptySection(t *testing.T) {
func TestStorage_EncodeStorage_EmptySection_Ugly(t *testing.T) {
s := Section{}
data, err := EncodeStorage(s)
require.NoError(t, err)
@ -35,7 +35,7 @@ func TestEncodeStorage_EmptySection(t *testing.T) {
assert.Equal(t, byte(0x00), data[9])
}
func TestStorage_PrimitivesRoundTrip(t *testing.T) {
func TestStorage_PrimitivesRoundTrip_Ugly(t *testing.T) {
s := Section{
"u64": Uint64Val(0xDEADBEEFCAFEBABE),
"u32": Uint32Val(0xCAFEBABE),
@ -106,7 +106,7 @@ func TestStorage_PrimitivesRoundTrip(t *testing.T) {
assert.Equal(t, 3.141592653589793, pi)
}
func TestStorage_NestedObject(t *testing.T) {
func TestStorage_NestedObject_Good(t *testing.T) {
inner := Section{
"port": Uint16Val(18080),
"host": StringVal([]byte("127.0.0.1")),
@ -138,7 +138,7 @@ func TestStorage_NestedObject(t *testing.T) {
assert.Equal(t, []byte("127.0.0.1"), host)
}
func TestStorage_Uint64Array(t *testing.T) {
func TestStorage_Uint64Array_Good(t *testing.T) {
s := Section{
"heights": Uint64ArrayVal([]uint64{10, 20, 30}),
}
@ -154,7 +154,7 @@ func TestStorage_Uint64Array(t *testing.T) {
assert.Equal(t, []uint64{10, 20, 30}, arr)
}
func TestStorage_StringArray(t *testing.T) {
func TestStorage_StringArray_Good(t *testing.T) {
s := Section{
"peers": StringArrayVal([][]byte{[]byte("foo"), []byte("bar")}),
}
@ -172,7 +172,7 @@ func TestStorage_StringArray(t *testing.T) {
assert.Equal(t, []byte("bar"), arr[1])
}
func TestStorage_ObjectArray(t *testing.T) {
func TestStorage_ObjectArray_Good(t *testing.T) {
sections := []Section{
{"id": Uint32Val(1), "name": StringVal([]byte("alice"))},
{"id": Uint32Val(2), "name": StringVal([]byte("bob"))},
@ -208,7 +208,7 @@ func TestStorage_ObjectArray(t *testing.T) {
assert.Equal(t, []byte("bob"), name2)
}
func TestDecodeStorage_BadSignature(t *testing.T) {
func TestStorage_DecodeStorage_BadSignature_Bad(t *testing.T) {
// Corrupt the first 4 bytes.
data := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x01, 0x02, 0x01, 0x01, 0x00}
_, err := DecodeStorage(data)
@ -216,16 +216,16 @@ func TestDecodeStorage_BadSignature(t *testing.T) {
assert.ErrorIs(t, err, ErrStorageBadSignature)
}
func TestDecodeStorage_TooShort(t *testing.T) {
func TestStorage_DecodeStorage_TooShort_Bad(t *testing.T) {
_, err := DecodeStorage([]byte{0x01, 0x11})
require.Error(t, err)
assert.ErrorIs(t, err, ErrStorageTruncated)
}
func TestStorage_ByteIdenticalReencode(t *testing.T) {
func TestStorage_ByteIdenticalReencode_Ugly(t *testing.T) {
s := Section{
"alpha": Uint64Val(999),
"bravo": StringVal([]byte("deterministic")),
"alpha": Uint64Val(999),
"bravo": StringVal([]byte("deterministic")),
"charlie": BoolVal(false),
"delta": ObjectVal(Section{
"x": Int32Val(-42),
@ -246,7 +246,7 @@ func TestStorage_ByteIdenticalReencode(t *testing.T) {
assert.Equal(t, data1, data2, "re-encoded bytes must be identical")
}
func TestStorage_TypeMismatchErrors(t *testing.T) {
func TestStorage_TypeMismatchErrors_Bad(t *testing.T) {
v := Uint64Val(42)
_, err := v.AsUint32()
@ -265,7 +265,7 @@ func TestStorage_TypeMismatchErrors(t *testing.T) {
assert.ErrorIs(t, err, ErrStorageTypeMismatch)
}
func TestStorage_Uint32Array(t *testing.T) {
func TestStorage_Uint32Array_Good(t *testing.T) {
s := Section{
"ports": Uint32ArrayVal([]uint32{8080, 8443, 9090}),
}
@ -281,7 +281,7 @@ func TestStorage_Uint32Array(t *testing.T) {
assert.Equal(t, []uint32{8080, 8443, 9090}, arr)
}
func TestDecodeStorage_BadVersion(t *testing.T) {
func TestStorage_DecodeStorage_BadVersion_Bad(t *testing.T) {
// Valid signatures but version 2 instead of 1.
data := []byte{0x01, 0x11, 0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x02, 0x00}
_, err := DecodeStorage(data)
@ -289,11 +289,11 @@ func TestDecodeStorage_BadVersion(t *testing.T) {
assert.ErrorIs(t, err, ErrStorageBadVersion)
}
func TestStorage_EmptyArrays(t *testing.T) {
func TestStorage_EmptyArrays_Ugly(t *testing.T) {
s := Section{
"empty_u64": Uint64ArrayVal([]uint64{}),
"empty_str": StringArrayVal([][]byte{}),
"empty_obj": ObjectArrayVal([]Section{}),
"empty_u64": Uint64ArrayVal([]uint64{}),
"empty_str": StringArrayVal([][]byte{}),
"empty_obj": ObjectArrayVal([]Section{}),
}
data, err := EncodeStorage(s)
@ -315,7 +315,7 @@ func TestStorage_EmptyArrays(t *testing.T) {
assert.Empty(t, objarr)
}
func TestStorage_BoolFalseRoundTrip(t *testing.T) {
func TestStorage_BoolFalseRoundTrip_Ugly(t *testing.T) {
s := Section{
"off": BoolVal(false),
"on": BoolVal(true),

View file

@ -31,6 +31,8 @@ var ErrVarintOverflow = core.E("levin", "varint overflow", nil)
// PackVarint encodes v using the epee portable-storage varint scheme.
// The low two bits of the first byte indicate the total encoded width;
// the remaining bits carry the value in little-endian order.
//
// encoded := PackVarint(42)
func PackVarint(v uint64) []byte {
switch {
case v <= varintMax1:
@ -55,6 +57,8 @@ func PackVarint(v uint64) []byte {
// UnpackVarint decodes one epee portable-storage varint from buf.
// It returns the decoded value, the number of bytes consumed, and any error.
//
// value, err := UnpackVarint(data)
func UnpackVarint(buf []byte) (value uint64, bytesConsumed int, err error) {
if len(buf) == 0 {
return 0, 0, ErrVarintTruncated

View file

@ -10,41 +10,41 @@ import (
"github.com/stretchr/testify/require"
)
func TestPackVarint_Value5(t *testing.T) {
func TestVarint_PackVarint_Value5_Good(t *testing.T) {
// 5 << 2 | 0x00 = 20 = 0x14
got := PackVarint(5)
assert.Equal(t, []byte{0x14}, got)
}
func TestPackVarint_Value100(t *testing.T) {
func TestVarint_PackVarint_Value100_Good(t *testing.T) {
// 100 << 2 | 0x01 = 401 = 0x0191 → LE [0x91, 0x01]
got := PackVarint(100)
assert.Equal(t, []byte{0x91, 0x01}, got)
}
func TestPackVarint_Value65536(t *testing.T) {
func TestVarint_PackVarint_Value65536_Good(t *testing.T) {
// 65536 << 2 | 0x02 = 262146 = 0x00040002 → LE [0x02, 0x00, 0x04, 0x00]
got := PackVarint(65536)
assert.Equal(t, []byte{0x02, 0x00, 0x04, 0x00}, got)
}
func TestPackVarint_Value2Billion(t *testing.T) {
func TestVarint_PackVarint_Value2Billion_Good(t *testing.T) {
got := PackVarint(2_000_000_000)
require.Len(t, got, 8)
// Low 2 bits must be 0x03 (8-byte mark).
assert.Equal(t, byte(0x03), got[0]&0x03)
}
func TestPackVarint_Zero(t *testing.T) {
func TestVarint_PackVarint_Zero_Ugly(t *testing.T) {
got := PackVarint(0)
assert.Equal(t, []byte{0x00}, got)
}
func TestPackVarint_Boundaries(t *testing.T) {
func TestVarint_PackVarint_Boundaries_Good(t *testing.T) {
tests := []struct {
name string
value uint64
wantLen int
name string
value uint64
wantLen int
}{
{"1-byte max (63)", 63, 1},
{"2-byte min (64)", 64, 2},
@ -63,7 +63,7 @@ func TestPackVarint_Boundaries(t *testing.T) {
}
}
func TestVarint_RoundTrip(t *testing.T) {
func TestVarint_RoundTrip_Ugly(t *testing.T) {
values := []uint64{
0, 1, 63, 64, 100, 16_383, 16_384,
1_073_741_823, 1_073_741_824,
@ -79,13 +79,13 @@ func TestVarint_RoundTrip(t *testing.T) {
}
}
func TestUnpackVarint_EmptyInput(t *testing.T) {
func TestVarint_UnpackVarint_EmptyInput_Ugly(t *testing.T) {
_, _, err := UnpackVarint([]byte{})
require.Error(t, err)
assert.ErrorIs(t, err, ErrVarintTruncated)
}
func TestUnpackVarint_Truncated2Byte(t *testing.T) {
func TestVarint_UnpackVarint_Truncated2Byte_Bad(t *testing.T) {
// Encode 64 (needs 2 bytes), then only pass 1 byte.
buf := PackVarint(64)
require.Len(t, buf, 2)
@ -94,7 +94,7 @@ func TestUnpackVarint_Truncated2Byte(t *testing.T) {
assert.ErrorIs(t, err, ErrVarintTruncated)
}
func TestUnpackVarint_Truncated4Byte(t *testing.T) {
func TestVarint_UnpackVarint_Truncated4Byte_Bad(t *testing.T) {
buf := PackVarint(16_384)
require.Len(t, buf, 4)
_, _, err := UnpackVarint(buf[:2])
@ -102,7 +102,7 @@ func TestUnpackVarint_Truncated4Byte(t *testing.T) {
assert.ErrorIs(t, err, ErrVarintTruncated)
}
func TestUnpackVarint_Truncated8Byte(t *testing.T) {
func TestVarint_UnpackVarint_Truncated8Byte_Bad(t *testing.T) {
buf := PackVarint(1_073_741_824)
require.Len(t, buf, 8)
_, _, err := UnpackVarint(buf[:4])
@ -110,7 +110,7 @@ func TestUnpackVarint_Truncated8Byte(t *testing.T) {
assert.ErrorIs(t, err, ErrVarintTruncated)
}
func TestUnpackVarint_ExtraBytes(t *testing.T) {
func TestVarint_UnpackVarint_ExtraBytes_Good(t *testing.T) {
// Ensure that extra trailing bytes are not consumed.
buf := append(PackVarint(42), 0xFF, 0xFF)
decoded, consumed, err := UnpackVarint(buf)
@ -119,7 +119,7 @@ func TestUnpackVarint_ExtraBytes(t *testing.T) {
assert.Equal(t, 1, consumed)
}
func TestPackVarint_SizeMarkBits(t *testing.T) {
func TestVarint_PackVarint_SizeMarkBits_Good(t *testing.T) {
tests := []struct {
name string
value uint64

View file

@ -1,7 +1,6 @@
package node
import (
"encoding/json"
"slices"
"time"
@ -19,17 +18,49 @@ const (
// SupportedProtocolVersions lists all protocol versions this node supports.
// Used for version negotiation during handshake; IsProtocolVersionSupported
// consults this slice.
//
// versions := SupportedProtocolVersions
var SupportedProtocolVersions = []string{"1.0"}
// RawMessage is the message payload byte slice used for deferred JSON decoding.
type RawMessage = json.RawMessage
// RawMessage stores an already-encoded JSON payload for deferred decoding.
//
// payload := RawMessage(`{"pool":"pool.example.com:3333"}`)
type RawMessage []byte

// MarshalJSON emits the stored payload bytes verbatim; a nil RawMessage
// encodes as the JSON literal null.
//
// data, err := RawMessage(`{"ok":true}`).MarshalJSON()
func (m RawMessage) MarshalJSON() ([]byte, error) {
	if m != nil {
		return m, nil
	}
	return []byte("null"), nil
}
// UnmarshalJSON stores the raw JSON payload bytes without decoding them,
// reusing m's backing array where capacity allows.
//
// var payload RawMessage
// _ = payload.UnmarshalJSON([]byte(`{"ok":true}`))
func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return core.E("node.RawMessage.UnmarshalJSON", "raw message target is nil", nil)
	}
	buf := (*m)[:0]
	buf = append(buf, data...)
	*m = buf
	return nil
}
// IsProtocolVersionSupported reports whether version can be negotiated by
// this node, i.e. whether it appears in SupportedProtocolVersions.
//
// ok := IsProtocolVersionSupported("1.0")
func IsProtocolVersionSupported(version string) bool {
	known := slices.Contains(SupportedProtocolVersions, version)
	return known
}
// MessageType defines the type of P2P message. Valid values are the Msg*
// constants declared below.
//
// msgType := MsgPing
type MessageType string
const (
@ -60,6 +91,8 @@ const (
)
// Message represents a P2P message between nodes.
//
// msg, err := NewMessage(MsgPing, "controller", "worker", PingPayload{SentAt: time.Now().UnixMilli()})
type Message struct {
ID string `json:"id"` // UUID
Type MessageType `json:"type"`
@ -71,6 +104,8 @@ type Message struct {
}
// NewMessage creates a new message with a generated ID and timestamp.
//
// msg, err := NewMessage(MsgPing, "controller", "worker", PingPayload{SentAt: 42})
func NewMessage(msgType MessageType, from, to string, payload any) (*Message, error) {
var payloadBytes RawMessage
if payload != nil {
@ -78,7 +113,7 @@ func NewMessage(msgType MessageType, from, to string, payload any) (*Message, er
if err != nil {
return nil, err
}
payloadBytes = data
payloadBytes = RawMessage(data)
}
return &Message{
@ -116,6 +151,8 @@ func (m *Message) ParsePayload(v any) error {
// --- Payload Types ---
// HandshakePayload is sent during connection establishment.
//
// payload := HandshakePayload{Identity: NodeIdentity{Name: "worker-1"}, Version: ProtocolVersion}
type HandshakePayload struct {
Identity NodeIdentity `json:"identity"`
Challenge []byte `json:"challenge,omitempty"` // Random bytes for auth
@ -123,6 +160,8 @@ type HandshakePayload struct {
}
// HandshakeAckPayload is the response to a handshake.
//
// ack := HandshakeAckPayload{Accepted: true}
type HandshakeAckPayload struct {
Identity NodeIdentity `json:"identity"`
ChallengeResponse []byte `json:"challengeResponse,omitempty"`
@ -131,17 +170,23 @@ type HandshakeAckPayload struct {
}
// PingPayload is the body of a MsgPing keepalive, used to measure latency.
// The timestamp is echoed back in the corresponding PongPayload.
//
// payload := PingPayload{SentAt: 42}
type PingPayload struct {
	SentAt int64 `json:"sentAt"` // Unix timestamp in milliseconds
}
// PongPayload is the body of a MsgPong reply to a ping; the echoed SentAt
// lets the pinger compute round-trip latency.
//
// payload := PongPayload{SentAt: 42, ReceivedAt: 43}
type PongPayload struct {
	SentAt     int64 `json:"sentAt"`     // Echo of ping's sentAt
	ReceivedAt int64 `json:"receivedAt"` // When ping was received; presumably the same Unix-ms clock as SentAt — confirm
}
// StartMinerPayload requests starting a miner.
//
// payload := StartMinerPayload{MinerType: "xmrig"}
type StartMinerPayload struct {
MinerType string `json:"minerType"` // Required: miner type (e.g., "xmrig", "tt-miner")
ProfileID string `json:"profileId,omitempty"`
@ -149,11 +194,15 @@ type StartMinerPayload struct {
}
// StopMinerPayload requests stopping a miner. The result is reported back
// via MinerAckPayload.
//
// payload := StopMinerPayload{MinerName: "xmrig-0"}
type StopMinerPayload struct {
	MinerName string `json:"minerName"` // Name of the running miner instance to stop
}
// MinerAckPayload acknowledges a miner start/stop operation.
//
// ack := MinerAckPayload{Success: true, MinerName: "xmrig-0"}
type MinerAckPayload struct {
Success bool `json:"success"`
MinerName string `json:"minerName,omitempty"`
@ -161,6 +210,8 @@ type MinerAckPayload struct {
}
// MinerStatsItem represents stats for a single miner.
//
// miner := MinerStatsItem{Name: "xmrig-0", Hashrate: 1200}
type MinerStatsItem struct {
Name string `json:"name"`
Type string `json:"type"`
@ -174,6 +225,8 @@ type MinerStatsItem struct {
}
// StatsPayload contains miner statistics.
//
// stats := StatsPayload{NodeID: "worker-1"}
type StatsPayload struct {
NodeID string `json:"nodeId"`
NodeName string `json:"nodeName"`
@ -182,6 +235,8 @@ type StatsPayload struct {
}
// GetLogsPayload requests console logs from a miner.
//
// payload := GetLogsPayload{MinerName: "xmrig-0", Lines: 100}
type GetLogsPayload struct {
MinerName string `json:"minerName"`
Lines int `json:"lines"` // Number of lines to fetch
@ -189,6 +244,8 @@ type GetLogsPayload struct {
}
// LogsPayload contains console log lines.
//
// payload := LogsPayload{MinerName: "xmrig-0", Lines: []string{"started"}}
type LogsPayload struct {
MinerName string `json:"minerName"`
Lines []string `json:"lines"`
@ -196,6 +253,8 @@ type LogsPayload struct {
}
// DeployPayload contains a deployment bundle.
//
// payload := DeployPayload{Name: "xmrig", BundleType: string(BundleMiner)}
type DeployPayload struct {
BundleType string `json:"type"` // "profile" | "miner" | "full"
Data []byte `json:"data"` // STIM-encrypted bundle
@ -204,6 +263,8 @@ type DeployPayload struct {
}
// DeployAckPayload acknowledges a deployment.
//
// ack := DeployAckPayload{Success: true, Name: "xmrig"}
type DeployAckPayload struct {
Success bool `json:"success"`
Name string `json:"name,omitempty"`
@ -211,6 +272,8 @@ type DeployAckPayload struct {
}
// ErrorPayload contains error information.
//
// payload := ErrorPayload{Code: ErrCodeOperationFailed, Message: "start failed"}
type ErrorPayload struct {
Code int `json:"code"`
Message string `json:"message"`
@ -228,6 +291,8 @@ const (
)
// NewErrorMessage creates an error response message.
//
// msg, err := NewErrorMessage("worker", "controller", ErrCodeOperationFailed, "miner start failed", "req-1")
func NewErrorMessage(from, to string, code int, message string, replyTo string) (*Message, error) {
msg, err := NewMessage(MsgError, from, to, ErrorPayload{
Code: code,

View file

@ -1,12 +1,11 @@
package node
import (
"encoding/json"
"testing"
"time"
)
func TestNewMessage(t *testing.T) {
func TestMessage_NewMessage_Good(t *testing.T) {
t.Run("BasicMessage", func(t *testing.T) {
msg, err := NewMessage(MsgPing, "sender-id", "receiver-id", nil)
if err != nil {
@ -60,7 +59,7 @@ func TestNewMessage(t *testing.T) {
})
}
func TestMessageReply(t *testing.T) {
func TestMessage_Reply_Good(t *testing.T) {
original, _ := NewMessage(MsgPing, "sender", "receiver", PingPayload{SentAt: 12345})
reply, err := original.Reply(MsgPong, PongPayload{
@ -89,7 +88,7 @@ func TestMessageReply(t *testing.T) {
}
}
func TestParsePayload(t *testing.T) {
func TestMessage_ParsePayload_Good(t *testing.T) {
t.Run("ValidPayload", func(t *testing.T) {
payload := StartMinerPayload{
MinerType: "xmrig",
@ -160,7 +159,7 @@ func TestParsePayload(t *testing.T) {
})
}
func TestNewErrorMessage(t *testing.T) {
func TestMessage_NewErrorMessage_Bad(t *testing.T) {
errMsg, err := NewErrorMessage("sender", "receiver", ErrCodeOperationFailed, "something went wrong", "original-msg-id")
if err != nil {
t.Fatalf("failed to create error message: %v", err)
@ -189,24 +188,18 @@ func TestNewErrorMessage(t *testing.T) {
}
}
func TestMessageSerialization(t *testing.T) {
func TestMessage_Serialization_Good(t *testing.T) {
original, _ := NewMessage(MsgStartMiner, "ctrl", "worker", StartMinerPayload{
MinerType: "xmrig",
ProfileID: "my-profile",
})
// Serialize
data, err := json.Marshal(original)
if err != nil {
t.Fatalf("failed to serialize message: %v", err)
}
data := testJSONMarshal(t, original)
// Deserialize
var restored Message
err = json.Unmarshal(data, &restored)
if err != nil {
t.Fatalf("failed to deserialize message: %v", err)
}
testJSONUnmarshal(t, data, &restored)
if restored.ID != original.ID {
t.Error("ID mismatch after serialization")
@ -221,8 +214,7 @@ func TestMessageSerialization(t *testing.T) {
}
var payload StartMinerPayload
err = restored.ParsePayload(&payload)
if err != nil {
if err := restored.ParsePayload(&payload); err != nil {
t.Fatalf("failed to parse restored payload: %v", err)
}
@ -231,7 +223,7 @@ func TestMessageSerialization(t *testing.T) {
}
}
func TestMessageTypes(t *testing.T) {
func TestMessage_Types_Good(t *testing.T) {
types := []MessageType{
MsgHandshake,
MsgHandshakeAck,
@ -264,7 +256,7 @@ func TestMessageTypes(t *testing.T) {
}
}
func TestErrorCodes(t *testing.T) {
func TestMessage_ErrorCodes_Bad(t *testing.T) {
codes := map[int]string{
ErrCodeUnknown: "Unknown",
ErrCodeInvalidMessage: "InvalidMessage",
@ -283,7 +275,7 @@ func TestErrorCodes(t *testing.T) {
}
}
func TestNewMessage_NilPayload(t *testing.T) {
func TestMessage_NewMessage_NilPayload_Ugly(t *testing.T) {
msg, err := NewMessage(MsgPing, "from", "to", nil)
if err != nil {
t.Fatalf("NewMessage with nil payload should succeed: %v", err)
@ -293,7 +285,7 @@ func TestNewMessage_NilPayload(t *testing.T) {
}
}
func TestMessage_ParsePayload_Nil(t *testing.T) {
func TestMessage_ParsePayload_Nil_Ugly(t *testing.T) {
msg := &Message{Payload: nil}
var target PingPayload
err := msg.ParsePayload(&target)
@ -302,7 +294,7 @@ func TestMessage_ParsePayload_Nil(t *testing.T) {
}
}
func TestNewErrorMessage_Success(t *testing.T) {
func TestMessage_NewErrorMessage_Success_Bad(t *testing.T) {
msg, err := NewErrorMessage("from", "to", ErrCodeOperationFailed, "something went wrong", "reply-123")
if err != nil {
t.Fatalf("NewErrorMessage failed: %v", err)
@ -315,7 +307,10 @@ func TestNewErrorMessage_Success(t *testing.T) {
}
var payload ErrorPayload
msg.ParsePayload(&payload)
err = msg.ParsePayload(&payload)
if err != nil {
t.Fatalf("ParsePayload failed: %v", err)
}
if payload.Code != ErrCodeOperationFailed {
t.Errorf("expected code %d, got %d", ErrCodeOperationFailed, payload.Code)
}

View file

@ -16,6 +16,8 @@ import (
)
// Peer represents a known remote node.
//
// peer := &Peer{ID: "worker-1", Address: "127.0.0.1:9101"}
type Peer struct {
ID string `json:"id"`
Name string `json:"name"`
@ -39,6 +41,8 @@ type Peer struct {
const saveDebounceInterval = 5 * time.Second
// PeerAuthMode controls how unknown peers are handled during handshake —
// e.g. accepted openly or checked against an allowlist (see the PeerAuth*
// constants declared below).
//
// mode := PeerAuthAllowlist
type PeerAuthMode int
const (
@ -88,6 +92,8 @@ func validatePeerName(name string) error {
}
// PeerRegistry manages known peers with KD-tree based selection.
//
// peerRegistry, err := NewPeerRegistry()
type PeerRegistry struct {
peers map[string]*Peer
kdTree *poindexter.KDTree[string] // KD-tree with peer ID as payload
@ -117,6 +123,8 @@ var (
)
// NewPeerRegistry creates a new PeerRegistry, loading existing peers if available.
//
// peerRegistry, err := NewPeerRegistry()
func NewPeerRegistry() (*PeerRegistry, error) {
peersPath, err := xdg.ConfigFile("lethean-desktop/peers.json")
if err != nil {
@ -128,6 +136,8 @@ func NewPeerRegistry() (*PeerRegistry, error) {
// NewPeerRegistryWithPath creates a new PeerRegistry with a custom path.
// This is primarily useful for testing to avoid xdg path caching issues.
//
// peerRegistry, err := NewPeerRegistryWithPath("/srv/p2p/peers.json")
func NewPeerRegistryWithPath(peersPath string) (*PeerRegistry, error) {
pr := &PeerRegistry{
peers: make(map[string]*Peer),

View file

@ -1,35 +1,24 @@
package node
import (
"os"
"path/filepath"
"slices"
"testing"
"time"
)
func setupTestPeerRegistry(t *testing.T) (*PeerRegistry, func()) {
tmpDir, err := os.MkdirTemp("", "peer-registry-test")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
peersPath := filepath.Join(tmpDir, "peers.json")
tmpDir := t.TempDir()
peersPath := testJoinPath(tmpDir, "peers.json")
pr, err := NewPeerRegistryWithPath(peersPath)
if err != nil {
os.RemoveAll(tmpDir)
t.Fatalf("failed to create peer registry: %v", err)
}
cleanup := func() {
os.RemoveAll(tmpDir)
}
return pr, cleanup
return pr, func() {}
}
func TestPeerRegistry_NewPeerRegistry(t *testing.T) {
func TestPeer_Registry_NewPeerRegistry_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -38,7 +27,7 @@ func TestPeerRegistry_NewPeerRegistry(t *testing.T) {
}
}
func TestPeerRegistry_AddPeer(t *testing.T) {
func TestPeer_Registry_AddPeer_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -67,7 +56,7 @@ func TestPeerRegistry_AddPeer(t *testing.T) {
}
}
func TestPeerRegistry_GetPeer(t *testing.T) {
func TestPeer_Registry_GetPeer_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -97,7 +86,7 @@ func TestPeerRegistry_GetPeer(t *testing.T) {
}
}
func TestPeerRegistry_ListPeers(t *testing.T) {
func TestPeer_Registry_ListPeers_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -117,7 +106,7 @@ func TestPeerRegistry_ListPeers(t *testing.T) {
}
}
func TestPeerRegistry_RemovePeer(t *testing.T) {
func TestPeer_Registry_RemovePeer_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -150,7 +139,7 @@ func TestPeerRegistry_RemovePeer(t *testing.T) {
}
}
func TestPeerRegistry_UpdateMetrics(t *testing.T) {
func TestPeer_Registry_UpdateMetrics_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -183,7 +172,7 @@ func TestPeerRegistry_UpdateMetrics(t *testing.T) {
}
}
func TestPeerRegistry_UpdateScore(t *testing.T) {
func TestPeer_Registry_UpdateScore_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -237,7 +226,7 @@ func TestPeerRegistry_UpdateScore(t *testing.T) {
}
}
func TestPeerRegistry_SetConnected(t *testing.T) {
func TestPeer_Registry_SetConnected_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -272,7 +261,7 @@ func TestPeerRegistry_SetConnected(t *testing.T) {
}
}
func TestPeerRegistry_GetConnectedPeers(t *testing.T) {
func TestPeer_Registry_GetConnectedPeers_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -295,7 +284,7 @@ func TestPeerRegistry_GetConnectedPeers(t *testing.T) {
}
}
func TestPeerRegistry_SelectOptimalPeer(t *testing.T) {
func TestPeer_Registry_SelectOptimalPeer_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -321,7 +310,7 @@ func TestPeerRegistry_SelectOptimalPeer(t *testing.T) {
}
}
func TestPeerRegistry_SelectNearestPeers(t *testing.T) {
func TestPeer_Registry_SelectNearestPeers_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -342,11 +331,9 @@ func TestPeerRegistry_SelectNearestPeers(t *testing.T) {
}
}
func TestPeerRegistry_Persistence(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "persist-test")
defer os.RemoveAll(tmpDir)
peersPath := filepath.Join(tmpDir, "peers.json")
func TestPeer_Registry_Persistence_Good(t *testing.T) {
tmpDir := t.TempDir()
peersPath := testJoinPath(tmpDir, "peers.json")
// Create and save
pr1, err := NewPeerRegistryWithPath(peersPath)
@ -391,7 +378,7 @@ func TestPeerRegistry_Persistence(t *testing.T) {
// --- Security Feature Tests ---
func TestPeerRegistry_AuthMode(t *testing.T) {
func TestPeer_Registry_AuthMode_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -413,7 +400,7 @@ func TestPeerRegistry_AuthMode(t *testing.T) {
}
}
func TestPeerRegistry_PublicKeyAllowlist(t *testing.T) {
func TestPeer_Registry_PublicKeyAllowlist_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -450,7 +437,7 @@ func TestPeerRegistry_PublicKeyAllowlist(t *testing.T) {
}
}
func TestPeerRegistry_IsPeerAllowed_OpenMode(t *testing.T) {
func TestPeer_Registry_IsPeerAllowed_OpenMode_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -466,7 +453,7 @@ func TestPeerRegistry_IsPeerAllowed_OpenMode(t *testing.T) {
}
}
func TestPeerRegistry_IsPeerAllowed_AllowlistMode(t *testing.T) {
func TestPeer_Registry_IsPeerAllowed_AllowlistMode_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -501,7 +488,7 @@ func TestPeerRegistry_IsPeerAllowed_AllowlistMode(t *testing.T) {
}
}
func TestPeerRegistry_PeerNameValidation(t *testing.T) {
func TestPeer_Registry_PeerNameValidation_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -545,7 +532,7 @@ func TestPeerRegistry_PeerNameValidation(t *testing.T) {
}
}
func TestPeerRegistry_ScoreRecording(t *testing.T) {
func TestPeer_Registry_ScoreRecording_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -601,7 +588,7 @@ func TestPeerRegistry_ScoreRecording(t *testing.T) {
}
}
func TestPeerRegistry_GetPeersByScore(t *testing.T) {
func TestPeer_Registry_GetPeersByScore_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -635,7 +622,7 @@ func TestPeerRegistry_GetPeersByScore(t *testing.T) {
// --- Additional coverage tests for peer.go ---
func TestSafeKeyPrefix(t *testing.T) {
func TestPeer_SafeKeyPrefix_Good(t *testing.T) {
tests := []struct {
name string
key string
@ -658,7 +645,7 @@ func TestSafeKeyPrefix(t *testing.T) {
}
}
func TestValidatePeerName(t *testing.T) {
func TestPeer_ValidatePeerName_Good(t *testing.T) {
tests := []struct {
name string
peerName string
@ -691,7 +678,7 @@ func TestValidatePeerName(t *testing.T) {
}
}
func TestPeerRegistry_AddPeer_EmptyID(t *testing.T) {
func TestPeer_Registry_AddPeer_EmptyID_Bad(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -702,7 +689,7 @@ func TestPeerRegistry_AddPeer_EmptyID(t *testing.T) {
}
}
func TestPeerRegistry_UpdatePeer(t *testing.T) {
func TestPeer_Registry_UpdatePeer_Good(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -735,7 +722,7 @@ func TestPeerRegistry_UpdatePeer(t *testing.T) {
}
}
func TestPeerRegistry_UpdateMetrics_NotFound(t *testing.T) {
func TestPeer_Registry_UpdateMetrics_NotFound_Bad(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -745,7 +732,7 @@ func TestPeerRegistry_UpdateMetrics_NotFound(t *testing.T) {
}
}
func TestPeerRegistry_UpdateScore_NotFound(t *testing.T) {
func TestPeer_Registry_UpdateScore_NotFound_Bad(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -755,7 +742,7 @@ func TestPeerRegistry_UpdateScore_NotFound(t *testing.T) {
}
}
func TestPeerRegistry_RecordSuccess_NotFound(t *testing.T) {
func TestPeer_Registry_RecordSuccess_NotFound_Bad(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -763,21 +750,21 @@ func TestPeerRegistry_RecordSuccess_NotFound(t *testing.T) {
pr.RecordSuccess("ghost-peer")
}
func TestPeerRegistry_RecordFailure_NotFound(t *testing.T) {
func TestPeer_Registry_RecordFailure_NotFound_Bad(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
pr.RecordFailure("ghost-peer")
}
func TestPeerRegistry_RecordTimeout_NotFound(t *testing.T) {
func TestPeer_Registry_RecordTimeout_NotFound_Bad(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
pr.RecordTimeout("ghost-peer")
}
func TestPeerRegistry_SelectOptimalPeer_EmptyRegistry(t *testing.T) {
func TestPeer_Registry_SelectOptimalPeer_EmptyRegistry_Ugly(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -787,7 +774,7 @@ func TestPeerRegistry_SelectOptimalPeer_EmptyRegistry(t *testing.T) {
}
}
func TestPeerRegistry_SelectNearestPeers_EmptyRegistry(t *testing.T) {
func TestPeer_Registry_SelectNearestPeers_EmptyRegistry_Ugly(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -797,7 +784,7 @@ func TestPeerRegistry_SelectNearestPeers_EmptyRegistry(t *testing.T) {
}
}
func TestPeerRegistry_SetConnected_NonExistent(t *testing.T) {
func TestPeer_Registry_SetConnected_NonExistent_Bad(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -805,7 +792,7 @@ func TestPeerRegistry_SetConnected_NonExistent(t *testing.T) {
pr.SetConnected("ghost-peer", true)
}
func TestPeerRegistry_Close_NoDirtyData(t *testing.T) {
func TestPeer_Registry_Close_NoDirtyData_Ugly(t *testing.T) {
pr, cleanup := setupTestPeerRegistry(t)
defer cleanup()
@ -816,11 +803,9 @@ func TestPeerRegistry_Close_NoDirtyData(t *testing.T) {
}
}
func TestPeerRegistry_Close_WithDirtyData(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "close-dirty-test")
defer os.RemoveAll(tmpDir)
peersPath := filepath.Join(tmpDir, "peers.json")
func TestPeer_Registry_Close_WithDirtyData_Ugly(t *testing.T) {
tmpDir := t.TempDir()
peersPath := testJoinPath(tmpDir, "peers.json")
pr, err := NewPeerRegistryWithPath(peersPath)
if err != nil {
t.Fatalf("failed to create registry: %v", err)
@ -845,11 +830,9 @@ func TestPeerRegistry_Close_WithDirtyData(t *testing.T) {
}
}
func TestPeerRegistry_ScheduleSave_Debounce(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "debounce-test")
defer os.RemoveAll(tmpDir)
peersPath := filepath.Join(tmpDir, "peers.json")
func TestPeer_Registry_ScheduleSave_Debounce_Ugly(t *testing.T) {
tmpDir := t.TempDir()
peersPath := testJoinPath(tmpDir, "peers.json")
pr, err := NewPeerRegistryWithPath(peersPath)
if err != nil {
t.Fatalf("failed to create registry: %v", err)
@ -867,11 +850,9 @@ func TestPeerRegistry_ScheduleSave_Debounce(t *testing.T) {
}
}
func TestPeerRegistry_SaveNow(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "savenow-test")
defer os.RemoveAll(tmpDir)
peersPath := filepath.Join(tmpDir, "subdir", "peers.json")
func TestPeer_Registry_SaveNow_Good(t *testing.T) {
tmpDir := t.TempDir()
peersPath := testJoinPath(tmpDir, "subdir", "peers.json")
pr, err := NewPeerRegistryWithPath(peersPath)
if err != nil {
t.Fatalf("failed to create registry: %v", err)
@ -888,20 +869,18 @@ func TestPeerRegistry_SaveNow(t *testing.T) {
}
// Verify the file was written
if _, err := os.Stat(peersPath); os.IsNotExist(err) {
if !fsExists(peersPath) {
t.Error("peers.json should exist after saveNow")
}
}
func TestPeerRegistry_ScheduleSave_TimerFires(t *testing.T) {
func TestPeer_Registry_ScheduleSave_TimerFires_Ugly(t *testing.T) {
if testing.Short() {
t.Skip("skipping debounce timer test in short mode")
}
tmpDir, _ := os.MkdirTemp("", "timer-fire-test")
defer os.RemoveAll(tmpDir)
peersPath := filepath.Join(tmpDir, "peers.json")
tmpDir := t.TempDir()
peersPath := testJoinPath(tmpDir, "peers.json")
pr, err := NewPeerRegistryWithPath(peersPath)
if err != nil {
t.Fatalf("failed to create registry: %v", err)
@ -913,7 +892,7 @@ func TestPeerRegistry_ScheduleSave_TimerFires(t *testing.T) {
time.Sleep(6 * time.Second)
// The file should have been saved by the timer
if _, err := os.Stat(peersPath); os.IsNotExist(err) {
if !fsExists(peersPath) {
t.Error("peers.json should exist after debounce timer fires")
}

View file

@ -5,6 +5,8 @@ import (
)
// ProtocolError represents an error from the remote peer.
//
// err := &ProtocolError{Code: ErrCodeOperationFailed, Message: "start failed"}
type ProtocolError struct {
Code int
Message string
@ -15,6 +17,8 @@ func (e *ProtocolError) Error() string {
}
// ResponseHandler provides helpers for handling protocol responses.
// It is stateless, so the zero value is ready to use; DefaultResponseHandler
// is the shared instance behind the package-level convenience functions.
//
// handler := &ResponseHandler{}
type ResponseHandler struct{}
// ValidateResponse checks if the response is valid and returns a parsed error if it's an error response.
@ -64,22 +68,30 @@ func (h *ResponseHandler) ParseResponse(resp *Message, expectedType MessageType,
var DefaultResponseHandler = &ResponseHandler{}
// ValidateResponse is a convenience function using the default handler.
// It checks resp against expectedType and surfaces an error-type response
// from the peer as a Go error (see ResponseHandler.ValidateResponse).
//
// err := ValidateResponse(msg, MsgStats)
func ValidateResponse(resp *Message, expectedType MessageType) error {
	return DefaultResponseHandler.ValidateResponse(resp, expectedType)
}
// ParseResponse is a convenience function using the default handler.
// It validates resp against expectedType and, on success, decodes the
// payload into target.
//
// err := ParseResponse(msg, MsgStats, &stats)
func ParseResponse(resp *Message, expectedType MessageType, target any) error {
	return DefaultResponseHandler.ParseResponse(resp, expectedType, target)
}
// IsProtocolError reports whether err is a *ProtocolError from a remote peer.
// Only the concrete type is inspected; wrapped errors are not unwrapped.
//
// ok := IsProtocolError(err)
func IsProtocolError(err error) bool {
	switch err.(type) {
	case *ProtocolError:
		return true
	default:
		return false
	}
}
// GetProtocolErrorCode returns the error code if err is a ProtocolError, otherwise returns 0.
//
// code := GetProtocolErrorCode(err)
func GetProtocolErrorCode(err error) int {
if pe, ok := err.(*ProtocolError); ok {
return pe.Code

View file

@ -1,11 +1,12 @@
package node
import (
"fmt"
"testing"
core "dappco.re/go/core"
)
func TestResponseHandler_ValidateResponse(t *testing.T) {
func TestProtocol_ResponseHandler_ValidateResponse_Good(t *testing.T) {
handler := &ResponseHandler{}
t.Run("NilResponse", func(t *testing.T) {
@ -51,7 +52,7 @@ func TestResponseHandler_ValidateResponse(t *testing.T) {
})
}
func TestResponseHandler_ParseResponse(t *testing.T) {
func TestProtocol_ResponseHandler_ParseResponse_Good(t *testing.T) {
handler := &ResponseHandler{}
t.Run("ParseStats", func(t *testing.T) {
@ -119,7 +120,7 @@ func TestResponseHandler_ParseResponse(t *testing.T) {
})
}
func TestProtocolError(t *testing.T) {
func TestProtocol_Error_Bad(t *testing.T) {
err := &ProtocolError{Code: 1001, Message: "test error"}
if err.Error() != "remote error (1001): test error" {
@ -135,7 +136,7 @@ func TestProtocolError(t *testing.T) {
}
}
func TestConvenienceFunctions(t *testing.T) {
func TestProtocol_ConvenienceFunctions_Good(t *testing.T) {
msg, _ := NewMessage(MsgStats, "sender", "receiver", StatsPayload{NodeID: "test"})
// Test ValidateResponse
@ -153,8 +154,8 @@ func TestConvenienceFunctions(t *testing.T) {
}
}
func TestGetProtocolErrorCode_NonProtocolError(t *testing.T) {
err := fmt.Errorf("regular error")
func TestProtocol_GetProtocolErrorCode_NonProtocolError_Bad(t *testing.T) {
err := core.NewError("regular error")
if GetProtocolErrorCode(err) != 0 {
t.Error("Expected 0 for non-ProtocolError")
}

View file

@ -30,6 +30,8 @@ const debugLogInterval = 100
const DefaultMaxMessageSize int64 = 1 << 20 // 1MB
// TransportConfig configures the WebSocket transport.
//
// cfg := DefaultTransportConfig()
type TransportConfig struct {
ListenAddr string // ":9091" default
WSPath string // "/ws" - WebSocket endpoint path
@ -42,6 +44,8 @@ type TransportConfig struct {
}
// DefaultTransportConfig returns sensible defaults.
//
// cfg := DefaultTransportConfig()
func DefaultTransportConfig() TransportConfig {
return TransportConfig{
ListenAddr: ":9091",
@ -54,9 +58,13 @@ func DefaultTransportConfig() TransportConfig {
}
// MessageHandler processes incoming messages. The transport invokes it with
// the originating peer connection and the decoded message.
//
// var handler MessageHandler = func(conn *PeerConnection, msg *Message) {}
type MessageHandler func(conn *PeerConnection, msg *Message)
// MessageDeduplicator tracks seen message IDs to prevent duplicate processing
//
// deduplicator := NewMessageDeduplicator(5 * time.Minute)
type MessageDeduplicator struct {
seen map[string]time.Time
mu sync.RWMutex
@ -64,6 +72,8 @@ type MessageDeduplicator struct {
}
// NewMessageDeduplicator creates a deduplicator with specified TTL
//
// deduplicator := NewMessageDeduplicator(5 * time.Minute)
func NewMessageDeduplicator(ttl time.Duration) *MessageDeduplicator {
d := &MessageDeduplicator{
seen: make(map[string]time.Time),
@ -100,6 +110,8 @@ func (d *MessageDeduplicator) Cleanup() {
}
// Transport manages WebSocket connections with SMSG encryption.
//
// transport := NewTransport(nodeManager, peerRegistry, DefaultTransportConfig())
type Transport struct {
config TransportConfig
server *http.Server
@ -117,6 +129,8 @@ type Transport struct {
}
// PeerRateLimiter implements a simple token bucket rate limiter per peer
//
// rateLimiter := NewPeerRateLimiter(100, 50)
type PeerRateLimiter struct {
tokens int
maxTokens int
@ -126,6 +140,8 @@ type PeerRateLimiter struct {
}
// NewPeerRateLimiter creates a rate limiter with specified messages/second
//
// rateLimiter := NewPeerRateLimiter(100, 50)
func NewPeerRateLimiter(maxTokens, refillRate int) *PeerRateLimiter {
return &PeerRateLimiter{
tokens: maxTokens,
@ -158,6 +174,8 @@ func (r *PeerRateLimiter) Allow() bool {
}
// PeerConnection represents an active connection to a peer.
//
// peerConnection := &PeerConnection{Peer: &Peer{ID: "worker-1"}}
type PeerConnection struct {
Peer *Peer
Conn *websocket.Conn
@ -170,6 +188,8 @@ type PeerConnection struct {
}
// NewTransport creates a new WebSocket transport.
//
// transport := NewTransport(nodeManager, peerRegistry, DefaultTransportConfig())
func NewTransport(node *NodeManager, registry *PeerRegistry, config TransportConfig) *Transport {
ctx, cancel := context.WithCancel(context.Background())
@ -856,6 +876,8 @@ func (pc *PeerConnection) Close() error {
}
// DisconnectPayload contains reason for disconnect.
//
// payload := DisconnectPayload{Reason: "shutdown", Code: DisconnectNormal}
type DisconnectPayload struct {
Reason string `json:"reason"`
Code int `json:"code"` // Optional disconnect code

View file

@ -1,17 +1,15 @@
package node
import (
"encoding/json"
"net/http"
"net/http/httptest"
"net/url"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
core "dappco.re/go/core"
"github.com/gorilla/websocket"
)
@ -21,10 +19,7 @@ import (
func testNode(t *testing.T, name string, role NodeRole) *NodeManager {
t.Helper()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("create node manager %q: %v", name, err)
}
@ -38,7 +33,7 @@ func testNode(t *testing.T, name string, role NodeRole) *NodeManager {
func testRegistry(t *testing.T) *PeerRegistry {
t.Helper()
dir := t.TempDir()
reg, err := NewPeerRegistryWithPath(filepath.Join(dir, "peers.json"))
reg, err := NewPeerRegistryWithPath(testJoinPath(dir, "peers.json"))
if err != nil {
t.Fatalf("create registry: %v", err)
}
@ -124,7 +119,7 @@ func (tp *testTransportPair) connectClient(t *testing.T) *PeerConnection {
// --- Unit Tests for Sub-Components ---
func TestMessageDeduplicator(t *testing.T) {
func TestTransport_MessageDeduplicator_Good(t *testing.T) {
t.Run("MarkAndCheck", func(t *testing.T) {
d := NewMessageDeduplicator(5 * time.Minute)
@ -175,7 +170,7 @@ func TestMessageDeduplicator(t *testing.T) {
})
}
func TestPeerRateLimiter(t *testing.T) {
func TestTransport_PeerRateLimiter_Good(t *testing.T) {
t.Run("AllowUpToBurst", func(t *testing.T) {
rl := NewPeerRateLimiter(10, 5)
@ -213,7 +208,7 @@ func TestPeerRateLimiter(t *testing.T) {
// --- Transport Integration Tests ---
func TestTransport_FullHandshake(t *testing.T) {
func TestTransport_FullHandshake_Good(t *testing.T) {
tp := setupTestTransportPair(t)
pc := tp.connectClient(t)
@ -243,7 +238,7 @@ func TestTransport_FullHandshake(t *testing.T) {
}
}
func TestTransport_HandshakeRejectWrongVersion(t *testing.T) {
func TestTransport_HandshakeRejectWrongVersion_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
// Dial raw WebSocket and send handshake with unsupported version
@ -272,9 +267,7 @@ func TestTransport_HandshakeRejectWrongVersion(t *testing.T) {
}
var resp Message
if err := json.Unmarshal(respData, &resp); err != nil {
t.Fatalf("unmarshal response: %v", err)
}
testJSONUnmarshal(t, respData, &resp)
var ack HandshakeAckPayload
resp.ParsePayload(&ack)
@ -282,12 +275,12 @@ func TestTransport_HandshakeRejectWrongVersion(t *testing.T) {
if ack.Accepted {
t.Error("should reject incompatible protocol version")
}
if !strings.Contains(ack.Reason, "incompatible protocol version") {
if !core.Contains(ack.Reason, "incompatible protocol version") {
t.Errorf("expected version rejection reason, got: %s", ack.Reason)
}
}
func TestTransport_HandshakeRejectAllowlist(t *testing.T) {
func TestTransport_HandshakeRejectAllowlist_Bad(t *testing.T) {
tp := setupTestTransportPair(t)
// Switch server to allowlist mode WITHOUT adding client's key
@ -305,12 +298,12 @@ func TestTransport_HandshakeRejectAllowlist(t *testing.T) {
if err == nil {
t.Fatal("should reject peer not in allowlist")
}
if !strings.Contains(err.Error(), "rejected") {
if !core.Contains(err.Error(), "rejected") {
t.Errorf("expected rejection error, got: %v", err)
}
}
func TestTransport_EncryptedMessageRoundTrip(t *testing.T) {
func TestTransport_EncryptedMessageRoundTrip_Ugly(t *testing.T) {
tp := setupTestTransportPair(t)
received := make(chan *Message, 1)
@ -353,7 +346,7 @@ func TestTransport_EncryptedMessageRoundTrip(t *testing.T) {
}
}
func TestTransport_MessageDedup(t *testing.T) {
func TestTransport_MessageDedup_Good(t *testing.T) {
tp := setupTestTransportPair(t)
var count atomic.Int32
@ -383,7 +376,7 @@ func TestTransport_MessageDedup(t *testing.T) {
}
}
func TestTransport_RateLimiting(t *testing.T) {
func TestTransport_RateLimiting_Good(t *testing.T) {
tp := setupTestTransportPair(t)
var count atomic.Int32
@ -415,7 +408,7 @@ func TestTransport_RateLimiting(t *testing.T) {
}
}
func TestTransport_MaxConnsEnforcement(t *testing.T) {
func TestTransport_MaxConnsEnforcement_Good(t *testing.T) {
// Server with MaxConns=1
serverNM := testNode(t, "maxconns-server", RoleWorker)
serverReg := testRegistry(t)
@ -467,7 +460,7 @@ func TestTransport_MaxConnsEnforcement(t *testing.T) {
}
}
func TestTransport_KeepaliveTimeout(t *testing.T) {
func TestTransport_KeepaliveTimeout_Bad(t *testing.T) {
// Use short keepalive settings so the test is fast
serverCfg := DefaultTransportConfig()
serverCfg.PingInterval = 100 * time.Millisecond
@ -516,7 +509,7 @@ func TestTransport_KeepaliveTimeout(t *testing.T) {
}
}
func TestTransport_GracefulClose(t *testing.T) {
func TestTransport_GracefulClose_Ugly(t *testing.T) {
tp := setupTestTransportPair(t)
received := make(chan *Message, 10)
@ -551,7 +544,7 @@ func TestTransport_GracefulClose(t *testing.T) {
}
}
func TestTransport_ConcurrentSends(t *testing.T) {
func TestTransport_ConcurrentSends_Ugly(t *testing.T) {
tp := setupTestTransportPair(t)
var count atomic.Int32
@ -591,7 +584,7 @@ func TestTransport_ConcurrentSends(t *testing.T) {
// --- Additional coverage tests ---
func TestTransport_Broadcast(t *testing.T) {
func TestTransport_Broadcast_Good(t *testing.T) {
// Set up a controller with two worker peers connected.
controllerNM := testNode(t, "broadcast-controller", RoleController)
controllerReg := testRegistry(t)
@ -648,7 +641,7 @@ func TestTransport_Broadcast(t *testing.T) {
}
}
func TestTransport_BroadcastExcludesSender(t *testing.T) {
func TestTransport_BroadcastExcludesSender_Good(t *testing.T) {
// Verify that Broadcast excludes the sender.
tp := setupTestTransportPair(t)
@ -675,7 +668,7 @@ func TestTransport_BroadcastExcludesSender(t *testing.T) {
}
}
func TestTransport_NewTransport_DefaultMaxMessageSize(t *testing.T) {
func TestTransport_NewTransport_DefaultMaxMessageSize_Good(t *testing.T) {
nm := testNode(t, "defaults", RoleWorker)
reg := testRegistry(t)
cfg := TransportConfig{
@ -692,7 +685,7 @@ func TestTransport_NewTransport_DefaultMaxMessageSize(t *testing.T) {
// The actual default is applied at usage time (readLoop, handleWSUpgrade)
}
func TestTransport_ConnectedPeers(t *testing.T) {
func TestTransport_ConnectedPeers_Good(t *testing.T) {
tp := setupTestTransportPair(t)
if tp.Server.ConnectedPeers() != 0 {
@ -707,7 +700,7 @@ func TestTransport_ConnectedPeers(t *testing.T) {
}
}
func TestTransport_StartAndStop(t *testing.T) {
func TestTransport_StartAndStop_Good(t *testing.T) {
nm := testNode(t, "start-test", RoleWorker)
reg := testRegistry(t)
cfg := DefaultTransportConfig()
@ -729,7 +722,7 @@ func TestTransport_StartAndStop(t *testing.T) {
}
}
func TestTransport_CheckOrigin(t *testing.T) {
func TestTransport_CheckOrigin_Good(t *testing.T) {
nm := testNode(t, "origin-test", RoleWorker)
reg := testRegistry(t)
cfg := DefaultTransportConfig()

View file

@ -12,6 +12,8 @@ import (
// MinerManager interface for the mining package integration.
// This allows the node package to interact with mining.Manager without import cycles.
//
// var minerManager MinerManager
type MinerManager interface {
StartMiner(minerType string, config any) (MinerInstance, error)
StopMiner(name string) error
@ -20,6 +22,8 @@ type MinerManager interface {
}
// MinerInstance represents a running miner for stats collection.
//
// var miner MinerInstance
type MinerInstance interface {
GetName() string
GetType() string
@ -28,12 +32,16 @@ type MinerInstance interface {
}
// ProfileManager interface for profile operations.
//
// var profileManager ProfileManager
type ProfileManager interface {
GetProfile(id string) (any, error)
SaveProfile(profile any) error
}
// Worker handles incoming messages on a worker node.
//
// worker := NewWorker(nodeManager, transport)
type Worker struct {
node *NodeManager
transport *Transport
@ -44,6 +52,8 @@ type Worker struct {
}
// NewWorker creates a new Worker instance.
//
// worker := NewWorker(nodeManager, transport)
func NewWorker(node *NodeManager, transport *Transport) *Worker {
return &Worker{
node: node,

View file

@ -2,34 +2,26 @@ package node
import (
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path/filepath"
"testing"
"time"
core "dappco.re/go/core"
)
// setupTestEnv sets up a temporary environment for testing and returns cleanup function
func setupTestEnv(t *testing.T) func() {
tmpDir := t.TempDir()
os.Setenv("XDG_CONFIG_HOME", filepath.Join(tmpDir, "config"))
os.Setenv("XDG_DATA_HOME", filepath.Join(tmpDir, "data"))
return func() {
os.Unsetenv("XDG_CONFIG_HOME")
os.Unsetenv("XDG_DATA_HOME")
}
t.Setenv("XDG_CONFIG_HOME", testJoinPath(tmpDir, "config"))
t.Setenv("XDG_DATA_HOME", testJoinPath(tmpDir, "data"))
return func() {}
}
func TestNewWorker(t *testing.T) {
func TestWorker_NewWorker_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -37,7 +29,7 @@ func TestNewWorker(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -57,15 +49,12 @@ func TestNewWorker(t *testing.T) {
}
}
func TestWorker_SetMinerManager(t *testing.T) {
func TestWorker_SetMinerManager_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -73,7 +62,7 @@ func TestWorker_SetMinerManager(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -90,15 +79,12 @@ func TestWorker_SetMinerManager(t *testing.T) {
}
}
func TestWorker_SetProfileManager(t *testing.T) {
func TestWorker_SetProfileManager_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -106,7 +92,7 @@ func TestWorker_SetProfileManager(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -123,15 +109,12 @@ func TestWorker_SetProfileManager(t *testing.T) {
}
}
func TestWorker_HandlePing(t *testing.T) {
func TestWorker_HandlePing_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -139,7 +122,7 @@ func TestWorker_HandlePing(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -187,15 +170,12 @@ func TestWorker_HandlePing(t *testing.T) {
}
}
func TestWorker_HandleGetStats(t *testing.T) {
func TestWorker_HandleGetStats_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -203,7 +183,7 @@ func TestWorker_HandleGetStats(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -250,15 +230,12 @@ func TestWorker_HandleGetStats(t *testing.T) {
}
}
func TestWorker_HandleStartMiner_NoManager(t *testing.T) {
func TestWorker_HandleStartMiner_NoManager_Bad(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -266,7 +243,7 @@ func TestWorker_HandleStartMiner_NoManager(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -293,15 +270,12 @@ func TestWorker_HandleStartMiner_NoManager(t *testing.T) {
}
}
func TestWorker_HandleStopMiner_NoManager(t *testing.T) {
func TestWorker_HandleStopMiner_NoManager_Bad(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -309,7 +283,7 @@ func TestWorker_HandleStopMiner_NoManager(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -336,15 +310,12 @@ func TestWorker_HandleStopMiner_NoManager(t *testing.T) {
}
}
func TestWorker_HandleGetLogs_NoManager(t *testing.T) {
func TestWorker_HandleGetLogs_NoManager_Bad(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -352,7 +323,7 @@ func TestWorker_HandleGetLogs_NoManager(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -379,15 +350,12 @@ func TestWorker_HandleGetLogs_NoManager(t *testing.T) {
}
}
func TestWorker_HandleDeploy_Profile(t *testing.T) {
func TestWorker_HandleDeploy_Profile_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -395,7 +363,7 @@ func TestWorker_HandleDeploy_Profile(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -426,15 +394,12 @@ func TestWorker_HandleDeploy_Profile(t *testing.T) {
}
}
func TestWorker_HandleDeploy_UnknownType(t *testing.T) {
func TestWorker_HandleDeploy_UnknownType_Bad(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -442,7 +407,7 @@ func TestWorker_HandleDeploy_UnknownType(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -472,7 +437,7 @@ func TestWorker_HandleDeploy_UnknownType(t *testing.T) {
}
}
func TestConvertMinerStats(t *testing.T) {
func TestWorker_ConvertMinerStats_Good(t *testing.T) {
tests := []struct {
name string
rawStats any
@ -573,15 +538,15 @@ type mockMinerManagerFailing struct {
}
func (m *mockMinerManagerFailing) StartMiner(minerType string, config any) (MinerInstance, error) {
return nil, fmt.Errorf("mining hardware not available")
return nil, core.E("mockMinerManagerFailing.StartMiner", "mining hardware not available", nil)
}
func (m *mockMinerManagerFailing) StopMiner(name string) error {
return fmt.Errorf("miner %s not found", name)
return core.E("mockMinerManagerFailing.StopMiner", "miner "+name+" not found", nil)
}
func (m *mockMinerManagerFailing) GetMiner(name string) (MinerInstance, error) {
return nil, fmt.Errorf("miner %s not found", name)
return nil, core.E("mockMinerManagerFailing.GetMiner", "miner "+name+" not found", nil)
}
// mockProfileManagerFull implements ProfileManager that returns real data.
@ -592,7 +557,7 @@ type mockProfileManagerFull struct {
func (m *mockProfileManagerFull) GetProfile(id string) (any, error) {
p, ok := m.profiles[id]
if !ok {
return nil, fmt.Errorf("profile %s not found", id)
return nil, core.E("mockProfileManagerFull.GetProfile", "profile "+id+" not found", nil)
}
return p, nil
}
@ -605,22 +570,19 @@ func (m *mockProfileManagerFull) SaveProfile(profile any) error {
type mockProfileManagerFailing struct{}
func (m *mockProfileManagerFailing) GetProfile(id string) (any, error) {
return nil, fmt.Errorf("profile %s not found", id)
return nil, core.E("mockProfileManagerFailing.GetProfile", "profile "+id+" not found", nil)
}
func (m *mockProfileManagerFailing) SaveProfile(profile any) error {
return fmt.Errorf("save failed")
return core.E("mockProfileManagerFailing.SaveProfile", "save failed", nil)
}
func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
func TestWorker_HandleStartMiner_WithManager_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
@ -628,7 +590,7 @@ func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -649,7 +611,7 @@ func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
t.Run("WithConfigOverride", func(t *testing.T) {
payload := StartMinerPayload{
MinerType: "xmrig",
Config: json.RawMessage(`{"pool":"test:3333"}`),
Config: RawMessage(`{"pool":"test:3333"}`),
}
msg, err := NewMessage(MsgStartMiner, "sender-id", identity.ID, payload)
if err != nil {
@ -680,7 +642,7 @@ func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
t.Run("EmptyMinerType", func(t *testing.T) {
payload := StartMinerPayload{
MinerType: "",
Config: json.RawMessage(`{}`),
Config: RawMessage(`{}`),
}
msg, err := NewMessage(MsgStartMiner, "sender-id", identity.ID, payload)
if err != nil {
@ -747,7 +709,7 @@ func TestWorker_HandleStartMiner_WithManager(t *testing.T) {
payload := StartMinerPayload{
MinerType: "xmrig",
Config: json.RawMessage(`{}`),
Config: RawMessage(`{}`),
}
msg, err := NewMessage(MsgStartMiner, "sender-id", identity.ID, payload)
if err != nil {
@ -780,26 +742,23 @@ type mockMinerManagerWithStart struct {
func (m *mockMinerManagerWithStart) StartMiner(minerType string, config any) (MinerInstance, error) {
m.counter++
name := fmt.Sprintf("%s-%d", minerType, m.counter)
name := core.Sprintf("%s-%d", minerType, m.counter)
return &mockMinerInstance{name: name, minerType: minerType}, nil
}
func TestWorker_HandleStopMiner_WithManager(t *testing.T) {
func TestWorker_HandleStopMiner_WithManager_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -851,22 +810,19 @@ func TestWorker_HandleStopMiner_WithManager(t *testing.T) {
})
}
func TestWorker_HandleGetLogs_WithManager(t *testing.T) {
func TestWorker_HandleGetLogs_WithManager_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -961,22 +917,19 @@ func TestWorker_HandleGetLogs_WithManager(t *testing.T) {
})
}
func TestWorker_HandleGetStats_WithMinerManager(t *testing.T) {
func TestWorker_HandleGetStats_WithMinerManager_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -1025,22 +978,19 @@ func TestWorker_HandleGetStats_WithMinerManager(t *testing.T) {
}
}
func TestWorker_HandleMessage_UnknownType(t *testing.T) {
func TestWorker_HandleMessage_UnknownType_Bad(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -1055,22 +1005,19 @@ func TestWorker_HandleMessage_UnknownType(t *testing.T) {
worker.HandleMessage(nil, msg)
}
func TestWorker_HandleDeploy_ProfileWithManager(t *testing.T) {
func TestWorker_HandleDeploy_ProfileWithManager_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -1113,22 +1060,19 @@ func TestWorker_HandleDeploy_ProfileWithManager(t *testing.T) {
}
}
func TestWorker_HandleDeploy_ProfileSaveFails(t *testing.T) {
func TestWorker_HandleDeploy_ProfileSaveFails_Bad(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -1162,22 +1106,19 @@ func TestWorker_HandleDeploy_ProfileSaveFails(t *testing.T) {
}
}
func TestWorker_HandleDeploy_MinerBundle(t *testing.T) {
func TestWorker_HandleDeploy_MinerBundle_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -1190,8 +1131,8 @@ func TestWorker_HandleDeploy_MinerBundle(t *testing.T) {
identity := nm.GetIdentity()
tmpDir := t.TempDir()
minerPath := filepath.Join(tmpDir, "test-miner")
os.WriteFile(minerPath, []byte("fake miner binary"), 0755)
minerPath := testJoinPath(tmpDir, "test-miner")
testWriteFile(t, minerPath, []byte("fake miner binary"), 0o755)
profileJSON := []byte(`{"pool":"test:3333"}`)
@ -1229,22 +1170,19 @@ func TestWorker_HandleDeploy_MinerBundle(t *testing.T) {
}
}
func TestWorker_HandleDeploy_FullBundle(t *testing.T) {
func TestWorker_HandleDeploy_FullBundle_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -1255,8 +1193,8 @@ func TestWorker_HandleDeploy_FullBundle(t *testing.T) {
identity := nm.GetIdentity()
tmpDir := t.TempDir()
minerPath := filepath.Join(tmpDir, "test-miner")
os.WriteFile(minerPath, []byte("miner binary"), 0755)
minerPath := testJoinPath(tmpDir, "test-miner")
testWriteFile(t, minerPath, []byte("miner binary"), 0o755)
sharedSecret := []byte("full-secret-key!")
bundlePassword := base64.StdEncoding.EncodeToString(sharedSecret)
@ -1288,22 +1226,19 @@ func TestWorker_HandleDeploy_FullBundle(t *testing.T) {
}
}
func TestWorker_HandleDeploy_MinerBundle_WithProfileManager(t *testing.T) {
func TestWorker_HandleDeploy_MinerBundle_WithProfileManager_Good(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, err := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, err := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
if err != nil {
t.Fatalf("failed to create node manager: %v", err)
}
if err := nm.GenerateIdentity("test-worker", RoleWorker); err != nil {
t.Fatalf("failed to generate identity: %v", err)
}
pr, err := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, err := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
if err != nil {
t.Fatalf("failed to create peer registry: %v", err)
}
@ -1317,8 +1252,8 @@ func TestWorker_HandleDeploy_MinerBundle_WithProfileManager(t *testing.T) {
identity := nm.GetIdentity()
tmpDir := t.TempDir()
minerPath := filepath.Join(tmpDir, "test-miner")
os.WriteFile(minerPath, []byte("miner binary"), 0755)
minerPath := testJoinPath(tmpDir, "test-miner")
testWriteFile(t, minerPath, []byte("miner binary"), 0o755)
profileJSON := []byte(`{"pool":"test:3333"}`)
sharedSecret := []byte("profile-secret!!")
@ -1352,17 +1287,14 @@ func TestWorker_HandleDeploy_MinerBundle_WithProfileManager(t *testing.T) {
}
}
func TestWorker_HandleDeploy_InvalidPayload(t *testing.T) {
func TestWorker_HandleDeploy_InvalidPayload_Bad(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
dir := t.TempDir()
nm, _ := NewNodeManagerWithPaths(
filepath.Join(dir, "private.key"),
filepath.Join(dir, "node.json"),
)
nm, _ := NewNodeManagerWithPaths(testNodeManagerPaths(dir))
nm.GenerateIdentity("test", RoleWorker)
pr, _ := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, _ := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
transport := NewTransport(nm, pr, DefaultTransportConfig())
worker := NewWorker(nm, transport)
worker.DataDir = t.TempDir()
@ -1377,16 +1309,17 @@ func TestWorker_HandleDeploy_InvalidPayload(t *testing.T) {
}
}
func TestWorker_HandleGetStats_NoIdentity(t *testing.T) {
func TestWorker_HandleGetStats_NoIdentity_Bad(t *testing.T) {
cleanup := setupTestEnv(t)
defer cleanup()
tmpDir := t.TempDir()
nm, _ := NewNodeManagerWithPaths(
filepath.Join(t.TempDir(), "priv.key"),
filepath.Join(t.TempDir(), "node.json"),
testJoinPath(tmpDir, "priv.key"),
testJoinPath(tmpDir, "node.json"),
)
// Don't generate identity
pr, _ := NewPeerRegistryWithPath(t.TempDir() + "/peers.json")
pr, _ := NewPeerRegistryWithPath(testJoinPath(t.TempDir(), "peers.json"))
transport := NewTransport(nm, pr, DefaultTransportConfig())
worker := NewWorker(nm, transport)
worker.DataDir = t.TempDir()
@ -1398,7 +1331,7 @@ func TestWorker_HandleGetStats_NoIdentity(t *testing.T) {
}
}
func TestWorker_HandleMessage_IntegrationViaWebSocket(t *testing.T) {
func TestWorker_HandleMessage_IntegrationViaWebSocket_Good(t *testing.T) {
// Test HandleMessage through real WebSocket -- exercises error response sending path
tp := setupTestTransportPair(t)
@ -1414,14 +1347,14 @@ func TestWorker_HandleMessage_IntegrationViaWebSocket(t *testing.T) {
// Send start_miner which will fail because no manager is set.
// The worker should send an error response via the connection.
err := controller.StartRemoteMiner(serverID, "xmrig", "", json.RawMessage(`{}`))
err := controller.StartRemoteMiner(serverID, "xmrig", "", RawMessage(`{}`))
// Should get an error back (either protocol error or operation failed)
if err == nil {
t.Error("expected error when worker has no miner manager")
}
}
func TestWorker_HandleMessage_GetStats_IntegrationViaWebSocket(t *testing.T) {
func TestWorker_HandleMessage_GetStats_IntegrationViaWebSocket_Good(t *testing.T) {
// HandleMessage dispatch for get_stats through real WebSocket
tp := setupTestTransportPair(t)

View file

@ -22,6 +22,8 @@ const (
)
// UEPSHeader represents the conscious routing metadata
//
// header := UEPSHeader{IntentID: 0x01}
type UEPSHeader struct {
Version uint8 // Default 0x09
CurrentLayer uint8
@ -31,12 +33,16 @@ type UEPSHeader struct {
}
// PacketBuilder helps construct a signed UEPS frame
//
// builder := NewBuilder(0x01, []byte("hello"))
type PacketBuilder struct {
Header UEPSHeader
Payload []byte
}
// NewBuilder creates a packet context for a specific intent
//
// builder := NewBuilder(0x01, []byte("hello"))
func NewBuilder(intentID uint8, payload []byte) *PacketBuilder {
return &PacketBuilder{
Header: UEPSHeader{

View file

@ -6,10 +6,10 @@ import (
"crypto/hmac"
"crypto/sha256"
"encoding/binary"
"errors"
"io"
"testing"
core "dappco.re/go/core"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -22,7 +22,7 @@ type failWriter struct {
func (f *failWriter) Write(p []byte) (int, error) {
if f.remaining <= 0 {
return 0, errors.New("write failed")
return 0, core.NewError("write failed")
}
f.remaining--
return len(p), nil
@ -30,7 +30,7 @@ func (f *failWriter) Write(p []byte) (int, error) {
// TestWriteTLV_TagWriteFails verifies writeTLV returns an error
// when the very first Write (the tag byte) fails.
func TestWriteTLV_TagWriteFails(t *testing.T) {
func TestPacketCoverage_WriteTLV_TagWriteFails_Bad(t *testing.T) {
w := &failWriter{remaining: 0}
err := writeTLV(w, TagVersion, []byte{0x09})
@ -40,7 +40,7 @@ func TestWriteTLV_TagWriteFails(t *testing.T) {
// TestWriteTLV_LengthWriteFails verifies writeTLV returns an error
// when the second Write (the length byte) fails.
func TestWriteTLV_LengthWriteFails(t *testing.T) {
func TestPacketCoverage_WriteTLV_LengthWriteFails_Bad(t *testing.T) {
w := &failWriter{remaining: 1}
err := writeTLV(w, TagVersion, []byte{0x09})
@ -50,7 +50,7 @@ func TestWriteTLV_LengthWriteFails(t *testing.T) {
// TestWriteTLV_ValueWriteFails verifies writeTLV returns an error
// when the third Write (the value bytes) fails.
func TestWriteTLV_ValueWriteFails(t *testing.T) {
func TestPacketCoverage_WriteTLV_ValueWriteFails_Bad(t *testing.T) {
w := &failWriter{remaining: 2}
err := writeTLV(w, TagVersion, []byte{0x09})
@ -81,7 +81,7 @@ func (r *errorAfterNReader) Read(p []byte) (int, error) {
// TestReadAndVerify_PayloadReadError exercises the error branch at
// reader.go:51-53 where io.ReadAll fails after the 0xFF tag byte
// has been successfully read.
func TestReadAndVerify_PayloadReadError(t *testing.T) {
func TestPacketCoverage_ReadAndVerify_PayloadReadError_Bad(t *testing.T) {
// Build a valid packet so we have genuine TLV headers + HMAC.
payload := []byte("coverage test")
builder := NewBuilder(0x20, payload)
@ -104,7 +104,7 @@ func TestReadAndVerify_PayloadReadError(t *testing.T) {
prefix := frame[:payloadTagIdx+1]
r := &errorAfterNReader{
data: prefix,
err: errors.New("connection reset"),
err: core.NewError("connection reset"),
}
_, err = ReadAndVerify(bufio.NewReader(r), testSecret)
@ -115,7 +115,7 @@ func TestReadAndVerify_PayloadReadError(t *testing.T) {
// TestReadAndVerify_PayloadReadError_EOF ensures that a truncated payload
// (missing bytes after TagPayload) is handled as an I/O error (UnexpectedEOF)
// because ReadAndVerify now uses io.ReadFull with the expected length prefix.
func TestReadAndVerify_PayloadReadError_EOF(t *testing.T) {
func TestPacketCoverage_ReadAndVerify_PayloadReadError_EOF_Bad(t *testing.T) {
payload := []byte("eof test")
builder := NewBuilder(0x20, payload)
frame, err := builder.MarshalAndSign(testSecret)
@ -141,7 +141,7 @@ func TestReadAndVerify_PayloadReadError_EOF(t *testing.T) {
// TestWriteTLV_AllWritesSucceed confirms the happy path still works
// after exercising all error branches — a simple sanity check using
// failWriter with enough remaining writes.
func TestWriteTLV_AllWritesSucceed(t *testing.T) {
func TestPacketCoverage_WriteTLV_AllWritesSucceed_Good(t *testing.T) {
var buf bytes.Buffer
err := writeTLV(&buf, TagVersion, []byte{0x09})
require.NoError(t, err)
@ -149,10 +149,9 @@ func TestWriteTLV_AllWritesSucceed(t *testing.T) {
assert.Equal(t, []byte{TagVersion, 0x00, 0x01, 0x09}, buf.Bytes())
}
// TestWriteTLV_FailWriterTable runs the three failure scenarios in
// a table-driven fashion for completeness.
func TestWriteTLV_FailWriterTable(t *testing.T) {
func TestPacketCoverage_WriteTLV_FailWriterTable_Bad(t *testing.T) {
tests := []struct {
name string
remaining int
@ -177,7 +176,7 @@ func TestWriteTLV_FailWriterTable(t *testing.T) {
// HMAC computation independently of the builder. This also serves as
// a cross-check that our errorAfterNReader is not accidentally
// corrupting the prefix bytes.
func TestReadAndVerify_ManualPacket_PayloadReadError(t *testing.T) {
func TestPacketCoverage_ReadAndVerify_ManualPacket_PayloadReadError_Bad(t *testing.T) {
payload := []byte("manual test")
// Build header TLVs

View file

@ -7,14 +7,15 @@ import (
"crypto/sha256"
"encoding/binary"
"io"
"strings"
"testing"
core "dappco.re/go/core"
)
// testSecret is a deterministic shared secret for reproducible tests.
var testSecret = []byte("test-shared-secret-32-bytes!!!!!")
func TestPacketBuilder_RoundTrip(t *testing.T) {
func TestPacket_Builder_RoundTrip_Ugly(t *testing.T) {
tests := []struct {
name string
intentID uint8
@ -84,7 +85,7 @@ func TestPacketBuilder_RoundTrip(t *testing.T) {
}
}
func TestHMACVerification_TamperedPayload(t *testing.T) {
func TestPacket_HMACVerification_TamperedPayload_Bad(t *testing.T) {
builder := NewBuilder(0x20, []byte("original payload"))
frame, err := builder.MarshalAndSign(testSecret)
if err != nil {
@ -100,12 +101,12 @@ func TestHMACVerification_TamperedPayload(t *testing.T) {
if err == nil {
t.Fatal("Expected HMAC mismatch error for tampered payload")
}
if !strings.Contains(err.Error(), "integrity violation") {
if !core.Contains(err.Error(), "integrity violation") {
t.Errorf("Expected integrity violation error, got: %v", err)
}
}
func TestHMACVerification_TamperedHeader(t *testing.T) {
func TestPacket_HMACVerification_TamperedHeader_Bad(t *testing.T) {
builder := NewBuilder(0x20, []byte("test payload"))
frame, err := builder.MarshalAndSign(testSecret)
if err != nil {
@ -122,12 +123,12 @@ func TestHMACVerification_TamperedHeader(t *testing.T) {
if err == nil {
t.Fatal("Expected HMAC mismatch error for tampered header")
}
if !strings.Contains(err.Error(), "integrity violation") {
if !core.Contains(err.Error(), "integrity violation") {
t.Errorf("Expected integrity violation error, got: %v", err)
}
}
func TestHMACVerification_WrongSharedSecret(t *testing.T) {
func TestPacket_HMACVerification_WrongSharedSecret_Bad(t *testing.T) {
builder := NewBuilder(0x20, []byte("secret data"))
frame, err := builder.MarshalAndSign([]byte("key-A-used-for-signing!!!!!!!!!!"))
if err != nil {
@ -138,12 +139,12 @@ func TestHMACVerification_WrongSharedSecret(t *testing.T) {
if err == nil {
t.Fatal("Expected HMAC mismatch error for wrong shared secret")
}
if !strings.Contains(err.Error(), "integrity violation") {
if !core.Contains(err.Error(), "integrity violation") {
t.Errorf("Expected integrity violation error, got: %v", err)
}
}
func TestEmptyPayload(t *testing.T) {
func TestPacket_EmptyPayload_Ugly(t *testing.T) {
tests := []struct {
name string
payload []byte
@ -175,7 +176,7 @@ func TestEmptyPayload(t *testing.T) {
}
}
func TestMaxThreatScoreBoundary(t *testing.T) {
func TestPacket_MaxThreatScoreBoundary_Ugly(t *testing.T) {
builder := NewBuilder(0x20, []byte("threat boundary"))
builder.Header.ThreatScore = 65535 // uint16 max
@ -194,7 +195,7 @@ func TestMaxThreatScoreBoundary(t *testing.T) {
}
}
func TestMissingHMACTag(t *testing.T) {
func TestPacket_MissingHMACTag_Bad(t *testing.T) {
// Craft a packet manually: header TLVs + payload tag, but no HMAC (0x06)
var buf bytes.Buffer
@ -214,24 +215,24 @@ func TestMissingHMACTag(t *testing.T) {
if err == nil {
t.Fatal("Expected 'missing HMAC' error")
}
if !strings.Contains(err.Error(), "missing HMAC") {
if !core.Contains(err.Error(), "missing HMAC") {
t.Errorf("Expected 'missing HMAC' error, got: %v", err)
}
}
func TestWriteTLV_ValueTooLarge(t *testing.T) {
func TestPacket_WriteTLV_ValueTooLarge_Bad(t *testing.T) {
var buf bytes.Buffer
oversized := make([]byte, 65536) // 1 byte over the 65535 limit
err := writeTLV(&buf, TagVersion, oversized)
if err == nil {
t.Fatal("Expected error for TLV value > 65535 bytes")
}
if !strings.Contains(err.Error(), "TLV value too large") {
if !core.Contains(err.Error(), "TLV value too large") {
t.Errorf("Expected 'TLV value too large' error, got: %v", err)
}
}
func TestTruncatedPacket(t *testing.T) {
func TestPacket_TruncatedPacket_Bad(t *testing.T) {
builder := NewBuilder(0x20, []byte("full payload"))
frame, err := builder.MarshalAndSign(testSecret)
if err != nil {
@ -256,7 +257,7 @@ func TestTruncatedPacket(t *testing.T) {
{
name: "CutMidHMAC",
cutAt: 20, // Somewhere inside the header TLVs or HMAC
wantErr: "", // Any io error
wantErr: "", // Any io error
},
}
@ -267,14 +268,14 @@ func TestTruncatedPacket(t *testing.T) {
if err == nil {
t.Fatal("Expected error for truncated packet")
}
if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) {
if tc.wantErr != "" && !core.Contains(err.Error(), tc.wantErr) {
t.Errorf("Expected error containing %q, got: %v", tc.wantErr, err)
}
})
}
}
func TestUnknownTLVTag(t *testing.T) {
func TestPacket_UnknownTLVTag_Bad(t *testing.T) {
// Build a valid packet, then inject an unknown tag before the HMAC.
// The unknown tag must be included in signedData for HMAC to pass.
payload := []byte("tagged payload")
@ -324,7 +325,7 @@ func TestUnknownTLVTag(t *testing.T) {
}
}
func TestNewBuilder_Defaults(t *testing.T) {
func TestPacket_NewBuilder_Defaults_Good(t *testing.T) {
builder := NewBuilder(0x20, []byte("data"))
if builder.Header.Version != 0x09 {
@ -344,7 +345,7 @@ func TestNewBuilder_Defaults(t *testing.T) {
}
}
func TestThreatScoreBoundaries(t *testing.T) {
func TestPacket_ThreatScoreBoundaries_Good(t *testing.T) {
tests := []struct {
name string
score uint16
@ -378,7 +379,7 @@ func TestThreatScoreBoundaries(t *testing.T) {
}
}
func TestWriteTLV_BoundaryLengths(t *testing.T) {
func TestPacket_WriteTLV_BoundaryLengths_Ugly(t *testing.T) {
tests := []struct {
name string
length int
@ -407,9 +408,8 @@ func TestWriteTLV_BoundaryLengths(t *testing.T) {
}
}
// TestReadAndVerify_EmptyReader verifies behaviour on completely empty input.
func TestReadAndVerify_EmptyReader(t *testing.T) {
func TestPacket_ReadAndVerify_EmptyReader_Ugly(t *testing.T) {
_, err := ReadAndVerify(bufio.NewReader(bytes.NewReader(nil)), testSecret)
if err == nil {
t.Fatal("Expected error for empty reader")

View file

@ -12,6 +12,8 @@ import (
)
// ParsedPacket holds the verified data
//
// packet := &ParsedPacket{Header: UEPSHeader{IntentID: 0x01}}
type ParsedPacket struct {
Header UEPSHeader
Payload []byte
@ -19,6 +21,8 @@ type ParsedPacket struct {
// ReadAndVerify reads a UEPS frame from the stream and validates the HMAC.
// It consumes the stream up to the end of the packet.
//
// packet, err := ReadAndVerify(reader, sharedSecret)
func ReadAndVerify(r *bufio.Reader, sharedSecret []byte) (*ParsedPacket, error) {
// Buffer to reconstruct the data for HMAC verification
var signedData bytes.Buffer