feat(proxy): add log tests, fix nil config panic, complete test triads
- Add log package tests (AccessLog and ShareLog Good/Bad/Ugly triads)
- Fix nil pointer panic in pool.NewStrategyFactory when config is nil
- Add Worker Hashrate Good/Bad/Ugly test triad
- Add ConfigWatcher Start Bad test (nonexistent path)
- Add FailoverStrategy CurrentPools Bad/Ugly, EnabledPools Good/Bad/Ugly, and NewStrategyFactory Good/Bad/Ugly test triads
- Improve doc comments on Stats, StatsSummary, Workers, WorkerRecord with AX-compliant usage examples

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
31a151d23c
commit
2470f1ac3d
7 changed files with 619 additions and 12 deletions
|
|
@ -66,6 +66,36 @@ func TestConfigWatcher_Start_Good(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestConfigWatcher_Start_Bad verifies a watcher with a nonexistent path does not panic
|
||||
// and does not call the onChange callback.
|
||||
//
|
||||
// watcher := proxy.NewConfigWatcher("/nonexistent/config.json", func(cfg *proxy.Config) {
|
||||
// // never called
|
||||
// })
|
||||
// watcher.Start()
|
||||
// watcher.Stop()
|
||||
func TestConfigWatcher_Start_Bad(t *testing.T) {
|
||||
called := make(chan struct{}, 1)
|
||||
watcher := NewConfigWatcher("/nonexistent/path/config.json", func(*Config) {
|
||||
select {
|
||||
case called <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
})
|
||||
if watcher == nil {
|
||||
t.Fatal("expected watcher even for a nonexistent path")
|
||||
}
|
||||
watcher.Start()
|
||||
defer watcher.Stop()
|
||||
|
||||
select {
|
||||
case <-called:
|
||||
t.Fatal("expected no callback for nonexistent config file")
|
||||
case <-time.After(2 * time.Second):
|
||||
// expected: no update fired
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigWatcher_Start_Ugly(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "config.json")
|
||||
|
|
|
|||
341
log/impl_test.go
Normal file
341
log/impl_test.go
Normal file
|
|
@ -0,0 +1,341 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"dappco.re/go/proxy"
|
||||
)
|
||||
|
||||
// TestAccessLog_OnLogin_Good verifies a CONNECT line is written with the expected columns.
|
||||
//
|
||||
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||
// al.OnLogin(proxy.Event{Miner: miner}) // writes "CONNECT 10.0.0.1 WALLET XMRig/6.21.0"
|
||||
func TestAccessLog_OnLogin_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "access.log")
|
||||
al := NewAccessLog(path)
|
||||
defer al.Close()
|
||||
|
||||
miner := newTestMiner(t)
|
||||
al.OnLogin(proxy.Event{Miner: miner})
|
||||
al.Close()
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("expected log file to exist: %v", err)
|
||||
}
|
||||
line := strings.TrimSpace(string(data))
|
||||
if !strings.Contains(line, "CONNECT") {
|
||||
t.Fatalf("expected CONNECT in log line, got %q", line)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAccessLog_OnLogin_Bad verifies a nil miner event does not panic or write anything.
|
||||
//
|
||||
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||
// al.OnLogin(proxy.Event{Miner: nil}) // no-op
|
||||
func TestAccessLog_OnLogin_Bad(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "access.log")
|
||||
al := NewAccessLog(path)
|
||||
defer al.Close()
|
||||
|
||||
al.OnLogin(proxy.Event{Miner: nil})
|
||||
al.Close()
|
||||
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
data, _ := os.ReadFile(path)
|
||||
if len(data) > 0 {
|
||||
t.Fatalf("expected no output for nil miner, got %q", string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestAccessLog_OnLogin_Ugly verifies a nil AccessLog does not panic.
|
||||
//
|
||||
// var al *log.AccessLog
|
||||
// al.OnLogin(proxy.Event{Miner: miner}) // no-op, no panic
|
||||
func TestAccessLog_OnLogin_Ugly(t *testing.T) {
|
||||
var al *AccessLog
|
||||
miner := newTestMiner(t)
|
||||
al.OnLogin(proxy.Event{Miner: miner})
|
||||
}
|
||||
|
||||
// TestAccessLog_OnClose_Good verifies a CLOSE line includes rx and tx byte counts.
|
||||
//
|
||||
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||
// al.OnClose(proxy.Event{Miner: miner}) // writes "CLOSE <ip> <user> rx=0 tx=0"
|
||||
func TestAccessLog_OnClose_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "access.log")
|
||||
al := NewAccessLog(path)
|
||||
defer al.Close()
|
||||
|
||||
miner := newTestMiner(t)
|
||||
al.OnClose(proxy.Event{Miner: miner})
|
||||
al.Close()
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("expected log file to exist: %v", err)
|
||||
}
|
||||
line := strings.TrimSpace(string(data))
|
||||
if !strings.Contains(line, "CLOSE") {
|
||||
t.Fatalf("expected CLOSE in log line, got %q", line)
|
||||
}
|
||||
if !strings.Contains(line, "rx=") {
|
||||
t.Fatalf("expected rx= in log line, got %q", line)
|
||||
}
|
||||
if !strings.Contains(line, "tx=") {
|
||||
t.Fatalf("expected tx= in log line, got %q", line)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAccessLog_OnClose_Bad verifies a nil miner close event produces no output.
|
||||
//
|
||||
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||
// al.OnClose(proxy.Event{Miner: nil}) // no-op
|
||||
func TestAccessLog_OnClose_Bad(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "access.log")
|
||||
al := NewAccessLog(path)
|
||||
defer al.Close()
|
||||
|
||||
al.OnClose(proxy.Event{Miner: nil})
|
||||
al.Close()
|
||||
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
data, _ := os.ReadFile(path)
|
||||
if len(data) > 0 {
|
||||
t.Fatalf("expected no output for nil miner, got %q", string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestAccessLog_OnClose_Ugly verifies close on an empty-path log is a no-op.
|
||||
//
|
||||
// al := log.NewAccessLog("")
|
||||
// al.OnClose(proxy.Event{Miner: miner}) // no-op, empty path
|
||||
func TestAccessLog_OnClose_Ugly(t *testing.T) {
|
||||
al := NewAccessLog("")
|
||||
defer al.Close()
|
||||
|
||||
miner := newTestMiner(t)
|
||||
al.OnClose(proxy.Event{Miner: miner})
|
||||
}
|
||||
|
||||
// TestShareLog_OnAccept_Good verifies an ACCEPT line is written with diff and latency.
|
||||
//
|
||||
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000, Latency: 82})
|
||||
func TestShareLog_OnAccept_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "shares.log")
|
||||
sl := NewShareLog(path)
|
||||
defer sl.Close()
|
||||
|
||||
miner := newTestMiner(t)
|
||||
sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000, Latency: 82})
|
||||
sl.Close()
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("expected log file to exist: %v", err)
|
||||
}
|
||||
line := strings.TrimSpace(string(data))
|
||||
if !strings.Contains(line, "ACCEPT") {
|
||||
t.Fatalf("expected ACCEPT in log line, got %q", line)
|
||||
}
|
||||
if !strings.Contains(line, "diff=100000") {
|
||||
t.Fatalf("expected diff=100000 in log line, got %q", line)
|
||||
}
|
||||
if !strings.Contains(line, "latency=82ms") {
|
||||
t.Fatalf("expected latency=82ms in log line, got %q", line)
|
||||
}
|
||||
}
|
||||
|
||||
// TestShareLog_OnAccept_Bad verifies a nil miner accept event produces no output.
|
||||
//
|
||||
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||
// sl.OnAccept(proxy.Event{Miner: nil}) // no-op
|
||||
func TestShareLog_OnAccept_Bad(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "shares.log")
|
||||
sl := NewShareLog(path)
|
||||
defer sl.Close()
|
||||
|
||||
sl.OnAccept(proxy.Event{Miner: nil, Diff: 100000})
|
||||
sl.Close()
|
||||
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
data, _ := os.ReadFile(path)
|
||||
if len(data) > 0 {
|
||||
t.Fatalf("expected no output for nil miner, got %q", string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestShareLog_OnAccept_Ugly verifies a nil ShareLog does not panic.
|
||||
//
|
||||
// var sl *log.ShareLog
|
||||
// sl.OnAccept(proxy.Event{Miner: miner}) // no-op, no panic
|
||||
func TestShareLog_OnAccept_Ugly(t *testing.T) {
|
||||
var sl *ShareLog
|
||||
miner := newTestMiner(t)
|
||||
sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000})
|
||||
}
|
||||
|
||||
// TestShareLog_OnReject_Good verifies a REJECT line is written with the rejection reason.
|
||||
//
|
||||
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||
// sl.OnReject(proxy.Event{Miner: miner, Error: "Low difficulty share"})
|
||||
func TestShareLog_OnReject_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "shares.log")
|
||||
sl := NewShareLog(path)
|
||||
defer sl.Close()
|
||||
|
||||
miner := newTestMiner(t)
|
||||
sl.OnReject(proxy.Event{Miner: miner, Error: "Low difficulty share"})
|
||||
sl.Close()
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("expected log file to exist: %v", err)
|
||||
}
|
||||
line := strings.TrimSpace(string(data))
|
||||
if !strings.Contains(line, "REJECT") {
|
||||
t.Fatalf("expected REJECT in log line, got %q", line)
|
||||
}
|
||||
if !strings.Contains(line, "Low difficulty share") {
|
||||
t.Fatalf("expected rejection reason in log line, got %q", line)
|
||||
}
|
||||
}
|
||||
|
||||
// TestShareLog_OnReject_Bad verifies a nil miner reject event produces no output.
|
||||
//
|
||||
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||
// sl.OnReject(proxy.Event{Miner: nil}) // no-op
|
||||
func TestShareLog_OnReject_Bad(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "shares.log")
|
||||
sl := NewShareLog(path)
|
||||
defer sl.Close()
|
||||
|
||||
sl.OnReject(proxy.Event{Miner: nil, Error: "Low difficulty share"})
|
||||
sl.Close()
|
||||
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
data, _ := os.ReadFile(path)
|
||||
if len(data) > 0 {
|
||||
t.Fatalf("expected no output for nil miner, got %q", string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestShareLog_OnReject_Ugly verifies an empty-path ShareLog silently discards the reject line.
|
||||
//
|
||||
// sl := log.NewShareLog("")
|
||||
// sl.OnReject(proxy.Event{Miner: miner, Error: "reason"}) // no-op, empty path
|
||||
func TestShareLog_OnReject_Ugly(t *testing.T) {
|
||||
sl := NewShareLog("")
|
||||
defer sl.Close()
|
||||
|
||||
miner := newTestMiner(t)
|
||||
sl.OnReject(proxy.Event{Miner: miner, Error: "reason"})
|
||||
}
|
||||
|
||||
// TestAccessLog_Close_Good verifies Close releases the file handle and is safe to call twice.
|
||||
//
|
||||
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||
// al.OnLogin(proxy.Event{Miner: miner})
|
||||
// al.Close()
|
||||
// al.Close() // double close is safe
|
||||
func TestAccessLog_Close_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "access.log")
|
||||
al := NewAccessLog(path)
|
||||
|
||||
miner := newTestMiner(t)
|
||||
al.OnLogin(proxy.Event{Miner: miner})
|
||||
al.Close()
|
||||
al.Close()
|
||||
}
|
||||
|
||||
// TestAccessLog_Close_Bad verifies Close on a nil AccessLog does not panic.
|
||||
//
|
||||
// var al *log.AccessLog
|
||||
// al.Close() // no-op, no panic
|
||||
func TestAccessLog_Close_Bad(t *testing.T) {
|
||||
var al *AccessLog
|
||||
al.Close()
|
||||
}
|
||||
|
||||
// TestAccessLog_Close_Ugly verifies Close on a never-opened log does not panic.
|
||||
//
|
||||
// al := log.NewAccessLog("/nonexistent/dir/access.log")
|
||||
// al.Close() // no file was ever opened
|
||||
func TestAccessLog_Close_Ugly(t *testing.T) {
|
||||
al := NewAccessLog("/nonexistent/dir/access.log")
|
||||
al.Close()
|
||||
}
|
||||
|
||||
// TestShareLog_Close_Good verifies Close releases the file handle and is safe to call twice.
|
||||
//
|
||||
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 1000})
|
||||
// sl.Close()
|
||||
// sl.Close() // double close is safe
|
||||
func TestShareLog_Close_Good(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "shares.log")
|
||||
sl := NewShareLog(path)
|
||||
|
||||
miner := newTestMiner(t)
|
||||
sl.OnAccept(proxy.Event{Miner: miner, Diff: 1000})
|
||||
sl.Close()
|
||||
sl.Close()
|
||||
}
|
||||
|
||||
// TestShareLog_Close_Bad verifies Close on a nil ShareLog does not panic.
|
||||
//
|
||||
// var sl *log.ShareLog
|
||||
// sl.Close() // no-op, no panic
|
||||
func TestShareLog_Close_Bad(t *testing.T) {
|
||||
var sl *ShareLog
|
||||
sl.Close()
|
||||
}
|
||||
|
||||
// TestShareLog_Close_Ugly verifies Close on a never-opened log does not panic.
|
||||
//
|
||||
// sl := log.NewShareLog("/nonexistent/dir/shares.log")
|
||||
// sl.Close() // no file was ever opened
|
||||
func TestShareLog_Close_Ugly(t *testing.T) {
|
||||
sl := NewShareLog("/nonexistent/dir/shares.log")
|
||||
sl.Close()
|
||||
}
|
||||
|
||||
// newTestMiner creates a minimal miner for log testing using a net.Pipe connection.
|
||||
func newTestMiner(t *testing.T) *proxy.Miner {
|
||||
t.Helper()
|
||||
client, server := net.Pipe()
|
||||
t.Cleanup(func() {
|
||||
_ = client.Close()
|
||||
_ = server.Close()
|
||||
})
|
||||
miner := proxy.NewMiner(client, 3333, nil)
|
||||
miner.SetID(1)
|
||||
return miner
|
||||
}
|
||||
|
||||
// pipeAddr is a stub net.Addr for pipe-backed test connections.
//
// NOTE(review): pipeAddr/pipeConn are not referenced by the tests in this
// file (newTestMiner uses net.Pipe directly) — confirm they are still needed.
type pipeAddr struct{}

func (pipeAddr) Network() string { return "pipe" }
func (pipeAddr) String() string  { return "pipe" }

// pipeConn adapts an *os.File into a net.Conn-shaped value for tests that
// need a closeable socket. Deadline setters are accepted and ignored.
type pipeConn struct {
	*os.File
}

func (c *pipeConn) RemoteAddr() net.Addr             { return pipeAddr{} }
func (c *pipeConn) LocalAddr() net.Addr              { return pipeAddr{} }
func (c *pipeConn) SetDeadline(time.Time) error      { return nil }
func (c *pipeConn) SetReadDeadline(time.Time) error  { return nil }
func (c *pipeConn) SetWriteDeadline(time.Time) error { return nil }
|
||||
|
|
@ -22,7 +22,11 @@ import (
|
|||
// strategy := factory(listener)
|
||||
func NewStrategyFactory(config *proxy.Config) StrategyFactory {
|
||||
return func(listener StratumListener) Strategy {
|
||||
return NewFailoverStrategy(config.Pools, listener, config)
|
||||
var pools []proxy.PoolConfig
|
||||
if config != nil {
|
||||
pools = config.Pools
|
||||
}
|
||||
return NewFailoverStrategy(pools, listener, config)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -6,6 +6,10 @@ import (
|
|||
"dappco.re/go/proxy"
|
||||
)
|
||||
|
||||
// TestFailoverStrategy_CurrentPools_Good verifies that currentPools follows the live config.
|
||||
//
|
||||
// strategy := pool.NewFailoverStrategy(cfg.Pools, nil, cfg)
|
||||
// strategy.currentPools() // returns cfg.Pools
|
||||
func TestFailoverStrategy_CurrentPools_Good(t *testing.T) {
|
||||
cfg := &proxy.Config{
|
||||
Mode: "nicehash",
|
||||
|
|
@ -25,3 +29,140 @@ func TestFailoverStrategy_CurrentPools_Good(t *testing.T) {
|
|||
t.Fatalf("expected current pools to follow config reload, got %+v", got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFailoverStrategy_CurrentPools_Bad verifies that a nil strategy returns an empty pool list.
|
||||
//
|
||||
// var strategy *pool.FailoverStrategy
|
||||
// strategy.currentPools() // nil
|
||||
func TestFailoverStrategy_CurrentPools_Bad(t *testing.T) {
|
||||
var strategy *FailoverStrategy
|
||||
pools := strategy.currentPools()
|
||||
if pools != nil {
|
||||
t.Fatalf("expected nil pools from nil strategy, got %+v", pools)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFailoverStrategy_CurrentPools_Ugly verifies that a strategy with a nil config
|
||||
// falls back to the pools passed at construction time.
|
||||
//
|
||||
// strategy := pool.NewFailoverStrategy(initialPools, nil, nil)
|
||||
// strategy.currentPools() // returns initialPools
|
||||
func TestFailoverStrategy_CurrentPools_Ugly(t *testing.T) {
|
||||
initialPools := []proxy.PoolConfig{
|
||||
{URL: "fallback.example:3333", Enabled: true},
|
||||
{URL: "fallback.example:4444", Enabled: false},
|
||||
}
|
||||
strategy := NewFailoverStrategy(initialPools, nil, nil)
|
||||
|
||||
got := strategy.currentPools()
|
||||
if len(got) != 2 {
|
||||
t.Fatalf("expected 2 pools from constructor fallback, got %d", len(got))
|
||||
}
|
||||
if got[0].URL != "fallback.example:3333" {
|
||||
t.Fatalf("expected constructor pool URL, got %q", got[0].URL)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFailoverStrategy_EnabledPools_Good verifies that only enabled pools are selected.
|
||||
//
|
||||
// enabled := pool.enabledPools(pools) // filters to enabled-only
|
||||
func TestFailoverStrategy_EnabledPools_Good(t *testing.T) {
|
||||
pools := []proxy.PoolConfig{
|
||||
{URL: "active.example:3333", Enabled: true},
|
||||
{URL: "disabled.example:3333", Enabled: false},
|
||||
{URL: "active2.example:3333", Enabled: true},
|
||||
}
|
||||
got := enabledPools(pools)
|
||||
if len(got) != 2 {
|
||||
t.Fatalf("expected 2 enabled pools, got %d", len(got))
|
||||
}
|
||||
if got[0].URL != "active.example:3333" || got[1].URL != "active2.example:3333" {
|
||||
t.Fatalf("expected only enabled pool URLs, got %+v", got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFailoverStrategy_EnabledPools_Bad verifies that an empty pool list returns empty.
|
||||
//
|
||||
// pool.enabledPools(nil) // empty
|
||||
func TestFailoverStrategy_EnabledPools_Bad(t *testing.T) {
|
||||
got := enabledPools(nil)
|
||||
if len(got) != 0 {
|
||||
t.Fatalf("expected 0 pools from nil input, got %d", len(got))
|
||||
}
|
||||
}
|
||||
|
||||
// TestFailoverStrategy_EnabledPools_Ugly verifies that all-disabled pools return empty.
|
||||
//
|
||||
// pool.enabledPools([]proxy.PoolConfig{{Enabled: false}}) // empty
|
||||
func TestFailoverStrategy_EnabledPools_Ugly(t *testing.T) {
|
||||
pools := []proxy.PoolConfig{
|
||||
{URL: "a.example:3333", Enabled: false},
|
||||
{URL: "b.example:3333", Enabled: false},
|
||||
}
|
||||
got := enabledPools(pools)
|
||||
if len(got) != 0 {
|
||||
t.Fatalf("expected 0 enabled pools when all disabled, got %d", len(got))
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewStrategyFactory_Good verifies the factory creates a strategy connected to the config.
|
||||
//
|
||||
// factory := pool.NewStrategyFactory(cfg)
|
||||
// strategy := factory(listener) // creates FailoverStrategy
|
||||
func TestNewStrategyFactory_Good(t *testing.T) {
|
||||
cfg := &proxy.Config{
|
||||
Mode: "nicehash",
|
||||
Workers: proxy.WorkersByRigID,
|
||||
Bind: []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||
Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||
}
|
||||
factory := NewStrategyFactory(cfg)
|
||||
if factory == nil {
|
||||
t.Fatal("expected a non-nil factory")
|
||||
}
|
||||
strategy := factory(nil)
|
||||
if strategy == nil {
|
||||
t.Fatal("expected a non-nil strategy from factory")
|
||||
}
|
||||
if strategy.IsActive() {
|
||||
t.Fatal("expected new strategy to be inactive before connecting")
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewStrategyFactory_Bad verifies a factory created with nil config does not panic.
|
||||
//
|
||||
// factory := pool.NewStrategyFactory(nil)
|
||||
// strategy := factory(nil)
|
||||
func TestNewStrategyFactory_Bad(t *testing.T) {
|
||||
factory := NewStrategyFactory(nil)
|
||||
strategy := factory(nil)
|
||||
if strategy == nil {
|
||||
t.Fatal("expected a non-nil strategy even from nil config")
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewStrategyFactory_Ugly verifies the factory forwards the correct pool list to the strategy.
|
||||
//
|
||||
// cfg.Pools = append(cfg.Pools, proxy.PoolConfig{URL: "added.example:3333", Enabled: true})
|
||||
// strategy := factory(nil)
|
||||
// // strategy sees the updated pools via the shared config pointer
|
||||
func TestNewStrategyFactory_Ugly(t *testing.T) {
|
||||
cfg := &proxy.Config{
|
||||
Mode: "nicehash",
|
||||
Workers: proxy.WorkersByRigID,
|
||||
Bind: []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||
Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||
}
|
||||
factory := NewStrategyFactory(cfg)
|
||||
cfg.Pools = append(cfg.Pools, proxy.PoolConfig{URL: "added.example:3333", Enabled: true})
|
||||
|
||||
strategy := factory(nil)
|
||||
fs, ok := strategy.(*FailoverStrategy)
|
||||
if !ok {
|
||||
t.Fatal("expected FailoverStrategy")
|
||||
}
|
||||
pools := fs.currentPools()
|
||||
if len(pools) != 2 {
|
||||
t.Fatalf("expected 2 pools after config update, got %d", len(pools))
|
||||
}
|
||||
}
|
||||
|
|
|
|||
18
stats.go
18
stats.go
|
|
@ -6,11 +6,14 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
// stats := proxy.NewStats()
|
||||
// bus.Subscribe(proxy.EventAccept, stats.OnAccept)
|
||||
// bus.Subscribe(proxy.EventReject, stats.OnReject)
|
||||
// stats.Tick()
|
||||
// _ = stats.Summary()
|
||||
// Stats tracks global proxy metrics. Hot-path counters are atomic. Hashrate windows
|
||||
// use a ring buffer per window size, advanced by Tick().
|
||||
//
|
||||
// stats := proxy.NewStats()
|
||||
// bus.Subscribe(proxy.EventAccept, stats.OnAccept)
|
||||
// bus.Subscribe(proxy.EventReject, stats.OnReject)
|
||||
// stats.Tick()
|
||||
// summary := stats.Summary()
|
||||
type Stats struct {
|
||||
accepted atomic.Uint64
|
||||
rejected atomic.Uint64
|
||||
|
|
@ -45,7 +48,10 @@ type tickWindow struct {
|
|||
size int // window size in seconds = len(buckets)
|
||||
}
|
||||
|
||||
// summary := proxy.NewStats().Summary()
|
||||
// StatsSummary is the serialisable snapshot returned by Summary().
|
||||
//
|
||||
// summary := proxy.NewStats().Summary()
|
||||
// _ = summary.Hashrate[0] // 60-second window H/s
|
||||
type StatsSummary struct {
|
||||
Accepted uint64 `json:"accepted"`
|
||||
Rejected uint64 `json:"rejected"`
|
||||
|
|
|
|||
15
worker.go
15
worker.go
|
|
@ -5,9 +5,12 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
// workers := proxy.NewWorkers(proxy.WorkersByRigID, bus)
|
||||
// workers.OnLogin(proxy.Event{Miner: &proxy.Miner{rigID: "rig-alpha", user: "WALLET", ip: "10.0.0.1"}})
|
||||
// _ = workers.List()
|
||||
// Workers maintains per-worker aggregate stats. Workers are identified by name,
|
||||
// derived from the miner's login fields per WorkersMode.
|
||||
//
|
||||
// workers := proxy.NewWorkers(proxy.WorkersByRigID, bus)
|
||||
// workers.OnLogin(proxy.Event{Miner: miner})
|
||||
// records := workers.List()
|
||||
type Workers struct {
|
||||
mode WorkersMode
|
||||
entries []WorkerRecord // ordered by first-seen (stable)
|
||||
|
|
@ -17,8 +20,10 @@ type Workers struct {
|
|||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// record := proxy.WorkerRecord{Name: "rig-alpha"}
|
||||
// _ = record.Hashrate(60)
|
||||
// WorkerRecord is the per-identity aggregate with rolling hashrate windows.
|
||||
//
|
||||
// record := proxy.WorkerRecord{Name: "rig-alpha", Accepted: 10, Hashes: 500000}
|
||||
// hr60 := record.Hashrate(60) // H/s over the last 60 seconds
|
||||
type WorkerRecord struct {
|
||||
Name string
|
||||
LastIP string
|
||||
|
|
|
|||
|
|
@ -48,6 +48,86 @@ func TestWorker_NewWorkers_Ugly(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestWorker_Hashrate_Good verifies that recording an accepted share produces a nonzero
|
||||
// hashrate reading from the 60-second window.
|
||||
//
|
||||
// record := proxy.WorkerRecord{}
|
||||
// record.Hashrate(60) // > 0.0 after an accepted share
|
||||
func TestWorker_Hashrate_Good(t *testing.T) {
|
||||
bus := NewEventBus()
|
||||
workers := NewWorkers(WorkersByUser, bus)
|
||||
|
||||
miner := &Miner{id: 100, user: "hashtest", ip: "10.0.0.10"}
|
||||
bus.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||
bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 50000})
|
||||
|
||||
records := workers.List()
|
||||
if len(records) != 1 {
|
||||
t.Fatalf("expected one worker record, got %d", len(records))
|
||||
}
|
||||
hr := records[0].Hashrate(60)
|
||||
if hr <= 0 {
|
||||
t.Fatalf("expected nonzero hashrate for 60-second window after accept, got %f", hr)
|
||||
}
|
||||
}
|
||||
|
||||
// TestWorker_Hashrate_Bad verifies that an invalid window size returns 0.
|
||||
//
|
||||
// record := proxy.WorkerRecord{}
|
||||
// record.Hashrate(999) // 0.0 (unsupported window)
|
||||
func TestWorker_Hashrate_Bad(t *testing.T) {
|
||||
bus := NewEventBus()
|
||||
workers := NewWorkers(WorkersByUser, bus)
|
||||
|
||||
miner := &Miner{id: 101, user: "hashtest-bad", ip: "10.0.0.11"}
|
||||
bus.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||
bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 50000})
|
||||
|
||||
records := workers.List()
|
||||
if len(records) != 1 {
|
||||
t.Fatalf("expected one worker record, got %d", len(records))
|
||||
}
|
||||
hr := records[0].Hashrate(999)
|
||||
if hr != 0 {
|
||||
t.Fatalf("expected zero hashrate for unsupported window, got %f", hr)
|
||||
}
|
||||
hrZero := records[0].Hashrate(0)
|
||||
if hrZero != 0 {
|
||||
t.Fatalf("expected zero hashrate for zero window, got %f", hrZero)
|
||||
}
|
||||
hrNeg := records[0].Hashrate(-1)
|
||||
if hrNeg != 0 {
|
||||
t.Fatalf("expected zero hashrate for negative window, got %f", hrNeg)
|
||||
}
|
||||
}
|
||||
|
||||
// TestWorker_Hashrate_Ugly verifies that calling Hashrate on a nil record returns 0
|
||||
// and that a worker with no accepts also returns 0.
|
||||
//
|
||||
// var record *proxy.WorkerRecord
|
||||
// record.Hashrate(60) // 0.0
|
||||
func TestWorker_Hashrate_Ugly(t *testing.T) {
|
||||
var nilRecord *WorkerRecord
|
||||
if hr := nilRecord.Hashrate(60); hr != 0 {
|
||||
t.Fatalf("expected zero hashrate for nil record, got %f", hr)
|
||||
}
|
||||
|
||||
bus := NewEventBus()
|
||||
workers := NewWorkers(WorkersByUser, bus)
|
||||
|
||||
miner := &Miner{id: 102, user: "hashtest-ugly", ip: "10.0.0.12"}
|
||||
bus.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||
|
||||
records := workers.List()
|
||||
if len(records) != 1 {
|
||||
t.Fatalf("expected one worker record, got %d", len(records))
|
||||
}
|
||||
hr := records[0].Hashrate(60)
|
||||
if hr != 0 {
|
||||
t.Fatalf("expected zero hashrate for worker with no accepts, got %f", hr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWorker_CustomDiffOrdering_Good(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Mode: "nicehash",
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue