feat(proxy): implement RFC test coverage and AX usage-example comments

Add missing Good/Bad/Ugly test triplets per RFC section 22:
- stats_test.go: OnAccept/OnReject/Tick/OnLogin/OnClose tests with
  concurrency race test and top-10 diff slot verification
- ratelimit_test.go: full Good/Bad/Ugly set including ban persistence
  and disabled-limiter edge case
- customdiff_test.go: renamed to Apply_Good/Bad/Ugly convention per RFC
- storage_test.go: full Add_Good/Bad/Ugly set including 256-slot fill,
  overflow rejection, and dead-slot reclamation via SetJob
- job_test.go: added Good/Bad/Ugly for BlobWithFixedByte, DifficultyFromTarget,
  and IsValid

Add Miner.Diff() public getter for the last difficulty sent to miner.

Add AX-compliant usage-example comments (principle 2) to all Miner
accessors, Proxy query methods, EffectiveShareDifficulty, targetFromDifficulty,
MinerSnapshot, and RateLimiter.IsActive.

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
Snider 2026-04-05 07:02:54 +01:00
parent 6f0747abc2
commit 31a151d23c
7 changed files with 634 additions and 32 deletions

View file

@ -251,6 +251,9 @@ func (j Job) DifficultyFromTarget() uint64 {
return uint64(math.MaxUint32) / uint64(target) return uint64(math.MaxUint32) / uint64(target)
} }
// targetFromDifficulty converts a difficulty into the 8-char little-endian hex target.
//
// target := targetFromDifficulty(10000) // "b88d0600"
func targetFromDifficulty(diff uint64) string { func targetFromDifficulty(diff uint64) string {
if diff <= 1 { if diff <= 1 {
return "ffffffff" return "ffffffff"
@ -268,6 +271,10 @@ func targetFromDifficulty(diff uint64) string {
return hex.EncodeToString(raw[:]) return hex.EncodeToString(raw[:])
} }
// EffectiveShareDifficulty returns the share difficulty capped by the miner's custom diff.
// If no custom diff is set or the pool diff is already lower, the pool diff is returned.
//
// diff := proxy.EffectiveShareDifficulty(job, miner) // 25000 when customDiff < poolDiff
func EffectiveShareDifficulty(job Job, miner *Miner) uint64 { func EffectiveShareDifficulty(job Job, miner *Miner) uint64 {
diff := job.DifficultyFromTarget() diff := job.DifficultyFromTarget()
if miner == nil || miner.customDiff == 0 || diff == 0 || diff <= miner.customDiff { if miner == nil || miner.customDiff == 0 || diff == 0 || diff <= miner.customDiff {

View file

@ -2,7 +2,12 @@ package proxy
import "testing" import "testing"
func TestCustomDiff_OnLogin(t *testing.T) { // TestCustomDiff_Apply_Good verifies a user suffix "+50000" sets customDiff and strips the suffix.
//
// cd := proxy.NewCustomDiff(10000)
// cd.Apply(&proxy.Miner{user: "WALLET+50000"})
// // miner.User() == "WALLET", miner.customDiff == 50000
func TestCustomDiff_Apply_Good(t *testing.T) {
cd := NewCustomDiff(10000) cd := NewCustomDiff(10000)
miner := &Miner{user: "WALLET+50000"} miner := &Miner{user: "WALLET+50000"}
cd.OnLogin(Event{Miner: miner}) cd.OnLogin(Event{Miner: miner})
@ -12,37 +17,62 @@ func TestCustomDiff_OnLogin(t *testing.T) {
if miner.customDiff != 50000 { if miner.customDiff != 50000 {
t.Fatalf("expected custom diff 50000, got %d", miner.customDiff) t.Fatalf("expected custom diff 50000, got %d", miner.customDiff)
} }
}
miner = &Miner{user: "WALLET+abc"} // TestCustomDiff_Apply_Bad verifies "+abc" (non-numeric) leaves user unchanged, customDiff=0.
//
// cd := proxy.NewCustomDiff(10000)
// cd.Apply(&proxy.Miner{user: "WALLET+abc"})
// // miner.User() == "WALLET+abc", miner.customDiff == 0
func TestCustomDiff_Apply_Bad(t *testing.T) {
cd := NewCustomDiff(10000)
miner := &Miner{user: "WALLET+abc"}
cd.OnLogin(Event{Miner: miner}) cd.OnLogin(Event{Miner: miner})
if miner.User() != "WALLET+abc" { if miner.User() != "WALLET+abc" {
t.Fatalf("expected invalid suffix to remain unchanged") t.Fatalf("expected invalid suffix to remain unchanged, got %q", miner.User())
} }
if miner.customDiff != 0 { if miner.customDiff != 0 {
t.Fatalf("expected invalid suffix to disable custom diff, got %d", miner.customDiff) t.Fatalf("expected invalid suffix to disable custom diff, got %d", miner.customDiff)
} }
}
miner = &Miner{user: "WALLET"} // TestCustomDiff_Apply_Ugly verifies globalDiff=10000 is used when no suffix is present.
//
// cd := proxy.NewCustomDiff(10000)
// cd.Apply(&proxy.Miner{user: "WALLET"})
// // miner.customDiff == 10000 (falls back to global)
func TestCustomDiff_Apply_Ugly(t *testing.T) {
cd := NewCustomDiff(10000)
miner := &Miner{user: "WALLET"}
cd.OnLogin(Event{Miner: miner}) cd.OnLogin(Event{Miner: miner})
if miner.customDiff != 10000 { if miner.customDiff != 10000 {
t.Fatalf("expected global diff fallback, got %d", miner.customDiff) t.Fatalf("expected global diff fallback 10000, got %d", miner.customDiff)
} }
} }
func TestCustomDiff_OnLogin_Ugly(t *testing.T) { // TestCustomDiff_OnLogin_NonNumericSuffix verifies a non-decimal suffix after plus is ignored.
//
// cd := proxy.NewCustomDiff(10000)
// cd.OnLogin(proxy.Event{Miner: &proxy.Miner{user: "WALLET+50000extra"}})
func TestCustomDiff_OnLogin_NonNumericSuffix(t *testing.T) {
cd := NewCustomDiff(10000) cd := NewCustomDiff(10000)
miner := &Miner{user: "WALLET+50000extra"} miner := &Miner{user: "WALLET+50000extra"}
cd.OnLogin(Event{Miner: miner}) cd.OnLogin(Event{Miner: miner})
if miner.User() != "WALLET+50000extra" { if miner.User() != "WALLET+50000extra" {
t.Fatalf("expected non-suffix plus segment to remain unchanged, got %q", miner.User()) t.Fatalf("expected non-numeric suffix plus segment to remain unchanged, got %q", miner.User())
} }
if miner.customDiff != 0 { if miner.customDiff != 0 {
t.Fatalf("expected invalid suffix to disable custom diff, got %d", miner.customDiff) t.Fatalf("expected invalid suffix to disable custom diff, got %d", miner.customDiff)
} }
} }
// TestEffectiveShareDifficulty_CustomDiffCapsPoolDifficulty verifies the cap applied by custom diff.
//
// job := proxy.Job{Target: "01000000"}
// miner := &proxy.Miner{customDiff: 25000}
// proxy.EffectiveShareDifficulty(job, miner) // 25000 (capped)
func TestEffectiveShareDifficulty_CustomDiffCapsPoolDifficulty(t *testing.T) { func TestEffectiveShareDifficulty_CustomDiffCapsPoolDifficulty(t *testing.T) {
job := Job{Target: "01000000"} job := Job{Target: "01000000"}
miner := &Miner{customDiff: 25000} miner := &Miner{customDiff: 25000}

View file

@ -5,7 +5,11 @@ import (
"testing" "testing"
) )
func TestJob_BlobWithFixedByte(t *testing.T) { // TestJob_BlobWithFixedByte_Good verifies nonce patching on a full 160-char blob.
//
// job := proxy.Job{Blob: strings.Repeat("0", 160)}
// result := job.BlobWithFixedByte(0x2A) // chars 78-79 become "2a"
func TestJob_BlobWithFixedByte_Good(t *testing.T) {
job := Job{Blob: strings.Repeat("0", 160)} job := Job{Blob: strings.Repeat("0", 160)}
got := job.BlobWithFixedByte(0x2A) got := job.BlobWithFixedByte(0x2A)
if len(got) != 160 { if len(got) != 160 {
@ -16,16 +20,97 @@ func TestJob_BlobWithFixedByte(t *testing.T) {
} }
} }
func TestJob_DifficultyFromTarget(t *testing.T) { // TestJob_BlobWithFixedByte_Bad verifies a short blob is returned unchanged.
//
// job := proxy.Job{Blob: "0000"}
// result := job.BlobWithFixedByte(0x2A) // too short, returned as-is
func TestJob_BlobWithFixedByte_Bad(t *testing.T) {
	// A blob shorter than the nonce offset cannot be patched; it must come
	// back exactly as supplied.
	const undersized = "0000"
	job := Job{Blob: undersized}
	got := job.BlobWithFixedByte(0x2A)
	if got != undersized {
		t.Fatalf("expected short blob to be returned unchanged, got %q", got)
	}
}
// TestJob_BlobWithFixedByte_Ugly verifies fixedByte 0xFF renders as lowercase "ff".
//
// job := proxy.Job{Blob: strings.Repeat("0", 160)}
// result := job.BlobWithFixedByte(0xFF) // chars 78-79 become "ff" (not "FF")
func TestJob_BlobWithFixedByte_Ugly(t *testing.T) {
	job := Job{Blob: strings.Repeat("0", 160)}
	got := job.BlobWithFixedByte(0xFF)
	// Check the length before slicing: indexing got[78:80] on a short result
	// would panic and mask the real failure.
	if len(got) != 160 {
		t.Fatalf("expected blob length preserved, got %d", len(got))
	}
	if got[78:80] != "ff" {
		t.Fatalf("expected lowercase 'ff', got %q", got[78:80])
	}
}
// TestJob_DifficultyFromTarget_Good verifies a known target converts to the expected difficulty.
//
// job := proxy.Job{Target: "b88d0600"}
// diff := job.DifficultyFromTarget() // 10000
func TestJob_DifficultyFromTarget_Good(t *testing.T) {
job := Job{Target: "b88d0600"} job := Job{Target: "b88d0600"}
if got := job.DifficultyFromTarget(); got != 10000 { if got := job.DifficultyFromTarget(); got != 10000 {
t.Fatalf("expected difficulty 10000, got %d", got) t.Fatalf("expected difficulty 10000, got %d", got)
} }
} }
func TestJob_DifficultyFromTarget_MaxTarget(t *testing.T) { // TestJob_DifficultyFromTarget_Bad verifies a zero target produces difficulty 0 without panic.
//
// job := proxy.Job{Target: "00000000"}
// diff := job.DifficultyFromTarget() // 0 (no divide-by-zero)
func TestJob_DifficultyFromTarget_Bad(t *testing.T) {
	// An all-zero target must yield difficulty 0 rather than divide by zero.
	zeroTarget := Job{Target: "00000000"}
	got := zeroTarget.DifficultyFromTarget()
	if got != 0 {
		t.Fatalf("expected difficulty 0 for zero target, got %d", got)
	}
}
// TestJob_DifficultyFromTarget_Ugly verifies the maximum target "ffffffff" yields difficulty 1.
//
// job := proxy.Job{Target: "ffffffff"}
// diff := job.DifficultyFromTarget() // 1
func TestJob_DifficultyFromTarget_Ugly(t *testing.T) {
job := Job{Target: "ffffffff"} job := Job{Target: "ffffffff"}
if got := job.DifficultyFromTarget(); got != 1 { if got := job.DifficultyFromTarget(); got != 1 {
t.Fatalf("expected minimum difficulty 1, got %d", got) t.Fatalf("expected minimum difficulty 1, got %d", got)
} }
} }
// TestJob_IsValid_Good verifies a job carrying both a blob and a job ID is valid.
//
// job := proxy.Job{Blob: "abc", JobID: "job-1"}
// job.IsValid() // true
func TestJob_IsValid_Good(t *testing.T) {
	complete := Job{Blob: "abc", JobID: "job-1"}
	if valid := complete.IsValid(); !valid {
		t.Fatalf("expected job with blob and job id to be valid")
	}
}
// TestJob_IsValid_Bad verifies a job missing either the blob or the job ID is invalid.
//
// job := proxy.Job{Blob: "", JobID: "job-1"}
// job.IsValid() // false
func TestJob_IsValid_Bad(t *testing.T) {
	missingBlob := Job{Blob: "", JobID: "job-1"}
	if missingBlob.IsValid() {
		t.Fatalf("expected empty blob to be invalid")
	}
	missingID := Job{Blob: "abc", JobID: ""}
	if missingID.IsValid() {
		t.Fatalf("expected empty job id to be invalid")
	}
}
// TestJob_IsValid_Ugly verifies a zero-value job is invalid.
//
// job := proxy.Job{}
// job.IsValid() // false
func TestJob_IsValid_Ugly(t *testing.T) {
	var zero Job
	if zero.IsValid() {
		t.Fatalf("expected zero-value job to be invalid")
	}
}

View file

@ -5,16 +5,87 @@ import (
"time" "time"
) )
func TestRateLimiter_Allow(t *testing.T) { // TestRateLimiter_Allow_Good verifies the first N calls within budget are allowed.
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1}) //
if !rl.Allow("1.2.3.4:1234") { // limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 10})
t.Fatalf("expected first call to pass") // limiter.Allow("1.2.3.4:3333") // true (first 10 calls)
func TestRateLimiter_Allow_Good(t *testing.T) {
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60})
for i := 0; i < 10; i++ {
if !rl.Allow("1.2.3.4:3333") {
t.Fatalf("expected call %d to be allowed", i+1)
} }
if rl.Allow("1.2.3.4:1234") {
t.Fatalf("expected second call to fail")
} }
} }
// TestRateLimiter_Allow_Bad verifies the 11th call fails when budget is 10/min.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 10})
// // calls 1-10 pass, call 11 fails
func TestRateLimiter_Allow_Bad(t *testing.T) {
	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60})
	// Assert each in-budget call succeeds while exhausting the budget, so the
	// final rejection cannot pass for the wrong reason (e.g. an unexpected
	// early false).
	for i := 0; i < 10; i++ {
		if !rl.Allow("1.2.3.4:3333") {
			t.Fatalf("expected call %d to be allowed", i+1)
		}
	}
	if rl.Allow("1.2.3.4:3333") {
		t.Fatalf("expected 11th call to be rejected")
	}
}
// TestRateLimiter_Allow_Ugly verifies a banned IP stays banned for BanDurationSeconds.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 300})
// limiter.Allow("1.2.3.4:3333") // true (exhausts budget)
// limiter.Allow("1.2.3.4:3333") // false (banned for 300 seconds)
func TestRateLimiter_Allow_Ugly(t *testing.T) {
	const addr = "1.2.3.4:3333"
	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 300})
	if !rl.Allow(addr) {
		t.Fatalf("expected first call to pass")
	}
	if rl.Allow(addr) {
		t.Fatalf("expected second call to fail")
	}
	// Hand the host a full token bucket; the ban must still win over tokens.
	rl.mu.Lock()
	rl.bucketByHost["1.2.3.4"] = &tokenBucket{tokens: 100, lastRefill: time.Now()}
	rl.mu.Unlock()
	if rl.Allow(addr) {
		t.Fatalf("expected banned IP to remain banned regardless of fresh bucket")
	}
}
// TestRateLimiter_Tick_Good verifies Tick removes expired bans.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1})
// limiter.Tick()
func TestRateLimiter_Tick_Good(t *testing.T) {
	const addr = "1.2.3.4:3333"
	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1})
	rl.Allow(addr)
	rl.Allow(addr) // second call exceeds the 1/min budget and triggers the ban
	// Rewind the ban deadline so it reads as already expired.
	rl.mu.Lock()
	rl.banUntilByHost["1.2.3.4"] = time.Now().Add(-time.Second)
	rl.mu.Unlock()
	rl.Tick()
	rl.mu.Lock()
	_, stillBanned := rl.banUntilByHost["1.2.3.4"]
	rl.mu.Unlock()
	if stillBanned {
		t.Fatalf("expected expired ban to be removed by Tick")
	}
}
// TestRateLimiter_Allow_ReplenishesHighLimits verifies token replenishment at high rates.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 120})
func TestRateLimiter_Allow_ReplenishesHighLimits(t *testing.T) { func TestRateLimiter_Allow_ReplenishesHighLimits(t *testing.T) {
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 120, BanDurationSeconds: 1}) rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 120, BanDurationSeconds: 1})
rl.mu.Lock() rl.mu.Lock()
@ -28,3 +99,17 @@ func TestRateLimiter_Allow_ReplenishesHighLimits(t *testing.T) {
t.Fatalf("expected bucket to replenish at 120/min") t.Fatalf("expected bucket to replenish at 120/min")
} }
} }
// TestRateLimiter_Disabled_Good verifies a zero-budget limiter allows all connections.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 0})
// limiter.Allow("any-ip") // always true
func TestRateLimiter_Disabled_Good(t *testing.T) {
	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 0})
	// 100 back-to-back calls from one address would trip any active limiter.
	for attempt := 0; attempt < 100; attempt++ {
		if !rl.Allow("1.2.3.4:3333") {
			t.Fatalf("expected disabled limiter to allow all connections")
		}
	}
}

View file

@ -6,29 +6,135 @@ import (
"dappco.re/go/proxy" "dappco.re/go/proxy"
) )
func TestNonceStorage_AddAndRemove(t *testing.T) { // TestStorage_Add_Good verifies 256 sequential Add calls fill all slots with unique FixedByte values.
//
// storage := nicehash.NewNonceStorage()
// for i := 0; i < 256; i++ {
// m := &proxy.Miner{}
// m.SetID(int64(i + 1))
// ok := storage.Add(m) // true for all 256
// }
func TestStorage_Add_Good(t *testing.T) {
	storage := NewNonceStorage()
	// Track which fixed bytes have been handed out; each of the 256 adds
	// must succeed and claim a distinct slot.
	var taken [256]bool
	for i := 0; i < 256; i++ {
		m := &proxy.Miner{}
		m.SetID(int64(i + 1))
		if !storage.Add(m) {
			t.Fatalf("expected add %d to succeed", i)
		}
		if taken[m.FixedByte()] {
			t.Fatalf("duplicate fixed byte %d at add %d", m.FixedByte(), i)
		}
		taken[m.FixedByte()] = true
	}
}
// TestStorage_Add_Bad verifies the 257th Add returns false when all 256 slots are occupied.
//
// storage := nicehash.NewNonceStorage()
// // fill 256 slots...
// ok := storage.Add(overflowMiner) // false — table is full
func TestStorage_Add_Bad(t *testing.T) {
	storage := NewNonceStorage()
	// Occupy every slot.
	for id := int64(1); id <= 256; id++ {
		m := &proxy.Miner{}
		m.SetID(id)
		storage.Add(m)
	}
	overflow := &proxy.Miner{}
	overflow.SetID(257)
	if ok := storage.Add(overflow); ok {
		t.Fatalf("expected 257th add to fail when table is full")
	}
}
// TestStorage_Add_Ugly verifies that a removed slot (dead) is reclaimed after SetJob clears it.
//
// storage := nicehash.NewNonceStorage()
// storage.Add(miner)
// storage.Remove(miner) // slot becomes dead (-minerID)
// storage.SetJob(job) // dead slots cleared to 0
// storage.Add(newMiner) // reclaimed slot succeeds
func TestStorage_Add_Ugly(t *testing.T) {
storage := NewNonceStorage() storage := NewNonceStorage()
miner := &proxy.Miner{} miner := &proxy.Miner{}
miner.SetID(1) miner.SetID(1)
if !storage.Add(miner) { if !storage.Add(miner) {
t.Fatalf("expected add to succeed") t.Fatalf("expected first add to succeed")
}
if miner.FixedByte() != 0 {
t.Fatalf("expected first slot to be 0, got %d", miner.FixedByte())
} }
storage.Remove(miner) storage.Remove(miner)
free, dead, active := storage.SlotCount() free, dead, active := storage.SlotCount()
if free != 255 || dead != 1 || active != 0 { if dead != 1 || active != 0 {
t.Fatalf("unexpected slot counts: free=%d dead=%d active=%d", free, dead, active) t.Fatalf("expected 1 dead slot, got free=%d dead=%d active=%d", free, dead, active)
}
// SetJob clears dead slots
storage.SetJob(proxy.Job{Blob: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", JobID: "job-1"})
free, dead, active = storage.SlotCount()
if dead != 0 {
t.Fatalf("expected dead slots cleared after SetJob, got %d", dead)
}
// Reclaim the slot
newMiner := &proxy.Miner{}
newMiner.SetID(2)
if !storage.Add(newMiner) {
t.Fatalf("expected reclaimed slot add to succeed")
} }
} }
func TestNonceStorage_IsValidJobID_Ugly(t *testing.T) { // TestStorage_IsValidJobID_Good verifies the current job ID is accepted.
//
// storage := nicehash.NewNonceStorage()
// storage.SetJob(proxy.Job{JobID: "job-2", Blob: "..."})
// storage.IsValidJobID("job-2") // true
func TestStorage_IsValidJobID_Good(t *testing.T) {
storage := NewNonceStorage() storage := NewNonceStorage()
storage.job = proxy.Job{JobID: "job-2"} storage.SetJob(proxy.Job{
storage.prevJob = proxy.Job{JobID: "job-1"} JobID: "job-1",
Blob: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
})
if !storage.IsValidJobID("job-1") {
t.Fatalf("expected current job to be valid")
}
}
// TestStorage_IsValidJobID_Bad verifies an unknown job ID is rejected.
//
// storage := nicehash.NewNonceStorage()
// storage.IsValidJobID("nonexistent") // false
func TestStorage_IsValidJobID_Bad(t *testing.T) {
	ns := NewNonceStorage()
	current := proxy.Job{
		JobID: "job-1",
		Blob:  "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
	}
	ns.SetJob(current)
	// Neither an unknown ID nor the empty string matches the current job.
	if ns.IsValidJobID("nonexistent") {
		t.Fatalf("expected unknown job id to be invalid")
	}
	if ns.IsValidJobID("") {
		t.Fatalf("expected empty job id to be invalid")
	}
}
// TestStorage_IsValidJobID_Ugly verifies the previous job ID is accepted but counts as expired.
//
// storage := nicehash.NewNonceStorage()
// // job-1 is current, job-2 pushes job-1 to previous
// storage.IsValidJobID("job-1") // true (but expired counter increments)
func TestStorage_IsValidJobID_Ugly(t *testing.T) {
storage := NewNonceStorage()
blob160 := "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
storage.SetJob(proxy.Job{JobID: "job-1", Blob: blob160, ClientID: "session-1"})
storage.SetJob(proxy.Job{JobID: "job-2", Blob: blob160, ClientID: "session-1"})
if !storage.IsValidJobID("job-2") { if !storage.IsValidJobID("job-2") {
t.Fatalf("expected current job to be valid") t.Fatalf("expected current job to be valid")
@ -39,7 +145,16 @@ func TestNonceStorage_IsValidJobID_Ugly(t *testing.T) {
if storage.expired != 1 { if storage.expired != 1 {
t.Fatalf("expected one expired job validation, got %d", storage.expired) t.Fatalf("expected one expired job validation, got %d", storage.expired)
} }
if storage.IsValidJobID("") { }
t.Fatalf("expected empty job id to be invalid")
// TestStorage_SlotCount_Good verifies free/dead/active counts on a fresh storage.
//
// storage := nicehash.NewNonceStorage()
// free, dead, active := storage.SlotCount() // 256, 0, 0
func TestStorage_SlotCount_Good(t *testing.T) {
storage := NewNonceStorage()
free, dead, active := storage.SlotCount()
if free != 256 || dead != 0 || active != 0 {
t.Fatalf("expected 256/0/0, got free=%d dead=%d active=%d", free, dead, active)
} }
} }

View file

@ -24,6 +24,13 @@ const (
) )
// MinerSnapshot is a serialisable view of one miner connection. // MinerSnapshot is a serialisable view of one miner connection.
//
// snapshots := p.MinerSnapshots()
// for _, s := range snapshots {
// _ = s.ID // 1
// _ = s.IP // "10.0.0.1:49152"
// _ = s.Diff // 100000
// }
type MinerSnapshot struct { type MinerSnapshot struct {
ID int64 ID int64
IP string IP string
@ -102,6 +109,8 @@ func New(config *Config) (*Proxy, Result) {
} }
// Mode returns the runtime mode, for example "nicehash" or "simple". // Mode returns the runtime mode, for example "nicehash" or "simple".
//
// mode := p.Mode() // "nicehash"
func (p *Proxy) Mode() string { func (p *Proxy) Mode() string {
if p == nil || p.config == nil { if p == nil || p.config == nil {
return "" return ""
@ -112,6 +121,8 @@ func (p *Proxy) Mode() string {
} }
// WorkersMode returns the active worker identity strategy, for example proxy.WorkersByRigID. // WorkersMode returns the active worker identity strategy, for example proxy.WorkersByRigID.
//
// mode := p.WorkersMode() // proxy.WorkersByRigID
func (p *Proxy) WorkersMode() WorkersMode { func (p *Proxy) WorkersMode() WorkersMode {
if p == nil || p.config == nil { if p == nil || p.config == nil {
return WorkersDisabled return WorkersDisabled
@ -122,6 +133,9 @@ func (p *Proxy) WorkersMode() WorkersMode {
} }
// Summary returns a snapshot of the current global metrics. // Summary returns a snapshot of the current global metrics.
//
// summary := p.Summary()
// _ = summary.Accepted
func (p *Proxy) Summary() StatsSummary { func (p *Proxy) Summary() StatsSummary {
if p == nil || p.stats == nil { if p == nil || p.stats == nil {
return StatsSummary{} return StatsSummary{}
@ -134,6 +148,9 @@ func (p *Proxy) Summary() StatsSummary {
} }
// WorkerRecords returns a snapshot of the current worker aggregates. // WorkerRecords returns a snapshot of the current worker aggregates.
//
// records := p.WorkerRecords()
// for _, r := range records { _ = r.Name }
func (p *Proxy) WorkerRecords() []WorkerRecord { func (p *Proxy) WorkerRecords() []WorkerRecord {
if p == nil || p.workers == nil { if p == nil || p.workers == nil {
return nil return nil
@ -142,6 +159,9 @@ func (p *Proxy) WorkerRecords() []WorkerRecord {
} }
// MinerSnapshots returns a snapshot of the live miner connections. // MinerSnapshots returns a snapshot of the live miner connections.
//
// snapshots := p.MinerSnapshots()
// for _, s := range snapshots { _ = s.IP }
func (p *Proxy) MinerSnapshots() []MinerSnapshot { func (p *Proxy) MinerSnapshots() []MinerSnapshot {
if p == nil { if p == nil {
return nil return nil
@ -172,6 +192,8 @@ func (p *Proxy) MinerSnapshots() []MinerSnapshot {
} }
// MinerCount returns the current and peak miner counts. // MinerCount returns the current and peak miner counts.
//
// now, max := p.MinerCount() // 142, 200
func (p *Proxy) MinerCount() (now, max uint64) { func (p *Proxy) MinerCount() (now, max uint64) {
if p == nil || p.stats == nil { if p == nil || p.stats == nil {
return 0, 0 return 0, 0
@ -180,6 +202,9 @@ func (p *Proxy) MinerCount() (now, max uint64) {
} }
// Upstreams returns the current upstream connection counts. // Upstreams returns the current upstream connection counts.
//
// stats := p.Upstreams()
// _ = stats.Active // 1
func (p *Proxy) Upstreams() UpstreamStats { func (p *Proxy) Upstreams() UpstreamStats {
if p == nil || p.splitter == nil { if p == nil || p.splitter == nil {
return UpstreamStats{} return UpstreamStats{}
@ -188,6 +213,9 @@ func (p *Proxy) Upstreams() UpstreamStats {
} }
// Events returns the proxy event bus for subscription. // Events returns the proxy event bus for subscription.
//
// bus := p.Events()
// bus.Subscribe(proxy.EventAccept, handler)
func (p *Proxy) Events() *EventBus { func (p *Proxy) Events() *EventBus {
if p == nil { if p == nil {
return nil return nil
@ -862,71 +890,166 @@ func NewMiner(conn net.Conn, localPort uint16, tlsCfg *tls.Config) *Miner {
return miner return miner
} }
// SetID assigns the miner's internal ID. Used by NonceStorage tests.
//
// m.SetID(42)
func (m *Miner) SetID(id int64) { m.id = id } func (m *Miner) SetID(id int64) { m.id = id }
// ID returns the miner's monotonically increasing per-process identifier.
//
// id := m.ID() // 42
func (m *Miner) ID() int64 { return m.id } func (m *Miner) ID() int64 { return m.id }
// SetMapperID assigns which NonceMapper owns this miner in NiceHash mode.
//
// m.SetMapperID(0) // assigned to mapper 0
func (m *Miner) SetMapperID(id int64) { func (m *Miner) SetMapperID(id int64) {
m.mapperID = id m.mapperID = id
} }
// MapperID returns the owning NonceMapper's ID, or -1 if unassigned.
//
// if m.MapperID() < 0 { /* miner not assigned to any mapper */ }
func (m *Miner) MapperID() int64 { func (m *Miner) MapperID() int64 {
return m.mapperID return m.mapperID
} }
// SetRouteID assigns the SimpleMapper ID in simple mode.
//
// m.SetRouteID(3)
func (m *Miner) SetRouteID(id int64) { func (m *Miner) SetRouteID(id int64) {
m.routeID = id m.routeID = id
} }
// RouteID returns the SimpleMapper ID, or -1 if unassigned.
//
// if m.RouteID() < 0 { /* miner not routed */ }
func (m *Miner) RouteID() int64 { func (m *Miner) RouteID() int64 {
return m.routeID return m.routeID
} }
// SetExtendedNiceHash enables or disables NiceHash nonce-splitting mode for this miner.
//
// m.SetExtendedNiceHash(true)
func (m *Miner) SetExtendedNiceHash(enabled bool) { func (m *Miner) SetExtendedNiceHash(enabled bool) {
m.extNH = enabled m.extNH = enabled
} }
// ExtendedNiceHash reports whether this miner is in NiceHash nonce-splitting mode.
//
// if m.ExtendedNiceHash() { /* blob byte 39 is patched */ }
func (m *Miner) ExtendedNiceHash() bool { func (m *Miner) ExtendedNiceHash() bool {
return m.extNH return m.extNH
} }
// SetCurrentJob assigns the current pool work unit to this miner.
//
// m.SetCurrentJob(proxy.Job{Blob: "...", JobID: "job-1"})
func (m *Miner) SetCurrentJob(job Job) { func (m *Miner) SetCurrentJob(job Job) {
m.currentJob = job m.currentJob = job
} }
// CurrentJob returns the last job forwarded to this miner.
//
// job := m.CurrentJob()
// if job.IsValid() { /* miner has a valid job */ }
func (m *Miner) CurrentJob() Job { func (m *Miner) CurrentJob() Job {
return m.currentJob return m.currentJob
} }
// LoginAlgos returns the algorithm list sent by the miner during login, or nil if empty.
//
// algos := m.LoginAlgos() // ["cn/r", "rx/0"]
func (m *Miner) LoginAlgos() []string { func (m *Miner) LoginAlgos() []string {
if m == nil || len(m.loginAlgos) == 0 { if m == nil || len(m.loginAlgos) == 0 {
return nil return nil
} }
return append([]string(nil), m.loginAlgos...) return append([]string(nil), m.loginAlgos...)
} }
// FixedByte returns the NiceHash slot index (0-255) assigned to this miner.
//
// slot := m.FixedByte() // 0x2A
func (m *Miner) FixedByte() uint8 { func (m *Miner) FixedByte() uint8 {
return m.fixedByte return m.fixedByte
} }
// SetFixedByte assigns the NiceHash slot index for this miner.
//
// m.SetFixedByte(0x2A)
func (m *Miner) SetFixedByte(value uint8) { func (m *Miner) SetFixedByte(value uint8) {
m.fixedByte = value m.fixedByte = value
} }
// IP returns the remote IP address (without port) for logging.
//
// ip := m.IP() // "10.0.0.1"
func (m *Miner) IP() string { func (m *Miner) IP() string {
return m.ip return m.ip
} }
// RemoteAddr returns the full remote address including port.
//
// addr := m.RemoteAddr() // "10.0.0.1:49152"
func (m *Miner) RemoteAddr() string { func (m *Miner) RemoteAddr() string {
if m == nil { if m == nil {
return "" return ""
} }
return m.remoteAddr return m.remoteAddr
} }
// User returns the wallet address from login params, with any custom diff suffix stripped.
//
// user := m.User() // "WALLET" (even if login was "WALLET+50000")
func (m *Miner) User() string { func (m *Miner) User() string {
return m.user return m.user
} }
// Password returns the login params.pass value.
//
// pass := m.Password() // "x"
func (m *Miner) Password() string { func (m *Miner) Password() string {
return m.password return m.password
} }
// Agent returns the mining software identifier from login params.
//
// agent := m.Agent() // "XMRig/6.21.0"
func (m *Miner) Agent() string { func (m *Miner) Agent() string {
return m.agent return m.agent
} }
// RigID returns the optional rigid extension field from login params.
//
// rigid := m.RigID() // "rig-alpha"
func (m *Miner) RigID() string { func (m *Miner) RigID() string {
return m.rigID return m.rigID
} }
// RX returns the total bytes received from this miner.
//
// rx := m.RX() // 4096
func (m *Miner) RX() uint64 { func (m *Miner) RX() uint64 {
return m.rx return m.rx
} }
// TX returns the total bytes sent to this miner.
//
// tx := m.TX() // 8192
func (m *Miner) TX() uint64 { func (m *Miner) TX() uint64 {
return m.tx return m.tx
} }
// Diff returns the last difficulty sent to this miner from the pool.
//
// diff := m.Diff() // 100000
func (m *Miner) Diff() uint64 { return m.diff }
// State returns the current lifecycle state of this miner connection.
//
// if m.State() == proxy.MinerStateReady { /* miner is active */ }
func (m *Miner) State() MinerState { func (m *Miner) State() MinerState {
return m.state return m.state
} }
@ -1863,6 +1986,8 @@ func (s *Server) listen() Result {
} }
// IsActive reports whether the limiter has enabled rate limiting. // IsActive reports whether the limiter has enabled rate limiting.
//
// if rl.IsActive() { /* rate limiting is enabled */ }
func (rl *RateLimiter) IsActive() bool { func (rl *RateLimiter) IsActive() bool {
return rl != nil && rl.limit.MaxConnectionsPerMinute > 0 return rl != nil && rl.limit.MaxConnectionsPerMinute > 0
} }

View file

@ -1,8 +1,82 @@
package proxy package proxy
import "testing" import (
"sync"
"testing"
)
func TestProxy_Stats_InvalidRejectReasons_Good(t *testing.T) { // TestStats_OnAccept_Good verifies that accepted counter, hashes, and topDiff are updated.
//
// stats := proxy.NewStats()
// stats.OnAccept(proxy.Event{Diff: 100000, Latency: 82})
// summary := stats.Summary()
// _ = summary.Accepted // 1
// _ = summary.Hashes // 100000
func TestStats_OnAccept_Good(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 100000, Latency: 82})
	// A single accept must be reflected in the counter, the hash total, and
	// the top-difficulty table.
	got := stats.Summary()
	if got.Accepted != 1 {
		t.Fatalf("expected accepted 1, got %d", got.Accepted)
	}
	if got.Hashes != 100000 {
		t.Fatalf("expected hashes 100000, got %d", got.Hashes)
	}
	if got.TopDiff[0] != 100000 {
		t.Fatalf("expected top diff 100000, got %d", got.TopDiff[0])
	}
}
// TestStats_OnAccept_Bad verifies concurrent OnAccept calls do not race.
//
// stats := proxy.NewStats()
// // 100 goroutines each call OnAccept — no data race under -race flag.
func TestStats_OnAccept_Bad(t *testing.T) {
	const workers = 100
	stats := NewStats()
	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		diff := uint64(i + 1) // per-iteration copy for the closure
		go func() {
			defer wg.Done()
			stats.OnAccept(Event{Diff: diff, Latency: 10})
		}()
	}
	wg.Wait()
	if summary := stats.Summary(); summary.Accepted != 100 {
		t.Fatalf("expected 100 accepted, got %d", summary.Accepted)
	}
}
// TestStats_OnAccept_Ugly verifies that 15 accepts with varying diffs fill all topDiff slots.
//
// stats := proxy.NewStats()
// // 15 accepts with diffs 1..15 → topDiff[9] is 6 (10th highest), not 0
func TestStats_OnAccept_Ugly(t *testing.T) {
	stats := NewStats()
	for diff := uint64(1); diff <= 15; diff++ {
		stats.OnAccept(Event{Diff: diff})
	}
	// The table should hold 15, 14, 13, ..., 6 — highest first.
	summary := stats.Summary()
	if summary.TopDiff[0] != 15 {
		t.Fatalf("expected top diff[0]=15, got %d", summary.TopDiff[0])
	}
	if summary.TopDiff[9] != 6 {
		t.Fatalf("expected top diff[9]=6, got %d", summary.TopDiff[9])
	}
}
// TestStats_OnReject_Good verifies that rejected and invalid counters are updated.
//
// stats := proxy.NewStats()
// stats.OnReject(proxy.Event{Error: "Low difficulty share"})
func TestStats_OnReject_Good(t *testing.T) {
stats := NewStats() stats := NewStats()
stats.OnReject(Event{Error: "Low difficulty share"}) stats.OnReject(Event{Error: "Low difficulty share"})
@ -16,3 +90,84 @@ func TestProxy_Stats_InvalidRejectReasons_Good(t *testing.T) {
t.Fatalf("expected two invalid shares, got %d", summary.Invalid) t.Fatalf("expected two invalid shares, got %d", summary.Invalid)
} }
} }
// TestStats_OnReject_Bad verifies that a non-invalid rejection increments rejected but not invalid.
//
// stats := proxy.NewStats()
// stats.OnReject(proxy.Event{Error: "Stale share"})
func TestStats_OnReject_Bad(t *testing.T) {
	stats := NewStats()
	stats.OnReject(Event{Error: "Stale share"})
	got := stats.Summary()
	if got.Rejected != 1 {
		t.Fatalf("expected one rejected, got %d", got.Rejected)
	}
	if got.Invalid != 0 {
		t.Fatalf("expected zero invalid for non-invalid reason, got %d", got.Invalid)
	}
}
// TestStats_OnReject_Ugly verifies an expired accepted share increments both accepted and expired.
//
// NOTE(review): despite the OnReject name, this case exercises the OnAccept
// expired path — consider renaming in a follow-up to keep the triplet honest.
//
// stats := proxy.NewStats()
// stats.OnAccept(proxy.Event{Diff: 1000, Expired: true})
func TestStats_OnReject_Ugly(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 1000, Expired: true})
	got := stats.Summary()
	if got.Accepted != 1 {
		t.Fatalf("expected accepted 1, got %d", got.Accepted)
	}
	if got.Expired != 1 {
		t.Fatalf("expected expired 1, got %d", got.Expired)
	}
}
// TestStats_Tick_Good verifies that Tick advances the rolling window position.
//
// stats := proxy.NewStats()
// stats.OnAccept(proxy.Event{Diff: 500})
// stats.Tick()
// summary := stats.Summary()
func TestStats_Tick_Good(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 500})
	stats.Tick()
	// A single tick must not age the 500-diff share out of the 60s window.
	if got := stats.Summary(); got.Hashrate[HashrateWindow60s] == 0 {
		t.Fatalf("expected non-zero 60s hashrate after accept and tick")
	}
}
// TestStats_OnLogin_OnClose_Good verifies miner count tracking.
//
// stats := proxy.NewStats()
// stats.OnLogin(proxy.Event{Miner: &proxy.Miner{}})
// stats.OnClose(proxy.Event{Miner: &proxy.Miner{}})
func TestStats_OnLogin_OnClose_Good(t *testing.T) {
	stats := NewStats()
	miner := &Miner{}
	// Login raises both the live count and the high-water mark.
	stats.OnLogin(Event{Miner: miner})
	if got := stats.miners.Load(); got != 1 {
		t.Fatalf("expected 1 miner, got %d", got)
	}
	if got := stats.maxMiners.Load(); got != 1 {
		t.Fatalf("expected max miners 1, got %d", got)
	}
	// Close drops the live count but leaves the high-water mark alone.
	stats.OnClose(Event{Miner: miner})
	if got := stats.miners.Load(); got != 0 {
		t.Fatalf("expected 0 miners after close, got %d", got)
	}
	if got := stats.maxMiners.Load(); got != 1 {
		t.Fatalf("expected max miners to remain 1, got %d", got)
	}
}