go-proxy/splitter/nicehash/storage.go
Virgil 48c6e0fc6d feat(proxy): implement RFC runtime primitives
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 10:39:59 +00:00

142 lines
3.2 KiB
Go

package nicehash
import (
"sync"
"dappco.re/go/core/proxy"
)
// NonceStorage is the 256-slot fixed-byte allocation table for one NonceMapper.
//
// Slot encoding:
//
// 0 = free
// +minerID = active miner
// -minerID = disconnected miner (dead slot, cleared on next SetJob)
//
// storage := nicehash.NewNonceStorage()
// NonceStorage is the 256-slot fixed-byte allocation table for one NonceMapper.
//
// Each miner is pinned to one slot, and the slot index doubles as the miner's
// nonce fixed byte (see Add, which calls SetFixedByte with the index).
//
// Slot encoding:
//
//	0        = free
//	+minerID = active miner
//	-minerID = disconnected miner (dead slot, cleared on next SetJob)
//
//	storage := nicehash.NewNonceStorage()
type NonceStorage struct {
	slots   [256]int64             // slot state per the encoding above; index == miner fixed byte
	miners  map[int64]*proxy.Miner // minerID → Miner pointer for active miners only
	job     proxy.Job              // current job from pool
	prevJob proxy.Job              // previous job (accepted by IsValidJobID for stale submits)
	cursor  int                    // search starts here (round-robin allocation)
	mu      sync.Mutex             // guards all fields above
}
// NewNonceStorage allocates the fixed-size miner slot table.
//
// storage := nicehash.NewNonceStorage()
// NewNonceStorage allocates the fixed-size miner slot table.
//
// The slot array and job fields start at their zero values; only the miner
// lookup map needs explicit construction.
//
//	storage := nicehash.NewNonceStorage()
func NewNonceStorage() *NonceStorage {
	storage := new(NonceStorage)
	storage.miners = make(map[int64]*proxy.Miner)
	return storage
}
// Add finds the next free slot starting from cursor (wrapping), sets slot[index] = minerID,
// and sets the miner fixed byte.
//
// ok := storage.Add(miner)
// Add finds the next free slot starting from cursor (wrapping), sets
// slot[index] = minerID, and assigns the miner its nonce fixed byte.
//
// It returns false when miner is nil, when the miner ID cannot be represented
// in the slot encoding (ID <= 0, see below), or when all 256 slots are
// occupied (active or dead).
//
//	ok := storage.Add(miner)
func (s *NonceStorage) Add(miner *proxy.Miner) bool {
	if miner == nil {
		return false
	}
	id := miner.ID()
	// The slot table encodes "free" as 0 and "dead" as a negated ID, so a
	// non-positive miner ID would silently corrupt it: ID 0 leaves the slot
	// looking free (the same fixed byte gets handed out again), and a
	// negative ID reads as a dead slot. Reject such IDs up front.
	if id <= 0 {
		return false
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	for offset := 0; offset < len(s.slots); offset++ {
		index := (s.cursor + offset) % len(s.slots)
		if s.slots[index] != 0 {
			continue
		}
		s.slots[index] = id
		s.miners[id] = miner
		miner.SetFixedByte(uint8(index))
		// Resume the next search just past this slot so allocations
		// round-robin across the table instead of clustering at 0.
		s.cursor = (index + 1) % len(s.slots)
		return true
	}
	return false // every slot is in use
}
// Remove marks slot[miner.FixedByte] as a dead slot until the next SetJob call.
//
// storage.Remove(miner)
// Remove marks slot[miner.FixedByte] as a dead slot until the next SetJob call.
//
// The miner is dropped from the active map immediately, but its slot stays
// reserved (negated ID) so the fixed byte is not reissued mid-job.
//
//	storage.Remove(miner)
func (s *NonceStorage) Remove(miner *proxy.Miner) {
	if miner == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	slot := int(miner.FixedByte())
	inRange := slot >= 0 && slot < len(s.slots)
	// Only negate the slot if it still belongs to this miner; a stale fixed
	// byte must not clobber another miner's allocation.
	if inRange && s.slots[slot] == miner.ID() {
		s.slots[slot] = -miner.ID()
	}
	delete(s.miners, miner.ID())
}
// SetJob replaces the current job, clears dead slots, and fans the job out to active miners.
//
// storage.SetJob(job)
// SetJob replaces the current job, clears dead slots, and fans the job out to active miners.
//
//	storage.SetJob(job)
func (s *NonceStorage) SetJob(job proxy.Job) {
	s.mu.Lock()
	// Keep the outgoing job as prevJob only when it belongs to the same
	// upstream client (or the storage was still in its zero state);
	// submits against another client's old job must not validate.
	sameClient := s.job.ClientID == job.ClientID || s.job.ClientID == ""
	if sameClient {
		s.prevJob = s.job
	} else {
		s.prevJob = proxy.Job{}
	}
	s.job = job
	// Collect recipients while holding the lock, then fan out after
	// releasing it so ForwardJob cannot block other storage operations.
	recipients := make([]*proxy.Miner, 0, len(s.miners))
	for i, id := range s.slots {
		switch {
		case id < 0:
			// Dead slot: the disconnected miner's fixed byte becomes
			// reusable now that a fresh job invalidates its old shares.
			s.slots[i] = 0
		case id > 0:
			if m := s.miners[id]; m != nil {
				recipients = append(recipients, m)
			}
		}
	}
	s.mu.Unlock()
	for _, m := range recipients {
		m.ForwardJob(job, job.Algo)
	}
}
// IsValidJobID returns true if id matches the current or previous job ID.
//
// if !storage.IsValidJobID(submitJobID) { reject }
// IsValidJobID reports whether id matches the current or previous job ID.
// An empty id is never valid.
//
//	if !storage.IsValidJobID(submitJobID) { reject }
func (s *NonceStorage) IsValidJobID(id string) bool {
	if id == "" {
		return false
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	return id == s.job.JobID || id == s.prevJob.JobID
}
// SlotCount returns free, dead, and active slot counts for monitoring output.
//
// free, dead, active := storage.SlotCount()
// SlotCount returns free, dead, and active slot counts for monitoring output.
//
// The three counts always sum to 256 (the slot table size).
//
//	free, dead, active := storage.SlotCount()
func (s *NonceStorage) SlotCount() (free int, dead int, active int) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, state := range s.slots {
		if state == 0 {
			free++
		} else if state < 0 {
			dead++
		} else {
			active++
		}
	}
	return free, dead, active
}