// go-proxy/proxy.go
// Package proxy is a CryptoNote stratum mining proxy library.
//
// It accepts miner connections over TCP (optionally TLS), splits the 32-bit nonce
// space across up to 256 simultaneous miners per upstream pool connection (NiceHash
// mode), and presents a small monitoring API.
//
// Full specification: docs/RFC.md
//
// p, result := proxy.New(cfg)
// if result.OK { p.Start() }
package proxy
import (
"net/http"
"sync"
"sync/atomic"
"time"
)
// Proxy is the top-level orchestrator. It owns the server, splitter, stats, workers,
// event bus, tick goroutine, and optional HTTP API.
//
//	p, result := proxy.New(cfg)
//	if result.OK { p.Start() }
type Proxy struct {
	config           *Config
	customDifficulty *CustomDiff
	rateLimiter      *RateLimiter
	splitter         Splitter
	stats            *Stats
	workers          *Workers
	events           *EventBus
	currentMiners    atomic.Uint64    // lock-free counter of currently connected miners
	miners           map[int64]*Miner // connected miners keyed by ID (TODO confirm key semantics); guarded by minerMu
	minerMu          sync.RWMutex     // protects miners
	servers          []*Server        // listening TCP/TLS frontends
	httpServer       *http.Server     // optional monitoring HTTP API
	accessLogger     *appendLineLogger
	shareLogger      *appendLineLogger
	ticker           *time.Ticker  // drives periodic housekeeping — NOTE(review): presumably 1s, matching Splitter.Tick's contract; confirm
	watcher          *ConfigWatcher
	done             chan struct{} // NOTE(review): presumably closed on shutdown to stop background goroutines — confirm
}
// Splitter is the interface both NonceSplitter and SimpleSplitter satisfy.
type Splitter interface {
	// Connect establishes the first pool upstream connection.
	Connect()
	// OnLogin routes a newly authenticated miner to an upstream slot.
	OnLogin(event *LoginEvent)
	// OnSubmit routes a share submission to the correct upstream.
	OnSubmit(event *SubmitEvent)
	// OnClose releases the upstream slot for a disconnecting miner.
	OnClose(event *CloseEvent)
	// Tick is called every second for keepalive and GC housekeeping.
	// ticks is the monotonically increasing tick count.
	Tick(ticks uint64)
	// GC runs every 60 ticks to reclaim disconnected upstream slots.
	GC()
	// Upstreams returns current upstream pool connection counts.
	Upstreams() UpstreamStats
}
// UpstreamStats carries pool connection state counts for monitoring.
type UpstreamStats struct {
	Active uint64 // connections currently receiving jobs
	Sleep  uint64 // idle connections (simple mode reuse pool)
	Error  uint64 // connections in error/reconnecting state
	Total  uint64 // Active + Sleep + Error
}
// LoginEvent is dispatched when a miner completes the login handshake.
type LoginEvent struct {
	Miner *Miner // the miner that just authenticated
}
// SubmitEvent is dispatched when a miner submits a share.
type SubmitEvent struct {
	Miner     *Miner
	JobID     string // job the share was mined against
	Nonce     string // NOTE(review): presumably hex-encoded, per stratum convention — confirm
	Result    string // NOTE(review): presumably the hex-encoded hash result — confirm
	Algo      string // algorithm identifier reported by the miner
	RequestID int64  // stratum request id, echoed back in the reply
}
// CloseEvent is dispatched when a miner TCP connection closes.
type CloseEvent struct {
	Miner *Miner // the miner whose connection closed
}
// ConfigWatcher polls a config file for mtime changes and calls onChange on modification.
// Uses 1-second polling; does not require fsnotify.
//
//	w := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
//		p.Reload(cfg)
//	})
//	w.Start()
type ConfigWatcher struct {
	path           string        // config file path being watched
	onChange       func(*Config) // invoked with the freshly parsed config on change
	enabled        bool
	lastModifiedAt time.Time     // mtime seen on the last poll, for change detection
	done           chan struct{} // NOTE(review): presumably closed to stop the polling goroutine — confirm
}
// RateLimiter implements per-IP token bucket connection rate limiting.
// Each unique IP has a bucket initialised to MaxConnectionsPerMinute tokens.
// Each connection attempt consumes one token. Tokens refill at 1 per (60/max) seconds.
// An IP that empties its bucket is added to a ban list for BanDurationSeconds.
//
//	rl := proxy.NewRateLimiter(cfg.RateLimit)
//	if !rl.Allow("1.2.3.4") { conn.Close(); return }
type RateLimiter struct {
	limitConfig RateLimit
	buckets     map[string]*tokenBucket // per-IP buckets; guarded by mu
	banned      map[string]time.Time    // banned IP -> ban expiry; guarded by mu
	mu          sync.Mutex              // protects buckets and banned
}
// tokenBucket is a simple token bucket for one IP.
type tokenBucket struct {
	tokens     int       // tokens currently available
	lastRefill time.Time // last time tokens were replenished
}
// CustomDiff resolves and applies per-miner difficulty overrides at login time.
// Resolution order: user-suffix (+N) > Config.CustomDiff > pool difficulty.
//
//	cd := proxy.NewCustomDiff(cfg.CustomDiff)
//	bus.Subscribe(proxy.EventLogin, cd.OnLogin)
type CustomDiff struct {
	globalDifficulty uint64       // Config.CustomDiff fallback value; guarded by mu
	mu               sync.RWMutex // protects globalDifficulty
}
// splitterFactories maps Config.Mode names to splitter constructors.
// The empty mode is pre-registered with noopSplitterFactory so an unset
// mode always resolves. The map is not mutex-guarded, so registration
// must finish before concurrent lookups begin — NOTE(review): presumably
// done during package init; confirm.
var splitterFactories = map[string]func(*Config, *EventBus) Splitter{
	"": noopSplitterFactory,
}
// RegisterSplitterFactory registers a splitter constructor for a mode name.
//
//	proxy.RegisterSplitterFactory("nicehash", func(cfg *proxy.Config, bus *proxy.EventBus) proxy.Splitter {
//		return nicehash.NewNonceSplitter(cfg, bus, pool.NewStrategyFactory(cfg))
//	})
func RegisterSplitterFactory(mode string, factory func(*Config, *EventBus) Splitter) {
	// Unusable registrations (empty mode name or nil constructor) are
	// silently ignored so they cannot shadow the built-in default entry.
	if mode != "" && factory != nil {
		splitterFactories[mode] = factory
	}
}
// newSplitter builds the Splitter registered for cfg.Mode, falling back to
// the no-op implementation when cfg is nil or no usable factory is registered.
func newSplitter(cfg *Config, events *EventBus) Splitter {
	if cfg != nil {
		if build, ok := splitterFactories[cfg.Mode]; ok && build != nil {
			return build(cfg, events)
		}
	}
	return noopSplitter{}
}