Compare commits

...
Sign in to create a new pull request.

45 commits
dev ... main

Author SHA1 Message Date
Virgil
46231ef0a3 fix(proxy): align shutdown and custom diff fallback
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:41:59 +00:00
Virgil
066bc42a89 refactor(ax): improve internal naming for rate limiter and custom diff
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:38:41 +00:00
Virgil
b65bb76ac5 refactor(proxy): add semantic stringers
Keep WorkersByPass as a compatibility alias while introducing predictable String methods for public enums.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:35:33 +00:00
Virgil
d2d737764f refactor(log): clarify append-only logger fields
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:31:21 +00:00
Virgil
8225649394 fix(config): require explicit mode and workers
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:26:55 +00:00
Virgil
69eb908fe8 fix(pool): retry failover from primary
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:23:15 +00:00
Virgil
d66ccd3ab6 fix(pool): honour failover order on reconnect
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:18:26 +00:00
Virgil
9e997554fa fix(config): validate mode and workers enums
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:14:54 +00:00
Virgil
3535e4b006 feat(pool): support descriptive password alias
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:11:25 +00:00
Virgil
1d6176153c fix(pool): handle upstream error responses
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:07:26 +00:00
Virgil
23623a97d3 refactor(ax): improve public API examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 17:02:53 +00:00
Virgil
e523fd0740 refactor(api): share monitoring route registration
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 16:45:13 +00:00
Virgil
140e66ac64 chore(proxy): sort miners deterministically
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 16:07:57 +00:00
Virgil
b1af2e0081 fix(proxy): honour disabled config watcher
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 15:58:27 +00:00
Virgil
38a93605a1 refactor(ax): unify api response shapes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 15:47:57 +00:00
Virgil
5190caf9d6 refactor(ax): expand internal naming
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 14:33:44 +00:00
Virgil
8798eea2a0 docs(ax): clarify public API examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 14:21:44 +00:00
Virgil
f2fd83caad fix(nicehash): count stale job hits
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 14:19:22 +00:00
Virgil
f16c9033e3 refactor(proxy): use clearer runtime names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 14:16:33 +00:00
Virgil
22d3cd9c09 docs(ax): add codex conventions
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 14:12:59 +00:00
Virgil
c26136b208 fix(miner): preserve full job payloads
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 13:53:22 +00:00
Virgil
259f7e80c8 fix(proxy): reset custom diff and preserve share difficulty
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 13:24:17 +00:00
Virgil
22e98635e7 fix(proxy): hot-reload log file paths
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 13:14:08 +00:00
Virgil
75de9000f0 fix(proxy): gate submit algo forwarding
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 13:10:38 +00:00
Virgil
0ab02e9e4b docs(ax): clarify public api examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 13:07:29 +00:00
Virgil
22f1420d1c fix(splitter): tighten stale job validation
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 12:11:48 +00:00
Virgil
fe2872149e fix(proxy): count accepted tcp connections
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 12:07:32 +00:00
Virgil
78e740add7 fix(proxy): reset login activity timer
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 12:04:42 +00:00
Virgil
f0477b9980 refactor(proxy): use semantic runtime names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 12:01:40 +00:00
Virgil
c164bf2e26 fix(proxy): close append-only loggers on stop
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:55:44 +00:00
Virgil
0a195f7962 feat(proxy): add UUID session ids and custom diff buckets
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:53:03 +00:00
Virgil
0d7c60726c fix(pool): enforce stratum line limits
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:48:16 +00:00
Virgil
417b967d48 fix(proxy): preserve submitted job snapshots
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:44:43 +00:00
Virgil
96a0652235 fix(splitter): clear stale pending submits
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:40:03 +00:00
Virgil
8579b0cc11 fix(proxy): relax config validation and dedupe disconnects
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:36:27 +00:00
Virgil
b63e7562de fix(config): reject unsupported proxy modes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:30:27 +00:00
Virgil
9e906d11f9 fix(proxy): keep runtime state local
Preserve bind addresses on reload and track active miners per Proxy instance.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:21:38 +00:00
Virgil
e41ad7ef2e feat(proxy): dispatch submits and drain shutdown
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:18:35 +00:00
Virgil
465ea38308 feat(proxy): honour TLS config and pool keepalive
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:16:29 +00:00
Virgil
3376cea600 fix(pool): restore failover and stratum job handling
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:13:41 +00:00
Virgil
07ff21aa67 fix(proxy): align job and splitter behaviour with RFC
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:07:41 +00:00
Virgil
36fb1232d5 feat(proxy): implement runtime HTTP and logging hooks
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:03:54 +00:00
Virgil
7d2d309529 feat(proxy): close RFC gaps
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 10:52:30 +00:00
Virgil
20f0626a19 feat(proxy): wire miner runtime flow
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 10:47:58 +00:00
Virgil
48c6e0fc6d feat(proxy): implement RFC runtime primitives
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 10:39:59 +00:00
41 changed files with 5964 additions and 181 deletions

22
CODEX.md Normal file
View file

@ -0,0 +1,22 @@
# CODEX.md
This repository uses the same conventions as `CLAUDE.md`.
## Source Of Truth
- RFC: `docs/RFC.md`
- AX principles: `.core/reference/RFC-025-AGENT-EXPERIENCE.md`
- Package conventions: `CLAUDE.md`
## Quick Commands
```bash
go test ./...
go test -race ./...
go vet ./...
```
## Commit Style
- Use conventional commits: `type(scope): description`
- Include `Co-Authored-By: Virgil <virgil@lethean.io>`

View file

@ -1,63 +1,26 @@
// Package api implements the HTTP monitoring endpoints for the proxy. // Package api wires the monitoring endpoints onto an HTTP router.
// //
// Registered routes: // mux := http.NewServeMux()
// // api.RegisterRoutes(mux, p)
// GET /1/summary — aggregated proxy stats
// GET /1/workers — per-worker hashrate table
// GET /1/miners — per-connection state table
//
// proxyapi.RegisterRoutes(apiRouter, p)
package api package api
// SummaryResponse is the /1/summary JSON body. import (
// "dappco.re/go/core/proxy"
// {"version":"1.0.0","mode":"nicehash","hashrate":{"total":[...]}, ...} )
type SummaryResponse struct {
Version string `json:"version"`
Mode string `json:"mode"`
Hashrate HashrateResponse `json:"hashrate"`
Miners MinersCountResponse `json:"miners"`
Workers uint64 `json:"workers"`
Upstreams UpstreamResponse `json:"upstreams"`
Results ResultsResponse `json:"results"`
}
// HashrateResponse carries the per-window hashrate array. // Router is the minimal route-registration surface used by RegisterRoutes.
// type Router = proxy.RouteRegistrar
// HashrateResponse{Total: [6]float64{12345.67, 11900.00, 12100.00, 11800.00, 12000.00, 12200.00}}
type HashrateResponse struct {
Total [6]float64 `json:"total"`
}
// MinersCountResponse carries current and peak miner counts. type SummaryResponse = proxy.SummaryResponse
// type HashrateResponse = proxy.HashrateResponse
// MinersCountResponse{Now: 142, Max: 200} type MinersCountResponse = proxy.MinersCountResponse
type MinersCountResponse struct { type UpstreamResponse = proxy.UpstreamResponse
Now uint64 `json:"now"` type ResultsResponse = proxy.ResultsResponse
Max uint64 `json:"max"`
}
// UpstreamResponse carries pool connection state counts. // RegisterRoutes mounts the monitoring endpoints on any router with HandleFunc.
// //
// UpstreamResponse{Active: 1, Sleep: 0, Error: 0, Total: 1, Ratio: 142.0} // mux := http.NewServeMux()
type UpstreamResponse struct { // api.RegisterRoutes(mux, p)
Active uint64 `json:"active"` func RegisterRoutes(router Router, proxyValue *proxy.Proxy) {
Sleep uint64 `json:"sleep"` proxy.RegisterMonitoringRoutes(router, proxyValue)
Error uint64 `json:"error"`
Total uint64 `json:"total"`
Ratio float64 `json:"ratio"`
}
// ResultsResponse carries share acceptance statistics.
//
// ResultsResponse{Accepted: 4821, Rejected: 3, Invalid: 0, Expired: 12}
type ResultsResponse struct {
Accepted uint64 `json:"accepted"`
Rejected uint64 `json:"rejected"`
Invalid uint64 `json:"invalid"`
Expired uint64 `json:"expired"`
AvgTime uint32 `json:"avg_time"`
Latency uint32 `json:"latency"`
HashesTotal uint64 `json:"hashes_total"`
Best [10]uint64 `json:"best"`
} }

View file

@ -5,22 +5,24 @@ package proxy
// cfg, result := proxy.LoadConfig("config.json") // cfg, result := proxy.LoadConfig("config.json")
// if !result.OK { log.Fatal(result.Error) } // if !result.OK { log.Fatal(result.Error) }
type Config struct { type Config struct {
Mode string `json:"mode"` // "nicehash" or "simple" Mode string `json:"mode"` // "nicehash" or "simple"
Bind []BindAddr `json:"bind"` // listen addresses Bind []BindAddr `json:"bind"` // listen addresses
Pools []PoolConfig `json:"pools"` // ordered primary + fallbacks Pools []PoolConfig `json:"pools"` // ordered primary + fallbacks
TLS TLSConfig `json:"tls"` // inbound TLS (miner-facing) TLS TLSConfig `json:"tls"` // inbound TLS (miner-facing)
HTTP HTTPConfig `json:"http"` // monitoring API HTTP HTTPConfig `json:"http"` // monitoring API
AccessPassword string `json:"access-password"` // "" = no auth required AccessPassword string `json:"access-password"` // "" = no auth required
CustomDiff uint64 `json:"custom-diff"` // 0 = disabled CustomDiff uint64 `json:"custom-diff"` // 0 = disabled
CustomDiffStats bool `json:"custom-diff-stats"` // report per custom-diff bucket CustomDiffStats bool `json:"custom-diff-stats"` // report per custom-diff bucket
AlgoExtension bool `json:"algo-ext"` // forward algo field in jobs AlgoExtension bool `json:"algo-ext"` // forward algo field in jobs
Workers WorkersMode `json:"workers"` // "rig-id", "user", "password", "agent", "ip", "false" Workers WorkersMode `json:"workers"` // "rig-id", "user", "password", "agent", "ip", "false"
AccessLogFile string `json:"access-log-file"` // "" = disabled AccessLogFile string `json:"access-log-file"` // "" = disabled
ReuseTimeout int `json:"reuse-timeout"` // seconds; simple mode upstream reuse ShareLogFile string `json:"share-log-file"` // "" = disabled
Retries int `json:"retries"` // pool reconnect attempts ReuseTimeout int `json:"reuse-timeout"` // seconds; simple mode upstream reuse
RetryPause int `json:"retry-pause"` // seconds between retries Retries int `json:"retries"` // pool reconnect attempts
Watch bool `json:"watch"` // hot-reload on file change RetryPause int `json:"retry-pause"` // seconds between retries
RateLimit RateLimit `json:"rate-limit"` // per-IP connection rate limit Watch bool `json:"watch"` // hot-reload on file change
RateLimit RateLimit `json:"rate-limit"` // per-IP connection rate limit
sourcePath string `json:"-"`
} }
// BindAddr is one TCP listen endpoint. // BindAddr is one TCP listen endpoint.
@ -39,6 +41,7 @@ type PoolConfig struct {
URL string `json:"url"` URL string `json:"url"`
User string `json:"user"` User string `json:"user"`
Pass string `json:"pass"` Pass string `json:"pass"`
Password string `json:"password"`
RigID string `json:"rig-id"` RigID string `json:"rig-id"`
Algo string `json:"algo"` Algo string `json:"algo"`
TLS bool `json:"tls"` TLS bool `json:"tls"`
@ -81,9 +84,11 @@ type RateLimit struct {
type WorkersMode string type WorkersMode string
const ( const (
WorkersByRigID WorkersMode = "rig-id" // rigid field, fallback to user WorkersByRigID WorkersMode = "rig-id" // rigid field, fallback to user
WorkersByUser WorkersMode = "user" WorkersByUser WorkersMode = "user"
WorkersByPass WorkersMode = "password" WorkersByPassword WorkersMode = "password"
// WorkersByPass is kept as a compatibility alias for older configs.
WorkersByPass WorkersMode = WorkersByPassword
WorkersByAgent WorkersMode = "agent" WorkersByAgent WorkersMode = "agent"
WorkersByIP WorkersMode = "ip" WorkersByIP WorkersMode = "ip"
WorkersDisabled WorkersMode = "false" WorkersDisabled WorkersMode = "false"

154
config_runtime.go Normal file
View file

@ -0,0 +1,154 @@
package proxy
import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"strings"
	"time"
)
// LoadConfig reads a JSON config file, records its source path, and validates the result.
//
// cfg, errorValue := proxy.LoadConfig("config.json")
// if errorValue != nil {
// 	return
// }
func LoadConfig(path string) (*Config, error) {
	data, errorValue := os.ReadFile(path)
	if errorValue != nil {
		// os.ReadFile errors already include the path.
		return nil, errorValue
	}
	config := &Config{}
	if errorValue = json.Unmarshal(data, config); errorValue != nil {
		// Parse errors from encoding/json carry no file name; add it so the
		// caller can tell which config file was malformed.
		return nil, fmt.Errorf("parsing config %q: %w", path, errorValue)
	}
	// Remember where the config came from so reloads and watchers can reuse it.
	config.sourcePath = path
	if errorValue = config.Validate(); errorValue != nil {
		return nil, fmt.Errorf("validating config %q: %w", path, errorValue)
	}
	return config, nil
}
// Validate checks that `bind` and `pools` are present and every enabled pool has a URL.
//
// cfg := &proxy.Config{
// 	Bind:  []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
// 	Pools: []proxy.PoolConfig{{URL: "pool-a:3333", Enabled: true}},
// }
// if errorValue := cfg.Validate(); errorValue != nil {
// 	return
// }
func (c *Config) Validate() error {
	if c == nil {
		return errors.New("config is nil")
	}
	if strings.TrimSpace(c.Mode) == "" {
		return errors.New("mode is empty")
	}
	switch c.Mode {
	case "nicehash", "simple":
		// supported proxy modes
	default:
		return errors.New("mode is invalid")
	}
	if strings.TrimSpace(string(c.Workers)) == "" {
		return errors.New("workers mode is empty")
	}
	// WorkersByPass is an alias for WorkersByPassword, so one case covers both.
	switch c.Workers {
	case WorkersByRigID, WorkersByUser, WorkersByPassword, WorkersByAgent, WorkersByIP, WorkersDisabled:
		// recognised workers modes
	default:
		return errors.New("workers mode is invalid")
	}
	if len(c.Bind) == 0 {
		return errors.New("bind list is empty")
	}
	if len(c.Pools) == 0 {
		return errors.New("pool list is empty")
	}
	for _, pool := range c.Pools {
		if pool.Enabled && strings.TrimSpace(pool.URL) == "" {
			return errors.New("enabled pool URL is empty")
		}
	}
	return nil
}
// NewConfigWatcher watches a config file and reloads the proxy on modification.
//
// w := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
// 	p.Reload(cfg)
// })
func NewConfigWatcher(path string, onChange func(*Config)) *ConfigWatcher {
	// The exported constructor always produces an enabled watcher; the
	// unexported variant exists so tests can build a disabled one.
	const watchEnabled = true
	return newConfigWatcher(path, onChange, watchEnabled)
}
// newConfigWatcher builds a watcher; enabled=false yields an inert watcher
// whose Start is a no-op.
func newConfigWatcher(path string, onChange func(*Config), enabled bool) *ConfigWatcher {
	watcher := &ConfigWatcher{
		path:     path,
		onChange: onChange,
		enabled:  enabled,
	}
	// done signals the polling goroutine to exit; see Stop.
	watcher.done = make(chan struct{})
	return watcher
}
// Start begins 1-second polling for `config.json`.
//
// w.Start()
func (w *ConfigWatcher) Start() {
	if w == nil || !w.enabled {
		return
	}
	// Seed the baseline mtime so an already-present file does not trigger
	// an immediate reload on the first tick.
	if info, statErr := os.Stat(w.path); statErr == nil {
		w.lastModifiedAt = info.ModTime()
	}
	go w.pollLoop()
}

// pollLoop checks the file's mtime once per second until Stop closes done.
func (w *ConfigWatcher) pollLoop() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-w.done:
			return
		case <-ticker.C:
			info, statErr := os.Stat(w.path)
			if statErr != nil || !info.ModTime().After(w.lastModifiedAt) {
				continue
			}
			w.lastModifiedAt = info.ModTime()
			// Best effort: a save that fails to load or validate is skipped
			// silently; the next successful save will fire onChange.
			if config, loadErr := LoadConfig(w.path); loadErr == nil && w.onChange != nil {
				w.onChange(config)
			}
		}
	}
}
// Stop ends polling so the watcher can be shut down with `p.Stop()`.
//
// Stop is safe to call on a nil watcher and is idempotent for sequential
// calls: the select below falls through to close() only when the channel
// has not been closed yet.
//
// NOTE(review): two goroutines calling Stop concurrently could both reach
// the default branch and double-close `done` — confirm Stop is only ever
// invoked from one goroutine.
//
// w.Stop()
func (w *ConfigWatcher) Stop() {
	if w == nil || w.done == nil {
		return
	}
	select {
	case <-w.done:
		// already closed: nothing to do
	default:
		close(w.done)
	}
}

106
config_runtime_test.go Normal file
View file

@ -0,0 +1,106 @@
package proxy
import (
"os"
"testing"
"time"
)
// A fully-populated config must pass validation.
func TestConfig_Validate_Good(t *testing.T) {
	config := &Config{
		Mode:    "nicehash",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
	}
	err := config.Validate()
	if err != nil {
		t.Fatalf("expected valid config, got %v", err)
	}
}
// An unknown mode string must be rejected.
func TestConfig_Validate_Bad(t *testing.T) {
	config := &Config{
		Mode:  "bogus",
		Bind:  []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
	}
	err := config.Validate()
	if err == nil {
		t.Fatal("expected invalid mode to fail validation")
	}
}
// Leaving mode unset must be rejected.
func TestConfig_Validate_EmptyMode(t *testing.T) {
	config := &Config{
		Bind:  []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
	}
	err := config.Validate()
	if err == nil {
		t.Fatal("expected empty mode to fail validation")
	}
}
// TestConfig_Validate_Ugly checks that an unknown workers mode is rejected.
//
// The pool entry is otherwise valid so the failure can only come from the
// workers-mode check; the original fixture also had an empty pool URL, which
// would have kept this test green even if workers validation regressed.
func TestConfig_Validate_Ugly(t *testing.T) {
	cfg := &Config{
		Mode:    "simple",
		Workers: WorkersMode("bogus"),
		Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
	}
	if errorValue := cfg.Validate(); errorValue == nil {
		t.Fatal("expected invalid workers mode to fail validation")
	}
}
// Leaving the workers mode unset must be rejected.
func TestConfig_Validate_EmptyWorkers(t *testing.T) {
	config := &Config{
		Mode:  "simple",
		Bind:  []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
	}
	err := config.Validate()
	if err == nil {
		t.Fatal("expected empty workers mode to fail validation")
	}
}
// TestConfig_Validate_EnabledPoolURL checks that an enabled pool without a
// URL is rejected.
//
// Workers must be set here: Validate checks the workers mode before the pool
// list, so the original fixture (no Workers field) failed on "workers mode is
// empty" and never exercised the pool URL check at all.
func TestConfig_Validate_EnabledPoolURL(t *testing.T) {
	cfg := &Config{
		Mode:    "simple",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []PoolConfig{{Enabled: true}},
	}
	if errorValue := cfg.Validate(); errorValue == nil {
		t.Fatal("expected empty enabled pool URL to fail validation")
	}
}
func TestConfigWatcher_Start_Bad(t *testing.T) {
path := t.TempDir() + "/config.json"
errorValue := os.WriteFile(path, []byte(`{"mode":"nicehash","workers":"rig-id","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-a:3333","enabled":true}]}`), 0o644)
if errorValue != nil {
t.Fatal(errorValue)
}
triggered := make(chan struct{}, 1)
watcher := newConfigWatcher(path, func(cfg *Config) {
triggered <- struct{}{}
}, false)
watcher.Start()
defer watcher.Stop()
errorValue = os.WriteFile(path, []byte(`{"mode":"nicehash","workers":"rig-id","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-b:3333","enabled":true}]}`), 0o644)
if errorValue != nil {
t.Fatal(errorValue)
}
select {
case <-triggered:
t.Fatal("expected disabled watcher to stay quiet")
case <-time.After(1200 * time.Millisecond):
}
}

49
enum_string.go Normal file
View file

@ -0,0 +1,49 @@
package proxy
// String returns the stable name for one worker routing mode. The mode's
// underlying string is already its canonical name.
//
// mode := WorkersByRigID
// _ = mode.String()
func (m WorkersMode) String() string {
	return string(m)
}
// String returns the stable name for one miner state; unrecognised values
// render as "unknown".
//
// state := MinerStateReady
// _ = state.String()
func (s MinerState) String() string {
	switch s {
	case MinerStateClosing:
		return "closing"
	case MinerStateReady:
		return "ready"
	case MinerStateWaitReady:
		return "wait_ready"
	case MinerStateWaitLogin:
		return "wait_login"
	}
	return "unknown"
}
// String returns the stable name for one event type; unrecognised values
// render as "unknown".
//
// eventType := EventAccept
// _ = eventType.String()
func (e EventType) String() string {
	names := map[EventType]string{
		EventLogin:  "login",
		EventSubmit: "submit",
		EventAccept: "accept",
		EventReject: "reject",
		EventClose:  "close",
	}
	if name, ok := names[e]; ok {
		return name
	}
	return "unknown"
}

View file

@ -18,6 +18,7 @@ type EventType int
const ( const (
EventLogin EventType = iota // miner completed login EventLogin EventType = iota // miner completed login
EventSubmit // miner submitted a share
EventAccept // pool accepted a submitted share EventAccept // pool accepted a submitted share
EventReject // pool rejected a share (or share expired) EventReject // pool rejected a share (or share expired)
EventClose // miner TCP connection closed EventClose // miner TCP connection closed
@ -31,11 +32,51 @@ type EventHandler func(Event)
// //
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m}) // bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m})
type Event struct { type Event struct {
Type EventType Type EventType
Miner *Miner // always set Miner *Miner // always set
Job *Job // set for Accept and Reject events Job *Job // set for Accept and Reject events
Diff uint64 // effective difficulty of the share (Accept and Reject) JobID string // set for Submit events
Error string // rejection reason (Reject only) Nonce string // set for Submit events
Latency uint16 // pool response time in ms (Accept and Reject) Result string // set for Submit events
Expired bool // true if the share was accepted but against the previous job Algo string // set for Submit events
RequestID int64 // set for Submit events
Diff uint64 // effective difficulty of the share (Accept and Reject)
Error string // rejection reason (Reject only)
Latency uint16 // pool response time in ms (Accept and Reject)
Expired bool // true if the share was accepted but against the previous job
}
// NewEventBus builds an empty synchronous event dispatcher.
//
// bus := proxy.NewEventBus()
func NewEventBus() *EventBus {
	bus := &EventBus{}
	// Pre-make the listener map so Subscribe never writes to a nil map.
	bus.listeners = make(map[EventType][]EventHandler)
	return bus
}
// Subscribe registers a handler for the given event type. Safe to call before Start.
//
// bus.Subscribe(proxy.EventAccept, func(e proxy.Event) { stats.OnAccept(e) })
func (b *EventBus) Subscribe(eventType EventType, handler EventHandler) {
	if handler == nil {
		// A nil handler would panic at dispatch time; drop it here.
		return
	}
	b.mu.Lock()
	b.listeners[eventType] = append(b.listeners[eventType], handler)
	b.mu.Unlock()
}
// Dispatch calls all registered handlers for the event's type in subscription order.
//
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m})
func (b *EventBus) Dispatch(event Event) {
b.mu.RLock()
handlers := append([]EventHandler(nil), b.listeners[event.Type]...)
b.mu.RUnlock()
for _, handler := range handlers {
handler(event)
}
} }

88
job.go
View file

@ -1,5 +1,11 @@
package proxy package proxy
import (
"encoding/binary"
"encoding/hex"
"strconv"
)
// Job holds the current work unit received from a pool. Immutable once assigned. // Job holds the current work unit received from a pool. Immutable once assigned.
// //
// j := proxy.Job{ // j := proxy.Job{
@ -9,11 +15,79 @@ package proxy
// Algo: "cn/r", // Algo: "cn/r",
// } // }
type Job struct { type Job struct {
Blob string // hex-encoded block template (160 hex chars = 80 bytes) Blob string `json:"blob"` // hex-encoded block template (160 hex chars = 80 bytes)
JobID string // pool-assigned identifier JobID string `json:"job_id"` // pool-assigned identifier
Target string // 8-char hex little-endian uint32 difficulty target Target string `json:"target"` // 8-char hex little-endian uint32 difficulty target
Algo string // algorithm e.g. "cn/r", "rx/0"; "" if not negotiated Algo string `json:"algo"` // algorithm e.g. "cn/r", "rx/0"; "" if not negotiated
Height uint64 // block height (0 if pool did not provide) Height uint64 `json:"height"` // block height (0 if pool did not provide)
SeedHash string // RandomX seed hash hex (empty if not RandomX) SeedHash string `json:"seed_hash"` // RandomX seed hash hex (empty if not RandomX)
ClientID string // pool session ID that issued this job (for stale detection) ClientID string `json:"id"` // pool session ID that issued this job (for stale detection)
}
// IsValid reports whether the job carries both a blob and a job ID.
//
// if !job.IsValid() { return }
func (j Job) IsValid() bool {
	if j.Blob == "" {
		return false
	}
	return j.JobID != ""
}
// BlobWithFixedByte returns a copy of Blob with hex characters at positions 78-79
// (blob byte index 39) replaced by the two-digit lowercase hex of fixedByte.
// Blobs shorter than 80 hex chars are returned unchanged.
//
// partitioned := job.BlobWithFixedByte(0x2A) // chars 78-79 become "2a"
func (j Job) BlobWithFixedByte(fixedByte uint8) string {
	if len(j.Blob) < 80 {
		return j.Blob
	}
	const hexDigits = "0123456789abcdef"
	patched := []byte(j.Blob)
	patched[78] = hexDigits[fixedByte>>4]
	patched[79] = hexDigits[fixedByte&0x0F]
	return string(patched)
}
// DifficultyFromTarget converts the 8-char little-endian hex Target field to a
// uint64 difficulty. Malformed or zero targets yield 0.
//
// diff := job.DifficultyFromTarget() // "b88d0600" → ~100000
func (j Job) DifficultyFromTarget() uint64 {
	if len(j.Target) != 8 {
		return 0
	}
	raw, decodeErr := hex.DecodeString(j.Target)
	if decodeErr != nil || len(raw) != 4 {
		return 0
	}
	target := binary.LittleEndian.Uint32(raw)
	if target == 0 {
		// Avoid division by zero; a zero target is meaningless anyway.
		return 0
	}
	return uint64(^uint32(0) / target)
}
// TargetForDifficulty converts a difficulty back to the 8-char little-endian
// target field. Difficulties of 0 or 1 map to the maximum target "ffffffff".
//
// target := proxy.TargetForDifficulty(100000)
func TargetForDifficulty(difficulty uint64) string {
	const maxTarget = uint64(^uint32(0))
	if difficulty <= 1 {
		return "ffffffff"
	}
	target := maxTarget / difficulty
	// Clamp to the representable uint32 range (defensive; the division
	// cannot actually exceed maxTarget).
	switch {
	case target == 0:
		target = 1
	case target > maxTarget:
		target = maxTarget
	}
	encoded := make([]byte, 4)
	binary.LittleEndian.PutUint32(encoded, uint32(target))
	return hex.EncodeToString(encoded)
}
func lowerHexDigit(value uint8) byte {
return strconv.FormatUint(uint64(value), 16)[0]
} }

85
job_test.go Normal file
View file

@ -0,0 +1,85 @@
package proxy
import (
"encoding/json"
"testing"
)
// A job with both blob and ID is valid.
func TestJob_IsValid_Good(t *testing.T) {
	j := Job{Blob: "abcd", JobID: "job-1"}
	valid := j.IsValid()
	if !valid {
		t.Fatal("expected valid job")
	}
}
// A job missing its ID is invalid.
func TestJob_IsValid_Bad(t *testing.T) {
	j := Job{Blob: "abcd"}
	if j.IsValid() {
		t.Fatal("expected invalid job without job ID")
	}
}
// The zero-value job is invalid.
func TestJob_IsValid_Ugly(t *testing.T) {
	zero := Job{}
	if zero.IsValid() {
		t.Fatal("zero job should be invalid")
	}
}
// Byte 39 (hex chars 78-79) is replaced with the fixed byte.
func TestJob_BlobWithFixedByte_Good(t *testing.T) {
	j := Job{Blob: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}
	patched := j.BlobWithFixedByte(0x2a)
	if patched[78:80] != "2a" {
		t.Fatalf("expected byte patch 2a, got %s", patched[78:80])
	}
}
// Blobs shorter than 80 hex chars pass through unchanged.
func TestJob_BlobWithFixedByte_Bad(t *testing.T) {
	j := Job{Blob: "short"}
	patched := j.BlobWithFixedByte(0x2a)
	if patched != "short" {
		t.Fatalf("expected short blob unchanged, got %q", patched)
	}
}
// A 0x00 fixed byte overwrites existing non-zero hex characters.
func TestJob_BlobWithFixedByte_Ugly(t *testing.T) {
	j := Job{Blob: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"}
	patched := j.BlobWithFixedByte(0x00)
	if patched[78:80] != "00" {
		t.Fatalf("expected byte patch 00, got %s", patched[78:80])
	}
}
// A well-formed target decodes to a non-zero difficulty.
func TestJob_DifficultyFromTarget_Good(t *testing.T) {
	j := Job{Target: "b88d0600"}
	diff := j.DifficultyFromTarget()
	if diff == 0 {
		t.Fatal("expected non-zero difficulty")
	}
}
// Non-hex targets decode to difficulty zero.
func TestJob_DifficultyFromTarget_Bad(t *testing.T) {
	j := Job{Target: "zzzzzzzz"}
	diff := j.DifficultyFromTarget()
	if diff != 0 {
		t.Fatalf("expected invalid target difficulty to be zero, got %d", diff)
	}
}
// Boundary targets: all-zero yields 0, all-ones yields difficulty 1.
func TestJob_DifficultyFromTarget_Ugly(t *testing.T) {
	zeroTarget := Job{Target: "00000000"}
	if diff := zeroTarget.DifficultyFromTarget(); diff != 0 {
		t.Fatalf("expected zero target difficulty to be zero, got %d", diff)
	}
	maxTarget := Job{Target: "ffffffff"}
	if diff := maxTarget.DifficultyFromTarget(); diff != 1 {
		t.Fatalf("expected maximum target to resolve to difficulty 1, got %d", diff)
	}
}
// The JSON tags map pool fields (including "id" → ClientID) onto the struct.
func TestJob_JSON_Unmarshal_Good(t *testing.T) {
	payload := []byte(`{"blob":"abcd","job_id":"job-1","target":"b88d0600","algo":"cn/r","height":42,"seed_hash":"seed","id":"session-1"}`)
	var decoded Job
	decodeErr := json.Unmarshal(payload, &decoded)
	if decodeErr != nil {
		t.Fatal(decodeErr)
	}
	if decoded.JobID != "job-1" || decoded.SeedHash != "seed" || decoded.ClientID != "session-1" {
		t.Fatalf("unexpected decoded job: %+v", decoded)
	}
}

View file

@ -5,19 +5,108 @@
// bus.Subscribe(proxy.EventClose, al.OnClose) // bus.Subscribe(proxy.EventClose, al.OnClose)
package log package log
import "sync" import (
"fmt"
"os"
"sync"
// AccessLog writes connection lifecycle lines to an append-only text file. "dappco.re/go/core/proxy"
)
// AccessLog writes append-only connection lines.
// //
// Line format (connect): 2026-04-04T12:00:00Z CONNECT <ip> <user> <agent> // al := log.NewAccessLog("/var/log/proxy-access.log")
// Line format (close): 2026-04-04T12:00:00Z CLOSE <ip> <user> rx=<bytes> tx=<bytes>
//
// al, result := log.NewAccessLog("/var/log/proxy-access.log")
// bus.Subscribe(proxy.EventLogin, al.OnLogin) // bus.Subscribe(proxy.EventLogin, al.OnLogin)
// bus.Subscribe(proxy.EventClose, al.OnClose) // bus.Subscribe(proxy.EventClose, al.OnClose)
type AccessLog struct { type AccessLog struct {
path string path string
mu sync.Mutex mu sync.Mutex
// f is opened append-only on first write; nil until first event. file *os.File
// Uses core.File for I/O abstraction. closed bool
}
// NewAccessLog opens the file lazily on first write.
//
// al := log.NewAccessLog("/var/log/proxy-access.log")
func NewAccessLog(path string) *AccessLog {
return &AccessLog{path: path}
}
// OnLogin writes `2026-04-04T12:00:00Z CONNECT 10.0.0.1 WALLET XMRig/6.21.0`.
//
// al.OnLogin(proxy.Event{Miner: miner})
func (l *AccessLog) OnLogin(event proxy.Event) {
if event.Miner == nil {
return
}
line := fmt.Sprintf("%s CONNECT %s %s %s\n",
utcTimestamp(),
event.Miner.IP(),
event.Miner.User(),
event.Miner.Agent(),
)
l.writeLine(line)
}
// OnClose writes `2026-04-04T12:00:00Z CLOSE 10.0.0.1 WALLET rx=512 tx=4096`.
//
// al.OnClose(proxy.Event{Miner: miner})
func (l *AccessLog) OnClose(event proxy.Event) {
if event.Miner == nil {
return
}
line := fmt.Sprintf("%s CLOSE %s %s rx=%d tx=%d\n",
utcTimestamp(),
event.Miner.IP(),
event.Miner.User(),
event.Miner.RX(),
event.Miner.TX(),
)
l.writeLine(line)
}
// Close releases the append-only file handle if it has been opened.
//
// al.Close()
func (l *AccessLog) Close() {
if l == nil {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.closed {
return
}
l.closed = true
if l.file != nil {
_ = l.file.Close()
l.file = nil
}
}
func (l *AccessLog) writeLine(line string) {
if l == nil || l.path == "" {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.closed {
return
}
if l.file == nil {
file, errorValue := os.OpenFile(l.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
if errorValue != nil {
return
}
l.file = file
}
_, _ = l.file.WriteString(line)
} }

View file

@ -1,18 +1,110 @@
package log package log
import "sync" import (
"fmt"
"os"
"sync"
"time"
// ShareLog writes share result lines to an append-only text file. "dappco.re/go/core/proxy"
// )
// Line format (accept): 2026-04-04T12:00:00Z ACCEPT <user> diff=<diff> latency=<ms>ms
// Line format (reject): 2026-04-04T12:00:00Z REJECT <user> reason="<message>" // ShareLog writes append-only share result lines.
// //
// sl := log.NewShareLog("/var/log/proxy-shares.log") // sl := log.NewShareLog("/var/log/proxy-shares.log")
// bus.Subscribe(proxy.EventAccept, sl.OnAccept) // bus.Subscribe(proxy.EventAccept, sl.OnAccept)
// bus.Subscribe(proxy.EventReject, sl.OnReject) // bus.Subscribe(proxy.EventReject, sl.OnReject)
type ShareLog struct { type ShareLog struct {
path string path string
mu sync.Mutex mu sync.Mutex
// f is opened append-only on first write; nil until first event. file *os.File
// Uses core.File for I/O abstraction. closed bool
}
// NewShareLog opens the file lazily on first write.
//
// sl := log.NewShareLog("/var/log/proxy-shares.log")
func NewShareLog(path string) *ShareLog {
return &ShareLog{path: path}
}
// OnAccept appends one ACCEPT line, e.g.
// `2026-04-04T12:00:00Z ACCEPT WALLET diff=100000 latency=82ms`.
// Events without a miner are ignored.
//
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000})
func (l *ShareLog) OnAccept(event proxy.Event) {
	miner := event.Miner
	if miner == nil {
		return
	}
	l.writeLine(fmt.Sprintf("%s ACCEPT %s diff=%d latency=%dms\n",
		utcTimestamp(), miner.User(), event.Diff, event.Latency))
}
// OnReject appends one REJECT line, e.g.
// `2026-04-04T12:00:00Z REJECT WALLET reason="Low difficulty share"`.
// Events without a miner are ignored.
//
// sl.OnReject(proxy.Event{Miner: miner, Error: "Low difficulty share"})
func (l *ShareLog) OnReject(event proxy.Event) {
	miner := event.Miner
	if miner == nil {
		return
	}
	l.writeLine(fmt.Sprintf("%s REJECT %s reason=%q\n",
		utcTimestamp(), miner.User(), event.Error))
}
// Close marks the share log closed and releases any open file handle.
// Safe on a nil receiver and idempotent across repeated calls.
//
// sl.Close()
func (l *ShareLog) Close() {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if !l.closed {
		l.closed = true
		if handle := l.file; handle != nil {
			_ = handle.Close()
			l.file = nil
		}
	}
}
// writeLine appends one line to the share log under the lock, lazily
// opening the file in append-only mode on first use. Open and write
// failures are swallowed: logging is best-effort by design.
func (l *ShareLog) writeLine(line string) {
	if l == nil || l.path == "" {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.closed {
		return
	}
	if l.file == nil {
		handle, openError := os.OpenFile(l.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
		if openError != nil {
			return
		}
		l.file = handle
	}
	_, _ = l.file.WriteString(line)
}
func utcTimestamp() string {
return time.Now().UTC().Format(time.RFC3339)
} }

View file

@ -25,28 +25,36 @@ const (
// m := proxy.NewMiner(conn, 3333, nil) // m := proxy.NewMiner(conn, 3333, nil)
// m.Start() // m.Start()
type Miner struct { type Miner struct {
id int64 // monotonically increasing per-process; atomic assignment id int64 // monotonically increasing per-process; atomic assignment
rpcID string // UUID v4 sent to miner as session id rpcID string // UUID v4 sent to miner as session id
state MinerState state MinerState
extAlgo bool // miner sent algo list in login params stateMu sync.RWMutex
extNH bool // NiceHash mode active (fixed byte splitting) extAlgo bool // miner sent algo list in login params
ip string // remote IP (without port, for logging) algoExtension bool // config allows forwarding algo negotiation
extNH bool // NiceHash mode active (fixed byte splitting)
ip string // remote IP (without port, for logging)
localPort uint16 localPort uint16
user string // login params.login (wallet address), custom diff suffix stripped user string // login params.login (wallet address), custom diff suffix stripped
password string // login params.pass password string // login params.pass
agent string // login params.agent agent string // login params.agent
rigID string // login params.rigid (optional extension) rigID string // login params.rigid (optional extension)
fixedByte uint8 // NiceHash slot index (0-255) algo []string
mapperID int64 // which NonceMapper owns this miner; -1 = unassigned fixedByte uint8 // NiceHash slot index (0-255)
routeID int64 // SimpleMapper ID in simple mode; -1 = unassigned mapperID int64 // which NonceMapper owns this miner; -1 = unassigned
customDiff uint64 // 0 = use pool diff; non-zero = cap diff to this value routeID int64 // SimpleMapper ID in simple mode; -1 = unassigned
diff uint64 // last difficulty sent to this miner from the pool customDiff uint64 // 0 = use pool diff; non-zero = cap diff to this value
rx uint64 // bytes received from miner diff uint64 // last difficulty sent to this miner from the pool
tx uint64 // bytes sent to miner rx uint64 // bytes received from miner
tx uint64 // bytes sent to miner
connectedAt time.Time connectedAt time.Time
lastActivityAt time.Time lastActivityAt time.Time
events *EventBus
splitter Splitter
currentJob *Job
closeOnce sync.Once
accessPassword string
conn net.Conn conn net.Conn
tlsConn *tls.Conn // nil if plain TCP tlsConn *tls.Conn // nil if plain TCP
sendMu sync.Mutex // serialises writes to conn sendMu sync.Mutex // serialises writes to conn
buf [16384]byte // per-miner send buffer; avoids per-write allocations buf [16384]byte // per-miner send buffer; avoids per-write allocations
} }

124
miner_methods.go Normal file
View file

@ -0,0 +1,124 @@
package proxy
import (
"crypto/tls"
"net"
"sync/atomic"
"time"
)
// minerIDSequence hands out process-wide miner ids; NewMiner increments it
// atomically, so ids are unique and monotonically increasing per process.
var minerIDSequence atomic.Int64
// NewMiner creates a Miner for an accepted net.Conn. Does not start reading yet.
//
// The miner starts in MinerStateWaitLogin with mapper and route unassigned
// (-1). When tlsCfg is non-nil and the accepted conn is already a *tls.Conn,
// the TLS handle is retained; otherwise the connection is treated as plain TCP.
// The remote IP (without port) is captured for logging when available.
//
// m := proxy.NewMiner(conn, 3333, nil)
func NewMiner(conn net.Conn, localPort uint16, tlsCfg *tls.Config) *Miner {
	// Capture a single timestamp so connectedAt and lastActivityAt agree
	// exactly instead of differing by the gap between two time.Now() calls.
	now := time.Now().UTC()
	miner := &Miner{
		id:             minerIDSequence.Add(1),
		state:          MinerStateWaitLogin,
		localPort:      localPort,
		mapperID:       -1,
		routeID:        -1,
		conn:           conn,
		connectedAt:    now,
		lastActivityAt: now,
	}
	if tlsCfg != nil {
		if tlsConnection, ok := conn.(*tls.Conn); ok {
			miner.tlsConn = tlsConnection
		}
	}
	if conn != nil && conn.RemoteAddr() != nil {
		miner.ip = remoteHost(conn.RemoteAddr().String())
	}
	return miner
}
// ID returns the process-unique, monotonically assigned miner id.
func (m *Miner) ID() int64 { return m.id }

// RPCID returns the UUID v4 session id issued to the miner at login.
func (m *Miner) RPCID() string { return m.rpcID }

// User returns the wallet/login supplied in the login request.
func (m *Miner) User() string { return m.user }

// Password returns the login request's pass field.
func (m *Miner) Password() string { return m.password }

// Agent returns the mining-software identifier from login.
func (m *Miner) Agent() string { return m.agent }

// RigID returns the optional rig identifier from login.
func (m *Miner) RigID() string { return m.rigID }

// IP returns the remote IP (port stripped) captured at accept time.
func (m *Miner) IP() string { return m.ip }
// State returns the miner's lifecycle state under the state lock.
// A nil receiver reports MinerStateClosing so callers can treat it as gone.
func (m *Miner) State() MinerState {
	if m == nil {
		return MinerStateClosing
	}
	m.stateMu.RLock()
	defer m.stateMu.RUnlock()
	return m.state
}
// Diff returns the last difficulty sent to this miner.
func (m *Miner) Diff() uint64 { return m.diff }

// FixedByte returns the NiceHash slot index (0-255).
func (m *Miner) FixedByte() uint8 { return m.fixedByte }

// MapperID returns the owning NonceMapper id; -1 means unassigned.
func (m *Miner) MapperID() int64 { return m.mapperID }

// RouteID returns the SimpleMapper id in simple mode; -1 means unassigned.
func (m *Miner) RouteID() int64 { return m.routeID }

// CustomDiff returns the per-miner difficulty cap; 0 means use pool diff.
func (m *Miner) CustomDiff() uint64 { return m.customDiff }

// TX returns the total bytes sent to the miner.
func (m *Miner) TX() uint64 { return m.tx }

// RX returns the total bytes received from the miner.
func (m *Miner) RX() uint64 { return m.rx }

// LastActivityAt returns the time of the miner's most recent activity.
func (m *Miner) LastActivityAt() time.Time { return m.lastActivityAt }
// SetRPCID stores the session id sent to the miner.
func (m *Miner) SetRPCID(value string) { m.rpcID = value }

// SetUser stores the wallet/login.
func (m *Miner) SetUser(value string) { m.user = value }

// SetPassword stores the login pass field.
func (m *Miner) SetPassword(value string) { m.password = value }

// SetAgent stores the mining-software identifier.
func (m *Miner) SetAgent(value string) { m.agent = value }

// SetRigID stores the optional rig identifier.
func (m *Miner) SetRigID(value string) { m.rigID = value }
// SetState records a new lifecycle state under the state lock.
// A nil receiver is a no-op.
func (m *Miner) SetState(value MinerState) {
	if m == nil {
		return
	}
	m.stateMu.Lock()
	defer m.stateMu.Unlock()
	m.state = value
}
// SetDiff records the difficulty last sent to the miner.
func (m *Miner) SetDiff(value uint64) { m.diff = value }

// SetFixedByte assigns the NiceHash slot index.
func (m *Miner) SetFixedByte(value uint8) { m.fixedByte = value }

// SetMapperID assigns the owning NonceMapper (-1 to unassign).
func (m *Miner) SetMapperID(value int64) { m.mapperID = value }

// SetRouteID assigns the SimpleMapper route (-1 to unassign).
func (m *Miner) SetRouteID(value int64) { m.routeID = value }

// SetCustomDiff sets the per-miner difficulty cap (0 disables the cap).
func (m *Miner) SetCustomDiff(value uint64) { m.customDiff = value }

// SetNiceHashEnabled toggles NiceHash fixed-byte blob patching.
func (m *Miner) SetNiceHashEnabled(value bool) { m.extNH = value }
// PrimeJob installs job as the miner's current job and recomputes the
// effective (possibly custom-capped) difficulty. Invalid jobs and nil
// receivers are ignored.
func (m *Miner) PrimeJob(job Job) {
	if m == nil {
		return
	}
	if !job.IsValid() {
		return
	}
	jobCopy := job
	m.currentJob = &jobCopy
	m.diff = m.effectiveDifficulty(job)
}
// Touch refreshes the last-activity timestamp consumed by the read-deadline
// logic. Guards the nil receiver for consistency with the other mutators
// (State, SetState, Close all tolerate nil).
func (m *Miner) Touch() {
	if m == nil {
		return
	}
	m.lastActivityAt = time.Now().UTC()
}
// remoteHost strips the port from a "host:port" address, returning the
// input unchanged when it does not split cleanly.
func remoteHost(address string) string {
	if host, _, splitError := net.SplitHostPort(address); splitError == nil {
		return host
	}
	return address
}
// effectiveDifficulty caps the job's pool difficulty at the miner's custom
// difficulty. With no custom diff, or a pool diff of zero or already at or
// below the cap, the pool value is returned untouched.
func (m *Miner) effectiveDifficulty(job Job) uint64 {
	poolDiff := job.DifficultyFromTarget()
	switch {
	case m == nil, m.customDiff == 0:
		return poolDiff
	case poolDiff == 0, poolDiff <= m.customDiff:
		return poolDiff
	default:
		return m.customDiff
	}
}
// effectiveTarget renders the target the miner should receive: the job's own
// target when no difficulty applies, otherwise the target derived from the
// capped difficulty.
func (m *Miner) effectiveTarget(job Job) string {
	if difficulty := m.effectiveDifficulty(job); difficulty != 0 {
		return TargetForDifficulty(difficulty)
	}
	return job.Target
}

443
miner_runtime.go Normal file
View file

@ -0,0 +1,443 @@
package proxy
import (
"bufio"
"crypto/rand"
"encoding/hex"
"encoding/json"
"net"
"strings"
"time"
)
// minerRequest is the decoded shape of one JSON-RPC line read from a miner.
// Params stays raw so each method handler can unmarshal its own parameter
// struct.
type minerRequest struct {
	ID     int64           `json:"id"`
	Method string          `json:"method"`
	Params json.RawMessage `json:"params"`
}
// Start begins the read loop in a goroutine and arms the login timeout timer.
//
// The loop re-arms the state-dependent read deadline before each read, counts
// received bytes into rx, and closes the miner on any read error, deadline
// expiry, or a line that exceeds the per-miner buffer.
//
// m.Start()
func (m *Miner) Start() {
	if m == nil || m.conn == nil {
		return
	}
	go func() {
		reader := bufio.NewReaderSize(m.conn, len(m.buf))
		for {
			if errorValue := m.applyReadDeadline(); errorValue != nil {
				m.Close()
				return
			}
			line, isPrefix, errorValue := reader.ReadLine()
			if errorValue != nil {
				m.Close()
				return
			}
			// isPrefix means the line did not fit the buffer: drop the peer.
			if isPrefix {
				m.Close()
				return
			}
			if len(line) == 0 {
				continue
			}
			// +1 accounts for the newline stripped by ReadLine.
			m.rx += uint64(len(line) + 1)
			m.Touch()
			m.handleLine(line)
		}
	}()
}
// ForwardJob encodes the job as a stratum job notification and writes it to the miner.
//
// NiceHash miners receive a blob patched with their fixed byte; the target
// and recorded diff reflect any custom-difficulty cap. The job is stored as
// the miner's current job and the state moves to Ready before the write.
//
// m.ForwardJob(job, "cn/r")
func (m *Miner) ForwardJob(job Job, algo string) {
	if m == nil || m.conn == nil {
		return
	}
	blob := job.Blob
	if m.extNH {
		blob = job.BlobWithFixedByte(m.fixedByte)
	}
	target := m.effectiveTarget(job)
	m.diff = m.effectiveDifficulty(job)
	m.SetState(MinerStateReady)
	jobCopy := job
	m.currentJob = &jobCopy
	m.Touch()
	params := map[string]interface{}{
		"blob":   blob,
		"job_id": job.JobID,
		"target": target,
		"id":     m.rpcID,
	}
	// Optional fields are omitted rather than sent zero-valued.
	if job.Height > 0 {
		params["height"] = job.Height
	}
	if job.SeedHash != "" {
		params["seed_hash"] = job.SeedHash
	}
	// algo rides along only when both config and miner negotiated the extension.
	if m.algoExtension && m.extAlgo && algo != "" {
		params["algo"] = algo
	}
	m.writeJSON(map[string]interface{}{
		"jsonrpc": "2.0",
		"method":  "job",
		"params":  params,
	})
}
// ReplyWithError sends a JSON-RPC error response for the given request id.
//
// m.ReplyWithError(2, "Low difficulty share")
func (m *Miner) ReplyWithError(id int64, message string) {
	errorBody := map[string]interface{}{
		"code":    -1,
		"message": message,
	}
	m.writeJSON(map[string]interface{}{
		"id":      id,
		"jsonrpc": "2.0",
		"error":   errorBody,
	})
}
// Success sends a JSON-RPC success response with the given status string.
//
// m.Success(2, "OK")
func (m *Miner) Success(id int64, status string) {
	result := map[string]string{"status": status}
	m.writeJSON(map[string]interface{}{
		"id":      id,
		"jsonrpc": "2.0",
		"error":   nil,
		"result":  result,
	})
}
// Close initiates graceful TCP shutdown. Safe to call multiple times.
// The first call moves the miner to Closing, publishes EventClose when an
// event bus is attached, then closes the connection.
//
// m.Close()
func (m *Miner) Close() {
	if m == nil || m.conn == nil {
		return
	}
	m.closeOnce.Do(func() {
		m.SetState(MinerStateClosing)
		if bus := m.events; bus != nil {
			bus.Dispatch(Event{Type: EventClose, Miner: m})
		}
		_ = m.conn.Close()
	})
}
// writeJSON marshals value, appends a newline, and writes it to the miner
// under the send lock; tx grows only when the write succeeds.
func (m *Miner) writeJSON(value interface{}) {
	if m == nil || m.conn == nil {
		return
	}
	payload, marshalError := json.Marshal(value)
	if marshalError != nil {
		return
	}
	payload = append(payload, '\n')
	m.sendMu.Lock()
	defer m.sendMu.Unlock()
	written, writeError := m.conn.Write(payload)
	if writeError != nil {
		return
	}
	m.tx += uint64(written)
}
// handleLine decodes one request line and routes it to its method handler.
// Oversized or malformed lines terminate the connection; unknown methods
// with a non-zero id receive an "Invalid request" error reply.
func (m *Miner) handleLine(line []byte) {
	if len(line) > len(m.buf) {
		m.Close()
		return
	}
	var request minerRequest
	if decodeError := json.Unmarshal(line, &request); decodeError != nil {
		m.Close()
		return
	}
	switch request.Method {
	case "login":
		m.handleLogin(request)
	case "submit":
		m.handleSubmit(request)
	case "keepalived":
		m.handleKeepalived(request)
	default:
		if request.ID == 0 {
			return
		}
		m.ReplyWithError(request.ID, "Invalid request")
	}
}
// handleLogin validates a login request, resets per-session state, announces
// the login on the event bus, and replies with either an error or an OK
// result that may carry an immediately primed job.
//
// Event subscribers run synchronously inside Dispatch and may mutate the
// miner before the reply is built (e.g. enable NiceHash mode or prime a job
// via PrimeJob, as exercised by the tests).
func (m *Miner) handleLogin(request minerRequest) {
	type loginParams struct {
		Login string   `json:"login"`
		Pass  string   `json:"pass"`
		Agent string   `json:"agent"`
		Algo  []string `json:"algo"`
		RigID string   `json:"rigid"`
	}
	params := loginParams{}
	if errorValue := json.Unmarshal(request.Params, &params); errorValue != nil {
		m.ReplyWithError(request.ID, "Invalid payment address provided")
		return
	}
	if params.Login == "" {
		m.ReplyWithError(request.ID, "Invalid payment address provided")
		return
	}
	// Optional proxy-level access password gate.
	if m.accessPassword != "" && params.Pass != m.accessPassword {
		m.ReplyWithError(request.ID, "Invalid password")
		return
	}
	// Reset per-session share state before subscribers get a chance to set it.
	m.SetCustomDiff(0)
	m.currentJob = nil
	m.diff = 0
	m.SetPassword(params.Pass)
	m.SetAgent(params.Agent)
	m.SetRigID(params.RigID)
	// Reuse the algo slice's backing array; a non-empty list flags the extension.
	m.algo = append(m.algo[:0], params.Algo...)
	m.extAlgo = len(params.Algo) > 0
	m.SetUser(params.Login)
	m.SetRPCID(newRPCID())
	if m.events != nil {
		m.events.Dispatch(Event{Type: EventLogin, Miner: m})
	}
	m.Touch()
	// A subscriber may have closed the miner; do not reply in that case.
	if m.State() == MinerStateClosing {
		return
	}
	result := map[string]interface{}{
		"id":     m.rpcID,
		"status": "OK",
	}
	if m.currentJob != nil && m.currentJob.IsValid() {
		// A job was primed during EventLogin: deliver it inline with the reply.
		jobCopy := *m.currentJob
		blob := jobCopy.Blob
		if m.extNH {
			blob = jobCopy.BlobWithFixedByte(m.fixedByte)
		}
		jobResult := map[string]interface{}{
			"blob":   blob,
			"job_id": jobCopy.JobID,
			"target": m.effectiveTarget(jobCopy),
			"id":     m.rpcID,
		}
		if jobCopy.Height > 0 {
			jobResult["height"] = jobCopy.Height
		}
		if jobCopy.SeedHash != "" {
			jobResult["seed_hash"] = jobCopy.SeedHash
		}
		if m.algoExtension && m.extAlgo && jobCopy.Algo != "" {
			jobResult["algo"] = jobCopy.Algo
		}
		result["job"] = jobResult
		if m.algoExtension && m.extAlgo {
			result["extensions"] = []string{"algo"}
		}
		m.SetState(MinerStateReady)
	} else {
		m.SetState(MinerStateWaitReady)
		if m.algoExtension && m.extAlgo {
			result["extensions"] = []string{"algo"}
		}
	}
	m.writeJSON(map[string]interface{}{
		"id":      request.ID,
		"jsonrpc": "2.0",
		"error":   nil,
		"result":  result,
	})
}
// handleSubmit validates one share submission and forwards it either to the
// event bus (when attached — the bus then takes full ownership of the reply)
// or, failing that, to the splitter.
//
// Validation: the miner must be Ready, the session id must match, a job id
// must be present, and the nonce must be exactly 8 lowercase hex characters.
func (m *Miner) handleSubmit(request minerRequest) {
	if m.State() != MinerStateReady {
		m.ReplyWithError(request.ID, "Unauthenticated")
		return
	}
	type submitParams struct {
		ID     string `json:"id"`
		JobID  string `json:"job_id"`
		Nonce  string `json:"nonce"`
		Result string `json:"result"`
		Algo   string `json:"algo"`
	}
	params := submitParams{}
	if errorValue := json.Unmarshal(request.Params, &params); errorValue != nil {
		m.ReplyWithError(request.ID, "Malformed share")
		return
	}
	// The submitted session id must match the one issued at login.
	if params.ID != m.rpcID {
		m.ReplyWithError(request.ID, "Unauthenticated")
		return
	}
	if params.JobID == "" {
		m.ReplyWithError(request.ID, "Missing job id")
		return
	}
	// Nonce: 8 chars (4 bytes), lowercase-only, valid hex.
	if len(params.Nonce) != 8 || params.Nonce != strings.ToLower(params.Nonce) {
		m.ReplyWithError(request.ID, "Invalid nonce")
		return
	}
	if _, errorValue := hex.DecodeString(params.Nonce); errorValue != nil {
		m.ReplyWithError(request.ID, "Invalid nonce")
		return
	}
	// algo is only honoured when the extension was negotiated both sides.
	submitAlgo := ""
	if m.algoExtension && m.extAlgo {
		submitAlgo = params.Algo
	}
	m.Touch()
	if m.events != nil {
		m.events.Dispatch(Event{
			Type:      EventSubmit,
			Miner:     m,
			JobID:     params.JobID,
			Nonce:     params.Nonce,
			Result:    params.Result,
			Algo:      submitAlgo,
			RequestID: request.ID,
		})
		return
	}
	// Splitter fallback only runs when no event bus is attached.
	if m.splitter != nil {
		m.splitter.OnSubmit(&SubmitEvent{
			Miner:     m,
			JobID:     params.JobID,
			Nonce:     params.Nonce,
			Result:    params.Result,
			Algo:      submitAlgo,
			RequestID: request.ID,
		})
	}
}
// handleKeepalived refreshes the activity timestamp and acknowledges the
// ping with a KEEPALIVED status.
func (m *Miner) handleKeepalived(request minerRequest) {
	requestID := request.ID
	m.Touch()
	m.Success(requestID, "KEEPALIVED")
}
// currentJobCopy returns a detached copy of the current job, or nil when
// the miner has none.
func (m *Miner) currentJobCopy() *Job {
	if m == nil {
		return nil
	}
	current := m.currentJob
	if current == nil {
		return nil
	}
	detached := *current
	return &detached
}
// applyReadDeadline arms the connection's read deadline for the current
// state; states without a timeout leave the connection as-is.
func (m *Miner) applyReadDeadline() error {
	if m == nil || m.conn == nil {
		return nil
	}
	if deadline := m.readDeadline(); !deadline.IsZero() {
		return m.conn.SetReadDeadline(deadline)
	}
	return nil
}
// readDeadline computes the absolute read deadline: 10s after the last
// activity while waiting for login, 600s once logged in, and the zero time
// (no deadline) for any other state.
func (m *Miner) readDeadline() time.Time {
	if m == nil {
		return time.Time{}
	}
	state := m.State()
	if state == MinerStateWaitLogin {
		return m.lastActivityAt.Add(10 * time.Second)
	}
	if state == MinerStateWaitReady || state == MinerStateReady {
		return m.lastActivityAt.Add(600 * time.Second)
	}
	return time.Time{}
}
// dispatchSubmitResult publishes an accept/reject event for the current job
// and answers the miner's pending submit request: Success on accept,
// ReplyWithError otherwise. Latency is not measured here and is always
// reported as 0. No-op without an event bus.
func (m *Miner) dispatchSubmitResult(eventType EventType, diff uint64, errorMessage string, requestID int64) {
	if m == nil || m.events == nil {
		return
	}
	jobCopy := m.currentJobCopy()
	m.events.Dispatch(Event{
		Type:    eventType,
		Miner:   m,
		Job:     jobCopy,
		Diff:    diff,
		Error:   errorMessage,
		Latency: 0,
	})
	if eventType == EventAccept {
		m.Success(requestID, "OK")
		return
	}
	m.ReplyWithError(requestID, errorMessage)
}
// setStateFromJob installs job as the miner's current job and marks the
// miner Ready.
func (m *Miner) setStateFromJob(job Job) {
	jobCopy := job
	m.currentJob = &jobCopy
	m.SetState(MinerStateReady)
}
// Expire closes the miner unless it is nil or already closing.
func (m *Miner) Expire() {
	if m == nil {
		return
	}
	if m.State() != MinerStateClosing {
		m.Close()
	}
}
func newRPCID() string {
value := make([]byte, 16)
_, _ = rand.Read(value)
value[6] = (value[6] & 0x0f) | 0x40
value[8] = (value[8] & 0x3f) | 0x80
encoded := make([]byte, 36)
hex.Encode(encoded[0:8], value[0:4])
encoded[8] = '-'
hex.Encode(encoded[9:13], value[4:6])
encoded[13] = '-'
hex.Encode(encoded[14:18], value[6:8])
encoded[18] = '-'
hex.Encode(encoded[19:23], value[8:10])
encoded[23] = '-'
hex.Encode(encoded[24:36], value[10:16])
return string(encoded)
}
// RemoteAddr exposes the underlying connection's remote address; nil when
// the miner or its connection is absent.
func (m *Miner) RemoteAddr() net.Addr {
	if m != nil && m.conn != nil {
		return m.conn.RemoteAddr()
	}
	return nil
}

394
miner_runtime_test.go Normal file
View file

@ -0,0 +1,394 @@
package proxy
import (
"bufio"
"encoding/json"
"net"
"strings"
"testing"
"time"
)
// TestMiner_Login_Good drives a full login over a net.Pipe and checks the
// reply envelope, the UUID v4 shape of the session id, and that login
// refreshed the miner's last-activity timestamp.
func TestMiner_Login_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	miner := NewMiner(serverConn, 3333, nil)
	before := miner.LastActivityAt()
	miner.Start()
	defer miner.Close()
	// Small delay so the read loop is running before the request arrives.
	time.Sleep(5 * time.Millisecond)
	encoder := json.NewEncoder(clientConn)
	if err := encoder.Encode(map[string]interface{}{
		"id":      1,
		"jsonrpc": "2.0",
		"method":  "login",
		"params": map[string]interface{}{
			"login": "wallet",
			"pass":  "x",
			"agent": "xmrig",
		},
	}); err != nil {
		t.Fatal(err)
	}
	clientConn.SetReadDeadline(time.Now().Add(time.Second))
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatal(err)
	}
	var response map[string]interface{}
	if err := json.Unmarshal(line, &response); err != nil {
		t.Fatal(err)
	}
	if response["jsonrpc"] != "2.0" {
		t.Fatalf("unexpected response: %#v", response)
	}
	if !miner.LastActivityAt().After(before) {
		t.Fatalf("expected login to refresh last activity timestamp, got before=%s after=%s", before, miner.LastActivityAt())
	}
	result := response["result"].(map[string]interface{})
	id, _ := result["id"].(string)
	// UUID v4: 36 chars, dashes at 8/13/18/23, version '4', variant 8/9/a/b.
	if result["status"] != "OK" || len(id) != 36 || id[8] != '-' || id[13] != '-' || id[18] != '-' || id[23] != '-' || id[14] != '4' || !strings.ContainsAny(string(id[19]), "89ab") {
		t.Fatalf("unexpected login response: %#v", response)
	}
}
// TestMiner_Keepalived_Bad sends keepalived before any login and still
// expects the KEEPALIVED acknowledgement (the handler does not gate on state).
func TestMiner_Keepalived_Bad(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	miner := NewMiner(serverConn, 3333, nil)
	miner.Start()
	defer miner.Close()
	encoder := json.NewEncoder(clientConn)
	if err := encoder.Encode(map[string]interface{}{
		"id":      2,
		"jsonrpc": "2.0",
		"method":  "keepalived",
	}); err != nil {
		t.Fatal(err)
	}
	clientConn.SetReadDeadline(time.Now().Add(time.Second))
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatal(err)
	}
	var response map[string]interface{}
	if err := json.Unmarshal(line, &response); err != nil {
		t.Fatal(err)
	}
	result := response["result"].(map[string]interface{})
	if result["status"] != "KEEPALIVED" {
		t.Fatalf("unexpected keepalived response: %#v", response)
	}
}
// TestMiner_Submit_Ugly submits a share with an uppercase, short nonce
// ("ABC123") and expects the "Invalid nonce" error path to reply.
func TestMiner_Submit_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	miner := NewMiner(serverConn, 3333, nil)
	miner.Start()
	defer miner.Close()
	miner.SetRPCID("session")
	miner.SetState(MinerStateReady)
	encoder := json.NewEncoder(clientConn)
	if err := encoder.Encode(map[string]interface{}{
		"id":      3,
		"jsonrpc": "2.0",
		"method":  "submit",
		"params": map[string]interface{}{
			"id":     "session",
			"job_id": "job-1",
			"nonce":  "ABC123",
			"result": "abc",
			"algo":   "cn/r",
		},
	}); err != nil {
		t.Fatal(err)
	}
	clientConn.SetReadDeadline(time.Now().Add(time.Second))
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatal(err)
	}
	var response map[string]interface{}
	if err := json.Unmarshal(line, &response); err != nil {
		t.Fatal(err)
	}
	if response["error"] == nil {
		t.Fatalf("expected invalid nonce error, got %#v", response)
	}
}
// TestMiner_Login_Ugly logs in with an algo list while the config-side
// algoExtension flag is on and expects the "algo" extension to be advertised
// in the login result.
func TestMiner_Login_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	miner := NewMiner(serverConn, 3333, nil)
	miner.algoExtension = true
	miner.Start()
	defer miner.Close()
	encoder := json.NewEncoder(clientConn)
	if err := encoder.Encode(map[string]interface{}{
		"id":      4,
		"jsonrpc": "2.0",
		"method":  "login",
		"params": map[string]interface{}{
			"login": "wallet",
			"pass":  "x",
			"agent": "xmrig",
			"algo":  []string{"cn/r"},
		},
	}); err != nil {
		t.Fatal(err)
	}
	clientConn.SetReadDeadline(time.Now().Add(time.Second))
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatal(err)
	}
	var response map[string]interface{}
	if err := json.Unmarshal(line, &response); err != nil {
		t.Fatal(err)
	}
	result := response["result"].(map[string]interface{})
	extensions, ok := result["extensions"].([]interface{})
	if !ok || len(extensions) != 1 || extensions[0] != "algo" {
		t.Fatalf("expected algo extension to be advertised, got %#v", response)
	}
}
// TestMiner_Login_NiceHashPatchedJob_Good primes a job from an EventLogin
// subscriber (NiceHash mode, fixed byte 0x2a, custom diff 10000) and checks
// the login reply carries the patched blob, the custom-diff target, and the
// forwarded height/seed_hash fields.
func TestMiner_Login_NiceHashPatchedJob_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	miner := NewMiner(serverConn, 3333, nil)
	miner.algoExtension = true
	miner.SetCustomDiff(10000)
	miner.events = NewEventBus()
	// Subscriber mutates the miner synchronously before the reply is built.
	miner.events.Subscribe(EventLogin, func(event Event) {
		if event.Miner == nil {
			return
		}
		event.Miner.SetNiceHashEnabled(true)
		event.Miner.SetFixedByte(0x2a)
		event.Miner.PrimeJob(Job{
			Blob:     "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
			JobID:    "job-1",
			Target:   "b88d0600",
			Algo:     "cn/r",
			Height:   42,
			SeedHash: "seed-hash",
		})
	})
	miner.Start()
	defer miner.Close()
	encoder := json.NewEncoder(clientConn)
	if err := encoder.Encode(map[string]interface{}{
		"id":      5,
		"jsonrpc": "2.0",
		"method":  "login",
		"params": map[string]interface{}{
			"login": "wallet",
			"pass":  "x",
			"agent": "xmrig",
			"algo":  []string{"cn/r"},
		},
	}); err != nil {
		t.Fatal(err)
	}
	clientConn.SetReadDeadline(time.Now().Add(time.Second))
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatal(err)
	}
	var response map[string]interface{}
	if err := json.Unmarshal(line, &response); err != nil {
		t.Fatal(err)
	}
	result := response["result"].(map[string]interface{})
	job := result["job"].(map[string]interface{})
	// The NiceHash fixed byte lands at hex offset 78-80 of the blob.
	if blob, _ := job["blob"].(string); blob[78:80] != "2a" {
		t.Fatalf("expected patched NiceHash blob, got %q", blob)
	}
	if target, _ := job["target"].(string); target != TargetForDifficulty(10000) {
		t.Fatalf("expected custom diff target, got %q", target)
	}
	if height, _ := job["height"].(float64); height != 42 {
		t.Fatalf("expected job height to be forwarded, got %#v", job)
	}
	if seedHash, _ := job["seed_hash"].(string); seedHash != "seed-hash" {
		t.Fatalf("expected job seed_hash to be forwarded, got %#v", job)
	}
}
// TestMiner_ForwardJob_Ugly forwards a job to a custom-diff miner with the
// algo extension active and checks the notification carries the capped
// target plus algo/seed_hash/height, and that miner.Diff tracks the cap.
func TestMiner_ForwardJob_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	miner := NewMiner(serverConn, 3333, nil)
	miner.algoExtension = true
	miner.extAlgo = true
	miner.SetRPCID("session")
	miner.SetCustomDiff(10000)
	// ForwardJob blocks on the pipe write until the test reads the line.
	go miner.ForwardJob(Job{
		Blob:     "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
		JobID:    "job-2",
		Target:   "b88d0600",
		Algo:     "rx/0",
		Height:   99,
		SeedHash: "seed",
	}, "rx/0")
	clientConn.SetReadDeadline(time.Now().Add(time.Second))
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatal(err)
	}
	var response map[string]interface{}
	if err := json.Unmarshal(line, &response); err != nil {
		t.Fatal(err)
	}
	params := response["params"].(map[string]interface{})
	if params["target"] != TargetForDifficulty(10000) {
		t.Fatalf("expected custom diff target, got %#v", params)
	}
	if params["algo"] != "rx/0" || params["seed_hash"] != "seed" || params["height"] != float64(99) {
		t.Fatalf("expected extended job fields to be forwarded, got %#v", params)
	}
	if miner.Diff() != 10000 {
		t.Fatalf("expected miner diff to track the effective target, got %d", miner.Diff())
	}
}
// TestMiner_Submit_Good submits a valid share and checks that an EventSubmit
// carrying the job id, nonce, and algo is dispatched, and that the reply
// written by the subscriber reaches the miner.
func TestMiner_Submit_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	miner := NewMiner(serverConn, 3333, nil)
	miner.events = NewEventBus()
	miner.algoExtension = true
	miner.extAlgo = true
	miner.SetRPCID("session")
	miner.SetState(MinerStateReady)
	submitSeen := make(chan Event, 1)
	miner.events.Subscribe(EventSubmit, func(event Event) {
		submitSeen <- event
		miner.Success(event.RequestID, "OK")
	})
	miner.Start()
	defer miner.Close()
	encoder := json.NewEncoder(clientConn)
	if err := encoder.Encode(map[string]interface{}{
		"id":      6,
		"jsonrpc": "2.0",
		"method":  "submit",
		"params": map[string]interface{}{
			"id":     "session",
			"job_id": "job-1",
			"nonce":  "deadbeef",
			"result": "abc",
			"algo":   "cn/r",
		},
	}); err != nil {
		t.Fatal(err)
	}
	select {
	case event := <-submitSeen:
		if event.JobID != "job-1" || event.Nonce != "deadbeef" || event.Algo != "cn/r" {
			t.Fatalf("unexpected submit event: %+v", event)
		}
	case <-time.After(time.Second):
		t.Fatal("expected submit event to be dispatched")
	}
	clientConn.SetReadDeadline(time.Now().Add(time.Second))
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatal(err)
	}
	var response map[string]interface{}
	if err := json.Unmarshal(line, &response); err != nil {
		t.Fatal(err)
	}
	result := response["result"].(map[string]interface{})
	if result["status"] != "OK" {
		t.Fatalf("unexpected submit response: %#v", response)
	}
}
// TestMiner_Submit_AlgoExtension_Bad submits a share carrying an algo while
// the extension is NOT negotiated, and expects the dispatched event's Algo
// field to be suppressed (empty).
func TestMiner_Submit_AlgoExtension_Bad(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	miner := NewMiner(serverConn, 3333, nil)
	miner.events = NewEventBus()
	miner.SetRPCID("session")
	miner.SetState(MinerStateReady)
	submitSeen := make(chan Event, 1)
	miner.events.Subscribe(EventSubmit, func(event Event) {
		submitSeen <- event
		miner.Success(event.RequestID, "OK")
	})
	miner.Start()
	defer miner.Close()
	encoder := json.NewEncoder(clientConn)
	if err := encoder.Encode(map[string]interface{}{
		"id":      7,
		"jsonrpc": "2.0",
		"method":  "submit",
		"params": map[string]interface{}{
			"id":     "session",
			"job_id": "job-1",
			"nonce":  "deadbeef",
			"result": "abc",
			"algo":   "cn/r",
		},
	}); err != nil {
		t.Fatal(err)
	}
	select {
	case event := <-submitSeen:
		if event.Algo != "" {
			t.Fatalf("expected algo to be suppressed when extension is disabled, got %+v", event)
		}
	case <-time.After(time.Second):
		t.Fatal("expected submit event to be dispatched")
	}
}

View file

@ -5,9 +5,17 @@
package pool package pool
import ( import (
"bufio"
"crypto/sha256"
"crypto/tls" "crypto/tls"
"encoding/hex"
"encoding/json"
"errors"
"net" "net"
"strings"
"sync" "sync"
"sync/atomic"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/core/proxy"
) )
@ -19,14 +27,15 @@ import (
// client := pool.NewStratumClient(poolCfg, listener) // client := pool.NewStratumClient(poolCfg, listener)
// client.Connect() // client.Connect()
type StratumClient struct { type StratumClient struct {
cfg proxy.PoolConfig config proxy.PoolConfig
listener StratumListener listener StratumListener
conn net.Conn conn net.Conn
tlsConn *tls.Conn // nil if plain TCP tlsConn *tls.Conn // nil if plain TCP
sessionID string // pool-assigned session id from login reply sessionID string // pool-assigned session id from login reply
seq int64 // atomic JSON-RPC request id counter requestSequence int64 // atomic JSON-RPC request id counter
active bool // true once first job received active bool // true once first job received
sendMu sync.Mutex disconnectOnce sync.Once
sendMu sync.Mutex
} }
// StratumListener receives events from the pool connection. // StratumListener receives events from the pool connection.
@ -39,3 +48,241 @@ type StratumListener interface {
// OnDisconnect is called when the pool TCP connection closes for any reason. // OnDisconnect is called when the pool TCP connection closes for any reason.
OnDisconnect() OnDisconnect()
} }
// jsonRPCRequest is one outbound JSON-RPC call to the pool; Params is
// omitted from the wire entirely when nil.
type jsonRPCRequest struct {
	ID     int64       `json:"id"`
	Method string      `json:"method"`
	Params interface{} `json:"params,omitempty"`
}

// jsonRPCResponse is one inbound pool message: either a method notification
// (Method/Params set) or a reply to a request (ID/Result/Error set).
// Params and Result stay raw until the handler knows the payload shape.
type jsonRPCResponse struct {
	ID     int64             `json:"id"`
	Method string            `json:"method"`
	Params json.RawMessage   `json:"params"`
	Result json.RawMessage   `json:"result"`
	Error  *jsonRPCErrorBody `json:"error"`
}

// jsonRPCErrorBody is the error object carried by failed pool replies.
type jsonRPCErrorBody struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}
// NewStratumClient builds one outbound pool client. The connection is not
// dialled until Connect is called.
//
// client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.lthn.io:3333", User: "WALLET", Pass: "x"}, listener)
func NewStratumClient(cfg proxy.PoolConfig, listener StratumListener) *StratumClient {
	client := &StratumClient{
		config:   cfg,
		listener: listener,
	}
	return client
}
// Connect dials the pool and starts the read loop.
//
// For TLS pools the handshake is forced eagerly and, when TLSFingerprint is
// configured, the leaf certificate's SHA-256 (lowercase hex) must match the
// configured fingerprint (compared case-insensitively on the config side)
// before the connection is accepted.
//
// client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.lthn.io:3333", TLS: true}, listener)
// errorValue := client.Connect()
func (c *StratumClient) Connect() error {
	var connection net.Conn
	var errorValue error
	dialer := net.Dialer{}
	if c.config.Keepalive {
		dialer.KeepAlive = 30 * time.Second
	}
	if c.config.TLS {
		connection, errorValue = dialer.Dial("tcp", c.config.URL)
		if errorValue != nil {
			return errorValue
		}
		// SNI uses the host part of the URL when it splits cleanly.
		serverName := c.config.URL
		if host, _, splitError := net.SplitHostPort(c.config.URL); splitError == nil && host != "" {
			serverName = host
		}
		tlsConnection := tls.Client(connection, &tls.Config{MinVersion: tls.VersionTLS12, ServerName: serverName})
		errorValue = tlsConnection.Handshake()
		if errorValue != nil {
			_ = connection.Close()
			return errorValue
		}
		if c.config.TLSFingerprint != "" {
			state := tlsConnection.ConnectionState()
			if len(state.PeerCertificates) == 0 {
				_ = connection.Close()
				return errors.New("missing peer certificate")
			}
			fingerprint := sha256.Sum256(state.PeerCertificates[0].Raw)
			if hex.EncodeToString(fingerprint[:]) != strings.ToLower(c.config.TLSFingerprint) {
				_ = connection.Close()
				return errors.New("pool fingerprint mismatch")
			}
		}
		connection = tlsConnection
		c.tlsConn = tlsConnection
	} else {
		connection, errorValue = dialer.Dial("tcp", c.config.URL)
		if errorValue != nil {
			return errorValue
		}
	}
	c.conn = connection
	// Fresh Once so a reconnect can emit its own disconnect callback.
	c.disconnectOnce = sync.Once{}
	go c.readLoop()
	return nil
}
// Login sends the stratum login request using the configured wallet and
// password. The descriptive Password field wins over the legacy Pass alias
// when both are set; login always uses request id 1.
//
// client.Login()
func (c *StratumClient) Login() {
	credentials := c.config.Pass
	if c.config.Password != "" {
		credentials = c.config.Password
	}
	params := map[string]interface{}{
		"login": c.config.User,
		"pass":  credentials,
		"rigid": c.config.RigID,
	}
	if algo := c.config.Algo; algo != "" {
		params["algo"] = []string{algo}
	}
	_ = c.writeJSON(jsonRPCRequest{ID: 1, Method: "login", Params: params})
}
// Submit sends a share submission. Returns the sequence number for result correlation.
//
// seq := client.Submit("job-1", "deadbeef", "HASH64HEX", "cn/r")
func (c *StratumClient) Submit(jobID string, nonce string, result string, algo string) int64 {
	sequence := atomic.AddInt64(&c.requestSequence, 1)
	payload := map[string]string{
		"id":     c.sessionID,
		"job_id": jobID,
		"nonce":  nonce,
		"result": result,
	}
	if algo != "" {
		payload["algo"] = algo
	}
	_ = c.writeJSON(jsonRPCRequest{ID: sequence, Method: "submit", Params: payload})
	return sequence
}
// Disconnect closes the connection and emits one disconnect callback.
//
// client.Disconnect()
func (c *StratumClient) Disconnect() {
	if connection := c.conn; connection != nil {
		_ = connection.Close()
	}
	c.notifyDisconnect()
}
// writeJSON marshals value and writes it newline-terminated under the send
// lock. A nil connection is a silent no-op (returns nil).
func (c *StratumClient) writeJSON(value interface{}) error {
	if c.conn == nil {
		return nil
	}
	payload, marshalError := json.Marshal(value)
	if marshalError != nil {
		return marshalError
	}
	payload = append(payload, '\n')
	c.sendMu.Lock()
	defer c.sendMu.Unlock()
	_, writeError := c.conn.Write(payload)
	return writeError
}
// readLoop reads newline-delimited JSON from the pool until the connection
// fails. Read errors and oversized lines (isPrefix) end the loop with one
// disconnect notification; unparseable lines are skipped silently.
func (c *StratumClient) readLoop() {
	reader := bufio.NewReaderSize(c.conn, 16384)
	for {
		line, isPrefix, errorValue := reader.ReadLine()
		if errorValue != nil {
			c.notifyDisconnect()
			return
		}
		// isPrefix means the line exceeded the 16 KiB buffer: bail out.
		if isPrefix {
			c.notifyDisconnect()
			return
		}
		response := jsonRPCResponse{}
		if errorValue = json.Unmarshal(line, &response); errorValue != nil {
			continue
		}
		c.handleMessage(response)
	}
}
// notifyDisconnect fires the listener's OnDisconnect at most once per
// connection (the Once is re-armed by Connect).
func (c *StratumClient) notifyDisconnect() {
	c.disconnectOnce.Do(func() {
		listener := c.listener
		if listener == nil {
			return
		}
		listener.OnDisconnect()
	})
}
// handleMessage routes one decoded pool message:
//   - "job" notifications are tagged with the session id and forwarded;
//   - the login reply (id 1, before a session exists) captures the session
//     id and any bundled first job, disconnecting on an error reply;
//   - any other non-zero id is treated as a submit result and reported
//     accepted exactly when no error body is present.
func (c *StratumClient) handleMessage(response jsonRPCResponse) {
	if response.Method == "job" {
		var payload proxy.Job
		if json.Unmarshal(response.Params, &payload) == nil && payload.IsValid() {
			payload.ClientID = c.sessionID
			c.active = true
			if c.listener != nil {
				c.listener.OnJob(payload)
			}
		}
		return
	}
	// id 1 is reserved for login; only the first such reply is consumed.
	if response.ID == 1 && c.sessionID == "" {
		if len(response.Result) > 0 {
			var loginResult struct {
				ID  string    `json:"id"`
				Job proxy.Job `json:"job"`
			}
			if json.Unmarshal(response.Result, &loginResult) == nil && loginResult.ID != "" {
				c.sessionID = loginResult.ID
				if loginResult.Job.IsValid() {
					loginResult.Job.ClientID = c.sessionID
					c.active = true
					if c.listener != nil {
						c.listener.OnJob(loginResult.Job)
					}
				}
			}
		}
		// A rejected login tears the connection down.
		if response.Error != nil {
			c.Disconnect()
		}
		return
	}
	if response.ID == 0 || c.listener == nil {
		return
	}
	accepted := response.Error == nil
	errorMessage := ""
	if response.Error != nil {
		errorMessage = response.Error.Message
	}
	c.listener.OnResultAccepted(response.ID, accepted, errorMessage)
}

248
pool/client_test.go Normal file
View file

@ -0,0 +1,248 @@
package pool
import (
"encoding/json"
"net"
"sync"
"testing"
"time"
"dappco.re/go/core/proxy"
)
// disconnectCountingListener is a StratumListener stub that only counts
// OnDisconnect callbacks, mutex-guarded for cross-goroutine reads.
type disconnectCountingListener struct {
	mu    sync.Mutex
	count int
}

func (l *disconnectCountingListener) OnJob(job proxy.Job) {}
func (l *disconnectCountingListener) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
}

// OnDisconnect records one disconnect callback.
func (l *disconnectCountingListener) OnDisconnect() {
	l.mu.Lock()
	l.count++
	l.mu.Unlock()
}

// Count returns the number of disconnect callbacks seen so far.
func (l *disconnectCountingListener) Count() int {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.count
}
// TestStratumClient_ReadLoop_Ugly feeds a line larger than readLoop's 16384
// byte buffer and expects the client to treat the truncated frame as fatal,
// firing exactly one disconnect callback.
func TestStratumClient_ReadLoop_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	listener := &disconnectCountingListener{}
	client := &StratumClient{
		listener: listener,
		conn: serverConn,
	}
	go client.readLoop()
	// One byte over the reader's buffer size forces isPrefix on ReadLine.
	payload := make([]byte, 16385)
	for index := range payload {
		payload[index] = 'a'
	}
	payload = append(payload, '\n')
	writeErr := make(chan error, 1)
	go func() {
		_, err := clientConn.Write(payload)
		writeErr <- err
	}()
	// NOTE(review): sleep-based synchronisation — flaky under heavy load;
	// consider polling Count() with a deadline instead.
	time.Sleep(50 * time.Millisecond)
	if got := listener.Count(); got != 1 {
		t.Fatalf("expected oversized line to close the connection, got %d disconnect callbacks", got)
	}
	// Non-blocking check: the pipe write may still be blocked (readLoop has
	// stopped reading without closing the conn), so only a completed write
	// is inspected for an error.
	select {
	case err := <-writeErr:
		if err != nil {
			t.Fatal(err)
		}
	default:
	}
}
// Disconnecting an idle client must produce exactly one listener callback,
// even though readLoop will also observe the closed connection.
func TestStratumClient_Disconnect_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	listener := &disconnectCountingListener{}
	client := &StratumClient{
		conn:     serverConn,
		listener: listener,
	}
	go client.readLoop()
	time.Sleep(10 * time.Millisecond)
	client.Disconnect()
	time.Sleep(50 * time.Millisecond)
	if callbacks := listener.Count(); callbacks != 1 {
		t.Fatalf("expected one disconnect callback, got %d", callbacks)
	}
}
// resultCapturingListener snapshots the most recent share-result callback
// together with counters for result and disconnect notifications.
type resultCapturingListener struct {
	mu           sync.Mutex
	sequence     int64
	accepted     bool
	errorMessage string
	results      int
	disconnects  int
}

// OnJob is a no-op; these tests only inspect share results.
func (l *resultCapturingListener) OnJob(job proxy.Job) {}

// OnResultAccepted stores the latest result and bumps the result counter.
func (l *resultCapturingListener) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.sequence = sequence
	l.accepted = accepted
	l.errorMessage = errorMessage
	l.results++
}

// OnDisconnect bumps the disconnect counter.
func (l *resultCapturingListener) OnDisconnect() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.disconnects++
}

// Snapshot returns all captured state under the mutex.
func (l *resultCapturingListener) Snapshot() (int64, bool, string, int, int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.sequence, l.accepted, l.errorMessage, l.results, l.disconnects
}
// A submit rejection (error response on a non-login ID) must surface as a
// rejected result callback, never as a disconnect.
func TestStratumClient_HandleMessage_Bad(t *testing.T) {
	listener := &resultCapturingListener{}
	client := &StratumClient{
		sessionID: "session-1",
		listener:  listener,
	}
	rejection := jsonRPCResponse{
		ID: 7,
		Error: &jsonRPCErrorBody{
			Code:    -1,
			Message: "Low difficulty share",
		},
	}
	client.handleMessage(rejection)
	sequence, accepted, errorMessage, results, disconnects := listener.Snapshot()
	if sequence != 7 || accepted || errorMessage != "Low difficulty share" || results != 1 {
		t.Fatalf("expected rejected submit callback, got sequence=%d accepted=%v error=%q results=%d", sequence, accepted, errorMessage, results)
	}
	if disconnects != 0 {
		t.Fatalf("expected no disconnect on submit rejection, got %d", disconnects)
	}
}
// An error-free response on a non-login ID must surface as an accepted
// share result with no disconnect.
func TestStratumClient_HandleMessage_Good(t *testing.T) {
	listener := &resultCapturingListener{}
	client := &StratumClient{
		sessionID: "session-1",
		listener:  listener,
	}
	client.handleMessage(jsonRPCResponse{ID: 7})
	sequence, accepted, errorMessage, results, disconnects := listener.Snapshot()
	if sequence != 7 || !accepted || errorMessage != "" || results != 1 {
		t.Fatalf("expected accepted submit callback, got sequence=%d accepted=%v error=%q results=%d", sequence, accepted, errorMessage, results)
	}
	if disconnects != 0 {
		t.Fatalf("expected no disconnect on submit accept, got %d", disconnects)
	}
}
// A rejected login (ID 1 while no session is established) must disconnect
// the client instead of being reported as a share result.
func TestStratumClient_HandleMessage_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	listener := &resultCapturingListener{}
	client := &StratumClient{
		conn:     serverConn,
		listener: listener,
	}
	defer client.Disconnect()
	loginFailure := jsonRPCResponse{
		ID: 1,
		Error: &jsonRPCErrorBody{
			Code:    -1,
			Message: "Unauthenticated",
		},
	}
	client.handleMessage(loginFailure)
	_, _, _, results, disconnects := listener.Snapshot()
	if results != 0 {
		t.Fatalf("expected login rejection not to be reported as a share result, got %d results", results)
	}
	if disconnects != 1 {
		t.Fatalf("expected login rejection to disconnect once, got %d", disconnects)
	}
}
// TestStratumClient_Login_Good verifies the login request wire format: the
// descriptive Password field wins over the legacy Pass alias, and the rigid
// and algo extensions are forwarded.
func TestStratumClient_Login_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()
	client := &StratumClient{
		config: proxy.PoolConfig{
			User: "WALLET",
			Pass: "legacy",
			Password: "preferred",
			RigID: "rig-alpha",
			Algo: "cn/r",
		},
		conn: serverConn,
	}
	writeDone := make(chan struct{})
	// Login blocks on the synchronous pipe until the test reads, so it runs
	// in its own goroutine.
	go func() {
		client.Login()
		close(writeDone)
	}()
	buffer := make([]byte, 2048)
	// NOTE(review): a single Read assumes the whole login frame arrives in
	// one pipe write — holds for net.Pipe with one Write per frame; confirm
	// if the client ever splits writes.
	n, err := clientConn.Read(buffer)
	if err != nil {
		t.Fatal(err)
	}
	var request jsonRPCRequest
	if err := json.Unmarshal(buffer[:n], &request); err != nil {
		t.Fatal(err)
	}
	params, ok := request.Params.(map[string]interface{})
	if !ok {
		t.Fatalf("expected login params map, got %T", request.Params)
	}
	// Password (descriptive alias) must take precedence over Pass.
	if got := params["pass"]; got != "preferred" {
		t.Fatalf("expected preferred password, got %v", got)
	}
	if got := params["rigid"]; got != "rig-alpha" {
		t.Fatalf("expected rigid field to be forwarded, got %v", got)
	}
	if got := params["algo"]; got == nil {
		t.Fatal("expected algo extension to be forwarded")
	}
	client.Disconnect()
	select {
	case <-writeDone:
	case <-time.After(time.Second):
		t.Fatal("expected login write to complete")
	}
}

View file

@ -2,6 +2,7 @@ package pool
import ( import (
"sync" "sync"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/core/proxy"
) )
@ -17,7 +18,9 @@ type FailoverStrategy struct {
current int current int
client *StratumClient client *StratumClient
listener StratumListener listener StratumListener
cfg *proxy.Config config *proxy.Config
closed bool
running bool
mu sync.Mutex mu sync.Mutex
} }
@ -35,3 +38,160 @@ type Strategy interface {
Disconnect() Disconnect()
IsActive() bool IsActive() bool
} }
// NewStrategyFactory captures the live pool list and retry settings.
//
// factory := pool.NewStrategyFactory(proxy.Config{Pools: []proxy.PoolConfig{{URL: "pool.lthn.io:3333", Enabled: true}}})
func NewStrategyFactory(cfg *proxy.Config) StrategyFactory {
	return func(listener StratumListener) Strategy {
		// A nil config degrades to an empty pool list; the strategy then
		// simply never connects.
		var pools []proxy.PoolConfig
		if cfg != nil {
			pools = cfg.Pools
		}
		return NewFailoverStrategy(pools, listener, cfg)
	}
}
// NewFailoverStrategy builds one failover client stack.
//
// strategy := pool.NewFailoverStrategy([]proxy.PoolConfig{{URL: "pool.lthn.io:3333", Enabled: true}}, listener, cfg)
func NewFailoverStrategy(pools []proxy.PoolConfig, listener StratumListener, cfg *proxy.Config) *FailoverStrategy {
	// Snapshot the pool slice so later caller mutations cannot leak in.
	snapshot := append([]proxy.PoolConfig(nil), pools...)
	strategy := &FailoverStrategy{
		pools:    snapshot,
		listener: listener,
		config:   cfg,
	}
	return strategy
}
// Connect dials the first enabled pool and rotates through fallbacks on failure.
//
// strategy.Connect()
func (s *FailoverStrategy) Connect() {
	// Re-arm a strategy that was previously Disconnect()ed so the dial
	// loop below is not short-circuited by the closed flag.
	s.mu.Lock()
	s.closed = false
	s.mu.Unlock()
	s.connectFrom(0)
}
// connectFrom walks the pool list starting at index start, dialling each
// enabled pool in order, for up to the configured number of retry rounds.
// It is single-flight: a concurrent call while one is in progress (or after
// Disconnect) returns immediately.
func (s *FailoverStrategy) connectFrom(start int) {
	s.mu.Lock()
	if s.running || s.closed {
		s.mu.Unlock()
		return
	}
	s.running = true
	s.mu.Unlock()
	defer func() {
		s.mu.Lock()
		s.running = false
		s.mu.Unlock()
	}()
	// Prefer the live config's pool list (it may have been reloaded) over
	// the snapshot taken at construction time.
	pools := s.pools
	if s.config != nil && len(s.config.Pools) > 0 {
		pools = s.config.Pools
	}
	if len(pools) == 0 {
		return
	}
	// Defaults: a single pass with no pause between rounds.
	retries := 1
	pause := time.Duration(0)
	if s.config != nil {
		if s.config.Retries > 0 {
			retries = s.config.Retries
		}
		if s.config.RetryPause > 0 {
			pause = time.Duration(s.config.RetryPause) * time.Second
		}
	}
	for attempt := 0; attempt < retries; attempt++ {
		for offset := 0; offset < len(pools); offset++ {
			// Rotate through the list starting at start (0 == primary),
			// so callers control which pool is preferred.
			index := (start + offset) % len(pools)
			poolConfig := pools[index]
			if !poolConfig.Enabled {
				continue
			}
			client := NewStratumClient(poolConfig, s)
			if errorValue := client.Connect(); errorValue == nil {
				s.mu.Lock()
				// Disconnect() may have raced the dial; drop the fresh client.
				if s.closed {
					s.mu.Unlock()
					client.Disconnect()
					return
				}
				s.client = client
				s.current = index
				s.mu.Unlock()
				client.Login()
				return
			}
		}
		// Pause between full rounds, but not after the final one.
		if pause > 0 && attempt < retries-1 {
			time.Sleep(pause)
		}
	}
}
// Submit forwards a share to the currently attached upstream client and
// returns its request sequence number, or 0 when no upstream is connected.
func (s *FailoverStrategy) Submit(jobID string, nonce string, result string, algo string) int64 {
	s.mu.Lock()
	upstream := s.client
	s.mu.Unlock()
	if upstream == nil {
		return 0
	}
	return upstream.Submit(jobID, nonce, result, algo)
}
// Disconnect marks the strategy closed (suppressing reconnect attempts) and
// tears down the live upstream client, if any.
func (s *FailoverStrategy) Disconnect() {
	s.mu.Lock()
	s.closed = true
	detached := s.client
	s.client = nil
	s.mu.Unlock()
	if detached == nil {
		return
	}
	detached.Disconnect()
}
// IsActive reports whether an upstream client is attached and has received
// a valid job or login result (which is when client.active is set).
//
// NOTE(review): s.mu guards s.client, but client.active itself is written
// from the client's read goroutine without this lock — looks like a data
// race under -race; confirm and consider an atomic flag on StratumClient.
func (s *FailoverStrategy) IsActive() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.client != nil && s.client.active
}
// OnJob relays upstream jobs to the wrapped listener, if any.
func (s *FailoverStrategy) OnJob(job proxy.Job) {
	if s.listener == nil {
		return
	}
	s.listener.OnJob(job)
}

// OnResultAccepted relays share results to the wrapped listener, if any.
func (s *FailoverStrategy) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
	if s.listener == nil {
		return
	}
	s.listener.OnResultAccepted(sequence, accepted, errorMessage)
}
// OnDisconnect handles an upstream drop: it detaches the client, relays the
// event to the wrapped listener, and — unless the strategy was deliberately
// closed — schedules a reconnect that restarts from the primary pool.
func (s *FailoverStrategy) OnDisconnect() {
	s.mu.Lock()
	client := s.client
	s.client = nil
	closed := s.closed
	s.mu.Unlock()
	if s.listener != nil {
		s.listener.OnDisconnect()
	}
	if closed {
		return
	}
	if client != nil {
		client.active = false
	}
	// Short pause avoids a tight reconnect spin; starting from index 0
	// means the primary pool is always preferred after a drop.
	go func() {
		time.Sleep(10 * time.Millisecond)
		s.connectFrom(0)
	}()
}

306
pool/strategy_test.go Normal file
View file

@ -0,0 +1,306 @@
package pool
import (
"bufio"
"encoding/json"
"net"
"sync"
"sync/atomic"
"testing"
"time"
"dappco.re/go/core/proxy"
)
// strategyTestListener forwards jobs to a channel and counts disconnects so
// failover tests can wait on both signals.
type strategyTestListener struct {
	jobCh        chan proxy.Job
	disconnectMu sync.Mutex
	disconnects  int
}

// OnJob publishes the job for the test body to receive.
func (l *strategyTestListener) OnJob(job proxy.Job) {
	l.jobCh <- job
}

// OnResultAccepted is unused by the failover tests.
func (l *strategyTestListener) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {}

// OnDisconnect counts upstream drops under the mutex.
func (l *strategyTestListener) OnDisconnect() {
	l.disconnectMu.Lock()
	defer l.disconnectMu.Unlock()
	l.disconnects++
}

// Disconnects reports the number of drops observed so far.
func (l *strategyTestListener) Disconnects() int {
	l.disconnectMu.Lock()
	defer l.disconnectMu.Unlock()
	return l.disconnects
}
// TestFailoverStrategy_Connect_Ugly: the primary accepts the TCP dial and
// immediately drops it; the strategy must fail over to the backup pool and
// deliver the backup's job, firing a disconnect callback on the way.
func TestFailoverStrategy_Connect_Ugly(t *testing.T) {
	primaryListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer primaryListener.Close()
	backupListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer backupListener.Close()
	// Primary: accept once, then close both listener and conn so the
	// strategy sees an immediate drop and cannot redial the primary.
	go func() {
		conn, acceptErr := primaryListener.Accept()
		if acceptErr != nil {
			return
		}
		_ = primaryListener.Close()
		_ = conn.Close()
	}()
	// Backup: read the login line, then answer with a valid login result
	// that embeds the first job.
	go func() {
		conn, acceptErr := backupListener.Accept()
		if acceptErr != nil {
			return
		}
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id": 1,
			"jsonrpc": "2.0",
			"error": nil,
			"result": map[string]interface{}{
				"id": "session-1",
				"job": map[string]interface{}{
					"blob": "abcd",
					"job_id": "job-1",
					"target": "b88d0600",
				},
			},
		})
	}()
	listener := &strategyTestListener{
		jobCh: make(chan proxy.Job, 1),
	}
	strategy := NewFailoverStrategy([]proxy.PoolConfig{
		{URL: primaryListener.Addr().String(), Enabled: true},
		{URL: backupListener.Addr().String(), Enabled: true},
	}, listener, &proxy.Config{Retries: 2})
	strategy.Connect()
	defer strategy.Disconnect()
	select {
	case job := <-listener.jobCh:
		if job.JobID != "job-1" {
			t.Fatalf("expected backup job, got %+v", job)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("expected failover job after primary disconnect")
	}
	if listener.Disconnects() == 0 {
		t.Fatal("expected disconnect callback before failover reconnect")
	}
}
// TestFailoverStrategy_OnDisconnect_Good: the primary rejects the login with
// an error (which makes the client disconnect itself); the strategy must
// then reconnect to the backup pool and deliver its job.
func TestFailoverStrategy_OnDisconnect_Good(t *testing.T) {
	primaryListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer primaryListener.Close()
	backupListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer backupListener.Close()
	var primaryConnections atomic.Int32
	// Primary: answer the login with an error body, then close the listener
	// so subsequent reconnect attempts cannot reach it.
	go func() {
		conn, acceptErr := primaryListener.Accept()
		if acceptErr != nil {
			return
		}
		primaryConnections.Add(1)
		defer primaryListener.Close()
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id": 1,
			"jsonrpc": "2.0",
			"error": map[string]interface{}{
				"code": -1,
				"message": "Unauthenticated",
			},
		})
	}()
	// Backup: accept the failover dial and answer with a valid login result.
	go func() {
		conn, acceptErr := backupListener.Accept()
		if acceptErr != nil {
			return
		}
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id": 1,
			"jsonrpc": "2.0",
			"error": nil,
			"result": map[string]interface{}{
				"id": "session-1",
				"job": map[string]interface{}{
					"blob": "abcd",
					"job_id": "job-1",
					"target": "b88d0600",
				},
			},
		})
	}()
	listener := &strategyTestListener{
		jobCh: make(chan proxy.Job, 1),
	}
	strategy := NewFailoverStrategy([]proxy.PoolConfig{
		{URL: primaryListener.Addr().String(), Enabled: true},
		{URL: backupListener.Addr().String(), Enabled: true},
	}, listener, &proxy.Config{Retries: 1})
	strategy.Connect()
	defer strategy.Disconnect()
	select {
	case job := <-listener.jobCh:
		if job.JobID != "job-1" {
			t.Fatalf("expected backup job, got %+v", job)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("expected backup job after primary disconnect, primary connections=%d", primaryConnections.Load())
	}
	if listener.Disconnects() == 0 {
		t.Fatal("expected disconnect callback before failover reconnect")
	}
}
// TestFailoverStrategy_OnDisconnect_PrimaryFirst: with the primary down the
// strategy starts on the backup; once the backup drops and the primary is
// back, the reconnect must prefer the primary again (failover order resets
// to index 0 rather than resuming from the backup).
func TestFailoverStrategy_OnDisconnect_PrimaryFirst(t *testing.T) {
	// Reserve a port for the primary, then close it so the first dial fails.
	primaryListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	primaryAddr := primaryListener.Addr().String()
	_ = primaryListener.Close()
	backupListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer backupListener.Close()
	// Backup: serve one login/job exchange, linger briefly, then drop.
	go func() {
		conn, acceptErr := backupListener.Accept()
		if acceptErr != nil {
			return
		}
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id": 1,
			"jsonrpc": "2.0",
			"error": nil,
			"result": map[string]interface{}{
				"id": "session-backup",
				"job": map[string]interface{}{
					"blob": "abcd",
					"job_id": "backup-job",
					"target": "b88d0600",
				},
			},
		})
		time.Sleep(40 * time.Millisecond)
	}()
	listener := &strategyTestListener{
		jobCh: make(chan proxy.Job, 2),
	}
	strategy := NewFailoverStrategy([]proxy.PoolConfig{
		{URL: primaryAddr, Enabled: true},
		{URL: backupListener.Addr().String(), Enabled: true},
	}, listener, &proxy.Config{Retries: 2})
	strategy.Connect()
	defer strategy.Disconnect()
	select {
	case job := <-listener.jobCh:
		if job.JobID != "backup-job" {
			t.Fatalf("expected initial failover job, got %+v", job)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("expected initial failover job")
	}
	// Bring the primary back on the same reserved address.
	// NOTE(review): re-binding the same port can race with other processes;
	// acceptable for a local test but a known flake source in CI.
	primaryListener, err = net.Listen("tcp", primaryAddr)
	if err != nil {
		t.Fatal(err)
	}
	defer primaryListener.Close()
	go func() {
		conn, acceptErr := primaryListener.Accept()
		if acceptErr != nil {
			return
		}
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id": 1,
			"jsonrpc": "2.0",
			"error": nil,
			"result": map[string]interface{}{
				"id": "session-primary",
				"job": map[string]interface{}{
					"blob": "abcd",
					"job_id": "primary-job",
					"target": "b88d0600",
				},
			},
		})
	}()
	select {
	case job := <-listener.jobCh:
		if job.JobID != "primary-job" {
			t.Fatalf("expected reconnect to prefer primary pool, got %+v", job)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("expected reconnect job")
	}
}

View file

@ -11,7 +11,9 @@
package proxy package proxy
import ( import (
"net/http"
"sync" "sync"
"sync/atomic"
"time" "time"
) )
@ -21,15 +23,23 @@ import (
// p, result := proxy.New(cfg) // p, result := proxy.New(cfg)
// if result.OK { p.Start() } // if result.OK { p.Start() }
type Proxy struct { type Proxy struct {
config *Config config *Config
splitter Splitter customDifficulty *CustomDiff
stats *Stats rateLimiter *RateLimiter
workers *Workers splitter Splitter
events *EventBus stats *Stats
servers []*Server workers *Workers
ticker *time.Ticker events *EventBus
watcher *ConfigWatcher currentMiners atomic.Uint64
done chan struct{} miners map[int64]*Miner
minerMu sync.RWMutex
servers []*Server
httpServer *http.Server
accessLogger *appendLineLogger
shareLogger *appendLineLogger
ticker *time.Ticker
watcher *ConfigWatcher
done chan struct{}
} }
// Splitter is the interface both NonceSplitter and SimpleSplitter satisfy. // Splitter is the interface both NonceSplitter and SimpleSplitter satisfy.
@ -86,10 +96,11 @@ type CloseEvent struct {
// }) // })
// w.Start() // w.Start()
type ConfigWatcher struct { type ConfigWatcher struct {
path string path string
onChange func(*Config) onChange func(*Config)
lastMod time.Time enabled bool
done chan struct{} lastModifiedAt time.Time
done chan struct{}
} }
// RateLimiter implements per-IP token bucket connection rate limiting. // RateLimiter implements per-IP token bucket connection rate limiting.
@ -100,7 +111,7 @@ type ConfigWatcher struct {
// rl := proxy.NewRateLimiter(cfg.RateLimit) // rl := proxy.NewRateLimiter(cfg.RateLimit)
// if !rl.Allow("1.2.3.4") { conn.Close(); return } // if !rl.Allow("1.2.3.4") { conn.Close(); return }
type RateLimiter struct { type RateLimiter struct {
cfg RateLimit config RateLimit
buckets map[string]*tokenBucket buckets map[string]*tokenBucket
banned map[string]time.Time banned map[string]time.Time
mu sync.Mutex mu sync.Mutex
@ -119,4 +130,31 @@ type tokenBucket struct {
// bus.Subscribe(proxy.EventLogin, cd.OnLogin) // bus.Subscribe(proxy.EventLogin, cd.OnLogin)
type CustomDiff struct { type CustomDiff struct {
globalDiff uint64 globalDiff uint64
mu sync.RWMutex
}
var splitterFactories = map[string]func(*Config, *EventBus) Splitter{
"": noopSplitterFactory,
}
// RegisterSplitterFactory registers a splitter constructor for a mode name.
//
// proxy.RegisterSplitterFactory("nicehash", func(cfg *proxy.Config, bus *proxy.EventBus) proxy.Splitter {
// return nicehash.NewNonceSplitter(cfg, bus, pool.NewStrategyFactory(cfg))
// })
func RegisterSplitterFactory(mode string, factory func(*Config, *EventBus) Splitter) {
if mode == "" || factory == nil {
return
}
splitterFactories[mode] = factory
}
func newSplitter(cfg *Config, events *EventBus) Splitter {
if cfg == nil {
return noopSplitter{}
}
if factory, exists := splitterFactories[cfg.Mode]; exists && factory != nil {
return factory(cfg, events)
}
return noopSplitter{}
} }

244
proxy_http_runtime.go Normal file
View file

@ -0,0 +1,244 @@
package proxy
import (
	"context"
	"crypto/subtle"
	"encoding/json"
	"net"
	"net/http"
	"strconv"
	"strings"
	"time"
)
// proxyAPIVersion is reported in the /1/summary response body.
const proxyAPIVersion = "1.0.0"
// RouteRegistrar is the minimal route-registration surface used by RegisterMonitoringRoutes.
//
// mux := http.NewServeMux()
// RegisterMonitoringRoutes(mux, p)
type RouteRegistrar interface {
	HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request))
}
// SummaryResponse is the /1/summary JSON body.
type SummaryResponse struct {
	Version string `json:"version"`
	Mode string `json:"mode"`
	Hashrate HashrateResponse `json:"hashrate"`
	Miners MinersCountResponse `json:"miners"`
	Workers uint64 `json:"workers"`
	Upstreams UpstreamResponse `json:"upstreams"`
	Results ResultsResponse `json:"results"`
}
// HashrateResponse carries the per-window hashrate array.
// The six slots correspond to the windows exposed by the stats tracker.
type HashrateResponse struct {
	Total [6]float64 `json:"total"`
}
// MinersCountResponse carries current and peak miner counts.
type MinersCountResponse struct {
	Now uint64 `json:"now"`
	Max uint64 `json:"max"`
}
// UpstreamResponse carries pool connection state counts.
// Ratio is miners-per-upstream, computed by the summary handler.
type UpstreamResponse struct {
	Active uint64 `json:"active"`
	Sleep uint64 `json:"sleep"`
	Error uint64 `json:"error"`
	Total uint64 `json:"total"`
	Ratio float64 `json:"ratio"`
}
// ResultsResponse carries share acceptance statistics.
// Best holds the top observed share difficulties.
type ResultsResponse struct {
	Accepted uint64 `json:"accepted"`
	Rejected uint64 `json:"rejected"`
	Invalid uint64 `json:"invalid"`
	Expired uint64 `json:"expired"`
	AvgTime uint32 `json:"avg_time"`
	Latency uint32 `json:"latency"`
	HashesTotal uint64 `json:"hashes_total"`
	Best [10]uint64 `json:"best"`
}
// startHTTPServer opens the monitoring HTTP listener when enabled.
// It is idempotent (a second call while a server exists is a no-op) and
// deliberately best-effort: a bind failure disables monitoring without
// taking the proxy down.
func startHTTPServer(p *Proxy) {
	if p == nil || p.config == nil || !p.config.HTTP.Enabled || p.httpServer != nil {
		return
	}
	mux := http.NewServeMux()
	RegisterMonitoringRoutes(mux, p)
	address := net.JoinHostPort(p.config.HTTP.Host, strconv.Itoa(int(p.config.HTTP.Port)))
	listener, errorValue := net.Listen("tcp", address)
	if errorValue != nil {
		// Best-effort: a busy port means no monitoring, not a crash.
		return
	}
	server := &http.Server{
		Handler: mux,
		// Bound header read time so a slow client cannot pin an accepted
		// connection forever (slowloris mitigation); the original server
		// had no timeouts at all.
		ReadHeaderTimeout: 10 * time.Second,
	}
	p.httpServer = server
	go func() {
		// Serve returns on Shutdown; the error is expected then.
		_ = server.Serve(listener)
	}()
}
// stopHTTPServer gracefully shuts the monitoring server down, waiting at
// most five seconds for in-flight requests, and clears the handle so a
// later start can recreate it.
func stopHTTPServer(p *Proxy) {
	if p == nil {
		return
	}
	server := p.httpServer
	if server == nil {
		return
	}
	p.httpServer = nil
	shutdownContext, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = server.Shutdown(shutdownContext)
}
// RegisterMonitoringRoutes mounts the monitoring endpoints on any router with HandleFunc.
//
// mux := http.NewServeMux()
// RegisterMonitoringRoutes(mux, p)
//
// Endpoints: /1/summary (aggregate stats), /1/workers (per-worker rows),
// /1/miners (per-connection rows). The positional row layouts below are
// wire format — do not reorder fields.
func RegisterMonitoringRoutes(router RouteRegistrar, proxyValue *Proxy) {
	if router == nil || proxyValue == nil {
		return
	}
	router.HandleFunc("/1/summary", func(writer http.ResponseWriter, request *http.Request) {
		if !allowHTTPRequest(writer, request, proxyValue.HTTPConfig()) {
			return
		}
		summary := proxyValue.Summary()
		upstreams := proxyValue.Upstreams()
		// Ratio is miners-per-upstream; guarded against divide-by-zero.
		ratio := 0.0
		if upstreams.Total > 0 {
			ratio = float64(proxyValue.CurrentMiners()) / float64(upstreams.Total)
		}
		response := SummaryResponse{
			Version: proxyAPIVersion,
			Mode: proxyValue.Mode(),
			Hashrate: HashrateResponse{
				Total: summary.Hashrate,
			},
			Miners: MinersCountResponse{
				Now: proxyValue.CurrentMiners(),
				Max: proxyValue.MaxMiners(),
			},
			Workers: uint64(len(proxyValue.Workers())),
			Upstreams: UpstreamResponse{
				Active: upstreams.Active,
				Sleep: upstreams.Sleep,
				Error: upstreams.Error,
				Total: upstreams.Total,
				Ratio: ratio,
			},
			Results: ResultsResponse{
				Accepted: summary.Accepted,
				Rejected: summary.Rejected,
				Invalid: summary.Invalid,
				Expired: summary.Expired,
				AvgTime: summary.AvgTime,
				Latency: summary.AvgLatency,
				HashesTotal: summary.Hashes,
				Best: summary.TopDiff,
			},
		}
		writeHTTPJSON(writer, response)
	})
	router.HandleFunc("/1/workers", func(writer http.ResponseWriter, request *http.Request) {
		if !allowHTTPRequest(writer, request, proxyValue.HTTPConfig()) {
			return
		}
		records := proxyValue.Workers()
		// Positional row: name, ip, connections, accepted, rejected,
		// invalid, hashes, last-hash unix time, then hashrate windows
		// (60s, 10m, 1h, 12h, 24h).
		rows := make([][]interface{}, 0, len(records))
		for _, record := range records {
			rows = append(rows, []interface{}{
				record.Name,
				record.LastIP,
				record.Connections,
				record.Accepted,
				record.Rejected,
				record.Invalid,
				record.Hashes,
				record.LastHashAt.Unix(),
				record.Hashrate(60),
				record.Hashrate(600),
				record.Hashrate(3600),
				record.Hashrate(43200),
				record.Hashrate(86400),
			})
		}
		writeHTTPJSON(writer, map[string]interface{}{
			"mode": proxyValue.WorkersMode(),
			"workers": rows,
		})
	})
	router.HandleFunc("/1/miners", func(writer http.ResponseWriter, request *http.Request) {
		if !allowHTTPRequest(writer, request, proxyValue.HTTPConfig()) {
			return
		}
		miners := proxyValue.Miners()
		rows := make([][]interface{}, 0, len(miners))
		for _, miner := range miners {
			ip := ""
			if remote := miner.RemoteAddr(); remote != nil {
				ip = remote.String()
			}
			// Row order matches the "format" array below; the password
			// column is always masked.
			rows = append(rows, []interface{}{
				miner.ID(),
				ip,
				miner.TX(),
				miner.RX(),
				miner.State(),
				miner.Diff(),
				miner.User(),
				"********",
				miner.RigID(),
				miner.Agent(),
			})
		}
		writeHTTPJSON(writer, map[string]interface{}{
			"format": []string{"id", "ip", "tx", "rx", "state", "diff", "user", "password", "rig_id", "agent"},
			"miners": rows,
		})
	})
}
// allowHTTPRequest enforces the monitoring API access policy: an optional
// bearer token and an optional read-only (GET-only) restriction. It writes
// the error response itself and returns false when the request is denied.
func allowHTTPRequest(writer http.ResponseWriter, request *http.Request, config HTTPConfig) bool {
	if request == nil {
		return false
	}
	if config.AccessToken != "" {
		header := request.Header.Get("Authorization")
		prefix := "Bearer "
		presented := ""
		if strings.HasPrefix(header, prefix) {
			presented = strings.TrimSpace(strings.TrimPrefix(header, prefix))
		}
		// Constant-time comparison: the original used ==, which leaks how
		// many leading bytes of the token matched via response timing.
		if subtle.ConstantTimeCompare([]byte(presented), []byte(config.AccessToken)) != 1 {
			writer.Header().Set("WWW-Authenticate", "Bearer")
			http.Error(writer, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
			return false
		}
	}
	// Restricted mode keeps the API read-only.
	if config.Restricted && request.Method != http.MethodGet {
		http.Error(writer, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return false
	}
	return true
}
func writeHTTPJSON(writer http.ResponseWriter, value interface{}) {
writer.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(writer).Encode(value)
}

142
proxy_logging_runtime.go Normal file
View file

@ -0,0 +1,142 @@
package proxy
import (
"fmt"
"os"
"sync"
"time"
)
type appendLineLogger struct {
path string
mu sync.Mutex
file *os.File
closed bool
}
func newAppendLineLogger(path string) *appendLineLogger {
return &appendLineLogger{path: path}
}
func (l *appendLineLogger) writeLine(line string) {
if l == nil || l.path == "" {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.closed {
return
}
if l.file == nil {
file, errorValue := os.OpenFile(l.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
if errorValue != nil {
return
}
l.file = file
}
_, _ = l.file.WriteString(line)
}
func (l *appendLineLogger) setPath(path string) {
if l == nil {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.path == path {
return
}
if l.file != nil {
_ = l.file.Close()
l.file = nil
}
l.closed = false
l.path = path
}
func (l *appendLineLogger) close() {
if l == nil {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.closed {
return
}
l.closed = true
if l.file != nil {
_ = l.file.Close()
l.file = nil
}
}
// subscribeAccessLog wires CONNECT/CLOSE audit lines onto the event bus.
// It returns nil when disabled (no bus or empty path); the caller keeps the
// logger so it can be re-pathed on reload and closed on shutdown.
func subscribeAccessLog(events *EventBus, path string) *appendLineLogger {
	if events == nil || path == "" {
		return nil
	}
	logger := newAppendLineLogger(path)
	events.Subscribe(EventLogin, func(event Event) {
		miner := event.Miner
		if miner == nil {
			return
		}
		logger.writeLine(fmt.Sprintf("%s CONNECT %s %s %s\n",
			time.Now().UTC().Format(time.RFC3339),
			miner.IP(),
			miner.User(),
			miner.Agent(),
		))
	})
	events.Subscribe(EventClose, func(event Event) {
		miner := event.Miner
		if miner == nil {
			return
		}
		logger.writeLine(fmt.Sprintf("%s CLOSE %s %s rx=%d tx=%d\n",
			time.Now().UTC().Format(time.RFC3339),
			miner.IP(),
			miner.User(),
			miner.RX(),
			miner.TX(),
		))
	})
	return logger
}
// subscribeShareLog wires ACCEPT/REJECT share lines onto the event bus.
// It returns nil when disabled (no bus or empty path).
func subscribeShareLog(events *EventBus, path string) *appendLineLogger {
	if events == nil || path == "" {
		return nil
	}
	logger := newAppendLineLogger(path)
	events.Subscribe(EventAccept, func(event Event) {
		miner := event.Miner
		if miner == nil {
			return
		}
		logger.writeLine(fmt.Sprintf("%s ACCEPT %s diff=%d latency=%dms\n",
			time.Now().UTC().Format(time.RFC3339),
			miner.User(),
			event.Diff,
			event.Latency,
		))
	})
	events.Subscribe(EventReject, func(event Event) {
		miner := event.Miner
		if miner == nil {
			return
		}
		logger.writeLine(fmt.Sprintf("%s REJECT %s reason=%q\n",
			time.Now().UTC().Format(time.RFC3339),
			miner.User(),
			event.Error,
		))
	})
	return logger
}

356
proxy_runtime.go Normal file
View file

@ -0,0 +1,356 @@
package proxy
import (
"crypto/tls"
"errors"
"net"
"sort"
"time"
)
// splitterShutdown is the optional drain/teardown surface a Splitter may
// implement; Stop type-asserts to it to flush pending shares before
// disconnecting upstreams.
type splitterShutdown interface {
	PendingCount() int
	Disconnect()
}
// New wires the proxy and returns a ready-to-start instance.
//
// p, errorValue := proxy.New(config)
//
// Wiring order matters: the custom-diff filter subscribes to EventLogin
// before the miner-registry subscriber so it can adjust difficulty first.
func New(config *Config) (*Proxy, error) {
	if config == nil {
		return nil, errors.New("config is nil")
	}
	if errorValue := config.Validate(); errorValue != nil {
		return nil, errorValue
	}
	eventBus := NewEventBus()
	statsValue := NewStats()
	customDifficultyFilter := NewCustomDiff(config.CustomDiff)
	eventBus.Subscribe(EventLogin, customDifficultyFilter.OnLogin)
	workersValue := NewWorkers(config.Workers, eventBus)
	workersValue.SetCustomDiffStats(config.CustomDiffStats)
	splitterValue := newSplitter(config, eventBus)
	proxyInstance := &Proxy{
		config: config,
		customDifficulty: customDifficultyFilter,
		splitter: splitterValue,
		stats: statsValue,
		workers: workersValue,
		events: eventBus,
		miners: make(map[int64]*Miner),
		rateLimiter: NewRateLimiter(config.RateLimit),
		done: make(chan struct{}),
	}
	// Loggers are nil when their path is empty; callers nil-check them.
	proxyInstance.accessLogger = subscribeAccessLog(eventBus, config.AccessLogFile)
	proxyInstance.shareLogger = subscribeShareLog(eventBus, config.ShareLogFile)
	eventBus.Subscribe(EventLogin, func(event Event) {
		if event.Miner != nil {
			proxyInstance.minerMu.Lock()
			proxyInstance.miners[event.Miner.ID()] = event.Miner
			proxyInstance.minerMu.Unlock()
		}
		current := proxyInstance.currentMiners.Add(1)
		// Lock-free peak tracking: CAS-raise maxMiners until it is at
		// least `current` or another goroutine got there first.
		for {
			maximum := statsValue.maxMiners.Load()
			if current <= maximum || statsValue.maxMiners.CompareAndSwap(maximum, current) {
				break
			}
		}
	})
	eventBus.Subscribe(EventClose, func(event Event) {
		if event.Miner != nil {
			proxyInstance.minerMu.Lock()
			delete(proxyInstance.miners, event.Miner.ID())
			proxyInstance.minerMu.Unlock()
		}
		// ^uint64(0) adds -1 in two's complement. NOTE(review): the
		// load-then-add pair is not atomic as a unit; concurrent closes
		// could in principle underflow — confirm close events are serial.
		if proxyInstance.currentMiners.Load() > 0 {
			proxyInstance.currentMiners.Add(^uint64(0))
		}
	})
	eventBus.Subscribe(EventAccept, statsValue.OnAccept)
	eventBus.Subscribe(EventReject, statsValue.OnReject)
	// Bridge bus events into the splitter's typed callbacks.
	if splitterValue != nil {
		eventBus.Subscribe(EventSubmit, func(event Event) {
			splitterValue.OnSubmit(&SubmitEvent{
				Miner: event.Miner,
				JobID: event.JobID,
				Nonce: event.Nonce,
				Result: event.Result,
				Algo: event.Algo,
				RequestID: event.RequestID,
			})
		})
		eventBus.Subscribe(EventLogin, func(event Event) {
			splitterValue.OnLogin(&LoginEvent{Miner: event.Miner})
		})
		eventBus.Subscribe(EventClose, func(event Event) {
			splitterValue.OnClose(&CloseEvent{Miner: event.Miner})
		})
	}
	// The watcher only runs when enabled AND the config knows its file path.
	if config.Watch && config.sourcePath != "" {
		proxyInstance.watcher = newConfigWatcher(config.sourcePath, proxyInstance.Reload, config.Watch)
		proxyInstance.watcher.Start()
	}
	return proxyInstance, nil
}
// Start connects the pool, opens listeners, and blocks until `Stop()`.
//
// p.Start()
//
// Any listener or TLS setup failure aborts startup via Stop(), which also
// closes the done channel and unblocks this call.
func (p *Proxy) Start() {
	if p.splitter != nil {
		p.splitter.Connect()
	}
	p.ticker = time.NewTicker(time.Second)
	for _, bind := range p.config.Bind {
		var tlsConfig *tls.Config
		// TLS is per-bind, but only when globally enabled too.
		if bind.TLS && p.config.TLS.Enabled {
			certificate, errorValue := tls.LoadX509KeyPair(p.config.TLS.CertFile, p.config.TLS.KeyFile)
			if errorValue == nil {
				tlsConfig = buildTLSConfig(p.config.TLS)
				tlsConfig.Certificates = []tls.Certificate{certificate}
			} else {
				// A bad certificate is fatal: tear down and bail.
				p.Stop()
				return
			}
		}
		server, errorValue := NewServer(bind, tlsConfig, p.rateLimiter, p.acceptConn)
		if errorValue != nil {
			p.Stop()
			return
		}
		p.servers = append(p.servers, server)
		server.Start()
	}
	if p.config != nil && p.config.HTTP.Enabled {
		startHTTPServer(p)
	}
	// One-second heartbeat driving stats decay, worker accounting, rate
	// limiter cleanup, and splitter housekeeping until done closes.
	go func() {
		var ticks uint64
		for {
			select {
			case <-p.ticker.C:
				ticks++
				p.stats.Tick()
				p.workers.Tick()
				if p.rateLimiter != nil {
					p.rateLimiter.Tick()
				}
				if p.splitter != nil {
					p.splitter.Tick(ticks)
				}
			case <-p.done:
				return
			}
		}
	}()
	<-p.done
}
// noopSplitter satisfies Splitter (and splitterShutdown) with do-nothing
// behaviour; it backs nil configs and modes with no registered factory.
type noopSplitter struct{}
func (noopSplitter) Connect() {}
func (noopSplitter) OnLogin(event *LoginEvent) {}
func (noopSplitter) OnSubmit(event *SubmitEvent) {}
func (noopSplitter) OnClose(event *CloseEvent) {}
func (noopSplitter) Tick(ticks uint64) {}
func (noopSplitter) GC() {}
func (noopSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }
func (noopSplitter) PendingCount() int { return 0 }
func (noopSplitter) Disconnect() {}
// noopSplitterFactory is the default entry in splitterFactories.
func noopSplitterFactory(cfg *Config, events *EventBus) Splitter {
	return noopSplitter{}
}
// Stop closes listeners, log files, watcher, miners, and pool connections.
//
// p.Stop()
//
// Shutdown order: stop accepting (servers, HTTP, watcher), drain pending
// shares (up to 5s), close miners, disconnect upstreams, close log files,
// then release the done channel so Start() returns. Safe to call twice.
func (p *Proxy) Stop() {
	if p.ticker != nil {
		p.ticker.Stop()
	}
	for _, server := range p.servers {
		server.Stop()
	}
	stopHTTPServer(p)
	if p.watcher != nil {
		p.watcher.Stop()
	}
	// Give the splitter a bounded window to flush in-flight shares.
	if shutdown, ok := p.splitter.(splitterShutdown); ok {
		deadline := time.Now().Add(5 * time.Second)
		for shutdown.PendingCount() > 0 && time.Now().Before(deadline) {
			time.Sleep(50 * time.Millisecond)
		}
	}
	// Snapshot under the read lock; Close() fires EventClose handlers that
	// take the write lock, so closing must happen outside it.
	p.minerMu.RLock()
	miners := make([]*Miner, 0, len(p.miners))
	for _, miner := range p.miners {
		miners = append(miners, miner)
	}
	p.minerMu.RUnlock()
	for _, miner := range miners {
		if miner != nil {
			miner.Close()
		}
	}
	if shutdown, ok := p.splitter.(splitterShutdown); ok {
		shutdown.Disconnect()
	}
	if p.accessLogger != nil {
		p.accessLogger.close()
	}
	if p.shareLogger != nil {
		p.shareLogger.close()
	}
	// Idempotent close of done: skip if a previous Stop already closed it.
	select {
	case <-p.done:
	default:
		close(p.done)
	}
}
// Reload replaces the live config.
//
// p.Reload(newCfg)
func (p *Proxy) Reload(config *Config) {
	if config == nil {
		return
	}
	if p.config == nil {
		p.config = config
	} else {
		// Preserve identity-critical fields across the swap: the source
		// path and bound listeners cannot change on a live reload.
		sourcePath := p.config.sourcePath
		bind := append([]BindAddr(nil), p.config.Bind...)
		*p.config = *config
		p.config.sourcePath = sourcePath
		p.config.Bind = bind
	}
	// Push the refreshed settings into every live subsystem.
	if p.customDifficulty != nil {
		p.customDifficulty.SetGlobalDiff(p.config.CustomDiff)
	}
	if p.workers != nil {
		p.workers.SetCustomDiffStats(p.config.CustomDiffStats)
	}
	if p.rateLimiter != nil {
		p.rateLimiter.SetConfig(p.config.RateLimit)
	}
	if p.accessLogger != nil {
		p.accessLogger.setPath(p.config.AccessLogFile)
	}
	if p.shareLogger != nil {
		p.shareLogger.setPath(p.config.ShareLogFile)
	}
}
// Summary returns an aggregate stats snapshot; zero value when stats are absent.
func (p *Proxy) Summary() StatsSummary {
	if p == nil || p.stats == nil {
		return StatsSummary{}
	}
	return p.stats.Summary()
}

// Workers lists known worker records; nil when worker tracking is disabled.
func (p *Proxy) Workers() []WorkerRecord {
	if p == nil || p.workers == nil {
		return nil
	}
	return p.workers.List()
}

// Miners returns a snapshot of connected miners sorted by ID for
// deterministic monitoring output; nil entries sort to the end.
func (p *Proxy) Miners() []*Miner {
	if p == nil {
		return nil
	}
	p.minerMu.RLock()
	defer p.minerMu.RUnlock()
	miners := make([]*Miner, 0, len(p.miners))
	for _, miner := range p.miners {
		miners = append(miners, miner)
	}
	sort.Slice(miners, func(left int, right int) bool {
		if miners[left] == nil {
			return false
		}
		if miners[right] == nil {
			return true
		}
		return miners[left].ID() < miners[right].ID()
	})
	return miners
}

// CurrentMiners reports the number of currently connected miners.
func (p *Proxy) CurrentMiners() uint64 {
	if p == nil {
		return 0
	}
	return p.currentMiners.Load()
}

// MaxMiners reports the high-water mark of concurrent miners.
func (p *Proxy) MaxMiners() uint64 {
	if p == nil || p.stats == nil {
		return 0
	}
	return p.stats.maxMiners.Load()
}

// Mode returns the configured proxy mode string, or "" without a config.
func (p *Proxy) Mode() string {
	if p == nil || p.config == nil {
		return ""
	}
	return p.config.Mode
}

// HTTPConfig returns the configured HTTP settings, zero value without a config.
func (p *Proxy) HTTPConfig() HTTPConfig {
	if p == nil || p.config == nil {
		return HTTPConfig{}
	}
	return p.config.HTTP
}

// WorkersMode returns the configured workers enum as a string.
func (p *Proxy) WorkersMode() string {
	if p == nil || p.config == nil {
		return ""
	}
	return string(p.config.Workers)
}

// Upstreams reports upstream pool connection counts from the splitter.
func (p *Proxy) Upstreams() UpstreamStats {
	if p == nil || p.splitter == nil {
		return UpstreamStats{}
	}
	return p.splitter.Upstreams()
}
// acceptConn wires an accepted TCP connection into a new Miner session,
// recording the local listener port so per-port behavior can apply.
func (p *Proxy) acceptConn(conn net.Conn, localPort uint16) {
	if p == nil {
		// The original guarded only the stats access against a nil receiver
		// and then dereferenced p.events unconditionally; drop the connection
		// instead of panicking.
		_ = conn.Close()
		return
	}
	if p.stats != nil {
		p.stats.connections.Add(1)
	}
	// NOTE(review): an empty tls.Config appears to act purely as a
	// "connection is TLS" marker for the miner — confirm NewMiner only
	// checks it for nil-ness.
	var tlsConfig *tls.Config
	if _, ok := conn.(*tls.Conn); ok {
		tlsConfig = &tls.Config{}
	}
	miner := NewMiner(conn, localPort, tlsConfig)
	miner.events = p.events
	miner.splitter = p.splitter
	if p.config != nil {
		miner.accessPassword = p.config.AccessPassword
		miner.algoExtension = p.config.AlgoExtension
	}
	miner.Start()
}

134
proxy_runtime_test.go Normal file
View file

@ -0,0 +1,134 @@
package proxy
import (
"net"
"os"
"strings"
"testing"
)
// TestProxy_Reload_Good verifies that Reload keeps bind addresses, swaps
// pools/diff, and redirects access/share log writes to the new paths.
func TestProxy_Reload_Good(t *testing.T) {
	tempDir := t.TempDir()
	cfg := &Config{
		Mode:          "nicehash",
		Bind:          []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:         []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
		CustomDiff:    100,
		Workers:       WorkersDisabled,
		AccessLogFile: tempDir + "/access-a.log",
		ShareLogFile:  tempDir + "/share-a.log",
	}
	proxyValue, errorValue := New(cfg)
	if errorValue != nil {
		t.Fatal(errorValue)
	}
	// One login + one accepted share before the reload, same after; each log
	// file should end up with exactly one line.
	miner := &Miner{user: "wallet", agent: "agent", ip: "10.0.0.1"}
	proxyValue.events.Dispatch(Event{Type: EventLogin, Miner: miner})
	proxyValue.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 100, Latency: 10})
	reloadCfg := &Config{
		Mode:          "simple",
		Bind:          []BindAddr{{Host: "0.0.0.0", Port: 4444}},
		Pools:         []PoolConfig{{URL: "pool-b:4444", Enabled: true}},
		CustomDiff:    250,
		Workers:       WorkersByUser,
		AccessLogFile: tempDir + "/access-b.log",
		ShareLogFile:  tempDir + "/share-b.log",
	}
	proxyValue.Reload(reloadCfg)
	if len(proxyValue.config.Bind) != 1 || proxyValue.config.Bind[0].Port != 3333 {
		t.Fatalf("expected bind addresses to remain unchanged, got %+v", proxyValue.config.Bind)
	}
	if len(proxyValue.config.Pools) != 1 || proxyValue.config.Pools[0].URL != "pool-b:4444" {
		t.Fatalf("expected pools to reload, got %+v", proxyValue.config.Pools)
	}
	if proxyValue.config.CustomDiff != 250 {
		t.Fatalf("expected custom diff to reload, got %d", proxyValue.config.CustomDiff)
	}
	if proxyValue.customDifficulty == nil || proxyValue.customDifficulty.globalDiff != 250 {
		t.Fatalf("expected live custom diff to update, got %+v", proxyValue.customDifficulty)
	}
	proxyValue.events.Dispatch(Event{Type: EventLogin, Miner: miner})
	proxyValue.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 250, Latency: 12})
	oldAccessLog, errorValue := os.ReadFile(tempDir + "/access-a.log")
	if errorValue != nil {
		t.Fatal(errorValue)
	}
	newAccessLog, errorValue := os.ReadFile(tempDir + "/access-b.log")
	if errorValue != nil {
		t.Fatal(errorValue)
	}
	if strings.Count(string(oldAccessLog), "CONNECT") != 1 || strings.Count(string(newAccessLog), "CONNECT") != 1 {
		t.Fatalf("expected access log writes to move across reload, got old=%q new=%q", oldAccessLog, newAccessLog)
	}
	oldShareLog, errorValue := os.ReadFile(tempDir + "/share-a.log")
	if errorValue != nil {
		t.Fatal(errorValue)
	}
	newShareLog, errorValue := os.ReadFile(tempDir + "/share-b.log")
	if errorValue != nil {
		t.Fatal(errorValue)
	}
	if strings.Count(string(oldShareLog), "ACCEPT") != 1 || strings.Count(string(newShareLog), "ACCEPT") != 1 {
		t.Fatalf("expected share log writes to move across reload, got old=%q new=%q", oldShareLog, newShareLog)
	}
}

// TestProxy_CurrentMiners_Good verifies the per-proxy miner counter follows
// login/close events and is not shared between proxy instances.
func TestProxy_CurrentMiners_Good(t *testing.T) {
	cfg := &Config{
		Mode:    "nicehash",
		Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
		Workers: WorkersDisabled,
	}
	firstProxy, errorValue := New(cfg)
	if errorValue != nil {
		t.Fatal(errorValue)
	}
	secondProxy, errorValue := New(cfg)
	if errorValue != nil {
		t.Fatal(errorValue)
	}
	miner := &Miner{}
	firstProxy.events.Dispatch(Event{Type: EventLogin, Miner: miner})
	if got := firstProxy.CurrentMiners(); got != 1 {
		t.Fatalf("expected first proxy miner count 1, got %d", got)
	}
	if got := secondProxy.CurrentMiners(); got != 0 {
		t.Fatalf("expected second proxy miner count 0, got %d", got)
	}
	firstProxy.events.Dispatch(Event{Type: EventClose, Miner: miner})
	if got := firstProxy.CurrentMiners(); got != 0 {
		t.Fatalf("expected first proxy miner count to return to 0, got %d", got)
	}
}

// TestProxy_AcceptConn_Good verifies acceptConn counts the connection.
func TestProxy_AcceptConn_Good(t *testing.T) {
	cfg := &Config{
		Mode:    "nicehash",
		Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
		Workers: WorkersDisabled,
	}
	proxyValue, errorValue := New(cfg)
	if errorValue != nil {
		t.Fatal(errorValue)
	}
	serverConn, clientConn := net.Pipe()
	proxyValue.acceptConn(serverConn, 3333)
	if got := proxyValue.stats.connections.Load(); got != 1 {
		t.Fatalf("expected connection counter to increment on accept, got %d", got)
	}
	_ = clientConn.Close()
	_ = serverConn.Close()
}

181
runtime_support.go Normal file
View file

@ -0,0 +1,181 @@
package proxy
import (
"strconv"
"strings"
"time"
)
// NewRateLimiter creates a per-IP limiter, for example:
//
//	rl := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300})
func NewRateLimiter(config RateLimit) *RateLimiter {
	return &RateLimiter{
		config:  config,
		buckets: make(map[string]*tokenBucket),
		banned:  make(map[string]time.Time),
	}
}

// SetConfig swaps in a live reload value such as:
//
//	rl.SetConfig(proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300})
//
// Existing buckets and bans are kept; only the limits change.
func (rateLimiter *RateLimiter) SetConfig(config RateLimit) {
	if rateLimiter == nil {
		return
	}
	rateLimiter.mu.Lock()
	rateLimiter.config = config
	rateLimiter.mu.Unlock()
}
// Allow returns true if the IP address is permitted to open a new connection. Thread-safe.
//
// A disabled limiter (MaxConnectionsPerMinute <= 0) always allows. An IP with
// an active ban is rejected outright; an IP that exhausts its token bucket is
// rejected and, when BanDurationSeconds > 0, banned for that duration.
//
//	if rl.Allow(conn.RemoteAddr().String()) { proceed() }
func (rateLimiter *RateLimiter) Allow(ip string) bool {
	if rateLimiter == nil {
		return true
	}
	// Strip the port so bans/buckets are keyed per host.
	host := remoteHost(ip)
	now := time.Now().UTC()
	rateLimiter.mu.Lock()
	defer rateLimiter.mu.Unlock()
	if rateLimiter.config.MaxConnectionsPerMinute <= 0 {
		return true
	}
	if bannedUntil, exists := rateLimiter.banned[host]; exists {
		if bannedUntil.After(now) {
			return false
		}
		// Ban expired; forget it and fall through to the bucket check.
		delete(rateLimiter.banned, host)
	}
	bucket, exists := rateLimiter.buckets[host]
	if !exists {
		// New host starts with a full bucket.
		bucket = &tokenBucket{
			tokens:     rateLimiter.config.MaxConnectionsPerMinute,
			lastRefill: now,
		}
		rateLimiter.buckets[host] = bucket
	}
	rateLimiter.refillBucket(bucket, now)
	if bucket.tokens <= 0 {
		if rateLimiter.config.BanDurationSeconds > 0 {
			rateLimiter.banned[host] = now.Add(time.Duration(rateLimiter.config.BanDurationSeconds) * time.Second)
		}
		return false
	}
	bucket.tokens--
	return true
}
// Tick removes expired ban entries and refills all token buckets. Called every second.
//
//	rl.Tick()
func (rateLimiter *RateLimiter) Tick() {
	if rateLimiter == nil {
		return
	}
	currentTime := time.Now().UTC()
	rateLimiter.mu.Lock()
	defer rateLimiter.mu.Unlock()
	if rateLimiter.config.MaxConnectionsPerMinute <= 0 {
		// Limiter disabled; nothing to expire or refill.
		return
	}
	// Drop bans whose expiry has passed.
	for bannedHost, expiry := range rateLimiter.banned {
		if !expiry.After(currentTime) {
			delete(rateLimiter.banned, bannedHost)
		}
	}
	// Top up every bucket according to elapsed time.
	for _, hostBucket := range rateLimiter.buckets {
		rateLimiter.refillBucket(hostBucket, currentTime)
	}
}
// refillBucket adds tokens earned since lastRefill, capped at the configured
// per-minute maximum. Caller must hold rateLimiter.mu.
func (rateLimiter *RateLimiter) refillBucket(bucket *tokenBucket, now time.Time) {
	if bucket == nil || rateLimiter.config.MaxConnectionsPerMinute <= 0 {
		return
	}
	// One token accrues every (minute / max) interval.
	refillEvery := time.Minute / time.Duration(rateLimiter.config.MaxConnectionsPerMinute)
	if refillEvery <= 0 {
		// Guard against absurdly large limits collapsing the interval to 0.
		refillEvery = time.Second
	}
	elapsed := now.Sub(bucket.lastRefill)
	if elapsed < refillEvery {
		return
	}
	tokensToAdd := int(elapsed / refillEvery)
	bucket.tokens += tokensToAdd
	if bucket.tokens > rateLimiter.config.MaxConnectionsPerMinute {
		bucket.tokens = rateLimiter.config.MaxConnectionsPerMinute
	}
	// Advance lastRefill by whole intervals only, so the fractional remainder
	// keeps accruing toward the next token instead of being discarded.
	bucket.lastRefill = bucket.lastRefill.Add(time.Duration(tokensToAdd) * refillEvery)
}
// NewCustomDiff stores the default custom difficulty override.
//
//	cd := proxy.NewCustomDiff(50000)
func NewCustomDiff(globalDiff uint64) *CustomDiff {
	return &CustomDiff{globalDiff: globalDiff}
}

// SetGlobalDiff updates the default custom difficulty override.
//
//	cd.SetGlobalDiff(100000)
func (customDiff *CustomDiff) SetGlobalDiff(globalDiff uint64) {
	if customDiff == nil {
		return
	}
	customDiff.mu.Lock()
	customDiff.globalDiff = globalDiff
	customDiff.mu.Unlock()
}

// OnLogin parses `WALLET+50000` into `WALLET` and `50000`.
//
// An invalid numeric suffix leaves the username untouched and falls back to
// the global difficulty; with no global difficulty the miner diff is set to 0.
//
//	cd.OnLogin(proxy.Event{Miner: miner})
func (customDiff *CustomDiff) OnLogin(event Event) {
	if event.Miner == nil {
		return
	}
	user := event.Miner.User()
	// Use the LAST '+' so wallets containing '+' still parse; index must not
	// be at the start (empty wallet) or the end (empty suffix).
	index := strings.LastIndex(user, "+")
	if index > 0 && index < len(user)-1 {
		if value, errorValue := strconv.ParseUint(user[index+1:], 10, 64); errorValue == nil {
			event.Miner.SetUser(user[:index])
			event.Miner.SetCustomDiff(value)
			return
		}
	}
	if customDiff == nil {
		event.Miner.SetCustomDiff(0)
		return
	}
	customDiff.mu.RLock()
	globalDiff := customDiff.globalDiff
	customDiff.mu.RUnlock()
	if globalDiff > 0 {
		event.Miner.SetCustomDiff(globalDiff)
		return
	}
	event.Miner.SetCustomDiff(0)
}

57
runtime_support_test.go Normal file
View file

@ -0,0 +1,57 @@
package proxy
import (
"testing"
"time"
)
// TestRateLimiter_Allow_Good: a fresh host under the limit is allowed.
func TestRateLimiter_Allow_Good(t *testing.T) {
	limiter := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 2})
	if !limiter.Allow("127.0.0.1:1234") {
		t.Fatal("expected first connection to pass")
	}
}

// TestRateLimiter_Allow_Bad: exceeding the limit blocks the host.
func TestRateLimiter_Allow_Bad(t *testing.T) {
	limiter := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 60})
	if !limiter.Allow("127.0.0.1:1234") {
		t.Fatal("expected first connection to pass")
	}
	if limiter.Allow("127.0.0.1:1234") {
		t.Fatal("expected second connection to be blocked")
	}
}

// TestRateLimiter_Allow_Ugly: at 1/minute, one second is not enough to refill.
func TestRateLimiter_Allow_Ugly(t *testing.T) {
	limiter := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1})
	limiter.Allow("127.0.0.1:1234")
	time.Sleep(time.Second)
	limiter.Tick()
	if limiter.Allow("127.0.0.1:1234") {
		t.Fatal("expected bucket not to refill fully after one second at 1/minute")
	}
}

// TestCustomDiff_OnLogin_Good: `wallet+5000` parses into user and diff.
func TestCustomDiff_OnLogin_Good(t *testing.T) {
	miner := &Miner{user: "wallet+5000"}
	NewCustomDiff(100).OnLogin(Event{Miner: miner})
	if miner.User() != "wallet" || miner.CustomDiff() != 5000 {
		t.Fatalf("expected parsed custom diff, got user=%q diff=%d", miner.User(), miner.CustomDiff())
	}
}

// TestCustomDiff_OnLogin_Bad: no suffix falls back to the global diff.
func TestCustomDiff_OnLogin_Bad(t *testing.T) {
	miner := &Miner{user: "wallet"}
	NewCustomDiff(100).OnLogin(Event{Miner: miner})
	if miner.CustomDiff() != 100 {
		t.Fatalf("expected fallback diff 100, got %d", miner.CustomDiff())
	}
}

// TestCustomDiff_OnLogin_Ugly: non-numeric suffix keeps the raw username.
func TestCustomDiff_OnLogin_Ugly(t *testing.T) {
	miner := &Miner{user: "wallet+bad"}
	NewCustomDiff(100).OnLogin(Event{Miner: miner})
	if miner.User() != "wallet+bad" || miner.CustomDiff() != 100 {
		t.Fatalf("expected invalid suffix to preserve user and fall back to global diff, got user=%q diff=%d", miner.User(), miner.CustomDiff())
	}
}

84
server_runtime.go Normal file
View file

@ -0,0 +1,84 @@
package proxy
import (
"crypto/tls"
"errors"
"net"
"strconv"
)
// NewServer opens a listener and prepares the accept loop.
//
//	srv, errorValue := proxy.NewServer(proxy.BindAddr{Host: "0.0.0.0", Port: 3333}, nil, rateLimiter, onAccept)
func NewServer(bindAddress BindAddr, tlsConfig *tls.Config, rateLimiter *RateLimiter, onAccept func(net.Conn, uint16)) (*Server, error) {
	listenAddress := net.JoinHostPort(bindAddress.Host, strconv.Itoa(int(bindAddress.Port)))
	tcpListener, listenErr := net.Listen("tcp", listenAddress)
	if listenErr != nil {
		return nil, listenErr
	}
	server := &Server{
		addr:     bindAddress,
		tlsCfg:   tlsConfig,
		limiter:  rateLimiter,
		onAccept: onAccept,
		listener: tcpListener,
		done:     make(chan struct{}),
	}
	return server, nil
}
// Start accepts miners in a background goroutine.
//
// The goroutine exits when Stop closes the done channel and then the
// listener, which makes Accept return an error.
//
//	srv.Start()
func (server *Server) Start() {
	if server == nil || server.listener == nil {
		return
	}
	go func() {
		for {
			conn, errorValue := server.listener.Accept()
			if errorValue != nil {
				select {
				case <-server.done:
					return
				default:
					// NOTE(review): a transient accept error retries
					// immediately with no backoff — confirm this cannot
					// busy-loop under fd exhaustion.
					continue
				}
			}
			// Rate-limit before spending any TLS work on the connection.
			if server.limiter != nil && !server.limiter.Allow(conn.RemoteAddr().String()) {
				_ = conn.Close()
				continue
			}
			if server.tlsCfg != nil {
				conn = tls.Server(conn, server.tlsCfg)
			}
			if server.onAccept == nil {
				_ = conn.Close()
				continue
			}
			server.onAccept(conn, server.addr.Port)
		}
	}()
}
// Stop closes the listener without forcing existing sockets shut.
//
// done is closed before the listener so the accept goroutine can tell a
// deliberate shutdown from a transient accept error.
//
//	srv.Stop()
func (server *Server) Stop() {
	if server == nil || server.listener == nil {
		return
	}
	select {
	case <-server.done:
	default:
		close(server.done)
	}
	_ = server.listener.Close()
}

// errServerClosed signals a deliberate listener shutdown.
// NOTE(review): not referenced in this file — confirm it is used elsewhere.
var errServerClosed = errors.New("server closed")

View file

@ -2,6 +2,7 @@ package nicehash
import ( import (
"sync" "sync"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/core/proxy"
"dappco.re/go/core/proxy/pool" "dappco.re/go/core/proxy/pool"
@ -15,11 +16,13 @@ import (
type NonceMapper struct { type NonceMapper struct {
id int64 id int64
storage *NonceStorage storage *NonceStorage
strategy pool.Strategy // manages pool client lifecycle and failover strategy pool.Strategy // manages pool client lifecycle and failover
pending map[int64]SubmitContext // sequence → {requestID, minerID} pending map[int64]SubmitContext // sequence → {requestID, minerID, jobID}
cfg *proxy.Config config *proxy.Config
events *proxy.EventBus
active bool // true once pool has sent at least one job active bool // true once pool has sent at least one job
suspended int // > 0 when pool connection is in error/reconnecting suspended int // > 0 when pool connection is in error/reconnecting
idleAt time.Time
mu sync.Mutex mu sync.Mutex
} }
@ -27,6 +30,187 @@ type NonceMapper struct {
// //
// ctx := SubmitContext{RequestID: 42, MinerID: 7} // ctx := SubmitContext{RequestID: 42, MinerID: 7}
type SubmitContext struct { type SubmitContext struct {
RequestID int64 // JSON-RPC id from the miner's submit request RequestID int64 // JSON-RPC id from the miner's submit request
MinerID int64 // miner that submitted MinerID int64 // miner that submitted
Job proxy.Job
JobID string
Expired bool
SubmittedAt time.Time
}
// NewNonceMapper creates one upstream pool mapper and its local slot table.
//
//	mapper := nicehash.NewNonceMapper(1, cfg, strategy)
func NewNonceMapper(id int64, cfg *proxy.Config, strategy pool.Strategy) *NonceMapper {
	mapper := &NonceMapper{
		id:       id,
		storage:  NewNonceStorage(),
		strategy: strategy,
		config:   cfg,
		pending:  make(map[int64]SubmitContext),
	}
	return mapper
}
// Add allocates a nonce slot for the miner; a successful add clears the
// idle timestamp so the GC will not reap this mapper.
func (m *NonceMapper) Add(miner *proxy.Miner) bool {
	if !m.storage.Add(miner) {
		return false
	}
	m.mu.Lock()
	m.idleAt = time.Time{}
	m.mu.Unlock()
	return true
}

// Remove frees the miner's slot; when the last active slot empties, the
// idle timestamp starts the GC countdown (set once, not refreshed).
func (m *NonceMapper) Remove(miner *proxy.Miner) {
	m.storage.Remove(miner)
	_, _, active := m.storage.SlotCount()
	if active == 0 {
		m.mu.Lock()
		if m.idleAt.IsZero() {
			m.idleAt = time.Now().UTC()
		}
		m.mu.Unlock()
	}
}
// Submit forwards a miner share to the upstream pool and records the pending
// context so OnResultAccepted can route the pool's reply back to the miner.
//
// Invalid job IDs and an unavailable pool are rejected immediately.
func (m *NonceMapper) Submit(event *proxy.SubmitEvent) {
	if event == nil || event.Miner == nil || m.strategy == nil {
		return
	}
	// JobForID also flags the share as expired when it matches the previous
	// (stale but still acceptable) job.
	job, valid, expired := m.storage.JobForID(event.JobID)
	if !valid {
		event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
		return
	}
	sequence := m.strategy.Submit(event.JobID, event.Nonce, event.Result, event.Algo)
	if sequence == 0 {
		// event.Miner is already known non-nil from the guard above; the
		// original re-checked it redundantly here.
		event.Miner.ReplyWithError(event.RequestID, "Pool unavailable")
		return
	}
	m.mu.Lock()
	m.pending[sequence] = SubmitContext{
		RequestID:   event.RequestID,
		MinerID:     event.Miner.ID(),
		Job:         job,
		JobID:       event.JobID,
		Expired:     expired,
		SubmittedAt: time.Now().UTC(),
	}
	m.mu.Unlock()
}
// IsActive reports whether the upstream strategy currently has a live pool.
func (m *NonceMapper) IsActive() bool {
	if m.strategy == nil {
		return false
	}
	return m.strategy.IsActive()
}

// OnJob records a valid job from the pool: marks the mapper active, clears
// any suspension and idle countdown, then fans the job out via storage.
func (m *NonceMapper) OnJob(job proxy.Job) {
	if !job.IsValid() {
		return
	}
	m.mu.Lock()
	m.active = true
	m.suspended = 0
	m.idleAt = time.Time{}
	m.mu.Unlock()
	m.storage.SetJob(job)
}
// OnResultAccepted resolves a pool reply for a previously submitted share:
// it pops the pending context, dispatches an accept/reject event, and
// answers the miner's original JSON-RPC request.
func (m *NonceMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
	m.mu.Lock()
	context, exists := m.pending[sequence]
	if exists {
		delete(m.pending, sequence)
	}
	m.mu.Unlock()
	if !exists {
		// Unknown sequence: reply already handled or pending was cleared on
		// disconnect; nothing to route.
		return
	}
	// Snapshot lookup; nil when the miner disconnected before the reply.
	miner := m.storage.Miners()[context.MinerID]
	if miner == nil {
		return
	}
	// Prefer the difficulty encoded in the submitted job's target; fall back
	// to the miner's assigned difficulty.
	shareDifficulty := context.Job.DifficultyFromTarget()
	if shareDifficulty == 0 {
		shareDifficulty = miner.Diff()
	}
	eventType := proxy.EventReject
	if accepted {
		eventType = proxy.EventAccept
	}
	if m.events != nil {
		// Round-trip latency in ms, clamped to uint16 range.
		latency := uint16(0)
		if !context.SubmittedAt.IsZero() {
			elapsed := time.Since(context.SubmittedAt).Milliseconds()
			if elapsed > 0 {
				if elapsed > int64(^uint16(0)) {
					latency = ^uint16(0)
				} else {
					latency = uint16(elapsed)
				}
			}
		}
		m.events.Dispatch(proxy.Event{
			Type:    eventType,
			Miner:   miner,
			Job:     jobPointer(context.Job),
			Diff:    shareDifficulty,
			Error:   errorMessage,
			Latency: latency,
			Expired: context.Expired,
		})
	}
	if accepted {
		miner.Success(context.RequestID, "OK")
		return
	}
	miner.ReplyWithError(context.RequestID, errorMessage)
}
// OnDisconnect drops all pending submits (their replies can never arrive)
// and marks the mapper inactive/suspended until the next job.
func (m *NonceMapper) OnDisconnect() {
	m.clearPending()
	m.mu.Lock()
	m.active = false
	m.suspended++
	m.mu.Unlock()
}

// IdleDuration returns how long the mapper has had no active miners;
// zero while it still serves at least one miner.
func (m *NonceMapper) IdleDuration(now time.Time) time.Duration {
	m.mu.Lock()
	idleAt := m.idleAt
	m.mu.Unlock()
	if idleAt.IsZero() {
		return 0
	}
	return now.Sub(idleAt)
}

// clearPending discards every in-flight submit context.
func (m *NonceMapper) clearPending() {
	m.mu.Lock()
	m.pending = make(map[int64]SubmitContext)
	m.mu.Unlock()
}
func jobPointer(job proxy.Job) *proxy.Job {
if !job.IsValid() {
return nil
}
jobCopy := job
return &jobCopy
} }

View file

@ -0,0 +1,12 @@
package nicehash
import (
"dappco.re/go/core/proxy"
"dappco.re/go/core/proxy/pool"
)
// init registers the NiceHash splitter under the "nicehash" mode so that
// importing this package makes the mode available to proxy.New.
func init() {
	proxy.RegisterSplitterFactory("nicehash", func(cfg *proxy.Config, events *proxy.EventBus) proxy.Splitter {
		return NewNonceSplitter(cfg, events, pool.NewStrategyFactory(cfg))
	})
}

View file

@ -11,6 +11,7 @@ package nicehash
import ( import (
"sync" "sync"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/core/proxy"
"dappco.re/go/core/proxy/pool" "dappco.re/go/core/proxy/pool"
@ -28,3 +29,188 @@ type NonceSplitter struct {
strategyFactory pool.StrategyFactory strategyFactory pool.StrategyFactory
mu sync.RWMutex mu sync.RWMutex
} }
// NewNonceSplitter creates the NiceHash splitter.
//
//	s := nicehash.NewNonceSplitter(cfg, bus, factory)
func NewNonceSplitter(cfg *proxy.Config, events *proxy.EventBus, factory pool.StrategyFactory) *NonceSplitter {
	return &NonceSplitter{
		cfg:             cfg,
		events:          events,
		strategyFactory: factory,
		mappers:         make([]*NonceMapper, 0, 1),
	}
}

// Connect eagerly creates and connects the first mapper; a no-op when one
// already exists.
func (s *NonceSplitter) Connect() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.mappers) > 0 {
		return
	}
	mapper := s.newMapperLocked()
	s.mappers = append(s.mappers, mapper)
	mapper.strategy.Connect()
}
// OnLogin places a new miner into the first mapper with a free nonce slot,
// creating and connecting a fresh mapper when all existing ones are full.
func (s *NonceSplitter) OnLogin(event *proxy.LoginEvent) {
	if event == nil || event.Miner == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, mapper := range s.mappers {
		if s.attachMinerLocked(mapper, event.Miner) {
			return
		}
	}
	mapper := s.newMapperLocked()
	s.mappers = append(s.mappers, mapper)
	mapper.strategy.Connect()
	s.attachMinerLocked(mapper, event.Miner)
}

// attachMinerLocked binds miner to mapper: slot allocation, event bus wiring,
// NiceHash flags, and priming with the mapper's current job. Caller must hold
// s.mu. Returns false when the mapper has no free slot. (The original inlined
// this sequence twice in OnLogin.)
func (s *NonceSplitter) attachMinerLocked(mapper *NonceMapper, miner *proxy.Miner) bool {
	if !mapper.Add(miner) {
		return false
	}
	mapper.events = s.events
	miner.SetMapperID(mapper.id)
	miner.SetNiceHashEnabled(true)
	if currentJob := mapper.storage.CurrentJob(); currentJob != nil && currentJob.IsValid() {
		miner.PrimeJob(*currentJob)
	}
	return true
}
// OnSubmit routes a share to the mapper the miner was assigned at login.
func (s *NonceSplitter) OnSubmit(event *proxy.SubmitEvent) {
	if event == nil || event.Miner == nil {
		return
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, mapper := range s.mappers {
		if mapper.id == event.Miner.MapperID() {
			mapper.Submit(event)
			return
		}
	}
}

// OnClose releases the miner's slot from its assigned mapper.
func (s *NonceSplitter) OnClose(event *proxy.CloseEvent) {
	if event == nil || event.Miner == nil {
		return
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, mapper := range s.mappers {
		if mapper.id == event.Miner.MapperID() {
			mapper.Remove(event.Miner)
			return
		}
	}
}
// Tick runs the idle-mapper garbage collector once per minute
// (ticks are delivered once per second).
func (s *NonceSplitter) Tick(ticks uint64) {
	if ticks%60 == 0 {
		s.GC()
	}
}
// GC disconnects and drops mappers that have been idle (no active miners)
// for at least 60 seconds.
func (s *NonceSplitter) GC() {
	s.mu.Lock()
	defer s.mu.Unlock()
	now := time.Now().UTC()
	// Filter in place, reusing the backing array.
	filtered := s.mappers[:0]
	for _, mapper := range s.mappers {
		_, _, active := mapper.storage.SlotCount()
		if active == 0 && mapper.IdleDuration(now) >= 60*time.Second {
			// Detach the strategy under the mapper's lock: Disconnect() on
			// the splitter nils mapper.strategy under mapper.mu, so an
			// unlocked read here was a data race.
			mapper.mu.Lock()
			strategy := mapper.strategy
			mapper.strategy = nil
			mapper.mu.Unlock()
			if strategy != nil {
				strategy.Disconnect()
			}
			continue
		}
		filtered = append(filtered, mapper)
	}
	// Nil out the dropped tail so reaped mappers are not retained by the
	// shared backing array.
	for index := len(filtered); index < len(s.mappers); index++ {
		s.mappers[index] = nil
	}
	s.mappers = filtered
}
// Upstreams classifies every mapper as error (suspended), active, or asleep
// for monitoring output.
func (s *NonceSplitter) Upstreams() proxy.UpstreamStats {
	s.mu.RLock()
	defer s.mu.RUnlock()
	var stats proxy.UpstreamStats
	for _, mapper := range s.mappers {
		stats.Total++
		// suspended is written under mapper.mu (OnJob/OnDisconnect); the
		// original read it without the lock, which was a data race.
		mapper.mu.Lock()
		suspended := mapper.suspended
		mapper.mu.Unlock()
		switch {
		case suspended > 0:
			stats.Error++
		case mapper.IsActive():
			stats.Active++
		default:
			stats.Sleep++
		}
	}
	return stats
}
// PendingCount sums in-flight submits across all mappers. The mapper list is
// snapshotted first so per-mapper locks are taken without holding s.mu.
func (s *NonceSplitter) PendingCount() int {
	s.mu.RLock()
	mappers := append([]*NonceMapper(nil), s.mappers...)
	s.mu.RUnlock()
	pending := 0
	for _, mapper := range mappers {
		if mapper == nil {
			continue
		}
		mapper.mu.Lock()
		pending += len(mapper.pending)
		mapper.mu.Unlock()
	}
	return pending
}

// Disconnect detaches every mapper from its strategy (under the mapper lock)
// and then disconnects the strategies outside any lock.
func (s *NonceSplitter) Disconnect() {
	s.mu.Lock()
	mappers := s.mappers
	s.mappers = nil
	s.mu.Unlock()
	for _, mapper := range mappers {
		if mapper == nil {
			continue
		}
		mapper.mu.Lock()
		strategy := mapper.strategy
		mapper.strategy = nil
		mapper.active = false
		mapper.suspended = 0
		mapper.mu.Unlock()
		if strategy != nil {
			strategy.Disconnect()
		}
	}
}

// newMapperLocked builds a mapper wired to the event bus and a fresh
// strategy. Caller must hold s.mu; IDs are 1-based list positions.
func (s *NonceSplitter) newMapperLocked() *NonceMapper {
	mapperID := int64(len(s.mappers) + 1)
	mapper := NewNonceMapper(mapperID, s.cfg, nil)
	mapper.events = s.events
	// The strategy needs the mapper as its callback target, so it is created
	// after the mapper and attached here.
	var strategy pool.Strategy
	if s.strategyFactory != nil {
		strategy = s.strategyFactory(mapper)
	}
	mapper.strategy = strategy
	return mapper
}

View file

@ -21,5 +21,184 @@ type NonceStorage struct {
job proxy.Job // current job from pool job proxy.Job // current job from pool
prevJob proxy.Job // previous job (for stale submit validation) prevJob proxy.Job // previous job (for stale submit validation)
cursor int // search starts here (round-robin allocation) cursor int // search starts here (round-robin allocation)
expired uint64 // stale job ID hits for the previous job
mu sync.Mutex mu sync.Mutex
} }
// NewNonceStorage allocates the fixed-size miner slot table.
//
//	storage := nicehash.NewNonceStorage()
func NewNonceStorage() *NonceStorage {
	return &NonceStorage{
		miners: make(map[int64]*proxy.Miner),
	}
}

// Add finds the next free slot starting from cursor (wrapping), sets slot[index] = minerID,
// and sets the miner fixed byte.
//
//	ok := storage.Add(miner)
func (s *NonceStorage) Add(miner *proxy.Miner) bool {
	if miner == nil {
		return false
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	for offset := 0; offset < len(s.slots); offset++ {
		index := (s.cursor + offset) % len(s.slots)
		if s.slots[index] != 0 {
			// Occupied (positive) or dead (negative) — keep scanning.
			continue
		}
		s.slots[index] = miner.ID()
		s.miners[miner.ID()] = miner
		// The slot index becomes the miner's fixed nonce byte.
		miner.SetFixedByte(uint8(index))
		s.cursor = (index + 1) % len(s.slots)
		return true
	}
	return false
}

// Remove marks slot[miner.FixedByte] as a dead slot until the next SetJob call.
//
// Dead (negated) slots keep the nonce byte reserved so a new miner cannot
// reuse it while shares for the old job may still arrive.
//
//	storage.Remove(miner)
func (s *NonceStorage) Remove(miner *proxy.Miner) {
	if miner == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	index := int(miner.FixedByte())
	if index >= 0 && index < len(s.slots) && s.slots[index] == miner.ID() {
		s.slots[index] = -miner.ID()
	}
	delete(s.miners, miner.ID())
}
// SetJob replaces the current job, clears dead slots, and fans the job out to active miners.
//
// The previous job is kept for stale-submit validation only when it came from
// the same upstream client; a pool switch invalidates it.
//
//	storage.SetJob(job)
func (s *NonceStorage) SetJob(job proxy.Job) {
	s.mu.Lock()
	if s.job.IsValid() && s.job.ClientID != "" && s.job.ClientID == job.ClientID {
		s.prevJob = s.job
	} else {
		s.prevJob = proxy.Job{}
	}
	s.job = job
	// Collect active miners while sweeping dead (negative) slots back to free.
	miners := make([]*proxy.Miner, 0, len(s.miners))
	for index, minerID := range s.slots {
		if minerID < 0 {
			s.slots[index] = 0
			continue
		}
		if minerID > 0 {
			if miner := s.miners[minerID]; miner != nil {
				miners = append(miners, miner)
			}
		}
	}
	s.mu.Unlock()
	// Forward outside the lock so a slow miner cannot stall the table.
	for _, miner := range miners {
		miner.ForwardJob(job, job.Algo)
	}
}
// IsValidJobID returns true if id matches the current or previous job ID.
//
// NOTE: this delegates to JobForID and therefore increments the expired
// counter when id matches the previous (stale) job.
//
//	if !storage.IsValidJobID(submitJobID) { reject }
func (s *NonceStorage) IsValidJobID(id string) bool {
	valid, _ := s.JobStatus(id)
	return valid
}

// JobForID returns a copy of the current or previous job for the given ID.
//
// Matching the previous job counts as a stale hit and bumps the expired
// counter as a side effect.
//
//	job, valid, expired := storage.JobForID(submitJobID)
func (s *NonceStorage) JobForID(id string) (job proxy.Job, valid bool, expired bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if id == "" {
		return proxy.Job{}, false, false
	}
	if id == s.job.JobID {
		return s.job, true, false
	}
	if s.prevJob.IsValid() && s.prevJob.ClientID != "" && id == s.prevJob.JobID {
		s.expired++
		return s.prevJob, true, true
	}
	return proxy.Job{}, false, false
}

// JobStatus returns whether the job ID is current or stale-but-still-acceptable.
//
//	valid, expired := storage.JobStatus(submitJobID)
func (s *NonceStorage) JobStatus(id string) (valid bool, expired bool) {
	_, valid, expired = s.JobForID(id)
	return valid, expired
}
// SlotCount returns free, dead, and active slot counts for monitoring output.
//
//	free, dead, active := storage.SlotCount()
func (s *NonceStorage) SlotCount() (free int, dead int, active int) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, slot := range s.slots {
		switch {
		case slot == 0:
			free++
		case slot < 0:
			// Negated IDs mark slots reserved by disconnected miners.
			dead++
		default:
			active++
		}
	}
	return free, dead, active
}

// ExpiredCount returns the number of times the previous job ID has been accepted as stale.
//
//	count := storage.ExpiredCount()
func (s *NonceStorage) ExpiredCount() uint64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.expired
}

// Miners returns a snapshot of the active miner map.
// The copy lets callers iterate without holding the storage lock.
func (s *NonceStorage) Miners() map[int64]*proxy.Miner {
	s.mu.Lock()
	defer s.mu.Unlock()
	miners := make(map[int64]*proxy.Miner, len(s.miners))
	for minerID, miner := range s.miners {
		miners[minerID] = miner
	}
	return miners
}

// CurrentJob returns a copy of the latest assigned job, if any.
func (s *NonceStorage) CurrentJob() *proxy.Job {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.job.IsValid() {
		return nil
	}
	jobCopy := s.job
	return &jobCopy
}

View file

@ -0,0 +1,120 @@
package nicehash
import (
"strings"
"testing"
"time"
"dappco.re/go/core/proxy"
)
// TestNonceStorage_Add_Good: a single miner gets a slot.
func TestNonceStorage_Add_Good(t *testing.T) {
	storage := NewNonceStorage()
	miner := proxy.NewMiner(nil, 0, nil)
	miner.SetUser("wallet")
	if !storage.Add(miner) {
		t.Fatal("expected slot allocation to succeed")
	}
}

// TestNonceStorage_Add_Bad: nil miners are rejected.
func TestNonceStorage_Add_Bad(t *testing.T) {
	storage := NewNonceStorage()
	if storage.Add(nil) {
		t.Fatal("expected nil miner allocation to fail")
	}
}

// TestNonceStorage_Add_Ugly: the table holds exactly 256 miners.
func TestNonceStorage_Add_Ugly(t *testing.T) {
	storage := NewNonceStorage()
	for index := 0; index < 256; index++ {
		miner := proxy.NewMiner(nil, 0, nil)
		if !storage.Add(miner) {
			t.Fatalf("expected miner %d to fit", index)
		}
	}
	if storage.Add(proxy.NewMiner(nil, 0, nil)) {
		t.Fatal("expected 257th miner to fail")
	}
}

// TestNonceStorage_IsValidJobID_Good: the current job ID validates.
func TestNonceStorage_IsValidJobID_Good(t *testing.T) {
	storage := NewNonceStorage()
	storage.SetJob(proxy.Job{Blob: "abcd", JobID: "job-1"})
	if !storage.IsValidJobID("job-1") {
		t.Fatal("expected current job ID to be valid")
	}
}

// TestNonceStorage_IsValidJobID_Bad: unknown job IDs are rejected.
func TestNonceStorage_IsValidJobID_Bad(t *testing.T) {
	storage := NewNonceStorage()
	storage.SetJob(proxy.Job{Blob: "abcd", JobID: "job-1"})
	if storage.IsValidJobID("job-2") {
		t.Fatal("expected unknown job ID to be invalid")
	}
}

// TestNonceStorage_IsValidJobID_Ugly: a stale job from the same upstream
// client stays valid and is counted as expired.
func TestNonceStorage_IsValidJobID_Ugly(t *testing.T) {
	storage := NewNonceStorage()
	storage.SetJob(proxy.Job{Blob: "abcd", JobID: "job-1", ClientID: "pool-a"})
	storage.SetJob(proxy.Job{Blob: "efgh", JobID: "job-2", ClientID: "pool-a"})
	if !storage.IsValidJobID("job-1") {
		t.Fatal("expected previous job ID from same client to remain valid")
	}
	if got := storage.ExpiredCount(); got != 1 {
		t.Fatalf("expected stale job lookups to increment the expired counter, got %d", got)
	}
}

// TestNonceStorage_IsValidJobID_BadClientID: a pool switch invalidates the
// previous job.
func TestNonceStorage_IsValidJobID_BadClientID(t *testing.T) {
	storage := NewNonceStorage()
	storage.SetJob(proxy.Job{Blob: "abcd", JobID: "job-1", ClientID: "pool-a"})
	storage.SetJob(proxy.Job{Blob: "efgh", JobID: "job-2", ClientID: "pool-b"})
	if storage.IsValidJobID("job-1") {
		t.Fatal("expected previous job ID from a different client to be invalid")
	}
}

// TestNonceMapper_OnDisconnect_Ugly: disconnect discards pending submits.
func TestNonceMapper_OnDisconnect_Ugly(t *testing.T) {
	mapper := NewNonceMapper(1, &proxy.Config{}, nil)
	mapper.pending[1] = SubmitContext{RequestID: 7}
	mapper.OnDisconnect()
	if len(mapper.pending) != 0 {
		t.Fatalf("expected pending submits to be cleared, got %d", len(mapper.pending))
	}
}

// TestNonceMapper_OnResultAccepted_Good: the accept event carries the job the
// share was submitted against, even after a newer job replaced it.
func TestNonceMapper_OnResultAccepted_Good(t *testing.T) {
	bus := proxy.NewEventBus()
	resultCh := make(chan proxy.Event, 1)
	bus.Subscribe(proxy.EventAccept, func(event proxy.Event) {
		resultCh <- event
	})
	miner := proxy.NewMiner(nil, 0, nil)
	mapper := NewNonceMapper(1, &proxy.Config{}, nil)
	mapper.events = bus
	if !mapper.storage.Add(miner) {
		t.Fatal("expected miner slot allocation")
	}
	mapper.storage.SetJob(proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-a", Target: "b88d0600"})
	mapper.mu.Lock()
	mapper.pending[1] = SubmitContext{
		RequestID:   7,
		MinerID:     miner.ID(),
		Job:         proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-a", Target: "b88d0600"},
		SubmittedAt: time.Now().UTC(),
	}
	mapper.mu.Unlock()
	mapper.storage.SetJob(proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-b", Target: "b88d0600"})
	mapper.OnResultAccepted(1, true, "")
	select {
	case event := <-resultCh:
		if event.Job == nil || event.Job.JobID != "job-a" {
			t.Fatalf("expected submitted job to be reported, got %#v", event.Job)
		}
	case <-time.After(time.Second):
		t.Fatal("expected accept event")
	}
}

View file

@ -1,6 +1,7 @@
package simple package simple
import ( import (
"sync"
"time" "time"
"dappco.re/go/core/proxy" "dappco.re/go/core/proxy"
@ -16,6 +17,135 @@ type SimpleMapper struct {
id int64 id int64
miner *proxy.Miner // nil when idle miner *proxy.Miner // nil when idle
strategy pool.Strategy strategy pool.Strategy
events *proxy.EventBus
pending map[int64]simpleSubmitContext
job proxy.Job
prevJob proxy.Job
idleAt time.Time // zero when active idleAt time.Time // zero when active
stopped bool stopped bool
mu sync.Mutex
}
type simpleSubmitContext struct {
RequestID int64
Job proxy.Job
Expired bool
SubmittedAt time.Time
}
// NewSimpleMapper stores the mapper ID and strategy.
//
//	mapper := simple.NewSimpleMapper(1, strategy)
func NewSimpleMapper(id int64, strategy pool.Strategy) *SimpleMapper {
	return &SimpleMapper{id: id, strategy: strategy, pending: make(map[int64]simpleSubmitContext)}
}

// OnJob records a valid pool job, keeping the previous one for stale-submit
// validation only when it came from the same upstream client, and forwards
// the new job to the attached miner (outside the lock).
func (m *SimpleMapper) OnJob(job proxy.Job) {
	if !job.IsValid() {
		return
	}
	m.mu.Lock()
	if m.job.IsValid() && m.job.ClientID != "" && m.job.ClientID == job.ClientID {
		m.prevJob = m.job
	} else {
		m.prevJob = proxy.Job{}
	}
	m.job = job
	miner := m.miner
	m.mu.Unlock()
	if miner != nil {
		miner.ForwardJob(job, job.Algo)
	}
}

// JobStatus reports whether id is the current job or a stale-but-acceptable one.
func (m *SimpleMapper) JobStatus(id string) (valid bool, expired bool) {
	_, valid, expired = m.JobForID(id)
	return valid, expired
}

// JobForID returns a copy of the current or previous job matching id;
// expired is true only for the previous job.
func (m *SimpleMapper) JobForID(id string) (proxy.Job, bool, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if id == "" {
		return proxy.Job{}, false, false
	}
	if id == m.job.JobID {
		return m.job, true, false
	}
	if m.prevJob.IsValid() && m.prevJob.ClientID != "" && id == m.prevJob.JobID {
		return m.prevJob, true, true
	}
	return proxy.Job{}, false, false
}
// OnResultAccepted resolves a pending submit once the upstream pool replies.
// sequence is the strategy-issued submit ID; accepted selects between a
// success reply and an error reply to the miner, and in both cases an
// EventAccept/EventReject is dispatched on the bus (when one is attached).
func (m *SimpleMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
	m.mu.Lock()
	context, exists := m.pending[sequence]
	miner := m.miner
	if !exists {
		// Unknown sequence: the submit was already cleared (disconnect or
		// mapper reuse); nothing to answer.
		m.mu.Unlock()
		return
	}
	delete(m.pending, sequence)
	m.mu.Unlock()
	if miner == nil {
		return
	}
	// Prefer the difficulty implied by the job's target; fall back to the
	// miner's current difficulty when the job cannot provide one.
	shareDifficulty := context.Job.DifficultyFromTarget()
	if shareDifficulty == 0 {
		shareDifficulty = miner.Diff()
	}
	if accepted {
		latency := shareLatency(context.SubmittedAt)
		if m.events != nil {
			m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Job: jobPointer(context.Job), Diff: shareDifficulty, Latency: latency, Expired: context.Expired})
		}
		miner.Success(context.RequestID, "OK")
		return
	}
	latency := shareLatency(context.SubmittedAt)
	if m.events != nil {
		m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Job: jobPointer(context.Job), Diff: shareDifficulty, Error: errorMessage, Latency: latency, Expired: context.Expired})
	}
	miner.ReplyWithError(context.RequestID, errorMessage)
}
// OnDisconnect clears any in-flight submits and marks the mapper as stopped
// so the splitter will not reuse it.
//
// Fix: stopped was previously written without holding m.mu, while
// SimpleSplitter.Disconnect sets the same field under the mapper lock and
// other mapper methods read state under m.mu — a data race under -race.
// Perform both updates inside one critical section.
func (m *SimpleMapper) OnDisconnect() {
	m.mu.Lock()
	m.pending = make(map[int64]simpleSubmitContext)
	m.stopped = true
	m.mu.Unlock()
}
// clearPending drops every in-flight submit context under the mapper lock.
func (m *SimpleMapper) clearPending() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.pending = make(map[int64]simpleSubmitContext)
}
// jobPointer returns a pointer to a defensive copy of job, or nil when the
// job fails validation. Copying keeps event consumers from mutating mapper
// state through the pointer.
func jobPointer(job proxy.Job) *proxy.Job {
	if job.IsValid() {
		duplicate := job
		return &duplicate
	}
	return nil
}
func shareLatency(submittedAt time.Time) uint16 {
if submittedAt.IsZero() {
return 0
}
elapsed := time.Since(submittedAt).Milliseconds()
if elapsed <= 0 {
return 0
}
if elapsed > int64(^uint16(0)) {
return ^uint16(0)
}
return uint16(elapsed)
} }

View file

@ -0,0 +1,12 @@
package simple
import (
"dappco.re/go/core/proxy"
"dappco.re/go/core/proxy/pool"
)
// init registers the "simple" splitter under the shared factory registry so a
// proxy configured with this mode can construct it without importing the
// package directly.
func init() {
	proxy.RegisterSplitterFactory("simple", func(cfg *proxy.Config, events *proxy.EventBus) proxy.Splitter {
		return NewSimpleSplitter(cfg, events, pool.NewStrategyFactory(cfg))
	})
}

View file

@ -9,6 +9,7 @@ package simple
import ( import (
"sync" "sync"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/core/proxy"
"dappco.re/go/core/proxy/pool" "dappco.re/go/core/proxy/pool"
@ -18,11 +19,257 @@ import (
// //
// s := simple.NewSimpleSplitter(cfg, eventBus, strategyFactory) // s := simple.NewSimpleSplitter(cfg, eventBus, strategyFactory)
type SimpleSplitter struct { type SimpleSplitter struct {
active map[int64]*SimpleMapper // minerID → mapper active map[int64]*SimpleMapper // minerID → mapper
idle map[int64]*SimpleMapper // mapperID → mapper (reuse pool, keyed by mapper seq) idle map[int64]*SimpleMapper // mapperID → mapper (reuse pool, keyed by mapper seq)
cfg *proxy.Config config *proxy.Config
events *proxy.EventBus events *proxy.EventBus
factory pool.StrategyFactory strategyFactory pool.StrategyFactory
mu sync.Mutex mu sync.Mutex
seq int64 // monotonic mapper sequence counter mapperSequence int64 // monotonic mapper sequence counter
}
// NewSimpleSplitter creates the passthrough splitter.
//
// s := simple.NewSimpleSplitter(cfg, bus, factory)
func NewSimpleSplitter(cfg *proxy.Config, events *proxy.EventBus, factory pool.StrategyFactory) *SimpleSplitter {
	splitter := &SimpleSplitter{
		config:          cfg,
		events:          events,
		strategyFactory: factory,
	}
	splitter.active = make(map[int64]*SimpleMapper)
	splitter.idle = make(map[int64]*SimpleMapper)
	return splitter
}
// Connect is a no-op: the simple splitter opens upstream connections lazily,
// one per mapper, when miners log in.
func (s *SimpleSplitter) Connect() {}
// OnLogin attaches a mapper to the logging-in miner: it first tries to reuse
// a healthy idle mapper (pruning dead/stale ones as it scans), otherwise it
// creates a fresh mapper with a new upstream strategy. Finally it primes the
// miner with the mapper's current job, if any.
func (s *SimpleSplitter) OnLogin(event *proxy.LoginEvent) {
	if event == nil || event.Miner == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	// ReuseTimeout (seconds) bounds how long an idle mapper stays reusable;
	// zero disables the age check in this scan.
	timeout := time.Duration(0)
	if s.config != nil && s.config.ReuseTimeout > 0 {
		timeout = time.Duration(s.config.ReuseTimeout) * time.Second
	}
	var mapper *SimpleMapper
	now := time.Now().UTC()
	for mapperID, idleMapper := range s.idle {
		// Discard mappers that are stopped, strategy-less, upstream-inactive,
		// or idle past the reuse timeout; take the first healthy one.
		if idleMapper == nil || idleMapper.stopped || idleMapper.strategy == nil || !idleMapper.strategy.IsActive() || (timeout > 0 && !idleMapper.idleAt.IsZero() && now.Sub(idleMapper.idleAt) > timeout) {
			if idleMapper != nil && idleMapper.strategy != nil {
				idleMapper.strategy.Disconnect()
			}
			delete(s.idle, mapperID)
			continue
		}
		mapper = idleMapper
		delete(s.idle, mapperID)
		break
	}
	if mapper == nil {
		// No reusable mapper: mint a new one and connect its strategy.
		s.mapperSequence++
		var strategy pool.Strategy
		mapper = NewSimpleMapper(s.mapperSequence, nil)
		mapper.events = s.events
		if s.strategyFactory != nil {
			strategy = s.strategyFactory(mapper)
		}
		mapper.strategy = strategy
		if mapper.strategy != nil {
			mapper.strategy.Connect()
		}
	} else {
		// Reused mapper: refresh the bus reference and drop stale submits.
		mapper.events = s.events
		mapper.clearPending()
	}
	mapper.miner = event.Miner
	mapper.idleAt = time.Time{}
	event.Miner.SetRouteID(mapper.id)
	s.active[event.Miner.ID()] = mapper
	// Snapshot the current job under the mapper lock; prime outside it.
	mapper.mu.Lock()
	currentJob := mapper.job
	mapper.mu.Unlock()
	if currentJob.IsValid() {
		event.Miner.PrimeJob(currentJob)
	}
}
// OnSubmit forwards a miner share to the mapper's upstream strategy and
// records a pending context keyed by the strategy's sequence number so the
// eventual pool reply can be routed back to the miner.
func (s *SimpleSplitter) OnSubmit(event *proxy.SubmitEvent) {
	if event == nil || event.Miner == nil {
		return
	}
	s.mu.Lock()
	mapper := s.active[event.Miner.ID()]
	s.mu.Unlock()
	if mapper == nil || mapper.strategy == nil {
		return
	}
	job, valid, expired := mapper.JobForID(event.JobID)
	if !valid {
		event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
		return
	}
	// A zero sequence means the strategy could not take the share.
	sequence := mapper.strategy.Submit(event.JobID, event.Nonce, event.Result, event.Algo)
	if sequence == 0 {
		event.Miner.ReplyWithError(event.RequestID, "Pool unavailable")
		return
	}
	// NOTE(review): the pending entry is registered after Submit returns. If
	// the pool's reply can arrive on another goroutine before this block runs,
	// OnResultAccepted would find no entry and drop the ack — confirm the
	// strategy's delivery ordering.
	mapper.mu.Lock()
	mapper.pending[sequence] = simpleSubmitContext{
		RequestID:   event.RequestID,
		Job:         job,
		Expired:     expired,
		SubmittedAt: time.Now().UTC(),
	}
	mapper.mu.Unlock()
}
// OnClose detaches the mapper from a disconnecting miner. With a positive
// ReuseTimeout the mapper (and its upstream connection) is parked in the idle
// pool for reuse; otherwise it is stopped and its strategy disconnected.
func (s *SimpleSplitter) OnClose(event *proxy.CloseEvent) {
	if event == nil || event.Miner == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	mapper := s.active[event.Miner.ID()]
	if mapper == nil {
		return
	}
	delete(s.active, event.Miner.ID())
	mapper.clearPending()
	mapper.miner = nil
	mapper.idleAt = time.Now().UTC()
	if s.config != nil && s.config.ReuseTimeout > 0 {
		s.idle[mapper.id] = mapper
		return
	}
	// Reuse disabled: tear the mapper down immediately.
	mapper.stopped = true
	if mapper.strategy != nil {
		mapper.strategy.Disconnect()
	}
}
// Tick runs periodic maintenance; the idle pool is garbage-collected once a
// minute (ticks arrive once per second).
func (s *SimpleSplitter) Tick(ticks uint64) {
	const gcInterval = 60
	if ticks%gcInterval != 0 {
		return
	}
	s.GC()
}
// GC sweeps the idle pool, disconnecting and dropping mappers that are
// stopped, strategy-less, upstream-inactive, past the reuse timeout, or held
// while reuse is disabled (timeout == 0).
func (s *SimpleSplitter) GC() {
	s.mu.Lock()
	defer s.mu.Unlock()
	timeout := time.Duration(0)
	if s.config != nil && s.config.ReuseTimeout > 0 {
		timeout = time.Duration(s.config.ReuseTimeout) * time.Second
	}
	now := time.Now().UTC()
	for mapperID, mapper := range s.idle {
		if mapper == nil {
			delete(s.idle, mapperID)
			continue
		}
		if mapper.stopped || mapper.strategy == nil || !mapper.strategy.IsActive() || timeout == 0 || (!mapper.idleAt.IsZero() && now.Sub(mapper.idleAt) > timeout) {
			if mapper.strategy != nil {
				mapper.strategy.Disconnect()
			}
			delete(s.idle, mapperID)
		}
	}
}
// Upstreams reports upstream connection counts: idle mappers count as Sleep,
// active mappers split into Active (strategy alive) and Error; Total covers
// both sets.
func (s *SimpleSplitter) Upstreams() proxy.UpstreamStats {
	s.mu.Lock()
	defer s.mu.Unlock()
	idleCount := uint64(len(s.idle))
	stats := proxy.UpstreamStats{Sleep: idleCount, Total: idleCount}
	for _, mapper := range s.active {
		stats.Total++
		if mapper.strategy == nil || !mapper.strategy.IsActive() {
			stats.Error++
			continue
		}
		stats.Active++
	}
	return stats
}
// PendingCount totals in-flight submits across every mapper. The mapper set
// is snapshotted under the splitter lock first, so per-mapper locks are never
// taken while holding s.mu.
func (s *SimpleSplitter) PendingCount() int {
	s.mu.Lock()
	snapshot := make([]*SimpleMapper, 0, len(s.active)+len(s.idle))
	for _, mapper := range s.active {
		snapshot = append(snapshot, mapper)
	}
	for _, mapper := range s.idle {
		snapshot = append(snapshot, mapper)
	}
	s.mu.Unlock()
	total := 0
	for _, mapper := range snapshot {
		if mapper == nil {
			continue
		}
		mapper.mu.Lock()
		total += len(mapper.pending)
		mapper.mu.Unlock()
	}
	return total
}
func (s *SimpleSplitter) Disconnect() {
s.mu.Lock()
active := s.active
idle := s.idle
s.active = make(map[int64]*SimpleMapper)
s.idle = make(map[int64]*SimpleMapper)
s.mu.Unlock()
for _, mapper := range active {
if mapper == nil {
continue
}
mapper.mu.Lock()
mapper.stopped = true
strategy := mapper.strategy
mapper.strategy = nil
mapper.miner = nil
mapper.mu.Unlock()
if strategy != nil {
strategy.Disconnect()
}
}
for _, mapper := range idle {
if mapper == nil {
continue
}
mapper.mu.Lock()
mapper.stopped = true
strategy := mapper.strategy
mapper.strategy = nil
mapper.miner = nil
mapper.mu.Unlock()
if strategy != nil {
strategy.Disconnect()
}
}
} }

View file

@ -0,0 +1,196 @@
package simple
import (
"os"
"strings"
"testing"
"time"
"dappco.re/go/core/proxy"
"dappco.re/go/core/proxy/pool"
)
// fakeStrategy is a minimal pool.Strategy stub for these tests: it tracks
// lifecycle calls and reports a fixed activity flag.
type fakeStrategy struct {
	active      bool
	connects    int // NOTE(review): never incremented — Connect is a no-op; confirm whether it should count
	disconnects int
}

func (s *fakeStrategy) Connect() {}

// Submit always accepts and reports sequence 1.
func (s *fakeStrategy) Submit(jobID, nonce, result, algo string) int64 { return 1 }

func (s *fakeStrategy) Disconnect() {
	s.disconnects++
	s.active = false
}

func (s *fakeStrategy) IsActive() bool { return s.active }
// TestSimpleSplitter_OnLogin_Ugly: a dead idle mapper is pruned and
// disconnected on login, and a fresh mapper takes its place.
func TestSimpleSplitter_OnLogin_Ugly(t *testing.T) {
	deadStrategy := &fakeStrategy{active: false}
	liveStrategy := &fakeStrategy{active: true}
	splitter := &SimpleSplitter{
		active: make(map[int64]*SimpleMapper),
		idle: map[int64]*SimpleMapper{
			1: {
				id:       1,
				strategy: deadStrategy,
				idleAt:   time.Now().UTC(),
			},
		},
		config: &proxy.Config{ReuseTimeout: 60},
		strategyFactory: func(listener pool.StratumListener) pool.Strategy {
			return liveStrategy
		},
	}
	miner := &proxy.Miner{}
	splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
	if len(splitter.idle) != 0 {
		t.Fatalf("expected dead idle mapper to be discarded, got %d idle mappers", len(splitter.idle))
	}
	if len(splitter.active) != 1 {
		t.Fatalf("expected one active mapper, got %d", len(splitter.active))
	}
	if deadStrategy.disconnects != 1 {
		t.Fatalf("expected dead mapper to be disconnected once, got %d", deadStrategy.disconnects)
	}
	if miner.RouteID() == 0 {
		t.Fatal("expected miner to receive a route ID")
	}
}

// TestSimpleSplitter_OnLogin_Bad: an idle mapper past the reuse timeout is
// discarded even though its strategy is still active.
func TestSimpleSplitter_OnLogin_Bad(t *testing.T) {
	activeStrategy := &fakeStrategy{active: true}
	splitter := &SimpleSplitter{
		active: make(map[int64]*SimpleMapper),
		idle: map[int64]*SimpleMapper{
			1: {
				id:       1,
				strategy: activeStrategy,
				idleAt:   time.Now().UTC().Add(-2 * time.Minute),
			},
		},
		config: &proxy.Config{ReuseTimeout: 60},
		strategyFactory: func(listener pool.StratumListener) pool.Strategy {
			return activeStrategy
		},
	}
	miner := &proxy.Miner{}
	splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
	if len(splitter.idle) != 0 {
		t.Fatalf("expected stale idle mapper to be discarded, got %d idle mappers", len(splitter.idle))
	}
	if len(splitter.active) != 1 {
		t.Fatalf("expected one active mapper, got %d active mappers", len(splitter.active))
	}
}

// TestSimpleSplitter_OnClose_Ugly: closing a miner clears its pending submits
// and parks the mapper in the idle pool when reuse is enabled.
func TestSimpleSplitter_OnClose_Ugly(t *testing.T) {
	activeStrategy := &fakeStrategy{active: true}
	splitter := &SimpleSplitter{
		active: make(map[int64]*SimpleMapper),
		idle:   make(map[int64]*SimpleMapper),
		config: &proxy.Config{ReuseTimeout: 60},
		strategyFactory: func(listener pool.StratumListener) pool.Strategy {
			return activeStrategy
		},
	}
	miner := &proxy.Miner{}
	splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
	mapper := splitter.active[miner.ID()]
	if mapper == nil {
		t.Fatal("expected active mapper")
	}
	mapper.pending[1] = simpleSubmitContext{RequestID: 42}
	splitter.OnClose(&proxy.CloseEvent{Miner: miner})
	if len(mapper.pending) != 0 {
		t.Fatalf("expected pending submits to be cleared, got %d", len(mapper.pending))
	}
	if _, exists := splitter.idle[mapper.id]; !exists {
		t.Fatal("expected mapper to move to idle pool")
	}
}
// TestSimpleMapper_OnResultAccepted_Good: the accept event reports the job
// recorded at submit time (job-a), not the mapper's current job.
func TestSimpleMapper_OnResultAccepted_Good(t *testing.T) {
	bus := proxy.NewEventBus()
	resultCh := make(chan proxy.Event, 1)
	bus.Subscribe(proxy.EventAccept, func(event proxy.Event) {
		resultCh <- event
	})
	mapper := &SimpleMapper{
		miner:   &proxy.Miner{},
		events:  bus,
		pending: make(map[int64]simpleSubmitContext),
		job:     proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-b", Target: "b88d0600"},
		prevJob: proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-a", Target: "b88d0600"},
	}
	mapper.pending[1] = simpleSubmitContext{
		RequestID:   7,
		Job:         proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-a", Target: "b88d0600"},
		SubmittedAt: time.Now().UTC(),
	}
	mapper.OnResultAccepted(1, true, "")
	select {
	case event := <-resultCh:
		if event.Job == nil || event.Job.JobID != "job-a" {
			t.Fatalf("expected submitted job to be reported, got %#v", event.Job)
		}
	case <-time.After(time.Second):
		t.Fatal("expected accept event")
	}
}

// TestSimpleMapper_JobForID_BadClientID: a previous job from a different
// upstream client must not be resolvable.
func TestSimpleMapper_JobForID_BadClientID(t *testing.T) {
	mapper := &SimpleMapper{
		pending: make(map[int64]simpleSubmitContext),
	}
	mapper.OnJob(proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-a", ClientID: "pool-a"})
	mapper.OnJob(proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-b", ClientID: "pool-b"})
	if valid, expired := mapper.JobStatus("job-a"); valid || expired {
		t.Fatalf("expected stale job from a different client to be invalid, got valid=%t expired=%t", valid, expired)
	}
}

// TestConfigWatcher_Start_Ugly: the watcher stays quiet for an unchanged file
// and fires after the file is rewritten.
func TestConfigWatcher_Start_Ugly(t *testing.T) {
	path := t.TempDir() + "/config.json"
	errorValue := os.WriteFile(path, []byte(`{"mode":"simple","workers":"rig-id","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-a:3333","enabled":true}]}`), 0o644)
	if errorValue != nil {
		t.Fatal(errorValue)
	}
	watcherTriggered := make(chan struct{}, 1)
	watcher := proxy.NewConfigWatcher(path, func(cfg *proxy.Config) {
		watcherTriggered <- struct{}{}
	})
	watcher.Start()
	defer watcher.Stop()
	select {
	case <-watcherTriggered:
		t.Fatal("expected watcher to stay quiet until the file changes")
	case <-time.After(1200 * time.Millisecond):
	}
	if errorValue = os.WriteFile(path, []byte(`{"mode":"simple","workers":"rig-id","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-b:3333","enabled":true}]}`), 0o644); errorValue != nil {
		t.Fatal(errorValue)
	}
	select {
	case <-watcherTriggered:
	case <-time.After(2 * time.Second):
		t.Fatal("expected watcher to observe the modification")
	}
}

188
stats.go
View file

@ -1,6 +1,8 @@
package proxy package proxy
import ( import (
"slices"
"sort"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -13,18 +15,18 @@ import (
// bus.Subscribe(proxy.EventAccept, s.OnAccept) // bus.Subscribe(proxy.EventAccept, s.OnAccept)
// bus.Subscribe(proxy.EventReject, s.OnReject) // bus.Subscribe(proxy.EventReject, s.OnReject)
type Stats struct { type Stats struct {
accepted atomic.Uint64 accepted atomic.Uint64
rejected atomic.Uint64 rejected atomic.Uint64
invalid atomic.Uint64 invalid atomic.Uint64
expired atomic.Uint64 expired atomic.Uint64
hashes atomic.Uint64 // cumulative sum of accepted share difficulties hashes atomic.Uint64 // cumulative sum of accepted share difficulties
connections atomic.Uint64 // total TCP connections accepted (ever) connections atomic.Uint64 // total TCP connections accepted (ever)
maxMiners atomic.Uint64 // peak concurrent miner count maxMiners atomic.Uint64 // peak concurrent miner count
topDiff [10]uint64 // top-10 accepted difficulties, sorted descending; guarded by mu topDifficulties [10]uint64 // top-10 accepted difficulties, sorted descending; guarded by mu
latency []uint16 // pool response latencies in ms; capped at 10000 samples; guarded by mu latencySamples []uint16 // pool response latencies in ms; capped at 10000 samples; guarded by mu
windows [6]tickWindow // one per hashrate reporting period windows [6]tickWindow // one per hashrate reporting period
startTime time.Time startTime time.Time
mu sync.Mutex mu sync.Mutex
} }
// Hashrate window sizes in seconds. Index maps to Stats.windows and SummaryResponse.Hashrate. // Hashrate window sizes in seconds. Index maps to Stats.windows and SummaryResponse.Hashrate.
@ -53,8 +55,164 @@ type StatsSummary struct {
Invalid uint64 `json:"invalid"` Invalid uint64 `json:"invalid"`
Expired uint64 `json:"expired"` Expired uint64 `json:"expired"`
Hashes uint64 `json:"hashes_total"` Hashes uint64 `json:"hashes_total"`
AvgTime uint32 `json:"avg_time"` // seconds per accepted share AvgTime uint32 `json:"avg_time"` // seconds per accepted share
AvgLatency uint32 `json:"latency"` // median pool response latency in ms AvgLatency uint32 `json:"latency"` // median pool response latency in ms
Hashrate [6]float64 `json:"hashrate"` // H/s per window (index = HashrateWindow* constants) Hashrate [6]float64 `json:"hashrate"` // H/s per window (index = HashrateWindow* constants)
TopDiff [10]uint64 `json:"best"` TopDiff [10]uint64 `json:"best"`
} }
// hashrateWindowSizes are the rolling-window lengths in seconds: 1m, 10m, 1h, 12h, 24h.
var hashrateWindowSizes = [5]int{60, 600, 3600, 43200, 86400}
// NewStats allocates the rolling windows and initialises the clock anchor.
//
// s := proxy.NewStats()
func NewStats() *Stats {
	stats := &Stats{startTime: time.Now().UTC()}
	stats.latencySamples = make([]uint16, 0, 128)
	for windowIndex := range hashrateWindowSizes {
		seconds := hashrateWindowSizes[windowIndex]
		stats.windows[windowIndex] = tickWindow{
			size:    seconds,
			buckets: make([]uint64, seconds),
		}
	}
	return stats
}
// OnAccept records an accepted share. Adds diff to the current second's bucket in all windows.
//
// stats.OnAccept(proxy.Event{Diff: 100000, Latency: 82})
func (s *Stats) OnAccept(event Event) {
	s.accepted.Add(1)
	s.hashes.Add(event.Diff)
	if event.Expired {
		s.expired.Add(1)
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	for windowIndex := 0; windowIndex < HashrateWindowAll; windowIndex++ {
		window := &s.windows[windowIndex]
		window.buckets[window.pos] += event.Diff
	}
	insertTopDiff(&s.topDifficulties, event.Diff)
	if event.Latency > 0 {
		s.latencySamples = appendCappedLatency(s.latencySamples, event.Latency)
	}
}
// OnReject records a rejected share. If e.Error indicates low diff or malformed, increments invalid.
//
// stats.OnReject(proxy.Event{Error: "Low difficulty share"})
func (s *Stats) OnReject(event Event) {
	s.rejected.Add(1)
	if isInvalidShareError(event.Error) {
		s.invalid.Add(1)
	}
	if event.Expired {
		s.expired.Add(1)
	}
	if event.Latency == 0 {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.latencySamples = appendCappedLatency(s.latencySamples, event.Latency)
}
// Tick advances all rolling windows by one second bucket. Called by the proxy tick loop.
//
// stats.Tick()
func (s *Stats) Tick() {
	s.mu.Lock()
	defer s.mu.Unlock()
	for windowIndex := 0; windowIndex < HashrateWindowAll; windowIndex++ {
		current := &s.windows[windowIndex]
		current.pos = (current.pos + 1) % current.size
		current.buckets[current.pos] = 0
	}
}
// Summary returns a point-in-time snapshot of all stats fields for API serialisation.
//
// summary := stats.Summary()
func (s *Stats) Summary() StatsSummary {
	s.mu.Lock()
	defer s.mu.Unlock()
	var summary StatsSummary
	summary.Accepted = s.accepted.Load()
	summary.Rejected = s.rejected.Load()
	summary.Invalid = s.invalid.Load()
	summary.Expired = s.expired.Load()
	summary.Hashes = s.hashes.Load()
	summary.TopDiff = s.topDifficulties
	// Per-window H/s: bucket sum divided by the window length in seconds.
	for index := 0; index < HashrateWindowAll; index++ {
		windowSize := hashrateWindowSizes[index]
		summary.Hashrate[index] = float64(sumBuckets(s.windows[index].buckets)) / float64(windowSize)
	}
	// The final slot is the lifetime average since startTime.
	uptimeSeconds := uint64(time.Since(s.startTime).Seconds())
	if uptimeSeconds > 0 {
		summary.Hashrate[HashrateWindowAll] = float64(summary.Hashes) / float64(uptimeSeconds)
	}
	if summary.Accepted > 0 && uptimeSeconds > 0 {
		summary.AvgTime = uint32(uptimeSeconds / summary.Accepted)
	}
	// Median (upper median for even counts) over a clone of the samples, so
	// the stored order is untouched.
	if len(s.latencySamples) > 0 {
		values := slices.Clone(s.latencySamples)
		sort.Slice(values, func(left int, right int) bool {
			return values[left] < values[right]
		})
		summary.AvgLatency = uint32(values[len(values)/2])
	}
	return summary
}
// appendCappedLatency appends latency, keeping at most 10000 samples by
// dropping the oldest (FIFO) once the cap is reached.
//
// Fix: the cap check used ==, which would grow without bound if the slice
// ever exceeded 10000 entries (e.g. via a future bulk append); >= makes the
// cap robust to that.
func appendCappedLatency(latencies []uint16, latency uint16) []uint16 {
	const maxLatencySamples = 10000
	if len(latencies) >= maxLatencySamples {
		copy(latencies, latencies[1:])
		latencies[len(latencies)-1] = latency
		return latencies
	}
	return append(latencies, latency)
}
// insertTopDiff inserts difficulty into the descending top-10 board, shifting
// lower entries down one slot. Zero difficulties are ignored.
func insertTopDiff(topDiff *[10]uint64, difficulty uint64) {
	if difficulty == 0 {
		return
	}
	for slot := 0; slot < len(topDiff); slot++ {
		if topDiff[slot] >= difficulty {
			continue
		}
		copy(topDiff[slot+1:], topDiff[slot:len(topDiff)-1])
		topDiff[slot] = difficulty
		return
	}
}
// isInvalidShareError reports whether a pool rejection message describes a
// malformed or low-difficulty share, as opposed to a transient pool problem.
func isInvalidShareError(message string) bool {
	switch message {
	case "Low difficulty share",
		"Invalid nonce",
		"Malformed share",
		"Invalid result":
		return true
	}
	return false
}
// sumBuckets totals every per-second bucket of a rolling window.
func sumBuckets(values []uint64) uint64 {
	var sum uint64
	for index := range values {
		sum += values[index]
	}
	return sum
}

141
stats_workers_test.go Normal file
View file

@ -0,0 +1,141 @@
package proxy
import "testing"
// TestEventBus_Dispatch_Good: a subscribed handler receives the event payload.
func TestEventBus_Dispatch_Good(t *testing.T) {
	bus := NewEventBus()
	called := false
	bus.Subscribe(EventLogin, func(event Event) {
		called = event.Miner != nil
	})
	bus.Dispatch(Event{Type: EventLogin, Miner: &Miner{}})
	if !called {
		t.Fatal("expected handler to be called")
	}
}

// TestEventBus_Dispatch_Bad: a nil handler must not panic on dispatch.
func TestEventBus_Dispatch_Bad(t *testing.T) {
	bus := NewEventBus()
	bus.Subscribe(EventLogin, nil)
	bus.Dispatch(Event{Type: EventLogin})
}

// TestEventBus_Dispatch_Ugly: every subscriber for a type runs.
func TestEventBus_Dispatch_Ugly(t *testing.T) {
	bus := NewEventBus()
	count := 0
	bus.Subscribe(EventLogin, func(event Event) { count++ })
	bus.Subscribe(EventLogin, func(event Event) { count++ })
	bus.Dispatch(Event{Type: EventLogin})
	if count != 2 {
		t.Fatalf("expected both handlers to run, got %d", count)
	}
}
// TestStats_Summary_Good: an accepted share bumps Accepted and Hashes.
func TestStats_Summary_Good(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 120, Latency: 80})
	summary := stats.Summary()
	if summary.Accepted != 1 || summary.Hashes != 120 {
		t.Fatalf("unexpected summary: %+v", summary)
	}
}

// TestStats_Summary_Bad: a low-difficulty rejection counts as both rejected
// and invalid.
func TestStats_Summary_Bad(t *testing.T) {
	stats := NewStats()
	stats.OnReject(Event{Error: "Low difficulty share"})
	summary := stats.Summary()
	if summary.Rejected != 1 || summary.Invalid != 1 {
		t.Fatalf("unexpected summary: %+v", summary)
	}
}

// TestStats_Summary_Ugly: the top-diff board stays sorted across ticks.
func TestStats_Summary_Ugly(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 100, Latency: 10})
	stats.Tick()
	stats.OnAccept(Event{Diff: 200, Latency: 20})
	summary := stats.Summary()
	if summary.TopDiff[0] != 200 || summary.TopDiff[1] != 100 {
		t.Fatalf("unexpected best shares: %+v", summary.TopDiff)
	}
}
// TestWorkers_List_Good: rig-ID bucketing names the record after the rig and
// counts accepted shares.
func TestWorkers_List_Good(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByRigID, bus)
	miner := &Miner{id: 1, user: "wallet", rigID: "rig-a", ip: "10.0.0.1"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 600})
	records := workers.List()
	if len(records) != 1 || records[0].Name != "rig-a" || records[0].Accepted != 1 {
		t.Fatalf("unexpected worker records: %+v", records)
	}
}

// TestWorkers_List_Bad: the disabled mode records nothing.
func TestWorkers_List_Bad(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersDisabled, bus)
	bus.Dispatch(Event{Type: EventLogin, Miner: &Miner{id: 1, user: "wallet"}})
	if len(workers.List()) != 0 {
		t.Fatal("expected no worker records when disabled")
	}
}

// TestWorkers_List_Ugly: a miner without a rig ID falls back to the wallet
// name, and a low-diff rejection increments Invalid.
func TestWorkers_List_Ugly(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByRigID, bus)
	miner := &Miner{id: 1, user: "wallet", ip: "10.0.0.1"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	bus.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Low difficulty share"})
	records := workers.List()
	if len(records) != 1 || records[0].Name != "wallet" || records[0].Invalid != 1 {
		t.Fatalf("unexpected worker records: %+v", records)
	}
}
// TestWorkers_CustomDiffStats_Good: with custom-diff stats on, different
// custom difficulties for the same wallet land in separate buckets.
func TestWorkers_CustomDiffStats_Good(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	workers.SetCustomDiffStats(true)
	firstMiner := &Miner{id: 1, user: "wallet", customDiff: 1000}
	secondMiner := &Miner{id: 2, user: "wallet", customDiff: 2000}
	bus.Dispatch(Event{Type: EventLogin, Miner: firstMiner})
	bus.Dispatch(Event{Type: EventLogin, Miner: secondMiner})
	records := workers.List()
	if len(records) != 2 || records[0].Name == records[1].Name {
		t.Fatalf("expected separate custom-diff buckets, got %+v", records)
	}
}

// TestWorkers_CustomDiffStats_Bad: identical custom difficulties merge into
// one bucket.
func TestWorkers_CustomDiffStats_Bad(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	workers.SetCustomDiffStats(true)
	firstMiner := &Miner{id: 1, user: "wallet", customDiff: 1000}
	secondMiner := &Miner{id: 2, user: "wallet", customDiff: 1000}
	bus.Dispatch(Event{Type: EventLogin, Miner: firstMiner})
	bus.Dispatch(Event{Type: EventLogin, Miner: secondMiner})
	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected identical custom-diff bucket to merge, got %+v", records)
	}
}

// TestWorkers_CustomDiffStats_Ugly: without the toggle, custom difficulty
// does not split buckets.
func TestWorkers_CustomDiffStats_Ugly(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	firstMiner := &Miner{id: 1, user: "wallet", customDiff: 1000}
	secondMiner := &Miner{id: 2, user: "wallet", customDiff: 2000}
	bus.Dispatch(Event{Type: EventLogin, Miner: firstMiner})
	bus.Dispatch(Event{Type: EventLogin, Miner: secondMiner})
	records := workers.List()
	if len(records) != 1 || records[0].Name != "wallet" {
		t.Fatalf("expected default worker bucketing to ignore custom diff, got %+v", records)
	}
}

120
tls_runtime.go Normal file
View file

@ -0,0 +1,120 @@
package proxy
import (
"crypto/tls"
"strconv"
"strings"
)
// buildTLSConfig translates the proxy TLSConfig strings into a *tls.Config.
// Unparseable protocol or cipher lists leave the corresponding field at its
// zero value, letting crypto/tls apply its defaults.
func buildTLSConfig(config TLSConfig) *tls.Config {
	result := &tls.Config{}
	if bounds := parseTLSVersions(config.Protocols); bounds != nil {
		result.MinVersion = bounds.min
		result.MaxVersion = bounds.max
	}
	if parsed := parseCipherSuites(config.Ciphers); len(parsed) > 0 {
		result.CipherSuites = parsed
	}
	return result
}
// tlsVersionBounds holds the inclusive [min, max] TLS protocol versions
// (tls.VersionTLS10..tls.VersionTLS13) parsed from a configuration string.
type tlsVersionBounds struct {
	min uint16
	max uint16
}
// parseTLSVersions scans a delimiter-separated protocol list and returns the
// lowest and highest recognised TLS versions, or nil when nothing parses.
func parseTLSVersions(value string) *tlsVersionBounds {
	if strings.TrimSpace(value) == "" {
		return nil
	}
	var bounds tlsVersionBounds
	for _, token := range splitTLSList(value) {
		version, recognised := parseTLSVersionToken(token)
		if !recognised {
			continue
		}
		if bounds.min == 0 || version < bounds.min {
			bounds.min = version
		}
		if version > bounds.max {
			bounds.max = version
		}
	}
	if bounds.min == 0 || bounds.max == 0 {
		return nil
	}
	return &bounds
}
func parseTLSVersionToken(token string) (uint16, bool) {
switch strings.ToLower(strings.TrimSpace(token)) {
case "tls1.0", "tlsv1.0", "tls1", "tlsv1", "1.0", "tls10":
return tls.VersionTLS10, true
case "tls1.1", "tlsv1.1", "1.1", "tls11":
return tls.VersionTLS11, true
case "tls1.2", "tlsv1.2", "1.2", "tls12":
return tls.VersionTLS12, true
case "tls1.3", "tlsv1.3", "1.3", "tls13":
return tls.VersionTLS13, true
}
if raw, errorValue := strconv.ParseUint(strings.TrimSpace(token), 10, 16); errorValue == nil {
switch uint16(raw) {
case tls.VersionTLS10, tls.VersionTLS11, tls.VersionTLS12, tls.VersionTLS13:
return uint16(raw), true
}
}
return 0, false
}
// parseCipherSuites maps a delimiter-separated cipher list (Go or OpenSSL
// names) to crypto/tls suite IDs, silently skipping unknown entries.
func parseCipherSuites(value string) []uint16 {
	if strings.TrimSpace(value) == "" {
		return nil
	}
	var identifiers []uint16
	for _, token := range splitTLSList(value) {
		name := strings.ToUpper(strings.TrimSpace(token))
		if suite, known := tlsCipherSuiteNames[name]; known {
			identifiers = append(identifiers, suite)
		}
	}
	return identifiers
}
// splitTLSList splits a configuration list on any accepted delimiter
// (colon, comma, space, semicolon), dropping empty tokens.
func splitTLSList(value string) []string {
	isDelimiter := func(r rune) bool {
		return r == ':' || r == ',' || r == ' ' || r == ';'
	}
	return strings.FieldsFunc(value, isDelimiter)
}
// tlsCipherSuiteNames maps both Go-style (TLS_*) and OpenSSL-style cipher
// names to crypto/tls suite IDs. Lookup keys are upper-cased by the caller.
var tlsCipherSuiteNames = map[string]uint16{
	"TLS_RSA_WITH_AES_128_GCM_SHA256":               tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
	"TLS_RSA_WITH_AES_256_GCM_SHA384":               tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":         tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384":         tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256":       tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384":       tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
	"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256":   tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
	"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
	// TLS 1.3 suites (fixed by the protocol; kept for explicit configs).
	"TLS_AES_128_GCM_SHA256":       tls.TLS_AES_128_GCM_SHA256,
	"TLS_AES_256_GCM_SHA384":       tls.TLS_AES_256_GCM_SHA384,
	"TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256,
	// OpenSSL aliases.
	"ECDHE-RSA-AES128-GCM-SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	"ECDHE-RSA-AES256-GCM-SHA384":   tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	"ECDHE-ECDSA-AES128-GCM-SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	"ECDHE-ECDSA-AES256-GCM-SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
	"AES128-GCM-SHA256":             tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
	"AES256-GCM-SHA384":             tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
	"ECDHE-RSA-CHACHA20-POLY1305":   tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
	"ECDHE-ECDSA-CHACHA20-POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
	"CHACHA20-POLY1305":             tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
}

48
tls_runtime_test.go Normal file
View file

@ -0,0 +1,48 @@
package proxy
import (
"crypto/tls"
"testing"
)
// TestTLSRuntime_buildTLSConfig_Good: mixed Go/OpenSSL cipher names and a
// protocol range both translate correctly.
func TestTLSRuntime_buildTLSConfig_Good(t *testing.T) {
	config := buildTLSConfig(TLSConfig{
		Ciphers:   "ECDHE-RSA-AES128-GCM-SHA256:TLS_AES_128_GCM_SHA256",
		Protocols: "TLSv1.2,TLSv1.3",
	})
	if config.MinVersion != tls.VersionTLS12 {
		t.Fatalf("expected min version TLS1.2, got %d", config.MinVersion)
	}
	if config.MaxVersion != tls.VersionTLS13 {
		t.Fatalf("expected max version TLS1.3, got %d", config.MaxVersion)
	}
	if len(config.CipherSuites) != 2 || config.CipherSuites[0] != tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 || config.CipherSuites[1] != tls.TLS_AES_128_GCM_SHA256 {
		t.Fatalf("unexpected cipher suites: %#v", config.CipherSuites)
	}
}

// TestTLSRuntime_buildTLSConfig_Bad: fully invalid input leaves crypto/tls
// defaults (zero values) in place.
func TestTLSRuntime_buildTLSConfig_Bad(t *testing.T) {
	config := buildTLSConfig(TLSConfig{Protocols: "bogus", Ciphers: "bogus"})
	if config.MinVersion != 0 || config.MaxVersion != 0 {
		t.Fatalf("expected default versions for invalid input, got min=%d max=%d", config.MinVersion, config.MaxVersion)
	}
	if len(config.CipherSuites) != 0 {
		t.Fatalf("expected no cipher suites for invalid input, got %#v", config.CipherSuites)
	}
}

// TestTLSRuntime_buildTLSConfig_Ugly: unknown ciphers are skipped and the
// version range spans the lowest and highest recognised tokens.
func TestTLSRuntime_buildTLSConfig_Ugly(t *testing.T) {
	config := buildTLSConfig(TLSConfig{Protocols: "1.1:1.2:1.3", Ciphers: "AES128-GCM-SHA256,unknown"})
	if config.MinVersion != tls.VersionTLS11 {
		t.Fatalf("expected min version TLS1.1, got %d", config.MinVersion)
	}
	if config.MaxVersion != tls.VersionTLS13 {
		t.Fatalf("expected max version TLS1.3, got %d", config.MaxVersion)
	}
	if len(config.CipherSuites) != 1 || config.CipherSuites[0] != tls.TLS_RSA_WITH_AES_128_GCM_SHA256 {
		t.Fatalf("unexpected cipher suites: %#v", config.CipherSuites)
	}
}

216
worker.go
View file

@ -1,6 +1,7 @@
package proxy package proxy
import ( import (
"strconv"
"sync" "sync"
"time" "time"
) )
@ -10,11 +11,12 @@ import (
// //
// w := proxy.NewWorkers(proxy.WorkersByRigID, bus) // w := proxy.NewWorkers(proxy.WorkersByRigID, bus)
type Workers struct { type Workers struct {
mode WorkersMode mode WorkersMode
entries []WorkerRecord // ordered by first-seen (stable) customDiffStats bool
nameIndex map[string]int // workerName → entries index entries []WorkerRecord // ordered by first-seen (stable)
idIndex map[int64]int // minerID → entries index nameIndex map[string]int // workerName → entries index
mu sync.RWMutex idIndex map[int64]int // minerID → entries index
mu sync.RWMutex
} }
// WorkerRecord is the per-identity aggregate. // WorkerRecord is the per-identity aggregate.
@ -27,7 +29,209 @@ type WorkerRecord struct {
Accepted uint64 Accepted uint64
Rejected uint64 Rejected uint64
Invalid uint64 Invalid uint64
Hashes uint64 // sum of accepted share difficulties Hashes uint64 // sum of accepted share difficulties
LastHashAt time.Time LastHashAt time.Time
windows [5]tickWindow // 60s, 600s, 3600s, 12h, 24h windows [5]tickWindow // 60s, 600s, 3600s, 12h, 24h
} }
// Hashrate returns the H/s for a given window (seconds: 60, 600, 3600, 43200, 86400).
//
// hr60 := record.Hashrate(60)
func (r *WorkerRecord) Hashrate(seconds int) float64 {
	for windowIndex, windowSeconds := range hashrateWindowSizes {
		if windowSeconds != seconds {
			continue
		}
		return float64(sumBuckets(r.windows[windowIndex].buckets)) / float64(seconds)
	}
	// Unknown window size: report no hashrate rather than guessing.
	return 0
}
// NewWorkers creates the worker aggregate and subscribes it to the event bus.
//
// w := proxy.NewWorkers(proxy.WorkersByRigID, bus)
func NewWorkers(mode WorkersMode, bus *EventBus) *Workers {
	aggregate := &Workers{
		mode:      mode,
		entries:   []WorkerRecord{},
		nameIndex: map[string]int{},
		idIndex:   map[int64]int{},
	}
	if bus == nil {
		return aggregate
	}
	bus.Subscribe(EventLogin, aggregate.onLogin)
	bus.Subscribe(EventAccept, aggregate.onAccept)
	bus.Subscribe(EventReject, aggregate.onReject)
	bus.Subscribe(EventClose, aggregate.onClose)
	return aggregate
}
// SetCustomDiffStats toggles per-custom-difficulty worker bucketing.
//
// workers.SetCustomDiffStats(true)
func (w *Workers) SetCustomDiffStats(enabled bool) {
	if w == nil {
		return
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	w.customDiffStats = enabled
}
// List returns a snapshot of all worker records in first-seen order.
//
// records := workers.List()
func (w *Workers) List() []WorkerRecord {
	w.mu.RLock()
	defer w.mu.RUnlock()
	snapshot := make([]WorkerRecord, len(w.entries))
	copy(snapshot, w.entries)
	return snapshot
}
// Tick advances all worker hashrate windows. Called by the proxy tick loop every second.
//
// workers.Tick()
func (w *Workers) Tick() {
	w.mu.Lock()
	defer w.mu.Unlock()
	for entryIndex := range w.entries {
		for windowIndex, size := range hashrateWindowSizes {
			if windowIndex >= len(w.entries[entryIndex].windows) {
				break
			}
			window := &w.entries[entryIndex].windows[windowIndex]
			// Lazily allocate buckets for records whose windows were never
			// pre-sized (defensive; onLogin normally initialises them).
			if window.size == 0 {
				window.size = size
				window.buckets = make([]uint64, size)
			}
			window.pos = (window.pos + 1) % window.size
			window.buckets[window.pos] = 0
		}
	}
}
// onLogin registers (or revisits) the worker record for a connecting
// miner and maps the miner's connection ID to that record.
func (w *Workers) onLogin(event Event) {
	if w.mode == WorkersDisabled || event.Miner == nil {
		return
	}
	// workerName takes its own read lock, so resolve it before locking.
	name := w.workerName(event.Miner)
	if name == "" {
		return
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	idx, ok := w.nameIndex[name]
	if !ok {
		fresh := WorkerRecord{Name: name}
		for i, size := range hashrateWindowSizes {
			if i >= len(fresh.windows) {
				break
			}
			fresh.windows[i] = tickWindow{
				size:    size,
				buckets: make([]uint64, size),
			}
		}
		idx = len(w.entries)
		w.entries = append(w.entries, fresh)
		w.nameIndex[name] = idx
	}
	rec := &w.entries[idx]
	rec.LastIP = event.Miner.IP()
	rec.Connections++
	w.idIndex[event.Miner.ID()] = idx
}
// onAccept credits an accepted share to the submitting worker.
func (w *Workers) onAccept(event Event) {
	w.updateShare(event, true)
}
// onReject charges a rejected share to the submitting worker.
func (w *Workers) onReject(event Event) {
	w.updateShare(event, false)
}
// onClose drops the connection-ID mapping when a miner disconnects.
// The worker record itself is retained so lifetime stats survive.
func (w *Workers) onClose(event Event) {
	if event.Miner == nil {
		return
	}
	w.mu.Lock()
	delete(w.idIndex, event.Miner.ID())
	w.mu.Unlock()
}
// updateShare folds a share result into the record owned by the event's
// connection. Accepted shares feed the hashrate windows and lifetime
// totals; rejected ones bump the reject (and possibly invalid) counters.
func (w *Workers) updateShare(event Event, accepted bool) {
	if w.mode == WorkersDisabled || event.Miner == nil {
		return
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	idx, ok := w.idIndex[event.Miner.ID()]
	if !ok {
		// Share from a connection that never logged in or already closed.
		return
	}
	rec := &w.entries[idx]
	if !accepted {
		rec.Rejected++
		if isInvalidShareError(event.Error) {
			rec.Invalid++
		}
		return
	}
	rec.Accepted++
	rec.Hashes += event.Diff
	rec.LastHashAt = time.Now().UTC()
	for i := range rec.windows {
		win := &rec.windows[i]
		win.buckets[win.pos] += event.Diff
	}
}
// workerName derives the bucketing key for a miner according to the
// configured mode, optionally suffixed with the miner's custom
// difficulty when per-custom-diff stats are enabled.
func (w *Workers) workerName(miner *Miner) string {
	if miner == nil {
		return ""
	}
	w.mu.RLock()
	withDiff := w.customDiffStats
	w.mu.RUnlock()

	var name string
	switch w.mode {
	case WorkersByRigID:
		// Fall back to the user when no rig ID was supplied.
		name = miner.RigID()
		if name == "" {
			name = miner.User()
		}
	case WorkersByUser:
		name = miner.User()
	case WorkersByPass:
		name = miner.Password()
	case WorkersByAgent:
		name = miner.Agent()
	case WorkersByIP:
		name = miner.IP()
	default:
		return ""
	}
	if name == "" || !withDiff || miner.CustomDiff() == 0 {
		return name
	}
	return name + "+cd" + strconv.FormatUint(miner.CustomDiff(), 10)
}