Compare commits

138 commits

| SHA1 |
|---|
| 2470f1ac3d |
| 31a151d23c |
| 6f0747abc2 |
| 711c4259f7 |
| 8cf01f2618 |
| b6b44b1f7b |
| e1eadf705d |
| ea378354de |
| 8a9046356e |
| d1a899805e |
| 5680539dbb |
| 82b2375058 |
| 01a0cc5907 |
| 031f0c0f17 |
| 75d151b4e5 |
| 70fcbd4d43 |
| 686f4ea54f |
| af96bfce94 |
| 1ae781608c |
| ee128e944d |
| 1f8ff58b20 |
| bc6113c80d |
| b3fd1fef61 |
| e518f2df32 |
| 30ff013158 |
| be47d7afde |
| 65f6c733a0 |
| f4f0081eb0 |
| f0d5f6ae86 |
| 0a7c99264b |
| 35db5f6840 |
| 8a52856719 |
| 5d8d82b9b5 |
| 356eb9cec1 |
| cbde021d0c |
| f2f7dfed75 |
| ce3b7a50cd |
| ecd4130457 |
| 5a3fcf4fab |
| 7dd9807a6e |
| 7b2a7ccd88 |
| 9f34bc7200 |
| a1f47f5792 |
| b5e4a6499f |
| b9b3c47b4c |
| fefae4b3e5 |
| 264479d57b |
| d43c8ee4c1 |
| 05b0bb5ea4 |
| 2a49caca03 |
| 3cd0909d74 |
| d0ae26a1a2 |
| 3f9da136e9 |
| f3c5175785 |
| e94616922d |
| d8b4bf2775 |
| 3debd08a64 |
| eabe9b521d |
| a11d5b0969 |
| 766c4d1946 |
| 8ad123ecab |
| 55d44df9c2 |
| 9d2b1f368c |
| 2364633afc |
| 9460f82738 |
| cf4136c8f0 |
| 460aae14fb |
| bbdff60580 |
| a76e6be1c7 |
| 96f7f18c96 |
| 77435d44fe |
| ad069a45d5 |
| 7a48e479ec |
| fd88492b00 |
| fd6bc01b87 |
| 9e44fb6ea3 |
| fd76640d69 |
| fb5453c097 |
| 34f95071d9 |
| 4e5311215d |
| 2d39783dc4 |
| e2bd10c94f |
| 1e6ba01d03 |
| c0efdfb0ca |
| 619b3c500d |
| 8a321e2467 |
| 167ecc2bdc |
| 0bb5ce827b |
| 6f0f695054 |
| 4a0213e89f |
| 84362d9dc5 |
| 4006f33c1e |
| 9b6a251145 |
| 0c746e4ea7 |
| e594b04d7c |
| 187a366d74 |
| 5ba21cb9bf |
| 2b8bba790c |
| cfd669e4d2 |
| 6422a948bf |
| 8bde2c14d0 |
| a79b35abaf |
| 5e343a7354 |
| 4c2a0ffab7 |
| 33d35ed063 |
| c74f62e6d7 |
| 8b47e6a11b |
| 6d6934f37b |
| c62f2c86a9 |
| 1548643c65 |
| 1065b78b7c |
| 9028334d49 |
| 186524b3a8 |
| d9c59c668d |
| 8faac7eee6 |
| ce7d3301fc |
| c7d688ccfa |
| d42c21438a |
| 86c07943b0 |
| 35d8c524e4 |
| d47d89af7a |
| b66739b64f |
| 3efa7f34d0 |
| b3ad79d832 |
| d10a57e377 |
| 6d6da10885 |
| b16ebc1a28 |
| 2f59714cce |
| 21fce78ffe |
| e92c6070be |
| c250a4d6f2 |
| 4a281e6e25 |
| 1bcbb389e6 |
| bc67e73ca0 |
| 31a8ba558f |
| 6f4d7019e2 |
| 64443c41f6 |
| 7f44596858 |
64 changed files with 7351 additions and 873 deletions
CODEX.md (new file, 14 lines)

```markdown
# CODEX.md

This repository uses `CLAUDE.md` as the detailed source of truth for working conventions.
This file exists so agent workflows that expect `CODEX.md` can resolve the repo rules directly.

## Core Conventions

- Read `docs/RFC.md` before changing behaviour.
- Preserve existing user changes in the worktree.
- Prefer `rg` for search and `apply_patch` for edits.
- Keep names predictable and comments example-driven.
- Run `go test ./...` and `go test -race ./...` before committing when practical.
- Commit with a conventional message and include the required co-author line when requested by repo policy.
```
accesslog_impl.go (new file, 121 lines)

```go
package proxy

import (
	"os"
	"strconv"
	"strings"
	"sync"
	"time"
)

type accessLogSink struct {
	path string
	file *os.File
	mu   sync.Mutex
}

func newAccessLogSink(path string) *accessLogSink {
	return &accessLogSink{path: path}
}

func (l *accessLogSink) SetPath(path string) {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.path == path {
		return
	}
	l.path = path
	if l.file != nil {
		_ = l.file.Close()
		l.file = nil
	}
}

func (l *accessLogSink) Close() {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.file != nil {
		_ = l.file.Close()
		l.file = nil
	}
}

func (l *accessLogSink) OnLogin(e Event) {
	if l == nil || e.Miner == nil {
		return
	}
	l.writeConnectLine(e.Miner.IP(), e.Miner.User(), e.Miner.Agent())
}

func (l *accessLogSink) OnClose(e Event) {
	if l == nil || e.Miner == nil {
		return
	}
	l.writeCloseLine(e.Miner.IP(), e.Miner.User(), e.Miner.RX(), e.Miner.TX())
}

func (l *accessLogSink) writeConnectLine(ip, user, agent string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if strings.TrimSpace(l.path) == "" {
		return
	}
	if l.file == nil {
		file, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
		if err != nil {
			return
		}
		l.file = file
	}
	var builder strings.Builder
	builder.WriteString(time.Now().UTC().Format(time.RFC3339))
	builder.WriteByte(' ')
	builder.WriteString("CONNECT")
	builder.WriteString(" ")
	builder.WriteString(ip)
	builder.WriteString(" ")
	builder.WriteString(user)
	builder.WriteString(" ")
	builder.WriteString(agent)
	builder.WriteByte('\n')
	_, _ = l.file.WriteString(builder.String())
}

func (l *accessLogSink) writeCloseLine(ip, user string, rx, tx uint64) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if strings.TrimSpace(l.path) == "" {
		return
	}
	if l.file == nil {
		file, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
		if err != nil {
			return
		}
		l.file = file
	}
	var builder strings.Builder
	builder.WriteString(time.Now().UTC().Format(time.RFC3339))
	builder.WriteByte(' ')
	builder.WriteString("CLOSE")
	builder.WriteString(" ")
	builder.WriteString(ip)
	builder.WriteString(" ")
	builder.WriteString(user)
	builder.WriteString(" rx=")
	builder.WriteString(formatUint(rx))
	builder.WriteString(" tx=")
	builder.WriteString(formatUint(tx))
	builder.WriteByte('\n')
	_, _ = l.file.WriteString(builder.String())
}

func formatUint(value uint64) string {
	return strconv.FormatUint(value, 10)
}
```
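A minimal wiring sketch for this sink, assuming the `EventBus.Subscribe` API and the `EventLogin`/`EventClose` types shown in core_impl.go further down; the log path is illustrative:

```go
package proxy

// exampleAccessLogWiring is a hedged sketch, not part of the change:
// it shows how the sink pairs with the EventBus from core_impl.go below.
func exampleAccessLogWiring() {
	sink := newAccessLogSink("/var/log/proxy/access.log") // path is illustrative
	bus := NewEventBus()
	bus.Subscribe(EventLogin, sink.OnLogin) // emits "CONNECT <ip> <user> <agent>"
	bus.Subscribe(EventClose, sink.OnClose) // emits "CLOSE <ip> <user> rx=<n> tx=<n>"

	// ... dispatch EventLogin/EventClose as miners come and go, then:
	sink.Close()
}
```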
accesslog_test.go (new file, 102 lines)

```go
package proxy

import (
	"net"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"
)

func TestProxy_AccessLog_WritesLifecycleLines(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "access.log")

	cfg := &Config{
		Mode:          "nicehash",
		Workers:       WorkersByRigID,
		AccessLogFile: path,
		Bind:          []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:         []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	p, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected valid proxy, got error: %v", result.Error)
	}

	miner := &Miner{
		ip:    "10.0.0.1",
		user:  "WALLET",
		agent: "XMRig/6.21.0",
		rx:    512,
		tx:    4096,
		conn:  noopConn{},
		state: MinerStateReady,
		rpcID: "session",
	}
	p.events.Dispatch(Event{Type: EventLogin, Miner: miner})
	p.events.Dispatch(Event{Type: EventClose, Miner: miner})
	p.Stop()

	data, err := os.ReadFile(path)
	if err != nil {
		t.Fatalf("read access log: %v", err)
	}
	text := string(data)
	if !strings.Contains(text, "CONNECT 10.0.0.1 WALLET XMRig/6.21.0") {
		t.Fatalf("expected CONNECT line, got %q", text)
	}
	if !strings.Contains(text, "CLOSE 10.0.0.1 WALLET rx=512 tx=4096") {
		t.Fatalf("expected CLOSE line, got %q", text)
	}
}

func TestProxy_AccessLog_WritesFixedColumns(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "access.log")

	cfg := &Config{
		Mode:          "nicehash",
		Workers:       WorkersByRigID,
		AccessLogFile: path,
		Bind:          []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:         []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	p, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected valid proxy, got error: %v", result.Error)
	}

	miner := &Miner{
		ip:   "10.0.0.1",
		user: "WALLET",
		conn: noopConn{},
	}
	p.events.Dispatch(Event{Type: EventLogin, Miner: miner})
	p.events.Dispatch(Event{Type: EventClose, Miner: miner})
	p.Stop()

	data, err := os.ReadFile(path)
	if err != nil {
		t.Fatalf("read access log: %v", err)
	}
	text := string(data)
	if !strings.Contains(text, "CONNECT 10.0.0.1 WALLET") {
		t.Fatalf("expected CONNECT line without counters, got %q", text)
	}
	if !strings.Contains(text, "CLOSE 10.0.0.1 WALLET rx=0 tx=0") {
		t.Fatalf("expected CLOSE line with counters only, got %q", text)
	}
}

type noopConn struct{}

func (noopConn) Read([]byte) (int, error)         { return 0, os.ErrClosed }
func (noopConn) Write([]byte) (int, error)        { return 0, os.ErrClosed }
func (noopConn) Close() error                     { return nil }
func (noopConn) LocalAddr() net.Addr              { return nil }
func (noopConn) RemoteAddr() net.Addr             { return nil }
func (noopConn) SetDeadline(time.Time) error      { return nil }
func (noopConn) SetReadDeadline(time.Time) error  { return nil }
func (noopConn) SetWriteDeadline(time.Time) error { return nil }
```
api/router.go (197 lines changed)

```diff
@@ -1,12 +1,7 @@
-// Package api implements the HTTP monitoring endpoints for the proxy.
+// Package api mounts the monitoring endpoints on an HTTP mux.
 //
-// Registered routes:
-//
-//	GET /1/summary — aggregated proxy stats
-//	GET /1/workers — per-worker hashrate table
-//	GET /1/miners — per-connection state table
-//
-//	proxyapi.RegisterRoutes(apiRouter, p)
+//	mux := http.NewServeMux()
+//	api.RegisterRoutes(mux, proxyInstance)
 package api
 
 import (
@@ -16,172 +11,52 @@ import (
 	"dappco.re/go/proxy"
 )
 
-// Router matches the standard http.ServeMux registration shape.
-type Router interface {
+// RouteRegistrar accepts HTTP handler registrations.
+//
+//	mux := http.NewServeMux()
+//	api.RegisterRoutes(mux, proxyInstance)
+type RouteRegistrar interface {
 	HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request))
 }
 
-// SummaryResponse is the /1/summary JSON body.
-//
-//	{"version":"1.0.0","mode":"nicehash","hashrate":{"total":[...]}, ...}
-type SummaryResponse struct {
-	Version   string              `json:"version"`
-	Mode      string              `json:"mode"`
-	Hashrate  HashrateResponse    `json:"hashrate"`
-	Miners    MinersCountResponse `json:"miners"`
-	Workers   uint64              `json:"workers"`
-	Upstreams UpstreamResponse    `json:"upstreams"`
-	Results   ResultsResponse     `json:"results"`
-}
-
-// HashrateResponse carries the per-window hashrate array.
-//
-//	HashrateResponse{Total: [6]float64{12345.67, 11900.00, 12100.00, 11800.00, 12000.00, 12200.00}}
-type HashrateResponse struct {
-	Total [6]float64 `json:"total"`
-}
-
-// MinersCountResponse carries current and peak miner counts.
-//
-//	MinersCountResponse{Now: 142, Max: 200}
-type MinersCountResponse struct {
-	Now uint64 `json:"now"`
-	Max uint64 `json:"max"`
-}
-
-// UpstreamResponse carries pool connection state counts.
-//
-//	UpstreamResponse{Active: 1, Sleep: 0, Error: 0, Total: 1, Ratio: 142.0}
-type UpstreamResponse struct {
-	Active uint64  `json:"active"`
-	Sleep  uint64  `json:"sleep"`
-	Error  uint64  `json:"error"`
-	Total  uint64  `json:"total"`
-	Ratio  float64 `json:"ratio"`
-}
-
-// ResultsResponse carries share acceptance statistics.
-//
-//	ResultsResponse{Accepted: 4821, Rejected: 3, Invalid: 0, Expired: 12}
-type ResultsResponse struct {
-	Accepted    uint64     `json:"accepted"`
-	Rejected    uint64     `json:"rejected"`
-	Invalid     uint64     `json:"invalid"`
-	Expired     uint64     `json:"expired"`
-	AvgTime     uint32     `json:"avg_time"`
-	Latency     uint32     `json:"latency"`
-	HashesTotal uint64     `json:"hashes_total"`
-	Best        [10]uint64 `json:"best"`
-}
-
+//	mux := http.NewServeMux()
+//	api.RegisterRoutes(mux, proxyInstance)
+//	_ = mux
+//
 // RegisterRoutes wires the monitoring endpoints onto the supplied router.
-func RegisterRoutes(r Router, p *proxy.Proxy) {
-	if r == nil || p == nil {
+// The mounted routes are GET /1/summary, /1/workers, and /1/miners.
+func RegisterRoutes(router RouteRegistrar, p *proxy.Proxy) {
+	if router == nil || p == nil {
 		return
 	}
-	r.HandleFunc("/1/summary", func(w http.ResponseWriter, req *http.Request) {
-		writeJSON(w, summaryResponse(p))
-	})
-	r.HandleFunc("/1/workers", func(w http.ResponseWriter, req *http.Request) {
-		writeJSON(w, workersResponse(p))
-	})
-	r.HandleFunc("/1/miners", func(w http.ResponseWriter, req *http.Request) {
-		writeJSON(w, minersResponse(p))
-	})
+	registerJSONGetRoute(router, p, proxy.MonitoringRouteSummary, func() any { return p.SummaryDocument() })
+	registerJSONGetRoute(router, p, proxy.MonitoringRouteWorkers, func() any { return p.WorkersDocument() })
+	registerJSONGetRoute(router, p, proxy.MonitoringRouteMiners, func() any { return p.MinersDocument() })
 }
 
+func registerJSONGetRoute(router RouteRegistrar, proxyInstance *proxy.Proxy, pattern string, renderDocument func() any) {
+	router.HandleFunc(pattern, func(w http.ResponseWriter, request *http.Request) {
+		if status, ok := allowMonitoringRequest(proxyInstance, request); !ok {
+			switch status {
+			case http.StatusMethodNotAllowed:
+				w.Header().Set("Allow", http.MethodGet)
+			case http.StatusUnauthorized:
+				w.Header().Set("WWW-Authenticate", "Bearer")
+			}
+			w.WriteHeader(status)
+			return
+		}
+		writeJSON(w, renderDocument())
+	})
+}
+
-func summaryResponse(p *proxy.Proxy) SummaryResponse {
-	summary := p.Summary()
-	now, max := p.MinerCount()
-	upstreams := p.Upstreams()
-	return SummaryResponse{
-		Version: "1.0.0",
-		Mode:    p.Mode(),
-		Hashrate: HashrateResponse{
-			Total: summary.Hashrate,
-		},
-		Miners: MinersCountResponse{
-			Now: now,
-			Max: max,
-		},
-		Workers: uint64(len(p.WorkerRecords())),
-		Upstreams: UpstreamResponse{
-			Active: upstreams.Active,
-			Sleep:  upstreams.Sleep,
-			Error:  upstreams.Error,
-			Total:  upstreams.Total,
-			Ratio:  ratio(now, upstreams.Total),
-		},
-		Results: ResultsResponse{
-			Accepted:    summary.Accepted,
-			Rejected:    summary.Rejected,
-			Invalid:     summary.Invalid,
-			Expired:     summary.Expired,
-			AvgTime:     summary.AvgTime,
-			Latency:     summary.AvgLatency,
-			HashesTotal: summary.Hashes,
-			Best:        summary.TopDiff,
-		},
-	}
-}
-
-func workersResponse(p *proxy.Proxy) any {
-	records := p.WorkerRecords()
-	rows := make([]any, 0, len(records))
-	for _, record := range records {
-		rows = append(rows, []any{
-			record.Name,
-			record.LastIP,
-			record.Connections,
-			record.Accepted,
-			record.Rejected,
-			record.Invalid,
-			record.Hashes,
-			record.LastHashAt.Unix(),
-			record.Hashrate(60),
-			record.Hashrate(600),
-			record.Hashrate(3600),
-			record.Hashrate(43200),
-			record.Hashrate(86400),
-		})
-	}
-	return map[string]any{
-		"mode":    string(p.WorkersMode()),
-		"workers": rows,
-	}
-}
-
-func minersResponse(p *proxy.Proxy) any {
-	records := p.MinerSnapshots()
-	rows := make([]any, 0, len(records))
-	for _, miner := range records {
-		rows = append(rows, []any{
-			miner.ID,
-			miner.IP,
-			miner.TX,
-			miner.RX,
-			miner.State,
-			miner.Diff,
-			miner.User,
-			miner.Password,
-			miner.RigID,
-			miner.Agent,
-		})
-	}
-	return map[string]any{
-		"format": []string{"id", "ip", "tx", "rx", "state", "diff", "user", "password", "rig_id", "agent"},
-		"miners": rows,
-	}
-}
+func allowMonitoringRequest(proxyInstance *proxy.Proxy, request *http.Request) (int, bool) {
+	if proxyInstance == nil {
+		return http.StatusServiceUnavailable, false
+	}
+	return proxyInstance.AllowMonitoringRequest(request)
+}
 
 func writeJSON(w http.ResponseWriter, payload any) {
 	w.Header().Set("Content-Type", "application/json")
 	_ = json.NewEncoder(w).Encode(payload)
 }
-
-func ratio(now, total uint64) float64 {
-	if total == 0 {
-		return 0
-	}
-	return float64(now) / float64(total)
-}
```
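A hedged sketch of mounting these routes on a standard mux, following the package doc comment above; the import path for the api package and the listen address are assumptions:

```go
package main

import (
	"log"
	"net/http"

	"dappco.re/go/proxy"
	proxyapi "dappco.re/go/proxy/api" // assumed import path for the api package
)

func main() {
	cfg, result := proxy.LoadConfig("config.json")
	if !result.OK {
		log.Fatal(result.Error)
	}
	p, result := proxy.New(cfg)
	if !result.OK {
		log.Fatal(result.Error)
	}

	// http.ServeMux satisfies RouteRegistrar via its HandleFunc method.
	mux := http.NewServeMux()
	proxyapi.RegisterRoutes(mux, p)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux)) // address is illustrative
}
```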
api/router_test.go (new file, 201 lines)

```go
package api

import (
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"

	"dappco.re/go/proxy"
)

func TestRegisterRoutes_GETSummary_Good(t *testing.T) {
	config := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	p, result := proxy.New(config)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}

	router := http.NewServeMux()
	RegisterRoutes(router, p)

	request := httptest.NewRequest(http.MethodGet, "/1/summary", nil)
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)

	if recorder.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, recorder.Code)
	}

	var document proxy.SummaryDocument
	if err := json.Unmarshal(recorder.Body.Bytes(), &document); err != nil {
		t.Fatalf("decode summary document: %v", err)
	}
	if document.Mode != "nicehash" {
		t.Fatalf("expected mode %q, got %q", "nicehash", document.Mode)
	}
	if document.Version != "1.0.0" {
		t.Fatalf("expected version %q, got %q", "1.0.0", document.Version)
	}
}

func TestRegisterRoutes_POSTSummary_Bad(t *testing.T) {
	config := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		HTTP: proxy.HTTPConfig{
			Restricted: true,
		},
	}
	p, result := proxy.New(config)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}

	router := http.NewServeMux()
	RegisterRoutes(router, p)

	request := httptest.NewRequest(http.MethodPost, "/1/summary", nil)
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)

	if recorder.Code != http.StatusMethodNotAllowed {
		t.Fatalf("expected %d, got %d", http.StatusMethodNotAllowed, recorder.Code)
	}
}

func TestRegisterRoutes_POSTSummary_Unrestricted_Good(t *testing.T) {
	config := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	p, result := proxy.New(config)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}

	router := http.NewServeMux()
	RegisterRoutes(router, p)

	request := httptest.NewRequest(http.MethodPost, "/1/summary", nil)
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)

	if recorder.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, recorder.Code)
	}

	var document proxy.SummaryDocument
	if err := json.Unmarshal(recorder.Body.Bytes(), &document); err != nil {
		t.Fatalf("decode summary document: %v", err)
	}
	if document.Mode != "nicehash" {
		t.Fatalf("expected mode %q, got %q", "nicehash", document.Mode)
	}
}

func TestRegisterRoutes_GETMiners_Ugly(t *testing.T) {
	config := &proxy.Config{
		Mode:    "simple",
		Workers: proxy.WorkersDisabled,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	p, result := proxy.New(config)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}

	router := http.NewServeMux()
	RegisterRoutes(router, p)

	request := httptest.NewRequest(http.MethodGet, "/1/miners", nil)
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)

	if recorder.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, recorder.Code)
	}

	var document proxy.MinersDocument
	if err := json.Unmarshal(recorder.Body.Bytes(), &document); err != nil {
		t.Fatalf("decode miners document: %v", err)
	}
	if len(document.Format) != 10 {
		t.Fatalf("expected 10 miner columns, got %d", len(document.Format))
	}
	if len(document.Miners) != 0 {
		t.Fatalf("expected no miners in a new proxy, got %d", len(document.Miners))
	}
}

func TestRegisterRoutes_GETSummaryAuthRequired_Bad(t *testing.T) {
	config := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		HTTP: proxy.HTTPConfig{
			Enabled:     true,
			Restricted:  true,
			AccessToken: "secret",
		},
	}
	p, result := proxy.New(config)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}

	router := http.NewServeMux()
	RegisterRoutes(router, p)

	request := httptest.NewRequest(http.MethodGet, "/1/summary", nil)
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)

	if recorder.Code != http.StatusUnauthorized {
		t.Fatalf("expected %d, got %d", http.StatusUnauthorized, recorder.Code)
	}
	if got := recorder.Header().Get("WWW-Authenticate"); got != "Bearer" {
		t.Fatalf("expected bearer challenge, got %q", got)
	}
}

func TestRegisterRoutes_GETSummaryAuthGranted_Ugly(t *testing.T) {
	config := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		HTTP: proxy.HTTPConfig{
			Enabled:     true,
			Restricted:  true,
			AccessToken: "secret",
		},
	}
	p, result := proxy.New(config)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}

	router := http.NewServeMux()
	RegisterRoutes(router, p)

	request := httptest.NewRequest(http.MethodGet, "/1/summary", nil)
	request.Header.Set("Authorization", "Bearer secret")
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)

	if recorder.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, recorder.Code)
	}
}
```
api_rows.go (new file, 113 lines)

```go
package proxy

const (
	// MonitoringRouteSummary documents the summary endpoint path.
	//
	//	http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteSummary)
	MonitoringRouteSummary = "/1/summary"

	// MonitoringRouteWorkers documents the workers endpoint path.
	//
	//	http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteWorkers)
	MonitoringRouteWorkers = "/1/workers"

	// MonitoringRouteMiners documents the miners endpoint path.
	//
	//	http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteMiners)
	MonitoringRouteMiners = "/1/miners"

	// SummaryDocumentVersion is the monitoring API version.
	//
	//	doc := proxy.SummaryDocument{Version: proxy.SummaryDocumentVersion}
	SummaryDocumentVersion = "1.0.0"
)

var (
	// MinersDocumentFormat defines the fixed /1/miners column order.
	//
	//	doc := proxy.MinersDocument{Format: append([]string(nil), proxy.MinersDocumentFormat...)}
	MinersDocumentFormat = []string{"id", "ip", "tx", "rx", "state", "diff", "user", "password", "rig_id", "agent"}

	workerHashrateWindows = [5]int{60, 600, 3600, 43200, 86400}
)

// WorkerRow{"rig-alpha", "10.0.0.1", 1, 10, 0, 0, 10000, 1712232000, 1.0, 1.0, 1.0, 1.0, 1.0}
type WorkerRow [13]any

// MinerRow{1, "10.0.0.1:49152", 4096, 512, 2, 10000, "WALLET", maskedPassword, "rig-alpha", "XMRig/6.21.0"}
type MinerRow [10]any

// doc := p.SummaryDocument()
// _ = doc.Results.Accepted
// _ = doc.Upstreams.Ratio
type SummaryDocument struct {
	Version         string                           `json:"version"`
	Mode            string                           `json:"mode"`
	Hashrate        HashrateDocument                 `json:"hashrate"`
	Miners          MinersCountDocument              `json:"miners"`
	Workers         uint64                           `json:"workers"`
	Upstreams       UpstreamDocument                 `json:"upstreams"`
	Results         ResultsDocument                  `json:"results"`
	CustomDiffStats map[uint64]CustomDiffBucketStats `json:"custom_diff_stats,omitempty"`
}

// SummaryResponse is the RFC name for SummaryDocument.
type SummaryResponse = SummaryDocument

// HashrateDocument{Total: [6]float64{12345.67, 11900.00, 12100.00, 11800.00, 12000.00, 12200.00}}
type HashrateDocument struct {
	Total [6]float64 `json:"total"`
}

// HashrateResponse is the RFC name for HashrateDocument.
type HashrateResponse = HashrateDocument

// MinersCountDocument{Now: 142, Max: 200}
type MinersCountDocument struct {
	Now uint64 `json:"now"`
	Max uint64 `json:"max"`
}

// MinersCountResponse is the RFC name for MinersCountDocument.
type MinersCountResponse = MinersCountDocument

// UpstreamDocument{Active: 1, Sleep: 0, Error: 0, Total: 1, Ratio: 142.0}
type UpstreamDocument struct {
	Active uint64  `json:"active"`
	Sleep  uint64  `json:"sleep"`
	Error  uint64  `json:"error"`
	Total  uint64  `json:"total"`
	Ratio  float64 `json:"ratio"`
}

// UpstreamResponse is the RFC name for UpstreamDocument.
type UpstreamResponse = UpstreamDocument

// ResultsDocument{Accepted: 4821, Rejected: 3, Invalid: 0, Expired: 12}
type ResultsDocument struct {
	Accepted    uint64     `json:"accepted"`
	Rejected    uint64     `json:"rejected"`
	Invalid     uint64     `json:"invalid"`
	Expired     uint64     `json:"expired"`
	AvgTime     uint32     `json:"avg_time"`
	Latency     uint32     `json:"latency"`
	HashesTotal uint64     `json:"hashes_total"`
	Best        [10]uint64 `json:"best"`
}

// ResultsResponse is the RFC name for ResultsDocument.
type ResultsResponse = ResultsDocument

// doc := p.WorkersDocument()
// _ = doc.Workers[0][0]
type WorkersDocument struct {
	Mode    string      `json:"mode"`
	Workers []WorkerRow `json:"workers"`
}

// doc := p.MinersDocument()
// _ = doc.Miners[0][7]
type MinersDocument struct {
	Format []string   `json:"format"`
	Miners []MinerRow `json:"miners"`
}
```
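A hedged sketch of consuming these documents over HTTP, reusing the route constant and the `SummaryDocument` shape defined above; the monitoring address is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	"dappco.re/go/proxy"
)

func main() {
	// Address is illustrative; the route constant and document shape
	// come from api_rows.go above.
	resp, err := http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteSummary)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var doc proxy.SummaryDocument
	if err := json.NewDecoder(resp.Body).Decode(&doc); err != nil {
		log.Fatal(err)
	}
	// Ratio relates connected miners to upstream pool connections
	// (142 miners over 1 upstream in the UpstreamDocument example).
	fmt.Printf("v%s accepted=%d ratio=%.1f\n", doc.Version, doc.Results.Accepted, doc.Upstreams.Ratio)
}
```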
config.go (26 lines changed)

```diff
@@ -1,9 +1,14 @@
 package proxy
 
-// Config is the top-level proxy configuration, loaded from JSON and hot-reloaded on change.
+// Config is the top-level proxy configuration loaded from JSON.
 //
-//	cfg, result := proxy.LoadConfig("config.json")
-//	if !result.OK { log.Fatal(result.Error) }
+//	cfg := &proxy.Config{
+//		Mode:    "nicehash",
+//		Bind:    []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}},
+//		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
+//		Watch:   true,
+//		Workers: proxy.WorkersByRigID,
+//	}
 type Config struct {
 	Mode string     `json:"mode"` // "nicehash" or "simple"
 	Bind []BindAddr `json:"bind"` // listen addresses
@@ -16,11 +21,13 @@ type Config struct {
 	AlgoExtension bool        `json:"algo-ext"` // forward algo field in jobs
 	Workers       WorkersMode `json:"workers"`  // "rig-id", "user", "password", "agent", "ip", "false"
+	AccessLogFile string      `json:"access-log-file"` // "" = disabled
+	ShareLogFile  string      `json:"share-log-file"`  // "" = disabled
 	ReuseTimeout  int         `json:"reuse-timeout"` // seconds; simple mode upstream reuse
 	Retries       int         `json:"retries"`       // pool reconnect attempts
 	RetryPause    int         `json:"retry-pause"`   // seconds between retries
 	Watch         bool        `json:"watch"`         // hot-reload on file change
 	RateLimit     RateLimit   `json:"rate-limit"`    // per-IP connection rate limit
 	configPath    string
 }
 
 // BindAddr is one TCP listen endpoint.
@@ -47,7 +54,7 @@ type PoolConfig struct {
 	Enabled bool `json:"enabled"`
 }
 
-// TLSConfig controls inbound TLS on bind addresses that have TLS: true.
+// TLSConfig controls inbound TLS for miner listeners.
 //
 //	proxy.TLSConfig{Enabled: true, CertFile: "/etc/proxy/cert.pem", KeyFile: "/etc/proxy/key.pem"}
 type TLSConfig struct {
@@ -69,15 +76,20 @@ type HTTPConfig struct {
 	Restricted bool `json:"restricted"` // true = read-only GET only
 }
 
-// RateLimit controls per-IP connection rate limiting using a token bucket.
+// RateLimit caps connection attempts per source IP.
 //
-//	proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300}
+//	limiter := proxy.NewRateLimiter(proxy.RateLimit{
+//		MaxConnectionsPerMinute: 30,
+//		BanDurationSeconds:      300,
+//	})
 type RateLimit struct {
 	MaxConnectionsPerMinute int `json:"max-connections-per-minute"` // 0 = disabled
 	BanDurationSeconds      int `json:"ban-duration"`               // 0 = no ban
 }
 
-// WorkersMode controls which login field becomes the worker name.
+// WorkersMode picks the login field used as the worker name.
+//
+//	cfg.Workers = proxy.WorkersByRigID
 type WorkersMode string
 
 const (
```
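Read together with the JSON tags above and the tests below, a `config.json` along these lines would exercise the new fields; the values are illustrative and keys with zero values can be omitted:

```json
{
  "mode": "nicehash",
  "workers": "rig-id",
  "bind": [{ "host": "0.0.0.0", "port": 3333 }],
  "pools": [{ "url": "pool.example:3333", "enabled": true }],
  "access-log-file": "/var/log/proxy/access.log",
  "share-log-file": "",
  "watch": true,
  "rate-limit": { "max-connections-per-minute": 30, "ban-duration": 300 }
}
```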
config_load_test.go (new file, 63 lines)

```go
package proxy

import (
	"os"
	"path/filepath"
	"testing"
)

func TestConfig_LoadConfig_Good(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.json")
	data := []byte(`{"mode":"nicehash","workers":"rig-id","bind":[{"host":"0.0.0.0","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
	if err := os.WriteFile(path, data, 0o600); err != nil {
		t.Fatalf("expected config file write to succeed: %v", err)
	}

	cfg, result := LoadConfig(path)
	if !result.OK {
		t.Fatalf("expected load to succeed, got error: %v", result.Error)
	}
	if cfg == nil {
		t.Fatal("expected config to be returned")
	}
	if got := cfg.Mode; got != "nicehash" {
		t.Fatalf("expected mode to round-trip, got %q", got)
	}
	if got := cfg.configPath; got != path {
		t.Fatalf("expected config path to be recorded, got %q", got)
	}
}

func TestConfig_LoadConfig_Bad(t *testing.T) {
	cfg, result := LoadConfig(filepath.Join(t.TempDir(), "missing.json"))
	if result.OK {
		t.Fatalf("expected missing config file to fail, got cfg=%+v", cfg)
	}
	if cfg != nil {
		t.Fatalf("expected no config on read failure, got %+v", cfg)
	}
}

func TestConfig_LoadConfig_Ugly(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.json")
	data := []byte(`{"mode":"invalid","workers":"rig-id","bind":[],"pools":[]}`)
	if err := os.WriteFile(path, data, 0o600); err != nil {
		t.Fatalf("expected config file write to succeed: %v", err)
	}

	cfg, result := LoadConfig(path)
	if !result.OK {
		t.Fatalf("expected syntactically valid JSON to load, got error: %v", result.Error)
	}
	if cfg == nil {
		t.Fatal("expected config to be returned")
	}
	if got := cfg.Mode; got != "invalid" {
		t.Fatalf("expected invalid mode value to be preserved, got %q", got)
	}
	if validation := cfg.Validate(); validation.OK {
		t.Fatal("expected semantic validation to fail separately from loading")
	}
}
```
config_test.go (new file, 90 lines)

```go
package proxy

import "testing"

func TestConfig_Validate_Good(t *testing.T) {
	cfg := &Config{
		Mode:    "nicehash",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}

	if result := cfg.Validate(); !result.OK {
		t.Fatalf("expected valid config, got error: %v", result.Error)
	}
}

func TestConfig_Validate_Bad(t *testing.T) {
	cfg := &Config{
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}

	if result := cfg.Validate(); result.OK {
		t.Fatalf("expected missing mode to fail validation")
	}
}

func TestConfig_Validate_Ugly(t *testing.T) {
	cfg := &Config{
		Mode:    "nicehash",
		Workers: WorkersMode("unknown"),
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools:   []PoolConfig{{URL: "", Enabled: true}},
	}

	if result := cfg.Validate(); result.OK {
		t.Fatalf("expected invalid workers and empty pool url to fail validation")
	}
}

func TestConfig_Validate_NoEnabledPool_Good(t *testing.T) {
	cfg := &Config{
		Mode:    "simple",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools: []PoolConfig{
			{URL: "pool-a.example:3333", Enabled: false},
			{URL: "pool-b.example:4444", Enabled: false},
		},
	}

	if result := cfg.Validate(); !result.OK {
		t.Fatalf("expected config with no enabled pools to be valid, got error: %v", result.Error)
	}
}

func TestProxy_New_WhitespaceMode_Good(t *testing.T) {
	originalFactory, hadFactory := splitterFactoryForMode("nicehash")
	if hadFactory {
		t.Cleanup(func() {
			RegisterSplitterFactory("nicehash", originalFactory)
		})
	}

	called := false
	RegisterSplitterFactory("nicehash", func(*Config, *EventBus) Splitter {
		called = true
		return &noopSplitter{}
	})

	cfg := &Config{
		Mode:    " nicehash ",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}

	p, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected whitespace-padded mode to remain valid, got error: %v", result.Error)
	}
	if !called {
		t.Fatalf("expected trimmed mode lookup to invoke the registered splitter factory")
	}
	if _, ok := p.splitter.(*noopSplitter); !ok {
		t.Fatalf("expected test splitter to be wired, got %#v", p.splitter)
	}
}
```
configwatcher_test.go (new file, 136 lines)

```go
package proxy

import (
	"os"
	"path/filepath"
	"testing"
	"time"
)

func TestConfigWatcher_New_Good(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.json")
	if err := os.WriteFile(path, []byte(`{"mode":"nicehash","workers":"false","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`), 0o644); err != nil {
		t.Fatalf("write config file: %v", err)
	}

	watcher := NewConfigWatcher(path, func(*Config) {})
	if watcher == nil {
		t.Fatal("expected watcher")
	}
	if watcher.lastModifiedAt.IsZero() {
		t.Fatal("expected last modification time to be initialised from the file")
	}
}

func TestConfigWatcher_Start_Good(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.json")
	initial := []byte(`{"mode":"nicehash","workers":"false","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
	if err := os.WriteFile(path, initial, 0o644); err != nil {
		t.Fatalf("write initial config file: %v", err)
	}

	updates := make(chan *Config, 1)
	watcher := NewConfigWatcher(path, func(cfg *Config) {
		select {
		case updates <- cfg:
		default:
		}
	})
	if watcher == nil {
		t.Fatal("expected watcher")
	}
	watcher.Start()
	defer watcher.Stop()

	updated := []byte(`{"mode":"simple","workers":"user","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
	if err := os.WriteFile(path, updated, 0o644); err != nil {
		t.Fatalf("write updated config file: %v", err)
	}
	now := time.Now()
	if err := os.Chtimes(path, now, now.Add(2*time.Second)); err != nil {
		t.Fatalf("touch updated config file: %v", err)
	}

	select {
	case cfg := <-updates:
		if cfg == nil {
			t.Fatal("expected config update")
		}
		if got := cfg.Mode; got != "simple" {
			t.Fatalf("expected updated mode, got %q", got)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("expected watcher to reload updated config")
	}
}

// TestConfigWatcher_Start_Bad verifies a watcher with a nonexistent path does not panic
// and does not call the onChange callback.
//
//	watcher := proxy.NewConfigWatcher("/nonexistent/config.json", func(cfg *proxy.Config) {
//		// never called
//	})
//	watcher.Start()
//	watcher.Stop()
func TestConfigWatcher_Start_Bad(t *testing.T) {
	called := make(chan struct{}, 1)
	watcher := NewConfigWatcher("/nonexistent/path/config.json", func(*Config) {
		select {
		case called <- struct{}{}:
		default:
		}
	})
	if watcher == nil {
		t.Fatal("expected watcher even for a nonexistent path")
	}
	watcher.Start()
	defer watcher.Stop()

	select {
	case <-called:
		t.Fatal("expected no callback for nonexistent config file")
	case <-time.After(2 * time.Second):
		// expected: no update fired
	}
}

func TestConfigWatcher_Start_Ugly(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.json")
	initial := []byte(`{"mode":"nicehash","workers":"false","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
	if err := os.WriteFile(path, initial, 0o644); err != nil {
		t.Fatalf("write initial config file: %v", err)
	}

	updates := make(chan *Config, 1)
	watcher := NewConfigWatcher(path, func(cfg *Config) {
		select {
		case updates <- cfg:
		default:
		}
	})
	if watcher == nil {
		t.Fatal("expected watcher")
	}
	watcher.Start()
	defer watcher.Stop()

	now := time.Now()
	if err := os.Chtimes(path, now, now.Add(2*time.Second)); err != nil {
		t.Fatalf("touch config file: %v", err)
	}

	select {
	case cfg := <-updates:
		if cfg == nil {
			t.Fatal("expected config update")
		}
		if got := cfg.Mode; got != "nicehash" {
			t.Fatalf("expected unchanged mode, got %q", got)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("expected watcher to reload touched config")
	}
}
```
core_impl.go (369 lines changed)

```diff
@@ -3,9 +3,9 @@ package proxy
 import (
 	"crypto/rand"
 	"crypto/sha256"
 	"encoding/binary"
+	"encoding/hex"
 	"encoding/json"
-	"errors"
 	"io"
 	"math"
 	"net"
@@ -16,81 +16,127 @@ import (
 	"time"
 )
 
-// Result is a small success/error carrier used by constructors and loaders.
+// Result is the success/error carrier used by constructors and loaders.
+//
+//	cfg, result := proxy.LoadConfig("config.json")
+//	if !result.OK {
+//		return result.Error
+//	}
 type Result struct {
 	OK    bool
 	Error error
 }
 
-func successResult() Result {
+func newSuccessResult() Result {
 	return Result{OK: true}
 }
 
-func errorResult(err error) Result {
+func newErrorResult(err error) Result {
 	return Result{OK: false, Error: err}
 }
 
-var splitterRegistryMu sync.RWMutex
-var splitterRegistry = map[string]func(*Config, *EventBus) Splitter{}
+var splitterFactoriesMu sync.RWMutex
+var splitterFactoriesByMode = map[string]func(*Config, *EventBus) Splitter{}
 
-// RegisterSplitterFactory registers a mode-specific splitter constructor.
-// Packages such as splitter/nicehash and splitter/simple call this from init.
+// RegisterSplitterFactory installs the constructor used for one proxy mode.
+//
+//	proxy.RegisterSplitterFactory("simple", func(cfg *proxy.Config, bus *proxy.EventBus) proxy.Splitter {
+//		return simple.NewSimpleSplitter(cfg, bus, nil)
+//	})
 func RegisterSplitterFactory(mode string, factory func(*Config, *EventBus) Splitter) {
-	splitterRegistryMu.Lock()
-	defer splitterRegistryMu.Unlock()
-	splitterRegistry[strings.ToLower(mode)] = factory
+	splitterFactoriesMu.Lock()
+	defer splitterFactoriesMu.Unlock()
+	splitterFactoriesByMode[strings.ToLower(strings.TrimSpace(mode))] = factory
 }
 
-func getSplitterFactory(mode string) (func(*Config, *EventBus) Splitter, bool) {
-	splitterRegistryMu.RLock()
-	defer splitterRegistryMu.RUnlock()
-	factory, ok := splitterRegistry[strings.ToLower(mode)]
+func splitterFactoryForMode(mode string) (func(*Config, *EventBus) Splitter, bool) {
+	splitterFactoriesMu.RLock()
+	defer splitterFactoriesMu.RUnlock()
+	factory, ok := splitterFactoriesByMode[strings.ToLower(strings.TrimSpace(mode))]
 	return factory, ok
 }
 
-// LoadConfig reads and unmarshals a JSON config file.
+//	cfg, result := proxy.LoadConfig("/etc/proxy.json")
+//
+//	if !result.OK {
+//		return result.Error
+//	}
 func LoadConfig(path string) (*Config, Result) {
 	data, err := os.ReadFile(path)
 	if err != nil {
-		return nil, errorResult(err)
+		return nil, newErrorResult(NewScopedError("proxy.config", "read config failed", err))
 	}
 
-	cfg := &Config{}
-	if err := json.Unmarshal(data, cfg); err != nil {
-		return nil, errorResult(err)
+	config := &Config{}
+	if err := json.Unmarshal(data, config); err != nil {
+		return nil, newErrorResult(NewScopedError("proxy.config", "parse config failed", err))
 	}
 
-	if cfg.Mode == "" {
-		cfg.Mode = "nicehash"
-	}
-	return cfg, cfg.Validate()
+	config.configPath = path
+	return config, newSuccessResult()
 }
 
-// Validate checks that mandatory bind and pool settings are present.
+//	cfg := &proxy.Config{
+//		Mode:    "nicehash",
+//		Bind:    []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}},
+//		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
+//		Workers: proxy.WorkersByRigID,
+//	}
+//
+//	if result := cfg.Validate(); !result.OK {
+//		return result
+//	}
 func (c *Config) Validate() Result {
 	if c == nil {
-		return errorResult(errors.New("config is nil"))
+		return newErrorResult(NewScopedError("proxy.config", "config is nil", nil))
 	}
+	if !isValidMode(c.Mode) {
+		return newErrorResult(NewScopedError("proxy.config", "mode must be \"nicehash\" or \"simple\"", nil))
+	}
+	if !isValidWorkersMode(c.Workers) {
+		return newErrorResult(NewScopedError("proxy.config", "workers must be one of \"rig-id\", \"user\", \"password\", \"agent\", \"ip\", or \"false\"", nil))
+	}
 	if len(c.Bind) == 0 {
-		return errorResult(errors.New("bind list is empty"))
+		return newErrorResult(NewScopedError("proxy.config", "bind list is empty", nil))
 	}
 	if len(c.Pools) == 0 {
-		return errorResult(errors.New("pool list is empty"))
+		return newErrorResult(NewScopedError("proxy.config", "pool list is empty", nil))
 	}
 	for _, pool := range c.Pools {
 		if pool.Enabled && strings.TrimSpace(pool.URL) == "" {
-			return errorResult(errors.New("enabled pool url is empty"))
+			return newErrorResult(NewScopedError("proxy.config", "enabled pool url is empty", nil))
 		}
 	}
-	return successResult()
+	return newSuccessResult()
 }
 
-// NewEventBus creates an empty synchronous event dispatcher.
+func isValidMode(mode string) bool {
+	switch strings.ToLower(strings.TrimSpace(mode)) {
+	case "nicehash", "simple":
+		return true
+	default:
+		return false
+	}
+}
+
+func isValidWorkersMode(mode WorkersMode) bool {
+	switch WorkersMode(strings.TrimSpace(string(mode))) {
+	case WorkersByRigID, WorkersByUser, WorkersByPass, WorkersByAgent, WorkersByIP, WorkersDisabled:
+		return true
+	default:
+		return false
+	}
+}
+
+//	bus := proxy.NewEventBus()
+//
+//	bus.Subscribe(proxy.EventLogin, func(e proxy.Event) {
+//		_ = e.Miner
+//	})
 func NewEventBus() *EventBus {
 	return &EventBus{listeners: make(map[EventType][]EventHandler)}
 }
 
-// Subscribe registers a handler for the given event type.
+//	bus.Subscribe(proxy.EventAccept, stats.OnAccept)
 func (b *EventBus) Subscribe(t EventType, h EventHandler) {
 	if b == nil || h == nil {
 		return
```
```diff
@@ -103,7 +149,7 @@ func (b *EventBus) Subscribe(t EventType, h EventHandler) {
 	b.listeners[t] = append(b.listeners[t], h)
 }
 
-// Dispatch calls all registered handlers for the event's type.
+//	bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: miner})
 func (b *EventBus) Dispatch(e Event) {
 	if b == nil {
 		return
```
```diff
@@ -112,16 +158,69 @@ func (b *EventBus) Dispatch(e Event) {
 	handlers := append([]EventHandler(nil), b.listeners[e.Type]...)
 	b.mu.RUnlock()
 	for _, handler := range handlers {
-		handler(e)
+		func() {
+			defer func() {
+				_ = recover()
+			}()
+			handler(e)
+		}()
 	}
 }
 
+type shareSinkGroup struct {
+	sinks []ShareSink
+}
+
+func newShareSinkGroup(sinks ...ShareSink) *shareSinkGroup {
+	group := &shareSinkGroup{sinks: make([]ShareSink, 0, len(sinks))}
+	for _, sink := range sinks {
+		if sink != nil {
+			group.sinks = append(group.sinks, sink)
+		}
+	}
+	return group
+}
+
+func (g *shareSinkGroup) OnAccept(e Event) {
+	if g == nil {
+		return
+	}
+	for _, sink := range g.sinks {
+		func() {
+			defer func() {
+				_ = recover()
+			}()
+			sink.OnAccept(e)
+		}()
+	}
+}
+
+func (g *shareSinkGroup) OnReject(e Event) {
+	if g == nil {
+		return
+	}
+	for _, sink := range g.sinks {
+		func() {
+			defer func() {
+				_ = recover()
+			}()
+			sink.OnReject(e)
+		}()
+	}
+}
+
 // IsValid returns true when the job contains a blob and job id.
+//
+//	if !job.IsValid() {
+//		return
+//	}
 func (j Job) IsValid() bool {
 	return j.Blob != "" && j.JobID != ""
 }
 
 // BlobWithFixedByte replaces the blob byte at position 39 with fixedByte.
+//
+//	partitioned := job.BlobWithFixedByte(0x2A)
 func (j Job) BlobWithFixedByte(fixedByte uint8) string {
 	if len(j.Blob) < 80 {
 		return j.Blob
```
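The recover wrappers added above isolate each handler and sink, so one misbehaving subscriber cannot abort dispatch for the rest. A hedged, standalone sketch of that pattern:

```go
package main

import "fmt"

// Sketch of the panic-isolation pattern used by Dispatch and
// shareSinkGroup above: each callback runs inside its own closure
// with a deferred recover, so the loop always reaches later callbacks.
func main() {
	handlers := []func(){
		func() { panic("bad handler") },
		func() { fmt.Println("still reached") },
	}
	for _, handler := range handlers {
		func() {
			defer func() {
				_ = recover() // swallow the panic; dispatch continues
			}()
			handler()
		}()
	}
}
```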
```diff
@@ -134,7 +233,9 @@ func (j Job) BlobWithFixedByte(fixedByte uint8) string {
 	return string(blob)
 }
 
-// DifficultyFromTarget converts the target to a rough integer difficulty.
+// DifficultyFromTarget converts the 8-char little-endian target into a difficulty.
+//
+//	diff := job.DifficultyFromTarget()
 func (j Job) DifficultyFromTarget() uint64 {
 	if len(j.Target) != 8 {
 		return 0
```
```diff
@@ -147,46 +248,86 @@ func (j Job) DifficultyFromTarget() uint64 {
 	if target == 0 {
 		return 0
 	}
-	return uint64(math.MaxUint32 / uint64(target))
+	return uint64(math.MaxUint32) / uint64(target)
 }
 
+// targetFromDifficulty converts a difficulty into the 8-char little-endian hex target.
+//
+//	target := targetFromDifficulty(10000) // "b98d0600"
+func targetFromDifficulty(diff uint64) string {
+	if diff <= 1 {
+		return "ffffffff"
+	}
+	maxTarget := uint64(math.MaxUint32)
+	target := (maxTarget + diff - 1) / diff
+	if target == 0 {
+		target = 1
+	}
+	if target > maxTarget {
+		target = maxTarget
+	}
+	var raw [4]byte
+	binary.LittleEndian.PutUint32(raw[:], uint32(target))
+	return hex.EncodeToString(raw[:])
+}
+
+// EffectiveShareDifficulty returns the share difficulty capped by the miner's custom diff.
+// If no custom diff is set or the pool diff is already lower, the pool diff is returned.
+//
+//	diff := proxy.EffectiveShareDifficulty(job, miner) // 25000 when customDiff < poolDiff
+func EffectiveShareDifficulty(job Job, miner *Miner) uint64 {
+	diff := job.DifficultyFromTarget()
+	if miner == nil || miner.customDiff == 0 || diff == 0 || diff <= miner.customDiff {
+		return diff
+	}
+	return miner.customDiff
+}
+
 // NewCustomDiff creates a login-time custom difficulty resolver.
+//
+//	resolver := proxy.NewCustomDiff(50000)
+//	resolver.OnLogin(proxy.Event{Miner: miner})
 func NewCustomDiff(globalDiff uint64) *CustomDiff {
-	return &CustomDiff{globalDiff: globalDiff}
+	cd := &CustomDiff{}
+	cd.globalDiff.Store(globalDiff)
+	return cd
 }
 
-// OnLogin parses +N suffixes and applies global difficulty fallbacks.
+// OnLogin normalises the login user once during handshake.
+//
+//	cd.OnLogin(proxy.Event{Miner: &proxy.Miner{user: "WALLET+50000"}})
 func (cd *CustomDiff) OnLogin(e Event) {
 	if cd == nil || e.Miner == nil {
 		return
 	}
-	miner := e.Miner
-	user := miner.user
-	plus := strings.LastIndex(user, "+")
-	if plus >= 0 && plus < len(user)-1 {
-		if parsed, err := strconv.ParseUint(user[plus+1:], 10, 64); err == nil {
-			miner.user = user[:plus]
-			miner.customDiff = parsed
-		}
-	}
-	if cd.globalDiff > 0 {
-		miner.customDiff = cd.globalDiff
-	}
+	if e.Miner.customDiffResolved {
+		return
+	}
+	resolved := resolveLoginCustomDiff(e.Miner.user, cd.globalDiff.Load())
+	e.Miner.user = resolved.user
+	e.Miner.customDiff = resolved.diff
+	e.Miner.customDiffFromLogin = resolved.fromLogin
+	e.Miner.customDiffResolved = true
 }
 
-// NewRateLimiter creates a per-IP token bucket limiter.
-func NewRateLimiter(cfg RateLimit) *RateLimiter {
+//	limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300})
+//
+//	if limiter.Allow("203.0.113.42:3333") {
+//		// first 30 connection attempts per minute are allowed
+//	}
+func NewRateLimiter(config RateLimit) *RateLimiter {
 	return &RateLimiter{
-		cfg:     cfg,
-		buckets: make(map[string]*tokenBucket),
-		banned:  make(map[string]time.Time),
+		limit:          config,
+		bucketByHost:   make(map[string]*tokenBucket),
+		banUntilByHost: make(map[string]time.Time),
 	}
 }
 
-// Allow returns true if the IP address is permitted to open a new connection.
+//	if limiter.Allow("203.0.113.42:3333") {
+//		// hostOnly("203.0.113.42:3333") == "203.0.113.42"
+//	}
 func (rl *RateLimiter) Allow(ip string) bool {
-	if rl == nil || rl.cfg.MaxConnectionsPerMinute <= 0 {
+	if rl == nil || rl.limit.MaxConnectionsPerMinute <= 0 {
 		return true
 	}
 	host := hostOnly(ip)
```
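To make the target arithmetic concrete, here is the round trip for difficulty 10000 as a runnable check; the numbers follow directly from the ceiling division in `targetFromDifficulty` above:

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"math"
)

// Worked example of the target math above, outside the proxy package.
// For diff = 10000: ceil(0xFFFFFFFF / 10000) = 429497 = 0x00068DB9,
// which serialises little-endian as "b98d0600". Reading it back,
// 0xFFFFFFFF / 429497 = 9999, one unit below the requested difficulty,
// because the ceiling rounds the target up (never making shares easier).
func main() {
	diff := uint64(10000)
	maxTarget := uint64(math.MaxUint32)

	target := (maxTarget + diff - 1) / diff // 429497
	var raw [4]byte
	binary.LittleEndian.PutUint32(raw[:], uint32(target))
	fmt.Println(hex.EncodeToString(raw[:])) // b98d0600

	fmt.Println(maxTarget / target) // 9999
}
```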
```diff
@@ -195,23 +336,23 @@ func (rl *RateLimiter) Allow(ip string) bool {
 	rl.mu.Lock()
 	defer rl.mu.Unlock()
 
-	if until, banned := rl.banned[host]; banned {
+	if until, banned := rl.banUntilByHost[host]; banned {
 		if now.Before(until) {
 			return false
 		}
-		delete(rl.banned, host)
+		delete(rl.banUntilByHost, host)
 	}
 
-	bucket, ok := rl.buckets[host]
+	bucket, ok := rl.bucketByHost[host]
 	if !ok {
-		bucket = &tokenBucket{tokens: rl.cfg.MaxConnectionsPerMinute, lastRefill: now}
-		rl.buckets[host] = bucket
+		bucket = &tokenBucket{tokens: rl.limit.MaxConnectionsPerMinute, lastRefill: now}
+		rl.bucketByHost[host] = bucket
 	}
 
-	refillBucket(bucket, rl.cfg.MaxConnectionsPerMinute, now)
+	refillBucket(bucket, rl.limit.MaxConnectionsPerMinute, now)
 	if bucket.tokens <= 0 {
-		if rl.cfg.BanDurationSeconds > 0 {
-			rl.banned[host] = now.Add(time.Duration(rl.cfg.BanDurationSeconds) * time.Second)
+		if rl.limit.BanDurationSeconds > 0 {
+			rl.banUntilByHost[host] = now.Add(time.Duration(rl.limit.BanDurationSeconds) * time.Second)
 		}
 		return false
 	}
```
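A hedged sketch of where `Allow` would sit in front of an accept loop; the listener and handler wiring stand in for the proxy's real connection path:

```go
package proxy

import "net"

// exampleRateLimitedAccept is a hedged sketch, not part of the change:
// it shows where Allow would sit in an accept loop. The handle callback
// stands in for the proxy's real connection handler.
func exampleRateLimitedAccept(listener net.Listener, limiter *RateLimiter, handle func(net.Conn)) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			return
		}
		// Allow strips the port, refills the host's token bucket, and
		// records a ban once the bucket is exhausted (if configured).
		if !limiter.Allow(conn.RemoteAddr().String()) {
			_ = conn.Close()
			continue
		}
		go handle(conn)
	}
}
```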
```diff
@@ -222,8 +363,10 @@
 }
 
 // Tick removes expired ban entries and refills token buckets.
+//
+//	limiter.Tick()
 func (rl *RateLimiter) Tick() {
-	if rl == nil || rl.cfg.MaxConnectionsPerMinute <= 0 {
+	if rl == nil || rl.limit.MaxConnectionsPerMinute <= 0 {
 		return
 	}
 	now := time.Now()
```
@ -231,64 +374,102 @@ func (rl *RateLimiter) Tick() {
    rl.mu.Lock()
    defer rl.mu.Unlock()

    for host, until := range rl.banned {
    for host, until := range rl.banUntilByHost {
        if !now.Before(until) {
            delete(rl.banned, host)
            delete(rl.banUntilByHost, host)
        }
    }
    for _, bucket := range rl.buckets {
        refillBucket(bucket, rl.cfg.MaxConnectionsPerMinute, now)
    for _, bucket := range rl.bucketByHost {
        refillBucket(bucket, rl.limit.MaxConnectionsPerMinute, now)
    }
}

// NewConfigWatcher creates a polling watcher for a config file.
func NewConfigWatcher(path string, onChange func(*Config)) *ConfigWatcher {
    return &ConfigWatcher{
        path:     path,
        onChange: onChange,
        done:     make(chan struct{}),
// watcher := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
//     p.Reload(cfg)
// })
//
// watcher.Start() // polls once per second and reloads after the file mtime changes
func NewConfigWatcher(configPath string, onChange func(*Config)) *ConfigWatcher {
    watcher := &ConfigWatcher{
        configPath:     configPath,
        onConfigChange: onChange,
        stopCh:         make(chan struct{}),
    }
    if info, err := os.Stat(configPath); err == nil {
        watcher.lastModifiedAt = info.ModTime()
    }
    return watcher
}

// Start begins the 1-second polling loop.
// watcher.Start()
func (w *ConfigWatcher) Start() {
    if w == nil || w.path == "" || w.onChange == nil {
    if w == nil || w.configPath == "" || w.onConfigChange == nil {
        return
    }
    w.mu.Lock()
    if w.started {
        w.mu.Unlock()
        return
    }
    if w.stopCh == nil {
        w.stopCh = make(chan struct{})
    } else {
        select {
        case <-w.stopCh:
            w.stopCh = make(chan struct{})
        default:
        }
    }
    stopCh := w.stopCh
    configPath := w.configPath
    onConfigChange := w.onConfigChange
    w.started = true
    w.mu.Unlock()

    go func() {
        ticker := time.NewTicker(time.Second)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                info, err := os.Stat(w.path)
                if err != nil {
                    continue
                }
                mod := info.ModTime()
                if mod.After(w.lastMod) {
                    w.lastMod = mod
                    cfg, result := LoadConfig(w.path)
                    if result.OK && cfg != nil {
                        w.onChange(cfg)
                if info, err := os.Stat(configPath); err == nil {
                    w.mu.Lock()
                    changed := info.ModTime() != w.lastModifiedAt
                    if changed {
                        w.lastModifiedAt = info.ModTime()
                    }
                    w.mu.Unlock()
                    if !changed {
                        continue
                    }
                    config, result := LoadConfig(configPath)
                    if result.OK && config != nil {
                        onConfigChange(config)
                    }
                }
            case <-w.done:
            case <-stopCh:
                return
            }
        }
    }()
}

// Stop ends the watcher goroutine.
// watcher.Stop()
func (w *ConfigWatcher) Stop() {
    if w == nil {
        return
    }
    w.mu.Lock()
    stopCh := w.stopCh
    w.started = false
    w.mu.Unlock()
    if stopCh == nil {
        return
    }
    select {
    case <-w.done:
    case <-stopCh:
    default:
        close(w.done)
        close(stopCh)
    }
}

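The full lifecycle, pieced together from the doc comments above (p and its Reload method are assumptions taken from the example comment, not verified API):

watcher := NewConfigWatcher("config.json", func(cfg *Config) {
    p.Reload(cfg) // apply the freshly loaded configuration
})
watcher.Start()       // safe to call once; a second call is a no-op while started
defer watcher.Stop()  // closes the stop channel and ends the polling goroutine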
@ -311,9 +492,9 @@ func refillBucket(bucket *tokenBucket, limit int, now time.Time) {
        }
        return
    }
    interval := time.Duration(60/limit) * time.Second
    interval := time.Duration(time.Minute) / time.Duration(limit)
    if interval <= 0 {
        interval = time.Second
        interval = time.Nanosecond
    }
    elapsed := now.Sub(bucket.lastRefill)
    if elapsed < interval {

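The interval change above matters once the limit exceeds 60 per minute: the old expression does integer division before multiplying, so it truncates to zero. A quick standalone check, assuming nothing beyond the standard library:

package main

import (
    "fmt"
    "time"
)

func main() {
    limit := 90 // connections per minute

    // Old formula: 60/90 truncates to 0 before the multiply.
    old := time.Duration(60/limit) * time.Second
    fmt.Println(old) // 0s

    // New formula: divide the minute itself, so 90/min yields ~666ms per token.
    interval := time.Minute / time.Duration(limit)
    fmt.Println(interval) // 666.666666ms
}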
@ -2,7 +2,12 @@ package proxy

import "testing"

func TestCustomDiff_OnLogin(t *testing.T) {
// TestCustomDiff_Apply_Good verifies a user suffix "+50000" sets customDiff and strips the suffix.
//
// cd := proxy.NewCustomDiff(10000)
// cd.Apply(&proxy.Miner{user: "WALLET+50000"})
// // miner.User() == "WALLET", miner.customDiff == 50000
func TestCustomDiff_Apply_Good(t *testing.T) {
    cd := NewCustomDiff(10000)
    miner := &Miner{user: "WALLET+50000"}
    cd.OnLogin(Event{Miner: miner})

@ -12,19 +17,67 @@ func TestCustomDiff_OnLogin(t *testing.T) {
    if miner.customDiff != 50000 {
        t.Fatalf("expected custom diff 50000, got %d", miner.customDiff)
    }
}

    miner = &Miner{user: "WALLET+abc"}
// TestCustomDiff_Apply_Bad verifies "+abc" (non-numeric) leaves user unchanged, customDiff=0.
//
// cd := proxy.NewCustomDiff(10000)
// cd.Apply(&proxy.Miner{user: "WALLET+abc"})
// // miner.User() == "WALLET+abc", miner.customDiff == 0
func TestCustomDiff_Apply_Bad(t *testing.T) {
    cd := NewCustomDiff(10000)
    miner := &Miner{user: "WALLET+abc"}
    cd.OnLogin(Event{Miner: miner})
    if miner.User() != "WALLET+abc" {
        t.Fatalf("expected invalid suffix to remain unchanged")
        t.Fatalf("expected invalid suffix to remain unchanged, got %q", miner.User())
    }
    if miner.customDiff != 0 {
        t.Fatalf("expected custom diff 0 for invalid suffix, got %d", miner.customDiff)
    }

    miner = &Miner{user: "WALLET"}
    cd.OnLogin(Event{Miner: miner})
    if miner.customDiff != 10000 {
        t.Fatalf("expected global diff fallback, got %d", miner.customDiff)
        t.Fatalf("expected invalid suffix to disable custom diff, got %d", miner.customDiff)
    }
}

// TestCustomDiff_Apply_Ugly verifies globalDiff=10000 is used when no suffix is present.
//
// cd := proxy.NewCustomDiff(10000)
// cd.Apply(&proxy.Miner{user: "WALLET"})
// // miner.customDiff == 10000 (falls back to global)
func TestCustomDiff_Apply_Ugly(t *testing.T) {
    cd := NewCustomDiff(10000)
    miner := &Miner{user: "WALLET"}
    cd.OnLogin(Event{Miner: miner})
    if miner.customDiff != 10000 {
        t.Fatalf("expected global diff fallback 10000, got %d", miner.customDiff)
    }
}

// TestCustomDiff_OnLogin_NonNumericSuffix verifies a non-decimal suffix after plus is ignored.
//
// cd := proxy.NewCustomDiff(10000)
// cd.OnLogin(proxy.Event{Miner: &proxy.Miner{user: "WALLET+50000extra"}})
func TestCustomDiff_OnLogin_NonNumericSuffix(t *testing.T) {
    cd := NewCustomDiff(10000)
    miner := &Miner{user: "WALLET+50000extra"}

    cd.OnLogin(Event{Miner: miner})

    if miner.User() != "WALLET+50000extra" {
        t.Fatalf("expected non-numeric suffix plus segment to remain unchanged, got %q", miner.User())
    }
    if miner.customDiff != 0 {
        t.Fatalf("expected invalid suffix to disable custom diff, got %d", miner.customDiff)
    }
}

// TestEffectiveShareDifficulty_CustomDiffCapsPoolDifficulty verifies the cap applied by custom diff.
//
// job := proxy.Job{Target: "01000000"}
// miner := &proxy.Miner{customDiff: 25000}
// proxy.EffectiveShareDifficulty(job, miner) // 25000 (capped)
func TestEffectiveShareDifficulty_CustomDiffCapsPoolDifficulty(t *testing.T) {
    job := Job{Target: "01000000"}
    miner := &Miner{customDiff: 25000}

    if got := EffectiveShareDifficulty(job, miner); got != 25000 {
        t.Fatalf("expected capped difficulty 25000, got %d", got)
    }
}

122 customdiffstats.go Normal file
@ -0,0 +1,122 @@
package proxy

import (
    "strings"
    "sync"
)

// CustomDiffBucketStats tracks per-custom-difficulty share outcomes.
type CustomDiffBucketStats struct {
    Accepted    uint64 `json:"accepted"`
    Rejected    uint64 `json:"rejected"`
    Invalid     uint64 `json:"invalid"`
    Expired     uint64 `json:"expired"`
    HashesTotal uint64 `json:"hashes_total"`
}

// CustomDiffBuckets groups share totals by the miner's resolved custom difficulty.
//
// buckets := NewCustomDiffBuckets(true)
// buckets.OnAccept(Event{Miner: &Miner{customDiff: 50000}, Diff: 25000})
type CustomDiffBuckets struct {
    enabled bool
    buckets map[uint64]*CustomDiffBucketStats
    mu      sync.Mutex
}

// NewCustomDiffBuckets creates a per-difficulty share tracker.
func NewCustomDiffBuckets(enabled bool) *CustomDiffBuckets {
    return &CustomDiffBuckets{
        enabled: enabled,
        buckets: make(map[uint64]*CustomDiffBucketStats),
    }
}

// SetEnabled toggles recording without discarding any collected buckets.
func (b *CustomDiffBuckets) SetEnabled(enabled bool) {
    if b == nil {
        return
    }
    b.mu.Lock()
    defer b.mu.Unlock()
    b.enabled = enabled
}

// OnAccept records an accepted share for the miner's custom difficulty bucket.
func (b *CustomDiffBuckets) OnAccept(e Event) {
    if b == nil || !b.enabled || e.Miner == nil {
        return
    }
    b.mu.Lock()
    defer b.mu.Unlock()
    bucket := b.bucketLocked(e.Miner.customDiff)
    bucket.Accepted++
    if e.Expired {
        bucket.Expired++
    }
    if e.Diff > 0 {
        bucket.HashesTotal += e.Diff
    }
}

// OnReject records a rejected share for the miner's custom difficulty bucket.
func (b *CustomDiffBuckets) OnReject(e Event) {
    if b == nil || !b.enabled || e.Miner == nil {
        return
    }
    b.mu.Lock()
    defer b.mu.Unlock()
    bucket := b.bucketLocked(e.Miner.customDiff)
    bucket.Rejected++
    if isInvalidShareReason(e.Error) {
        bucket.Invalid++
    }
}

// Snapshot returns a copy of the current bucket totals.
//
// summary := buckets.Snapshot()
func (b *CustomDiffBuckets) Snapshot() map[uint64]CustomDiffBucketStats {
    if b == nil {
        return nil
    }
    b.mu.Lock()
    defer b.mu.Unlock()
    if !b.enabled || len(b.buckets) == 0 {
        return nil
    }
    out := make(map[uint64]CustomDiffBucketStats, len(b.buckets))
    for diff, bucket := range b.buckets {
        if bucket == nil {
            continue
        }
        out[diff] = *bucket
    }
    return out
}

func (b *CustomDiffBuckets) bucketLocked(diff uint64) *CustomDiffBucketStats {
    if b.buckets == nil {
        b.buckets = make(map[uint64]*CustomDiffBucketStats)
    }
    bucket, ok := b.buckets[diff]
    if !ok {
        bucket = &CustomDiffBucketStats{}
        b.buckets[diff] = bucket
    }
    return bucket
}

func isInvalidShareReason(reason string) bool {
    reason = strings.ToLower(reason)
    if reason == "" {
        return false
    }
    return strings.Contains(reason, "low diff") ||
        strings.Contains(reason, "lowdifficulty") ||
        strings.Contains(reason, "low difficulty") ||
        strings.Contains(reason, "malformed") ||
        strings.Contains(reason, "difficulty") ||
        strings.Contains(reason, "invalid") ||
        strings.Contains(reason, "nonce")
}
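Putting the new tracker together, using only the API defined in the file above (the fmt import and the standalone framing are illustrative):

buckets := NewCustomDiffBuckets(true)
buckets.OnAccept(Event{Miner: &Miner{customDiff: 50000}, Diff: 25000})
buckets.OnReject(Event{Miner: &Miner{customDiff: 50000}, Error: "Low difficulty share"})

// Snapshot copies the totals under the lock, so the caller can iterate freely.
for diff, stats := range buckets.Snapshot() {
    fmt.Printf("diff=%d accepted=%d rejected=%d invalid=%d hashes=%d\n",
        diff, stats.Accepted, stats.Rejected, stats.Invalid, stats.HashesTotal)
}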
78 customdiffstats_test.go Normal file
@ -0,0 +1,78 @@
package proxy

import "testing"

func TestProxy_CustomDiffStats_Good(t *testing.T) {
    cfg := &Config{
        Mode:            "nicehash",
        Workers:         WorkersByRigID,
        CustomDiffStats: true,
        Bind:            []BindAddr{{Host: "127.0.0.1", Port: 3333}},
        Pools:           []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
    }
    p, result := New(cfg)
    if !result.OK {
        t.Fatalf("expected valid proxy, got error: %v", result.Error)
    }

    miner := &Miner{customDiff: 50000}
    p.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 75, Expired: true})

    summary := p.Summary()
    bucket, ok := summary.CustomDiffStats[50000]
    if !ok {
        t.Fatalf("expected custom diff bucket 50000 to be present")
    }
    if bucket.Accepted != 1 || bucket.Expired != 1 || bucket.HashesTotal != 75 {
        t.Fatalf("unexpected bucket totals: %+v", bucket)
    }
}

func TestProxy_CustomDiffStats_Bad(t *testing.T) {
    cfg := &Config{
        Mode:            "nicehash",
        Workers:         WorkersByRigID,
        CustomDiffStats: true,
        Bind:            []BindAddr{{Host: "127.0.0.1", Port: 3333}},
        Pools:           []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
    }
    p, result := New(cfg)
    if !result.OK {
        t.Fatalf("expected valid proxy, got error: %v", result.Error)
    }

    miner := &Miner{customDiff: 10000}
    p.events.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Low difficulty share"})
    p.events.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Malformed share"})

    summary := p.Summary()
    bucket, ok := summary.CustomDiffStats[10000]
    if !ok {
        t.Fatalf("expected custom diff bucket 10000 to be present")
    }
    if bucket.Rejected != 2 || bucket.Invalid != 2 {
        t.Fatalf("unexpected bucket totals: %+v", bucket)
    }
}

func TestProxy_CustomDiffStats_Ugly(t *testing.T) {
    cfg := &Config{
        Mode:            "nicehash",
        Workers:         WorkersByRigID,
        CustomDiffStats: false,
        Bind:            []BindAddr{{Host: "127.0.0.1", Port: 3333}},
        Pools:           []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
    }
    p, result := New(cfg)
    if !result.OK {
        t.Fatalf("expected valid proxy, got error: %v", result.Error)
    }

    miner := &Miner{customDiff: 25000}
    p.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 1})

    summary := p.Summary()
    if len(summary.CustomDiffStats) != 0 {
        t.Fatalf("expected custom diff stats to remain disabled, got %+v", summary.CustomDiffStats)
    }
}
5 docs/specs/core/go/RFC.md Normal file
@ -0,0 +1,5 @@
# go-proxy RFC

This path mirrors the authoritative proxy contract in [`../../../../RFC.md`](../../../../RFC.md).

Use the root RFC for the full implementation contract.
5 docs/specs/rfc/RFC-CORE-008-AGENT-EXPERIENCE.md Normal file
@ -0,0 +1,5 @@
# RFC-CORE-008: Agent Experience

This path mirrors the local AX guidance in [`../../../../.core/reference/RFC-025-AGENT-EXPERIENCE.md`](../../../../.core/reference/RFC-025-AGENT-EXPERIENCE.md).

Use the reference copy for the full design principles.
38 error.go Normal file
@ -0,0 +1,38 @@
package proxy

// ScopedError carries a stable error scope alongside a human-readable message.
//
// err := proxy.NewScopedError("proxy.config", "load failed", io.EOF)
type ScopedError struct {
    Scope   string
    Message string
    Cause   error
}

// NewScopedError creates an error that keeps a greppable scope token in the failure path.
//
// err := proxy.NewScopedError("proxy.server", "listen failed", cause)
func NewScopedError(scope, message string, cause error) error {
    return &ScopedError{
        Scope:   scope,
        Message: message,
        Cause:   cause,
    }
}

func (e *ScopedError) Error() string {
    if e == nil {
        return ""
    }
    if e.Cause == nil {
        return e.Scope + ": " + e.Message
    }
    return e.Scope + ": " + e.Message + ": " + e.Cause.Error()
}

func (e *ScopedError) Unwrap() error {
    if e == nil {
        return nil
    }
    return e.Cause
}
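Because Unwrap is implemented, callers can branch on the scope or on the original cause with the standard errors helpers. A short sketch (the errors and os imports are assumed; the scope string comes from the tests below):

err := NewScopedError("proxy.config", "read config failed", os.ErrPermission)

var scoped *ScopedError
if errors.As(err, &scoped) && scoped.Scope == "proxy.config" {
    // handle configuration failures distinctly, without string matching
}
if errors.Is(err, os.ErrPermission) {
    // the original cause stays reachable through Unwrap
}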
43 error_test.go Normal file
@ -0,0 +1,43 @@
package proxy

import (
    "errors"
    "testing"
)

func TestError_NewScopedError_Good(t *testing.T) {
    err := NewScopedError("proxy.config", "bind list is empty", nil)

    if err == nil {
        t.Fatalf("expected scoped error")
    }
    if got := err.Error(); got != "proxy.config: bind list is empty" {
        t.Fatalf("unexpected scoped error string: %q", got)
    }
}

func TestError_NewScopedError_Bad(t *testing.T) {
    cause := errors.New("permission denied")
    err := NewScopedError("proxy.config", "read config failed", cause)

    if err == nil {
        t.Fatalf("expected scoped error")
    }
    if !errors.Is(err, cause) {
        t.Fatalf("expected errors.Is to unwrap the original cause")
    }
    if got := err.Error(); got != "proxy.config: read config failed: permission denied" {
        t.Fatalf("unexpected wrapped error string: %q", got)
    }
}

func TestError_NewScopedError_Ugly(t *testing.T) {
    var scoped *ScopedError

    if got := scoped.Error(); got != "" {
        t.Fatalf("expected nil scoped error string to be empty, got %q", got)
    }
    if scoped.Unwrap() != nil {
        t.Fatalf("expected nil scoped error to unwrap to nil")
    }
}
16 events.go
@ -2,18 +2,21 @@ package proxy

import "sync"
|
||||
|
||||
// EventBus dispatches proxy lifecycle events to registered listeners.
|
||||
// Dispatch is synchronous on the calling goroutine. Listeners must not block.
|
||||
// EventBus dispatches proxy lifecycle events to synchronous listeners.
|
||||
//
|
||||
// bus := proxy.NewEventBus()
|
||||
// bus.Subscribe(proxy.EventLogin, customDiff.OnLogin)
|
||||
// bus.Subscribe(proxy.EventLogin, func(e proxy.Event) {
|
||||
// _ = e.Miner.User()
|
||||
// })
|
||||
// bus.Subscribe(proxy.EventAccept, stats.OnAccept)
|
||||
type EventBus struct {
|
||||
listeners map[EventType][]EventHandler
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// EventType identifies the proxy lifecycle event.
|
||||
// EventType identifies one proxy lifecycle event.
|
||||
//
|
||||
// proxy.EventLogin
|
||||
type EventType int
|
||||
|
||||
const (
|
||||
|
|
@ -24,12 +27,13 @@ const (
|
|||
)
|
||||
|
||||
// EventHandler is the callback signature for all event types.
|
||||
//
|
||||
// handler := func(e proxy.Event) { _ = e.Miner }
|
||||
type EventHandler func(Event)
|
||||
|
||||
// Event carries the data for any proxy lifecycle event.
|
||||
// Fields not relevant to the event type are zero/nil.
|
||||
//
|
||||
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m})
|
||||
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m})
|
||||
type Event struct {
|
||||
Type EventType
|
||||
Miner *Miner // always set
|
||||
|
|
|
|||
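The subscribe/dispatch flow, assembled from the doc comments above (m stands in for a miner obtained elsewhere; this is a usage sketch, not code from the diff):

bus := NewEventBus()
bus.Subscribe(EventLogin, func(e Event) {
    // Dispatch is synchronous: it blocks until every listener returns,
    // so keep handlers fast and non-blocking.
    _ = e.Miner.User()
})
bus.Dispatch(Event{Type: EventLogin, Miner: m})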
224 http_auth_test.go Normal file
@ -0,0 +1,224 @@
package proxy

import (
    "net"
    "net/http"
    "net/http/httptest"
    "strconv"
    "testing"
)

func TestProxy_allowHTTP_Good(t *testing.T) {
    p := &Proxy{
        config: &Config{
            HTTP: HTTPConfig{
                Restricted:  true,
                AccessToken: "secret",
            },
        },
    }

    status, ok := p.AllowMonitoringRequest(&http.Request{
        Method: http.MethodGet,
        Header: http.Header{
            "Authorization": []string{"Bearer secret"},
        },
    })
    if !ok {
        t.Fatalf("expected authorised request to pass, got status %d", status)
    }
    if status != http.StatusOK {
        t.Fatalf("expected status %d, got %d", http.StatusOK, status)
    }
}

func TestProxy_allowHTTP_Bad(t *testing.T) {
    p := &Proxy{
        config: &Config{
            HTTP: HTTPConfig{
                Restricted: true,
            },
        },
    }

    status, ok := p.AllowMonitoringRequest(&http.Request{Method: http.MethodPost})
    if ok {
        t.Fatal("expected non-GET request to be rejected")
    }
    if status != http.StatusMethodNotAllowed {
        t.Fatalf("expected status %d, got %d", http.StatusMethodNotAllowed, status)
    }
}

func TestProxy_allowHTTP_Unrestricted_Good(t *testing.T) {
    p := &Proxy{
        config: &Config{
            HTTP: HTTPConfig{},
        },
    }

    status, ok := p.AllowMonitoringRequest(&http.Request{Method: http.MethodGet})
    if !ok {
        t.Fatalf("expected unrestricted request to pass, got status %d", status)
    }
    if status != http.StatusOK {
        t.Fatalf("expected status %d, got %d", http.StatusOK, status)
    }
}

func TestProxy_allowHTTP_Unrestricted_Bad(t *testing.T) {
    p := &Proxy{
        config: &Config{
            HTTP: HTTPConfig{},
        },
    }

    status, ok := p.AllowMonitoringRequest(&http.Request{Method: http.MethodPost})
    if !ok {
        t.Fatalf("expected unrestricted non-GET request to pass, got status %d", status)
    }
    if status != http.StatusOK {
        t.Fatalf("expected status %d, got %d", http.StatusOK, status)
    }
}

func TestProxy_allowHTTP_Ugly(t *testing.T) {
    p := &Proxy{
        config: &Config{
            HTTP: HTTPConfig{
                AccessToken: "secret",
            },
        },
    }

    status, ok := p.AllowMonitoringRequest(&http.Request{
        Method: http.MethodGet,
        Header: http.Header{
            "Authorization": []string{"Bearer wrong"},
        },
    })
    if ok {
        t.Fatal("expected invalid token to be rejected")
    }
    if status != http.StatusUnauthorized {
        t.Fatalf("expected status %d, got %d", http.StatusUnauthorized, status)
    }
}

func TestProxy_allowHTTP_NilConfig_Ugly(t *testing.T) {
    p := &Proxy{}

    status, ok := p.AllowMonitoringRequest(&http.Request{Method: http.MethodGet})
    if ok {
        t.Fatal("expected nil config request to be rejected")
    }
    if status != http.StatusServiceUnavailable {
        t.Fatalf("expected status %d, got %d", http.StatusServiceUnavailable, status)
    }
}

func TestProxy_startHTTP_Good(t *testing.T) {
    p := &Proxy{
        config: &Config{
            HTTP: HTTPConfig{
                Enabled: true,
                Host:    "127.0.0.1",
                Port:    0,
            },
        },
        done: make(chan struct{}),
    }

    if ok := p.startMonitoringServer(); !ok {
        t.Fatal("expected HTTP server to start on a free port")
    }
    p.Stop()
}

func TestProxy_startHTTP_NilConfig_Bad(t *testing.T) {
    p := &Proxy{}

    if ok := p.startMonitoringServer(); ok {
        t.Fatal("expected nil config to skip HTTP server start")
    }
}

func TestProxy_startHTTP_Bad(t *testing.T) {
    listener, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        t.Fatalf("listen on ephemeral port: %v", err)
    }
    defer listener.Close()

    host, port, err := net.SplitHostPort(listener.Addr().String())
    if err != nil {
        t.Fatalf("split listener addr: %v", err)
    }
    portNum, err := strconv.Atoi(port)
    if err != nil {
        t.Fatalf("parse listener port: %v", err)
    }

    p := &Proxy{
        config: &Config{
            HTTP: HTTPConfig{
                Enabled: true,
                Host:    host,
                Port:    uint16(portNum),
            },
        },
        done: make(chan struct{}),
    }

    if ok := p.startMonitoringServer(); ok {
        t.Fatal("expected HTTP server start to fail when the port is already in use")
    }
}

func TestProxy_registerMonitoringRoute_MethodNotAllowed_Bad(t *testing.T) {
    p := &Proxy{
        config: &Config{
            HTTP: HTTPConfig{
                Restricted: true,
            },
        },
    }

    mux := http.NewServeMux()
    p.registerMonitoringRoute(mux, "/1/summary", func() any { return map[string]string{"status": "ok"} })

    request := httptest.NewRequest(http.MethodPost, "/1/summary", nil)
    recorder := httptest.NewRecorder()
    mux.ServeHTTP(recorder, request)

    if recorder.Code != http.StatusMethodNotAllowed {
        t.Fatalf("expected %d, got %d", http.StatusMethodNotAllowed, recorder.Code)
    }
    if got := recorder.Header().Get("Allow"); got != http.MethodGet {
        t.Fatalf("expected Allow header %q, got %q", http.MethodGet, got)
    }
}

func TestProxy_registerMonitoringRoute_Unauthorized_Ugly(t *testing.T) {
    p := &Proxy{
        config: &Config{
            HTTP: HTTPConfig{
                AccessToken: "secret",
            },
        },
    }

    mux := http.NewServeMux()
    p.registerMonitoringRoute(mux, "/1/summary", func() any { return map[string]string{"status": "ok"} })

    request := httptest.NewRequest(http.MethodGet, "/1/summary", nil)
    recorder := httptest.NewRecorder()
    mux.ServeHTTP(recorder, request)

    if recorder.Code != http.StatusUnauthorized {
        t.Fatalf("expected %d, got %d", http.StatusUnauthorized, recorder.Code)
    }
    if got := recorder.Header().Get("WWW-Authenticate"); got != "Bearer" {
        t.Fatalf("expected WWW-Authenticate header %q, got %q", "Bearer", got)
    }
}
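From a client's perspective, the restricted behaviour these tests pin down looks like the sketch below. The host and port are hypothetical; the /1/summary route and the bearer-token scheme come from the tests above:

req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/1/summary", nil)
if err == nil {
    req.Header.Set("Authorization", "Bearer secret")
    resp, err := http.DefaultClient.Do(req)
    if err == nil {
        defer resp.Body.Close() // expect 200 with the right token, 401 without
    }
}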
6 job.go
@ -1,13 +1,15 @@
package proxy

// Job holds the current work unit received from a pool. Immutable once assigned.
// Job holds one pool work unit and its metadata.
//
// j := proxy.Job{
//     Blob: "0707d5ef...b01",
//     Blob: strings.Repeat("0", 160),
//     JobID: "4BiGm3/RgGQzgkTI",
//     Target: "b88d0600",
//     Algo: "cn/r",
// }
// _ = j.BlobWithFixedByte(0x2A)
// _ = j.DifficultyFromTarget()
type Job struct {
    Blob  string // hex-encoded block template (160 hex chars = 80 bytes)
    JobID string // pool-assigned identifier

102 job_test.go
@ -5,7 +5,11 @@ import (
"testing"
|
||||
)
|
||||
|
||||
func TestJob_BlobWithFixedByte(t *testing.T) {
|
||||
// TestJob_BlobWithFixedByte_Good verifies nonce patching on a full 160-char blob.
|
||||
//
|
||||
// job := proxy.Job{Blob: strings.Repeat("0", 160)}
|
||||
// result := job.BlobWithFixedByte(0x2A) // chars 78-79 become "2a"
|
||||
func TestJob_BlobWithFixedByte_Good(t *testing.T) {
|
||||
job := Job{Blob: strings.Repeat("0", 160)}
|
||||
got := job.BlobWithFixedByte(0x2A)
|
||||
if len(got) != 160 {
|
||||
|
|
@ -16,9 +20,97 @@ func TestJob_BlobWithFixedByte(t *testing.T) {
    }
}

func TestJob_DifficultyFromTarget(t *testing.T) {
    job := Job{Target: "b88d0600"}
    if got := job.DifficultyFromTarget(); got == 0 {
        t.Fatalf("expected non-zero difficulty")
// TestJob_BlobWithFixedByte_Bad verifies a short blob is returned unchanged.
//
// job := proxy.Job{Blob: "0000"}
// result := job.BlobWithFixedByte(0x2A) // too short, returned as-is
func TestJob_BlobWithFixedByte_Bad(t *testing.T) {
    shortBlob := "0000"
    job := Job{Blob: shortBlob}
    got := job.BlobWithFixedByte(0x2A)
    if got != shortBlob {
        t.Fatalf("expected short blob to be returned unchanged, got %q", got)
    }
}

// TestJob_BlobWithFixedByte_Ugly verifies fixedByte 0xFF renders as lowercase "ff".
//
// job := proxy.Job{Blob: strings.Repeat("0", 160)}
// result := job.BlobWithFixedByte(0xFF) // chars 78-79 become "ff" (not "FF")
func TestJob_BlobWithFixedByte_Ugly(t *testing.T) {
    job := Job{Blob: strings.Repeat("0", 160)}
    got := job.BlobWithFixedByte(0xFF)
    if got[78:80] != "ff" {
        t.Fatalf("expected lowercase 'ff', got %q", got[78:80])
    }
    if len(got) != 160 {
        t.Fatalf("expected blob length preserved, got %d", len(got))
    }
}

// TestJob_DifficultyFromTarget_Good verifies a known target converts to the expected difficulty.
//
// job := proxy.Job{Target: "b88d0600"}
// diff := job.DifficultyFromTarget() // 10000
func TestJob_DifficultyFromTarget_Good(t *testing.T) {
    job := Job{Target: "b88d0600"}
    if got := job.DifficultyFromTarget(); got != 10000 {
        t.Fatalf("expected difficulty 10000, got %d", got)
    }
}

// TestJob_DifficultyFromTarget_Bad verifies a zero target produces difficulty 0 without panic.
//
// job := proxy.Job{Target: "00000000"}
// diff := job.DifficultyFromTarget() // 0 (no divide-by-zero)
func TestJob_DifficultyFromTarget_Bad(t *testing.T) {
    job := Job{Target: "00000000"}
    if got := job.DifficultyFromTarget(); got != 0 {
        t.Fatalf("expected difficulty 0 for zero target, got %d", got)
    }
}

// TestJob_DifficultyFromTarget_Ugly verifies the maximum target "ffffffff" yields difficulty 1.
//
// job := proxy.Job{Target: "ffffffff"}
// diff := job.DifficultyFromTarget() // 1
func TestJob_DifficultyFromTarget_Ugly(t *testing.T) {
    job := Job{Target: "ffffffff"}
    if got := job.DifficultyFromTarget(); got != 1 {
        t.Fatalf("expected minimum difficulty 1, got %d", got)
    }
}

// TestJob_IsValid_Good verifies a job with blob and job ID is valid.
//
// job := proxy.Job{Blob: "abc", JobID: "job-1"}
// job.IsValid() // true
func TestJob_IsValid_Good(t *testing.T) {
    job := Job{Blob: "abc", JobID: "job-1"}
    if !job.IsValid() {
        t.Fatalf("expected job with blob and job id to be valid")
    }
}

// TestJob_IsValid_Bad verifies a job with empty blob or job ID is invalid.
//
// job := proxy.Job{Blob: "", JobID: "job-1"}
// job.IsValid() // false
func TestJob_IsValid_Bad(t *testing.T) {
    if (Job{Blob: "", JobID: "job-1"}).IsValid() {
        t.Fatalf("expected empty blob to be invalid")
    }
    if (Job{Blob: "abc", JobID: ""}).IsValid() {
        t.Fatalf("expected empty job id to be invalid")
    }
}

// TestJob_IsValid_Ugly verifies a zero-value job is invalid.
//
// job := proxy.Job{}
// job.IsValid() // false
func TestJob_IsValid_Ugly(t *testing.T) {
    if (Job{}).IsValid() {
        t.Fatalf("expected zero-value job to be invalid")
    }
}

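The target values these tests pin down (b88d0600 to 10000, ffffffff to 1, 00000000 to 0) match the common Stratum convention of dividing the 32-bit ceiling by the little-endian target. The sketch below demonstrates that convention as an assumption, not this repository's actual implementation:

package main

import (
    "encoding/binary"
    "encoding/hex"
    "fmt"
)

// difficultyFromTarget decodes an 8-hex-char little-endian target and
// divides the 32-bit ceiling by it. "b88d0600" -> 0x00068db8 -> 10000.
func difficultyFromTarget(target string) uint64 {
    raw, err := hex.DecodeString(target)
    if err != nil || len(raw) != 4 {
        return 0
    }
    t := binary.LittleEndian.Uint32(raw)
    if t == 0 {
        return 0 // guard against divide-by-zero, as the _Bad test requires
    }
    return uint64(0xFFFFFFFF) / uint64(t)
}

func main() {
    fmt.Println(difficultyFromTarget("b88d0600")) // 10000
    fmt.Println(difficultyFromTarget("ffffffff")) // 1
    fmt.Println(difficultyFromTarget("00000000")) // 0
}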
@ -15,11 +15,11 @@ import (
// Line format (connect): 2026-04-04T12:00:00Z CONNECT <ip> <user> <agent>
// Line format (close): 2026-04-04T12:00:00Z CLOSE <ip> <user> rx=<bytes> tx=<bytes>
//
// al, result := log.NewAccessLog("/var/log/proxy-access.log")
// al := log.NewAccessLog("/var/log/proxy-access.log")
// bus.Subscribe(proxy.EventLogin, al.OnLogin)
// bus.Subscribe(proxy.EventClose, al.OnClose)
type AccessLog struct {
    path string
    mu   sync.Mutex
    f    *os.File
    file *os.File
}

144 log/impl.go
@ -10,32 +10,79 @@
)

// NewAccessLog creates an append-only access log.
//
// al := log.NewAccessLog("/var/log/proxy-access.log")
// defer al.Close()
func NewAccessLog(path string) *AccessLog {
    return &AccessLog{path: path}
}

// OnLogin writes a CONNECT line.
// Close releases the underlying file handle if the log has been opened.
//
// al := log.NewAccessLog("/var/log/proxy-access.log")
// defer al.Close()
func (l *AccessLog) Close() {
    if l == nil {
        return
    }
    l.mu.Lock()
    defer l.mu.Unlock()
    if l.file != nil {
        _ = l.file.Close()
        l.file = nil
    }
}

// OnLogin writes a connect line such as:
//
// al.OnLogin(proxy.Event{Miner: &proxy.Miner{}})
// // 2026-04-04T12:00:00Z CONNECT 10.0.0.1 WALLET XMRig/6.21.0
func (l *AccessLog) OnLogin(e proxy.Event) {
    if l == nil || e.Miner == nil {
        return
    }
    l.writeLine("CONNECT", e.Miner.IP(), e.Miner.User(), e.Miner.Agent(), 0, 0)
    l.writeConnectLine(e.Miner.IP(), e.Miner.User(), e.Miner.Agent())
}

// OnClose writes a CLOSE line with byte counts.
// OnClose writes a close line such as:
//
// al.OnClose(proxy.Event{Miner: &proxy.Miner{}})
// // 2026-04-04T12:00:00Z CLOSE 10.0.0.1 WALLET rx=512 tx=4096
func (l *AccessLog) OnClose(e proxy.Event) {
    if l == nil || e.Miner == nil {
        return
    }
    l.writeLine("CLOSE", e.Miner.IP(), e.Miner.User(), "", e.Miner.RX(), e.Miner.TX())
    l.writeCloseLine(e.Miner.IP(), e.Miner.User(), e.Miner.RX(), e.Miner.TX())
}

// NewShareLog creates an append-only share log.
//
// sl := log.NewShareLog("/var/log/proxy-shares.log")
// defer sl.Close()
func NewShareLog(path string) *ShareLog {
    return &ShareLog{path: path}
}

// OnAccept writes an ACCEPT line.
// Close releases the underlying file handle if the log has been opened.
//
// sl := log.NewShareLog("/var/log/proxy-shares.log")
// defer sl.Close()
func (l *ShareLog) Close() {
    if l == nil {
        return
    }
    l.mu.Lock()
    defer l.mu.Unlock()
    if l.file != nil {
        _ = l.file.Close()
        l.file = nil
    }
}

// OnAccept writes an accept line such as:
//
// sl.OnAccept(proxy.Event{Miner: &proxy.Miner{}, Diff: 100000, Latency: 82})
// // 2026-04-04T12:00:00Z ACCEPT WALLET diff=100000 latency=82ms
func (l *ShareLog) OnAccept(e proxy.Event) {
    if l == nil || e.Miner == nil {
        return

@ -43,7 +90,10 @@ func (l *ShareLog) OnAccept(e proxy.Event) {
    l.writeAcceptLine(e.Miner.User(), e.Diff, uint64(e.Latency))
}

// OnReject writes a REJECT line.
// OnReject writes a reject line such as:
//
// sl.OnReject(proxy.Event{Miner: &proxy.Miner{}, Error: "Invalid nonce"})
// // 2026-04-04T12:00:00Z REJECT WALLET reason="Invalid nonce"
func (l *ShareLog) OnReject(e proxy.Event) {
    if l == nil || e.Miner == nil {
        return

@ -51,38 +101,52 @@ func (l *ShareLog) OnReject(e proxy.Event) {
    l.writeRejectLine(e.Miner.User(), e.Error)
}

func (l *AccessLog) writeLine(kind, ip, user, agent string, rx, tx uint64) {
    l.mu.Lock()
    defer l.mu.Unlock()
    if err := l.ensureFile(); err != nil {
func (accessLog *AccessLog) writeConnectLine(ip, user, agent string) {
    accessLog.mu.Lock()
    defer accessLog.mu.Unlock()
    if err := accessLog.ensureFile(); err != nil {
        return
    }
    var builder strings.Builder
    builder.WriteString(time.Now().UTC().Format(time.RFC3339))
    builder.WriteByte(' ')
    builder.WriteString(kind)
    builder.WriteString("CONNECT")
    builder.WriteString(" ")
    builder.WriteString(ip)
    builder.WriteString(" ")
    builder.WriteString(user)
    if agent != "" {
        builder.WriteString(" ")
        builder.WriteString(agent)
    }
    if rx > 0 || tx > 0 {
        builder.WriteString(" rx=")
        builder.WriteString(strconv.FormatUint(rx, 10))
        builder.WriteString(" tx=")
        builder.WriteString(strconv.FormatUint(tx, 10))
    }
    builder.WriteString(" ")
    builder.WriteString(agent)
    builder.WriteByte('\n')
    _, _ = l.f.WriteString(builder.String())
    _, _ = accessLog.file.WriteString(builder.String())
}

func (l *ShareLog) writeAcceptLine(user string, diff uint64, latency uint64) {
    l.mu.Lock()
    defer l.mu.Unlock()
    if err := l.ensureFile(); err != nil {
func (accessLog *AccessLog) writeCloseLine(ip, user string, rx, tx uint64) {
    accessLog.mu.Lock()
    defer accessLog.mu.Unlock()
    if err := accessLog.ensureFile(); err != nil {
        return
    }
    var builder strings.Builder
    builder.WriteString(time.Now().UTC().Format(time.RFC3339))
    builder.WriteByte(' ')
    builder.WriteString("CLOSE")
    builder.WriteString(" ")
    builder.WriteString(ip)
    builder.WriteString(" ")
    builder.WriteString(user)
    builder.WriteString(" rx=")
    builder.WriteString(strconv.FormatUint(rx, 10))
    builder.WriteString(" tx=")
    builder.WriteString(strconv.FormatUint(tx, 10))
    builder.WriteByte('\n')
    _, _ = accessLog.file.WriteString(builder.String())
}

func (shareLog *ShareLog) writeAcceptLine(user string, diff uint64, latency uint64) {
    shareLog.mu.Lock()
    defer shareLog.mu.Unlock()
    if err := shareLog.ensureFile(); err != nil {
        return
    }
    var builder strings.Builder

@ -96,13 +160,13 @@ func (l *ShareLog) writeAcceptLine(user string, diff uint64, latency uint64) {
    builder.WriteString(strconv.FormatUint(latency, 10))
    builder.WriteString("ms")
    builder.WriteByte('\n')
    _, _ = l.f.WriteString(builder.String())
    _, _ = shareLog.file.WriteString(builder.String())
}

func (l *ShareLog) writeRejectLine(user, reason string) {
    l.mu.Lock()
    defer l.mu.Unlock()
    if err := l.ensureFile(); err != nil {
func (shareLog *ShareLog) writeRejectLine(user, reason string) {
    shareLog.mu.Lock()
    defer shareLog.mu.Unlock()
    if err := shareLog.ensureFile(); err != nil {
        return
    }
    var builder strings.Builder

@ -112,29 +176,29 @@ func (l *ShareLog) writeRejectLine(user, reason string) {
    builder.WriteString(" reason=\"")
    builder.WriteString(reason)
    builder.WriteString("\"\n")
    _, _ = l.f.WriteString(builder.String())
    _, _ = shareLog.file.WriteString(builder.String())
}

func (l *AccessLog) ensureFile() error {
    if l.f != nil {
func (accessLog *AccessLog) ensureFile() error {
    if accessLog.file != nil {
        return nil
    }
    f, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
    f, err := os.OpenFile(accessLog.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
    if err != nil {
        return err
    }
    l.f = f
    accessLog.file = f
    return nil
}

func (l *ShareLog) ensureFile() error {
    if l.f != nil {
func (shareLog *ShareLog) ensureFile() error {
    if shareLog.file != nil {
        return nil
    }
    f, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
    f, err := os.OpenFile(shareLog.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
    if err != nil {
        return err
    }
    l.f = f
    shareLog.file = f
    return nil
}

341 log/impl_test.go Normal file
@ -0,0 +1,341 @@
package log

import (
    "net"
    "os"
    "path/filepath"
    "strings"
    "testing"
    "time"

    "dappco.re/go/proxy"
)

// TestAccessLog_OnLogin_Good verifies a CONNECT line is written with the expected columns.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnLogin(proxy.Event{Miner: miner}) // writes "CONNECT 10.0.0.1 WALLET XMRig/6.21.0"
func TestAccessLog_OnLogin_Good(t *testing.T) {
    path := filepath.Join(t.TempDir(), "access.log")
    al := NewAccessLog(path)
    defer al.Close()

    miner := newTestMiner(t)
    al.OnLogin(proxy.Event{Miner: miner})
    al.Close()

    data, err := os.ReadFile(path)
    if err != nil {
        t.Fatalf("expected log file to exist: %v", err)
    }
    line := strings.TrimSpace(string(data))
    if !strings.Contains(line, "CONNECT") {
        t.Fatalf("expected CONNECT in log line, got %q", line)
    }
}

// TestAccessLog_OnLogin_Bad verifies a nil miner event does not panic or write anything.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnLogin(proxy.Event{Miner: nil}) // no-op
func TestAccessLog_OnLogin_Bad(t *testing.T) {
    path := filepath.Join(t.TempDir(), "access.log")
    al := NewAccessLog(path)
    defer al.Close()

    al.OnLogin(proxy.Event{Miner: nil})
    al.Close()

    if _, err := os.Stat(path); err == nil {
        data, _ := os.ReadFile(path)
        if len(data) > 0 {
            t.Fatalf("expected no output for nil miner, got %q", string(data))
        }
    }
}

// TestAccessLog_OnLogin_Ugly verifies a nil AccessLog does not panic.
//
// var al *log.AccessLog
// al.OnLogin(proxy.Event{Miner: miner}) // no-op, no panic
func TestAccessLog_OnLogin_Ugly(t *testing.T) {
    var al *AccessLog
    miner := newTestMiner(t)
    al.OnLogin(proxy.Event{Miner: miner})
}

// TestAccessLog_OnClose_Good verifies a CLOSE line includes rx and tx byte counts.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnClose(proxy.Event{Miner: miner}) // writes "CLOSE <ip> <user> rx=0 tx=0"
func TestAccessLog_OnClose_Good(t *testing.T) {
    path := filepath.Join(t.TempDir(), "access.log")
    al := NewAccessLog(path)
    defer al.Close()

    miner := newTestMiner(t)
    al.OnClose(proxy.Event{Miner: miner})
    al.Close()

    data, err := os.ReadFile(path)
    if err != nil {
        t.Fatalf("expected log file to exist: %v", err)
    }
    line := strings.TrimSpace(string(data))
    if !strings.Contains(line, "CLOSE") {
        t.Fatalf("expected CLOSE in log line, got %q", line)
    }
    if !strings.Contains(line, "rx=") {
        t.Fatalf("expected rx= in log line, got %q", line)
    }
    if !strings.Contains(line, "tx=") {
        t.Fatalf("expected tx= in log line, got %q", line)
    }
}

// TestAccessLog_OnClose_Bad verifies a nil miner close event produces no output.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnClose(proxy.Event{Miner: nil}) // no-op
func TestAccessLog_OnClose_Bad(t *testing.T) {
    path := filepath.Join(t.TempDir(), "access.log")
    al := NewAccessLog(path)
    defer al.Close()

    al.OnClose(proxy.Event{Miner: nil})
    al.Close()

    if _, err := os.Stat(path); err == nil {
        data, _ := os.ReadFile(path)
        if len(data) > 0 {
            t.Fatalf("expected no output for nil miner, got %q", string(data))
        }
    }
}

// TestAccessLog_OnClose_Ugly verifies close on an empty-path log is a no-op.
//
// al := log.NewAccessLog("")
// al.OnClose(proxy.Event{Miner: miner}) // no-op, empty path
func TestAccessLog_OnClose_Ugly(t *testing.T) {
    al := NewAccessLog("")
    defer al.Close()

    miner := newTestMiner(t)
    al.OnClose(proxy.Event{Miner: miner})
}

// TestShareLog_OnAccept_Good verifies an ACCEPT line is written with diff and latency.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000, Latency: 82})
func TestShareLog_OnAccept_Good(t *testing.T) {
    path := filepath.Join(t.TempDir(), "shares.log")
    sl := NewShareLog(path)
    defer sl.Close()

    miner := newTestMiner(t)
    sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000, Latency: 82})
    sl.Close()

    data, err := os.ReadFile(path)
    if err != nil {
        t.Fatalf("expected log file to exist: %v", err)
    }
    line := strings.TrimSpace(string(data))
    if !strings.Contains(line, "ACCEPT") {
        t.Fatalf("expected ACCEPT in log line, got %q", line)
    }
    if !strings.Contains(line, "diff=100000") {
        t.Fatalf("expected diff=100000 in log line, got %q", line)
    }
    if !strings.Contains(line, "latency=82ms") {
        t.Fatalf("expected latency=82ms in log line, got %q", line)
    }
}

// TestShareLog_OnAccept_Bad verifies a nil miner accept event produces no output.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnAccept(proxy.Event{Miner: nil}) // no-op
func TestShareLog_OnAccept_Bad(t *testing.T) {
    path := filepath.Join(t.TempDir(), "shares.log")
    sl := NewShareLog(path)
    defer sl.Close()

    sl.OnAccept(proxy.Event{Miner: nil, Diff: 100000})
    sl.Close()

    if _, err := os.Stat(path); err == nil {
        data, _ := os.ReadFile(path)
        if len(data) > 0 {
            t.Fatalf("expected no output for nil miner, got %q", string(data))
        }
    }
}

// TestShareLog_OnAccept_Ugly verifies a nil ShareLog does not panic.
//
// var sl *log.ShareLog
// sl.OnAccept(proxy.Event{Miner: miner}) // no-op, no panic
func TestShareLog_OnAccept_Ugly(t *testing.T) {
    var sl *ShareLog
    miner := newTestMiner(t)
    sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000})
}

// TestShareLog_OnReject_Good verifies a REJECT line is written with the rejection reason.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnReject(proxy.Event{Miner: miner, Error: "Low difficulty share"})
func TestShareLog_OnReject_Good(t *testing.T) {
    path := filepath.Join(t.TempDir(), "shares.log")
    sl := NewShareLog(path)
    defer sl.Close()

    miner := newTestMiner(t)
    sl.OnReject(proxy.Event{Miner: miner, Error: "Low difficulty share"})
    sl.Close()

    data, err := os.ReadFile(path)
    if err != nil {
        t.Fatalf("expected log file to exist: %v", err)
    }
    line := strings.TrimSpace(string(data))
    if !strings.Contains(line, "REJECT") {
        t.Fatalf("expected REJECT in log line, got %q", line)
    }
    if !strings.Contains(line, "Low difficulty share") {
        t.Fatalf("expected rejection reason in log line, got %q", line)
    }
}

// TestShareLog_OnReject_Bad verifies a nil miner reject event produces no output.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnReject(proxy.Event{Miner: nil}) // no-op
func TestShareLog_OnReject_Bad(t *testing.T) {
    path := filepath.Join(t.TempDir(), "shares.log")
    sl := NewShareLog(path)
    defer sl.Close()

    sl.OnReject(proxy.Event{Miner: nil, Error: "Low difficulty share"})
    sl.Close()

    if _, err := os.Stat(path); err == nil {
        data, _ := os.ReadFile(path)
        if len(data) > 0 {
            t.Fatalf("expected no output for nil miner, got %q", string(data))
        }
    }
}

// TestShareLog_OnReject_Ugly verifies an empty-path ShareLog silently discards the reject line.
//
// sl := log.NewShareLog("")
// sl.OnReject(proxy.Event{Miner: miner, Error: "reason"}) // no-op, empty path
func TestShareLog_OnReject_Ugly(t *testing.T) {
    sl := NewShareLog("")
    defer sl.Close()

    miner := newTestMiner(t)
    sl.OnReject(proxy.Event{Miner: miner, Error: "reason"})
}

// TestAccessLog_Close_Good verifies Close releases the file handle and is safe to call twice.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnLogin(proxy.Event{Miner: miner})
// al.Close()
// al.Close() // double close is safe
func TestAccessLog_Close_Good(t *testing.T) {
    path := filepath.Join(t.TempDir(), "access.log")
    al := NewAccessLog(path)

    miner := newTestMiner(t)
    al.OnLogin(proxy.Event{Miner: miner})
    al.Close()
    al.Close()
}

// TestAccessLog_Close_Bad verifies Close on a nil AccessLog does not panic.
//
// var al *log.AccessLog
// al.Close() // no-op, no panic
func TestAccessLog_Close_Bad(t *testing.T) {
    var al *AccessLog
    al.Close()
}

// TestAccessLog_Close_Ugly verifies Close on a never-opened log does not panic.
//
// al := log.NewAccessLog("/nonexistent/dir/access.log")
// al.Close() // no file was ever opened
func TestAccessLog_Close_Ugly(t *testing.T) {
    al := NewAccessLog("/nonexistent/dir/access.log")
    al.Close()
}

// TestShareLog_Close_Good verifies Close releases the file handle and is safe to call twice.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 1000})
// sl.Close()
// sl.Close() // double close is safe
func TestShareLog_Close_Good(t *testing.T) {
    path := filepath.Join(t.TempDir(), "shares.log")
    sl := NewShareLog(path)

    miner := newTestMiner(t)
    sl.OnAccept(proxy.Event{Miner: miner, Diff: 1000})
    sl.Close()
    sl.Close()
}

// TestShareLog_Close_Bad verifies Close on a nil ShareLog does not panic.
//
// var sl *log.ShareLog
// sl.Close() // no-op, no panic
func TestShareLog_Close_Bad(t *testing.T) {
    var sl *ShareLog
    sl.Close()
}

// TestShareLog_Close_Ugly verifies Close on a never-opened log does not panic.
//
// sl := log.NewShareLog("/nonexistent/dir/shares.log")
// sl.Close() // no file was ever opened
func TestShareLog_Close_Ugly(t *testing.T) {
    sl := NewShareLog("/nonexistent/dir/shares.log")
    sl.Close()
}

// newTestMiner creates a minimal miner for log testing using a net.Pipe connection.
func newTestMiner(t *testing.T) *proxy.Miner {
    t.Helper()
    client, server := net.Pipe()
    t.Cleanup(func() {
        _ = client.Close()
        _ = server.Close()
    })
    miner := proxy.NewMiner(client, 3333, nil)
    miner.SetID(1)
    return miner
}

// pipeAddr satisfies the net.Addr interface for pipe-based test connections.
type pipeAddr struct{}

func (a pipeAddr) Network() string { return "pipe" }
func (a pipeAddr) String() string  { return "pipe" }

// pipeConn wraps an os.Pipe as a net.Conn for tests that need a closeable socket.
type pipeConn struct {
    *os.File
}

func (p *pipeConn) RemoteAddr() net.Addr               { return pipeAddr{} }
func (p *pipeConn) LocalAddr() net.Addr                { return pipeAddr{} }
func (p *pipeConn) SetDeadline(_ time.Time) error      { return nil }
func (p *pipeConn) SetReadDeadline(_ time.Time) error  { return nil }
func (p *pipeConn) SetWriteDeadline(_ time.Time) error { return nil }

@ -16,5 +16,5 @@ import (
|
|||
type ShareLog struct {
|
||||
path string
|
||||
mu sync.Mutex
|
||||
f *os.File
|
||||
file *os.File
|
||||
}
|
||||
|
|
|
|||
68 miner.go
@ -25,35 +25,41 @@ const (
// m := proxy.NewMiner(conn, 3333, nil)
// m.Start()
type Miner struct {
    id             int64  // monotonically increasing per-process; atomic assignment
    rpcID          string // UUID v4 sent to miner as session id
    state          MinerState
    extAlgo        bool   // miner sent algo list in login params
    extNH          bool   // NiceHash mode active (fixed byte splitting)
    ip             string // remote IP (without port, for logging)
    localPort      uint16
    user           string // login params.login (wallet address), custom diff suffix stripped
    password       string // login params.pass
    agent          string // login params.agent
    rigID          string // login params.rigid (optional extension)
    fixedByte      uint8  // NiceHash slot index (0-255)
    mapperID       int64  // which NonceMapper owns this miner; -1 = unassigned
    routeID        int64  // SimpleMapper ID in simple mode; -1 = unassigned
    customDiff     uint64 // 0 = use pool diff; non-zero = cap diff to this value
    accessPassword string
    globalDiff     uint64
    diff           uint64 // last difficulty sent to this miner from the pool
    rx             uint64 // bytes received from miner
    tx             uint64 // bytes sent from miner
    currentJob     Job
    connectedAt    time.Time
    lastActivityAt time.Time
    conn           net.Conn
    tlsConn        *tls.Conn  // nil if plain TCP
    sendMu         sync.Mutex // serialises writes to conn
    buf            [16384]byte // per-miner send buffer; avoids per-write allocations
    onLogin        func(*Miner)
    onSubmit       func(*Miner, *SubmitEvent)
    onClose        func(*Miner)
    closeOnce      sync.Once
    id                  int64  // monotonically increasing per-process; atomic assignment
    rpcID               string // UUID v4 sent to miner as session id
    state               MinerState
    extAlgo             bool // miner sent algo list in login params
    loginAlgos          []string
    extNH               bool // NiceHash mode active (fixed byte splitting)
    algoEnabled         bool // proxy is configured to negotiate the algo extension
    ip                  string // remote IP (without port, for logging)
    remoteAddr          string
    localPort           uint16
    user                string // login params.login (wallet address), custom diff suffix stripped
    password            string // login params.pass
    agent               string // login params.agent
    rigID               string // login params.rigid (optional extension)
    fixedByte           uint8  // NiceHash slot index (0-255)
    mapperID            int64  // which NonceMapper owns this miner; -1 = unassigned
    routeID             int64  // SimpleMapper ID in simple mode; -1 = unassigned
    customDiff          uint64 // 0 = use pool diff; non-zero = cap diff to this value
    customDiffResolved  bool
    customDiffFromLogin bool
    accessPassword      string
    globalDiff          uint64
    diff                uint64 // last difficulty sent to this miner from the pool
    rx                  uint64 // bytes received from miner
    tx                  uint64 // bytes sent from miner
    currentJob          Job
    connectedAt         time.Time
    lastActivityAt      time.Time
    conn                net.Conn
    tlsConn             *tls.Conn  // nil if plain TCP
    sendMu              sync.Mutex // serialises writes to conn
    buf                 [16384]byte // per-miner send buffer; avoids per-write allocations
    onLogin             func(*Miner)
    onLoginReady        func(*Miner)
    onSubmit            func(*Miner, *SubmitEvent)
    onClose             func(*Miner)
    closeOnce           sync.Once
}

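The fixedByte field above drives the NiceHash blob splitting that the job tests pin down: hex characters 78-79 of the 160-character blob (byte 39) carry the per-miner slot, rendered in lowercase. A standalone sketch of that convention, under the assumption that this is all BlobWithFixedByte does; not the repository's actual code:

// blobWithFixedByte patches byte 39 (hex chars 78-79) of an 80-byte blob.
func blobWithFixedByte(blob string, fixedByte uint8) string {
    if len(blob) != 160 {
        return blob // malformed or short blobs are returned unchanged
    }
    // fmt's %02x renders 0xFF as lowercase "ff", matching the _Ugly test.
    return blob[:78] + fmt.Sprintf("%02x", fixedByte) + blob[80:]
}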
467 miner_login_test.go Normal file
@ -0,0 +1,467 @@
package proxy

import (
    "bufio"
    "encoding/json"
    "net"
    "strings"
    "testing"
    "time"
)

func TestMiner_HandleLogin_Good(t *testing.T) {
    minerConn, clientConn := net.Pipe()
    defer minerConn.Close()
    defer clientConn.Close()

    miner := NewMiner(minerConn, 3333, nil)
    miner.algoEnabled = true
    miner.extNH = true
    miner.fixedByte = 0x2a
    miner.onLogin = func(m *Miner) {
        m.SetMapperID(1)
    }
    miner.currentJob = Job{
        Blob:     strings.Repeat("0", 160),
        JobID:    "job-1",
        Target:   "b88d0600",
        Algo:     "cn/r",
        Height:   7,
        SeedHash: "seed",
    }

    params, err := json.Marshal(loginParams{
        Login: "wallet",
        Pass:  "x",
        Agent: "xmrig",
        Algo:  []string{"cn/r"},
        RigID: "rig-1",
    })
    if err != nil {
        t.Fatalf("marshal login params: %v", err)
    }

    done := make(chan struct{})
    go func() {
        miner.handleLogin(stratumRequest{ID: 1, Method: "login", Params: params})
        close(done)
    }()

    line, err := bufio.NewReader(clientConn).ReadBytes('\n')
    if err != nil {
        t.Fatalf("read login response: %v", err)
    }
    <-done

    var payload struct {
        Error  json.RawMessage `json:"error"`
        Result struct {
            ID         string         `json:"id"`
            Status     string         `json:"status"`
            Extensions []string       `json:"extensions"`
            Job        map[string]any `json:"job"`
        } `json:"result"`
    }
    if err := json.Unmarshal(line, &payload); err != nil {
        t.Fatalf("unmarshal login response: %v", err)
    }

    if string(payload.Error) != "null" {
        t.Fatalf("expected login response error to be null, got %s", string(payload.Error))
    }
    if payload.Result.Status != "OK" {
        t.Fatalf("expected login success, got %q", payload.Result.Status)
    }
    if payload.Result.ID == "" {
        t.Fatalf("expected rpc id in login response")
    }
    if len(payload.Result.Extensions) != 1 || payload.Result.Extensions[0] != "algo" {
        t.Fatalf("expected algo extension, got %#v", payload.Result.Extensions)
    }
    if got := miner.LoginAlgos(); len(got) != 1 || got[0] != "cn/r" {
        t.Fatalf("expected login algo list to be stored, got %#v", got)
    }
    if got := payload.Result.Job["job_id"]; got != "job-1" {
        t.Fatalf("expected embedded job, got %#v", got)
    }
    if got := payload.Result.Job["algo"]; got != "cn/r" {
        t.Fatalf("expected embedded algo, got %#v", got)
    }
    blob, _ := payload.Result.Job["blob"].(string)
    if blob[78:80] != "2a" {
        t.Fatalf("expected fixed-byte patched blob, got %q", blob[78:80])
    }
    if miner.State() != MinerStateReady {
        t.Fatalf("expected miner ready after login reply with job, got %d", miner.State())
    }
}
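The Target value "b88d0600" above is a 4-byte little-endian compact target. Assuming the common CryptoNote stratum convention diff = 2^32 / target (the package's own DifficultyFromTarget may handle more edge cases), the round-trip works out like this:

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// difficultyFromTarget decodes a 4-byte little-endian compact target
// and converts it to a difficulty. The 2^32/target formula is the
// usual stratum convention, stated here as an assumption.
func difficultyFromTarget(targetHex string) uint64 {
	raw, err := hex.DecodeString(targetHex)
	if err != nil || len(raw) != 4 {
		return 0
	}
	t := binary.LittleEndian.Uint32(raw)
	if t == 0 {
		return 0
	}
	return (1 << 32) / uint64(t)
}

func main() {
	// 0x00068db8 = 429496, and 2^32 / 429496 truncates to 10000.
	fmt.Println(difficultyFromTarget("b88d0600")) // 10000
}
```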
func TestProxy_New_Watch_Good(t *testing.T) {
    cfg := &Config{
        Mode:       "nicehash",
        Workers:    WorkersByRigID,
        Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
        Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
        Watch:      true,
        configPath: "/tmp/proxy.json",
    }

    proxyInstance, result := New(cfg)
    if !result.OK {
        t.Fatalf("expected valid proxy, got error: %v", result.Error)
    }
    if proxyInstance.watcher == nil {
        t.Fatalf("expected config watcher when watch is enabled and source path is known")
    }
}

func TestMiner_HandleLogin_Ugly(t *testing.T) {
    for i := 0; i < 256; i++ {
        miner := &Miner{}
        miner.SetID(int64(i + 1))
        miner.SetMapperID(int64(i + 1))
    }

    serverConn, clientConn := net.Pipe()
    defer serverConn.Close()
    defer clientConn.Close()

    miner := NewMiner(serverConn, 3333, nil)
    miner.extNH = true
    miner.onLogin = func(*Miner) {}

    params, err := json.Marshal(loginParams{
        Login: "wallet",
        Pass:  "x",
    })
    if err != nil {
        t.Fatalf("marshal login params: %v", err)
    }

    done := make(chan []byte, 1)
    go func() {
        line, readErr := bufio.NewReader(clientConn).ReadBytes('\n')
        if readErr != nil {
            done <- nil
            return
        }
        done <- line
    }()

    miner.handleLogin(stratumRequest{ID: 2, Method: "login", Params: params})

    line := <-done
    if line == nil {
        t.Fatal("expected login rejection response")
    }

    var payload struct {
        Error struct {
            Message string `json:"message"`
        } `json:"error"`
        Result map[string]any `json:"result"`
    }
    if err := json.Unmarshal(line, &payload); err != nil {
        t.Fatalf("unmarshal login response: %v", err)
    }
    if payload.Error.Message != "Proxy is full, try again later" {
        t.Fatalf("expected full-table error, got %q", payload.Error.Message)
    }
    if payload.Result != nil {
        t.Fatalf("expected no login success payload, got %#v", payload.Result)
    }
    if miner.MapperID() != -1 {
        t.Fatalf("expected rejected miner to remain unassigned, got mapper %d", miner.MapperID())
    }
}

func TestMiner_HandleLogin_FailedAssignmentDoesNotDispatchLoginEvent(t *testing.T) {
    minerConn, clientConn := net.Pipe()
    defer minerConn.Close()
    defer clientConn.Close()

    proxyInstance := &Proxy{
        config: &Config{
            Mode:    "nicehash",
            Workers: WorkersByUser,
            Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
            Pools:   []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
        },
        events:  NewEventBus(),
        stats:   NewStats(),
        workers: NewWorkers(WorkersByUser, nil),
        miners:  make(map[int64]*Miner),
    }
    proxyInstance.events.Subscribe(EventLogin, proxyInstance.stats.OnLogin)
    proxyInstance.workers.bindEvents(proxyInstance.events)

    miner := NewMiner(minerConn, 3333, nil)
    miner.extNH = true
    miner.onLogin = func(*Miner) {}
    miner.onLoginReady = func(m *Miner) {
        proxyInstance.events.Dispatch(Event{Type: EventLogin, Miner: m})
    }
    proxyInstance.miners[miner.ID()] = miner

    params, err := json.Marshal(loginParams{
        Login: "wallet",
        Pass:  "x",
    })
    if err != nil {
        t.Fatalf("marshal login params: %v", err)
    }

    go miner.handleLogin(stratumRequest{ID: 12, Method: "login", Params: params})

    line, err := bufio.NewReader(clientConn).ReadBytes('\n')
    if err != nil {
        t.Fatalf("read login rejection: %v", err)
    }

    var payload struct {
        Error struct {
            Message string `json:"message"`
        } `json:"error"`
    }
    if err := json.Unmarshal(line, &payload); err != nil {
        t.Fatalf("unmarshal login rejection: %v", err)
    }
    if payload.Error.Message != "Proxy is full, try again later" {
        t.Fatalf("expected full-table rejection, got %q", payload.Error.Message)
    }
    if now, max := proxyInstance.MinerCount(); now != 0 || max != 0 {
        t.Fatalf("expected failed login not to affect miner counts, got now=%d max=%d", now, max)
    }
    if records := proxyInstance.WorkerRecords(); len(records) != 0 {
        t.Fatalf("expected failed login not to create worker records, got %d", len(records))
    }
}

func TestMiner_HandleLogin_CustomDiffCap_Good(t *testing.T) {
    minerConn, clientConn := net.Pipe()
    defer minerConn.Close()
    defer clientConn.Close()

    miner := NewMiner(minerConn, 3333, nil)
    miner.onLogin = func(m *Miner) {
        m.SetRouteID(1)
        m.customDiff = 5000
    }
    miner.currentJob = Job{
        Blob:   strings.Repeat("0", 160),
        JobID:  "job-1",
        Target: "01000000",
    }

    params, err := json.Marshal(loginParams{
        Login: "wallet",
        Pass:  "x",
    })
    if err != nil {
        t.Fatalf("marshal login params: %v", err)
    }

    go miner.handleLogin(stratumRequest{ID: 3, Method: "login", Params: params})

    line, err := bufio.NewReader(clientConn).ReadBytes('\n')
    if err != nil {
        t.Fatalf("read login response: %v", err)
    }

    var payload struct {
        Result struct {
            Job struct {
                Target string `json:"target"`
            } `json:"job"`
        } `json:"result"`
    }
    if err := json.Unmarshal(line, &payload); err != nil {
        t.Fatalf("unmarshal login response: %v", err)
    }

    originalDiff := miner.currentJob.DifficultyFromTarget()
    cappedDiff := Job{Target: payload.Result.Job.Target}.DifficultyFromTarget()
    if cappedDiff == 0 || cappedDiff > 5000 {
        t.Fatalf("expected capped difficulty at or below 5000, got %d", cappedDiff)
    }
    if cappedDiff >= originalDiff {
        t.Fatalf("expected lowered target difficulty below %d, got %d", originalDiff, cappedDiff)
    }
    if miner.diff != cappedDiff {
        t.Fatalf("expected miner diff %d, got %d", cappedDiff, miner.diff)
    }
}

func TestMiner_HandleLogin_CustomDiffSuffix_Good(t *testing.T) {
    minerConn, clientConn := net.Pipe()
    defer minerConn.Close()
    defer clientConn.Close()

    miner := NewMiner(minerConn, 3333, nil)
    miner.onLogin = func(m *Miner) {
        m.SetRouteID(1)
    }

    params, err := json.Marshal(loginParams{
        Login: "wallet+50000",
        Pass:  "x",
    })
    if err != nil {
        t.Fatalf("marshal login params: %v", err)
    }

    go miner.handleLogin(stratumRequest{ID: 4, Method: "login", Params: params})

    line, err := bufio.NewReader(clientConn).ReadBytes('\n')
    if err != nil {
        t.Fatalf("read login response: %v", err)
    }

    var payload struct {
        Result struct {
            Status string `json:"status"`
        } `json:"result"`
    }
    if err := json.Unmarshal(line, &payload); err != nil {
        t.Fatalf("unmarshal login response: %v", err)
    }
    if payload.Result.Status != "OK" {
        t.Fatalf("expected login success, got %q", payload.Result.Status)
    }
    if got := miner.User(); got != "wallet" {
        t.Fatalf("expected stripped wallet name, got %q", got)
    }
    if got := miner.customDiff; got != 50000 {
        t.Fatalf("expected custom diff 50000, got %d", got)
    }
}
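The suffix test pins down the observable behaviour: "wallet+50000" logs in as user "wallet" with a 50000 custom difficulty. A standalone sketch of that split; the real parser lives elsewhere in the package and may treat malformed suffixes differently:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitDiffSuffix splits a login of the form "<wallet>+<diff>" into
// the bare wallet and the requested difficulty. Anything that does not
// parse as a positive integer is left attached to the login.
func splitDiffSuffix(login string) (user string, diff uint64) {
	i := strings.LastIndexByte(login, '+')
	if i < 0 {
		return login, 0
	}
	n, err := strconv.ParseUint(login[i+1:], 10, 64)
	if err != nil {
		return login, 0
	}
	return login[:i], n
}

func main() {
	user, diff := splitDiffSuffix("wallet+50000")
	fmt.Println(user, diff) // wallet 50000
}
```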
func TestMiner_HandleKeepalived_Good(t *testing.T) {
    minerConn, clientConn := net.Pipe()
    defer minerConn.Close()
    defer clientConn.Close()

    miner := NewMiner(minerConn, 3333, nil)

    done := make(chan struct{})
    go func() {
        miner.handleKeepalived(stratumRequest{ID: 9, Method: "keepalived"})
        close(done)
    }()

    line, err := bufio.NewReader(clientConn).ReadBytes('\n')
    if err != nil {
        t.Fatalf("read keepalived response: %v", err)
    }
    <-done

    var payload map[string]json.RawMessage
    if err := json.Unmarshal(line, &payload); err != nil {
        t.Fatalf("unmarshal keepalived response: %v", err)
    }
    if _, ok := payload["error"]; !ok {
        t.Fatalf("expected keepalived response to include error field, got %s", string(line))
    }
    if string(payload["error"]) != "null" {
        t.Fatalf("expected keepalived response error to be null, got %s", string(payload["error"]))
    }
    var result struct {
        Status string `json:"status"`
    }
    if err := json.Unmarshal(payload["result"], &result); err != nil {
        t.Fatalf("unmarshal keepalived result: %v", err)
    }
    if result.Status != "KEEPALIVED" {
        t.Fatalf("expected KEEPALIVED status, got %q", result.Status)
    }
}

func TestMiner_ReadLoop_RFCLineLimit_Good(t *testing.T) {
    minerConn, clientConn := net.Pipe()
    defer minerConn.Close()
    defer clientConn.Close()

    miner := NewMiner(minerConn, 3333, nil)
    miner.onLogin = func(m *Miner) {
        m.SetRouteID(1)
    }
    miner.Start()

    params, err := json.Marshal(loginParams{
        Login: "wallet",
        Pass:  "x",
        Agent: strings.Repeat("a", 5000),
    })
    if err != nil {
        t.Fatalf("marshal login params: %v", err)
    }
    request, err := json.Marshal(stratumRequest{ID: 4, Method: "login", Params: params})
    if err != nil {
        t.Fatalf("marshal request: %v", err)
    }
    if len(request) >= maxStratumLineLength {
        t.Fatalf("expected test request below RFC limit, got %d bytes", len(request))
    }

    if _, err := clientConn.Write(append(request, '\n')); err != nil {
        t.Fatalf("write login request: %v", err)
    }
    _ = clientConn.SetReadDeadline(time.Now().Add(time.Second))
    line, err := bufio.NewReader(clientConn).ReadBytes('\n')
    if err != nil {
        t.Fatalf("read login response: %v", err)
    }
    if len(line) == 0 {
        t.Fatal("expected login response for request under RFC limit")
    }
}

func TestMiner_ReadLoop_RFCLineLimit_Ugly(t *testing.T) {
    minerConn, clientConn := net.Pipe()
    defer minerConn.Close()
    defer clientConn.Close()

    miner := NewMiner(minerConn, 3333, nil)
    miner.Start()

    params, err := json.Marshal(loginParams{
        Login: "wallet",
        Pass:  "x",
        Agent: strings.Repeat("b", maxStratumLineLength),
    })
    if err != nil {
        t.Fatalf("marshal login params: %v", err)
    }
    request, err := json.Marshal(stratumRequest{ID: 5, Method: "login", Params: params})
    if err != nil {
        t.Fatalf("marshal request: %v", err)
    }
    if len(request) <= maxStratumLineLength {
        t.Fatalf("expected test request above RFC limit, got %d bytes", len(request))
    }

    writeDone := make(chan error, 1)
    go func() {
        _, writeErr := clientConn.Write(append(request, '\n'))
        writeDone <- writeErr
    }()

    var writeErr error
    select {
    case writeErr = <-writeDone:
    case <-time.After(time.Second):
        t.Fatal("timed out writing oversized request")
    }
    if writeErr == nil {
        _ = clientConn.SetReadDeadline(time.Now().Add(time.Second))
        line, err := bufio.NewReader(clientConn).ReadBytes('\n')
        if err == nil || len(line) > 0 {
            t.Fatalf("expected oversized request to close the connection, got line=%q err=%v", string(line), err)
        }
        return
    }

    if !strings.Contains(writeErr.Error(), "closed pipe") {
        t.Fatalf("expected oversized request to close the connection, got write error %v", writeErr)
    }
}
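Both read-loop tests hinge on maxStratumLineLength: a request under the limit gets a reply, an oversized one gets the connection dropped. One way to enforce such a cap without unbounded buffering is to size the bufio.Reader at the limit and treat a full buffer as fatal; the constant below is a hypothetical stand-in, not the package's real value:

```go
package main

import (
	"bufio"
	"errors"
	"fmt"
	"strings"
)

const maxLine = 4096 // stand-in for maxStratumLineLength

// readBoundedLine reads one newline-terminated line but gives up as
// soon as the buffered data exceeds the cap, instead of growing the
// buffer; the caller would then close the connection.
func readBoundedLine(r *bufio.Reader) ([]byte, error) {
	line, err := r.ReadSlice('\n')
	if errors.Is(err, bufio.ErrBufferFull) {
		return nil, errors.New("stratum line exceeds limit: closing connection")
	}
	return line, err
}

func main() {
	r := bufio.NewReaderSize(strings.NewReader(strings.Repeat("a", 10000)), maxLine)
	_, err := readBoundedLine(r)
	fmt.Println(err)
}
```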
43
miner_wire_test.go
Normal file

@ -0,0 +1,43 @@
package proxy

import (
    "bufio"
    "encoding/json"
    "net"
    "testing"
)

func TestMiner_Success_WritesNullError_Good(t *testing.T) {
    minerConn, clientConn := net.Pipe()
    defer minerConn.Close()
    defer clientConn.Close()

    miner := NewMiner(minerConn, 3333, nil)
    done := make(chan struct{})
    go func() {
        miner.Success(7, "OK")
        close(done)
    }()

    line, err := bufio.NewReader(clientConn).ReadBytes('\n')
    if err != nil {
        t.Fatalf("read success response: %v", err)
    }
    <-done

    var payload struct {
        Error  json.RawMessage `json:"error"`
        Result struct {
            Status string `json:"status"`
        } `json:"result"`
    }
    if err := json.Unmarshal(line, &payload); err != nil {
        t.Fatalf("unmarshal success response: %v", err)
    }
    if string(payload.Error) != "null" {
        t.Fatalf("expected success response error to be null, got %s", string(payload.Error))
    }
    if payload.Result.Status != "OK" {
        t.Fatalf("expected success status OK, got %q", payload.Result.Status)
    }
}
87
miners_document_test.go
Normal file

@ -0,0 +1,87 @@
package proxy

import "testing"

func TestProxy_MinersDocument_Good(t *testing.T) {
    p := &Proxy{
        miners: map[int64]*Miner{
            1: {
                id:       1,
                ip:       "10.0.0.1:49152",
                tx:       4096,
                rx:       512,
                state:    MinerStateReady,
                diff:     100000,
                user:     "WALLET",
                password: "secret",
                rigID:    "rig-alpha",
                agent:    "XMRig/6.21.0",
            },
        },
    }

    document := p.MinersDocument()
    if len(document.Miners) != 1 {
        t.Fatalf("expected one miner row, got %d", len(document.Miners))
    }
    row := document.Miners[0]
    if len(row) != 10 {
        t.Fatalf("expected 10 miner columns, got %d", len(row))
    }
    if row[7] != "********" {
        t.Fatalf("expected masked password, got %#v", row[7])
    }
}

func TestProxy_MinersDocument_Bad(t *testing.T) {
    var p *Proxy

    document := p.MinersDocument()
    if len(document.Miners) != 0 {
        t.Fatalf("expected no miners for a nil proxy, got %d", len(document.Miners))
    }
    if len(document.Format) != 10 {
        t.Fatalf("expected miner format columns to remain stable, got %d", len(document.Format))
    }
}

func TestProxy_MinersDocument_Ugly(t *testing.T) {
    p := &Proxy{
        miners: map[int64]*Miner{
            1: {
                id:       1,
                ip:       "10.0.0.1:49152",
                tx:       4096,
                rx:       512,
                state:    MinerStateReady,
                diff:     100000,
                user:     "WALLET",
                password: "secret-a",
                rigID:    "rig-alpha",
                agent:    "XMRig/6.21.0",
            },
            2: {
                id:       2,
                ip:       "10.0.0.2:49152",
                tx:       2048,
                rx:       256,
                state:    MinerStateWaitReady,
                diff:     50000,
                user:     "WALLET2",
                password: "secret-b",
                rigID:    "rig-beta",
                agent:    "XMRig/6.22.0",
            },
        },
    }

    document := p.MinersDocument()
    if len(document.Miners) != 2 {
        t.Fatalf("expected two miner rows, got %d", len(document.Miners))
    }
    for i, row := range document.Miners {
        if row[7] != "********" {
            t.Fatalf("expected masked password in row %d, got %#v", i, row[7])
        }
    }
}
@ -1,7 +1,9 @@
-// Package pool implements the outbound stratum pool client and failover strategy.
+// Package pool implements the outbound pool client and failover strategy.
 //
-// client := pool.NewStratumClient(poolCfg, listener)
-// client.Connect()
+// client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.example:3333", User: "WALLET", Pass: "x"}, listener)
+// if result := client.Connect(); result.OK {
+//     client.Login()
+// }
 package pool

 import (

@ -12,14 +14,13 @@ import (
     "dappco.re/go/proxy"
 )

 // StratumClient is one outbound stratum TCP (optionally TLS) connection to a pool.
 // The proxy presents itself to the pool as a standard stratum miner using the
 // wallet address and password from PoolConfig.
-// client := pool.NewStratumClient(poolCfg, listener)
-// client.Connect()
+//
+// client := pool.NewStratumClient(poolCfg, listener)
+// if result := client.Connect(); result.OK {
+//     client.Login()
+// }
 type StratumClient struct {
-    cfg      proxy.PoolConfig
+    config   proxy.PoolConfig
     listener StratumListener
     conn     net.Conn
     tlsConn  *tls.Conn // nil if plain TCP

@ -32,7 +33,9 @@ type StratumClient struct {
     sendMu sync.Mutex
 }

 // StratumListener receives events from the pool connection.
+// type listener struct{}
+//
+// func (listener) OnJob(job proxy.Job) {}
 type StratumListener interface {
     // OnJob is called when the pool pushes a new job notification or the login reply contains a job.
     OnJob(job proxy.Job)
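The full StratumListener contract also carries OnResultAccepted and OnDisconnect, visible in the disconnectSpy test double later in this diff. For reference, the smallest complete implementation is a no-op listener; the method shapes below are taken from that spy and from the interface hunk above:

```go
package pool

import "dappco.re/go/proxy"

// nopListener satisfies StratumListener with no-ops. Useful as a test
// double or as scaffolding while wiring a real consumer.
type nopListener struct{}

func (nopListener) OnJob(job proxy.Job)                             {}
func (nopListener) OnResultAccepted(seq int64, ok bool, msg string) {}
func (nopListener) OnDisconnect()                                   {}
```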
197
pool/impl.go
@ -6,7 +6,6 @@ import (
     "crypto/tls"
     "encoding/hex"
     "encoding/json"
-    "errors"
     "io"
     "net"
     "strconv"

@ -18,16 +17,27 @@ import (
 )

 // NewStrategyFactory creates a StrategyFactory for the supplied config.
-func NewStrategyFactory(cfg *proxy.Config) StrategyFactory {
+//
+// factory := pool.NewStrategyFactory(&proxy.Config{Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}}})
+// strategy := factory(listener)
+func NewStrategyFactory(config *proxy.Config) StrategyFactory {
     return func(listener StratumListener) Strategy {
-        return NewFailoverStrategy(cfg.Pools, listener, cfg)
+        var pools []proxy.PoolConfig
+        if config != nil {
+            pools = config.Pools
+        }
+        return NewFailoverStrategy(pools, listener, config)
     }
 }

 // NewStratumClient constructs a pool client.
-func NewStratumClient(cfg proxy.PoolConfig, listener StratumListener) *StratumClient {
+//
+// client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.example:3333", User: "WALLET", Pass: "x"}, listener)
+//
+// if result := client.Connect(); result.OK {
+//     client.Login()
+// }
+func NewStratumClient(poolConfig proxy.PoolConfig, listener StratumListener) *StratumClient {
     return &StratumClient{
-        cfg:      cfg,
+        config:   poolConfig,
         listener: listener,
         pending:  make(map[int64]struct{}),
     }

@ -43,20 +53,20 @@ func (c *StratumClient) IsActive() bool {
     return c.active
 }

 // Connect dials the pool.
+// result := client.Connect()
 func (c *StratumClient) Connect() proxy.Result {
     if c == nil {
-        return proxy.Result{OK: false, Error: errors.New("client is nil")}
+        return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.client", "client is nil", nil)}
     }
-    addr := c.cfg.URL
+    addr := c.config.URL
     if addr == "" {
-        return proxy.Result{OK: false, Error: errors.New("pool url is empty")}
+        return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.client", "pool url is empty", nil)}
     }
     conn, err := net.Dial("tcp", addr)
     if err != nil {
-        return proxy.Result{OK: false, Error: err}
+        return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.client", "dial pool failed", err)}
     }
-    if c.cfg.TLS {
+    if c.config.TLS {
         host := addr
         if strings.Contains(addr, ":") {
             host, _, _ = net.SplitHostPort(addr)

@ -65,18 +75,18 @@ func (c *StratumClient) Connect() proxy.Result {
         tlsConn := tls.Client(conn, tlsCfg)
         if err := tlsConn.Handshake(); err != nil {
             _ = conn.Close()
-            return proxy.Result{OK: false, Error: err}
+            return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.tls", "handshake failed", err)}
         }
-        if fp := strings.TrimSpace(strings.ToLower(c.cfg.TLSFingerprint)); fp != "" {
+        if fp := strings.TrimSpace(strings.ToLower(c.config.TLSFingerprint)); fp != "" {
             cert := tlsConn.ConnectionState().PeerCertificates
             if len(cert) == 0 {
                 _ = tlsConn.Close()
-                return proxy.Result{OK: false, Error: errors.New("missing certificate")}
+                return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.tls", "missing certificate", nil)}
             }
             sum := sha256.Sum256(cert[0].Raw)
             if hex.EncodeToString(sum[:]) != fp {
                 _ = tlsConn.Close()
-                return proxy.Result{OK: false, Error: errors.New("tls fingerprint mismatch")}
+                return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.tls", "tls fingerprint mismatch", nil)}
             }
         }
         c.conn = tlsConn

@ -88,20 +98,22 @@ func (c *StratumClient) Connect() proxy.Result {
     return proxy.Result{OK: true}
 }

 // Login sends the miner-style login request to the pool.
+// client.Login()
+//
+// A login reply with a job triggers `OnJob` immediately.
 func (c *StratumClient) Login() {
     if c == nil || c.conn == nil {
         return
     }
     params := map[string]any{
-        "login": c.cfg.User,
-        "pass":  c.cfg.Pass,
+        "login": c.config.User,
+        "pass":  c.config.Pass,
     }
-    if c.cfg.RigID != "" {
-        params["rigid"] = c.cfg.RigID
+    if c.config.RigID != "" {
+        params["rigid"] = c.config.RigID
     }
-    if c.cfg.Algo != "" {
-        params["algo"] = []string{c.cfg.Algo}
+    if c.config.Algo != "" {
+        params["algo"] = []string{c.config.Algo}
     }
     req := map[string]any{
         "id": 1,

@ -112,7 +124,7 @@ func (c *StratumClient) Login() {
     _ = c.writeJSON(req)
 }

 // Submit forwards a share to the pool.
+// seq := client.Submit("job-1", "deadbeef", "HASH64HEX", "cn/r")
 func (c *StratumClient) Submit(jobID, nonce, result, algo string) int64 {
     if c == nil {
         return 0

@ -134,18 +146,39 @@ func (c *StratumClient) Submit(jobID, nonce, result, algo string) int64 {
             "algo":   algo,
         },
     }
-    _ = c.writeJSON(req)
+    if err := c.writeJSON(req); err != nil {
+        c.mu.Lock()
+        delete(c.pending, seq)
+        c.mu.Unlock()
+    }
     return seq
 }

-// Disconnect closes the connection and notifies the listener.
+// client.Keepalive()
+func (c *StratumClient) Keepalive() {
+    if c == nil || c.conn == nil || !c.IsActive() {
+        return
+    }
+    req := map[string]any{
+        "id":      atomic.AddInt64(&c.seq, 1),
+        "jsonrpc": "2.0",
+        "method":  "keepalived",
+        "params": map[string]any{
+            "id": c.sessionID,
+        },
+    }
+    _ = c.writeJSON(req)
+}
+
+// client.Disconnect()
 func (c *StratumClient) Disconnect() {
     if c == nil {
         return
     }
     c.closedOnce.Do(func() {
-        if c.conn != nil {
-            _ = c.conn.Close()
+        conn := c.resetConnectionState()
+        if conn != nil {
+            _ = conn.Close()
         }
         if c.listener != nil {
             c.listener.OnDisconnect()

@ -155,26 +188,43 @@ func (c *StratumClient) Disconnect() {

 func (c *StratumClient) notifyDisconnect() {
     c.closedOnce.Do(func() {
+        c.resetConnectionState()
         if c.listener != nil {
             c.listener.OnDisconnect()
         }
     })
 }

+func (c *StratumClient) resetConnectionState() net.Conn {
+    if c == nil {
+        return nil
+    }
+    c.mu.Lock()
+    defer c.mu.Unlock()
+    conn := c.conn
+    c.conn = nil
+    c.tlsConn = nil
+    c.sessionID = ""
+    c.active = false
+    c.pending = make(map[int64]struct{})
+    return conn
+}
+
 func (c *StratumClient) writeJSON(payload any) error {
     c.sendMu.Lock()
     defer c.sendMu.Unlock()
     if c.conn == nil {
-        return errors.New("connection is nil")
+        return proxy.NewScopedError("proxy.pool.client", "connection is nil", nil)
     }
     data, err := json.Marshal(payload)
     if err != nil {
-        return err
+        return proxy.NewScopedError("proxy.pool.client", "marshal request failed", err)
     }
     data = append(data, '\n')
     _, err = c.conn.Write(data)
+    if err != nil {
+        c.notifyDisconnect()
+        return proxy.NewScopedError("proxy.pool.client", "write request failed", err)
+    }
     return err
 }
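This hunk swaps every errors.New for proxy.NewScopedError(scope, message, cause). The constructor's implementation is not part of this diff; purely as an assumption about its shape, a scoped error that still plays well with errors.Is/As chains could look like:

```go
package proxy

import "fmt"

// ScopedError is a hypothetical shape for the values returned by
// NewScopedError in this diff: a dotted scope, a message, and an
// optional wrapped cause. The real type may differ.
type ScopedError struct {
	Scope string
	Msg   string
	Err   error
}

func (e *ScopedError) Error() string {
	if e.Err != nil {
		return fmt.Sprintf("%s: %s: %v", e.Scope, e.Msg, e.Err)
	}
	return fmt.Sprintf("%s: %s", e.Scope, e.Msg)
}

// Unwrap exposes the cause so errors.Is/errors.As keep working.
func (e *ScopedError) Unwrap() error { return e.Err }

func NewScopedError(scope, msg string, err error) error {
	return &ScopedError{Scope: scope, Msg: msg, Err: err}
}
```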
@ -251,6 +301,11 @@ func (c *StratumClient) handleMessage(line []byte) {
         }
     }

+    if len(base.Error) > 0 && requestID(base.ID) == 1 {
+        c.notifyDisconnect()
+        return
+    }
+
     if base.Method == "job" {
         var params struct {
             Blob string `json:"blob"`

@ -319,37 +374,38 @@ func (c *StratumClient) handleMessage(line []byte) {
 }

 // NewFailoverStrategy creates the ordered pool failover wrapper.
-func NewFailoverStrategy(pools []proxy.PoolConfig, listener StratumListener, cfg *proxy.Config) *FailoverStrategy {
+func NewFailoverStrategy(pools []proxy.PoolConfig, listener StratumListener, config *proxy.Config) *FailoverStrategy {
     return &FailoverStrategy{
         pools:    pools,
         listener: listener,
-        cfg:      cfg,
+        config:   config,
     }
 }

-// Connect establishes the first reachable pool connection.
+// strategy.Connect()
 func (s *FailoverStrategy) Connect() {
     if s == nil {
         return
     }
     s.mu.Lock()
     defer s.mu.Unlock()
+    s.closing = false
     s.connectLocked(0)
 }

 func (s *FailoverStrategy) connectLocked(start int) {
-    enabled := enabledPools(s.pools)
+    enabled := enabledPools(s.currentPools())
     if len(enabled) == 0 {
         return
     }
     retries := 1
     retryPause := time.Second
-    if s.cfg != nil {
-        if s.cfg.Retries > 0 {
-            retries = s.cfg.Retries
+    if s.config != nil {
+        if s.config.Retries > 0 {
+            retries = s.config.Retries
         }
-        if s.cfg.RetryPause > 0 {
-            retryPause = time.Duration(s.cfg.RetryPause) * time.Second
+        if s.config.RetryPause > 0 {
+            retryPause = time.Duration(s.config.RetryPause) * time.Second
         }
     }
     for attempt := 0; attempt < retries; attempt++ {

@ -368,7 +424,17 @@ func (s *FailoverStrategy) connectLocked(start int) {
     }
 }

-// Submit sends the share through the active client.
+func (s *FailoverStrategy) currentPools() []proxy.PoolConfig {
+    if s == nil {
+        return nil
+    }
+    if s.config != nil && len(s.config.Pools) > 0 {
+        return s.config.Pools
+    }
+    return s.pools
+}
+
+// seq := strategy.Submit(jobID, nonce, result, algo)
 func (s *FailoverStrategy) Submit(jobID, nonce, result, algo string) int64 {
     if s == nil || s.client == nil {
         return 0

@ -376,24 +442,51 @@ func (s *FailoverStrategy) Submit(jobID, nonce, result, algo string) int64 {
     return s.client.Submit(jobID, nonce, result, algo)
 }

-// Disconnect closes the active client.
+// strategy.Disconnect()
 func (s *FailoverStrategy) Disconnect() {
     if s == nil {
         return
     }
     s.mu.Lock()
-    defer s.mu.Unlock()
-    if s.client != nil {
-        s.client.Disconnect()
-        s.client = nil
+    client := s.client
+    s.closing = true
+    s.client = nil
+    s.mu.Unlock()
+    if client != nil {
+        client.Disconnect()
     }
 }

-// IsActive reports whether the current client has received a job.
+// strategy.ReloadPools()
+func (s *FailoverStrategy) ReloadPools() {
+    if s == nil {
+        return
+    }
+    s.mu.Lock()
+    s.current = 0
+    s.mu.Unlock()
+    s.Disconnect()
+    s.Connect()
+}
+
+// active := strategy.IsActive()
 func (s *FailoverStrategy) IsActive() bool {
     return s != nil && s.client != nil && s.client.IsActive()
 }

+// Tick keeps an active pool connection alive when configured.
+func (s *FailoverStrategy) Tick(ticks uint64) {
+    if s == nil || ticks == 0 || ticks%60 != 0 {
+        return
+    }
+    s.mu.Lock()
+    client := s.client
+    s.mu.Unlock()
+    if client != nil && client.config.Keepalive {
+        client.Keepalive()
+    }
+}
+
 // OnJob forwards the pool job to the outer listener.
 func (s *FailoverStrategy) OnJob(job proxy.Job) {
     if s != nil && s.listener != nil {

@ -408,11 +501,21 @@ func (s *FailoverStrategy) OnResultAccepted(sequence int64, accepted bool, error
     }
 }

-// OnDisconnect retries from the primary pool and forwards the disconnect.
+// strategy.OnDisconnect()
 func (s *FailoverStrategy) OnDisconnect() {
     if s == nil {
         return
     }
     s.mu.Lock()
     s.client = nil
+    closing := s.closing
+    if closing {
+        s.closing = false
+    }
     s.mu.Unlock()
+    if closing {
+        return
+    }
     if s.listener != nil {
         s.listener.OnDisconnect()
     }

168
pool/impl_test.go
Normal file

@ -0,0 +1,168 @@
package pool

import (
    "testing"

    "dappco.re/go/proxy"
)

// TestFailoverStrategy_CurrentPools_Good verifies that currentPools follows the live config.
//
// strategy := pool.NewFailoverStrategy(cfg.Pools, nil, cfg)
// strategy.currentPools() // returns cfg.Pools
func TestFailoverStrategy_CurrentPools_Good(t *testing.T) {
    cfg := &proxy.Config{
        Mode:    "nicehash",
        Workers: proxy.WorkersByRigID,
        Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
        Pools:   []proxy.PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
    }
    strategy := NewFailoverStrategy(cfg.Pools, nil, cfg)

    if got := len(strategy.currentPools()); got != 1 {
        t.Fatalf("expected 1 pool, got %d", got)
    }

    cfg.Pools = []proxy.PoolConfig{{URL: "pool-b.example:4444", Enabled: true}}

    if got := strategy.currentPools(); len(got) != 1 || got[0].URL != "pool-b.example:4444" {
        t.Fatalf("expected current pools to follow config reload, got %+v", got)
    }
}

// TestFailoverStrategy_CurrentPools_Bad verifies that a nil strategy returns an empty pool list.
//
// var strategy *pool.FailoverStrategy
// strategy.currentPools() // nil
func TestFailoverStrategy_CurrentPools_Bad(t *testing.T) {
    var strategy *FailoverStrategy
    pools := strategy.currentPools()
    if pools != nil {
        t.Fatalf("expected nil pools from nil strategy, got %+v", pools)
    }
}

// TestFailoverStrategy_CurrentPools_Ugly verifies that a strategy with a nil config
// falls back to the pools passed at construction time.
//
// strategy := pool.NewFailoverStrategy(initialPools, nil, nil)
// strategy.currentPools() // returns initialPools
func TestFailoverStrategy_CurrentPools_Ugly(t *testing.T) {
    initialPools := []proxy.PoolConfig{
        {URL: "fallback.example:3333", Enabled: true},
        {URL: "fallback.example:4444", Enabled: false},
    }
    strategy := NewFailoverStrategy(initialPools, nil, nil)

    got := strategy.currentPools()
    if len(got) != 2 {
        t.Fatalf("expected 2 pools from constructor fallback, got %d", len(got))
    }
    if got[0].URL != "fallback.example:3333" {
        t.Fatalf("expected constructor pool URL, got %q", got[0].URL)
    }
}

// TestFailoverStrategy_EnabledPools_Good verifies that only enabled pools are selected.
//
// enabled := pool.enabledPools(pools) // filters to enabled-only
func TestFailoverStrategy_EnabledPools_Good(t *testing.T) {
    pools := []proxy.PoolConfig{
        {URL: "active.example:3333", Enabled: true},
        {URL: "disabled.example:3333", Enabled: false},
        {URL: "active2.example:3333", Enabled: true},
    }
    got := enabledPools(pools)
    if len(got) != 2 {
        t.Fatalf("expected 2 enabled pools, got %d", len(got))
    }
    if got[0].URL != "active.example:3333" || got[1].URL != "active2.example:3333" {
        t.Fatalf("expected only enabled pool URLs, got %+v", got)
    }
}

// TestFailoverStrategy_EnabledPools_Bad verifies that an empty pool list returns empty.
//
// pool.enabledPools(nil) // empty
func TestFailoverStrategy_EnabledPools_Bad(t *testing.T) {
    got := enabledPools(nil)
    if len(got) != 0 {
        t.Fatalf("expected 0 pools from nil input, got %d", len(got))
    }
}

// TestFailoverStrategy_EnabledPools_Ugly verifies that all-disabled pools return empty.
//
// pool.enabledPools([]proxy.PoolConfig{{Enabled: false}}) // empty
func TestFailoverStrategy_EnabledPools_Ugly(t *testing.T) {
    pools := []proxy.PoolConfig{
        {URL: "a.example:3333", Enabled: false},
        {URL: "b.example:3333", Enabled: false},
    }
    got := enabledPools(pools)
    if len(got) != 0 {
        t.Fatalf("expected 0 enabled pools when all disabled, got %d", len(got))
    }
}

// TestNewStrategyFactory_Good verifies the factory creates a strategy connected to the config.
//
// factory := pool.NewStrategyFactory(cfg)
// strategy := factory(listener) // creates FailoverStrategy
func TestNewStrategyFactory_Good(t *testing.T) {
    cfg := &proxy.Config{
        Mode:    "nicehash",
        Workers: proxy.WorkersByRigID,
        Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
        Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
    }
    factory := NewStrategyFactory(cfg)
    if factory == nil {
        t.Fatal("expected a non-nil factory")
    }
    strategy := factory(nil)
    if strategy == nil {
        t.Fatal("expected a non-nil strategy from factory")
    }
    if strategy.IsActive() {
        t.Fatal("expected new strategy to be inactive before connecting")
    }
}

// TestNewStrategyFactory_Bad verifies a factory created with nil config does not panic.
//
// factory := pool.NewStrategyFactory(nil)
// strategy := factory(nil)
func TestNewStrategyFactory_Bad(t *testing.T) {
    factory := NewStrategyFactory(nil)
    strategy := factory(nil)
    if strategy == nil {
        t.Fatal("expected a non-nil strategy even from nil config")
    }
}

// TestNewStrategyFactory_Ugly verifies the factory forwards the correct pool list to the strategy.
//
// cfg.Pools = append(cfg.Pools, proxy.PoolConfig{URL: "added.example:3333", Enabled: true})
// strategy := factory(nil)
// // strategy sees the updated pools via the shared config pointer
func TestNewStrategyFactory_Ugly(t *testing.T) {
    cfg := &proxy.Config{
        Mode:    "nicehash",
        Workers: proxy.WorkersByRigID,
        Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
        Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
    }
    factory := NewStrategyFactory(cfg)
    cfg.Pools = append(cfg.Pools, proxy.PoolConfig{URL: "added.example:3333", Enabled: true})

    strategy := factory(nil)
    fs, ok := strategy.(*FailoverStrategy)
    if !ok {
        t.Fatal("expected FailoverStrategy")
    }
    pools := fs.currentPools()
    if len(pools) != 2 {
        t.Fatalf("expected 2 pools after config update, got %d", len(pools))
    }
}
112
pool/keepalive_test.go
Normal file

@ -0,0 +1,112 @@
package pool

import (
    "bufio"
    "encoding/json"
    "net"
    "testing"
    "time"
)

func TestStratumClient_Keepalive_Good(t *testing.T) {
    serverConn, clientConn := net.Pipe()
    defer serverConn.Close()
    defer clientConn.Close()

    client := &StratumClient{
        conn:      clientConn,
        active:    true,
        sessionID: "session-1",
    }

    done := make(chan struct{})
    go func() {
        client.Keepalive()
        close(done)
    }()

    line, err := bufio.NewReader(serverConn).ReadBytes('\n')
    if err != nil {
        t.Fatalf("read keepalive request: %v", err)
    }
    <-done

    var payload map[string]any
    if err := json.Unmarshal(line, &payload); err != nil {
        t.Fatalf("unmarshal keepalive request: %v", err)
    }
    if got := payload["method"]; got != "keepalived" {
        t.Fatalf("expected keepalived method, got %#v", got)
    }
    params, ok := payload["params"].(map[string]any)
    if !ok {
        t.Fatalf("expected params object, got %#v", payload["params"])
    }
    if got := params["id"]; got != "session-1" {
        t.Fatalf("expected session id in keepalive payload, got %#v", got)
    }
}

func TestStratumClient_Keepalive_Bad(t *testing.T) {
    serverConn, clientConn := net.Pipe()
    defer serverConn.Close()
    defer clientConn.Close()

    client := &StratumClient{
        conn:   clientConn,
        active: false,
    }

    client.Keepalive()

    if err := serverConn.SetReadDeadline(time.Now().Add(50 * time.Millisecond)); err != nil {
        t.Fatalf("set deadline: %v", err)
    }
    buf := make([]byte, 1)
    if _, err := serverConn.Read(buf); err == nil {
        t.Fatalf("expected no keepalive data while inactive")
    }
}

func TestStratumClient_Keepalive_Ugly(t *testing.T) {
    serverConn, clientConn := net.Pipe()
    defer serverConn.Close()
    defer clientConn.Close()

    client := &StratumClient{
        conn:      clientConn,
        active:    true,
        sessionID: "session-2",
    }

    reader := bufio.NewReader(serverConn)
    done := make(chan struct{})
    go func() {
        client.Keepalive()
        client.Keepalive()
        close(done)
    }()

    first, err := reader.ReadBytes('\n')
    if err != nil {
        t.Fatalf("read first keepalive request: %v", err)
    }
    second, err := reader.ReadBytes('\n')
    if err != nil {
        t.Fatalf("read second keepalive request: %v", err)
    }
    <-done

    var firstPayload map[string]any
    if err := json.Unmarshal(first, &firstPayload); err != nil {
        t.Fatalf("unmarshal first keepalive request: %v", err)
    }
    var secondPayload map[string]any
    if err := json.Unmarshal(second, &secondPayload); err != nil {
        t.Fatalf("unmarshal second keepalive request: %v", err)
    }

    if firstPayload["id"] == secondPayload["id"] {
        t.Fatalf("expected keepalive request ids to be unique")
    }
}
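The keepalive tests only inspect the wire format, which makes the frame easy to reproduce standalone. This sketch marshals the same shape Keepalive writes: one JSON object per line, newline-terminated, with the session id echoed under params.id. The literal id 1 stands in for the client's atomic sequence counter:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors the request body built in StratumClient.Keepalive.
	req := map[string]any{
		"id":      1, // real client uses atomic.AddInt64(&c.seq, 1)
		"jsonrpc": "2.0",
		"method":  "keepalived",
		"params":  map[string]any{"id": "session-1"},
	}
	data, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	// On the wire this is followed by a single '\n'.
	fmt.Println(string(data))
}
```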
@ -7,31 +7,39 @@ import (
 )

 // FailoverStrategy wraps an ordered slice of PoolConfig entries.
 // It connects to the first enabled pool and fails over in order on error.
 // On reconnect it always retries from the primary first.
 //
-// strategy := pool.NewFailoverStrategy(cfg.Pools, listener, cfg)
+// strategy := pool.NewFailoverStrategy([]proxy.PoolConfig{
+//     {URL: "primary.example:3333", Enabled: true},
+//     {URL: "backup.example:3333", Enabled: true},
+// }, listener, cfg)
 // strategy.Connect()
 type FailoverStrategy struct {
     pools    []proxy.PoolConfig
     current  int
     client   *StratumClient
     listener StratumListener
-    cfg      *proxy.Config
+    config   *proxy.Config
+    closing  bool
     mu       sync.Mutex
 }

-// StrategyFactory creates a new FailoverStrategy for a given StratumListener.
-// Used by splitters to create per-mapper strategies without coupling to Config.
+// StrategyFactory creates a FailoverStrategy for a given StratumListener.
 //
 // factory := pool.NewStrategyFactory(cfg)
-// strategy := factory(listener) // each mapper calls this
+// strategy := factory(listener)
 type StrategyFactory func(listener StratumListener) Strategy

-// Strategy is the interface the splitters use to submit shares and check pool state.
+// Strategy is the interface splitters use to submit shares and inspect pool state.
 type Strategy interface {
     Connect()
     Submit(jobID, nonce, result, algo string) int64
     Disconnect()
     IsActive() bool
 }

+// ReloadableStrategy re-establishes an upstream connection after config changes.
+//
+// strategy.ReloadPools()
+type ReloadableStrategy interface {
+    ReloadPools()
+}
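Failover order is simply config order with disabled entries skipped, which the impl_test cases above pin down. A standalone rendering of that filter, with a local PoolConfig stand-in so the snippet runs outside the module:

```go
package main

import "fmt"

// PoolConfig is a local stand-in for proxy.PoolConfig, reduced to the
// two fields the filter cares about.
type PoolConfig struct {
	URL     string
	Enabled bool
}

// enabledPools mirrors the filter the failover strategy applies before
// dialling: connection order is config order, enabled entries only.
func enabledPools(pools []PoolConfig) []PoolConfig {
	var out []PoolConfig
	for _, p := range pools {
		if p.Enabled {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	pools := []PoolConfig{
		{URL: "primary.example:3333", Enabled: true},
		{URL: "disabled.example:3333", Enabled: false},
		{URL: "backup.example:3333", Enabled: true},
	}
	fmt.Println(enabledPools(pools)) // primary first, backup second
}
```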
148
pool/strategy_disconnect_test.go
Normal file

@ -0,0 +1,148 @@
package pool

import (
    "encoding/json"
    "net"
    "sync/atomic"
    "testing"
    "time"

    "dappco.re/go/proxy"
)

type disconnectSpy struct {
    disconnects atomic.Int64
}

func (s *disconnectSpy) OnJob(proxy.Job) {}

func (s *disconnectSpy) OnResultAccepted(int64, bool, string) {}

func (s *disconnectSpy) OnDisconnect() {
    s.disconnects.Add(1)
}

func TestFailoverStrategy_Disconnect_Good(t *testing.T) {
    spy := &disconnectSpy{}
    strategy := &FailoverStrategy{
        listener: spy,
        client:   &StratumClient{listener: nil},
    }
    strategy.client.listener = strategy

    strategy.Disconnect()
    time.Sleep(10 * time.Millisecond)

    if got := spy.disconnects.Load(); got != 0 {
        t.Fatalf("expected intentional disconnect to suppress reconnect, got %d listener calls", got)
    }
}

func TestFailoverStrategy_Disconnect_Bad(t *testing.T) {
    spy := &disconnectSpy{}
    strategy := &FailoverStrategy{listener: spy}

    strategy.OnDisconnect()

    if got := spy.disconnects.Load(); got != 1 {
        t.Fatalf("expected external disconnect to notify listener once, got %d", got)
    }
}

func TestFailoverStrategy_Disconnect_Ugly(t *testing.T) {
    spy := &disconnectSpy{}
    strategy := &FailoverStrategy{
        listener: spy,
        client:   &StratumClient{listener: nil},
    }
    strategy.client.listener = strategy

    strategy.Disconnect()
    strategy.Disconnect()
    time.Sleep(10 * time.Millisecond)

    if got := spy.disconnects.Load(); got != 0 {
        t.Fatalf("expected repeated intentional disconnects to remain silent, got %d listener calls", got)
    }
}

func TestStratumClient_NotifyDisconnect_ClearsState_Good(t *testing.T) {
    serverConn, clientConn := net.Pipe()
    defer serverConn.Close()

    spy := &disconnectSpy{}
    client := &StratumClient{
        conn:      clientConn,
        listener:  spy,
        sessionID: "session-1",
        active:    true,
        pending: map[int64]struct{}{
            7: {},
        },
    }

    client.notifyDisconnect()

    if got := spy.disconnects.Load(); got != 1 {
        t.Fatalf("expected one disconnect notification, got %d", got)
    }
    if client.conn != nil {
        t.Fatalf("expected pooled connection to be cleared")
    }
    if client.sessionID != "" {
        t.Fatalf("expected session id to be cleared, got %q", client.sessionID)
    }
    if client.IsActive() {
        t.Fatalf("expected client to stop reporting active after disconnect")
    }
    if len(client.pending) != 0 {
        t.Fatalf("expected pending submit state to be cleared, got %d entries", len(client.pending))
    }
}

func TestFailoverStrategy_OnDisconnect_ClearsClient_Bad(t *testing.T) {
    spy := &disconnectSpy{}
    strategy := &FailoverStrategy{
        listener: spy,
        client:   &StratumClient{active: true, pending: make(map[int64]struct{})},
    }

    strategy.OnDisconnect()
    time.Sleep(10 * time.Millisecond)

    if strategy.client != nil {
        t.Fatalf("expected strategy to drop the stale client before reconnect")
    }
    if strategy.IsActive() {
        t.Fatalf("expected strategy to report inactive while reconnect is pending")
    }
    if got := spy.disconnects.Load(); got != 1 {
        t.Fatalf("expected one disconnect notification, got %d", got)
    }
}

func TestStratumClient_HandleMessage_LoginErrorDisconnects_Ugly(t *testing.T) {
    spy := &disconnectSpy{}
    client := &StratumClient{
        listener: spy,
        pending:  make(map[int64]struct{}),
    }

    payload, err := json.Marshal(map[string]any{
        "id":      1,
        "jsonrpc": "2.0",
        "error": map[string]any{
            "code":    -1,
            "message": "Invalid payment address provided",
        },
    })
    if err != nil {
        t.Fatalf("marshal login error payload: %v", err)
    }

    client.handleMessage(payload)

    if got := spy.disconnects.Load(); got != 1 {
        t.Fatalf("expected login failure to disconnect upstream once, got %d", got)
    }
}
145
proxy.go
@ -1,45 +1,59 @@
-// Package proxy is a CryptoNote stratum mining proxy library.
-//
-// It accepts miner connections over TCP (optionally TLS), splits the 32-bit nonce
-// space across up to 256 simultaneous miners per upstream pool connection (NiceHash
-// mode), and presents a small monitoring API.
-//
-// Full specification: docs/RFC.md
+// Package proxy is the mining proxy library.
+//
+// cfg := &proxy.Config{Mode: "nicehash", Bind: []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}}, Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}}, Workers: proxy.WorkersByRigID}
+// p, result := proxy.New(cfg)
-// if result.OK { p.Start() }
+// if result.OK {
+//     p.Start()
+// }
 package proxy

 import (
     "net/http"
     "sync"
     "sync/atomic"
     "time"
 )

-// Proxy is the top-level orchestrator. It owns the server, splitter, stats, workers,
-// event bus, tick goroutine, and optional HTTP API.
+// Proxy wires the configured listeners, splitters, stats, workers, and log sinks.
 //
 // cfg := &proxy.Config{
 //     Mode:    "nicehash",
 //     Bind:    []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}},
 //     Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
 //     Workers: proxy.WorkersByRigID,
 // }
 // p, result := proxy.New(cfg)
-// if result.OK { p.Start() }
+// if result.OK {
+//     p.Start()
+// }
 type Proxy struct {
-    config     *Config
-    splitter   Splitter
-    stats      *Stats
-    workers    *Workers
-    events     *EventBus
-    servers    []*Server
-    ticker     *time.Ticker
-    watcher    *ConfigWatcher
-    done       chan struct{}
-    stopOnce   sync.Once
-    minersMu   sync.RWMutex
-    miners     map[int64]*Miner
-    customDiff *CustomDiff
-    rateLimit  *RateLimiter
-    httpServer *http.Server
+    config            *Config
+    configMu          sync.RWMutex
+    splitter          Splitter
+    shareSink         ShareSink
+    stats             *Stats
+    workers           *Workers
+    events            *EventBus
+    servers           []*Server
+    ticker            *time.Ticker
+    watcher           *ConfigWatcher
+    done              chan struct{}
+    stopOnce          sync.Once
+    minersMu          sync.RWMutex
+    miners            map[int64]*Miner
+    customDiff        *CustomDiff
+    customDiffBuckets *CustomDiffBuckets
+    rateLimit         *RateLimiter
+    httpServer        *http.Server
+    accessLog         *accessLogSink
+    shareLog          *shareLogSink
+    submitCount       atomic.Int64
 }

-// Splitter is the interface both NonceSplitter and SimpleSplitter satisfy.
+// Splitter routes miner logins, submits, and disconnects to the active upstream strategy.
+//
+// splitter := nicehash.NewNonceSplitter(cfg, bus, pool.NewStrategyFactory(cfg))
+// splitter.Connect()
 type Splitter interface {
     // Connect establishes the first pool upstream connection.
     Connect()
@ -57,7 +71,18 @@ type Splitter interface {
     Upstreams() UpstreamStats
 }

-// UpstreamStats carries pool connection state counts for monitoring.
+// ShareSink consumes share outcomes from the proxy event stream.
+//
+// sink.OnAccept(proxy.Event{Miner: miner, Diff: 100000})
+// sink.OnReject(proxy.Event{Miner: miner, Error: "Invalid nonce"})
+type ShareSink interface {
+    OnAccept(Event)
+    OnReject(Event)
+}
+
+// UpstreamStats reports pool connection counts.
+//
+// stats := proxy.UpstreamStats{Active: 1, Sleep: 0, Error: 0, Total: 1}
 type UpstreamStats struct {
     Active uint64 // connections currently receiving jobs
     Sleep  uint64 // idle connections (simple mode reuse pool)
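A ShareSink only needs those two methods, so a counting sink is enough to subscribe to accept/reject outcomes. A sketch in package terms; the Event fields beyond what the doc comment shows are not visible in this diff, so nothing else is assumed:

```go
package proxy

// countingSink is a minimal ShareSink: it tallies outcomes and nothing
// else. The shareLog sink added elsewhere in this diff presumably does
// a similar walk with file output.
type countingSink struct {
	accepted int64
	rejected int64
}

func (s *countingSink) OnAccept(e Event) { s.accepted++ }
func (s *countingSink) OnReject(e Event) { s.rejected++ }
```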
@ -65,12 +90,16 @@ type UpstreamStats struct {
     Total uint64 // Active + Sleep + Error
 }

-// LoginEvent is dispatched when a miner completes the login handshake.
+// LoginEvent is dispatched when a miner completes login.
+//
+// event := proxy.LoginEvent{Miner: miner}
 type LoginEvent struct {
     Miner *Miner
 }

-// SubmitEvent is dispatched when a miner submits a share.
+// SubmitEvent carries one miner share submission.
+//
+// event := proxy.SubmitEvent{Miner: miner, JobID: "job-1", Nonce: "deadbeef", Result: "HASH", RequestID: 2}
 type SubmitEvent struct {
     Miner *Miner
     JobID string
@ -80,50 +109,56 @@ type SubmitEvent struct {
     RequestID int64
 }

-// CloseEvent is dispatched when a miner TCP connection closes.
+// CloseEvent is dispatched when a miner connection closes.
+//
+// event := proxy.CloseEvent{Miner: miner}
 type CloseEvent struct {
     Miner *Miner
 }

-// ConfigWatcher polls a config file for mtime changes and calls onChange on modification.
-// Uses 1-second polling; does not require fsnotify.
+// ConfigWatcher polls a config file every second and reloads on modification.
 //
-// w := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
+// watcher := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
 //     p.Reload(cfg)
 // })
-// w.Start()
+// watcher.Start()
 type ConfigWatcher struct {
-    path     string
-    onChange func(*Config)
-    lastMod  time.Time
-    done     chan struct{}
+    configPath     string
+    onConfigChange func(*Config)
+    lastModifiedAt time.Time
+    stopCh         chan struct{}
+    mu             sync.Mutex
+    started        bool
 }
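The watcher needs no fsnotify: one os.Stat per second and an mtime comparison is the whole mechanism. A self-contained sketch of that loop; the real ConfigWatcher also parses the file into *Config, guards Start with mu/started, and honours stopCh:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// watchMtime polls path once per second and invokes onChange whenever
// the modification time moves forward. Transient stat errors are
// ignored so a mid-write rename does not kill the watcher.
func watchMtime(path string, onChange func()) {
	var last time.Time
	for range time.Tick(time.Second) {
		info, err := os.Stat(path)
		if err != nil {
			continue
		}
		if mod := info.ModTime(); mod.After(last) {
			if !last.IsZero() {
				onChange()
			}
			last = mod
		}
	}
}

func main() {
	go watchMtime("config.json", func() { fmt.Println("config changed") })
	time.Sleep(3 * time.Second)
}
```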
-// RateLimiter implements per-IP token bucket connection rate limiting.
-// Each unique IP has a bucket initialised to MaxConnectionsPerMinute tokens.
-// Each connection attempt consumes one token. Tokens refill at 1 per (60/max) seconds.
-// An IP that empties its bucket is added to a ban list for BanDurationSeconds.
+// RateLimiter throttles new connections per source IP.
 //
-// rl := proxy.NewRateLimiter(cfg.RateLimit)
-// if !rl.Allow("1.2.3.4") { conn.Close(); return }
+// limiter := proxy.NewRateLimiter(proxy.RateLimit{
+//     MaxConnectionsPerMinute: 30,
+//     BanDurationSeconds:      300,
+// })
+// if limiter.Allow("1.2.3.4:3333") {
+//     // accept the socket
+// }
 type RateLimiter struct {
-    cfg     RateLimit
-    buckets map[string]*tokenBucket
-    banned  map[string]time.Time
-    mu      sync.Mutex
+    limit          RateLimit
+    bucketByHost   map[string]*tokenBucket
+    banUntilByHost map[string]time.Time
+    mu             sync.Mutex
 }

-// tokenBucket is a simple token bucket for one IP.
+// tokenBucket is the per-IP refillable counter.
+//
+// bucket := tokenBucket{tokens: 30, lastRefill: time.Now()}
 type tokenBucket struct {
     tokens     int
     lastRefill time.Time
 }
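The refill arithmetic was spelled out in the removed comment: one token per 60/max seconds. Worked standalone, that rate explains the ReplenishesHighLimits test at the end of this diff, where a 30-second-old empty bucket at 120 connections/minute holds 60 tokens again:

```go
package main

import (
	"fmt"
	"time"
)

// refill tops a bucket up at one token per (60/max) seconds and caps
// it at the per-minute budget. With max=120 that is one token every
// 500ms, so 30 seconds of idle time restores 60 tokens.
func refill(tokens int, lastRefill time.Time, maxPerMinute int, now time.Time) int {
	interval := time.Minute / time.Duration(maxPerMinute)
	n := int(now.Sub(lastRefill) / interval)
	if tokens+n > maxPerMinute {
		return maxPerMinute
	}
	return tokens + n
}

func main() {
	now := time.Now()
	fmt.Println(refill(0, now.Add(-30*time.Second), 120, now)) // 60
}
```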
-// CustomDiff resolves and applies per-miner difficulty overrides at login time.
-// Resolution order: user-suffix (+N) > Config.CustomDiff > pool difficulty.
+// CustomDiff applies a login-time difficulty override.
 //
-// cd := proxy.NewCustomDiff(cfg.CustomDiff)
-// bus.Subscribe(proxy.EventLogin, cd.OnLogin)
+// resolver := proxy.NewCustomDiff(50000)
+// resolver.Apply(&Miner{user: "WALLET+75000"})
 type CustomDiff struct {
-    globalDiff uint64
+    globalDiff atomic.Uint64
 }
@@ -1,13 +1,115 @@
 package proxy

-import "testing"
+import (
+	"testing"
+	"time"
+)

-func TestRateLimiter_Allow(t *testing.T) {
-	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1})
-	if !rl.Allow("1.2.3.4:1234") {
-		t.Fatalf("expected first call to pass")
-	}
-	if rl.Allow("1.2.3.4:1234") {
-		t.Fatalf("expected second call to fail")
+// TestRateLimiter_Allow_Good verifies the first N calls within budget are allowed.
+//
+//	limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 10})
+//	limiter.Allow("1.2.3.4:3333") // true (first 10 calls)
+func TestRateLimiter_Allow_Good(t *testing.T) {
+	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60})
+
+	for i := 0; i < 10; i++ {
+		if !rl.Allow("1.2.3.4:3333") {
+			t.Fatalf("expected call %d to be allowed", i+1)
+		}
+	}
+}
+
+// TestRateLimiter_Allow_Bad verifies the 11th call fails when budget is 10/min.
+//
+//	limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 10})
+//	// calls 1-10 pass, call 11 fails
+func TestRateLimiter_Allow_Bad(t *testing.T) {
+	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60})
+
+	for i := 0; i < 10; i++ {
+		rl.Allow("1.2.3.4:3333")
+	}
+	if rl.Allow("1.2.3.4:3333") {
+		t.Fatalf("expected 11th call to be rejected")
+	}
+}
+
+// TestRateLimiter_Allow_Ugly verifies a banned IP stays banned for BanDurationSeconds.
+//
+//	limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 300})
+//	limiter.Allow("1.2.3.4:3333") // true (exhausts budget)
+//	limiter.Allow("1.2.3.4:3333") // false (banned for 300 seconds)
+func TestRateLimiter_Allow_Ugly(t *testing.T) {
+	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 300})
+
+	if !rl.Allow("1.2.3.4:3333") {
+		t.Fatalf("expected first call to pass")
+	}
+	if rl.Allow("1.2.3.4:3333") {
+		t.Fatalf("expected second call to fail")
+	}
+
+	// Verify the IP is still banned even with a fresh bucket
+	rl.mu.Lock()
+	rl.bucketByHost["1.2.3.4"] = &tokenBucket{tokens: 100, lastRefill: time.Now()}
+	rl.mu.Unlock()
+	if rl.Allow("1.2.3.4:3333") {
+		t.Fatalf("expected banned IP to remain banned regardless of fresh bucket")
+	}
+}
+
+// TestRateLimiter_Tick_Good verifies Tick removes expired bans.
+//
+//	limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1})
+//	limiter.Tick()
+func TestRateLimiter_Tick_Good(t *testing.T) {
+	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1})
+
+	rl.Allow("1.2.3.4:3333")
+	rl.Allow("1.2.3.4:3333") // triggers ban
+
+	// Simulate expired ban
+	rl.mu.Lock()
+	rl.banUntilByHost["1.2.3.4"] = time.Now().Add(-time.Second)
+	rl.mu.Unlock()
+
+	rl.Tick()
+
+	rl.mu.Lock()
+	_, banned := rl.banUntilByHost["1.2.3.4"]
+	rl.mu.Unlock()
+	if banned {
+		t.Fatalf("expected expired ban to be removed by Tick")
+	}
+}
+
+// TestRateLimiter_Allow_ReplenishesHighLimits verifies token replenishment at high rates.
+//
+//	limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 120})
+func TestRateLimiter_Allow_ReplenishesHighLimits(t *testing.T) {
+	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 120, BanDurationSeconds: 1})
+	rl.mu.Lock()
+	rl.bucketByHost["1.2.3.4"] = &tokenBucket{
+		tokens:     0,
+		lastRefill: time.Now().Add(-30 * time.Second),
+	}
+	rl.mu.Unlock()
+
+	if !rl.Allow("1.2.3.4:1234") {
+		t.Fatalf("expected bucket to replenish at 120/min")
+	}
+}
+
+// TestRateLimiter_Disabled_Good verifies a zero-budget limiter allows all connections.
+//
+//	limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 0})
+//	limiter.Allow("any-ip") // always true
+func TestRateLimiter_Disabled_Good(t *testing.T) {
+	rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 0})
+
+	for i := 0; i < 100; i++ {
+		if !rl.Allow("1.2.3.4:3333") {
+			t.Fatalf("expected disabled limiter to allow all connections")
+		}
 	}
 }
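The tests above key buckets and bans by bare host ("1.2.3.4") while Allow receives "host:port" remote addresses, so Allow presumably normalises the key along these lines; this is a hedged sketch, not the repo's actual helper:

package main

import (
	"fmt"
	"net"
)

// hostKey strips the port from a remote address, falling back to the raw
// string for inputs that have no port.
func hostKey(remoteAddr string) string {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		return remoteAddr
	}
	return host
}

func main() {
	fmt.Println(hostKey("1.2.3.4:3333")) // 1.2.3.4
	fmt.Println(hostKey("1.2.3.4"))      // 1.2.3.4
}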
404	reload_test.go	Normal file
@@ -0,0 +1,404 @@
package proxy

import (
	"bufio"
	"encoding/json"
	"net"
	"strings"
	"testing"
	"time"
)

type reloadableSplitter struct {
	reloads int
}

func (s *reloadableSplitter) Connect()                    {}
func (s *reloadableSplitter) OnLogin(event *LoginEvent)   {}
func (s *reloadableSplitter) OnSubmit(event *SubmitEvent) {}
func (s *reloadableSplitter) OnClose(event *CloseEvent)   {}
func (s *reloadableSplitter) Tick(ticks uint64)           {}
func (s *reloadableSplitter) GC()                         {}
func (s *reloadableSplitter) Upstreams() UpstreamStats    { return UpstreamStats{} }
func (s *reloadableSplitter) ReloadPools()                { s.reloads++ }

func TestProxy_Reload_Good(t *testing.T) {
	original := &Config{
		Mode:    "nicehash",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
	}
	p := &Proxy{
		config:     original,
		customDiff: NewCustomDiff(1),
		rateLimit:  NewRateLimiter(RateLimit{}),
	}

	updated := &Config{
		Mode:            "simple",
		Workers:         WorkersByUser,
		Bind:            []BindAddr{{Host: "0.0.0.0", Port: 4444}},
		Pools:           []PoolConfig{{URL: "pool-b.example:4444", Enabled: true}},
		CustomDiff:      50000,
		AccessPassword:  "secret",
		CustomDiffStats: true,
		AlgoExtension:   true,
		AccessLogFile:   "/tmp/access.log",
		ReuseTimeout:    30,
		Retries:         5,
		RetryPause:      2,
		Watch:           true,
		RateLimit:       RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60},
	}

	p.Reload(updated)

	if p.config != original {
		t.Fatalf("expected reload to preserve the existing config pointer")
	}
	if got := p.config.Bind[0]; got.Host != "127.0.0.1" || got.Port != 3333 {
		t.Fatalf("expected bind addresses to remain unchanged, got %+v", got)
	}
	if p.config.Mode != "nicehash" {
		t.Fatalf("expected mode to remain unchanged, got %q", p.config.Mode)
	}
	if p.config.Workers != WorkersByUser {
		t.Fatalf("expected workers mode to reload, got %q", p.config.Workers)
	}
	if got := p.config.Pools[0].URL; got != "pool-b.example:4444" {
		t.Fatalf("expected pools to reload, got %q", got)
	}
	if got := p.customDiff.globalDiff.Load(); got != 50000 {
		t.Fatalf("expected custom diff to reload, got %d", got)
	}
	if !p.rateLimit.IsActive() {
		t.Fatalf("expected rate limiter to be replaced with active configuration")
	}
}

func TestProxy_Reload_WorkersMode_Good(t *testing.T) {
	miner := &Miner{id: 7, user: "wallet-a", rigID: "rig-a", ip: "10.0.0.7"}
	workers := NewWorkers(WorkersByRigID, nil)
	workers.OnLogin(Event{Miner: miner})

	p := &Proxy{
		config: &Config{
			Mode:    "nicehash",
			Workers: WorkersByRigID,
			Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
			Pools:   []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
		},
		workers: workers,
		miners:  map[int64]*Miner{miner.id: miner},
	}

	p.Reload(&Config{
		Mode:    "nicehash",
		Workers: WorkersByUser,
		Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
	})

	if got := p.WorkersMode(); got != WorkersByUser {
		t.Fatalf("expected proxy workers mode %q, got %q", WorkersByUser, got)
	}
	records := p.WorkerRecords()
	if len(records) != 1 {
		t.Fatalf("expected one rebuilt worker record, got %d", len(records))
	}
	if got := records[0].Name; got != "wallet-a" {
		t.Fatalf("expected worker record to rebuild using user mode, got %q", got)
	}
}

func TestProxy_Reload_CustomDiff_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()

	miner := NewMiner(minerConn, 3333, nil)
	miner.state = MinerStateReady
	miner.globalDiff = 1000
	miner.customDiff = 1000
	miner.currentJob = Job{
		Blob:   strings.Repeat("0", 160),
		JobID:  "job-1",
		Target: "01000000",
		Algo:   "cn/r",
	}

	p := &Proxy{
		config: &Config{
			Mode:       "nicehash",
			Workers:    WorkersByRigID,
			Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
			Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
			CustomDiff: 1000,
		},
		customDiff: NewCustomDiff(1000),
		miners:     map[int64]*Miner{miner.ID(): miner},
	}

	done := make(chan map[string]any, 1)
	go func() {
		line, err := bufio.NewReader(clientConn).ReadBytes('\n')
		if err != nil {
			done <- nil
			return
		}
		var payload map[string]any
		if err := json.Unmarshal(line, &payload); err != nil {
			done <- nil
			return
		}
		done <- payload
	}()

	p.Reload(&Config{
		Mode:       "nicehash",
		Workers:    WorkersByRigID,
		Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		CustomDiff: 5000,
	})

	select {
	case payload := <-done:
		if payload == nil {
			t.Fatal("expected reload to resend the current job with the new custom diff")
		}
		params, ok := payload["params"].(map[string]any)
		if !ok {
			t.Fatalf("expected job params payload, got %#v", payload["params"])
		}
		target, _ := params["target"].(string)
		if got := (Job{Target: target}).DifficultyFromTarget(); got == 0 || got > 5000 {
			t.Fatalf("expected resent job difficulty at or below 5000, got %d", got)
		}
	case <-time.After(time.Second):
		t.Fatal("timed out waiting for reload job refresh")
	}

	if miner.customDiff != 5000 {
		t.Fatalf("expected active miner custom diff to reload, got %d", miner.customDiff)
	}
	if miner.globalDiff != 5000 {
		t.Fatalf("expected active miner global diff to reload, got %d", miner.globalDiff)
	}
}

func TestProxy_Reload_CustomDiff_Bad(t *testing.T) {
	miner := &Miner{
		id:                  9,
		state:               MinerStateReady,
		globalDiff:          1000,
		customDiff:          7000,
		customDiffFromLogin: true,
		currentJob: Job{
			Blob:   strings.Repeat("0", 160),
			JobID:  "job-1",
			Target: "01000000",
		},
	}

	p := &Proxy{
		config: &Config{
			Mode:       "nicehash",
			Workers:    WorkersByRigID,
			Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
			Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
			CustomDiff: 1000,
		},
		customDiff: NewCustomDiff(1000),
		miners:     map[int64]*Miner{miner.ID(): miner},
	}

	p.Reload(&Config{
		Mode:       "nicehash",
		Workers:    WorkersByRigID,
		Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		CustomDiff: 5000,
	})

	if miner.customDiff != 7000 {
		t.Fatalf("expected login suffix custom diff to be preserved, got %d", miner.customDiff)
	}
	if miner.globalDiff != 5000 {
		t.Fatalf("expected miner global diff to update for future logins, got %d", miner.globalDiff)
	}
}

func TestProxy_Reload_CustomDiff_Ugly(t *testing.T) {
	miner := &Miner{
		id:         11,
		state:      MinerStateWaitLogin,
		globalDiff: 1000,
		customDiff: 1000,
	}

	p := &Proxy{
		config: &Config{
			Mode:       "nicehash",
			Workers:    WorkersByRigID,
			Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
			Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
			CustomDiff: 1000,
		},
		customDiff: NewCustomDiff(1000),
		miners:     map[int64]*Miner{miner.ID(): miner},
	}

	p.Reload(&Config{
		Mode:       "nicehash",
		Workers:    WorkersByRigID,
		Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		CustomDiff: 0,
	})

	if miner.customDiff != 0 {
		t.Fatalf("expected reload to clear the global custom diff for unauthenticated miners, got %d", miner.customDiff)
	}
	if miner.globalDiff != 0 {
		t.Fatalf("expected miner global diff to be cleared, got %d", miner.globalDiff)
	}
}

func TestProxy_Reload_UpdatesServers(t *testing.T) {
	originalLimiter := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1})
	p := &Proxy{
		config:    &Config{Mode: "nicehash", Workers: WorkersByRigID},
		rateLimit: originalLimiter,
		servers: []*Server{
			{limiter: originalLimiter},
		},
	}

	p.Reload(&Config{
		Mode:          "nicehash",
		Workers:       WorkersByRigID,
		Bind:          []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:         []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		RateLimit:     RateLimit{MaxConnectionsPerMinute: 10},
		AccessLogFile: "",
	})

	if got := p.servers[0].limiter; got != p.rateLimit {
		t.Fatalf("expected server limiter to be updated")
	}
	if p.rateLimit == originalLimiter {
		t.Fatalf("expected rate limiter instance to be replaced")
	}
}

func TestProxy_Reload_WatchEnabled_Good(t *testing.T) {
	p := &Proxy{
		config: &Config{
			Mode:       "nicehash",
			Workers:    WorkersByRigID,
			Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
			Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
			configPath: "/tmp/proxy.json",
		},
	}

	p.Reload(&Config{
		Mode:       "nicehash",
		Workers:    WorkersByRigID,
		Bind:       []BindAddr{{Host: "127.0.0.1", Port: 4444}},
		Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		Watch:      true,
		configPath: "/tmp/ignored.json",
	})

	if p.watcher == nil {
		t.Fatalf("expected reload to create a watcher when watch is enabled")
	}
	if got := p.watcher.configPath; got != "/tmp/proxy.json" {
		t.Fatalf("expected watcher to keep the original config path, got %q", got)
	}
	p.watcher.Stop()
}

func TestProxy_Reload_WatchDisabled_Bad(t *testing.T) {
	watcher := NewConfigWatcher("/tmp/proxy.json", func(*Config) {})
	p := &Proxy{
		config: &Config{
			Mode:       "nicehash",
			Workers:    WorkersByRigID,
			Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
			Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
			configPath: "/tmp/proxy.json",
			Watch:      true,
		},
		watcher: watcher,
	}

	p.Reload(&Config{
		Mode:       "nicehash",
		Workers:    WorkersByRigID,
		Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		Watch:      false,
		configPath: "/tmp/ignored.json",
	})

	if p.watcher != nil {
		t.Fatalf("expected reload to stop and clear the watcher when watch is disabled")
	}
	select {
	case <-watcher.stopCh:
	default:
		t.Fatalf("expected existing watcher to be stopped")
	}
}

func TestProxy_Reload_PoolsChanged_ReloadsSplitter_Good(t *testing.T) {
	splitter := &reloadableSplitter{}
	p := &Proxy{
		config: &Config{
			Mode:    "nicehash",
			Workers: WorkersByRigID,
			Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
			Pools:   []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
		},
		splitter: splitter,
	}

	p.Reload(&Config{
		Mode:    "nicehash",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool-b.example:3333", Enabled: true}},
	})

	if splitter.reloads != 1 {
		t.Fatalf("expected pool reload to reconnect upstreams once, got %d", splitter.reloads)
	}
}

func TestProxy_Reload_PoolsUnchanged_DoesNotReloadSplitter_Ugly(t *testing.T) {
	splitter := &reloadableSplitter{}
	p := &Proxy{
		config: &Config{
			Mode:    "nicehash",
			Workers: WorkersByRigID,
			Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
			Pools:   []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
		},
		splitter: splitter,
	}

	p.Reload(&Config{
		Mode:    "nicehash",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
	})

	if splitter.reloads != 0 {
		t.Fatalf("expected unchanged pool config to skip reconnect, got %d", splitter.reloads)
	}
}
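The tests above pin down the reload contract: the existing Config pointer, mode, and bind addresses stay, while workers mode, pools, and custom diff are copied over. A minimal sketch of that field-copy shape with hypothetical local types (the real Reload also swaps the rate limiter and watcher):

package main

import "fmt"

type hotConfig struct {
	Mode       string
	Workers    string
	Pools      []string
	CustomDiff uint64
}

// reloadInPlace mutates cur, copying only hot-reloadable fields; Mode (and
// bind addresses in the real proxy) keep their original values.
func reloadInPlace(cur, next *hotConfig) {
	cur.Workers = next.Workers
	cur.Pools = next.Pools
	cur.CustomDiff = next.CustomDiff
}

func main() {
	cur := &hotConfig{Mode: "nicehash", Workers: "rig_id", Pools: []string{"pool-a.example:3333"}}
	reloadInPlace(cur, &hotConfig{Mode: "simple", Workers: "user", Pools: []string{"pool-b.example:4444"}, CustomDiff: 50000})
	fmt.Printf("%+v\n", *cur) // Mode stays "nicehash"; Workers, Pools, CustomDiff reload
}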
23	server.go
@@ -7,13 +7,20 @@ import (
 // Server listens on one BindAddr and creates a Miner for each accepted connection.
 //
-//	srv, result := proxy.NewServer(bind, tlsCfg, rateLimiter, onAccept)
-//	srv.Start()
+//	srv, result := proxy.NewServer(
+//		proxy.BindAddr{Host: "0.0.0.0", Port: 3333, TLS: false},
+//		nil,
+//		proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 30}),
+//		func(conn net.Conn, port uint16) { _ = conn; _ = port },
+//	)
+//	if result.OK {
+//		srv.Start()
+//	}
 type Server struct {
-	addr     BindAddr
-	tlsCfg   *tls.Config // nil for plain TCP
-	limiter  *RateLimiter
-	onAccept func(net.Conn, uint16)
-	listener net.Listener
-	done     chan struct{}
+	addr      BindAddr
+	tlsConfig *tls.Config // nil for plain TCP
+	limiter   *RateLimiter
+	onAccept  func(net.Conn, uint16)
+	listener  net.Listener
+	done      chan struct{}
 }
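The "nil for plain TCP" field comment reflects the usual Go pattern where a nil *tls.Config selects an unwrapped listener; a hedged sketch of that selection (hypothetical helper, not the repo's listen code):

package main

import (
	"crypto/tls"
	"net"
)

// listen opens a plain TCP listener when cfg is nil, a TLS one otherwise.
func listen(addr string, cfg *tls.Config) (net.Listener, error) {
	if cfg == nil {
		return net.Listen("tcp", addr)
	}
	return tls.Listen("tcp", addr, cfg)
}

func main() {
	ln, err := listen("127.0.0.1:0", nil)
	if err != nil {
		panic(err)
	}
	ln.Close()
}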
95	sharelog_impl.go	Normal file
@@ -0,0 +1,95 @@
package proxy

import (
	"os"
	"strings"
	"sync"
	"time"
)

type shareLogSink struct {
	path string
	file *os.File
	mu   sync.Mutex
}

func newShareLogSink(path string) *shareLogSink {
	return &shareLogSink{path: path}
}

func (l *shareLogSink) SetPath(path string) {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.path == path {
		return
	}
	l.path = path
	if l.file != nil {
		_ = l.file.Close()
		l.file = nil
	}
}

func (l *shareLogSink) Close() {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.file != nil {
		_ = l.file.Close()
		l.file = nil
	}
}

func (l *shareLogSink) OnAccept(e Event) {
	if l == nil || e.Miner == nil {
		return
	}
	l.writeLine("ACCEPT", e.Miner.User(), e.Diff, e.Latency, "")
}

func (l *shareLogSink) OnReject(e Event) {
	if l == nil || e.Miner == nil {
		return
	}
	l.writeLine("REJECT", e.Miner.User(), 0, 0, e.Error)
}

func (l *shareLogSink) writeLine(kind, user string, diff uint64, latency uint16, reason string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if strings.TrimSpace(l.path) == "" {
		return
	}
	if l.file == nil {
		file, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
		if err != nil {
			return
		}
		l.file = file
	}
	var builder strings.Builder
	builder.WriteString(time.Now().UTC().Format(time.RFC3339))
	builder.WriteByte(' ')
	builder.WriteString(kind)
	builder.WriteString(" ")
	builder.WriteString(user)
	switch kind {
	case "ACCEPT":
		builder.WriteString(" diff=")
		builder.WriteString(formatUint(diff))
		builder.WriteString(" latency=")
		builder.WriteString(formatUint(uint64(latency)))
		builder.WriteString("ms")
	case "REJECT":
		builder.WriteString(" reason=\"")
		builder.WriteString(reason)
		builder.WriteString("\"")
	}
	builder.WriteByte('\n')
	_, _ = l.file.WriteString(builder.String())
}
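writeLine therefore produces one line per share outcome; a self-contained reproduction of the format, with strconv.FormatUint standing in for the repo's formatUint helper:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	ts := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339)
	accept := ts + " ACCEPT WALLET diff=" + strconv.FormatUint(1234, 10) + " latency=" + strconv.FormatUint(56, 10) + "ms"
	reject := ts + ` REJECT WALLET reason="Invalid nonce"`
	fmt.Println(accept) // 2024-01-01T00:00:00Z ACCEPT WALLET diff=1234 latency=56ms
	fmt.Println(reject) // 2024-01-01T00:00:00Z REJECT WALLET reason="Invalid nonce"
}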
46	sharelog_test.go	Normal file
@@ -0,0 +1,46 @@
package proxy

import (
	"os"
	"path/filepath"
	"strings"
	"testing"
)

func TestProxy_ShareLog_WritesOutcomeLines(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "shares.log")

	cfg := &Config{
		Mode:         "nicehash",
		Workers:      WorkersByRigID,
		ShareLogFile: path,
		Bind:         []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:        []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	p, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected valid proxy, got error: %v", result.Error)
	}

	miner := &Miner{
		user:  "WALLET",
		conn:  noopConn{},
		state: MinerStateReady,
	}
	p.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 1234, Latency: 56})
	p.events.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Invalid nonce"})
	p.Stop()

	data, err := os.ReadFile(path)
	if err != nil {
		t.Fatalf("read share log: %v", err)
	}
	text := string(data)
	if !strings.Contains(text, "ACCEPT WALLET diff=1234 latency=56ms") {
		t.Fatalf("expected ACCEPT line, got %q", text)
	}
	if !strings.Contains(text, "REJECT WALLET reason=\"Invalid nonce\"") {
		t.Fatalf("expected REJECT line, got %q", text)
	}
}
92	splitter/nicehash/gc_test.go	Normal file
@@ -0,0 +1,92 @@
package nicehash

import (
	"sync"
	"testing"
	"time"
)

type gcStrategy struct {
	mu           sync.Mutex
	disconnected bool
	active       bool
}

func (s *gcStrategy) Connect() {}

func (s *gcStrategy) Submit(jobID, nonce, result, algo string) int64 {
	return 0
}

func (s *gcStrategy) Disconnect() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.disconnected = true
}

func (s *gcStrategy) IsActive() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.active
}

func TestNonceSplitter_GC_Good(t *testing.T) {
	strategy := &gcStrategy{active: false}
	mapper := &NonceMapper{
		id:       42,
		storage:  NewNonceStorage(),
		strategy: strategy,
		lastUsed: time.Now().Add(-2 * time.Minute),
		pending:  make(map[int64]SubmitContext),
	}
	mapper.storage.slots[0] = -1

	splitter := &NonceSplitter{
		mappers:    []*NonceMapper{mapper},
		mapperByID: map[int64]*NonceMapper{mapper.id: mapper},
	}

	splitter.GC()

	if len(splitter.mappers) != 0 {
		t.Fatalf("expected idle mapper to be reclaimed, got %d mapper(s)", len(splitter.mappers))
	}
	if _, ok := splitter.mapperByID[mapper.id]; ok {
		t.Fatalf("expected reclaimed mapper to be removed from lookup table")
	}
	if !strategy.disconnected {
		t.Fatalf("expected reclaimed mapper strategy to be disconnected")
	}
}

func TestNonceSplitter_GC_Bad(t *testing.T) {
	var splitter *NonceSplitter

	splitter.GC()
}

func TestNonceSplitter_GC_Ugly(t *testing.T) {
	strategy := &gcStrategy{active: true}
	mapper := &NonceMapper{
		id:       99,
		storage:  NewNonceStorage(),
		strategy: strategy,
		lastUsed: time.Now().Add(-2 * time.Minute),
		pending:  make(map[int64]SubmitContext),
	}
	mapper.storage.slots[0] = 7

	splitter := &NonceSplitter{
		mappers:    []*NonceMapper{mapper},
		mapperByID: map[int64]*NonceMapper{mapper.id: mapper},
	}

	splitter.GC()

	if len(splitter.mappers) != 1 {
		t.Fatalf("expected active mapper to remain, got %d mapper(s)", len(splitter.mappers))
	}
	if strategy.disconnected {
		t.Fatalf("expected active mapper to stay connected")
	}
}
@@ -8,20 +8,20 @@ import (
 )

 func init() {
-	proxy.RegisterSplitterFactory("nicehash", func(cfg *proxy.Config, events *proxy.EventBus) proxy.Splitter {
-		return NewNonceSplitter(cfg, events, pool.NewStrategyFactory(cfg))
+	proxy.RegisterSplitterFactory("nicehash", func(config *proxy.Config, eventBus *proxy.EventBus) proxy.Splitter {
+		return NewNonceSplitter(config, eventBus, pool.NewStrategyFactory(config))
 	})
 }

 // NewNonceSplitter creates a NiceHash splitter.
-func NewNonceSplitter(cfg *proxy.Config, events *proxy.EventBus, factory pool.StrategyFactory) *NonceSplitter {
+func NewNonceSplitter(config *proxy.Config, eventBus *proxy.EventBus, factory pool.StrategyFactory) *NonceSplitter {
 	if factory == nil {
-		factory = pool.NewStrategyFactory(cfg)
+		factory = pool.NewStrategyFactory(config)
 	}
 	return &NonceSplitter{
-		byID:            make(map[int64]*NonceMapper),
-		cfg:             cfg,
-		events:          events,
+		mapperByID:      make(map[int64]*NonceMapper),
+		config:          config,
+		events:          eventBus,
 		strategyFactory: factory,
 	}
 }
@@ -37,10 +37,7 @@ func (s *NonceSplitter) Connect() {
 		s.addMapperLocked()
 	}
 	for _, mapper := range s.mappers {
-		if mapper.strategy != nil {
-			mapper.strategy.Connect()
-			return
-		}
+		mapper.Start()
 	}
 }
@@ -54,14 +51,14 @@ func (s *NonceSplitter) OnLogin(event *proxy.LoginEvent) {
 	event.Miner.SetExtendedNiceHash(true)
 	for _, mapper := range s.mappers {
 		if mapper.Add(event.Miner) {
-			s.byID[mapper.id] = mapper
+			s.mapperByID[mapper.id] = mapper
 			return
 		}
 	}
 	mapper := s.addMapperLocked()
 	if mapper != nil {
 		_ = mapper.Add(event.Miner)
-		s.byID[mapper.id] = mapper
+		s.mapperByID[mapper.id] = mapper
 	}
 }
@@ -71,7 +68,7 @@ func (s *NonceSplitter) OnSubmit(event *proxy.SubmitEvent) {
 		return
 	}
 	s.mu.RLock()
-	mapper := s.byID[event.Miner.MapperID()]
+	mapper := s.mapperByID[event.Miner.MapperID()]
 	s.mu.RUnlock()
 	if mapper != nil {
 		mapper.Submit(event)
@@ -84,7 +81,7 @@ func (s *NonceSplitter) OnClose(event *proxy.CloseEvent) {
 		return
 	}
 	s.mu.RLock()
-	mapper := s.byID[event.Miner.MapperID()]
+	mapper := s.mapperByID[event.Miner.MapperID()]
 	s.mu.RUnlock()
 	if mapper != nil {
 		mapper.Remove(event.Miner)
@@ -101,13 +98,17 @@ func (s *NonceSplitter) GC() {
 	now := time.Now()
 	next := s.mappers[:0]
 	for _, mapper := range s.mappers {
+		if mapper == nil || mapper.storage == nil {
+			continue
+		}
 		free, dead, active := mapper.storage.SlotCount()
-		if active == 0 && dead == 0 && now.Sub(mapper.lastUsed) > time.Minute {
+		if active == 0 && now.Sub(mapper.lastUsed) > time.Minute {
 			if mapper.strategy != nil {
 				mapper.strategy.Disconnect()
 			}
-			delete(s.byID, mapper.id)
+			delete(s.mapperByID, mapper.id)
+			_ = free
+			_ = dead
 			continue
 		}
 		next = append(next, mapper)
@@ -116,7 +117,25 @@ func (s *NonceSplitter) GC() {
 }

 // Tick is called once per second.
-func (s *NonceSplitter) Tick(ticks uint64) {}
+func (s *NonceSplitter) Tick(ticks uint64) {
+	if s == nil {
+		return
+	}
+	strategies := make([]pool.Strategy, 0, len(s.mappers))
+	s.mu.RLock()
+	for _, mapper := range s.mappers {
+		if mapper == nil || mapper.strategy == nil {
+			continue
+		}
+		strategies = append(strategies, mapper.strategy)
+	}
+	s.mu.RUnlock()
+	for _, strategy := range strategies {
+		if ticker, ok := strategy.(interface{ Tick(uint64) }); ok {
+			ticker.Tick(ticks)
+		}
+	}
+}

 // Upstreams returns pool connection counts.
 func (s *NonceSplitter) Upstreams() proxy.UpstreamStats {
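Tick discovers the optional per-strategy timer with an inline interface assertion rather than widening the pool.Strategy interface; a self-contained sketch of that optional-method pattern:

package main

import "fmt"

type strategy interface{ Connect() }

type tickingStrategy struct{}

func (tickingStrategy) Connect()      {}
func (tickingStrategy) Tick(n uint64) { fmt.Println("tick", n) }

type silentStrategy struct{}

func (silentStrategy) Connect() {}

func main() {
	for _, s := range []strategy{tickingStrategy{}, silentStrategy{}} {
		// Only strategies that happen to implement Tick(uint64) get the callback.
		if ticker, ok := s.(interface{ Tick(uint64) }); ok {
			ticker.Tick(1)
		}
	}
}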
@@ -129,40 +148,91 @@ func (s *NonceSplitter) Upstreams() proxy.UpstreamStats {
 	for _, mapper := range s.mappers {
 		if mapper.strategy != nil && mapper.strategy.IsActive() {
 			stats.Active++
-		} else if mapper.suspended > 0 {
+		} else if mapper.suspended > 0 || !mapper.active {
 			stats.Error++
 		}
 	}
-	stats.Total = uint64(len(s.mappers))
+	stats.Total = stats.Active + stats.Sleep + stats.Error
 	return stats
 }

+// Disconnect closes all upstream pool connections and forgets the current mapper set.
+func (s *NonceSplitter) Disconnect() {
+	if s == nil {
+		return
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	for _, mapper := range s.mappers {
+		if mapper != nil && mapper.strategy != nil {
+			mapper.strategy.Disconnect()
+		}
+	}
+	s.mappers = nil
+	s.mapperByID = make(map[int64]*NonceMapper)
+}
+
+// ReloadPools reconnects each mapper strategy using the updated pool list.
+//
+//	s.ReloadPools()
+func (s *NonceSplitter) ReloadPools() {
+	if s == nil {
+		return
+	}
+	strategies := make([]pool.Strategy, 0, len(s.mappers))
+	s.mu.RLock()
+	for _, mapper := range s.mappers {
+		if mapper == nil || mapper.strategy == nil {
+			continue
+		}
+		strategies = append(strategies, mapper.strategy)
+	}
+	s.mu.RUnlock()
+	for _, strategy := range strategies {
+		if reloadable, ok := strategy.(pool.ReloadableStrategy); ok {
+			reloadable.ReloadPools()
+		}
+	}
+}
+
 func (s *NonceSplitter) addMapperLocked() *NonceMapper {
-	id := s.seq
-	s.seq++
-	mapper := NewNonceMapper(id, s.cfg, nil)
+	id := s.nextMapperID
+	s.nextMapperID++
+	mapper := NewNonceMapper(id, s.config, nil)
 	mapper.events = s.events
 	mapper.lastUsed = time.Now()
 	mapper.strategy = s.strategyFactory(mapper)
 	s.mappers = append(s.mappers, mapper)
-	if s.byID == nil {
-		s.byID = make(map[int64]*NonceMapper)
+	if s.mapperByID == nil {
+		s.mapperByID = make(map[int64]*NonceMapper)
 	}
-	s.byID[mapper.id] = mapper
+	s.mapperByID[mapper.id] = mapper
+	mapper.Start()
 	return mapper
 }

 // NewNonceMapper creates a mapper for one upstream connection.
-func NewNonceMapper(id int64, cfg *proxy.Config, strategy pool.Strategy) *NonceMapper {
+func NewNonceMapper(id int64, config *proxy.Config, strategy pool.Strategy) *NonceMapper {
 	return &NonceMapper{
 		id:       id,
 		storage:  NewNonceStorage(),
 		strategy: strategy,
 		pending:  make(map[int64]SubmitContext),
-		cfg:      cfg,
+		config:   config,
 	}
 }

+// Start connects the mapper's upstream strategy once.
+func (m *NonceMapper) Start() {
+	if m == nil || m.strategy == nil {
+		return
+	}
+	m.startOnce.Do(func() {
+		m.lastUsed = time.Now()
+		m.strategy.Connect()
+	})
+}
+
 // Add assigns a miner to a free slot.
 func (m *NonceMapper) Add(miner *proxy.Miner) bool {
 	if m == nil || miner == nil {
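Start's once-guard is the standard sync.Once idempotency pattern, which is what TestMapper_Start_Ugly below relies on; a self-contained sketch:

package main

import (
	"fmt"
	"sync"
)

type upstream struct {
	startOnce sync.Once
	connects  int
}

// Start is safe to call any number of times; only the first call connects.
func (u *upstream) Start() {
	u.startOnce.Do(func() {
		u.connects++
	})
}

func main() {
	u := &upstream{}
	u.Start()
	u.Start()
	fmt.Println(u.connects) // 1
}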
@@ -179,7 +249,7 @@ func (m *NonceMapper) Add(miner *proxy.Miner) bool {
 		job := m.storage.job
 		m.storage.mu.Unlock()
 		if job.IsValid() {
-			miner.ForwardJob(job, job.Algo)
+			miner.SetCurrentJob(job)
 		}
 	}
 	return ok
@@ -212,14 +282,34 @@ func (m *NonceMapper) Submit(event *proxy.SubmitEvent) {
 	if jobID == "" {
 		jobID = job.JobID
 	}
-	if jobID == "" || (jobID != job.JobID && jobID != prevJob.JobID) {
+	valid := m.storage.IsValidJobID(jobID)
+	if jobID == "" || !valid {
 		m.rejectInvalidJobLocked(event, job)
 		return
 	}
+	submissionJob := job
+	if jobID == prevJob.JobID && prevJob.JobID != "" {
+		submissionJob = prevJob
+	}
 	seq := m.strategy.Submit(jobID, event.Nonce, event.Result, event.Algo)
-	m.pending[seq] = SubmitContext{RequestID: event.RequestID, MinerID: event.Miner.ID(), JobID: jobID}
+	m.pending[seq] = SubmitContext{
+		RequestID: event.RequestID,
+		MinerID:   event.Miner.ID(),
+		JobID:     jobID,
+		Diff:      proxy.EffectiveShareDifficulty(submissionJob, event.Miner),
+		StartedAt: time.Now(),
+	}
 	m.lastUsed = time.Now()
 }

 func (m *NonceMapper) rejectInvalidJobLocked(event *proxy.SubmitEvent, job proxy.Job) {
 	event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
 	if m.events != nil {
 		jobCopy := job
 		m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: event.Miner, Job: &jobCopy, Error: "Invalid job id"})
 	}
 }

 // IsActive reports whether the mapper has received a valid job.
 func (m *NonceMapper) IsActive() bool {
 	if m == nil {
@@ -258,24 +348,40 @@ func (m *NonceMapper) OnResultAccepted(sequence int64, accepted bool, errorMessa
 	job := m.storage.job
 	prevJob := m.storage.prevJob
 	m.storage.mu.Unlock()
-	expired := ctx.JobID != "" && ctx.JobID == prevJob.JobID && ctx.JobID != job.JobID
+	job, expired := resolveSubmissionJob(ctx.JobID, job, prevJob)
 	m.mu.Unlock()
 	if !ok || miner == nil {
 		return
 	}
+	latency := uint16(0)
+	if !ctx.StartedAt.IsZero() {
+		elapsed := time.Since(ctx.StartedAt).Milliseconds()
+		if elapsed > int64(^uint16(0)) {
+			latency = ^uint16(0)
+		} else {
+			latency = uint16(elapsed)
+		}
+	}
 	if accepted {
 		miner.Success(ctx.RequestID, "OK")
 		if m.events != nil {
-			m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Job: &job, Diff: job.DifficultyFromTarget(), Latency: 0, Expired: expired})
+			m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Job: &job, Diff: ctx.Diff, Latency: latency, Expired: expired})
 		}
 		return
 	}
 	miner.ReplyWithError(ctx.RequestID, errorMessage)
 	if m.events != nil {
-		m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Job: &job, Diff: job.DifficultyFromTarget(), Error: errorMessage})
+		m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Job: &job, Diff: ctx.Diff, Error: errorMessage, Latency: latency})
 	}
 }

+func resolveSubmissionJob(jobID string, currentJob, previousJob proxy.Job) (proxy.Job, bool) {
+	if jobID != "" && jobID == previousJob.JobID && jobID != currentJob.JobID {
+		return previousJob, true
+	}
+	return currentJob, false
+}
+
 func (m *NonceMapper) OnDisconnect() {
 	if m == nil {
 		return
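The latency computation saturates at the top of the uint16 millisecond range instead of overflowing; the same clamp in isolation:

package main

import (
	"fmt"
	"time"
)

// clampLatency converts an elapsed duration to the uint16 millisecond field
// carried on share events, saturating at 65535 rather than wrapping around.
func clampLatency(elapsed time.Duration) uint16 {
	ms := elapsed.Milliseconds()
	if ms > int64(^uint16(0)) {
		return ^uint16(0)
	}
	return uint16(ms)
}

func main() {
	fmt.Println(clampLatency(56 * time.Millisecond)) // 56
	fmt.Println(clampLatency(2 * time.Minute))       // 65535 (saturated)
}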
@@ -286,12 +392,16 @@ func (m *NonceMapper) OnDisconnect() {
 	m.suspended++
 }

-// NewNonceStorage creates an empty slot table.
+// NewNonceStorage creates a 256-slot table ready for round-robin miner allocation.
+//
+//	storage := nicehash.NewNonceStorage()
 func NewNonceStorage() *NonceStorage {
 	return &NonceStorage{miners: make(map[int64]*proxy.Miner)}
 }

-// Add finds the next free slot.
+// Add assigns the next free slot, such as 0x2a, to one miner.
+//
+//	ok := storage.Add(&proxy.Miner{})
 func (s *NonceStorage) Add(miner *proxy.Miner) bool {
 	if s == nil || miner == nil {
 		return false
@@ -312,7 +422,9 @@ func (s *NonceStorage) Add(miner *proxy.Miner) bool {
 	return false
 }

-// Remove marks a slot as dead.
+// Remove marks one miner's slot as dead until the next SetJob call.
+//
+//	storage.Remove(miner)
 func (s *NonceStorage) Remove(miner *proxy.Miner) {
 	if s == nil || miner == nil {
 		return
@@ -326,7 +438,9 @@ func (s *NonceStorage) Remove(miner *proxy.Miner) {
 	delete(s.miners, miner.ID())
 }

-// SetJob replaces the current job and sends it to active miners.
+// SetJob broadcasts one pool job to all active miners and clears dead slots.
+//
+//	storage.SetJob(proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-1"})
 func (s *NonceStorage) SetJob(job proxy.Job) {
 	if s == nil || !job.IsValid() {
 		return
@@ -352,17 +466,31 @@ func (s *NonceStorage) SetJob(job proxy.Job) {
 	}
 }

-// IsValidJobID returns true if the id matches the current or previous job.
+// IsValidJobID accepts the current job, or the immediately previous one after a pool roll.
+//
+//	if !storage.IsValidJobID("job-1") { return }
 func (s *NonceStorage) IsValidJobID(id string) bool {
 	if s == nil {
 		return false
 	}
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	return id != "" && (id == s.job.JobID || id == s.prevJob.JobID)
+	if id == "" {
+		return false
+	}
+	if id == s.job.JobID {
+		return true
+	}
+	if id == s.prevJob.JobID && s.prevJob.JobID != "" {
+		s.expired++
+		return true
+	}
+	return false
 }

-// SlotCount returns free, dead, and active counts.
+// SlotCount returns free, dead, and active slot counts such as 254, 1, 1.
+//
+//	free, dead, active := storage.SlotCount()
 func (s *NonceStorage) SlotCount() (free, dead, active int) {
 	if s == nil {
 		return 0, 0, 0
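IsValidJobID implements a two-deep job window: the current job is always valid, and the immediately previous one stays valid (counted as expired) so shares in flight when the pool rolls jobs are not discarded. The same logic in isolation:

package main

import "fmt"

type jobWindow struct {
	current, previous string
	expired           uint64
}

func (w *jobWindow) valid(id string) bool {
	switch {
	case id == "":
		return false
	case id == w.current:
		return true
	case id == w.previous && w.previous != "":
		w.expired++ // stale but still acceptable
		return true
	default:
		return false
	}
}

func main() {
	w := &jobWindow{current: "job-2", previous: "job-1"}
	fmt.Println(w.valid("job-2"), w.valid("job-1"), w.valid("job-0")) // true true false
	fmt.Println(w.expired)                                            // 1
}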
@@ -18,11 +18,12 @@ type NonceMapper struct {
 	storage  *NonceStorage
 	strategy pool.Strategy           // manages pool client lifecycle and failover
 	pending  map[int64]SubmitContext // sequence → {requestID, minerID}
-	cfg      *proxy.Config
+	config   *proxy.Config
 	events   *proxy.EventBus
 	active    bool // true once pool has sent at least one job
 	suspended int  // > 0 when pool connection is in error/reconnecting
 	lastUsed  time.Time
+	startOnce sync.Once
 	mu        sync.Mutex
 }
@@ -33,4 +34,6 @@ type SubmitContext struct {
 	RequestID int64 // JSON-RPC id from the miner's submit request
 	MinerID   int64 // miner that submitted
 	JobID     string
+	Diff      uint64
+	StartedAt time.Time
 }
243	splitter/nicehash/mapper_start_test.go	Normal file
@@ -0,0 +1,243 @@
package nicehash

import (
	"bufio"
	"encoding/json"
	"net"
	"sync"
	"testing"
	"time"

	"dappco.re/go/proxy"
)

type startCountingStrategy struct {
	mu      sync.Mutex
	connect int
}

func (s *startCountingStrategy) Connect() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.connect++
}

func (s *startCountingStrategy) Submit(jobID, nonce, result, algo string) int64 {
	return 0
}

func (s *startCountingStrategy) Disconnect() {}

func (s *startCountingStrategy) IsActive() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.connect > 0
}

type discardConn struct{}

func (discardConn) Read([]byte) (int, error)         { return 0, nil }
func (discardConn) Write(p []byte) (int, error)      { return len(p), nil }
func (discardConn) Close() error                     { return nil }
func (discardConn) LocalAddr() net.Addr              { return nil }
func (discardConn) RemoteAddr() net.Addr             { return nil }
func (discardConn) SetDeadline(time.Time) error      { return nil }
func (discardConn) SetReadDeadline(time.Time) error  { return nil }
func (discardConn) SetWriteDeadline(time.Time) error { return nil }

func TestMapper_Start_Good(t *testing.T) {
	strategy := &startCountingStrategy{}
	mapper := NewNonceMapper(1, &proxy.Config{}, strategy)

	mapper.Start()

	if strategy.connect != 1 {
		t.Fatalf("expected one connect call, got %d", strategy.connect)
	}
}

func TestMapper_Start_Bad(t *testing.T) {
	mapper := NewNonceMapper(1, &proxy.Config{}, nil)

	mapper.Start()
}

func TestMapper_Start_Ugly(t *testing.T) {
	strategy := &startCountingStrategy{}
	mapper := NewNonceMapper(1, &proxy.Config{}, strategy)

	mapper.Start()
	mapper.Start()

	if strategy.connect != 1 {
		t.Fatalf("expected Start to be idempotent, got %d connect calls", strategy.connect)
	}
}

func TestMapper_Submit_InvalidJob_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()

	miner := proxy.NewMiner(minerConn, 3333, nil)
	miner.SetID(7)
	strategy := &startCountingStrategy{}
	mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
	mapper.storage.job = proxy.Job{JobID: "job-1", Blob: "blob", Target: "b88d0600"}

	done := make(chan struct{})
	go func() {
		mapper.Submit(&proxy.SubmitEvent{
			Miner:     miner,
			JobID:     "job-missing",
			Nonce:     "deadbeef",
			Result:    "hash",
			RequestID: 42,
		})
		close(done)
	}()

	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read error reply: %v", err)
	}
	<-done

	var payload struct {
		ID    float64 `json:"id"`
		Error struct {
			Message string `json:"message"`
		} `json:"error"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal error reply: %v", err)
	}
	if payload.ID != 42 {
		t.Fatalf("expected request id 42, got %v", payload.ID)
	}
	if payload.Error.Message != "Invalid job id" {
		t.Fatalf("expected invalid job error, got %q", payload.Error.Message)
	}
	if len(mapper.pending) != 0 {
		t.Fatalf("expected invalid submit not to create a pending entry")
	}
}

func TestMapper_OnResultAccepted_ExpiredUsesPreviousJob(t *testing.T) {
	bus := proxy.NewEventBus()
	events := make(chan proxy.Event, 1)
	bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
		events <- e
	})

	miner := proxy.NewMiner(discardConn{}, 3333, nil)
	miner.SetID(7)
	mapper := NewNonceMapper(1, &proxy.Config{}, &startCountingStrategy{})
	mapper.events = bus
	mapper.storage.job = proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"}
	mapper.storage.prevJob = proxy.Job{JobID: "job-old", Blob: "blob-old", Target: "b88d0600"}
	mapper.storage.miners[miner.ID()] = miner
	if !mapper.storage.IsValidJobID("job-old") {
		t.Fatal("expected previous job to validate before result handling")
	}
	mapper.pending[9] = SubmitContext{
		RequestID: 42,
		MinerID:   miner.ID(),
		JobID:     "job-old",
		StartedAt: time.Now(),
	}

	mapper.OnResultAccepted(9, true, "")

	if got := mapper.storage.expired; got != 1 {
		t.Fatalf("expected one expired validation, got %d", got)
	}

	select {
	case event := <-events:
		if !event.Expired {
			t.Fatalf("expected expired share to be flagged")
		}
		if event.Job == nil || event.Job.JobID != "job-old" {
			t.Fatalf("expected previous job to be attached, got %+v", event.Job)
		}
	case <-time.After(time.Second):
		t.Fatal("expected accept event")
	}
}

func TestMapper_Submit_ExpiredJobUsesPreviousDifficulty(t *testing.T) {
	miner := proxy.NewMiner(discardConn{}, 3333, nil)
	miner.SetID(9)

	strategy := &submitCaptureStrategy{}
	mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
	mapper.storage.job = proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "ffffffff"}
	mapper.storage.prevJob = proxy.Job{JobID: "job-old", Blob: "blob-old", Target: "b88d0600"}
	mapper.storage.miners[miner.ID()] = miner

	mapper.Submit(&proxy.SubmitEvent{
		Miner:     miner,
		JobID:     "job-old",
		Nonce:     "deadbeef",
		Result:    "hash",
		RequestID: 88,
	})

	ctx, ok := mapper.pending[strategy.seq]
	if !ok {
		t.Fatal("expected pending submit context for expired job")
	}
	want := mapper.storage.prevJob.DifficultyFromTarget()
	if ctx.Diff != want {
		t.Fatalf("expected previous-job difficulty %d, got %d", want, ctx.Diff)
	}
}

type submitCaptureStrategy struct {
	seq int64
}

func (s *submitCaptureStrategy) Connect() {}

func (s *submitCaptureStrategy) Submit(jobID, nonce, result, algo string) int64 {
	s.seq++
	return s.seq
}

func (s *submitCaptureStrategy) Disconnect() {}

func (s *submitCaptureStrategy) IsActive() bool { return true }

func TestMapper_OnResultAccepted_CustomDiffUsesEffectiveDifficulty(t *testing.T) {
	bus := proxy.NewEventBus()
	events := make(chan proxy.Event, 1)
	bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
		events <- e
	})

	miner := proxy.NewMiner(discardConn{}, 3333, nil)
	miner.SetID(8)
	mapper := NewNonceMapper(1, &proxy.Config{}, &startCountingStrategy{})
	mapper.events = bus
	mapper.storage.job = proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"}
	mapper.storage.miners[miner.ID()] = miner
	mapper.pending[10] = SubmitContext{
		RequestID: 77,
		MinerID:   miner.ID(),
		JobID:     "job-new",
		Diff:      25000,
		StartedAt: time.Now(),
	}

	mapper.OnResultAccepted(10, true, "")

	select {
	case event := <-events:
		if event.Diff != 25000 {
			t.Fatalf("expected effective difficulty 25000, got %d", event.Diff)
		}
	case <-time.After(time.Second):
		t.Fatal("expected accept event")
	}
}
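The targets in these tests ("b88d0600", "ffffffff") follow the CryptoNote stratum convention of a little-endian uint32 compact target, so DifficultyFromTarget presumably divides the 32-bit ceiling by the decoded target; a hedged sketch, not the repo's implementation:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func difficultyFromTarget(target string) uint64 {
	raw, err := hex.DecodeString(target)
	if err != nil || len(raw) != 4 {
		return 0
	}
	t := binary.LittleEndian.Uint32(raw)
	if t == 0 {
		return 0
	}
	return uint64(0xFFFFFFFF) / uint64(t)
}

func main() {
	fmt.Println(difficultyFromTarget("b88d0600")) // 10000
	fmt.Println(difficultyFromTarget("ffffffff")) // 1
}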
67	splitter/nicehash/reload_test.go	Normal file
@@ -0,0 +1,67 @@
package nicehash

import (
	"testing"

	"dappco.re/go/proxy"
	"dappco.re/go/proxy/pool"
)

type reloadableStrategy struct {
	reloads int
}

func (s *reloadableStrategy) Connect()                                       {}
func (s *reloadableStrategy) Submit(jobID, nonce, result, algo string) int64 { return 0 }
func (s *reloadableStrategy) Disconnect()                                    {}
func (s *reloadableStrategy) IsActive() bool                                 { return true }
func (s *reloadableStrategy) ReloadPools()                                   { s.reloads++ }

var _ pool.ReloadableStrategy = (*reloadableStrategy)(nil)

func TestNonceSplitter_ReloadPools_Good(t *testing.T) {
	strategy := &reloadableStrategy{}
	splitter := &NonceSplitter{
		mappers: []*NonceMapper{
			{strategy: strategy},
		},
	}

	splitter.ReloadPools()

	if strategy.reloads != 1 {
		t.Fatalf("expected mapper strategy to reload once, got %d", strategy.reloads)
	}
}

func TestNonceSplitter_ReloadPools_Bad(t *testing.T) {
	splitter := &NonceSplitter{
		mappers: []*NonceMapper{
			{strategy: nil},
		},
	}

	splitter.ReloadPools()
}

func TestNonceSplitter_ReloadPools_Ugly(t *testing.T) {
	splitter := NewNonceSplitter(&proxy.Config{}, proxy.NewEventBus(), func(listener pool.StratumListener) pool.Strategy {
		return &reloadableStrategy{}
	})
	splitter.mappers = []*NonceMapper{
		{strategy: &reloadableStrategy{}},
		{strategy: &reloadableStrategy{}},
	}

	splitter.ReloadPools()

	for index, mapper := range splitter.mappers {
		strategy, ok := mapper.strategy.(*reloadableStrategy)
		if !ok {
			t.Fatalf("expected reloadable strategy at mapper %d", index)
		}
		if strategy.reloads != 1 {
			t.Fatalf("expected mapper %d to reload once, got %d", index, strategy.reloads)
		}
	}
}
@@ -23,10 +23,10 @@ import (
 //	s.Connect()
 type NonceSplitter struct {
 	mappers         []*NonceMapper
-	byID            map[int64]*NonceMapper
-	cfg             *proxy.Config
+	mapperByID      map[int64]*NonceMapper
+	config          *proxy.Config
 	events          *proxy.EventBus
 	strategyFactory pool.StrategyFactory
 	mu              sync.RWMutex
-	seq             int64
+	nextMapperID    int64
 }
@@ -20,6 +20,7 @@ type NonceStorage struct {
 	miners  map[int64]*proxy.Miner // minerID → Miner pointer for active miners
 	job     proxy.Job              // current job from pool
 	prevJob proxy.Job              // previous job (for stale submit validation)
-	cursor  int                    // search starts here (round-robin allocation)
+	expired uint64
+	cursor  int // search starts here (round-robin allocation)
 	mu      sync.Mutex
 }
@ -6,21 +6,155 @@ import (
|
|||
"dappco.re/go/proxy"
|
||||
)
|
||||
|
||||
func TestNonceStorage_AddAndRemove(t *testing.T) {
|
||||
// TestStorage_Add_Good verifies 256 sequential Add calls fill all slots with unique FixedByte values.
|
||||
//
|
||||
// storage := nicehash.NewNonceStorage()
|
||||
// for i := 0; i < 256; i++ {
|
||||
// m := &proxy.Miner{}
|
||||
// m.SetID(int64(i + 1))
|
||||
// ok := storage.Add(m) // true for all 256
|
||||
// }
|
||||
func TestStorage_Add_Good(t *testing.T) {
|
||||
storage := NewNonceStorage()
|
||||
seen := make(map[uint8]bool)
|
||||
for i := 0; i < 256; i++ {
|
||||
m := &proxy.Miner{}
|
||||
m.SetID(int64(i + 1))
|
||||
ok := storage.Add(m)
|
||||
if !ok {
|
||||
t.Fatalf("expected add %d to succeed", i)
|
||||
}
|
||||
if seen[m.FixedByte()] {
|
||||
t.Fatalf("duplicate fixed byte %d at add %d", m.FixedByte(), i)
|
||||
}
|
||||
seen[m.FixedByte()] = true
|
||||
}
|
||||
}
|
||||
|
||||
// TestStorage_Add_Bad verifies the 257th Add returns false when all 256 slots are occupied.
|
||||
//
|
||||
// storage := nicehash.NewNonceStorage()
|
||||
// // fill 256 slots...
|
||||
// ok := storage.Add(overflowMiner) // false — table is full
|
||||
func TestStorage_Add_Bad(t *testing.T) {
|
||||
storage := NewNonceStorage()
|
||||
for i := 0; i < 256; i++ {
|
||||
m := &proxy.Miner{}
|
||||
m.SetID(int64(i + 1))
|
||||
storage.Add(m)
|
||||
}
|
||||
|
||||
overflow := &proxy.Miner{}
|
||||
overflow.SetID(257)
|
||||
if storage.Add(overflow) {
|
||||
t.Fatalf("expected 257th add to fail when table is full")
|
||||
}
|
||||
}
|
||||
|
||||
// TestStorage_Add_Ugly verifies that a removed slot (dead) is reclaimed after SetJob clears it.
|
||||
//
|
||||
// storage := nicehash.NewNonceStorage()
|
||||
// storage.Add(miner)
|
||||
// storage.Remove(miner) // slot becomes dead (-minerID)
|
||||
// storage.SetJob(job) // dead slots cleared to 0
|
||||
// storage.Add(newMiner) // reclaimed slot succeeds
|
||||
func TestStorage_Add_Ugly(t *testing.T) {
|
||||
storage := NewNonceStorage()
|
||||
miner := &proxy.Miner{}
|
||||
miner.SetID(1)
|
||||
|
||||
if !storage.Add(miner) {
|
||||
t.Fatalf("expected add to succeed")
|
||||
}
|
||||
if miner.FixedByte() != 0 {
|
||||
t.Fatalf("expected first slot to be 0, got %d", miner.FixedByte())
|
||||
t.Fatalf("expected first add to succeed")
|
||||
}
|
||||
|
||||
storage.Remove(miner)
|
||||
free, dead, active := storage.SlotCount()
|
||||
if free != 255 || dead != 1 || active != 0 {
|
||||
t.Fatalf("unexpected slot counts: free=%d dead=%d active=%d", free, dead, active)
|
||||
if dead != 1 || active != 0 {
|
||||
t.Fatalf("expected 1 dead slot, got free=%d dead=%d active=%d", free, dead, active)
|
||||
}
|
||||
|
||||
// SetJob clears dead slots
|
||||
	storage.SetJob(proxy.Job{Blob: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", JobID: "job-1"})

	free, dead, active = storage.SlotCount()
	if dead != 0 {
		t.Fatalf("expected dead slots cleared after SetJob, got %d", dead)
	}

	// Reclaim the slot
	newMiner := &proxy.Miner{}
	newMiner.SetID(2)
	if !storage.Add(newMiner) {
		t.Fatalf("expected reclaimed slot add to succeed")
	}
}

// TestStorage_IsValidJobID_Good verifies the current job ID is accepted.
//
//	storage := nicehash.NewNonceStorage()
//	storage.SetJob(proxy.Job{JobID: "job-2", Blob: "..."})
//	storage.IsValidJobID("job-2") // true
func TestStorage_IsValidJobID_Good(t *testing.T) {
	storage := NewNonceStorage()
	storage.SetJob(proxy.Job{
		JobID: "job-1",
		Blob:  "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
	})

	if !storage.IsValidJobID("job-1") {
		t.Fatalf("expected current job to be valid")
	}
}

// TestStorage_IsValidJobID_Bad verifies an unknown job ID is rejected.
//
//	storage := nicehash.NewNonceStorage()
//	storage.IsValidJobID("nonexistent") // false
func TestStorage_IsValidJobID_Bad(t *testing.T) {
	storage := NewNonceStorage()
	storage.SetJob(proxy.Job{
		JobID: "job-1",
		Blob:  "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
	})

	if storage.IsValidJobID("nonexistent") {
		t.Fatalf("expected unknown job id to be invalid")
	}
	if storage.IsValidJobID("") {
		t.Fatalf("expected empty job id to be invalid")
	}
}

// TestStorage_IsValidJobID_Ugly verifies the previous job ID is accepted but counts as expired.
//
//	storage := nicehash.NewNonceStorage()
//	// job-1 is current, job-2 pushes job-1 to previous
//	storage.IsValidJobID("job-1") // true (but expired counter increments)
func TestStorage_IsValidJobID_Ugly(t *testing.T) {
	storage := NewNonceStorage()
	blob160 := "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"

	storage.SetJob(proxy.Job{JobID: "job-1", Blob: blob160, ClientID: "session-1"})
	storage.SetJob(proxy.Job{JobID: "job-2", Blob: blob160, ClientID: "session-1"})

	if !storage.IsValidJobID("job-2") {
		t.Fatalf("expected current job to be valid")
	}
	if !storage.IsValidJobID("job-1") {
		t.Fatalf("expected previous job to remain valid")
	}
	if storage.expired != 1 {
		t.Fatalf("expected one expired job validation, got %d", storage.expired)
	}
}

// TestStorage_SlotCount_Good verifies free/dead/active counts on a fresh storage.
//
//	storage := nicehash.NewNonceStorage()
//	free, dead, active := storage.SlotCount() // 256, 0, 0
func TestStorage_SlotCount_Good(t *testing.T) {
	storage := NewNonceStorage()
	free, dead, active := storage.SlotCount()
	if free != 256 || dead != 0 || active != 0 {
		t.Fatalf("expected 256/0/0, got free=%d dead=%d active=%d", free, dead, active)
	}
}
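The three IsValidJobID tests pin down a two-deep validation window: the current job ID always validates, the immediately previous job ID still validates but bumps the expired counter, and anything else (including the empty string) is rejected. A minimal sketch of that policy, using illustrative names (current, prev, expired) rather than NonceStorage's actual internals:

// jobWindow keeps only the two most recent job IDs; names are illustrative.
type jobWindow struct {
	current string
	prev    string
	expired uint64 // validations that matched the previous (stale) job
}

// setJob rolls the current job into the previous slot.
func (w *jobWindow) setJob(id string) {
	w.prev = w.current
	w.current = id
}

// isValid accepts the current job, accepts the previous job while
// counting it as expired, and rejects everything else.
func (w *jobWindow) isValid(id string) bool {
	switch {
	case id == "":
		return false
	case id == w.current:
		return true
	case id == w.prev && w.prev != "":
		w.expired++ // still accepted, but flagged as expired
		return true
	default:
		return false
	}
}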
69
splitter/nicehash/upstreams_test.go
Normal file

@@ -0,0 +1,69 @@
package nicehash

import (
	"testing"

	"dappco.re/go/proxy"
)

type upstreamStateStrategy struct {
	active bool
}

func (s *upstreamStateStrategy) Connect() {}

func (s *upstreamStateStrategy) Submit(jobID, nonce, result, algo string) int64 {
	return 0
}

func (s *upstreamStateStrategy) Disconnect() {}

func (s *upstreamStateStrategy) IsActive() bool { return s.active }

func TestNonceSplitter_Upstreams_Good(t *testing.T) {
	splitter := &NonceSplitter{
		mappers: []*NonceMapper{
			{strategy: &upstreamStateStrategy{active: true}, active: true},
			{strategy: &upstreamStateStrategy{active: false}, active: false, suspended: 1},
		},
	}

	stats := splitter.Upstreams()

	if stats.Active != 1 {
		t.Fatalf("expected one active upstream, got %d", stats.Active)
	}
	if stats.Error != 1 {
		t.Fatalf("expected one error upstream, got %d", stats.Error)
	}
	if stats.Total != 2 {
		t.Fatalf("expected total to equal active + sleep + error, got %d", stats.Total)
	}
}

func TestNonceSplitter_Upstreams_Bad(t *testing.T) {
	var splitter *NonceSplitter

	stats := splitter.Upstreams()

	if stats != (proxy.UpstreamStats{}) {
		t.Fatalf("expected zero-value stats for nil splitter, got %+v", stats)
	}
}

func TestNonceSplitter_Upstreams_Ugly(t *testing.T) {
	splitter := &NonceSplitter{
		mappers: []*NonceMapper{
			{strategy: &upstreamStateStrategy{active: false}, active: false},
		},
	}

	stats := splitter.Upstreams()

	if stats.Error != 1 {
		t.Fatalf("expected an unready mapper to be counted as error, got %+v", stats)
	}
	if stats.Total != 1 {
		t.Fatalf("expected total to remain internally consistent, got %+v", stats)
	}
}
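TestNonceSplitter_Upstreams_Bad expects a nil *NonceSplitter to return zero-value stats rather than panic, which only works if Upstreams begins with a nil-receiver guard. The pattern in isolation, on a hypothetical type:

// counter is illustrative; the point is the guard, not the type.
type counter struct{ n uint64 }

// Value is safe to call on a nil *counter: a Go method with a pointer
// receiver may execute with receiver == nil as long as it checks before
// dereferencing.
func (c *counter) Value() uint64 {
	if c == nil {
		return 0
	}
	return c.n
}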
@@ -8,21 +8,21 @@ import (
)

func init() {
-	proxy.RegisterSplitterFactory("simple", func(cfg *proxy.Config, events *proxy.EventBus) proxy.Splitter {
-		return NewSimpleSplitter(cfg, events, pool.NewStrategyFactory(cfg))
+	proxy.RegisterSplitterFactory("simple", func(config *proxy.Config, eventBus *proxy.EventBus) proxy.Splitter {
+		return NewSimpleSplitter(config, eventBus, pool.NewStrategyFactory(config))
	})
}

// NewSimpleSplitter creates the passthrough splitter.
-func NewSimpleSplitter(cfg *proxy.Config, events *proxy.EventBus, factory pool.StrategyFactory) *SimpleSplitter {
+func NewSimpleSplitter(config *proxy.Config, eventBus *proxy.EventBus, factory pool.StrategyFactory) *SimpleSplitter {
	if factory == nil {
-		factory = pool.NewStrategyFactory(cfg)
+		factory = pool.NewStrategyFactory(config)
	}
	return &SimpleSplitter{
		active:  make(map[int64]*SimpleMapper),
		idle:    make(map[int64]*SimpleMapper),
-		cfg:     cfg,
-		events:  events,
+		config:  config,
+		events:  eventBus,
		factory: factory,
	}
}
@@ -53,16 +53,20 @@ func (s *SimpleSplitter) OnLogin(event *proxy.LoginEvent) {
	}
	s.mu.Lock()
	defer s.mu.Unlock()
+	now := time.Now()

-	if s.cfg.ReuseTimeout > 0 {
+	if s.config.ReuseTimeout > 0 {
		for id, mapper := range s.idle {
-			if mapper.strategy != nil && mapper.strategy.IsActive() {
+			if mapper.strategy != nil && mapper.strategy.IsActive() && !mapper.idleAt.IsZero() && now.Sub(mapper.idleAt) <= time.Duration(s.config.ReuseTimeout)*time.Second {
				delete(s.idle, id)
				mapper.miner = event.Miner
				mapper.idleAt = time.Time{}
				mapper.stopped = false
				s.active[event.Miner.ID()] = mapper
				event.Miner.SetRouteID(mapper.id)
+				if mapper.currentJob.IsValid() {
+					event.Miner.SetCurrentJob(mapper.currentJob)
+				}
				return
			}
		}
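The strengthened guard means an idle mapper is reclaimed only while its strategy is still connected and its idle timestamp is both set and within ReuseTimeout seconds of now. The time arithmetic on its own, with hypothetical names:

import "time"

// reusable reports whether an idle mapper may be handed to a new miner.
// idleAt is zero while a mapper is active, so a zero value never qualifies.
func reusable(idleAt time.Time, reuseTimeoutSec int, now time.Time) bool {
	if reuseTimeoutSec <= 0 || idleAt.IsZero() {
		return false
	}
	return now.Sub(idleAt) <= time.Duration(reuseTimeoutSec)*time.Second
}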
@@ -83,7 +87,7 @@ func (s *SimpleSplitter) OnSubmit(event *proxy.SubmitEvent) {
		return
	}
	s.mu.Lock()
-	mapper := s.active[event.Miner.ID()]
+	mapper := s.activeMapperByRouteIDLocked(event.Miner.RouteID())
	s.mu.Unlock()
	if mapper != nil {
		mapper.Submit(event)
@@ -105,7 +109,7 @@ func (s *SimpleSplitter) OnClose(event *proxy.CloseEvent) {
	mapper.miner = nil
	mapper.idleAt = time.Now()
	event.Miner.SetRouteID(-1)
-	if s.cfg.ReuseTimeout > 0 {
+	if s.config.ReuseTimeout > 0 {
		s.idle[mapper.id] = mapper
		return
	}
@@ -124,7 +128,7 @@ func (s *SimpleSplitter) GC() {
	defer s.mu.Unlock()
	now := time.Now()
	for id, mapper := range s.idle {
-		if mapper.stopped || (s.cfg.ReuseTimeout > 0 && now.Sub(mapper.idleAt) > time.Duration(s.cfg.ReuseTimeout)*time.Second) {
+		if mapper.stopped || (s.config.ReuseTimeout > 0 && now.Sub(mapper.idleAt) > time.Duration(s.config.ReuseTimeout)*time.Second) {
			if mapper.strategy != nil {
				mapper.strategy.Disconnect()
			}
@@ -133,8 +137,31 @@
	}
}

-// Tick is a no-op for simple mode.
-func (s *SimpleSplitter) Tick(ticks uint64) {}
+// Tick advances timeout checks in simple mode.
+func (s *SimpleSplitter) Tick(ticks uint64) {
+	if s == nil {
+		return
+	}
+	strategies := make([]pool.Strategy, 0, len(s.active)+len(s.idle))
+	s.mu.Lock()
+	for _, mapper := range s.active {
+		if mapper != nil && mapper.strategy != nil {
+			strategies = append(strategies, mapper.strategy)
+		}
+	}
+	for _, mapper := range s.idle {
+		if mapper != nil && mapper.strategy != nil {
+			strategies = append(strategies, mapper.strategy)
+		}
+	}
+	s.mu.Unlock()
+	for _, strategy := range strategies {
+		if ticker, ok := strategy.(interface{ Tick(uint64) }); ok {
+			ticker.Tick(ticks)
+		}
+	}
+	s.GC()
+}

// Upstreams returns active/idle/error counts.
func (s *SimpleSplitter) Upstreams() proxy.UpstreamStats {
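Tick forwards ticks through an inline interface assertion, so a strategy opts in simply by having a Tick(uint64) method; strategies without one need no stub. The shape of that pattern, detached from the splitter types:

// notifyTick forwards a tick only to values that opt in by implementing
// the method; the interface is declared inline at the call site.
func notifyTick(values []any, ticks uint64) {
	for _, v := range values {
		if t, ok := v.(interface{ Tick(uint64) }); ok {
			t.Tick(ticks)
		}
	}
}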
@@ -144,20 +171,85 @@
	s.mu.Lock()
	defer s.mu.Unlock()
	var stats proxy.UpstreamStats
-	stats.Active = uint64(len(s.active))
-	stats.Sleep = uint64(len(s.idle))
-	stats.Total = stats.Active + stats.Sleep
+	for _, mapper := range s.active {
+		if mapper == nil {
+			continue
+		}
+		if mapper.stopped || mapper.strategy == nil || !mapper.strategy.IsActive() {
+			stats.Error++
+			continue
+		}
+		stats.Active++
+	}
+	for _, mapper := range s.idle {
+		if mapper == nil {
+			continue
+		}
+		if mapper.stopped || mapper.strategy == nil || !mapper.strategy.IsActive() {
+			stats.Error++
+			continue
+		}
+		stats.Sleep++
+	}
+	stats.Total = stats.Active + stats.Sleep + stats.Error
	return stats
}

-func (s *SimpleSplitter) newMapperLocked() *SimpleMapper {
-	id := s.seq
-	s.seq++
-	mapper := &SimpleMapper{
-		id:      id,
-		events:  s.events,
-		pending: make(map[int64]*proxy.SubmitEvent),
+// Disconnect closes every active or idle upstream connection and clears the mapper tables.
+func (s *SimpleSplitter) Disconnect() {
+	if s == nil {
+		return
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	for _, mapper := range s.active {
+		if mapper != nil && mapper.strategy != nil {
+			mapper.strategy.Disconnect()
+		}
+	}
+	for _, mapper := range s.idle {
+		if mapper != nil && mapper.strategy != nil {
+			mapper.strategy.Disconnect()
+		}
+	}
+	s.active = make(map[int64]*SimpleMapper)
+	s.idle = make(map[int64]*SimpleMapper)
+}
+
+// ReloadPools reconnects each active or idle mapper using the updated pool list.
+//
+//	s.ReloadPools()
+func (s *SimpleSplitter) ReloadPools() {
+	if s == nil {
+		return
+	}
+	strategies := make([]pool.Strategy, 0, len(s.active)+len(s.idle))
+	s.mu.Lock()
+	for _, mapper := range s.active {
+		if mapper == nil || mapper.strategy == nil {
+			continue
+		}
+		strategies = append(strategies, mapper.strategy)
+	}
+	for _, mapper := range s.idle {
+		if mapper == nil || mapper.strategy == nil {
+			continue
+		}
+		strategies = append(strategies, mapper.strategy)
+	}
+	s.mu.Unlock()
+	for _, strategy := range strategies {
+		if reloadable, ok := strategy.(pool.ReloadableStrategy); ok {
+			reloadable.ReloadPools()
+		}
+	}
+}
+
+func (s *SimpleSplitter) newMapperLocked() *SimpleMapper {
+	id := s.nextMapperID
+	s.nextMapperID++
+	mapper := NewSimpleMapper(id, nil)
+	mapper.events = s.events
	mapper.strategy = s.factory(mapper)
	if mapper.strategy == nil {
		mapper.strategy = s.factory(mapper)
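Tick, ReloadPools, and Disconnect all follow the same locking discipline: copy the strategy pointers while holding s.mu, release the lock, then call into the strategies, so a slow upstream call never stalls logins or submits. A reduced sketch of that snapshot-then-call pattern:

import "sync"

// registry is illustrative; items stands in for the mapper tables.
type registry struct {
	mu    sync.Mutex
	items []func()
}

// broadcast snapshots the callbacks under the lock, then runs them
// unlocked so long-running callbacks never block other registry users.
func (r *registry) broadcast() {
	r.mu.Lock()
	snapshot := make([]func(), len(r.items))
	copy(snapshot, r.items)
	r.mu.Unlock()
	for _, fn := range snapshot {
		fn()
	}
}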
@@ -165,6 +257,18 @@ func (s *SimpleSplitter) newMapperLocked() *SimpleMapper {
	return mapper
}

+func (s *SimpleSplitter) activeMapperByRouteIDLocked(routeID int64) *SimpleMapper {
+	if s == nil || routeID < 0 {
+		return nil
+	}
+	for _, mapper := range s.active {
+		if mapper != nil && mapper.id == routeID {
+			return mapper
+		}
+	}
+	return nil
+}
+
// Submit forwards a share to the pool.
func (m *SimpleMapper) Submit(event *proxy.SubmitEvent) {
	if m == nil || event == nil || m.strategy == nil {
@@ -172,8 +276,36 @@ func (m *SimpleMapper) Submit(event *proxy.SubmitEvent) {
	}
	m.mu.Lock()
	defer m.mu.Unlock()
-	seq := m.strategy.Submit(event.JobID, event.Nonce, event.Result, event.Algo)
-	m.pending[seq] = event
+	jobID := event.JobID
+	if jobID == "" {
+		jobID = m.currentJob.JobID
+	}
+	if jobID == "" || (jobID != m.currentJob.JobID && jobID != m.prevJob.JobID) {
+		m.rejectInvalidJobLocked(event, m.currentJob)
+		return
+	}
+	submissionJob := m.currentJob
+	if jobID == m.prevJob.JobID && m.prevJob.JobID != "" {
+		submissionJob = m.prevJob
+	}
+	seq := m.strategy.Submit(jobID, event.Nonce, event.Result, event.Algo)
+	m.pending[seq] = submitContext{
+		RequestID: event.RequestID,
+		Diff:      proxy.EffectiveShareDifficulty(submissionJob, event.Miner),
+		StartedAt: time.Now(),
+		JobID:     jobID,
+	}
}

+func (m *SimpleMapper) rejectInvalidJobLocked(event *proxy.SubmitEvent, job proxy.Job) {
+	if event == nil || event.Miner == nil {
+		return
+	}
+	event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
+	if m.events != nil {
+		jobCopy := job
+		m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: event.Miner, Job: &jobCopy, Error: "Invalid job id"})
+	}
+}
+
// OnJob forwards the latest pool job to the active miner.
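Submit now normalises an empty job ID to the current job and rejects anything outside the current/previous pair before the share ever reaches the pool. The decision logic factored out with illustrative names:

// resolveJob returns the job ID a share should be attributed to, or
// ok=false when the ID is unknown and the share must be rejected locally.
func resolveJob(id, current, prev string) (jobID string, stale bool, ok bool) {
	if id == "" {
		id = current // miners may omit the ID; assume the latest job
	}
	switch {
	case id == "" || (id != current && id != prev):
		return "", false, false
	case id == prev && prev != "":
		return id, true, true // stale, but still within the two-job window
	default:
		return id, false, true
	}
}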
@@ -182,6 +314,13 @@
		return
	}
	m.mu.Lock()
+	m.prevJob = m.currentJob
+	if m.prevJob.ClientID != job.ClientID {
+		m.prevJob = proxy.Job{}
+	}
	m.currentJob = job
	m.stopped = false
	m.idleAt = time.Time{}
	miner := m.miner
	m.mu.Unlock()
	if miner == nil {
@@ -196,25 +335,42 @@ func (m *SimpleMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
		return
	}
	m.mu.Lock()
-	ctx := m.pending[sequence]
-	delete(m.pending, sequence)
+	ctx, ok := m.pending[sequence]
+	if ok {
+		delete(m.pending, sequence)
+	}
	miner := m.miner
+	currentJob := m.currentJob
+	prevJob := m.prevJob
	m.mu.Unlock()
-	if ctx == nil || miner == nil {
+	if !ok || miner == nil {
		return
	}
+	latency := uint16(0)
+	if !ctx.StartedAt.IsZero() {
+		elapsed := time.Since(ctx.StartedAt).Milliseconds()
+		if elapsed > int64(^uint16(0)) {
+			latency = ^uint16(0)
+		} else {
+			latency = uint16(elapsed)
+		}
+	}
+	job := currentJob
+	expired := false
+	if ctx.JobID != "" && ctx.JobID == prevJob.JobID && ctx.JobID != currentJob.JobID {
+		job = prevJob
+		expired = true
+	}
	if accepted {
		miner.Success(ctx.RequestID, "OK")
		if m.events != nil {
-			job := miner.CurrentJob()
-			m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Diff: job.DifficultyFromTarget(), Job: &job})
+			m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Diff: ctx.Diff, Job: &job, Latency: latency, Expired: expired})
		}
		return
	}
	miner.ReplyWithError(ctx.RequestID, errorMessage)
	if m.events != nil {
-		job := miner.CurrentJob()
-		m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Diff: job.DifficultyFromTarget(), Job: &job, Error: errorMessage})
+		m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Diff: ctx.Diff, Job: &job, Error: errorMessage, Latency: latency})
	}
}
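The Latency field on accept/reject events is a uint16 of milliseconds, so OnResultAccepted saturates at 65535 rather than wrapping. The conversion in isolation, mirroring the clamp above:

import "time"

// latencyMillis clamps an elapsed duration into a uint16 millisecond
// field; ^uint16(0) is 65535, the saturation point.
func latencyMillis(startedAt time.Time) uint16 {
	if startedAt.IsZero() {
		return 0
	}
	elapsed := time.Since(startedAt).Milliseconds()
	if elapsed > int64(^uint16(0)) {
		return ^uint16(0)
	}
	if elapsed < 0 {
		return 0
	}
	return uint16(elapsed)
}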
377
splitter/simple/impl_test.go
Normal file

@@ -0,0 +1,377 @@
package simple

import (
	"bufio"
	"encoding/json"
	"io"
	"net"
	"sync"
	"testing"
	"time"

	"dappco.re/go/proxy"
	"dappco.re/go/proxy/pool"
)

type activeStrategy struct{}

func (a activeStrategy) Connect()                                    {}
func (a activeStrategy) Submit(string, string, string, string) int64 { return 0 }
func (a activeStrategy) Disconnect()                                 {}
func (a activeStrategy) IsActive() bool                              { return true }

type submitRecordingStrategy struct {
	submits int
}

func (s *submitRecordingStrategy) Connect() {}

func (s *submitRecordingStrategy) Submit(string, string, string, string) int64 {
	s.submits++
	return int64(s.submits)
}

func (s *submitRecordingStrategy) Disconnect() {}

func (s *submitRecordingStrategy) IsActive() bool { return true }

func TestSimpleMapper_New_Good(t *testing.T) {
	strategy := activeStrategy{}
	mapper := NewSimpleMapper(7, strategy)

	if mapper == nil {
		t.Fatal("expected mapper")
	}
	if mapper.id != 7 {
		t.Fatalf("expected mapper id 7, got %d", mapper.id)
	}
	if mapper.strategy != strategy {
		t.Fatalf("expected strategy to be stored")
	}
	if mapper.pending == nil {
		t.Fatal("expected pending map to be initialised")
	}
}

func TestSimpleSplitter_OnLogin_Good(t *testing.T) {
	splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
		return activeStrategy{}
	})
	miner := &proxy.Miner{}
	job := proxy.Job{JobID: "job-1", Blob: "blob"}
	mapper := &SimpleMapper{
		id:         7,
		strategy:   activeStrategy{},
		currentJob: job,
		idleAt:     time.Now(),
	}
	splitter.idle[mapper.id] = mapper

	splitter.OnLogin(&proxy.LoginEvent{Miner: miner})

	if miner.RouteID() != mapper.id {
		t.Fatalf("expected reclaimed mapper route id %d, got %d", mapper.id, miner.RouteID())
	}
	if got := miner.CurrentJob().JobID; got != job.JobID {
		t.Fatalf("expected current job to be restored on reuse, got %q", got)
	}
}

func TestSimpleSplitter_OnLogin_Ugly(t *testing.T) {
	splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
		return activeStrategy{}
	})
	miner := &proxy.Miner{}
	expired := &SimpleMapper{
		id:       7,
		strategy: activeStrategy{},
		idleAt:   time.Now().Add(-time.Minute),
	}
	splitter.idle[expired.id] = expired

	splitter.OnLogin(&proxy.LoginEvent{Miner: miner})

	if miner.RouteID() == expired.id {
		t.Fatalf("expected expired mapper not to be reclaimed")
	}
	if miner.RouteID() != 0 {
		t.Fatalf("expected a new mapper to be allocated, got route id %d", miner.RouteID())
	}
	if len(splitter.active) != 1 {
		t.Fatalf("expected one active mapper, got %d", len(splitter.active))
	}
	if len(splitter.idle) != 1 {
		t.Fatalf("expected expired mapper to remain idle until GC, got %d idle mappers", len(splitter.idle))
	}
}

func TestSimpleSplitter_OnSubmit_UsesRouteID_Good(t *testing.T) {
	strategy := &submitRecordingStrategy{}
	splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, nil)
	miner := proxy.NewMiner(discardConn{}, 3333, nil)
	miner.SetID(21)
	miner.SetRouteID(7)

	mapper := &SimpleMapper{
		id:         7,
		miner:      miner,
		currentJob: proxy.Job{JobID: "job-1", Blob: "blob", Target: "b88d0600"},
		strategy:   strategy,
		pending:    make(map[int64]submitContext),
	}
	splitter.active[99] = mapper

	splitter.OnSubmit(&proxy.SubmitEvent{
		Miner:     miner,
		JobID:     "job-1",
		Nonce:     "deadbeef",
		Result:    "hash",
		RequestID: 11,
	})

	if strategy.submits != 1 {
		t.Fatalf("expected one submit routed by route id, got %d", strategy.submits)
	}
	if len(mapper.pending) != 1 {
		t.Fatalf("expected routed submit to create one pending entry, got %d", len(mapper.pending))
	}
}

func TestSimpleSplitter_Upstreams_Good(t *testing.T) {
	splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
		return activeStrategy{}
	})
	splitter.active[1] = &SimpleMapper{id: 1, strategy: activeStrategy{}}
	splitter.idle[2] = &SimpleMapper{id: 2, strategy: activeStrategy{}, idleAt: time.Now()}

	stats := splitter.Upstreams()

	if stats.Active != 1 {
		t.Fatalf("expected one active upstream, got %d", stats.Active)
	}
	if stats.Sleep != 1 {
		t.Fatalf("expected one sleeping upstream, got %d", stats.Sleep)
	}
	if stats.Error != 0 {
		t.Fatalf("expected no error upstreams, got %d", stats.Error)
	}
	if stats.Total != 2 {
		t.Fatalf("expected total upstreams to be 2, got %d", stats.Total)
	}
}

func TestSimpleSplitter_Upstreams_Ugly(t *testing.T) {
	splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
		return activeStrategy{}
	})
	splitter.active[1] = &SimpleMapper{id: 1, strategy: activeStrategy{}, stopped: true}
	splitter.idle[2] = &SimpleMapper{id: 2, strategy: activeStrategy{}, stopped: true, idleAt: time.Now()}

	stats := splitter.Upstreams()

	if stats.Active != 0 {
		t.Fatalf("expected no active upstreams, got %d", stats.Active)
	}
	if stats.Sleep != 0 {
		t.Fatalf("expected no sleeping upstreams, got %d", stats.Sleep)
	}
	if stats.Error != 2 {
		t.Fatalf("expected both upstreams to be counted as error, got %d", stats.Error)
	}
	if stats.Total != 2 {
		t.Fatalf("expected total upstreams to be 2, got %d", stats.Total)
	}
}

func TestSimpleSplitter_Upstreams_RecoveryResetsStopped_Good(t *testing.T) {
	splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
		return activeStrategy{}
	})
	mapper := &SimpleMapper{id: 1, strategy: activeStrategy{}, stopped: true}
	splitter.active[1] = mapper

	before := splitter.Upstreams()
	if before.Error != 1 {
		t.Fatalf("expected disconnected mapper to count as error, got %+v", before)
	}

	mapper.OnJob(proxy.Job{JobID: "job-1", Blob: "blob"})

	after := splitter.Upstreams()
	if after.Active != 1 {
		t.Fatalf("expected recovered mapper to count as active, got %+v", after)
	}
	if after.Error != 0 {
		t.Fatalf("expected recovered mapper not to remain in error, got %+v", after)
	}
}

type discardConn struct{}

func (discardConn) Read([]byte) (int, error)         { return 0, io.EOF }
func (discardConn) Write(p []byte) (int, error)      { return len(p), nil }
func (discardConn) Close() error                     { return nil }
func (discardConn) LocalAddr() net.Addr              { return nil }
func (discardConn) RemoteAddr() net.Addr             { return nil }
func (discardConn) SetDeadline(time.Time) error      { return nil }
func (discardConn) SetReadDeadline(time.Time) error  { return nil }
func (discardConn) SetWriteDeadline(time.Time) error { return nil }

func TestSimpleMapper_OnResultAccepted_Expired(t *testing.T) {
	bus := proxy.NewEventBus()
	events := make(chan proxy.Event, 1)
	var once sync.Once
	bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
		once.Do(func() {
			events <- e
		})
	})

	miner := proxy.NewMiner(discardConn{}, 3333, nil)
	miner.SetID(1)
	mapper := &SimpleMapper{
		miner:      miner,
		currentJob: proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"},
		prevJob:    proxy.Job{JobID: "job-old", Blob: "blob-old", Target: "b88d0600"},
		events:     bus,
		pending: map[int64]submitContext{
			7: {RequestID: 9, StartedAt: time.Now(), JobID: "job-old"},
		},
	}

	mapper.OnResultAccepted(7, true, "")

	select {
	case event := <-events:
		if !event.Expired {
			t.Fatalf("expected expired share to be flagged")
		}
		if event.Job == nil || event.Job.JobID != "job-old" {
			t.Fatalf("expected previous job to be attached, got %+v", event.Job)
		}
	case <-time.After(time.Second):
		t.Fatal("expected accept event")
	}
}

func TestSimpleMapper_OnResultAccepted_CustomDiffUsesEffectiveDifficulty(t *testing.T) {
	bus := proxy.NewEventBus()
	events := make(chan proxy.Event, 1)
	var once sync.Once
	bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
		once.Do(func() {
			events <- e
		})
	})

	miner := proxy.NewMiner(discardConn{}, 3333, nil)
	miner.SetID(2)
	job := proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"}
	mapper := &SimpleMapper{
		miner:      miner,
		currentJob: job,
		events:     bus,
		pending: map[int64]submitContext{
			8: {
				RequestID: 10,
				Diff:      25000,
				StartedAt: time.Now(),
				JobID:     "job-new",
			},
		},
	}

	mapper.OnResultAccepted(8, true, "")

	select {
	case event := <-events:
		if event.Diff != 25000 {
			t.Fatalf("expected effective difficulty 25000, got %d", event.Diff)
		}
	case <-time.After(time.Second):
		t.Fatal("expected accept event")
	}
}

func TestSimpleMapper_OnJob_PreservesPreviousJobForSamePoolSession_Good(t *testing.T) {
	mapper := &SimpleMapper{
		currentJob: proxy.Job{JobID: "job-1", Blob: "blob-1", ClientID: "session-a"},
	}

	mapper.OnJob(proxy.Job{JobID: "job-2", Blob: "blob-2", ClientID: "session-a"})

	if mapper.currentJob.JobID != "job-2" {
		t.Fatalf("expected current job to roll forward, got %q", mapper.currentJob.JobID)
	}
	if mapper.prevJob.JobID != "job-1" {
		t.Fatalf("expected previous job to remain available within one pool session, got %q", mapper.prevJob.JobID)
	}
}

func TestSimpleMapper_OnJob_ResetsPreviousJobAcrossPoolSessions_Ugly(t *testing.T) {
	mapper := &SimpleMapper{
		currentJob: proxy.Job{JobID: "job-1", Blob: "blob-1", ClientID: "session-a"},
		prevJob:    proxy.Job{JobID: "job-0", Blob: "blob-0", ClientID: "session-a"},
	}

	mapper.OnJob(proxy.Job{JobID: "job-2", Blob: "blob-2", ClientID: "session-b"})

	if mapper.currentJob.JobID != "job-2" {
		t.Fatalf("expected current job to advance after session change, got %q", mapper.currentJob.JobID)
	}
	if mapper.prevJob.JobID != "" {
		t.Fatalf("expected previous job history to reset on new pool session, got %q", mapper.prevJob.JobID)
	}
}

func TestSimpleMapper_Submit_InvalidJob_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()

	miner := proxy.NewMiner(minerConn, 3333, nil)
	mapper := &SimpleMapper{
		miner:      miner,
		currentJob: proxy.Job{JobID: "job-1", Blob: "blob", Target: "b88d0600"},
		prevJob:    proxy.Job{JobID: "job-0", Blob: "blob", Target: "b88d0600"},
		strategy:   activeStrategy{},
		pending:    make(map[int64]submitContext),
	}

	done := make(chan struct{})
	go func() {
		mapper.Submit(&proxy.SubmitEvent{
			Miner:     miner,
			JobID:     "job-missing",
			Nonce:     "deadbeef",
			Result:    "hash",
			RequestID: 9,
		})
		close(done)
	}()

	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read error reply: %v", err)
	}
	<-done

	var payload struct {
		ID    float64 `json:"id"`
		Error struct {
			Message string `json:"message"`
		} `json:"error"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal error reply: %v", err)
	}
	if payload.ID != 9 {
		t.Fatalf("expected request id 9, got %v", payload.ID)
	}
	if payload.Error.Message != "Invalid job id" {
		t.Fatalf("expected invalid job error, got %q", payload.Error.Message)
	}
	if len(mapper.pending) != 0 {
		t.Fatalf("expected invalid submit not to create a pending entry")
	}
}
@@ -14,12 +14,32 @@ import (
//
//	m := simple.NewSimpleMapper(id, strategy)
type SimpleMapper struct {
-	id       int64
-	miner    *proxy.Miner // nil when idle
-	strategy pool.Strategy
-	idleAt   time.Time // zero when active
-	stopped  bool
-	events   *proxy.EventBus
-	pending  map[int64]*proxy.SubmitEvent
-	mu       sync.Mutex
+	id         int64
+	miner      *proxy.Miner // nil when idle
+	currentJob proxy.Job
+	prevJob    proxy.Job
+	strategy   pool.Strategy
+	idleAt     time.Time // zero when active
+	stopped    bool
+	events     *proxy.EventBus
+	pending    map[int64]submitContext
+	mu         sync.Mutex
}
+
+type submitContext struct {
+	RequestID int64
+	Diff      uint64
+	StartedAt time.Time
+	JobID     string
+}
+
+// NewSimpleMapper creates a passthrough mapper for one pool connection.
+//
+//	m := simple.NewSimpleMapper(7, strategy)
+func NewSimpleMapper(id int64, strategy pool.Strategy) *SimpleMapper {
+	return &SimpleMapper{
+		id:       id,
+		strategy: strategy,
+		pending:  make(map[int64]submitContext),
+	}
+}
68
splitter/simple/reload_test.go
Normal file

@@ -0,0 +1,68 @@
package simple

import (
	"testing"

	"dappco.re/go/proxy/pool"
)

type reloadableStrategy struct {
	reloads int
}

func (s *reloadableStrategy) Connect()                                       {}
func (s *reloadableStrategy) Submit(jobID, nonce, result, algo string) int64 { return 0 }
func (s *reloadableStrategy) Disconnect()                                    {}
func (s *reloadableStrategy) IsActive() bool                                 { return true }
func (s *reloadableStrategy) ReloadPools()                                   { s.reloads++ }

var _ pool.ReloadableStrategy = (*reloadableStrategy)(nil)

func TestSimpleSplitter_ReloadPools_Good(t *testing.T) {
	strategy := &reloadableStrategy{}
	splitter := &SimpleSplitter{
		active: map[int64]*SimpleMapper{
			1: {strategy: strategy},
		},
		idle: map[int64]*SimpleMapper{},
	}

	splitter.ReloadPools()

	if strategy.reloads != 1 {
		t.Fatalf("expected active mapper strategy to reload once, got %d", strategy.reloads)
	}
}

func TestSimpleSplitter_ReloadPools_Bad(t *testing.T) {
	splitter := &SimpleSplitter{
		active: map[int64]*SimpleMapper{
			1: {strategy: nil},
		},
		idle: map[int64]*SimpleMapper{},
	}

	splitter.ReloadPools()
}

func TestSimpleSplitter_ReloadPools_Ugly(t *testing.T) {
	active := &reloadableStrategy{}
	idle := &reloadableStrategy{}
	splitter := &SimpleSplitter{
		active: map[int64]*SimpleMapper{
			1: {strategy: active},
		},
		idle: map[int64]*SimpleMapper{
			2: {strategy: idle},
		},
	}

	splitter.ReloadPools()

	if active.reloads != 1 {
		t.Fatalf("expected active mapper reload, got %d", active.reloads)
	}
	if idle.reloads != 1 {
		t.Fatalf("expected idle mapper reload, got %d", idle.reloads)
	}
}
@@ -18,11 +18,11 @@ import (
//
//	s := simple.NewSimpleSplitter(cfg, eventBus, strategyFactory)
type SimpleSplitter struct {
-	active  map[int64]*SimpleMapper // minerID → mapper
-	idle    map[int64]*SimpleMapper // mapperID → mapper (reuse pool, keyed by mapper seq)
-	cfg     *proxy.Config
-	events  *proxy.EventBus
-	factory pool.StrategyFactory
-	mu      sync.Mutex
-	seq     int64 // monotonic mapper sequence counter
+	active       map[int64]*SimpleMapper // minerID → mapper
+	idle         map[int64]*SimpleMapper // mapperID → mapper (reuse pool, keyed by mapper ID)
+	config       *proxy.Config
+	events       *proxy.EventBus
+	factory      pool.StrategyFactory
+	mu           sync.Mutex
+	nextMapperID int64 // monotonic mapper ID counter
}
1265
state_impl.go
File diff suppressed because it is too large

137
state_stop_test.go
Normal file

@@ -0,0 +1,137 @@
package proxy

import (
	"net"
	"testing"
	"time"
)

func TestProxy_Stop_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()

	miner := NewMiner(clientConn, 3333, nil)
	splitter := &stubSplitter{}
	proxyInstance := &Proxy{
		done:     make(chan struct{}),
		miners:   map[int64]*Miner{miner.ID(): miner},
		splitter: splitter,
	}

	done := make(chan error, 1)
	go func() {
		buf := make([]byte, 1)
		_, err := serverConn.Read(buf)
		done <- err
	}()

	time.Sleep(10 * time.Millisecond)
	proxyInstance.Stop()

	select {
	case err := <-done:
		if err == nil {
			t.Fatalf("expected miner connection to close during Stop")
		}
	case <-time.After(time.Second):
		t.Fatalf("expected miner connection to close during Stop")
	}
	if !splitter.disconnected {
		t.Fatalf("expected splitter to be disconnected during Stop")
	}
}

func TestProxy_Stop_Bad(t *testing.T) {
	var proxyInstance *Proxy

	proxyInstance.Stop()
}

func TestProxy_Stop_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()

	miner := NewMiner(clientConn, 3333, nil)
	proxyInstance := &Proxy{
		done:   make(chan struct{}),
		miners: map[int64]*Miner{miner.ID(): miner},
	}

	proxyInstance.Stop()
	proxyInstance.Stop()

	buf := make([]byte, 1)
	if _, err := serverConn.Read(buf); err == nil {
		t.Fatalf("expected closed connection after repeated Stop calls")
	}
}

func TestProxy_Stop_WaitsBeforeDisconnectingSubmitPaths(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()

	miner := NewMiner(clientConn, 3333, nil)
	splitter := &blockingStopSplitter{disconnectedCh: make(chan struct{})}
	proxyInstance := &Proxy{
		done:     make(chan struct{}),
		miners:   map[int64]*Miner{miner.ID(): miner},
		splitter: splitter,
	}
	proxyInstance.submitCount.Store(1)

	stopped := make(chan struct{})
	go func() {
		proxyInstance.Stop()
		close(stopped)
	}()

	select {
	case <-splitter.disconnectedCh:
		t.Fatalf("expected splitter disconnect to wait for submit drain")
	case <-stopped:
		t.Fatalf("expected Stop to keep waiting while submits are in flight")
	case <-time.After(50 * time.Millisecond):
	}

	proxyInstance.submitCount.Store(0)

	select {
	case <-splitter.disconnectedCh:
	case <-time.After(time.Second):
		t.Fatalf("expected splitter disconnect after submit drain")
	}

	select {
	case <-stopped:
	case <-time.After(time.Second):
		t.Fatalf("expected Stop to finish after submit drain")
	}
}

type stubSplitter struct {
	disconnected bool
}

func (s *stubSplitter) Connect()                    {}
func (s *stubSplitter) OnLogin(event *LoginEvent)   {}
func (s *stubSplitter) OnSubmit(event *SubmitEvent) {}
func (s *stubSplitter) OnClose(event *CloseEvent)   {}
func (s *stubSplitter) Tick(ticks uint64)           {}
func (s *stubSplitter) GC()                         {}
func (s *stubSplitter) Upstreams() UpstreamStats    { return UpstreamStats{} }
func (s *stubSplitter) Disconnect()                 { s.disconnected = true }

type blockingStopSplitter struct {
	disconnectedCh chan struct{}
}

func (s *blockingStopSplitter) Connect()                    {}
func (s *blockingStopSplitter) OnLogin(event *LoginEvent)   {}
func (s *blockingStopSplitter) OnSubmit(event *SubmitEvent) {}
func (s *blockingStopSplitter) OnClose(event *CloseEvent)   {}
func (s *blockingStopSplitter) Tick(ticks uint64)           {}
func (s *blockingStopSplitter) GC()                         {}
func (s *blockingStopSplitter) Upstreams() UpstreamStats    { return UpstreamStats{} }
func (s *blockingStopSplitter) Disconnect() {
	close(s.disconnectedCh)
}
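These Stop tests assume a two-phase shutdown: miner connections close first, then Stop spins until the in-flight submit counter drains, and only then is the splitter disconnected. A sketch of such a drain loop, assuming an atomic counter like the proxy's submitCount; the polling interval and deadline handling are illustrative:

import (
	"sync/atomic"
	"time"
)

// waitForDrain polls an in-flight counter until it reaches zero or the
// deadline passes; it returns false on timeout so the caller can decide
// whether to force the disconnect anyway.
func waitForDrain(inFlight *atomic.Int64, deadline time.Time) bool {
	for inFlight.Load() > 0 {
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(5 * time.Millisecond)
	}
	return true
}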
33
state_submit_test.go
Normal file

@@ -0,0 +1,33 @@
package proxy

import (
	"testing"
	"time"
)

func TestProxy_Stop_WaitsForSubmitDrain(t *testing.T) {
	p := &Proxy{
		done: make(chan struct{}),
	}
	p.submitCount.Store(1)

	stopped := make(chan struct{})
	go func() {
		p.Stop()
		close(stopped)
	}()

	select {
	case <-stopped:
		t.Fatalf("expected Stop to wait for pending submits")
	case <-time.After(50 * time.Millisecond):
	}

	p.submitCount.Store(0)

	select {
	case <-stopped:
	case <-time.After(time.Second):
		t.Fatalf("expected Stop to finish after pending submits drain")
	}
}
35
stats.go
@@ -9,9 +9,11 @@ import (
// Stats tracks global proxy metrics. Hot-path counters are atomic. Hashrate windows
// use a ring buffer per window size, advanced by Tick().
//
-//	s := proxy.NewStats()
-//	bus.Subscribe(proxy.EventAccept, s.OnAccept)
-//	bus.Subscribe(proxy.EventReject, s.OnReject)
+//	stats := proxy.NewStats()
+//	bus.Subscribe(proxy.EventAccept, stats.OnAccept)
+//	bus.Subscribe(proxy.EventReject, stats.OnReject)
+//	stats.Tick()
+//	summary := stats.Summary()
type Stats struct {
	accepted atomic.Uint64
	rejected atomic.Uint64
@@ -28,7 +30,6 @@
	mu sync.Mutex
}

// Hashrate window sizes in seconds. Index maps to Stats.windows and SummaryResponse.Hashrate.
const (
	HashrateWindow60s  = 0 // 1 minute
	HashrateWindow600s = 1 // 10 minutes
@@ -38,7 +39,9 @@
	HashrateWindowAll = 5 // all-time (single accumulator, no window)
)

-// tickWindow is a fixed-capacity ring buffer of per-second difficulty sums.
+// tickWindow is a fixed-capacity ring buffer of per-second difficulty totals.
+//
+//	window := newTickWindow(60)
type tickWindow struct {
	buckets []uint64
	pos     int
@@ -47,15 +50,17 @@

// StatsSummary is the serialisable snapshot returned by Summary().
//
-//	summary := stats.Summary()
+//	summary := proxy.NewStats().Summary()
+//	_ = summary.Hashrate[0] // 60-second window H/s
type StatsSummary struct {
-	Accepted   uint64     `json:"accepted"`
-	Rejected   uint64     `json:"rejected"`
-	Invalid    uint64     `json:"invalid"`
-	Expired    uint64     `json:"expired"`
-	Hashes     uint64     `json:"hashes_total"`
-	AvgTime    uint32     `json:"avg_time"` // seconds per accepted share
-	AvgLatency uint32     `json:"latency"`  // median pool response latency in ms
-	Hashrate   [6]float64 `json:"hashrate"` // H/s per window (index = HashrateWindow* constants)
-	TopDiff    [10]uint64 `json:"best"`
+	Accepted        uint64                           `json:"accepted"`
+	Rejected        uint64                           `json:"rejected"`
+	Invalid         uint64                           `json:"invalid"`
+	Expired         uint64                           `json:"expired"`
+	Hashes          uint64                           `json:"hashes_total"`
+	AvgTime         uint32                           `json:"avg_time"` // seconds per accepted share
+	AvgLatency      uint32                           `json:"latency"`  // median pool response latency in ms
+	Hashrate        [6]float64                       `json:"hashrate"` // H/s per window (index = HashrateWindow* constants)
+	TopDiff         [10]uint64                       `json:"best"`
+	CustomDiffStats map[uint64]CustomDiffBucketStats `json:"custom_diff_stats,omitempty"`
}
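stats.go describes tickWindow as a fixed-capacity ring of per-second difficulty totals advanced by Tick. A standalone sketch of that kind of ring, with hypothetical method names (the real tickWindow's API may differ):

// ring accumulates difficulty into the current one-second bucket;
// advance moves to the next second, zeroing the bucket it reuses.
type ring struct {
	buckets []uint64
	pos     int
}

func newRing(seconds int) *ring { return &ring{buckets: make([]uint64, seconds)} }

func (r *ring) add(diff uint64) { r.buckets[r.pos] += diff }

func (r *ring) advance() {
	r.pos = (r.pos + 1) % len(r.buckets)
	r.buckets[r.pos] = 0
}

// hashrate: total difficulty over the window divided by its length in
// seconds gives hashes per second.
func (r *ring) hashrate() float64 {
	var total uint64
	for _, b := range r.buckets {
		total += b
	}
	return float64(total) / float64(len(r.buckets))
}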
173
stats_test.go
Normal file

@@ -0,0 +1,173 @@
package proxy

import (
	"sync"
	"testing"
)

// TestStats_OnAccept_Good verifies that accepted counter, hashes, and topDiff are updated.
//
//	stats := proxy.NewStats()
//	stats.OnAccept(proxy.Event{Diff: 100000, Latency: 82})
//	summary := stats.Summary()
//	_ = summary.Accepted // 1
//	_ = summary.Hashes   // 100000
func TestStats_OnAccept_Good(t *testing.T) {
	stats := NewStats()

	stats.OnAccept(Event{Diff: 100000, Latency: 82})

	summary := stats.Summary()
	if summary.Accepted != 1 {
		t.Fatalf("expected accepted 1, got %d", summary.Accepted)
	}
	if summary.Hashes != 100000 {
		t.Fatalf("expected hashes 100000, got %d", summary.Hashes)
	}
	if summary.TopDiff[0] != 100000 {
		t.Fatalf("expected top diff 100000, got %d", summary.TopDiff[0])
	}
}

// TestStats_OnAccept_Bad verifies concurrent OnAccept calls do not race.
//
//	stats := proxy.NewStats()
//	// 100 goroutines each call OnAccept — no data race under -race flag.
func TestStats_OnAccept_Bad(t *testing.T) {
	stats := NewStats()
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(diff uint64) {
			defer wg.Done()
			stats.OnAccept(Event{Diff: diff, Latency: 10})
		}(uint64(i + 1))
	}
	wg.Wait()

	summary := stats.Summary()
	if summary.Accepted != 100 {
		t.Fatalf("expected 100 accepted, got %d", summary.Accepted)
	}
}

// TestStats_OnAccept_Ugly verifies that 15 accepts with varying diffs fill all topDiff slots.
//
//	stats := proxy.NewStats()
//	// 15 accepts with diffs 1..15 → topDiff[9] is 6 (10th highest), not 0
func TestStats_OnAccept_Ugly(t *testing.T) {
	stats := NewStats()

	for i := 1; i <= 15; i++ {
		stats.OnAccept(Event{Diff: uint64(i)})
	}

	summary := stats.Summary()
	// top 10 should be 15, 14, 13, ..., 6
	if summary.TopDiff[0] != 15 {
		t.Fatalf("expected top diff[0]=15, got %d", summary.TopDiff[0])
	}
	if summary.TopDiff[9] != 6 {
		t.Fatalf("expected top diff[9]=6, got %d", summary.TopDiff[9])
	}
}

// TestStats_OnReject_Good verifies that rejected and invalid counters are updated.
//
//	stats := proxy.NewStats()
//	stats.OnReject(proxy.Event{Error: "Low difficulty share"})
func TestStats_OnReject_Good(t *testing.T) {
	stats := NewStats()

	stats.OnReject(Event{Error: "Low difficulty share"})
	stats.OnReject(Event{Error: "Malformed share"})

	summary := stats.Summary()
	if summary.Rejected != 2 {
		t.Fatalf("expected two rejected shares, got %d", summary.Rejected)
	}
	if summary.Invalid != 2 {
		t.Fatalf("expected two invalid shares, got %d", summary.Invalid)
	}
}

// TestStats_OnReject_Bad verifies that a non-invalid rejection increments rejected but not invalid.
//
//	stats := proxy.NewStats()
//	stats.OnReject(proxy.Event{Error: "Stale share"})
func TestStats_OnReject_Bad(t *testing.T) {
	stats := NewStats()

	stats.OnReject(Event{Error: "Stale share"})

	summary := stats.Summary()
	if summary.Rejected != 1 {
		t.Fatalf("expected one rejected, got %d", summary.Rejected)
	}
	if summary.Invalid != 0 {
		t.Fatalf("expected zero invalid for non-invalid reason, got %d", summary.Invalid)
	}
}

// TestStats_OnReject_Ugly verifies an expired accepted share increments both accepted and expired.
//
//	stats := proxy.NewStats()
//	stats.OnAccept(proxy.Event{Diff: 1000, Expired: true})
func TestStats_OnReject_Ugly(t *testing.T) {
	stats := NewStats()

	stats.OnAccept(Event{Diff: 1000, Expired: true})

	summary := stats.Summary()
	if summary.Accepted != 1 {
		t.Fatalf("expected accepted 1, got %d", summary.Accepted)
	}
	if summary.Expired != 1 {
		t.Fatalf("expected expired 1, got %d", summary.Expired)
	}
}

// TestStats_Tick_Good verifies that Tick advances the rolling window position.
//
//	stats := proxy.NewStats()
//	stats.OnAccept(proxy.Event{Diff: 500})
//	stats.Tick()
//	summary := stats.Summary()
func TestStats_Tick_Good(t *testing.T) {
	stats := NewStats()

	stats.OnAccept(Event{Diff: 500})
	stats.Tick()

	summary := stats.Summary()
	// After one tick, the hashrate should still include the 500 diff
	if summary.Hashrate[HashrateWindow60s] == 0 {
		t.Fatalf("expected non-zero 60s hashrate after accept and tick")
	}
}

// TestStats_OnLogin_OnClose_Good verifies miner count tracking.
//
//	stats := proxy.NewStats()
//	stats.OnLogin(proxy.Event{Miner: &proxy.Miner{}})
//	stats.OnClose(proxy.Event{Miner: &proxy.Miner{}})
func TestStats_OnLogin_OnClose_Good(t *testing.T) {
	stats := NewStats()
	m := &Miner{}

	stats.OnLogin(Event{Miner: m})
	if got := stats.miners.Load(); got != 1 {
		t.Fatalf("expected 1 miner, got %d", got)
	}
	if got := stats.maxMiners.Load(); got != 1 {
		t.Fatalf("expected max miners 1, got %d", got)
	}

	stats.OnClose(Event{Miner: m})
	if got := stats.miners.Load(); got != 0 {
		t.Fatalf("expected 0 miners after close, got %d", got)
	}
	if got := stats.maxMiners.Load(); got != 1 {
		t.Fatalf("expected max miners to remain 1, got %d", got)
	}
}
36
tls_test.go
Normal file

@@ -0,0 +1,36 @@
package proxy

import (
	"crypto/tls"
	"testing"
)

func TestTLS_applyTLSCiphers_Good(t *testing.T) {
	cfg := &tls.Config{}

	applyTLSCiphers(cfg, "ECDHE-RSA-AES128-GCM-SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256")

	if len(cfg.CipherSuites) != 2 {
		t.Fatalf("expected two recognised cipher suites, got %d", len(cfg.CipherSuites))
	}
}

func TestTLS_applyTLSCiphers_Bad(t *testing.T) {
	cfg := &tls.Config{}

	applyTLSCiphers(cfg, "made-up-cipher-one:made-up-cipher-two")

	if len(cfg.CipherSuites) != 0 {
		t.Fatalf("expected unknown cipher names to be ignored, got %#v", cfg.CipherSuites)
	}
}

func TestTLS_applyTLSCiphers_Ugly(t *testing.T) {
	cfg := &tls.Config{}

	applyTLSCiphers(cfg, " aes128-sha | ECDHE-RSA-AES256-GCM-SHA384 ; tls_ecdhe_ecdsa_with_aes_256_gcm_sha384 ")

	if len(cfg.CipherSuites) != 3 {
		t.Fatalf("expected mixed separators and casing to be accepted, got %d", len(cfg.CipherSuites))
	}
}
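Taken together, the three cases pin down tolerant parsing: comma, colon, semicolon, and pipe all separate entries, matching is case-insensitive, OpenSSL-style names such as aes128-sha map onto Go suite names, and unknown entries are silently dropped. A sketch of such a lookup built on crypto/tls's published suite tables; the alias map's contents here are an assumption, not the proxy's actual table:

import (
	"crypto/tls"
	"strings"
)

// parseCiphers resolves a user-supplied cipher list against Go's known
// suites. aliases maps OpenSSL-style names to Go names; its contents are
// illustrative only.
func parseCiphers(spec string, aliases map[string]string) []uint16 {
	byName := map[string]uint16{}
	for _, s := range append(tls.CipherSuites(), tls.InsecureCipherSuites()...) {
		byName[strings.ToUpper(s.Name)] = s.ID
	}
	var ids []uint16
	for _, raw := range strings.FieldsFunc(spec, func(r rune) bool {
		return r == ',' || r == ':' || r == ';' || r == '|'
	}) {
		name := strings.ToUpper(strings.TrimSpace(raw))
		if mapped, ok := aliases[name]; ok {
			name = strings.ToUpper(mapped)
		}
		if id, ok := byName[name]; ok {
			ids = append(ids, id) // unknown names fall through silently
		}
	}
	return ids
}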
20
worker.go
@@ -8,18 +8,22 @@ import (
// Workers maintains per-worker aggregate stats. Workers are identified by name,
// derived from the miner's login fields per WorkersMode.
//
-//	w := proxy.NewWorkers(proxy.WorkersByRigID, bus)
+//	workers := proxy.NewWorkers(proxy.WorkersByRigID, bus)
+//	workers.OnLogin(proxy.Event{Miner: miner})
+//	records := workers.List()
type Workers struct {
-	mode      WorkersMode
-	entries   []WorkerRecord // ordered by first-seen (stable)
-	nameIndex map[string]int // workerName → entries index
-	idIndex   map[int64]int  // minerID → entries index
-	mu        sync.RWMutex
+	mode       WorkersMode
+	entries    []WorkerRecord // ordered by first-seen (stable)
+	nameIndex  map[string]int // workerName → entries index
+	idIndex    map[int64]int  // minerID → entries index
+	subscribed bool
+	mu         sync.RWMutex
}

-// WorkerRecord is the per-identity aggregate.
+// WorkerRecord is the per-identity aggregate with rolling hashrate windows.
//
-//	hr60 := record.Hashrate(60)
+//	record := proxy.WorkerRecord{Name: "rig-alpha", Accepted: 10, Hashes: 500000}
+//	hr60 := record.Hashrate(60) // H/s over the last 60 seconds
type WorkerRecord struct {
	Name   string
	LastIP string
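The mode constants exercised by the tests (WorkersByRigID, WorkersByUser, WorkersDisabled) imply a name-derivation step at login. A hedged sketch of what that switch could look like; the fallback from an empty rig ID to the user field is an assumption of this sketch, not confirmed by the diff:

// workerName picks the aggregation key for a miner. The rigID→user
// fallback is hypothetical; the real derivation may differ.
func workerName(mode WorkersMode, user, rigID string) string {
	switch mode {
	case WorkersByRigID:
		if rigID != "" {
			return rigID
		}
		return user
	case WorkersByUser:
		return user
	default: // WorkersDisabled
		return ""
	}
}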
164
worker_test.go
Normal file

@@ -0,0 +1,164 @@
package proxy

import "testing"

func TestWorker_NewWorkers_Good(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByRigID, bus)
	miner := &Miner{id: 7, user: "wallet", rigID: "rig-1", ip: "10.0.0.1"}

	bus.Dispatch(Event{Type: EventLogin, Miner: miner})

	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	if records[0].Name != "rig-1" {
		t.Fatalf("expected rig id worker name, got %q", records[0].Name)
	}
	if records[0].Connections != 1 {
		t.Fatalf("expected one connection, got %d", records[0].Connections)
	}
}

func TestWorker_NewWorkers_Bad(t *testing.T) {
	workers := NewWorkers(WorkersDisabled, nil)
	if workers == nil {
		t.Fatalf("expected workers instance")
	}
	if got := workers.List(); len(got) != 0 {
		t.Fatalf("expected no worker records, got %d", len(got))
	}
}

func TestWorker_NewWorkers_Ugly(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	workers.bindEvents(bus)

	miner := &Miner{id: 11, user: "wallet", ip: "10.0.0.2"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})

	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	if records[0].Connections != 1 {
		t.Fatalf("expected a single subscription path, got %d connections", records[0].Connections)
	}
}

// TestWorker_Hashrate_Good verifies that recording an accepted share produces a nonzero
// hashrate reading from the 60-second window.
//
//	record := proxy.WorkerRecord{}
//	record.Hashrate(60) // > 0.0 after an accepted share
func TestWorker_Hashrate_Good(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)

	miner := &Miner{id: 100, user: "hashtest", ip: "10.0.0.10"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 50000})

	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	hr := records[0].Hashrate(60)
	if hr <= 0 {
		t.Fatalf("expected nonzero hashrate for 60-second window after accept, got %f", hr)
	}
}

// TestWorker_Hashrate_Bad verifies that an invalid window size returns 0.
//
//	record := proxy.WorkerRecord{}
//	record.Hashrate(999) // 0.0 (unsupported window)
func TestWorker_Hashrate_Bad(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)

	miner := &Miner{id: 101, user: "hashtest-bad", ip: "10.0.0.11"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 50000})

	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	hr := records[0].Hashrate(999)
	if hr != 0 {
		t.Fatalf("expected zero hashrate for unsupported window, got %f", hr)
	}
	hrZero := records[0].Hashrate(0)
	if hrZero != 0 {
		t.Fatalf("expected zero hashrate for zero window, got %f", hrZero)
	}
	hrNeg := records[0].Hashrate(-1)
	if hrNeg != 0 {
		t.Fatalf("expected zero hashrate for negative window, got %f", hrNeg)
	}
}

// TestWorker_Hashrate_Ugly verifies that calling Hashrate on a nil record returns 0
// and that a worker with no accepts also returns 0.
//
//	var record *proxy.WorkerRecord
//	record.Hashrate(60) // 0.0
func TestWorker_Hashrate_Ugly(t *testing.T) {
	var nilRecord *WorkerRecord
	if hr := nilRecord.Hashrate(60); hr != 0 {
		t.Fatalf("expected zero hashrate for nil record, got %f", hr)
	}

	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)

	miner := &Miner{id: 102, user: "hashtest-ugly", ip: "10.0.0.12"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})

	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	hr := records[0].Hashrate(60)
	if hr != 0 {
		t.Fatalf("expected zero hashrate for worker with no accepts, got %f", hr)
	}
}

func TestWorker_CustomDiffOrdering_Good(t *testing.T) {
	cfg := &Config{
		Mode:          "nicehash",
		Workers:       WorkersByUser,
		Bind:          []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:         []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		CustomDiff:    50000,
		AccessLogFile: "",
	}

	p, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected valid proxy, got error: %v", result.Error)
	}

	miner := &Miner{
		id:   21,
		user: "WALLET+50000",
		ip:   "10.0.0.3",
		conn: noopConn{},
	}
	p.events.Dispatch(Event{Type: EventLogin, Miner: miner})

	records := p.WorkerRecords()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	if records[0].Name != "WALLET" {
		t.Fatalf("expected custom diff login suffix to be stripped before worker registration, got %q", records[0].Name)
	}
	if miner.User() != "WALLET" {
		t.Fatalf("expected miner user to be stripped before downstream consumers, got %q", miner.User())
	}
}