Compare commits
146 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2470f1ac3d | ||
|
|
31a151d23c | ||
|
|
6f0747abc2 | ||
|
|
711c4259f7 | ||
|
|
8cf01f2618 | ||
|
|
b6b44b1f7b | ||
|
|
e1eadf705d | ||
|
|
ea378354de | ||
|
|
8a9046356e | ||
|
|
d1a899805e | ||
|
|
5680539dbb | ||
|
|
82b2375058 | ||
|
|
01a0cc5907 | ||
|
|
031f0c0f17 | ||
|
|
75d151b4e5 | ||
|
|
70fcbd4d43 | ||
|
|
686f4ea54f | ||
|
|
af96bfce94 | ||
|
|
1ae781608c | ||
|
|
ee128e944d | ||
|
|
1f8ff58b20 | ||
|
|
bc6113c80d | ||
|
|
b3fd1fef61 | ||
|
|
e518f2df32 | ||
|
|
30ff013158 | ||
|
|
be47d7afde | ||
|
|
65f6c733a0 | ||
|
|
f4f0081eb0 | ||
|
|
f0d5f6ae86 | ||
|
|
0a7c99264b | ||
|
|
35db5f6840 | ||
|
|
8a52856719 | ||
|
|
5d8d82b9b5 | ||
|
|
356eb9cec1 | ||
|
|
cbde021d0c | ||
|
|
f2f7dfed75 | ||
|
|
ce3b7a50cd | ||
|
|
ecd4130457 | ||
|
|
5a3fcf4fab | ||
|
|
7dd9807a6e | ||
|
|
7b2a7ccd88 | ||
|
|
9f34bc7200 | ||
|
|
a1f47f5792 | ||
|
|
b5e4a6499f | ||
|
|
b9b3c47b4c | ||
|
|
fefae4b3e5 | ||
|
|
264479d57b | ||
|
|
d43c8ee4c1 | ||
|
|
05b0bb5ea4 | ||
|
|
2a49caca03 | ||
|
|
3cd0909d74 | ||
|
|
d0ae26a1a2 | ||
|
|
3f9da136e9 | ||
|
|
f3c5175785 | ||
|
|
e94616922d | ||
|
|
d8b4bf2775 | ||
|
|
3debd08a64 | ||
|
|
eabe9b521d | ||
|
|
a11d5b0969 | ||
|
|
766c4d1946 | ||
|
|
8ad123ecab | ||
|
|
55d44df9c2 | ||
|
|
9d2b1f368c | ||
|
|
2364633afc | ||
|
|
9460f82738 | ||
|
|
cf4136c8f0 | ||
|
|
460aae14fb | ||
|
|
bbdff60580 | ||
|
|
a76e6be1c7 | ||
|
|
96f7f18c96 | ||
|
|
77435d44fe | ||
|
|
ad069a45d5 | ||
|
|
7a48e479ec | ||
|
|
fd88492b00 | ||
|
|
fd6bc01b87 | ||
|
|
9e44fb6ea3 | ||
|
|
fd76640d69 | ||
|
|
fb5453c097 | ||
|
|
34f95071d9 | ||
|
|
4e5311215d | ||
|
|
2d39783dc4 | ||
|
|
e2bd10c94f | ||
|
|
1e6ba01d03 | ||
|
|
c0efdfb0ca | ||
|
|
619b3c500d | ||
|
|
8a321e2467 | ||
|
|
167ecc2bdc | ||
|
|
0bb5ce827b | ||
|
|
6f0f695054 | ||
|
|
4a0213e89f | ||
|
|
84362d9dc5 | ||
|
|
4006f33c1e | ||
|
|
9b6a251145 | ||
|
|
0c746e4ea7 | ||
|
|
e594b04d7c | ||
|
|
187a366d74 | ||
|
|
5ba21cb9bf | ||
|
|
2b8bba790c | ||
|
|
cfd669e4d2 | ||
|
|
6422a948bf | ||
|
|
8bde2c14d0 | ||
|
|
a79b35abaf | ||
|
|
5e343a7354 | ||
|
|
4c2a0ffab7 | ||
|
|
33d35ed063 | ||
|
|
c74f62e6d7 | ||
|
|
8b47e6a11b | ||
|
|
6d6934f37b | ||
|
|
c62f2c86a9 | ||
|
|
1548643c65 | ||
|
|
1065b78b7c | ||
|
|
9028334d49 | ||
|
|
186524b3a8 | ||
|
|
d9c59c668d | ||
|
|
8faac7eee6 | ||
|
|
ce7d3301fc | ||
|
|
c7d688ccfa | ||
|
|
d42c21438a | ||
|
|
86c07943b0 | ||
|
|
35d8c524e4 | ||
|
|
d47d89af7a | ||
|
|
b66739b64f | ||
|
|
3efa7f34d0 | ||
|
|
b3ad79d832 | ||
|
|
d10a57e377 | ||
|
|
6d6da10885 | ||
|
|
b16ebc1a28 | ||
|
|
2f59714cce | ||
|
|
21fce78ffe | ||
|
|
e92c6070be | ||
|
|
c250a4d6f2 | ||
|
|
4a281e6e25 | ||
|
|
1bcbb389e6 | ||
|
|
bc67e73ca0 | ||
|
|
31a8ba558f | ||
|
|
6f4d7019e2 | ||
|
|
64443c41f6 | ||
|
|
7f44596858 | ||
|
|
b8cf8713c5 | ||
|
|
c7ada3dd54 | ||
|
|
eb896a065f | ||
|
|
a7d16b7685 | ||
|
|
15e3050b02 | ||
|
|
2a68aa4637 | ||
| cd517cd0d6 | |||
|
|
a38dfc18ec |
69 changed files with 11928 additions and 258 deletions
14
CODEX.md
Normal file
14
CODEX.md
Normal file
|
|
@ -0,0 +1,14 @@
|
||||||
|
# CODEX.md
|
||||||
|
|
||||||
|
This repository uses `CLAUDE.md` as the detailed source of truth for working conventions.
|
||||||
|
This file exists so agent workflows that expect `CODEX.md` can resolve the repo rules directly.
|
||||||
|
|
||||||
|
## Core Conventions
|
||||||
|
|
||||||
|
- Read `docs/RFC.md` before changing behaviour.
|
||||||
|
- Preserve existing user changes in the worktree.
|
||||||
|
- Prefer `rg` for search and `apply_patch` for edits.
|
||||||
|
- Keep names predictable and comments example-driven.
|
||||||
|
- Run `go test ./...` and `go test -race ./...` before committing when practical.
|
||||||
|
- Commit with a conventional message and include the required co-author line when requested by repo policy.
|
||||||
|
|
||||||
121
accesslog_impl.go
Normal file
121
accesslog_impl.go
Normal file
|
|
@ -0,0 +1,121 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type accessLogSink struct {
|
||||||
|
path string
|
||||||
|
file *os.File
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func newAccessLogSink(path string) *accessLogSink {
|
||||||
|
return &accessLogSink{path: path}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *accessLogSink) SetPath(path string) {
|
||||||
|
if l == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if l.path == path {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.path = path
|
||||||
|
if l.file != nil {
|
||||||
|
_ = l.file.Close()
|
||||||
|
l.file = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *accessLogSink) Close() {
|
||||||
|
if l == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if l.file != nil {
|
||||||
|
_ = l.file.Close()
|
||||||
|
l.file = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *accessLogSink) OnLogin(e Event) {
|
||||||
|
if l == nil || e.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.writeConnectLine(e.Miner.IP(), e.Miner.User(), e.Miner.Agent())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *accessLogSink) OnClose(e Event) {
|
||||||
|
if l == nil || e.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.writeCloseLine(e.Miner.IP(), e.Miner.User(), e.Miner.RX(), e.Miner.TX())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *accessLogSink) writeConnectLine(ip, user, agent string) {
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if strings.TrimSpace(l.path) == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if l.file == nil {
|
||||||
|
file, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.file = file
|
||||||
|
}
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString(time.Now().UTC().Format(time.RFC3339))
|
||||||
|
builder.WriteByte(' ')
|
||||||
|
builder.WriteString("CONNECT")
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(ip)
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(user)
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(agent)
|
||||||
|
builder.WriteByte('\n')
|
||||||
|
_, _ = l.file.WriteString(builder.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *accessLogSink) writeCloseLine(ip, user string, rx, tx uint64) {
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if strings.TrimSpace(l.path) == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if l.file == nil {
|
||||||
|
file, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.file = file
|
||||||
|
}
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString(time.Now().UTC().Format(time.RFC3339))
|
||||||
|
builder.WriteByte(' ')
|
||||||
|
builder.WriteString("CLOSE")
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(ip)
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(user)
|
||||||
|
builder.WriteString(" rx=")
|
||||||
|
builder.WriteString(formatUint(rx))
|
||||||
|
builder.WriteString(" tx=")
|
||||||
|
builder.WriteString(formatUint(tx))
|
||||||
|
builder.WriteByte('\n')
|
||||||
|
_, _ = l.file.WriteString(builder.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatUint(value uint64) string {
|
||||||
|
return strconv.FormatUint(value, 10)
|
||||||
|
}
|
||||||
102
accesslog_test.go
Normal file
102
accesslog_test.go
Normal file
|
|
@ -0,0 +1,102 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProxy_AccessLog_WritesLifecycleLines(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "access.log")
|
||||||
|
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
AccessLogFile: path,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p, result := New(cfg)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected valid proxy, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
miner := &Miner{
|
||||||
|
ip: "10.0.0.1",
|
||||||
|
user: "WALLET",
|
||||||
|
agent: "XMRig/6.21.0",
|
||||||
|
rx: 512,
|
||||||
|
tx: 4096,
|
||||||
|
conn: noopConn{},
|
||||||
|
state: MinerStateReady,
|
||||||
|
rpcID: "session",
|
||||||
|
}
|
||||||
|
p.events.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||||
|
p.events.Dispatch(Event{Type: EventClose, Miner: miner})
|
||||||
|
p.Stop()
|
||||||
|
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read access log: %v", err)
|
||||||
|
}
|
||||||
|
text := string(data)
|
||||||
|
if !strings.Contains(text, "CONNECT 10.0.0.1 WALLET XMRig/6.21.0") {
|
||||||
|
t.Fatalf("expected CONNECT line, got %q", text)
|
||||||
|
}
|
||||||
|
if !strings.Contains(text, "CLOSE 10.0.0.1 WALLET rx=512 tx=4096") {
|
||||||
|
t.Fatalf("expected CLOSE line, got %q", text)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_AccessLog_WritesFixedColumns(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "access.log")
|
||||||
|
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
AccessLogFile: path,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p, result := New(cfg)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected valid proxy, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
miner := &Miner{
|
||||||
|
ip: "10.0.0.1",
|
||||||
|
user: "WALLET",
|
||||||
|
conn: noopConn{},
|
||||||
|
}
|
||||||
|
p.events.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||||
|
p.events.Dispatch(Event{Type: EventClose, Miner: miner})
|
||||||
|
p.Stop()
|
||||||
|
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read access log: %v", err)
|
||||||
|
}
|
||||||
|
text := string(data)
|
||||||
|
if !strings.Contains(text, "CONNECT 10.0.0.1 WALLET") {
|
||||||
|
t.Fatalf("expected CONNECT line without counters, got %q", text)
|
||||||
|
}
|
||||||
|
if !strings.Contains(text, "CLOSE 10.0.0.1 WALLET rx=0 tx=0") {
|
||||||
|
t.Fatalf("expected CLOSE line with counters only, got %q", text)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type noopConn struct{}
|
||||||
|
|
||||||
|
func (noopConn) Read([]byte) (int, error) { return 0, os.ErrClosed }
|
||||||
|
func (noopConn) Write([]byte) (int, error) { return 0, os.ErrClosed }
|
||||||
|
func (noopConn) Close() error { return nil }
|
||||||
|
func (noopConn) LocalAddr() net.Addr { return nil }
|
||||||
|
func (noopConn) RemoteAddr() net.Addr { return nil }
|
||||||
|
func (noopConn) SetDeadline(time.Time) error { return nil }
|
||||||
|
func (noopConn) SetReadDeadline(time.Time) error { return nil }
|
||||||
|
func (noopConn) SetWriteDeadline(time.Time) error { return nil }
|
||||||
|
|
@ -1,63 +1,62 @@
|
||||||
// Package api implements the HTTP monitoring endpoints for the proxy.
|
// Package api mounts the monitoring endpoints on an HTTP mux.
|
||||||
//
|
//
|
||||||
// Registered routes:
|
// mux := http.NewServeMux()
|
||||||
//
|
// api.RegisterRoutes(mux, proxyInstance)
|
||||||
// GET /1/summary — aggregated proxy stats
|
|
||||||
// GET /1/workers — per-worker hashrate table
|
|
||||||
// GET /1/miners — per-connection state table
|
|
||||||
//
|
|
||||||
// proxyapi.RegisterRoutes(apiRouter, p)
|
|
||||||
package api
|
package api
|
||||||
|
|
||||||
// SummaryResponse is the /1/summary JSON body.
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RouteRegistrar accepts HTTP handler registrations.
|
||||||
//
|
//
|
||||||
// {"version":"1.0.0","mode":"nicehash","hashrate":{"total":[...]}, ...}
|
// mux := http.NewServeMux()
|
||||||
type SummaryResponse struct {
|
// api.RegisterRoutes(mux, proxyInstance)
|
||||||
Version string `json:"version"`
|
type RouteRegistrar interface {
|
||||||
Mode string `json:"mode"`
|
HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request))
|
||||||
Hashrate HashrateResponse `json:"hashrate"`
|
|
||||||
Miners MinersCountResponse `json:"miners"`
|
|
||||||
Workers uint64 `json:"workers"`
|
|
||||||
Upstreams UpstreamResponse `json:"upstreams"`
|
|
||||||
Results ResultsResponse `json:"results"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HashrateResponse carries the per-window hashrate array.
|
// mux := http.NewServeMux()
|
||||||
|
// api.RegisterRoutes(mux, proxyInstance)
|
||||||
|
// _ = mux
|
||||||
//
|
//
|
||||||
// HashrateResponse{Total: [6]float64{12345.67, 11900.00, 12100.00, 11800.00, 12000.00, 12200.00}}
|
// The mounted routes are GET /1/summary, /1/workers, and /1/miners.
|
||||||
type HashrateResponse struct {
|
func RegisterRoutes(router RouteRegistrar, p *proxy.Proxy) {
|
||||||
Total [6]float64 `json:"total"`
|
if router == nil || p == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
registerJSONGetRoute(router, p, proxy.MonitoringRouteSummary, func() any { return p.SummaryDocument() })
|
||||||
|
registerJSONGetRoute(router, p, proxy.MonitoringRouteWorkers, func() any { return p.WorkersDocument() })
|
||||||
|
registerJSONGetRoute(router, p, proxy.MonitoringRouteMiners, func() any { return p.MinersDocument() })
|
||||||
}
|
}
|
||||||
|
|
||||||
// MinersCountResponse carries current and peak miner counts.
|
func registerJSONGetRoute(router RouteRegistrar, proxyInstance *proxy.Proxy, pattern string, renderDocument func() any) {
|
||||||
//
|
router.HandleFunc(pattern, func(w http.ResponseWriter, request *http.Request) {
|
||||||
// MinersCountResponse{Now: 142, Max: 200}
|
if status, ok := allowMonitoringRequest(proxyInstance, request); !ok {
|
||||||
type MinersCountResponse struct {
|
switch status {
|
||||||
Now uint64 `json:"now"`
|
case http.StatusMethodNotAllowed:
|
||||||
Max uint64 `json:"max"`
|
w.Header().Set("Allow", http.MethodGet)
|
||||||
|
case http.StatusUnauthorized:
|
||||||
|
w.Header().Set("WWW-Authenticate", "Bearer")
|
||||||
|
}
|
||||||
|
w.WriteHeader(status)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
writeJSON(w, renderDocument())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpstreamResponse carries pool connection state counts.
|
func allowMonitoringRequest(proxyInstance *proxy.Proxy, request *http.Request) (int, bool) {
|
||||||
//
|
if proxyInstance == nil {
|
||||||
// UpstreamResponse{Active: 1, Sleep: 0, Error: 0, Total: 1, Ratio: 142.0}
|
return http.StatusServiceUnavailable, false
|
||||||
type UpstreamResponse struct {
|
}
|
||||||
Active uint64 `json:"active"`
|
return proxyInstance.AllowMonitoringRequest(request)
|
||||||
Sleep uint64 `json:"sleep"`
|
|
||||||
Error uint64 `json:"error"`
|
|
||||||
Total uint64 `json:"total"`
|
|
||||||
Ratio float64 `json:"ratio"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ResultsResponse carries share acceptance statistics.
|
func writeJSON(w http.ResponseWriter, payload any) {
|
||||||
//
|
w.Header().Set("Content-Type", "application/json")
|
||||||
// ResultsResponse{Accepted: 4821, Rejected: 3, Invalid: 0, Expired: 12}
|
_ = json.NewEncoder(w).Encode(payload)
|
||||||
type ResultsResponse struct {
|
|
||||||
Accepted uint64 `json:"accepted"`
|
|
||||||
Rejected uint64 `json:"rejected"`
|
|
||||||
Invalid uint64 `json:"invalid"`
|
|
||||||
Expired uint64 `json:"expired"`
|
|
||||||
AvgTime uint32 `json:"avg_time"`
|
|
||||||
Latency uint32 `json:"latency"`
|
|
||||||
HashesTotal uint64 `json:"hashes_total"`
|
|
||||||
Best [10]uint64 `json:"best"`
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
201
api/router_test.go
Normal file
201
api/router_test.go
Normal file
|
|
@ -0,0 +1,201 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRegisterRoutes_GETSummary_Good(t *testing.T) {
|
||||||
|
config := &proxy.Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: proxy.WorkersByRigID,
|
||||||
|
Bind: []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p, result := proxy.New(config)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("new proxy: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
router := http.NewServeMux()
|
||||||
|
RegisterRoutes(router, p)
|
||||||
|
|
||||||
|
request := httptest.NewRequest(http.MethodGet, "/1/summary", nil)
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
router.ServeHTTP(recorder, request)
|
||||||
|
|
||||||
|
if recorder.Code != http.StatusOK {
|
||||||
|
t.Fatalf("expected %d, got %d", http.StatusOK, recorder.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
var document proxy.SummaryDocument
|
||||||
|
if err := json.Unmarshal(recorder.Body.Bytes(), &document); err != nil {
|
||||||
|
t.Fatalf("decode summary document: %v", err)
|
||||||
|
}
|
||||||
|
if document.Mode != "nicehash" {
|
||||||
|
t.Fatalf("expected mode %q, got %q", "nicehash", document.Mode)
|
||||||
|
}
|
||||||
|
if document.Version != "1.0.0" {
|
||||||
|
t.Fatalf("expected version %q, got %q", "1.0.0", document.Version)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRegisterRoutes_POSTSummary_Bad(t *testing.T) {
|
||||||
|
config := &proxy.Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: proxy.WorkersByRigID,
|
||||||
|
Bind: []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
HTTP: proxy.HTTPConfig{
|
||||||
|
Restricted: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
p, result := proxy.New(config)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("new proxy: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
router := http.NewServeMux()
|
||||||
|
RegisterRoutes(router, p)
|
||||||
|
|
||||||
|
request := httptest.NewRequest(http.MethodPost, "/1/summary", nil)
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
router.ServeHTTP(recorder, request)
|
||||||
|
|
||||||
|
if recorder.Code != http.StatusMethodNotAllowed {
|
||||||
|
t.Fatalf("expected %d, got %d", http.StatusMethodNotAllowed, recorder.Code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRegisterRoutes_POSTSummary_Unrestricted_Good(t *testing.T) {
|
||||||
|
config := &proxy.Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: proxy.WorkersByRigID,
|
||||||
|
Bind: []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p, result := proxy.New(config)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("new proxy: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
router := http.NewServeMux()
|
||||||
|
RegisterRoutes(router, p)
|
||||||
|
|
||||||
|
request := httptest.NewRequest(http.MethodPost, "/1/summary", nil)
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
router.ServeHTTP(recorder, request)
|
||||||
|
|
||||||
|
if recorder.Code != http.StatusOK {
|
||||||
|
t.Fatalf("expected %d, got %d", http.StatusOK, recorder.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
var document proxy.SummaryDocument
|
||||||
|
if err := json.Unmarshal(recorder.Body.Bytes(), &document); err != nil {
|
||||||
|
t.Fatalf("decode summary document: %v", err)
|
||||||
|
}
|
||||||
|
if document.Mode != "nicehash" {
|
||||||
|
t.Fatalf("expected mode %q, got %q", "nicehash", document.Mode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRegisterRoutes_GETMiners_Ugly(t *testing.T) {
|
||||||
|
config := &proxy.Config{
|
||||||
|
Mode: "simple",
|
||||||
|
Workers: proxy.WorkersDisabled,
|
||||||
|
Bind: []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p, result := proxy.New(config)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("new proxy: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
router := http.NewServeMux()
|
||||||
|
RegisterRoutes(router, p)
|
||||||
|
|
||||||
|
request := httptest.NewRequest(http.MethodGet, "/1/miners", nil)
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
router.ServeHTTP(recorder, request)
|
||||||
|
|
||||||
|
if recorder.Code != http.StatusOK {
|
||||||
|
t.Fatalf("expected %d, got %d", http.StatusOK, recorder.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
var document proxy.MinersDocument
|
||||||
|
if err := json.Unmarshal(recorder.Body.Bytes(), &document); err != nil {
|
||||||
|
t.Fatalf("decode miners document: %v", err)
|
||||||
|
}
|
||||||
|
if len(document.Format) != 10 {
|
||||||
|
t.Fatalf("expected 10 miner columns, got %d", len(document.Format))
|
||||||
|
}
|
||||||
|
if len(document.Miners) != 0 {
|
||||||
|
t.Fatalf("expected no miners in a new proxy, got %d", len(document.Miners))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRegisterRoutes_GETSummaryAuthRequired_Bad(t *testing.T) {
|
||||||
|
config := &proxy.Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: proxy.WorkersByRigID,
|
||||||
|
Bind: []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
HTTP: proxy.HTTPConfig{
|
||||||
|
Enabled: true,
|
||||||
|
Restricted: true,
|
||||||
|
AccessToken: "secret",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
p, result := proxy.New(config)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("new proxy: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
router := http.NewServeMux()
|
||||||
|
RegisterRoutes(router, p)
|
||||||
|
|
||||||
|
request := httptest.NewRequest(http.MethodGet, "/1/summary", nil)
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
router.ServeHTTP(recorder, request)
|
||||||
|
|
||||||
|
if recorder.Code != http.StatusUnauthorized {
|
||||||
|
t.Fatalf("expected %d, got %d", http.StatusUnauthorized, recorder.Code)
|
||||||
|
}
|
||||||
|
if got := recorder.Header().Get("WWW-Authenticate"); got != "Bearer" {
|
||||||
|
t.Fatalf("expected bearer challenge, got %q", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRegisterRoutes_GETSummaryAuthGranted_Ugly(t *testing.T) {
|
||||||
|
config := &proxy.Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: proxy.WorkersByRigID,
|
||||||
|
Bind: []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
HTTP: proxy.HTTPConfig{
|
||||||
|
Enabled: true,
|
||||||
|
Restricted: true,
|
||||||
|
AccessToken: "secret",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
p, result := proxy.New(config)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("new proxy: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
router := http.NewServeMux()
|
||||||
|
RegisterRoutes(router, p)
|
||||||
|
|
||||||
|
request := httptest.NewRequest(http.MethodGet, "/1/summary", nil)
|
||||||
|
request.Header.Set("Authorization", "Bearer secret")
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
router.ServeHTTP(recorder, request)
|
||||||
|
|
||||||
|
if recorder.Code != http.StatusOK {
|
||||||
|
t.Fatalf("expected %d, got %d", http.StatusOK, recorder.Code)
|
||||||
|
}
|
||||||
|
}
|
||||||
113
api_rows.go
Normal file
113
api_rows.go
Normal file
|
|
@ -0,0 +1,113 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
const (
|
||||||
|
// MonitoringRouteSummary documents the summary endpoint path.
|
||||||
|
//
|
||||||
|
// http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteSummary)
|
||||||
|
MonitoringRouteSummary = "/1/summary"
|
||||||
|
|
||||||
|
// MonitoringRouteWorkers documents the workers endpoint path.
|
||||||
|
//
|
||||||
|
// http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteWorkers)
|
||||||
|
MonitoringRouteWorkers = "/1/workers"
|
||||||
|
|
||||||
|
// MonitoringRouteMiners documents the miners endpoint path.
|
||||||
|
//
|
||||||
|
// http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteMiners)
|
||||||
|
MonitoringRouteMiners = "/1/miners"
|
||||||
|
|
||||||
|
// SummaryDocumentVersion is the monitoring API version.
|
||||||
|
//
|
||||||
|
// doc := proxy.SummaryDocument{Version: proxy.SummaryDocumentVersion}
|
||||||
|
SummaryDocumentVersion = "1.0.0"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// MinersDocumentFormat defines the fixed /1/miners column order.
|
||||||
|
//
|
||||||
|
// doc := proxy.MinersDocument{Format: append([]string(nil), proxy.MinersDocumentFormat...)}
|
||||||
|
MinersDocumentFormat = []string{"id", "ip", "tx", "rx", "state", "diff", "user", "password", "rig_id", "agent"}
|
||||||
|
|
||||||
|
workerHashrateWindows = [5]int{60, 600, 3600, 43200, 86400}
|
||||||
|
)
|
||||||
|
|
||||||
|
// WorkerRow{"rig-alpha", "10.0.0.1", 1, 10, 0, 0, 10000, 1712232000, 1.0, 1.0, 1.0, 1.0, 1.0}
|
||||||
|
type WorkerRow [13]any
|
||||||
|
|
||||||
|
// MinerRow{1, "10.0.0.1:49152", 4096, 512, 2, 10000, "WALLET", maskedPassword, "rig-alpha", "XMRig/6.21.0"}
|
||||||
|
type MinerRow [10]any
|
||||||
|
|
||||||
|
// doc := p.SummaryDocument()
|
||||||
|
// _ = doc.Results.Accepted
|
||||||
|
// _ = doc.Upstreams.Ratio
|
||||||
|
type SummaryDocument struct {
|
||||||
|
Version string `json:"version"`
|
||||||
|
Mode string `json:"mode"`
|
||||||
|
Hashrate HashrateDocument `json:"hashrate"`
|
||||||
|
Miners MinersCountDocument `json:"miners"`
|
||||||
|
Workers uint64 `json:"workers"`
|
||||||
|
Upstreams UpstreamDocument `json:"upstreams"`
|
||||||
|
Results ResultsDocument `json:"results"`
|
||||||
|
CustomDiffStats map[uint64]CustomDiffBucketStats `json:"custom_diff_stats,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SummaryResponse is the RFC name for SummaryDocument.
|
||||||
|
type SummaryResponse = SummaryDocument
|
||||||
|
|
||||||
|
// HashrateDocument{Total: [6]float64{12345.67, 11900.00, 12100.00, 11800.00, 12000.00, 12200.00}}
|
||||||
|
type HashrateDocument struct {
|
||||||
|
Total [6]float64 `json:"total"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// HashrateResponse is the RFC name for HashrateDocument.
|
||||||
|
type HashrateResponse = HashrateDocument
|
||||||
|
|
||||||
|
// MinersCountDocument{Now: 142, Max: 200}
|
||||||
|
type MinersCountDocument struct {
|
||||||
|
Now uint64 `json:"now"`
|
||||||
|
Max uint64 `json:"max"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinersCountResponse is the RFC name for MinersCountDocument.
|
||||||
|
type MinersCountResponse = MinersCountDocument
|
||||||
|
|
||||||
|
// UpstreamDocument{Active: 1, Sleep: 0, Error: 0, Total: 1, Ratio: 142.0}
|
||||||
|
type UpstreamDocument struct {
|
||||||
|
Active uint64 `json:"active"`
|
||||||
|
Sleep uint64 `json:"sleep"`
|
||||||
|
Error uint64 `json:"error"`
|
||||||
|
Total uint64 `json:"total"`
|
||||||
|
Ratio float64 `json:"ratio"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamResponse is the RFC name for UpstreamDocument.
|
||||||
|
type UpstreamResponse = UpstreamDocument
|
||||||
|
|
||||||
|
// ResultsDocument{Accepted: 4821, Rejected: 3, Invalid: 0, Expired: 12}
|
||||||
|
type ResultsDocument struct {
|
||||||
|
Accepted uint64 `json:"accepted"`
|
||||||
|
Rejected uint64 `json:"rejected"`
|
||||||
|
Invalid uint64 `json:"invalid"`
|
||||||
|
Expired uint64 `json:"expired"`
|
||||||
|
AvgTime uint32 `json:"avg_time"`
|
||||||
|
Latency uint32 `json:"latency"`
|
||||||
|
HashesTotal uint64 `json:"hashes_total"`
|
||||||
|
Best [10]uint64 `json:"best"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResultsResponse is the RFC name for ResultsDocument.
|
||||||
|
type ResultsResponse = ResultsDocument
|
||||||
|
|
||||||
|
// doc := p.WorkersDocument()
|
||||||
|
// _ = doc.Workers[0][0]
|
||||||
|
type WorkersDocument struct {
|
||||||
|
Mode string `json:"mode"`
|
||||||
|
Workers []WorkerRow `json:"workers"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// doc := p.MinersDocument()
|
||||||
|
// _ = doc.Miners[0][7]
|
||||||
|
type MinersDocument struct {
|
||||||
|
Format []string `json:"format"`
|
||||||
|
Miners []MinerRow `json:"miners"`
|
||||||
|
}
|
||||||
60
config.go
60
config.go
|
|
@ -1,26 +1,33 @@
|
||||||
package proxy
|
package proxy
|
||||||
|
|
||||||
// Config is the top-level proxy configuration, loaded from JSON and hot-reloaded on change.
|
// Config is the top-level proxy configuration loaded from JSON.
|
||||||
//
|
//
|
||||||
// cfg, result := proxy.LoadConfig("config.json")
|
// cfg := &proxy.Config{
|
||||||
// if !result.OK { log.Fatal(result.Error) }
|
// Mode: "nicehash",
|
||||||
|
// Bind: []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}},
|
||||||
|
// Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
// Watch: true,
|
||||||
|
// Workers: proxy.WorkersByRigID,
|
||||||
|
// }
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Mode string `json:"mode"` // "nicehash" or "simple"
|
Mode string `json:"mode"` // "nicehash" or "simple"
|
||||||
Bind []BindAddr `json:"bind"` // listen addresses
|
Bind []BindAddr `json:"bind"` // listen addresses
|
||||||
Pools []PoolConfig `json:"pools"` // ordered primary + fallbacks
|
Pools []PoolConfig `json:"pools"` // ordered primary + fallbacks
|
||||||
TLS TLSConfig `json:"tls"` // inbound TLS (miner-facing)
|
TLS TLSConfig `json:"tls"` // inbound TLS (miner-facing)
|
||||||
HTTP HTTPConfig `json:"http"` // monitoring API
|
HTTP HTTPConfig `json:"http"` // monitoring API
|
||||||
AccessPassword string `json:"access-password"` // "" = no auth required
|
AccessPassword string `json:"access-password"` // "" = no auth required
|
||||||
CustomDiff uint64 `json:"custom-diff"` // 0 = disabled
|
CustomDiff uint64 `json:"custom-diff"` // 0 = disabled
|
||||||
CustomDiffStats bool `json:"custom-diff-stats"` // report per custom-diff bucket
|
CustomDiffStats bool `json:"custom-diff-stats"` // report per custom-diff bucket
|
||||||
AlgoExtension bool `json:"algo-ext"` // forward algo field in jobs
|
AlgoExtension bool `json:"algo-ext"` // forward algo field in jobs
|
||||||
Workers WorkersMode `json:"workers"` // "rig-id", "user", "password", "agent", "ip", "false"
|
Workers WorkersMode `json:"workers"` // "rig-id", "user", "password", "agent", "ip", "false"
|
||||||
AccessLogFile string `json:"access-log-file"` // "" = disabled
|
AccessLogFile string `json:"access-log-file"` // "" = disabled
|
||||||
ReuseTimeout int `json:"reuse-timeout"` // seconds; simple mode upstream reuse
|
ShareLogFile string `json:"share-log-file"` // "" = disabled
|
||||||
Retries int `json:"retries"` // pool reconnect attempts
|
ReuseTimeout int `json:"reuse-timeout"` // seconds; simple mode upstream reuse
|
||||||
RetryPause int `json:"retry-pause"` // seconds between retries
|
Retries int `json:"retries"` // pool reconnect attempts
|
||||||
Watch bool `json:"watch"` // hot-reload on file change
|
RetryPause int `json:"retry-pause"` // seconds between retries
|
||||||
RateLimit RateLimit `json:"rate-limit"` // per-IP connection rate limit
|
Watch bool `json:"watch"` // hot-reload on file change
|
||||||
|
RateLimit RateLimit `json:"rate-limit"` // per-IP connection rate limit
|
||||||
|
configPath string
|
||||||
}
|
}
|
||||||
|
|
||||||
// BindAddr is one TCP listen endpoint.
|
// BindAddr is one TCP listen endpoint.
|
||||||
|
|
@ -47,7 +54,7 @@ type PoolConfig struct {
|
||||||
Enabled bool `json:"enabled"`
|
Enabled bool `json:"enabled"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// TLSConfig controls inbound TLS on bind addresses that have TLS: true.
|
// TLSConfig controls inbound TLS for miner listeners.
|
||||||
//
|
//
|
||||||
// proxy.TLSConfig{Enabled: true, CertFile: "/etc/proxy/cert.pem", KeyFile: "/etc/proxy/key.pem"}
|
// proxy.TLSConfig{Enabled: true, CertFile: "/etc/proxy/cert.pem", KeyFile: "/etc/proxy/key.pem"}
|
||||||
type TLSConfig struct {
|
type TLSConfig struct {
|
||||||
|
|
@ -69,19 +76,24 @@ type HTTPConfig struct {
|
||||||
Restricted bool `json:"restricted"` // true = read-only GET only
|
Restricted bool `json:"restricted"` // true = read-only GET only
|
||||||
}
|
}
|
||||||
|
|
||||||
// RateLimit controls per-IP connection rate limiting using a token bucket.
|
// RateLimit caps connection attempts per source IP.
|
||||||
//
|
//
|
||||||
// proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300}
|
// limiter := proxy.NewRateLimiter(proxy.RateLimit{
|
||||||
|
// MaxConnectionsPerMinute: 30,
|
||||||
|
// BanDurationSeconds: 300,
|
||||||
|
// })
|
||||||
type RateLimit struct {
|
type RateLimit struct {
|
||||||
MaxConnectionsPerMinute int `json:"max-connections-per-minute"` // 0 = disabled
|
MaxConnectionsPerMinute int `json:"max-connections-per-minute"` // 0 = disabled
|
||||||
BanDurationSeconds int `json:"ban-duration"` // 0 = no ban
|
BanDurationSeconds int `json:"ban-duration"` // 0 = no ban
|
||||||
}
|
}
|
||||||
|
|
||||||
// WorkersMode controls which login field becomes the worker name.
|
// WorkersMode picks the login field used as the worker name.
|
||||||
|
//
|
||||||
|
// cfg.Workers = proxy.WorkersByRigID
|
||||||
type WorkersMode string
|
type WorkersMode string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
WorkersByRigID WorkersMode = "rig-id" // rigid field, fallback to user
|
WorkersByRigID WorkersMode = "rig-id" // rigid field, fallback to user
|
||||||
WorkersByUser WorkersMode = "user"
|
WorkersByUser WorkersMode = "user"
|
||||||
WorkersByPass WorkersMode = "password"
|
WorkersByPass WorkersMode = "password"
|
||||||
WorkersByAgent WorkersMode = "agent"
|
WorkersByAgent WorkersMode = "agent"
|
||||||
|
|
|
||||||
63
config_load_test.go
Normal file
63
config_load_test.go
Normal file
|
|
@ -0,0 +1,63 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConfig_LoadConfig_Good(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "config.json")
|
||||||
|
data := []byte(`{"mode":"nicehash","workers":"rig-id","bind":[{"host":"0.0.0.0","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
|
||||||
|
if err := os.WriteFile(path, data, 0o600); err != nil {
|
||||||
|
t.Fatalf("expected config file write to succeed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, result := LoadConfig(path)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected load to succeed, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
if cfg == nil {
|
||||||
|
t.Fatal("expected config to be returned")
|
||||||
|
}
|
||||||
|
if got := cfg.Mode; got != "nicehash" {
|
||||||
|
t.Fatalf("expected mode to round-trip, got %q", got)
|
||||||
|
}
|
||||||
|
if got := cfg.configPath; got != path {
|
||||||
|
t.Fatalf("expected config path to be recorded, got %q", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfig_LoadConfig_Bad(t *testing.T) {
|
||||||
|
cfg, result := LoadConfig(filepath.Join(t.TempDir(), "missing.json"))
|
||||||
|
if result.OK {
|
||||||
|
t.Fatalf("expected missing config file to fail, got cfg=%+v", cfg)
|
||||||
|
}
|
||||||
|
if cfg != nil {
|
||||||
|
t.Fatalf("expected no config on read failure, got %+v", cfg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfig_LoadConfig_Ugly(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "config.json")
|
||||||
|
data := []byte(`{"mode":"invalid","workers":"rig-id","bind":[],"pools":[]}`)
|
||||||
|
if err := os.WriteFile(path, data, 0o600); err != nil {
|
||||||
|
t.Fatalf("expected config file write to succeed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, result := LoadConfig(path)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected syntactically valid JSON to load, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
if cfg == nil {
|
||||||
|
t.Fatal("expected config to be returned")
|
||||||
|
}
|
||||||
|
if got := cfg.Mode; got != "invalid" {
|
||||||
|
t.Fatalf("expected invalid mode value to be preserved, got %q", got)
|
||||||
|
}
|
||||||
|
if validation := cfg.Validate(); validation.OK {
|
||||||
|
t.Fatal("expected semantic validation to fail separately from loading")
|
||||||
|
}
|
||||||
|
}
|
||||||
90
config_test.go
Normal file
90
config_test.go
Normal file
|
|
@ -0,0 +1,90 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestConfig_Validate_Good(t *testing.T) {
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "0.0.0.0", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
|
||||||
|
if result := cfg.Validate(); !result.OK {
|
||||||
|
t.Fatalf("expected valid config, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfig_Validate_Bad(t *testing.T) {
|
||||||
|
cfg := &Config{
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "0.0.0.0", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
|
||||||
|
if result := cfg.Validate(); result.OK {
|
||||||
|
t.Fatalf("expected missing mode to fail validation")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfig_Validate_Ugly(t *testing.T) {
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersMode("unknown"),
|
||||||
|
Bind: []BindAddr{{Host: "0.0.0.0", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "", Enabled: true}},
|
||||||
|
}
|
||||||
|
|
||||||
|
if result := cfg.Validate(); result.OK {
|
||||||
|
t.Fatalf("expected invalid workers and empty pool url to fail validation")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfig_Validate_NoEnabledPool_Good(t *testing.T) {
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "simple",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "0.0.0.0", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{
|
||||||
|
{URL: "pool-a.example:3333", Enabled: false},
|
||||||
|
{URL: "pool-b.example:4444", Enabled: false},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if result := cfg.Validate(); !result.OK {
|
||||||
|
t.Fatalf("expected config with no enabled pools to be valid, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_New_WhitespaceMode_Good(t *testing.T) {
|
||||||
|
originalFactory, hadFactory := splitterFactoryForMode("nicehash")
|
||||||
|
if hadFactory {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
RegisterSplitterFactory("nicehash", originalFactory)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
called := false
|
||||||
|
RegisterSplitterFactory("nicehash", func(*Config, *EventBus) Splitter {
|
||||||
|
called = true
|
||||||
|
return &noopSplitter{}
|
||||||
|
})
|
||||||
|
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: " nicehash ",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "0.0.0.0", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
|
||||||
|
p, result := New(cfg)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected whitespace-padded mode to remain valid, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
if !called {
|
||||||
|
t.Fatalf("expected trimmed mode lookup to invoke the registered splitter factory")
|
||||||
|
}
|
||||||
|
if _, ok := p.splitter.(*noopSplitter); !ok {
|
||||||
|
t.Fatalf("expected test splitter to be wired, got %#v", p.splitter)
|
||||||
|
}
|
||||||
|
}
|
||||||
136
configwatcher_test.go
Normal file
136
configwatcher_test.go
Normal file
|
|
@ -0,0 +1,136 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConfigWatcher_New_Good(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "config.json")
|
||||||
|
if err := os.WriteFile(path, []byte(`{"mode":"nicehash","workers":"false","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`), 0o644); err != nil {
|
||||||
|
t.Fatalf("write config file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
watcher := NewConfigWatcher(path, func(*Config) {})
|
||||||
|
if watcher == nil {
|
||||||
|
t.Fatal("expected watcher")
|
||||||
|
}
|
||||||
|
if watcher.lastModifiedAt.IsZero() {
|
||||||
|
t.Fatal("expected last modification time to be initialised from the file")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigWatcher_Start_Good(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "config.json")
|
||||||
|
initial := []byte(`{"mode":"nicehash","workers":"false","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
|
||||||
|
if err := os.WriteFile(path, initial, 0o644); err != nil {
|
||||||
|
t.Fatalf("write initial config file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
updates := make(chan *Config, 1)
|
||||||
|
watcher := NewConfigWatcher(path, func(cfg *Config) {
|
||||||
|
select {
|
||||||
|
case updates <- cfg:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if watcher == nil {
|
||||||
|
t.Fatal("expected watcher")
|
||||||
|
}
|
||||||
|
watcher.Start()
|
||||||
|
defer watcher.Stop()
|
||||||
|
|
||||||
|
updated := []byte(`{"mode":"simple","workers":"user","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
|
||||||
|
if err := os.WriteFile(path, updated, 0o644); err != nil {
|
||||||
|
t.Fatalf("write updated config file: %v", err)
|
||||||
|
}
|
||||||
|
now := time.Now()
|
||||||
|
if err := os.Chtimes(path, now, now.Add(2*time.Second)); err != nil {
|
||||||
|
t.Fatalf("touch updated config file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case cfg := <-updates:
|
||||||
|
if cfg == nil {
|
||||||
|
t.Fatal("expected config update")
|
||||||
|
}
|
||||||
|
if got := cfg.Mode; got != "simple" {
|
||||||
|
t.Fatalf("expected updated mode, got %q", got)
|
||||||
|
}
|
||||||
|
case <-time.After(5 * time.Second):
|
||||||
|
t.Fatal("expected watcher to reload updated config")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestConfigWatcher_Start_Bad verifies a watcher with a nonexistent path does not panic
|
||||||
|
// and does not call the onChange callback.
|
||||||
|
//
|
||||||
|
// watcher := proxy.NewConfigWatcher("/nonexistent/config.json", func(cfg *proxy.Config) {
|
||||||
|
// // never called
|
||||||
|
// })
|
||||||
|
// watcher.Start()
|
||||||
|
// watcher.Stop()
|
||||||
|
func TestConfigWatcher_Start_Bad(t *testing.T) {
|
||||||
|
called := make(chan struct{}, 1)
|
||||||
|
watcher := NewConfigWatcher("/nonexistent/path/config.json", func(*Config) {
|
||||||
|
select {
|
||||||
|
case called <- struct{}{}:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if watcher == nil {
|
||||||
|
t.Fatal("expected watcher even for a nonexistent path")
|
||||||
|
}
|
||||||
|
watcher.Start()
|
||||||
|
defer watcher.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-called:
|
||||||
|
t.Fatal("expected no callback for nonexistent config file")
|
||||||
|
case <-time.After(2 * time.Second):
|
||||||
|
// expected: no update fired
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigWatcher_Start_Ugly(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "config.json")
|
||||||
|
initial := []byte(`{"mode":"nicehash","workers":"false","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
|
||||||
|
if err := os.WriteFile(path, initial, 0o644); err != nil {
|
||||||
|
t.Fatalf("write initial config file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
updates := make(chan *Config, 1)
|
||||||
|
watcher := NewConfigWatcher(path, func(cfg *Config) {
|
||||||
|
select {
|
||||||
|
case updates <- cfg:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if watcher == nil {
|
||||||
|
t.Fatal("expected watcher")
|
||||||
|
}
|
||||||
|
watcher.Start()
|
||||||
|
defer watcher.Stop()
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
if err := os.Chtimes(path, now, now.Add(2*time.Second)); err != nil {
|
||||||
|
t.Fatalf("touch config file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case cfg := <-updates:
|
||||||
|
if cfg == nil {
|
||||||
|
t.Fatal("expected config update")
|
||||||
|
}
|
||||||
|
if got := cfg.Mode; got != "nicehash" {
|
||||||
|
t.Fatalf("expected unchanged mode, got %q", got)
|
||||||
|
}
|
||||||
|
case <-time.After(5 * time.Second):
|
||||||
|
t.Fatal("expected watcher to reload touched config")
|
||||||
|
}
|
||||||
|
}
|
||||||
534
core_impl.go
Normal file
534
core_impl.go
Normal file
|
|
@ -0,0 +1,534 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Result is the success/error carrier used by constructors and loaders.
|
||||||
|
//
|
||||||
|
// cfg, result := proxy.LoadConfig("config.json")
|
||||||
|
// if !result.OK {
|
||||||
|
// return result.Error
|
||||||
|
// }
|
||||||
|
type Result struct {
|
||||||
|
OK bool
|
||||||
|
Error error
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSuccessResult() Result {
|
||||||
|
return Result{OK: true}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newErrorResult(err error) Result {
|
||||||
|
return Result{OK: false, Error: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
var splitterFactoriesMu sync.RWMutex
|
||||||
|
var splitterFactoriesByMode = map[string]func(*Config, *EventBus) Splitter{}
|
||||||
|
|
||||||
|
// RegisterSplitterFactory installs the constructor used for one proxy mode.
|
||||||
|
//
|
||||||
|
// proxy.RegisterSplitterFactory("simple", func(cfg *proxy.Config, bus *proxy.EventBus) proxy.Splitter {
|
||||||
|
// return simple.NewSimpleSplitter(cfg, bus, nil)
|
||||||
|
// })
|
||||||
|
func RegisterSplitterFactory(mode string, factory func(*Config, *EventBus) Splitter) {
|
||||||
|
splitterFactoriesMu.Lock()
|
||||||
|
defer splitterFactoriesMu.Unlock()
|
||||||
|
splitterFactoriesByMode[strings.ToLower(strings.TrimSpace(mode))] = factory
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitterFactoryForMode(mode string) (func(*Config, *EventBus) Splitter, bool) {
|
||||||
|
splitterFactoriesMu.RLock()
|
||||||
|
defer splitterFactoriesMu.RUnlock()
|
||||||
|
factory, ok := splitterFactoriesByMode[strings.ToLower(strings.TrimSpace(mode))]
|
||||||
|
return factory, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// cfg, result := proxy.LoadConfig("/etc/proxy.json")
|
||||||
|
//
|
||||||
|
// if !result.OK {
|
||||||
|
// return result.Error
|
||||||
|
// }
|
||||||
|
func LoadConfig(path string) (*Config, Result) {
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, newErrorResult(NewScopedError("proxy.config", "read config failed", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
config := &Config{}
|
||||||
|
if err := json.Unmarshal(data, config); err != nil {
|
||||||
|
return nil, newErrorResult(NewScopedError("proxy.config", "parse config failed", err))
|
||||||
|
}
|
||||||
|
config.configPath = path
|
||||||
|
return config, newSuccessResult()
|
||||||
|
}
|
||||||
|
|
||||||
|
// cfg := &proxy.Config{
|
||||||
|
// Mode: "nicehash",
|
||||||
|
// Bind: []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}},
|
||||||
|
// Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
// Workers: proxy.WorkersByRigID,
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if result := cfg.Validate(); !result.OK {
|
||||||
|
// return result
|
||||||
|
// }
|
||||||
|
func (c *Config) Validate() Result {
|
||||||
|
if c == nil {
|
||||||
|
return newErrorResult(NewScopedError("proxy.config", "config is nil", nil))
|
||||||
|
}
|
||||||
|
if !isValidMode(c.Mode) {
|
||||||
|
return newErrorResult(NewScopedError("proxy.config", "mode must be \"nicehash\" or \"simple\"", nil))
|
||||||
|
}
|
||||||
|
if !isValidWorkersMode(c.Workers) {
|
||||||
|
return newErrorResult(NewScopedError("proxy.config", "workers must be one of \"rig-id\", \"user\", \"password\", \"agent\", \"ip\", or \"false\"", nil))
|
||||||
|
}
|
||||||
|
if len(c.Bind) == 0 {
|
||||||
|
return newErrorResult(NewScopedError("proxy.config", "bind list is empty", nil))
|
||||||
|
}
|
||||||
|
if len(c.Pools) == 0 {
|
||||||
|
return newErrorResult(NewScopedError("proxy.config", "pool list is empty", nil))
|
||||||
|
}
|
||||||
|
for _, pool := range c.Pools {
|
||||||
|
if pool.Enabled && strings.TrimSpace(pool.URL) == "" {
|
||||||
|
return newErrorResult(NewScopedError("proxy.config", "enabled pool url is empty", nil))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return newSuccessResult()
|
||||||
|
}
|
||||||
|
|
||||||
|
func isValidMode(mode string) bool {
|
||||||
|
switch strings.ToLower(strings.TrimSpace(mode)) {
|
||||||
|
case "nicehash", "simple":
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isValidWorkersMode(mode WorkersMode) bool {
|
||||||
|
switch WorkersMode(strings.TrimSpace(string(mode))) {
|
||||||
|
case WorkersByRigID, WorkersByUser, WorkersByPass, WorkersByAgent, WorkersByIP, WorkersDisabled:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// bus := proxy.NewEventBus()
|
||||||
|
//
|
||||||
|
// bus.Subscribe(proxy.EventLogin, func(e proxy.Event) {
|
||||||
|
// _ = e.Miner
|
||||||
|
// })
|
||||||
|
func NewEventBus() *EventBus {
|
||||||
|
return &EventBus{listeners: make(map[EventType][]EventHandler)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// bus.Subscribe(proxy.EventAccept, stats.OnAccept)
|
||||||
|
func (b *EventBus) Subscribe(t EventType, h EventHandler) {
|
||||||
|
if b == nil || h == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
if b.listeners == nil {
|
||||||
|
b.listeners = make(map[EventType][]EventHandler)
|
||||||
|
}
|
||||||
|
b.listeners[t] = append(b.listeners[t], h)
|
||||||
|
}
|
||||||
|
|
||||||
|
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: miner})
|
||||||
|
func (b *EventBus) Dispatch(e Event) {
|
||||||
|
if b == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.mu.RLock()
|
||||||
|
handlers := append([]EventHandler(nil), b.listeners[e.Type]...)
|
||||||
|
b.mu.RUnlock()
|
||||||
|
for _, handler := range handlers {
|
||||||
|
func() {
|
||||||
|
defer func() {
|
||||||
|
_ = recover()
|
||||||
|
}()
|
||||||
|
handler(e)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type shareSinkGroup struct {
|
||||||
|
sinks []ShareSink
|
||||||
|
}
|
||||||
|
|
||||||
|
func newShareSinkGroup(sinks ...ShareSink) *shareSinkGroup {
|
||||||
|
group := &shareSinkGroup{sinks: make([]ShareSink, 0, len(sinks))}
|
||||||
|
for _, sink := range sinks {
|
||||||
|
if sink != nil {
|
||||||
|
group.sinks = append(group.sinks, sink)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return group
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *shareSinkGroup) OnAccept(e Event) {
|
||||||
|
if g == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, sink := range g.sinks {
|
||||||
|
func() {
|
||||||
|
defer func() {
|
||||||
|
_ = recover()
|
||||||
|
}()
|
||||||
|
sink.OnAccept(e)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *shareSinkGroup) OnReject(e Event) {
|
||||||
|
if g == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, sink := range g.sinks {
|
||||||
|
func() {
|
||||||
|
defer func() {
|
||||||
|
_ = recover()
|
||||||
|
}()
|
||||||
|
sink.OnReject(e)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsValid returns true when the job contains a blob and job id.
|
||||||
|
//
|
||||||
|
// if !job.IsValid() {
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
func (j Job) IsValid() bool {
|
||||||
|
return j.Blob != "" && j.JobID != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlobWithFixedByte replaces the blob byte at position 39 with fixedByte.
|
||||||
|
//
|
||||||
|
// partitioned := job.BlobWithFixedByte(0x2A)
|
||||||
|
func (j Job) BlobWithFixedByte(fixedByte uint8) string {
|
||||||
|
if len(j.Blob) < 80 {
|
||||||
|
return j.Blob
|
||||||
|
}
|
||||||
|
blob := []byte(j.Blob)
|
||||||
|
encoded := make([]byte, 2)
|
||||||
|
hex.Encode(encoded, []byte{fixedByte})
|
||||||
|
blob[78] = encoded[0]
|
||||||
|
blob[79] = encoded[1]
|
||||||
|
return string(blob)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DifficultyFromTarget converts the 8-char little-endian target into a difficulty.
|
||||||
|
//
|
||||||
|
// diff := job.DifficultyFromTarget()
|
||||||
|
func (j Job) DifficultyFromTarget() uint64 {
|
||||||
|
if len(j.Target) != 8 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
raw, err := hex.DecodeString(j.Target)
|
||||||
|
if err != nil || len(raw) != 4 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
target := uint32(raw[0]) | uint32(raw[1])<<8 | uint32(raw[2])<<16 | uint32(raw[3])<<24
|
||||||
|
if target == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return uint64(math.MaxUint32) / uint64(target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// targetFromDifficulty converts a difficulty into the 8-char little-endian hex target.
|
||||||
|
//
|
||||||
|
// target := targetFromDifficulty(10000) // "b88d0600"
|
||||||
|
func targetFromDifficulty(diff uint64) string {
|
||||||
|
if diff <= 1 {
|
||||||
|
return "ffffffff"
|
||||||
|
}
|
||||||
|
maxTarget := uint64(math.MaxUint32)
|
||||||
|
target := (maxTarget + diff - 1) / diff
|
||||||
|
if target == 0 {
|
||||||
|
target = 1
|
||||||
|
}
|
||||||
|
if target > maxTarget {
|
||||||
|
target = maxTarget
|
||||||
|
}
|
||||||
|
var raw [4]byte
|
||||||
|
binary.LittleEndian.PutUint32(raw[:], uint32(target))
|
||||||
|
return hex.EncodeToString(raw[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// EffectiveShareDifficulty returns the share difficulty capped by the miner's custom diff.
|
||||||
|
// If no custom diff is set or the pool diff is already lower, the pool diff is returned.
|
||||||
|
//
|
||||||
|
// diff := proxy.EffectiveShareDifficulty(job, miner) // 25000 when customDiff < poolDiff
|
||||||
|
func EffectiveShareDifficulty(job Job, miner *Miner) uint64 {
|
||||||
|
diff := job.DifficultyFromTarget()
|
||||||
|
if miner == nil || miner.customDiff == 0 || diff == 0 || diff <= miner.customDiff {
|
||||||
|
return diff
|
||||||
|
}
|
||||||
|
return miner.customDiff
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCustomDiff creates a login-time custom difficulty resolver.
|
||||||
|
//
|
||||||
|
// resolver := proxy.NewCustomDiff(50000)
|
||||||
|
// resolver.OnLogin(proxy.Event{Miner: miner})
|
||||||
|
func NewCustomDiff(globalDiff uint64) *CustomDiff {
|
||||||
|
cd := &CustomDiff{}
|
||||||
|
cd.globalDiff.Store(globalDiff)
|
||||||
|
return cd
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnLogin normalises the login user once during handshake.
|
||||||
|
//
|
||||||
|
// cd.OnLogin(proxy.Event{Miner: &proxy.Miner{user: "WALLET+50000"}})
|
||||||
|
func (cd *CustomDiff) OnLogin(e Event) {
|
||||||
|
if cd == nil || e.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if e.Miner.customDiffResolved {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resolved := resolveLoginCustomDiff(e.Miner.user, cd.globalDiff.Load())
|
||||||
|
e.Miner.user = resolved.user
|
||||||
|
e.Miner.customDiff = resolved.diff
|
||||||
|
e.Miner.customDiffFromLogin = resolved.fromLogin
|
||||||
|
e.Miner.customDiffResolved = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300})
|
||||||
|
//
|
||||||
|
// if limiter.Allow("203.0.113.42:3333") {
|
||||||
|
// // first 30 connection attempts per minute are allowed
|
||||||
|
// }
|
||||||
|
func NewRateLimiter(config RateLimit) *RateLimiter {
|
||||||
|
return &RateLimiter{
|
||||||
|
limit: config,
|
||||||
|
bucketByHost: make(map[string]*tokenBucket),
|
||||||
|
banUntilByHost: make(map[string]time.Time),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if limiter.Allow("203.0.113.42:3333") {
|
||||||
|
// // hostOnly("203.0.113.42:3333") == "203.0.113.42"
|
||||||
|
// }
|
||||||
|
func (rl *RateLimiter) Allow(ip string) bool {
|
||||||
|
if rl == nil || rl.limit.MaxConnectionsPerMinute <= 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
host := hostOnly(ip)
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
rl.mu.Lock()
|
||||||
|
defer rl.mu.Unlock()
|
||||||
|
|
||||||
|
if until, banned := rl.banUntilByHost[host]; banned {
|
||||||
|
if now.Before(until) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
delete(rl.banUntilByHost, host)
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket, ok := rl.bucketByHost[host]
|
||||||
|
if !ok {
|
||||||
|
bucket = &tokenBucket{tokens: rl.limit.MaxConnectionsPerMinute, lastRefill: now}
|
||||||
|
rl.bucketByHost[host] = bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
refillBucket(bucket, rl.limit.MaxConnectionsPerMinute, now)
|
||||||
|
if bucket.tokens <= 0 {
|
||||||
|
if rl.limit.BanDurationSeconds > 0 {
|
||||||
|
rl.banUntilByHost[host] = now.Add(time.Duration(rl.limit.BanDurationSeconds) * time.Second)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket.tokens--
|
||||||
|
bucket.lastRefill = now
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tick removes expired ban entries and refills token buckets.
|
||||||
|
//
|
||||||
|
// limiter.Tick()
|
||||||
|
func (rl *RateLimiter) Tick() {
|
||||||
|
if rl == nil || rl.limit.MaxConnectionsPerMinute <= 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
rl.mu.Lock()
|
||||||
|
defer rl.mu.Unlock()
|
||||||
|
|
||||||
|
for host, until := range rl.banUntilByHost {
|
||||||
|
if !now.Before(until) {
|
||||||
|
delete(rl.banUntilByHost, host)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, bucket := range rl.bucketByHost {
|
||||||
|
refillBucket(bucket, rl.limit.MaxConnectionsPerMinute, now)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// watcher := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
|
||||||
|
// p.Reload(cfg)
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// watcher.Start() // polls once per second and reloads after the file mtime changes
|
||||||
|
func NewConfigWatcher(configPath string, onChange func(*Config)) *ConfigWatcher {
|
||||||
|
watcher := &ConfigWatcher{
|
||||||
|
configPath: configPath,
|
||||||
|
onConfigChange: onChange,
|
||||||
|
stopCh: make(chan struct{}),
|
||||||
|
}
|
||||||
|
if info, err := os.Stat(configPath); err == nil {
|
||||||
|
watcher.lastModifiedAt = info.ModTime()
|
||||||
|
}
|
||||||
|
return watcher
|
||||||
|
}
|
||||||
|
|
||||||
|
// watcher.Start()
|
||||||
|
func (w *ConfigWatcher) Start() {
|
||||||
|
if w == nil || w.configPath == "" || w.onConfigChange == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
if w.started {
|
||||||
|
w.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if w.stopCh == nil {
|
||||||
|
w.stopCh = make(chan struct{})
|
||||||
|
} else {
|
||||||
|
select {
|
||||||
|
case <-w.stopCh:
|
||||||
|
w.stopCh = make(chan struct{})
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stopCh := w.stopCh
|
||||||
|
configPath := w.configPath
|
||||||
|
onConfigChange := w.onConfigChange
|
||||||
|
w.started = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
ticker := time.NewTicker(time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
if info, err := os.Stat(configPath); err == nil {
|
||||||
|
w.mu.Lock()
|
||||||
|
changed := info.ModTime() != w.lastModifiedAt
|
||||||
|
if changed {
|
||||||
|
w.lastModifiedAt = info.ModTime()
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
if !changed {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
config, result := LoadConfig(configPath)
|
||||||
|
if result.OK && config != nil {
|
||||||
|
onConfigChange(config)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case <-stopCh:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// watcher.Stop()
|
||||||
|
func (w *ConfigWatcher) Stop() {
|
||||||
|
if w == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
stopCh := w.stopCh
|
||||||
|
w.started = false
|
||||||
|
w.mu.Unlock()
|
||||||
|
if stopCh == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-stopCh:
|
||||||
|
default:
|
||||||
|
close(stopCh)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func hostOnly(ip string) string {
|
||||||
|
host, _, err := net.SplitHostPort(ip)
|
||||||
|
if err == nil {
|
||||||
|
return host
|
||||||
|
}
|
||||||
|
return ip
|
||||||
|
}
|
||||||
|
|
||||||
|
func refillBucket(bucket *tokenBucket, limit int, now time.Time) {
|
||||||
|
if bucket == nil || limit <= 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if bucket.lastRefill.IsZero() {
|
||||||
|
bucket.lastRefill = now
|
||||||
|
if bucket.tokens <= 0 {
|
||||||
|
bucket.tokens = limit
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
interval := time.Duration(time.Minute) / time.Duration(limit)
|
||||||
|
if interval <= 0 {
|
||||||
|
interval = time.Nanosecond
|
||||||
|
}
|
||||||
|
elapsed := now.Sub(bucket.lastRefill)
|
||||||
|
if elapsed < interval {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
add := int(elapsed / interval)
|
||||||
|
bucket.tokens += add
|
||||||
|
if bucket.tokens > limit {
|
||||||
|
bucket.tokens = limit
|
||||||
|
}
|
||||||
|
bucket.lastRefill = bucket.lastRefill.Add(time.Duration(add) * interval)
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateUUID() string {
|
||||||
|
var b [16]byte
|
||||||
|
if _, err := io.ReadFull(rand.Reader, b[:]); err != nil {
|
||||||
|
return strconv.FormatInt(time.Now().UnixNano(), 16)
|
||||||
|
}
|
||||||
|
b[6] = (b[6] & 0x0f) | 0x40
|
||||||
|
b[8] = (b[8] & 0x3f) | 0x80
|
||||||
|
var out [36]byte
|
||||||
|
hex.Encode(out[0:8], b[0:4])
|
||||||
|
out[8] = '-'
|
||||||
|
hex.Encode(out[9:13], b[4:6])
|
||||||
|
out[13] = '-'
|
||||||
|
hex.Encode(out[14:18], b[6:8])
|
||||||
|
out[18] = '-'
|
||||||
|
hex.Encode(out[19:23], b[8:10])
|
||||||
|
out[23] = '-'
|
||||||
|
hex.Encode(out[24:36], b[10:16])
|
||||||
|
return string(out[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func sha256Hex(data []byte) string {
|
||||||
|
sum := sha256.Sum256(data)
|
||||||
|
return hex.EncodeToString(sum[:])
|
||||||
|
}
|
||||||
83
customdiff_test.go
Normal file
83
customdiff_test.go
Normal file
|
|
@ -0,0 +1,83 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
// TestCustomDiff_Apply_Good verifies a user suffix "+50000" sets customDiff and strips the suffix.
|
||||||
|
//
|
||||||
|
// cd := proxy.NewCustomDiff(10000)
|
||||||
|
// cd.Apply(&proxy.Miner{user: "WALLET+50000"})
|
||||||
|
// // miner.User() == "WALLET", miner.customDiff == 50000
|
||||||
|
func TestCustomDiff_Apply_Good(t *testing.T) {
|
||||||
|
cd := NewCustomDiff(10000)
|
||||||
|
miner := &Miner{user: "WALLET+50000"}
|
||||||
|
cd.OnLogin(Event{Miner: miner})
|
||||||
|
if miner.User() != "WALLET" {
|
||||||
|
t.Fatalf("expected stripped user, got %q", miner.User())
|
||||||
|
}
|
||||||
|
if miner.customDiff != 50000 {
|
||||||
|
t.Fatalf("expected custom diff 50000, got %d", miner.customDiff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCustomDiff_Apply_Bad verifies "+abc" (non-numeric) leaves user unchanged, customDiff=0.
|
||||||
|
//
|
||||||
|
// cd := proxy.NewCustomDiff(10000)
|
||||||
|
// cd.Apply(&proxy.Miner{user: "WALLET+abc"})
|
||||||
|
// // miner.User() == "WALLET+abc", miner.customDiff == 0
|
||||||
|
func TestCustomDiff_Apply_Bad(t *testing.T) {
|
||||||
|
cd := NewCustomDiff(10000)
|
||||||
|
miner := &Miner{user: "WALLET+abc"}
|
||||||
|
cd.OnLogin(Event{Miner: miner})
|
||||||
|
if miner.User() != "WALLET+abc" {
|
||||||
|
t.Fatalf("expected invalid suffix to remain unchanged, got %q", miner.User())
|
||||||
|
}
|
||||||
|
if miner.customDiff != 0 {
|
||||||
|
t.Fatalf("expected invalid suffix to disable custom diff, got %d", miner.customDiff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCustomDiff_Apply_Ugly verifies globalDiff=10000 is used when no suffix is present.
|
||||||
|
//
|
||||||
|
// cd := proxy.NewCustomDiff(10000)
|
||||||
|
// cd.Apply(&proxy.Miner{user: "WALLET"})
|
||||||
|
// // miner.customDiff == 10000 (falls back to global)
|
||||||
|
func TestCustomDiff_Apply_Ugly(t *testing.T) {
|
||||||
|
cd := NewCustomDiff(10000)
|
||||||
|
miner := &Miner{user: "WALLET"}
|
||||||
|
cd.OnLogin(Event{Miner: miner})
|
||||||
|
if miner.customDiff != 10000 {
|
||||||
|
t.Fatalf("expected global diff fallback 10000, got %d", miner.customDiff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCustomDiff_OnLogin_NonNumericSuffix verifies a non-decimal suffix after plus is ignored.
|
||||||
|
//
|
||||||
|
// cd := proxy.NewCustomDiff(10000)
|
||||||
|
// cd.OnLogin(proxy.Event{Miner: &proxy.Miner{user: "WALLET+50000extra"}})
|
||||||
|
func TestCustomDiff_OnLogin_NonNumericSuffix(t *testing.T) {
|
||||||
|
cd := NewCustomDiff(10000)
|
||||||
|
miner := &Miner{user: "WALLET+50000extra"}
|
||||||
|
|
||||||
|
cd.OnLogin(Event{Miner: miner})
|
||||||
|
|
||||||
|
if miner.User() != "WALLET+50000extra" {
|
||||||
|
t.Fatalf("expected non-numeric suffix plus segment to remain unchanged, got %q", miner.User())
|
||||||
|
}
|
||||||
|
if miner.customDiff != 0 {
|
||||||
|
t.Fatalf("expected invalid suffix to disable custom diff, got %d", miner.customDiff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestEffectiveShareDifficulty_CustomDiffCapsPoolDifficulty verifies the cap applied by custom diff.
|
||||||
|
//
|
||||||
|
// job := proxy.Job{Target: "01000000"}
|
||||||
|
// miner := &proxy.Miner{customDiff: 25000}
|
||||||
|
// proxy.EffectiveShareDifficulty(job, miner) // 25000 (capped)
|
||||||
|
func TestEffectiveShareDifficulty_CustomDiffCapsPoolDifficulty(t *testing.T) {
|
||||||
|
job := Job{Target: "01000000"}
|
||||||
|
miner := &Miner{customDiff: 25000}
|
||||||
|
|
||||||
|
if got := EffectiveShareDifficulty(job, miner); got != 25000 {
|
||||||
|
t.Fatalf("expected capped difficulty 25000, got %d", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
122
customdiffstats.go
Normal file
122
customdiffstats.go
Normal file
|
|
@ -0,0 +1,122 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CustomDiffBucketStats tracks per-custom-difficulty share outcomes.
// Counters are only ever incremented; a bucket is created on the first
// share recorded for its difficulty (see CustomDiffBuckets.bucketLocked).
type CustomDiffBucketStats struct {
	// Accepted counts shares recorded via OnAccept.
	Accepted uint64 `json:"accepted"`
	// Rejected counts shares recorded via OnReject.
	Rejected uint64 `json:"rejected"`
	// Invalid counts the subset of rejections whose reason text matches
	// isInvalidShareReason (low difficulty, malformed, bad nonce, ...).
	Invalid uint64 `json:"invalid"`
	// Expired counts accepted shares whose event carried Expired=true.
	Expired uint64 `json:"expired"`
	// HashesTotal accumulates the event Diff of accepted shares (only
	// positive values are added).
	HashesTotal uint64 `json:"hashes_total"`
}
|
||||||
|
|
||||||
|
// CustomDiffBuckets groups share totals by the miner's resolved custom
// difficulty.
//
//	buckets := NewCustomDiffBuckets(true)
//	buckets.OnAccept(Event{Miner: &Miner{customDiff: 50000}, Diff: 25000})
type CustomDiffBuckets struct {
	enabled bool // when false, the On* hooks are no-ops and Snapshot returns nil
	buckets map[uint64]*CustomDiffBucketStats // keyed by Miner.customDiff; created lazily
	mu sync.Mutex // guards enabled and buckets
}
|
||||||
|
|
||||||
|
// NewCustomDiffBuckets creates a per-difficulty share tracker.
|
||||||
|
func NewCustomDiffBuckets(enabled bool) *CustomDiffBuckets {
|
||||||
|
return &CustomDiffBuckets{
|
||||||
|
enabled: enabled,
|
||||||
|
buckets: make(map[uint64]*CustomDiffBucketStats),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetEnabled toggles recording without discarding any collected buckets.
|
||||||
|
func (b *CustomDiffBuckets) SetEnabled(enabled bool) {
|
||||||
|
if b == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
b.enabled = enabled
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnAccept records an accepted share for the miner's custom difficulty bucket.
|
||||||
|
func (b *CustomDiffBuckets) OnAccept(e Event) {
|
||||||
|
if b == nil || !b.enabled || e.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
bucket := b.bucketLocked(e.Miner.customDiff)
|
||||||
|
bucket.Accepted++
|
||||||
|
if e.Expired {
|
||||||
|
bucket.Expired++
|
||||||
|
}
|
||||||
|
if e.Diff > 0 {
|
||||||
|
bucket.HashesTotal += e.Diff
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnReject records a rejected share for the miner's custom difficulty bucket.
|
||||||
|
func (b *CustomDiffBuckets) OnReject(e Event) {
|
||||||
|
if b == nil || !b.enabled || e.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
bucket := b.bucketLocked(e.Miner.customDiff)
|
||||||
|
bucket.Rejected++
|
||||||
|
if isInvalidShareReason(e.Error) {
|
||||||
|
bucket.Invalid++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a copy of the current bucket totals.
|
||||||
|
//
|
||||||
|
// summary := buckets.Snapshot()
|
||||||
|
func (b *CustomDiffBuckets) Snapshot() map[uint64]CustomDiffBucketStats {
|
||||||
|
if b == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
if !b.enabled || len(b.buckets) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := make(map[uint64]CustomDiffBucketStats, len(b.buckets))
|
||||||
|
for diff, bucket := range b.buckets {
|
||||||
|
if bucket == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out[diff] = *bucket
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *CustomDiffBuckets) bucketLocked(diff uint64) *CustomDiffBucketStats {
|
||||||
|
if b.buckets == nil {
|
||||||
|
b.buckets = make(map[uint64]*CustomDiffBucketStats)
|
||||||
|
}
|
||||||
|
bucket, ok := b.buckets[diff]
|
||||||
|
if !ok {
|
||||||
|
bucket = &CustomDiffBucketStats{}
|
||||||
|
b.buckets[diff] = bucket
|
||||||
|
}
|
||||||
|
return bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
// isInvalidShareReason reports whether a pool reject reason describes a
// share that was invalid on the miner's side (bad difficulty, malformed
// submission, bad nonce) rather than a transient pool-side rejection.
// Matching is case-insensitive substring search.
//
//	isInvalidShareReason("Low difficulty share") // true
//	isInvalidShareReason("connection reset")     // false
func isInvalidShareReason(reason string) bool {
	reason = strings.ToLower(reason)
	// "difficulty" already covers "low difficulty" and "lowdifficulty";
	// "low diff" is kept separately for pools that abbreviate. The
	// original chain of seven Contains calls collapses to this set with
	// identical matching behaviour.
	for _, keyword := range []string{"low diff", "difficulty", "malformed", "invalid", "nonce"} {
		if strings.Contains(reason, keyword) {
			return true
		}
	}
	return false
}
|
||||||
78
customdiffstats_test.go
Normal file
78
customdiffstats_test.go
Normal file
|
|
@ -0,0 +1,78 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestProxy_CustomDiffStats_Good(t *testing.T) {
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
CustomDiffStats: true,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p, result := New(cfg)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected valid proxy, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
miner := &Miner{customDiff: 50000}
|
||||||
|
p.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 75, Expired: true})
|
||||||
|
|
||||||
|
summary := p.Summary()
|
||||||
|
bucket, ok := summary.CustomDiffStats[50000]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected custom diff bucket 50000 to be present")
|
||||||
|
}
|
||||||
|
if bucket.Accepted != 1 || bucket.Expired != 1 || bucket.HashesTotal != 75 {
|
||||||
|
t.Fatalf("unexpected bucket totals: %+v", bucket)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_CustomDiffStats_Bad(t *testing.T) {
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
CustomDiffStats: true,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p, result := New(cfg)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected valid proxy, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
miner := &Miner{customDiff: 10000}
|
||||||
|
p.events.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Low difficulty share"})
|
||||||
|
p.events.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Malformed share"})
|
||||||
|
|
||||||
|
summary := p.Summary()
|
||||||
|
bucket, ok := summary.CustomDiffStats[10000]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected custom diff bucket 10000 to be present")
|
||||||
|
}
|
||||||
|
if bucket.Rejected != 2 || bucket.Invalid != 2 {
|
||||||
|
t.Fatalf("unexpected bucket totals: %+v", bucket)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_CustomDiffStats_Ugly(t *testing.T) {
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
CustomDiffStats: false,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p, result := New(cfg)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected valid proxy, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
miner := &Miner{customDiff: 25000}
|
||||||
|
p.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 1})
|
||||||
|
|
||||||
|
summary := p.Summary()
|
||||||
|
if len(summary.CustomDiffStats) != 0 {
|
||||||
|
t.Fatalf("expected custom diff stats to remain disabled, got %+v", summary.CustomDiffStats)
|
||||||
|
}
|
||||||
|
}
|
||||||
440
docs/RFC-025-AGENT-EXPERIENCE.md
Normal file
440
docs/RFC-025-AGENT-EXPERIENCE.md
Normal file
|
|
@ -0,0 +1,440 @@
|
||||||
|
# RFC-025: Agent Experience (AX) Design Principles
|
||||||
|
|
||||||
|
- **Status:** Draft
|
||||||
|
- **Authors:** Snider, Cladius
|
||||||
|
- **Date:** 2026-03-19
|
||||||
|
- **Applies to:** All Core ecosystem packages (CoreGO, CorePHP, CoreTS, core-agent)
|
||||||
|
|
||||||
|
## Abstract
|
||||||
|
|
||||||
|
Agent Experience (AX) is a design paradigm for software systems where the primary code consumer is an AI agent, not a human developer. AX sits alongside User Experience (UX) and Developer Experience (DX) as the third era of interface design.
|
||||||
|
|
||||||
|
This RFC establishes AX as a formal design principle for the Core ecosystem and defines the conventions that follow from it.
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
As of early 2026, AI agents write, review, and maintain the majority of code in the Core ecosystem. The original author has not manually edited code (outside of Core struct design) since October 2025. Code is processed semantically — agents reason about intent, not characters.
|
||||||
|
|
||||||
|
Design patterns inherited from the human-developer era optimise for the wrong consumer:
|
||||||
|
|
||||||
|
- **Short names** save keystrokes but increase semantic ambiguity
|
||||||
|
- **Functional option chains** are fluent for humans but opaque for agents tracing configuration
|
||||||
|
- **Error-at-every-call-site** produces 50% boilerplate that obscures intent
|
||||||
|
- **Generic type parameters** force agents to carry type context that the runtime already has
|
||||||
|
- **Panic-hiding conventions** (`Must*`) create implicit control flow that agents must special-case
|
||||||
|
|
||||||
|
AX acknowledges this shift and provides principles for designing code, APIs, file structures, and conventions that serve AI agents as first-class consumers.
|
||||||
|
|
||||||
|
## The Three Eras
|
||||||
|
|
||||||
|
| Era | Primary Consumer | Optimises For | Key Metric |
|
||||||
|
|-----|-----------------|---------------|------------|
|
||||||
|
| UX | End users | Discoverability, forgiveness, visual clarity | Task completion time |
|
||||||
|
| DX | Developers | Typing speed, IDE support, convention familiarity | Time to first commit |
|
||||||
|
| AX | AI agents | Predictability, composability, semantic navigation | Correct-on-first-pass rate |
|
||||||
|
|
||||||
|
AX does not replace UX or DX. End users still need good UX. Developers still need good DX. But when the primary code author and maintainer is an AI agent, the codebase should be designed for that consumer first.
|
||||||
|
|
||||||
|
## Principles
|
||||||
|
|
||||||
|
### 1. Predictable Names Over Short Names
|
||||||
|
|
||||||
|
Names are tokens that agents pattern-match across languages and contexts. Abbreviations introduce mapping overhead.
|
||||||
|
|
||||||
|
```
|
||||||
|
Config not Cfg
|
||||||
|
Service not Srv
|
||||||
|
Embed not Emb
|
||||||
|
Error not Err (as a subsystem name; err for local variables is fine)
|
||||||
|
Options not Opts
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rule:** If a name would require a comment to explain, it is too short.
|
||||||
|
|
||||||
|
**Exception:** Industry-standard abbreviations that are universally understood (`HTTP`, `URL`, `ID`, `IPC`, `I18n`) are acceptable. The test: would an agent trained on any mainstream language recognise it without context?
|
||||||
|
|
||||||
|
### 2. Comments as Usage Examples
|
||||||
|
|
||||||
|
The function signature tells WHAT. The comment shows HOW with real values.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Detect the project type from files present
|
||||||
|
setup.Detect("/path/to/project")
|
||||||
|
|
||||||
|
// Set up a workspace with auto-detected template
|
||||||
|
setup.Run(setup.Options{Path: ".", Template: "auto"})
|
||||||
|
|
||||||
|
// Scaffold a PHP module workspace
|
||||||
|
setup.Run(setup.Options{Path: "./my-module", Template: "php"})
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rule:** If a comment restates what the type signature already says, delete it. If a comment shows a concrete usage with realistic values, keep it.
|
||||||
|
|
||||||
|
**Rationale:** Agents learn from examples more effectively than from descriptions. A comment like "Run executes the setup process" adds zero information. A comment like `setup.Run(setup.Options{Path: ".", Template: "auto"})` teaches an agent exactly how to call the function.
|
||||||
|
|
||||||
|
### 3. Path Is Documentation
|
||||||
|
|
||||||
|
File and directory paths should be self-describing. An agent navigating the filesystem should understand what it is looking at without reading a README.
|
||||||
|
|
||||||
|
```
|
||||||
|
flow/deploy/to/homelab.yaml — deploy TO the homelab
|
||||||
|
flow/deploy/from/github.yaml — deploy FROM GitHub
|
||||||
|
flow/code/review.yaml — code review flow
|
||||||
|
template/file/go/struct.go.tmpl — Go struct file template
|
||||||
|
template/dir/workspace/php/ — PHP workspace scaffold
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rule:** If an agent needs to read a file to understand what a directory contains, the directory naming has failed.
|
||||||
|
|
||||||
|
**Corollary:** The unified path convention (folder structure = HTTP route = CLI command = test path) is AX-native. One path, every surface.
|
||||||
|
|
||||||
|
### 4. Templates Over Freeform
|
||||||
|
|
||||||
|
When an agent generates code from a template, the output is constrained to known-good shapes. When an agent writes freeform, the output varies.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Template-driven — consistent output
|
||||||
|
lib.RenderFile("php/action", data)
|
||||||
|
lib.ExtractDir("php", targetDir, data)
|
||||||
|
|
||||||
|
// Freeform — variance in output
|
||||||
|
"write a PHP action class that..."
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rule:** For any code pattern that recurs, provide a template. Templates are guardrails for agents.
|
||||||
|
|
||||||
|
**Scope:** Templates apply to file generation, workspace scaffolding, config generation, and commit messages. They do NOT apply to novel logic — agents should write business logic freeform with the domain knowledge available.
|
||||||
|
|
||||||
|
### 5. Declarative Over Imperative
|
||||||
|
|
||||||
|
Agents reason better about declarations of intent than sequences of operations.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Declarative — agent sees what should happen
|
||||||
|
steps:
|
||||||
|
- name: build
|
||||||
|
flow: tools/docker-build
|
||||||
|
with:
|
||||||
|
context: "{{ .app_dir }}"
|
||||||
|
image_name: "{{ .image_name }}"
|
||||||
|
|
||||||
|
- name: deploy
|
||||||
|
flow: deploy/with/docker
|
||||||
|
with:
|
||||||
|
host: "{{ .host }}"
|
||||||
|
```
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Imperative — agent must trace execution
|
||||||
|
cmd := exec.Command("docker", "build", "--platform", "linux/amd64", "-t", imageName, ".")
|
||||||
|
cmd.Dir = appDir
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return fmt.Errorf("docker build: %w", err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rule:** Orchestration, configuration, and pipeline logic should be declarative (YAML/JSON). Implementation logic should be imperative (Go/PHP/TS). The boundary is: if an agent needs to compose or modify the logic, make it declarative.
|
||||||
|
|
||||||
|
### 6. Universal Types (Core Primitives)
|
||||||
|
|
||||||
|
Every component in the ecosystem accepts and returns the same primitive types. An agent processing any level of the tree sees identical shapes.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Universal contract
|
||||||
|
setup.Run(core.Options{Path: ".", Template: "auto"})
|
||||||
|
brain.New(core.Options{Name: "openbrain"})
|
||||||
|
deploy.Run(core.Options{Flow: "deploy/to/homelab"})
|
||||||
|
|
||||||
|
// Fractal — Core itself is a Service
|
||||||
|
core.New(core.Options{
|
||||||
|
Services: []core.Service{
|
||||||
|
process.New(core.Options{Name: "process"}),
|
||||||
|
brain.New(core.Options{Name: "brain"}),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
**Core primitive types:**
|
||||||
|
|
||||||
|
| Type | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `core.Options` | Input configuration (what you want) |
|
||||||
|
| `core.Config` | Runtime settings (what is active) |
|
||||||
|
| `core.Data` | Embedded or stored content |
|
||||||
|
| `core.Service` | A managed component with lifecycle |
|
||||||
|
| `core.Result[T]` | Return value with OK/fail state |
|
||||||
|
|
||||||
|
**What this replaces:**
|
||||||
|
|
||||||
|
| Go Convention | Core AX | Why |
|
||||||
|
|--------------|---------|-----|
|
||||||
|
| `func With*(v) Option` | `core.Options{Field: v}` | Struct literal is parseable; option chain requires tracing |
|
||||||
|
| `func Must*(v) T` | `core.Result[T]` | No hidden panics; errors flow through Core |
|
||||||
|
| `func *For[T](c) T` | `c.Service("name")` | String lookup is greppable; generics require type context |
|
||||||
|
| `val, err :=` everywhere | Single return via `core.Result` | Intent not obscured by error handling |
|
||||||
|
| `_ = err` | Never needed | Core handles all errors internally |
|
||||||
|
|
||||||
|
### 7. Directory as Semantics
|
||||||
|
|
||||||
|
The directory structure tells an agent the intent before it reads a word. Top-level directories are semantic categories, not organisational bins.
|
||||||
|
|
||||||
|
```
|
||||||
|
plans/
|
||||||
|
├── code/ # Pure primitives — read for WHAT exists
|
||||||
|
├── project/ # Products — read for WHAT we're building and WHY
|
||||||
|
└── rfc/ # Contracts — read for constraints and rules
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rule:** An agent should know what kind of document it's reading from the path alone. `code/core/go/io/RFC.md` = a lib primitive spec. `project/ofm/RFC.md` = a product spec that cross-references code/. `rfc/snider/borg/RFC-BORG-006-SMSG-FORMAT.md` = an immutable contract for the Borg SMSG protocol.
|
||||||
|
|
||||||
|
**Corollary:** The three-way split (code/project/rfc) extends principle 3 (Path Is Documentation) from files to entire subtrees. The path IS the metadata.
|
||||||
|
|
||||||
|
### 8. Lib Never Imports Consumer
|
||||||
|
|
||||||
|
Dependency flows one direction. Libraries define primitives. Consumers compose from them. A new feature in a consumer can never break a library.
|
||||||
|
|
||||||
|
```
|
||||||
|
code/core/go/* → lib tier (stable foundation)
|
||||||
|
code/core/agent/ → consumer tier (composes from go/*)
|
||||||
|
code/core/cli/ → consumer tier (composes from go/*)
|
||||||
|
code/core/gui/ → consumer tier (composes from go/*)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rule:** If package A is in `go/` and package B is in the consumer tier, B may import A but A must never import B. The repo naming convention enforces this: `go-{name}` = lib, bare `{name}` = consumer.
|
||||||
|
|
||||||
|
**Why this matters for agents:** When an agent is dispatched to implement a feature in `core/agent`, it can freely import from `go-io`, `go-scm`, `go-process`. But if an agent is dispatched to `go-io`, it knows its changes are foundational — every consumer depends on it, so the contract must not break.
|
||||||
|
|
||||||
|
### 9. Issues Are N+(rounds) Deep
|
||||||
|
|
||||||
|
Problems in code and specs are layered. Surface issues mask deeper issues. Fixing the surface reveals the next layer. This is not a failure mode — it is the discovery process.
|
||||||
|
|
||||||
|
```
|
||||||
|
Pass 1: Find 16 issues (surface — naming, imports, obvious errors)
|
||||||
|
Pass 2: Find 11 issues (structural — contradictions, missing types)
|
||||||
|
Pass 3: Find 5 issues (architectural — signature mismatches, registration gaps)
|
||||||
|
Pass 4: Find 4 issues (contract — cross-spec API mismatches)
|
||||||
|
Pass 5: Find 2 issues (mechanical — path format, nil safety)
|
||||||
|
Pass N: Findings are trivial → spec/code is complete
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rule:** Iteration is required, not a failure. Each pass sees what the previous pass could not, because the context changed. An agent dispatched with the same task on the same repo will find different things each time — this is correct behaviour.
|
||||||
|
|
||||||
|
**Corollary:** The cheapest model should do the most passes (surface work). The frontier model should arrive last, when only deep issues remain. Tiered iteration: grunt model grinds → mid model pre-warms → frontier model polishes.
|
||||||
|
|
||||||
|
**Anti-pattern:** One-shot generation expecting valid output. No model, no human, produces correct-on-first-pass for non-trivial work. Expecting it wastes the first pass on surface issues that a cheaper pass would have caught.
|
||||||
|
|
||||||
|
### 10. CLI Tests as Artifact Validation
|
||||||
|
|
||||||
|
Unit tests verify the code. CLI tests verify the binary. The directory structure IS the command structure — path maps to command, Taskfile runs the test.
|
||||||
|
|
||||||
|
```
|
||||||
|
tests/cli/
|
||||||
|
├── core/
|
||||||
|
│ └── lint/
|
||||||
|
│ ├── Taskfile.yaml ← test `core-lint` (root)
|
||||||
|
│ ├── run/
|
||||||
|
│ │ ├── Taskfile.yaml ← test `core-lint run`
|
||||||
|
│ │ └── fixtures/
|
||||||
|
│ ├── go/
|
||||||
|
│ │ ├── Taskfile.yaml ← test `core-lint go`
|
||||||
|
│ │ └── fixtures/
|
||||||
|
│ └── security/
|
||||||
|
│ ├── Taskfile.yaml ← test `core-lint security`
|
||||||
|
│ └── fixtures/
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rule:** Every CLI command has a matching `tests/cli/{path}/Taskfile.yaml`. The Taskfile runs the compiled binary against fixtures with known inputs and validates the output. If the CLI test passes, the underlying actions work — because CLI commands call actions, MCP tools call actions, API endpoints call actions. Test the CLI, trust the rest.
|
||||||
|
|
||||||
|
**Pattern:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# tests/cli/core/lint/go/Taskfile.yaml
|
||||||
|
version: '3'
|
||||||
|
tasks:
|
||||||
|
test:
|
||||||
|
cmds:
|
||||||
|
- core-lint go --output json fixtures/ > /tmp/result.json
|
||||||
|
- jq -e '.findings | length > 0' /tmp/result.json
|
||||||
|
- jq -e '.summary.passed == false' /tmp/result.json
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why this matters for agents:** An agent can validate its own work by running `task test` in the matching `tests/cli/` directory. No test framework, no mocking, no setup — just the binary, fixtures, and `jq` assertions. The agent builds the binary, runs the test, sees the result. If it fails, the agent can read the fixture, read the output, and fix the code.
|
||||||
|
|
||||||
|
**Corollary:** Fixtures are planted bugs. Each fixture file has a known issue that the linter must find. If the linter doesn't find it, the test fails. Fixtures are the spec for what the tool must detect — they ARE the test cases, not descriptions of test cases.
|
||||||
|
|
||||||
|
## Applying AX to Existing Patterns
|
||||||
|
|
||||||
|
### File Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
# AX-native: path describes content
|
||||||
|
core/agent/
|
||||||
|
├── go/ # Go source
|
||||||
|
├── php/ # PHP source
|
||||||
|
├── ui/ # Frontend source
|
||||||
|
├── claude/ # Claude Code plugin
|
||||||
|
└── codex/ # Codex plugin
|
||||||
|
|
||||||
|
# Not AX: generic names requiring README
|
||||||
|
src/
|
||||||
|
├── lib/
|
||||||
|
├── utils/
|
||||||
|
└── helpers/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Error Handling
|
||||||
|
|
||||||
|
```go
|
||||||
|
// AX-native: errors are infrastructure, not application logic
|
||||||
|
svc := c.Service("brain")
|
||||||
|
cfg := c.Config().Get("database.host")
|
||||||
|
// Errors logged by Core. Code reads like a spec.
|
||||||
|
|
||||||
|
// Not AX: errors dominate the code
|
||||||
|
svc, err := c.ServiceFor[brain.Service]()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("get brain service: %w", err)
|
||||||
|
}
|
||||||
|
cfg, err := c.Config().Get("database.host")
|
||||||
|
if err != nil {
|
||||||
|
_ = err // silenced because "it'll be fine"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### API Design
|
||||||
|
|
||||||
|
```go
|
||||||
|
// AX-native: one shape, every surface
|
||||||
|
core.New(core.Options{
|
||||||
|
Name: "my-app",
|
||||||
|
Services: []core.Service{...},
|
||||||
|
Config: core.Config{...},
|
||||||
|
})
|
||||||
|
|
||||||
|
// Not AX: multiple patterns for the same thing
|
||||||
|
core.New(
|
||||||
|
core.WithName("my-app"),
|
||||||
|
core.WithService(factory1),
|
||||||
|
core.WithService(factory2),
|
||||||
|
core.WithConfig(cfg),
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
## The Plans Convention — AX Development Lifecycle
|
||||||
|
|
||||||
|
The `plans/` directory structure encodes a development methodology designed for how generative AI actually works: iterative refinement across structured phases, not one-shot generation.
|
||||||
|
|
||||||
|
### The Three-Way Split
|
||||||
|
|
||||||
|
```
|
||||||
|
plans/
|
||||||
|
├── project/ # 1. WHAT and WHY — start here
|
||||||
|
├── rfc/ # 2. CONSTRAINTS — immutable contracts
|
||||||
|
└── code/ # 3. HOW — implementation specs
|
||||||
|
```
|
||||||
|
|
||||||
|
Each directory is a phase. Work flows from project → rfc → code. Each transition forces a refinement pass — you cannot write a code spec without discovering gaps in the project spec, and you cannot write an RFC without discovering assumptions in both.
|
||||||
|
|
||||||
|
**Three places for data that can't be written simultaneously = three guaranteed iterations of "actually, this needs changing."** Refinement is baked into the structure, not bolted on as a review step.
|
||||||
|
|
||||||
|
### Phase 1: Project (Vision)
|
||||||
|
|
||||||
|
Start with `project/`. No code exists yet. Define:
|
||||||
|
- What the product IS and who it serves
|
||||||
|
- What existing primitives it consumes (cross-ref to `code/`)
|
||||||
|
- What constraints it operates under (cross-ref to `rfc/`)
|
||||||
|
|
||||||
|
This is where creativity lives. Map features to building blocks. Connect systems. The project spec is integrative — it references everything else.
|
||||||
|
|
||||||
|
### Phase 2: RFC (Contracts)
|
||||||
|
|
||||||
|
Extract the immutable rules into `rfc/`. These are constraints that don't change with implementation:
|
||||||
|
- Wire formats, protocols, hash algorithms
|
||||||
|
- Security properties that must hold
|
||||||
|
- Compatibility guarantees
|
||||||
|
|
||||||
|
RFCs are numbered per component (`RFC-BORG-006-SMSG-FORMAT.md`) and never modified after acceptance. If the contract changes, write a new RFC.
|
||||||
|
|
||||||
|
### Phase 3: Code (Implementation Specs)
|
||||||
|
|
||||||
|
Define the implementation in `code/`. Each component gets an RFC.md that an agent can implement from:
|
||||||
|
- Struct definitions (the DTOs — see principle 6)
|
||||||
|
- Method signatures and behaviour
|
||||||
|
- Error conditions and edge cases
|
||||||
|
- Cross-references to other code/ specs
|
||||||
|
|
||||||
|
The code spec IS the product. Write the spec → dispatch to an agent → review output → iterate.
|
||||||
|
|
||||||
|
### Pre-Launch: Alignment Protocol
|
||||||
|
|
||||||
|
Before dispatching for implementation, verify spec-model alignment:
|
||||||
|
|
||||||
|
```
|
||||||
|
1. REVIEW — The implementation model (Codex/Jules) reads the spec
|
||||||
|
and reports missing elements. This surfaces the delta between
|
||||||
|
the model's training and the spec's assumptions.
|
||||||
|
|
||||||
|
"I need X, Y, Z to implement this" is the model saying
|
||||||
|
"I hear you but I'm missing context" — without asking.
|
||||||
|
|
||||||
|
2. ADJUST — Update the spec to close the gaps. Add examples,
|
||||||
|
clarify ambiguities, provide the context the model needs.
|
||||||
|
This is shared alignment, not compromise.
|
||||||
|
|
||||||
|
3. VERIFY — A different model (or sub-agent) reviews the adjusted
|
||||||
|
spec without the planner's bias. Fresh eyes on the contract.
|
||||||
|
"Does this make sense to someone who wasn't in the room?"
|
||||||
|
|
||||||
|
4. READY — When the review findings are trivial or deployment-
|
||||||
|
related (not architectural), the spec is ready to dispatch.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Implementation: Iterative Dispatch
|
||||||
|
|
||||||
|
Same prompt, multiple runs. Each pass sees deeper because the context evolved:
|
||||||
|
|
||||||
|
```
|
||||||
|
Round 1: Build features (the obvious gaps)
|
||||||
|
Round 2: Write tests (verify what was built)
|
||||||
|
Round 3: Harden security (what can go wrong?)
|
||||||
|
Round 4: Next RFC section (what's still missing?)
|
||||||
|
Round N: Findings are trivial → implementation is complete
|
||||||
|
```
|
||||||
|
|
||||||
|
Re-running is not failure. It is the process. Each pass changes the codebase, which changes what the next pass can see. The iteration IS the refinement.
|
||||||
|
|
||||||
|
### Post-Implementation: Auto-Documentation
|
||||||
|
|
||||||
|
The QA/verify chain produces artefacts that feed forward:
|
||||||
|
- Test results document the contract (what works, what doesn't)
|
||||||
|
- Coverage reports surface untested paths
|
||||||
|
- Diff summaries prep the changelog for the next release
|
||||||
|
- Doc site updates from the spec (the spec IS the documentation)
|
||||||
|
|
||||||
|
The output of one cycle is the input to the next. The plans repo stays current because the specs drive the code, not the other way round.
|
||||||
|
|
||||||
|
## Compatibility
|
||||||
|
|
||||||
|
AX conventions are valid, idiomatic Go/PHP/TS. They do not require language extensions, code generation, or non-standard tooling. An AX-designed codebase compiles, tests, and deploys with standard toolchains.
|
||||||
|
|
||||||
|
The conventions diverge from community patterns (functional options, Must/For, etc.) but do not violate language specifications. This is a style choice, not a fork.
|
||||||
|
|
||||||
|
## Adoption
|
||||||
|
|
||||||
|
AX applies to all new code in the Core ecosystem. Existing code migrates incrementally as it is touched — no big-bang rewrite.
|
||||||
|
|
||||||
|
Priority order:
|
||||||
|
1. **Public APIs** (package-level functions, struct constructors)
|
||||||
|
2. **File structure** (path naming, template locations)
|
||||||
|
3. **Internal fields** (struct field names, local variables)
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- dAppServer unified path convention (2024)
|
||||||
|
- CoreGO DTO pattern refactor (2026-03-18)
|
||||||
|
- Core primitives design (2026-03-19)
|
||||||
|
- Go Proverbs, Rob Pike (2015) — AX provides an updated lens
|
||||||
|
|
||||||
|
## Changelog
|
||||||
|
|
||||||
|
- 2026-03-19: Initial draft
|
||||||
239
docs/RFC-CORE-GO-REQUEST.md
Normal file
239
docs/RFC-CORE-GO-REQUEST.md
Normal file
|
|
@ -0,0 +1,239 @@
|
||||||
|
# RFC Request — go-blockchain needs from Core (FINAL)
|
||||||
|
|
||||||
|
> From: Charon (go-blockchain)
|
||||||
|
> To: Cladius (core/go + go-* packages)
|
||||||
|
> Date: 2 Apr 2026 00:55
|
||||||
|
> Snider's answers inline. Updated with precise asks.
|
||||||
|
|
||||||
|
## 1. core/api — DONE, pulled (+125 commits)
|
||||||
|
Using it. No ask needed.
|
||||||
|
|
||||||
|
## 2. core.Subscribe/Publish — Raindrops forming
|
||||||
|
When ready, go-blockchain will:
|
||||||
|
- Publish: `blockchain.block.new`, `blockchain.alias.registered`, `blockchain.hardfork.activated`
|
||||||
|
- Wire format: `core.Event{Type: string, Data: any, Timestamp: int64}`
|
||||||
|
|
||||||
|
No blocking ask — will integrate when available.
|
||||||
|
|
||||||
|
## 3. core.Wallet() — I can do this today via core.Service
|
||||||
|
|
||||||
|
```go
|
||||||
|
c.RegisterService("blockchain.wallet", walletService)
|
||||||
|
c.Service("blockchain.wallet", core.Service{
|
||||||
|
Name: "blockchain.wallet",
|
||||||
|
Instance: walletService,
|
||||||
|
OnStart: func() core.Result { return walletService.Start() },
|
||||||
|
OnStop: func() core.Result { return walletService.Stop() },
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
Then register actions:
|
||||||
|
```go
|
||||||
|
c.Action("blockchain.wallet.create", walletService.HandleCreate)
|
||||||
|
c.Action("blockchain.wallet.transfer", walletService.HandleTransfer)
|
||||||
|
c.Action("blockchain.wallet.balance", walletService.HandleBalance)
|
||||||
|
```
|
||||||
|
|
||||||
|
**No ask. Implementing now.**
|
||||||
|
|
||||||
|
## 4. Structured Logging — PRECISE ASK
|
||||||
|
|
||||||
|
**I want package-level logging that works WITHOUT a Core instance.**
|
||||||
|
|
||||||
|
The chain sync runs in goroutines that don't hold `*core.Core`. Currently using `log.Printf`.
|
||||||
|
|
||||||
|
**Exact ask:** Confirm these work at package level:
|
||||||
|
```go
|
||||||
|
core.Print(nil, "block synced height=%d hash=%s", height, hash) // info
|
||||||
|
core.Error(nil, "sync failed: %v", err) // error
|
||||||
|
```
|
||||||
|
|
||||||
|
Or do I need `core.NewLog()` → pass the logger into the sync goroutine?
|
||||||
|
|
||||||
|
## 5. core.Escrow() — Improvement to go-blockchain, sane with Chain + Asset
|
||||||
|
|
||||||
|
Escrow is a tx type (HF4+). I build it in go-blockchain's wallet package:
|
||||||
|
```go
|
||||||
|
wallet.BuildEscrowTx(provider, customer, amount, terms)
|
||||||
|
```
|
||||||
|
|
||||||
|
Then expose via action: `c.Action("blockchain.escrow.create", ...)`
|
||||||
|
|
||||||
|
**No ask from Core. I implement this.**
|
||||||
|
|
||||||
|
## 6. core.Asset() — Same, go-blockchain implements
|
||||||
|
|
||||||
|
HF5 enables deploy/emit/burn. I add to wallet package + actions:
|
||||||
|
```go
|
||||||
|
c.Action("blockchain.asset.deploy", ...)
|
||||||
|
c.Action("blockchain.asset.emit", ...)
|
||||||
|
c.Action("blockchain.asset.burn", ...)
|
||||||
|
```
|
||||||
|
|
||||||
|
**No ask. Implementing after HF5 activates.**
|
||||||
|
|
||||||
|
## 7. core.Chain() — Same pattern
|
||||||
|
|
||||||
|
```go
|
||||||
|
c.RegisterService("blockchain.chain", chainService)
|
||||||
|
c.Action("blockchain.chain.height", ...)
|
||||||
|
c.Action("blockchain.chain.block", ...)
|
||||||
|
c.Action("blockchain.chain.sync", ...)
|
||||||
|
```
|
||||||
|
|
||||||
|
**No ask. Doing this today.**
|
||||||
|
|
||||||
|
## 8. core.DNS() — Do you want a go-dns package?
|
||||||
|
|
||||||
|
The LNS is 672 lines of Go at `~/Code/lthn/lns/`. It could become `go-dns` in the Core ecosystem.
|
||||||
|
|
||||||
|
**Ask: Should I make it `dappco.re/go/core/dns` or keep it as a standalone?**
|
||||||
|
|
||||||
|
If yes to go-dns, the actions would be:
|
||||||
|
```go
|
||||||
|
c.Action("dns.resolve", ...) // A record
|
||||||
|
c.Action("dns.resolve.txt", ...) // TXT record
|
||||||
|
c.Action("dns.reverse", ...) // PTR
|
||||||
|
c.Action("dns.register", ...) // via sidechain
|
||||||
|
```
|
||||||
|
|
||||||
|
## 9. Portable Storage Encoder — DONE
|
||||||
|
|
||||||
|
Already implemented in `p2p/encode.go` using `go-p2p/node/levin/EncodeStorage`. Committed and pushed. HandshakeResponse.Encode, ResponseChainEntry.Encode, RequestChain.Decode all working.
|
||||||
|
|
||||||
|
**go-storage/go-io improvement ask:** The chain stores blocks in go-store (SQLite). For high-throughput sync, a `go-io` backed raw block file store would be faster. Want me to spec a `BlockStore` interface that can swap between go-store and go-io backends?
|
||||||
|
|
||||||
|
## 10. CGo boilerplate — YES PLEASE
|
||||||
|
|
||||||
|
**Exact ask:** A `go-cgo` package with:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Safe C buffer allocation with automatic cleanup
|
||||||
|
buf := cgo.NewBuffer(32)
|
||||||
|
defer buf.Free()
|
||||||
|
buf.CopyFrom(goSlice)
|
||||||
|
result := buf.Bytes()
|
||||||
|
|
||||||
|
// C function call wrapper with error mapping
|
||||||
|
err := cgo.Call(C.my_function, buf.Ptr(), cgo.SizeT(len))
|
||||||
|
// Returns Go error if C returns non-zero
|
||||||
|
|
||||||
|
// C string conversion
|
||||||
|
goStr := cgo.GoString(cStr)
|
||||||
|
cStr := cgo.CString(goStr)
|
||||||
|
defer cgo.Free(cStr)
|
||||||
|
```
|
||||||
|
|
||||||
|
Every CGo package (go-blockchain/crypto, go-mlx, go-rocm) does this dance manually. A shared helper saves ~50 lines per package and prevents use-after-free bugs.
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
| # | What | Who Does It | Status |
|
||||||
|
|---|------|-------------|--------|
|
||||||
|
| 1 | core/api | Cladius | DONE, pulled |
|
||||||
|
| 2 | Pub/Sub events | Cladius | Forming → core/stream (go-ws rename) |
|
||||||
|
| 3 | Wallet service | **Charon** | Implementing today |
|
||||||
|
| 4 | Package-level logging | **Answered below** | RTFM — it works |
|
||||||
|
| 5 | Escrow txs | **Charon** | In go-blockchain |
|
||||||
|
| 6 | Asset operations | **Charon** | After HF5 |
|
||||||
|
| 7 | Chain service | **Charon** | Implementing today |
|
||||||
|
| 8 | go-dns | **Cladius** | `dappco.re/go/dns` — DNS record DTOs + ClouDNS API types |
|
||||||
|
| 9 | Storage encoder | **Charon** | DONE |
|
||||||
|
| 10 | go-cgo | **Cladius** | RFC written, dispatching |
|
||||||
|
|
||||||
|
— Charon
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Cladius Answers — How To Do It With Core Primitives
|
||||||
|
|
||||||
|
> These examples show Charon how each ask maps to existing Core APIs.
|
||||||
|
> Most of what he asked for already exists — he just needs the patterns.
|
||||||
|
|
||||||
|
### #4 Answer: Package-Level Logging
|
||||||
|
|
||||||
|
**Yes, `core.Print(nil, ...)` works.** The first arg is `*core.Core` and `nil` is valid — it falls back to the package-level logger. Your goroutines don't need a Core instance:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// In your sync goroutine — no *core.Core needed:
|
||||||
|
core.Print(nil, "block synced height=%d hash=%s", height, hash)
|
||||||
|
core.Error(nil, "sync failed: %v", err)
|
||||||
|
|
||||||
|
// If you HAVE a Core instance (e.g. in a service handler):
|
||||||
|
core.Print(c, "wallet created id=%s", id) // tagged with service context
|
||||||
|
```
|
||||||
|
|
||||||
|
Both work. `nil` = package logger, `c` = contextual logger. Same output format.
|
||||||
|
|
||||||
|
### #3 Answer: Service + Action Pattern (You Got It Right)
|
||||||
|
|
||||||
|
Your code is correct. The full pattern with Core primitives:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Register service with lifecycle
|
||||||
|
c.RegisterService("blockchain.wallet", core.Service{
|
||||||
|
OnStart: func(ctx context.Context) core.Result {
|
||||||
|
return walletService.Start(ctx)
|
||||||
|
},
|
||||||
|
OnStop: func(ctx context.Context) core.Result {
|
||||||
|
return walletService.Stop(ctx)
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
// Register actions — path IS the CLI/HTTP/MCP route
|
||||||
|
c.Action("blockchain.wallet.create", walletService.HandleCreate)
|
||||||
|
c.Action("blockchain.wallet.balance", walletService.HandleBalance)
|
||||||
|
|
||||||
|
// Call another service's action (for #8 dns.discover → blockchain.chain.aliases):
|
||||||
|
result := c.Run("blockchain.chain.aliases", core.Options{})
|
||||||
|
```
|
||||||
|
|
||||||
|
### #5/#6/#7 Answer: Same Pattern, Different Path
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Escrow (HF4+)
|
||||||
|
c.Action("blockchain.escrow.create", escrowService.HandleCreate)
|
||||||
|
c.Action("blockchain.escrow.release", escrowService.HandleRelease)
|
||||||
|
|
||||||
|
// Asset (HF5+)
|
||||||
|
c.Action("blockchain.asset.deploy", assetService.HandleDeploy)
|
||||||
|
|
||||||
|
// Chain
|
||||||
|
c.Action("blockchain.chain.height", chainService.HandleHeight)
|
||||||
|
c.Action("blockchain.chain.block", chainService.HandleBlock)
|
||||||
|
|
||||||
|
// All of these automatically get:
|
||||||
|
// - CLI: core blockchain chain height
|
||||||
|
// - HTTP: GET /blockchain/chain/height
|
||||||
|
// - MCP: blockchain.chain.height tool
|
||||||
|
// - i18n: blockchain.chain.height.* keys
|
||||||
|
```
|
||||||
|
|
||||||
|
### #9 Answer: BlockStore Interface
|
||||||
|
|
||||||
|
For the go-store vs go-io backend swap:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Define as a Core Data type
|
||||||
|
type BlockStore struct {
|
||||||
|
core.Data // inherits Store/Load/Delete
|
||||||
|
}
|
||||||
|
|
||||||
|
// The backing medium is chosen at init:
|
||||||
|
store := core.NewData("blockchain.blocks",
|
||||||
|
core.WithMedium(gostore.SQLite("blocks.db")), // or:
|
||||||
|
// core.WithMedium(goio.File("blocks/")), // raw file backend
|
||||||
|
)
|
||||||
|
|
||||||
|
// Usage is identical regardless of backend:
|
||||||
|
store.Store("block:12345", blockBytes)
|
||||||
|
block := store.Load("block:12345")
|
||||||
|
```
|
||||||
|
|
||||||
|
### #10 Answer: go-cgo
|
||||||
|
|
||||||
|
RFC written at `plans/code/core/go/cgo/RFC.md`. Buffer, Scope, Call, String helpers. Dispatching to Codex when repo is created on Forge.
|
||||||
|
|
||||||
|
### #8 Answer: go-dns
|
||||||
|
|
||||||
|
`dappco.re/go/dns` — Core package. DNS record structs as DTOs mapping 1:1 to ClouDNS API. Your LNS code at `~/Code/lthn/lns/` moves in as the service layer on top. Dispatching when repo exists.
|
||||||
1337
docs/RFC-CORE-GO.md
Normal file
1337
docs/RFC-CORE-GO.md
Normal file
File diff suppressed because it is too large
Load diff
150
docs/RFC.md
150
docs/RFC.md
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
module: forge.lthn.ai/core/go-proxy
|
module: dappco.re/go/core/proxy
|
||||||
repo: core/go-proxy
|
repo: core/go-proxy
|
||||||
lang: go
|
lang: go
|
||||||
tier: lib
|
tier: lib
|
||||||
|
|
@ -18,7 +18,7 @@ tags:
|
||||||
|
|
||||||
> An agent should be able to implement this library from this document alone.
|
> An agent should be able to implement this library from this document alone.
|
||||||
|
|
||||||
**Module:** `forge.lthn.ai/core/go-proxy`
|
**Module:** `dappco.re/go/core/proxy`
|
||||||
**Repository:** `core/go-proxy`
|
**Repository:** `core/go-proxy`
|
||||||
**Files:** 18
|
**Files:** 18
|
||||||
|
|
||||||
|
|
@ -46,6 +46,28 @@ The v1 scope covers:
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## 1.1 Import Graph (no circular imports)
|
||||||
|
|
||||||
|
Shared types (`Job`, `PoolConfig`, `Config`, `Miner`, `UpstreamStats`, event types) are defined in the root `proxy` package. Sub-packages import `proxy` but `proxy` never imports sub-packages directly — it uses interfaces (`Splitter`, `ShareSink`) injected at construction time.
|
||||||
|
|
||||||
|
```
|
||||||
|
proxy (root) ← defines shared types, Splitter interface, Proxy orchestrator
|
||||||
|
├── pool ← imports proxy (for Job, PoolConfig). proxy does NOT import pool.
|
||||||
|
├── nicehash ← imports proxy (for Miner, Job, events) and pool (for Strategy)
|
||||||
|
├── simple ← imports proxy and pool
|
||||||
|
├── log ← imports proxy (for Event)
|
||||||
|
└── api ← imports proxy (for Proxy) and core/api
|
||||||
|
```
|
||||||
|
|
||||||
|
The `Proxy` orchestrator wires sub-packages via interface injection:
|
||||||
|
```go
|
||||||
|
// proxy.go receives a Splitter (implemented by nicehash or simple)
|
||||||
|
// and a pool.StrategyFactory (closure that creates pool.Strategy instances).
|
||||||
|
// No import of nicehash, simple, or pool packages from proxy.go.
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## 2. File Map
|
## 2. File Map
|
||||||
|
|
||||||
| File | Package | Purpose |
|
| File | Package | Purpose |
|
||||||
|
|
@ -340,6 +362,16 @@ type Miner struct {
|
||||||
buf [16384]byte // per-miner send buffer; avoids per-write allocations
|
buf [16384]byte // per-miner send buffer; avoids per-write allocations
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetID assigns the miner's internal ID. Used by NonceStorage tests.
|
||||||
|
//
|
||||||
|
// m.SetID(42)
|
||||||
|
func (m *Miner) SetID(id int64) {}
|
||||||
|
|
||||||
|
// FixedByte returns the NiceHash slot index assigned to this miner.
|
||||||
|
//
|
||||||
|
// slot := m.FixedByte() // 0x2A
|
||||||
|
func (m *Miner) FixedByte() uint8 {}
|
||||||
|
|
||||||
// NewMiner creates a Miner for an accepted net.Conn. Does not start reading yet.
|
// NewMiner creates a Miner for an accepted net.Conn. Does not start reading yet.
|
||||||
//
|
//
|
||||||
// m := proxy.NewMiner(conn, 3333, nil)
|
// m := proxy.NewMiner(conn, 3333, nil)
|
||||||
|
|
@ -490,6 +522,21 @@ func (s *NonceSplitter) OnClose(event *proxy.CloseEvent) {}
|
||||||
//
|
//
|
||||||
// s.GC() // called by Proxy tick loop every 60 ticks
|
// s.GC() // called by Proxy tick loop every 60 ticks
|
||||||
func (s *NonceSplitter) GC() {}
|
func (s *NonceSplitter) GC() {}
|
||||||
|
|
||||||
|
// Connect establishes the first pool upstream connection via the strategy factory.
|
||||||
|
//
|
||||||
|
// s.Connect()
|
||||||
|
func (s *NonceSplitter) Connect() {}
|
||||||
|
|
||||||
|
// Tick is called every second by the proxy tick loop. Runs keepalive on idle mappers.
|
||||||
|
//
|
||||||
|
// s.Tick(ticks)
|
||||||
|
func (s *NonceSplitter) Tick(ticks uint64) {}
|
||||||
|
|
||||||
|
// Upstreams returns current upstream pool connection counts.
|
||||||
|
//
|
||||||
|
// stats := s.Upstreams()
|
||||||
|
func (s *NonceSplitter) Upstreams() proxy.UpstreamStats {}
|
||||||
```
|
```
|
||||||
|
|
||||||
### 8.2 NonceMapper
|
### 8.2 NonceMapper
|
||||||
|
|
@ -540,6 +587,29 @@ func (m *NonceMapper) Submit(event *proxy.SubmitEvent) {}
|
||||||
//
|
//
|
||||||
// if mapper.IsActive() { /* safe to assign miners */ }
|
// if mapper.IsActive() { /* safe to assign miners */ }
|
||||||
func (m *NonceMapper) IsActive() bool {}
|
func (m *NonceMapper) IsActive() bool {}
|
||||||
|
|
||||||
|
// Start connects the pool strategy. Called by NonceSplitter after creating the mapper.
|
||||||
|
//
|
||||||
|
// mapper.Start()
|
||||||
|
func (m *NonceMapper) Start() {}
|
||||||
|
|
||||||
|
// OnJob receives a new job from the pool. Implements pool.StratumListener.
|
||||||
|
// Calls storage.SetJob to distribute to all active miners.
|
||||||
|
//
|
||||||
|
// // called by pool.StratumClient when pool pushes a job
|
||||||
|
func (m *NonceMapper) OnJob(job proxy.Job) {}
|
||||||
|
|
||||||
|
// OnResultAccepted receives a share result from the pool. Implements pool.StratumListener.
|
||||||
|
// Correlates by sequence to the originating miner and sends success/error reply.
|
||||||
|
//
|
||||||
|
// // called by pool.StratumClient on pool reply
|
||||||
|
func (m *NonceMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {}
|
||||||
|
|
||||||
|
// OnDisconnect handles pool connection loss. Implements pool.StratumListener.
|
||||||
|
// Suspends the mapper; miners keep their slots but receive no new jobs until reconnect.
|
||||||
|
//
|
||||||
|
// // called by pool.StratumClient on disconnect
|
||||||
|
func (m *NonceMapper) OnDisconnect() {}
|
||||||
```
|
```
|
||||||
|
|
||||||
### 8.3 NonceStorage
|
### 8.3 NonceStorage
|
||||||
|
|
@ -639,6 +709,21 @@ func (s *SimpleSplitter) OnClose(event *proxy.CloseEvent) {}
|
||||||
//
|
//
|
||||||
// s.GC()
|
// s.GC()
|
||||||
func (s *SimpleSplitter) GC() {}
|
func (s *SimpleSplitter) GC() {}
|
||||||
|
|
||||||
|
// Connect establishes pool connections for any pre-existing idle mappers.
|
||||||
|
//
|
||||||
|
// s.Connect()
|
||||||
|
func (s *SimpleSplitter) Connect() {}
|
||||||
|
|
||||||
|
// Tick is called every second. Runs idle mapper timeout checks.
|
||||||
|
//
|
||||||
|
// s.Tick(ticks)
|
||||||
|
func (s *SimpleSplitter) Tick(ticks uint64) {}
|
||||||
|
|
||||||
|
// Upstreams returns current upstream connection counts (active + idle).
|
||||||
|
//
|
||||||
|
// stats := s.Upstreams()
|
||||||
|
func (s *SimpleSplitter) Upstreams() proxy.UpstreamStats {}
|
||||||
```
|
```
|
||||||
|
|
||||||
```go
|
```go
|
||||||
|
|
@ -669,7 +754,7 @@ type SimpleMapper struct {
|
||||||
// client := pool.NewStratumClient(poolCfg, listener)
|
// client := pool.NewStratumClient(poolCfg, listener)
|
||||||
// client.Connect()
|
// client.Connect()
|
||||||
type StratumClient struct {
|
type StratumClient struct {
|
||||||
cfg PoolConfig
|
cfg proxy.PoolConfig
|
||||||
listener StratumListener
|
listener StratumListener
|
||||||
conn net.Conn
|
conn net.Conn
|
||||||
tlsConn *tls.Conn // nil if plain TCP
|
tlsConn *tls.Conn // nil if plain TCP
|
||||||
|
|
@ -690,7 +775,7 @@ type StratumListener interface {
|
||||||
OnDisconnect()
|
OnDisconnect()
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewStratumClient(cfg PoolConfig, listener StratumListener) *StratumClient {}
|
func NewStratumClient(cfg proxy.PoolConfig, listener StratumListener) *StratumClient {}
|
||||||
|
|
||||||
// Connect dials the pool. Applies TLS if cfg.TLS is true.
|
// Connect dials the pool. Applies TLS if cfg.TLS is true.
|
||||||
// If cfg.TLSFingerprint is non-empty, pins the server certificate by SHA-256 of DER bytes.
|
// If cfg.TLSFingerprint is non-empty, pins the server certificate by SHA-256 of DER bytes.
|
||||||
|
|
@ -726,7 +811,7 @@ func (c *StratumClient) Disconnect() {}
|
||||||
// strategy := pool.NewFailoverStrategy(cfg.Pools, listener, cfg)
|
// strategy := pool.NewFailoverStrategy(cfg.Pools, listener, cfg)
|
||||||
// strategy.Connect()
|
// strategy.Connect()
|
||||||
type FailoverStrategy struct {
|
type FailoverStrategy struct {
|
||||||
pools []PoolConfig
|
pools []proxy.PoolConfig
|
||||||
current int
|
current int
|
||||||
client *StratumClient
|
client *StratumClient
|
||||||
listener StratumListener
|
listener StratumListener
|
||||||
|
|
@ -749,7 +834,7 @@ type Strategy interface {
|
||||||
IsActive() bool
|
IsActive() bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFailoverStrategy(pools []PoolConfig, listener StratumListener, cfg *proxy.Config) *FailoverStrategy {}
|
func NewFailoverStrategy(pools []proxy.PoolConfig, listener StratumListener, cfg *proxy.Config) *FailoverStrategy {}
|
||||||
|
|
||||||
// Connect dials the current pool. On failure, advances to the next pool (modulo len),
|
// Connect dials the current pool. On failure, advances to the next pool (modulo len),
|
||||||
// respecting cfg.Retries and cfg.RetryPause between attempts.
|
// respecting cfg.Retries and cfg.RetryPause between attempts.
|
||||||
|
|
@ -801,6 +886,32 @@ type Event struct {
|
||||||
|
|
||||||
func NewEventBus() *EventBus {}
|
func NewEventBus() *EventBus {}
|
||||||
|
|
||||||
|
// LoginEvent is the typed event passed to Splitter.OnLogin.
|
||||||
|
//
|
||||||
|
// splitter.OnLogin(&LoginEvent{Miner: m})
|
||||||
|
type LoginEvent struct {
|
||||||
|
Miner *Miner
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubmitEvent is the typed event passed to Splitter.OnSubmit.
|
||||||
|
//
|
||||||
|
// splitter.OnSubmit(&SubmitEvent{Miner: m, JobID: "abc", Nonce: "deadbeef"})
|
||||||
|
type SubmitEvent struct {
|
||||||
|
Miner *Miner
|
||||||
|
JobID string
|
||||||
|
Nonce string
|
||||||
|
Result string
|
||||||
|
Algo string
|
||||||
|
RequestID int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseEvent is the typed event passed to Splitter.OnClose.
|
||||||
|
//
|
||||||
|
// splitter.OnClose(&CloseEvent{Miner: m})
|
||||||
|
type CloseEvent struct {
|
||||||
|
Miner *Miner
|
||||||
|
}
|
||||||
|
|
||||||
// Subscribe registers a handler for the given event type. Safe to call before Start.
|
// Subscribe registers a handler for the given event type. Safe to call before Start.
|
||||||
//
|
//
|
||||||
// bus.Subscribe(proxy.EventAccept, func(e proxy.Event) { stats.OnAccept(e.Diff) })
|
// bus.Subscribe(proxy.EventAccept, func(e proxy.Event) { stats.OnAccept(e.Diff) })
|
||||||
|
|
@ -939,6 +1050,21 @@ func (w *Workers) List() []WorkerRecord {}
|
||||||
//
|
//
|
||||||
// w.Tick()
|
// w.Tick()
|
||||||
func (w *Workers) Tick() {}
|
func (w *Workers) Tick() {}
|
||||||
|
|
||||||
|
// OnLogin upserts the worker record for the miner's login. Called via EventBus subscription.
|
||||||
|
//
|
||||||
|
// bus.Subscribe(proxy.EventLogin, func(e proxy.Event) { w.OnLogin(e) })
|
||||||
|
func (w *Workers) OnLogin(e Event) {}
|
||||||
|
|
||||||
|
// OnAccept records an accepted share for the worker. Called via EventBus subscription.
|
||||||
|
//
|
||||||
|
// bus.Subscribe(proxy.EventAccept, func(e proxy.Event) { w.OnAccept(e) })
|
||||||
|
func (w *Workers) OnAccept(e Event) {}
|
||||||
|
|
||||||
|
// OnReject records a rejected share for the worker. Called via EventBus subscription.
|
||||||
|
//
|
||||||
|
// bus.Subscribe(proxy.EventReject, func(e proxy.Event) { w.OnReject(e) })
|
||||||
|
func (w *Workers) OnReject(e Event) {}
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
@ -985,7 +1111,7 @@ func (cd *CustomDiff) OnLogin(e proxy.Event) {}
|
||||||
type AccessLog struct {
|
type AccessLog struct {
|
||||||
path string
|
path string
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
f core.File // opened append-only on first write; nil until first event
|
f io.WriteCloser // opened append-only on first write; nil until first event
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewAccessLog(path string) *AccessLog {}
|
func NewAccessLog(path string) *AccessLog {}
|
||||||
|
|
@ -1015,7 +1141,7 @@ func (l *AccessLog) OnClose(e proxy.Event) {}
|
||||||
type ShareLog struct {
|
type ShareLog struct {
|
||||||
path string
|
path string
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
f core.File
|
f io.WriteCloser
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewShareLog(path string) *ShareLog {}
|
func NewShareLog(path string) *ShareLog {}
|
||||||
|
|
@ -1036,13 +1162,13 @@ func (l *ShareLog) OnReject(e proxy.Event) {}
|
||||||
## 16. HTTP Monitoring API
|
## 16. HTTP Monitoring API
|
||||||
|
|
||||||
```go
|
```go
|
||||||
// RegisterRoutes registers the proxy monitoring routes on a core/api Router.
|
// RegisterRoutes registers the proxy monitoring routes on a core/api Engine.
|
||||||
// GET /1/summary — aggregated proxy stats
|
// GET /1/summary — aggregated proxy stats
|
||||||
// GET /1/workers — per-worker hashrate table
|
// GET /1/workers — per-worker hashrate table
|
||||||
// GET /1/miners — per-connection state table
|
// GET /1/miners — per-connection state table
|
||||||
//
|
//
|
||||||
// proxyapi.RegisterRoutes(apiRouter, p)
|
// proxyapi.RegisterRoutes(engine, p)
|
||||||
func RegisterRoutes(r api.Router, p *proxy.Proxy) {}
|
func RegisterRoutes(r *api.Engine, p *proxy.Proxy) {}
|
||||||
```
|
```
|
||||||
|
|
||||||
### GET /1/summary — response shape
|
### GET /1/summary — response shape
|
||||||
|
|
@ -1362,7 +1488,7 @@ func TestStorage_Add_Good(t *testing.T) {
|
||||||
// TestJob_BlobWithFixedByte_Bad: blob shorter than 80 chars → returns original blob unchanged.
|
// TestJob_BlobWithFixedByte_Bad: blob shorter than 80 chars → returns original blob unchanged.
|
||||||
// TestJob_BlobWithFixedByte_Ugly: fixedByte 0xFF → "ff" (lowercase, not "FF").
|
// TestJob_BlobWithFixedByte_Ugly: fixedByte 0xFF → "ff" (lowercase, not "FF").
|
||||||
func TestJob_BlobWithFixedByte_Good(t *testing.T) {
|
func TestJob_BlobWithFixedByte_Good(t *testing.T) {
|
||||||
j := proxy.Job{Blob: core.RepeatString("0", 160)}
|
j := proxy.Job{Blob: strings.Repeat("0", 160)}
|
||||||
result := j.BlobWithFixedByte(0x2A)
|
result := j.BlobWithFixedByte(0x2A)
|
||||||
require.Equal(t, "2a", result[78:80])
|
require.Equal(t, "2a", result[78:80])
|
||||||
require.Equal(t, 160, len(result))
|
require.Equal(t, 160, len(result))
|
||||||
|
|
|
||||||
5
docs/specs/core/go/RFC.md
Normal file
5
docs/specs/core/go/RFC.md
Normal file
|
|
@ -0,0 +1,5 @@
|
||||||
|
# go-proxy RFC
|
||||||
|
|
||||||
|
This path mirrors the authoritative proxy contract in [`../../../../RFC.md`](../../../../RFC.md).
|
||||||
|
|
||||||
|
Use the root RFC for the full implementation contract.
|
||||||
5
docs/specs/rfc/RFC-CORE-008-AGENT-EXPERIENCE.md
Normal file
5
docs/specs/rfc/RFC-CORE-008-AGENT-EXPERIENCE.md
Normal file
|
|
@ -0,0 +1,5 @@
|
||||||
|
# RFC-CORE-008: Agent Experience
|
||||||
|
|
||||||
|
This path mirrors the local AX guidance in [`../../../../.core/reference/RFC-025-AGENT-EXPERIENCE.md`](../../../../.core/reference/RFC-025-AGENT-EXPERIENCE.md).
|
||||||
|
|
||||||
|
Use the reference copy for the full design principles.
|
||||||
38
error.go
Normal file
38
error.go
Normal file
|
|
@ -0,0 +1,38 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
// ScopedError carries a stable error scope alongside a human-readable message.
// The scope is a greppable dotted token (e.g. "proxy.config") that survives
// wrapping, so failures can be traced back to their origin by string search.
//
//	err := proxy.NewScopedError("proxy.config", "load failed", io.EOF)
type ScopedError struct {
	Scope   string // stable dotted identifier for the failure site
	Message string // human-readable description of what went wrong
	Cause   error  // optional underlying error; nil when there is none
}

// NewScopedError creates an error that keeps a greppable scope token in the
// failure path. The cause may be nil when there is no underlying error.
//
//	err := proxy.NewScopedError("proxy.server", "listen failed", cause)
func NewScopedError(scope, message string, cause error) error {
	return &ScopedError{Scope: scope, Message: message, Cause: cause}
}

// Error renders "scope: message", appending ": cause" when a cause is set.
// A nil receiver renders as the empty string rather than panicking.
func (e *ScopedError) Error() string {
	switch {
	case e == nil:
		return ""
	case e.Cause == nil:
		return e.Scope + ": " + e.Message
	default:
		return e.Scope + ": " + e.Message + ": " + e.Cause.Error()
	}
}

// Unwrap exposes the underlying cause so errors.Is / errors.As can walk the
// chain. A nil receiver unwraps to nil.
func (e *ScopedError) Unwrap() error {
	if e == nil {
		return nil
	}
	return e.Cause
}
||||||
43
error_test.go
Normal file
43
error_test.go
Normal file
|
|
@ -0,0 +1,43 @@
|
||||||
|
package proxy

import (
	"errors"
	"testing"
)

// TestError_NewScopedError_Good verifies the "scope: message" rendering when
// no underlying cause is attached.
func TestError_NewScopedError_Good(t *testing.T) {
	err := NewScopedError("proxy.config", "bind list is empty", nil)

	if err == nil {
		t.Fatalf("expected scoped error")
	}
	if got := err.Error(); got != "proxy.config: bind list is empty" {
		t.Fatalf("unexpected scoped error string: %q", got)
	}
}

// TestError_NewScopedError_Bad verifies that a wrapped cause is both appended
// to the rendered string and reachable via errors.Is (through Unwrap).
func TestError_NewScopedError_Bad(t *testing.T) {
	cause := errors.New("permission denied")
	err := NewScopedError("proxy.config", "read config failed", cause)

	if err == nil {
		t.Fatalf("expected scoped error")
	}
	if !errors.Is(err, cause) {
		t.Fatalf("expected errors.Is to unwrap the original cause")
	}
	if got := err.Error(); got != "proxy.config: read config failed: permission denied" {
		t.Fatalf("unexpected wrapped error string: %q", got)
	}
}

// TestError_NewScopedError_Ugly verifies the nil-receiver guards: a nil
// *ScopedError renders as the empty string and unwraps to nil without panicking.
func TestError_NewScopedError_Ugly(t *testing.T) {
	var scoped *ScopedError

	if got := scoped.Error(); got != "" {
		t.Fatalf("expected nil scoped error string to be empty, got %q", got)
	}
	if scoped.Unwrap() != nil {
		t.Fatalf("expected nil scoped error to unwrap to nil")
	}
}
||||||
16
events.go
16
events.go
|
|
@ -2,18 +2,21 @@ package proxy
|
||||||
|
|
||||||
import "sync"
|
import "sync"
|
||||||
|
|
||||||
// EventBus dispatches proxy lifecycle events to registered listeners.
|
// EventBus dispatches proxy lifecycle events to synchronous listeners.
|
||||||
// Dispatch is synchronous on the calling goroutine. Listeners must not block.
|
|
||||||
//
|
//
|
||||||
// bus := proxy.NewEventBus()
|
// bus := proxy.NewEventBus()
|
||||||
// bus.Subscribe(proxy.EventLogin, customDiff.OnLogin)
|
// bus.Subscribe(proxy.EventLogin, func(e proxy.Event) {
|
||||||
|
// _ = e.Miner.User()
|
||||||
|
// })
|
||||||
// bus.Subscribe(proxy.EventAccept, stats.OnAccept)
|
// bus.Subscribe(proxy.EventAccept, stats.OnAccept)
|
||||||
type EventBus struct {
|
type EventBus struct {
|
||||||
listeners map[EventType][]EventHandler
|
listeners map[EventType][]EventHandler
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// EventType identifies the proxy lifecycle event.
|
// EventType identifies one proxy lifecycle event.
|
||||||
|
//
|
||||||
|
// proxy.EventLogin
|
||||||
type EventType int
|
type EventType int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
@ -24,12 +27,13 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// EventHandler is the callback signature for all event types.
|
// EventHandler is the callback signature for all event types.
|
||||||
|
//
|
||||||
|
// handler := func(e proxy.Event) { _ = e.Miner }
|
||||||
type EventHandler func(Event)
|
type EventHandler func(Event)
|
||||||
|
|
||||||
// Event carries the data for any proxy lifecycle event.
|
// Event carries the data for any proxy lifecycle event.
|
||||||
// Fields not relevant to the event type are zero/nil.
|
|
||||||
//
|
//
|
||||||
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m})
|
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m})
|
||||||
type Event struct {
|
type Event struct {
|
||||||
Type EventType
|
Type EventType
|
||||||
Miner *Miner // always set
|
Miner *Miner // always set
|
||||||
|
|
|
||||||
2
go.mod
2
go.mod
|
|
@ -1,3 +1,3 @@
|
||||||
module dappco.re/go/core/proxy
|
module dappco.re/go/proxy
|
||||||
|
|
||||||
go 1.26.0
|
go 1.26.0
|
||||||
|
|
|
||||||
224
http_auth_test.go
Normal file
224
http_auth_test.go
Normal file
|
|
@ -0,0 +1,224 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestProxy_allowHTTP_Good: restricted API with a matching bearer token —
// a GET with "Authorization: Bearer secret" is allowed with StatusOK.
func TestProxy_allowHTTP_Good(t *testing.T) {
	p := &Proxy{
		config: &Config{
			HTTP: HTTPConfig{
				Restricted:  true,
				AccessToken: "secret",
			},
		},
	}

	status, ok := p.AllowMonitoringRequest(&http.Request{
		Method: http.MethodGet,
		Header: http.Header{
			"Authorization": []string{"Bearer secret"},
		},
	})
	if !ok {
		t.Fatalf("expected authorised request to pass, got status %d", status)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status %d, got %d", http.StatusOK, status)
	}
}

// TestProxy_allowHTTP_Bad: restricted API rejects non-GET methods with
// StatusMethodNotAllowed, even before any token check.
func TestProxy_allowHTTP_Bad(t *testing.T) {
	p := &Proxy{
		config: &Config{
			HTTP: HTTPConfig{
				Restricted: true,
			},
		},
	}

	status, ok := p.AllowMonitoringRequest(&http.Request{Method: http.MethodPost})
	if ok {
		t.Fatal("expected non-GET request to be rejected")
	}
	if status != http.StatusMethodNotAllowed {
		t.Fatalf("expected status %d, got %d", http.StatusMethodNotAllowed, status)
	}
}

// TestProxy_allowHTTP_Unrestricted_Good: with a zero-value HTTPConfig
// (unrestricted, no token) a plain GET passes with StatusOK.
func TestProxy_allowHTTP_Unrestricted_Good(t *testing.T) {
	p := &Proxy{
		config: &Config{
			HTTP: HTTPConfig{},
		},
	}

	status, ok := p.AllowMonitoringRequest(&http.Request{Method: http.MethodGet})
	if !ok {
		t.Fatalf("expected unrestricted request to pass, got status %d", status)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status %d, got %d", http.StatusOK, status)
	}
}

// TestProxy_allowHTTP_Unrestricted_Bad: when unrestricted, even non-GET
// methods are allowed — the method gate only applies under Restricted.
func TestProxy_allowHTTP_Unrestricted_Bad(t *testing.T) {
	p := &Proxy{
		config: &Config{
			HTTP: HTTPConfig{},
		},
	}

	status, ok := p.AllowMonitoringRequest(&http.Request{Method: http.MethodPost})
	if !ok {
		t.Fatalf("expected unrestricted non-GET request to pass, got status %d", status)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status %d, got %d", http.StatusOK, status)
	}
}

// TestProxy_allowHTTP_Ugly: an AccessToken is configured (without Restricted)
// and the request presents the wrong bearer token — rejected with
// StatusUnauthorized, i.e. the token check applies whenever a token is set.
func TestProxy_allowHTTP_Ugly(t *testing.T) {
	p := &Proxy{
		config: &Config{
			HTTP: HTTPConfig{
				AccessToken: "secret",
			},
		},
	}

	status, ok := p.AllowMonitoringRequest(&http.Request{
		Method: http.MethodGet,
		Header: http.Header{
			"Authorization": []string{"Bearer wrong"},
		},
	})
	if ok {
		t.Fatal("expected invalid token to be rejected")
	}
	if status != http.StatusUnauthorized {
		t.Fatalf("expected status %d, got %d", http.StatusUnauthorized, status)
	}
}
||||||
|
|
||||||
|
func TestProxy_allowHTTP_NilConfig_Ugly(t *testing.T) {
|
||||||
|
p := &Proxy{}
|
||||||
|
|
||||||
|
status, ok := p.AllowMonitoringRequest(&http.Request{Method: http.MethodGet})
|
||||||
|
if ok {
|
||||||
|
t.Fatal("expected nil config request to be rejected")
|
||||||
|
}
|
||||||
|
if status != http.StatusServiceUnavailable {
|
||||||
|
t.Fatalf("expected status %d, got %d", http.StatusServiceUnavailable, status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_startHTTP_Good(t *testing.T) {
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
HTTP: HTTPConfig{
|
||||||
|
Enabled: true,
|
||||||
|
Host: "127.0.0.1",
|
||||||
|
Port: 0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
done: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok := p.startMonitoringServer(); !ok {
|
||||||
|
t.Fatal("expected HTTP server to start on a free port")
|
||||||
|
}
|
||||||
|
p.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_startHTTP_NilConfig_Bad(t *testing.T) {
|
||||||
|
p := &Proxy{}
|
||||||
|
|
||||||
|
if ok := p.startMonitoringServer(); ok {
|
||||||
|
t.Fatal("expected nil config to skip HTTP server start")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_startHTTP_Bad(t *testing.T) {
|
||||||
|
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("listen on ephemeral port: %v", err)
|
||||||
|
}
|
||||||
|
defer listener.Close()
|
||||||
|
|
||||||
|
host, port, err := net.SplitHostPort(listener.Addr().String())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("split listener addr: %v", err)
|
||||||
|
}
|
||||||
|
portNum, err := strconv.Atoi(port)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("parse listener port: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
HTTP: HTTPConfig{
|
||||||
|
Enabled: true,
|
||||||
|
Host: host,
|
||||||
|
Port: uint16(portNum),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
done: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok := p.startMonitoringServer(); ok {
|
||||||
|
t.Fatal("expected HTTP server start to fail when the port is already in use")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_registerMonitoringRoute_MethodNotAllowed_Bad(t *testing.T) {
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
HTTP: HTTPConfig{
|
||||||
|
Restricted: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
p.registerMonitoringRoute(mux, "/1/summary", func() any { return map[string]string{"status": "ok"} })
|
||||||
|
|
||||||
|
request := httptest.NewRequest(http.MethodPost, "/1/summary", nil)
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
mux.ServeHTTP(recorder, request)
|
||||||
|
|
||||||
|
if recorder.Code != http.StatusMethodNotAllowed {
|
||||||
|
t.Fatalf("expected %d, got %d", http.StatusMethodNotAllowed, recorder.Code)
|
||||||
|
}
|
||||||
|
if got := recorder.Header().Get("Allow"); got != http.MethodGet {
|
||||||
|
t.Fatalf("expected Allow header %q, got %q", http.MethodGet, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_registerMonitoringRoute_Unauthorized_Ugly(t *testing.T) {
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
HTTP: HTTPConfig{
|
||||||
|
AccessToken: "secret",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
p.registerMonitoringRoute(mux, "/1/summary", func() any { return map[string]string{"status": "ok"} })
|
||||||
|
|
||||||
|
request := httptest.NewRequest(http.MethodGet, "/1/summary", nil)
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
mux.ServeHTTP(recorder, request)
|
||||||
|
|
||||||
|
if recorder.Code != http.StatusUnauthorized {
|
||||||
|
t.Fatalf("expected %d, got %d", http.StatusUnauthorized, recorder.Code)
|
||||||
|
}
|
||||||
|
if got := recorder.Header().Get("WWW-Authenticate"); got != "Bearer" {
|
||||||
|
t.Fatalf("expected WWW-Authenticate header %q, got %q", "Bearer", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
6
job.go
6
job.go
|
|
@ -1,13 +1,15 @@
|
||||||
package proxy
|
package proxy
|
||||||
|
|
||||||
// Job holds the current work unit received from a pool. Immutable once assigned.
|
// Job holds one pool work unit and its metadata.
|
||||||
//
|
//
|
||||||
// j := proxy.Job{
|
// j := proxy.Job{
|
||||||
// Blob: "0707d5ef...b01",
|
// Blob: strings.Repeat("0", 160),
|
||||||
// JobID: "4BiGm3/RgGQzgkTI",
|
// JobID: "4BiGm3/RgGQzgkTI",
|
||||||
// Target: "b88d0600",
|
// Target: "b88d0600",
|
||||||
// Algo: "cn/r",
|
// Algo: "cn/r",
|
||||||
// }
|
// }
|
||||||
|
// _ = j.BlobWithFixedByte(0x2A)
|
||||||
|
// _ = j.DifficultyFromTarget()
|
||||||
type Job struct {
|
type Job struct {
|
||||||
Blob string // hex-encoded block template (160 hex chars = 80 bytes)
|
Blob string // hex-encoded block template (160 hex chars = 80 bytes)
|
||||||
JobID string // pool-assigned identifier
|
JobID string // pool-assigned identifier
|
||||||
|
|
|
||||||
116
job_test.go
Normal file
116
job_test.go
Normal file
|
|
@ -0,0 +1,116 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestJob_BlobWithFixedByte_Good verifies nonce patching on a full 160-char blob.
|
||||||
|
//
|
||||||
|
// job := proxy.Job{Blob: strings.Repeat("0", 160)}
|
||||||
|
// result := job.BlobWithFixedByte(0x2A) // chars 78-79 become "2a"
|
||||||
|
func TestJob_BlobWithFixedByte_Good(t *testing.T) {
|
||||||
|
job := Job{Blob: strings.Repeat("0", 160)}
|
||||||
|
got := job.BlobWithFixedByte(0x2A)
|
||||||
|
if len(got) != 160 {
|
||||||
|
t.Fatalf("expected length 160, got %d", len(got))
|
||||||
|
}
|
||||||
|
if got[78:80] != "2a" {
|
||||||
|
t.Fatalf("expected fixed byte patch, got %q", got[78:80])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestJob_BlobWithFixedByte_Bad verifies a short blob is returned unchanged.
|
||||||
|
//
|
||||||
|
// job := proxy.Job{Blob: "0000"}
|
||||||
|
// result := job.BlobWithFixedByte(0x2A) // too short, returned as-is
|
||||||
|
func TestJob_BlobWithFixedByte_Bad(t *testing.T) {
|
||||||
|
shortBlob := "0000"
|
||||||
|
job := Job{Blob: shortBlob}
|
||||||
|
got := job.BlobWithFixedByte(0x2A)
|
||||||
|
if got != shortBlob {
|
||||||
|
t.Fatalf("expected short blob to be returned unchanged, got %q", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestJob_BlobWithFixedByte_Ugly verifies fixedByte 0xFF renders as lowercase "ff".
|
||||||
|
//
|
||||||
|
// job := proxy.Job{Blob: strings.Repeat("0", 160)}
|
||||||
|
// result := job.BlobWithFixedByte(0xFF) // chars 78-79 become "ff" (not "FF")
|
||||||
|
func TestJob_BlobWithFixedByte_Ugly(t *testing.T) {
|
||||||
|
job := Job{Blob: strings.Repeat("0", 160)}
|
||||||
|
got := job.BlobWithFixedByte(0xFF)
|
||||||
|
if got[78:80] != "ff" {
|
||||||
|
t.Fatalf("expected lowercase 'ff', got %q", got[78:80])
|
||||||
|
}
|
||||||
|
if len(got) != 160 {
|
||||||
|
t.Fatalf("expected blob length preserved, got %d", len(got))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestJob_DifficultyFromTarget_Good verifies a known target converts to the expected difficulty.
|
||||||
|
//
|
||||||
|
// job := proxy.Job{Target: "b88d0600"}
|
||||||
|
// diff := job.DifficultyFromTarget() // 10000
|
||||||
|
func TestJob_DifficultyFromTarget_Good(t *testing.T) {
|
||||||
|
job := Job{Target: "b88d0600"}
|
||||||
|
if got := job.DifficultyFromTarget(); got != 10000 {
|
||||||
|
t.Fatalf("expected difficulty 10000, got %d", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestJob_DifficultyFromTarget_Bad verifies a zero target produces difficulty 0 without panic.
|
||||||
|
//
|
||||||
|
// job := proxy.Job{Target: "00000000"}
|
||||||
|
// diff := job.DifficultyFromTarget() // 0 (no divide-by-zero)
|
||||||
|
func TestJob_DifficultyFromTarget_Bad(t *testing.T) {
|
||||||
|
job := Job{Target: "00000000"}
|
||||||
|
if got := job.DifficultyFromTarget(); got != 0 {
|
||||||
|
t.Fatalf("expected difficulty 0 for zero target, got %d", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestJob_DifficultyFromTarget_Ugly verifies the maximum target "ffffffff" yields difficulty 1.
|
||||||
|
//
|
||||||
|
// job := proxy.Job{Target: "ffffffff"}
|
||||||
|
// diff := job.DifficultyFromTarget() // 1
|
||||||
|
func TestJob_DifficultyFromTarget_Ugly(t *testing.T) {
|
||||||
|
job := Job{Target: "ffffffff"}
|
||||||
|
if got := job.DifficultyFromTarget(); got != 1 {
|
||||||
|
t.Fatalf("expected minimum difficulty 1, got %d", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestJob_IsValid_Good verifies a job with blob and job ID is valid.
|
||||||
|
//
|
||||||
|
// job := proxy.Job{Blob: "abc", JobID: "job-1"}
|
||||||
|
// job.IsValid() // true
|
||||||
|
func TestJob_IsValid_Good(t *testing.T) {
|
||||||
|
job := Job{Blob: "abc", JobID: "job-1"}
|
||||||
|
if !job.IsValid() {
|
||||||
|
t.Fatalf("expected job with blob and job id to be valid")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestJob_IsValid_Bad verifies a job with empty blob or job ID is invalid.
|
||||||
|
//
|
||||||
|
// job := proxy.Job{Blob: "", JobID: "job-1"}
|
||||||
|
// job.IsValid() // false
|
||||||
|
func TestJob_IsValid_Bad(t *testing.T) {
|
||||||
|
if (Job{Blob: "", JobID: "job-1"}).IsValid() {
|
||||||
|
t.Fatalf("expected empty blob to be invalid")
|
||||||
|
}
|
||||||
|
if (Job{Blob: "abc", JobID: ""}).IsValid() {
|
||||||
|
t.Fatalf("expected empty job id to be invalid")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestJob_IsValid_Ugly verifies a zero-value job is invalid.
|
||||||
|
//
|
||||||
|
// job := proxy.Job{}
|
||||||
|
// job.IsValid() // false
|
||||||
|
func TestJob_IsValid_Ugly(t *testing.T) {
|
||||||
|
if (Job{}).IsValid() {
|
||||||
|
t.Fatalf("expected zero-value job to be invalid")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -5,19 +5,21 @@
|
||||||
// bus.Subscribe(proxy.EventClose, al.OnClose)
|
// bus.Subscribe(proxy.EventClose, al.OnClose)
|
||||||
package log
|
package log
|
||||||
|
|
||||||
import "sync"
|
import (
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
// AccessLog writes connection lifecycle lines to an append-only text file.
|
// AccessLog writes connection lifecycle lines to an append-only text file.
|
||||||
//
|
//
|
||||||
// Line format (connect): 2026-04-04T12:00:00Z CONNECT <ip> <user> <agent>
|
// Line format (connect): 2026-04-04T12:00:00Z CONNECT <ip> <user> <agent>
|
||||||
// Line format (close): 2026-04-04T12:00:00Z CLOSE <ip> <user> rx=<bytes> tx=<bytes>
|
// Line format (close): 2026-04-04T12:00:00Z CLOSE <ip> <user> rx=<bytes> tx=<bytes>
|
||||||
//
|
//
|
||||||
// al, result := log.NewAccessLog("/var/log/proxy-access.log")
|
// al := log.NewAccessLog("/var/log/proxy-access.log")
|
||||||
// bus.Subscribe(proxy.EventLogin, al.OnLogin)
|
// bus.Subscribe(proxy.EventLogin, al.OnLogin)
|
||||||
// bus.Subscribe(proxy.EventClose, al.OnClose)
|
// bus.Subscribe(proxy.EventClose, al.OnClose)
|
||||||
type AccessLog struct {
|
type AccessLog struct {
|
||||||
path string
|
path string
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
// f is opened append-only on first write; nil until first event.
|
file *os.File
|
||||||
// Uses core.File for I/O abstraction.
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
204
log/impl.go
Normal file
204
log/impl.go
Normal file
|
|
@ -0,0 +1,204 @@
|
||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewAccessLog creates an append-only access log.
|
||||||
|
//
|
||||||
|
// al := log.NewAccessLog("/var/log/proxy-access.log")
|
||||||
|
// defer al.Close()
|
||||||
|
func NewAccessLog(path string) *AccessLog {
|
||||||
|
return &AccessLog{path: path}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close releases the underlying file handle if the log has been opened.
|
||||||
|
//
|
||||||
|
// al := log.NewAccessLog("/var/log/proxy-access.log")
|
||||||
|
// defer al.Close()
|
||||||
|
func (l *AccessLog) Close() {
|
||||||
|
if l == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if l.file != nil {
|
||||||
|
_ = l.file.Close()
|
||||||
|
l.file = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnLogin writes a connect line such as:
|
||||||
|
//
|
||||||
|
// al.OnLogin(proxy.Event{Miner: &proxy.Miner{}})
|
||||||
|
// // 2026-04-04T12:00:00Z CONNECT 10.0.0.1 WALLET XMRig/6.21.0
|
||||||
|
func (l *AccessLog) OnLogin(e proxy.Event) {
|
||||||
|
if l == nil || e.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.writeConnectLine(e.Miner.IP(), e.Miner.User(), e.Miner.Agent())
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnClose writes a close line such as:
|
||||||
|
//
|
||||||
|
// al.OnClose(proxy.Event{Miner: &proxy.Miner{}})
|
||||||
|
// // 2026-04-04T12:00:00Z CLOSE 10.0.0.1 WALLET rx=512 tx=4096
|
||||||
|
func (l *AccessLog) OnClose(e proxy.Event) {
|
||||||
|
if l == nil || e.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.writeCloseLine(e.Miner.IP(), e.Miner.User(), e.Miner.RX(), e.Miner.TX())
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewShareLog creates an append-only share log.
|
||||||
|
//
|
||||||
|
// sl := log.NewShareLog("/var/log/proxy-shares.log")
|
||||||
|
// defer sl.Close()
|
||||||
|
func NewShareLog(path string) *ShareLog {
|
||||||
|
return &ShareLog{path: path}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close releases the underlying file handle if the log has been opened.
|
||||||
|
//
|
||||||
|
// sl := log.NewShareLog("/var/log/proxy-shares.log")
|
||||||
|
// defer sl.Close()
|
||||||
|
func (l *ShareLog) Close() {
|
||||||
|
if l == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if l.file != nil {
|
||||||
|
_ = l.file.Close()
|
||||||
|
l.file = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnAccept writes an accept line such as:
|
||||||
|
//
|
||||||
|
// sl.OnAccept(proxy.Event{Miner: &proxy.Miner{}, Diff: 100000, Latency: 82})
|
||||||
|
// // 2026-04-04T12:00:00Z ACCEPT WALLET diff=100000 latency=82ms
|
||||||
|
func (l *ShareLog) OnAccept(e proxy.Event) {
|
||||||
|
if l == nil || e.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.writeAcceptLine(e.Miner.User(), e.Diff, uint64(e.Latency))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnReject writes a reject line such as:
|
||||||
|
//
|
||||||
|
// sl.OnReject(proxy.Event{Miner: &proxy.Miner{}, Error: "Invalid nonce"})
|
||||||
|
// // 2026-04-04T12:00:00Z REJECT WALLET reason="Invalid nonce"
|
||||||
|
func (l *ShareLog) OnReject(e proxy.Event) {
|
||||||
|
if l == nil || e.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.writeRejectLine(e.Miner.User(), e.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (accessLog *AccessLog) writeConnectLine(ip, user, agent string) {
|
||||||
|
accessLog.mu.Lock()
|
||||||
|
defer accessLog.mu.Unlock()
|
||||||
|
if err := accessLog.ensureFile(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString(time.Now().UTC().Format(time.RFC3339))
|
||||||
|
builder.WriteByte(' ')
|
||||||
|
builder.WriteString("CONNECT")
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(ip)
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(user)
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(agent)
|
||||||
|
builder.WriteByte('\n')
|
||||||
|
_, _ = accessLog.file.WriteString(builder.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (accessLog *AccessLog) writeCloseLine(ip, user string, rx, tx uint64) {
|
||||||
|
accessLog.mu.Lock()
|
||||||
|
defer accessLog.mu.Unlock()
|
||||||
|
if err := accessLog.ensureFile(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString(time.Now().UTC().Format(time.RFC3339))
|
||||||
|
builder.WriteByte(' ')
|
||||||
|
builder.WriteString("CLOSE")
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(ip)
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(user)
|
||||||
|
builder.WriteString(" rx=")
|
||||||
|
builder.WriteString(strconv.FormatUint(rx, 10))
|
||||||
|
builder.WriteString(" tx=")
|
||||||
|
builder.WriteString(strconv.FormatUint(tx, 10))
|
||||||
|
builder.WriteByte('\n')
|
||||||
|
_, _ = accessLog.file.WriteString(builder.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (shareLog *ShareLog) writeAcceptLine(user string, diff uint64, latency uint64) {
|
||||||
|
shareLog.mu.Lock()
|
||||||
|
defer shareLog.mu.Unlock()
|
||||||
|
if err := shareLog.ensureFile(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString(time.Now().UTC().Format(time.RFC3339))
|
||||||
|
builder.WriteString(" ACCEPT")
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(user)
|
||||||
|
builder.WriteString(" diff=")
|
||||||
|
builder.WriteString(strconv.FormatUint(diff, 10))
|
||||||
|
builder.WriteString(" latency=")
|
||||||
|
builder.WriteString(strconv.FormatUint(latency, 10))
|
||||||
|
builder.WriteString("ms")
|
||||||
|
builder.WriteByte('\n')
|
||||||
|
_, _ = shareLog.file.WriteString(builder.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (shareLog *ShareLog) writeRejectLine(user, reason string) {
|
||||||
|
shareLog.mu.Lock()
|
||||||
|
defer shareLog.mu.Unlock()
|
||||||
|
if err := shareLog.ensureFile(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString(time.Now().UTC().Format(time.RFC3339))
|
||||||
|
builder.WriteString(" REJECT ")
|
||||||
|
builder.WriteString(user)
|
||||||
|
builder.WriteString(" reason=\"")
|
||||||
|
builder.WriteString(reason)
|
||||||
|
builder.WriteString("\"\n")
|
||||||
|
_, _ = shareLog.file.WriteString(builder.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (accessLog *AccessLog) ensureFile() error {
|
||||||
|
if accessLog.file != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
f, err := os.OpenFile(accessLog.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
accessLog.file = f
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (shareLog *ShareLog) ensureFile() error {
|
||||||
|
if shareLog.file != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
f, err := os.OpenFile(shareLog.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
shareLog.file = f
|
||||||
|
return nil
|
||||||
|
}
|
||||||
341
log/impl_test.go
Normal file
341
log/impl_test.go
Normal file
|
|
@ -0,0 +1,341 @@
|
||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestAccessLog_OnLogin_Good verifies a CONNECT line is written with the expected columns.
|
||||||
|
//
|
||||||
|
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||||
|
// al.OnLogin(proxy.Event{Miner: miner}) // writes "CONNECT 10.0.0.1 WALLET XMRig/6.21.0"
|
||||||
|
func TestAccessLog_OnLogin_Good(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "access.log")
|
||||||
|
al := NewAccessLog(path)
|
||||||
|
defer al.Close()
|
||||||
|
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
al.OnLogin(proxy.Event{Miner: miner})
|
||||||
|
al.Close()
|
||||||
|
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected log file to exist: %v", err)
|
||||||
|
}
|
||||||
|
line := strings.TrimSpace(string(data))
|
||||||
|
if !strings.Contains(line, "CONNECT") {
|
||||||
|
t.Fatalf("expected CONNECT in log line, got %q", line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAccessLog_OnLogin_Bad verifies a nil miner event does not panic or write anything.
|
||||||
|
//
|
||||||
|
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||||
|
// al.OnLogin(proxy.Event{Miner: nil}) // no-op
|
||||||
|
func TestAccessLog_OnLogin_Bad(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "access.log")
|
||||||
|
al := NewAccessLog(path)
|
||||||
|
defer al.Close()
|
||||||
|
|
||||||
|
al.OnLogin(proxy.Event{Miner: nil})
|
||||||
|
al.Close()
|
||||||
|
|
||||||
|
if _, err := os.Stat(path); err == nil {
|
||||||
|
data, _ := os.ReadFile(path)
|
||||||
|
if len(data) > 0 {
|
||||||
|
t.Fatalf("expected no output for nil miner, got %q", string(data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAccessLog_OnLogin_Ugly verifies a nil AccessLog does not panic.
|
||||||
|
//
|
||||||
|
// var al *log.AccessLog
|
||||||
|
// al.OnLogin(proxy.Event{Miner: miner}) // no-op, no panic
|
||||||
|
func TestAccessLog_OnLogin_Ugly(t *testing.T) {
|
||||||
|
var al *AccessLog
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
al.OnLogin(proxy.Event{Miner: miner})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAccessLog_OnClose_Good verifies a CLOSE line includes rx and tx byte counts.
|
||||||
|
//
|
||||||
|
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||||
|
// al.OnClose(proxy.Event{Miner: miner}) // writes "CLOSE <ip> <user> rx=0 tx=0"
|
||||||
|
func TestAccessLog_OnClose_Good(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "access.log")
|
||||||
|
al := NewAccessLog(path)
|
||||||
|
defer al.Close()
|
||||||
|
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
al.OnClose(proxy.Event{Miner: miner})
|
||||||
|
al.Close()
|
||||||
|
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected log file to exist: %v", err)
|
||||||
|
}
|
||||||
|
line := strings.TrimSpace(string(data))
|
||||||
|
if !strings.Contains(line, "CLOSE") {
|
||||||
|
t.Fatalf("expected CLOSE in log line, got %q", line)
|
||||||
|
}
|
||||||
|
if !strings.Contains(line, "rx=") {
|
||||||
|
t.Fatalf("expected rx= in log line, got %q", line)
|
||||||
|
}
|
||||||
|
if !strings.Contains(line, "tx=") {
|
||||||
|
t.Fatalf("expected tx= in log line, got %q", line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAccessLog_OnClose_Bad verifies a nil miner close event produces no output.
|
||||||
|
//
|
||||||
|
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||||
|
// al.OnClose(proxy.Event{Miner: nil}) // no-op
|
||||||
|
func TestAccessLog_OnClose_Bad(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "access.log")
|
||||||
|
al := NewAccessLog(path)
|
||||||
|
defer al.Close()
|
||||||
|
|
||||||
|
al.OnClose(proxy.Event{Miner: nil})
|
||||||
|
al.Close()
|
||||||
|
|
||||||
|
if _, err := os.Stat(path); err == nil {
|
||||||
|
data, _ := os.ReadFile(path)
|
||||||
|
if len(data) > 0 {
|
||||||
|
t.Fatalf("expected no output for nil miner, got %q", string(data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAccessLog_OnClose_Ugly verifies close on an empty-path log is a no-op.
|
||||||
|
//
|
||||||
|
// al := log.NewAccessLog("")
|
||||||
|
// al.OnClose(proxy.Event{Miner: miner}) // no-op, empty path
|
||||||
|
func TestAccessLog_OnClose_Ugly(t *testing.T) {
|
||||||
|
al := NewAccessLog("")
|
||||||
|
defer al.Close()
|
||||||
|
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
al.OnClose(proxy.Event{Miner: miner})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestShareLog_OnAccept_Good verifies an ACCEPT line is written with diff and latency.
|
||||||
|
//
|
||||||
|
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||||
|
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000, Latency: 82})
|
||||||
|
func TestShareLog_OnAccept_Good(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "shares.log")
|
||||||
|
sl := NewShareLog(path)
|
||||||
|
defer sl.Close()
|
||||||
|
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000, Latency: 82})
|
||||||
|
sl.Close()
|
||||||
|
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected log file to exist: %v", err)
|
||||||
|
}
|
||||||
|
line := strings.TrimSpace(string(data))
|
||||||
|
if !strings.Contains(line, "ACCEPT") {
|
||||||
|
t.Fatalf("expected ACCEPT in log line, got %q", line)
|
||||||
|
}
|
||||||
|
if !strings.Contains(line, "diff=100000") {
|
||||||
|
t.Fatalf("expected diff=100000 in log line, got %q", line)
|
||||||
|
}
|
||||||
|
if !strings.Contains(line, "latency=82ms") {
|
||||||
|
t.Fatalf("expected latency=82ms in log line, got %q", line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestShareLog_OnAccept_Bad verifies a nil miner accept event produces no output.
|
||||||
|
//
|
||||||
|
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||||
|
// sl.OnAccept(proxy.Event{Miner: nil}) // no-op
|
||||||
|
func TestShareLog_OnAccept_Bad(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "shares.log")
|
||||||
|
sl := NewShareLog(path)
|
||||||
|
defer sl.Close()
|
||||||
|
|
||||||
|
sl.OnAccept(proxy.Event{Miner: nil, Diff: 100000})
|
||||||
|
sl.Close()
|
||||||
|
|
||||||
|
if _, err := os.Stat(path); err == nil {
|
||||||
|
data, _ := os.ReadFile(path)
|
||||||
|
if len(data) > 0 {
|
||||||
|
t.Fatalf("expected no output for nil miner, got %q", string(data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestShareLog_OnAccept_Ugly verifies a nil ShareLog does not panic.
|
||||||
|
//
|
||||||
|
// var sl *log.ShareLog
|
||||||
|
// sl.OnAccept(proxy.Event{Miner: miner}) // no-op, no panic
|
||||||
|
func TestShareLog_OnAccept_Ugly(t *testing.T) {
|
||||||
|
var sl *ShareLog
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestShareLog_OnReject_Good verifies a REJECT line is written with the rejection reason.
|
||||||
|
//
|
||||||
|
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||||
|
// sl.OnReject(proxy.Event{Miner: miner, Error: "Low difficulty share"})
|
||||||
|
func TestShareLog_OnReject_Good(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "shares.log")
|
||||||
|
sl := NewShareLog(path)
|
||||||
|
defer sl.Close()
|
||||||
|
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
sl.OnReject(proxy.Event{Miner: miner, Error: "Low difficulty share"})
|
||||||
|
sl.Close()
|
||||||
|
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected log file to exist: %v", err)
|
||||||
|
}
|
||||||
|
line := strings.TrimSpace(string(data))
|
||||||
|
if !strings.Contains(line, "REJECT") {
|
||||||
|
t.Fatalf("expected REJECT in log line, got %q", line)
|
||||||
|
}
|
||||||
|
if !strings.Contains(line, "Low difficulty share") {
|
||||||
|
t.Fatalf("expected rejection reason in log line, got %q", line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestShareLog_OnReject_Bad verifies a nil miner reject event produces no output.
|
||||||
|
//
|
||||||
|
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||||
|
// sl.OnReject(proxy.Event{Miner: nil}) // no-op
|
||||||
|
func TestShareLog_OnReject_Bad(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "shares.log")
|
||||||
|
sl := NewShareLog(path)
|
||||||
|
defer sl.Close()
|
||||||
|
|
||||||
|
sl.OnReject(proxy.Event{Miner: nil, Error: "Low difficulty share"})
|
||||||
|
sl.Close()
|
||||||
|
|
||||||
|
if _, err := os.Stat(path); err == nil {
|
||||||
|
data, _ := os.ReadFile(path)
|
||||||
|
if len(data) > 0 {
|
||||||
|
t.Fatalf("expected no output for nil miner, got %q", string(data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestShareLog_OnReject_Ugly verifies an empty-path ShareLog silently discards the reject line.
|
||||||
|
//
|
||||||
|
// sl := log.NewShareLog("")
|
||||||
|
// sl.OnReject(proxy.Event{Miner: miner, Error: "reason"}) // no-op, empty path
|
||||||
|
func TestShareLog_OnReject_Ugly(t *testing.T) {
|
||||||
|
sl := NewShareLog("")
|
||||||
|
defer sl.Close()
|
||||||
|
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
sl.OnReject(proxy.Event{Miner: miner, Error: "reason"})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAccessLog_Close_Good verifies Close releases the file handle and is safe to call twice.
|
||||||
|
//
|
||||||
|
// al := log.NewAccessLog("/tmp/test-access.log")
|
||||||
|
// al.OnLogin(proxy.Event{Miner: miner})
|
||||||
|
// al.Close()
|
||||||
|
// al.Close() // double close is safe
|
||||||
|
func TestAccessLog_Close_Good(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "access.log")
|
||||||
|
al := NewAccessLog(path)
|
||||||
|
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
al.OnLogin(proxy.Event{Miner: miner})
|
||||||
|
al.Close()
|
||||||
|
al.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAccessLog_Close_Bad verifies Close on a nil AccessLog does not panic.
|
||||||
|
//
|
||||||
|
// var al *log.AccessLog
|
||||||
|
// al.Close() // no-op, no panic
|
||||||
|
func TestAccessLog_Close_Bad(t *testing.T) {
|
||||||
|
var al *AccessLog
|
||||||
|
al.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAccessLog_Close_Ugly verifies Close on a never-opened log does not panic.
|
||||||
|
//
|
||||||
|
// al := log.NewAccessLog("/nonexistent/dir/access.log")
|
||||||
|
// al.Close() // no file was ever opened
|
||||||
|
func TestAccessLog_Close_Ugly(t *testing.T) {
|
||||||
|
al := NewAccessLog("/nonexistent/dir/access.log")
|
||||||
|
al.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestShareLog_Close_Good verifies Close releases the file handle and is safe to call twice.
|
||||||
|
//
|
||||||
|
// sl := log.NewShareLog("/tmp/test-shares.log")
|
||||||
|
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 1000})
|
||||||
|
// sl.Close()
|
||||||
|
// sl.Close() // double close is safe
|
||||||
|
func TestShareLog_Close_Good(t *testing.T) {
|
||||||
|
path := filepath.Join(t.TempDir(), "shares.log")
|
||||||
|
sl := NewShareLog(path)
|
||||||
|
|
||||||
|
miner := newTestMiner(t)
|
||||||
|
sl.OnAccept(proxy.Event{Miner: miner, Diff: 1000})
|
||||||
|
sl.Close()
|
||||||
|
sl.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestShareLog_Close_Bad verifies Close on a nil ShareLog does not panic.
|
||||||
|
//
|
||||||
|
// var sl *log.ShareLog
|
||||||
|
// sl.Close() // no-op, no panic
|
||||||
|
func TestShareLog_Close_Bad(t *testing.T) {
|
||||||
|
var sl *ShareLog
|
||||||
|
sl.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestShareLog_Close_Ugly verifies Close on a never-opened log does not panic.
|
||||||
|
//
|
||||||
|
// sl := log.NewShareLog("/nonexistent/dir/shares.log")
|
||||||
|
// sl.Close() // no file was ever opened
|
||||||
|
func TestShareLog_Close_Ugly(t *testing.T) {
|
||||||
|
sl := NewShareLog("/nonexistent/dir/shares.log")
|
||||||
|
sl.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTestMiner creates a minimal miner for log testing using a net.Pipe connection.
|
||||||
|
func newTestMiner(t *testing.T) *proxy.Miner {
|
||||||
|
t.Helper()
|
||||||
|
client, server := net.Pipe()
|
||||||
|
t.Cleanup(func() {
|
||||||
|
_ = client.Close()
|
||||||
|
_ = server.Close()
|
||||||
|
})
|
||||||
|
miner := proxy.NewMiner(client, 3333, nil)
|
||||||
|
miner.SetID(1)
|
||||||
|
return miner
|
||||||
|
}
|
||||||
|
|
||||||
|
// pipeAddr satisfies the net.Addr interface for pipe-based test connections.
|
||||||
|
type pipeAddr struct{}
|
||||||
|
|
||||||
|
func (a pipeAddr) Network() string { return "pipe" }
|
||||||
|
func (a pipeAddr) String() string { return "pipe" }
|
||||||
|
|
||||||
|
// pipeConn wraps an os.Pipe as a net.Conn for tests that need a closeable socket.
|
||||||
|
type pipeConn struct {
|
||||||
|
*os.File
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *pipeConn) RemoteAddr() net.Addr { return pipeAddr{} }
|
||||||
|
func (p *pipeConn) LocalAddr() net.Addr { return pipeAddr{} }
|
||||||
|
func (p *pipeConn) SetDeadline(_ time.Time) error { return nil }
|
||||||
|
func (p *pipeConn) SetReadDeadline(_ time.Time) error { return nil }
|
||||||
|
func (p *pipeConn) SetWriteDeadline(_ time.Time) error { return nil }
|
||||||
|
|
@ -1,6 +1,9 @@
|
||||||
package log
|
package log
|
||||||
|
|
||||||
import "sync"
|
import (
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
// ShareLog writes share result lines to an append-only text file.
|
// ShareLog writes share result lines to an append-only text file.
|
||||||
//
|
//
|
||||||
|
|
@ -13,6 +16,5 @@ import "sync"
|
||||||
type ShareLog struct {
|
type ShareLog struct {
|
||||||
path string
|
path string
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
// f is opened append-only on first write; nil until first event.
|
file *os.File
|
||||||
// Uses core.File for I/O abstraction.
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
61
miner.go
61
miner.go
|
|
@ -25,28 +25,41 @@ const (
|
||||||
// m := proxy.NewMiner(conn, 3333, nil)
|
// m := proxy.NewMiner(conn, 3333, nil)
|
||||||
// m.Start()
|
// m.Start()
|
||||||
type Miner struct {
|
type Miner struct {
|
||||||
id int64 // monotonically increasing per-process; atomic assignment
|
id int64 // monotonically increasing per-process; atomic assignment
|
||||||
rpcID string // UUID v4 sent to miner as session id
|
rpcID string // UUID v4 sent to miner as session id
|
||||||
state MinerState
|
state MinerState
|
||||||
extAlgo bool // miner sent algo list in login params
|
extAlgo bool // miner sent algo list in login params
|
||||||
extNH bool // NiceHash mode active (fixed byte splitting)
|
loginAlgos []string
|
||||||
ip string // remote IP (without port, for logging)
|
extNH bool // NiceHash mode active (fixed byte splitting)
|
||||||
localPort uint16
|
algoEnabled bool // proxy is configured to negotiate the algo extension
|
||||||
user string // login params.login (wallet address), custom diff suffix stripped
|
ip string // remote IP (without port, for logging)
|
||||||
password string // login params.pass
|
remoteAddr string
|
||||||
agent string // login params.agent
|
localPort uint16
|
||||||
rigID string // login params.rigid (optional extension)
|
user string // login params.login (wallet address), custom diff suffix stripped
|
||||||
fixedByte uint8 // NiceHash slot index (0-255)
|
password string // login params.pass
|
||||||
mapperID int64 // which NonceMapper owns this miner; -1 = unassigned
|
agent string // login params.agent
|
||||||
routeID int64 // SimpleMapper ID in simple mode; -1 = unassigned
|
rigID string // login params.rigid (optional extension)
|
||||||
customDiff uint64 // 0 = use pool diff; non-zero = cap diff to this value
|
fixedByte uint8 // NiceHash slot index (0-255)
|
||||||
diff uint64 // last difficulty sent to this miner from the pool
|
mapperID int64 // which NonceMapper owns this miner; -1 = unassigned
|
||||||
rx uint64 // bytes received from miner
|
routeID int64 // SimpleMapper ID in simple mode; -1 = unassigned
|
||||||
tx uint64 // bytes sent to miner
|
customDiff uint64 // 0 = use pool diff; non-zero = cap diff to this value
|
||||||
connectedAt time.Time
|
customDiffResolved bool
|
||||||
lastActivityAt time.Time
|
customDiffFromLogin bool
|
||||||
conn net.Conn
|
accessPassword string
|
||||||
tlsConn *tls.Conn // nil if plain TCP
|
globalDiff uint64
|
||||||
sendMu sync.Mutex // serialises writes to conn
|
diff uint64 // last difficulty sent to this miner from the pool
|
||||||
buf [16384]byte // per-miner send buffer; avoids per-write allocations
|
rx uint64 // bytes received from miner
|
||||||
|
tx uint64 // bytes sent from miner
|
||||||
|
currentJob Job
|
||||||
|
connectedAt time.Time
|
||||||
|
lastActivityAt time.Time
|
||||||
|
conn net.Conn
|
||||||
|
tlsConn *tls.Conn // nil if plain TCP
|
||||||
|
sendMu sync.Mutex // serialises writes to conn
|
||||||
|
buf [16384]byte // per-miner send buffer; avoids per-write allocations
|
||||||
|
onLogin func(*Miner)
|
||||||
|
onLoginReady func(*Miner)
|
||||||
|
onSubmit func(*Miner, *SubmitEvent)
|
||||||
|
onClose func(*Miner)
|
||||||
|
closeOnce sync.Once
|
||||||
}
|
}
|
||||||
|
|
|
||||||
467
miner_login_test.go
Normal file
467
miner_login_test.go
Normal file
|
|
@ -0,0 +1,467 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMiner_HandleLogin_Good(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(minerConn, 3333, nil)
|
||||||
|
miner.algoEnabled = true
|
||||||
|
miner.extNH = true
|
||||||
|
miner.fixedByte = 0x2a
|
||||||
|
miner.onLogin = func(m *Miner) {
|
||||||
|
m.SetMapperID(1)
|
||||||
|
}
|
||||||
|
miner.currentJob = Job{
|
||||||
|
Blob: strings.Repeat("0", 160),
|
||||||
|
JobID: "job-1",
|
||||||
|
Target: "b88d0600",
|
||||||
|
Algo: "cn/r",
|
||||||
|
Height: 7,
|
||||||
|
SeedHash: "seed",
|
||||||
|
}
|
||||||
|
|
||||||
|
params, err := json.Marshal(loginParams{
|
||||||
|
Login: "wallet",
|
||||||
|
Pass: "x",
|
||||||
|
Agent: "xmrig",
|
||||||
|
Algo: []string{"cn/r"},
|
||||||
|
RigID: "rig-1",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal login params: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
miner.handleLogin(stratumRequest{ID: 1, Method: "login", Params: params})
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read login response: %v", err)
|
||||||
|
}
|
||||||
|
<-done
|
||||||
|
|
||||||
|
var payload struct {
|
||||||
|
Error json.RawMessage `json:"error"`
|
||||||
|
Result struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
Extensions []string `json:"extensions"`
|
||||||
|
Job map[string]any `json:"job"`
|
||||||
|
} `json:"result"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(line, &payload); err != nil {
|
||||||
|
t.Fatalf("unmarshal login response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if string(payload.Error) != "null" {
|
||||||
|
t.Fatalf("expected login response error to be null, got %s", string(payload.Error))
|
||||||
|
}
|
||||||
|
if payload.Result.Status != "OK" {
|
||||||
|
t.Fatalf("expected login success, got %q", payload.Result.Status)
|
||||||
|
}
|
||||||
|
if payload.Result.ID == "" {
|
||||||
|
t.Fatalf("expected rpc id in login response")
|
||||||
|
}
|
||||||
|
if len(payload.Result.Extensions) != 1 || payload.Result.Extensions[0] != "algo" {
|
||||||
|
t.Fatalf("expected algo extension, got %#v", payload.Result.Extensions)
|
||||||
|
}
|
||||||
|
if got := miner.LoginAlgos(); len(got) != 1 || got[0] != "cn/r" {
|
||||||
|
t.Fatalf("expected login algo list to be stored, got %#v", got)
|
||||||
|
}
|
||||||
|
if got := payload.Result.Job["job_id"]; got != "job-1" {
|
||||||
|
t.Fatalf("expected embedded job, got %#v", got)
|
||||||
|
}
|
||||||
|
if got := payload.Result.Job["algo"]; got != "cn/r" {
|
||||||
|
t.Fatalf("expected embedded algo, got %#v", got)
|
||||||
|
}
|
||||||
|
blob, _ := payload.Result.Job["blob"].(string)
|
||||||
|
if blob[78:80] != "2a" {
|
||||||
|
t.Fatalf("expected fixed-byte patched blob, got %q", blob[78:80])
|
||||||
|
}
|
||||||
|
if miner.State() != MinerStateReady {
|
||||||
|
t.Fatalf("expected miner ready after login reply with job, got %d", miner.State())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_New_Watch_Good(t *testing.T) {
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
Watch: true,
|
||||||
|
configPath: "/tmp/proxy.json",
|
||||||
|
}
|
||||||
|
|
||||||
|
proxyInstance, result := New(cfg)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected valid proxy, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
if proxyInstance.watcher == nil {
|
||||||
|
t.Fatalf("expected config watcher when watch is enabled and source path is known")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMiner_HandleLogin_Ugly(t *testing.T) {
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
miner := &Miner{}
|
||||||
|
miner.SetID(int64(i + 1))
|
||||||
|
miner.SetMapperID(int64(i + 1))
|
||||||
|
}
|
||||||
|
|
||||||
|
serverConn, clientConn := net.Pipe()
|
||||||
|
defer serverConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(serverConn, 3333, nil)
|
||||||
|
miner.extNH = true
|
||||||
|
miner.onLogin = func(*Miner) {}
|
||||||
|
|
||||||
|
params, err := json.Marshal(loginParams{
|
||||||
|
Login: "wallet",
|
||||||
|
Pass: "x",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal login params: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan []byte, 1)
|
||||||
|
go func() {
|
||||||
|
line, readErr := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if readErr != nil {
|
||||||
|
done <- nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
done <- line
|
||||||
|
}()
|
||||||
|
|
||||||
|
miner.handleLogin(stratumRequest{ID: 2, Method: "login", Params: params})
|
||||||
|
|
||||||
|
line := <-done
|
||||||
|
if line == nil {
|
||||||
|
t.Fatal("expected login rejection response")
|
||||||
|
}
|
||||||
|
|
||||||
|
var payload struct {
|
||||||
|
Error struct {
|
||||||
|
Message string `json:"message"`
|
||||||
|
} `json:"error"`
|
||||||
|
Result map[string]any `json:"result"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(line, &payload); err != nil {
|
||||||
|
t.Fatalf("unmarshal login response: %v", err)
|
||||||
|
}
|
||||||
|
if payload.Error.Message != "Proxy is full, try again later" {
|
||||||
|
t.Fatalf("expected full-table error, got %q", payload.Error.Message)
|
||||||
|
}
|
||||||
|
if payload.Result != nil {
|
||||||
|
t.Fatalf("expected no login success payload, got %#v", payload.Result)
|
||||||
|
}
|
||||||
|
if miner.MapperID() != -1 {
|
||||||
|
t.Fatalf("expected rejected miner to remain unassigned, got mapper %d", miner.MapperID())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMiner_HandleLogin_FailedAssignmentDoesNotDispatchLoginEvent(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
proxyInstance := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByUser,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
},
|
||||||
|
events: NewEventBus(),
|
||||||
|
stats: NewStats(),
|
||||||
|
workers: NewWorkers(WorkersByUser, nil),
|
||||||
|
miners: make(map[int64]*Miner),
|
||||||
|
}
|
||||||
|
proxyInstance.events.Subscribe(EventLogin, proxyInstance.stats.OnLogin)
|
||||||
|
proxyInstance.workers.bindEvents(proxyInstance.events)
|
||||||
|
|
||||||
|
miner := NewMiner(minerConn, 3333, nil)
|
||||||
|
miner.extNH = true
|
||||||
|
miner.onLogin = func(*Miner) {}
|
||||||
|
miner.onLoginReady = func(m *Miner) {
|
||||||
|
proxyInstance.events.Dispatch(Event{Type: EventLogin, Miner: m})
|
||||||
|
}
|
||||||
|
proxyInstance.miners[miner.ID()] = miner
|
||||||
|
|
||||||
|
params, err := json.Marshal(loginParams{
|
||||||
|
Login: "wallet",
|
||||||
|
Pass: "x",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal login params: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
go miner.handleLogin(stratumRequest{ID: 12, Method: "login", Params: params})
|
||||||
|
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read login rejection: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var payload struct {
|
||||||
|
Error struct {
|
||||||
|
Message string `json:"message"`
|
||||||
|
} `json:"error"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(line, &payload); err != nil {
|
||||||
|
t.Fatalf("unmarshal login rejection: %v", err)
|
||||||
|
}
|
||||||
|
if payload.Error.Message != "Proxy is full, try again later" {
|
||||||
|
t.Fatalf("expected full-table rejection, got %q", payload.Error.Message)
|
||||||
|
}
|
||||||
|
if now, max := proxyInstance.MinerCount(); now != 0 || max != 0 {
|
||||||
|
t.Fatalf("expected failed login not to affect miner counts, got now=%d max=%d", now, max)
|
||||||
|
}
|
||||||
|
if records := proxyInstance.WorkerRecords(); len(records) != 0 {
|
||||||
|
t.Fatalf("expected failed login not to create worker records, got %d", len(records))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMiner_HandleLogin_CustomDiffCap_Good(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(minerConn, 3333, nil)
|
||||||
|
miner.onLogin = func(m *Miner) {
|
||||||
|
m.SetRouteID(1)
|
||||||
|
m.customDiff = 5000
|
||||||
|
}
|
||||||
|
miner.currentJob = Job{
|
||||||
|
Blob: strings.Repeat("0", 160),
|
||||||
|
JobID: "job-1",
|
||||||
|
Target: "01000000",
|
||||||
|
}
|
||||||
|
|
||||||
|
params, err := json.Marshal(loginParams{
|
||||||
|
Login: "wallet",
|
||||||
|
Pass: "x",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal login params: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
go miner.handleLogin(stratumRequest{ID: 3, Method: "login", Params: params})
|
||||||
|
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read login response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var payload struct {
|
||||||
|
Result struct {
|
||||||
|
Job struct {
|
||||||
|
Target string `json:"target"`
|
||||||
|
} `json:"job"`
|
||||||
|
} `json:"result"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(line, &payload); err != nil {
|
||||||
|
t.Fatalf("unmarshal login response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
originalDiff := miner.currentJob.DifficultyFromTarget()
|
||||||
|
cappedDiff := Job{Target: payload.Result.Job.Target}.DifficultyFromTarget()
|
||||||
|
if cappedDiff == 0 || cappedDiff > 5000 {
|
||||||
|
t.Fatalf("expected capped difficulty at or below 5000, got %d", cappedDiff)
|
||||||
|
}
|
||||||
|
if cappedDiff >= originalDiff {
|
||||||
|
t.Fatalf("expected lowered target difficulty below %d, got %d", originalDiff, cappedDiff)
|
||||||
|
}
|
||||||
|
if miner.diff != cappedDiff {
|
||||||
|
t.Fatalf("expected miner diff %d, got %d", cappedDiff, miner.diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMiner_HandleLogin_CustomDiffSuffix_Good(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(minerConn, 3333, nil)
|
||||||
|
miner.onLogin = func(m *Miner) {
|
||||||
|
m.SetRouteID(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
params, err := json.Marshal(loginParams{
|
||||||
|
Login: "wallet+50000",
|
||||||
|
Pass: "x",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal login params: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
go miner.handleLogin(stratumRequest{ID: 4, Method: "login", Params: params})
|
||||||
|
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read login response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var payload struct {
|
||||||
|
Result struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
} `json:"result"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(line, &payload); err != nil {
|
||||||
|
t.Fatalf("unmarshal login response: %v", err)
|
||||||
|
}
|
||||||
|
if payload.Result.Status != "OK" {
|
||||||
|
t.Fatalf("expected login success, got %q", payload.Result.Status)
|
||||||
|
}
|
||||||
|
if got := miner.User(); got != "wallet" {
|
||||||
|
t.Fatalf("expected stripped wallet name, got %q", got)
|
||||||
|
}
|
||||||
|
if got := miner.customDiff; got != 50000 {
|
||||||
|
t.Fatalf("expected custom diff 50000, got %d", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMiner_HandleKeepalived_Good(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(minerConn, 3333, nil)
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
miner.handleKeepalived(stratumRequest{ID: 9, Method: "keepalived"})
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read keepalived response: %v", err)
|
||||||
|
}
|
||||||
|
<-done
|
||||||
|
|
||||||
|
var payload map[string]json.RawMessage
|
||||||
|
if err := json.Unmarshal(line, &payload); err != nil {
|
||||||
|
t.Fatalf("unmarshal keepalived response: %v", err)
|
||||||
|
}
|
||||||
|
if _, ok := payload["error"]; !ok {
|
||||||
|
t.Fatalf("expected keepalived response to include error field, got %s", string(line))
|
||||||
|
}
|
||||||
|
if string(payload["error"]) != "null" {
|
||||||
|
t.Fatalf("expected keepalived response error to be null, got %s", string(payload["error"]))
|
||||||
|
}
|
||||||
|
var result struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(payload["result"], &result); err != nil {
|
||||||
|
t.Fatalf("unmarshal keepalived result: %v", err)
|
||||||
|
}
|
||||||
|
if result.Status != "KEEPALIVED" {
|
||||||
|
t.Fatalf("expected KEEPALIVED status, got %q", result.Status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMiner_ReadLoop_RFCLineLimit_Good(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(minerConn, 3333, nil)
|
||||||
|
miner.onLogin = func(m *Miner) {
|
||||||
|
m.SetRouteID(1)
|
||||||
|
}
|
||||||
|
miner.Start()
|
||||||
|
|
||||||
|
params, err := json.Marshal(loginParams{
|
||||||
|
Login: "wallet",
|
||||||
|
Pass: "x",
|
||||||
|
Agent: strings.Repeat("a", 5000),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal login params: %v", err)
|
||||||
|
}
|
||||||
|
request, err := json.Marshal(stratumRequest{ID: 4, Method: "login", Params: params})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal request: %v", err)
|
||||||
|
}
|
||||||
|
if len(request) >= maxStratumLineLength {
|
||||||
|
t.Fatalf("expected test request below RFC limit, got %d bytes", len(request))
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := clientConn.Write(append(request, '\n')); err != nil {
|
||||||
|
t.Fatalf("write login request: %v", err)
|
||||||
|
}
|
||||||
|
_ = clientConn.SetReadDeadline(time.Now().Add(time.Second))
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read login response: %v", err)
|
||||||
|
}
|
||||||
|
if len(line) == 0 {
|
||||||
|
t.Fatal("expected login response for request under RFC limit")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMiner_ReadLoop_RFCLineLimit_Ugly(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(minerConn, 3333, nil)
|
||||||
|
miner.Start()
|
||||||
|
|
||||||
|
params, err := json.Marshal(loginParams{
|
||||||
|
Login: "wallet",
|
||||||
|
Pass: "x",
|
||||||
|
Agent: strings.Repeat("b", maxStratumLineLength),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal login params: %v", err)
|
||||||
|
}
|
||||||
|
request, err := json.Marshal(stratumRequest{ID: 5, Method: "login", Params: params})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal request: %v", err)
|
||||||
|
}
|
||||||
|
if len(request) <= maxStratumLineLength {
|
||||||
|
t.Fatalf("expected test request above RFC limit, got %d bytes", len(request))
|
||||||
|
}
|
||||||
|
|
||||||
|
writeDone := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
_, writeErr := clientConn.Write(append(request, '\n'))
|
||||||
|
writeDone <- writeErr
|
||||||
|
}()
|
||||||
|
|
||||||
|
var writeErr error
|
||||||
|
select {
|
||||||
|
case writeErr = <-writeDone:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("timed out writing oversized request")
|
||||||
|
}
|
||||||
|
if writeErr == nil {
|
||||||
|
_ = clientConn.SetReadDeadline(time.Now().Add(time.Second))
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err == nil || len(line) > 0 {
|
||||||
|
t.Fatalf("expected oversized request to close the connection, got line=%q err=%v", string(line), err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(writeErr.Error(), "closed pipe") {
|
||||||
|
t.Fatalf("expected oversized request to close the connection, got write error %v", writeErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
43
miner_wire_test.go
Normal file
43
miner_wire_test.go
Normal file
|
|
@ -0,0 +1,43 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMiner_Success_WritesNullError_Good(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(minerConn, 3333, nil)
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
miner.Success(7, "OK")
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read success response: %v", err)
|
||||||
|
}
|
||||||
|
<-done
|
||||||
|
|
||||||
|
var payload struct {
|
||||||
|
Error json.RawMessage `json:"error"`
|
||||||
|
Result struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
} `json:"result"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(line, &payload); err != nil {
|
||||||
|
t.Fatalf("unmarshal success response: %v", err)
|
||||||
|
}
|
||||||
|
if string(payload.Error) != "null" {
|
||||||
|
t.Fatalf("expected success response error to be null, got %s", string(payload.Error))
|
||||||
|
}
|
||||||
|
if payload.Result.Status != "OK" {
|
||||||
|
t.Fatalf("expected success status OK, got %q", payload.Result.Status)
|
||||||
|
}
|
||||||
|
}
|
||||||
87
miners_document_test.go
Normal file
87
miners_document_test.go
Normal file
|
|
@ -0,0 +1,87 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestProxy_MinersDocument_Good(t *testing.T) {
|
||||||
|
p := &Proxy{
|
||||||
|
miners: map[int64]*Miner{
|
||||||
|
1: {
|
||||||
|
id: 1,
|
||||||
|
ip: "10.0.0.1:49152",
|
||||||
|
tx: 4096,
|
||||||
|
rx: 512,
|
||||||
|
state: MinerStateReady,
|
||||||
|
diff: 100000,
|
||||||
|
user: "WALLET",
|
||||||
|
password: "secret",
|
||||||
|
rigID: "rig-alpha",
|
||||||
|
agent: "XMRig/6.21.0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
document := p.MinersDocument()
|
||||||
|
if len(document.Miners) != 1 {
|
||||||
|
t.Fatalf("expected one miner row, got %d", len(document.Miners))
|
||||||
|
}
|
||||||
|
row := document.Miners[0]
|
||||||
|
if len(row) != 10 {
|
||||||
|
t.Fatalf("expected 10 miner columns, got %d", len(row))
|
||||||
|
}
|
||||||
|
if row[7] != "********" {
|
||||||
|
t.Fatalf("expected masked password, got %#v", row[7])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_MinersDocument_Bad(t *testing.T) {
|
||||||
|
var p *Proxy
|
||||||
|
|
||||||
|
document := p.MinersDocument()
|
||||||
|
if len(document.Miners) != 0 {
|
||||||
|
t.Fatalf("expected no miners for a nil proxy, got %d", len(document.Miners))
|
||||||
|
}
|
||||||
|
if len(document.Format) != 10 {
|
||||||
|
t.Fatalf("expected miner format columns to remain stable, got %d", len(document.Format))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_MinersDocument_Ugly(t *testing.T) {
|
||||||
|
p := &Proxy{
|
||||||
|
miners: map[int64]*Miner{
|
||||||
|
1: {
|
||||||
|
id: 1,
|
||||||
|
ip: "10.0.0.1:49152",
|
||||||
|
tx: 4096,
|
||||||
|
rx: 512,
|
||||||
|
state: MinerStateReady,
|
||||||
|
diff: 100000,
|
||||||
|
user: "WALLET",
|
||||||
|
password: "secret-a",
|
||||||
|
rigID: "rig-alpha",
|
||||||
|
agent: "XMRig/6.21.0",
|
||||||
|
},
|
||||||
|
2: {
|
||||||
|
id: 2,
|
||||||
|
ip: "10.0.0.2:49152",
|
||||||
|
tx: 2048,
|
||||||
|
rx: 256,
|
||||||
|
state: MinerStateWaitReady,
|
||||||
|
diff: 50000,
|
||||||
|
user: "WALLET2",
|
||||||
|
password: "secret-b",
|
||||||
|
rigID: "rig-beta",
|
||||||
|
agent: "XMRig/6.22.0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
document := p.MinersDocument()
|
||||||
|
if len(document.Miners) != 2 {
|
||||||
|
t.Fatalf("expected two miner rows, got %d", len(document.Miners))
|
||||||
|
}
|
||||||
|
for i, row := range document.Miners {
|
||||||
|
if row[7] != "********" {
|
||||||
|
t.Fatalf("expected masked password in row %d, got %#v", i, row[7])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -1,7 +1,9 @@
|
||||||
// Package pool implements the outbound stratum pool client and failover strategy.
|
// Package pool implements the outbound pool client and failover strategy.
|
||||||
//
|
//
|
||||||
// client := pool.NewStratumClient(poolCfg, listener)
|
// client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.example:3333", User: "WALLET", Pass: "x"}, listener)
|
||||||
// client.Connect()
|
// if result := client.Connect(); result.OK {
|
||||||
|
// client.Login()
|
||||||
|
// }
|
||||||
package pool
|
package pool
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
@ -9,27 +11,31 @@ import (
|
||||||
"net"
|
"net"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"dappco.re/go/core/proxy"
|
"dappco.re/go/proxy"
|
||||||
)
|
)
|
||||||
|
|
||||||
// StratumClient is one outbound stratum TCP (optionally TLS) connection to a pool.
|
// client := pool.NewStratumClient(poolCfg, listener)
|
||||||
// The proxy presents itself to the pool as a standard stratum miner using the
|
|
||||||
// wallet address and password from PoolConfig.
|
|
||||||
//
|
//
|
||||||
// client := pool.NewStratumClient(poolCfg, listener)
|
// if result := client.Connect(); result.OK {
|
||||||
// client.Connect()
|
// client.Login()
|
||||||
|
// }
|
||||||
type StratumClient struct {
|
type StratumClient struct {
|
||||||
cfg proxy.PoolConfig
|
config proxy.PoolConfig
|
||||||
listener StratumListener
|
listener StratumListener
|
||||||
conn net.Conn
|
conn net.Conn
|
||||||
tlsConn *tls.Conn // nil if plain TCP
|
tlsConn *tls.Conn // nil if plain TCP
|
||||||
sessionID string // pool-assigned session id from login reply
|
sessionID string // pool-assigned session id from login reply
|
||||||
seq int64 // atomic JSON-RPC request id counter
|
seq int64 // atomic JSON-RPC request id counter
|
||||||
active bool // true once first job received
|
active bool // true once first job received
|
||||||
sendMu sync.Mutex
|
pending map[int64]struct{}
|
||||||
|
closedOnce sync.Once
|
||||||
|
mu sync.Mutex
|
||||||
|
sendMu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// StratumListener receives events from the pool connection.
|
// type listener struct{}
|
||||||
|
//
|
||||||
|
// func (listener) OnJob(job proxy.Job) {}
|
||||||
type StratumListener interface {
|
type StratumListener interface {
|
||||||
// OnJob is called when the pool pushes a new job notification or the login reply contains a job.
|
// OnJob is called when the pool pushes a new job notification or the login reply contains a job.
|
||||||
OnJob(job proxy.Job)
|
OnJob(job proxy.Job)
|
||||||
|
|
|
||||||
549
pool/impl.go
Normal file
549
pool/impl.go
Normal file
|
|
@ -0,0 +1,549 @@
|
||||||
|
package pool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"crypto/sha256"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewStrategyFactory creates a StrategyFactory for the supplied config.
|
||||||
|
//
|
||||||
|
// factory := pool.NewStrategyFactory(&proxy.Config{Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}}})
|
||||||
|
// strategy := factory(listener)
|
||||||
|
func NewStrategyFactory(config *proxy.Config) StrategyFactory {
|
||||||
|
return func(listener StratumListener) Strategy {
|
||||||
|
var pools []proxy.PoolConfig
|
||||||
|
if config != nil {
|
||||||
|
pools = config.Pools
|
||||||
|
}
|
||||||
|
return NewFailoverStrategy(pools, listener, config)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.example:3333", User: "WALLET", Pass: "x"}, listener)
|
||||||
|
//
|
||||||
|
// if result := client.Connect(); result.OK {
|
||||||
|
// client.Login()
|
||||||
|
// }
|
||||||
|
func NewStratumClient(poolConfig proxy.PoolConfig, listener StratumListener) *StratumClient {
|
||||||
|
return &StratumClient{
|
||||||
|
config: poolConfig,
|
||||||
|
listener: listener,
|
||||||
|
pending: make(map[int64]struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsActive reports whether the client has received at least one job.
|
||||||
|
func (c *StratumClient) IsActive() bool {
|
||||||
|
if c == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
return c.active
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect dials the configured pool address over TCP and, when TLS is
// enabled, performs the handshake with optional SHA-256 certificate
// fingerprint pinning before starting the background read loop.
//
// result := client.Connect()
func (c *StratumClient) Connect() proxy.Result {
	if c == nil {
		return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.client", "client is nil", nil)}
	}
	addr := c.config.URL
	if addr == "" {
		return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.client", "pool url is empty", nil)}
	}
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.client", "dial pool failed", err)}
	}
	if c.config.TLS {
		// Derive the SNI server name from the configured address; strip the
		// port when one is present.
		host := addr
		if strings.Contains(addr, ":") {
			host, _, _ = net.SplitHostPort(addr)
		}
		// NOTE(review): chain verification is disabled; trust rests entirely
		// on the optional fingerprint pin below. Confirm this is intended
		// (common for pools with self-signed certificates).
		tlsCfg := &tls.Config{InsecureSkipVerify: true, ServerName: host}
		tlsConn := tls.Client(conn, tlsCfg)
		if err := tlsConn.Handshake(); err != nil {
			_ = conn.Close()
			return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.tls", "handshake failed", err)}
		}
		// Optional pin: compare the leaf certificate's SHA-256 digest against
		// the configured lowercase hex fingerprint.
		if fp := strings.TrimSpace(strings.ToLower(c.config.TLSFingerprint)); fp != "" {
			cert := tlsConn.ConnectionState().PeerCertificates
			if len(cert) == 0 {
				_ = tlsConn.Close()
				return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.tls", "missing certificate", nil)}
			}
			sum := sha256.Sum256(cert[0].Raw)
			if hex.EncodeToString(sum[:]) != fp {
				_ = tlsConn.Close()
				return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.tls", "tls fingerprint mismatch", nil)}
			}
		}
		c.conn = tlsConn
		c.tlsConn = tlsConn
	} else {
		c.conn = conn
	}
	// Start consuming pool messages; the goroutine exits (and notifies the
	// listener) when the connection drops.
	go c.readLoop()
	return proxy.Result{OK: true}
}
|
||||||
|
|
||||||
|
// client.Login()
|
||||||
|
//
|
||||||
|
// A login reply with a job triggers `OnJob` immediately.
|
||||||
|
func (c *StratumClient) Login() {
|
||||||
|
if c == nil || c.conn == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params := map[string]any{
|
||||||
|
"login": c.config.User,
|
||||||
|
"pass": c.config.Pass,
|
||||||
|
}
|
||||||
|
if c.config.RigID != "" {
|
||||||
|
params["rigid"] = c.config.RigID
|
||||||
|
}
|
||||||
|
if c.config.Algo != "" {
|
||||||
|
params["algo"] = []string{c.config.Algo}
|
||||||
|
}
|
||||||
|
req := map[string]any{
|
||||||
|
"id": 1,
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"method": "login",
|
||||||
|
"params": params,
|
||||||
|
}
|
||||||
|
_ = c.writeJSON(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// seq := client.Submit("job-1", "deadbeef", "HASH64HEX", "cn/r")
|
||||||
|
func (c *StratumClient) Submit(jobID, nonce, result, algo string) int64 {
|
||||||
|
if c == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
seq := atomic.AddInt64(&c.seq, 1)
|
||||||
|
c.mu.Lock()
|
||||||
|
c.pending[seq] = struct{}{}
|
||||||
|
sessionID := c.sessionID
|
||||||
|
c.mu.Unlock()
|
||||||
|
req := map[string]any{
|
||||||
|
"id": seq,
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"method": "submit",
|
||||||
|
"params": map[string]any{
|
||||||
|
"id": sessionID,
|
||||||
|
"job_id": jobID,
|
||||||
|
"nonce": nonce,
|
||||||
|
"result": result,
|
||||||
|
"algo": algo,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := c.writeJSON(req); err != nil {
|
||||||
|
c.mu.Lock()
|
||||||
|
delete(c.pending, seq)
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
return seq
|
||||||
|
}
|
||||||
|
|
||||||
|
// client.Keepalive()
|
||||||
|
func (c *StratumClient) Keepalive() {
|
||||||
|
if c == nil || c.conn == nil || !c.IsActive() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req := map[string]any{
|
||||||
|
"id": atomic.AddInt64(&c.seq, 1),
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"method": "keepalived",
|
||||||
|
"params": map[string]any{
|
||||||
|
"id": c.sessionID,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_ = c.writeJSON(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// client.Disconnect()
|
||||||
|
func (c *StratumClient) Disconnect() {
|
||||||
|
if c == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.closedOnce.Do(func() {
|
||||||
|
conn := c.resetConnectionState()
|
||||||
|
if conn != nil {
|
||||||
|
_ = conn.Close()
|
||||||
|
}
|
||||||
|
if c.listener != nil {
|
||||||
|
c.listener.OnDisconnect()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *StratumClient) notifyDisconnect() {
|
||||||
|
c.closedOnce.Do(func() {
|
||||||
|
c.resetConnectionState()
|
||||||
|
if c.listener != nil {
|
||||||
|
c.listener.OnDisconnect()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *StratumClient) resetConnectionState() net.Conn {
|
||||||
|
if c == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
conn := c.conn
|
||||||
|
c.conn = nil
|
||||||
|
c.tlsConn = nil
|
||||||
|
c.sessionID = ""
|
||||||
|
c.active = false
|
||||||
|
c.pending = make(map[int64]struct{})
|
||||||
|
return conn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *StratumClient) writeJSON(payload any) error {
|
||||||
|
c.sendMu.Lock()
|
||||||
|
defer c.sendMu.Unlock()
|
||||||
|
if c.conn == nil {
|
||||||
|
return proxy.NewScopedError("proxy.pool.client", "connection is nil", nil)
|
||||||
|
}
|
||||||
|
data, err := json.Marshal(payload)
|
||||||
|
if err != nil {
|
||||||
|
return proxy.NewScopedError("proxy.pool.client", "marshal request failed", err)
|
||||||
|
}
|
||||||
|
data = append(data, '\n')
|
||||||
|
_, err = c.conn.Write(data)
|
||||||
|
if err != nil {
|
||||||
|
c.notifyDisconnect()
|
||||||
|
return proxy.NewScopedError("proxy.pool.client", "write request failed", err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *StratumClient) readLoop() {
|
||||||
|
defer c.notifyDisconnect()
|
||||||
|
reader := bufio.NewReader(c.conn)
|
||||||
|
for {
|
||||||
|
line, isPrefix, err := reader.ReadLine()
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if isPrefix {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(line) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.handleMessage(line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleMessage parses one JSON-RPC frame from the pool and dispatches it:
// login replies (result carrying a session id and optionally a job), fatal
// login errors (id == 1), pushed "job" notifications, and submit replies
// matched against the pending sequence set. Malformed frames are dropped
// silently.
func (c *StratumClient) handleMessage(line []byte) {
	var base struct {
		ID     any             `json:"id"`
		Method string          `json:"method"`
		Result json.RawMessage `json:"result"`
		Error  json.RawMessage `json:"error"`
		Params json.RawMessage `json:"params"`
	}
	if err := json.Unmarshal(line, &base); err != nil {
		return
	}

	// A result payload may be a login reply: capture the session id and, when
	// a job is attached, mark the client active and forward the job.
	if len(base.Result) > 0 {
		var loginReply struct {
			ID  string `json:"id"`
			Job *struct {
				Blob     string `json:"blob"`
				JobID    string `json:"job_id"`
				Target   string `json:"target"`
				Algo     string `json:"algo"`
				Height   uint64 `json:"height"`
				SeedHash string `json:"seed_hash"`
				ID       string `json:"id"`
			} `json:"job"`
		}
		if err := json.Unmarshal(base.Result, &loginReply); err == nil {
			if loginReply.ID != "" {
				c.mu.Lock()
				c.sessionID = loginReply.ID
				c.mu.Unlock()
			}
			if loginReply.Job != nil && loginReply.Job.JobID != "" {
				c.mu.Lock()
				c.active = true
				c.mu.Unlock()
				if c.listener != nil {
					c.listener.OnJob(proxy.Job{
						Blob:     loginReply.Job.Blob,
						JobID:    loginReply.Job.JobID,
						Target:   loginReply.Job.Target,
						Algo:     loginReply.Job.Algo,
						Height:   loginReply.Job.Height,
						SeedHash: loginReply.Job.SeedHash,
						ClientID: loginReply.Job.ID,
					})
				}
				// Fully handled as a login-with-job reply; no fallthrough.
				return
			}
		}
	}

	// An error on request id 1 is a failed login: the session is unusable.
	if len(base.Error) > 0 && requestID(base.ID) == 1 {
		c.notifyDisconnect()
		return
	}

	// Pushed job notification.
	if base.Method == "job" {
		var params struct {
			Blob     string `json:"blob"`
			JobID    string `json:"job_id"`
			Target   string `json:"target"`
			Algo     string `json:"algo"`
			Height   uint64 `json:"height"`
			SeedHash string `json:"seed_hash"`
			ID       string `json:"id"`
		}
		if err := json.Unmarshal(base.Params, &params); err != nil {
			return
		}
		c.mu.Lock()
		c.active = true
		c.mu.Unlock()
		if c.listener != nil {
			c.listener.OnJob(proxy.Job{
				Blob:     params.Blob,
				JobID:    params.JobID,
				Target:   params.Target,
				Algo:     params.Algo,
				Height:   params.Height,
				SeedHash: params.SeedHash,
				ClientID: params.ID,
			})
		}
		return
	}

	// Anything else must be a reply to a tracked submit request.
	seq := requestID(base.ID)
	if seq == 0 {
		return
	}
	c.mu.Lock()
	_, ok := c.pending[seq]
	if ok {
		delete(c.pending, seq)
	}
	c.mu.Unlock()
	if !ok {
		// Unknown or already-settled sequence; ignore.
		return
	}

	// A share counts as accepted when there is no error, or when the result
	// payload explicitly reports status "OK".
	var payload struct {
		Status string `json:"status"`
	}
	if len(base.Result) > 0 {
		_ = json.Unmarshal(base.Result, &payload)
	}
	accepted := len(base.Error) == 0
	if payload.Status != "" && strings.EqualFold(payload.Status, "OK") {
		accepted = true
	}
	errorMessage := ""
	if !accepted && len(base.Error) > 0 {
		var errPayload struct {
			Message string `json:"message"`
		}
		_ = json.Unmarshal(base.Error, &errPayload)
		errorMessage = errPayload.Message
	}
	if c.listener != nil {
		c.listener.OnResultAccepted(seq, accepted, errorMessage)
	}
}
|
||||||
|
|
||||||
|
// NewFailoverStrategy creates the ordered pool failover wrapper.
|
||||||
|
func NewFailoverStrategy(pools []proxy.PoolConfig, listener StratumListener, config *proxy.Config) *FailoverStrategy {
|
||||||
|
return &FailoverStrategy{
|
||||||
|
pools: pools,
|
||||||
|
listener: listener,
|
||||||
|
config: config,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// strategy.Connect()
|
||||||
|
func (s *FailoverStrategy) Connect() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
s.closing = false
|
||||||
|
s.connectLocked(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// connectLocked walks the enabled pools in order starting at index `start`,
// attaches the first client that connects, and logs it in. Retry count and
// pause come from the config when present (defaults: one round, one second).
// The caller must hold s.mu.
//
// NOTE(review): time.Sleep runs while s.mu is held, blocking Submit /
// Disconnect / Tick for the whole pause — confirm this is acceptable.
func (s *FailoverStrategy) connectLocked(start int) {
	enabled := enabledPools(s.currentPools())
	if len(enabled) == 0 {
		return
	}
	retries := 1
	retryPause := time.Second
	if s.config != nil {
		if s.config.Retries > 0 {
			retries = s.config.Retries
		}
		if s.config.RetryPause > 0 {
			retryPause = time.Duration(s.config.RetryPause) * time.Second
		}
	}
	for attempt := 0; attempt < retries; attempt++ {
		for i := 0; i < len(enabled); i++ {
			// Rotate through the list so reconnects can resume at `start`.
			index := (start + i) % len(enabled)
			poolCfg := enabled[index]
			client := NewStratumClient(poolCfg, s)
			if result := client.Connect(); result.OK {
				s.client = client
				s.current = index
				client.Login()
				return
			}
		}
		// Every enabled pool failed this round; pause before the next one.
		time.Sleep(retryPause)
	}
}
|
||||||
|
|
||||||
|
func (s *FailoverStrategy) currentPools() []proxy.PoolConfig {
|
||||||
|
if s == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if s.config != nil && len(s.config.Pools) > 0 {
|
||||||
|
return s.config.Pools
|
||||||
|
}
|
||||||
|
return s.pools
|
||||||
|
}
|
||||||
|
|
||||||
|
// seq := strategy.Submit(jobID, nonce, result, algo)
|
||||||
|
func (s *FailoverStrategy) Submit(jobID, nonce, result, algo string) int64 {
|
||||||
|
if s == nil || s.client == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return s.client.Submit(jobID, nonce, result, algo)
|
||||||
|
}
|
||||||
|
|
||||||
|
// strategy.Disconnect()
|
||||||
|
func (s *FailoverStrategy) Disconnect() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
client := s.client
|
||||||
|
s.closing = true
|
||||||
|
s.client = nil
|
||||||
|
s.mu.Unlock()
|
||||||
|
if client != nil {
|
||||||
|
client.Disconnect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// strategy.ReloadPools()
|
||||||
|
func (s *FailoverStrategy) ReloadPools() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
s.current = 0
|
||||||
|
s.mu.Unlock()
|
||||||
|
s.Disconnect()
|
||||||
|
s.Connect()
|
||||||
|
}
|
||||||
|
|
||||||
|
// active := strategy.IsActive()
|
||||||
|
func (s *FailoverStrategy) IsActive() bool {
|
||||||
|
return s != nil && s.client != nil && s.client.IsActive()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tick keeps an active pool connection alive when configured.
|
||||||
|
func (s *FailoverStrategy) Tick(ticks uint64) {
|
||||||
|
if s == nil || ticks == 0 || ticks%60 != 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
client := s.client
|
||||||
|
s.mu.Unlock()
|
||||||
|
if client != nil && client.config.Keepalive {
|
||||||
|
client.Keepalive()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnJob forwards the pool job to the outer listener.
|
||||||
|
func (s *FailoverStrategy) OnJob(job proxy.Job) {
|
||||||
|
if s != nil && s.listener != nil {
|
||||||
|
s.listener.OnJob(job)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnResultAccepted forwards the result status to the outer listener.
|
||||||
|
func (s *FailoverStrategy) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
|
||||||
|
if s != nil && s.listener != nil {
|
||||||
|
s.listener.OnResultAccepted(sequence, accepted, errorMessage)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnDisconnect drops the stale client. For an unexpected disconnect it
// notifies the outer listener and reconnects in the background; an
// intentional shutdown (closing flag set by Disconnect) stays silent and the
// flag is consumed.
//
// strategy.OnDisconnect()
func (s *FailoverStrategy) OnDisconnect() {
	if s == nil {
		return
	}
	s.mu.Lock()
	s.client = nil
	// Consume the intentional-shutdown flag while still holding the lock.
	closing := s.closing
	if closing {
		s.closing = false
	}
	s.mu.Unlock()
	if closing {
		// Disconnect() initiated this: no notification, no reconnect.
		return
	}
	if s.listener != nil {
		s.listener.OnDisconnect()
	}
	// Reconnect asynchronously so the caller (often the read loop) returns.
	go s.Connect()
}
|
||||||
|
|
||||||
|
func enabledPools(pools []proxy.PoolConfig) []proxy.PoolConfig {
|
||||||
|
out := make([]proxy.PoolConfig, 0, len(pools))
|
||||||
|
for _, poolCfg := range pools {
|
||||||
|
if poolCfg.Enabled {
|
||||||
|
out = append(out, poolCfg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// requestID coerces a JSON-RPC id of any supported dynamic type to int64.
// Unsupported types and unparsable strings map to 0.
func requestID(id any) int64 {
	switch v := id.(type) {
	case float64:
		return int64(v)
	case int64:
		return v
	case int:
		return int64(v)
	case string:
		parsed, _ := strconv.ParseInt(v, 10, 64)
		return parsed
	}
	return 0
}
|
||||||
168
pool/impl_test.go
Normal file
168
pool/impl_test.go
Normal file
|
|
@ -0,0 +1,168 @@
|
||||||
|
package pool

import (
	"testing"

	"dappco.re/go/proxy"
)

// TestFailoverStrategy_CurrentPools_Good verifies that currentPools follows the live config.
//
// strategy := pool.NewFailoverStrategy(cfg.Pools, nil, cfg)
// strategy.currentPools() // returns cfg.Pools
func TestFailoverStrategy_CurrentPools_Good(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
	}
	strategy := NewFailoverStrategy(cfg.Pools, nil, cfg)

	if got := len(strategy.currentPools()); got != 1 {
		t.Fatalf("expected 1 pool, got %d", got)
	}

	// Swapping the config's pool list must be visible through the shared pointer.
	cfg.Pools = []proxy.PoolConfig{{URL: "pool-b.example:4444", Enabled: true}}

	if got := strategy.currentPools(); len(got) != 1 || got[0].URL != "pool-b.example:4444" {
		t.Fatalf("expected current pools to follow config reload, got %+v", got)
	}
}

// TestFailoverStrategy_CurrentPools_Bad verifies that a nil strategy returns an empty pool list.
//
// var strategy *pool.FailoverStrategy
// strategy.currentPools() // nil
func TestFailoverStrategy_CurrentPools_Bad(t *testing.T) {
	var strategy *FailoverStrategy
	pools := strategy.currentPools()
	if pools != nil {
		t.Fatalf("expected nil pools from nil strategy, got %+v", pools)
	}
}

// TestFailoverStrategy_CurrentPools_Ugly verifies that a strategy with a nil config
// falls back to the pools passed at construction time.
//
// strategy := pool.NewFailoverStrategy(initialPools, nil, nil)
// strategy.currentPools() // returns initialPools
func TestFailoverStrategy_CurrentPools_Ugly(t *testing.T) {
	initialPools := []proxy.PoolConfig{
		{URL: "fallback.example:3333", Enabled: true},
		{URL: "fallback.example:4444", Enabled: false},
	}
	strategy := NewFailoverStrategy(initialPools, nil, nil)

	got := strategy.currentPools()
	if len(got) != 2 {
		t.Fatalf("expected 2 pools from constructor fallback, got %d", len(got))
	}
	if got[0].URL != "fallback.example:3333" {
		t.Fatalf("expected constructor pool URL, got %q", got[0].URL)
	}
}

// TestFailoverStrategy_EnabledPools_Good verifies that only enabled pools are selected.
//
// enabled := pool.enabledPools(pools) // filters to enabled-only
func TestFailoverStrategy_EnabledPools_Good(t *testing.T) {
	pools := []proxy.PoolConfig{
		{URL: "active.example:3333", Enabled: true},
		{URL: "disabled.example:3333", Enabled: false},
		{URL: "active2.example:3333", Enabled: true},
	}
	got := enabledPools(pools)
	if len(got) != 2 {
		t.Fatalf("expected 2 enabled pools, got %d", len(got))
	}
	if got[0].URL != "active.example:3333" || got[1].URL != "active2.example:3333" {
		t.Fatalf("expected only enabled pool URLs, got %+v", got)
	}
}

// TestFailoverStrategy_EnabledPools_Bad verifies that an empty pool list returns empty.
//
// pool.enabledPools(nil) // empty
func TestFailoverStrategy_EnabledPools_Bad(t *testing.T) {
	got := enabledPools(nil)
	if len(got) != 0 {
		t.Fatalf("expected 0 pools from nil input, got %d", len(got))
	}
}

// TestFailoverStrategy_EnabledPools_Ugly verifies that all-disabled pools return empty.
//
// pool.enabledPools([]proxy.PoolConfig{{Enabled: false}}) // empty
func TestFailoverStrategy_EnabledPools_Ugly(t *testing.T) {
	pools := []proxy.PoolConfig{
		{URL: "a.example:3333", Enabled: false},
		{URL: "b.example:3333", Enabled: false},
	}
	got := enabledPools(pools)
	if len(got) != 0 {
		t.Fatalf("expected 0 enabled pools when all disabled, got %d", len(got))
	}
}

// TestNewStrategyFactory_Good verifies the factory creates a strategy connected to the config.
//
// factory := pool.NewStrategyFactory(cfg)
// strategy := factory(listener) // creates FailoverStrategy
func TestNewStrategyFactory_Good(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	factory := NewStrategyFactory(cfg)
	if factory == nil {
		t.Fatal("expected a non-nil factory")
	}
	strategy := factory(nil)
	if strategy == nil {
		t.Fatal("expected a non-nil strategy from factory")
	}
	if strategy.IsActive() {
		t.Fatal("expected new strategy to be inactive before connecting")
	}
}

// TestNewStrategyFactory_Bad verifies a factory created with nil config does not panic.
//
// factory := pool.NewStrategyFactory(nil)
// strategy := factory(nil)
func TestNewStrategyFactory_Bad(t *testing.T) {
	factory := NewStrategyFactory(nil)
	strategy := factory(nil)
	if strategy == nil {
		t.Fatal("expected a non-nil strategy even from nil config")
	}
}

// TestNewStrategyFactory_Ugly verifies the factory forwards the correct pool list to the strategy.
//
// cfg.Pools = append(cfg.Pools, proxy.PoolConfig{URL: "added.example:3333", Enabled: true})
// strategy := factory(nil)
// // strategy sees the updated pools via the shared config pointer
func TestNewStrategyFactory_Ugly(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	factory := NewStrategyFactory(cfg)
	cfg.Pools = append(cfg.Pools, proxy.PoolConfig{URL: "added.example:3333", Enabled: true})

	strategy := factory(nil)
	fs, ok := strategy.(*FailoverStrategy)
	if !ok {
		t.Fatal("expected FailoverStrategy")
	}
	pools := fs.currentPools()
	if len(pools) != 2 {
		t.Fatalf("expected 2 pools after config update, got %d", len(pools))
	}
}
|
||||||
112
pool/keepalive_test.go
Normal file
112
pool/keepalive_test.go
Normal file
|
|
@ -0,0 +1,112 @@
|
||||||
|
package pool

import (
	"bufio"
	"encoding/json"
	"net"
	"testing"
	"time"
)

// TestStratumClient_Keepalive_Good verifies that an active client emits a
// "keepalived" request carrying its session id.
func TestStratumClient_Keepalive_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()
	defer clientConn.Close()

	client := &StratumClient{
		conn:      clientConn,
		active:    true,
		sessionID: "session-1",
	}

	// Keepalive blocks on the unbuffered pipe until the server side reads.
	done := make(chan struct{})
	go func() {
		client.Keepalive()
		close(done)
	}()

	line, err := bufio.NewReader(serverConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read keepalive request: %v", err)
	}
	<-done

	var payload map[string]any
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal keepalive request: %v", err)
	}
	if got := payload["method"]; got != "keepalived" {
		t.Fatalf("expected keepalived method, got %#v", got)
	}
	params, ok := payload["params"].(map[string]any)
	if !ok {
		t.Fatalf("expected params object, got %#v", payload["params"])
	}
	if got := params["id"]; got != "session-1" {
		t.Fatalf("expected session id in keepalive payload, got %#v", got)
	}
}

// TestStratumClient_Keepalive_Bad verifies that an inactive client sends
// nothing on Keepalive.
func TestStratumClient_Keepalive_Bad(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()
	defer clientConn.Close()

	client := &StratumClient{
		conn:   clientConn,
		active: false,
	}

	client.Keepalive()

	// A short read deadline proves no bytes arrived on the wire.
	if err := serverConn.SetReadDeadline(time.Now().Add(50 * time.Millisecond)); err != nil {
		t.Fatalf("set deadline: %v", err)
	}
	buf := make([]byte, 1)
	if _, err := serverConn.Read(buf); err == nil {
		t.Fatalf("expected no keepalive data while inactive")
	}
}

// TestStratumClient_Keepalive_Ugly verifies that consecutive keepalives carry
// unique request ids.
func TestStratumClient_Keepalive_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()
	defer clientConn.Close()

	client := &StratumClient{
		conn:      clientConn,
		active:    true,
		sessionID: "session-2",
	}

	reader := bufio.NewReader(serverConn)
	done := make(chan struct{})
	go func() {
		client.Keepalive()
		client.Keepalive()
		close(done)
	}()

	first, err := reader.ReadBytes('\n')
	if err != nil {
		t.Fatalf("read first keepalive request: %v", err)
	}
	second, err := reader.ReadBytes('\n')
	if err != nil {
		t.Fatalf("read second keepalive request: %v", err)
	}
	<-done

	var firstPayload map[string]any
	if err := json.Unmarshal(first, &firstPayload); err != nil {
		t.Fatalf("unmarshal first keepalive request: %v", err)
	}
	var secondPayload map[string]any
	if err := json.Unmarshal(second, &secondPayload); err != nil {
		t.Fatalf("unmarshal second keepalive request: %v", err)
	}

	if firstPayload["id"] == secondPayload["id"] {
		t.Fatalf("expected keepalive request ids to be unique")
	}
}
|
||||||
|
|
@ -3,35 +3,43 @@ package pool
|
||||||
import (
	"sync"

	"dappco.re/go/proxy"
)

// FailoverStrategy wraps an ordered slice of PoolConfig entries.
// It connects to the first enabled pool and fails over in order on error.
// On reconnect it always retries from the primary first.
//
// strategy := pool.NewFailoverStrategy([]proxy.PoolConfig{
//	{URL: "primary.example:3333", Enabled: true},
//	{URL: "backup.example:3333", Enabled: true},
// }, listener, cfg)
// strategy.Connect()
type FailoverStrategy struct {
	pools    []proxy.PoolConfig // construction-time fallback pool list
	current  int                // index of the currently connected pool
	client   *StratumClient     // active upstream client; nil while disconnected
	listener StratumListener    // outer listener receiving job/result events
	config   *proxy.Config      // live config; its Pools take precedence over `pools`
	closing  bool               // set by Disconnect to suppress auto-reconnect
	mu       sync.Mutex         // guards the fields above
}

// StrategyFactory creates a FailoverStrategy for a given StratumListener.
// Used by splitters to create per-mapper strategies without coupling to Config.
//
// factory := pool.NewStrategyFactory(cfg)
// strategy := factory(listener)
type StrategyFactory func(listener StratumListener) Strategy

// Strategy is the interface splitters use to submit shares and inspect pool state.
type Strategy interface {
	Connect()
	Submit(jobID, nonce, result, algo string) int64
	Disconnect()
	IsActive() bool
}

// ReloadableStrategy re-establishes an upstream connection after config changes.
//
// strategy.ReloadPools()
type ReloadableStrategy interface {
	ReloadPools()
}
|
||||||
|
|
|
||||||
148
pool/strategy_disconnect_test.go
Normal file
148
pool/strategy_disconnect_test.go
Normal file
|
|
@ -0,0 +1,148 @@
|
||||||
|
package pool

import (
	"encoding/json"
	"net"
	"sync/atomic"
	"testing"
	"time"

	"dappco.re/go/proxy"
)

// disconnectSpy is a StratumListener stub that counts OnDisconnect calls.
type disconnectSpy struct {
	disconnects atomic.Int64
}

func (s *disconnectSpy) OnJob(proxy.Job) {}

func (s *disconnectSpy) OnResultAccepted(int64, bool, string) {}

func (s *disconnectSpy) OnDisconnect() {
	s.disconnects.Add(1)
}

// TestFailoverStrategy_Disconnect_Good verifies that an intentional
// Disconnect does not notify the outer listener or trigger a reconnect.
func TestFailoverStrategy_Disconnect_Good(t *testing.T) {
	spy := &disconnectSpy{}
	strategy := &FailoverStrategy{
		listener: spy,
		client:   &StratumClient{listener: nil},
	}
	strategy.client.listener = strategy

	strategy.Disconnect()
	// Give any (incorrect) async reconnect a moment to surface.
	time.Sleep(10 * time.Millisecond)

	if got := spy.disconnects.Load(); got != 0 {
		t.Fatalf("expected intentional disconnect to suppress reconnect, got %d listener calls", got)
	}
}

// TestFailoverStrategy_Disconnect_Bad verifies that an external (unexpected)
// disconnect notifies the listener exactly once.
func TestFailoverStrategy_Disconnect_Bad(t *testing.T) {
	spy := &disconnectSpy{}
	strategy := &FailoverStrategy{listener: spy}

	strategy.OnDisconnect()

	if got := spy.disconnects.Load(); got != 1 {
		t.Fatalf("expected external disconnect to notify listener once, got %d", got)
	}
}

// TestFailoverStrategy_Disconnect_Ugly verifies that repeated intentional
// disconnects remain silent.
func TestFailoverStrategy_Disconnect_Ugly(t *testing.T) {
	spy := &disconnectSpy{}
	strategy := &FailoverStrategy{
		listener: spy,
		client:   &StratumClient{listener: nil},
	}
	strategy.client.listener = strategy

	strategy.Disconnect()
	strategy.Disconnect()
	time.Sleep(10 * time.Millisecond)

	if got := spy.disconnects.Load(); got != 0 {
		t.Fatalf("expected repeated intentional disconnects to remain silent, got %d listener calls", got)
	}
}

// TestStratumClient_NotifyDisconnect_ClearsState_Good verifies that
// notifyDisconnect fires the listener once and resets all per-connection state.
func TestStratumClient_NotifyDisconnect_ClearsState_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()

	spy := &disconnectSpy{}
	client := &StratumClient{
		conn:      clientConn,
		listener:  spy,
		sessionID: "session-1",
		active:    true,
		pending: map[int64]struct{}{
			7: {},
		},
	}

	client.notifyDisconnect()

	if got := spy.disconnects.Load(); got != 1 {
		t.Fatalf("expected one disconnect notification, got %d", got)
	}
	if client.conn != nil {
		t.Fatalf("expected pooled connection to be cleared")
	}
	if client.sessionID != "" {
		t.Fatalf("expected session id to be cleared, got %q", client.sessionID)
	}
	if client.IsActive() {
		t.Fatalf("expected client to stop reporting active after disconnect")
	}
	if len(client.pending) != 0 {
		t.Fatalf("expected pending submit state to be cleared, got %d entries", len(client.pending))
	}
}
|
||||||
|
|
||||||
|
func TestFailoverStrategy_OnDisconnect_ClearsClient_Bad(t *testing.T) {
|
||||||
|
spy := &disconnectSpy{}
|
||||||
|
strategy := &FailoverStrategy{
|
||||||
|
listener: spy,
|
||||||
|
client: &StratumClient{active: true, pending: make(map[int64]struct{})},
|
||||||
|
}
|
||||||
|
|
||||||
|
strategy.OnDisconnect()
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
|
||||||
|
if strategy.client != nil {
|
||||||
|
t.Fatalf("expected strategy to drop the stale client before reconnect")
|
||||||
|
}
|
||||||
|
if strategy.IsActive() {
|
||||||
|
t.Fatalf("expected strategy to report inactive while reconnect is pending")
|
||||||
|
}
|
||||||
|
if got := spy.disconnects.Load(); got != 1 {
|
||||||
|
t.Fatalf("expected one disconnect notification, got %d", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStratumClient_HandleMessage_LoginErrorDisconnects_Ugly(t *testing.T) {
|
||||||
|
spy := &disconnectSpy{}
|
||||||
|
client := &StratumClient{
|
||||||
|
listener: spy,
|
||||||
|
pending: make(map[int64]struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
payload, err := json.Marshal(map[string]any{
|
||||||
|
"id": 1,
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"error": map[string]any{
|
||||||
|
"code": -1,
|
||||||
|
"message": "Invalid payment address provided",
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal login error payload: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
client.handleMessage(payload)
|
||||||
|
|
||||||
|
if got := spy.disconnects.Load(); got != 1 {
|
||||||
|
t.Fatalf("expected login failure to disconnect upstream once, got %d", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
140
proxy.go
140
proxy.go
|
|
@ -1,38 +1,59 @@
|
||||||
// Package proxy is a CryptoNote stratum mining proxy library.
|
// Package proxy is the mining proxy library.
|
||||||
//
|
|
||||||
// It accepts miner connections over TCP (optionally TLS), splits the 32-bit nonce
|
|
||||||
// space across up to 256 simultaneous miners per upstream pool connection (NiceHash
|
|
||||||
// mode), and presents a small monitoring API.
|
|
||||||
//
|
|
||||||
// Full specification: docs/RFC.md
|
|
||||||
//
|
//
|
||||||
|
// cfg := &proxy.Config{Mode: "nicehash", Bind: []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}}, Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}}, Workers: proxy.WorkersByRigID}
|
||||||
// p, result := proxy.New(cfg)
|
// p, result := proxy.New(cfg)
|
||||||
// if result.OK { p.Start() }
|
// if result.OK {
|
||||||
|
// p.Start()
|
||||||
|
// }
|
||||||
package proxy
|
package proxy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"net/http"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Proxy is the top-level orchestrator. It owns the server, splitter, stats, workers,
|
// Proxy wires the configured listeners, splitters, stats, workers, and log sinks.
|
||||||
// event bus, tick goroutine, and optional HTTP API.
|
|
||||||
//
|
//
|
||||||
|
// cfg := &proxy.Config{
|
||||||
|
// Mode: "nicehash",
|
||||||
|
// Bind: []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}},
|
||||||
|
// Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
// Workers: proxy.WorkersByRigID,
|
||||||
|
// }
|
||||||
// p, result := proxy.New(cfg)
|
// p, result := proxy.New(cfg)
|
||||||
// if result.OK { p.Start() }
|
// if result.OK {
|
||||||
|
// p.Start()
|
||||||
|
// }
|
||||||
type Proxy struct {
|
type Proxy struct {
|
||||||
config *Config
|
config *Config
|
||||||
splitter Splitter
|
configMu sync.RWMutex
|
||||||
stats *Stats
|
splitter Splitter
|
||||||
workers *Workers
|
shareSink ShareSink
|
||||||
events *EventBus
|
stats *Stats
|
||||||
servers []*Server
|
workers *Workers
|
||||||
ticker *time.Ticker
|
events *EventBus
|
||||||
watcher *ConfigWatcher
|
servers []*Server
|
||||||
done chan struct{}
|
ticker *time.Ticker
|
||||||
|
watcher *ConfigWatcher
|
||||||
|
done chan struct{}
|
||||||
|
stopOnce sync.Once
|
||||||
|
minersMu sync.RWMutex
|
||||||
|
miners map[int64]*Miner
|
||||||
|
customDiff *CustomDiff
|
||||||
|
customDiffBuckets *CustomDiffBuckets
|
||||||
|
rateLimit *RateLimiter
|
||||||
|
httpServer *http.Server
|
||||||
|
accessLog *accessLogSink
|
||||||
|
shareLog *shareLogSink
|
||||||
|
submitCount atomic.Int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// Splitter is the interface both NonceSplitter and SimpleSplitter satisfy.
|
// Splitter routes miner logins, submits, and disconnects to the active upstream strategy.
|
||||||
|
//
|
||||||
|
// splitter := nicehash.NewNonceSplitter(cfg, bus, pool.NewStrategyFactory(cfg))
|
||||||
|
// splitter.Connect()
|
||||||
type Splitter interface {
|
type Splitter interface {
|
||||||
// Connect establishes the first pool upstream connection.
|
// Connect establishes the first pool upstream connection.
|
||||||
Connect()
|
Connect()
|
||||||
|
|
@ -50,7 +71,18 @@ type Splitter interface {
|
||||||
Upstreams() UpstreamStats
|
Upstreams() UpstreamStats
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpstreamStats carries pool connection state counts for monitoring.
|
// ShareSink consumes share outcomes from the proxy event stream.
|
||||||
|
//
|
||||||
|
// sink.OnAccept(proxy.Event{Miner: miner, Diff: 100000})
|
||||||
|
// sink.OnReject(proxy.Event{Miner: miner, Error: "Invalid nonce"})
|
||||||
|
type ShareSink interface {
|
||||||
|
OnAccept(Event)
|
||||||
|
OnReject(Event)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamStats reports pool connection counts.
|
||||||
|
//
|
||||||
|
// stats := proxy.UpstreamStats{Active: 1, Sleep: 0, Error: 0, Total: 1}
|
||||||
type UpstreamStats struct {
|
type UpstreamStats struct {
|
||||||
Active uint64 // connections currently receiving jobs
|
Active uint64 // connections currently receiving jobs
|
||||||
Sleep uint64 // idle connections (simple mode reuse pool)
|
Sleep uint64 // idle connections (simple mode reuse pool)
|
||||||
|
|
@ -58,12 +90,16 @@ type UpstreamStats struct {
|
||||||
Total uint64 // Active + Sleep + Error
|
Total uint64 // Active + Sleep + Error
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoginEvent is dispatched when a miner completes the login handshake.
|
// LoginEvent is dispatched when a miner completes login.
|
||||||
|
//
|
||||||
|
// event := proxy.LoginEvent{Miner: miner}
|
||||||
type LoginEvent struct {
|
type LoginEvent struct {
|
||||||
Miner *Miner
|
Miner *Miner
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubmitEvent is dispatched when a miner submits a share.
|
// SubmitEvent carries one miner share submission.
|
||||||
|
//
|
||||||
|
// event := proxy.SubmitEvent{Miner: miner, JobID: "job-1", Nonce: "deadbeef", Result: "HASH", RequestID: 2}
|
||||||
type SubmitEvent struct {
|
type SubmitEvent struct {
|
||||||
Miner *Miner
|
Miner *Miner
|
||||||
JobID string
|
JobID string
|
||||||
|
|
@ -73,50 +109,56 @@ type SubmitEvent struct {
|
||||||
RequestID int64
|
RequestID int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// CloseEvent is dispatched when a miner TCP connection closes.
|
// CloseEvent is dispatched when a miner connection closes.
|
||||||
|
//
|
||||||
|
// event := proxy.CloseEvent{Miner: miner}
|
||||||
type CloseEvent struct {
|
type CloseEvent struct {
|
||||||
Miner *Miner
|
Miner *Miner
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConfigWatcher polls a config file for mtime changes and calls onChange on modification.
|
// ConfigWatcher polls a config file every second and reloads on modification.
|
||||||
// Uses 1-second polling; does not require fsnotify.
|
|
||||||
//
|
//
|
||||||
// w := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
|
// watcher := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
|
||||||
// p.Reload(cfg)
|
// p.Reload(cfg)
|
||||||
// })
|
// })
|
||||||
// w.Start()
|
// watcher.Start()
|
||||||
type ConfigWatcher struct {
|
type ConfigWatcher struct {
|
||||||
path string
|
configPath string
|
||||||
onChange func(*Config)
|
onConfigChange func(*Config)
|
||||||
lastMod time.Time
|
lastModifiedAt time.Time
|
||||||
done chan struct{}
|
stopCh chan struct{}
|
||||||
|
mu sync.Mutex
|
||||||
|
started bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// RateLimiter implements per-IP token bucket connection rate limiting.
|
// RateLimiter throttles new connections per source IP.
|
||||||
// Each unique IP has a bucket initialised to MaxConnectionsPerMinute tokens.
|
|
||||||
// Each connection attempt consumes one token. Tokens refill at 1 per (60/max) seconds.
|
|
||||||
// An IP that empties its bucket is added to a ban list for BanDurationSeconds.
|
|
||||||
//
|
//
|
||||||
// rl := proxy.NewRateLimiter(cfg.RateLimit)
|
// limiter := proxy.NewRateLimiter(proxy.RateLimit{
|
||||||
// if !rl.Allow("1.2.3.4") { conn.Close(); return }
|
// MaxConnectionsPerMinute: 30,
|
||||||
|
// BanDurationSeconds: 300,
|
||||||
|
// })
|
||||||
|
// if limiter.Allow("1.2.3.4:3333") {
|
||||||
|
// // accept the socket
|
||||||
|
// }
|
||||||
type RateLimiter struct {
|
type RateLimiter struct {
|
||||||
cfg RateLimit
|
limit RateLimit
|
||||||
buckets map[string]*tokenBucket
|
bucketByHost map[string]*tokenBucket
|
||||||
banned map[string]time.Time
|
banUntilByHost map[string]time.Time
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// tokenBucket is a simple token bucket for one IP.
|
// tokenBucket is the per-IP refillable counter.
|
||||||
|
//
|
||||||
|
// bucket := tokenBucket{tokens: 30, lastRefill: time.Now()}
|
||||||
type tokenBucket struct {
|
type tokenBucket struct {
|
||||||
tokens int
|
tokens int
|
||||||
lastRefill time.Time
|
lastRefill time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// CustomDiff resolves and applies per-miner difficulty overrides at login time.
|
// CustomDiff applies a login-time difficulty override.
|
||||||
// Resolution order: user-suffix (+N) > Config.CustomDiff > pool difficulty.
|
|
||||||
//
|
//
|
||||||
// cd := proxy.NewCustomDiff(cfg.CustomDiff)
|
// resolver := proxy.NewCustomDiff(50000)
|
||||||
// bus.Subscribe(proxy.EventLogin, cd.OnLogin)
|
// resolver.Apply(&Miner{user: "WALLET+75000"})
|
||||||
type CustomDiff struct {
|
type CustomDiff struct {
|
||||||
globalDiff uint64
|
globalDiff atomic.Uint64
|
||||||
}
|
}
|
||||||
|
|
|
||||||
115
ratelimit_test.go
Normal file
115
ratelimit_test.go
Normal file
|
|
@ -0,0 +1,115 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestRateLimiter_Allow_Good verifies the first N calls within budget are allowed.
|
||||||
|
//
|
||||||
|
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 10})
|
||||||
|
// limiter.Allow("1.2.3.4:3333") // true (first 10 calls)
|
||||||
|
func TestRateLimiter_Allow_Good(t *testing.T) {
|
||||||
|
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60})
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
if !rl.Allow("1.2.3.4:3333") {
|
||||||
|
t.Fatalf("expected call %d to be allowed", i+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRateLimiter_Allow_Bad verifies the 11th call fails when budget is 10/min.
|
||||||
|
//
|
||||||
|
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 10})
|
||||||
|
// // calls 1-10 pass, call 11 fails
|
||||||
|
func TestRateLimiter_Allow_Bad(t *testing.T) {
|
||||||
|
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60})
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
rl.Allow("1.2.3.4:3333")
|
||||||
|
}
|
||||||
|
if rl.Allow("1.2.3.4:3333") {
|
||||||
|
t.Fatalf("expected 11th call to be rejected")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRateLimiter_Allow_Ugly verifies a banned IP stays banned for BanDurationSeconds.
|
||||||
|
//
|
||||||
|
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 300})
|
||||||
|
// limiter.Allow("1.2.3.4:3333") // true (exhausts budget)
|
||||||
|
// limiter.Allow("1.2.3.4:3333") // false (banned for 300 seconds)
|
||||||
|
func TestRateLimiter_Allow_Ugly(t *testing.T) {
|
||||||
|
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 300})
|
||||||
|
|
||||||
|
if !rl.Allow("1.2.3.4:3333") {
|
||||||
|
t.Fatalf("expected first call to pass")
|
||||||
|
}
|
||||||
|
if rl.Allow("1.2.3.4:3333") {
|
||||||
|
t.Fatalf("expected second call to fail")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify the IP is still banned even with a fresh bucket
|
||||||
|
rl.mu.Lock()
|
||||||
|
rl.bucketByHost["1.2.3.4"] = &tokenBucket{tokens: 100, lastRefill: time.Now()}
|
||||||
|
rl.mu.Unlock()
|
||||||
|
if rl.Allow("1.2.3.4:3333") {
|
||||||
|
t.Fatalf("expected banned IP to remain banned regardless of fresh bucket")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRateLimiter_Tick_Good verifies Tick removes expired bans.
|
||||||
|
//
|
||||||
|
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1})
|
||||||
|
// limiter.Tick()
|
||||||
|
func TestRateLimiter_Tick_Good(t *testing.T) {
|
||||||
|
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1})
|
||||||
|
|
||||||
|
rl.Allow("1.2.3.4:3333")
|
||||||
|
rl.Allow("1.2.3.4:3333") // triggers ban
|
||||||
|
|
||||||
|
// Simulate expired ban
|
||||||
|
rl.mu.Lock()
|
||||||
|
rl.banUntilByHost["1.2.3.4"] = time.Now().Add(-time.Second)
|
||||||
|
rl.mu.Unlock()
|
||||||
|
|
||||||
|
rl.Tick()
|
||||||
|
|
||||||
|
rl.mu.Lock()
|
||||||
|
_, banned := rl.banUntilByHost["1.2.3.4"]
|
||||||
|
rl.mu.Unlock()
|
||||||
|
if banned {
|
||||||
|
t.Fatalf("expected expired ban to be removed by Tick")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRateLimiter_Allow_ReplenishesHighLimits verifies token replenishment at high rates.
|
||||||
|
//
|
||||||
|
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 120})
|
||||||
|
func TestRateLimiter_Allow_ReplenishesHighLimits(t *testing.T) {
|
||||||
|
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 120, BanDurationSeconds: 1})
|
||||||
|
rl.mu.Lock()
|
||||||
|
rl.bucketByHost["1.2.3.4"] = &tokenBucket{
|
||||||
|
tokens: 0,
|
||||||
|
lastRefill: time.Now().Add(-30 * time.Second),
|
||||||
|
}
|
||||||
|
rl.mu.Unlock()
|
||||||
|
|
||||||
|
if !rl.Allow("1.2.3.4:1234") {
|
||||||
|
t.Fatalf("expected bucket to replenish at 120/min")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRateLimiter_Disabled_Good verifies a zero-budget limiter allows all connections.
|
||||||
|
//
|
||||||
|
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 0})
|
||||||
|
// limiter.Allow("any-ip") // always true
|
||||||
|
func TestRateLimiter_Disabled_Good(t *testing.T) {
|
||||||
|
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 0})
|
||||||
|
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
if !rl.Allow("1.2.3.4:3333") {
|
||||||
|
t.Fatalf("expected disabled limiter to allow all connections")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
404
reload_test.go
Normal file
404
reload_test.go
Normal file
|
|
@ -0,0 +1,404 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type reloadableSplitter struct {
|
||||||
|
reloads int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *reloadableSplitter) Connect() {}
|
||||||
|
func (s *reloadableSplitter) OnLogin(event *LoginEvent) {}
|
||||||
|
func (s *reloadableSplitter) OnSubmit(event *SubmitEvent) {}
|
||||||
|
func (s *reloadableSplitter) OnClose(event *CloseEvent) {}
|
||||||
|
func (s *reloadableSplitter) Tick(ticks uint64) {}
|
||||||
|
func (s *reloadableSplitter) GC() {}
|
||||||
|
func (s *reloadableSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }
|
||||||
|
func (s *reloadableSplitter) ReloadPools() { s.reloads++ }
|
||||||
|
|
||||||
|
func TestProxy_Reload_Good(t *testing.T) {
|
||||||
|
original := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p := &Proxy{
|
||||||
|
config: original,
|
||||||
|
customDiff: NewCustomDiff(1),
|
||||||
|
rateLimit: NewRateLimiter(RateLimit{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
updated := &Config{
|
||||||
|
Mode: "simple",
|
||||||
|
Workers: WorkersByUser,
|
||||||
|
Bind: []BindAddr{{Host: "0.0.0.0", Port: 4444}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool-b.example:4444", Enabled: true}},
|
||||||
|
CustomDiff: 50000,
|
||||||
|
AccessPassword: "secret",
|
||||||
|
CustomDiffStats: true,
|
||||||
|
AlgoExtension: true,
|
||||||
|
AccessLogFile: "/tmp/access.log",
|
||||||
|
ReuseTimeout: 30,
|
||||||
|
Retries: 5,
|
||||||
|
RetryPause: 2,
|
||||||
|
Watch: true,
|
||||||
|
RateLimit: RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60},
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Reload(updated)
|
||||||
|
|
||||||
|
if p.config != original {
|
||||||
|
t.Fatalf("expected reload to preserve the existing config pointer")
|
||||||
|
}
|
||||||
|
if got := p.config.Bind[0]; got.Host != "127.0.0.1" || got.Port != 3333 {
|
||||||
|
t.Fatalf("expected bind addresses to remain unchanged, got %+v", got)
|
||||||
|
}
|
||||||
|
if p.config.Mode != "nicehash" {
|
||||||
|
t.Fatalf("expected mode to remain unchanged, got %q", p.config.Mode)
|
||||||
|
}
|
||||||
|
if p.config.Workers != WorkersByUser {
|
||||||
|
t.Fatalf("expected workers mode to reload, got %q", p.config.Workers)
|
||||||
|
}
|
||||||
|
if got := p.config.Pools[0].URL; got != "pool-b.example:4444" {
|
||||||
|
t.Fatalf("expected pools to reload, got %q", got)
|
||||||
|
}
|
||||||
|
if got := p.customDiff.globalDiff.Load(); got != 50000 {
|
||||||
|
t.Fatalf("expected custom diff to reload, got %d", got)
|
||||||
|
}
|
||||||
|
if !p.rateLimit.IsActive() {
|
||||||
|
t.Fatalf("expected rate limiter to be replaced with active configuration")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Reload_WorkersMode_Good(t *testing.T) {
|
||||||
|
miner := &Miner{id: 7, user: "wallet-a", rigID: "rig-a", ip: "10.0.0.7"}
|
||||||
|
workers := NewWorkers(WorkersByRigID, nil)
|
||||||
|
workers.OnLogin(Event{Miner: miner})
|
||||||
|
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
|
||||||
|
},
|
||||||
|
workers: workers,
|
||||||
|
miners: map[int64]*Miner{miner.id: miner},
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Reload(&Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByUser,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
|
||||||
|
})
|
||||||
|
|
||||||
|
if got := p.WorkersMode(); got != WorkersByUser {
|
||||||
|
t.Fatalf("expected proxy workers mode %q, got %q", WorkersByUser, got)
|
||||||
|
}
|
||||||
|
records := p.WorkerRecords()
|
||||||
|
if len(records) != 1 {
|
||||||
|
t.Fatalf("expected one rebuilt worker record, got %d", len(records))
|
||||||
|
}
|
||||||
|
if got := records[0].Name; got != "wallet-a" {
|
||||||
|
t.Fatalf("expected worker record to rebuild using user mode, got %q", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Reload_CustomDiff_Good(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(minerConn, 3333, nil)
|
||||||
|
miner.state = MinerStateReady
|
||||||
|
miner.globalDiff = 1000
|
||||||
|
miner.customDiff = 1000
|
||||||
|
miner.currentJob = Job{
|
||||||
|
Blob: strings.Repeat("0", 160),
|
||||||
|
JobID: "job-1",
|
||||||
|
Target: "01000000",
|
||||||
|
Algo: "cn/r",
|
||||||
|
}
|
||||||
|
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
CustomDiff: 1000,
|
||||||
|
},
|
||||||
|
customDiff: NewCustomDiff(1000),
|
||||||
|
miners: map[int64]*Miner{miner.ID(): miner},
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan map[string]any, 1)
|
||||||
|
go func() {
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
done <- nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var payload map[string]any
|
||||||
|
if err := json.Unmarshal(line, &payload); err != nil {
|
||||||
|
done <- nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
done <- payload
|
||||||
|
}()
|
||||||
|
|
||||||
|
p.Reload(&Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
CustomDiff: 5000,
|
||||||
|
})
|
||||||
|
|
||||||
|
select {
|
||||||
|
case payload := <-done:
|
||||||
|
if payload == nil {
|
||||||
|
t.Fatal("expected reload to resend the current job with the new custom diff")
|
||||||
|
}
|
||||||
|
params, ok := payload["params"].(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected job params payload, got %#v", payload["params"])
|
||||||
|
}
|
||||||
|
target, _ := params["target"].(string)
|
||||||
|
if got := (Job{Target: target}).DifficultyFromTarget(); got == 0 || got > 5000 {
|
||||||
|
t.Fatalf("expected resent job difficulty at or below 5000, got %d", got)
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("timed out waiting for reload job refresh")
|
||||||
|
}
|
||||||
|
|
||||||
|
if miner.customDiff != 5000 {
|
||||||
|
t.Fatalf("expected active miner custom diff to reload, got %d", miner.customDiff)
|
||||||
|
}
|
||||||
|
if miner.globalDiff != 5000 {
|
||||||
|
t.Fatalf("expected active miner global diff to reload, got %d", miner.globalDiff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Reload_CustomDiff_Bad(t *testing.T) {
|
||||||
|
miner := &Miner{
|
||||||
|
id: 9,
|
||||||
|
state: MinerStateReady,
|
||||||
|
globalDiff: 1000,
|
||||||
|
customDiff: 7000,
|
||||||
|
customDiffFromLogin: true,
|
||||||
|
currentJob: Job{
|
||||||
|
Blob: strings.Repeat("0", 160),
|
||||||
|
JobID: "job-1",
|
||||||
|
Target: "01000000",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
CustomDiff: 1000,
|
||||||
|
},
|
||||||
|
customDiff: NewCustomDiff(1000),
|
||||||
|
miners: map[int64]*Miner{miner.ID(): miner},
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Reload(&Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
CustomDiff: 5000,
|
||||||
|
})
|
||||||
|
|
||||||
|
if miner.customDiff != 7000 {
|
||||||
|
t.Fatalf("expected login suffix custom diff to be preserved, got %d", miner.customDiff)
|
||||||
|
}
|
||||||
|
if miner.globalDiff != 5000 {
|
||||||
|
t.Fatalf("expected miner global diff to update for future logins, got %d", miner.globalDiff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Reload_CustomDiff_Ugly(t *testing.T) {
|
||||||
|
miner := &Miner{
|
||||||
|
id: 11,
|
||||||
|
state: MinerStateWaitLogin,
|
||||||
|
globalDiff: 1000,
|
||||||
|
customDiff: 1000,
|
||||||
|
}
|
||||||
|
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
CustomDiff: 1000,
|
||||||
|
},
|
||||||
|
customDiff: NewCustomDiff(1000),
|
||||||
|
miners: map[int64]*Miner{miner.ID(): miner},
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Reload(&Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
CustomDiff: 0,
|
||||||
|
})
|
||||||
|
|
||||||
|
if miner.customDiff != 0 {
|
||||||
|
t.Fatalf("expected reload to clear the global custom diff for unauthenticated miners, got %d", miner.customDiff)
|
||||||
|
}
|
||||||
|
if miner.globalDiff != 0 {
|
||||||
|
t.Fatalf("expected miner global diff to be cleared, got %d", miner.globalDiff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Reload_UpdatesServers(t *testing.T) {
|
||||||
|
originalLimiter := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1})
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{Mode: "nicehash", Workers: WorkersByRigID},
|
||||||
|
rateLimit: originalLimiter,
|
||||||
|
servers: []*Server{
|
||||||
|
{limiter: originalLimiter},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Reload(&Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
RateLimit: RateLimit{MaxConnectionsPerMinute: 10},
|
||||||
|
AccessLogFile: "",
|
||||||
|
})
|
||||||
|
|
||||||
|
if got := p.servers[0].limiter; got != p.rateLimit {
|
||||||
|
t.Fatalf("expected server limiter to be updated")
|
||||||
|
}
|
||||||
|
if p.rateLimit == originalLimiter {
|
||||||
|
t.Fatalf("expected rate limiter instance to be replaced")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Reload_WatchEnabled_Good(t *testing.T) {
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
configPath: "/tmp/proxy.json",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Reload(&Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 4444}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
Watch: true,
|
||||||
|
configPath: "/tmp/ignored.json",
|
||||||
|
})
|
||||||
|
|
||||||
|
if p.watcher == nil {
|
||||||
|
t.Fatalf("expected reload to create a watcher when watch is enabled")
|
||||||
|
}
|
||||||
|
if got := p.watcher.configPath; got != "/tmp/proxy.json" {
|
||||||
|
t.Fatalf("expected watcher to keep the original config path, got %q", got)
|
||||||
|
}
|
||||||
|
p.watcher.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Reload_WatchDisabled_Bad(t *testing.T) {
|
||||||
|
watcher := NewConfigWatcher("/tmp/proxy.json", func(*Config) {})
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
configPath: "/tmp/proxy.json",
|
||||||
|
Watch: true,
|
||||||
|
},
|
||||||
|
watcher: watcher,
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Reload(&Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
Watch: false,
|
||||||
|
configPath: "/tmp/ignored.json",
|
||||||
|
})
|
||||||
|
|
||||||
|
if p.watcher != nil {
|
||||||
|
t.Fatalf("expected reload to stop and clear the watcher when watch is disabled")
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-watcher.stopCh:
|
||||||
|
default:
|
||||||
|
t.Fatalf("expected existing watcher to be stopped")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Reload_PoolsChanged_ReloadsSplitter_Good(t *testing.T) {
|
||||||
|
splitter := &reloadableSplitter{}
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
|
||||||
|
},
|
||||||
|
splitter: splitter,
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Reload(&Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool-b.example:3333", Enabled: true}},
|
||||||
|
})
|
||||||
|
|
||||||
|
if splitter.reloads != 1 {
|
||||||
|
t.Fatalf("expected pool reload to reconnect upstreams once, got %d", splitter.reloads)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Reload_PoolsUnchanged_DoesNotReloadSplitter_Ugly(t *testing.T) {
|
||||||
|
splitter := &reloadableSplitter{}
|
||||||
|
p := &Proxy{
|
||||||
|
config: &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
|
||||||
|
},
|
||||||
|
splitter: splitter,
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Reload(&Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
|
||||||
|
})
|
||||||
|
|
||||||
|
if splitter.reloads != 0 {
|
||||||
|
t.Fatalf("expected unchanged pool config to skip reconnect, got %d", splitter.reloads)
|
||||||
|
}
|
||||||
|
}
|
||||||
23
server.go
23
server.go
|
|
@ -7,13 +7,20 @@ import (
|
||||||
|
|
||||||
// Server listens on one BindAddr and creates a Miner for each accepted connection.
|
// Server listens on one BindAddr and creates a Miner for each accepted connection.
|
||||||
//
|
//
|
||||||
// srv, result := proxy.NewServer(bind, tlsCfg, rateLimiter, onAccept)
|
// srv, result := proxy.NewServer(
|
||||||
// srv.Start()
|
// proxy.BindAddr{Host: "0.0.0.0", Port: 3333, TLS: false},
|
||||||
|
// nil,
|
||||||
|
// proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 30}),
|
||||||
|
// func(conn net.Conn, port uint16) { _ = conn; _ = port },
|
||||||
|
// )
|
||||||
|
// if result.OK {
|
||||||
|
// srv.Start()
|
||||||
|
// }
|
||||||
type Server struct {
|
type Server struct {
|
||||||
addr BindAddr
|
addr BindAddr
|
||||||
tlsCfg *tls.Config // nil for plain TCP
|
tlsConfig *tls.Config // nil for plain TCP
|
||||||
limiter *RateLimiter
|
limiter *RateLimiter
|
||||||
onAccept func(net.Conn, uint16)
|
onAccept func(net.Conn, uint16)
|
||||||
listener net.Listener
|
listener net.Listener
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
95
sharelog_impl.go
Normal file
95
sharelog_impl.go
Normal file
|
|
@ -0,0 +1,95 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// shareLogSink appends share outcomes (accept/reject) to a log file.
// The file is opened lazily on the first write and closed on Close or
// when SetPath switches the target.
type shareLogSink struct {
	path string     // target log file; empty disables logging
	file *os.File   // lazily opened append handle, nil until first write
	mu   sync.Mutex // serializes path changes and writes
}
|
||||||
|
|
||||||
|
// newShareLogSink creates a sink for the given path; the file itself is
// not opened until the first share is logged.
func newShareLogSink(path string) *shareLogSink {
	return &shareLogSink{path: path}
}
|
||||||
|
|
||||||
|
func (l *shareLogSink) SetPath(path string) {
|
||||||
|
if l == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if l.path == path {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.path = path
|
||||||
|
if l.file != nil {
|
||||||
|
_ = l.file.Close()
|
||||||
|
l.file = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *shareLogSink) Close() {
|
||||||
|
if l == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if l.file != nil {
|
||||||
|
_ = l.file.Close()
|
||||||
|
l.file = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnAccept logs one accepted share with its difficulty and latency.
// Events without a miner are ignored.
func (l *shareLogSink) OnAccept(e Event) {
	if l == nil || e.Miner == nil {
		return
	}
	l.writeLine("ACCEPT", e.Miner.User(), e.Diff, e.Latency, "")
}
|
||||||
|
|
||||||
|
// OnReject logs one rejected share with its error reason. Diff and
// latency are not recorded for rejects (passed as zero).
func (l *shareLogSink) OnReject(e Event) {
	if l == nil || e.Miner == nil {
		return
	}
	l.writeLine("REJECT", e.Miner.User(), 0, 0, e.Error)
}
|
||||||
|
|
||||||
|
func (l *shareLogSink) writeLine(kind, user string, diff uint64, latency uint16, reason string) {
|
||||||
|
l.mu.Lock()
|
||||||
|
defer l.mu.Unlock()
|
||||||
|
if strings.TrimSpace(l.path) == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if l.file == nil {
|
||||||
|
file, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.file = file
|
||||||
|
}
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString(time.Now().UTC().Format(time.RFC3339))
|
||||||
|
builder.WriteByte(' ')
|
||||||
|
builder.WriteString(kind)
|
||||||
|
builder.WriteString(" ")
|
||||||
|
builder.WriteString(user)
|
||||||
|
switch kind {
|
||||||
|
case "ACCEPT":
|
||||||
|
builder.WriteString(" diff=")
|
||||||
|
builder.WriteString(formatUint(diff))
|
||||||
|
builder.WriteString(" latency=")
|
||||||
|
builder.WriteString(formatUint(uint64(latency)))
|
||||||
|
builder.WriteString("ms")
|
||||||
|
case "REJECT":
|
||||||
|
builder.WriteString(" reason=\"")
|
||||||
|
builder.WriteString(reason)
|
||||||
|
builder.WriteString("\"")
|
||||||
|
}
|
||||||
|
builder.WriteByte('\n')
|
||||||
|
_, _ = l.file.WriteString(builder.String())
|
||||||
|
}
|
||||||
46
sharelog_test.go
Normal file
46
sharelog_test.go
Normal file
|
|
@ -0,0 +1,46 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProxy_ShareLog_WritesOutcomeLines(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
path := filepath.Join(dir, "shares.log")
|
||||||
|
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByRigID,
|
||||||
|
ShareLogFile: path,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
}
|
||||||
|
p, result := New(cfg)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected valid proxy, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
miner := &Miner{
|
||||||
|
user: "WALLET",
|
||||||
|
conn: noopConn{},
|
||||||
|
state: MinerStateReady,
|
||||||
|
}
|
||||||
|
p.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 1234, Latency: 56})
|
||||||
|
p.events.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Invalid nonce"})
|
||||||
|
p.Stop()
|
||||||
|
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read share log: %v", err)
|
||||||
|
}
|
||||||
|
text := string(data)
|
||||||
|
if !strings.Contains(text, "ACCEPT WALLET diff=1234 latency=56ms") {
|
||||||
|
t.Fatalf("expected ACCEPT line, got %q", text)
|
||||||
|
}
|
||||||
|
if !strings.Contains(text, "REJECT WALLET reason=\"Invalid nonce\"") {
|
||||||
|
t.Fatalf("expected REJECT line, got %q", text)
|
||||||
|
}
|
||||||
|
}
|
||||||
92
splitter/nicehash/gc_test.go
Normal file
92
splitter/nicehash/gc_test.go
Normal file
|
|
@ -0,0 +1,92 @@
|
||||||
|
package nicehash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// gcStrategy is a minimal pool.Strategy test double that records whether
// Disconnect was called and reports a fixed IsActive value.
type gcStrategy struct {
	mu           sync.Mutex
	disconnected bool // set to true by Disconnect
	active       bool // value returned by IsActive
}
|
||||||
|
|
||||||
|
// Connect is a no-op for the test double.
func (s *gcStrategy) Connect() {}
|
||||||
|
|
||||||
|
// Submit is a no-op that always reports sequence 0.
func (s *gcStrategy) Submit(jobID, nonce, result, algo string) int64 {
	return 0
}
|
||||||
|
|
||||||
|
// Disconnect records that the splitter disconnected this strategy.
func (s *gcStrategy) Disconnect() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.disconnected = true
}
|
||||||
|
|
||||||
|
// IsActive reports the fixed activity flag chosen at construction.
func (s *gcStrategy) IsActive() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.active
}
|
||||||
|
|
||||||
|
func TestNonceSplitter_GC_Good(t *testing.T) {
|
||||||
|
strategy := &gcStrategy{active: false}
|
||||||
|
mapper := &NonceMapper{
|
||||||
|
id: 42,
|
||||||
|
storage: NewNonceStorage(),
|
||||||
|
strategy: strategy,
|
||||||
|
lastUsed: time.Now().Add(-2 * time.Minute),
|
||||||
|
pending: make(map[int64]SubmitContext),
|
||||||
|
}
|
||||||
|
mapper.storage.slots[0] = -1
|
||||||
|
|
||||||
|
splitter := &NonceSplitter{
|
||||||
|
mappers: []*NonceMapper{mapper},
|
||||||
|
mapperByID: map[int64]*NonceMapper{mapper.id: mapper},
|
||||||
|
}
|
||||||
|
|
||||||
|
splitter.GC()
|
||||||
|
|
||||||
|
if len(splitter.mappers) != 0 {
|
||||||
|
t.Fatalf("expected idle mapper to be reclaimed, got %d mapper(s)", len(splitter.mappers))
|
||||||
|
}
|
||||||
|
if _, ok := splitter.mapperByID[mapper.id]; ok {
|
||||||
|
t.Fatalf("expected reclaimed mapper to be removed from lookup table")
|
||||||
|
}
|
||||||
|
if !strategy.disconnected {
|
||||||
|
t.Fatalf("expected reclaimed mapper strategy to be disconnected")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNonceSplitter_GC_Bad verifies GC is safe to call on a nil receiver
// (it must simply return without panicking).
func TestNonceSplitter_GC_Bad(t *testing.T) {
	var splitter *NonceSplitter

	splitter.GC()
}
|
||||||
|
|
||||||
|
func TestNonceSplitter_GC_Ugly(t *testing.T) {
|
||||||
|
strategy := &gcStrategy{active: true}
|
||||||
|
mapper := &NonceMapper{
|
||||||
|
id: 99,
|
||||||
|
storage: NewNonceStorage(),
|
||||||
|
strategy: strategy,
|
||||||
|
lastUsed: time.Now().Add(-2 * time.Minute),
|
||||||
|
pending: make(map[int64]SubmitContext),
|
||||||
|
}
|
||||||
|
mapper.storage.slots[0] = 7
|
||||||
|
|
||||||
|
splitter := &NonceSplitter{
|
||||||
|
mappers: []*NonceMapper{mapper},
|
||||||
|
mapperByID: map[int64]*NonceMapper{mapper.id: mapper},
|
||||||
|
}
|
||||||
|
|
||||||
|
splitter.GC()
|
||||||
|
|
||||||
|
if len(splitter.mappers) != 1 {
|
||||||
|
t.Fatalf("expected active mapper to remain, got %d mapper(s)", len(splitter.mappers))
|
||||||
|
}
|
||||||
|
if strategy.disconnected {
|
||||||
|
t.Fatalf("expected active mapper to stay connected")
|
||||||
|
}
|
||||||
|
}
|
||||||
511
splitter/nicehash/impl.go
Normal file
511
splitter/nicehash/impl.go
Normal file
|
|
@ -0,0 +1,511 @@
|
||||||
|
package nicehash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
"dappco.re/go/proxy/pool"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init registers the "nicehash" splitter mode with the proxy core so a
// Config with Mode: "nicehash" resolves to a NonceSplitter backed by the
// default pool strategy factory.
func init() {
	proxy.RegisterSplitterFactory("nicehash", func(config *proxy.Config, eventBus *proxy.EventBus) proxy.Splitter {
		return NewNonceSplitter(config, eventBus, pool.NewStrategyFactory(config))
	})
}
|
||||||
|
|
||||||
|
// NewNonceSplitter creates a NiceHash splitter.
|
||||||
|
func NewNonceSplitter(config *proxy.Config, eventBus *proxy.EventBus, factory pool.StrategyFactory) *NonceSplitter {
|
||||||
|
if factory == nil {
|
||||||
|
factory = pool.NewStrategyFactory(config)
|
||||||
|
}
|
||||||
|
return &NonceSplitter{
|
||||||
|
mapperByID: make(map[int64]*NonceMapper),
|
||||||
|
config: config,
|
||||||
|
events: eventBus,
|
||||||
|
strategyFactory: factory,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect establishes the first mapper.
|
||||||
|
func (s *NonceSplitter) Connect() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
if len(s.mappers) == 0 {
|
||||||
|
s.addMapperLocked()
|
||||||
|
}
|
||||||
|
for _, mapper := range s.mappers {
|
||||||
|
mapper.Start()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnLogin assigns the miner to a mapper with a free slot.
|
||||||
|
func (s *NonceSplitter) OnLogin(event *proxy.LoginEvent) {
|
||||||
|
if s == nil || event == nil || event.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
event.Miner.SetExtendedNiceHash(true)
|
||||||
|
for _, mapper := range s.mappers {
|
||||||
|
if mapper.Add(event.Miner) {
|
||||||
|
s.mapperByID[mapper.id] = mapper
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mapper := s.addMapperLocked()
|
||||||
|
if mapper != nil {
|
||||||
|
_ = mapper.Add(event.Miner)
|
||||||
|
s.mapperByID[mapper.id] = mapper
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnSubmit forwards a share to the owning mapper.
|
||||||
|
func (s *NonceSplitter) OnSubmit(event *proxy.SubmitEvent) {
|
||||||
|
if s == nil || event == nil || event.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.RLock()
|
||||||
|
mapper := s.mapperByID[event.Miner.MapperID()]
|
||||||
|
s.mu.RUnlock()
|
||||||
|
if mapper != nil {
|
||||||
|
mapper.Submit(event)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnClose releases the miner slot.
|
||||||
|
func (s *NonceSplitter) OnClose(event *proxy.CloseEvent) {
|
||||||
|
if s == nil || event == nil || event.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.RLock()
|
||||||
|
mapper := s.mapperByID[event.Miner.MapperID()]
|
||||||
|
s.mu.RUnlock()
|
||||||
|
if mapper != nil {
|
||||||
|
mapper.Remove(event.Miner)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GC removes empty mappers that have been idle.
|
||||||
|
func (s *NonceSplitter) GC() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
now := time.Now()
|
||||||
|
next := s.mappers[:0]
|
||||||
|
for _, mapper := range s.mappers {
|
||||||
|
if mapper == nil || mapper.storage == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
free, dead, active := mapper.storage.SlotCount()
|
||||||
|
if active == 0 && now.Sub(mapper.lastUsed) > time.Minute {
|
||||||
|
if mapper.strategy != nil {
|
||||||
|
mapper.strategy.Disconnect()
|
||||||
|
}
|
||||||
|
delete(s.mapperByID, mapper.id)
|
||||||
|
_ = free
|
||||||
|
_ = dead
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
next = append(next, mapper)
|
||||||
|
}
|
||||||
|
s.mappers = next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tick is called once per second.
|
||||||
|
func (s *NonceSplitter) Tick(ticks uint64) {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
strategies := make([]pool.Strategy, 0, len(s.mappers))
|
||||||
|
s.mu.RLock()
|
||||||
|
for _, mapper := range s.mappers {
|
||||||
|
if mapper == nil || mapper.strategy == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
strategies = append(strategies, mapper.strategy)
|
||||||
|
}
|
||||||
|
s.mu.RUnlock()
|
||||||
|
for _, strategy := range strategies {
|
||||||
|
if ticker, ok := strategy.(interface{ Tick(uint64) }); ok {
|
||||||
|
ticker.Tick(ticks)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upstreams returns pool connection counts.
|
||||||
|
func (s *NonceSplitter) Upstreams() proxy.UpstreamStats {
|
||||||
|
if s == nil {
|
||||||
|
return proxy.UpstreamStats{}
|
||||||
|
}
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
var stats proxy.UpstreamStats
|
||||||
|
for _, mapper := range s.mappers {
|
||||||
|
if mapper.strategy != nil && mapper.strategy.IsActive() {
|
||||||
|
stats.Active++
|
||||||
|
} else if mapper.suspended > 0 || !mapper.active {
|
||||||
|
stats.Error++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stats.Total = stats.Active + stats.Sleep + stats.Error
|
||||||
|
return stats
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disconnect closes all upstream pool connections and forgets the current mapper set.
|
||||||
|
func (s *NonceSplitter) Disconnect() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
for _, mapper := range s.mappers {
|
||||||
|
if mapper != nil && mapper.strategy != nil {
|
||||||
|
mapper.strategy.Disconnect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.mappers = nil
|
||||||
|
s.mapperByID = make(map[int64]*NonceMapper)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReloadPools reconnects each mapper strategy using the updated pool list.
|
||||||
|
//
|
||||||
|
// s.ReloadPools()
|
||||||
|
func (s *NonceSplitter) ReloadPools() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
strategies := make([]pool.Strategy, 0, len(s.mappers))
|
||||||
|
s.mu.RLock()
|
||||||
|
for _, mapper := range s.mappers {
|
||||||
|
if mapper == nil || mapper.strategy == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
strategies = append(strategies, mapper.strategy)
|
||||||
|
}
|
||||||
|
s.mu.RUnlock()
|
||||||
|
for _, strategy := range strategies {
|
||||||
|
if reloadable, ok := strategy.(pool.ReloadableStrategy); ok {
|
||||||
|
reloadable.ReloadPools()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addMapperLocked creates, registers, and starts one new mapper.
//
// Caller must hold s.mu. IDs come from the monotonically increasing
// nextMapperID counter; the new mapper inherits the splitter's event
// bus and receives a strategy from the factory.
func (s *NonceSplitter) addMapperLocked() *NonceMapper {
	id := s.nextMapperID
	s.nextMapperID++
	// Strategy is attached after construction so the factory can bind to
	// the mapper instance itself.
	mapper := NewNonceMapper(id, s.config, nil)
	mapper.events = s.events
	mapper.lastUsed = time.Now()
	mapper.strategy = s.strategyFactory(mapper)
	s.mappers = append(s.mappers, mapper)
	// Defensive: mapperByID may be nil when the splitter was zero-valued
	// (e.g. constructed with a struct literal in tests).
	if s.mapperByID == nil {
		s.mapperByID = make(map[int64]*NonceMapper)
	}
	s.mapperByID[mapper.id] = mapper
	mapper.Start()
	return mapper
}
|
||||||
|
|
||||||
|
// NewNonceMapper creates a mapper for one upstream connection.
|
||||||
|
func NewNonceMapper(id int64, config *proxy.Config, strategy pool.Strategy) *NonceMapper {
|
||||||
|
return &NonceMapper{
|
||||||
|
id: id,
|
||||||
|
storage: NewNonceStorage(),
|
||||||
|
strategy: strategy,
|
||||||
|
pending: make(map[int64]SubmitContext),
|
||||||
|
config: config,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start connects the mapper's upstream strategy once.
//
// Safe on a nil receiver or nil strategy. Repeat calls are no-ops via
// startOnce, so both Connect and addMapperLocked may call it freely.
func (m *NonceMapper) Start() {
	if m == nil || m.strategy == nil {
		return
	}
	m.startOnce.Do(func() {
		m.lastUsed = time.Now()
		m.strategy.Connect()
	})
}
|
||||||
|
|
||||||
|
// Add assigns a miner to a free slot.
|
||||||
|
func (m *NonceMapper) Add(miner *proxy.Miner) bool {
|
||||||
|
if m == nil || miner == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
m.mu.Lock()
|
||||||
|
defer m.mu.Unlock()
|
||||||
|
ok := m.storage.Add(miner)
|
||||||
|
if ok {
|
||||||
|
miner.SetMapperID(m.id)
|
||||||
|
miner.SetExtendedNiceHash(true)
|
||||||
|
m.lastUsed = time.Now()
|
||||||
|
m.storage.mu.Lock()
|
||||||
|
job := m.storage.job
|
||||||
|
m.storage.mu.Unlock()
|
||||||
|
if job.IsValid() {
|
||||||
|
miner.SetCurrentJob(job)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove marks the miner slot as dead.
//
// The slot stays tombstoned until the next pool job clears it (see
// NonceStorage.SetJob); the miner is detached by resetting its mapper
// id to -1.
func (m *NonceMapper) Remove(miner *proxy.Miner) {
	if m == nil || miner == nil {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	m.storage.Remove(miner)
	miner.SetMapperID(-1)
	m.lastUsed = time.Now()
}
|
||||||
|
|
||||||
|
// Submit forwards the share to the pool.
//
// Flow: resolve the effective job id (falling back to the current job
// when the miner omitted one), validate it against the current or
// previous job, forward via the strategy, and record a SubmitContext
// keyed by the strategy's sequence number so OnResultAccepted can
// correlate the pool reply back to the miner.
func (m *NonceMapper) Submit(event *proxy.SubmitEvent) {
	if m == nil || event == nil || event.Miner == nil || m.strategy == nil {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	jobID := event.JobID
	// Snapshot job state under the storage's own lock; m.mu does not
	// guard the storage fields.
	m.storage.mu.Lock()
	job := m.storage.job
	prevJob := m.storage.prevJob
	m.storage.mu.Unlock()
	if jobID == "" {
		// Miner sent no job id: assume the current job.
		jobID = job.JobID
	}
	valid := m.storage.IsValidJobID(jobID)
	if jobID == "" || !valid {
		m.rejectInvalidJobLocked(event, job)
		return
	}
	// An id matching the previous job means the share raced a job roll;
	// credit it at the previous job's difficulty.
	submissionJob := job
	if jobID == prevJob.JobID && prevJob.JobID != "" {
		submissionJob = prevJob
	}
	seq := m.strategy.Submit(jobID, event.Nonce, event.Result, event.Algo)
	m.pending[seq] = SubmitContext{
		RequestID: event.RequestID,
		MinerID:   event.Miner.ID(),
		JobID:     jobID,
		Diff:      proxy.EffectiveShareDifficulty(submissionJob, event.Miner),
		StartedAt: time.Now(),
	}
	m.lastUsed = time.Now()
}
|
||||||
|
|
||||||
|
func (m *NonceMapper) rejectInvalidJobLocked(event *proxy.SubmitEvent, job proxy.Job) {
|
||||||
|
event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
|
||||||
|
if m.events != nil {
|
||||||
|
jobCopy := job
|
||||||
|
m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: event.Miner, Job: &jobCopy, Error: "Invalid job id"})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsActive reports whether the mapper has received a valid job.
// Safe on a nil receiver (reports false).
func (m *NonceMapper) IsActive() bool {
	if m == nil {
		return false
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.active
}
|
||||||
|
|
||||||
|
// OnJob stores the current pool job and broadcasts it to active miners.
//
// A fresh job proves the upstream is healthy again, so the mapper is
// marked active and its suspension counter reset. Invalid jobs are
// ignored.
func (m *NonceMapper) OnJob(job proxy.Job) {
	if m == nil || !job.IsValid() {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	m.storage.SetJob(job)
	m.active = true
	m.suspended = 0
	m.lastUsed = time.Now()
}
|
||||||
|
|
||||||
|
// OnResultAccepted correlates a pool result back to the originating miner.
|
||||||
|
func (m *NonceMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.mu.Lock()
|
||||||
|
ctx, ok := m.pending[sequence]
|
||||||
|
if ok {
|
||||||
|
delete(m.pending, sequence)
|
||||||
|
}
|
||||||
|
m.storage.mu.Lock()
|
||||||
|
miner := m.storage.miners[ctx.MinerID]
|
||||||
|
job := m.storage.job
|
||||||
|
prevJob := m.storage.prevJob
|
||||||
|
m.storage.mu.Unlock()
|
||||||
|
job, expired := resolveSubmissionJob(ctx.JobID, job, prevJob)
|
||||||
|
m.mu.Unlock()
|
||||||
|
if !ok || miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
latency := uint16(0)
|
||||||
|
if !ctx.StartedAt.IsZero() {
|
||||||
|
elapsed := time.Since(ctx.StartedAt).Milliseconds()
|
||||||
|
if elapsed > int64(^uint16(0)) {
|
||||||
|
latency = ^uint16(0)
|
||||||
|
} else {
|
||||||
|
latency = uint16(elapsed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if accepted {
|
||||||
|
miner.Success(ctx.RequestID, "OK")
|
||||||
|
if m.events != nil {
|
||||||
|
m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Job: &job, Diff: ctx.Diff, Latency: latency, Expired: expired})
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
miner.ReplyWithError(ctx.RequestID, errorMessage)
|
||||||
|
if m.events != nil {
|
||||||
|
m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Job: &job, Diff: ctx.Diff, Error: errorMessage, Latency: latency})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resolveSubmissionJob(jobID string, currentJob, previousJob proxy.Job) (proxy.Job, bool) {
|
||||||
|
if jobID != "" && jobID == previousJob.JobID && jobID != currentJob.JobID {
|
||||||
|
return previousJob, true
|
||||||
|
}
|
||||||
|
return currentJob, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnDisconnect marks the upstream as down: the mapper becomes inactive
// and the suspension counter is incremented (reset by the next OnJob).
func (m *NonceMapper) OnDisconnect() {
	if m == nil {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	m.active = false
	m.suspended++
}
|
||||||
|
|
||||||
|
// NewNonceStorage creates a 256-slot table ready for round-robin miner allocation.
//
// storage := nicehash.NewNonceStorage()
//
// The miners map is keyed by miner ID; slots start zeroed (all free).
func NewNonceStorage() *NonceStorage {
	return &NonceStorage{miners: make(map[int64]*proxy.Miner)}
}
|
||||||
|
|
||||||
|
// Add assigns the next free slot, such as 0x2a, to one miner.
|
||||||
|
//
|
||||||
|
// ok := storage.Add(&proxy.Miner{})
|
||||||
|
func (s *NonceStorage) Add(miner *proxy.Miner) bool {
|
||||||
|
if s == nil || miner == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
index := (s.cursor + i) % 256
|
||||||
|
if s.slots[index] != 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
s.slots[index] = miner.ID()
|
||||||
|
s.miners[miner.ID()] = miner
|
||||||
|
miner.SetFixedByte(uint8(index))
|
||||||
|
s.cursor = (index + 1) % 256
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove marks one miner's slot as dead until the next SetJob call.
|
||||||
|
//
|
||||||
|
// storage.Remove(miner)
|
||||||
|
func (s *NonceStorage) Remove(miner *proxy.Miner) {
|
||||||
|
if s == nil || miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
index := int(miner.FixedByte())
|
||||||
|
if index >= 0 && index < len(s.slots) && s.slots[index] == miner.ID() {
|
||||||
|
s.slots[index] = -miner.ID()
|
||||||
|
}
|
||||||
|
delete(s.miners, miner.ID())
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetJob broadcasts one pool job to all active miners and clears dead slots.
//
// storage.SetJob(proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-1"})
func (s *NonceStorage) SetJob(job proxy.Job) {
	if s == nil || !job.IsValid() {
		return
	}
	s.mu.Lock()
	// Keep the outgoing job as prevJob so late shares still validate —
	// but only within the same upstream client session.
	s.prevJob = s.job
	if s.prevJob.ClientID != job.ClientID {
		s.prevJob = proxy.Job{}
	}
	s.job = job
	// Negative slots are tombstones left by Remove; a job roll frees them.
	for i := range s.slots {
		if s.slots[i] < 0 {
			s.slots[i] = 0
		}
	}
	// Snapshot miners, then release the lock BEFORE fanning out:
	// ForwardJob may block or call back and must not run under s.mu.
	miners := make([]*proxy.Miner, 0, len(s.miners))
	for _, miner := range s.miners {
		miners = append(miners, miner)
	}
	s.mu.Unlock()
	for _, miner := range miners {
		miner.ForwardJob(job, job.Algo)
	}
}
|
||||||
|
|
||||||
|
// IsValidJobID accepts the current job, or the immediately previous one after a pool roll.
|
||||||
|
//
|
||||||
|
// if !storage.IsValidJobID("job-1") { return }
|
||||||
|
func (s *NonceStorage) IsValidJobID(id string) bool {
|
||||||
|
if s == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
if id == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if id == s.job.JobID {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if id == s.prevJob.JobID && s.prevJob.JobID != "" {
|
||||||
|
s.expired++
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// SlotCount returns free, dead, and active slot counts such as 254, 1, 1.
|
||||||
|
//
|
||||||
|
// free, dead, active := storage.SlotCount()
|
||||||
|
func (s *NonceStorage) SlotCount() (free, dead, active int) {
|
||||||
|
if s == nil {
|
||||||
|
return 0, 0, 0
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
for _, slot := range s.slots {
|
||||||
|
switch {
|
||||||
|
case slot == 0:
|
||||||
|
free++
|
||||||
|
case slot < 0:
|
||||||
|
dead++
|
||||||
|
default:
|
||||||
|
active++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
@ -2,9 +2,10 @@ package nicehash
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"dappco.re/go/core/proxy"
|
"dappco.re/go/proxy"
|
||||||
"dappco.re/go/core/proxy/pool"
|
"dappco.re/go/proxy/pool"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NonceMapper manages one outbound pool connection and the 256-slot NonceStorage.
|
// NonceMapper manages one outbound pool connection and the 256-slot NonceStorage.
|
||||||
|
|
@ -15,11 +16,14 @@ import (
|
||||||
type NonceMapper struct {
|
type NonceMapper struct {
|
||||||
id int64
|
id int64
|
||||||
storage *NonceStorage
|
storage *NonceStorage
|
||||||
strategy pool.Strategy // manages pool client lifecycle and failover
|
strategy pool.Strategy // manages pool client lifecycle and failover
|
||||||
pending map[int64]SubmitContext // sequence → {requestID, minerID}
|
pending map[int64]SubmitContext // sequence → {requestID, minerID}
|
||||||
cfg *proxy.Config
|
config *proxy.Config
|
||||||
|
events *proxy.EventBus
|
||||||
active bool // true once pool has sent at least one job
|
active bool // true once pool has sent at least one job
|
||||||
suspended int // > 0 when pool connection is in error/reconnecting
|
suspended int // > 0 when pool connection is in error/reconnecting
|
||||||
|
lastUsed time.Time
|
||||||
|
startOnce sync.Once
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -29,4 +33,7 @@ type NonceMapper struct {
|
||||||
type SubmitContext struct {
|
type SubmitContext struct {
|
||||||
RequestID int64 // JSON-RPC id from the miner's submit request
|
RequestID int64 // JSON-RPC id from the miner's submit request
|
||||||
MinerID int64 // miner that submitted
|
MinerID int64 // miner that submitted
|
||||||
|
JobID string
|
||||||
|
Diff uint64
|
||||||
|
StartedAt time.Time
|
||||||
}
|
}
|
||||||
|
|
|
||||||
243
splitter/nicehash/mapper_start_test.go
Normal file
243
splitter/nicehash/mapper_start_test.go
Normal file
|
|
@ -0,0 +1,243 @@
|
||||||
|
package nicehash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// startCountingStrategy is a pool.Strategy test double counting how
// many times Connect was invoked.
type startCountingStrategy struct {
	mu      sync.Mutex
	connect int // number of Connect calls observed
}
|
||||||
|
|
||||||
|
// Connect increments the call counter under the mutex.
func (s *startCountingStrategy) Connect() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.connect++
}
|
||||||
|
|
||||||
|
// Submit is a no-op that always reports sequence 0.
func (s *startCountingStrategy) Submit(jobID, nonce, result, algo string) int64 {
	return 0
}
|
||||||
|
|
||||||
|
// Disconnect is a no-op for the test double.
func (s *startCountingStrategy) Disconnect() {}
|
||||||
|
|
||||||
|
// IsActive reports whether Connect has been called at least once.
func (s *startCountingStrategy) IsActive() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.connect > 0
}
|
||||||
|
|
||||||
|
// discardConn is a no-op net.Conn for tests that ignore the wire:
// reads return nothing, writes succeed and vanish, deadlines are ignored.
type discardConn struct{}

func (discardConn) Read([]byte) (int, error)         { return 0, nil }
func (discardConn) Write(p []byte) (int, error)      { return len(p), nil }
func (discardConn) Close() error                     { return nil }
func (discardConn) LocalAddr() net.Addr              { return nil }
func (discardConn) RemoteAddr() net.Addr             { return nil }
func (discardConn) SetDeadline(time.Time) error      { return nil }
func (discardConn) SetReadDeadline(time.Time) error  { return nil }
func (discardConn) SetWriteDeadline(time.Time) error { return nil }
|
||||||
|
|
||||||
|
func TestMapper_Start_Good(t *testing.T) {
|
||||||
|
strategy := &startCountingStrategy{}
|
||||||
|
mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
|
||||||
|
|
||||||
|
mapper.Start()
|
||||||
|
|
||||||
|
if strategy.connect != 1 {
|
||||||
|
t.Fatalf("expected one connect call, got %d", strategy.connect)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestMapper_Start_Bad verifies Start is a safe no-op when the mapper
// has no strategy attached.
func TestMapper_Start_Bad(t *testing.T) {
	mapper := NewNonceMapper(1, &proxy.Config{}, nil)

	mapper.Start()
}
|
||||||
|
|
||||||
|
func TestMapper_Start_Ugly(t *testing.T) {
|
||||||
|
strategy := &startCountingStrategy{}
|
||||||
|
mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
|
||||||
|
|
||||||
|
mapper.Start()
|
||||||
|
mapper.Start()
|
||||||
|
|
||||||
|
if strategy.connect != 1 {
|
||||||
|
t.Fatalf("expected Start to be idempotent, got %d connect calls", strategy.connect)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestMapper_Submit_InvalidJob_Good verifies that a submit with an
// unknown job id earns an "Invalid job id" JSON-RPC error reply and
// leaves no pending entry behind.
func TestMapper_Submit_InvalidJob_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()

	miner := proxy.NewMiner(minerConn, 3333, nil)
	miner.SetID(7)
	strategy := &startCountingStrategy{}
	mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
	mapper.storage.job = proxy.Job{JobID: "job-1", Blob: "blob", Target: "b88d0600"}

	// Submit writes the error reply synchronously over the pipe, so it
	// must run in a goroutine while this test reads the other end.
	done := make(chan struct{})
	go func() {
		mapper.Submit(&proxy.SubmitEvent{
			Miner:     miner,
			JobID:     "job-missing",
			Nonce:     "deadbeef",
			Result:    "hash",
			RequestID: 42,
		})
		close(done)
	}()

	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read error reply: %v", err)
	}
	<-done

	// Decode just the fields the assertions need.
	var payload struct {
		ID    float64 `json:"id"`
		Error struct {
			Message string `json:"message"`
		} `json:"error"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal error reply: %v", err)
	}
	if payload.ID != 42 {
		t.Fatalf("expected request id 42, got %v", payload.ID)
	}
	if payload.Error.Message != "Invalid job id" {
		t.Fatalf("expected invalid job error, got %q", payload.Error.Message)
	}
	if len(mapper.pending) != 0 {
		t.Fatalf("expected invalid submit not to create a pending entry")
	}
}
|
||||||
|
|
||||||
|
func TestMapper_OnResultAccepted_ExpiredUsesPreviousJob(t *testing.T) {
|
||||||
|
bus := proxy.NewEventBus()
|
||||||
|
events := make(chan proxy.Event, 1)
|
||||||
|
bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
|
||||||
|
events <- e
|
||||||
|
})
|
||||||
|
|
||||||
|
miner := proxy.NewMiner(discardConn{}, 3333, nil)
|
||||||
|
miner.SetID(7)
|
||||||
|
mapper := NewNonceMapper(1, &proxy.Config{}, &startCountingStrategy{})
|
||||||
|
mapper.events = bus
|
||||||
|
mapper.storage.job = proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"}
|
||||||
|
mapper.storage.prevJob = proxy.Job{JobID: "job-old", Blob: "blob-old", Target: "b88d0600"}
|
||||||
|
mapper.storage.miners[miner.ID()] = miner
|
||||||
|
if !mapper.storage.IsValidJobID("job-old") {
|
||||||
|
t.Fatal("expected previous job to validate before result handling")
|
||||||
|
}
|
||||||
|
mapper.pending[9] = SubmitContext{
|
||||||
|
RequestID: 42,
|
||||||
|
MinerID: miner.ID(),
|
||||||
|
JobID: "job-old",
|
||||||
|
StartedAt: time.Now(),
|
||||||
|
}
|
||||||
|
|
||||||
|
mapper.OnResultAccepted(9, true, "")
|
||||||
|
|
||||||
|
if got := mapper.storage.expired; got != 1 {
|
||||||
|
t.Fatalf("expected one expired validation, got %d", got)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case event := <-events:
|
||||||
|
if !event.Expired {
|
||||||
|
t.Fatalf("expected expired share to be flagged")
|
||||||
|
}
|
||||||
|
if event.Job == nil || event.Job.JobID != "job-old" {
|
||||||
|
t.Fatalf("expected previous job to be attached, got %+v", event.Job)
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("expected accept event")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMapper_Submit_ExpiredJobUsesPreviousDifficulty(t *testing.T) {
|
||||||
|
miner := proxy.NewMiner(discardConn{}, 3333, nil)
|
||||||
|
miner.SetID(9)
|
||||||
|
|
||||||
|
strategy := &submitCaptureStrategy{}
|
||||||
|
mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
|
||||||
|
mapper.storage.job = proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "ffffffff"}
|
||||||
|
mapper.storage.prevJob = proxy.Job{JobID: "job-old", Blob: "blob-old", Target: "b88d0600"}
|
||||||
|
mapper.storage.miners[miner.ID()] = miner
|
||||||
|
|
||||||
|
mapper.Submit(&proxy.SubmitEvent{
|
||||||
|
Miner: miner,
|
||||||
|
JobID: "job-old",
|
||||||
|
Nonce: "deadbeef",
|
||||||
|
Result: "hash",
|
||||||
|
RequestID: 88,
|
||||||
|
})
|
||||||
|
|
||||||
|
ctx, ok := mapper.pending[strategy.seq]
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("expected pending submit context for expired job")
|
||||||
|
}
|
||||||
|
want := mapper.storage.prevJob.DifficultyFromTarget()
|
||||||
|
if ctx.Diff != want {
|
||||||
|
t.Fatalf("expected previous-job difficulty %d, got %d", want, ctx.Diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type submitCaptureStrategy struct {
|
||||||
|
seq int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *submitCaptureStrategy) Connect() {}
|
||||||
|
|
||||||
|
func (s *submitCaptureStrategy) Submit(jobID, nonce, result, algo string) int64 {
|
||||||
|
s.seq++
|
||||||
|
return s.seq
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *submitCaptureStrategy) Disconnect() {}
|
||||||
|
|
||||||
|
func (s *submitCaptureStrategy) IsActive() bool { return true }
|
||||||
|
|
||||||
|
func TestMapper_OnResultAccepted_CustomDiffUsesEffectiveDifficulty(t *testing.T) {
|
||||||
|
bus := proxy.NewEventBus()
|
||||||
|
events := make(chan proxy.Event, 1)
|
||||||
|
bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
|
||||||
|
events <- e
|
||||||
|
})
|
||||||
|
|
||||||
|
miner := proxy.NewMiner(discardConn{}, 3333, nil)
|
||||||
|
miner.SetID(8)
|
||||||
|
mapper := NewNonceMapper(1, &proxy.Config{}, &startCountingStrategy{})
|
||||||
|
mapper.events = bus
|
||||||
|
mapper.storage.job = proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"}
|
||||||
|
mapper.storage.miners[miner.ID()] = miner
|
||||||
|
mapper.pending[10] = SubmitContext{
|
||||||
|
RequestID: 77,
|
||||||
|
MinerID: miner.ID(),
|
||||||
|
JobID: "job-new",
|
||||||
|
Diff: 25000,
|
||||||
|
StartedAt: time.Now(),
|
||||||
|
}
|
||||||
|
|
||||||
|
mapper.OnResultAccepted(10, true, "")
|
||||||
|
|
||||||
|
select {
|
||||||
|
case event := <-events:
|
||||||
|
if event.Diff != 25000 {
|
||||||
|
t.Fatalf("expected effective difficulty 25000, got %d", event.Diff)
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("expected accept event")
|
||||||
|
}
|
||||||
|
}
|
||||||
67
splitter/nicehash/reload_test.go
Normal file
67
splitter/nicehash/reload_test.go
Normal file
|
|
@ -0,0 +1,67 @@
|
||||||
|
package nicehash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
"dappco.re/go/proxy/pool"
|
||||||
|
)
|
||||||
|
|
||||||
|
type reloadableStrategy struct {
|
||||||
|
reloads int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *reloadableStrategy) Connect() {}
|
||||||
|
func (s *reloadableStrategy) Submit(jobID, nonce, result, algo string) int64 { return 0 }
|
||||||
|
func (s *reloadableStrategy) Disconnect() {}
|
||||||
|
func (s *reloadableStrategy) IsActive() bool { return true }
|
||||||
|
func (s *reloadableStrategy) ReloadPools() { s.reloads++ }
|
||||||
|
|
||||||
|
var _ pool.ReloadableStrategy = (*reloadableStrategy)(nil)
|
||||||
|
|
||||||
|
func TestNonceSplitter_ReloadPools_Good(t *testing.T) {
|
||||||
|
strategy := &reloadableStrategy{}
|
||||||
|
splitter := &NonceSplitter{
|
||||||
|
mappers: []*NonceMapper{
|
||||||
|
{strategy: strategy},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
splitter.ReloadPools()
|
||||||
|
|
||||||
|
if strategy.reloads != 1 {
|
||||||
|
t.Fatalf("expected mapper strategy to reload once, got %d", strategy.reloads)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNonceSplitter_ReloadPools_Bad(t *testing.T) {
|
||||||
|
splitter := &NonceSplitter{
|
||||||
|
mappers: []*NonceMapper{
|
||||||
|
{strategy: nil},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
splitter.ReloadPools()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNonceSplitter_ReloadPools_Ugly(t *testing.T) {
|
||||||
|
splitter := NewNonceSplitter(&proxy.Config{}, proxy.NewEventBus(), func(listener pool.StratumListener) pool.Strategy {
|
||||||
|
return &reloadableStrategy{}
|
||||||
|
})
|
||||||
|
splitter.mappers = []*NonceMapper{
|
||||||
|
{strategy: &reloadableStrategy{}},
|
||||||
|
{strategy: &reloadableStrategy{}},
|
||||||
|
}
|
||||||
|
|
||||||
|
splitter.ReloadPools()
|
||||||
|
|
||||||
|
for index, mapper := range splitter.mappers {
|
||||||
|
strategy, ok := mapper.strategy.(*reloadableStrategy)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected reloadable strategy at mapper %d", index)
|
||||||
|
}
|
||||||
|
if strategy.reloads != 1 {
|
||||||
|
t.Fatalf("expected mapper %d to reload once, got %d", index, strategy.reloads)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -12,8 +12,8 @@ package nicehash
|
||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"dappco.re/go/core/proxy"
|
"dappco.re/go/proxy"
|
||||||
"dappco.re/go/core/proxy/pool"
|
"dappco.re/go/proxy/pool"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NonceSplitter is the Splitter implementation for NiceHash mode.
|
// NonceSplitter is the Splitter implementation for NiceHash mode.
|
||||||
|
|
@ -23,8 +23,10 @@ import (
|
||||||
// s.Connect()
|
// s.Connect()
|
||||||
type NonceSplitter struct {
|
type NonceSplitter struct {
|
||||||
mappers []*NonceMapper
|
mappers []*NonceMapper
|
||||||
cfg *proxy.Config
|
mapperByID map[int64]*NonceMapper
|
||||||
|
config *proxy.Config
|
||||||
events *proxy.EventBus
|
events *proxy.EventBus
|
||||||
strategyFactory pool.StrategyFactory
|
strategyFactory pool.StrategyFactory
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
|
nextMapperID int64
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,7 @@ package nicehash
|
||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"dappco.re/go/core/proxy"
|
"dappco.re/go/proxy"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NonceStorage is the 256-slot fixed-byte allocation table for one NonceMapper.
|
// NonceStorage is the 256-slot fixed-byte allocation table for one NonceMapper.
|
||||||
|
|
@ -20,6 +20,7 @@ type NonceStorage struct {
|
||||||
miners map[int64]*proxy.Miner // minerID → Miner pointer for active miners
|
miners map[int64]*proxy.Miner // minerID → Miner pointer for active miners
|
||||||
job proxy.Job // current job from pool
|
job proxy.Job // current job from pool
|
||||||
prevJob proxy.Job // previous job (for stale submit validation)
|
prevJob proxy.Job // previous job (for stale submit validation)
|
||||||
cursor int // search starts here (round-robin allocation)
|
expired uint64
|
||||||
|
cursor int // search starts here (round-robin allocation)
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
|
||||||
160
splitter/nicehash/storage_test.go
Normal file
160
splitter/nicehash/storage_test.go
Normal file
|
|
@ -0,0 +1,160 @@
|
||||||
|
package nicehash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestStorage_Add_Good verifies 256 sequential Add calls fill all slots with unique FixedByte values.
|
||||||
|
//
|
||||||
|
// storage := nicehash.NewNonceStorage()
|
||||||
|
// for i := 0; i < 256; i++ {
|
||||||
|
// m := &proxy.Miner{}
|
||||||
|
// m.SetID(int64(i + 1))
|
||||||
|
// ok := storage.Add(m) // true for all 256
|
||||||
|
// }
|
||||||
|
func TestStorage_Add_Good(t *testing.T) {
|
||||||
|
storage := NewNonceStorage()
|
||||||
|
seen := make(map[uint8]bool)
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
m := &proxy.Miner{}
|
||||||
|
m.SetID(int64(i + 1))
|
||||||
|
ok := storage.Add(m)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected add %d to succeed", i)
|
||||||
|
}
|
||||||
|
if seen[m.FixedByte()] {
|
||||||
|
t.Fatalf("duplicate fixed byte %d at add %d", m.FixedByte(), i)
|
||||||
|
}
|
||||||
|
seen[m.FixedByte()] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStorage_Add_Bad verifies the 257th Add returns false when all 256 slots are occupied.
|
||||||
|
//
|
||||||
|
// storage := nicehash.NewNonceStorage()
|
||||||
|
// // fill 256 slots...
|
||||||
|
// ok := storage.Add(overflowMiner) // false — table is full
|
||||||
|
func TestStorage_Add_Bad(t *testing.T) {
|
||||||
|
storage := NewNonceStorage()
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
m := &proxy.Miner{}
|
||||||
|
m.SetID(int64(i + 1))
|
||||||
|
storage.Add(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
overflow := &proxy.Miner{}
|
||||||
|
overflow.SetID(257)
|
||||||
|
if storage.Add(overflow) {
|
||||||
|
t.Fatalf("expected 257th add to fail when table is full")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStorage_Add_Ugly verifies that a removed slot (dead) is reclaimed after SetJob clears it.
|
||||||
|
//
|
||||||
|
// storage := nicehash.NewNonceStorage()
|
||||||
|
// storage.Add(miner)
|
||||||
|
// storage.Remove(miner) // slot becomes dead (-minerID)
|
||||||
|
// storage.SetJob(job) // dead slots cleared to 0
|
||||||
|
// storage.Add(newMiner) // reclaimed slot succeeds
|
||||||
|
func TestStorage_Add_Ugly(t *testing.T) {
|
||||||
|
storage := NewNonceStorage()
|
||||||
|
miner := &proxy.Miner{}
|
||||||
|
miner.SetID(1)
|
||||||
|
|
||||||
|
if !storage.Add(miner) {
|
||||||
|
t.Fatalf("expected first add to succeed")
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.Remove(miner)
|
||||||
|
free, dead, active := storage.SlotCount()
|
||||||
|
if dead != 1 || active != 0 {
|
||||||
|
t.Fatalf("expected 1 dead slot, got free=%d dead=%d active=%d", free, dead, active)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetJob clears dead slots
|
||||||
|
storage.SetJob(proxy.Job{Blob: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", JobID: "job-1"})
|
||||||
|
free, dead, active = storage.SlotCount()
|
||||||
|
if dead != 0 {
|
||||||
|
t.Fatalf("expected dead slots cleared after SetJob, got %d", dead)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reclaim the slot
|
||||||
|
newMiner := &proxy.Miner{}
|
||||||
|
newMiner.SetID(2)
|
||||||
|
if !storage.Add(newMiner) {
|
||||||
|
t.Fatalf("expected reclaimed slot add to succeed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStorage_IsValidJobID_Good verifies the current job ID is accepted.
|
||||||
|
//
|
||||||
|
// storage := nicehash.NewNonceStorage()
|
||||||
|
// storage.SetJob(proxy.Job{JobID: "job-2", Blob: "..."})
|
||||||
|
// storage.IsValidJobID("job-2") // true
|
||||||
|
func TestStorage_IsValidJobID_Good(t *testing.T) {
|
||||||
|
storage := NewNonceStorage()
|
||||||
|
storage.SetJob(proxy.Job{
|
||||||
|
JobID: "job-1",
|
||||||
|
Blob: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
})
|
||||||
|
|
||||||
|
if !storage.IsValidJobID("job-1") {
|
||||||
|
t.Fatalf("expected current job to be valid")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStorage_IsValidJobID_Bad verifies an unknown job ID is rejected.
|
||||||
|
//
|
||||||
|
// storage := nicehash.NewNonceStorage()
|
||||||
|
// storage.IsValidJobID("nonexistent") // false
|
||||||
|
func TestStorage_IsValidJobID_Bad(t *testing.T) {
|
||||||
|
storage := NewNonceStorage()
|
||||||
|
storage.SetJob(proxy.Job{
|
||||||
|
JobID: "job-1",
|
||||||
|
Blob: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
})
|
||||||
|
|
||||||
|
if storage.IsValidJobID("nonexistent") {
|
||||||
|
t.Fatalf("expected unknown job id to be invalid")
|
||||||
|
}
|
||||||
|
if storage.IsValidJobID("") {
|
||||||
|
t.Fatalf("expected empty job id to be invalid")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStorage_IsValidJobID_Ugly verifies the previous job ID is accepted but counts as expired.
|
||||||
|
//
|
||||||
|
// storage := nicehash.NewNonceStorage()
|
||||||
|
// // job-1 is current, job-2 pushes job-1 to previous
|
||||||
|
// storage.IsValidJobID("job-1") // true (but expired counter increments)
|
||||||
|
func TestStorage_IsValidJobID_Ugly(t *testing.T) {
|
||||||
|
storage := NewNonceStorage()
|
||||||
|
blob160 := "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
|
||||||
|
|
||||||
|
storage.SetJob(proxy.Job{JobID: "job-1", Blob: blob160, ClientID: "session-1"})
|
||||||
|
storage.SetJob(proxy.Job{JobID: "job-2", Blob: blob160, ClientID: "session-1"})
|
||||||
|
|
||||||
|
if !storage.IsValidJobID("job-2") {
|
||||||
|
t.Fatalf("expected current job to be valid")
|
||||||
|
}
|
||||||
|
if !storage.IsValidJobID("job-1") {
|
||||||
|
t.Fatalf("expected previous job to remain valid")
|
||||||
|
}
|
||||||
|
if storage.expired != 1 {
|
||||||
|
t.Fatalf("expected one expired job validation, got %d", storage.expired)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStorage_SlotCount_Good verifies free/dead/active counts on a fresh storage.
|
||||||
|
//
|
||||||
|
// storage := nicehash.NewNonceStorage()
|
||||||
|
// free, dead, active := storage.SlotCount() // 256, 0, 0
|
||||||
|
func TestStorage_SlotCount_Good(t *testing.T) {
|
||||||
|
storage := NewNonceStorage()
|
||||||
|
free, dead, active := storage.SlotCount()
|
||||||
|
if free != 256 || dead != 0 || active != 0 {
|
||||||
|
t.Fatalf("expected 256/0/0, got free=%d dead=%d active=%d", free, dead, active)
|
||||||
|
}
|
||||||
|
}
|
||||||
69
splitter/nicehash/upstreams_test.go
Normal file
69
splitter/nicehash/upstreams_test.go
Normal file
|
|
@ -0,0 +1,69 @@
|
||||||
|
package nicehash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
type upstreamStateStrategy struct {
|
||||||
|
active bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *upstreamStateStrategy) Connect() {}
|
||||||
|
|
||||||
|
func (s *upstreamStateStrategy) Submit(jobID, nonce, result, algo string) int64 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *upstreamStateStrategy) Disconnect() {}
|
||||||
|
|
||||||
|
func (s *upstreamStateStrategy) IsActive() bool { return s.active }
|
||||||
|
|
||||||
|
func TestNonceSplitter_Upstreams_Good(t *testing.T) {
|
||||||
|
splitter := &NonceSplitter{
|
||||||
|
mappers: []*NonceMapper{
|
||||||
|
{strategy: &upstreamStateStrategy{active: true}, active: true},
|
||||||
|
{strategy: &upstreamStateStrategy{active: false}, active: false, suspended: 1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
stats := splitter.Upstreams()
|
||||||
|
|
||||||
|
if stats.Active != 1 {
|
||||||
|
t.Fatalf("expected one active upstream, got %d", stats.Active)
|
||||||
|
}
|
||||||
|
if stats.Error != 1 {
|
||||||
|
t.Fatalf("expected one error upstream, got %d", stats.Error)
|
||||||
|
}
|
||||||
|
if stats.Total != 2 {
|
||||||
|
t.Fatalf("expected total to equal active + sleep + error, got %d", stats.Total)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNonceSplitter_Upstreams_Bad(t *testing.T) {
|
||||||
|
var splitter *NonceSplitter
|
||||||
|
|
||||||
|
stats := splitter.Upstreams()
|
||||||
|
|
||||||
|
if stats != (proxy.UpstreamStats{}) {
|
||||||
|
t.Fatalf("expected zero-value stats for nil splitter, got %+v", stats)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNonceSplitter_Upstreams_Ugly(t *testing.T) {
|
||||||
|
splitter := &NonceSplitter{
|
||||||
|
mappers: []*NonceMapper{
|
||||||
|
{strategy: &upstreamStateStrategy{active: false}, active: false},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
stats := splitter.Upstreams()
|
||||||
|
|
||||||
|
if stats.Error != 1 {
|
||||||
|
t.Fatalf("expected an unready mapper to be counted as error, got %+v", stats)
|
||||||
|
}
|
||||||
|
if stats.Total != 1 {
|
||||||
|
t.Fatalf("expected total to remain internally consistent, got %+v", stats)
|
||||||
|
}
|
||||||
|
}
|
||||||
383
splitter/simple/impl.go
Normal file
383
splitter/simple/impl.go
Normal file
|
|
@ -0,0 +1,383 @@
|
||||||
|
package simple
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
"dappco.re/go/proxy/pool"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proxy.RegisterSplitterFactory("simple", func(config *proxy.Config, eventBus *proxy.EventBus) proxy.Splitter {
|
||||||
|
return NewSimpleSplitter(config, eventBus, pool.NewStrategyFactory(config))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSimpleSplitter creates the passthrough splitter.
|
||||||
|
func NewSimpleSplitter(config *proxy.Config, eventBus *proxy.EventBus, factory pool.StrategyFactory) *SimpleSplitter {
|
||||||
|
if factory == nil {
|
||||||
|
factory = pool.NewStrategyFactory(config)
|
||||||
|
}
|
||||||
|
return &SimpleSplitter{
|
||||||
|
active: make(map[int64]*SimpleMapper),
|
||||||
|
idle: make(map[int64]*SimpleMapper),
|
||||||
|
config: config,
|
||||||
|
events: eventBus,
|
||||||
|
factory: factory,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect establishes any mapper strategies that already exist.
|
||||||
|
func (s *SimpleSplitter) Connect() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
for _, mapper := range s.active {
|
||||||
|
if mapper.strategy != nil {
|
||||||
|
mapper.strategy.Connect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, mapper := range s.idle {
|
||||||
|
if mapper.strategy != nil {
|
||||||
|
mapper.strategy.Connect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnLogin creates or reclaims a mapper.
|
||||||
|
func (s *SimpleSplitter) OnLogin(event *proxy.LoginEvent) {
|
||||||
|
if s == nil || event == nil || event.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
if s.config.ReuseTimeout > 0 {
|
||||||
|
for id, mapper := range s.idle {
|
||||||
|
if mapper.strategy != nil && mapper.strategy.IsActive() && !mapper.idleAt.IsZero() && now.Sub(mapper.idleAt) <= time.Duration(s.config.ReuseTimeout)*time.Second {
|
||||||
|
delete(s.idle, id)
|
||||||
|
mapper.miner = event.Miner
|
||||||
|
mapper.idleAt = time.Time{}
|
||||||
|
mapper.stopped = false
|
||||||
|
s.active[event.Miner.ID()] = mapper
|
||||||
|
event.Miner.SetRouteID(mapper.id)
|
||||||
|
if mapper.currentJob.IsValid() {
|
||||||
|
event.Miner.SetCurrentJob(mapper.currentJob)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mapper := s.newMapperLocked()
|
||||||
|
mapper.miner = event.Miner
|
||||||
|
s.active[event.Miner.ID()] = mapper
|
||||||
|
event.Miner.SetRouteID(mapper.id)
|
||||||
|
if mapper.strategy != nil {
|
||||||
|
mapper.strategy.Connect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnSubmit forwards the share to the owning mapper.
|
||||||
|
func (s *SimpleSplitter) OnSubmit(event *proxy.SubmitEvent) {
|
||||||
|
if s == nil || event == nil || event.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
mapper := s.activeMapperByRouteIDLocked(event.Miner.RouteID())
|
||||||
|
s.mu.Unlock()
|
||||||
|
if mapper != nil {
|
||||||
|
mapper.Submit(event)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnClose moves a mapper to the idle pool or stops it.
|
||||||
|
func (s *SimpleSplitter) OnClose(event *proxy.CloseEvent) {
|
||||||
|
if s == nil || event == nil || event.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
mapper := s.active[event.Miner.ID()]
|
||||||
|
if mapper == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
delete(s.active, event.Miner.ID())
|
||||||
|
mapper.miner = nil
|
||||||
|
mapper.idleAt = time.Now()
|
||||||
|
event.Miner.SetRouteID(-1)
|
||||||
|
if s.config.ReuseTimeout > 0 {
|
||||||
|
s.idle[mapper.id] = mapper
|
||||||
|
return
|
||||||
|
}
|
||||||
|
mapper.stopped = true
|
||||||
|
if mapper.strategy != nil {
|
||||||
|
mapper.strategy.Disconnect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GC removes expired idle mappers.
|
||||||
|
func (s *SimpleSplitter) GC() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
now := time.Now()
|
||||||
|
for id, mapper := range s.idle {
|
||||||
|
if mapper.stopped || (s.config.ReuseTimeout > 0 && now.Sub(mapper.idleAt) > time.Duration(s.config.ReuseTimeout)*time.Second) {
|
||||||
|
if mapper.strategy != nil {
|
||||||
|
mapper.strategy.Disconnect()
|
||||||
|
}
|
||||||
|
delete(s.idle, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tick advances timeout checks in simple mode.
|
||||||
|
func (s *SimpleSplitter) Tick(ticks uint64) {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
strategies := make([]pool.Strategy, 0, len(s.active)+len(s.idle))
|
||||||
|
s.mu.Lock()
|
||||||
|
for _, mapper := range s.active {
|
||||||
|
if mapper != nil && mapper.strategy != nil {
|
||||||
|
strategies = append(strategies, mapper.strategy)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, mapper := range s.idle {
|
||||||
|
if mapper != nil && mapper.strategy != nil {
|
||||||
|
strategies = append(strategies, mapper.strategy)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.mu.Unlock()
|
||||||
|
for _, strategy := range strategies {
|
||||||
|
if ticker, ok := strategy.(interface{ Tick(uint64) }); ok {
|
||||||
|
ticker.Tick(ticks)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.GC()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upstreams returns active/idle/error counts.
|
||||||
|
func (s *SimpleSplitter) Upstreams() proxy.UpstreamStats {
|
||||||
|
if s == nil {
|
||||||
|
return proxy.UpstreamStats{}
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
var stats proxy.UpstreamStats
|
||||||
|
for _, mapper := range s.active {
|
||||||
|
if mapper == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if mapper.stopped || mapper.strategy == nil || !mapper.strategy.IsActive() {
|
||||||
|
stats.Error++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
stats.Active++
|
||||||
|
}
|
||||||
|
for _, mapper := range s.idle {
|
||||||
|
if mapper == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if mapper.stopped || mapper.strategy == nil || !mapper.strategy.IsActive() {
|
||||||
|
stats.Error++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
stats.Sleep++
|
||||||
|
}
|
||||||
|
stats.Total = stats.Active + stats.Sleep + stats.Error
|
||||||
|
return stats
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disconnect closes every active or idle upstream connection and clears the mapper tables.
|
||||||
|
func (s *SimpleSplitter) Disconnect() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
for _, mapper := range s.active {
|
||||||
|
if mapper != nil && mapper.strategy != nil {
|
||||||
|
mapper.strategy.Disconnect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, mapper := range s.idle {
|
||||||
|
if mapper != nil && mapper.strategy != nil {
|
||||||
|
mapper.strategy.Disconnect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.active = make(map[int64]*SimpleMapper)
|
||||||
|
s.idle = make(map[int64]*SimpleMapper)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReloadPools reconnects each active or idle mapper using the updated pool list.
|
||||||
|
//
|
||||||
|
// s.ReloadPools()
|
||||||
|
func (s *SimpleSplitter) ReloadPools() {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
strategies := make([]pool.Strategy, 0, len(s.active)+len(s.idle))
|
||||||
|
s.mu.Lock()
|
||||||
|
for _, mapper := range s.active {
|
||||||
|
if mapper == nil || mapper.strategy == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
strategies = append(strategies, mapper.strategy)
|
||||||
|
}
|
||||||
|
for _, mapper := range s.idle {
|
||||||
|
if mapper == nil || mapper.strategy == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
strategies = append(strategies, mapper.strategy)
|
||||||
|
}
|
||||||
|
s.mu.Unlock()
|
||||||
|
for _, strategy := range strategies {
|
||||||
|
if reloadable, ok := strategy.(pool.ReloadableStrategy); ok {
|
||||||
|
reloadable.ReloadPools()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SimpleSplitter) newMapperLocked() *SimpleMapper {
|
||||||
|
id := s.nextMapperID
|
||||||
|
s.nextMapperID++
|
||||||
|
mapper := NewSimpleMapper(id, nil)
|
||||||
|
mapper.events = s.events
|
||||||
|
mapper.strategy = s.factory(mapper)
|
||||||
|
if mapper.strategy == nil {
|
||||||
|
mapper.strategy = s.factory(mapper)
|
||||||
|
}
|
||||||
|
return mapper
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SimpleSplitter) activeMapperByRouteIDLocked(routeID int64) *SimpleMapper {
|
||||||
|
if s == nil || routeID < 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for _, mapper := range s.active {
|
||||||
|
if mapper != nil && mapper.id == routeID {
|
||||||
|
return mapper
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Submit forwards a share to the pool.
|
||||||
|
func (m *SimpleMapper) Submit(event *proxy.SubmitEvent) {
|
||||||
|
if m == nil || event == nil || m.strategy == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.mu.Lock()
|
||||||
|
defer m.mu.Unlock()
|
||||||
|
jobID := event.JobID
|
||||||
|
if jobID == "" {
|
||||||
|
jobID = m.currentJob.JobID
|
||||||
|
}
|
||||||
|
if jobID == "" || (jobID != m.currentJob.JobID && jobID != m.prevJob.JobID) {
|
||||||
|
m.rejectInvalidJobLocked(event, m.currentJob)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
submissionJob := m.currentJob
|
||||||
|
if jobID == m.prevJob.JobID && m.prevJob.JobID != "" {
|
||||||
|
submissionJob = m.prevJob
|
||||||
|
}
|
||||||
|
seq := m.strategy.Submit(jobID, event.Nonce, event.Result, event.Algo)
|
||||||
|
m.pending[seq] = submitContext{
|
||||||
|
RequestID: event.RequestID,
|
||||||
|
Diff: proxy.EffectiveShareDifficulty(submissionJob, event.Miner),
|
||||||
|
StartedAt: time.Now(),
|
||||||
|
JobID: jobID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SimpleMapper) rejectInvalidJobLocked(event *proxy.SubmitEvent, job proxy.Job) {
|
||||||
|
if event == nil || event.Miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
|
||||||
|
if m.events != nil {
|
||||||
|
jobCopy := job
|
||||||
|
m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: event.Miner, Job: &jobCopy, Error: "Invalid job id"})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnJob forwards the latest pool job to the active miner.
|
||||||
|
func (m *SimpleMapper) OnJob(job proxy.Job) {
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.mu.Lock()
|
||||||
|
m.prevJob = m.currentJob
|
||||||
|
if m.prevJob.ClientID != job.ClientID {
|
||||||
|
m.prevJob = proxy.Job{}
|
||||||
|
}
|
||||||
|
m.currentJob = job
|
||||||
|
m.stopped = false
|
||||||
|
m.idleAt = time.Time{}
|
||||||
|
miner := m.miner
|
||||||
|
m.mu.Unlock()
|
||||||
|
if miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
miner.ForwardJob(job, job.Algo)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnResultAccepted forwards result status to the miner.
|
||||||
|
func (m *SimpleMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.mu.Lock()
|
||||||
|
ctx, ok := m.pending[sequence]
|
||||||
|
if ok {
|
||||||
|
delete(m.pending, sequence)
|
||||||
|
}
|
||||||
|
miner := m.miner
|
||||||
|
currentJob := m.currentJob
|
||||||
|
prevJob := m.prevJob
|
||||||
|
m.mu.Unlock()
|
||||||
|
if !ok || miner == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
latency := uint16(0)
|
||||||
|
if !ctx.StartedAt.IsZero() {
|
||||||
|
elapsed := time.Since(ctx.StartedAt).Milliseconds()
|
||||||
|
if elapsed > int64(^uint16(0)) {
|
||||||
|
latency = ^uint16(0)
|
||||||
|
} else {
|
||||||
|
latency = uint16(elapsed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
job := currentJob
|
||||||
|
expired := false
|
||||||
|
if ctx.JobID != "" && ctx.JobID == prevJob.JobID && ctx.JobID != currentJob.JobID {
|
||||||
|
job = prevJob
|
||||||
|
expired = true
|
||||||
|
}
|
||||||
|
if accepted {
|
||||||
|
miner.Success(ctx.RequestID, "OK")
|
||||||
|
if m.events != nil {
|
||||||
|
m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Diff: ctx.Diff, Job: &job, Latency: latency, Expired: expired})
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
miner.ReplyWithError(ctx.RequestID, errorMessage)
|
||||||
|
if m.events != nil {
|
||||||
|
m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Diff: ctx.Diff, Job: &job, Error: errorMessage, Latency: latency})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnDisconnect marks the mapper as disconnected.
|
||||||
|
func (m *SimpleMapper) OnDisconnect() {
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.stopped = true
|
||||||
|
}
|
||||||
377
splitter/simple/impl_test.go
Normal file
377
splitter/simple/impl_test.go
Normal file
|
|
@ -0,0 +1,377 @@
|
||||||
|
package simple
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy"
|
||||||
|
"dappco.re/go/proxy/pool"
|
||||||
|
)
|
||||||
|
|
||||||
|
type activeStrategy struct{}
|
||||||
|
|
||||||
|
func (a activeStrategy) Connect() {}
|
||||||
|
func (a activeStrategy) Submit(string, string, string, string) int64 { return 0 }
|
||||||
|
func (a activeStrategy) Disconnect() {}
|
||||||
|
func (a activeStrategy) IsActive() bool { return true }
|
||||||
|
|
||||||
|
type submitRecordingStrategy struct {
|
||||||
|
submits int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *submitRecordingStrategy) Connect() {}
|
||||||
|
|
||||||
|
func (s *submitRecordingStrategy) Submit(string, string, string, string) int64 {
|
||||||
|
s.submits++
|
||||||
|
return int64(s.submits)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *submitRecordingStrategy) Disconnect() {}
|
||||||
|
|
||||||
|
func (s *submitRecordingStrategy) IsActive() bool { return true }
|
||||||
|
|
||||||
|
func TestSimpleMapper_New_Good(t *testing.T) {
|
||||||
|
strategy := activeStrategy{}
|
||||||
|
mapper := NewSimpleMapper(7, strategy)
|
||||||
|
|
||||||
|
if mapper == nil {
|
||||||
|
t.Fatal("expected mapper")
|
||||||
|
}
|
||||||
|
if mapper.id != 7 {
|
||||||
|
t.Fatalf("expected mapper id 7, got %d", mapper.id)
|
||||||
|
}
|
||||||
|
if mapper.strategy != strategy {
|
||||||
|
t.Fatalf("expected strategy to be stored")
|
||||||
|
}
|
||||||
|
if mapper.pending == nil {
|
||||||
|
t.Fatal("expected pending map to be initialised")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleSplitter_OnLogin_Good(t *testing.T) {
|
||||||
|
splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
|
||||||
|
return activeStrategy{}
|
||||||
|
})
|
||||||
|
miner := &proxy.Miner{}
|
||||||
|
job := proxy.Job{JobID: "job-1", Blob: "blob"}
|
||||||
|
mapper := &SimpleMapper{
|
||||||
|
id: 7,
|
||||||
|
strategy: activeStrategy{},
|
||||||
|
currentJob: job,
|
||||||
|
idleAt: time.Now(),
|
||||||
|
}
|
||||||
|
splitter.idle[mapper.id] = mapper
|
||||||
|
|
||||||
|
splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
|
||||||
|
|
||||||
|
if miner.RouteID() != mapper.id {
|
||||||
|
t.Fatalf("expected reclaimed mapper route id %d, got %d", mapper.id, miner.RouteID())
|
||||||
|
}
|
||||||
|
if got := miner.CurrentJob().JobID; got != job.JobID {
|
||||||
|
t.Fatalf("expected current job to be restored on reuse, got %q", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleSplitter_OnLogin_Ugly(t *testing.T) {
|
||||||
|
splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
|
||||||
|
return activeStrategy{}
|
||||||
|
})
|
||||||
|
miner := &proxy.Miner{}
|
||||||
|
expired := &SimpleMapper{
|
||||||
|
id: 7,
|
||||||
|
strategy: activeStrategy{},
|
||||||
|
idleAt: time.Now().Add(-time.Minute),
|
||||||
|
}
|
||||||
|
splitter.idle[expired.id] = expired
|
||||||
|
|
||||||
|
splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
|
||||||
|
|
||||||
|
if miner.RouteID() == expired.id {
|
||||||
|
t.Fatalf("expected expired mapper not to be reclaimed")
|
||||||
|
}
|
||||||
|
if miner.RouteID() != 0 {
|
||||||
|
t.Fatalf("expected a new mapper to be allocated, got route id %d", miner.RouteID())
|
||||||
|
}
|
||||||
|
if len(splitter.active) != 1 {
|
||||||
|
t.Fatalf("expected one active mapper, got %d", len(splitter.active))
|
||||||
|
}
|
||||||
|
if len(splitter.idle) != 1 {
|
||||||
|
t.Fatalf("expected expired mapper to remain idle until GC, got %d idle mappers", len(splitter.idle))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleSplitter_OnSubmit_UsesRouteID_Good(t *testing.T) {
|
||||||
|
strategy := &submitRecordingStrategy{}
|
||||||
|
splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, nil)
|
||||||
|
miner := proxy.NewMiner(discardConn{}, 3333, nil)
|
||||||
|
miner.SetID(21)
|
||||||
|
miner.SetRouteID(7)
|
||||||
|
|
||||||
|
mapper := &SimpleMapper{
|
||||||
|
id: 7,
|
||||||
|
miner: miner,
|
||||||
|
currentJob: proxy.Job{JobID: "job-1", Blob: "blob", Target: "b88d0600"},
|
||||||
|
strategy: strategy,
|
||||||
|
pending: make(map[int64]submitContext),
|
||||||
|
}
|
||||||
|
splitter.active[99] = mapper
|
||||||
|
|
||||||
|
splitter.OnSubmit(&proxy.SubmitEvent{
|
||||||
|
Miner: miner,
|
||||||
|
JobID: "job-1",
|
||||||
|
Nonce: "deadbeef",
|
||||||
|
Result: "hash",
|
||||||
|
RequestID: 11,
|
||||||
|
})
|
||||||
|
|
||||||
|
if strategy.submits != 1 {
|
||||||
|
t.Fatalf("expected one submit routed by route id, got %d", strategy.submits)
|
||||||
|
}
|
||||||
|
if len(mapper.pending) != 1 {
|
||||||
|
t.Fatalf("expected routed submit to create one pending entry, got %d", len(mapper.pending))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleSplitter_Upstreams_Good(t *testing.T) {
|
||||||
|
splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
|
||||||
|
return activeStrategy{}
|
||||||
|
})
|
||||||
|
splitter.active[1] = &SimpleMapper{id: 1, strategy: activeStrategy{}}
|
||||||
|
splitter.idle[2] = &SimpleMapper{id: 2, strategy: activeStrategy{}, idleAt: time.Now()}
|
||||||
|
|
||||||
|
stats := splitter.Upstreams()
|
||||||
|
|
||||||
|
if stats.Active != 1 {
|
||||||
|
t.Fatalf("expected one active upstream, got %d", stats.Active)
|
||||||
|
}
|
||||||
|
if stats.Sleep != 1 {
|
||||||
|
t.Fatalf("expected one sleeping upstream, got %d", stats.Sleep)
|
||||||
|
}
|
||||||
|
if stats.Error != 0 {
|
||||||
|
t.Fatalf("expected no error upstreams, got %d", stats.Error)
|
||||||
|
}
|
||||||
|
if stats.Total != 2 {
|
||||||
|
t.Fatalf("expected total upstreams to be 2, got %d", stats.Total)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleSplitter_Upstreams_Ugly(t *testing.T) {
|
||||||
|
splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
|
||||||
|
return activeStrategy{}
|
||||||
|
})
|
||||||
|
splitter.active[1] = &SimpleMapper{id: 1, strategy: activeStrategy{}, stopped: true}
|
||||||
|
splitter.idle[2] = &SimpleMapper{id: 2, strategy: activeStrategy{}, stopped: true, idleAt: time.Now()}
|
||||||
|
|
||||||
|
stats := splitter.Upstreams()
|
||||||
|
|
||||||
|
if stats.Active != 0 {
|
||||||
|
t.Fatalf("expected no active upstreams, got %d", stats.Active)
|
||||||
|
}
|
||||||
|
if stats.Sleep != 0 {
|
||||||
|
t.Fatalf("expected no sleeping upstreams, got %d", stats.Sleep)
|
||||||
|
}
|
||||||
|
if stats.Error != 2 {
|
||||||
|
t.Fatalf("expected both upstreams to be counted as error, got %d", stats.Error)
|
||||||
|
}
|
||||||
|
if stats.Total != 2 {
|
||||||
|
t.Fatalf("expected total upstreams to be 2, got %d", stats.Total)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleSplitter_Upstreams_RecoveryResetsStopped_Good(t *testing.T) {
|
||||||
|
splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
|
||||||
|
return activeStrategy{}
|
||||||
|
})
|
||||||
|
mapper := &SimpleMapper{id: 1, strategy: activeStrategy{}, stopped: true}
|
||||||
|
splitter.active[1] = mapper
|
||||||
|
|
||||||
|
before := splitter.Upstreams()
|
||||||
|
if before.Error != 1 {
|
||||||
|
t.Fatalf("expected disconnected mapper to count as error, got %+v", before)
|
||||||
|
}
|
||||||
|
|
||||||
|
mapper.OnJob(proxy.Job{JobID: "job-1", Blob: "blob"})
|
||||||
|
|
||||||
|
after := splitter.Upstreams()
|
||||||
|
if after.Active != 1 {
|
||||||
|
t.Fatalf("expected recovered mapper to count as active, got %+v", after)
|
||||||
|
}
|
||||||
|
if after.Error != 0 {
|
||||||
|
t.Fatalf("expected recovered mapper not to remain in error, got %+v", after)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type discardConn struct{}
|
||||||
|
|
||||||
|
func (discardConn) Read([]byte) (int, error) { return 0, io.EOF }
|
||||||
|
func (discardConn) Write(p []byte) (int, error) { return len(p), nil }
|
||||||
|
func (discardConn) Close() error { return nil }
|
||||||
|
func (discardConn) LocalAddr() net.Addr { return nil }
|
||||||
|
func (discardConn) RemoteAddr() net.Addr { return nil }
|
||||||
|
func (discardConn) SetDeadline(time.Time) error { return nil }
|
||||||
|
func (discardConn) SetReadDeadline(time.Time) error { return nil }
|
||||||
|
func (discardConn) SetWriteDeadline(time.Time) error { return nil }
|
||||||
|
|
||||||
|
func TestSimpleMapper_OnResultAccepted_Expired(t *testing.T) {
|
||||||
|
bus := proxy.NewEventBus()
|
||||||
|
events := make(chan proxy.Event, 1)
|
||||||
|
var once sync.Once
|
||||||
|
bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
|
||||||
|
once.Do(func() {
|
||||||
|
events <- e
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
miner := proxy.NewMiner(discardConn{}, 3333, nil)
|
||||||
|
miner.SetID(1)
|
||||||
|
mapper := &SimpleMapper{
|
||||||
|
miner: miner,
|
||||||
|
currentJob: proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"},
|
||||||
|
prevJob: proxy.Job{JobID: "job-old", Blob: "blob-old", Target: "b88d0600"},
|
||||||
|
events: bus,
|
||||||
|
pending: map[int64]submitContext{
|
||||||
|
7: {RequestID: 9, StartedAt: time.Now(), JobID: "job-old"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
mapper.OnResultAccepted(7, true, "")
|
||||||
|
|
||||||
|
select {
|
||||||
|
case event := <-events:
|
||||||
|
if !event.Expired {
|
||||||
|
t.Fatalf("expected expired share to be flagged")
|
||||||
|
}
|
||||||
|
if event.Job == nil || event.Job.JobID != "job-old" {
|
||||||
|
t.Fatalf("expected previous job to be attached, got %+v", event.Job)
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("expected accept event")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleMapper_OnResultAccepted_CustomDiffUsesEffectiveDifficulty(t *testing.T) {
|
||||||
|
bus := proxy.NewEventBus()
|
||||||
|
events := make(chan proxy.Event, 1)
|
||||||
|
var once sync.Once
|
||||||
|
bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
|
||||||
|
once.Do(func() {
|
||||||
|
events <- e
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
miner := proxy.NewMiner(discardConn{}, 3333, nil)
|
||||||
|
miner.SetID(2)
|
||||||
|
job := proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"}
|
||||||
|
mapper := &SimpleMapper{
|
||||||
|
miner: miner,
|
||||||
|
currentJob: job,
|
||||||
|
events: bus,
|
||||||
|
pending: map[int64]submitContext{
|
||||||
|
8: {
|
||||||
|
RequestID: 10,
|
||||||
|
Diff: 25000,
|
||||||
|
StartedAt: time.Now(),
|
||||||
|
JobID: "job-new",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
mapper.OnResultAccepted(8, true, "")
|
||||||
|
|
||||||
|
select {
|
||||||
|
case event := <-events:
|
||||||
|
if event.Diff != 25000 {
|
||||||
|
t.Fatalf("expected effective difficulty 25000, got %d", event.Diff)
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("expected accept event")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleMapper_OnJob_PreservesPreviousJobForSamePoolSession_Good(t *testing.T) {
|
||||||
|
mapper := &SimpleMapper{
|
||||||
|
currentJob: proxy.Job{JobID: "job-1", Blob: "blob-1", ClientID: "session-a"},
|
||||||
|
}
|
||||||
|
|
||||||
|
mapper.OnJob(proxy.Job{JobID: "job-2", Blob: "blob-2", ClientID: "session-a"})
|
||||||
|
|
||||||
|
if mapper.currentJob.JobID != "job-2" {
|
||||||
|
t.Fatalf("expected current job to roll forward, got %q", mapper.currentJob.JobID)
|
||||||
|
}
|
||||||
|
if mapper.prevJob.JobID != "job-1" {
|
||||||
|
t.Fatalf("expected previous job to remain available within one pool session, got %q", mapper.prevJob.JobID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleMapper_OnJob_ResetsPreviousJobAcrossPoolSessions_Ugly(t *testing.T) {
|
||||||
|
mapper := &SimpleMapper{
|
||||||
|
currentJob: proxy.Job{JobID: "job-1", Blob: "blob-1", ClientID: "session-a"},
|
||||||
|
prevJob: proxy.Job{JobID: "job-0", Blob: "blob-0", ClientID: "session-a"},
|
||||||
|
}
|
||||||
|
|
||||||
|
mapper.OnJob(proxy.Job{JobID: "job-2", Blob: "blob-2", ClientID: "session-b"})
|
||||||
|
|
||||||
|
if mapper.currentJob.JobID != "job-2" {
|
||||||
|
t.Fatalf("expected current job to advance after session change, got %q", mapper.currentJob.JobID)
|
||||||
|
}
|
||||||
|
if mapper.prevJob.JobID != "" {
|
||||||
|
t.Fatalf("expected previous job history to reset on new pool session, got %q", mapper.prevJob.JobID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleMapper_Submit_InvalidJob_Good(t *testing.T) {
|
||||||
|
minerConn, clientConn := net.Pipe()
|
||||||
|
defer minerConn.Close()
|
||||||
|
defer clientConn.Close()
|
||||||
|
|
||||||
|
miner := proxy.NewMiner(minerConn, 3333, nil)
|
||||||
|
mapper := &SimpleMapper{
|
||||||
|
miner: miner,
|
||||||
|
currentJob: proxy.Job{JobID: "job-1", Blob: "blob", Target: "b88d0600"},
|
||||||
|
prevJob: proxy.Job{JobID: "job-0", Blob: "blob", Target: "b88d0600"},
|
||||||
|
strategy: activeStrategy{},
|
||||||
|
pending: make(map[int64]submitContext),
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
mapper.Submit(&proxy.SubmitEvent{
|
||||||
|
Miner: miner,
|
||||||
|
JobID: "job-missing",
|
||||||
|
Nonce: "deadbeef",
|
||||||
|
Result: "hash",
|
||||||
|
RequestID: 9,
|
||||||
|
})
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read error reply: %v", err)
|
||||||
|
}
|
||||||
|
<-done
|
||||||
|
|
||||||
|
var payload struct {
|
||||||
|
ID float64 `json:"id"`
|
||||||
|
Error struct {
|
||||||
|
Message string `json:"message"`
|
||||||
|
} `json:"error"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(line, &payload); err != nil {
|
||||||
|
t.Fatalf("unmarshal error reply: %v", err)
|
||||||
|
}
|
||||||
|
if payload.ID != 9 {
|
||||||
|
t.Fatalf("expected request id 9, got %v", payload.ID)
|
||||||
|
}
|
||||||
|
if payload.Error.Message != "Invalid job id" {
|
||||||
|
t.Fatalf("expected invalid job error, got %q", payload.Error.Message)
|
||||||
|
}
|
||||||
|
if len(mapper.pending) != 0 {
|
||||||
|
t.Fatalf("expected invalid submit not to create a pending entry")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -1,10 +1,11 @@
|
||||||
package simple
|
package simple
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"dappco.re/go/core/proxy"
|
"dappco.re/go/proxy"
|
||||||
"dappco.re/go/core/proxy/pool"
|
"dappco.re/go/proxy/pool"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SimpleMapper holds one outbound pool connection and serves at most one active miner
|
// SimpleMapper holds one outbound pool connection and serves at most one active miner
|
||||||
|
|
@ -13,9 +14,32 @@ import (
|
||||||
//
|
//
|
||||||
// m := simple.NewSimpleMapper(id, strategy)
|
// m := simple.NewSimpleMapper(id, strategy)
|
||||||
type SimpleMapper struct {
|
type SimpleMapper struct {
|
||||||
id int64
|
id int64
|
||||||
miner *proxy.Miner // nil when idle
|
miner *proxy.Miner // nil when idle
|
||||||
strategy pool.Strategy
|
currentJob proxy.Job
|
||||||
idleAt time.Time // zero when active
|
prevJob proxy.Job
|
||||||
stopped bool
|
strategy pool.Strategy
|
||||||
|
idleAt time.Time // zero when active
|
||||||
|
stopped bool
|
||||||
|
events *proxy.EventBus
|
||||||
|
pending map[int64]submitContext
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
type submitContext struct {
|
||||||
|
RequestID int64
|
||||||
|
Diff uint64
|
||||||
|
StartedAt time.Time
|
||||||
|
JobID string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSimpleMapper creates a passthrough mapper for one pool connection.
|
||||||
|
//
|
||||||
|
// m := simple.NewSimpleMapper(7, strategy)
|
||||||
|
func NewSimpleMapper(id int64, strategy pool.Strategy) *SimpleMapper {
|
||||||
|
return &SimpleMapper{
|
||||||
|
id: id,
|
||||||
|
strategy: strategy,
|
||||||
|
pending: make(map[int64]submitContext),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
68
splitter/simple/reload_test.go
Normal file
68
splitter/simple/reload_test.go
Normal file
|
|
@ -0,0 +1,68 @@
|
||||||
|
package simple
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"dappco.re/go/proxy/pool"
|
||||||
|
)
|
||||||
|
|
||||||
|
type reloadableStrategy struct {
|
||||||
|
reloads int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *reloadableStrategy) Connect() {}
|
||||||
|
func (s *reloadableStrategy) Submit(jobID, nonce, result, algo string) int64 { return 0 }
|
||||||
|
func (s *reloadableStrategy) Disconnect() {}
|
||||||
|
func (s *reloadableStrategy) IsActive() bool { return true }
|
||||||
|
func (s *reloadableStrategy) ReloadPools() { s.reloads++ }
|
||||||
|
|
||||||
|
var _ pool.ReloadableStrategy = (*reloadableStrategy)(nil)
|
||||||
|
|
||||||
|
func TestSimpleSplitter_ReloadPools_Good(t *testing.T) {
|
||||||
|
strategy := &reloadableStrategy{}
|
||||||
|
splitter := &SimpleSplitter{
|
||||||
|
active: map[int64]*SimpleMapper{
|
||||||
|
1: {strategy: strategy},
|
||||||
|
},
|
||||||
|
idle: map[int64]*SimpleMapper{},
|
||||||
|
}
|
||||||
|
|
||||||
|
splitter.ReloadPools()
|
||||||
|
|
||||||
|
if strategy.reloads != 1 {
|
||||||
|
t.Fatalf("expected active mapper strategy to reload once, got %d", strategy.reloads)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleSplitter_ReloadPools_Bad(t *testing.T) {
|
||||||
|
splitter := &SimpleSplitter{
|
||||||
|
active: map[int64]*SimpleMapper{
|
||||||
|
1: {strategy: nil},
|
||||||
|
},
|
||||||
|
idle: map[int64]*SimpleMapper{},
|
||||||
|
}
|
||||||
|
|
||||||
|
splitter.ReloadPools()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleSplitter_ReloadPools_Ugly(t *testing.T) {
|
||||||
|
active := &reloadableStrategy{}
|
||||||
|
idle := &reloadableStrategy{}
|
||||||
|
splitter := &SimpleSplitter{
|
||||||
|
active: map[int64]*SimpleMapper{
|
||||||
|
1: {strategy: active},
|
||||||
|
},
|
||||||
|
idle: map[int64]*SimpleMapper{
|
||||||
|
2: {strategy: idle},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
splitter.ReloadPools()
|
||||||
|
|
||||||
|
if active.reloads != 1 {
|
||||||
|
t.Fatalf("expected active mapper reload, got %d", active.reloads)
|
||||||
|
}
|
||||||
|
if idle.reloads != 1 {
|
||||||
|
t.Fatalf("expected idle mapper reload, got %d", idle.reloads)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -10,19 +10,19 @@ package simple
|
||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"dappco.re/go/core/proxy"
|
"dappco.re/go/proxy"
|
||||||
"dappco.re/go/core/proxy/pool"
|
"dappco.re/go/proxy/pool"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SimpleSplitter is the Splitter implementation for simple (passthrough) mode.
|
// SimpleSplitter is the Splitter implementation for simple (passthrough) mode.
|
||||||
//
|
//
|
||||||
// s := simple.NewSimpleSplitter(cfg, eventBus, strategyFactory)
|
// s := simple.NewSimpleSplitter(cfg, eventBus, strategyFactory)
|
||||||
type SimpleSplitter struct {
|
type SimpleSplitter struct {
|
||||||
active map[int64]*SimpleMapper // minerID → mapper
|
active map[int64]*SimpleMapper // minerID → mapper
|
||||||
idle map[int64]*SimpleMapper // mapperID → mapper (reuse pool, keyed by mapper seq)
|
idle map[int64]*SimpleMapper // mapperID → mapper (reuse pool, keyed by mapper ID)
|
||||||
cfg *proxy.Config
|
config *proxy.Config
|
||||||
events *proxy.EventBus
|
events *proxy.EventBus
|
||||||
factory pool.StrategyFactory
|
factory pool.StrategyFactory
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
seq int64 // monotonic mapper sequence counter
|
nextMapperID int64 // monotonic mapper ID counter
|
||||||
}
|
}
|
||||||
|
|
|
||||||
2007
state_impl.go
Normal file
2007
state_impl.go
Normal file
File diff suppressed because it is too large
Load diff
137
state_stop_test.go
Normal file
137
state_stop_test.go
Normal file
|
|
@ -0,0 +1,137 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProxy_Stop_Good(t *testing.T) {
|
||||||
|
serverConn, clientConn := net.Pipe()
|
||||||
|
defer serverConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(clientConn, 3333, nil)
|
||||||
|
splitter := &stubSplitter{}
|
||||||
|
proxyInstance := &Proxy{
|
||||||
|
done: make(chan struct{}),
|
||||||
|
miners: map[int64]*Miner{miner.ID(): miner},
|
||||||
|
splitter: splitter,
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
buf := make([]byte, 1)
|
||||||
|
_, err := serverConn.Read(buf)
|
||||||
|
done <- err
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
proxyInstance.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case err := <-done:
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected miner connection to close during Stop")
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("expected miner connection to close during Stop")
|
||||||
|
}
|
||||||
|
if !splitter.disconnected {
|
||||||
|
t.Fatalf("expected splitter to be disconnected during Stop")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Stop_Bad(t *testing.T) {
|
||||||
|
var proxyInstance *Proxy
|
||||||
|
|
||||||
|
proxyInstance.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Stop_Ugly(t *testing.T) {
|
||||||
|
serverConn, clientConn := net.Pipe()
|
||||||
|
defer serverConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(clientConn, 3333, nil)
|
||||||
|
proxyInstance := &Proxy{
|
||||||
|
done: make(chan struct{}),
|
||||||
|
miners: map[int64]*Miner{miner.ID(): miner},
|
||||||
|
}
|
||||||
|
|
||||||
|
proxyInstance.Stop()
|
||||||
|
proxyInstance.Stop()
|
||||||
|
|
||||||
|
buf := make([]byte, 1)
|
||||||
|
if _, err := serverConn.Read(buf); err == nil {
|
||||||
|
t.Fatalf("expected closed connection after repeated Stop calls")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProxy_Stop_WaitsBeforeDisconnectingSubmitPaths(t *testing.T) {
|
||||||
|
serverConn, clientConn := net.Pipe()
|
||||||
|
defer serverConn.Close()
|
||||||
|
|
||||||
|
miner := NewMiner(clientConn, 3333, nil)
|
||||||
|
splitter := &blockingStopSplitter{disconnectedCh: make(chan struct{})}
|
||||||
|
proxyInstance := &Proxy{
|
||||||
|
done: make(chan struct{}),
|
||||||
|
miners: map[int64]*Miner{miner.ID(): miner},
|
||||||
|
splitter: splitter,
|
||||||
|
}
|
||||||
|
proxyInstance.submitCount.Store(1)
|
||||||
|
|
||||||
|
stopped := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
proxyInstance.Stop()
|
||||||
|
close(stopped)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-splitter.disconnectedCh:
|
||||||
|
t.Fatalf("expected splitter disconnect to wait for submit drain")
|
||||||
|
case <-stopped:
|
||||||
|
t.Fatalf("expected Stop to keep waiting while submits are in flight")
|
||||||
|
case <-time.After(50 * time.Millisecond):
|
||||||
|
}
|
||||||
|
|
||||||
|
proxyInstance.submitCount.Store(0)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-splitter.disconnectedCh:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("expected splitter disconnect after submit drain")
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-stopped:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("expected Stop to finish after submit drain")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type stubSplitter struct {
|
||||||
|
disconnected bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stubSplitter) Connect() {}
|
||||||
|
func (s *stubSplitter) OnLogin(event *LoginEvent) {}
|
||||||
|
func (s *stubSplitter) OnSubmit(event *SubmitEvent) {}
|
||||||
|
func (s *stubSplitter) OnClose(event *CloseEvent) {}
|
||||||
|
func (s *stubSplitter) Tick(ticks uint64) {}
|
||||||
|
func (s *stubSplitter) GC() {}
|
||||||
|
func (s *stubSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }
|
||||||
|
func (s *stubSplitter) Disconnect() { s.disconnected = true }
|
||||||
|
|
||||||
|
type blockingStopSplitter struct {
|
||||||
|
disconnectedCh chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *blockingStopSplitter) Connect() {}
|
||||||
|
func (s *blockingStopSplitter) OnLogin(event *LoginEvent) {}
|
||||||
|
func (s *blockingStopSplitter) OnSubmit(event *SubmitEvent) {}
|
||||||
|
func (s *blockingStopSplitter) OnClose(event *CloseEvent) {}
|
||||||
|
func (s *blockingStopSplitter) Tick(ticks uint64) {}
|
||||||
|
func (s *blockingStopSplitter) GC() {}
|
||||||
|
func (s *blockingStopSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }
|
||||||
|
func (s *blockingStopSplitter) Disconnect() {
|
||||||
|
close(s.disconnectedCh)
|
||||||
|
}
|
||||||
33
state_submit_test.go
Normal file
33
state_submit_test.go
Normal file
|
|
@ -0,0 +1,33 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProxy_Stop_WaitsForSubmitDrain(t *testing.T) {
|
||||||
|
p := &Proxy{
|
||||||
|
done: make(chan struct{}),
|
||||||
|
}
|
||||||
|
p.submitCount.Store(1)
|
||||||
|
|
||||||
|
stopped := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
p.Stop()
|
||||||
|
close(stopped)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-stopped:
|
||||||
|
t.Fatalf("expected Stop to wait for pending submits")
|
||||||
|
case <-time.After(50 * time.Millisecond):
|
||||||
|
}
|
||||||
|
|
||||||
|
p.submitCount.Store(0)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-stopped:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("expected Stop to finish after pending submits drain")
|
||||||
|
}
|
||||||
|
}
|
||||||
36
stats.go
36
stats.go
|
|
@ -9,9 +9,11 @@ import (
|
||||||
// Stats tracks global proxy metrics. Hot-path counters are atomic. Hashrate windows
|
// Stats tracks global proxy metrics. Hot-path counters are atomic. Hashrate windows
|
||||||
// use a ring buffer per window size, advanced by Tick().
|
// use a ring buffer per window size, advanced by Tick().
|
||||||
//
|
//
|
||||||
// s := proxy.NewStats()
|
// stats := proxy.NewStats()
|
||||||
// bus.Subscribe(proxy.EventAccept, s.OnAccept)
|
// bus.Subscribe(proxy.EventAccept, stats.OnAccept)
|
||||||
// bus.Subscribe(proxy.EventReject, s.OnReject)
|
// bus.Subscribe(proxy.EventReject, stats.OnReject)
|
||||||
|
// stats.Tick()
|
||||||
|
// summary := stats.Summary()
|
||||||
type Stats struct {
|
type Stats struct {
|
||||||
accepted atomic.Uint64
|
accepted atomic.Uint64
|
||||||
rejected atomic.Uint64
|
rejected atomic.Uint64
|
||||||
|
|
@ -19,6 +21,7 @@ type Stats struct {
|
||||||
expired atomic.Uint64
|
expired atomic.Uint64
|
||||||
hashes atomic.Uint64 // cumulative sum of accepted share difficulties
|
hashes atomic.Uint64 // cumulative sum of accepted share difficulties
|
||||||
connections atomic.Uint64 // total TCP connections accepted (ever)
|
connections atomic.Uint64 // total TCP connections accepted (ever)
|
||||||
|
miners atomic.Uint64 // current connected miners
|
||||||
maxMiners atomic.Uint64 // peak concurrent miner count
|
maxMiners atomic.Uint64 // peak concurrent miner count
|
||||||
topDiff [10]uint64 // top-10 accepted difficulties, sorted descending; guarded by mu
|
topDiff [10]uint64 // top-10 accepted difficulties, sorted descending; guarded by mu
|
||||||
latency []uint16 // pool response latencies in ms; capped at 10000 samples; guarded by mu
|
latency []uint16 // pool response latencies in ms; capped at 10000 samples; guarded by mu
|
||||||
|
|
@ -27,7 +30,6 @@ type Stats struct {
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// Hashrate window sizes in seconds. Index maps to Stats.windows and SummaryResponse.Hashrate.
|
|
||||||
const (
|
const (
|
||||||
HashrateWindow60s = 0 // 1 minute
|
HashrateWindow60s = 0 // 1 minute
|
||||||
HashrateWindow600s = 1 // 10 minutes
|
HashrateWindow600s = 1 // 10 minutes
|
||||||
|
|
@ -37,7 +39,9 @@ const (
|
||||||
HashrateWindowAll = 5 // all-time (single accumulator, no window)
|
HashrateWindowAll = 5 // all-time (single accumulator, no window)
|
||||||
)
|
)
|
||||||
|
|
||||||
// tickWindow is a fixed-capacity ring buffer of per-second difficulty sums.
|
// tickWindow is a fixed-capacity ring buffer of per-second difficulty totals.
|
||||||
|
//
|
||||||
|
// window := newTickWindow(60)
|
||||||
type tickWindow struct {
|
type tickWindow struct {
|
||||||
buckets []uint64
|
buckets []uint64
|
||||||
pos int
|
pos int
|
||||||
|
|
@ -46,15 +50,17 @@ type tickWindow struct {
|
||||||
|
|
||||||
// StatsSummary is the serialisable snapshot returned by Summary().
|
// StatsSummary is the serialisable snapshot returned by Summary().
|
||||||
//
|
//
|
||||||
// summary := stats.Summary()
|
// summary := proxy.NewStats().Summary()
|
||||||
|
// _ = summary.Hashrate[0] // 60-second window H/s
|
||||||
type StatsSummary struct {
|
type StatsSummary struct {
|
||||||
Accepted uint64 `json:"accepted"`
|
Accepted uint64 `json:"accepted"`
|
||||||
Rejected uint64 `json:"rejected"`
|
Rejected uint64 `json:"rejected"`
|
||||||
Invalid uint64 `json:"invalid"`
|
Invalid uint64 `json:"invalid"`
|
||||||
Expired uint64 `json:"expired"`
|
Expired uint64 `json:"expired"`
|
||||||
Hashes uint64 `json:"hashes_total"`
|
Hashes uint64 `json:"hashes_total"`
|
||||||
AvgTime uint32 `json:"avg_time"` // seconds per accepted share
|
AvgTime uint32 `json:"avg_time"` // seconds per accepted share
|
||||||
AvgLatency uint32 `json:"latency"` // median pool response latency in ms
|
AvgLatency uint32 `json:"latency"` // median pool response latency in ms
|
||||||
Hashrate [6]float64 `json:"hashrate"` // H/s per window (index = HashrateWindow* constants)
|
Hashrate [6]float64 `json:"hashrate"` // H/s per window (index = HashrateWindow* constants)
|
||||||
TopDiff [10]uint64 `json:"best"`
|
TopDiff [10]uint64 `json:"best"`
|
||||||
|
CustomDiffStats map[uint64]CustomDiffBucketStats `json:"custom_diff_stats,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
|
||||||
173
stats_test.go
Normal file
173
stats_test.go
Normal file
|
|
@ -0,0 +1,173 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestStats_OnAccept_Good verifies that accepted counter, hashes, and topDiff are updated.
|
||||||
|
//
|
||||||
|
// stats := proxy.NewStats()
|
||||||
|
// stats.OnAccept(proxy.Event{Diff: 100000, Latency: 82})
|
||||||
|
// summary := stats.Summary()
|
||||||
|
// _ = summary.Accepted // 1
|
||||||
|
// _ = summary.Hashes // 100000
|
||||||
|
func TestStats_OnAccept_Good(t *testing.T) {
|
||||||
|
stats := NewStats()
|
||||||
|
|
||||||
|
stats.OnAccept(Event{Diff: 100000, Latency: 82})
|
||||||
|
|
||||||
|
summary := stats.Summary()
|
||||||
|
if summary.Accepted != 1 {
|
||||||
|
t.Fatalf("expected accepted 1, got %d", summary.Accepted)
|
||||||
|
}
|
||||||
|
if summary.Hashes != 100000 {
|
||||||
|
t.Fatalf("expected hashes 100000, got %d", summary.Hashes)
|
||||||
|
}
|
||||||
|
if summary.TopDiff[0] != 100000 {
|
||||||
|
t.Fatalf("expected top diff 100000, got %d", summary.TopDiff[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStats_OnAccept_Bad verifies concurrent OnAccept calls do not race.
|
||||||
|
//
|
||||||
|
// stats := proxy.NewStats()
|
||||||
|
// // 100 goroutines each call OnAccept — no data race under -race flag.
|
||||||
|
func TestStats_OnAccept_Bad(t *testing.T) {
|
||||||
|
stats := NewStats()
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(diff uint64) {
|
||||||
|
defer wg.Done()
|
||||||
|
stats.OnAccept(Event{Diff: diff, Latency: 10})
|
||||||
|
}(uint64(i + 1))
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
summary := stats.Summary()
|
||||||
|
if summary.Accepted != 100 {
|
||||||
|
t.Fatalf("expected 100 accepted, got %d", summary.Accepted)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStats_OnAccept_Ugly verifies that 15 accepts with varying diffs fill all topDiff slots.
|
||||||
|
//
|
||||||
|
// stats := proxy.NewStats()
|
||||||
|
// // 15 accepts with diffs 1..15 → topDiff[9] is 6 (10th highest), not 0
|
||||||
|
func TestStats_OnAccept_Ugly(t *testing.T) {
|
||||||
|
stats := NewStats()
|
||||||
|
|
||||||
|
for i := 1; i <= 15; i++ {
|
||||||
|
stats.OnAccept(Event{Diff: uint64(i)})
|
||||||
|
}
|
||||||
|
|
||||||
|
summary := stats.Summary()
|
||||||
|
// top 10 should be 15, 14, 13, ..., 6
|
||||||
|
if summary.TopDiff[0] != 15 {
|
||||||
|
t.Fatalf("expected top diff[0]=15, got %d", summary.TopDiff[0])
|
||||||
|
}
|
||||||
|
if summary.TopDiff[9] != 6 {
|
||||||
|
t.Fatalf("expected top diff[9]=6, got %d", summary.TopDiff[9])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStats_OnReject_Good verifies that rejected and invalid counters are updated.
|
||||||
|
//
|
||||||
|
// stats := proxy.NewStats()
|
||||||
|
// stats.OnReject(proxy.Event{Error: "Low difficulty share"})
|
||||||
|
func TestStats_OnReject_Good(t *testing.T) {
|
||||||
|
stats := NewStats()
|
||||||
|
|
||||||
|
stats.OnReject(Event{Error: "Low difficulty share"})
|
||||||
|
stats.OnReject(Event{Error: "Malformed share"})
|
||||||
|
|
||||||
|
summary := stats.Summary()
|
||||||
|
if summary.Rejected != 2 {
|
||||||
|
t.Fatalf("expected two rejected shares, got %d", summary.Rejected)
|
||||||
|
}
|
||||||
|
if summary.Invalid != 2 {
|
||||||
|
t.Fatalf("expected two invalid shares, got %d", summary.Invalid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStats_OnReject_Bad verifies that a non-invalid rejection increments rejected but not invalid.
|
||||||
|
//
|
||||||
|
// stats := proxy.NewStats()
|
||||||
|
// stats.OnReject(proxy.Event{Error: "Stale share"})
|
||||||
|
func TestStats_OnReject_Bad(t *testing.T) {
|
||||||
|
stats := NewStats()
|
||||||
|
|
||||||
|
stats.OnReject(Event{Error: "Stale share"})
|
||||||
|
|
||||||
|
summary := stats.Summary()
|
||||||
|
if summary.Rejected != 1 {
|
||||||
|
t.Fatalf("expected one rejected, got %d", summary.Rejected)
|
||||||
|
}
|
||||||
|
if summary.Invalid != 0 {
|
||||||
|
t.Fatalf("expected zero invalid for non-invalid reason, got %d", summary.Invalid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStats_OnReject_Ugly verifies an expired accepted share increments both accepted and expired.
|
||||||
|
//
|
||||||
|
// stats := proxy.NewStats()
|
||||||
|
// stats.OnAccept(proxy.Event{Diff: 1000, Expired: true})
|
||||||
|
func TestStats_OnReject_Ugly(t *testing.T) {
|
||||||
|
stats := NewStats()
|
||||||
|
|
||||||
|
stats.OnAccept(Event{Diff: 1000, Expired: true})
|
||||||
|
|
||||||
|
summary := stats.Summary()
|
||||||
|
if summary.Accepted != 1 {
|
||||||
|
t.Fatalf("expected accepted 1, got %d", summary.Accepted)
|
||||||
|
}
|
||||||
|
if summary.Expired != 1 {
|
||||||
|
t.Fatalf("expected expired 1, got %d", summary.Expired)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStats_Tick_Good verifies that Tick advances the rolling window position.
|
||||||
|
//
|
||||||
|
// stats := proxy.NewStats()
|
||||||
|
// stats.OnAccept(proxy.Event{Diff: 500})
|
||||||
|
// stats.Tick()
|
||||||
|
// summary := stats.Summary()
|
||||||
|
func TestStats_Tick_Good(t *testing.T) {
|
||||||
|
stats := NewStats()
|
||||||
|
|
||||||
|
stats.OnAccept(Event{Diff: 500})
|
||||||
|
stats.Tick()
|
||||||
|
|
||||||
|
summary := stats.Summary()
|
||||||
|
// After one tick, the hashrate should still include the 500 diff
|
||||||
|
if summary.Hashrate[HashrateWindow60s] == 0 {
|
||||||
|
t.Fatalf("expected non-zero 60s hashrate after accept and tick")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStats_OnLogin_OnClose_Good verifies miner count tracking.
|
||||||
|
//
|
||||||
|
// stats := proxy.NewStats()
|
||||||
|
// stats.OnLogin(proxy.Event{Miner: &proxy.Miner{}})
|
||||||
|
// stats.OnClose(proxy.Event{Miner: &proxy.Miner{}})
|
||||||
|
func TestStats_OnLogin_OnClose_Good(t *testing.T) {
|
||||||
|
stats := NewStats()
|
||||||
|
m := &Miner{}
|
||||||
|
|
||||||
|
stats.OnLogin(Event{Miner: m})
|
||||||
|
if got := stats.miners.Load(); got != 1 {
|
||||||
|
t.Fatalf("expected 1 miner, got %d", got)
|
||||||
|
}
|
||||||
|
if got := stats.maxMiners.Load(); got != 1 {
|
||||||
|
t.Fatalf("expected max miners 1, got %d", got)
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.OnClose(Event{Miner: m})
|
||||||
|
if got := stats.miners.Load(); got != 0 {
|
||||||
|
t.Fatalf("expected 0 miners after close, got %d", got)
|
||||||
|
}
|
||||||
|
if got := stats.maxMiners.Load(); got != 1 {
|
||||||
|
t.Fatalf("expected max miners to remain 1, got %d", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
36
tls_test.go
Normal file
36
tls_test.go
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTLS_applyTLSCiphers_Good(t *testing.T) {
|
||||||
|
cfg := &tls.Config{}
|
||||||
|
|
||||||
|
applyTLSCiphers(cfg, "ECDHE-RSA-AES128-GCM-SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256")
|
||||||
|
|
||||||
|
if len(cfg.CipherSuites) != 2 {
|
||||||
|
t.Fatalf("expected two recognised cipher suites, got %d", len(cfg.CipherSuites))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTLS_applyTLSCiphers_Bad(t *testing.T) {
|
||||||
|
cfg := &tls.Config{}
|
||||||
|
|
||||||
|
applyTLSCiphers(cfg, "made-up-cipher-one:made-up-cipher-two")
|
||||||
|
|
||||||
|
if len(cfg.CipherSuites) != 0 {
|
||||||
|
t.Fatalf("expected unknown cipher names to be ignored, got %#v", cfg.CipherSuites)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTLS_applyTLSCiphers_Ugly(t *testing.T) {
|
||||||
|
cfg := &tls.Config{}
|
||||||
|
|
||||||
|
applyTLSCiphers(cfg, " aes128-sha | ECDHE-RSA-AES256-GCM-SHA384 ; tls_ecdhe_ecdsa_with_aes_256_gcm_sha384 ")
|
||||||
|
|
||||||
|
if len(cfg.CipherSuites) != 3 {
|
||||||
|
t.Fatalf("expected mixed separators and casing to be accepted, got %d", len(cfg.CipherSuites))
|
||||||
|
}
|
||||||
|
}
|
||||||
22
worker.go
22
worker.go
|
|
@ -8,18 +8,22 @@ import (
|
||||||
// Workers maintains per-worker aggregate stats. Workers are identified by name,
|
// Workers maintains per-worker aggregate stats. Workers are identified by name,
|
||||||
// derived from the miner's login fields per WorkersMode.
|
// derived from the miner's login fields per WorkersMode.
|
||||||
//
|
//
|
||||||
// w := proxy.NewWorkers(proxy.WorkersByRigID, bus)
|
// workers := proxy.NewWorkers(proxy.WorkersByRigID, bus)
|
||||||
|
// workers.OnLogin(proxy.Event{Miner: miner})
|
||||||
|
// records := workers.List()
|
||||||
type Workers struct {
|
type Workers struct {
|
||||||
mode WorkersMode
|
mode WorkersMode
|
||||||
entries []WorkerRecord // ordered by first-seen (stable)
|
entries []WorkerRecord // ordered by first-seen (stable)
|
||||||
nameIndex map[string]int // workerName → entries index
|
nameIndex map[string]int // workerName → entries index
|
||||||
idIndex map[int64]int // minerID → entries index
|
idIndex map[int64]int // minerID → entries index
|
||||||
mu sync.RWMutex
|
subscribed bool
|
||||||
|
mu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// WorkerRecord is the per-identity aggregate.
|
// WorkerRecord is the per-identity aggregate with rolling hashrate windows.
|
||||||
//
|
//
|
||||||
// hr60 := record.Hashrate(60)
|
// record := proxy.WorkerRecord{Name: "rig-alpha", Accepted: 10, Hashes: 500000}
|
||||||
|
// hr60 := record.Hashrate(60) // H/s over the last 60 seconds
|
||||||
type WorkerRecord struct {
|
type WorkerRecord struct {
|
||||||
Name string
|
Name string
|
||||||
LastIP string
|
LastIP string
|
||||||
|
|
@ -27,7 +31,7 @@ type WorkerRecord struct {
|
||||||
Accepted uint64
|
Accepted uint64
|
||||||
Rejected uint64
|
Rejected uint64
|
||||||
Invalid uint64
|
Invalid uint64
|
||||||
Hashes uint64 // sum of accepted share difficulties
|
Hashes uint64 // sum of accepted share difficulties
|
||||||
LastHashAt time.Time
|
LastHashAt time.Time
|
||||||
windows [5]tickWindow // 60s, 600s, 3600s, 12h, 24h
|
windows [5]tickWindow // 60s, 600s, 3600s, 12h, 24h
|
||||||
}
|
}
|
||||||
|
|
|
||||||
164
worker_test.go
Normal file
164
worker_test.go
Normal file
|
|
@ -0,0 +1,164 @@
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestWorker_NewWorkers_Good(t *testing.T) {
|
||||||
|
bus := NewEventBus()
|
||||||
|
workers := NewWorkers(WorkersByRigID, bus)
|
||||||
|
miner := &Miner{id: 7, user: "wallet", rigID: "rig-1", ip: "10.0.0.1"}
|
||||||
|
|
||||||
|
bus.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||||
|
|
||||||
|
records := workers.List()
|
||||||
|
if len(records) != 1 {
|
||||||
|
t.Fatalf("expected one worker record, got %d", len(records))
|
||||||
|
}
|
||||||
|
if records[0].Name != "rig-1" {
|
||||||
|
t.Fatalf("expected rig id worker name, got %q", records[0].Name)
|
||||||
|
}
|
||||||
|
if records[0].Connections != 1 {
|
||||||
|
t.Fatalf("expected one connection, got %d", records[0].Connections)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWorker_NewWorkers_Bad(t *testing.T) {
|
||||||
|
workers := NewWorkers(WorkersDisabled, nil)
|
||||||
|
if workers == nil {
|
||||||
|
t.Fatalf("expected workers instance")
|
||||||
|
}
|
||||||
|
if got := workers.List(); len(got) != 0 {
|
||||||
|
t.Fatalf("expected no worker records, got %d", len(got))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWorker_NewWorkers_Ugly(t *testing.T) {
|
||||||
|
bus := NewEventBus()
|
||||||
|
workers := NewWorkers(WorkersByUser, bus)
|
||||||
|
workers.bindEvents(bus)
|
||||||
|
|
||||||
|
miner := &Miner{id: 11, user: "wallet", ip: "10.0.0.2"}
|
||||||
|
bus.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||||
|
|
||||||
|
records := workers.List()
|
||||||
|
if len(records) != 1 {
|
||||||
|
t.Fatalf("expected one worker record, got %d", len(records))
|
||||||
|
}
|
||||||
|
if records[0].Connections != 1 {
|
||||||
|
t.Fatalf("expected a single subscription path, got %d connections", records[0].Connections)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWorker_Hashrate_Good verifies that recording an accepted share produces a nonzero
|
||||||
|
// hashrate reading from the 60-second window.
|
||||||
|
//
|
||||||
|
// record := proxy.WorkerRecord{}
|
||||||
|
// record.Hashrate(60) // > 0.0 after an accepted share
|
||||||
|
func TestWorker_Hashrate_Good(t *testing.T) {
|
||||||
|
bus := NewEventBus()
|
||||||
|
workers := NewWorkers(WorkersByUser, bus)
|
||||||
|
|
||||||
|
miner := &Miner{id: 100, user: "hashtest", ip: "10.0.0.10"}
|
||||||
|
bus.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||||
|
bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 50000})
|
||||||
|
|
||||||
|
records := workers.List()
|
||||||
|
if len(records) != 1 {
|
||||||
|
t.Fatalf("expected one worker record, got %d", len(records))
|
||||||
|
}
|
||||||
|
hr := records[0].Hashrate(60)
|
||||||
|
if hr <= 0 {
|
||||||
|
t.Fatalf("expected nonzero hashrate for 60-second window after accept, got %f", hr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWorker_Hashrate_Bad verifies that an invalid window size returns 0.
|
||||||
|
//
|
||||||
|
// record := proxy.WorkerRecord{}
|
||||||
|
// record.Hashrate(999) // 0.0 (unsupported window)
|
||||||
|
func TestWorker_Hashrate_Bad(t *testing.T) {
|
||||||
|
bus := NewEventBus()
|
||||||
|
workers := NewWorkers(WorkersByUser, bus)
|
||||||
|
|
||||||
|
miner := &Miner{id: 101, user: "hashtest-bad", ip: "10.0.0.11"}
|
||||||
|
bus.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||||
|
bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 50000})
|
||||||
|
|
||||||
|
records := workers.List()
|
||||||
|
if len(records) != 1 {
|
||||||
|
t.Fatalf("expected one worker record, got %d", len(records))
|
||||||
|
}
|
||||||
|
hr := records[0].Hashrate(999)
|
||||||
|
if hr != 0 {
|
||||||
|
t.Fatalf("expected zero hashrate for unsupported window, got %f", hr)
|
||||||
|
}
|
||||||
|
hrZero := records[0].Hashrate(0)
|
||||||
|
if hrZero != 0 {
|
||||||
|
t.Fatalf("expected zero hashrate for zero window, got %f", hrZero)
|
||||||
|
}
|
||||||
|
hrNeg := records[0].Hashrate(-1)
|
||||||
|
if hrNeg != 0 {
|
||||||
|
t.Fatalf("expected zero hashrate for negative window, got %f", hrNeg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWorker_Hashrate_Ugly verifies that calling Hashrate on a nil record returns 0
|
||||||
|
// and that a worker with no accepts also returns 0.
|
||||||
|
//
|
||||||
|
// var record *proxy.WorkerRecord
|
||||||
|
// record.Hashrate(60) // 0.0
|
||||||
|
func TestWorker_Hashrate_Ugly(t *testing.T) {
|
||||||
|
var nilRecord *WorkerRecord
|
||||||
|
if hr := nilRecord.Hashrate(60); hr != 0 {
|
||||||
|
t.Fatalf("expected zero hashrate for nil record, got %f", hr)
|
||||||
|
}
|
||||||
|
|
||||||
|
bus := NewEventBus()
|
||||||
|
workers := NewWorkers(WorkersByUser, bus)
|
||||||
|
|
||||||
|
miner := &Miner{id: 102, user: "hashtest-ugly", ip: "10.0.0.12"}
|
||||||
|
bus.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||||
|
|
||||||
|
records := workers.List()
|
||||||
|
if len(records) != 1 {
|
||||||
|
t.Fatalf("expected one worker record, got %d", len(records))
|
||||||
|
}
|
||||||
|
hr := records[0].Hashrate(60)
|
||||||
|
if hr != 0 {
|
||||||
|
t.Fatalf("expected zero hashrate for worker with no accepts, got %f", hr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWorker_CustomDiffOrdering_Good(t *testing.T) {
|
||||||
|
cfg := &Config{
|
||||||
|
Mode: "nicehash",
|
||||||
|
Workers: WorkersByUser,
|
||||||
|
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
|
||||||
|
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
|
||||||
|
CustomDiff: 50000,
|
||||||
|
AccessLogFile: "",
|
||||||
|
}
|
||||||
|
|
||||||
|
p, result := New(cfg)
|
||||||
|
if !result.OK {
|
||||||
|
t.Fatalf("expected valid proxy, got error: %v", result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
miner := &Miner{
|
||||||
|
id: 21,
|
||||||
|
user: "WALLET+50000",
|
||||||
|
ip: "10.0.0.3",
|
||||||
|
conn: noopConn{},
|
||||||
|
}
|
||||||
|
p.events.Dispatch(Event{Type: EventLogin, Miner: miner})
|
||||||
|
|
||||||
|
records := p.WorkerRecords()
|
||||||
|
if len(records) != 1 {
|
||||||
|
t.Fatalf("expected one worker record, got %d", len(records))
|
||||||
|
}
|
||||||
|
if records[0].Name != "WALLET" {
|
||||||
|
t.Fatalf("expected custom diff login suffix to be stripped before worker registration, got %q", records[0].Name)
|
||||||
|
}
|
||||||
|
if miner.User() != "WALLET" {
|
||||||
|
t.Fatalf("expected miner user to be stripped before downstream consumers, got %q", miner.User())
|
||||||
|
}
|
||||||
|
}
|
||||||
Loading…
Add table
Reference in a new issue