Compare commits

..

146 commits
main ... dev

Author SHA1 Message Date
Snider
2470f1ac3d feat(proxy): add log tests, fix nil config panic, complete test triads
- Add log package tests (AccessLog and ShareLog Good/Bad/Ugly triads)
- Fix nil pointer panic in pool.NewStrategyFactory when config is nil
- Add Worker Hashrate Good/Bad/Ugly test triad
- Add ConfigWatcher Start Bad test (nonexistent path)
- Add FailoverStrategy CurrentPools Bad/Ugly, EnabledPools Good/Bad/Ugly,
  and NewStrategyFactory Good/Bad/Ugly test triads
- Improve doc comments on Stats, StatsSummary, Workers, WorkerRecord
  with AX-compliant usage examples

Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 08:08:28 +01:00
Snider
31a151d23c feat(proxy): implement RFC test coverage and AX usage-example comments
Add missing Good/Bad/Ugly test triplets per RFC section 22:
- stats_test.go: OnAccept/OnReject/Tick/OnLogin/OnClose tests with
  concurrency race test and top-10 diff slot verification
- ratelimit_test.go: full Good/Bad/Ugly set including ban persistence
  and disabled-limiter edge case
- customdiff_test.go: renamed to Apply_Good/Bad/Ugly convention per RFC
- storage_test.go: full Add_Good/Bad/Ugly set including 256-slot fill,
  overflow rejection, and dead-slot reclamation via SetJob
- job_test.go: added Good/Bad/Ugly for BlobWithFixedByte, DifficultyFromTarget,
  and IsValid

Add Miner.Diff() public getter for the last difficulty sent to miner.

Add AX-compliant usage-example comments (principle 2) to all Miner
accessors, Proxy query methods, EffectiveShareDifficulty, targetFromDifficulty,
MinerSnapshot, and RateLimiter.IsActive.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 07:02:54 +01:00
Virgil
6f0747abc2 fix(reload): apply custom diff updates to active miners
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:49:05 +00:00
Virgil
711c4259f7 refactor(proxy): unify monitoring auth checks
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:44:35 +00:00
Virgil
8cf01f2618 refactor(proxy): rename server TLS config field
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:42:05 +00:00
Virgil
b6b44b1f7b refactor(proxy): name miner timeouts and password mask
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:39:19 +00:00
Virgil
e1eadf705d fix(proxy): separate config loading from validation
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:36:21 +00:00
Virgil
ea378354de chore(proxy): clarify watcher and limiter names 2026-04-05 03:34:07 +00:00
Virgil
8a9046356e refactor(proxy): centralise monitoring API contract values
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:30:40 +00:00
Virgil
d1a899805e perf(proxy): reuse miner send buffer for writes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:25:39 +00:00
Virgil
5680539dbb docs(proxy): tighten config comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:22:57 +00:00
Virgil
82b2375058 fix(proxy): trim mode and worker identifiers
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:19:28 +00:00
Virgil
01a0cc5907 fix(proxy): drain submits before shutdown
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:15:17 +00:00
Virgil
031f0c0f17 docs(proxy): tighten AX-oriented comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:10:41 +00:00
Virgil
75d151b4e5 refactor(proxy): clarify miner login callback naming
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:07:41 +00:00
Virgil
70fcbd4d43 docs(nicehash): sharpen storage usage examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:04:39 +00:00
Virgil
686f4ea54f fix(api): align response type aliases with RFC
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 03:01:41 +00:00
Virgil
af96bfce94 docs: align public API comments with AX guidance
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:58:56 +00:00
Virgil
1ae781608c fix(api): honour unrestricted monitoring methods
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:55:36 +00:00
Virgil
ee128e944d fix(proxy): make config watcher restartable
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:50:55 +00:00
Virgil
1f8ff58b20 fix(login): defer login events until assignment succeeds
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:48:03 +00:00
Virgil
bc6113c80d fix(api): enforce GET on monitoring routes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:43:49 +00:00
Virgil
b3fd1fef61 refactor(splitter): clarify mapper ownership names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:40:42 +00:00
Virgil
e518f2df32 refactor(api): clarify monitoring route guards
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:37:56 +00:00
Virgil
30ff013158 docs(proxy): sharpen AX usage examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:34:53 +00:00
Virgil
be47d7afde fix(proxy): align config and login parsing with rfc
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:31:37 +00:00
Virgil
65f6c733a0 fix(simple): reset stale job window on pool session change
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:28:30 +00:00
Virgil
f4f0081eb0 docs(proxy): align AX comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:24:39 +00:00
Virgil
f0d5f6ae86 refactor(proxy): clarify public wiring comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:21:48 +00:00
Virgil
0a7c99264b refactor(errors): add scoped proxy failures
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:18:05 +00:00
Virgil
35db5f6840 docs(proxy): sharpen AX usage examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:12:55 +00:00
Virgil
8a52856719 fix(proxy): reject configs without enabled pools
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:09:46 +00:00
Virgil
5d8d82b9b5 docs(proxy): align AX comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:06:24 +00:00
Virgil
356eb9cec1 fix(proxy): use mtime-based config watching
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:04:03 +00:00
Virgil
cbde021d0c docs(proxy): align AX comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 02:01:38 +00:00
Virgil
f2f7dfed75 fix(proxy): align config watcher with RFC
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:59:28 +00:00
Virgil
ce3b7a50cd fix(proxy): use filesystem config watcher
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:56:36 +00:00
Virgil
ecd4130457 docs(proxy): sharpen AX examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:52:20 +00:00
Virgil
5a3fcf4fab docs(proxy): sharpen AX usage comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:50:02 +00:00
Virgil
7dd9807a6e chore: improve proxy api comments 2026-04-05 01:46:41 +00:00
Virgil
7b2a7ccd88 docs(proxy): align public AX comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:43:31 +00:00
Virgil
9f34bc7200 docs(proxy): refine config AX comments 2026-04-05 01:41:00 +00:00
Virgil
a1f47f5792 fix(proxy): align pool failover and simple routing
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:38:05 +00:00
Virgil
b5e4a6499f docs(proxy): improve AX usage comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:33:13 +00:00
Virgil
b9b3c47b4c docs(proxy): align public comments with AX
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:30:08 +00:00
Virgil
fefae4b3e5 fix(proxy): classify low difficulty rejects
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:26:24 +00:00
Virgil
264479d57b fix(pool): roll back failed submit tracking
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:23:50 +00:00
Virgil
d43c8ee4c1 fix: clear stale upstream state on disconnect 2026-04-05 01:19:50 +00:00
Virgil
05b0bb5ea4 docs(ax): improve public route and log examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:15:03 +00:00
Virgil
2a49caca03 docs(ax): add codex conventions
Clarify reload behaviour for immutable splitter wiring.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:12:06 +00:00
Virgil
3cd0909d74 feat(proxy): add share sink fanout
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:08:23 +00:00
Virgil
d0ae26a1a2 Align difficulty math with RFC 2026-04-05 01:05:00 +00:00
Virgil
3f9da136e9 docs(ax): align public api comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 01:02:31 +00:00
Virgil
f3c5175785 docs(proxy): align AX usage comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:59:17 +00:00
Virgil
e94616922d Fix simple mapper recovery state 2026-04-05 00:56:17 +00:00
Virgil
d8b4bf2775 refactor(proxy): clarify internal helper naming
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:52:34 +00:00
Virgil
3debd08a64 fix(api): enforce monitoring auth on exported routes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:49:26 +00:00
Virgil
eabe9b521d fix(proxy): harden hot-reload config access
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:45:39 +00:00
Virgil
a11d5b0969 refactor(proxy): align public comments with AX
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:42:12 +00:00
Virgil
766c4d1946 docs(pool): align public comments with AX
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:39:08 +00:00
Virgil
8ad123ecab refactor(proxy): improve splitter registry naming
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:36:29 +00:00
Virgil
55d44df9c2 Use effective custom diff in share accounting 2026-04-05 00:32:43 +00:00
Virgil
9d2b1f368c docs(proxy): align API comments with AX
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:28:03 +00:00
Virgil
2364633afc docs(ax): improve proxy API comments
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:25:30 +00:00
Virgil
9460f82738 Align monitoring route handling 2026-04-05 00:21:42 +00:00
Virgil
cf4136c8f0 docs(proxy): align public comments with AX
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:18:05 +00:00
Virgil
460aae14fb refactor(api): clarify route registration usage
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:15:01 +00:00
Virgil
bbdff60580 Honor worker mode changes on reload 2026-04-05 00:11:43 +00:00
Virgil
a76e6be1c7 docs(proxy): align public entry points with AX examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:07:27 +00:00
Virgil
96f7f18c96 refactor(log): align logging helpers with AX naming
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:04:02 +00:00
Virgil
77435d44fe docs(rfc): add spec path aliases
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-05 00:00:39 +00:00
Virgil
ad069a45d5 fix(proxy): harden monitoring helpers against nil config
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:58:09 +00:00
Virgil
7a48e479ec docs(proxy): align public docs with AX examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:54:23 +00:00
Virgil
fd88492b00 fix(proxy): honour monitoring restriction flag
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:51:37 +00:00
Virgil
fd6bc01b87 docs(proxy): align API comments with AX
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:48:33 +00:00
Virgil
9e44fb6ea3 refactor(log): add explicit log close lifecycle
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:45:11 +00:00
Virgil
fd76640d69 refactor(proxy): clarify monitoring HTTP helpers
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:42:26 +00:00
Virgil
fb5453c097 fix(api): enforce GET on monitoring endpoints
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:39:29 +00:00
Virgil
34f95071d9 fix(proxy): align difficulty conversion with RFC examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:36:55 +00:00
Virgil
4e5311215d refactor(proxy): centralise invalid share classification
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:33:21 +00:00
Virgil
2d39783dc4 fix(nicehash): align upstream totals with RFC
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:30:40 +00:00
Virgil
e2bd10c94f docs(proxy): sharpen AX usage examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:28:35 +00:00
Virgil
1e6ba01d03 fix(api): mask miner passwords in miners document
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:25:46 +00:00
Virgil
c0efdfb0ca refactor(api): unify monitoring documents for AX 2026-04-04 23:23:13 +00:00
Virgil
619b3c500d refactor(proxy): isolate event bus panics
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:16:18 +00:00
Virgil
8a321e2467 docs(proxy): align public helpers with AX examples
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:13:43 +00:00
Virgil
167ecc2bdc refactor(proxy): clarify agent-facing names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:10:35 +00:00
Virgil
0bb5ce827b fix(proxy): fail fast on HTTP bind errors
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:07:43 +00:00
Virgil
6f0f695054 fix(proxy): separate HTTP auth and method errors
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 23:05:18 +00:00
Virgil
4a0213e89f fix(simple): add missing mapper constructor
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:57:19 +00:00
Virgil
84362d9dc5 fix(reload): reconnect upstreams on pool config changes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:52:01 +00:00
Virgil
4006f33c1e fix(proxy): reload watcher on watch config changes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:41:16 +00:00
Virgil
9b6a251145 fix(log): align access log formats with RFC
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:29:00 +00:00
Virgil
0c746e4ea7 fix(proxy): support OpenSSL TLS cipher aliases
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:26:42 +00:00
Virgil
e594b04d7c fix(splitter): reclaim idle nicehash mappers
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:23:50 +00:00
Virgil
187a366d74 refactor(proxy): align aggregate field names with RFC
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:20:39 +00:00
Virgil
5ba21cb9bf fix(proxy): align miner success wire format
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:17:31 +00:00
Virgil
2b8bba790c refactor(proxy): align login flow with RFC order
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:13:10 +00:00
Virgil
cfd669e4d2 refactor(proxy): clarify config parameter names
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:09:58 +00:00
Virgil
6422a948bf refactor(proxy): clarify runtime config naming
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:06:18 +00:00
Virgil
8bde2c14d0 fix(proxy): dispatch login events before splitter assignment
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 22:02:25 +00:00
Virgil
a79b35abaf Improve public API usage examples 2026-04-04 21:58:37 +00:00
Virgil
5e343a7354 fix(proxy): disconnect splitters on shutdown
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 21:55:49 +00:00
Virgil
4c2a0ffab7 fix(log): keep access log columns stable
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 21:52:13 +00:00
Virgil
33d35ed063 refactor(api): name monitoring row payloads
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 21:48:27 +00:00
Virgil
c74f62e6d7 fix(nicehash): avoid double-counting expired submissions
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 21:45:57 +00:00
Virgil
8b47e6a11b refactor(proxy): rename hash totals for clarity
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 21:42:57 +00:00
Virgil
6d6934f37b fix(proxy): resolve login custom diff during handshake
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 21:39:58 +00:00
Virgil
c62f2c86a9 fix(proxy): omit null errors from success replies
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 21:37:15 +00:00
Virgil
1548643c65 fix(nicehash): flag expired shares with previous job
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 21:33:35 +00:00
Virgil
1065b78b7c chore(proxy): remove stale runtime comments 2026-04-04 21:16:21 +00:00
Virgil
9028334d49 Implement miner target capping and RFC line limits 2026-04-04 21:08:28 +00:00
Virgil
186524b3a8 fix(proxy): reject full NiceHash login tables
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 21:03:14 +00:00
Virgil
d9c59c668d fix(proxy): preserve miner remote address in API
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:59:02 +00:00
Virgil
8faac7eee6 fix(proxy): apply custom diff before worker login
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:56:16 +00:00
Virgil
ce7d3301fc refactor(proxy): clarify config path naming
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:53:49 +00:00
Virgil
c7d688ccfa fix(proxy): drain pending submits on stop
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:51:14 +00:00
Virgil
d42c21438a fix(proxy): honour invalid custom diff suffixes
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:48:10 +00:00
Virgil
86c07943b0 fix(proxy): align watcher and HTTP payloads
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:45:37 +00:00
Virgil
35d8c524e4 refactor(proxy): normalise worker timestamps
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:35:03 +00:00
Virgil
d47d89af7a Fix submit job validation and custom diff fallback 2026-04-04 20:28:54 +00:00
Virgil
b66739b64f fix(simple): track expired shares
Propagate the submitted job id through simple-mode share handling so accepted shares can be flagged expired when a reply lands after a job rollover. Add coverage for the expired accept event path.

Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:24:06 +00:00
Virgil
3efa7f34d0 docs(proxy): add AX usage examples to lifecycle APIs
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:12:26 +00:00
Virgil
b3ad79d832 feat(proxy): wire share log events
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:09:13 +00:00
Virgil
d10a57e377 fix(proxy): close miners during shutdown
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:05:54 +00:00
Virgil
6d6da10885 fix(proxy): stabilise worker snapshots
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 20:02:35 +00:00
Virgil
b16ebc1a28 feat(proxy): store miner login algo list
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 19:59:01 +00:00
Virgil
2f59714cce refactor(api): clarify summary ratio helper
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 19:46:48 +00:00
Virgil
21fce78ffe fix(simple): count upstream errors in stats 2026-04-04 19:44:19 +00:00
Virgil
e92c6070be feat(proxy): add custom diff stats and clean failover disconnects
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 19:20:29 +00:00
Virgil
c250a4d6f2 fix(splitter): honour reuse timeout and stale jobs
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 19:15:14 +00:00
Virgil
4a281e6e25 feat(pool): wire keepalive ticks through splitters
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 19:00:18 +00:00
Virgil
1bcbb389e6 feat(proxy): wire worker bus and mapper startup
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 18:53:16 +00:00
Virgil
bc67e73ca0 fix(proxy): tighten listener and limiter lifecycle
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 18:49:03 +00:00
Virgil
31a8ba558f Wire access log config into proxy 2026-04-04 18:42:57 +00:00
Virgil
6f4d7019e2 fix(proxy): align runtime with RFC
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 18:38:28 +00:00
Virgil
64443c41f6 feat(proxy): fill RFC login and watch gaps
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 18:25:36 +00:00
Virgil
7f44596858 fix(proxy): validate config and reload pools
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 18:19:09 +00:00
Snider
b8cf8713c5 fix: correct module path to dappco.re/go/proxy
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 16:10:33 +01:00
Snider
c7ada3dd54 docs: add core/go RFC primitives for agent reference
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 15:48:45 +01:00
Snider
eb896a065f docs: fix R4 Sonnet issues (circular imports, missing method signatures)
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 13:31:22 +01:00
Snider
a7d16b7685 docs: fix Sonnet-verified spec issues (interface compliance, type qualification, accessors)
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 13:09:19 +01:00
Snider
15e3050b02 docs: move AX principles to docs/ (image-managed .core/ is unreliable)
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 12:59:37 +01:00
Snider
2a68aa4637 docs: update RFC spec and add AX principles reference
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 12:58:43 +01:00
cd517cd0d6 Merge pull request '[agent/codex:gpt-5.4-mini] Read docs/RFC.md fully. Find features described in the specs...' (#1) from agent/read-docs-rfc-md-fully--find-features-de into dev 2026-04-04 10:29:12 +00:00
Virgil
a38dfc18ec feat(proxy): implement RFC surface
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 10:29:02 +00:00
90 changed files with 11867 additions and 5980 deletions

View file

@ -1,22 +1,14 @@
# CODEX.md # CODEX.md
This repository uses the same conventions as `CLAUDE.md`. This repository uses `CLAUDE.md` as the detailed source of truth for working conventions.
This file exists so agent workflows that expect `CODEX.md` can resolve the repo rules directly.
## Source Of Truth ## Core Conventions
- RFC: `docs/RFC.md` - Read `docs/RFC.md` before changing behaviour.
- AX principles: `.core/reference/RFC-025-AGENT-EXPERIENCE.md` - Preserve existing user changes in the worktree.
- Package conventions: `CLAUDE.md` - Prefer `rg` for search and `apply_patch` for edits.
- Keep names predictable and comments example-driven.
- Run `go test ./...` and `go test -race ./...` before committing when practical.
- Commit with a conventional message and include the required co-author line when requested by repo policy.
## Quick Commands
```bash
go test ./...
go test -race ./...
go vet ./...
```
## Commit Style
- Use conventional commits: `type(scope): description`
- Include `Co-Authored-By: Virgil <virgil@lethean.io>`

121
accesslog_impl.go Normal file
View file

@ -0,0 +1,121 @@
package proxy
import (
"os"
"strconv"
"strings"
"sync"
"time"
)
// accessLogSink appends CONNECT/CLOSE lifecycle lines to an access log file.
// The file is opened lazily on first write and all access is guarded by mu.
type accessLogSink struct {
	path string   // destination file path; blank (after trimming) disables writes
	file *os.File // lazily opened append handle; nil until the first write
	mu   sync.Mutex
}
// newAccessLogSink returns a sink that will append to path; the file itself
// is not opened until the first line is written.
//
//	sink := newAccessLogSink("/var/log/proxy/access.log")
func newAccessLogSink(path string) *accessLogSink {
	sink := &accessLogSink{}
	sink.path = path
	return sink
}
// SetPath switches the sink to a new destination path. Any handle open for
// the previous path is closed so the next write reopens at the new location.
// Re-setting the current path is a no-op. Safe to call on a nil receiver.
func (l *accessLogSink) SetPath(path string) {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if path == l.path {
		return
	}
	l.path = path
	if handle := l.file; handle != nil {
		_ = handle.Close()
		l.file = nil
	}
}
// Close releases the open log file handle, if any. Safe on a nil receiver.
// A later write reopens the file at the current path.
func (l *accessLogSink) Close() {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.file == nil {
		return
	}
	_ = l.file.Close()
	l.file = nil
}
// OnLogin records a CONNECT line for the miner attached to the event.
// Events without a miner, and calls on a nil sink, are ignored.
func (l *accessLogSink) OnLogin(e Event) {
	if l == nil {
		return
	}
	miner := e.Miner
	if miner == nil {
		return
	}
	l.writeConnectLine(miner.IP(), miner.User(), miner.Agent())
}
// OnClose records a CLOSE line with the miner's rx/tx byte counters.
// Events without a miner, and calls on a nil sink, are ignored.
func (l *accessLogSink) OnClose(e Event) {
	if l == nil {
		return
	}
	miner := e.Miner
	if miner == nil {
		return
	}
	l.writeCloseLine(miner.IP(), miner.User(), miner.RX(), miner.TX())
}
// writeConnectLine appends a connect record in the fixed format:
//
//	<RFC3339-UTC> CONNECT <ip> <user> <agent>
//
// A blank (whitespace-only) path disables logging. The file is opened
// lazily in create/append mode; open and write failures are silently
// dropped so logging stays best-effort.
func (l *accessLogSink) writeConnectLine(ip, user, agent string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if strings.TrimSpace(l.path) == "" {
		return
	}
	if l.file == nil {
		handle, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
		if err != nil {
			return
		}
		l.file = handle
	}
	line := time.Now().UTC().Format(time.RFC3339) +
		" CONNECT " + ip + " " + user + " " + agent + "\n"
	_, _ = l.file.WriteString(line)
}
// writeCloseLine appends a close record with the session byte counters:
//
//	<RFC3339-UTC> CLOSE <ip> <user> rx=<rx> tx=<tx>
//
// A blank (whitespace-only) path disables logging. The file is opened
// lazily in create/append mode; open and write failures are silently
// dropped so logging stays best-effort.
func (l *accessLogSink) writeCloseLine(ip, user string, rx, tx uint64) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if strings.TrimSpace(l.path) == "" {
		return
	}
	if l.file == nil {
		handle, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
		if err != nil {
			return
		}
		l.file = handle
	}
	line := time.Now().UTC().Format(time.RFC3339) +
		" CLOSE " + ip + " " + user +
		" rx=" + formatUint(rx) +
		" tx=" + formatUint(tx) + "\n"
	_, _ = l.file.WriteString(line)
}
// formatUint renders value as base-10 text, e.g. formatUint(42) == "42".
func formatUint(value uint64) string {
	// AppendUint with a pre-sized scratch buffer; 20 digits covers MaxUint64.
	return string(strconv.AppendUint(make([]byte, 0, 20), value, 10))
}

102
accesslog_test.go Normal file
View file

@ -0,0 +1,102 @@
package proxy
import (
"net"
"os"
"path/filepath"
"strings"
"testing"
"time"
)
// TestProxy_AccessLog_WritesLifecycleLines checks that dispatching login and
// close events for a fully populated miner produces matching CONNECT and
// CLOSE lines in the configured access log file.
func TestProxy_AccessLog_WritesLifecycleLines(t *testing.T) {
	logPath := filepath.Join(t.TempDir(), "access.log")
	cfg := &Config{
		Mode:          "nicehash",
		Workers:       WorkersByRigID,
		AccessLogFile: logPath,
		Bind:          []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:         []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	proxyInstance, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected valid proxy, got error: %v", result.Error)
	}
	m := &Miner{
		ip:    "10.0.0.1",
		user:  "WALLET",
		agent: "XMRig/6.21.0",
		rx:    512,
		tx:    4096,
		conn:  noopConn{},
		state: MinerStateReady,
		rpcID: "session",
	}
	proxyInstance.events.Dispatch(Event{Type: EventLogin, Miner: m})
	proxyInstance.events.Dispatch(Event{Type: EventClose, Miner: m})
	proxyInstance.Stop()
	raw, err := os.ReadFile(logPath)
	if err != nil {
		t.Fatalf("read access log: %v", err)
	}
	content := string(raw)
	if !strings.Contains(content, "CONNECT 10.0.0.1 WALLET XMRig/6.21.0") {
		t.Fatalf("expected CONNECT line, got %q", content)
	}
	if !strings.Contains(content, "CLOSE 10.0.0.1 WALLET rx=512 tx=4096") {
		t.Fatalf("expected CLOSE line, got %q", content)
	}
}
// TestProxy_AccessLog_WritesFixedColumns checks that a miner with only ip and
// user set still yields the fixed column layout: the CONNECT line omits
// counters and the CLOSE line reports rx=0 tx=0.
func TestProxy_AccessLog_WritesFixedColumns(t *testing.T) {
	logPath := filepath.Join(t.TempDir(), "access.log")
	cfg := &Config{
		Mode:          "nicehash",
		Workers:       WorkersByRigID,
		AccessLogFile: logPath,
		Bind:          []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:         []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	proxyInstance, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected valid proxy, got error: %v", result.Error)
	}
	m := &Miner{
		ip:   "10.0.0.1",
		user: "WALLET",
		conn: noopConn{},
	}
	proxyInstance.events.Dispatch(Event{Type: EventLogin, Miner: m})
	proxyInstance.events.Dispatch(Event{Type: EventClose, Miner: m})
	proxyInstance.Stop()
	raw, err := os.ReadFile(logPath)
	if err != nil {
		t.Fatalf("read access log: %v", err)
	}
	content := string(raw)
	if !strings.Contains(content, "CONNECT 10.0.0.1 WALLET") {
		t.Fatalf("expected CONNECT line without counters, got %q", content)
	}
	if !strings.Contains(content, "CLOSE 10.0.0.1 WALLET rx=0 tx=0") {
		t.Fatalf("expected CLOSE line with counters only, got %q", content)
	}
}
// noopConn is a no-op net.Conn used to populate Miner's connection field in
// tests. Read and Write report os.ErrClosed so no real I/O ever happens;
// the address and deadline methods are inert.
type noopConn struct{}

func (noopConn) Read([]byte) (int, error)  { return 0, os.ErrClosed }
func (noopConn) Write([]byte) (int, error) { return 0, os.ErrClosed }
func (noopConn) Close() error              { return nil }
func (noopConn) LocalAddr() net.Addr       { return nil }
func (noopConn) RemoteAddr() net.Addr      { return nil }
func (noopConn) SetDeadline(time.Time) error      { return nil }
func (noopConn) SetReadDeadline(time.Time) error  { return nil }
func (noopConn) SetWriteDeadline(time.Time) error { return nil }

View file

@ -1,26 +1,62 @@
// Package api wires the monitoring endpoints onto an HTTP router. // Package api mounts the monitoring endpoints on an HTTP mux.
// //
// mux := http.NewServeMux() // mux := http.NewServeMux()
// api.RegisterRoutes(mux, p) // api.RegisterRoutes(mux, proxyInstance)
package api package api
import ( import (
"dappco.re/go/core/proxy" "encoding/json"
"net/http"
"dappco.re/go/proxy"
) )
// Router is the minimal route-registration surface used by RegisterRoutes. // RouteRegistrar accepts HTTP handler registrations.
type Router = proxy.RouteRegistrar
type SummaryResponse = proxy.SummaryResponse
type HashrateResponse = proxy.HashrateResponse
type MinersCountResponse = proxy.MinersCountResponse
type UpstreamResponse = proxy.UpstreamResponse
type ResultsResponse = proxy.ResultsResponse
// RegisterRoutes mounts the monitoring endpoints on any router with HandleFunc.
// //
// mux := http.NewServeMux() // mux := http.NewServeMux()
// api.RegisterRoutes(mux, p) // api.RegisterRoutes(mux, proxyInstance)
func RegisterRoutes(router Router, proxyValue *proxy.Proxy) { type RouteRegistrar interface {
proxy.RegisterMonitoringRoutes(router, proxyValue) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request))
}
// mux := http.NewServeMux()
// api.RegisterRoutes(mux, proxyInstance)
// _ = mux
//
// The mounted routes are GET /1/summary, /1/workers, and /1/miners.
func RegisterRoutes(router RouteRegistrar, p *proxy.Proxy) {
if router == nil || p == nil {
return
}
registerJSONGetRoute(router, p, proxy.MonitoringRouteSummary, func() any { return p.SummaryDocument() })
registerJSONGetRoute(router, p, proxy.MonitoringRouteWorkers, func() any { return p.WorkersDocument() })
registerJSONGetRoute(router, p, proxy.MonitoringRouteMiners, func() any { return p.MinersDocument() })
}
func registerJSONGetRoute(router RouteRegistrar, proxyInstance *proxy.Proxy, pattern string, renderDocument func() any) {
router.HandleFunc(pattern, func(w http.ResponseWriter, request *http.Request) {
if status, ok := allowMonitoringRequest(proxyInstance, request); !ok {
switch status {
case http.StatusMethodNotAllowed:
w.Header().Set("Allow", http.MethodGet)
case http.StatusUnauthorized:
w.Header().Set("WWW-Authenticate", "Bearer")
}
w.WriteHeader(status)
return
}
writeJSON(w, renderDocument())
})
}
func allowMonitoringRequest(proxyInstance *proxy.Proxy, request *http.Request) (int, bool) {
if proxyInstance == nil {
return http.StatusServiceUnavailable, false
}
return proxyInstance.AllowMonitoringRequest(request)
}
func writeJSON(w http.ResponseWriter, payload any) {
w.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(w).Encode(payload)
} }

201
api/router_test.go Normal file
View file

@ -0,0 +1,201 @@
package api
import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"dappco.re/go/proxy"
)
// TestRegisterRoutes_GETSummary_Good exercises the happy path: GET /1/summary
// on an unrestricted proxy returns 200 with the expected mode and version.
func TestRegisterRoutes_GETSummary_Good(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	proxyInstance, result := proxy.New(cfg)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}
	mux := http.NewServeMux()
	RegisterRoutes(mux, proxyInstance)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/1/summary", nil))
	if rec.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, rec.Code)
	}
	var doc proxy.SummaryDocument
	if err := json.Unmarshal(rec.Body.Bytes(), &doc); err != nil {
		t.Fatalf("decode summary document: %v", err)
	}
	if doc.Mode != "nicehash" {
		t.Fatalf("expected mode %q, got %q", "nicehash", doc.Mode)
	}
	if doc.Version != "1.0.0" {
		t.Fatalf("expected version %q, got %q", "1.0.0", doc.Version)
	}
}
// TestRegisterRoutes_POSTSummary_Bad checks that a restricted proxy rejects a
// POST to /1/summary with 405 Method Not Allowed.
func TestRegisterRoutes_POSTSummary_Bad(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		HTTP: proxy.HTTPConfig{
			Restricted: true,
		},
	}
	proxyInstance, result := proxy.New(cfg)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}
	mux := http.NewServeMux()
	RegisterRoutes(mux, proxyInstance)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest(http.MethodPost, "/1/summary", nil))
	if rec.Code != http.StatusMethodNotAllowed {
		t.Fatalf("expected %d, got %d", http.StatusMethodNotAllowed, rec.Code)
	}
}
// TestRegisterRoutes_POSTSummary_Unrestricted_Good checks that without the
// Restricted flag a POST to /1/summary is served with 200 and a decodable
// summary document.
func TestRegisterRoutes_POSTSummary_Unrestricted_Good(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	proxyInstance, result := proxy.New(cfg)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}
	mux := http.NewServeMux()
	RegisterRoutes(mux, proxyInstance)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest(http.MethodPost, "/1/summary", nil))
	if rec.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, rec.Code)
	}
	var doc proxy.SummaryDocument
	if err := json.Unmarshal(rec.Body.Bytes(), &doc); err != nil {
		t.Fatalf("decode summary document: %v", err)
	}
	if doc.Mode != "nicehash" {
		t.Fatalf("expected mode %q, got %q", "nicehash", doc.Mode)
	}
}
// TestRegisterRoutes_GETMiners_Ugly checks the miners listing of a proxy with
// workers disabled: the fixed 10-column format is present and no miners are
// listed on a freshly created proxy.
func TestRegisterRoutes_GETMiners_Ugly(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "simple",
		Workers: proxy.WorkersDisabled,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	instance, result := proxy.New(cfg)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}
	mux := http.NewServeMux()
	RegisterRoutes(mux, instance)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/1/miners", nil))
	if rec.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, rec.Code)
	}
	var listing proxy.MinersDocument
	if err := json.Unmarshal(rec.Body.Bytes(), &listing); err != nil {
		t.Fatalf("decode miners document: %v", err)
	}
	if len(listing.Format) != 10 {
		t.Fatalf("expected 10 miner columns, got %d", len(listing.Format))
	}
	if len(listing.Miners) != 0 {
		t.Fatalf("expected no miners in a new proxy, got %d", len(listing.Miners))
	}
}
// TestRegisterRoutes_GETSummaryAuthRequired_Bad confirms that a configured
// access token turns unauthenticated summary requests into 401 responses
// carrying a bearer challenge.
func TestRegisterRoutes_GETSummaryAuthRequired_Bad(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		HTTP: proxy.HTTPConfig{
			Enabled:     true,
			Restricted:  true,
			AccessToken: "secret",
		},
	}
	instance, result := proxy.New(cfg)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}
	mux := http.NewServeMux()
	RegisterRoutes(mux, instance)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/1/summary", nil))
	if rec.Code != http.StatusUnauthorized {
		t.Fatalf("expected %d, got %d", http.StatusUnauthorized, rec.Code)
	}
	if got := rec.Header().Get("WWW-Authenticate"); got != "Bearer" {
		t.Fatalf("expected bearer challenge, got %q", got)
	}
}
// TestRegisterRoutes_GETSummaryAuthGranted_Ugly sends the matching bearer
// token and expects the summary to be served. Unlike before, the body is now
// decoded as well, so this test asserts the same contract as the other
// summary tests in this file (status alone does not prove a valid document).
func TestRegisterRoutes_GETSummaryAuthGranted_Ugly(t *testing.T) {
	config := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		HTTP: proxy.HTTPConfig{
			Enabled:     true,
			Restricted:  true,
			AccessToken: "secret",
		},
	}
	p, result := proxy.New(config)
	if !result.OK {
		t.Fatalf("new proxy: %v", result.Error)
	}
	router := http.NewServeMux()
	RegisterRoutes(router, p)
	request := httptest.NewRequest(http.MethodGet, "/1/summary", nil)
	request.Header.Set("Authorization", "Bearer secret")
	recorder := httptest.NewRecorder()
	router.ServeHTTP(recorder, request)
	if recorder.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, recorder.Code)
	}
	// Consistency with the other summary tests: the authorised response must
	// be a decodable summary document, not just a 200.
	var document proxy.SummaryDocument
	if err := json.Unmarshal(recorder.Body.Bytes(), &document); err != nil {
		t.Fatalf("decode summary document: %v", err)
	}
	if document.Mode != "nicehash" {
		t.Fatalf("expected mode %q, got %q", "nicehash", document.Mode)
	}
}

113
api_rows.go Normal file
View file

@ -0,0 +1,113 @@
package proxy
const (
	// MonitoringRouteSummary is the summary endpoint path.
	//
	// http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteSummary)
	MonitoringRouteSummary = "/1/summary"

	// MonitoringRouteWorkers is the workers endpoint path.
	//
	// http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteWorkers)
	MonitoringRouteWorkers = "/1/workers"

	// MonitoringRouteMiners is the miners endpoint path.
	//
	// http.Get("http://127.0.0.1:8080" + proxy.MonitoringRouteMiners)
	MonitoringRouteMiners = "/1/miners"

	// SummaryDocumentVersion is the monitoring API version reported in
	// SummaryDocument.Version.
	//
	// doc := proxy.SummaryDocument{Version: proxy.SummaryDocumentVersion}
	SummaryDocumentVersion = "1.0.0"
)
var (
	// MinersDocumentFormat defines the fixed /1/miners column order. Copy it
	// before mutating — package-level slices are shared.
	//
	// doc := proxy.MinersDocument{Format: append([]string(nil), proxy.MinersDocumentFormat...)}
	MinersDocumentFormat = []string{"id", "ip", "tx", "rx", "state", "diff", "user", "password", "rig_id", "agent"}

	// workerHashrateWindows lists averaging windows in seconds
	// (1m, 10m, 1h, 12h, 24h).
	// NOTE(review): presumably these back the trailing hashrate columns of
	// WorkerRow — confirm where the windows are consumed.
	workerHashrateWindows = [5]int{60, 600, 3600, 43200, 86400}
)
// WorkerRow is one /1/workers row; the 13 columns are positional, as in the
// example below.
//
// WorkerRow{"rig-alpha", "10.0.0.1", 1, 10, 0, 0, 10000, 1712232000, 1.0, 1.0, 1.0, 1.0, 1.0}
type WorkerRow [13]any

// MinerRow is one /1/miners row, ordered per MinersDocumentFormat.
//
// MinerRow{1, "10.0.0.1:49152", 4096, 512, 2, 10000, "WALLET", maskedPassword, "rig-alpha", "XMRig/6.21.0"}
type MinerRow [10]any

// SummaryDocument is the JSON body served by the /1/summary endpoint.
//
// doc := p.SummaryDocument()
// _ = doc.Results.Accepted
// _ = doc.Upstreams.Ratio
type SummaryDocument struct {
	Version         string                           `json:"version"`
	Mode            string                           `json:"mode"`
	Hashrate        HashrateDocument                 `json:"hashrate"`
	Miners          MinersCountDocument              `json:"miners"`
	Workers         uint64                           `json:"workers"`
	Upstreams       UpstreamDocument                 `json:"upstreams"`
	Results         ResultsDocument                  `json:"results"`
	CustomDiffStats map[uint64]CustomDiffBucketStats `json:"custom_diff_stats,omitempty"`
}

// SummaryResponse is the RFC name for SummaryDocument.
type SummaryResponse = SummaryDocument

// HashrateDocument carries six total-hashrate figures.
//
// HashrateDocument{Total: [6]float64{12345.67, 11900.00, 12100.00, 11800.00, 12000.00, 12200.00}}
type HashrateDocument struct {
	Total [6]float64 `json:"total"`
}

// HashrateResponse is the RFC name for HashrateDocument.
type HashrateResponse = HashrateDocument

// MinersCountDocument reports the current and peak miner counts.
//
// MinersCountDocument{Now: 142, Max: 200}
type MinersCountDocument struct {
	Now uint64 `json:"now"`
	Max uint64 `json:"max"`
}

// MinersCountResponse is the RFC name for MinersCountDocument.
type MinersCountResponse = MinersCountDocument

// UpstreamDocument summarises upstream pool connection states.
//
// UpstreamDocument{Active: 1, Sleep: 0, Error: 0, Total: 1, Ratio: 142.0}
type UpstreamDocument struct {
	Active uint64  `json:"active"`
	Sleep  uint64  `json:"sleep"`
	Error  uint64  `json:"error"`
	Total  uint64  `json:"total"`
	Ratio  float64 `json:"ratio"`
}

// UpstreamResponse is the RFC name for UpstreamDocument.
type UpstreamResponse = UpstreamDocument

// ResultsDocument aggregates share submission results.
//
// ResultsDocument{Accepted: 4821, Rejected: 3, Invalid: 0, Expired: 12}
type ResultsDocument struct {
	Accepted    uint64     `json:"accepted"`
	Rejected    uint64     `json:"rejected"`
	Invalid     uint64     `json:"invalid"`
	Expired     uint64     `json:"expired"`
	AvgTime     uint32     `json:"avg_time"`
	Latency     uint32     `json:"latency"`
	HashesTotal uint64     `json:"hashes_total"`
	Best        [10]uint64 `json:"best"`
}

// ResultsResponse is the RFC name for ResultsDocument.
type ResultsResponse = ResultsDocument

// WorkersDocument is the JSON body served by the /1/workers endpoint.
//
// doc := p.WorkersDocument()
// _ = doc.Workers[0][0]
type WorkersDocument struct {
	Mode    string      `json:"mode"`
	Workers []WorkerRow `json:"workers"`
}

// MinersDocument is the JSON body served by the /1/miners endpoint.
//
// doc := p.MinersDocument()
// _ = doc.Miners[0][7]
type MinersDocument struct {
	Format []string   `json:"format"`
	Miners []MinerRow `json:"miners"`
}

View file

@ -1,9 +1,14 @@
package proxy package proxy
// Config is the top-level proxy configuration, loaded from JSON and hot-reloaded on change. // Config is the top-level proxy configuration loaded from JSON.
// //
// cfg, result := proxy.LoadConfig("config.json") // cfg := &proxy.Config{
// if !result.OK { log.Fatal(result.Error) } // Mode: "nicehash",
// Bind: []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}},
// Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
// Watch: true,
// Workers: proxy.WorkersByRigID,
// }
type Config struct { type Config struct {
Mode string `json:"mode"` // "nicehash" or "simple" Mode string `json:"mode"` // "nicehash" or "simple"
Bind []BindAddr `json:"bind"` // listen addresses Bind []BindAddr `json:"bind"` // listen addresses
@ -22,7 +27,7 @@ type Config struct {
RetryPause int `json:"retry-pause"` // seconds between retries RetryPause int `json:"retry-pause"` // seconds between retries
Watch bool `json:"watch"` // hot-reload on file change Watch bool `json:"watch"` // hot-reload on file change
RateLimit RateLimit `json:"rate-limit"` // per-IP connection rate limit RateLimit RateLimit `json:"rate-limit"` // per-IP connection rate limit
sourcePath string `json:"-"` configPath string
} }
// BindAddr is one TCP listen endpoint. // BindAddr is one TCP listen endpoint.
@ -41,7 +46,6 @@ type PoolConfig struct {
URL string `json:"url"` URL string `json:"url"`
User string `json:"user"` User string `json:"user"`
Pass string `json:"pass"` Pass string `json:"pass"`
Password string `json:"password"`
RigID string `json:"rig-id"` RigID string `json:"rig-id"`
Algo string `json:"algo"` Algo string `json:"algo"`
TLS bool `json:"tls"` TLS bool `json:"tls"`
@ -50,7 +54,7 @@ type PoolConfig struct {
Enabled bool `json:"enabled"` Enabled bool `json:"enabled"`
} }
// TLSConfig controls inbound TLS on bind addresses that have TLS: true. // TLSConfig controls inbound TLS for miner listeners.
// //
// proxy.TLSConfig{Enabled: true, CertFile: "/etc/proxy/cert.pem", KeyFile: "/etc/proxy/key.pem"} // proxy.TLSConfig{Enabled: true, CertFile: "/etc/proxy/cert.pem", KeyFile: "/etc/proxy/key.pem"}
type TLSConfig struct { type TLSConfig struct {
@ -72,23 +76,26 @@ type HTTPConfig struct {
Restricted bool `json:"restricted"` // true = read-only GET only Restricted bool `json:"restricted"` // true = read-only GET only
} }
// RateLimit controls per-IP connection rate limiting using a token bucket. // RateLimit caps connection attempts per source IP.
// //
// proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300} // limiter := proxy.NewRateLimiter(proxy.RateLimit{
// MaxConnectionsPerMinute: 30,
// BanDurationSeconds: 300,
// })
type RateLimit struct { type RateLimit struct {
MaxConnectionsPerMinute int `json:"max-connections-per-minute"` // 0 = disabled MaxConnectionsPerMinute int `json:"max-connections-per-minute"` // 0 = disabled
BanDurationSeconds int `json:"ban-duration"` // 0 = no ban BanDurationSeconds int `json:"ban-duration"` // 0 = no ban
} }
// WorkersMode controls which login field becomes the worker name. // WorkersMode picks the login field used as the worker name.
//
// cfg.Workers = proxy.WorkersByRigID
type WorkersMode string type WorkersMode string
const ( const (
WorkersByRigID WorkersMode = "rig-id" // rigid field, fallback to user WorkersByRigID WorkersMode = "rig-id" // rigid field, fallback to user
WorkersByUser WorkersMode = "user" WorkersByUser WorkersMode = "user"
WorkersByPassword WorkersMode = "password" WorkersByPass WorkersMode = "password"
// WorkersByPass is kept as a compatibility alias for older configs.
WorkersByPass WorkersMode = WorkersByPassword
WorkersByAgent WorkersMode = "agent" WorkersByAgent WorkersMode = "agent"
WorkersByIP WorkersMode = "ip" WorkersByIP WorkersMode = "ip"
WorkersDisabled WorkersMode = "false" WorkersDisabled WorkersMode = "false"

63
config_load_test.go Normal file
View file

@ -0,0 +1,63 @@
package proxy
import (
"os"
"path/filepath"
"testing"
)
// TestConfig_LoadConfig_Good loads a well-formed config file and checks the
// parsed mode plus the recorded source path.
func TestConfig_LoadConfig_Good(t *testing.T) {
	configFile := filepath.Join(t.TempDir(), "config.json")
	payload := []byte(`{"mode":"nicehash","workers":"rig-id","bind":[{"host":"0.0.0.0","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
	if err := os.WriteFile(configFile, payload, 0o600); err != nil {
		t.Fatalf("expected config file write to succeed: %v", err)
	}
	cfg, result := LoadConfig(configFile)
	if !result.OK {
		t.Fatalf("expected load to succeed, got error: %v", result.Error)
	}
	if cfg == nil {
		t.Fatal("expected config to be returned")
	}
	if got := cfg.Mode; got != "nicehash" {
		t.Fatalf("expected mode to round-trip, got %q", got)
	}
	if got := cfg.configPath; got != configFile {
		t.Fatalf("expected config path to be recorded, got %q", got)
	}
}
// TestConfig_LoadConfig_Bad checks that a missing file yields a failed result
// and no config value.
func TestConfig_LoadConfig_Bad(t *testing.T) {
	missing := filepath.Join(t.TempDir(), "missing.json")
	cfg, result := LoadConfig(missing)
	if result.OK {
		t.Fatalf("expected missing config file to fail, got cfg=%+v", cfg)
	}
	if cfg != nil {
		t.Fatalf("expected no config on read failure, got %+v", cfg)
	}
}
// TestConfig_LoadConfig_Ugly shows that loading only checks JSON syntax: a
// semantically invalid mode round-trips, and Validate fails separately.
func TestConfig_LoadConfig_Ugly(t *testing.T) {
	configFile := filepath.Join(t.TempDir(), "config.json")
	payload := []byte(`{"mode":"invalid","workers":"rig-id","bind":[],"pools":[]}`)
	if err := os.WriteFile(configFile, payload, 0o600); err != nil {
		t.Fatalf("expected config file write to succeed: %v", err)
	}
	cfg, result := LoadConfig(configFile)
	if !result.OK {
		t.Fatalf("expected syntactically valid JSON to load, got error: %v", result.Error)
	}
	if cfg == nil {
		t.Fatal("expected config to be returned")
	}
	if got := cfg.Mode; got != "invalid" {
		t.Fatalf("expected invalid mode value to be preserved, got %q", got)
	}
	if validation := cfg.Validate(); validation.OK {
		t.Fatal("expected semantic validation to fail separately from loading")
	}
}

View file

@ -1,154 +0,0 @@
package proxy
import (
"encoding/json"
"errors"
"os"
"strings"
"time"
)
// LoadConfig reads a JSON config file and validates the result.
//
// cfg, errorValue := proxy.LoadConfig("config.json")
// if errorValue != nil {
// return
// }
func LoadConfig(path string) (*Config, error) {
data, errorValue := os.ReadFile(path)
if errorValue != nil {
return nil, errorValue
}
config := &Config{}
if errorValue = json.Unmarshal(data, config); errorValue != nil {
return nil, errorValue
}
config.sourcePath = path
if errorValue = config.Validate(); errorValue != nil {
return nil, errorValue
}
return config, nil
}
// Validate checks that `bind` and `pools` are present and every enabled pool has a URL.
//
// cfg := &proxy.Config{
// Bind: []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
// Pools: []proxy.PoolConfig{{URL: "pool-a:3333", Enabled: true}},
// }
// if errorValue := cfg.Validate(); errorValue != nil {
// return
// }
func (c *Config) Validate() error {
if c == nil {
return errors.New("config is nil")
}
if strings.TrimSpace(c.Mode) == "" {
return errors.New("mode is empty")
}
if c.Mode != "nicehash" && c.Mode != "simple" {
return errors.New("mode is invalid")
}
if strings.TrimSpace(string(c.Workers)) == "" {
return errors.New("workers mode is empty")
}
if c.Workers != WorkersByRigID &&
c.Workers != WorkersByUser &&
c.Workers != WorkersByPass &&
c.Workers != WorkersByPassword &&
c.Workers != WorkersByAgent &&
c.Workers != WorkersByIP &&
c.Workers != WorkersDisabled {
return errors.New("workers mode is invalid")
}
if len(c.Bind) == 0 {
return errors.New("bind list is empty")
}
if len(c.Pools) == 0 {
return errors.New("pool list is empty")
}
for _, poolConfig := range c.Pools {
if poolConfig.Enabled && strings.TrimSpace(poolConfig.URL) == "" {
return errors.New("enabled pool URL is empty")
}
}
return nil
}
// NewConfigWatcher watches a config file and reloads the proxy on modification.
//
// w := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
// p.Reload(cfg)
// })
func NewConfigWatcher(path string, onChange func(*Config)) *ConfigWatcher {
return newConfigWatcher(path, onChange, true)
}
func newConfigWatcher(path string, onChange func(*Config), enabled bool) *ConfigWatcher {
return &ConfigWatcher{
path: path,
onChange: onChange,
enabled: enabled,
done: make(chan struct{}),
}
}
// Start begins 1-second polling for `config.json`.
//
// w.Start()
func (w *ConfigWatcher) Start() {
if w == nil || !w.enabled {
return
}
if info, errorValue := os.Stat(w.path); errorValue == nil {
w.lastModifiedAt = info.ModTime()
}
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
info, errorValue := os.Stat(w.path)
if errorValue != nil {
continue
}
if !info.ModTime().After(w.lastModifiedAt) {
continue
}
w.lastModifiedAt = info.ModTime()
config, errorValue := LoadConfig(w.path)
if errorValue == nil && w.onChange != nil {
w.onChange(config)
}
case <-w.done:
return
}
}
}()
}
// Stop ends polling so the watcher can be shut down with `p.Stop()`.
//
// w.Stop()
func (w *ConfigWatcher) Stop() {
if w == nil || w.done == nil {
return
}
select {
case <-w.done:
default:
close(w.done)
}
}

View file

@ -1,106 +0,0 @@
package proxy
import (
"os"
"testing"
"time"
)
func TestConfig_Validate_Good(t *testing.T) {
cfg := &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
}
if errorValue := cfg.Validate(); errorValue != nil {
t.Fatalf("expected valid config, got %v", errorValue)
}
}
func TestConfig_Validate_Bad(t *testing.T) {
cfg := &Config{
Mode: "bogus",
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
}
if errorValue := cfg.Validate(); errorValue == nil {
t.Fatal("expected invalid mode to fail validation")
}
}
func TestConfig_Validate_EmptyMode(t *testing.T) {
cfg := &Config{
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
}
if errorValue := cfg.Validate(); errorValue == nil {
t.Fatal("expected empty mode to fail validation")
}
}
func TestConfig_Validate_Ugly(t *testing.T) {
cfg := &Config{
Mode: "simple",
Workers: WorkersMode("bogus"),
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{Enabled: true}},
}
if errorValue := cfg.Validate(); errorValue == nil {
t.Fatal("expected invalid workers mode to fail validation")
}
}
func TestConfig_Validate_EmptyWorkers(t *testing.T) {
cfg := &Config{
Mode: "simple",
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
}
if errorValue := cfg.Validate(); errorValue == nil {
t.Fatal("expected empty workers mode to fail validation")
}
}
func TestConfig_Validate_EnabledPoolURL(t *testing.T) {
cfg := &Config{
Mode: "simple",
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{Enabled: true}},
}
if errorValue := cfg.Validate(); errorValue == nil {
t.Fatal("expected empty enabled pool URL to fail validation")
}
}
func TestConfigWatcher_Start_Bad(t *testing.T) {
path := t.TempDir() + "/config.json"
errorValue := os.WriteFile(path, []byte(`{"mode":"nicehash","workers":"rig-id","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-a:3333","enabled":true}]}`), 0o644)
if errorValue != nil {
t.Fatal(errorValue)
}
triggered := make(chan struct{}, 1)
watcher := newConfigWatcher(path, func(cfg *Config) {
triggered <- struct{}{}
}, false)
watcher.Start()
defer watcher.Stop()
errorValue = os.WriteFile(path, []byte(`{"mode":"nicehash","workers":"rig-id","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-b:3333","enabled":true}]}`), 0o644)
if errorValue != nil {
t.Fatal(errorValue)
}
select {
case <-triggered:
t.Fatal("expected disabled watcher to stay quiet")
case <-time.After(1200 * time.Millisecond):
}
}

90
config_test.go Normal file
View file

@ -0,0 +1,90 @@
package proxy
import "testing"
// TestConfig_Validate_Good accepts a fully-populated nicehash config.
func TestConfig_Validate_Good(t *testing.T) {
	valid := &Config{
		Mode:    "nicehash",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	result := valid.Validate()
	if !result.OK {
		t.Fatalf("expected valid config, got error: %v", result.Error)
	}
}
// TestConfig_Validate_Bad rejects a config whose mode is missing.
func TestConfig_Validate_Bad(t *testing.T) {
	missingMode := &Config{
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	if missingMode.Validate().OK {
		t.Fatalf("expected missing mode to fail validation")
	}
}
// TestConfig_Validate_Ugly rejects a config combining an unknown workers mode
// with an enabled pool that has no URL.
func TestConfig_Validate_Ugly(t *testing.T) {
	broken := &Config{
		Mode:    "nicehash",
		Workers: WorkersMode("unknown"),
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools:   []PoolConfig{{URL: "", Enabled: true}},
	}
	if broken.Validate().OK {
		t.Fatalf("expected invalid workers and empty pool url to fail validation")
	}
}
// TestConfig_Validate_NoEnabledPool_Good accepts a config where every pool is
// disabled: only enabled pools are required to carry a URL.
func TestConfig_Validate_NoEnabledPool_Good(t *testing.T) {
	allDisabled := &Config{
		Mode:    "simple",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools: []PoolConfig{
			{URL: "pool-a.example:3333", Enabled: false},
			{URL: "pool-b.example:4444", Enabled: false},
		},
	}
	result := allDisabled.Validate()
	if !result.OK {
		t.Fatalf("expected config with no enabled pools to be valid, got error: %v", result.Error)
	}
}
// TestProxy_New_WhitespaceMode_Good verifies that New trims whitespace from
// the configured mode before looking up the splitter factory, so a padded
// " nicehash " still builds the registered splitter.
func TestProxy_New_WhitespaceMode_Good(t *testing.T) {
	// Capture any pre-registered factory so it can be restored afterwards.
	originalFactory, hadFactory := splitterFactoryForMode("nicehash")
	if hadFactory {
		t.Cleanup(func() {
			RegisterSplitterFactory("nicehash", originalFactory)
		})
	}
	// NOTE(review): when no factory was registered beforehand, the fake below
	// remains in the global registry after this test (there is no unregister
	// API visible here) — confirm this cannot leak into sibling tests.
	called := false
	RegisterSplitterFactory("nicehash", func(*Config, *EventBus) Splitter {
		called = true
		return &noopSplitter{}
	})
	cfg := &Config{
		Mode:    " nicehash ",
		Workers: WorkersByRigID,
		Bind:    []BindAddr{{Host: "0.0.0.0", Port: 3333}},
		Pools:   []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	p, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected whitespace-padded mode to remain valid, got error: %v", result.Error)
	}
	if !called {
		t.Fatalf("expected trimmed mode lookup to invoke the registered splitter factory")
	}
	if _, ok := p.splitter.(*noopSplitter); !ok {
		t.Fatalf("expected test splitter to be wired, got %#v", p.splitter)
	}
}

136
configwatcher_test.go Normal file
View file

@ -0,0 +1,136 @@
package proxy
import (
"os"
"path/filepath"
"testing"
"time"
)
// TestConfigWatcher_New_Good checks that a watcher created over an existing
// file records that file's modification time up front.
func TestConfigWatcher_New_Good(t *testing.T) {
	configFile := filepath.Join(t.TempDir(), "config.json")
	payload := []byte(`{"mode":"nicehash","workers":"false","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
	if err := os.WriteFile(configFile, payload, 0o644); err != nil {
		t.Fatalf("write config file: %v", err)
	}
	watcher := NewConfigWatcher(configFile, func(*Config) {})
	if watcher == nil {
		t.Fatal("expected watcher")
	}
	if watcher.lastModifiedAt.IsZero() {
		t.Fatal("expected last modification time to be initialised from the file")
	}
}
// TestConfigWatcher_Start_Good writes an updated config, bumps the file's
// mtime past the watcher's recorded timestamp, and expects the onChange
// callback to deliver the reloaded config within the polling window.
func TestConfigWatcher_Start_Good(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.json")
	initial := []byte(`{"mode":"nicehash","workers":"false","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
	if err := os.WriteFile(path, initial, 0o644); err != nil {
		t.Fatalf("write initial config file: %v", err)
	}
	// Buffered channel plus non-blocking send: only the first reload matters.
	updates := make(chan *Config, 1)
	watcher := NewConfigWatcher(path, func(cfg *Config) {
		select {
		case updates <- cfg:
		default:
		}
	})
	if watcher == nil {
		t.Fatal("expected watcher")
	}
	watcher.Start()
	defer watcher.Stop()
	updated := []byte(`{"mode":"simple","workers":"user","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
	if err := os.WriteFile(path, updated, 0o644); err != nil {
		t.Fatalf("write updated config file: %v", err)
	}
	// Force the mtime 2s into the future; coarse filesystem timestamps could
	// otherwise leave the rewrite invisible to an mtime-based poller.
	now := time.Now()
	if err := os.Chtimes(path, now, now.Add(2*time.Second)); err != nil {
		t.Fatalf("touch updated config file: %v", err)
	}
	select {
	case cfg := <-updates:
		if cfg == nil {
			t.Fatal("expected config update")
		}
		if got := cfg.Mode; got != "simple" {
			t.Fatalf("expected updated mode, got %q", got)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("expected watcher to reload updated config")
	}
}
// TestConfigWatcher_Start_Bad verifies a watcher pointed at a nonexistent
// path neither panics nor invokes its onChange callback.
//
// watcher := proxy.NewConfigWatcher("/nonexistent/config.json", func(cfg *proxy.Config) {
//     // never called
// })
// watcher.Start()
// watcher.Stop()
func TestConfigWatcher_Start_Bad(t *testing.T) {
	called := make(chan struct{}, 1)
	missingPath := "/nonexistent/path/config.json"
	watcher := NewConfigWatcher(missingPath, func(*Config) {
		select {
		case called <- struct{}{}:
		default:
		}
	})
	if watcher == nil {
		t.Fatal("expected watcher even for a nonexistent path")
	}
	watcher.Start()
	defer watcher.Stop()
	select {
	case <-called:
		t.Fatal("expected no callback for nonexistent config file")
	case <-time.After(2 * time.Second):
		// expected: no update fired
	}
}
// TestConfigWatcher_Start_Ugly touches only the file's mtime without changing
// its content: the watcher still reloads (it keys purely off modification
// time) and delivers the unchanged config.
func TestConfigWatcher_Start_Ugly(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.json")
	initial := []byte(`{"mode":"nicehash","workers":"false","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool.example:3333","enabled":true}]}`)
	if err := os.WriteFile(path, initial, 0o644); err != nil {
		t.Fatalf("write initial config file: %v", err)
	}
	// Buffered channel plus non-blocking send: only the first reload matters.
	updates := make(chan *Config, 1)
	watcher := NewConfigWatcher(path, func(cfg *Config) {
		select {
		case updates <- cfg:
		default:
		}
	})
	if watcher == nil {
		t.Fatal("expected watcher")
	}
	watcher.Start()
	defer watcher.Stop()
	// Push the mtime 2s into the future so the poller sees a change even on
	// filesystems with coarse timestamps.
	now := time.Now()
	if err := os.Chtimes(path, now, now.Add(2*time.Second)); err != nil {
		t.Fatalf("touch config file: %v", err)
	}
	select {
	case cfg := <-updates:
		if cfg == nil {
			t.Fatal("expected config update")
		}
		if got := cfg.Mode; got != "nicehash" {
			t.Fatalf("expected unchanged mode, got %q", got)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("expected watcher to reload touched config")
	}
}

534
core_impl.go Normal file
View file

@ -0,0 +1,534 @@
package proxy
import (
"crypto/rand"
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"encoding/json"
"io"
"math"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
)
// Result is the success/error carrier used by constructors and loaders.
//
// cfg, result := proxy.LoadConfig("config.json")
// if !result.OK {
//     return result.Error
// }
type Result struct {
	OK    bool
	Error error
}

// newSuccessResult reports success with no error attached.
func newSuccessResult() Result {
	return Result{OK: true}
}

// newErrorResult wraps err in a failed Result.
func newErrorResult(err error) Result {
	return Result{Error: err, OK: false}
}
// splitterFactoriesMu guards splitterFactoriesByMode.
var splitterFactoriesMu sync.RWMutex

// splitterFactoriesByMode maps a lower-cased, trimmed mode name to the
// constructor used to build that mode's Splitter.
var splitterFactoriesByMode = map[string]func(*Config, *EventBus) Splitter{}
// RegisterSplitterFactory installs the constructor used for one proxy mode.
// The mode key is trimmed and lower-cased, matching splitterFactoryForMode.
//
// proxy.RegisterSplitterFactory("simple", func(cfg *proxy.Config, bus *proxy.EventBus) proxy.Splitter {
//     return simple.NewSimpleSplitter(cfg, bus, nil)
// })
func RegisterSplitterFactory(mode string, factory func(*Config, *EventBus) Splitter) {
	key := strings.ToLower(strings.TrimSpace(mode))
	splitterFactoriesMu.Lock()
	defer splitterFactoriesMu.Unlock()
	splitterFactoriesByMode[key] = factory
}
// splitterFactoryForMode looks up the registered factory for mode, matching
// case-insensitively after trimming whitespace.
func splitterFactoryForMode(mode string) (func(*Config, *EventBus) Splitter, bool) {
	key := strings.ToLower(strings.TrimSpace(mode))
	splitterFactoriesMu.RLock()
	defer splitterFactoriesMu.RUnlock()
	factory, ok := splitterFactoriesByMode[key]
	return factory, ok
}
// LoadConfig reads and parses the JSON config at path, recording the path on
// the returned Config for later reloads. Parsing does not validate semantics;
// call Config.Validate separately.
//
// cfg, result := proxy.LoadConfig("/etc/proxy.json")
//
// if !result.OK {
//     return result.Error
// }
func LoadConfig(path string) (*Config, Result) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, newErrorResult(NewScopedError("proxy.config", "read config failed", err))
	}
	config := &Config{}
	if err := json.Unmarshal(data, config); err != nil {
		return nil, newErrorResult(NewScopedError("proxy.config", "parse config failed", err))
	}
	// Remember where the config came from so watchers/reloads can find it.
	config.configPath = path
	return config, newSuccessResult()
}
// Validate performs semantic checks on a parsed Config: the mode and workers
// values must be recognised, at least one bind address and one pool entry
// must exist, and every enabled pool needs a non-blank URL.
//
// cfg := &proxy.Config{
//     Mode: "nicehash",
//     Bind: []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}},
//     Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
//     Workers: proxy.WorkersByRigID,
// }
//
// if result := cfg.Validate(); !result.OK {
//     return result
// }
func (c *Config) Validate() Result {
	if c == nil {
		return newErrorResult(NewScopedError("proxy.config", "config is nil", nil))
	}
	if !isValidMode(c.Mode) {
		return newErrorResult(NewScopedError("proxy.config", "mode must be \"nicehash\" or \"simple\"", nil))
	}
	if !isValidWorkersMode(c.Workers) {
		return newErrorResult(NewScopedError("proxy.config", "workers must be one of \"rig-id\", \"user\", \"password\", \"agent\", \"ip\", or \"false\"", nil))
	}
	if len(c.Bind) == 0 {
		return newErrorResult(NewScopedError("proxy.config", "bind list is empty", nil))
	}
	if len(c.Pools) == 0 {
		return newErrorResult(NewScopedError("proxy.config", "pool list is empty", nil))
	}
	// Disabled pools may be URL-less placeholders; only enabled ones matter.
	for _, pool := range c.Pools {
		if pool.Enabled && strings.TrimSpace(pool.URL) == "" {
			return newErrorResult(NewScopedError("proxy.config", "enabled pool url is empty", nil))
		}
	}
	return newSuccessResult()
}
// isValidMode reports whether mode, after trimming and lower-casing, is one
// of the supported proxy modes.
func isValidMode(mode string) bool {
	normalized := strings.ToLower(strings.TrimSpace(mode))
	return normalized == "nicehash" || normalized == "simple"
}
// isValidWorkersMode reports whether mode, after trimming, is one of the
// declared WorkersMode constants.
func isValidWorkersMode(mode WorkersMode) bool {
	trimmed := WorkersMode(strings.TrimSpace(string(mode)))
	switch trimmed {
	case WorkersByRigID, WorkersByUser, WorkersByPass, WorkersByAgent, WorkersByIP, WorkersDisabled:
		return true
	}
	return false
}
// NewEventBus builds an event bus with an empty listener table.
//
// bus := proxy.NewEventBus()
//
// bus.Subscribe(proxy.EventLogin, func(e proxy.Event) {
//     _ = e.Miner
// })
func NewEventBus() *EventBus {
	bus := &EventBus{}
	bus.listeners = make(map[EventType][]EventHandler)
	return bus
}
// Subscribe registers h to run for every dispatched event of type t. Nil
// buses and nil handlers are ignored; the listener table is created lazily.
//
// bus.Subscribe(proxy.EventAccept, stats.OnAccept)
func (b *EventBus) Subscribe(t EventType, h EventHandler) {
	if b == nil || h == nil {
		return
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	table := b.listeners
	if table == nil {
		table = make(map[EventType][]EventHandler)
		b.listeners = table
	}
	table[t] = append(table[t], h)
}
// Dispatch delivers e to every handler subscribed to e.Type. The handler
// slice is copied under the read lock so handlers run without holding the
// lock (they may Subscribe reentrantly), and each handler's panic is
// swallowed so one bad listener cannot break the others.
//
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: miner})
func (b *EventBus) Dispatch(e Event) {
	if b == nil {
		return
	}
	b.mu.RLock()
	handlers := append([]EventHandler(nil), b.listeners[e.Type]...)
	b.mu.RUnlock()
	for _, handler := range handlers {
		func() {
			defer func() {
				// Panics in listeners are deliberately ignored.
				_ = recover()
			}()
			handler(e)
		}()
	}
}
// shareSinkGroup fans share events out to several ShareSinks, isolating each
// sink from panics in the others.
type shareSinkGroup struct {
	sinks []ShareSink
}

// newShareSinkGroup collects the non-nil sinks into a group.
func newShareSinkGroup(sinks ...ShareSink) *shareSinkGroup {
	filtered := make([]ShareSink, 0, len(sinks))
	for _, candidate := range sinks {
		if candidate == nil {
			continue
		}
		filtered = append(filtered, candidate)
	}
	return &shareSinkGroup{sinks: filtered}
}
// OnAccept forwards an accepted-share event to every sink, swallowing any
// panic a single sink raises.
func (g *shareSinkGroup) OnAccept(e Event) {
	if g == nil {
		return
	}
	deliver := func(sink ShareSink) {
		defer func() { _ = recover() }()
		sink.OnAccept(e)
	}
	for _, sink := range g.sinks {
		deliver(sink)
	}
}
// OnReject forwards a rejected-share event to every sink, swallowing any
// panic a single sink raises.
func (g *shareSinkGroup) OnReject(e Event) {
	if g == nil {
		return
	}
	deliver := func(sink ShareSink) {
		defer func() { _ = recover() }()
		sink.OnReject(e)
	}
	for _, sink := range g.sinks {
		deliver(sink)
	}
}
// IsValid reports whether the job carries both a blob and a job id.
//
// if !job.IsValid() {
//     return
// }
func (j Job) IsValid() bool {
	if j.Blob == "" {
		return false
	}
	return j.JobID != ""
}
// BlobWithFixedByte replaces the blob byte at binary position 39 (hex chars
// 78-79) with fixedByte. Blobs shorter than 80 hex chars are returned as-is.
//
// partitioned := job.BlobWithFixedByte(0x2A)
func (j Job) BlobWithFixedByte(fixedByte uint8) string {
	if len(j.Blob) < 80 {
		return j.Blob
	}
	var nibbles [2]byte
	hex.Encode(nibbles[:], []byte{fixedByte})
	mutated := []byte(j.Blob)
	mutated[78] = nibbles[0]
	mutated[79] = nibbles[1]
	return string(mutated)
}
// DifficultyFromTarget converts the 8-char little-endian hex target into a
// difficulty (maxUint32 / target); malformed or zero targets yield 0.
//
// diff := job.DifficultyFromTarget()
func (j Job) DifficultyFromTarget() uint64 {
	if len(j.Target) != 8 {
		return 0
	}
	raw, err := hex.DecodeString(j.Target)
	if err != nil || len(raw) != 4 {
		return 0
	}
	target := binary.LittleEndian.Uint32(raw)
	if target == 0 {
		return 0
	}
	return uint64(math.MaxUint32) / uint64(target)
}
// targetFromDifficulty converts a difficulty into the 8-char little-endian
// hex target miners receive. The division rounds up, so the encoded target
// never represents a higher difficulty than requested. The previous doc
// example claimed "b88d0600" for 10000, but ceiling division yields
// 429497 (0x68DB9), i.e. "b98d0600".
//
// target := targetFromDifficulty(10000) // "b98d0600"
func targetFromDifficulty(diff uint64) string {
	// Difficulty 0 and 1 both map to the easiest possible target.
	if diff <= 1 {
		return "ffffffff"
	}
	maxTarget := uint64(math.MaxUint32)
	// Ceiling division: (max + diff - 1) / diff.
	target := (maxTarget + diff - 1) / diff
	// Defensive clamps; unreachable for diff >= 2 but kept as guards.
	if target == 0 {
		target = 1
	}
	if target > maxTarget {
		target = maxTarget
	}
	var raw [4]byte
	binary.LittleEndian.PutUint32(raw[:], uint32(target))
	return hex.EncodeToString(raw[:])
}
// EffectiveShareDifficulty returns the share difficulty capped by the miner's
// custom diff: the pool difficulty wins unless a non-zero custom diff is
// strictly lower than it.
//
// diff := proxy.EffectiveShareDifficulty(job, miner) // 25000 when customDiff < poolDiff
func EffectiveShareDifficulty(job Job, miner *Miner) uint64 {
	poolDiff := job.DifficultyFromTarget()
	if miner == nil {
		return poolDiff
	}
	custom := miner.customDiff
	if custom == 0 || poolDiff == 0 || poolDiff <= custom {
		return poolDiff
	}
	return custom
}
// NewCustomDiff creates a login-time custom difficulty resolver seeded with
// the configured global difficulty.
//
// resolver := proxy.NewCustomDiff(50000)
// resolver.OnLogin(proxy.Event{Miner: miner})
func NewCustomDiff(globalDiff uint64) *CustomDiff {
	resolver := &CustomDiff{}
	resolver.globalDiff.Store(globalDiff)
	return resolver
}
// OnLogin normalises the login user once during handshake and records the
// resolved custom difficulty on the miner. The resolved flag makes the call
// idempotent across repeated login events.
//
// NOTE(review): the actual parsing lives in resolveLoginCustomDiff (not shown
// here); the example suggests a "+diff" suffix on the user — confirm.
//
// cd.OnLogin(proxy.Event{Miner: &proxy.Miner{user: "WALLET+50000"}})
func (cd *CustomDiff) OnLogin(e Event) {
	if cd == nil || e.Miner == nil {
		return
	}
	if e.Miner.customDiffResolved {
		return
	}
	resolved := resolveLoginCustomDiff(e.Miner.user, cd.globalDiff.Load())
	e.Miner.user = resolved.user
	e.Miner.customDiff = resolved.diff
	e.Miner.customDiffFromLogin = resolved.fromLogin
	e.Miner.customDiffResolved = true
}
// NewRateLimiter builds a per-host token-bucket limiter from config.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300})
//
// if limiter.Allow("203.0.113.42:3333") {
//     // first 30 connection attempts per minute are allowed
// }
func NewRateLimiter(config RateLimit) *RateLimiter {
	limiter := &RateLimiter{limit: config}
	limiter.bucketByHost = map[string]*tokenBucket{}
	limiter.banUntilByHost = map[string]time.Time{}
	return limiter
}
// Allow reports whether a new connection from ip may proceed, consuming one
// token from the per-host bucket; exhausted hosts are banned when a ban
// duration is configured.
//
// if limiter.Allow("203.0.113.42:3333") {
// 	// hostOnly("203.0.113.42:3333") == "203.0.113.42"
// }
func (rl *RateLimiter) Allow(ip string) bool {
	if rl == nil || rl.limit.MaxConnectionsPerMinute <= 0 {
		return true
	}
	host := hostOnly(ip)
	now := time.Now()
	rl.mu.Lock()
	defer rl.mu.Unlock()
	// An active ban short-circuits everything; an expired one is cleared.
	if until, banned := rl.banUntilByHost[host]; banned {
		if now.Before(until) {
			return false
		}
		delete(rl.banUntilByHost, host)
	}
	bucket := rl.bucketByHost[host]
	if bucket == nil {
		bucket = &tokenBucket{tokens: rl.limit.MaxConnectionsPerMinute, lastRefill: now}
		rl.bucketByHost[host] = bucket
	}
	refillBucket(bucket, rl.limit.MaxConnectionsPerMinute, now)
	if bucket.tokens <= 0 {
		if rl.limit.BanDurationSeconds > 0 {
			rl.banUntilByHost[host] = now.Add(time.Duration(rl.limit.BanDurationSeconds) * time.Second)
		}
		return false
	}
	bucket.tokens--
	bucket.lastRefill = now
	return true
}
// Tick removes expired ban entries and refills token buckets.
//
// limiter.Tick()
func (rl *RateLimiter) Tick() {
	if rl == nil || rl.limit.MaxConnectionsPerMinute <= 0 {
		return
	}
	now := time.Now()
	rl.mu.Lock()
	defer rl.mu.Unlock()
	for host, bannedUntil := range rl.banUntilByHost {
		if now.Before(bannedUntil) {
			continue
		}
		delete(rl.banUntilByHost, host)
	}
	for _, bucket := range rl.bucketByHost {
		refillBucket(bucket, rl.limit.MaxConnectionsPerMinute, now)
	}
}
// NewConfigWatcher builds a watcher that invokes onChange with the freshly
// loaded config whenever configPath's mtime changes.
//
// watcher := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
// 	p.Reload(cfg)
// })
//
// watcher.Start() // polls once per second and reloads after the file mtime changes
func NewConfigWatcher(configPath string, onChange func(*Config)) *ConfigWatcher {
	w := &ConfigWatcher{
		configPath:     configPath,
		onConfigChange: onChange,
		stopCh:         make(chan struct{}),
	}
	// Seed the baseline mtime so an unchanged file does not trigger a reload
	// on the first poll.
	if info, err := os.Stat(configPath); err == nil {
		w.lastModifiedAt = info.ModTime()
	}
	return w
}
// Start launches a background goroutine that polls the config file once per
// second and invokes the onConfigChange callback after the file's mtime
// changes. It is a no-op on a nil watcher, an empty path, a nil callback, or
// if the watcher is already started.
//
// watcher.Start()
func (w *ConfigWatcher) Start() {
	if w == nil || w.configPath == "" || w.onConfigChange == nil {
		return
	}
	w.mu.Lock()
	if w.started {
		w.mu.Unlock()
		return
	}
	// Recreate the stop channel if a previous Stop closed it, so the watcher
	// can be restarted after being stopped.
	if w.stopCh == nil {
		w.stopCh = make(chan struct{})
	} else {
		select {
		case <-w.stopCh:
			w.stopCh = make(chan struct{})
		default:
		}
	}
	// Copy the fields the goroutine needs while the lock is held, so the
	// goroutine only touches w under w.mu (for lastModifiedAt below).
	stopCh := w.stopCh
	configPath := w.configPath
	onConfigChange := w.onConfigChange
	w.started = true
	w.mu.Unlock()
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Stat errors (e.g. file temporarily missing) are ignored;
				// the next tick retries.
				if info, err := os.Stat(configPath); err == nil {
					w.mu.Lock()
					changed := info.ModTime() != w.lastModifiedAt
					if changed {
						// Record the new mtime even before the reload attempt,
						// so a failed load is not retried every tick.
						w.lastModifiedAt = info.ModTime()
					}
					w.mu.Unlock()
					if !changed {
						continue
					}
					// Load outside the lock; only apply configs that parse cleanly.
					config, result := LoadConfig(configPath)
					if result.OK && config != nil {
						onConfigChange(config)
					}
				}
			case <-stopCh:
				return
			}
		}
	}()
}
// Stop signals the polling goroutine to exit. It is safe to call on a nil
// watcher, multiple times, and from concurrent goroutines.
//
// watcher.Stop()
func (w *ConfigWatcher) Stop() {
	if w == nil {
		return
	}
	// Check-and-close must happen under the lock: if the close ran after
	// unlocking, two concurrent Stop calls could both take the select's
	// default branch and double-close the channel, which panics.
	w.mu.Lock()
	defer w.mu.Unlock()
	w.started = false
	if w.stopCh == nil {
		return
	}
	select {
	case <-w.stopCh:
		// Already closed by a previous Stop.
	default:
		close(w.stopCh)
	}
}
// hostOnly strips the port from a "host:port" address; an address without a
// port is returned unchanged.
func hostOnly(ip string) string {
	if host, _, err := net.SplitHostPort(ip); err == nil {
		return host
	}
	return ip
}
// refillBucket tops up a token bucket at a rate of limit tokens per minute,
// advancing lastRefill only by the whole refill intervals actually consumed.
func refillBucket(bucket *tokenBucket, limit int, now time.Time) {
	if bucket == nil || limit <= 0 {
		return
	}
	// First touch: anchor the refill clock and fill an empty bucket.
	if bucket.lastRefill.IsZero() {
		bucket.lastRefill = now
		if bucket.tokens <= 0 {
			bucket.tokens = limit
		}
		return
	}
	interval := time.Minute / time.Duration(limit)
	if interval <= 0 {
		interval = time.Nanosecond
	}
	elapsed := now.Sub(bucket.lastRefill)
	if elapsed < interval {
		return
	}
	earned := int(elapsed / interval)
	bucket.tokens += earned
	if bucket.tokens > limit {
		bucket.tokens = limit
	}
	bucket.lastRefill = bucket.lastRefill.Add(time.Duration(earned) * interval)
}
// generateUUID returns a random RFC 4122 version-4 UUID string. If the
// system's random source fails, it falls back to a hex timestamp.
func generateUUID() string {
	var raw [16]byte
	if _, err := io.ReadFull(rand.Reader, raw[:]); err != nil {
		return strconv.FormatInt(time.Now().UnixNano(), 16)
	}
	raw[6] = (raw[6] & 0x0f) | 0x40 // version 4
	raw[8] = (raw[8] & 0x3f) | 0x80 // RFC 4122 variant
	return hex.EncodeToString(raw[0:4]) + "-" +
		hex.EncodeToString(raw[4:6]) + "-" +
		hex.EncodeToString(raw[6:8]) + "-" +
		hex.EncodeToString(raw[8:10]) + "-" +
		hex.EncodeToString(raw[10:16])
}
// sha256Hex returns the lowercase hex encoding of the SHA-256 digest of data.
func sha256Hex(data []byte) string {
	digest := sha256.Sum256(data)
	out := make([]byte, hex.EncodedLen(len(digest)))
	hex.Encode(out, digest[:])
	return string(out)
}

83
customdiff_test.go Normal file
View file

@ -0,0 +1,83 @@
package proxy
import "testing"
// TestCustomDiff_Apply_Good verifies a user suffix "+50000" sets customDiff and strips the suffix.
//
// cd := proxy.NewCustomDiff(10000)
// cd.Apply(&proxy.Miner{user: "WALLET+50000"})
// // miner.User() == "WALLET", miner.customDiff == 50000
func TestCustomDiff_Apply_Good(t *testing.T) {
	resolver := NewCustomDiff(10000)
	m := &Miner{user: "WALLET+50000"}
	resolver.OnLogin(Event{Miner: m})
	if got := m.User(); got != "WALLET" {
		t.Fatalf("expected stripped user, got %q", got)
	}
	if got := m.customDiff; got != 50000 {
		t.Fatalf("expected custom diff 50000, got %d", got)
	}
}
// TestCustomDiff_Apply_Bad verifies "+abc" (non-numeric) leaves user unchanged, customDiff=0.
//
// cd := proxy.NewCustomDiff(10000)
// cd.Apply(&proxy.Miner{user: "WALLET+abc"})
// // miner.User() == "WALLET+abc", miner.customDiff == 0
func TestCustomDiff_Apply_Bad(t *testing.T) {
	resolver := NewCustomDiff(10000)
	m := &Miner{user: "WALLET+abc"}
	resolver.OnLogin(Event{Miner: m})
	if got := m.User(); got != "WALLET+abc" {
		t.Fatalf("expected invalid suffix to remain unchanged, got %q", got)
	}
	if got := m.customDiff; got != 0 {
		t.Fatalf("expected invalid suffix to disable custom diff, got %d", got)
	}
}
// TestCustomDiff_Apply_Ugly verifies globalDiff=10000 is used when no suffix is present.
//
// cd := proxy.NewCustomDiff(10000)
// cd.Apply(&proxy.Miner{user: "WALLET"})
// // miner.customDiff == 10000 (falls back to global)
func TestCustomDiff_Apply_Ugly(t *testing.T) {
	resolver := NewCustomDiff(10000)
	m := &Miner{user: "WALLET"}
	resolver.OnLogin(Event{Miner: m})
	if got := m.customDiff; got != 10000 {
		t.Fatalf("expected global diff fallback 10000, got %d", got)
	}
}
// TestCustomDiff_OnLogin_NonNumericSuffix verifies a non-decimal suffix after plus is ignored.
//
// cd := proxy.NewCustomDiff(10000)
// cd.OnLogin(proxy.Event{Miner: &proxy.Miner{user: "WALLET+50000extra"}})
func TestCustomDiff_OnLogin_NonNumericSuffix(t *testing.T) {
	resolver := NewCustomDiff(10000)
	m := &Miner{user: "WALLET+50000extra"}
	resolver.OnLogin(Event{Miner: m})
	if got := m.User(); got != "WALLET+50000extra" {
		t.Fatalf("expected non-numeric suffix plus segment to remain unchanged, got %q", got)
	}
	if got := m.customDiff; got != 0 {
		t.Fatalf("expected invalid suffix to disable custom diff, got %d", got)
	}
}
// TestEffectiveShareDifficulty_CustomDiffCapsPoolDifficulty verifies the cap applied by custom diff.
//
// job := proxy.Job{Target: "01000000"}
// miner := &proxy.Miner{customDiff: 25000}
// proxy.EffectiveShareDifficulty(job, miner) // 25000 (capped)
func TestEffectiveShareDifficulty_CustomDiffCapsPoolDifficulty(t *testing.T) {
job := Job{Target: "01000000"}
miner := &Miner{customDiff: 25000}
if got := EffectiveShareDifficulty(job, miner); got != 25000 {
t.Fatalf("expected capped difficulty 25000, got %d", got)
}
}

122
customdiffstats.go Normal file
View file

@ -0,0 +1,122 @@
package proxy
import (
"strings"
"sync"
)
// CustomDiffBucketStats tracks per-custom-difficulty share outcomes.
type CustomDiffBucketStats struct {
	Accepted    uint64 `json:"accepted"`     // shares the pool accepted
	Rejected    uint64 `json:"rejected"`     // shares the pool rejected
	Invalid     uint64 `json:"invalid"`      // rejected shares whose reason matched isInvalidShareReason
	Expired     uint64 `json:"expired"`      // accepted shares that carried the Expired flag
	HashesTotal uint64 `json:"hashes_total"` // sum of Diff over accepted shares
}
// CustomDiffBuckets groups share totals by the miner's resolved custom difficulty.
//
// buckets := NewCustomDiffBuckets(true)
// buckets.OnAccept(Event{Miner: &Miner{customDiff: 50000}, Diff: 25000})
type CustomDiffBuckets struct {
	enabled bool                              // when false, recording is skipped and Snapshot returns nil
	buckets map[uint64]*CustomDiffBucketStats // keyed by the miner's customDiff value
	mu      sync.Mutex                        // guards buckets (and, via SetEnabled, enabled)
}
// NewCustomDiffBuckets creates a per-difficulty share tracker.
func NewCustomDiffBuckets(enabled bool) *CustomDiffBuckets {
	tracker := &CustomDiffBuckets{enabled: enabled}
	tracker.buckets = make(map[uint64]*CustomDiffBucketStats)
	return tracker
}
// SetEnabled toggles recording without discarding any collected buckets.
func (b *CustomDiffBuckets) SetEnabled(enabled bool) {
	if b == nil {
		return
	}
	b.mu.Lock()
	b.enabled = enabled
	b.mu.Unlock()
}
// OnAccept records an accepted share for the miner's custom difficulty bucket.
func (b *CustomDiffBuckets) OnAccept(e Event) {
	if b == nil || e.Miner == nil {
		return
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	// enabled is written by SetEnabled under b.mu; reading it before taking
	// the lock (as before) was a data race.
	if !b.enabled {
		return
	}
	bucket := b.bucketLocked(e.Miner.customDiff)
	bucket.Accepted++
	if e.Expired {
		bucket.Expired++
	}
	if e.Diff > 0 {
		bucket.HashesTotal += e.Diff
	}
}
// OnReject records a rejected share for the miner's custom difficulty bucket,
// additionally counting reasons that describe an invalid share.
func (b *CustomDiffBuckets) OnReject(e Event) {
	if b == nil || e.Miner == nil {
		return
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	// enabled is written by SetEnabled under b.mu; reading it before taking
	// the lock (as before) was a data race.
	if !b.enabled {
		return
	}
	bucket := b.bucketLocked(e.Miner.customDiff)
	bucket.Rejected++
	if isInvalidShareReason(e.Error) {
		bucket.Invalid++
	}
}
// Snapshot returns a copy of the current bucket totals, or nil when the
// tracker is nil, disabled, or has recorded nothing.
//
// summary := buckets.Snapshot()
func (b *CustomDiffBuckets) Snapshot() map[uint64]CustomDiffBucketStats {
	if b == nil {
		return nil
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	if !b.enabled || len(b.buckets) == 0 {
		return nil
	}
	copied := make(map[uint64]CustomDiffBucketStats, len(b.buckets))
	for diff, stats := range b.buckets {
		if stats != nil {
			copied[diff] = *stats
		}
	}
	return copied
}
// bucketLocked returns the stats bucket for diff, creating it on first use.
// Callers must hold b.mu.
func (b *CustomDiffBuckets) bucketLocked(diff uint64) *CustomDiffBucketStats {
	if b.buckets == nil {
		b.buckets = make(map[uint64]*CustomDiffBucketStats)
	}
	if existing, found := b.buckets[diff]; found {
		return existing
	}
	created := &CustomDiffBucketStats{}
	b.buckets[diff] = created
	return created
}
// isInvalidShareReason reports whether a pool rejection reason describes an
// invalid share (bad difficulty, malformed payload, bad nonce) rather than a
// transient condition. Matching is case-insensitive substring search.
func isInvalidShareReason(reason string) bool {
	reason = strings.ToLower(reason)
	if reason == "" {
		return false
	}
	// The previous "lowdifficulty" and "low difficulty" clauses were
	// redundant: both contain "difficulty", and "low difficulty" also
	// contains "low diff". The marker set below matches exactly the same
	// inputs.
	for _, marker := range []string{"low diff", "difficulty", "malformed", "invalid", "nonce"} {
		if strings.Contains(reason, marker) {
			return true
		}
	}
	return false
}

78
customdiffstats_test.go Normal file
View file

@ -0,0 +1,78 @@
package proxy
import "testing"
func TestProxy_CustomDiffStats_Good(t *testing.T) {
cfg := &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
CustomDiffStats: true,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
}
p, result := New(cfg)
if !result.OK {
t.Fatalf("expected valid proxy, got error: %v", result.Error)
}
miner := &Miner{customDiff: 50000}
p.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 75, Expired: true})
summary := p.Summary()
bucket, ok := summary.CustomDiffStats[50000]
if !ok {
t.Fatalf("expected custom diff bucket 50000 to be present")
}
if bucket.Accepted != 1 || bucket.Expired != 1 || bucket.HashesTotal != 75 {
t.Fatalf("unexpected bucket totals: %+v", bucket)
}
}
func TestProxy_CustomDiffStats_Bad(t *testing.T) {
cfg := &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
CustomDiffStats: true,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
}
p, result := New(cfg)
if !result.OK {
t.Fatalf("expected valid proxy, got error: %v", result.Error)
}
miner := &Miner{customDiff: 10000}
p.events.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Low difficulty share"})
p.events.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Malformed share"})
summary := p.Summary()
bucket, ok := summary.CustomDiffStats[10000]
if !ok {
t.Fatalf("expected custom diff bucket 10000 to be present")
}
if bucket.Rejected != 2 || bucket.Invalid != 2 {
t.Fatalf("unexpected bucket totals: %+v", bucket)
}
}
func TestProxy_CustomDiffStats_Ugly(t *testing.T) {
cfg := &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
CustomDiffStats: false,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
}
p, result := New(cfg)
if !result.OK {
t.Fatalf("expected valid proxy, got error: %v", result.Error)
}
miner := &Miner{customDiff: 25000}
p.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 1})
summary := p.Summary()
if len(summary.CustomDiffStats) != 0 {
t.Fatalf("expected custom diff stats to remain disabled, got %+v", summary.CustomDiffStats)
}
}

View file

@ -0,0 +1,440 @@
# RFC-025: Agent Experience (AX) Design Principles
- **Status:** Draft
- **Authors:** Snider, Cladius
- **Date:** 2026-03-19
- **Applies to:** All Core ecosystem packages (CoreGO, CorePHP, CoreTS, core-agent)
## Abstract
Agent Experience (AX) is a design paradigm for software systems where the primary code consumer is an AI agent, not a human developer. AX sits alongside User Experience (UX) and Developer Experience (DX) as the third era of interface design.
This RFC establishes AX as a formal design principle for the Core ecosystem and defines the conventions that follow from it.
## Motivation
As of early 2026, AI agents write, review, and maintain the majority of code in the Core ecosystem. The original author has not manually edited code (outside of Core struct design) since October 2025. Code is processed semantically — agents reason about intent, not characters.
Design patterns inherited from the human-developer era optimise for the wrong consumer:
- **Short names** save keystrokes but increase semantic ambiguity
- **Functional option chains** are fluent for humans but opaque for agents tracing configuration
- **Error-at-every-call-site** produces 50% boilerplate that obscures intent
- **Generic type parameters** force agents to carry type context that the runtime already has
- **Panic-hiding conventions** (`Must*`) create implicit control flow that agents must special-case
AX acknowledges this shift and provides principles for designing code, APIs, file structures, and conventions that serve AI agents as first-class consumers.
## The Three Eras
| Era | Primary Consumer | Optimises For | Key Metric |
|-----|-----------------|---------------|------------|
| UX | End users | Discoverability, forgiveness, visual clarity | Task completion time |
| DX | Developers | Typing speed, IDE support, convention familiarity | Time to first commit |
| AX | AI agents | Predictability, composability, semantic navigation | Correct-on-first-pass rate |
AX does not replace UX or DX. End users still need good UX. Developers still need good DX. But when the primary code author and maintainer is an AI agent, the codebase should be designed for that consumer first.
## Principles
### 1. Predictable Names Over Short Names
Names are tokens that agents pattern-match across languages and contexts. Abbreviations introduce mapping overhead.
```
Config not Cfg
Service not Srv
Embed not Emb
Error not Err (as a subsystem name; err for local variables is fine)
Options not Opts
```
**Rule:** If a name would require a comment to explain, it is too short.
**Exception:** Industry-standard abbreviations that are universally understood (`HTTP`, `URL`, `ID`, `IPC`, `I18n`) are acceptable. The test: would an agent trained on any mainstream language recognise it without context?
### 2. Comments as Usage Examples
The function signature tells WHAT. The comment shows HOW with real values.
```go
// Detect the project type from files present
setup.Detect("/path/to/project")
// Set up a workspace with auto-detected template
setup.Run(setup.Options{Path: ".", Template: "auto"})
// Scaffold a PHP module workspace
setup.Run(setup.Options{Path: "./my-module", Template: "php"})
```
**Rule:** If a comment restates what the type signature already says, delete it. If a comment shows a concrete usage with realistic values, keep it.
**Rationale:** Agents learn from examples more effectively than from descriptions. A comment like "Run executes the setup process" adds zero information. A comment like `setup.Run(setup.Options{Path: ".", Template: "auto"})` teaches an agent exactly how to call the function.
### 3. Path Is Documentation
File and directory paths should be self-describing. An agent navigating the filesystem should understand what it is looking at without reading a README.
```
flow/deploy/to/homelab.yaml — deploy TO the homelab
flow/deploy/from/github.yaml — deploy FROM GitHub
flow/code/review.yaml — code review flow
template/file/go/struct.go.tmpl — Go struct file template
template/dir/workspace/php/ — PHP workspace scaffold
```
**Rule:** If an agent needs to read a file to understand what a directory contains, the directory naming has failed.
**Corollary:** The unified path convention (folder structure = HTTP route = CLI command = test path) is AX-native. One path, every surface.
### 4. Templates Over Freeform
When an agent generates code from a template, the output is constrained to known-good shapes. When an agent writes freeform, the output varies.
```go
// Template-driven — consistent output
lib.RenderFile("php/action", data)
lib.ExtractDir("php", targetDir, data)
// Freeform — variance in output
"write a PHP action class that..."
```
**Rule:** For any code pattern that recurs, provide a template. Templates are guardrails for agents.
**Scope:** Templates apply to file generation, workspace scaffolding, config generation, and commit messages. They do NOT apply to novel logic — agents should write business logic freeform with the domain knowledge available.
### 5. Declarative Over Imperative
Agents reason better about declarations of intent than sequences of operations.
```yaml
# Declarative — agent sees what should happen
steps:
- name: build
flow: tools/docker-build
with:
context: "{{ .app_dir }}"
image_name: "{{ .image_name }}"
- name: deploy
flow: deploy/with/docker
with:
host: "{{ .host }}"
```
```go
// Imperative — agent must trace execution
cmd := exec.Command("docker", "build", "--platform", "linux/amd64", "-t", imageName, ".")
cmd.Dir = appDir
if err := cmd.Run(); err != nil {
return fmt.Errorf("docker build: %w", err)
}
```
**Rule:** Orchestration, configuration, and pipeline logic should be declarative (YAML/JSON). Implementation logic should be imperative (Go/PHP/TS). The boundary is: if an agent needs to compose or modify the logic, make it declarative.
### 6. Universal Types (Core Primitives)
Every component in the ecosystem accepts and returns the same primitive types. An agent processing any level of the tree sees identical shapes.
```go
// Universal contract
setup.Run(core.Options{Path: ".", Template: "auto"})
brain.New(core.Options{Name: "openbrain"})
deploy.Run(core.Options{Flow: "deploy/to/homelab"})
// Fractal — Core itself is a Service
core.New(core.Options{
Services: []core.Service{
process.New(core.Options{Name: "process"}),
brain.New(core.Options{Name: "brain"}),
},
})
```
**Core primitive types:**
| Type | Purpose |
|------|---------|
| `core.Options` | Input configuration (what you want) |
| `core.Config` | Runtime settings (what is active) |
| `core.Data` | Embedded or stored content |
| `core.Service` | A managed component with lifecycle |
| `core.Result[T]` | Return value with OK/fail state |
**What this replaces:**
| Go Convention | Core AX | Why |
|--------------|---------|-----|
| `func With*(v) Option` | `core.Options{Field: v}` | Struct literal is parseable; option chain requires tracing |
| `func Must*(v) T` | `core.Result[T]` | No hidden panics; errors flow through Core |
| `func *For[T](c) T` | `c.Service("name")` | String lookup is greppable; generics require type context |
| `val, err :=` everywhere | Single return via `core.Result` | Intent not obscured by error handling |
| `_ = err` | Never needed | Core handles all errors internally |
### 7. Directory as Semantics
The directory structure tells an agent the intent before it reads a word. Top-level directories are semantic categories, not organisational bins.
```
plans/
├── code/ # Pure primitives — read for WHAT exists
├── project/ # Products — read for WHAT we're building and WHY
└── rfc/ # Contracts — read for constraints and rules
```
**Rule:** An agent should know what kind of document it's reading from the path alone. `code/core/go/io/RFC.md` = a lib primitive spec. `project/ofm/RFC.md` = a product spec that cross-references code/. `rfc/snider/borg/RFC-BORG-006-SMSG-FORMAT.md` = an immutable contract for the Borg SMSG protocol.
**Corollary:** The three-way split (code/project/rfc) extends principle 3 (Path Is Documentation) from files to entire subtrees. The path IS the metadata.
### 8. Lib Never Imports Consumer
Dependency flows one direction. Libraries define primitives. Consumers compose from them. A new feature in a consumer can never break a library.
```
code/core/go/* → lib tier (stable foundation)
code/core/agent/ → consumer tier (composes from go/*)
code/core/cli/ → consumer tier (composes from go/*)
code/core/gui/ → consumer tier (composes from go/*)
```
**Rule:** If package A is in `go/` and package B is in the consumer tier, B may import A but A must never import B. The repo naming convention enforces this: `go-{name}` = lib, bare `{name}` = consumer.
**Why this matters for agents:** When an agent is dispatched to implement a feature in `core/agent`, it can freely import from `go-io`, `go-scm`, `go-process`. But if an agent is dispatched to `go-io`, it knows its changes are foundational — every consumer depends on it, so the contract must not break.
### 9. Issues Are N+(rounds) Deep
Problems in code and specs are layered. Surface issues mask deeper issues. Fixing the surface reveals the next layer. This is not a failure mode — it is the discovery process.
```
Pass 1: Find 16 issues (surface — naming, imports, obvious errors)
Pass 2: Find 11 issues (structural — contradictions, missing types)
Pass 3: Find 5 issues (architectural — signature mismatches, registration gaps)
Pass 4: Find 4 issues (contract — cross-spec API mismatches)
Pass 5: Find 2 issues (mechanical — path format, nil safety)
Pass N: Findings are trivial → spec/code is complete
```
**Rule:** Iteration is required, not a failure. Each pass sees what the previous pass could not, because the context changed. An agent dispatched with the same task on the same repo will find different things each time — this is correct behaviour.
**Corollary:** The cheapest model should do the most passes (surface work). The frontier model should arrive last, when only deep issues remain. Tiered iteration: grunt model grinds → mid model pre-warms → frontier model polishes.
**Anti-pattern:** One-shot generation expecting valid output. No model, no human, produces correct-on-first-pass for non-trivial work. Expecting it wastes the first pass on surface issues that a cheaper pass would have caught.
### 10. CLI Tests as Artifact Validation
Unit tests verify the code. CLI tests verify the binary. The directory structure IS the command structure — path maps to command, Taskfile runs the test.
```
tests/cli/
├── core/
│ └── lint/
│ ├── Taskfile.yaml ← test `core-lint` (root)
│ ├── run/
│ │ ├── Taskfile.yaml ← test `core-lint run`
│ │ └── fixtures/
│ ├── go/
│ │ ├── Taskfile.yaml ← test `core-lint go`
│ │ └── fixtures/
│ └── security/
│ ├── Taskfile.yaml ← test `core-lint security`
│ └── fixtures/
```
**Rule:** Every CLI command has a matching `tests/cli/{path}/Taskfile.yaml`. The Taskfile runs the compiled binary against fixtures with known inputs and validates the output. If the CLI test passes, the underlying actions work — because CLI commands call actions, MCP tools call actions, API endpoints call actions. Test the CLI, trust the rest.
**Pattern:**
```yaml
# tests/cli/core/lint/go/Taskfile.yaml
version: '3'
tasks:
test:
cmds:
- core-lint go --output json fixtures/ > /tmp/result.json
- jq -e '.findings | length > 0' /tmp/result.json
- jq -e '.summary.passed == false' /tmp/result.json
```
**Why this matters for agents:** An agent can validate its own work by running `task test` in the matching `tests/cli/` directory. No test framework, no mocking, no setup — just the binary, fixtures, and `jq` assertions. The agent builds the binary, runs the test, sees the result. If it fails, the agent can read the fixture, read the output, and fix the code.
**Corollary:** Fixtures are planted bugs. Each fixture file has a known issue that the linter must find. If the linter doesn't find it, the test fails. Fixtures are the spec for what the tool must detect — they ARE the test cases, not descriptions of test cases.
## Applying AX to Existing Patterns
### File Structure
```
# AX-native: path describes content
core/agent/
├── go/ # Go source
├── php/ # PHP source
├── ui/ # Frontend source
├── claude/ # Claude Code plugin
└── codex/ # Codex plugin
# Not AX: generic names requiring README
src/
├── lib/
├── utils/
└── helpers/
```
### Error Handling
```go
// AX-native: errors are infrastructure, not application logic
svc := c.Service("brain")
cfg := c.Config().Get("database.host")
// Errors logged by Core. Code reads like a spec.
// Not AX: errors dominate the code
svc, err := c.ServiceFor[brain.Service]()
if err != nil {
return fmt.Errorf("get brain service: %w", err)
}
cfg, err := c.Config().Get("database.host")
if err != nil {
_ = err // silenced because "it'll be fine"
}
```
### API Design
```go
// AX-native: one shape, every surface
core.New(core.Options{
Name: "my-app",
Services: []core.Service{...},
Config: core.Config{...},
})
// Not AX: multiple patterns for the same thing
core.New(
core.WithName("my-app"),
core.WithService(factory1),
core.WithService(factory2),
core.WithConfig(cfg),
)
```
## The Plans Convention — AX Development Lifecycle
The `plans/` directory structure encodes a development methodology designed for how generative AI actually works: iterative refinement across structured phases, not one-shot generation.
### The Three-Way Split
```
plans/
├── project/ # 1. WHAT and WHY — start here
├── rfc/ # 2. CONSTRAINTS — immutable contracts
└── code/ # 3. HOW — implementation specs
```
Each directory is a phase. Work flows from project → rfc → code. Each transition forces a refinement pass — you cannot write a code spec without discovering gaps in the project spec, and you cannot write an RFC without discovering assumptions in both.
**Three places for data that can't be written simultaneously = three guaranteed iterations of "actually, this needs changing."** Refinement is baked into the structure, not bolted on as a review step.
### Phase 1: Project (Vision)
Start with `project/`. No code exists yet. Define:
- What the product IS and who it serves
- What existing primitives it consumes (cross-ref to `code/`)
- What constraints it operates under (cross-ref to `rfc/`)
This is where creativity lives. Map features to building blocks. Connect systems. The project spec is integrative — it references everything else.
### Phase 2: RFC (Contracts)
Extract the immutable rules into `rfc/`. These are constraints that don't change with implementation:
- Wire formats, protocols, hash algorithms
- Security properties that must hold
- Compatibility guarantees
RFCs are numbered per component (`RFC-BORG-006-SMSG-FORMAT.md`) and never modified after acceptance. If the contract changes, write a new RFC.
### Phase 3: Code (Implementation Specs)
Define the implementation in `code/`. Each component gets an RFC.md that an agent can implement from:
- Struct definitions (the DTOs — see principle 6)
- Method signatures and behaviour
- Error conditions and edge cases
- Cross-references to other code/ specs
The code spec IS the product. Write the spec → dispatch to an agent → review output → iterate.
### Pre-Launch: Alignment Protocol
Before dispatching for implementation, verify spec-model alignment:
```
1. REVIEW — The implementation model (Codex/Jules) reads the spec
and reports missing elements. This surfaces the delta between
the model's training and the spec's assumptions.
"I need X, Y, Z to implement this" is the model saying
"I hear you but I'm missing context" — without asking.
2. ADJUST — Update the spec to close the gaps. Add examples,
clarify ambiguities, provide the context the model needs.
This is shared alignment, not compromise.
3. VERIFY — A different model (or sub-agent) reviews the adjusted
spec without the planner's bias. Fresh eyes on the contract.
"Does this make sense to someone who wasn't in the room?"
4. READY — When the review findings are trivial or deployment-
related (not architectural), the spec is ready to dispatch.
```
### Implementation: Iterative Dispatch
Same prompt, multiple runs. Each pass sees deeper because the context evolved:
```
Round 1: Build features (the obvious gaps)
Round 2: Write tests (verify what was built)
Round 3: Harden security (what can go wrong?)
Round 4: Next RFC section (what's still missing?)
Round N: Findings are trivial → implementation is complete
```
Re-running is not failure. It is the process. Each pass changes the codebase, which changes what the next pass can see. The iteration IS the refinement.
### Post-Implementation: Auto-Documentation
The QA/verify chain produces artefacts that feed forward:
- Test results document the contract (what works, what doesn't)
- Coverage reports surface untested paths
- Diff summaries prep the changelog for the next release
- Doc site updates from the spec (the spec IS the documentation)
The output of one cycle is the input to the next. The plans repo stays current because the specs drive the code, not the other way round.
## Compatibility
AX conventions are valid, idiomatic Go/PHP/TS. They do not require language extensions, code generation, or non-standard tooling. An AX-designed codebase compiles, tests, and deploys with standard toolchains.
The conventions diverge from community patterns (functional options, Must/For, etc.) but do not violate language specifications. This is a style choice, not a fork.
## Adoption
AX applies to all new code in the Core ecosystem. Existing code migrates incrementally as it is touched — no big-bang rewrite.
Priority order:
1. **Public APIs** (package-level functions, struct constructors)
2. **File structure** (path naming, template locations)
3. **Internal fields** (struct field names, local variables)
## References
- dAppServer unified path convention (2024)
- CoreGO DTO pattern refactor (2026-03-18)
- Core primitives design (2026-03-19)
- Go Proverbs, Rob Pike (2015) — AX provides an updated lens
## Changelog
- 2026-03-19: Initial draft

239
docs/RFC-CORE-GO-REQUEST.md Normal file
View file

@ -0,0 +1,239 @@
# RFC Request — go-blockchain needs from Core (FINAL)
> From: Charon (go-blockchain)
> To: Cladius (core/go + go-* packages)
> Date: 2 Apr 2026 00:55
> Snider's answers inline. Updated with precise asks.
## 1. core/api — DONE, pulled (+125 commits)
Using it. No ask needed.
## 2. core.Subscribe/Publish — Raindrops forming
When ready, go-blockchain will:
- Publish: `blockchain.block.new`, `blockchain.alias.registered`, `blockchain.hardfork.activated`
- Wire format: `core.Event{Type: string, Data: any, Timestamp: int64}`
No blocking ask — will integrate when available.
## 3. core.Wallet() — I can do this today via core.Service
```go
c.RegisterService("blockchain.wallet", walletService)
c.Service("blockchain.wallet", core.Service{
Name: "blockchain.wallet",
Instance: walletService,
OnStart: func() core.Result { return walletService.Start() },
OnStop: func() core.Result { return walletService.Stop() },
})
```
Then register actions:
```go
c.Action("blockchain.wallet.create", walletService.HandleCreate)
c.Action("blockchain.wallet.transfer", walletService.HandleTransfer)
c.Action("blockchain.wallet.balance", walletService.HandleBalance)
```
**No ask. Implementing now.**
## 4. Structured Logging — PRECISE ASK
**I want package-level logging that works WITHOUT a Core instance.**
The chain sync runs in goroutines that don't hold `*core.Core`. Currently using `log.Printf`.
**Exact ask:** Confirm these work at package level:
```go
core.Print(nil, "block synced height=%d hash=%s", height, hash) // info
core.Error(nil, "sync failed: %v", err) // error
```
Or do I need `core.NewLog()` → pass the logger into the sync goroutine?
## 5. core.Escrow() — Improvement to go-blockchain, sane with Chain + Asset
Escrow is a tx type (HF4+). I build it in go-blockchain's wallet package:
```go
wallet.BuildEscrowTx(provider, customer, amount, terms)
```
Then expose via action: `c.Action("blockchain.escrow.create", ...)`
**No ask from Core. I implement this.**
## 6. core.Asset() — Same, go-blockchain implements
HF5 enables deploy/emit/burn. I add to wallet package + actions:
```go
c.Action("blockchain.asset.deploy", ...)
c.Action("blockchain.asset.emit", ...)
c.Action("blockchain.asset.burn", ...)
```
**No ask. Implementing after HF5 activates.**
## 7. core.Chain() — Same pattern
```go
c.RegisterService("blockchain.chain", chainService)
c.Action("blockchain.chain.height", ...)
c.Action("blockchain.chain.block", ...)
c.Action("blockchain.chain.sync", ...)
```
**No ask. Doing this today.**
## 8. core.DNS() — Do you want a go-dns package?
The LNS is 672 lines of Go at `~/Code/lthn/lns/`. It could become `go-dns` in the Core ecosystem.
**Ask: Should I make it `dappco.re/go/core/dns` or keep it as a standalone?**
If yes to go-dns, the actions would be:
```go
c.Action("dns.resolve", ...) // A record
c.Action("dns.resolve.txt", ...) // TXT record
c.Action("dns.reverse", ...) // PTR
c.Action("dns.register", ...) // via sidechain
```
## 9. Portable Storage Encoder — DONE
Already implemented in `p2p/encode.go` using `go-p2p/node/levin/EncodeStorage`. Committed and pushed. HandshakeResponse.Encode, ResponseChainEntry.Encode, RequestChain.Decode all working.
**go-storage/go-io improvement ask:** The chain stores blocks in go-store (SQLite). For high-throughput sync, a `go-io` backed raw block file store would be faster. Want me to spec a `BlockStore` interface that can swap between go-store and go-io backends?
## 10. CGo boilerplate — YES PLEASE
**Exact ask:** A `go-cgo` package with:
```go
// Safe C buffer allocation with automatic cleanup
buf := cgo.NewBuffer(32)
defer buf.Free()
buf.CopyFrom(goSlice)
result := buf.Bytes()
// C function call wrapper with error mapping
err := cgo.Call(C.my_function, buf.Ptr(), cgo.SizeT(len))
// Returns Go error if C returns non-zero
// C string conversion
goStr := cgo.GoString(cStr)
cStr := cgo.CString(goStr)
defer cgo.Free(cStr)
```
Every CGo package (go-blockchain/crypto, go-mlx, go-rocm) does this dance manually. A shared helper saves ~50 lines per package and prevents use-after-free bugs.
## Summary
| # | What | Who Does It | Status |
|---|------|-------------|--------|
| 1 | core/api | Cladius | DONE, pulled |
| 2 | Pub/Sub events | Cladius | Forming → core/stream (go-ws rename) |
| 3 | Wallet service | **Charon** | Implementing today |
| 4 | Package-level logging | **Answered below** | RTFM — it works |
| 5 | Escrow txs | **Charon** | In go-blockchain |
| 6 | Asset operations | **Charon** | After HF5 |
| 7 | Chain service | **Charon** | Implementing today |
| 8 | go-dns | **Cladius** | `dappco.re/go/dns` — DNS record DTOs + ClouDNS API types |
| 9 | Storage encoder | **Charon** | DONE |
| 10 | go-cgo | **Cladius** | RFC written, dispatching |
— Charon
---
## Cladius Answers — How To Do It With Core Primitives
> These examples show Charon how each ask maps to existing Core APIs.
> Most of what he asked for already exists — he just needs the patterns.
### #4 Answer: Package-Level Logging
**Yes, `core.Print(nil, ...)` works.** The first arg is `*core.Core` and `nil` is valid — it falls back to the package-level logger. Your goroutines don't need a Core instance:
```go
// In your sync goroutine — no *core.Core needed:
core.Print(nil, "block synced height=%d hash=%s", height, hash)
core.Error(nil, "sync failed: %v", err)
// If you HAVE a Core instance (e.g. in a service handler):
core.Print(c, "wallet created id=%s", id) // tagged with service context
```
Both work. `nil` = package logger, `c` = contextual logger. Same output format.
### #3 Answer: Service + Action Pattern (You Got It Right)
Your code is correct. The full pattern with Core primitives:
```go
// Register service with lifecycle
c.RegisterService("blockchain.wallet", core.Service{
OnStart: func(ctx context.Context) core.Result {
return walletService.Start(ctx)
},
OnStop: func(ctx context.Context) core.Result {
return walletService.Stop(ctx)
},
})
// Register actions — path IS the CLI/HTTP/MCP route
c.Action("blockchain.wallet.create", walletService.HandleCreate)
c.Action("blockchain.wallet.balance", walletService.HandleBalance)
// Call another service's action (for #8 dns.discover → blockchain.chain.aliases):
result := c.Run("blockchain.chain.aliases", core.Options{})
```
### #5/#6/#7 Answer: Same Pattern, Different Path
```go
// Escrow (HF4+)
c.Action("blockchain.escrow.create", escrowService.HandleCreate)
c.Action("blockchain.escrow.release", escrowService.HandleRelease)
// Asset (HF5+)
c.Action("blockchain.asset.deploy", assetService.HandleDeploy)
// Chain
c.Action("blockchain.chain.height", chainService.HandleHeight)
c.Action("blockchain.chain.block", chainService.HandleBlock)
// All of these automatically get:
// - CLI: core blockchain chain height
// - HTTP: GET /blockchain/chain/height
// - MCP: blockchain.chain.height tool
// - i18n: blockchain.chain.height.* keys
```
### #9 Answer: BlockStore Interface
For the go-store vs go-io backend swap:
```go
// Define as a Core Data type
type BlockStore struct {
core.Data // inherits Store/Load/Delete
}
// The backing medium is chosen at init:
store := core.NewData("blockchain.blocks",
core.WithMedium(gostore.SQLite("blocks.db")), // or:
// core.WithMedium(goio.File("blocks/")), // raw file backend
)
// Usage is identical regardless of backend:
store.Store("block:12345", blockBytes)
block := store.Load("block:12345")
```
### #10 Answer: go-cgo
RFC written at `plans/code/core/go/cgo/RFC.md`. Buffer, Scope, Call, String helpers. Dispatching to Codex when repo is created on Forge.
### #8 Answer: go-dns
`dappco.re/go/dns` — Core package. DNS record structs as DTOs mapping 1:1 to ClouDNS API. Your LNS code at `~/Code/lthn/lns/` moves in as the service layer on top. Dispatching when repo exists.

1337
docs/RFC-CORE-GO.md Normal file

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,5 @@
--- ---
module: forge.lthn.ai/core/go-proxy module: dappco.re/go/core/proxy
repo: core/go-proxy repo: core/go-proxy
lang: go lang: go
tier: lib tier: lib
@ -18,7 +18,7 @@ tags:
> An agent should be able to implement this library from this document alone. > An agent should be able to implement this library from this document alone.
**Module:** `forge.lthn.ai/core/go-proxy` **Module:** `dappco.re/go/core/proxy`
**Repository:** `core/go-proxy` **Repository:** `core/go-proxy`
**Files:** 18 **Files:** 18
@ -46,6 +46,28 @@ The v1 scope covers:
--- ---
## 1.1 Import Graph (no circular imports)
Shared types (`Job`, `PoolConfig`, `Config`, `Miner`, `UpstreamStats`, event types) are defined in the root `proxy` package. Sub-packages import `proxy` but `proxy` never imports sub-packages directly — it uses interfaces (`Splitter`, `ShareSink`) injected at construction time.
```
proxy (root) ← defines shared types, Splitter interface, Proxy orchestrator
├── pool ← imports proxy (for Job, PoolConfig). proxy does NOT import pool.
├── nicehash ← imports proxy (for Miner, Job, events) and pool (for Strategy)
├── simple ← imports proxy and pool
├── log ← imports proxy (for Event)
└── api ← imports proxy (for Proxy) and core/api
```
The `Proxy` orchestrator wires sub-packages via interface injection:
```go
// proxy.go receives a Splitter (implemented by nicehash or simple)
// and a pool.StrategyFactory (closure that creates pool.Strategy instances).
// No import of nicehash, simple, or pool packages from proxy.go.
```
---
## 2. File Map ## 2. File Map
| File | Package | Purpose | | File | Package | Purpose |
@ -340,6 +362,16 @@ type Miner struct {
buf [16384]byte // per-miner send buffer; avoids per-write allocations buf [16384]byte // per-miner send buffer; avoids per-write allocations
} }
// SetID assigns the miner's internal ID. Used by NonceStorage tests.
//
// m.SetID(42)
func (m *Miner) SetID(id int64) {}
// FixedByte returns the NiceHash slot index assigned to this miner.
//
// slot := m.FixedByte() // 0x2A
func (m *Miner) FixedByte() uint8 {}
// NewMiner creates a Miner for an accepted net.Conn. Does not start reading yet. // NewMiner creates a Miner for an accepted net.Conn. Does not start reading yet.
// //
// m := proxy.NewMiner(conn, 3333, nil) // m := proxy.NewMiner(conn, 3333, nil)
@ -490,6 +522,21 @@ func (s *NonceSplitter) OnClose(event *proxy.CloseEvent) {}
// //
// s.GC() // called by Proxy tick loop every 60 ticks // s.GC() // called by Proxy tick loop every 60 ticks
func (s *NonceSplitter) GC() {} func (s *NonceSplitter) GC() {}
// Connect establishes the first pool upstream connection via the strategy factory.
//
// s.Connect()
func (s *NonceSplitter) Connect() {}
// Tick is called every second by the proxy tick loop. Runs keepalive on idle mappers.
//
// s.Tick(ticks)
func (s *NonceSplitter) Tick(ticks uint64) {}
// Upstreams returns current upstream pool connection counts.
//
// stats := s.Upstreams()
func (s *NonceSplitter) Upstreams() proxy.UpstreamStats {}
``` ```
### 8.2 NonceMapper ### 8.2 NonceMapper
@ -540,6 +587,29 @@ func (m *NonceMapper) Submit(event *proxy.SubmitEvent) {}
// //
// if mapper.IsActive() { /* safe to assign miners */ } // if mapper.IsActive() { /* safe to assign miners */ }
func (m *NonceMapper) IsActive() bool {} func (m *NonceMapper) IsActive() bool {}
// Start connects the pool strategy. Called by NonceSplitter after creating the mapper.
//
// mapper.Start()
func (m *NonceMapper) Start() {}
// OnJob receives a new job from the pool. Implements pool.StratumListener.
// Calls storage.SetJob to distribute to all active miners.
//
// // called by pool.StratumClient when pool pushes a job
func (m *NonceMapper) OnJob(job proxy.Job) {}
// OnResultAccepted receives a share result from the pool. Implements pool.StratumListener.
// Correlates by sequence to the originating miner and sends success/error reply.
//
// // called by pool.StratumClient on pool reply
func (m *NonceMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {}
// OnDisconnect handles pool connection loss. Implements pool.StratumListener.
// Suspends the mapper; miners keep their slots but receive no new jobs until reconnect.
//
// // called by pool.StratumClient on disconnect
func (m *NonceMapper) OnDisconnect() {}
``` ```
### 8.3 NonceStorage ### 8.3 NonceStorage
@ -639,6 +709,21 @@ func (s *SimpleSplitter) OnClose(event *proxy.CloseEvent) {}
// //
// s.GC() // s.GC()
func (s *SimpleSplitter) GC() {} func (s *SimpleSplitter) GC() {}
// Connect establishes pool connections for any pre-existing idle mappers.
//
// s.Connect()
func (s *SimpleSplitter) Connect() {}
// Tick is called every second. Runs idle mapper timeout checks.
//
// s.Tick(ticks)
func (s *SimpleSplitter) Tick(ticks uint64) {}
// Upstreams returns current upstream connection counts (active + idle).
//
// stats := s.Upstreams()
func (s *SimpleSplitter) Upstreams() proxy.UpstreamStats {}
``` ```
```go ```go
@ -669,7 +754,7 @@ type SimpleMapper struct {
// client := pool.NewStratumClient(poolCfg, listener) // client := pool.NewStratumClient(poolCfg, listener)
// client.Connect() // client.Connect()
type StratumClient struct { type StratumClient struct {
cfg PoolConfig cfg proxy.PoolConfig
listener StratumListener listener StratumListener
conn net.Conn conn net.Conn
tlsConn *tls.Conn // nil if plain TCP tlsConn *tls.Conn // nil if plain TCP
@ -690,7 +775,7 @@ type StratumListener interface {
OnDisconnect() OnDisconnect()
} }
func NewStratumClient(cfg PoolConfig, listener StratumListener) *StratumClient {} func NewStratumClient(cfg proxy.PoolConfig, listener StratumListener) *StratumClient {}
// Connect dials the pool. Applies TLS if cfg.TLS is true. // Connect dials the pool. Applies TLS if cfg.TLS is true.
// If cfg.TLSFingerprint is non-empty, pins the server certificate by SHA-256 of DER bytes. // If cfg.TLSFingerprint is non-empty, pins the server certificate by SHA-256 of DER bytes.
@ -726,7 +811,7 @@ func (c *StratumClient) Disconnect() {}
// strategy := pool.NewFailoverStrategy(cfg.Pools, listener, cfg) // strategy := pool.NewFailoverStrategy(cfg.Pools, listener, cfg)
// strategy.Connect() // strategy.Connect()
type FailoverStrategy struct { type FailoverStrategy struct {
pools []PoolConfig pools []proxy.PoolConfig
current int current int
client *StratumClient client *StratumClient
listener StratumListener listener StratumListener
@ -749,7 +834,7 @@ type Strategy interface {
IsActive() bool IsActive() bool
} }
func NewFailoverStrategy(pools []PoolConfig, listener StratumListener, cfg *proxy.Config) *FailoverStrategy {} func NewFailoverStrategy(pools []proxy.PoolConfig, listener StratumListener, cfg *proxy.Config) *FailoverStrategy {}
// Connect dials the current pool. On failure, advances to the next pool (modulo len), // Connect dials the current pool. On failure, advances to the next pool (modulo len),
// respecting cfg.Retries and cfg.RetryPause between attempts. // respecting cfg.Retries and cfg.RetryPause between attempts.
@ -801,6 +886,32 @@ type Event struct {
func NewEventBus() *EventBus {} func NewEventBus() *EventBus {}
// LoginEvent is the typed event passed to Splitter.OnLogin.
//
// splitter.OnLogin(&LoginEvent{Miner: m})
type LoginEvent struct {
Miner *Miner
}
// SubmitEvent is the typed event passed to Splitter.OnSubmit.
//
// splitter.OnSubmit(&SubmitEvent{Miner: m, JobID: "abc", Nonce: "deadbeef"})
type SubmitEvent struct {
Miner *Miner
JobID string
Nonce string
Result string
Algo string
RequestID int64
}
// CloseEvent is the typed event passed to Splitter.OnClose.
//
// splitter.OnClose(&CloseEvent{Miner: m})
type CloseEvent struct {
Miner *Miner
}
// Subscribe registers a handler for the given event type. Safe to call before Start. // Subscribe registers a handler for the given event type. Safe to call before Start.
// //
// bus.Subscribe(proxy.EventAccept, func(e proxy.Event) { stats.OnAccept(e.Diff) }) // bus.Subscribe(proxy.EventAccept, func(e proxy.Event) { stats.OnAccept(e.Diff) })
@ -939,6 +1050,21 @@ func (w *Workers) List() []WorkerRecord {}
// //
// w.Tick() // w.Tick()
func (w *Workers) Tick() {} func (w *Workers) Tick() {}
// OnLogin upserts the worker record for the miner's login. Called via EventBus subscription.
//
// bus.Subscribe(proxy.EventLogin, func(e proxy.Event) { w.OnLogin(e) })
func (w *Workers) OnLogin(e Event) {}
// OnAccept records an accepted share for the worker. Called via EventBus subscription.
//
// bus.Subscribe(proxy.EventAccept, func(e proxy.Event) { w.OnAccept(e) })
func (w *Workers) OnAccept(e Event) {}
// OnReject records a rejected share for the worker. Called via EventBus subscription.
//
// bus.Subscribe(proxy.EventReject, func(e proxy.Event) { w.OnReject(e) })
func (w *Workers) OnReject(e Event) {}
``` ```
--- ---
@ -985,7 +1111,7 @@ func (cd *CustomDiff) OnLogin(e proxy.Event) {}
type AccessLog struct { type AccessLog struct {
path string path string
mu sync.Mutex mu sync.Mutex
f core.File // opened append-only on first write; nil until first event f io.WriteCloser // opened append-only on first write; nil until first event
} }
func NewAccessLog(path string) *AccessLog {} func NewAccessLog(path string) *AccessLog {}
@ -1015,7 +1141,7 @@ func (l *AccessLog) OnClose(e proxy.Event) {}
type ShareLog struct { type ShareLog struct {
path string path string
mu sync.Mutex mu sync.Mutex
f core.File f io.WriteCloser
} }
func NewShareLog(path string) *ShareLog {} func NewShareLog(path string) *ShareLog {}
@ -1036,13 +1162,13 @@ func (l *ShareLog) OnReject(e proxy.Event) {}
## 16. HTTP Monitoring API ## 16. HTTP Monitoring API
```go ```go
// RegisterRoutes registers the proxy monitoring routes on a core/api Router. // RegisterRoutes registers the proxy monitoring routes on a core/api Engine.
// GET /1/summary — aggregated proxy stats // GET /1/summary — aggregated proxy stats
// GET /1/workers — per-worker hashrate table // GET /1/workers — per-worker hashrate table
// GET /1/miners — per-connection state table // GET /1/miners — per-connection state table
// //
// proxyapi.RegisterRoutes(apiRouter, p) // proxyapi.RegisterRoutes(engine, p)
func RegisterRoutes(r api.Router, p *proxy.Proxy) {} func RegisterRoutes(r *api.Engine, p *proxy.Proxy) {}
``` ```
### GET /1/summary — response shape ### GET /1/summary — response shape
@ -1362,7 +1488,7 @@ func TestStorage_Add_Good(t *testing.T) {
// TestJob_BlobWithFixedByte_Bad: blob shorter than 80 chars → returns original blob unchanged. // TestJob_BlobWithFixedByte_Bad: blob shorter than 80 chars → returns original blob unchanged.
// TestJob_BlobWithFixedByte_Ugly: fixedByte 0xFF → "ff" (lowercase, not "FF"). // TestJob_BlobWithFixedByte_Ugly: fixedByte 0xFF → "ff" (lowercase, not "FF").
func TestJob_BlobWithFixedByte_Good(t *testing.T) { func TestJob_BlobWithFixedByte_Good(t *testing.T) {
j := proxy.Job{Blob: core.RepeatString("0", 160)} j := proxy.Job{Blob: strings.Repeat("0", 160)}
result := j.BlobWithFixedByte(0x2A) result := j.BlobWithFixedByte(0x2A)
require.Equal(t, "2a", result[78:80]) require.Equal(t, "2a", result[78:80])
require.Equal(t, 160, len(result)) require.Equal(t, 160, len(result))

View file

@ -0,0 +1,5 @@
# go-proxy RFC
This path mirrors the authoritative proxy contract in [`../../../../RFC.md`](../../../../RFC.md).
Use the root RFC for the full implementation contract.

View file

@ -0,0 +1,5 @@
# RFC-CORE-008: Agent Experience
This path mirrors the local AX guidance in [`../../../../.core/reference/RFC-025-AGENT-EXPERIENCE.md`](../../../../.core/reference/RFC-025-AGENT-EXPERIENCE.md).
Use the reference copy for the full design principles.

View file

@ -1,49 +0,0 @@
package proxy
// String returns the stable name for one worker routing mode.
//
// mode := WorkersByRigID
// _ = mode.String()
func (mode WorkersMode) String() string {
	// The mode's underlying string already is its stable name.
	name := string(mode)
	return name
}
// String returns the stable name for one miner state.
//
// state := MinerStateReady
// _ = state.String()
func (state MinerState) String() string {
	switch state {
	case MinerStateClosing:
		return "closing"
	case MinerStateReady:
		return "ready"
	case MinerStateWaitReady:
		return "wait_ready"
	case MinerStateWaitLogin:
		return "wait_login"
	}
	// Any state outside the known set maps to a single sentinel name.
	return "unknown"
}
// String returns the stable name for one event type.
//
// eventType := EventAccept
// _ = eventType.String()
func (eventType EventType) String() string {
	switch eventType {
	case EventClose:
		return "close"
	case EventReject:
		return "reject"
	case EventAccept:
		return "accept"
	case EventSubmit:
		return "submit"
	case EventLogin:
		return "login"
	}
	// Unrecognised event types map to a single sentinel name.
	return "unknown"
}

38
error.go Normal file
View file

@ -0,0 +1,38 @@
package proxy
// ScopedError carries a stable error scope alongside a human-readable message.
//
// err := proxy.NewScopedError("proxy.config", "load failed", io.EOF)
type ScopedError struct {
	Scope   string
	Message string
	Cause   error
}

// NewScopedError creates an error that keeps a greppable scope token in the failure path.
//
// err := proxy.NewScopedError("proxy.server", "listen failed", cause)
func NewScopedError(scope, message string, cause error) error {
	return &ScopedError{Scope: scope, Message: message, Cause: cause}
}

// Error renders "scope: message", appending the cause text when one is present.
// A nil receiver renders as the empty string, so formatting a nil *ScopedError is safe.
func (e *ScopedError) Error() string {
	switch {
	case e == nil:
		return ""
	case e.Cause == nil:
		return e.Scope + ": " + e.Message
	}
	return e.Scope + ": " + e.Message + ": " + e.Cause.Error()
}

// Unwrap exposes the wrapped cause so errors.Is and errors.As can traverse it.
// A nil receiver unwraps to nil.
func (e *ScopedError) Unwrap() error {
	if e == nil {
		return nil
	}
	return e.Cause
}

43
error_test.go Normal file
View file

@ -0,0 +1,43 @@
package proxy
import (
"errors"
"testing"
)
// TestError_NewScopedError_Good: a cause-free scoped error renders "scope: message".
func TestError_NewScopedError_Good(t *testing.T) {
	err := NewScopedError("proxy.config", "bind list is empty", nil)
	if err == nil {
		t.Fatalf("expected scoped error")
	}
	want := "proxy.config: bind list is empty"
	if got := err.Error(); got != want {
		t.Fatalf("unexpected scoped error string: %q", got)
	}
}
// TestError_NewScopedError_Bad: a wrapped cause is reachable via errors.Is and
// appears at the end of the rendered string.
func TestError_NewScopedError_Bad(t *testing.T) {
	rootCause := errors.New("permission denied")
	err := NewScopedError("proxy.config", "read config failed", rootCause)
	switch {
	case err == nil:
		t.Fatalf("expected scoped error")
	case !errors.Is(err, rootCause):
		t.Fatalf("expected errors.Is to unwrap the original cause")
	}
	if got := err.Error(); got != "proxy.config: read config failed: permission denied" {
		t.Fatalf("unexpected wrapped error string: %q", got)
	}
}
// TestError_NewScopedError_Ugly: a nil *ScopedError is safe to render and unwrap.
func TestError_NewScopedError_Ugly(t *testing.T) {
	var scoped *ScopedError
	if text := scoped.Error(); text != "" {
		t.Fatalf("expected nil scoped error string to be empty, got %q", text)
	}
	if unwrapped := scoped.Unwrap(); unwrapped != nil {
		t.Fatalf("expected nil scoped error to unwrap to nil")
	}
}

View file

@ -2,81 +2,44 @@ package proxy
import "sync" import "sync"
// EventBus dispatches proxy lifecycle events to registered listeners. // EventBus dispatches proxy lifecycle events to synchronous listeners.
// Dispatch is synchronous on the calling goroutine. Listeners must not block.
// //
// bus := proxy.NewEventBus() // bus := proxy.NewEventBus()
// bus.Subscribe(proxy.EventLogin, customDiff.OnLogin) // bus.Subscribe(proxy.EventLogin, func(e proxy.Event) {
// _ = e.Miner.User()
// })
// bus.Subscribe(proxy.EventAccept, stats.OnAccept) // bus.Subscribe(proxy.EventAccept, stats.OnAccept)
type EventBus struct { type EventBus struct {
listeners map[EventType][]EventHandler listeners map[EventType][]EventHandler
mu sync.RWMutex mu sync.RWMutex
} }
// EventType identifies the proxy lifecycle event. // EventType identifies one proxy lifecycle event.
//
// proxy.EventLogin
type EventType int type EventType int
const ( const (
EventLogin EventType = iota // miner completed login EventLogin EventType = iota // miner completed login
EventSubmit // miner submitted a share
EventAccept // pool accepted a submitted share EventAccept // pool accepted a submitted share
EventReject // pool rejected a share (or share expired) EventReject // pool rejected a share (or share expired)
EventClose // miner TCP connection closed EventClose // miner TCP connection closed
) )
// EventHandler is the callback signature for all event types. // EventHandler is the callback signature for all event types.
//
// handler := func(e proxy.Event) { _ = e.Miner }
type EventHandler func(Event) type EventHandler func(Event)
// Event carries the data for any proxy lifecycle event. // Event carries the data for any proxy lifecycle event.
// Fields not relevant to the event type are zero/nil.
// //
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m}) // bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m})
type Event struct { type Event struct {
Type EventType Type EventType
Miner *Miner // always set Miner *Miner // always set
Job *Job // set for Accept and Reject events Job *Job // set for Accept and Reject events
JobID string // set for Submit events
Nonce string // set for Submit events
Result string // set for Submit events
Algo string // set for Submit events
RequestID int64 // set for Submit events
Diff uint64 // effective difficulty of the share (Accept and Reject) Diff uint64 // effective difficulty of the share (Accept and Reject)
Error string // rejection reason (Reject only) Error string // rejection reason (Reject only)
Latency uint16 // pool response time in ms (Accept and Reject) Latency uint16 // pool response time in ms (Accept and Reject)
Expired bool // true if the share was accepted but against the previous job Expired bool // true if the share was accepted but against the previous job
} }
// NewEventBus builds an empty synchronous event dispatcher.
//
// bus := proxy.NewEventBus()
func NewEventBus() *EventBus {
return &EventBus{
listeners: make(map[EventType][]EventHandler),
}
}
// Subscribe registers a handler for the given event type. Safe to call before Start.
//
// bus.Subscribe(proxy.EventAccept, func(e proxy.Event) { stats.OnAccept(e) })
func (b *EventBus) Subscribe(eventType EventType, handler EventHandler) {
if handler == nil {
return
}
b.mu.Lock()
defer b.mu.Unlock()
b.listeners[eventType] = append(b.listeners[eventType], handler)
}
// Dispatch calls all registered handlers for the event's type in subscription order.
//
// bus.Dispatch(proxy.Event{Type: proxy.EventLogin, Miner: m})
func (b *EventBus) Dispatch(event Event) {
b.mu.RLock()
handlers := append([]EventHandler(nil), b.listeners[event.Type]...)
b.mu.RUnlock()
for _, handler := range handlers {
handler(event)
}
}

2
go.mod
View file

@ -1,3 +1,3 @@
module dappco.re/go/core/proxy module dappco.re/go/proxy
go 1.26.0 go 1.26.0

224
http_auth_test.go Normal file
View file

@ -0,0 +1,224 @@
package proxy
import (
"net"
"net/http"
"net/http/httptest"
"strconv"
"testing"
)
func TestProxy_allowHTTP_Good(t *testing.T) {
p := &Proxy{
config: &Config{
HTTP: HTTPConfig{
Restricted: true,
AccessToken: "secret",
},
},
}
status, ok := p.AllowMonitoringRequest(&http.Request{
Method: http.MethodGet,
Header: http.Header{
"Authorization": []string{"Bearer secret"},
},
})
if !ok {
t.Fatalf("expected authorised request to pass, got status %d", status)
}
if status != http.StatusOK {
t.Fatalf("expected status %d, got %d", http.StatusOK, status)
}
}
// TestProxy_allowHTTP_Bad: a restricted non-GET request is refused with 405.
func TestProxy_allowHTTP_Bad(t *testing.T) {
	subject := &Proxy{config: &Config{HTTP: HTTPConfig{Restricted: true}}}
	status, ok := subject.AllowMonitoringRequest(&http.Request{Method: http.MethodPost})
	if ok {
		t.Fatal("expected non-GET request to be rejected")
	}
	if status != http.StatusMethodNotAllowed {
		t.Fatalf("expected status %d, got %d", http.StatusMethodNotAllowed, status)
	}
}
// TestProxy_allowHTTP_Unrestricted_Good: with no restriction configured, GET passes.
func TestProxy_allowHTTP_Unrestricted_Good(t *testing.T) {
	subject := &Proxy{config: &Config{HTTP: HTTPConfig{}}}
	status, ok := subject.AllowMonitoringRequest(&http.Request{Method: http.MethodGet})
	if !ok {
		t.Fatalf("expected unrestricted request to pass, got status %d", status)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status %d, got %d", http.StatusOK, status)
	}
}
// TestProxy_allowHTTP_Unrestricted_Bad: with no restriction, even non-GET passes.
func TestProxy_allowHTTP_Unrestricted_Bad(t *testing.T) {
	subject := &Proxy{config: &Config{HTTP: HTTPConfig{}}}
	status, ok := subject.AllowMonitoringRequest(&http.Request{Method: http.MethodPost})
	if !ok {
		t.Fatalf("expected unrestricted non-GET request to pass, got status %d", status)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status %d, got %d", http.StatusOK, status)
	}
}
// TestProxy_allowHTTP_Ugly: a wrong bearer token is refused with 401.
func TestProxy_allowHTTP_Ugly(t *testing.T) {
	subject := &Proxy{config: &Config{HTTP: HTTPConfig{AccessToken: "secret"}}}
	req := &http.Request{
		Method: http.MethodGet,
		Header: http.Header{"Authorization": []string{"Bearer wrong"}},
	}
	status, ok := subject.AllowMonitoringRequest(req)
	if ok {
		t.Fatal("expected invalid token to be rejected")
	}
	if status != http.StatusUnauthorized {
		t.Fatalf("expected status %d, got %d", http.StatusUnauthorized, status)
	}
}
// TestProxy_allowHTTP_NilConfig_Ugly: a proxy without config refuses with 503.
func TestProxy_allowHTTP_NilConfig_Ugly(t *testing.T) {
	var subject Proxy
	status, ok := subject.AllowMonitoringRequest(&http.Request{Method: http.MethodGet})
	if ok {
		t.Fatal("expected nil config request to be rejected")
	}
	if status != http.StatusServiceUnavailable {
		t.Fatalf("expected status %d, got %d", http.StatusServiceUnavailable, status)
	}
}
// TestProxy_startHTTP_Good: port 0 lets the OS pick a free port, so start succeeds.
func TestProxy_startHTTP_Good(t *testing.T) {
	subject := &Proxy{
		config: &Config{HTTP: HTTPConfig{
			Enabled: true,
			Host:    "127.0.0.1",
			Port:    0,
		}},
		done: make(chan struct{}),
	}
	if ok := subject.startMonitoringServer(); !ok {
		t.Fatal("expected HTTP server to start on a free port")
	}
	subject.Stop()
}
// TestProxy_startHTTP_NilConfig_Bad: no config means the server must not start.
func TestProxy_startHTTP_NilConfig_Bad(t *testing.T) {
	var subject Proxy
	if subject.startMonitoringServer() {
		t.Fatal("expected nil config to skip HTTP server start")
	}
}
// TestProxy_startHTTP_Bad: binding a port that is already held by another
// listener must fail cleanly.
func TestProxy_startHTTP_Bad(t *testing.T) {
	// Occupy an ephemeral port for the duration of the test.
	blocker, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("listen on ephemeral port: %v", err)
	}
	defer blocker.Close()

	host, portText, err := net.SplitHostPort(blocker.Addr().String())
	if err != nil {
		t.Fatalf("split listener addr: %v", err)
	}
	portNumber, err := strconv.Atoi(portText)
	if err != nil {
		t.Fatalf("parse listener port: %v", err)
	}

	subject := &Proxy{
		config: &Config{HTTP: HTTPConfig{
			Enabled: true,
			Host:    host,
			Port:    uint16(portNumber),
		}},
		done: make(chan struct{}),
	}
	if subject.startMonitoringServer() {
		t.Fatal("expected HTTP server start to fail when the port is already in use")
	}
}
// TestProxy_registerMonitoringRoute_MethodNotAllowed_Bad: a POST to a
// monitoring route gets 405 plus an Allow: GET header.
func TestProxy_registerMonitoringRoute_MethodNotAllowed_Bad(t *testing.T) {
	subject := &Proxy{config: &Config{HTTP: HTTPConfig{Restricted: true}}}
	mux := http.NewServeMux()
	subject.registerMonitoringRoute(mux, "/1/summary", func() any { return map[string]string{"status": "ok"} })

	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest(http.MethodPost, "/1/summary", nil))
	if rec.Code != http.StatusMethodNotAllowed {
		t.Fatalf("expected %d, got %d", http.StatusMethodNotAllowed, rec.Code)
	}
	if allow := rec.Header().Get("Allow"); allow != http.MethodGet {
		t.Fatalf("expected Allow header %q, got %q", http.MethodGet, allow)
	}
}
// TestProxy_registerMonitoringRoute_Unauthorized_Ugly: a token-protected route
// without credentials gets 401 plus a WWW-Authenticate challenge.
func TestProxy_registerMonitoringRoute_Unauthorized_Ugly(t *testing.T) {
	subject := &Proxy{config: &Config{HTTP: HTTPConfig{AccessToken: "secret"}}}
	mux := http.NewServeMux()
	subject.registerMonitoringRoute(mux, "/1/summary", func() any { return map[string]string{"status": "ok"} })

	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/1/summary", nil))
	if rec.Code != http.StatusUnauthorized {
		t.Fatalf("expected %d, got %d", http.StatusUnauthorized, rec.Code)
	}
	if challenge := rec.Header().Get("WWW-Authenticate"); challenge != "Bearer" {
		t.Fatalf("expected WWW-Authenticate header %q, got %q", "Bearer", challenge)
	}
}

94
job.go
View file

@ -1,93 +1,21 @@
package proxy package proxy
import ( // Job holds one pool work unit and its metadata.
"encoding/binary"
"encoding/hex"
"strconv"
)
// Job holds the current work unit received from a pool. Immutable once assigned.
// //
// j := proxy.Job{ // j := proxy.Job{
// Blob: "0707d5ef...b01", // Blob: strings.Repeat("0", 160),
// JobID: "4BiGm3/RgGQzgkTI", // JobID: "4BiGm3/RgGQzgkTI",
// Target: "b88d0600", // Target: "b88d0600",
// Algo: "cn/r", // Algo: "cn/r",
// } // }
// _ = j.BlobWithFixedByte(0x2A)
// _ = j.DifficultyFromTarget()
type Job struct { type Job struct {
Blob string `json:"blob"` // hex-encoded block template (160 hex chars = 80 bytes) Blob string // hex-encoded block template (160 hex chars = 80 bytes)
JobID string `json:"job_id"` // pool-assigned identifier JobID string // pool-assigned identifier
Target string `json:"target"` // 8-char hex little-endian uint32 difficulty target Target string // 8-char hex little-endian uint32 difficulty target
Algo string `json:"algo"` // algorithm e.g. "cn/r", "rx/0"; "" if not negotiated Algo string // algorithm e.g. "cn/r", "rx/0"; "" if not negotiated
Height uint64 `json:"height"` // block height (0 if pool did not provide) Height uint64 // block height (0 if pool did not provide)
SeedHash string `json:"seed_hash"` // RandomX seed hash hex (empty if not RandomX) SeedHash string // RandomX seed hash hex (empty if not RandomX)
ClientID string `json:"id"` // pool session ID that issued this job (for stale detection) ClientID string // pool session ID that issued this job (for stale detection)
}
// IsValid returns true if Blob and JobID are non-empty.
//
// if !job.IsValid() { return }
func (j Job) IsValid() bool {
return j.Blob != "" && j.JobID != ""
}
// BlobWithFixedByte returns a copy of Blob with hex characters at positions 78-79
// (blob byte index 39) replaced by the two-digit lowercase hex of fixedByte.
//
// partitioned := job.BlobWithFixedByte(0x2A) // chars 78-79 become "2a"
func (j Job) BlobWithFixedByte(fixedByte uint8) string {
if len(j.Blob) < 80 {
return j.Blob
}
blob := []byte(j.Blob)
blob[78] = lowerHexDigit(fixedByte >> 4)
blob[79] = lowerHexDigit(fixedByte & 0x0F)
return string(blob)
}
// DifficultyFromTarget converts the 8-char little-endian hex Target field to a uint64 difficulty.
//
// diff := job.DifficultyFromTarget() // "b88d0600" → ~100000
func (j Job) DifficultyFromTarget() uint64 {
if len(j.Target) != 8 {
return 0
}
targetBytes, errorValue := hex.DecodeString(j.Target)
if errorValue != nil || len(targetBytes) != 4 {
return 0
}
targetValue := binary.LittleEndian.Uint32(targetBytes)
if targetValue == 0 {
return 0
}
return uint64(^uint32(0) / targetValue)
}
// TargetForDifficulty converts a difficulty back to the 8-char little-endian target field.
//
// target := proxy.TargetForDifficulty(100000)
func TargetForDifficulty(difficulty uint64) string {
if difficulty <= 1 {
return "ffffffff"
}
targetValue := uint64(^uint32(0)) / difficulty
if targetValue == 0 {
targetValue = 1
}
if targetValue > uint64(^uint32(0)) {
targetValue = uint64(^uint32(0))
}
targetBytes := make([]byte, 4)
binary.LittleEndian.PutUint32(targetBytes, uint32(targetValue))
return hex.EncodeToString(targetBytes)
}
func lowerHexDigit(value uint8) byte {
return strconv.FormatUint(uint64(value), 16)[0]
} }

View file

@ -1,85 +1,116 @@
package proxy package proxy
import ( import (
"encoding/json" "strings"
"testing" "testing"
) )
func TestJob_IsValid_Good(t *testing.T) { // TestJob_BlobWithFixedByte_Good verifies nonce patching on a full 160-char blob.
job := Job{Blob: "abcd", JobID: "job-1"} //
if !job.IsValid() { // job := proxy.Job{Blob: strings.Repeat("0", 160)}
t.Fatal("expected valid job") // result := job.BlobWithFixedByte(0x2A) // chars 78-79 become "2a"
}
}
func TestJob_IsValid_Bad(t *testing.T) {
job := Job{Blob: "abcd"}
if job.IsValid() {
t.Fatal("expected invalid job without job ID")
}
}
func TestJob_IsValid_Ugly(t *testing.T) {
var job Job
if job.IsValid() {
t.Fatal("zero job should be invalid")
}
}
func TestJob_BlobWithFixedByte_Good(t *testing.T) { func TestJob_BlobWithFixedByte_Good(t *testing.T) {
job := Job{Blob: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"} job := Job{Blob: strings.Repeat("0", 160)}
got := job.BlobWithFixedByte(0x2a) got := job.BlobWithFixedByte(0x2A)
if len(got) != 160 {
t.Fatalf("expected length 160, got %d", len(got))
}
if got[78:80] != "2a" { if got[78:80] != "2a" {
t.Fatalf("expected byte patch 2a, got %s", got[78:80]) t.Fatalf("expected fixed byte patch, got %q", got[78:80])
} }
} }
// TestJob_BlobWithFixedByte_Bad verifies a short blob is returned unchanged.
//
// job := proxy.Job{Blob: "0000"}
// result := job.BlobWithFixedByte(0x2A) // too short, returned as-is
func TestJob_BlobWithFixedByte_Bad(t *testing.T) { func TestJob_BlobWithFixedByte_Bad(t *testing.T) {
job := Job{Blob: "short"} shortBlob := "0000"
if got := job.BlobWithFixedByte(0x2a); got != "short" { job := Job{Blob: shortBlob}
t.Fatalf("expected short blob unchanged, got %q", got) got := job.BlobWithFixedByte(0x2A)
if got != shortBlob {
t.Fatalf("expected short blob to be returned unchanged, got %q", got)
} }
} }
// TestJob_BlobWithFixedByte_Ugly verifies fixedByte 0xFF renders as lowercase "ff".
//
// job := proxy.Job{Blob: strings.Repeat("0", 160)}
// result := job.BlobWithFixedByte(0xFF) // chars 78-79 become "ff" (not "FF")
func TestJob_BlobWithFixedByte_Ugly(t *testing.T) { func TestJob_BlobWithFixedByte_Ugly(t *testing.T) {
job := Job{Blob: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"} job := Job{Blob: strings.Repeat("0", 160)}
got := job.BlobWithFixedByte(0x00) got := job.BlobWithFixedByte(0xFF)
if got[78:80] != "00" { if got[78:80] != "ff" {
t.Fatalf("expected byte patch 00, got %s", got[78:80]) t.Fatalf("expected lowercase 'ff', got %q", got[78:80])
}
if len(got) != 160 {
t.Fatalf("expected blob length preserved, got %d", len(got))
} }
} }
// TestJob_DifficultyFromTarget_Good verifies a known target converts to the expected difficulty.
//
// job := proxy.Job{Target: "b88d0600"}
// diff := job.DifficultyFromTarget() // 10000
func TestJob_DifficultyFromTarget_Good(t *testing.T) { func TestJob_DifficultyFromTarget_Good(t *testing.T) {
job := Job{Target: "b88d0600"} job := Job{Target: "b88d0600"}
if got := job.DifficultyFromTarget(); got == 0 { if got := job.DifficultyFromTarget(); got != 10000 {
t.Fatal("expected non-zero difficulty") t.Fatalf("expected difficulty 10000, got %d", got)
} }
} }
// TestJob_DifficultyFromTarget_Bad verifies a zero target produces difficulty 0 without panic.
//
// job := proxy.Job{Target: "00000000"}
// diff := job.DifficultyFromTarget() // 0 (no divide-by-zero)
func TestJob_DifficultyFromTarget_Bad(t *testing.T) { func TestJob_DifficultyFromTarget_Bad(t *testing.T) {
job := Job{Target: "zzzzzzzz"}
if got := job.DifficultyFromTarget(); got != 0 {
t.Fatalf("expected invalid target difficulty to be zero, got %d", got)
}
}
func TestJob_DifficultyFromTarget_Ugly(t *testing.T) {
job := Job{Target: "00000000"} job := Job{Target: "00000000"}
if got := job.DifficultyFromTarget(); got != 0 { if got := job.DifficultyFromTarget(); got != 0 {
t.Fatalf("expected zero target difficulty to be zero, got %d", got) t.Fatalf("expected difficulty 0 for zero target, got %d", got)
}
job = Job{Target: "ffffffff"}
if got := job.DifficultyFromTarget(); got != 1 {
t.Fatalf("expected maximum target to resolve to difficulty 1, got %d", got)
} }
} }
func TestJob_JSON_Unmarshal_Good(t *testing.T) { // TestJob_DifficultyFromTarget_Ugly verifies the maximum target "ffffffff" yields difficulty 1.
var job Job //
if err := json.Unmarshal([]byte(`{"blob":"abcd","job_id":"job-1","target":"b88d0600","algo":"cn/r","height":42,"seed_hash":"seed","id":"session-1"}`), &job); err != nil { // job := proxy.Job{Target: "ffffffff"}
t.Fatal(err) // diff := job.DifficultyFromTarget() // 1
} func TestJob_DifficultyFromTarget_Ugly(t *testing.T) {
if job.JobID != "job-1" || job.SeedHash != "seed" || job.ClientID != "session-1" { job := Job{Target: "ffffffff"}
t.Fatalf("unexpected decoded job: %+v", job) if got := job.DifficultyFromTarget(); got != 1 {
t.Fatalf("expected minimum difficulty 1, got %d", got)
}
}
// TestJob_IsValid_Good verifies a job with blob and job ID is valid.
//
// job := proxy.Job{Blob: "abc", JobID: "job-1"}
// job.IsValid() // true
func TestJob_IsValid_Good(t *testing.T) {
job := Job{Blob: "abc", JobID: "job-1"}
if !job.IsValid() {
t.Fatalf("expected job with blob and job id to be valid")
}
}
// TestJob_IsValid_Bad verifies a job with empty blob or job ID is invalid.
//
// job := proxy.Job{Blob: "", JobID: "job-1"}
// job.IsValid() // false
func TestJob_IsValid_Bad(t *testing.T) {
if (Job{Blob: "", JobID: "job-1"}).IsValid() {
t.Fatalf("expected empty blob to be invalid")
}
if (Job{Blob: "abc", JobID: ""}).IsValid() {
t.Fatalf("expected empty job id to be invalid")
}
}
// TestJob_IsValid_Ugly verifies a zero-value job is invalid.
//
// job := proxy.Job{}
// job.IsValid() // false
func TestJob_IsValid_Ugly(t *testing.T) {
if (Job{}).IsValid() {
t.Fatalf("expected zero-value job to be invalid")
} }
} }

View file

@ -6,14 +6,14 @@
package log package log
import ( import (
"fmt"
"os" "os"
"sync" "sync"
"dappco.re/go/core/proxy"
) )
// AccessLog writes append-only connection lines. // AccessLog writes connection lifecycle lines to an append-only text file.
//
// Line format (connect): 2026-04-04T12:00:00Z CONNECT <ip> <user> <agent>
// Line format (close): 2026-04-04T12:00:00Z CLOSE <ip> <user> rx=<bytes> tx=<bytes>
// //
// al := log.NewAccessLog("/var/log/proxy-access.log") // al := log.NewAccessLog("/var/log/proxy-access.log")
// bus.Subscribe(proxy.EventLogin, al.OnLogin) // bus.Subscribe(proxy.EventLogin, al.OnLogin)
@ -22,91 +22,4 @@ type AccessLog struct {
path string path string
mu sync.Mutex mu sync.Mutex
file *os.File file *os.File
closed bool
}
// NewAccessLog opens the file lazily on first write.
//
// al := log.NewAccessLog("/var/log/proxy-access.log")
func NewAccessLog(path string) *AccessLog {
return &AccessLog{path: path}
}
// OnLogin writes `2026-04-04T12:00:00Z CONNECT 10.0.0.1 WALLET XMRig/6.21.0`.
//
// al.OnLogin(proxy.Event{Miner: miner})
func (l *AccessLog) OnLogin(event proxy.Event) {
if event.Miner == nil {
return
}
line := fmt.Sprintf("%s CONNECT %s %s %s\n",
utcTimestamp(),
event.Miner.IP(),
event.Miner.User(),
event.Miner.Agent(),
)
l.writeLine(line)
}
// OnClose writes `2026-04-04T12:00:00Z CLOSE 10.0.0.1 WALLET rx=512 tx=4096`.
//
// al.OnClose(proxy.Event{Miner: miner})
func (l *AccessLog) OnClose(event proxy.Event) {
if event.Miner == nil {
return
}
line := fmt.Sprintf("%s CLOSE %s %s rx=%d tx=%d\n",
utcTimestamp(),
event.Miner.IP(),
event.Miner.User(),
event.Miner.RX(),
event.Miner.TX(),
)
l.writeLine(line)
}
// Close releases the append-only file handle if it has been opened.
//
// al.Close()
func (l *AccessLog) Close() {
if l == nil {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.closed {
return
}
l.closed = true
if l.file != nil {
_ = l.file.Close()
l.file = nil
}
}
func (l *AccessLog) writeLine(line string) {
if l == nil || l.path == "" {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.closed {
return
}
if l.file == nil {
file, errorValue := os.OpenFile(l.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
if errorValue != nil {
return
}
l.file = file
}
_, _ = l.file.WriteString(line)
} }

204
log/impl.go Normal file
View file

@ -0,0 +1,204 @@
package log
import (
"os"
"strconv"
"strings"
"time"
"dappco.re/go/proxy"
)
// NewAccessLog creates an append-only access log. The file is opened lazily
// on the first write, not here.
//
// al := log.NewAccessLog("/var/log/proxy-access.log")
// defer al.Close()
func NewAccessLog(path string) *AccessLog {
	accessLog := AccessLog{path: path}
	return &accessLog
}
// Close releases the underlying file handle if the log has been opened.
// Safe on a nil receiver and safe to call more than once (the handle is
// nilled after the first close).
//
// NOTE(review): there is no "closed" flag, so a write event arriving after
// Close will lazily reopen the file via ensureFile — the pre-refactor
// implementation suppressed writes after Close; confirm the new behavior
// is intended.
//
// al := log.NewAccessLog("/var/log/proxy-access.log")
// defer al.Close()
func (l *AccessLog) Close() {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.file != nil {
		_ = l.file.Close()
		l.file = nil
	}
}
// OnLogin writes a connect line such as:
//
// al.OnLogin(proxy.Event{Miner: &proxy.Miner{}})
// // 2026-04-04T12:00:00Z CONNECT 10.0.0.1 WALLET XMRig/6.21.0
func (l *AccessLog) OnLogin(e proxy.Event) {
	if l == nil {
		return
	}
	miner := e.Miner
	if miner == nil {
		return
	}
	l.writeConnectLine(miner.IP(), miner.User(), miner.Agent())
}
// OnClose writes a close line such as:
//
// al.OnClose(proxy.Event{Miner: &proxy.Miner{}})
// // 2026-04-04T12:00:00Z CLOSE 10.0.0.1 WALLET rx=512 tx=4096
func (l *AccessLog) OnClose(e proxy.Event) {
	if l == nil {
		return
	}
	miner := e.Miner
	if miner == nil {
		return
	}
	l.writeCloseLine(miner.IP(), miner.User(), miner.RX(), miner.TX())
}
// NewShareLog creates an append-only share log. The file is opened lazily
// on the first write, not here.
//
// sl := log.NewShareLog("/var/log/proxy-shares.log")
// defer sl.Close()
func NewShareLog(path string) *ShareLog {
	shareLog := ShareLog{path: path}
	return &shareLog
}
// Close releases the underlying file handle if the log has been opened.
// Safe on a nil receiver and safe to call more than once (the handle is
// nilled after the first close).
//
// NOTE(review): there is no "closed" flag, so a share event arriving after
// Close will lazily reopen the file via ensureFile — the pre-refactor
// implementation suppressed writes after Close; confirm the new behavior
// is intended.
//
// sl := log.NewShareLog("/var/log/proxy-shares.log")
// defer sl.Close()
func (l *ShareLog) Close() {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.file != nil {
		_ = l.file.Close()
		l.file = nil
	}
}
// OnAccept writes an accept line such as:
//
// sl.OnAccept(proxy.Event{Miner: &proxy.Miner{}, Diff: 100000, Latency: 82})
// // 2026-04-04T12:00:00Z ACCEPT WALLET diff=100000 latency=82ms
func (l *ShareLog) OnAccept(e proxy.Event) {
	if l == nil {
		return
	}
	miner := e.Miner
	if miner == nil {
		return
	}
	latencyMillis := uint64(e.Latency)
	l.writeAcceptLine(miner.User(), e.Diff, latencyMillis)
}
// OnReject writes a reject line such as:
//
// sl.OnReject(proxy.Event{Miner: &proxy.Miner{}, Error: "Invalid nonce"})
// // 2026-04-04T12:00:00Z REJECT WALLET reason="Invalid nonce"
func (l *ShareLog) OnReject(e proxy.Event) {
	if l == nil {
		return
	}
	miner := e.Miner
	if miner == nil {
		return
	}
	l.writeRejectLine(miner.User(), e.Error)
}
// writeConnectLine appends one CONNECT line under the log mutex, lazily
// opening the file on first use. Open and write failures are dropped.
func (accessLog *AccessLog) writeConnectLine(ip, user, agent string) {
	accessLog.mu.Lock()
	defer accessLog.mu.Unlock()
	if err := accessLog.ensureFile(); err != nil {
		return
	}
	timestamp := time.Now().UTC().Format(time.RFC3339)
	line := timestamp + " CONNECT " + ip + " " + user + " " + agent + "\n"
	_, _ = accessLog.file.WriteString(line)
}
// writeCloseLine appends one CLOSE line with the session byte counters under
// the log mutex, lazily opening the file on first use. Failures are dropped.
func (accessLog *AccessLog) writeCloseLine(ip, user string, rx, tx uint64) {
	accessLog.mu.Lock()
	defer accessLog.mu.Unlock()
	if err := accessLog.ensureFile(); err != nil {
		return
	}
	timestamp := time.Now().UTC().Format(time.RFC3339)
	line := timestamp + " CLOSE " + ip + " " + user +
		" rx=" + strconv.FormatUint(rx, 10) +
		" tx=" + strconv.FormatUint(tx, 10) + "\n"
	_, _ = accessLog.file.WriteString(line)
}
// writeAcceptLine appends one ACCEPT line with difficulty and latency under
// the log mutex, lazily opening the file on first use. Failures are dropped.
func (shareLog *ShareLog) writeAcceptLine(user string, diff uint64, latency uint64) {
	shareLog.mu.Lock()
	defer shareLog.mu.Unlock()
	if err := shareLog.ensureFile(); err != nil {
		return
	}
	timestamp := time.Now().UTC().Format(time.RFC3339)
	line := timestamp + " ACCEPT " + user +
		" diff=" + strconv.FormatUint(diff, 10) +
		" latency=" + strconv.FormatUint(latency, 10) + "ms\n"
	_, _ = shareLog.file.WriteString(line)
}
// writeRejectLine appends one REJECT line under the log mutex, lazily opening
// the file on first use. Failures are dropped.
//
// The reason is quoted with strconv.Quote rather than pasted between literal
// quotes: an embedded `"`, backslash, or newline in a pool rejection message
// would otherwise break the one-line reason="<message>" format. This matches
// the %q behaviour of the pre-refactor implementation.
func (shareLog *ShareLog) writeRejectLine(user, reason string) {
	shareLog.mu.Lock()
	defer shareLog.mu.Unlock()
	if err := shareLog.ensureFile(); err != nil {
		return
	}
	timestamp := time.Now().UTC().Format(time.RFC3339)
	line := timestamp + " REJECT " + user + " reason=" + strconv.Quote(reason) + "\n"
	_, _ = shareLog.file.WriteString(line)
}
// ensureFile lazily opens the append-only log file, keeping any handle that
// is already open. Callers must hold the log mutex.
func (accessLog *AccessLog) ensureFile() error {
	if accessLog.file != nil {
		return nil
	}
	file, openErr := os.OpenFile(accessLog.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if openErr != nil {
		return openErr
	}
	accessLog.file = file
	return nil
}
// ensureFile lazily opens the append-only log file, keeping any handle that
// is already open. Callers must hold the log mutex.
func (shareLog *ShareLog) ensureFile() error {
	if shareLog.file != nil {
		return nil
	}
	file, openErr := os.OpenFile(shareLog.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if openErr != nil {
		return openErr
	}
	shareLog.file = file
	return nil
}

341
log/impl_test.go Normal file
View file

@ -0,0 +1,341 @@
package log
import (
"net"
"os"
"path/filepath"
"strings"
"testing"
"time"
"dappco.re/go/proxy"
)
// TestAccessLog_OnLogin_Good verifies a CONNECT line is written with the expected columns.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnLogin(proxy.Event{Miner: miner}) // writes "CONNECT 10.0.0.1 WALLET XMRig/6.21.0"
func TestAccessLog_OnLogin_Good(t *testing.T) {
	path := filepath.Join(t.TempDir(), "access.log")
	accessLog := NewAccessLog(path)
	defer accessLog.Close()
	accessLog.OnLogin(proxy.Event{Miner: newTestMiner(t)})
	accessLog.Close()
	data, readErr := os.ReadFile(path)
	if readErr != nil {
		t.Fatalf("expected log file to exist: %v", readErr)
	}
	line := strings.TrimSpace(string(data))
	if !strings.Contains(line, "CONNECT") {
		t.Fatalf("expected CONNECT in log line, got %q", line)
	}
}
// TestAccessLog_OnLogin_Bad verifies a nil miner event does not panic or write anything.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnLogin(proxy.Event{Miner: nil}) // no-op
func TestAccessLog_OnLogin_Bad(t *testing.T) {
	path := filepath.Join(t.TempDir(), "access.log")
	al := NewAccessLog(path)
	defer al.Close()
	al.OnLogin(proxy.Event{Miner: nil})
	al.Close()
	// A nil miner must not even create the file. Read directly and tolerate
	// only "not exist": the previous os.Stat guard silently skipped the
	// assertion on ANY stat error (e.g. permission), masking failures.
	data, err := os.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		t.Fatalf("unexpected error reading log file: %v", err)
	}
	if len(data) > 0 {
		t.Fatalf("expected no output for nil miner, got %q", string(data))
	}
}
// TestAccessLog_OnLogin_Ugly verifies a nil AccessLog does not panic.
//
// var al *log.AccessLog
// al.OnLogin(proxy.Event{Miner: miner}) // no-op, no panic
func TestAccessLog_OnLogin_Ugly(t *testing.T) {
	var nilLog *AccessLog
	nilLog.OnLogin(proxy.Event{Miner: newTestMiner(t)})
}
// TestAccessLog_OnClose_Good verifies a CLOSE line includes rx and tx byte counts.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnClose(proxy.Event{Miner: miner}) // writes "CLOSE <ip> <user> rx=0 tx=0"
func TestAccessLog_OnClose_Good(t *testing.T) {
	path := filepath.Join(t.TempDir(), "access.log")
	accessLog := NewAccessLog(path)
	defer accessLog.Close()
	accessLog.OnClose(proxy.Event{Miner: newTestMiner(t)})
	accessLog.Close()
	data, readErr := os.ReadFile(path)
	if readErr != nil {
		t.Fatalf("expected log file to exist: %v", readErr)
	}
	line := strings.TrimSpace(string(data))
	for _, want := range []string{"CLOSE", "rx=", "tx="} {
		if !strings.Contains(line, want) {
			t.Fatalf("expected %s in log line, got %q", want, line)
		}
	}
}
// TestAccessLog_OnClose_Bad verifies a nil miner close event produces no output.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnClose(proxy.Event{Miner: nil}) // no-op
func TestAccessLog_OnClose_Bad(t *testing.T) {
	path := filepath.Join(t.TempDir(), "access.log")
	al := NewAccessLog(path)
	defer al.Close()
	al.OnClose(proxy.Event{Miner: nil})
	al.Close()
	// A nil miner must not even create the file. Read directly and tolerate
	// only "not exist": the previous os.Stat guard silently skipped the
	// assertion on ANY stat error, masking failures.
	data, err := os.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		t.Fatalf("unexpected error reading log file: %v", err)
	}
	if len(data) > 0 {
		t.Fatalf("expected no output for nil miner, got %q", string(data))
	}
}
// TestAccessLog_OnClose_Ugly verifies close on an empty-path log is a no-op.
//
// al := log.NewAccessLog("")
// al.OnClose(proxy.Event{Miner: miner}) // no-op, empty path
func TestAccessLog_OnClose_Ugly(t *testing.T) {
	emptyPathLog := NewAccessLog("")
	defer emptyPathLog.Close()
	emptyPathLog.OnClose(proxy.Event{Miner: newTestMiner(t)})
}
// TestShareLog_OnAccept_Good verifies an ACCEPT line is written with diff and latency.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000, Latency: 82})
func TestShareLog_OnAccept_Good(t *testing.T) {
	path := filepath.Join(t.TempDir(), "shares.log")
	shareLog := NewShareLog(path)
	defer shareLog.Close()
	shareLog.OnAccept(proxy.Event{Miner: newTestMiner(t), Diff: 100000, Latency: 82})
	shareLog.Close()
	data, readErr := os.ReadFile(path)
	if readErr != nil {
		t.Fatalf("expected log file to exist: %v", readErr)
	}
	line := strings.TrimSpace(string(data))
	for _, want := range []string{"ACCEPT", "diff=100000", "latency=82ms"} {
		if !strings.Contains(line, want) {
			t.Fatalf("expected %s in log line, got %q", want, line)
		}
	}
}
// TestShareLog_OnAccept_Bad verifies a nil miner accept event produces no output.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnAccept(proxy.Event{Miner: nil}) // no-op
func TestShareLog_OnAccept_Bad(t *testing.T) {
	path := filepath.Join(t.TempDir(), "shares.log")
	sl := NewShareLog(path)
	defer sl.Close()
	sl.OnAccept(proxy.Event{Miner: nil, Diff: 100000})
	sl.Close()
	// A nil miner must not even create the file. Read directly and tolerate
	// only "not exist": the previous os.Stat guard silently skipped the
	// assertion on ANY stat error, masking failures.
	data, err := os.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		t.Fatalf("unexpected error reading log file: %v", err)
	}
	if len(data) > 0 {
		t.Fatalf("expected no output for nil miner, got %q", string(data))
	}
}
// TestShareLog_OnAccept_Ugly verifies a nil ShareLog does not panic.
//
// var sl *log.ShareLog
// sl.OnAccept(proxy.Event{Miner: miner}) // no-op, no panic
func TestShareLog_OnAccept_Ugly(t *testing.T) {
	var nilLog *ShareLog
	nilLog.OnAccept(proxy.Event{Miner: newTestMiner(t), Diff: 100000})
}
// TestShareLog_OnReject_Good verifies a REJECT line is written with the rejection reason.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnReject(proxy.Event{Miner: miner, Error: "Low difficulty share"})
func TestShareLog_OnReject_Good(t *testing.T) {
	path := filepath.Join(t.TempDir(), "shares.log")
	shareLog := NewShareLog(path)
	defer shareLog.Close()
	shareLog.OnReject(proxy.Event{Miner: newTestMiner(t), Error: "Low difficulty share"})
	shareLog.Close()
	data, readErr := os.ReadFile(path)
	if readErr != nil {
		t.Fatalf("expected log file to exist: %v", readErr)
	}
	line := strings.TrimSpace(string(data))
	if !strings.Contains(line, "REJECT") {
		t.Fatalf("expected REJECT in log line, got %q", line)
	}
	if !strings.Contains(line, "Low difficulty share") {
		t.Fatalf("expected rejection reason in log line, got %q", line)
	}
}
// TestShareLog_OnReject_Bad verifies a nil miner reject event produces no output.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnReject(proxy.Event{Miner: nil}) // no-op
func TestShareLog_OnReject_Bad(t *testing.T) {
	path := filepath.Join(t.TempDir(), "shares.log")
	sl := NewShareLog(path)
	defer sl.Close()
	sl.OnReject(proxy.Event{Miner: nil, Error: "Low difficulty share"})
	sl.Close()
	// A nil miner must not even create the file. Read directly and tolerate
	// only "not exist": the previous os.Stat guard silently skipped the
	// assertion on ANY stat error, masking failures.
	data, err := os.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		t.Fatalf("unexpected error reading log file: %v", err)
	}
	if len(data) > 0 {
		t.Fatalf("expected no output for nil miner, got %q", string(data))
	}
}
// TestShareLog_OnReject_Ugly verifies an empty-path ShareLog silently discards the reject line.
//
// sl := log.NewShareLog("")
// sl.OnReject(proxy.Event{Miner: miner, Error: "reason"}) // no-op, empty path
func TestShareLog_OnReject_Ugly(t *testing.T) {
	emptyPathLog := NewShareLog("")
	defer emptyPathLog.Close()
	emptyPathLog.OnReject(proxy.Event{Miner: newTestMiner(t), Error: "reason"})
}
// TestAccessLog_Close_Good verifies Close releases the file handle and is safe to call twice.
//
// al := log.NewAccessLog("/tmp/test-access.log")
// al.OnLogin(proxy.Event{Miner: miner})
// al.Close()
// al.Close() // double close is safe
func TestAccessLog_Close_Good(t *testing.T) {
	accessLog := NewAccessLog(filepath.Join(t.TempDir(), "access.log"))
	accessLog.OnLogin(proxy.Event{Miner: newTestMiner(t)})
	accessLog.Close()
	accessLog.Close() // second close must be a no-op
}
// TestAccessLog_Close_Bad verifies Close on a nil AccessLog does not panic.
//
// var al *log.AccessLog
// al.Close() // no-op, no panic
func TestAccessLog_Close_Bad(t *testing.T) {
	var nilLog *AccessLog
	nilLog.Close()
}
// TestAccessLog_Close_Ugly verifies Close on a never-opened log does not panic.
//
// al := log.NewAccessLog("/nonexistent/dir/access.log")
// al.Close() // no file was ever opened
func TestAccessLog_Close_Ugly(t *testing.T) {
	neverOpened := NewAccessLog("/nonexistent/dir/access.log")
	neverOpened.Close()
}
// TestShareLog_Close_Good verifies Close releases the file handle and is safe to call twice.
//
// sl := log.NewShareLog("/tmp/test-shares.log")
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 1000})
// sl.Close()
// sl.Close() // double close is safe
func TestShareLog_Close_Good(t *testing.T) {
	shareLog := NewShareLog(filepath.Join(t.TempDir(), "shares.log"))
	shareLog.OnAccept(proxy.Event{Miner: newTestMiner(t), Diff: 1000})
	shareLog.Close()
	shareLog.Close() // second close must be a no-op
}
// TestShareLog_Close_Bad verifies Close on a nil ShareLog does not panic.
//
// var sl *log.ShareLog
// sl.Close() // no-op, no panic
func TestShareLog_Close_Bad(t *testing.T) {
	var nilLog *ShareLog
	nilLog.Close()
}
// TestShareLog_Close_Ugly verifies Close on a never-opened log does not panic.
//
// sl := log.NewShareLog("/nonexistent/dir/shares.log")
// sl.Close() // no file was ever opened
func TestShareLog_Close_Ugly(t *testing.T) {
	neverOpened := NewShareLog("/nonexistent/dir/shares.log")
	neverOpened.Close()
}
// newTestMiner creates a minimal miner for log testing using a net.Pipe
// connection; both pipe ends are closed automatically via t.Cleanup.
func newTestMiner(t *testing.T) *proxy.Miner {
	t.Helper()
	clientEnd, serverEnd := net.Pipe()
	t.Cleanup(func() {
		_ = clientEnd.Close()
		_ = serverEnd.Close()
	})
	m := proxy.NewMiner(clientEnd, 3333, nil)
	m.SetID(1)
	return m
}
// pipeAddr satisfies the net.Addr interface for pipe-based test connections.
//
// NOTE(review): not referenced by any test visible in this portion of the
// file — confirm it is used further down or remove it together with pipeConn.
type pipeAddr struct{}
func (a pipeAddr) Network() string { return "pipe" }
func (a pipeAddr) String() string { return "pipe" }
// pipeConn wraps an os.Pipe as a net.Conn for tests that need a closeable socket.
// The embedded *os.File supplies Read/Write/Close; the methods below stub out
// the remaining net.Conn surface (addresses and deadlines are no-ops).
//
// NOTE(review): not referenced by any test visible in this portion of the
// file — confirm it is used further down or remove it together with pipeAddr.
type pipeConn struct {
	*os.File
}
func (p *pipeConn) RemoteAddr() net.Addr { return pipeAddr{} }
func (p *pipeConn) LocalAddr() net.Addr { return pipeAddr{} }
func (p *pipeConn) SetDeadline(_ time.Time) error { return nil }
func (p *pipeConn) SetReadDeadline(_ time.Time) error { return nil }
func (p *pipeConn) SetWriteDeadline(_ time.Time) error { return nil }

View file

@ -1,15 +1,14 @@
package log package log
import ( import (
"fmt"
"os" "os"
"sync" "sync"
"time"
"dappco.re/go/core/proxy"
) )
// ShareLog writes append-only share result lines. // ShareLog writes share result lines to an append-only text file.
//
// Line format (accept): 2026-04-04T12:00:00Z ACCEPT <user> diff=<diff> latency=<ms>ms
// Line format (reject): 2026-04-04T12:00:00Z REJECT <user> reason="<message>"
// //
// sl := log.NewShareLog("/var/log/proxy-shares.log") // sl := log.NewShareLog("/var/log/proxy-shares.log")
// bus.Subscribe(proxy.EventAccept, sl.OnAccept) // bus.Subscribe(proxy.EventAccept, sl.OnAccept)
@ -18,93 +17,4 @@ type ShareLog struct {
path string path string
mu sync.Mutex mu sync.Mutex
file *os.File file *os.File
closed bool
}
// NewShareLog opens the file lazily on first write.
//
// sl := log.NewShareLog("/var/log/proxy-shares.log")
func NewShareLog(path string) *ShareLog {
return &ShareLog{path: path}
}
// OnAccept writes `2026-04-04T12:00:00Z ACCEPT WALLET diff=100000 latency=82ms`.
//
// sl.OnAccept(proxy.Event{Miner: miner, Diff: 100000})
func (l *ShareLog) OnAccept(event proxy.Event) {
if event.Miner == nil {
return
}
line := fmt.Sprintf("%s ACCEPT %s diff=%d latency=%dms\n",
utcTimestamp(),
event.Miner.User(),
event.Diff,
event.Latency,
)
l.writeLine(line)
}
// OnReject writes `2026-04-04T12:00:00Z REJECT WALLET reason="Low difficulty share"`.
//
// sl.OnReject(proxy.Event{Miner: miner, Error: "Low difficulty share"})
func (l *ShareLog) OnReject(event proxy.Event) {
if event.Miner == nil {
return
}
line := fmt.Sprintf("%s REJECT %s reason=%q\n",
utcTimestamp(),
event.Miner.User(),
event.Error,
)
l.writeLine(line)
}
// Close releases the append-only file handle if it has been opened.
//
// sl.Close()
func (l *ShareLog) Close() {
if l == nil {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.closed {
return
}
l.closed = true
if l.file != nil {
_ = l.file.Close()
l.file = nil
}
}
func (l *ShareLog) writeLine(line string) {
if l == nil || l.path == "" {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.closed {
return
}
if l.file == nil {
file, errorValue := os.OpenFile(l.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
if errorValue != nil {
return
}
l.file = file
}
_, _ = l.file.WriteString(line)
}
func utcTimestamp() string {
return time.Now().UTC().Format(time.RFC3339)
} }

View file

@ -28,33 +28,38 @@ type Miner struct {
id int64 // monotonically increasing per-process; atomic assignment id int64 // monotonically increasing per-process; atomic assignment
rpcID string // UUID v4 sent to miner as session id rpcID string // UUID v4 sent to miner as session id
state MinerState state MinerState
stateMu sync.RWMutex
extAlgo bool // miner sent algo list in login params extAlgo bool // miner sent algo list in login params
algoExtension bool // config allows forwarding algo negotiation loginAlgos []string
extNH bool // NiceHash mode active (fixed byte splitting) extNH bool // NiceHash mode active (fixed byte splitting)
algoEnabled bool // proxy is configured to negotiate the algo extension
ip string // remote IP (without port, for logging) ip string // remote IP (without port, for logging)
remoteAddr string
localPort uint16 localPort uint16
user string // login params.login (wallet address), custom diff suffix stripped user string // login params.login (wallet address), custom diff suffix stripped
password string // login params.pass password string // login params.pass
agent string // login params.agent agent string // login params.agent
rigID string // login params.rigid (optional extension) rigID string // login params.rigid (optional extension)
algo []string
fixedByte uint8 // NiceHash slot index (0-255) fixedByte uint8 // NiceHash slot index (0-255)
mapperID int64 // which NonceMapper owns this miner; -1 = unassigned mapperID int64 // which NonceMapper owns this miner; -1 = unassigned
routeID int64 // SimpleMapper ID in simple mode; -1 = unassigned routeID int64 // SimpleMapper ID in simple mode; -1 = unassigned
customDiff uint64 // 0 = use pool diff; non-zero = cap diff to this value customDiff uint64 // 0 = use pool diff; non-zero = cap diff to this value
customDiffResolved bool
customDiffFromLogin bool
accessPassword string
globalDiff uint64
diff uint64 // last difficulty sent to this miner from the pool diff uint64 // last difficulty sent to this miner from the pool
rx uint64 // bytes received from miner rx uint64 // bytes received from miner
tx uint64 // bytes sent to miner tx uint64 // bytes sent from miner
currentJob Job
connectedAt time.Time connectedAt time.Time
lastActivityAt time.Time lastActivityAt time.Time
events *EventBus
splitter Splitter
currentJob *Job
closeOnce sync.Once
accessPassword string
conn net.Conn conn net.Conn
tlsConn *tls.Conn // nil if plain TCP tlsConn *tls.Conn // nil if plain TCP
sendMu sync.Mutex // serialises writes to conn sendMu sync.Mutex // serialises writes to conn
buf [16384]byte // per-miner send buffer; avoids per-write allocations buf [16384]byte // per-miner send buffer; avoids per-write allocations
onLogin func(*Miner)
onLoginReady func(*Miner)
onSubmit func(*Miner, *SubmitEvent)
onClose func(*Miner)
closeOnce sync.Once
} }

467
miner_login_test.go Normal file
View file

@ -0,0 +1,467 @@
package proxy
import (
"bufio"
"encoding/json"
"net"
"strings"
"testing"
"time"
)
// TestMiner_HandleLogin_Good verifies the login happy path end to end: the
// reply carries a null error, status OK, an rpc id, the "algo" extension,
// and the embedded current job with the NiceHash fixed byte patched into
// blob chars 78-79; the miner's stored algo list and ready state are also
// checked.
func TestMiner_HandleLogin_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()
	miner := NewMiner(minerConn, 3333, nil)
	miner.algoEnabled = true
	miner.extNH = true
	miner.fixedByte = 0x2a
	miner.onLogin = func(m *Miner) {
		m.SetMapperID(1)
	}
	miner.currentJob = Job{
		Blob:     strings.Repeat("0", 160),
		JobID:    "job-1",
		Target:   "b88d0600",
		Algo:     "cn/r",
		Height:   7,
		SeedHash: "seed",
	}
	params, err := json.Marshal(loginParams{
		Login: "wallet",
		Pass:  "x",
		Agent: "xmrig",
		Algo:  []string{"cn/r"},
		RigID: "rig-1",
	})
	if err != nil {
		t.Fatalf("marshal login params: %v", err)
	}
	// handleLogin writes the reply synchronously on the miner's connection,
	// so it must run in a goroutine while this test reads the other pipe end
	// (net.Pipe is unbuffered; a same-goroutine call would deadlock).
	done := make(chan struct{})
	go func() {
		miner.handleLogin(stratumRequest{ID: 1, Method: "login", Params: params})
		close(done)
	}()
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read login response: %v", err)
	}
	<-done
	var payload struct {
		Error  json.RawMessage `json:"error"`
		Result struct {
			ID         string         `json:"id"`
			Status     string         `json:"status"`
			Extensions []string       `json:"extensions"`
			Job        map[string]any `json:"job"`
		} `json:"result"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal login response: %v", err)
	}
	if string(payload.Error) != "null" {
		t.Fatalf("expected login response error to be null, got %s", string(payload.Error))
	}
	if payload.Result.Status != "OK" {
		t.Fatalf("expected login success, got %q", payload.Result.Status)
	}
	if payload.Result.ID == "" {
		t.Fatalf("expected rpc id in login response")
	}
	if len(payload.Result.Extensions) != 1 || payload.Result.Extensions[0] != "algo" {
		t.Fatalf("expected algo extension, got %#v", payload.Result.Extensions)
	}
	if got := miner.LoginAlgos(); len(got) != 1 || got[0] != "cn/r" {
		t.Fatalf("expected login algo list to be stored, got %#v", got)
	}
	if got := payload.Result.Job["job_id"]; got != "job-1" {
		t.Fatalf("expected embedded job, got %#v", got)
	}
	if got := payload.Result.Job["algo"]; got != "cn/r" {
		t.Fatalf("expected embedded algo, got %#v", got)
	}
	// Blob hex chars 78-79 are the NiceHash fixed-byte slot (blob byte 39).
	blob, _ := payload.Result.Job["blob"].(string)
	if blob[78:80] != "2a" {
		t.Fatalf("expected fixed-byte patched blob, got %q", blob[78:80])
	}
	if miner.State() != MinerStateReady {
		t.Fatalf("expected miner ready after login reply with job, got %d", miner.State())
	}
}
// TestProxy_New_Watch_Good verifies that New attaches a config watcher when
// Watch is enabled and the config carries a known source path.
func TestProxy_New_Watch_Good(t *testing.T) {
	cfg := &Config{
		Mode:       "nicehash",
		Workers:    WorkersByRigID,
		Bind:       []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:      []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		Watch:      true,
		configPath: "/tmp/proxy.json",
	}
	proxyInstance, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected valid proxy, got error: %v", result.Error)
	}
	if proxyInstance.watcher == nil {
		t.Fatalf("expected config watcher when watch is enabled and source path is known")
	}
}
// TestMiner_HandleLogin_Ugly exhausts mapper-id assignment (256 miners are
// assigned up front — presumably saturating the shared slot table; confirm
// against the mapper implementation) and then asserts the next login is
// rejected with the proxy-full error, carries no result payload, and leaves
// the miner unassigned (mapper id -1).
func TestMiner_HandleLogin_Ugly(t *testing.T) {
	for i := 0; i < 256; i++ {
		miner := &Miner{}
		miner.SetID(int64(i + 1))
		miner.SetMapperID(int64(i + 1))
	}
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()
	defer clientConn.Close()
	miner := NewMiner(serverConn, 3333, nil)
	miner.extNH = true
	miner.onLogin = func(*Miner) {}
	params, err := json.Marshal(loginParams{
		Login: "wallet",
		Pass:  "x",
	})
	if err != nil {
		t.Fatalf("marshal login params: %v", err)
	}
	// Read concurrently so the synchronous handleLogin write cannot deadlock
	// on the unbuffered pipe.
	done := make(chan []byte, 1)
	go func() {
		line, readErr := bufio.NewReader(clientConn).ReadBytes('\n')
		if readErr != nil {
			done <- nil
			return
		}
		done <- line
	}()
	miner.handleLogin(stratumRequest{ID: 2, Method: "login", Params: params})
	line := <-done
	if line == nil {
		t.Fatal("expected login rejection response")
	}
	var payload struct {
		Error struct {
			Message string `json:"message"`
		} `json:"error"`
		Result map[string]any `json:"result"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal login response: %v", err)
	}
	if payload.Error.Message != "Proxy is full, try again later" {
		t.Fatalf("expected full-table error, got %q", payload.Error.Message)
	}
	if payload.Result != nil {
		t.Fatalf("expected no login success payload, got %#v", payload.Result)
	}
	if miner.MapperID() != -1 {
		t.Fatalf("expected rejected miner to remain unassigned, got mapper %d", miner.MapperID())
	}
}
// TestMiner_HandleLogin_FailedAssignmentDoesNotDispatchLoginEvent wires a
// real Proxy (events, stats, workers) around a miner whose login is rejected
// with the proxy-full error, and asserts the rejection produces no EventLogin
// side effects: miner counts stay zero and no worker records are created.
func TestMiner_HandleLogin_FailedAssignmentDoesNotDispatchLoginEvent(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()
	proxyInstance := &Proxy{
		config: &Config{
			Mode:    "nicehash",
			Workers: WorkersByUser,
			Bind:    []BindAddr{{Host: "127.0.0.1", Port: 3333}},
			Pools:   []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		},
		events:  NewEventBus(),
		stats:   NewStats(),
		workers: NewWorkers(WorkersByUser, nil),
		miners:  make(map[int64]*Miner),
	}
	proxyInstance.events.Subscribe(EventLogin, proxyInstance.stats.OnLogin)
	proxyInstance.workers.bindEvents(proxyInstance.events)
	miner := NewMiner(minerConn, 3333, nil)
	miner.extNH = true
	miner.onLogin = func(*Miner) {}
	// onLoginReady is the hook that would publish EventLogin; it must not
	// fire for a rejected login.
	miner.onLoginReady = func(m *Miner) {
		proxyInstance.events.Dispatch(Event{Type: EventLogin, Miner: m})
	}
	proxyInstance.miners[miner.ID()] = miner
	params, err := json.Marshal(loginParams{
		Login: "wallet",
		Pass:  "x",
	})
	if err != nil {
		t.Fatalf("marshal login params: %v", err)
	}
	go miner.handleLogin(stratumRequest{ID: 12, Method: "login", Params: params})
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read login rejection: %v", err)
	}
	var payload struct {
		Error struct {
			Message string `json:"message"`
		} `json:"error"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal login rejection: %v", err)
	}
	if payload.Error.Message != "Proxy is full, try again later" {
		t.Fatalf("expected full-table rejection, got %q", payload.Error.Message)
	}
	if now, max := proxyInstance.MinerCount(); now != 0 || max != 0 {
		t.Fatalf("expected failed login not to affect miner counts, got now=%d max=%d", now, max)
	}
	if records := proxyInstance.WorkerRecords(); len(records) != 0 {
		t.Fatalf("expected failed login not to create worker records, got %d", len(records))
	}
}
// TestMiner_HandleLogin_CustomDiffCap_Good sets a custom difficulty of 5000
// during the login hook and checks the embedded job's target is recomputed:
// at or below the cap, strictly below the job's original difficulty, and
// mirrored into miner.diff.
func TestMiner_HandleLogin_CustomDiffCap_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()
	miner := NewMiner(minerConn, 3333, nil)
	miner.onLogin = func(m *Miner) {
		m.SetRouteID(1)
		m.customDiff = 5000
	}
	// Target "01000000" encodes a very high pool difficulty, so the 5000 cap
	// must always win.
	miner.currentJob = Job{
		Blob:   strings.Repeat("0", 160),
		JobID:  "job-1",
		Target: "01000000",
	}
	params, err := json.Marshal(loginParams{
		Login: "wallet",
		Pass:  "x",
	})
	if err != nil {
		t.Fatalf("marshal login params: %v", err)
	}
	go miner.handleLogin(stratumRequest{ID: 3, Method: "login", Params: params})
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read login response: %v", err)
	}
	var payload struct {
		Result struct {
			Job struct {
				Target string `json:"target"`
			} `json:"job"`
		} `json:"result"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal login response: %v", err)
	}
	originalDiff := miner.currentJob.DifficultyFromTarget()
	cappedDiff := Job{Target: payload.Result.Job.Target}.DifficultyFromTarget()
	if cappedDiff == 0 || cappedDiff > 5000 {
		t.Fatalf("expected capped difficulty at or below 5000, got %d", cappedDiff)
	}
	if cappedDiff >= originalDiff {
		t.Fatalf("expected lowered target difficulty below %d, got %d", originalDiff, cappedDiff)
	}
	if miner.diff != cappedDiff {
		t.Fatalf("expected miner diff %d, got %d", cappedDiff, miner.diff)
	}
}
// TestMiner_HandleLogin_CustomDiffSuffix_Good logs in as "wallet+50000" and
// checks the "+diff" suffix convention: the login succeeds, the stored user
// is the bare wallet, and customDiff is parsed as 50000.
func TestMiner_HandleLogin_CustomDiffSuffix_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()
	miner := NewMiner(minerConn, 3333, nil)
	miner.onLogin = func(m *Miner) {
		m.SetRouteID(1)
	}
	params, err := json.Marshal(loginParams{
		Login: "wallet+50000",
		Pass:  "x",
	})
	if err != nil {
		t.Fatalf("marshal login params: %v", err)
	}
	go miner.handleLogin(stratumRequest{ID: 4, Method: "login", Params: params})
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read login response: %v", err)
	}
	var payload struct {
		Result struct {
			Status string `json:"status"`
		} `json:"result"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal login response: %v", err)
	}
	if payload.Result.Status != "OK" {
		t.Fatalf("expected login success, got %q", payload.Result.Status)
	}
	if got := miner.User(); got != "wallet" {
		t.Fatalf("expected stripped wallet name, got %q", got)
	}
	if got := miner.customDiff; got != 50000 {
		t.Fatalf("expected custom diff 50000, got %d", got)
	}
}
// TestMiner_HandleKeepalived_Good calls the keepalived handler directly and
// asserts the reply carries an explicit null error field and a KEEPALIVED
// result status.
func TestMiner_HandleKeepalived_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()
	miner := NewMiner(minerConn, 3333, nil)
	done := make(chan struct{})
	go func() {
		miner.handleKeepalived(stratumRequest{ID: 9, Method: "keepalived"})
		close(done)
	}()
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read keepalived response: %v", err)
	}
	<-done
	// Decode into raw messages first so the presence of "error" (not just
	// its value) can be asserted.
	var payload map[string]json.RawMessage
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal keepalived response: %v", err)
	}
	if _, ok := payload["error"]; !ok {
		t.Fatalf("expected keepalived response to include error field, got %s", string(line))
	}
	if string(payload["error"]) != "null" {
		t.Fatalf("expected keepalived response error to be null, got %s", string(payload["error"]))
	}
	var result struct {
		Status string `json:"status"`
	}
	if err := json.Unmarshal(payload["result"], &result); err != nil {
		t.Fatalf("unmarshal keepalived result: %v", err)
	}
	if result.Status != "KEEPALIVED" {
		t.Fatalf("expected KEEPALIVED status, got %q", result.Status)
	}
}
// TestMiner_ReadLoop_RFCLineLimit_Good sends a large-but-legal login request
// (below maxStratumLineLength) through the full read loop and expects a
// normal response rather than a dropped connection.
func TestMiner_ReadLoop_RFCLineLimit_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()
	miner := NewMiner(minerConn, 3333, nil)
	miner.onLogin = func(m *Miner) {
		m.SetRouteID(1)
	}
	miner.Start()
	// A 5000-byte agent inflates the request while keeping it under the limit.
	params, err := json.Marshal(loginParams{
		Login: "wallet",
		Pass:  "x",
		Agent: strings.Repeat("a", 5000),
	})
	if err != nil {
		t.Fatalf("marshal login params: %v", err)
	}
	request, err := json.Marshal(stratumRequest{ID: 4, Method: "login", Params: params})
	if err != nil {
		t.Fatalf("marshal request: %v", err)
	}
	// Guard the test's own premise before asserting behaviour.
	if len(request) >= maxStratumLineLength {
		t.Fatalf("expected test request below RFC limit, got %d bytes", len(request))
	}
	if _, err := clientConn.Write(append(request, '\n')); err != nil {
		t.Fatalf("write login request: %v", err)
	}
	_ = clientConn.SetReadDeadline(time.Now().Add(time.Second))
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read login response: %v", err)
	}
	if len(line) == 0 {
		t.Fatal("expected login response for request under RFC limit")
	}
}
// TestMiner_ReadLoop_RFCLineLimit_Ugly sends a request above
// maxStratumLineLength and accepts either failure mode: the write itself
// fails with a closed-pipe error (the proxy closed mid-write), or the write
// succeeds and the subsequent read shows the connection was closed.
func TestMiner_ReadLoop_RFCLineLimit_Ugly(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()
	miner := NewMiner(minerConn, 3333, nil)
	miner.Start()
	params, err := json.Marshal(loginParams{
		Login: "wallet",
		Pass:  "x",
		Agent: strings.Repeat("b", maxStratumLineLength),
	})
	if err != nil {
		t.Fatalf("marshal login params: %v", err)
	}
	request, err := json.Marshal(stratumRequest{ID: 5, Method: "login", Params: params})
	if err != nil {
		t.Fatalf("marshal request: %v", err)
	}
	if len(request) <= maxStratumLineLength {
		t.Fatalf("expected test request above RFC limit, got %d bytes", len(request))
	}
	// Write in a goroutine: on an unbuffered pipe the write blocks until the
	// peer reads or closes, so a timeout guards against a hung proxy.
	writeDone := make(chan error, 1)
	go func() {
		_, writeErr := clientConn.Write(append(request, '\n'))
		writeDone <- writeErr
	}()
	var writeErr error
	select {
	case writeErr = <-writeDone:
	case <-time.After(time.Second):
		t.Fatal("timed out writing oversized request")
	}
	if writeErr == nil {
		_ = clientConn.SetReadDeadline(time.Now().Add(time.Second))
		line, err := bufio.NewReader(clientConn).ReadBytes('\n')
		if err == nil || len(line) > 0 {
			t.Fatalf("expected oversized request to close the connection, got line=%q err=%v", string(line), err)
		}
		return
	}
	if !strings.Contains(writeErr.Error(), "closed pipe") {
		t.Fatalf("expected oversized request to close the connection, got write error %v", writeErr)
	}
}

View file

@ -1,124 +0,0 @@
package proxy
import (
"crypto/tls"
"net"
"sync/atomic"
"time"
)
var minerIDSequence atomic.Int64
// NewMiner creates a Miner for an accepted net.Conn. Does not start reading yet.
//
// m := proxy.NewMiner(conn, 3333, nil)
func NewMiner(conn net.Conn, localPort uint16, tlsCfg *tls.Config) *Miner {
	miner := &Miner{
		id:             minerIDSequence.Add(1),
		state:          MinerStateWaitLogin,
		localPort:      localPort,
		mapperID:       -1,
		routeID:        -1,
		conn:           conn,
		connectedAt:    time.Now().UTC(),
		lastActivityAt: time.Now().UTC(),
	}
	// Record the TLS handle only when TLS was actually configured for this port.
	if tlsConnection, ok := conn.(*tls.Conn); ok && tlsCfg != nil {
		miner.tlsConn = tlsConnection
	}
	if conn != nil {
		if addr := conn.RemoteAddr(); addr != nil {
			miner.ip = remoteHost(addr.String())
		}
	}
	return miner
}
// Identity and login accessors. These read fields written without locking
// during login handling; cross-goroutine readers get best-effort snapshots.
func (m *Miner) ID() int64        { return m.id }
func (m *Miner) RPCID() string    { return m.rpcID }
func (m *Miner) User() string     { return m.user }
func (m *Miner) Password() string { return m.password }
func (m *Miner) Agent() string    { return m.agent }
func (m *Miner) RigID() string    { return m.rigID }
func (m *Miner) IP() string       { return m.ip }
// State returns the miner's lifecycle state under the state read lock.
// A nil receiver reads as closing so callers can treat it as dead.
func (m *Miner) State() MinerState {
	if m == nil {
		return MinerStateClosing
	}
	m.stateMu.RLock()
	defer m.stateMu.RUnlock()
	return m.state
}
// Counter and routing accessors. Diff is the last difficulty sent to this
// miner; TX/RX are byte counters maintained by the write/read paths.
func (m *Miner) Diff() uint64              { return m.diff }
func (m *Miner) FixedByte() uint8          { return m.fixedByte }
func (m *Miner) MapperID() int64           { return m.mapperID }
func (m *Miner) RouteID() int64            { return m.routeID }
func (m *Miner) CustomDiff() uint64        { return m.customDiff }
func (m *Miner) TX() uint64                { return m.tx }
func (m *Miner) RX() uint64                { return m.rx }
func (m *Miner) LastActivityAt() time.Time { return m.lastActivityAt }
// Plain setters for login-time identity fields; unsynchronised, mirroring
// the getters above.
func (m *Miner) SetRPCID(value string)    { m.rpcID = value }
func (m *Miner) SetUser(value string)     { m.user = value }
func (m *Miner) SetPassword(value string) { m.password = value }
func (m *Miner) SetAgent(value string)    { m.agent = value }
func (m *Miner) SetRigID(value string)    { m.rigID = value }
// SetState updates the lifecycle state under the state lock; a nil receiver
// is a no-op.
func (m *Miner) SetState(value MinerState) {
	if m == nil {
		return
	}
	m.stateMu.Lock()
	defer m.stateMu.Unlock()
	m.state = value
}
// Setters for difficulty, NiceHash nonce routing, and assignment fields.
func (m *Miner) SetDiff(value uint64)          { m.diff = value }
func (m *Miner) SetFixedByte(value uint8)      { m.fixedByte = value }
func (m *Miner) SetMapperID(value int64)       { m.mapperID = value }
func (m *Miner) SetRouteID(value int64)        { m.routeID = value }
func (m *Miner) SetCustomDiff(value uint64)    { m.customDiff = value }
func (m *Miner) SetNiceHashEnabled(value bool) { m.extNH = value }
// PrimeJob stores job as the miner's current job and records its effective
// (custom-diff-capped) difficulty, so a subsequent login reply can embed it.
// Invalid jobs and nil receivers are ignored.
func (m *Miner) PrimeJob(job Job) {
	if m == nil || !job.IsValid() {
		return
	}
	// job is this call's own copy, so taking its address does not alias the caller.
	m.currentJob = &job
	m.diff = m.effectiveDifficulty(job)
}
// Touch records the current UTC time as the miner's last activity, pushing
// back the state-dependent read deadline.
func (m *Miner) Touch() {
	m.lastActivityAt = time.Now().UTC()
}
// remoteHost returns the host part of a "host:port" address; an address
// without a parseable port is returned unchanged.
func remoteHost(address string) string {
	if host, _, splitErr := net.SplitHostPort(address); splitErr == nil {
		return host
	}
	return address
}
// effectiveDifficulty returns the job's difficulty capped by the miner's
// custom difficulty when one is set. A zero job difficulty passes through
// unchanged so an unparseable target is never masked by the cap.
func (m *Miner) effectiveDifficulty(job Job) uint64 {
	difficulty := job.DifficultyFromTarget()
	if m == nil || m.customDiff == 0 {
		return difficulty
	}
	if difficulty == 0 || difficulty <= m.customDiff {
		return difficulty
	}
	return m.customDiff
}
// effectiveTarget returns the stratum target string the miner should work
// against: the job's own target when the difficulty is unknown, otherwise
// a target derived from the (possibly capped) effective difficulty.
func (m *Miner) effectiveTarget(job Job) string {
	difficulty := m.effectiveDifficulty(job)
	if difficulty == 0 {
		return job.Target
	}
	return TargetForDifficulty(difficulty)
}

View file

@ -1,443 +0,0 @@
package proxy
import (
"bufio"
"crypto/rand"
"encoding/hex"
"encoding/json"
"net"
"strings"
"time"
)
// minerRequest is the wire shape of a JSON-RPC request read from a miner;
// Params is kept raw and decoded per-method.
type minerRequest struct {
	ID     int64           `json:"id"`
	Method string          `json:"method"`
	Params json.RawMessage `json:"params"`
}
// Start begins the read loop in a goroutine and arms the login timeout timer.
//
// m.Start()
func (m *Miner) Start() {
	if m == nil || m.conn == nil {
		return
	}
	go func() {
		// Reader capacity matches the per-miner buffer; any single line longer
		// than this surfaces as isPrefix below and ends the session.
		reader := bufio.NewReaderSize(m.conn, len(m.buf))
		for {
			// Re-arm the state-dependent read deadline before every read.
			if errorValue := m.applyReadDeadline(); errorValue != nil {
				m.Close()
				return
			}
			line, isPrefix, errorValue := reader.ReadLine()
			if errorValue != nil {
				m.Close()
				return
			}
			// isPrefix: the line overflowed the reader buffer — treat the
			// oversized request as a protocol violation and drop the miner.
			if isPrefix {
				m.Close()
				return
			}
			if len(line) == 0 {
				continue
			}
			// +1 accounts for the newline stripped by ReadLine.
			m.rx += uint64(len(line) + 1)
			m.Touch()
			m.handleLine(line)
		}
	}()
}
// ForwardJob encodes the job as a stratum job notification and writes it to the miner.
//
// m.ForwardJob(job, "cn/r")
func (m *Miner) ForwardJob(job Job, algo string) {
	if m == nil || m.conn == nil {
		return
	}
	blob := job.Blob
	// NiceHash mode: patch the reserved nonce byte so each downstream miner
	// searches a disjoint nonce range.
	if m.extNH {
		blob = job.BlobWithFixedByte(m.fixedByte)
	}
	// Apply the custom-difficulty cap and record the effective diff we send.
	target := m.effectiveTarget(job)
	m.diff = m.effectiveDifficulty(job)
	m.SetState(MinerStateReady)
	// Keep a private copy so later mutations of job cannot alias this miner's view.
	jobCopy := job
	m.currentJob = &jobCopy
	m.Touch()
	params := map[string]interface{}{
		"blob":   blob,
		"job_id": job.JobID,
		"target": target,
		"id":     m.rpcID,
	}
	// Optional fields are omitted entirely when unset.
	if job.Height > 0 {
		params["height"] = job.Height
	}
	if job.SeedHash != "" {
		params["seed_hash"] = job.SeedHash
	}
	// Only advertise the algo when both sides negotiated the algo extension.
	if m.algoExtension && m.extAlgo && algo != "" {
		params["algo"] = algo
	}
	m.writeJSON(map[string]interface{}{
		"jsonrpc": "2.0",
		"method":  "job",
		"params":  params,
	})
}
// ReplyWithError sends a JSON-RPC error response for the given request id.
//
// m.ReplyWithError(2, "Low difficulty share")
func (m *Miner) ReplyWithError(id int64, message string) {
	errorBody := map[string]interface{}{
		"code":    -1,
		"message": message,
	}
	m.writeJSON(map[string]interface{}{
		"id":      id,
		"jsonrpc": "2.0",
		"error":   errorBody,
	})
}
// Success sends a JSON-RPC success response with the given status string.
//
// m.Success(2, "OK")
func (m *Miner) Success(id int64, status string) {
	response := map[string]interface{}{
		"id":      id,
		"jsonrpc": "2.0",
		"error":   nil,
	}
	response["result"] = map[string]string{"status": status}
	m.writeJSON(response)
}
// Close initiates graceful TCP shutdown. Safe to call multiple times.
//
// m.Close()
func (m *Miner) Close() {
	if m == nil || m.conn == nil {
		return
	}
	m.closeOnce.Do(func() {
		m.SetState(MinerStateClosing)
		// Announce the close before tearing the socket down so subscribers
		// still observe a usable Miner.
		if m.events != nil {
			m.events.Dispatch(Event{Type: EventClose, Miner: m})
		}
		_ = m.conn.Close()
	})
}
// writeJSON marshals value, appends a newline, and writes it to the miner
// connection under sendMu. Marshal/write errors are deliberately dropped:
// a failed write surfaces on the read loop, which closes the session.
func (m *Miner) writeJSON(value interface{}) {
	if m == nil || m.conn == nil {
		return
	}
	data, errorValue := json.Marshal(value)
	if errorValue != nil {
		return
	}
	m.sendMu.Lock()
	defer m.sendMu.Unlock()
	data = append(data, '\n')
	written, errorValue := m.conn.Write(data)
	if errorValue == nil {
		// tx counts only bytes the socket actually accepted.
		m.tx += uint64(written)
	}
}
// handleLine parses one newline-delimited JSON-RPC request and routes it to
// the matching handler. Malformed JSON or an over-long line drops the miner.
func (m *Miner) handleLine(line []byte) {
	if len(line) > len(m.buf) {
		m.Close()
		return
	}
	request := minerRequest{}
	if errorValue := json.Unmarshal(line, &request); errorValue != nil {
		m.Close()
		return
	}
	switch request.Method {
	case "login":
		m.handleLogin(request)
	case "submit":
		m.handleSubmit(request)
	case "keepalived":
		m.handleKeepalived(request)
	default:
		// Unknown methods get an error reply only when the client supplied a
		// request id; id-less notifications are ignored silently.
		if request.ID != 0 {
			m.ReplyWithError(request.ID, "Invalid request")
		}
	}
}
// handleLogin validates a stratum login request, resets per-session state,
// dispatches EventLogin so the proxy can assign/route the miner (or reject it
// by closing), and replies with an OK result — embedding the current job when
// one was primed during the event dispatch.
func (m *Miner) handleLogin(request minerRequest) {
	type loginParams struct {
		Login string   `json:"login"`
		Pass  string   `json:"pass"`
		Agent string   `json:"agent"`
		Algo  []string `json:"algo"`
		RigID string   `json:"rigid"`
	}
	params := loginParams{}
	if errorValue := json.Unmarshal(request.Params, &params); errorValue != nil {
		m.ReplyWithError(request.ID, "Invalid payment address provided")
		return
	}
	if params.Login == "" {
		m.ReplyWithError(request.ID, "Invalid payment address provided")
		return
	}
	// Optional access-password gate for this listening port.
	if m.accessPassword != "" && params.Pass != m.accessPassword {
		m.ReplyWithError(request.ID, "Invalid password")
		return
	}
	// Reset per-session state; a re-login starts from a clean slate.
	m.SetCustomDiff(0)
	m.currentJob = nil
	m.diff = 0
	m.SetPassword(params.Pass)
	m.SetAgent(params.Agent)
	m.SetRigID(params.RigID)
	// Reuse the existing slice's backing storage for the algo list.
	m.algo = append(m.algo[:0], params.Algo...)
	m.extAlgo = len(params.Algo) > 0
	m.SetUser(params.Login)
	m.SetRPCID(newRPCID())
	// Subscribers may assign ids, prime a job, or close the miner (rejection).
	if m.events != nil {
		m.events.Dispatch(Event{Type: EventLogin, Miner: m})
	}
	m.Touch()
	// A subscriber rejected the login by closing the miner; its error reply
	// has already been sent, so do not follow up with a success response.
	if m.State() == MinerStateClosing {
		return
	}
	result := map[string]interface{}{
		"id":     m.rpcID,
		"status": "OK",
	}
	if m.currentJob != nil && m.currentJob.IsValid() {
		jobCopy := *m.currentJob
		blob := jobCopy.Blob
		// NiceHash mode: patch the reserved nonce byte for this miner.
		if m.extNH {
			blob = jobCopy.BlobWithFixedByte(m.fixedByte)
		}
		jobResult := map[string]interface{}{
			"blob":   blob,
			"job_id": jobCopy.JobID,
			"target": m.effectiveTarget(jobCopy),
			"id":     m.rpcID,
		}
		if jobCopy.Height > 0 {
			jobResult["height"] = jobCopy.Height
		}
		if jobCopy.SeedHash != "" {
			jobResult["seed_hash"] = jobCopy.SeedHash
		}
		if m.algoExtension && m.extAlgo && jobCopy.Algo != "" {
			jobResult["algo"] = jobCopy.Algo
		}
		result["job"] = jobResult
		if m.algoExtension && m.extAlgo {
			result["extensions"] = []string{"algo"}
		}
		m.SetState(MinerStateReady)
	} else {
		// No job yet: wait for the first ForwardJob before accepting shares.
		m.SetState(MinerStateWaitReady)
		if m.algoExtension && m.extAlgo {
			result["extensions"] = []string{"algo"}
		}
	}
	m.writeJSON(map[string]interface{}{
		"id":      request.ID,
		"jsonrpc": "2.0",
		"error":   nil,
		"result":  result,
	})
}
// handleSubmit validates a share submission (session id, job id, nonce
// format) and forwards it through the event bus — or, when no bus is
// attached, directly to the splitter.
func (m *Miner) handleSubmit(request minerRequest) {
	if m.State() != MinerStateReady {
		m.ReplyWithError(request.ID, "Unauthenticated")
		return
	}
	type submitParams struct {
		ID     string `json:"id"`
		JobID  string `json:"job_id"`
		Nonce  string `json:"nonce"`
		Result string `json:"result"`
		Algo   string `json:"algo"`
	}
	params := submitParams{}
	if errorValue := json.Unmarshal(request.Params, &params); errorValue != nil {
		m.ReplyWithError(request.ID, "Malformed share")
		return
	}
	// The session id must match the rpc id issued at login.
	if params.ID != m.rpcID {
		m.ReplyWithError(request.ID, "Unauthenticated")
		return
	}
	if params.JobID == "" {
		m.ReplyWithError(request.ID, "Missing job id")
		return
	}
	// Nonce must be exactly 8 lowercase hex characters (4 bytes).
	if len(params.Nonce) != 8 || params.Nonce != strings.ToLower(params.Nonce) {
		m.ReplyWithError(request.ID, "Invalid nonce")
		return
	}
	if _, errorValue := hex.DecodeString(params.Nonce); errorValue != nil {
		m.ReplyWithError(request.ID, "Invalid nonce")
		return
	}
	// Honour the per-share algo field only when the extension was negotiated.
	submitAlgo := ""
	if m.algoExtension && m.extAlgo {
		submitAlgo = params.Algo
	}
	m.Touch()
	if m.events != nil {
		m.events.Dispatch(Event{
			Type:      EventSubmit,
			Miner:     m,
			JobID:     params.JobID,
			Nonce:     params.Nonce,
			Result:    params.Result,
			Algo:      submitAlgo,
			RequestID: request.ID,
		})
		return
	}
	// Fallback path: no event bus attached, hand the share to the splitter.
	if m.splitter != nil {
		m.splitter.OnSubmit(&SubmitEvent{
			Miner:     m,
			JobID:     params.JobID,
			Nonce:     params.Nonce,
			Result:    params.Result,
			Algo:      submitAlgo,
			RequestID: request.ID,
		})
	}
}
// handleKeepalived refreshes the activity timestamp and acknowledges the
// keepalive with a KEEPALIVED status response.
func (m *Miner) handleKeepalived(request minerRequest) {
	m.Touch()
	m.Success(request.ID, "KEEPALIVED")
}
// currentJobCopy returns a detached copy of the miner's current job, or nil
// when there is none (or the receiver is nil).
func (m *Miner) currentJobCopy() *Job {
	if m == nil {
		return nil
	}
	job := m.currentJob
	if job == nil {
		return nil
	}
	clone := *job
	return &clone
}
// applyReadDeadline pushes the state-dependent deadline onto the connection;
// a zero deadline (closing state) leaves the connection untouched.
func (m *Miner) applyReadDeadline() error {
	if m == nil || m.conn == nil {
		return nil
	}
	if deadline := m.readDeadline(); !deadline.IsZero() {
		return m.conn.SetReadDeadline(deadline)
	}
	return nil
}
// readDeadline returns the absolute deadline for the next read based on the
// miner's state: 10s to complete a login, 600s of idle tolerance once the
// session is established, and no deadline (zero time) otherwise.
func (m *Miner) readDeadline() time.Time {
	if m == nil {
		return time.Time{}
	}
	switch m.State() {
	case MinerStateWaitLogin:
		return m.lastActivityAt.Add(10 * time.Second)
	case MinerStateWaitReady, MinerStateReady:
		return m.lastActivityAt.Add(600 * time.Second)
	default:
		return time.Time{}
	}
}
// dispatchSubmitResult publishes an accept/reject event for a processed share
// and sends the matching JSON-RPC reply (OK on accept, error otherwise).
func (m *Miner) dispatchSubmitResult(eventType EventType, diff uint64, errorMessage string, requestID int64) {
	if m == nil || m.events == nil {
		return
	}
	// Snapshot the job so subscribers cannot mutate the miner's copy.
	jobCopy := m.currentJobCopy()
	m.events.Dispatch(Event{
		Type:    eventType,
		Miner:   m,
		Job:     jobCopy,
		Diff:    diff,
		Error:   errorMessage,
		Latency: 0, // NOTE(review): latency is never measured here — always zero.
	})
	if eventType == EventAccept {
		m.Success(requestID, "OK")
		return
	}
	m.ReplyWithError(requestID, errorMessage)
}
// setStateFromJob records job as the current job and marks the miner ready.
func (m *Miner) setStateFromJob(job Job) {
	jobCopy := job
	m.currentJob = &jobCopy
	m.SetState(MinerStateReady)
}
// Expire closes an idle miner unless it is already shutting down.
func (m *Miner) Expire() {
	if m == nil {
		return
	}
	if m.State() != MinerStateClosing {
		m.Close()
	}
}
// newRPCID returns a random RFC 4122 version-4 UUID string, used as the
// per-session stratum rpc id.
func newRPCID() string {
	var raw [16]byte
	_, _ = rand.Read(raw[:]) // crypto/rand.Read does not fail in practice
	raw[6] = (raw[6] & 0x0f) | 0x40 // version 4
	raw[8] = (raw[8] & 0x3f) | 0x80 // RFC 4122 variant
	var out [36]byte
	hex.Encode(out[0:8], raw[0:4])
	hex.Encode(out[9:13], raw[4:6])
	hex.Encode(out[14:18], raw[6:8])
	hex.Encode(out[19:23], raw[8:10])
	hex.Encode(out[24:36], raw[10:16])
	for _, dash := range [...]int{8, 13, 18, 23} {
		out[dash] = '-'
	}
	return string(out[:])
}
// RemoteAddr returns the peer address of the miner connection, or nil when
// there is no connection.
func (m *Miner) RemoteAddr() net.Addr {
	if m == nil {
		return nil
	}
	if m.conn == nil {
		return nil
	}
	return m.conn.RemoteAddr()
}

View file

@ -1,394 +0,0 @@
package proxy
import (
"bufio"
"encoding/json"
"net"
"strings"
"testing"
"time"
)
func TestMiner_Login_Good(t *testing.T) {
serverConn, clientConn := net.Pipe()
defer clientConn.Close()
miner := NewMiner(serverConn, 3333, nil)
before := miner.LastActivityAt()
miner.Start()
defer miner.Close()
time.Sleep(5 * time.Millisecond)
encoder := json.NewEncoder(clientConn)
if err := encoder.Encode(map[string]interface{}{
"id": 1,
"jsonrpc": "2.0",
"method": "login",
"params": map[string]interface{}{
"login": "wallet",
"pass": "x",
"agent": "xmrig",
},
}); err != nil {
t.Fatal(err)
}
clientConn.SetReadDeadline(time.Now().Add(time.Second))
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
if err != nil {
t.Fatal(err)
}
var response map[string]interface{}
if err := json.Unmarshal(line, &response); err != nil {
t.Fatal(err)
}
if response["jsonrpc"] != "2.0" {
t.Fatalf("unexpected response: %#v", response)
}
if !miner.LastActivityAt().After(before) {
t.Fatalf("expected login to refresh last activity timestamp, got before=%s after=%s", before, miner.LastActivityAt())
}
result := response["result"].(map[string]interface{})
id, _ := result["id"].(string)
if result["status"] != "OK" || len(id) != 36 || id[8] != '-' || id[13] != '-' || id[18] != '-' || id[23] != '-' || id[14] != '4' || !strings.ContainsAny(string(id[19]), "89ab") {
t.Fatalf("unexpected login response: %#v", response)
}
}
func TestMiner_Keepalived_Bad(t *testing.T) {
serverConn, clientConn := net.Pipe()
defer clientConn.Close()
miner := NewMiner(serverConn, 3333, nil)
miner.Start()
defer miner.Close()
encoder := json.NewEncoder(clientConn)
if err := encoder.Encode(map[string]interface{}{
"id": 2,
"jsonrpc": "2.0",
"method": "keepalived",
}); err != nil {
t.Fatal(err)
}
clientConn.SetReadDeadline(time.Now().Add(time.Second))
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
if err != nil {
t.Fatal(err)
}
var response map[string]interface{}
if err := json.Unmarshal(line, &response); err != nil {
t.Fatal(err)
}
result := response["result"].(map[string]interface{})
if result["status"] != "KEEPALIVED" {
t.Fatalf("unexpected keepalived response: %#v", response)
}
}
func TestMiner_Submit_Ugly(t *testing.T) {
serverConn, clientConn := net.Pipe()
defer clientConn.Close()
miner := NewMiner(serverConn, 3333, nil)
miner.Start()
defer miner.Close()
miner.SetRPCID("session")
miner.SetState(MinerStateReady)
encoder := json.NewEncoder(clientConn)
if err := encoder.Encode(map[string]interface{}{
"id": 3,
"jsonrpc": "2.0",
"method": "submit",
"params": map[string]interface{}{
"id": "session",
"job_id": "job-1",
"nonce": "ABC123",
"result": "abc",
"algo": "cn/r",
},
}); err != nil {
t.Fatal(err)
}
clientConn.SetReadDeadline(time.Now().Add(time.Second))
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
if err != nil {
t.Fatal(err)
}
var response map[string]interface{}
if err := json.Unmarshal(line, &response); err != nil {
t.Fatal(err)
}
if response["error"] == nil {
t.Fatalf("expected invalid nonce error, got %#v", response)
}
}
func TestMiner_Login_Ugly(t *testing.T) {
serverConn, clientConn := net.Pipe()
defer clientConn.Close()
miner := NewMiner(serverConn, 3333, nil)
miner.algoExtension = true
miner.Start()
defer miner.Close()
encoder := json.NewEncoder(clientConn)
if err := encoder.Encode(map[string]interface{}{
"id": 4,
"jsonrpc": "2.0",
"method": "login",
"params": map[string]interface{}{
"login": "wallet",
"pass": "x",
"agent": "xmrig",
"algo": []string{"cn/r"},
},
}); err != nil {
t.Fatal(err)
}
clientConn.SetReadDeadline(time.Now().Add(time.Second))
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
if err != nil {
t.Fatal(err)
}
var response map[string]interface{}
if err := json.Unmarshal(line, &response); err != nil {
t.Fatal(err)
}
result := response["result"].(map[string]interface{})
extensions, ok := result["extensions"].([]interface{})
if !ok || len(extensions) != 1 || extensions[0] != "algo" {
t.Fatalf("expected algo extension to be advertised, got %#v", response)
}
}
func TestMiner_Login_NiceHashPatchedJob_Good(t *testing.T) {
serverConn, clientConn := net.Pipe()
defer clientConn.Close()
miner := NewMiner(serverConn, 3333, nil)
miner.algoExtension = true
miner.SetCustomDiff(10000)
miner.events = NewEventBus()
miner.events.Subscribe(EventLogin, func(event Event) {
if event.Miner == nil {
return
}
event.Miner.SetNiceHashEnabled(true)
event.Miner.SetFixedByte(0x2a)
event.Miner.PrimeJob(Job{
Blob: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
JobID: "job-1",
Target: "b88d0600",
Algo: "cn/r",
Height: 42,
SeedHash: "seed-hash",
})
})
miner.Start()
defer miner.Close()
encoder := json.NewEncoder(clientConn)
if err := encoder.Encode(map[string]interface{}{
"id": 5,
"jsonrpc": "2.0",
"method": "login",
"params": map[string]interface{}{
"login": "wallet",
"pass": "x",
"agent": "xmrig",
"algo": []string{"cn/r"},
},
}); err != nil {
t.Fatal(err)
}
clientConn.SetReadDeadline(time.Now().Add(time.Second))
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
if err != nil {
t.Fatal(err)
}
var response map[string]interface{}
if err := json.Unmarshal(line, &response); err != nil {
t.Fatal(err)
}
result := response["result"].(map[string]interface{})
job := result["job"].(map[string]interface{})
if blob, _ := job["blob"].(string); blob[78:80] != "2a" {
t.Fatalf("expected patched NiceHash blob, got %q", blob)
}
if target, _ := job["target"].(string); target != TargetForDifficulty(10000) {
t.Fatalf("expected custom diff target, got %q", target)
}
if height, _ := job["height"].(float64); height != 42 {
t.Fatalf("expected job height to be forwarded, got %#v", job)
}
if seedHash, _ := job["seed_hash"].(string); seedHash != "seed-hash" {
t.Fatalf("expected job seed_hash to be forwarded, got %#v", job)
}
}
func TestMiner_ForwardJob_Ugly(t *testing.T) {
serverConn, clientConn := net.Pipe()
defer clientConn.Close()
miner := NewMiner(serverConn, 3333, nil)
miner.algoExtension = true
miner.extAlgo = true
miner.SetRPCID("session")
miner.SetCustomDiff(10000)
go miner.ForwardJob(Job{
Blob: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
JobID: "job-2",
Target: "b88d0600",
Algo: "rx/0",
Height: 99,
SeedHash: "seed",
}, "rx/0")
clientConn.SetReadDeadline(time.Now().Add(time.Second))
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
if err != nil {
t.Fatal(err)
}
var response map[string]interface{}
if err := json.Unmarshal(line, &response); err != nil {
t.Fatal(err)
}
params := response["params"].(map[string]interface{})
if params["target"] != TargetForDifficulty(10000) {
t.Fatalf("expected custom diff target, got %#v", params)
}
if params["algo"] != "rx/0" || params["seed_hash"] != "seed" || params["height"] != float64(99) {
t.Fatalf("expected extended job fields to be forwarded, got %#v", params)
}
if miner.Diff() != 10000 {
t.Fatalf("expected miner diff to track the effective target, got %d", miner.Diff())
}
}
func TestMiner_Submit_Good(t *testing.T) {
serverConn, clientConn := net.Pipe()
defer clientConn.Close()
miner := NewMiner(serverConn, 3333, nil)
miner.events = NewEventBus()
miner.algoExtension = true
miner.extAlgo = true
miner.SetRPCID("session")
miner.SetState(MinerStateReady)
submitSeen := make(chan Event, 1)
miner.events.Subscribe(EventSubmit, func(event Event) {
submitSeen <- event
miner.Success(event.RequestID, "OK")
})
miner.Start()
defer miner.Close()
encoder := json.NewEncoder(clientConn)
if err := encoder.Encode(map[string]interface{}{
"id": 6,
"jsonrpc": "2.0",
"method": "submit",
"params": map[string]interface{}{
"id": "session",
"job_id": "job-1",
"nonce": "deadbeef",
"result": "abc",
"algo": "cn/r",
},
}); err != nil {
t.Fatal(err)
}
select {
case event := <-submitSeen:
if event.JobID != "job-1" || event.Nonce != "deadbeef" || event.Algo != "cn/r" {
t.Fatalf("unexpected submit event: %+v", event)
}
case <-time.After(time.Second):
t.Fatal("expected submit event to be dispatched")
}
clientConn.SetReadDeadline(time.Now().Add(time.Second))
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
if err != nil {
t.Fatal(err)
}
var response map[string]interface{}
if err := json.Unmarshal(line, &response); err != nil {
t.Fatal(err)
}
result := response["result"].(map[string]interface{})
if result["status"] != "OK" {
t.Fatalf("unexpected submit response: %#v", response)
}
}
func TestMiner_Submit_AlgoExtension_Bad(t *testing.T) {
serverConn, clientConn := net.Pipe()
defer clientConn.Close()
miner := NewMiner(serverConn, 3333, nil)
miner.events = NewEventBus()
miner.SetRPCID("session")
miner.SetState(MinerStateReady)
submitSeen := make(chan Event, 1)
miner.events.Subscribe(EventSubmit, func(event Event) {
submitSeen <- event
miner.Success(event.RequestID, "OK")
})
miner.Start()
defer miner.Close()
encoder := json.NewEncoder(clientConn)
if err := encoder.Encode(map[string]interface{}{
"id": 7,
"jsonrpc": "2.0",
"method": "submit",
"params": map[string]interface{}{
"id": "session",
"job_id": "job-1",
"nonce": "deadbeef",
"result": "abc",
"algo": "cn/r",
},
}); err != nil {
t.Fatal(err)
}
select {
case event := <-submitSeen:
if event.Algo != "" {
t.Fatalf("expected algo to be suppressed when extension is disabled, got %+v", event)
}
case <-time.After(time.Second):
t.Fatal("expected submit event to be dispatched")
}
}

43
miner_wire_test.go Normal file
View file

@ -0,0 +1,43 @@
package proxy
import (
"bufio"
"encoding/json"
"net"
"testing"
)
// TestMiner_Success_WritesNullError_Good verifies that a success response
// carries an explicit null error field alongside the OK status.
func TestMiner_Success_WritesNullError_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()

	miner := NewMiner(minerConn, 3333, nil)

	wrote := make(chan struct{})
	go func() {
		defer close(wrote)
		miner.Success(7, "OK")
	}()

	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read success response: %v", err)
	}
	<-wrote

	var payload struct {
		Error  json.RawMessage `json:"error"`
		Result struct {
			Status string `json:"status"`
		} `json:"result"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal success response: %v", err)
	}
	if string(payload.Error) != "null" {
		t.Fatalf("expected success response error to be null, got %s", string(payload.Error))
	}
	if payload.Result.Status != "OK" {
		t.Fatalf("expected success status OK, got %q", payload.Result.Status)
	}
}

87
miners_document_test.go Normal file
View file

@ -0,0 +1,87 @@
package proxy
import "testing"
// TestProxy_MinersDocument_Good verifies that a single miner renders as one
// ten-column row with the password column masked.
func TestProxy_MinersDocument_Good(t *testing.T) {
	p := &Proxy{
		miners: map[int64]*Miner{
			1: {
				id:       1,
				ip:       "10.0.0.1:49152",
				tx:       4096,
				rx:       512,
				state:    MinerStateReady,
				diff:     100000,
				user:     "WALLET",
				password: "secret",
				rigID:    "rig-alpha",
				agent:    "XMRig/6.21.0",
			},
		},
	}
	document := p.MinersDocument()
	if count := len(document.Miners); count != 1 {
		t.Fatalf("expected one miner row, got %d", count)
	}
	row := document.Miners[0]
	if columns := len(row); columns != 10 {
		t.Fatalf("expected 10 miner columns, got %d", columns)
	}
	if row[7] != "********" {
		t.Fatalf("expected masked password, got %#v", row[7])
	}
}
// TestProxy_MinersDocument_Bad verifies that a nil proxy still yields an
// empty document whose format keeps the stable ten-column layout.
func TestProxy_MinersDocument_Bad(t *testing.T) {
	var p *Proxy
	document := p.MinersDocument()
	if got := len(document.Miners); got != 0 {
		t.Fatalf("expected no miners for a nil proxy, got %d", got)
	}
	if got := len(document.Format); got != 10 {
		t.Fatalf("expected miner format columns to remain stable, got %d", got)
	}
}
func TestProxy_MinersDocument_Ugly(t *testing.T) {
p := &Proxy{
miners: map[int64]*Miner{
1: {
id: 1,
ip: "10.0.0.1:49152",
tx: 4096,
rx: 512,
state: MinerStateReady,
diff: 100000,
user: "WALLET",
password: "secret-a",
rigID: "rig-alpha",
agent: "XMRig/6.21.0",
},
2: {
id: 2,
ip: "10.0.0.2:49152",
tx: 2048,
rx: 256,
state: MinerStateWaitReady,
diff: 50000,
user: "WALLET2",
password: "secret-b",
rigID: "rig-beta",
agent: "XMRig/6.22.0",
},
},
}
document := p.MinersDocument()
if len(document.Miners) != 2 {
t.Fatalf("expected two miner rows, got %d", len(document.Miners))
}
for i, row := range document.Miners {
if row[7] != "********" {
t.Fatalf("expected masked password in row %d, got %#v", i, row[7])
}
}
}

View file

@ -1,44 +1,41 @@
// Package pool implements the outbound stratum pool client and failover strategy. // Package pool implements the outbound pool client and failover strategy.
// //
// client := pool.NewStratumClient(poolCfg, listener) // client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.example:3333", User: "WALLET", Pass: "x"}, listener)
// client.Connect() // if result := client.Connect(); result.OK {
// client.Login()
// }
package pool package pool
import ( import (
"bufio"
"crypto/sha256"
"crypto/tls" "crypto/tls"
"encoding/hex"
"encoding/json"
"errors"
"net" "net"
"strings"
"sync" "sync"
"sync/atomic"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/proxy"
) )
// StratumClient is one outbound stratum TCP (optionally TLS) connection to a pool.
// The proxy presents itself to the pool as a standard stratum miner using the
// wallet address and password from PoolConfig.
//
// client := pool.NewStratumClient(poolCfg, listener) // client := pool.NewStratumClient(poolCfg, listener)
// client.Connect() //
// if result := client.Connect(); result.OK {
// client.Login()
// }
type StratumClient struct { type StratumClient struct {
config proxy.PoolConfig config proxy.PoolConfig
listener StratumListener listener StratumListener
conn net.Conn conn net.Conn
tlsConn *tls.Conn // nil if plain TCP tlsConn *tls.Conn // nil if plain TCP
sessionID string // pool-assigned session id from login reply sessionID string // pool-assigned session id from login reply
requestSequence int64 // atomic JSON-RPC request id counter seq int64 // atomic JSON-RPC request id counter
active bool // true once first job received active bool // true once first job received
disconnectOnce sync.Once pending map[int64]struct{}
closedOnce sync.Once
mu sync.Mutex
sendMu sync.Mutex sendMu sync.Mutex
} }
// StratumListener receives events from the pool connection. // type listener struct{}
//
// func (listener) OnJob(job proxy.Job) {}
type StratumListener interface { type StratumListener interface {
// OnJob is called when the pool pushes a new job notification or the login reply contains a job. // OnJob is called when the pool pushes a new job notification or the login reply contains a job.
OnJob(job proxy.Job) OnJob(job proxy.Job)
@ -48,241 +45,3 @@ type StratumListener interface {
// OnDisconnect is called when the pool TCP connection closes for any reason. // OnDisconnect is called when the pool TCP connection closes for any reason.
OnDisconnect() OnDisconnect()
} }
// jsonRPCRequest is one outbound JSON-RPC frame written to the pool.
type jsonRPCRequest struct {
ID int64 `json:"id"`
Method string `json:"method"`
Params interface{} `json:"params,omitempty"`
}
// jsonRPCResponse is one inbound pool frame. It doubles as both a reply
// (Result/Error populated) and a server notification (Method/Params populated).
type jsonRPCResponse struct {
ID int64 `json:"id"`
Method string `json:"method"`
Params json.RawMessage `json:"params"`
Result json.RawMessage `json:"result"`
Error *jsonRPCErrorBody `json:"error"`
}
// jsonRPCErrorBody is the error object embedded in a JSON-RPC reply.
type jsonRPCErrorBody struct {
Code int `json:"code"`
Message string `json:"message"`
}
// NewStratumClient builds one outbound pool client.
//
// client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.lthn.io:3333", User: "WALLET", Pass: "x"}, listener)
func NewStratumClient(cfg proxy.PoolConfig, listener StratumListener) *StratumClient {
	client := &StratumClient{config: cfg}
	client.listener = listener
	return client
}
// Connect dials the pool and starts the read loop.
//
// client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.lthn.io:3333", TLS: true}, listener)
// errorValue := client.Connect()
//
// With TLS enabled the certificate chain is verified by the standard
// handshake; when PoolConfig.TLSFingerprint is set the leaf certificate is
// additionally pinned against its SHA-256 (hex, case-insensitive, optional
// ":" separators).
func (c *StratumClient) Connect() error {
	dialer := net.Dialer{}
	if c.config.Keepalive {
		dialer.KeepAlive = 30 * time.Second
	}
	// The TCP dial is identical for the plain and TLS paths; do it once.
	connection, errorValue := dialer.Dial("tcp", c.config.URL)
	if errorValue != nil {
		return errorValue
	}
	if c.config.TLS {
		serverName := c.config.URL
		if host, _, splitError := net.SplitHostPort(c.config.URL); splitError == nil && host != "" {
			serverName = host
		}
		tlsConnection := tls.Client(connection, &tls.Config{MinVersion: tls.VersionTLS12, ServerName: serverName})
		if errorValue = tlsConnection.Handshake(); errorValue != nil {
			_ = connection.Close()
			return errorValue
		}
		if c.config.TLSFingerprint != "" {
			state := tlsConnection.ConnectionState()
			if len(state.PeerCertificates) == 0 {
				_ = connection.Close()
				return errors.New("missing peer certificate")
			}
			fingerprint := sha256.Sum256(state.PeerCertificates[0].Raw)
			// Accept "AB:CD:..." style fingerprints as well as bare hex.
			expected := strings.ToLower(strings.ReplaceAll(c.config.TLSFingerprint, ":", ""))
			if hex.EncodeToString(fingerprint[:]) != expected {
				_ = connection.Close()
				return errors.New("pool fingerprint mismatch")
			}
		}
		connection = tlsConnection
		c.tlsConn = tlsConnection
	}
	c.conn = connection
	// Re-arm the once so a reconnected client can signal a new disconnect.
	c.disconnectOnce = sync.Once{}
	go c.readLoop()
	return nil
}
// Login sends the stratum login request using the configured wallet and password.
//
// client.Login()
func (c *StratumClient) Login() {
	// The newer Password field takes precedence over the legacy Pass field.
	credentials := c.config.Password
	if credentials == "" {
		credentials = c.config.Pass
	}
	loginParams := map[string]interface{}{
		"login": c.config.User,
		"pass":  credentials,
		"rigid": c.config.RigID,
	}
	if algo := c.config.Algo; algo != "" {
		loginParams["algo"] = []string{algo}
	}
	// The login request always carries id 1 so the reply can be recognized.
	_ = c.writeJSON(jsonRPCRequest{ID: 1, Method: "login", Params: loginParams})
}
// Submit sends a share submission. Returns the sequence number for result correlation.
//
// seq := client.Submit("job-1", "deadbeef", "HASH64HEX", "cn/r")
func (c *StratumClient) Submit(jobID string, nonce string, result string, algo string) int64 {
	sequence := atomic.AddInt64(&c.requestSequence, 1)
	submitParams := map[string]string{
		"id":     c.sessionID,
		"job_id": jobID,
		"nonce":  nonce,
		"result": result,
	}
	// The algo extension is forwarded only when the caller supplied one.
	if algo != "" {
		submitParams["algo"] = algo
	}
	_ = c.writeJSON(jsonRPCRequest{ID: sequence, Method: "submit", Params: submitParams})
	return sequence
}
// Disconnect closes the connection and emits one disconnect callback.
//
// client.Disconnect()
func (c *StratumClient) Disconnect() {
	if connection := c.conn; connection != nil {
		_ = connection.Close()
	}
	c.notifyDisconnect()
}
// writeJSON marshals value and writes it to the pool as one newline-terminated
// frame. Writes are serialized by sendMu; a nil connection is a silent no-op.
func (c *StratumClient) writeJSON(value interface{}) error {
	if c.conn == nil {
		return nil
	}
	payload, marshalError := json.Marshal(value)
	if marshalError != nil {
		return marshalError
	}
	c.sendMu.Lock()
	defer c.sendMu.Unlock()
	_, writeError := c.conn.Write(append(payload, '\n'))
	return writeError
}
// readLoop reads newline-delimited JSON frames until the connection fails.
// A line that overflows the 16 KiB buffer (isPrefix) is treated as a protocol
// violation and terminates the connection.
func (c *StratumClient) readLoop() {
	reader := bufio.NewReaderSize(c.conn, 16384)
	for {
		line, isPrefix, readError := reader.ReadLine()
		if readError != nil || isPrefix {
			c.notifyDisconnect()
			return
		}
		var response jsonRPCResponse
		if json.Unmarshal(line, &response) != nil {
			// Malformed frames are skipped rather than killing the connection.
			continue
		}
		c.handleMessage(response)
	}
}
// notifyDisconnect delivers OnDisconnect to the listener at most once per
// connection.
func (c *StratumClient) notifyDisconnect() {
	c.disconnectOnce.Do(func() {
		if listener := c.listener; listener != nil {
			listener.OnDisconnect()
		}
	})
}
// handleMessage dispatches one decoded pool frame.
//
// Three shapes are recognized: "job" notifications, the login reply
// (ID 1 while no session id has been captured), and submit replies
// (any other non-zero ID), which are reported via OnResultAccepted.
func (c *StratumClient) handleMessage(response jsonRPCResponse) {
if response.Method == "job" {
var payload proxy.Job
// Unparseable or invalid jobs are dropped silently.
if json.Unmarshal(response.Params, &payload) == nil && payload.IsValid() {
payload.ClientID = c.sessionID
c.active = true
if c.listener != nil {
c.listener.OnJob(payload)
}
}
return
}
// Login always uses request id 1; only treat the frame as the login reply
// while no session id has been captured yet.
if response.ID == 1 && c.sessionID == "" {
if len(response.Result) > 0 {
var loginResult struct {
ID string `json:"id"`
Job proxy.Job `json:"job"`
}
if json.Unmarshal(response.Result, &loginResult) == nil && loginResult.ID != "" {
c.sessionID = loginResult.ID
// Pools may embed the first job directly in the login reply.
if loginResult.Job.IsValid() {
loginResult.Job.ClientID = c.sessionID
c.active = true
if c.listener != nil {
c.listener.OnJob(loginResult.Job)
}
}
}
}
// A login error is fatal for this connection.
if response.Error != nil {
c.Disconnect()
}
return
}
// Remaining frames should be submit replies; id 0 cannot be correlated.
if response.ID == 0 || c.listener == nil {
return
}
accepted := response.Error == nil
errorMessage := ""
if response.Error != nil {
errorMessage = response.Error.Message
}
c.listener.OnResultAccepted(response.ID, accepted, errorMessage)
}

View file

@ -1,248 +0,0 @@
package pool
import (
"encoding/json"
"net"
"sync"
"testing"
"time"
"dappco.re/go/core/proxy"
)
// disconnectCountingListener counts OnDisconnect callbacks so tests can
// assert the at-most-once disconnect guarantee.
type disconnectCountingListener struct {
	mu    sync.Mutex
	count int
}

func (l *disconnectCountingListener) OnJob(job proxy.Job) {}

func (l *disconnectCountingListener) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
}

// OnDisconnect records one disconnect callback.
func (l *disconnectCountingListener) OnDisconnect() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.count++
}

// Count returns the number of disconnect callbacks observed so far.
func (l *disconnectCountingListener) Count() int {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.count
}
// TestStratumClient_ReadLoop_Ugly verifies that a line longer than the read
// buffer terminates the connection with exactly one disconnect callback.
func TestStratumClient_ReadLoop_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()

	listener := &disconnectCountingListener{}
	client := &StratumClient{listener: listener, conn: serverConn}
	go client.readLoop()

	// One byte past the 16 KiB read buffer, plus the line terminator.
	oversized := make([]byte, 16385, 16386)
	for index := range oversized {
		oversized[index] = 'a'
	}
	oversized = append(oversized, '\n')

	writeErr := make(chan error, 1)
	go func() {
		_, err := clientConn.Write(oversized)
		writeErr <- err
	}()

	time.Sleep(50 * time.Millisecond)
	if got := listener.Count(); got != 1 {
		t.Fatalf("expected oversized line to close the connection, got %d disconnect callbacks", got)
	}
	select {
	case err := <-writeErr:
		if err != nil {
			t.Fatal(err)
		}
	default:
	}
}
// TestStratumClient_Disconnect_Good verifies Disconnect emits exactly one
// disconnect callback even though the read loop also observes the close.
func TestStratumClient_Disconnect_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()

	listener := &disconnectCountingListener{}
	client := &StratumClient{listener: listener, conn: serverConn}
	go client.readLoop()
	time.Sleep(10 * time.Millisecond)

	client.Disconnect()
	time.Sleep(50 * time.Millisecond)

	if got := listener.Count(); got != 1 {
		t.Fatalf("expected one disconnect callback, got %d", got)
	}
}
// resultCapturingListener records the most recent OnResultAccepted callback
// along with counts of results and disconnects.
type resultCapturingListener struct {
	mu           sync.Mutex
	sequence     int64
	accepted     bool
	errorMessage string
	results      int
	disconnects  int
}

func (l *resultCapturingListener) OnJob(job proxy.Job) {}

// OnResultAccepted stores the latest share result and bumps the counter.
func (l *resultCapturingListener) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.sequence = sequence
	l.accepted = accepted
	l.errorMessage = errorMessage
	l.results++
}

// OnDisconnect counts disconnect callbacks.
func (l *resultCapturingListener) OnDisconnect() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.disconnects++
}

// Snapshot returns the captured state under the lock.
func (l *resultCapturingListener) Snapshot() (int64, bool, string, int, int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.sequence, l.accepted, l.errorMessage, l.results, l.disconnects
}
// TestStratumClient_HandleMessage_Bad verifies a submit rejection is reported
// to the listener without disconnecting.
func TestStratumClient_HandleMessage_Bad(t *testing.T) {
	listener := &resultCapturingListener{}
	client := &StratumClient{listener: listener, sessionID: "session-1"}

	rejection := jsonRPCResponse{
		ID:    7,
		Error: &jsonRPCErrorBody{Code: -1, Message: "Low difficulty share"},
	}
	client.handleMessage(rejection)

	sequence, accepted, errorMessage, results, disconnects := listener.Snapshot()
	if sequence != 7 || accepted || errorMessage != "Low difficulty share" || results != 1 {
		t.Fatalf("expected rejected submit callback, got sequence=%d accepted=%v error=%q results=%d", sequence, accepted, errorMessage, results)
	}
	if disconnects != 0 {
		t.Fatalf("expected no disconnect on submit rejection, got %d", disconnects)
	}
}
// TestStratumClient_HandleMessage_Good verifies an error-free submit reply is
// reported as accepted.
func TestStratumClient_HandleMessage_Good(t *testing.T) {
	listener := &resultCapturingListener{}
	client := &StratumClient{listener: listener, sessionID: "session-1"}

	client.handleMessage(jsonRPCResponse{ID: 7})

	sequence, accepted, errorMessage, results, disconnects := listener.Snapshot()
	if sequence != 7 || !accepted || errorMessage != "" || results != 1 {
		t.Fatalf("expected accepted submit callback, got sequence=%d accepted=%v error=%q results=%d", sequence, accepted, errorMessage, results)
	}
	if disconnects != 0 {
		t.Fatalf("expected no disconnect on submit accept, got %d", disconnects)
	}
}
// TestStratumClient_HandleMessage_Ugly verifies a login error disconnects the
// client without being reported as a share result.
func TestStratumClient_HandleMessage_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()

	listener := &resultCapturingListener{}
	client := &StratumClient{listener: listener, conn: serverConn}
	defer client.Disconnect()

	loginFailure := jsonRPCResponse{
		ID:    1,
		Error: &jsonRPCErrorBody{Code: -1, Message: "Unauthenticated"},
	}
	client.handleMessage(loginFailure)

	_, _, _, results, disconnects := listener.Snapshot()
	if results != 0 {
		t.Fatalf("expected login rejection not to be reported as a share result, got %d results", results)
	}
	if disconnects != 1 {
		t.Fatalf("expected login rejection to disconnect once, got %d", disconnects)
	}
}
// TestStratumClient_Login_Good verifies the login request prefers the newer
// Password field over legacy Pass and forwards rigid plus the algo extension.
func TestStratumClient_Login_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer clientConn.Close()

	client := &StratumClient{
		config: proxy.PoolConfig{
			User:     "WALLET",
			Pass:     "legacy",
			Password: "preferred",
			RigID:    "rig-alpha",
			Algo:     "cn/r",
		},
		conn: serverConn,
	}

	writeDone := make(chan struct{})
	go func() {
		defer close(writeDone)
		client.Login()
	}()

	buffer := make([]byte, 2048)
	n, err := clientConn.Read(buffer)
	if err != nil {
		t.Fatal(err)
	}
	var request jsonRPCRequest
	if err := json.Unmarshal(buffer[:n], &request); err != nil {
		t.Fatal(err)
	}
	params, ok := request.Params.(map[string]interface{})
	if !ok {
		t.Fatalf("expected login params map, got %T", request.Params)
	}
	if got := params["pass"]; got != "preferred" {
		t.Fatalf("expected preferred password, got %v", got)
	}
	if got := params["rigid"]; got != "rig-alpha" {
		t.Fatalf("expected rigid field to be forwarded, got %v", got)
	}
	if params["algo"] == nil {
		t.Fatal("expected algo extension to be forwarded")
	}

	client.Disconnect()
	select {
	case <-writeDone:
	case <-time.After(time.Second):
		t.Fatal("expected login write to complete")
	}
}

549
pool/impl.go Normal file
View file

@ -0,0 +1,549 @@
package pool
import (
"bufio"
"crypto/sha256"
"crypto/tls"
"encoding/hex"
"encoding/json"
"io"
"net"
"strconv"
"strings"
"sync/atomic"
"time"
"dappco.re/go/proxy"
)
// NewStrategyFactory creates a StrategyFactory for the supplied config.
//
// factory := pool.NewStrategyFactory(&proxy.Config{Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}}})
// strategy := factory(listener)
func NewStrategyFactory(config *proxy.Config) StrategyFactory {
	return func(listener StratumListener) Strategy {
		if config == nil {
			// Tolerate a nil config: the strategy simply has no pools.
			return NewFailoverStrategy(nil, listener, nil)
		}
		return NewFailoverStrategy(config.Pools, listener, config)
	}
}
// NewStratumClient builds one outbound pool client for the supplied pool
// configuration. The pending map tracks in-flight submit request ids so
// replies can be correlated back to the caller.
//
// client := pool.NewStratumClient(proxy.PoolConfig{URL: "pool.example:3333", User: "WALLET", Pass: "x"}, listener)
//
// if result := client.Connect(); result.OK {
// client.Login()
// }
func NewStratumClient(poolConfig proxy.PoolConfig, listener StratumListener) *StratumClient {
return &StratumClient{
config: poolConfig,
listener: listener,
pending: make(map[int64]struct{}),
}
}
// IsActive reports whether the client has received at least one job.
func (c *StratumClient) IsActive() bool {
	if c == nil {
		return false
	}
	c.mu.Lock()
	active := c.active
	c.mu.Unlock()
	return active
}
// Connect dials the pool and, on success, starts the background read loop.
//
// result := client.Connect()
//
// When config.TLS is set the connection is wrapped in TLS; if
// config.TLSFingerprint is non-empty the leaf certificate's SHA-256 is
// pinned against it (lowercase hex).
func (c *StratumClient) Connect() proxy.Result {
if c == nil {
return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.client", "client is nil", nil)}
}
addr := c.config.URL
if addr == "" {
return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.client", "pool url is empty", nil)}
}
conn, err := net.Dial("tcp", addr)
if err != nil {
return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.client", "dial pool failed", err)}
}
if c.config.TLS {
host := addr
if strings.Contains(addr, ":") {
host, _, _ = net.SplitHostPort(addr)
}
// NOTE(review): InsecureSkipVerify disables certificate-chain
// verification even when no fingerprint pin is configured; the pin
// below is then the only identity check, and with an empty
// TLSFingerprint there is none at all. The previous client verified
// the chain. Confirm this relaxation is intended.
tlsCfg := &tls.Config{InsecureSkipVerify: true, ServerName: host}
tlsConn := tls.Client(conn, tlsCfg)
if err := tlsConn.Handshake(); err != nil {
_ = conn.Close()
return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.tls", "handshake failed", err)}
}
if fp := strings.TrimSpace(strings.ToLower(c.config.TLSFingerprint)); fp != "" {
cert := tlsConn.ConnectionState().PeerCertificates
if len(cert) == 0 {
_ = tlsConn.Close()
return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.tls", "missing certificate", nil)}
}
sum := sha256.Sum256(cert[0].Raw)
if hex.EncodeToString(sum[:]) != fp {
_ = tlsConn.Close()
return proxy.Result{OK: false, Error: proxy.NewScopedError("proxy.pool.tls", "tls fingerprint mismatch", nil)}
}
}
c.conn = tlsConn
c.tlsConn = tlsConn
} else {
c.conn = conn
}
go c.readLoop()
return proxy.Result{OK: true}
}
// Login sends the stratum login request using the configured credentials.
//
// client.Login()
//
// A login reply with a job triggers `OnJob` immediately.
func (c *StratumClient) Login() {
	if c == nil || c.conn == nil {
		return
	}
	// Prefer the newer Password field and fall back to the legacy Pass
	// field — the same precedence the previous pool client used.
	password := c.config.Password
	if password == "" {
		password = c.config.Pass
	}
	params := map[string]any{
		"login": c.config.User,
		"pass":  password,
	}
	if c.config.RigID != "" {
		params["rigid"] = c.config.RigID
	}
	if c.config.Algo != "" {
		params["algo"] = []string{c.config.Algo}
	}
	req := map[string]any{
		"id":      1,
		"jsonrpc": "2.0",
		"method":  "login",
		"params":  params,
	}
	_ = c.writeJSON(req)
}
// Submit sends one share submission and returns the JSON-RPC sequence number
// used, so the caller can correlate the pool's accept/reject reply.
//
// seq := client.Submit("job-1", "deadbeef", "HASH64HEX", "cn/r")
func (c *StratumClient) Submit(jobID, nonce, result, algo string) int64 {
	if c == nil {
		return 0
	}
	seq := atomic.AddInt64(&c.seq, 1)
	c.mu.Lock()
	c.pending[seq] = struct{}{}
	sessionID := c.sessionID
	c.mu.Unlock()
	params := map[string]any{
		"id":     sessionID,
		"job_id": jobID,
		"nonce":  nonce,
		"result": result,
	}
	// Only forward the algo extension when one was supplied; an empty
	// "algo" member can confuse pools that do not speak the extension,
	// and matches the behavior of the previous pool client.
	if algo != "" {
		params["algo"] = algo
	}
	req := map[string]any{
		"id":      seq,
		"jsonrpc": "2.0",
		"method":  "submit",
		"params":  params,
	}
	if err := c.writeJSON(req); err != nil {
		// The request never reached the pool; forget the pending slot so a
		// stale reply cannot be matched against it.
		c.mu.Lock()
		delete(c.pending, seq)
		c.mu.Unlock()
	}
	return seq
}
// Keepalive sends one stratum "keepalived" frame when the connection is live.
//
// client.Keepalive()
func (c *StratumClient) Keepalive() {
	if c == nil || c.conn == nil || !c.IsActive() {
		return
	}
	// Read the session id under the mutex: handleMessage and
	// resetConnectionState mutate it concurrently.
	c.mu.Lock()
	sessionID := c.sessionID
	c.mu.Unlock()
	req := map[string]any{
		"id":      atomic.AddInt64(&c.seq, 1),
		"jsonrpc": "2.0",
		"method":  "keepalived",
		"params": map[string]any{
			"id": sessionID,
		},
	}
	_ = c.writeJSON(req)
}
// Disconnect closes the pool connection and fires OnDisconnect exactly once.
//
// client.Disconnect()
func (c *StratumClient) Disconnect() {
	if c == nil {
		return
	}
	c.closedOnce.Do(func() {
		if conn := c.resetConnectionState(); conn != nil {
			_ = conn.Close()
		}
		if listener := c.listener; listener != nil {
			listener.OnDisconnect()
		}
	})
}
// notifyDisconnect clears the connection state and fires OnDisconnect once;
// used by the read/write paths when the connection fails underneath us.
func (c *StratumClient) notifyDisconnect() {
	c.closedOnce.Do(func() {
		_ = c.resetConnectionState()
		if listener := c.listener; listener != nil {
			listener.OnDisconnect()
		}
	})
}
// resetConnectionState clears all per-connection fields under the mutex and
// returns the previous connection (if any) for the caller to close.
func (c *StratumClient) resetConnectionState() net.Conn {
	if c == nil {
		return nil
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	previous := c.conn
	c.conn = nil
	c.tlsConn = nil
	c.sessionID = ""
	c.active = false
	c.pending = make(map[int64]struct{})
	return previous
}
// writeJSON serializes payload and writes it as one newline-terminated frame.
// Writes are serialized by sendMu; a write failure tears the connection down.
func (c *StratumClient) writeJSON(payload any) error {
	c.sendMu.Lock()
	defer c.sendMu.Unlock()
	if c.conn == nil {
		return proxy.NewScopedError("proxy.pool.client", "connection is nil", nil)
	}
	data, err := json.Marshal(payload)
	if err != nil {
		return proxy.NewScopedError("proxy.pool.client", "marshal request failed", err)
	}
	if _, err = c.conn.Write(append(data, '\n')); err != nil {
		c.notifyDisconnect()
		return proxy.NewScopedError("proxy.pool.client", "write request failed", err)
	}
	return nil
}
// readLoop consumes newline-delimited JSON frames from the pool until the
// connection drops, then reports the disconnect exactly once via the
// deferred notifyDisconnect.
func (c *StratumClient) readLoop() {
defer c.notifyDisconnect()
reader := bufio.NewReader(c.conn)
for {
line, isPrefix, err := reader.ReadLine()
if err != nil {
// NOTE(review): both branches return, so the io.EOF check is
// redundant; kept only to mark EOF as the normal shutdown path.
if err == io.EOF {
return
}
return
}
// A frame longer than the reader's buffer is a protocol violation.
if isPrefix {
return
}
if len(line) == 0 {
continue
}
c.handleMessage(line)
}
}
// handleMessage dispatches one raw JSON frame from the pool.
//
// Recognized shapes, checked in order:
//  1. a result embedding a login reply (session id and/or first job),
//  2. an error on request id 1 (login failure -> disconnect),
//  3. a "job" notification,
//  4. a reply to a pending submit, reported via OnResultAccepted.
func (c *StratumClient) handleMessage(line []byte) {
var base struct {
ID any `json:"id"`
Method string `json:"method"`
Result json.RawMessage `json:"result"`
Error json.RawMessage `json:"error"`
Params json.RawMessage `json:"params"`
}
if err := json.Unmarshal(line, &base); err != nil {
// Malformed frames are ignored; the read loop keeps running.
return
}
if len(base.Result) > 0 {
var loginReply struct {
ID string `json:"id"`
Job *struct {
Blob string `json:"blob"`
JobID string `json:"job_id"`
Target string `json:"target"`
Algo string `json:"algo"`
Height uint64 `json:"height"`
SeedHash string `json:"seed_hash"`
ID string `json:"id"`
} `json:"job"`
}
if err := json.Unmarshal(base.Result, &loginReply); err == nil {
if loginReply.ID != "" {
c.mu.Lock()
c.sessionID = loginReply.ID
c.mu.Unlock()
}
// Only a result embedding a job is fully consumed here; results
// without a job (e.g. submit status) fall through to the
// pending-reply path below.
if loginReply.Job != nil && loginReply.Job.JobID != "" {
c.mu.Lock()
c.active = true
c.mu.Unlock()
if c.listener != nil {
c.listener.OnJob(proxy.Job{
Blob: loginReply.Job.Blob,
JobID: loginReply.Job.JobID,
Target: loginReply.Job.Target,
Algo: loginReply.Job.Algo,
Height: loginReply.Job.Height,
SeedHash: loginReply.Job.SeedHash,
ClientID: loginReply.Job.ID,
})
}
return
}
}
}
// Request id 1 is reserved for login; an error there is fatal.
if len(base.Error) > 0 && requestID(base.ID) == 1 {
c.notifyDisconnect()
return
}
if base.Method == "job" {
var params struct {
Blob string `json:"blob"`
JobID string `json:"job_id"`
Target string `json:"target"`
Algo string `json:"algo"`
Height uint64 `json:"height"`
SeedHash string `json:"seed_hash"`
ID string `json:"id"`
}
if err := json.Unmarshal(base.Params, &params); err != nil {
return
}
c.mu.Lock()
c.active = true
c.mu.Unlock()
if c.listener != nil {
c.listener.OnJob(proxy.Job{
Blob: params.Blob,
JobID: params.JobID,
Target: params.Target,
Algo: params.Algo,
Height: params.Height,
SeedHash: params.SeedHash,
ClientID: params.ID,
})
}
return
}
// Anything left should be the reply to a pending submit.
seq := requestID(base.ID)
if seq == 0 {
return
}
c.mu.Lock()
_, ok := c.pending[seq]
if ok {
delete(c.pending, seq)
}
c.mu.Unlock()
if !ok {
// Not one of our submits (or already answered); drop it.
return
}
var payload struct {
Status string `json:"status"`
}
if len(base.Result) > 0 {
_ = json.Unmarshal(base.Result, &payload)
}
accepted := len(base.Error) == 0
if payload.Status != "" && strings.EqualFold(payload.Status, "OK") {
accepted = true
}
errorMessage := ""
if !accepted && len(base.Error) > 0 {
var errPayload struct {
Message string `json:"message"`
}
_ = json.Unmarshal(base.Error, &errPayload)
errorMessage = errPayload.Message
}
if c.listener != nil {
c.listener.OnResultAccepted(seq, accepted, errorMessage)
}
}
// NewFailoverStrategy creates the ordered pool failover wrapper.
func NewFailoverStrategy(pools []proxy.PoolConfig, listener StratumListener, config *proxy.Config) *FailoverStrategy {
	strategy := &FailoverStrategy{config: config}
	strategy.pools = pools
	strategy.listener = listener
	return strategy
}
// Connect clears any pending shutdown flag and dials, starting from the
// first enabled pool.
//
// strategy.Connect()
func (s *FailoverStrategy) Connect() {
	if s == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.closing = false
	s.connectLocked(0)
}
// connectLocked tries each enabled pool in order, starting at index start,
// for the configured number of retry rounds, logging in on the first success.
// The caller must hold s.mu.
//
// NOTE(review): the inter-round pause sleeps while s.mu is held, blocking
// Submit/Disconnect for the duration — confirm this is acceptable.
func (s *FailoverStrategy) connectLocked(start int) {
	enabled := enabledPools(s.currentPools())
	if len(enabled) == 0 {
		return
	}
	retries := 1
	retryPause := time.Second
	if s.config != nil {
		if s.config.Retries > 0 {
			retries = s.config.Retries
		}
		if s.config.RetryPause > 0 {
			retryPause = time.Duration(s.config.RetryPause) * time.Second
		}
	}
	for attempt := 0; attempt < retries; attempt++ {
		for i := 0; i < len(enabled); i++ {
			index := (start + i) % len(enabled)
			poolCfg := enabled[index]
			client := NewStratumClient(poolCfg, s)
			if result := client.Connect(); result.OK {
				s.client = client
				s.current = index
				client.Login()
				return
			}
		}
		// Do not pause after the final failed round; there is nothing left
		// to retry and the pause would only delay the caller.
		if attempt < retries-1 {
			time.Sleep(retryPause)
		}
	}
}
// currentPools returns the live pool list: the shared config when it has
// pools, otherwise the snapshot captured at construction time.
func (s *FailoverStrategy) currentPools() []proxy.PoolConfig {
	if s == nil {
		return nil
	}
	if cfg := s.config; cfg != nil && len(cfg.Pools) > 0 {
		return cfg.Pools
	}
	return s.pools
}
// Submit forwards one share to the currently connected pool client and
// returns its sequence number, or 0 when no pool is connected.
//
// seq := strategy.Submit(jobID, nonce, result, algo)
func (s *FailoverStrategy) Submit(jobID, nonce, result, algo string) int64 {
	if s == nil {
		return 0
	}
	// Snapshot the client under the mutex: OnDisconnect and Disconnect
	// replace it concurrently.
	s.mu.Lock()
	client := s.client
	s.mu.Unlock()
	if client == nil {
		return 0
	}
	return client.Submit(jobID, nonce, result, algo)
}
// Disconnect tears down the current pool connection without reconnecting.
//
// strategy.Disconnect()
func (s *FailoverStrategy) Disconnect() {
	if s == nil {
		return
	}
	s.mu.Lock()
	s.closing = true
	client := s.client
	s.client = nil
	s.mu.Unlock()
	if client != nil {
		client.Disconnect()
	}
}
// ReloadPools restarts the failover cycle from the first pool so that
// configuration changes take effect.
//
// strategy.ReloadPools()
func (s *FailoverStrategy) ReloadPools() {
	if s == nil {
		return
	}
	s.mu.Lock()
	s.current = 0
	s.mu.Unlock()
	// Tear down the old connection, then dial with the fresh pool list.
	s.Disconnect()
	s.Connect()
}
// IsActive reports whether a pool connection exists and has received a job.
//
// active := strategy.IsActive()
func (s *FailoverStrategy) IsActive() bool {
	if s == nil {
		return false
	}
	// Snapshot the client under the mutex; it is swapped on disconnect.
	s.mu.Lock()
	client := s.client
	s.mu.Unlock()
	// StratumClient.IsActive tolerates a nil receiver.
	return client.IsActive()
}
// Tick keeps an active pool connection alive when configured; a keepalive is
// sent every 60 ticks.
func (s *FailoverStrategy) Tick(ticks uint64) {
	if s == nil || ticks == 0 || ticks%60 != 0 {
		return
	}
	s.mu.Lock()
	client := s.client
	s.mu.Unlock()
	if client == nil || !client.config.Keepalive {
		return
	}
	client.Keepalive()
}
// OnJob forwards the pool job to the outer listener.
func (s *FailoverStrategy) OnJob(job proxy.Job) {
	if s == nil || s.listener == nil {
		return
	}
	s.listener.OnJob(job)
}
// OnResultAccepted forwards the result status to the outer listener.
func (s *FailoverStrategy) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
	if s == nil || s.listener == nil {
		return
	}
	s.listener.OnResultAccepted(sequence, accepted, errorMessage)
}
// OnDisconnect handles a pool connection loss. During a deliberate shutdown
// (closing set by Disconnect) the event is swallowed; otherwise the outer
// listener is notified and an asynchronous reconnect is started.
//
// strategy.OnDisconnect()
func (s *FailoverStrategy) OnDisconnect() {
if s == nil {
return
}
s.mu.Lock()
s.client = nil
closing := s.closing
if closing {
// Consume the flag so the next disconnect is treated as unexpected.
s.closing = false
}
s.mu.Unlock()
if closing {
return
}
if s.listener != nil {
s.listener.OnDisconnect()
}
// Reconnect off this goroutine: Connect takes s.mu.
go s.Connect()
}
// enabledPools filters pools down to those with Enabled set, preserving order.
func enabledPools(pools []proxy.PoolConfig) []proxy.PoolConfig {
	enabled := make([]proxy.PoolConfig, 0, len(pools))
	for _, candidate := range pools {
		if !candidate.Enabled {
			continue
		}
		enabled = append(enabled, candidate)
	}
	return enabled
}
// requestID normalizes a JSON-RPC id of any wire type to int64. Unparseable
// or unsupported ids map to 0, which callers treat as "no correlation".
func requestID(id any) int64 {
	switch value := id.(type) {
	case float64:
		// encoding/json decodes untyped JSON numbers as float64.
		return int64(value)
	case int64:
		return value
	case int:
		return int64(value)
	case string:
		parsed, _ := strconv.ParseInt(value, 10, 64)
		return parsed
	}
	return 0
}

168
pool/impl_test.go Normal file
View file

@ -0,0 +1,168 @@
package pool
import (
"testing"
"dappco.re/go/proxy"
)
// TestFailoverStrategy_CurrentPools_Good verifies that currentPools follows the live config.
//
// strategy := pool.NewFailoverStrategy(cfg.Pools, nil, cfg)
// strategy.currentPools() // returns cfg.Pools
func TestFailoverStrategy_CurrentPools_Good(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
	}
	strategy := NewFailoverStrategy(cfg.Pools, nil, cfg)
	if count := len(strategy.currentPools()); count != 1 {
		t.Fatalf("expected 1 pool, got %d", count)
	}
	// Swap the config's pool list in place; the strategy must observe it.
	cfg.Pools = []proxy.PoolConfig{{URL: "pool-b.example:4444", Enabled: true}}
	pools := strategy.currentPools()
	if len(pools) != 1 || pools[0].URL != "pool-b.example:4444" {
		t.Fatalf("expected current pools to follow config reload, got %+v", pools)
	}
}
// TestFailoverStrategy_CurrentPools_Bad verifies that a nil strategy returns an empty pool list.
//
// var strategy *pool.FailoverStrategy
// strategy.currentPools() // nil
func TestFailoverStrategy_CurrentPools_Bad(t *testing.T) {
	var strategy *FailoverStrategy
	if pools := strategy.currentPools(); pools != nil {
		t.Fatalf("expected nil pools from nil strategy, got %+v", pools)
	}
}
// TestFailoverStrategy_CurrentPools_Ugly verifies that a strategy with a nil config
// falls back to the pools passed at construction time.
//
// strategy := pool.NewFailoverStrategy(initialPools, nil, nil)
// strategy.currentPools() // returns initialPools
func TestFailoverStrategy_CurrentPools_Ugly(t *testing.T) {
	constructorPools := []proxy.PoolConfig{
		{URL: "fallback.example:3333", Enabled: true},
		{URL: "fallback.example:4444", Enabled: false},
	}
	strategy := NewFailoverStrategy(constructorPools, nil, nil)
	pools := strategy.currentPools()
	if len(pools) != 2 {
		t.Fatalf("expected 2 pools from constructor fallback, got %d", len(pools))
	}
	if pools[0].URL != "fallback.example:3333" {
		t.Fatalf("expected constructor pool URL, got %q", pools[0].URL)
	}
}
// TestFailoverStrategy_EnabledPools_Good verifies that only enabled pools are selected.
//
// enabled := pool.enabledPools(pools) // filters to enabled-only
func TestFailoverStrategy_EnabledPools_Good(t *testing.T) {
	input := []proxy.PoolConfig{
		{URL: "active.example:3333", Enabled: true},
		{URL: "disabled.example:3333", Enabled: false},
		{URL: "active2.example:3333", Enabled: true},
	}
	enabled := enabledPools(input)
	if len(enabled) != 2 {
		t.Fatalf("expected 2 enabled pools, got %d", len(enabled))
	}
	if enabled[0].URL != "active.example:3333" || enabled[1].URL != "active2.example:3333" {
		t.Fatalf("expected only enabled pool URLs, got %+v", enabled)
	}
}
// TestFailoverStrategy_EnabledPools_Bad verifies that an empty pool list returns empty.
//
// pool.enabledPools(nil) // empty
func TestFailoverStrategy_EnabledPools_Bad(t *testing.T) {
	if enabled := enabledPools(nil); len(enabled) != 0 {
		t.Fatalf("expected 0 pools from nil input, got %d", len(enabled))
	}
}
// TestFailoverStrategy_EnabledPools_Ugly verifies that all-disabled pools return empty.
//
// pool.enabledPools([]proxy.PoolConfig{{Enabled: false}}) // empty
func TestFailoverStrategy_EnabledPools_Ugly(t *testing.T) {
	input := []proxy.PoolConfig{
		{URL: "a.example:3333", Enabled: false},
		{URL: "b.example:3333", Enabled: false},
	}
	if enabled := enabledPools(input); len(enabled) != 0 {
		t.Fatalf("expected 0 enabled pools when all disabled, got %d", len(enabled))
	}
}
// TestNewStrategyFactory_Good verifies the factory creates a strategy connected to the config.
//
// factory := pool.NewStrategyFactory(cfg)
// strategy := factory(listener) // creates FailoverStrategy
func TestNewStrategyFactory_Good(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	factory := NewStrategyFactory(cfg)
	if factory == nil {
		t.Fatal("expected a non-nil factory")
	}
	strategy := factory(nil)
	if strategy == nil {
		t.Fatal("expected a non-nil strategy from factory")
	}
	if strategy.IsActive() {
		t.Fatal("expected new strategy to be inactive before connecting")
	}
}
// TestNewStrategyFactory_Bad verifies a factory created with nil config does not panic.
//
// factory := pool.NewStrategyFactory(nil)
// strategy := factory(nil)
func TestNewStrategyFactory_Bad(t *testing.T) {
	strategy := NewStrategyFactory(nil)(nil)
	if strategy == nil {
		t.Fatal("expected a non-nil strategy even from nil config")
	}
}
// TestNewStrategyFactory_Ugly verifies the factory forwards the correct pool list to the strategy.
//
// cfg.Pools = append(cfg.Pools, proxy.PoolConfig{URL: "added.example:3333", Enabled: true})
// strategy := factory(nil)
// // strategy sees the updated pools via the shared config pointer
func TestNewStrategyFactory_Ugly(t *testing.T) {
	cfg := &proxy.Config{
		Mode:    "nicehash",
		Workers: proxy.WorkersByRigID,
		Bind:    []proxy.BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:   []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
	}
	factory := NewStrategyFactory(cfg)
	// Mutate the shared config after the factory is created.
	cfg.Pools = append(cfg.Pools, proxy.PoolConfig{URL: "added.example:3333", Enabled: true})
	fs, ok := factory(nil).(*FailoverStrategy)
	if !ok {
		t.Fatal("expected FailoverStrategy")
	}
	if pools := fs.currentPools(); len(pools) != 2 {
		t.Fatalf("expected 2 pools after config update, got %d", len(pools))
	}
}

112
pool/keepalive_test.go Normal file
View file

@ -0,0 +1,112 @@
package pool
import (
"bufio"
"encoding/json"
"net"
"testing"
"time"
)
// TestStratumClient_Keepalive_Good verifies that an active client writes a
// well-formed "keepalived" JSON-RPC line carrying its session id in params.
//
// net.Pipe is synchronous, so Keepalive's write only completes once the
// server side reads the line; the client therefore runs in a goroutine while
// the test reads from serverConn.
func TestStratumClient_Keepalive_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()
	defer clientConn.Close()
	client := &StratumClient{
		conn:      clientConn,
		active:    true,
		sessionID: "session-1",
	}
	done := make(chan struct{})
	go func() {
		client.Keepalive()
		close(done)
	}()
	// Read the newline-terminated request the client wrote on the pipe.
	line, err := bufio.NewReader(serverConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read keepalive request: %v", err)
	}
	<-done
	var payload map[string]any
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal keepalive request: %v", err)
	}
	if got := payload["method"]; got != "keepalived" {
		t.Fatalf("expected keepalived method, got %#v", got)
	}
	params, ok := payload["params"].(map[string]any)
	if !ok {
		t.Fatalf("expected params object, got %#v", payload["params"])
	}
	if got := params["id"]; got != "session-1" {
		t.Fatalf("expected session id in keepalive payload, got %#v", got)
	}
}
// TestStratumClient_Keepalive_Bad verifies an inactive client sends nothing.
//
// Keepalive is called inline: if it attempted a write on the synchronous
// net.Pipe it would block, so returning at all already implies no write was
// started. The short read deadline then confirms no bytes arrived.
func TestStratumClient_Keepalive_Bad(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()
	defer clientConn.Close()
	client := &StratumClient{
		conn:   clientConn,
		active: false,
	}
	client.Keepalive()
	if err := serverConn.SetReadDeadline(time.Now().Add(50 * time.Millisecond)); err != nil {
		t.Fatalf("set deadline: %v", err)
	}
	// A timeout error is the expected outcome; any successful read is a failure.
	buf := make([]byte, 1)
	if _, err := serverConn.Read(buf); err == nil {
		t.Fatalf("expected no keepalive data while inactive")
	}
}
// TestStratumClient_Keepalive_Ugly verifies consecutive keepalive requests use
// distinct JSON-RPC ids, so upstream responses can be correlated per request.
func TestStratumClient_Keepalive_Ugly(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()
	defer clientConn.Close()
	client := &StratumClient{
		conn:      clientConn,
		active:    true,
		sessionID: "session-2",
	}
	reader := bufio.NewReader(serverConn)
	done := make(chan struct{})
	// Two back-to-back keepalives; each write blocks until read (net.Pipe).
	go func() {
		client.Keepalive()
		client.Keepalive()
		close(done)
	}()
	first, err := reader.ReadBytes('\n')
	if err != nil {
		t.Fatalf("read first keepalive request: %v", err)
	}
	second, err := reader.ReadBytes('\n')
	if err != nil {
		t.Fatalf("read second keepalive request: %v", err)
	}
	<-done
	var firstPayload map[string]any
	if err := json.Unmarshal(first, &firstPayload); err != nil {
		t.Fatalf("unmarshal first keepalive request: %v", err)
	}
	var secondPayload map[string]any
	if err := json.Unmarshal(second, &secondPayload); err != nil {
		t.Fatalf("unmarshal second keepalive request: %v", err)
	}
	// Equal ids would make response correlation ambiguous.
	if firstPayload["id"] == secondPayload["id"] {
		t.Fatalf("expected keepalive request ids to be unique")
	}
}

View file

@ -2,16 +2,16 @@ package pool
import ( import (
"sync" "sync"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/proxy"
) )
// FailoverStrategy wraps an ordered slice of PoolConfig entries. // FailoverStrategy wraps an ordered slice of PoolConfig entries.
// It connects to the first enabled pool and fails over in order on error.
// On reconnect it always retries from the primary first.
// //
// strategy := pool.NewFailoverStrategy(cfg.Pools, listener, cfg) // strategy := pool.NewFailoverStrategy([]proxy.PoolConfig{
// {URL: "primary.example:3333", Enabled: true},
// {URL: "backup.example:3333", Enabled: true},
// }, listener, cfg)
// strategy.Connect() // strategy.Connect()
type FailoverStrategy struct { type FailoverStrategy struct {
pools []proxy.PoolConfig pools []proxy.PoolConfig
@ -19,19 +19,17 @@ type FailoverStrategy struct {
client *StratumClient client *StratumClient
listener StratumListener listener StratumListener
config *proxy.Config config *proxy.Config
closed bool closing bool
running bool
mu sync.Mutex mu sync.Mutex
} }
// StrategyFactory creates a new FailoverStrategy for a given StratumListener. // StrategyFactory creates a FailoverStrategy for a given StratumListener.
// Used by splitters to create per-mapper strategies without coupling to Config.
// //
// factory := pool.NewStrategyFactory(cfg) // factory := pool.NewStrategyFactory(cfg)
// strategy := factory(listener) // each mapper calls this // strategy := factory(listener)
type StrategyFactory func(listener StratumListener) Strategy type StrategyFactory func(listener StratumListener) Strategy
// Strategy is the interface the splitters use to submit shares and check pool state. // Strategy is the interface splitters use to submit shares and inspect pool state.
type Strategy interface { type Strategy interface {
Connect() Connect()
Submit(jobID, nonce, result, algo string) int64 Submit(jobID, nonce, result, algo string) int64
@ -39,159 +37,9 @@ type Strategy interface {
IsActive() bool IsActive() bool
} }
// NewStrategyFactory captures the live pool list and retry settings. // ReloadableStrategy re-establishes an upstream connection after config changes.
// //
// factory := pool.NewStrategyFactory(proxy.Config{Pools: []proxy.PoolConfig{{URL: "pool.lthn.io:3333", Enabled: true}}}) // strategy.ReloadPools()
func NewStrategyFactory(cfg *proxy.Config) StrategyFactory { type ReloadableStrategy interface {
return func(listener StratumListener) Strategy { ReloadPools()
if cfg == nil {
return NewFailoverStrategy(nil, listener, nil)
}
return NewFailoverStrategy(cfg.Pools, listener, cfg)
}
}
// NewFailoverStrategy builds one failover client stack.
//
// strategy := pool.NewFailoverStrategy([]proxy.PoolConfig{{URL: "pool.lthn.io:3333", Enabled: true}}, listener, cfg)
func NewFailoverStrategy(pools []proxy.PoolConfig, listener StratumListener, cfg *proxy.Config) *FailoverStrategy {
	// Clone the slice so later mutation of the caller's slice cannot change
	// the strategy's ordered failover list.
	clonedPools := append([]proxy.PoolConfig(nil), pools...)
	strategy := &FailoverStrategy{}
	strategy.pools = clonedPools
	strategy.listener = listener
	strategy.config = cfg
	return strategy
}
// Connect dials the first enabled pool and rotates through fallbacks on failure.
//
// strategy.Connect()
func (s *FailoverStrategy) Connect() {
	// Clear the intentional-close flag so a previously Disconnect()ed
	// strategy may establish a fresh upstream connection.
	s.mu.Lock()
	s.closed = false
	s.mu.Unlock()
	// Index 0 means the primary pool is always tried first.
	s.connectFrom(0)
}
// connectFrom walks the pool list starting at index `start`, dialling each
// enabled pool in rotation until one connects, with up to `Retries` passes
// separated by `RetryPause` seconds. At most one connectFrom runs at a time
// (guarded by s.running); an intentional Disconnect (s.closed) aborts it.
func (s *FailoverStrategy) connectFrom(start int) {
	// Single-flight guard: bail out if another attempt is running or the
	// strategy was intentionally closed.
	s.mu.Lock()
	if s.running || s.closed {
		s.mu.Unlock()
		return
	}
	s.running = true
	s.mu.Unlock()
	defer func() {
		s.mu.Lock()
		s.running = false
		s.mu.Unlock()
	}()
	// Prefer the live config's pool list (reloadable) over the snapshot
	// taken at construction time.
	pools := s.pools
	if s.config != nil && len(s.config.Pools) > 0 {
		pools = s.config.Pools
	}
	if len(pools) == 0 {
		return
	}
	// Default: one pass over the pool list, no pause between passes.
	retries := 1
	pause := time.Duration(0)
	if s.config != nil {
		if s.config.Retries > 0 {
			retries = s.config.Retries
		}
		if s.config.RetryPause > 0 {
			pause = time.Duration(s.config.RetryPause) * time.Second
		}
	}
	for attempt := 0; attempt < retries; attempt++ {
		for offset := 0; offset < len(pools); offset++ {
			// Rotate so the pass begins at `start` and wraps around.
			index := (start + offset) % len(pools)
			poolConfig := pools[index]
			if !poolConfig.Enabled {
				continue
			}
			client := NewStratumClient(poolConfig, s)
			if errorValue := client.Connect(); errorValue == nil {
				s.mu.Lock()
				// Disconnect() may have raced with the dial: drop the
				// fresh connection instead of installing it.
				if s.closed {
					s.mu.Unlock()
					client.Disconnect()
					return
				}
				s.client = client
				s.current = index
				s.mu.Unlock()
				// Login outside the lock; responses arrive via listener callbacks.
				client.Login()
				return
			}
		}
		// Pause between full passes, but not after the final one.
		if pause > 0 && attempt < retries-1 {
			time.Sleep(pause)
		}
	}
}
// Submit forwards a share to the current upstream client, returning the
// request sequence number, or 0 when no upstream is connected.
func (s *FailoverStrategy) Submit(jobID string, nonce string, result string, algo string) int64 {
	// Snapshot the client under the lock, then call it unlocked.
	s.mu.Lock()
	upstream := s.client
	s.mu.Unlock()
	if upstream == nil {
		return 0
	}
	return upstream.Submit(jobID, nonce, result, algo)
}
// Disconnect intentionally closes the upstream and suppresses reconnects
// until the next Connect call.
func (s *FailoverStrategy) Disconnect() {
	s.mu.Lock()
	s.closed = true
	upstream := s.client
	s.client = nil
	s.mu.Unlock()
	if upstream == nil {
		return
	}
	upstream.Disconnect()
}
// IsActive reports whether an upstream client is installed and marked active.
func (s *FailoverStrategy) IsActive() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.client == nil {
		return false
	}
	// NOTE(review): client.active is read under the strategy mutex rather
	// than any client-side synchronization — confirm that is the intended guard.
	return s.client.active
}
// OnJob relays a new upstream job to the registered listener, if any.
func (s *FailoverStrategy) OnJob(job proxy.Job) {
	if target := s.listener; target != nil {
		target.OnJob(job)
	}
}

// OnResultAccepted relays a share verdict to the registered listener, if any.
func (s *FailoverStrategy) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
	if target := s.listener; target != nil {
		target.OnResultAccepted(sequence, accepted, errorMessage)
	}
}
// OnDisconnect handles an upstream-initiated drop: it clears the stale client,
// notifies the listener, and — unless the strategy was intentionally closed —
// schedules a reconnect from the primary pool after a short backoff.
func (s *FailoverStrategy) OnDisconnect() {
	s.mu.Lock()
	client := s.client
	s.client = nil
	closed := s.closed
	s.mu.Unlock()
	// The listener is always told about the drop, intentional or not.
	if s.listener != nil {
		s.listener.OnDisconnect()
	}
	// Intentional close: no reconnect.
	if closed {
		return
	}
	if client != nil {
		client.active = false
	}
	// Small backoff, then retry starting from the primary (index 0).
	go func() {
		time.Sleep(10 * time.Millisecond)
		s.connectFrom(0)
	}()
}

View file

@ -0,0 +1,148 @@
package pool
import (
"encoding/json"
"net"
"sync/atomic"
"testing"
"time"
"dappco.re/go/proxy"
)
// disconnectSpy is a test double for the listener callbacks that only counts
// OnDisconnect invocations; job and result callbacks are no-ops.
type disconnectSpy struct {
	disconnects atomic.Int64 // number of OnDisconnect notifications observed
}

func (s *disconnectSpy) OnJob(proxy.Job) {}

func (s *disconnectSpy) OnResultAccepted(int64, bool, string) {}

// OnDisconnect records one notification; atomic, so safe across goroutines.
func (s *disconnectSpy) OnDisconnect() {
	s.disconnects.Add(1)
}
// TestFailoverStrategy_Disconnect_Good verifies an intentional Disconnect does
// not propagate a disconnect notification to the listener.
//
// The short sleep gives any (erroneous) asynchronous notification time to
// land before the assertion.
func TestFailoverStrategy_Disconnect_Good(t *testing.T) {
	spy := &disconnectSpy{}
	strategy := &FailoverStrategy{
		listener: spy,
		client:   &StratumClient{listener: nil},
	}
	// Wire the client back to the strategy so client-side teardown would
	// reach OnDisconnect if it fired.
	strategy.client.listener = strategy
	strategy.Disconnect()
	time.Sleep(10 * time.Millisecond)
	if got := spy.disconnects.Load(); got != 0 {
		t.Fatalf("expected intentional disconnect to suppress reconnect, got %d listener calls", got)
	}
}
// TestFailoverStrategy_Disconnect_Bad verifies an upstream-initiated drop
// notifies the listener exactly once.
func TestFailoverStrategy_Disconnect_Bad(t *testing.T) {
	spy := &disconnectSpy{}
	strategy := &FailoverStrategy{listener: spy}
	strategy.OnDisconnect()
	if calls := spy.disconnects.Load(); calls != 1 {
		t.Fatalf("expected external disconnect to notify listener once, got %d", calls)
	}
}
// TestFailoverStrategy_Disconnect_Ugly verifies repeated intentional
// Disconnect calls stay idempotent and never notify the listener.
func TestFailoverStrategy_Disconnect_Ugly(t *testing.T) {
	spy := &disconnectSpy{}
	strategy := &FailoverStrategy{
		listener: spy,
		client:   &StratumClient{listener: nil},
	}
	strategy.client.listener = strategy
	// Double Disconnect: the second call sees client == nil and must not blow up.
	strategy.Disconnect()
	strategy.Disconnect()
	time.Sleep(10 * time.Millisecond)
	if got := spy.disconnects.Load(); got != 0 {
		t.Fatalf("expected repeated intentional disconnects to remain silent, got %d listener calls", got)
	}
}
// TestStratumClient_NotifyDisconnect_ClearsState_Good verifies that
// notifyDisconnect fires the listener once and resets all connection state:
// conn, session id, active flag, and pending submit bookkeeping.
//
// NOTE(review): only serverConn has a deferred Close here — presumably
// notifyDisconnect closes clientConn itself; confirm to rule out a leak.
func TestStratumClient_NotifyDisconnect_ClearsState_Good(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()
	spy := &disconnectSpy{}
	client := &StratumClient{
		conn:      clientConn,
		listener:  spy,
		sessionID: "session-1",
		active:    true,
		pending: map[int64]struct{}{
			7: {},
		},
	}
	client.notifyDisconnect()
	if got := spy.disconnects.Load(); got != 1 {
		t.Fatalf("expected one disconnect notification, got %d", got)
	}
	if client.conn != nil {
		t.Fatalf("expected pooled connection to be cleared")
	}
	if client.sessionID != "" {
		t.Fatalf("expected session id to be cleared, got %q", client.sessionID)
	}
	if client.IsActive() {
		t.Fatalf("expected client to stop reporting active after disconnect")
	}
	if len(client.pending) != 0 {
		t.Fatalf("expected pending submit state to be cleared, got %d entries", len(client.pending))
	}
}
// TestFailoverStrategy_OnDisconnect_ClearsClient_Bad verifies an external
// disconnect drops the stale client, reports inactive while the reconnect is
// pending, and notifies the listener exactly once.
//
// The client field is read under the strategy mutex: OnDisconnect spawns a
// reconnect goroutine that takes the same lock, so an unguarded field read
// here is a potential data race under `go test -race`.
func TestFailoverStrategy_OnDisconnect_ClearsClient_Bad(t *testing.T) {
	spy := &disconnectSpy{}
	strategy := &FailoverStrategy{
		listener: spy,
		client:   &StratumClient{active: true, pending: make(map[int64]struct{})},
	}
	strategy.OnDisconnect()
	// Give the scheduled reconnect goroutine time to run (it finds no pools
	// and returns without installing a client).
	time.Sleep(10 * time.Millisecond)
	strategy.mu.Lock()
	staleClient := strategy.client
	strategy.mu.Unlock()
	if staleClient != nil {
		t.Fatalf("expected strategy to drop the stale client before reconnect")
	}
	if strategy.IsActive() {
		t.Fatalf("expected strategy to report inactive while reconnect is pending")
	}
	if got := spy.disconnects.Load(); got != 1 {
		t.Fatalf("expected one disconnect notification, got %d", got)
	}
}
// TestStratumClient_HandleMessage_LoginErrorDisconnects_Ugly verifies a
// JSON-RPC error reply to the login request tears down the upstream exactly once.
func TestStratumClient_HandleMessage_LoginErrorDisconnects_Ugly(t *testing.T) {
	spy := &disconnectSpy{}
	client := &StratumClient{
		listener: spy,
		pending:  make(map[int64]struct{}),
	}
	message := map[string]any{
		"id":      1,
		"jsonrpc": "2.0",
		"error": map[string]any{
			"code":    -1,
			"message": "Invalid payment address provided",
		},
	}
	payload, err := json.Marshal(message)
	if err != nil {
		t.Fatalf("marshal login error payload: %v", err)
	}
	client.handleMessage(payload)
	if calls := spy.disconnects.Load(); calls != 1 {
		t.Fatalf("expected login failure to disconnect upstream once, got %d", calls)
	}
}

View file

@ -1,306 +0,0 @@
package pool
import (
"bufio"
"encoding/json"
"net"
"sync"
"sync/atomic"
"testing"
"time"
"dappco.re/go/core/proxy"
)
// strategyTestListener is a test double that forwards jobs onto a channel and
// counts disconnect callbacks behind a mutex.
type strategyTestListener struct {
	jobCh        chan proxy.Job // receives every OnJob payload; buffer sized by the test
	disconnectMu sync.Mutex
	disconnects  int // guarded by disconnectMu
}

// OnJob publishes the job to jobCh; blocks if the channel buffer is full.
func (l *strategyTestListener) OnJob(job proxy.Job) {
	l.jobCh <- job
}

func (l *strategyTestListener) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {}

// OnDisconnect increments the disconnect counter.
func (l *strategyTestListener) OnDisconnect() {
	l.disconnectMu.Lock()
	l.disconnects++
	l.disconnectMu.Unlock()
}

// Disconnects returns the current disconnect count, safely across goroutines.
func (l *strategyTestListener) Disconnects() int {
	l.disconnectMu.Lock()
	defer l.disconnectMu.Unlock()
	return l.disconnects
}
// TestFailoverStrategy_Connect_Ugly drives a failover: the primary pool
// accepts the TCP connection and immediately drops it, so the strategy must
// move to the backup pool, which answers the login with a job.
func TestFailoverStrategy_Connect_Ugly(t *testing.T) {
	primaryListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer primaryListener.Close()
	backupListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer backupListener.Close()
	// Primary: accept, then close both listener and conn to force failover.
	go func() {
		conn, acceptErr := primaryListener.Accept()
		if acceptErr != nil {
			return
		}
		_ = primaryListener.Close()
		_ = conn.Close()
	}()
	// Backup: read the login line, reply with a valid session + job.
	go func() {
		conn, acceptErr := backupListener.Accept()
		if acceptErr != nil {
			return
		}
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id":      1,
			"jsonrpc": "2.0",
			"error":   nil,
			"result": map[string]interface{}{
				"id": "session-1",
				"job": map[string]interface{}{
					"blob":   "abcd",
					"job_id": "job-1",
					"target": "b88d0600",
				},
			},
		})
	}()
	listener := &strategyTestListener{
		jobCh: make(chan proxy.Job, 1),
	}
	strategy := NewFailoverStrategy([]proxy.PoolConfig{
		{URL: primaryListener.Addr().String(), Enabled: true},
		{URL: backupListener.Addr().String(), Enabled: true},
	}, listener, &proxy.Config{Retries: 2})
	strategy.Connect()
	defer strategy.Disconnect()
	// The backup's job arriving proves the failover happened.
	select {
	case job := <-listener.jobCh:
		if job.JobID != "job-1" {
			t.Fatalf("expected backup job, got %+v", job)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("expected failover job after primary disconnect")
	}
	if listener.Disconnects() == 0 {
		t.Fatal("expected disconnect callback before failover reconnect")
	}
}
// TestFailoverStrategy_OnDisconnect_Good verifies a login rejected with a
// JSON-RPC error on the primary pool causes a failover to the backup pool.
func TestFailoverStrategy_OnDisconnect_Good(t *testing.T) {
	primaryListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer primaryListener.Close()
	backupListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer backupListener.Close()
	var primaryConnections atomic.Int32
	// Primary: accept one connection, reject the login with an error reply.
	go func() {
		conn, acceptErr := primaryListener.Accept()
		if acceptErr != nil {
			return
		}
		primaryConnections.Add(1)
		defer primaryListener.Close()
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id":      1,
			"jsonrpc": "2.0",
			"error": map[string]interface{}{
				"code":    -1,
				"message": "Unauthenticated",
			},
		})
	}()
	// Backup: accept the failover connection and serve a job on login.
	go func() {
		conn, acceptErr := backupListener.Accept()
		if acceptErr != nil {
			return
		}
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id":      1,
			"jsonrpc": "2.0",
			"error":   nil,
			"result": map[string]interface{}{
				"id": "session-1",
				"job": map[string]interface{}{
					"blob":   "abcd",
					"job_id": "job-1",
					"target": "b88d0600",
				},
			},
		})
	}()
	listener := &strategyTestListener{
		jobCh: make(chan proxy.Job, 1),
	}
	strategy := NewFailoverStrategy([]proxy.PoolConfig{
		{URL: primaryListener.Addr().String(), Enabled: true},
		{URL: backupListener.Addr().String(), Enabled: true},
	}, listener, &proxy.Config{Retries: 1})
	strategy.Connect()
	defer strategy.Disconnect()
	select {
	case job := <-listener.jobCh:
		if job.JobID != "job-1" {
			t.Fatalf("expected backup job, got %+v", job)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("expected backup job after primary disconnect, primary connections=%d", primaryConnections.Load())
	}
	if listener.Disconnects() == 0 {
		t.Fatal("expected disconnect callback before failover reconnect")
	}
}
// TestFailoverStrategy_OnDisconnect_PrimaryFirst verifies reconnect ordering:
// with the primary initially unreachable the strategy serves from the backup,
// and once the backup drops, the reconnect retries the primary first.
func TestFailoverStrategy_OnDisconnect_PrimaryFirst(t *testing.T) {
	// Reserve a primary address, then close it so the first dial fails.
	primaryListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	primaryAddr := primaryListener.Addr().String()
	_ = primaryListener.Close()
	backupListener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer backupListener.Close()
	// Backup: serve one job, linger briefly, then drop the connection.
	go func() {
		conn, acceptErr := backupListener.Accept()
		if acceptErr != nil {
			return
		}
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id":      1,
			"jsonrpc": "2.0",
			"error":   nil,
			"result": map[string]interface{}{
				"id": "session-backup",
				"job": map[string]interface{}{
					"blob":   "abcd",
					"job_id": "backup-job",
					"target": "b88d0600",
				},
			},
		})
		time.Sleep(40 * time.Millisecond)
	}()
	listener := &strategyTestListener{
		jobCh: make(chan proxy.Job, 2),
	}
	strategy := NewFailoverStrategy([]proxy.PoolConfig{
		{URL: primaryAddr, Enabled: true},
		{URL: backupListener.Addr().String(), Enabled: true},
	}, listener, &proxy.Config{Retries: 2})
	strategy.Connect()
	defer strategy.Disconnect()
	// Phase 1: the backup job confirms initial failover.
	select {
	case job := <-listener.jobCh:
		if job.JobID != "backup-job" {
			t.Fatalf("expected initial failover job, got %+v", job)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("expected initial failover job")
	}
	// Phase 2: bring the primary back on the same address before the backup drops.
	primaryListener, err = net.Listen("tcp", primaryAddr)
	if err != nil {
		t.Fatal(err)
	}
	defer primaryListener.Close()
	go func() {
		conn, acceptErr := primaryListener.Accept()
		if acceptErr != nil {
			return
		}
		defer conn.Close()
		reader := bufio.NewReader(conn)
		if _, readErr := reader.ReadBytes('\n'); readErr != nil {
			return
		}
		_ = json.NewEncoder(conn).Encode(map[string]interface{}{
			"id":      1,
			"jsonrpc": "2.0",
			"error":   nil,
			"result": map[string]interface{}{
				"id": "session-primary",
				"job": map[string]interface{}{
					"blob":   "abcd",
					"job_id": "primary-job",
					"target": "b88d0600",
				},
			},
		})
	}()
	// The primary's job proves reconnect preferred the primary over the backup.
	select {
	case job := <-listener.jobCh:
		if job.JobID != "primary-job" {
			t.Fatalf("expected reconnect to prefer primary pool, got %+v", job)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("expected reconnect job")
	}
}

152
proxy.go
View file

@ -1,13 +1,10 @@
// Package proxy is a CryptoNote stratum mining proxy library. // Package proxy is the mining proxy library.
//
// It accepts miner connections over TCP (optionally TLS), splits the 32-bit nonce
// space across up to 256 simultaneous miners per upstream pool connection (NiceHash
// mode), and presents a small monitoring API.
//
// Full specification: docs/RFC.md
// //
// cfg := &proxy.Config{Mode: "nicehash", Bind: []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}}, Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}}, Workers: proxy.WorkersByRigID}
// p, result := proxy.New(cfg) // p, result := proxy.New(cfg)
// if result.OK { p.Start() } // if result.OK {
// p.Start()
// }
package proxy package proxy
import ( import (
@ -17,32 +14,46 @@ import (
"time" "time"
) )
// Proxy is the top-level orchestrator. It owns the server, splitter, stats, workers, // Proxy wires the configured listeners, splitters, stats, workers, and log sinks.
// event bus, tick goroutine, and optional HTTP API.
// //
// cfg := &proxy.Config{
// Mode: "nicehash",
// Bind: []proxy.BindAddr{{Host: "0.0.0.0", Port: 3333}},
// Pools: []proxy.PoolConfig{{URL: "pool.example:3333", Enabled: true}},
// Workers: proxy.WorkersByRigID,
// }
// p, result := proxy.New(cfg) // p, result := proxy.New(cfg)
// if result.OK { p.Start() } // if result.OK {
// p.Start()
// }
type Proxy struct { type Proxy struct {
config *Config config *Config
customDifficulty *CustomDiff configMu sync.RWMutex
rateLimiter *RateLimiter
splitter Splitter splitter Splitter
shareSink ShareSink
stats *Stats stats *Stats
workers *Workers workers *Workers
events *EventBus events *EventBus
currentMiners atomic.Uint64
miners map[int64]*Miner
minerMu sync.RWMutex
servers []*Server servers []*Server
httpServer *http.Server
accessLogger *appendLineLogger
shareLogger *appendLineLogger
ticker *time.Ticker ticker *time.Ticker
watcher *ConfigWatcher watcher *ConfigWatcher
done chan struct{} done chan struct{}
stopOnce sync.Once
minersMu sync.RWMutex
miners map[int64]*Miner
customDiff *CustomDiff
customDiffBuckets *CustomDiffBuckets
rateLimit *RateLimiter
httpServer *http.Server
accessLog *accessLogSink
shareLog *shareLogSink
submitCount atomic.Int64
} }
// Splitter is the interface both NonceSplitter and SimpleSplitter satisfy. // Splitter routes miner logins, submits, and disconnects to the active upstream strategy.
//
// splitter := nicehash.NewNonceSplitter(cfg, bus, pool.NewStrategyFactory(cfg))
// splitter.Connect()
type Splitter interface { type Splitter interface {
// Connect establishes the first pool upstream connection. // Connect establishes the first pool upstream connection.
Connect() Connect()
@ -60,7 +71,18 @@ type Splitter interface {
Upstreams() UpstreamStats Upstreams() UpstreamStats
} }
// UpstreamStats carries pool connection state counts for monitoring. // ShareSink consumes share outcomes from the proxy event stream.
//
// sink.OnAccept(proxy.Event{Miner: miner, Diff: 100000})
// sink.OnReject(proxy.Event{Miner: miner, Error: "Invalid nonce"})
type ShareSink interface {
OnAccept(Event)
OnReject(Event)
}
// UpstreamStats reports pool connection counts.
//
// stats := proxy.UpstreamStats{Active: 1, Sleep: 0, Error: 0, Total: 1}
type UpstreamStats struct { type UpstreamStats struct {
Active uint64 // connections currently receiving jobs Active uint64 // connections currently receiving jobs
Sleep uint64 // idle connections (simple mode reuse pool) Sleep uint64 // idle connections (simple mode reuse pool)
@ -68,12 +90,16 @@ type UpstreamStats struct {
Total uint64 // Active + Sleep + Error Total uint64 // Active + Sleep + Error
} }
// LoginEvent is dispatched when a miner completes the login handshake. // LoginEvent is dispatched when a miner completes login.
//
// event := proxy.LoginEvent{Miner: miner}
type LoginEvent struct { type LoginEvent struct {
Miner *Miner Miner *Miner
} }
// SubmitEvent is dispatched when a miner submits a share. // SubmitEvent carries one miner share submission.
//
// event := proxy.SubmitEvent{Miner: miner, JobID: "job-1", Nonce: "deadbeef", Result: "HASH", RequestID: 2}
type SubmitEvent struct { type SubmitEvent struct {
Miner *Miner Miner *Miner
JobID string JobID string
@ -83,78 +109,56 @@ type SubmitEvent struct {
RequestID int64 RequestID int64
} }
// CloseEvent is dispatched when a miner TCP connection closes. // CloseEvent is dispatched when a miner connection closes.
//
// event := proxy.CloseEvent{Miner: miner}
type CloseEvent struct { type CloseEvent struct {
Miner *Miner Miner *Miner
} }
// ConfigWatcher polls a config file for mtime changes and calls onChange on modification. // ConfigWatcher polls a config file every second and reloads on modification.
// Uses 1-second polling; does not require fsnotify.
// //
// w := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) { // watcher := proxy.NewConfigWatcher("config.json", func(cfg *proxy.Config) {
// p.Reload(cfg) // p.Reload(cfg)
// }) // })
// w.Start() // watcher.Start()
type ConfigWatcher struct { type ConfigWatcher struct {
path string configPath string
onChange func(*Config) onConfigChange func(*Config)
enabled bool
lastModifiedAt time.Time lastModifiedAt time.Time
done chan struct{} stopCh chan struct{}
mu sync.Mutex
started bool
} }
// RateLimiter implements per-IP token bucket connection rate limiting. // RateLimiter throttles new connections per source IP.
// Each unique IP has a bucket initialised to MaxConnectionsPerMinute tokens.
// Each connection attempt consumes one token. Tokens refill at 1 per (60/max) seconds.
// An IP that empties its bucket is added to a ban list for BanDurationSeconds.
// //
// rl := proxy.NewRateLimiter(cfg.RateLimit) // limiter := proxy.NewRateLimiter(proxy.RateLimit{
// if !rl.Allow("1.2.3.4") { conn.Close(); return } // MaxConnectionsPerMinute: 30,
// BanDurationSeconds: 300,
// })
// if limiter.Allow("1.2.3.4:3333") {
// // accept the socket
// }
type RateLimiter struct { type RateLimiter struct {
config RateLimit limit RateLimit
buckets map[string]*tokenBucket bucketByHost map[string]*tokenBucket
banned map[string]time.Time banUntilByHost map[string]time.Time
mu sync.Mutex mu sync.Mutex
} }
// tokenBucket is a simple token bucket for one IP. // tokenBucket is the per-IP refillable counter.
//
// bucket := tokenBucket{tokens: 30, lastRefill: time.Now()}
type tokenBucket struct { type tokenBucket struct {
tokens int tokens int
lastRefill time.Time lastRefill time.Time
} }
// CustomDiff resolves and applies per-miner difficulty overrides at login time. // CustomDiff applies a login-time difficulty override.
// Resolution order: user-suffix (+N) > Config.CustomDiff > pool difficulty.
// //
// cd := proxy.NewCustomDiff(cfg.CustomDiff) // resolver := proxy.NewCustomDiff(50000)
// bus.Subscribe(proxy.EventLogin, cd.OnLogin) // resolver.Apply(&Miner{user: "WALLET+75000"})
type CustomDiff struct { type CustomDiff struct {
globalDiff uint64 globalDiff atomic.Uint64
mu sync.RWMutex
}
var splitterFactories = map[string]func(*Config, *EventBus) Splitter{
"": noopSplitterFactory,
}
// RegisterSplitterFactory registers a splitter constructor for a mode name.
//
// proxy.RegisterSplitterFactory("nicehash", func(cfg *proxy.Config, bus *proxy.EventBus) proxy.Splitter {
// return nicehash.NewNonceSplitter(cfg, bus, pool.NewStrategyFactory(cfg))
// })
func RegisterSplitterFactory(mode string, factory func(*Config, *EventBus) Splitter) {
if mode == "" || factory == nil {
return
}
splitterFactories[mode] = factory
}
func newSplitter(cfg *Config, events *EventBus) Splitter {
if cfg == nil {
return noopSplitter{}
}
if factory, exists := splitterFactories[cfg.Mode]; exists && factory != nil {
return factory(cfg, events)
}
return noopSplitter{}
} }

View file

@ -1,244 +0,0 @@
package proxy
import (
	"context"
	"crypto/subtle"
	"encoding/json"
	"net"
	"net/http"
	"strconv"
	"strings"
	"time"
)
// proxyAPIVersion is the version string reported in /1/summary responses.
const proxyAPIVersion = "1.0.0"

// RouteRegistrar is the minimal route-registration surface used by RegisterMonitoringRoutes.
//
// mux := http.NewServeMux()
// RegisterMonitoringRoutes(mux, p)
type RouteRegistrar interface {
	HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request))
}

// SummaryResponse is the /1/summary JSON body.
type SummaryResponse struct {
	Version   string              `json:"version"`
	Mode      string              `json:"mode"`
	Hashrate  HashrateResponse    `json:"hashrate"`
	Miners    MinersCountResponse `json:"miners"`
	Workers   uint64              `json:"workers"`
	Upstreams UpstreamResponse    `json:"upstreams"`
	Results   ResultsResponse     `json:"results"`
}

// HashrateResponse carries the per-window hashrate array.
// The six slots presumably correspond to fixed averaging windows — confirm
// against the stats package before relying on slot order.
type HashrateResponse struct {
	Total [6]float64 `json:"total"`
}

// MinersCountResponse carries current and peak miner counts.
type MinersCountResponse struct {
	Now uint64 `json:"now"`
	Max uint64 `json:"max"`
}

// UpstreamResponse carries pool connection state counts plus the
// miners-per-upstream ratio computed at request time.
type UpstreamResponse struct {
	Active uint64  `json:"active"`
	Sleep  uint64  `json:"sleep"`
	Error  uint64  `json:"error"`
	Total  uint64  `json:"total"`
	Ratio  float64 `json:"ratio"`
}

// ResultsResponse carries share acceptance statistics, including the
// top-10 best difficulties seen.
type ResultsResponse struct {
	Accepted    uint64     `json:"accepted"`
	Rejected    uint64     `json:"rejected"`
	Invalid     uint64     `json:"invalid"`
	Expired     uint64     `json:"expired"`
	AvgTime     uint32     `json:"avg_time"`
	Latency     uint32     `json:"latency"`
	HashesTotal uint64     `json:"hashes_total"`
	Best        [10]uint64 `json:"best"`
}
// startHTTPServer lazily starts the monitoring HTTP API when enabled in the
// config and not already running. It serves in a background goroutine.
//
// NOTE(review): a net.Listen failure is silently dropped — the API just never
// comes up; consider surfacing this. The http.Server also has no
// Read/WriteTimeout set.
func startHTTPServer(p *Proxy) {
	if p == nil || p.config == nil || !p.config.HTTP.Enabled || p.httpServer != nil {
		return
	}
	mux := http.NewServeMux()
	RegisterMonitoringRoutes(mux, p)
	address := net.JoinHostPort(p.config.HTTP.Host, strconv.Itoa(int(p.config.HTTP.Port)))
	listener, errorValue := net.Listen(address, "tcp")
	if errorValue != nil {
		return
	}
	server := &http.Server{
		Handler: mux,
	}
	// Record before serving so a concurrent start sees the guard above.
	p.httpServer = server
	go func() {
		_ = server.Serve(listener)
	}()
}
// stopHTTPServer gracefully shuts down the monitoring HTTP server, waiting at
// most five seconds for in-flight requests to drain.
func stopHTTPServer(p *Proxy) {
	if p == nil {
		return
	}
	server := p.httpServer
	if server == nil {
		return
	}
	p.httpServer = nil
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = server.Shutdown(ctx)
}
// RegisterMonitoringRoutes mounts the monitoring endpoints on any router with HandleFunc.
//
// mux := http.NewServeMux()
// RegisterMonitoringRoutes(mux, p)
// RegisterMonitoringRoutes mounts the monitoring endpoints on any router with HandleFunc.
//
// Routes: /1/summary (aggregate stats), /1/workers (per-worker rows),
// /1/miners (per-connection rows, password redacted). Every route first runs
// the shared auth/method policy via allowHTTPRequest.
//
// mux := http.NewServeMux()
// RegisterMonitoringRoutes(mux, p)
func RegisterMonitoringRoutes(router RouteRegistrar, proxyValue *Proxy) {
	if router == nil || proxyValue == nil {
		return
	}
	router.HandleFunc("/1/summary", func(writer http.ResponseWriter, request *http.Request) {
		if !allowHTTPRequest(writer, request, proxyValue.HTTPConfig()) {
			return
		}
		summary := proxyValue.Summary()
		upstreams := proxyValue.Upstreams()
		// Miners-per-upstream ratio; guarded against divide-by-zero.
		ratio := 0.0
		if upstreams.Total > 0 {
			ratio = float64(proxyValue.CurrentMiners()) / float64(upstreams.Total)
		}
		response := SummaryResponse{
			Version: proxyAPIVersion,
			Mode:    proxyValue.Mode(),
			Hashrate: HashrateResponse{
				Total: summary.Hashrate,
			},
			Miners: MinersCountResponse{
				Now: proxyValue.CurrentMiners(),
				Max: proxyValue.MaxMiners(),
			},
			Workers: uint64(len(proxyValue.Workers())),
			Upstreams: UpstreamResponse{
				Active: upstreams.Active,
				Sleep:  upstreams.Sleep,
				Error:  upstreams.Error,
				Total:  upstreams.Total,
				Ratio:  ratio,
			},
			Results: ResultsResponse{
				Accepted:    summary.Accepted,
				Rejected:    summary.Rejected,
				Invalid:     summary.Invalid,
				Expired:     summary.Expired,
				AvgTime:     summary.AvgTime,
				Latency:     summary.AvgLatency,
				HashesTotal: summary.Hashes,
				Best:        summary.TopDiff,
			},
		}
		writeHTTPJSON(writer, response)
	})
	router.HandleFunc("/1/workers", func(writer http.ResponseWriter, request *http.Request) {
		if !allowHTTPRequest(writer, request, proxyValue.HTTPConfig()) {
			return
		}
		// Each worker is rendered as a positional row: identity, counters,
		// then hashrate over the 60s..86400s windows.
		records := proxyValue.Workers()
		rows := make([][]interface{}, 0, len(records))
		for _, record := range records {
			rows = append(rows, []interface{}{
				record.Name,
				record.LastIP,
				record.Connections,
				record.Accepted,
				record.Rejected,
				record.Invalid,
				record.Hashes,
				record.LastHashAt.Unix(),
				record.Hashrate(60),
				record.Hashrate(600),
				record.Hashrate(3600),
				record.Hashrate(43200),
				record.Hashrate(86400),
			})
		}
		writeHTTPJSON(writer, map[string]interface{}{
			"mode":    proxyValue.WorkersMode(),
			"workers": rows,
		})
	})
	router.HandleFunc("/1/miners", func(writer http.ResponseWriter, request *http.Request) {
		if !allowHTTPRequest(writer, request, proxyValue.HTTPConfig()) {
			return
		}
		miners := proxyValue.Miners()
		rows := make([][]interface{}, 0, len(miners))
		for _, miner := range miners {
			ip := ""
			if remote := miner.RemoteAddr(); remote != nil {
				ip = remote.String()
			}
			// The password column is always masked in API output.
			rows = append(rows, []interface{}{
				miner.ID(),
				ip,
				miner.TX(),
				miner.RX(),
				miner.State(),
				miner.Diff(),
				miner.User(),
				"********",
				miner.RigID(),
				miner.Agent(),
			})
		}
		writeHTTPJSON(writer, map[string]interface{}{
			"format": []string{"id", "ip", "tx", "rx", "state", "diff", "user", "password", "rig_id", "agent"},
			"miners": rows,
		})
	})
}
// allowHTTPRequest enforces the monitoring API auth and method policy.
//
// When an access token is configured the request must carry
// "Authorization: Bearer <token>". The token comparison uses
// subtle.ConstantTimeCompare so response timing does not leak how much of a
// guessed token matched. When the API is restricted, only GET is allowed.
// Returns true when the handler may proceed; on false the response has
// already been written.
func allowHTTPRequest(writer http.ResponseWriter, request *http.Request, config HTTPConfig) bool {
	if request == nil {
		return false
	}
	if config.AccessToken != "" {
		header := request.Header.Get("Authorization")
		prefix := "Bearer "
		token := strings.TrimSpace(strings.TrimPrefix(header, prefix))
		// Constant-time equality; the length check inside ConstantTimeCompare
		// short-circuits, which only reveals the token length.
		authorized := strings.HasPrefix(header, prefix) &&
			subtle.ConstantTimeCompare([]byte(token), []byte(config.AccessToken)) == 1
		if !authorized {
			writer.Header().Set("WWW-Authenticate", "Bearer")
			http.Error(writer, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
			return false
		}
	}
	if config.Restricted && request.Method != http.MethodGet {
		http.Error(writer, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return false
	}
	return true
}
// writeHTTPJSON sets the JSON content type and streams the value to the
// client; encoding errors are best-effort ignored (headers are already sent).
func writeHTTPJSON(writer http.ResponseWriter, value interface{}) {
	writer.Header().Set("Content-Type", "application/json")
	encoder := json.NewEncoder(writer)
	_ = encoder.Encode(value)
}

View file

@ -1,142 +0,0 @@
package proxy
import (
"fmt"
"os"
"sync"
"time"
)
type appendLineLogger struct {
path string
mu sync.Mutex
file *os.File
closed bool
}
// newAppendLineLogger builds a logger targeting path; the file itself is not
// opened until the first writeLine call.
func newAppendLineLogger(path string) *appendLineLogger {
	logger := &appendLineLogger{}
	logger.path = path
	return logger
}
// writeLine appends line to the log file, opening it on demand. Open and
// write failures are swallowed: logging is strictly best-effort.
func (l *appendLineLogger) writeLine(line string) {
	if l == nil || l.path == "" {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.closed {
		return
	}
	if l.file == nil {
		handle, err := os.OpenFile(l.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
		if err != nil {
			// Best-effort: drop the line, retry the open next time.
			return
		}
		l.file = handle
	}
	_, _ = l.file.WriteString(line)
}
// setPath retargets the logger at a new file, closing any open handle and
// re-arming a previously closed logger. A no-op when the path is unchanged.
func (l *appendLineLogger) setPath(path string) {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if path == l.path {
		return
	}
	if handle := l.file; handle != nil {
		_ = handle.Close()
		l.file = nil
	}
	l.closed = false
	l.path = path
}
// close releases the file handle (writes are unbuffered, so there is nothing
// to flush) and marks the logger so later writeLine calls become no-ops.
func (l *appendLineLogger) close() {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.closed {
		return
	}
	l.closed = true
	if handle := l.file; handle != nil {
		_ = handle.Close()
		l.file = nil
	}
}
// subscribeAccessLog registers CONNECT/CLOSE line writers on the event bus
// and returns the backing logger, or nil when there is no bus or no path.
func subscribeAccessLog(events *EventBus, path string) *appendLineLogger {
	if events == nil || path == "" {
		return nil
	}
	accessLogger := newAppendLineLogger(path)
	events.Subscribe(EventLogin, func(event Event) {
		miner := event.Miner
		if miner == nil {
			return
		}
		accessLogger.writeLine(fmt.Sprintf("%s CONNECT %s %s %s\n",
			time.Now().UTC().Format(time.RFC3339),
			miner.IP(),
			miner.User(),
			miner.Agent(),
		))
	})
	events.Subscribe(EventClose, func(event Event) {
		miner := event.Miner
		if miner == nil {
			return
		}
		accessLogger.writeLine(fmt.Sprintf("%s CLOSE %s %s rx=%d tx=%d\n",
			time.Now().UTC().Format(time.RFC3339),
			miner.IP(),
			miner.User(),
			miner.RX(),
			miner.TX(),
		))
	})
	return accessLogger
}
// subscribeShareLog registers ACCEPT/REJECT line writers on the event bus
// and returns the backing logger, or nil when there is no bus or no path.
func subscribeShareLog(events *EventBus, path string) *appendLineLogger {
	if events == nil || path == "" {
		return nil
	}
	shareLogger := newAppendLineLogger(path)
	events.Subscribe(EventAccept, func(event Event) {
		miner := event.Miner
		if miner == nil {
			return
		}
		shareLogger.writeLine(fmt.Sprintf("%s ACCEPT %s diff=%d latency=%dms\n",
			time.Now().UTC().Format(time.RFC3339),
			miner.User(),
			event.Diff,
			event.Latency,
		))
	})
	events.Subscribe(EventReject, func(event Event) {
		miner := event.Miner
		if miner == nil {
			return
		}
		shareLogger.writeLine(fmt.Sprintf("%s REJECT %s reason=%q\n",
			time.Now().UTC().Format(time.RFC3339),
			miner.User(),
			event.Error,
		))
	})
	return shareLogger
}

View file

@ -1,356 +0,0 @@
package proxy
import (
"crypto/tls"
"errors"
"net"
"sort"
"time"
)
// splitterShutdown is the optional graceful-shutdown contract for splitters:
// Stop drains while PendingCount is non-zero, then calls Disconnect.
type splitterShutdown interface {
PendingCount() int
Disconnect()
}
// New wires the proxy and returns a ready-to-start instance.
//
// p, errorValue := proxy.New(config)
//
// Wiring order matters: the custom-diff filter subscribes to EventLogin
// before the miner-registry handler below, so the user/diff suffix is parsed
// before anything else observes the login.
func New(config *Config) (*Proxy, error) {
if config == nil {
return nil, errors.New("config is nil")
}
if errorValue := config.Validate(); errorValue != nil {
return nil, errorValue
}
// Core collaborators: event bus, share stats, difficulty filter, workers.
eventBus := NewEventBus()
statsValue := NewStats()
customDifficultyFilter := NewCustomDiff(config.CustomDiff)
eventBus.Subscribe(EventLogin, customDifficultyFilter.OnLogin)
workersValue := NewWorkers(config.Workers, eventBus)
workersValue.SetCustomDiffStats(config.CustomDiffStats)
splitterValue := newSplitter(config, eventBus)
proxyInstance := &Proxy{
config: config,
customDifficulty: customDifficultyFilter,
splitter: splitterValue,
stats: statsValue,
workers: workersValue,
events: eventBus,
miners: make(map[int64]*Miner),
rateLimiter: NewRateLimiter(config.RateLimit),
done: make(chan struct{}),
}
// Optional access/share log subscribers (nil when the path is empty).
proxyInstance.accessLogger = subscribeAccessLog(eventBus, config.AccessLogFile)
proxyInstance.shareLogger = subscribeShareLog(eventBus, config.ShareLogFile)
eventBus.Subscribe(EventLogin, func(event Event) {
if event.Miner != nil {
proxyInstance.minerMu.Lock()
proxyInstance.miners[event.Miner.ID()] = event.Miner
proxyInstance.minerMu.Unlock()
}
// CAS loop: raise the historic max-miners high-water mark atomically.
current := proxyInstance.currentMiners.Add(1)
for {
maximum := statsValue.maxMiners.Load()
if current <= maximum || statsValue.maxMiners.CompareAndSwap(maximum, current) {
break
}
}
})
eventBus.Subscribe(EventClose, func(event Event) {
if event.Miner != nil {
proxyInstance.minerMu.Lock()
delete(proxyInstance.miners, event.Miner.ID())
proxyInstance.minerMu.Unlock()
}
// Decrement without underflowing; Add(^uint64(0)) is "minus one".
if proxyInstance.currentMiners.Load() > 0 {
proxyInstance.currentMiners.Add(^uint64(0))
}
})
eventBus.Subscribe(EventAccept, statsValue.OnAccept)
eventBus.Subscribe(EventReject, statsValue.OnReject)
// Bridge bus events into the splitter's typed callbacks.
if splitterValue != nil {
eventBus.Subscribe(EventSubmit, func(event Event) {
splitterValue.OnSubmit(&SubmitEvent{
Miner: event.Miner,
JobID: event.JobID,
Nonce: event.Nonce,
Result: event.Result,
Algo: event.Algo,
RequestID: event.RequestID,
})
})
eventBus.Subscribe(EventLogin, func(event Event) {
splitterValue.OnLogin(&LoginEvent{Miner: event.Miner})
})
eventBus.Subscribe(EventClose, func(event Event) {
splitterValue.OnClose(&CloseEvent{Miner: event.Miner})
})
}
// Hot-reload watcher, only when watching is enabled and we know the file.
if config.Watch && config.sourcePath != "" {
proxyInstance.watcher = newConfigWatcher(config.sourcePath, proxyInstance.Reload, config.Watch)
proxyInstance.watcher.Start()
}
return proxyInstance, nil
}
// Start connects the pool, opens listeners, and blocks until `Stop()`.
//
// p.Start()
//
// Any TLS-certificate or listener failure aborts startup via Stop(); errors
// are not surfaced to the caller beyond the early return.
func (p *Proxy) Start() {
if p.splitter != nil {
p.splitter.Connect()
}
p.ticker = time.NewTicker(time.Second)
for _, bind := range p.config.Bind {
var tlsConfig *tls.Config
// Per-bind TLS only applies when TLS is also globally enabled.
if bind.TLS && p.config.TLS.Enabled {
certificate, errorValue := tls.LoadX509KeyPair(p.config.TLS.CertFile, p.config.TLS.KeyFile)
if errorValue == nil {
tlsConfig = buildTLSConfig(p.config.TLS)
tlsConfig.Certificates = []tls.Certificate{certificate}
} else {
// Bad cert/key: tear down whatever already started.
p.Stop()
return
}
}
server, errorValue := NewServer(bind, tlsConfig, p.rateLimiter, p.acceptConn)
if errorValue != nil {
p.Stop()
return
}
p.servers = append(p.servers, server)
server.Start()
}
if p.config != nil && p.config.HTTP.Enabled {
startHTTPServer(p)
}
// One-second heartbeat driving stats, workers, rate-limit, and splitter.
go func() {
var ticks uint64
for {
select {
case <-p.ticker.C:
ticks++
p.stats.Tick()
p.workers.Tick()
if p.rateLimiter != nil {
p.rateLimiter.Tick()
}
if p.splitter != nil {
p.splitter.Tick(ticks)
}
case <-p.done:
return
}
}
}()
// Block until Stop() closes the done channel.
<-p.done
}
// noopSplitter is a do-nothing Splitter used when no upstream pool handling
// is wanted; every method is an inert stub.
type noopSplitter struct{}
func (noopSplitter) Connect() {}
func (noopSplitter) OnLogin(event *LoginEvent) {}
func (noopSplitter) OnSubmit(event *SubmitEvent) {}
func (noopSplitter) OnClose(event *CloseEvent) {}
func (noopSplitter) Tick(ticks uint64) {}
func (noopSplitter) GC() {}
func (noopSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }
func (noopSplitter) PendingCount() int { return 0 }
func (noopSplitter) Disconnect() {}
// noopSplitterFactory satisfies the splitter-factory signature while
// producing an inert splitter; both arguments are intentionally ignored.
func noopSplitterFactory(cfg *Config, events *EventBus) Splitter {
	var inert noopSplitter
	return inert
}
// Stop closes listeners, log files, watcher, miners, and pool connections.
//
// p.Stop()
//
// Shutdown order: stop accepting (ticker, servers, HTTP, watcher), drain
// pending upstream submits (bounded), close miners, disconnect pools, close
// logs, then release Start() by closing done.
func (p *Proxy) Stop() {
if p.ticker != nil {
p.ticker.Stop()
}
for _, server := range p.servers {
server.Stop()
}
stopHTTPServer(p)
if p.watcher != nil {
p.watcher.Stop()
}
// Bounded drain: give in-flight submits up to 5s to clear.
if shutdown, ok := p.splitter.(splitterShutdown); ok {
deadline := time.Now().Add(5 * time.Second)
for shutdown.PendingCount() > 0 && time.Now().Before(deadline) {
time.Sleep(50 * time.Millisecond)
}
}
// Snapshot under the read lock, close outside it: miner.Close() fires
// EventClose handlers that take minerMu themselves.
p.minerMu.RLock()
miners := make([]*Miner, 0, len(p.miners))
for _, miner := range p.miners {
miners = append(miners, miner)
}
p.minerMu.RUnlock()
for _, miner := range miners {
if miner != nil {
miner.Close()
}
}
if shutdown, ok := p.splitter.(splitterShutdown); ok {
shutdown.Disconnect()
}
if p.accessLogger != nil {
p.accessLogger.close()
}
if p.shareLogger != nil {
p.shareLogger.close()
}
// Close done exactly once so repeated Stop() calls are safe.
select {
case <-p.done:
default:
close(p.done)
}
}
// Reload replaces the live config in place.
//
// p.Reload(newCfg)
//
// The config pointer, its sourcePath, and the original Bind list are kept so
// running listeners and the file watcher stay valid; pools, custom diff,
// worker stats, rate limits, and log paths are refreshed from the new value.
func (p *Proxy) Reload(config *Config) {
	// Guard the receiver as well: every other *Proxy method (Summary,
	// Workers, Miners, ...) tolerates a nil p, and the watcher invokes
	// this callback asynchronously.
	if p == nil || config == nil {
		return
	}
	if p.config == nil {
		p.config = config
	} else {
		// Preserve identity fields that must survive a reload.
		sourcePath := p.config.sourcePath
		bind := append([]BindAddr(nil), p.config.Bind...)
		*p.config = *config
		p.config.sourcePath = sourcePath
		p.config.Bind = bind
	}
	// Push the refreshed settings into the live collaborators.
	if p.customDifficulty != nil {
		p.customDifficulty.SetGlobalDiff(p.config.CustomDiff)
	}
	if p.workers != nil {
		p.workers.SetCustomDiffStats(p.config.CustomDiffStats)
	}
	if p.rateLimiter != nil {
		p.rateLimiter.SetConfig(p.config.RateLimit)
	}
	if p.accessLogger != nil {
		p.accessLogger.setPath(p.config.AccessLogFile)
	}
	if p.shareLogger != nil {
		p.shareLogger.setPath(p.config.ShareLogFile)
	}
}
// Summary returns an aggregate snapshot of share statistics; a zero value
// when the proxy or its stats are absent.
func (p *Proxy) Summary() StatsSummary {
	if p != nil && p.stats != nil {
		return p.stats.Summary()
	}
	return StatsSummary{}
}
// Workers returns the current per-worker records, or nil when the proxy or
// its worker registry is absent.
func (p *Proxy) Workers() []WorkerRecord {
	if p != nil && p.workers != nil {
		return p.workers.List()
	}
	return nil
}
// Miners returns the connected miners sorted by ascending ID, with any nil
// entries pushed to the end of the slice.
func (p *Proxy) Miners() []*Miner {
	if p == nil {
		return nil
	}
	p.minerMu.RLock()
	defer p.minerMu.RUnlock()
	snapshot := make([]*Miner, 0, len(p.miners))
	for _, miner := range p.miners {
		snapshot = append(snapshot, miner)
	}
	sort.Slice(snapshot, func(i int, j int) bool {
		switch {
		case snapshot[i] == nil:
			return false
		case snapshot[j] == nil:
			return true
		default:
			return snapshot[i].ID() < snapshot[j].ID()
		}
	})
	return snapshot
}
// CurrentMiners returns the number of presently connected miners.
func (p *Proxy) CurrentMiners() uint64 {
	if p != nil {
		return p.currentMiners.Load()
	}
	return 0
}
// MaxMiners returns the historic high-water mark of concurrent miners.
func (p *Proxy) MaxMiners() uint64 {
	if p != nil && p.stats != nil {
		return p.stats.maxMiners.Load()
	}
	return 0
}
// Mode returns the configured proxy mode, or "" without a config.
func (p *Proxy) Mode() string {
	if p != nil && p.config != nil {
		return p.config.Mode
	}
	return ""
}
// HTTPConfig returns the HTTP API settings, or a zero value without a config.
func (p *Proxy) HTTPConfig() HTTPConfig {
	if p != nil && p.config != nil {
		return p.config.HTTP
	}
	return HTTPConfig{}
}
// WorkersMode returns the configured worker-grouping mode as a string, or ""
// without a config.
func (p *Proxy) WorkersMode() string {
	if p != nil && p.config != nil {
		return string(p.config.Workers)
	}
	return ""
}
// Upstreams returns the splitter's upstream-pool statistics, or a zero value
// when the proxy or splitter is absent.
func (p *Proxy) Upstreams() UpstreamStats {
	if p != nil && p.splitter != nil {
		return p.splitter.Upstreams()
	}
	return UpstreamStats{}
}
// acceptConn is the Server callback for each accepted miner connection: it
// counts the connection, wraps it in a Miner, and starts the miner loop.
func (p *Proxy) acceptConn(conn net.Conn, localPort uint16) {
	// The previous guard tolerated a nil receiver for the stats bump but
	// then dereferenced p.events/p.splitter unconditionally and panicked.
	// Bail out early instead, closing the socket so it does not leak.
	if p == nil {
		if conn != nil {
			_ = conn.Close()
		}
		return
	}
	if p.stats != nil {
		p.stats.connections.Add(1)
	}
	// A non-nil (empty) tls.Config signals downstream code that the link
	// is TLS; only its presence matters.
	var tlsConfig *tls.Config
	if _, ok := conn.(*tls.Conn); ok {
		tlsConfig = &tls.Config{}
	}
	miner := NewMiner(conn, localPort, tlsConfig)
	miner.events = p.events
	miner.splitter = p.splitter
	if p.config != nil {
		miner.accessPassword = p.config.AccessPassword
		miner.algoExtension = p.config.AlgoExtension
	}
	miner.Start()
}

View file

@ -1,134 +0,0 @@
package proxy
import (
"net"
"os"
"strings"
"testing"
)
// TestProxy_Reload_Good verifies a reload keeps the bind list but swaps
// pools, custom diff, and redirects access/share log writes to new files.
func TestProxy_Reload_Good(t *testing.T) {
tempDir := t.TempDir()
cfg := &Config{
Mode: "nicehash",
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
CustomDiff: 100,
Workers: WorkersDisabled,
AccessLogFile: tempDir + "/access-a.log",
ShareLogFile: tempDir + "/share-a.log",
}
proxyValue, errorValue := New(cfg)
if errorValue != nil {
t.Fatal(errorValue)
}
// One login + one accept land in the "-a" log files.
miner := &Miner{user: "wallet", agent: "agent", ip: "10.0.0.1"}
proxyValue.events.Dispatch(Event{Type: EventLogin, Miner: miner})
proxyValue.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 100, Latency: 10})
reloadCfg := &Config{
Mode: "simple",
Bind: []BindAddr{{Host: "0.0.0.0", Port: 4444}},
Pools: []PoolConfig{{URL: "pool-b:4444", Enabled: true}},
CustomDiff: 250,
Workers: WorkersByUser,
AccessLogFile: tempDir + "/access-b.log",
ShareLogFile: tempDir + "/share-b.log",
}
proxyValue.Reload(reloadCfg)
if len(proxyValue.config.Bind) != 1 || proxyValue.config.Bind[0].Port != 3333 {
t.Fatalf("expected bind addresses to remain unchanged, got %+v", proxyValue.config.Bind)
}
if len(proxyValue.config.Pools) != 1 || proxyValue.config.Pools[0].URL != "pool-b:4444" {
t.Fatalf("expected pools to reload, got %+v", proxyValue.config.Pools)
}
if proxyValue.config.CustomDiff != 250 {
t.Fatalf("expected custom diff to reload, got %d", proxyValue.config.CustomDiff)
}
if proxyValue.customDifficulty == nil || proxyValue.customDifficulty.globalDiff != 250 {
t.Fatalf("expected live custom diff to update, got %+v", proxyValue.customDifficulty)
}
// Post-reload events must land in the "-b" log files.
proxyValue.events.Dispatch(Event{Type: EventLogin, Miner: miner})
proxyValue.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 250, Latency: 12})
oldAccessLog, errorValue := os.ReadFile(tempDir + "/access-a.log")
if errorValue != nil {
t.Fatal(errorValue)
}
newAccessLog, errorValue := os.ReadFile(tempDir + "/access-b.log")
if errorValue != nil {
t.Fatal(errorValue)
}
if strings.Count(string(oldAccessLog), "CONNECT") != 1 || strings.Count(string(newAccessLog), "CONNECT") != 1 {
t.Fatalf("expected access log writes to move across reload, got old=%q new=%q", oldAccessLog, newAccessLog)
}
oldShareLog, errorValue := os.ReadFile(tempDir + "/share-a.log")
if errorValue != nil {
t.Fatal(errorValue)
}
newShareLog, errorValue := os.ReadFile(tempDir + "/share-b.log")
if errorValue != nil {
t.Fatal(errorValue)
}
if strings.Count(string(oldShareLog), "ACCEPT") != 1 || strings.Count(string(newShareLog), "ACCEPT") != 1 {
t.Fatalf("expected share log writes to move across reload, got old=%q new=%q", oldShareLog, newShareLog)
}
}
// TestProxy_CurrentMiners_Good verifies the login/close counter is tracked
// per proxy instance, not shared process-wide.
func TestProxy_CurrentMiners_Good(t *testing.T) {
cfg := &Config{
Mode: "nicehash",
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
Workers: WorkersDisabled,
}
firstProxy, errorValue := New(cfg)
if errorValue != nil {
t.Fatal(errorValue)
}
secondProxy, errorValue := New(cfg)
if errorValue != nil {
t.Fatal(errorValue)
}
miner := &Miner{}
firstProxy.events.Dispatch(Event{Type: EventLogin, Miner: miner})
if got := firstProxy.CurrentMiners(); got != 1 {
t.Fatalf("expected first proxy miner count 1, got %d", got)
}
if got := secondProxy.CurrentMiners(); got != 0 {
t.Fatalf("expected second proxy miner count 0, got %d", got)
}
firstProxy.events.Dispatch(Event{Type: EventClose, Miner: miner})
if got := firstProxy.CurrentMiners(); got != 0 {
t.Fatalf("expected first proxy miner count to return to 0, got %d", got)
}
}
// TestProxy_AcceptConn_Good verifies the connection counter increments when
// a socket is handed to the proxy; net.Pipe stands in for a real listener.
func TestProxy_AcceptConn_Good(t *testing.T) {
cfg := &Config{
Mode: "nicehash",
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a:3333", Enabled: true}},
Workers: WorkersDisabled,
}
proxyValue, errorValue := New(cfg)
if errorValue != nil {
t.Fatal(errorValue)
}
serverConn, clientConn := net.Pipe()
proxyValue.acceptConn(serverConn, 3333)
if got := proxyValue.stats.connections.Load(); got != 1 {
t.Fatalf("expected connection counter to increment on accept, got %d", got)
}
_ = clientConn.Close()
_ = serverConn.Close()
}

115
ratelimit_test.go Normal file
View file

@ -0,0 +1,115 @@
package proxy
import (
"testing"
"time"
)
// TestRateLimiter_Allow_Good verifies the first N calls within budget are allowed.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 10})
// limiter.Allow("1.2.3.4:3333") // true (first 10 calls)
func TestRateLimiter_Allow_Good(t *testing.T) {
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60})
for i := 0; i < 10; i++ {
if !rl.Allow("1.2.3.4:3333") {
t.Fatalf("expected call %d to be allowed", i+1)
}
}
}
// TestRateLimiter_Allow_Bad verifies the 11th call fails when budget is 10/min.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 10})
// // calls 1-10 pass, call 11 fails
func TestRateLimiter_Allow_Bad(t *testing.T) {
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60})
for i := 0; i < 10; i++ {
rl.Allow("1.2.3.4:3333")
}
if rl.Allow("1.2.3.4:3333") {
t.Fatalf("expected 11th call to be rejected")
}
}
// TestRateLimiter_Allow_Ugly verifies a banned IP stays banned for BanDurationSeconds.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 300})
// limiter.Allow("1.2.3.4:3333") // true (exhausts budget)
// limiter.Allow("1.2.3.4:3333") // false (banned for 300 seconds)
func TestRateLimiter_Allow_Ugly(t *testing.T) {
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 300})
if !rl.Allow("1.2.3.4:3333") {
t.Fatalf("expected first call to pass")
}
if rl.Allow("1.2.3.4:3333") {
t.Fatalf("expected second call to fail")
}
// Verify the IP is still banned even with a fresh bucket
rl.mu.Lock()
rl.bucketByHost["1.2.3.4"] = &tokenBucket{tokens: 100, lastRefill: time.Now()}
rl.mu.Unlock()
if rl.Allow("1.2.3.4:3333") {
t.Fatalf("expected banned IP to remain banned regardless of fresh bucket")
}
}
// TestRateLimiter_Tick_Good verifies Tick removes expired bans.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1})
// limiter.Tick()
func TestRateLimiter_Tick_Good(t *testing.T) {
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 1})
rl.Allow("1.2.3.4:3333")
rl.Allow("1.2.3.4:3333") // triggers ban
// Simulate expired ban
// Back-dating the ban avoids sleeping in the test.
rl.mu.Lock()
rl.banUntilByHost["1.2.3.4"] = time.Now().Add(-time.Second)
rl.mu.Unlock()
rl.Tick()
rl.mu.Lock()
_, banned := rl.banUntilByHost["1.2.3.4"]
rl.mu.Unlock()
if banned {
t.Fatalf("expected expired ban to be removed by Tick")
}
}
// TestRateLimiter_Allow_ReplenishesHighLimits verifies token replenishment at high rates.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 120})
func TestRateLimiter_Allow_ReplenishesHighLimits(t *testing.T) {
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 120, BanDurationSeconds: 1})
// 30s at 120/min should credit ~60 tokens to an empty bucket.
rl.mu.Lock()
rl.bucketByHost["1.2.3.4"] = &tokenBucket{
tokens: 0,
lastRefill: time.Now().Add(-30 * time.Second),
}
rl.mu.Unlock()
if !rl.Allow("1.2.3.4:1234") {
t.Fatalf("expected bucket to replenish at 120/min")
}
}
// TestRateLimiter_Disabled_Good verifies a zero-budget limiter allows all connections.
//
// limiter := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 0})
// limiter.Allow("any-ip") // always true
func TestRateLimiter_Disabled_Good(t *testing.T) {
rl := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 0})
for i := 0; i < 100; i++ {
if !rl.Allow("1.2.3.4:3333") {
t.Fatalf("expected disabled limiter to allow all connections")
}
}
}

404
reload_test.go Normal file
View file

@ -0,0 +1,404 @@
package proxy
import (
"bufio"
"encoding/json"
"net"
"strings"
"testing"
"time"
)
// reloadableSplitter is a test double that counts ReloadPools calls so tests
// can assert whether a config reload reconnected upstream pools.
type reloadableSplitter struct {
// reloads counts ReloadPools invocations.
reloads int
}
func (s *reloadableSplitter) Connect() {}
func (s *reloadableSplitter) OnLogin(event *LoginEvent) {}
func (s *reloadableSplitter) OnSubmit(event *SubmitEvent) {}
func (s *reloadableSplitter) OnClose(event *CloseEvent) {}
func (s *reloadableSplitter) Tick(ticks uint64) {}
func (s *reloadableSplitter) GC() {}
func (s *reloadableSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }
func (s *reloadableSplitter) ReloadPools() { s.reloads++ }
// TestProxy_Reload_Good verifies Reload keeps the config pointer, bind list,
// and mode while refreshing pools, workers mode, custom diff, and rate limit.
func TestProxy_Reload_Good(t *testing.T) {
original := &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
}
p := &Proxy{
config: original,
customDiff: NewCustomDiff(1),
rateLimit: NewRateLimiter(RateLimit{}),
}
updated := &Config{
Mode: "simple",
Workers: WorkersByUser,
Bind: []BindAddr{{Host: "0.0.0.0", Port: 4444}},
Pools: []PoolConfig{{URL: "pool-b.example:4444", Enabled: true}},
CustomDiff: 50000,
AccessPassword: "secret",
CustomDiffStats: true,
AlgoExtension: true,
AccessLogFile: "/tmp/access.log",
ReuseTimeout: 30,
Retries: 5,
RetryPause: 2,
Watch: true,
RateLimit: RateLimit{MaxConnectionsPerMinute: 10, BanDurationSeconds: 60},
}
p.Reload(updated)
if p.config != original {
t.Fatalf("expected reload to preserve the existing config pointer")
}
if got := p.config.Bind[0]; got.Host != "127.0.0.1" || got.Port != 3333 {
t.Fatalf("expected bind addresses to remain unchanged, got %+v", got)
}
if p.config.Mode != "nicehash" {
t.Fatalf("expected mode to remain unchanged, got %q", p.config.Mode)
}
if p.config.Workers != WorkersByUser {
t.Fatalf("expected workers mode to reload, got %q", p.config.Workers)
}
if got := p.config.Pools[0].URL; got != "pool-b.example:4444" {
t.Fatalf("expected pools to reload, got %q", got)
}
if got := p.customDiff.globalDiff.Load(); got != 50000 {
t.Fatalf("expected custom diff to reload, got %d", got)
}
if !p.rateLimit.IsActive() {
t.Fatalf("expected rate limiter to be replaced with active configuration")
}
}
// TestProxy_Reload_WorkersMode_Good verifies switching the worker-grouping
// mode rebuilds existing worker records under the new key (user vs rig ID).
func TestProxy_Reload_WorkersMode_Good(t *testing.T) {
miner := &Miner{id: 7, user: "wallet-a", rigID: "rig-a", ip: "10.0.0.7"}
workers := NewWorkers(WorkersByRigID, nil)
workers.OnLogin(Event{Miner: miner})
p := &Proxy{
config: &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
},
workers: workers,
miners: map[int64]*Miner{miner.id: miner},
}
p.Reload(&Config{
Mode: "nicehash",
Workers: WorkersByUser,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
})
if got := p.WorkersMode(); got != WorkersByUser {
t.Fatalf("expected proxy workers mode %q, got %q", WorkersByUser, got)
}
records := p.WorkerRecords()
if len(records) != 1 {
t.Fatalf("expected one rebuilt worker record, got %d", len(records))
}
if got := records[0].Name; got != "wallet-a" {
t.Fatalf("expected worker record to rebuild using user mode, got %q", got)
}
}
// TestProxy_Reload_CustomDiff_Good verifies a custom-diff reload pushes a
// fresh job (with a retargeted difficulty) to an already-ready miner.
func TestProxy_Reload_CustomDiff_Good(t *testing.T) {
minerConn, clientConn := net.Pipe()
defer minerConn.Close()
defer clientConn.Close()
miner := NewMiner(minerConn, 3333, nil)
miner.state = MinerStateReady
miner.globalDiff = 1000
miner.customDiff = 1000
miner.currentJob = Job{
Blob: strings.Repeat("0", 160),
JobID: "job-1",
Target: "01000000",
Algo: "cn/r",
}
p := &Proxy{
config: &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
CustomDiff: 1000,
},
customDiff: NewCustomDiff(1000),
miners: map[int64]*Miner{miner.ID(): miner},
}
// Reader goroutine captures the single job line the reload should emit.
done := make(chan map[string]any, 1)
go func() {
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
if err != nil {
done <- nil
return
}
var payload map[string]any
if err := json.Unmarshal(line, &payload); err != nil {
done <- nil
return
}
done <- payload
}()
p.Reload(&Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
CustomDiff: 5000,
})
select {
case payload := <-done:
if payload == nil {
t.Fatal("expected reload to resend the current job with the new custom diff")
}
params, ok := payload["params"].(map[string]any)
if !ok {
t.Fatalf("expected job params payload, got %#v", payload["params"])
}
target, _ := params["target"].(string)
if got := (Job{Target: target}).DifficultyFromTarget(); got == 0 || got > 5000 {
t.Fatalf("expected resent job difficulty at or below 5000, got %d", got)
}
case <-time.After(time.Second):
t.Fatal("timed out waiting for reload job refresh")
}
if miner.customDiff != 5000 {
t.Fatalf("expected active miner custom diff to reload, got %d", miner.customDiff)
}
if miner.globalDiff != 5000 {
t.Fatalf("expected active miner global diff to reload, got %d", miner.globalDiff)
}
}
// TestProxy_Reload_CustomDiff_Bad verifies a login-suffix custom diff is not
// clobbered by a global reload, while the global fallback still updates.
func TestProxy_Reload_CustomDiff_Bad(t *testing.T) {
miner := &Miner{
id: 9,
state: MinerStateReady,
globalDiff: 1000,
customDiff: 7000,
customDiffFromLogin: true,
currentJob: Job{
Blob: strings.Repeat("0", 160),
JobID: "job-1",
Target: "01000000",
},
}
p := &Proxy{
config: &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
CustomDiff: 1000,
},
customDiff: NewCustomDiff(1000),
miners: map[int64]*Miner{miner.ID(): miner},
}
p.Reload(&Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
CustomDiff: 5000,
})
if miner.customDiff != 7000 {
t.Fatalf("expected login suffix custom diff to be preserved, got %d", miner.customDiff)
}
if miner.globalDiff != 5000 {
t.Fatalf("expected miner global diff to update for future logins, got %d", miner.globalDiff)
}
}
// TestProxy_Reload_CustomDiff_Ugly verifies reloading to CustomDiff=0 clears
// the override on miners that have not authenticated yet.
func TestProxy_Reload_CustomDiff_Ugly(t *testing.T) {
miner := &Miner{
id: 11,
state: MinerStateWaitLogin,
globalDiff: 1000,
customDiff: 1000,
}
p := &Proxy{
config: &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
CustomDiff: 1000,
},
customDiff: NewCustomDiff(1000),
miners: map[int64]*Miner{miner.ID(): miner},
}
p.Reload(&Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
CustomDiff: 0,
})
if miner.customDiff != 0 {
t.Fatalf("expected reload to clear the global custom diff for unauthenticated miners, got %d", miner.customDiff)
}
if miner.globalDiff != 0 {
t.Fatalf("expected miner global diff to be cleared, got %d", miner.globalDiff)
}
}
// TestProxy_Reload_UpdatesServers verifies a rate-limit reload replaces the
// limiter instance and rewires running servers to it.
func TestProxy_Reload_UpdatesServers(t *testing.T) {
originalLimiter := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1})
p := &Proxy{
config: &Config{Mode: "nicehash", Workers: WorkersByRigID},
rateLimit: originalLimiter,
servers: []*Server{
{limiter: originalLimiter},
},
}
p.Reload(&Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
RateLimit: RateLimit{MaxConnectionsPerMinute: 10},
AccessLogFile: "",
})
if got := p.servers[0].limiter; got != p.rateLimit {
t.Fatalf("expected server limiter to be updated")
}
if p.rateLimit == originalLimiter {
t.Fatalf("expected rate limiter instance to be replaced")
}
}
// TestProxy_Reload_WatchEnabled_Good verifies enabling Watch via reload
// creates a watcher bound to the ORIGINAL config path, not the new one.
func TestProxy_Reload_WatchEnabled_Good(t *testing.T) {
p := &Proxy{
config: &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
configPath: "/tmp/proxy.json",
},
}
p.Reload(&Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 4444}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
Watch: true,
configPath: "/tmp/ignored.json",
})
if p.watcher == nil {
t.Fatalf("expected reload to create a watcher when watch is enabled")
}
if got := p.watcher.configPath; got != "/tmp/proxy.json" {
t.Fatalf("expected watcher to keep the original config path, got %q", got)
}
p.watcher.Stop()
}
// TestProxy_Reload_WatchDisabled_Bad verifies disabling Watch via reload
// stops the running watcher and clears the reference.
func TestProxy_Reload_WatchDisabled_Bad(t *testing.T) {
watcher := NewConfigWatcher("/tmp/proxy.json", func(*Config) {})
p := &Proxy{
config: &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
configPath: "/tmp/proxy.json",
Watch: true,
},
watcher: watcher,
}
p.Reload(&Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
Watch: false,
configPath: "/tmp/ignored.json",
})
if p.watcher != nil {
t.Fatalf("expected reload to stop and clear the watcher when watch is disabled")
}
select {
case <-watcher.stopCh:
default:
t.Fatalf("expected existing watcher to be stopped")
}
}
// TestProxy_Reload_PoolsChanged_ReloadsSplitter_Good verifies a change in
// the pool list triggers exactly one splitter ReloadPools.
func TestProxy_Reload_PoolsChanged_ReloadsSplitter_Good(t *testing.T) {
splitter := &reloadableSplitter{}
p := &Proxy{
config: &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
},
splitter: splitter,
}
p.Reload(&Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-b.example:3333", Enabled: true}},
})
if splitter.reloads != 1 {
t.Fatalf("expected pool reload to reconnect upstreams once, got %d", splitter.reloads)
}
}
// TestProxy_Reload_PoolsUnchanged_DoesNotReloadSplitter_Ugly verifies an
// identical pool list does not cause a pointless upstream reconnect.
func TestProxy_Reload_PoolsUnchanged_DoesNotReloadSplitter_Ugly(t *testing.T) {
splitter := &reloadableSplitter{}
p := &Proxy{
config: &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
},
splitter: splitter,
}
p.Reload(&Config{
Mode: "nicehash",
Workers: WorkersByRigID,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool-a.example:3333", Enabled: true}},
})
if splitter.reloads != 0 {
t.Fatalf("expected unchanged pool config to skip reconnect, got %d", splitter.reloads)
}
}

View file

@ -1,181 +0,0 @@
package proxy
import (
"strconv"
"strings"
"time"
)
// NewRateLimiter creates a per-IP limiter, for example:
//
// rl := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300})
func NewRateLimiter(config RateLimit) *RateLimiter {
	limiter := &RateLimiter{config: config}
	limiter.buckets = make(map[string]*tokenBucket)
	limiter.banned = make(map[string]time.Time)
	return limiter
}
// SetConfig swaps in a live reload value such as:
//
// rl.SetConfig(proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300})
func (rateLimiter *RateLimiter) SetConfig(config RateLimit) {
	if rateLimiter == nil {
		return
	}
	rateLimiter.mu.Lock()
	defer rateLimiter.mu.Unlock()
	rateLimiter.config = config
}
// Allow returns true if the IP address is permitted to open a new connection,
// consuming one token from the per-host bucket and banning the host on
// exhaustion. Thread-safe.
//
// if rl.Allow(conn.RemoteAddr().String()) { proceed() }
func (rateLimiter *RateLimiter) Allow(ip string) bool {
	if rateLimiter == nil {
		return true
	}
	host := remoteHost(ip)
	now := time.Now().UTC()
	rateLimiter.mu.Lock()
	defer rateLimiter.mu.Unlock()
	// A non-positive budget disables limiting entirely.
	if rateLimiter.config.MaxConnectionsPerMinute <= 0 {
		return true
	}
	// Honour (and lazily expire) an active ban before touching the bucket.
	if until, banned := rateLimiter.banned[host]; banned {
		if until.After(now) {
			return false
		}
		delete(rateLimiter.banned, host)
	}
	bucket := rateLimiter.buckets[host]
	if bucket == nil {
		bucket = &tokenBucket{
			tokens:     rateLimiter.config.MaxConnectionsPerMinute,
			lastRefill: now,
		}
		rateLimiter.buckets[host] = bucket
	}
	rateLimiter.refillBucket(bucket, now)
	if bucket.tokens <= 0 {
		if rateLimiter.config.BanDurationSeconds > 0 {
			rateLimiter.banned[host] = now.Add(time.Duration(rateLimiter.config.BanDurationSeconds) * time.Second)
		}
		return false
	}
	bucket.tokens--
	return true
}
// Tick removes expired ban entries and refills all token buckets. Called
// every second.
//
// rl.Tick()
func (rateLimiter *RateLimiter) Tick() {
	if rateLimiter == nil {
		return
	}
	now := time.Now().UTC()
	rateLimiter.mu.Lock()
	defer rateLimiter.mu.Unlock()
	// Nothing to maintain when limiting is disabled.
	if rateLimiter.config.MaxConnectionsPerMinute <= 0 {
		return
	}
	for host, until := range rateLimiter.banned {
		if !until.After(now) {
			delete(rateLimiter.banned, host)
		}
	}
	for _, bucket := range rateLimiter.buckets {
		rateLimiter.refillBucket(bucket, now)
	}
}
// refillBucket tops bucket up from elapsed wall time, crediting one token
// per refill interval and capping at the per-minute budget. Caller holds mu.
func (rateLimiter *RateLimiter) refillBucket(bucket *tokenBucket, now time.Time) {
	limit := rateLimiter.config.MaxConnectionsPerMinute
	if bucket == nil || limit <= 0 {
		return
	}
	interval := time.Minute / time.Duration(limit)
	if interval <= 0 {
		interval = time.Second
	}
	elapsed := now.Sub(bucket.lastRefill)
	if elapsed < interval {
		return
	}
	credit := int(elapsed / interval)
	bucket.tokens += credit
	if bucket.tokens > limit {
		bucket.tokens = limit
	}
	// Advance lastRefill by exactly the credited intervals so fractional
	// time carries over to the next call.
	bucket.lastRefill = bucket.lastRefill.Add(time.Duration(credit) * interval)
}
// NewCustomDiff stores the default custom difficulty override.
//
// cd := proxy.NewCustomDiff(50000)
func NewCustomDiff(globalDiff uint64) *CustomDiff {
	filter := &CustomDiff{}
	filter.globalDiff = globalDiff
	return filter
}
// SetGlobalDiff updates the default custom difficulty override.
//
// cd.SetGlobalDiff(100000)
func (customDiff *CustomDiff) SetGlobalDiff(globalDiff uint64) {
	if customDiff == nil {
		return
	}
	customDiff.mu.Lock()
	defer customDiff.mu.Unlock()
	customDiff.globalDiff = globalDiff
}
// OnLogin parses `WALLET+50000` into `WALLET` and `50000`.
//
// cd.OnLogin(proxy.Event{Miner: miner})
//
// With no valid "+<digits>" suffix the global override (possibly zero) is
// applied instead.
func (customDiff *CustomDiff) OnLogin(event Event) {
	miner := event.Miner
	if miner == nil {
		return
	}
	user := miner.User()
	// A trailing "+<digits>" suffix is a per-miner difficulty request;
	// LastIndex lets wallets that themselves contain '+' still work.
	if plus := strings.LastIndex(user, "+"); plus > 0 && plus < len(user)-1 {
		if diff, err := strconv.ParseUint(user[plus+1:], 10, 64); err == nil {
			miner.SetUser(user[:plus])
			miner.SetCustomDiff(diff)
			return
		}
	}
	// No valid suffix: fall back to the global override (zero clears it).
	var globalDiff uint64
	if customDiff != nil {
		customDiff.mu.RLock()
		globalDiff = customDiff.globalDiff
		customDiff.mu.RUnlock()
	}
	miner.SetCustomDiff(globalDiff)
}

View file

@ -1,57 +0,0 @@
package proxy
import (
"testing"
"time"
)
// TestRateLimiter_Allow_Good: a fresh host within its per-minute budget is admitted.
func TestRateLimiter_Allow_Good(t *testing.T) {
limiter := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 2})
if !limiter.Allow("127.0.0.1:1234") {
t.Fatal("expected first connection to pass")
}
}
// TestRateLimiter_Allow_Bad: the second connection in the same minute exceeds a
// budget of 1 and is blocked (ban duration configured so the block sticks).
func TestRateLimiter_Allow_Bad(t *testing.T) {
limiter := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1, BanDurationSeconds: 60})
if !limiter.Allow("127.0.0.1:1234") {
t.Fatal("expected first connection to pass")
}
if limiter.Allow("127.0.0.1:1234") {
t.Fatal("expected second connection to be blocked")
}
}
// TestRateLimiter_Allow_Ugly: at 1 token/minute, a one-second wait plus a Tick
// must NOT refill the bucket, so the second attempt still fails.
func TestRateLimiter_Allow_Ugly(t *testing.T) {
limiter := NewRateLimiter(RateLimit{MaxConnectionsPerMinute: 1})
limiter.Allow("127.0.0.1:1234")
time.Sleep(time.Second)
limiter.Tick()
if limiter.Allow("127.0.0.1:1234") {
t.Fatal("expected bucket not to refill fully after one second at 1/minute")
}
}
// TestCustomDiff_OnLogin_Good: a `wallet+5000` login splits into user and
// per-miner difficulty, ignoring the global default.
func TestCustomDiff_OnLogin_Good(t *testing.T) {
miner := &Miner{user: "wallet+5000"}
NewCustomDiff(100).OnLogin(Event{Miner: miner})
if miner.User() != "wallet" || miner.CustomDiff() != 5000 {
t.Fatalf("expected parsed custom diff, got user=%q diff=%d", miner.User(), miner.CustomDiff())
}
}
// TestCustomDiff_OnLogin_Bad: a login without a '+' suffix falls back to the
// global default difficulty.
func TestCustomDiff_OnLogin_Bad(t *testing.T) {
miner := &Miner{user: "wallet"}
NewCustomDiff(100).OnLogin(Event{Miner: miner})
if miner.CustomDiff() != 100 {
t.Fatalf("expected fallback diff 100, got %d", miner.CustomDiff())
}
}
// TestCustomDiff_OnLogin_Ugly: a non-numeric suffix is not stripped from the
// user name, and the global default difficulty applies.
func TestCustomDiff_OnLogin_Ugly(t *testing.T) {
miner := &Miner{user: "wallet+bad"}
NewCustomDiff(100).OnLogin(Event{Miner: miner})
if miner.User() != "wallet+bad" || miner.CustomDiff() != 100 {
t.Fatalf("expected invalid suffix to preserve user and fall back to global diff, got user=%q diff=%d", miner.User(), miner.CustomDiff())
}
}

View file

@ -7,11 +7,18 @@ import (
// Server listens on one BindAddr and creates a Miner for each accepted connection. // Server listens on one BindAddr and creates a Miner for each accepted connection.
// //
// srv, result := proxy.NewServer(bind, tlsCfg, rateLimiter, onAccept) // srv, result := proxy.NewServer(
// proxy.BindAddr{Host: "0.0.0.0", Port: 3333, TLS: false},
// nil,
// proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 30}),
// func(conn net.Conn, port uint16) { _ = conn; _ = port },
// )
// if result.OK {
// srv.Start() // srv.Start()
// }
type Server struct { type Server struct {
addr BindAddr addr BindAddr
tlsCfg *tls.Config // nil for plain TCP tlsConfig *tls.Config // nil for plain TCP
limiter *RateLimiter limiter *RateLimiter
onAccept func(net.Conn, uint16) onAccept func(net.Conn, uint16)
listener net.Listener listener net.Listener

View file

@ -1,84 +0,0 @@
package proxy
import (
"crypto/tls"
"errors"
"net"
"strconv"
)
// NewServer opens a listener and prepares the accept loop.
//
// srv, errorValue := proxy.NewServer(proxy.BindAddr{Host: "0.0.0.0", Port: 3333}, nil, rateLimiter, onAccept)
func NewServer(bindAddress BindAddr, tlsConfig *tls.Config, rateLimiter *RateLimiter, onAccept func(net.Conn, uint16)) (*Server, error) {
address := net.JoinHostPort(bindAddress.Host, strconv.Itoa(int(bindAddress.Port)))
listener, errorValue := net.Listen("tcp", address)
if errorValue != nil {
return nil, errorValue
}
return &Server{
addr: bindAddress,
tlsCfg: tlsConfig,
limiter: rateLimiter,
onAccept: onAccept,
listener: listener,
done: make(chan struct{}),
}, nil
}
// Start accepts miners in a background goroutine.
//
// srv.Start()
func (server *Server) Start() {
if server == nil || server.listener == nil {
return
}
go func() {
for {
conn, errorValue := server.listener.Accept()
if errorValue != nil {
select {
case <-server.done:
return
default:
continue
}
}
if server.limiter != nil && !server.limiter.Allow(conn.RemoteAddr().String()) {
_ = conn.Close()
continue
}
if server.tlsCfg != nil {
conn = tls.Server(conn, server.tlsCfg)
}
if server.onAccept == nil {
_ = conn.Close()
continue
}
server.onAccept(conn, server.addr.Port)
}
}()
}
// Stop closes the listener without forcing existing sockets shut.
//
// srv.Stop()
func (server *Server) Stop() {
if server == nil || server.listener == nil {
return
}
select {
case <-server.done:
default:
close(server.done)
}
_ = server.listener.Close()
}
var errServerClosed = errors.New("server closed")

95
sharelog_impl.go Normal file
View file

@ -0,0 +1,95 @@
package proxy
import (
	"os"
	"strconv"
	"strings"
	"sync"
	"time"
)
// shareLogSink appends one line per share outcome (ACCEPT/REJECT) to the file
// at path. The file is opened lazily on first write; path, file, and writes
// are all guarded by mu.
type shareLogSink struct {
path string
file *os.File
mu sync.Mutex
}
// newShareLogSink returns a sink that lazily opens path on first write.
func newShareLogSink(path string) *shareLogSink {
	sink := new(shareLogSink)
	sink.path = path
	return sink
}
// SetPath switches the sink to a new log path, closing any open file so the
// next write reopens at the new location. A no-op when the path is unchanged.
func (l *shareLogSink) SetPath(path string) {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if path == l.path {
		return
	}
	l.path = path
	if l.file == nil {
		return
	}
	_ = l.file.Close()
	l.file = nil
}
// Close releases the underlying log file, if one is open.
func (l *shareLogSink) Close() {
	if l == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.file == nil {
		return
	}
	_ = l.file.Close()
	l.file = nil
}
// OnAccept records an accepted share with its difficulty and latency.
// Nil receiver or miner is tolerated so the sink can be wired unconditionally.
func (l *shareLogSink) OnAccept(e Event) {
if l == nil || e.Miner == nil {
return
}
l.writeLine("ACCEPT", e.Miner.User(), e.Diff, e.Latency, "")
}
// OnReject records a rejected share with the pool's reject reason.
// Diff and latency are passed as zero; writeLine does not print them for rejects.
func (l *shareLogSink) OnReject(e Event) {
if l == nil || e.Miner == nil {
return
}
l.writeLine("REJECT", e.Miner.User(), 0, 0, e.Error)
}
// writeLine appends one formatted outcome line, opening the log file on first
// use. Writes are serialized by mu; open/write failures are silently dropped
// so logging can never disrupt share handling.
func (l *shareLogSink) writeLine(kind, user string, diff uint64, latency uint16, reason string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if strings.TrimSpace(l.path) == "" {
		return // empty path disables share logging
	}
	if l.file == nil {
		file, err := os.OpenFile(l.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
		if err != nil {
			return // best-effort: skip the line rather than fail the share
		}
		l.file = file
	}
	var builder strings.Builder
	builder.WriteString(time.Now().UTC().Format(time.RFC3339))
	builder.WriteByte(' ')
	builder.WriteString(kind)
	builder.WriteByte(' ')
	builder.WriteString(user)
	switch kind {
	case "ACCEPT":
		builder.WriteString(" diff=")
		builder.WriteString(formatUint(diff))
		builder.WriteString(" latency=")
		builder.WriteString(formatUint(uint64(latency)))
		builder.WriteString("ms")
	case "REJECT":
		// strconv.Quote escapes embedded quotes and newlines so a hostile
		// pool reject reason cannot inject extra log lines; ordinary reasons
		// render exactly as before (reason="...").
		builder.WriteString(" reason=")
		builder.WriteString(strconv.Quote(reason))
	}
	builder.WriteByte('\n')
	_, _ = l.file.WriteString(builder.String())
}

46
sharelog_test.go Normal file
View file

@ -0,0 +1,46 @@
package proxy
import (
"os"
"path/filepath"
"strings"
"testing"
)
// TestProxy_ShareLog_WritesOutcomeLines drives one accept and one reject event
// through a real proxy and asserts the share log file contains a formatted
// line for each outcome.
func TestProxy_ShareLog_WritesOutcomeLines(t *testing.T) {
dir := t.TempDir()
path := filepath.Join(dir, "shares.log")
cfg := &Config{
Mode: "nicehash",
Workers: WorkersByRigID,
ShareLogFile: path,
Bind: []BindAddr{{Host: "127.0.0.1", Port: 3333}},
Pools: []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
}
p, result := New(cfg)
if !result.OK {
t.Fatalf("expected valid proxy, got error: %v", result.Error)
}
miner := &Miner{
user: "WALLET",
conn: noopConn{},
state: MinerStateReady,
}
p.events.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 1234, Latency: 56})
p.events.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Invalid nonce"})
// Stop flushes/closes the sink before the file is read back.
p.Stop()
data, err := os.ReadFile(path)
if err != nil {
t.Fatalf("read share log: %v", err)
}
text := string(data)
if !strings.Contains(text, "ACCEPT WALLET diff=1234 latency=56ms") {
t.Fatalf("expected ACCEPT line, got %q", text)
}
if !strings.Contains(text, "REJECT WALLET reason=\"Invalid nonce\"") {
t.Fatalf("expected REJECT line, got %q", text)
}
}

View file

@ -0,0 +1,92 @@
package nicehash
import (
"sync"
"testing"
"time"
)
type gcStrategy struct {
mu sync.Mutex
disconnected bool
active bool
}
func (s *gcStrategy) Connect() {}
func (s *gcStrategy) Submit(jobID, nonce, result, algo string) int64 {
return 0
}
func (s *gcStrategy) Disconnect() {
s.mu.Lock()
defer s.mu.Unlock()
s.disconnected = true
}
func (s *gcStrategy) IsActive() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.active
}
func TestNonceSplitter_GC_Good(t *testing.T) {
strategy := &gcStrategy{active: false}
mapper := &NonceMapper{
id: 42,
storage: NewNonceStorage(),
strategy: strategy,
lastUsed: time.Now().Add(-2 * time.Minute),
pending: make(map[int64]SubmitContext),
}
mapper.storage.slots[0] = -1
splitter := &NonceSplitter{
mappers: []*NonceMapper{mapper},
mapperByID: map[int64]*NonceMapper{mapper.id: mapper},
}
splitter.GC()
if len(splitter.mappers) != 0 {
t.Fatalf("expected idle mapper to be reclaimed, got %d mapper(s)", len(splitter.mappers))
}
if _, ok := splitter.mapperByID[mapper.id]; ok {
t.Fatalf("expected reclaimed mapper to be removed from lookup table")
}
if !strategy.disconnected {
t.Fatalf("expected reclaimed mapper strategy to be disconnected")
}
}
func TestNonceSplitter_GC_Bad(t *testing.T) {
var splitter *NonceSplitter
splitter.GC()
}
func TestNonceSplitter_GC_Ugly(t *testing.T) {
strategy := &gcStrategy{active: true}
mapper := &NonceMapper{
id: 99,
storage: NewNonceStorage(),
strategy: strategy,
lastUsed: time.Now().Add(-2 * time.Minute),
pending: make(map[int64]SubmitContext),
}
mapper.storage.slots[0] = 7
splitter := &NonceSplitter{
mappers: []*NonceMapper{mapper},
mapperByID: map[int64]*NonceMapper{mapper.id: mapper},
}
splitter.GC()
if len(splitter.mappers) != 1 {
t.Fatalf("expected active mapper to remain, got %d mapper(s)", len(splitter.mappers))
}
if strategy.disconnected {
t.Fatalf("expected active mapper to stay connected")
}
}

511
splitter/nicehash/impl.go Normal file
View file

@ -0,0 +1,511 @@
package nicehash
import (
"time"
"dappco.re/go/proxy"
"dappco.re/go/proxy/pool"
)
// init registers the NiceHash splitter factory under the "nicehash" mode name
// so the proxy can construct it from configuration.
func init() {
proxy.RegisterSplitterFactory("nicehash", func(config *proxy.Config, eventBus *proxy.EventBus) proxy.Splitter {
// Production wiring uses the default pool strategy factory; tests inject
// their own through NewNonceSplitter.
return NewNonceSplitter(config, eventBus, pool.NewStrategyFactory(config))
})
}
// NewNonceSplitter creates a NiceHash splitter. A nil factory falls back to
// the default pool strategy factory for the given config.
func NewNonceSplitter(config *proxy.Config, eventBus *proxy.EventBus, factory pool.StrategyFactory) *NonceSplitter {
	splitter := &NonceSplitter{
		mapperByID: make(map[int64]*NonceMapper),
		config:     config,
		events:     eventBus,
	}
	if factory == nil {
		factory = pool.NewStrategyFactory(config)
	}
	splitter.strategyFactory = factory
	return splitter
}
// Connect establishes the first mapper (when none exist) and starts every
// mapper's upstream connection.
func (s *NonceSplitter) Connect() {
	if s == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.mappers) == 0 {
		s.addMapperLocked()
	}
	for index := range s.mappers {
		s.mappers[index].Start()
	}
}
// OnLogin assigns the miner to a mapper with a free slot.
//
// Every NiceHash miner is flagged as extended so it receives a fixed nonce
// byte; when no existing mapper has a free slot, a new mapper (and a new
// upstream connection) is created for it.
func (s *NonceSplitter) OnLogin(event *proxy.LoginEvent) {
if s == nil || event == nil || event.Miner == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
event.Miner.SetExtendedNiceHash(true)
// First mapper with a free slot wins; Add returns false when all 256
// slots are taken.
for _, mapper := range s.mappers {
if mapper.Add(event.Miner) {
s.mapperByID[mapper.id] = mapper
return
}
}
// All mappers full (or none yet): grow the pool by one mapper.
mapper := s.addMapperLocked()
if mapper != nil {
_ = mapper.Add(event.Miner)
s.mapperByID[mapper.id] = mapper
}
}
// OnSubmit forwards a share to the mapper that owns the submitting miner.
func (s *NonceSplitter) OnSubmit(event *proxy.SubmitEvent) {
	if s == nil || event == nil || event.Miner == nil {
		return
	}
	s.mu.RLock()
	owner := s.mapperByID[event.Miner.MapperID()]
	s.mu.RUnlock()
	if owner == nil {
		return
	}
	owner.Submit(event)
}
// OnClose releases the miner's slot in its owning mapper.
func (s *NonceSplitter) OnClose(event *proxy.CloseEvent) {
	if s == nil || event == nil || event.Miner == nil {
		return
	}
	s.mu.RLock()
	owner := s.mapperByID[event.Miner.MapperID()]
	s.mu.RUnlock()
	if owner == nil {
		return
	}
	owner.Remove(event.Miner)
}
// GC removes mappers with no active miners that have been idle for over a
// minute, disconnecting their upstream strategy and dropping them from the
// lookup table. Malformed (nil/missing-storage) entries are dropped outright.
func (s *NonceSplitter) GC() {
	if s == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	now := time.Now()
	kept := s.mappers[:0] // filter in place, reusing the backing array
	for _, mapper := range s.mappers {
		if mapper == nil || mapper.storage == nil {
			continue
		}
		// Only the active-slot count matters for reclamation; the free and
		// dead counts are discarded (the previous version bound them to
		// locals and blanked them with `_ =` statements).
		_, _, active := mapper.storage.SlotCount()
		if active == 0 && now.Sub(mapper.lastUsed) > time.Minute {
			if mapper.strategy != nil {
				mapper.strategy.Disconnect()
			}
			delete(s.mapperByID, mapper.id)
			continue
		}
		kept = append(kept, mapper)
	}
	s.mappers = kept
}
// Tick is called once per second and fans the tick out to every mapper
// strategy that implements Tick(uint64).
func (s *NonceSplitter) Tick(ticks uint64) {
	if s == nil {
		return
	}
	// Snapshot strategies under the read lock — the previous version sized
	// the slice from len(s.mappers) BEFORE locking, a data race with the
	// methods that mutate s.mappers — then tick outside the lock so a slow
	// strategy cannot block the splitter.
	s.mu.RLock()
	strategies := make([]pool.Strategy, 0, len(s.mappers))
	for _, mapper := range s.mappers {
		if mapper == nil || mapper.strategy == nil {
			continue
		}
		strategies = append(strategies, mapper.strategy)
	}
	s.mu.RUnlock()
	for _, strategy := range strategies {
		if ticker, ok := strategy.(interface{ Tick(uint64) }); ok {
			ticker.Tick(ticks)
		}
	}
}
// Upstreams returns pool connection counts: Active for mappers whose strategy
// reports an active connection, Error for suspended or never-activated ones.
func (s *NonceSplitter) Upstreams() proxy.UpstreamStats {
	if s == nil {
		return proxy.UpstreamStats{}
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	var stats proxy.UpstreamStats
	for _, mapper := range s.mappers {
		// Every other splitter loop tolerates nil mapper entries; without
		// this guard a nil entry would panic on mapper.strategy below.
		if mapper == nil {
			continue
		}
		switch {
		case mapper.strategy != nil && mapper.strategy.IsActive():
			stats.Active++
		case mapper.suspended > 0 || !mapper.active:
			// NOTE(review): suspended/active are read without mapper.mu here;
			// confirm a racy snapshot is acceptable for stats reporting.
			stats.Error++
		}
	}
	stats.Total = stats.Active + stats.Sleep + stats.Error
	return stats
}
// Disconnect closes all upstream pool connections and forgets the current
// mapper set, leaving the splitter ready to rebuild from scratch.
func (s *NonceSplitter) Disconnect() {
	if s == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, mapper := range s.mappers {
		if mapper == nil || mapper.strategy == nil {
			continue
		}
		mapper.strategy.Disconnect()
	}
	s.mappers = nil
	s.mapperByID = make(map[int64]*NonceMapper)
}
// ReloadPools reconnects each mapper strategy using the updated pool list.
//
// s.ReloadPools()
func (s *NonceSplitter) ReloadPools() {
	if s == nil {
		return
	}
	// Snapshot strategies under the read lock — the previous version read
	// len(s.mappers) before locking, a data race with mapper mutations —
	// then reload outside it so reconnects cannot block the splitter.
	s.mu.RLock()
	strategies := make([]pool.Strategy, 0, len(s.mappers))
	for _, mapper := range s.mappers {
		if mapper == nil || mapper.strategy == nil {
			continue
		}
		strategies = append(strategies, mapper.strategy)
	}
	s.mu.RUnlock()
	for _, strategy := range strategies {
		if reloadable, ok := strategy.(pool.ReloadableStrategy); ok {
			reloadable.ReloadPools()
		}
	}
}
// addMapperLocked creates, registers, and starts one new mapper.
//
// Caller must hold s.mu. The strategy factory receives the mapper itself as
// its listener, so pool jobs and results flow back into OnJob and
// OnResultAccepted.
func (s *NonceSplitter) addMapperLocked() *NonceMapper {
id := s.nextMapperID
s.nextMapperID++
// Strategy is attached after construction because the factory needs the
// mapper as its listener argument.
mapper := NewNonceMapper(id, s.config, nil)
mapper.events = s.events
mapper.lastUsed = time.Now()
mapper.strategy = s.strategyFactory(mapper)
s.mappers = append(s.mappers, mapper)
// Defensive: mapperByID is created in NewNonceSplitter, but tests build
// NonceSplitter literals without it.
if s.mapperByID == nil {
s.mapperByID = make(map[int64]*NonceMapper)
}
s.mapperByID[mapper.id] = mapper
mapper.Start()
return mapper
}
// NewNonceMapper creates a mapper for one upstream connection.
func NewNonceMapper(id int64, config *proxy.Config, strategy pool.Strategy) *NonceMapper {
	mapper := &NonceMapper{id: id, strategy: strategy, config: config}
	mapper.storage = NewNonceStorage()
	mapper.pending = make(map[int64]SubmitContext)
	return mapper
}
// Start connects the mapper's upstream strategy exactly once; repeated calls
// are no-ops thanks to startOnce.
func (m *NonceMapper) Start() {
	if m == nil || m.strategy == nil {
		return
	}
	connectOnce := func() {
		m.lastUsed = time.Now()
		m.strategy.Connect()
	}
	m.startOnce.Do(connectOnce)
}
// Add assigns a miner to a free slot.
//
// On success the miner learns its mapper id and fixed nonce byte (the latter
// set inside storage.Add) and is immediately handed the current job, if the
// mapper already has one, so it can start work without waiting for the next
// broadcast.
func (m *NonceMapper) Add(miner *proxy.Miner) bool {
if m == nil || miner == nil {
return false
}
m.mu.Lock()
defer m.mu.Unlock()
ok := m.storage.Add(miner)
if ok {
miner.SetMapperID(m.id)
miner.SetExtendedNiceHash(true)
m.lastUsed = time.Now()
// Snapshot the current job under the storage lock (m.mu does not guard
// storage fields).
m.storage.mu.Lock()
job := m.storage.job
m.storage.mu.Unlock()
if job.IsValid() {
miner.SetCurrentJob(job)
}
}
return ok
}
// Remove marks the miner's slot as dead and detaches the miner from this
// mapper.
func (m *NonceMapper) Remove(departing *proxy.Miner) {
	if m == nil || departing == nil {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	departing.SetMapperID(-1)
	m.storage.Remove(departing)
	m.lastUsed = time.Now()
}
// Submit forwards the share to the pool and records a pending entry keyed by
// the pool sequence number so OnResultAccepted can route the asynchronous
// result back to the miner.
func (m *NonceMapper) Submit(event *proxy.SubmitEvent) {
	if m == nil || event == nil || event.Miner == nil || m.strategy == nil {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	jobID := event.JobID
	// Snapshot jobs under the storage lock (m.mu does not guard storage).
	m.storage.mu.Lock()
	job := m.storage.job
	prevJob := m.storage.prevJob
	m.storage.mu.Unlock()
	if jobID == "" {
		// Some miners omit the job id; assume the current job.
		jobID = job.JobID
	}
	valid := m.storage.IsValidJobID(jobID)
	if jobID == "" || !valid {
		m.rejectInvalidJobLocked(event, job)
		return
	}
	// Shares against the immediately-previous job are still credited at that
	// job's difficulty.
	submissionJob := job
	if jobID == prevJob.JobID && prevJob.JobID != "" {
		submissionJob = prevJob
	}
	seq := m.strategy.Submit(jobID, event.Nonce, event.Result, event.Algo)
	if seq == 0 {
		// Sequence 0 means the strategy had no usable upstream (the previous
		// implementation replied the same way). Without this check the entry
		// would sit in pending under key 0 forever and the miner would never
		// receive a reply.
		event.Miner.ReplyWithError(event.RequestID, "Pool unavailable")
		return
	}
	m.pending[seq] = SubmitContext{
		RequestID: event.RequestID,
		MinerID:   event.Miner.ID(),
		JobID:     jobID,
		Diff:      proxy.EffectiveShareDifficulty(submissionJob, event.Miner),
		StartedAt: time.Now(),
	}
	m.lastUsed = time.Now()
}
// rejectInvalidJobLocked replies with an invalid-job error and publishes a
// matching reject event. Caller must hold m.mu.
func (m *NonceMapper) rejectInvalidJobLocked(event *proxy.SubmitEvent, job proxy.Job) {
	event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
	if m.events == nil {
		return
	}
	jobCopy := job
	m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: event.Miner, Job: &jobCopy, Error: "Invalid job id"})
}
// IsActive reports whether the mapper has received a valid job.
func (m *NonceMapper) IsActive() bool {
	if m == nil {
		return false
	}
	m.mu.Lock()
	active := m.active
	m.mu.Unlock()
	return active
}
// OnJob stores the current pool job, marks the upstream healthy, and
// broadcasts the job to active miners via the storage table.
func (m *NonceMapper) OnJob(job proxy.Job) {
	if m == nil || !job.IsValid() {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	m.active = true
	m.suspended = 0
	m.lastUsed = time.Now()
	m.storage.SetJob(job)
}
// OnResultAccepted correlates a pool result back to the originating miner.
//
// The pending entry recorded by Submit is looked up by the pool's sequence
// number, removed, and used to reply to the miner and to publish an
// accept/reject event carrying the share's difficulty and round-trip latency.
func (m *NonceMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
if m == nil {
return
}
m.mu.Lock()
ctx, ok := m.pending[sequence]
if ok {
delete(m.pending, sequence)
}
// Snapshot miner and jobs under the storage lock; miner is nil when the
// submitter disconnected while the share was in flight.
m.storage.mu.Lock()
miner := m.storage.miners[ctx.MinerID]
job := m.storage.job
prevJob := m.storage.prevJob
m.storage.mu.Unlock()
// Attribute the result to the previous job when the share was submitted
// against it (the pool rolled jobs while the share was in flight).
job, expired := resolveSubmissionJob(ctx.JobID, job, prevJob)
// Unlock before replying/dispatching so event handlers cannot deadlock
// back into this mapper.
m.mu.Unlock()
if !ok || miner == nil {
return
}
// Round-trip latency in ms, saturated at the uint16 ceiling (~65s).
latency := uint16(0)
if !ctx.StartedAt.IsZero() {
elapsed := time.Since(ctx.StartedAt).Milliseconds()
if elapsed > int64(^uint16(0)) {
latency = ^uint16(0)
} else {
latency = uint16(elapsed)
}
}
if accepted {
miner.Success(ctx.RequestID, "OK")
if m.events != nil {
m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Job: &job, Diff: ctx.Diff, Latency: latency, Expired: expired})
}
return
}
miner.ReplyWithError(ctx.RequestID, errorMessage)
if m.events != nil {
m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Job: &job, Diff: ctx.Diff, Error: errorMessage, Latency: latency})
}
}
// resolveSubmissionJob picks the job a share was mined against: the previous
// job when the id matches it but not the current job (an expired share, still
// credited), otherwise the current job. The bool reports expiry.
func resolveSubmissionJob(jobID string, currentJob, previousJob proxy.Job) (proxy.Job, bool) {
	expired := jobID != "" && jobID == previousJob.JobID && jobID != currentJob.JobID
	if expired {
		return previousJob, true
	}
	return currentJob, false
}
// OnDisconnect marks the upstream as down and drops in-flight submits, whose
// results will never arrive from the dead connection.
func (m *NonceMapper) OnDisconnect() {
	if m == nil {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	m.active = false
	m.suspended++
	// The prior implementation cleared pending on disconnect; without this
	// the entries for unanswered shares accumulate until the mapper is
	// reclaimed, and stale entries could be mis-matched against sequence
	// numbers issued by a reconnected strategy.
	m.pending = make(map[int64]SubmitContext)
}
// NewNonceStorage creates a 256-slot table ready for round-robin miner allocation.
//
// storage := nicehash.NewNonceStorage()
func NewNonceStorage() *NonceStorage {
	storage := new(NonceStorage)
	storage.miners = make(map[int64]*proxy.Miner)
	return storage
}
// Add assigns the next free slot, such as 0x2a, to one miner.
//
// ok := storage.Add(&proxy.Miner{})
//
// Scans round-robin from the cursor. Dead slots (negative ids) stay unusable
// until SetJob reclaims them. Returns false when all 256 slots are taken.
func (s *NonceStorage) Add(miner *proxy.Miner) bool {
if s == nil || miner == nil {
return false
}
s.mu.Lock()
defer s.mu.Unlock()
for i := 0; i < 256; i++ {
index := (s.cursor + i) % 256
// Non-zero is occupied (positive id) or dead (negative id).
if s.slots[index] != 0 {
continue
}
// NOTE(review): a miner whose ID() is 0 would leave this slot looking
// free — confirm miner ids are always non-zero.
s.slots[index] = miner.ID()
s.miners[miner.ID()] = miner
miner.SetFixedByte(uint8(index))
// Advance the cursor so consecutive miners spread across slots.
s.cursor = (index + 1) % 256
return true
}
return false
}
// Remove marks one miner's slot as dead until the next SetJob call.
//
// storage.Remove(miner)
func (s *NonceStorage) Remove(miner *proxy.Miner) {
	if s == nil || miner == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.miners, miner.ID())
	slot := int(miner.FixedByte())
	if slot < 0 || slot >= len(s.slots) {
		return
	}
	if s.slots[slot] == miner.ID() {
		// Negative id marks the slot dead (unusable until the next job).
		s.slots[slot] = -miner.ID()
	}
}
// SetJob broadcasts one pool job to all active miners and clears dead slots.
//
// storage.SetJob(proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-1"})
func (s *NonceStorage) SetJob(job proxy.Job) {
if s == nil || !job.IsValid() {
return
}
s.mu.Lock()
// Keep the outgoing job so in-flight shares against it still validate —
// but only when it came from the same upstream connection.
s.prevJob = s.job
if s.prevJob.ClientID != job.ClientID {
s.prevJob = proxy.Job{}
}
s.job = job
// A new job invalidates old work, so dead (negative) slots can be reused.
for i := range s.slots {
if s.slots[i] < 0 {
s.slots[i] = 0
}
}
// Snapshot the miner set, then broadcast outside the lock so one slow
// miner connection cannot stall the storage table.
miners := make([]*proxy.Miner, 0, len(s.miners))
for _, miner := range s.miners {
miners = append(miners, miner)
}
s.mu.Unlock()
for _, miner := range miners {
miner.ForwardJob(job, job.Algo)
}
}
// IsValidJobID accepts the current job, or the immediately previous one after a pool roll.
//
// if !storage.IsValidJobID("job-1") { return }
//
// Matching the previous job also increments the expired-share counter.
func (s *NonceStorage) IsValidJobID(id string) bool {
	if s == nil {
		return false
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	switch {
	case id == "":
		return false
	case id == s.job.JobID:
		return true
	case id == s.prevJob.JobID && s.prevJob.JobID != "":
		s.expired++ // count shares validated against the rolled-over job
		return true
	default:
		return false
	}
}
// SlotCount returns free, dead, and active slot counts such as 254, 1, 1.
//
// free, dead, active := storage.SlotCount()
func (s *NonceStorage) SlotCount() (free, dead, active int) {
	if s == nil {
		return 0, 0, 0
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, owner := range s.slots {
		if owner == 0 {
			free++
		} else if owner > 0 {
			active++
		} else {
			dead++
		}
	}
	return free, dead, active
}

View file

@ -4,8 +4,8 @@ import (
"sync" "sync"
"time" "time"
"dappco.re/go/core/proxy" "dappco.re/go/proxy"
"dappco.re/go/core/proxy/pool" "dappco.re/go/proxy/pool"
) )
// NonceMapper manages one outbound pool connection and the 256-slot NonceStorage. // NonceMapper manages one outbound pool connection and the 256-slot NonceStorage.
@ -17,12 +17,13 @@ type NonceMapper struct {
id int64 id int64
storage *NonceStorage storage *NonceStorage
strategy pool.Strategy // manages pool client lifecycle and failover strategy pool.Strategy // manages pool client lifecycle and failover
pending map[int64]SubmitContext // sequence → {requestID, minerID, jobID} pending map[int64]SubmitContext // sequence → {requestID, minerID}
config *proxy.Config config *proxy.Config
events *proxy.EventBus events *proxy.EventBus
active bool // true once pool has sent at least one job active bool // true once pool has sent at least one job
suspended int // > 0 when pool connection is in error/reconnecting suspended int // > 0 when pool connection is in error/reconnecting
idleAt time.Time lastUsed time.Time
startOnce sync.Once
mu sync.Mutex mu sync.Mutex
} }
@ -32,185 +33,7 @@ type NonceMapper struct {
type SubmitContext struct { type SubmitContext struct {
RequestID int64 // JSON-RPC id from the miner's submit request RequestID int64 // JSON-RPC id from the miner's submit request
MinerID int64 // miner that submitted MinerID int64 // miner that submitted
Job proxy.Job
JobID string JobID string
Expired bool Diff uint64
SubmittedAt time.Time StartedAt time.Time
}
// NewNonceMapper creates one upstream pool mapper and its local slot table.
//
// mapper := nicehash.NewNonceMapper(1, cfg, strategy)
func NewNonceMapper(id int64, cfg *proxy.Config, strategy pool.Strategy) *NonceMapper {
return &NonceMapper{
id: id,
storage: NewNonceStorage(),
strategy: strategy,
config: cfg,
pending: make(map[int64]SubmitContext),
}
}
func (m *NonceMapper) Add(miner *proxy.Miner) bool {
if !m.storage.Add(miner) {
return false
}
m.mu.Lock()
m.idleAt = time.Time{}
m.mu.Unlock()
return true
}
func (m *NonceMapper) Remove(miner *proxy.Miner) {
m.storage.Remove(miner)
_, _, active := m.storage.SlotCount()
if active == 0 {
m.mu.Lock()
if m.idleAt.IsZero() {
m.idleAt = time.Now().UTC()
}
m.mu.Unlock()
}
}
func (m *NonceMapper) Submit(event *proxy.SubmitEvent) {
if event == nil || event.Miner == nil || m.strategy == nil {
return
}
job, valid, expired := m.storage.JobForID(event.JobID)
if !valid {
event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
return
}
sequence := m.strategy.Submit(event.JobID, event.Nonce, event.Result, event.Algo)
if sequence == 0 {
if event.Miner != nil {
event.Miner.ReplyWithError(event.RequestID, "Pool unavailable")
}
return
}
m.mu.Lock()
m.pending[sequence] = SubmitContext{
RequestID: event.RequestID,
MinerID: event.Miner.ID(),
Job: job,
JobID: event.JobID,
Expired: expired,
SubmittedAt: time.Now().UTC(),
}
m.mu.Unlock()
}
func (m *NonceMapper) IsActive() bool {
if m.strategy == nil {
return false
}
return m.strategy.IsActive()
}
func (m *NonceMapper) OnJob(job proxy.Job) {
if !job.IsValid() {
return
}
m.mu.Lock()
m.active = true
m.suspended = 0
m.idleAt = time.Time{}
m.mu.Unlock()
m.storage.SetJob(job)
}
func (m *NonceMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
m.mu.Lock()
context, exists := m.pending[sequence]
if exists {
delete(m.pending, sequence)
}
m.mu.Unlock()
if !exists {
return
}
miner := m.storage.Miners()[context.MinerID]
if miner == nil {
return
}
shareDifficulty := context.Job.DifficultyFromTarget()
if shareDifficulty == 0 {
shareDifficulty = miner.Diff()
}
eventType := proxy.EventReject
if accepted {
eventType = proxy.EventAccept
}
if m.events != nil {
latency := uint16(0)
if !context.SubmittedAt.IsZero() {
elapsed := time.Since(context.SubmittedAt).Milliseconds()
if elapsed > 0 {
if elapsed > int64(^uint16(0)) {
latency = ^uint16(0)
} else {
latency = uint16(elapsed)
}
}
}
m.events.Dispatch(proxy.Event{
Type: eventType,
Miner: miner,
Job: jobPointer(context.Job),
Diff: shareDifficulty,
Error: errorMessage,
Latency: latency,
Expired: context.Expired,
})
}
if accepted {
miner.Success(context.RequestID, "OK")
return
}
miner.ReplyWithError(context.RequestID, errorMessage)
}
func (m *NonceMapper) OnDisconnect() {
m.clearPending()
m.mu.Lock()
m.active = false
m.suspended++
m.mu.Unlock()
}
func (m *NonceMapper) IdleDuration(now time.Time) time.Duration {
m.mu.Lock()
idleAt := m.idleAt
m.mu.Unlock()
if idleAt.IsZero() {
return 0
}
return now.Sub(idleAt)
}
func (m *NonceMapper) clearPending() {
m.mu.Lock()
m.pending = make(map[int64]SubmitContext)
m.mu.Unlock()
}
func jobPointer(job proxy.Job) *proxy.Job {
if !job.IsValid() {
return nil
}
jobCopy := job
return &jobCopy
} }

View file

@ -0,0 +1,243 @@
package nicehash
import (
"bufio"
"encoding/json"
"net"
"sync"
"testing"
"time"
"dappco.re/go/proxy"
)
type startCountingStrategy struct {
mu sync.Mutex
connect int
}
func (s *startCountingStrategy) Connect() {
s.mu.Lock()
defer s.mu.Unlock()
s.connect++
}
func (s *startCountingStrategy) Submit(jobID, nonce, result, algo string) int64 {
return 0
}
func (s *startCountingStrategy) Disconnect() {}
func (s *startCountingStrategy) IsActive() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.connect > 0
}
type discardConn struct{}
func (discardConn) Read([]byte) (int, error) { return 0, nil }
func (discardConn) Write(p []byte) (int, error) { return len(p), nil }
func (discardConn) Close() error { return nil }
func (discardConn) LocalAddr() net.Addr { return nil }
func (discardConn) RemoteAddr() net.Addr { return nil }
func (discardConn) SetDeadline(time.Time) error { return nil }
func (discardConn) SetReadDeadline(time.Time) error { return nil }
func (discardConn) SetWriteDeadline(time.Time) error { return nil }
func TestMapper_Start_Good(t *testing.T) {
strategy := &startCountingStrategy{}
mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
mapper.Start()
if strategy.connect != 1 {
t.Fatalf("expected one connect call, got %d", strategy.connect)
}
}
func TestMapper_Start_Bad(t *testing.T) {
mapper := NewNonceMapper(1, &proxy.Config{}, nil)
mapper.Start()
}
func TestMapper_Start_Ugly(t *testing.T) {
strategy := &startCountingStrategy{}
mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
mapper.Start()
mapper.Start()
if strategy.connect != 1 {
t.Fatalf("expected Start to be idempotent, got %d connect calls", strategy.connect)
}
}
func TestMapper_Submit_InvalidJob_Good(t *testing.T) {
minerConn, clientConn := net.Pipe()
defer minerConn.Close()
defer clientConn.Close()
miner := proxy.NewMiner(minerConn, 3333, nil)
miner.SetID(7)
strategy := &startCountingStrategy{}
mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
mapper.storage.job = proxy.Job{JobID: "job-1", Blob: "blob", Target: "b88d0600"}
done := make(chan struct{})
go func() {
mapper.Submit(&proxy.SubmitEvent{
Miner: miner,
JobID: "job-missing",
Nonce: "deadbeef",
Result: "hash",
RequestID: 42,
})
close(done)
}()
line, err := bufio.NewReader(clientConn).ReadBytes('\n')
if err != nil {
t.Fatalf("read error reply: %v", err)
}
<-done
var payload struct {
ID float64 `json:"id"`
Error struct {
Message string `json:"message"`
} `json:"error"`
}
if err := json.Unmarshal(line, &payload); err != nil {
t.Fatalf("unmarshal error reply: %v", err)
}
if payload.ID != 42 {
t.Fatalf("expected request id 42, got %v", payload.ID)
}
if payload.Error.Message != "Invalid job id" {
t.Fatalf("expected invalid job error, got %q", payload.Error.Message)
}
if len(mapper.pending) != 0 {
t.Fatalf("expected invalid submit not to create a pending entry")
}
}
func TestMapper_OnResultAccepted_ExpiredUsesPreviousJob(t *testing.T) {
bus := proxy.NewEventBus()
events := make(chan proxy.Event, 1)
bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
events <- e
})
miner := proxy.NewMiner(discardConn{}, 3333, nil)
miner.SetID(7)
mapper := NewNonceMapper(1, &proxy.Config{}, &startCountingStrategy{})
mapper.events = bus
mapper.storage.job = proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"}
mapper.storage.prevJob = proxy.Job{JobID: "job-old", Blob: "blob-old", Target: "b88d0600"}
mapper.storage.miners[miner.ID()] = miner
if !mapper.storage.IsValidJobID("job-old") {
t.Fatal("expected previous job to validate before result handling")
}
mapper.pending[9] = SubmitContext{
RequestID: 42,
MinerID: miner.ID(),
JobID: "job-old",
StartedAt: time.Now(),
}
mapper.OnResultAccepted(9, true, "")
if got := mapper.storage.expired; got != 1 {
t.Fatalf("expected one expired validation, got %d", got)
}
select {
case event := <-events:
if !event.Expired {
t.Fatalf("expected expired share to be flagged")
}
if event.Job == nil || event.Job.JobID != "job-old" {
t.Fatalf("expected previous job to be attached, got %+v", event.Job)
}
case <-time.After(time.Second):
t.Fatal("expected accept event")
}
}
func TestMapper_Submit_ExpiredJobUsesPreviousDifficulty(t *testing.T) {
miner := proxy.NewMiner(discardConn{}, 3333, nil)
miner.SetID(9)
strategy := &submitCaptureStrategy{}
mapper := NewNonceMapper(1, &proxy.Config{}, strategy)
mapper.storage.job = proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "ffffffff"}
mapper.storage.prevJob = proxy.Job{JobID: "job-old", Blob: "blob-old", Target: "b88d0600"}
mapper.storage.miners[miner.ID()] = miner
mapper.Submit(&proxy.SubmitEvent{
Miner: miner,
JobID: "job-old",
Nonce: "deadbeef",
Result: "hash",
RequestID: 88,
})
ctx, ok := mapper.pending[strategy.seq]
if !ok {
t.Fatal("expected pending submit context for expired job")
}
want := mapper.storage.prevJob.DifficultyFromTarget()
if ctx.Diff != want {
t.Fatalf("expected previous-job difficulty %d, got %d", want, ctx.Diff)
}
}
type submitCaptureStrategy struct {
seq int64
}
func (s *submitCaptureStrategy) Connect() {}
func (s *submitCaptureStrategy) Submit(jobID, nonce, result, algo string) int64 {
s.seq++
return s.seq
}
func (s *submitCaptureStrategy) Disconnect() {}
func (s *submitCaptureStrategy) IsActive() bool { return true }
func TestMapper_OnResultAccepted_CustomDiffUsesEffectiveDifficulty(t *testing.T) {
bus := proxy.NewEventBus()
events := make(chan proxy.Event, 1)
bus.Subscribe(proxy.EventAccept, func(e proxy.Event) {
events <- e
})
miner := proxy.NewMiner(discardConn{}, 3333, nil)
miner.SetID(8)
mapper := NewNonceMapper(1, &proxy.Config{}, &startCountingStrategy{})
mapper.events = bus
mapper.storage.job = proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"}
mapper.storage.miners[miner.ID()] = miner
mapper.pending[10] = SubmitContext{
RequestID: 77,
MinerID: miner.ID(),
JobID: "job-new",
Diff: 25000,
StartedAt: time.Now(),
}
mapper.OnResultAccepted(10, true, "")
select {
case event := <-events:
if event.Diff != 25000 {
t.Fatalf("expected effective difficulty 25000, got %d", event.Diff)
}
case <-time.After(time.Second):
t.Fatal("expected accept event")
}
}

View file

@ -1,12 +0,0 @@
package nicehash
import (
"dappco.re/go/core/proxy"
"dappco.re/go/core/proxy/pool"
)
func init() {
proxy.RegisterSplitterFactory("nicehash", func(cfg *proxy.Config, events *proxy.EventBus) proxy.Splitter {
return NewNonceSplitter(cfg, events, pool.NewStrategyFactory(cfg))
})
}

View file

@ -0,0 +1,67 @@
package nicehash
import (
"testing"
"dappco.re/go/proxy"
"dappco.re/go/proxy/pool"
)
// reloadableStrategy is a test double that counts ReloadPools invocations.
type reloadableStrategy struct {
	reloads int // number of ReloadPools calls observed
}

func (s *reloadableStrategy) Connect() {}

func (s *reloadableStrategy) Submit(jobID, nonce, result, algo string) int64 { return 0 }

func (s *reloadableStrategy) Disconnect() {}

func (s *reloadableStrategy) IsActive() bool { return true }

// ReloadPools records the reload so tests can assert fan-out.
func (s *reloadableStrategy) ReloadPools() { s.reloads++ }

// Compile-time check that the fake satisfies pool.ReloadableStrategy.
var _ pool.ReloadableStrategy = (*reloadableStrategy)(nil)
// TestNonceSplitter_ReloadPools_Good verifies ReloadPools is forwarded
// exactly once to a single mapper's strategy.
func TestNonceSplitter_ReloadPools_Good(t *testing.T) {
	fake := &reloadableStrategy{}
	splitter := &NonceSplitter{mappers: []*NonceMapper{{strategy: fake}}}

	splitter.ReloadPools()

	if got := fake.reloads; got != 1 {
		t.Fatalf("expected mapper strategy to reload once, got %d", got)
	}
}
// TestNonceSplitter_ReloadPools_Bad verifies a mapper with a nil strategy is
// tolerated; the call completing without panicking is the assertion.
func TestNonceSplitter_ReloadPools_Bad(t *testing.T) {
	splitter := &NonceSplitter{
		mappers: []*NonceMapper{
			{strategy: nil},
		},
	}
	splitter.ReloadPools()
}
// TestNonceSplitter_ReloadPools_Ugly verifies every mapper strategy reloads
// exactly once when several mappers are registered.
func TestNonceSplitter_ReloadPools_Ugly(t *testing.T) {
	splitter := NewNonceSplitter(&proxy.Config{}, proxy.NewEventBus(), func(listener pool.StratumListener) pool.Strategy {
		return &reloadableStrategy{}
	})
	first := &reloadableStrategy{}
	second := &reloadableStrategy{}
	splitter.mappers = []*NonceMapper{{strategy: first}, {strategy: second}}

	splitter.ReloadPools()

	for index, fake := range []*reloadableStrategy{first, second} {
		if fake.reloads != 1 {
			t.Fatalf("expected mapper %d to reload once, got %d", index, fake.reloads)
		}
	}
}

View file

@ -11,10 +11,9 @@ package nicehash
import ( import (
"sync" "sync"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/proxy"
"dappco.re/go/core/proxy/pool" "dappco.re/go/proxy/pool"
) )
// NonceSplitter is the Splitter implementation for NiceHash mode. // NonceSplitter is the Splitter implementation for NiceHash mode.
@ -24,193 +23,10 @@ import (
// s.Connect() // s.Connect()
type NonceSplitter struct { type NonceSplitter struct {
mappers []*NonceMapper mappers []*NonceMapper
cfg *proxy.Config mapperByID map[int64]*NonceMapper
config *proxy.Config
events *proxy.EventBus events *proxy.EventBus
strategyFactory pool.StrategyFactory strategyFactory pool.StrategyFactory
mu sync.RWMutex mu sync.RWMutex
} nextMapperID int64
// NewNonceSplitter creates the NiceHash splitter.
//
// s := nicehash.NewNonceSplitter(cfg, bus, factory)
func NewNonceSplitter(cfg *proxy.Config, events *proxy.EventBus, factory pool.StrategyFactory) *NonceSplitter {
return &NonceSplitter{
cfg: cfg,
events: events,
strategyFactory: factory,
mappers: make([]*NonceMapper, 0, 1),
}
}
// Connect establishes the first upstream mapper if none exists yet.
// Subsequent calls are no-ops while at least one mapper is present.
func (s *NonceSplitter) Connect() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.mappers) > 0 {
		return
	}
	mapper := s.newMapperLocked()
	s.mappers = append(s.mappers, mapper)
	// NOTE(review): strategy.Connect runs while holding s.mu — assumed cheap
	// or asynchronous; confirm it cannot call back into the splitter.
	mapper.strategy.Connect()
}
// OnLogin places the miner into the first mapper with a free nonce slot,
// creating and connecting a new mapper when every existing one is full.
// Nil events or events without a miner are ignored.
func (s *NonceSplitter) OnLogin(event *proxy.LoginEvent) {
	if event == nil || event.Miner == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, mapper := range s.mappers {
		if mapper.Add(event.Miner) {
			s.attachMinerLocked(mapper, event.Miner)
			return
		}
	}
	// All mappers full (or none exist): allocate and connect a fresh one.
	mapper := s.newMapperLocked()
	s.mappers = append(s.mappers, mapper)
	mapper.strategy.Connect()
	if mapper.Add(event.Miner) {
		s.attachMinerLocked(mapper, event.Miner)
	}
}

// attachMinerLocked wires a freshly-added miner to its mapper: event bus,
// mapper ID, NiceHash mode, and a replay of the current job when one is
// valid. Previously this logic was duplicated in both OnLogin branches.
// Callers must hold s.mu.
func (s *NonceSplitter) attachMinerLocked(mapper *NonceMapper, miner *proxy.Miner) {
	mapper.events = s.events
	miner.SetMapperID(mapper.id)
	miner.SetNiceHashEnabled(true)
	if currentJob := mapper.storage.CurrentJob(); currentJob != nil && currentJob.IsValid() {
		miner.PrimeJob(*currentJob)
	}
}
// OnSubmit routes a share to the mapper that owns the submitting miner,
// matched by the mapper ID stamped onto the miner at login. Shares with no
// matching mapper are silently dropped.
func (s *NonceSplitter) OnSubmit(event *proxy.SubmitEvent) {
	if event == nil || event.Miner == nil {
		return
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, mapper := range s.mappers {
		if mapper.id == event.Miner.MapperID() {
			mapper.Submit(event)
			return
		}
	}
}
// OnClose releases the closing miner's nonce slot from its owning mapper,
// located by the mapper ID set at login.
func (s *NonceSplitter) OnClose(event *proxy.CloseEvent) {
	if event == nil || event.Miner == nil {
		return
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, mapper := range s.mappers {
		if mapper.id == event.Miner.MapperID() {
			mapper.Remove(event.Miner)
			return
		}
	}
}
// Tick runs periodic maintenance; mapper garbage collection fires once every
// 60 ticks.
func (s *NonceSplitter) Tick(ticks uint64) {
	if ticks%60 != 0 {
		return
	}
	s.GC()
}
// GC disconnects and drops mappers that have no active slots and have been
// idle for at least 60 seconds; all other mappers are kept.
func (s *NonceSplitter) GC() {
	s.mu.Lock()
	defer s.mu.Unlock()
	now := time.Now().UTC()
	// Filter in place, reusing the slice's backing array.
	filtered := s.mappers[:0]
	for _, mapper := range s.mappers {
		_, _, active := mapper.storage.SlotCount()
		if active == 0 && mapper.IdleDuration(now) >= 60*time.Second {
			if mapper.strategy != nil {
				// NOTE(review): Disconnect runs under s.mu — assumed non-blocking.
				mapper.strategy.Disconnect()
			}
			continue
		}
		filtered = append(filtered, mapper)
	}
	s.mappers = filtered
}
// Upstreams reports per-mapper upstream health: suspended mappers count as
// Error, connected ones as Active, the rest as Sleep; Total covers all.
//
// A nil receiver returns zero-value stats — TestNonceSplitter_Upstreams_Bad
// calls this on a nil *NonceSplitter, and the original body would panic
// taking s.mu. SimpleSplitter.Upstreams already carries the same guard.
func (s *NonceSplitter) Upstreams() proxy.UpstreamStats {
	if s == nil {
		return proxy.UpstreamStats{}
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	var stats proxy.UpstreamStats
	for _, mapper := range s.mappers {
		stats.Total++
		switch {
		case mapper.suspended > 0:
			stats.Error++
		case mapper.IsActive():
			stats.Active++
		default:
			stats.Sleep++
		}
	}
	return stats
}
// PendingCount sums the in-flight (unacknowledged) submits across all
// mappers. The mapper slice is snapshotted first so each mapper's lock is
// taken without holding the splitter lock.
func (s *NonceSplitter) PendingCount() int {
	s.mu.RLock()
	mappers := append([]*NonceMapper(nil), s.mappers...)
	s.mu.RUnlock()
	pending := 0
	for _, mapper := range mappers {
		if mapper == nil {
			continue
		}
		mapper.mu.Lock()
		pending += len(mapper.pending)
		mapper.mu.Unlock()
	}
	return pending
}
// Disconnect detaches every mapper from the splitter, resets mapper state,
// and closes each upstream strategy. The mapper list is swapped out under the
// splitter lock; strategy teardown happens after releasing each mapper lock,
// presumably so Disconnect cannot re-enter while the lock is held.
func (s *NonceSplitter) Disconnect() {
	s.mu.Lock()
	mappers := s.mappers
	s.mappers = nil
	s.mu.Unlock()
	for _, mapper := range mappers {
		if mapper == nil {
			continue
		}
		mapper.mu.Lock()
		strategy := mapper.strategy
		mapper.strategy = nil
		mapper.active = false
		mapper.suspended = 0
		mapper.mu.Unlock()
		if strategy != nil {
			strategy.Disconnect()
		}
	}
}
func (s *NonceSplitter) newMapperLocked() *NonceMapper {
mapperID := int64(len(s.mappers) + 1)
mapper := NewNonceMapper(mapperID, s.cfg, nil)
mapper.events = s.events
var strategy pool.Strategy
if s.strategyFactory != nil {
strategy = s.strategyFactory(mapper)
}
mapper.strategy = strategy
return mapper
} }

View file

@ -3,7 +3,7 @@ package nicehash
import ( import (
"sync" "sync"
"dappco.re/go/core/proxy" "dappco.re/go/proxy"
) )
// NonceStorage is the 256-slot fixed-byte allocation table for one NonceMapper. // NonceStorage is the 256-slot fixed-byte allocation table for one NonceMapper.
@ -20,185 +20,7 @@ type NonceStorage struct {
miners map[int64]*proxy.Miner // minerID → Miner pointer for active miners miners map[int64]*proxy.Miner // minerID → Miner pointer for active miners
job proxy.Job // current job from pool job proxy.Job // current job from pool
prevJob proxy.Job // previous job (for stale submit validation) prevJob proxy.Job // previous job (for stale submit validation)
expired uint64
cursor int // search starts here (round-robin allocation) cursor int // search starts here (round-robin allocation)
expired uint64 // stale job ID hits for the previous job
mu sync.Mutex mu sync.Mutex
} }
// NewNonceStorage allocates the fixed-size miner slot table.
//
// storage := nicehash.NewNonceStorage()
func NewNonceStorage() *NonceStorage {
	storage := &NonceStorage{}
	storage.miners = map[int64]*proxy.Miner{}
	return storage
}
// Add finds the next free slot starting from cursor (wrapping), sets slot[index] = minerID,
// and sets the miner fixed byte.
//
// ok := storage.Add(miner)
func (s *NonceStorage) Add(miner *proxy.Miner) bool {
	if miner == nil {
		return false
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	total := len(s.slots)
	for step := 0; step < total; step++ {
		slot := (s.cursor + step) % total
		if s.slots[slot] != 0 {
			continue
		}
		id := miner.ID()
		s.slots[slot] = id
		s.miners[id] = miner
		miner.SetFixedByte(uint8(slot))
		s.cursor = (slot + 1) % total
		return true
	}
	// Every slot occupied or dead: the table is full.
	return false
}
// Remove marks slot[miner.FixedByte] as a dead slot until the next SetJob call.
//
// storage.Remove(miner)
func (s *NonceStorage) Remove(miner *proxy.Miner) {
	if miner == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	slot := int(miner.FixedByte())
	if slot >= 0 && slot < len(s.slots) {
		// Only tombstone the slot if this miner still owns it.
		if s.slots[slot] == miner.ID() {
			s.slots[slot] = -miner.ID()
		}
	}
	delete(s.miners, miner.ID())
}
// SetJob replaces the current job, clears dead slots, and fans the job out to active miners.
// The outgoing job is kept as prevJob (for stale-share validation) only when
// it is valid and shares a non-empty ClientID with the new job.
//
// storage.SetJob(job)
func (s *NonceStorage) SetJob(job proxy.Job) {
	s.mu.Lock()
	if s.job.IsValid() && s.job.ClientID != "" && s.job.ClientID == job.ClientID {
		s.prevJob = s.job
	} else {
		s.prevJob = proxy.Job{}
	}
	s.job = job
	// Collect fan-out targets while clearing dead (negative) slots.
	miners := make([]*proxy.Miner, 0, len(s.miners))
	for index, minerID := range s.slots {
		if minerID < 0 {
			s.slots[index] = 0
			continue
		}
		if minerID > 0 {
			if miner := s.miners[minerID]; miner != nil {
				miners = append(miners, miner)
			}
		}
	}
	// Unlock before ForwardJob so miner I/O never runs under the storage lock.
	s.mu.Unlock()
	for _, miner := range miners {
		miner.ForwardJob(job, job.Algo)
	}
}
// IsValidJobID returns true if id matches the current or previous job ID.
//
// if !storage.IsValidJobID(submitJobID) { reject }
func (s *NonceStorage) IsValidJobID(id string) bool {
	ok, _ := s.JobStatus(id)
	return ok
}
// JobForID returns a copy of the current or previous job for the given ID.
// Matching the previous job counts as a stale hit and, as a side effect,
// increments the expired counter.
//
// job, valid, expired := storage.JobForID(submitJobID)
func (s *NonceStorage) JobForID(id string) (job proxy.Job, valid bool, expired bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if id == "" {
		return proxy.Job{}, false, false
	}
	if id == s.job.JobID {
		return s.job, true, false
	}
	// The previous job is only acceptable when it is valid and belonged to a
	// known client (ClientID non-empty).
	if s.prevJob.IsValid() && s.prevJob.ClientID != "" && id == s.prevJob.JobID {
		s.expired++
		return s.prevJob, true, true
	}
	return proxy.Job{}, false, false
}
// JobStatus returns whether the job ID is current or stale-but-still-acceptable.
//
// valid, expired := storage.JobStatus(submitJobID)
func (s *NonceStorage) JobStatus(id string) (valid bool, expired bool) {
	_, isValid, isExpired := s.JobForID(id)
	return isValid, isExpired
}
// SlotCount returns free, dead, and active slot counts for monitoring output.
//
// free, dead, active := storage.SlotCount()
func (s *NonceStorage) SlotCount() (free int, dead int, active int) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, slot := range s.slots {
		if slot == 0 {
			free++
		} else if slot < 0 {
			dead++
		} else {
			active++
		}
	}
	return free, dead, active
}
// ExpiredCount returns the number of times the previous job ID has been accepted as stale.
//
// count := storage.ExpiredCount()
func (s *NonceStorage) ExpiredCount() uint64 {
	s.mu.Lock()
	count := s.expired
	s.mu.Unlock()
	return count
}
// Miners returns a snapshot copy of the active miner map; mutating the
// returned map does not affect the storage.
func (s *NonceStorage) Miners() map[int64]*proxy.Miner {
	s.mu.Lock()
	defer s.mu.Unlock()
	snapshot := make(map[int64]*proxy.Miner, len(s.miners))
	for id, m := range s.miners {
		snapshot[id] = m
	}
	return snapshot
}
// CurrentJob returns a copy of the latest assigned job, or nil when no valid
// job has been set.
func (s *NonceStorage) CurrentJob() *proxy.Job {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.job.IsValid() {
		return nil
	}
	current := s.job
	return &current
}

View file

@ -1,120 +1,160 @@
package nicehash package nicehash
import ( import (
"strings"
"testing" "testing"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/proxy"
) )
func TestNonceStorage_Add_Good(t *testing.T) { // TestStorage_Add_Good verifies 256 sequential Add calls fill all slots with unique FixedByte values.
//
// storage := nicehash.NewNonceStorage()
// for i := 0; i < 256; i++ {
// m := &proxy.Miner{}
// m.SetID(int64(i + 1))
// ok := storage.Add(m) // true for all 256
// }
func TestStorage_Add_Good(t *testing.T) {
storage := NewNonceStorage() storage := NewNonceStorage()
miner := proxy.NewMiner(nil, 0, nil) seen := make(map[uint8]bool)
miner.SetUser("wallet") for i := 0; i < 256; i++ {
m := &proxy.Miner{}
m.SetID(int64(i + 1))
ok := storage.Add(m)
if !ok {
t.Fatalf("expected add %d to succeed", i)
}
if seen[m.FixedByte()] {
t.Fatalf("duplicate fixed byte %d at add %d", m.FixedByte(), i)
}
seen[m.FixedByte()] = true
}
}
// TestStorage_Add_Bad verifies the 257th Add returns false when all 256 slots are occupied.
//
// storage := nicehash.NewNonceStorage()
// // fill 256 slots...
// ok := storage.Add(overflowMiner) // false — table is full
func TestStorage_Add_Bad(t *testing.T) {
storage := NewNonceStorage()
for i := 0; i < 256; i++ {
m := &proxy.Miner{}
m.SetID(int64(i + 1))
storage.Add(m)
}
overflow := &proxy.Miner{}
overflow.SetID(257)
if storage.Add(overflow) {
t.Fatalf("expected 257th add to fail when table is full")
}
}
// TestStorage_Add_Ugly verifies that a removed slot (dead) is reclaimed after SetJob clears it.
//
// storage := nicehash.NewNonceStorage()
// storage.Add(miner)
// storage.Remove(miner) // slot becomes dead (-minerID)
// storage.SetJob(job) // dead slots cleared to 0
// storage.Add(newMiner) // reclaimed slot succeeds
func TestStorage_Add_Ugly(t *testing.T) {
storage := NewNonceStorage()
miner := &proxy.Miner{}
miner.SetID(1)
if !storage.Add(miner) { if !storage.Add(miner) {
t.Fatal("expected slot allocation to succeed") t.Fatalf("expected first add to succeed")
}
storage.Remove(miner)
free, dead, active := storage.SlotCount()
if dead != 1 || active != 0 {
t.Fatalf("expected 1 dead slot, got free=%d dead=%d active=%d", free, dead, active)
}
// SetJob clears dead slots
storage.SetJob(proxy.Job{Blob: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", JobID: "job-1"})
free, dead, active = storage.SlotCount()
if dead != 0 {
t.Fatalf("expected dead slots cleared after SetJob, got %d", dead)
}
// Reclaim the slot
newMiner := &proxy.Miner{}
newMiner.SetID(2)
if !storage.Add(newMiner) {
t.Fatalf("expected reclaimed slot add to succeed")
} }
} }
func TestNonceStorage_Add_Bad(t *testing.T) { // TestStorage_IsValidJobID_Good verifies the current job ID is accepted.
//
// storage := nicehash.NewNonceStorage()
// storage.SetJob(proxy.Job{JobID: "job-2", Blob: "..."})
// storage.IsValidJobID("job-2") // true
func TestStorage_IsValidJobID_Good(t *testing.T) {
storage := NewNonceStorage() storage := NewNonceStorage()
if storage.Add(nil) { storage.SetJob(proxy.Job{
t.Fatal("expected nil miner allocation to fail") JobID: "job-1",
} Blob: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
}
func TestNonceStorage_Add_Ugly(t *testing.T) {
storage := NewNonceStorage()
for index := 0; index < 256; index++ {
miner := proxy.NewMiner(nil, 0, nil)
if !storage.Add(miner) {
t.Fatalf("expected miner %d to fit", index)
}
}
if storage.Add(proxy.NewMiner(nil, 0, nil)) {
t.Fatal("expected 257th miner to fail")
}
}
func TestNonceStorage_IsValidJobID_Good(t *testing.T) {
storage := NewNonceStorage()
storage.SetJob(proxy.Job{Blob: "abcd", JobID: "job-1"})
if !storage.IsValidJobID("job-1") {
t.Fatal("expected current job ID to be valid")
}
}
func TestNonceStorage_IsValidJobID_Bad(t *testing.T) {
storage := NewNonceStorage()
storage.SetJob(proxy.Job{Blob: "abcd", JobID: "job-1"})
if storage.IsValidJobID("job-2") {
t.Fatal("expected unknown job ID to be invalid")
}
}
func TestNonceStorage_IsValidJobID_Ugly(t *testing.T) {
storage := NewNonceStorage()
storage.SetJob(proxy.Job{Blob: "abcd", JobID: "job-1", ClientID: "pool-a"})
storage.SetJob(proxy.Job{Blob: "efgh", JobID: "job-2", ClientID: "pool-a"})
if !storage.IsValidJobID("job-1") {
t.Fatal("expected previous job ID from same client to remain valid")
}
if got := storage.ExpiredCount(); got != 1 {
t.Fatalf("expected stale job lookups to increment the expired counter, got %d", got)
}
}
func TestNonceStorage_IsValidJobID_BadClientID(t *testing.T) {
storage := NewNonceStorage()
storage.SetJob(proxy.Job{Blob: "abcd", JobID: "job-1", ClientID: "pool-a"})
storage.SetJob(proxy.Job{Blob: "efgh", JobID: "job-2", ClientID: "pool-b"})
if storage.IsValidJobID("job-1") {
t.Fatal("expected previous job ID from a different client to be invalid")
}
}
func TestNonceMapper_OnDisconnect_Ugly(t *testing.T) {
mapper := NewNonceMapper(1, &proxy.Config{}, nil)
mapper.pending[1] = SubmitContext{RequestID: 7}
mapper.OnDisconnect()
if len(mapper.pending) != 0 {
t.Fatalf("expected pending submits to be cleared, got %d", len(mapper.pending))
}
}
func TestNonceMapper_OnResultAccepted_Good(t *testing.T) {
bus := proxy.NewEventBus()
resultCh := make(chan proxy.Event, 1)
bus.Subscribe(proxy.EventAccept, func(event proxy.Event) {
resultCh <- event
}) })
miner := proxy.NewMiner(nil, 0, nil) if !storage.IsValidJobID("job-1") {
mapper := NewNonceMapper(1, &proxy.Config{}, nil) t.Fatalf("expected current job to be valid")
mapper.events = bus }
if !mapper.storage.Add(miner) { }
t.Fatal("expected miner slot allocation")
} // TestStorage_IsValidJobID_Bad verifies an unknown job ID is rejected.
mapper.storage.SetJob(proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-a", Target: "b88d0600"}) //
mapper.mu.Lock() // storage := nicehash.NewNonceStorage()
mapper.pending[1] = SubmitContext{ // storage.IsValidJobID("nonexistent") // false
RequestID: 7, func TestStorage_IsValidJobID_Bad(t *testing.T) {
MinerID: miner.ID(), storage := NewNonceStorage()
Job: proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-a", Target: "b88d0600"}, storage.SetJob(proxy.Job{
SubmittedAt: time.Now().UTC(), JobID: "job-1",
} Blob: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
mapper.mu.Unlock() })
mapper.storage.SetJob(proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-b", Target: "b88d0600"})
if storage.IsValidJobID("nonexistent") {
mapper.OnResultAccepted(1, true, "") t.Fatalf("expected unknown job id to be invalid")
}
select { if storage.IsValidJobID("") {
case event := <-resultCh: t.Fatalf("expected empty job id to be invalid")
if event.Job == nil || event.Job.JobID != "job-a" { }
t.Fatalf("expected submitted job to be reported, got %#v", event.Job) }
}
case <-time.After(time.Second): // TestStorage_IsValidJobID_Ugly verifies the previous job ID is accepted but counts as expired.
t.Fatal("expected accept event") //
// storage := nicehash.NewNonceStorage()
// // job-1 is current, job-2 pushes job-1 to previous
// storage.IsValidJobID("job-1") // true (but expired counter increments)
func TestStorage_IsValidJobID_Ugly(t *testing.T) {
storage := NewNonceStorage()
blob160 := "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
storage.SetJob(proxy.Job{JobID: "job-1", Blob: blob160, ClientID: "session-1"})
storage.SetJob(proxy.Job{JobID: "job-2", Blob: blob160, ClientID: "session-1"})
if !storage.IsValidJobID("job-2") {
t.Fatalf("expected current job to be valid")
}
if !storage.IsValidJobID("job-1") {
t.Fatalf("expected previous job to remain valid")
}
if storage.expired != 1 {
t.Fatalf("expected one expired job validation, got %d", storage.expired)
}
}
// TestStorage_SlotCount_Good verifies free/dead/active counts on a fresh storage.
//
// storage := nicehash.NewNonceStorage()
// free, dead, active := storage.SlotCount() // 256, 0, 0
func TestStorage_SlotCount_Good(t *testing.T) {
storage := NewNonceStorage()
free, dead, active := storage.SlotCount()
if free != 256 || dead != 0 || active != 0 {
t.Fatalf("expected 256/0/0, got free=%d dead=%d active=%d", free, dead, active)
} }
} }

View file

@ -0,0 +1,69 @@
package nicehash
import (
"testing"
"dappco.re/go/proxy"
)
// upstreamStateStrategy is a test double whose IsActive result is fixed by
// the active field.
type upstreamStateStrategy struct {
	active bool // value returned by IsActive
}

func (s *upstreamStateStrategy) Connect() {}

func (s *upstreamStateStrategy) Submit(jobID, nonce, result, algo string) int64 {
	return 0
}

func (s *upstreamStateStrategy) Disconnect() {}

func (s *upstreamStateStrategy) IsActive() bool { return s.active }
// TestNonceSplitter_Upstreams_Good verifies an active mapper and a suspended
// mapper are tallied into Active and Error, with Total covering both.
func TestNonceSplitter_Upstreams_Good(t *testing.T) {
	splitter := &NonceSplitter{
		mappers: []*NonceMapper{
			{strategy: &upstreamStateStrategy{active: true}, active: true},
			{strategy: &upstreamStateStrategy{active: false}, active: false, suspended: 1},
		},
	}
	stats := splitter.Upstreams()
	if stats.Active != 1 {
		t.Fatalf("expected one active upstream, got %d", stats.Active)
	}
	if stats.Error != 1 {
		t.Fatalf("expected one error upstream, got %d", stats.Error)
	}
	if stats.Total != 2 {
		t.Fatalf("expected total to equal active + sleep + error, got %d", stats.Total)
	}
}
// TestNonceSplitter_Upstreams_Bad verifies calling Upstreams on a nil
// splitter returns zero-value stats instead of panicking.
func TestNonceSplitter_Upstreams_Bad(t *testing.T) {
	var splitter *NonceSplitter
	stats := splitter.Upstreams()
	if stats != (proxy.UpstreamStats{}) {
		t.Fatalf("expected zero-value stats for nil splitter, got %+v", stats)
	}
}
// TestNonceSplitter_Upstreams_Ugly verifies a mapper that is neither active
// nor suspended but whose strategy is inactive is still counted as an error,
// keeping Total consistent.
func TestNonceSplitter_Upstreams_Ugly(t *testing.T) {
	splitter := &NonceSplitter{
		mappers: []*NonceMapper{
			{strategy: &upstreamStateStrategy{active: false}, active: false},
		},
	}
	stats := splitter.Upstreams()
	if stats.Error != 1 {
		t.Fatalf("expected an unready mapper to be counted as error, got %+v", stats)
	}
	if stats.Total != 1 {
		t.Fatalf("expected total to remain internally consistent, got %+v", stats)
	}
}

383
splitter/simple/impl.go Normal file
View file

@ -0,0 +1,383 @@
package simple
import (
"time"
"dappco.re/go/proxy"
"dappco.re/go/proxy/pool"
)
// init registers the "simple" passthrough splitter factory under its
// config-selectable name.
func init() {
	proxy.RegisterSplitterFactory("simple", func(config *proxy.Config, eventBus *proxy.EventBus) proxy.Splitter {
		return NewSimpleSplitter(config, eventBus, pool.NewStrategyFactory(config))
	})
}
// NewSimpleSplitter creates the passthrough splitter.
func NewSimpleSplitter(config *proxy.Config, eventBus *proxy.EventBus, factory pool.StrategyFactory) *SimpleSplitter {
if factory == nil {
factory = pool.NewStrategyFactory(config)
}
return &SimpleSplitter{
active: make(map[int64]*SimpleMapper),
idle: make(map[int64]*SimpleMapper),
config: config,
events: eventBus,
factory: factory,
}
}
// Connect establishes any mapper strategies that already exist.
func (s *SimpleSplitter) Connect() {
if s == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
for _, mapper := range s.active {
if mapper.strategy != nil {
mapper.strategy.Connect()
}
}
for _, mapper := range s.idle {
if mapper.strategy != nil {
mapper.strategy.Connect()
}
}
}
// OnLogin creates or reclaims a mapper.
func (s *SimpleSplitter) OnLogin(event *proxy.LoginEvent) {
if s == nil || event == nil || event.Miner == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
now := time.Now()
if s.config.ReuseTimeout > 0 {
for id, mapper := range s.idle {
if mapper.strategy != nil && mapper.strategy.IsActive() && !mapper.idleAt.IsZero() && now.Sub(mapper.idleAt) <= time.Duration(s.config.ReuseTimeout)*time.Second {
delete(s.idle, id)
mapper.miner = event.Miner
mapper.idleAt = time.Time{}
mapper.stopped = false
s.active[event.Miner.ID()] = mapper
event.Miner.SetRouteID(mapper.id)
if mapper.currentJob.IsValid() {
event.Miner.SetCurrentJob(mapper.currentJob)
}
return
}
}
}
mapper := s.newMapperLocked()
mapper.miner = event.Miner
s.active[event.Miner.ID()] = mapper
event.Miner.SetRouteID(mapper.id)
if mapper.strategy != nil {
mapper.strategy.Connect()
}
}
// OnSubmit forwards the share to the owning mapper.
func (s *SimpleSplitter) OnSubmit(event *proxy.SubmitEvent) {
if s == nil || event == nil || event.Miner == nil {
return
}
s.mu.Lock()
mapper := s.activeMapperByRouteIDLocked(event.Miner.RouteID())
s.mu.Unlock()
if mapper != nil {
mapper.Submit(event)
}
}
// OnClose moves a mapper to the idle pool or stops it.
func (s *SimpleSplitter) OnClose(event *proxy.CloseEvent) {
if s == nil || event == nil || event.Miner == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
mapper := s.active[event.Miner.ID()]
if mapper == nil {
return
}
delete(s.active, event.Miner.ID())
mapper.miner = nil
mapper.idleAt = time.Now()
event.Miner.SetRouteID(-1)
if s.config.ReuseTimeout > 0 {
s.idle[mapper.id] = mapper
return
}
mapper.stopped = true
if mapper.strategy != nil {
mapper.strategy.Disconnect()
}
}
// GC removes expired idle mappers.
func (s *SimpleSplitter) GC() {
if s == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
now := time.Now()
for id, mapper := range s.idle {
if mapper.stopped || (s.config.ReuseTimeout > 0 && now.Sub(mapper.idleAt) > time.Duration(s.config.ReuseTimeout)*time.Second) {
if mapper.strategy != nil {
mapper.strategy.Disconnect()
}
delete(s.idle, id)
}
}
}
// Tick advances timeout checks in simple mode.
func (s *SimpleSplitter) Tick(ticks uint64) {
if s == nil {
return
}
strategies := make([]pool.Strategy, 0, len(s.active)+len(s.idle))
s.mu.Lock()
for _, mapper := range s.active {
if mapper != nil && mapper.strategy != nil {
strategies = append(strategies, mapper.strategy)
}
}
for _, mapper := range s.idle {
if mapper != nil && mapper.strategy != nil {
strategies = append(strategies, mapper.strategy)
}
}
s.mu.Unlock()
for _, strategy := range strategies {
if ticker, ok := strategy.(interface{ Tick(uint64) }); ok {
ticker.Tick(ticks)
}
}
s.GC()
}
// Upstreams returns active/idle/error counts.
func (s *SimpleSplitter) Upstreams() proxy.UpstreamStats {
if s == nil {
return proxy.UpstreamStats{}
}
s.mu.Lock()
defer s.mu.Unlock()
var stats proxy.UpstreamStats
for _, mapper := range s.active {
if mapper == nil {
continue
}
if mapper.stopped || mapper.strategy == nil || !mapper.strategy.IsActive() {
stats.Error++
continue
}
stats.Active++
}
for _, mapper := range s.idle {
if mapper == nil {
continue
}
if mapper.stopped || mapper.strategy == nil || !mapper.strategy.IsActive() {
stats.Error++
continue
}
stats.Sleep++
}
stats.Total = stats.Active + stats.Sleep + stats.Error
return stats
}
// Disconnect closes every active or idle upstream connection and clears the mapper tables.
func (s *SimpleSplitter) Disconnect() {
if s == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
for _, mapper := range s.active {
if mapper != nil && mapper.strategy != nil {
mapper.strategy.Disconnect()
}
}
for _, mapper := range s.idle {
if mapper != nil && mapper.strategy != nil {
mapper.strategy.Disconnect()
}
}
s.active = make(map[int64]*SimpleMapper)
s.idle = make(map[int64]*SimpleMapper)
}
// ReloadPools reconnects each active or idle mapper using the updated pool list.
//
// Strategies are snapshotted under the lock and reloaded outside it; only
// strategies implementing pool.ReloadableStrategy are reloaded.
//
// s.ReloadPools()
func (s *SimpleSplitter) ReloadPools() {
	if s == nil {
		return
	}
	strategies := make([]pool.Strategy, 0, len(s.active)+len(s.idle))
	s.mu.Lock()
	for _, mapper := range s.active {
		if mapper == nil || mapper.strategy == nil {
			continue
		}
		strategies = append(strategies, mapper.strategy)
	}
	for _, mapper := range s.idle {
		if mapper == nil || mapper.strategy == nil {
			continue
		}
		strategies = append(strategies, mapper.strategy)
	}
	s.mu.Unlock()
	for _, strategy := range strategies {
		if reloadable, ok := strategy.(pool.ReloadableStrategy); ok {
			reloadable.ReloadPools()
		}
	}
}
// newMapperLocked allocates the next mapper ID and builds a mapper wired to
// the splitter's event bus and strategy factory. Callers must hold s.mu.
func (s *SimpleSplitter) newMapperLocked() *SimpleMapper {
	id := s.nextMapperID
	s.nextMapperID++
	mapper := NewSimpleMapper(id, nil)
	mapper.events = s.events
	// The original retried s.factory(mapper) when the first call returned
	// nil — calling the same factory with the same argument again cannot
	// change the outcome, so the retry was dead code. Guard the factory
	// itself instead (nil is possible if the struct is built directly rather
	// than via NewSimpleSplitter).
	if s.factory != nil {
		mapper.strategy = s.factory(mapper)
	}
	return mapper
}
// activeMapperByRouteIDLocked scans the active table for the mapper with the
// given route ID, returning nil when none matches or the ID is negative.
// Callers must hold s.mu.
func (s *SimpleSplitter) activeMapperByRouteIDLocked(routeID int64) *SimpleMapper {
	if s == nil || routeID < 0 {
		return nil
	}
	for _, candidate := range s.active {
		if candidate == nil {
			continue
		}
		if candidate.id == routeID {
			return candidate
		}
	}
	return nil
}
// Submit forwards a share to the pool.
func (m *SimpleMapper) Submit(event *proxy.SubmitEvent) {
if m == nil || event == nil || m.strategy == nil {
return
}
m.mu.Lock()
defer m.mu.Unlock()
jobID := event.JobID
if jobID == "" {
jobID = m.currentJob.JobID
}
if jobID == "" || (jobID != m.currentJob.JobID && jobID != m.prevJob.JobID) {
m.rejectInvalidJobLocked(event, m.currentJob)
return
}
submissionJob := m.currentJob
if jobID == m.prevJob.JobID && m.prevJob.JobID != "" {
submissionJob = m.prevJob
}
seq := m.strategy.Submit(jobID, event.Nonce, event.Result, event.Algo)
m.pending[seq] = submitContext{
RequestID: event.RequestID,
Diff: proxy.EffectiveShareDifficulty(submissionJob, event.Miner),
StartedAt: time.Now(),
JobID: jobID,
}
}
// rejectInvalidJobLocked replies to the miner with an "Invalid job id" error
// and, when an event bus is attached, emits a reject event carrying a copy of
// the supplied job. Callers hold m.mu.
func (m *SimpleMapper) rejectInvalidJobLocked(event *proxy.SubmitEvent, job proxy.Job) {
	if event == nil || event.Miner == nil {
		return
	}
	event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
	if m.events != nil {
		// Copy so later job mutations cannot alter the dispatched event.
		jobCopy := job
		m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: event.Miner, Job: &jobCopy, Error: "Invalid job id"})
	}
}
// OnJob forwards the latest pool job to the active miner.
func (m *SimpleMapper) OnJob(job proxy.Job) {
if m == nil {
return
}
m.mu.Lock()
m.prevJob = m.currentJob
if m.prevJob.ClientID != job.ClientID {
m.prevJob = proxy.Job{}
}
m.currentJob = job
m.stopped = false
m.idleAt = time.Time{}
miner := m.miner
m.mu.Unlock()
if miner == nil {
return
}
miner.ForwardJob(job, job.Algo)
}
// OnResultAccepted forwards result status to the miner.
func (m *SimpleMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
if m == nil {
return
}
m.mu.Lock()
ctx, ok := m.pending[sequence]
if ok {
delete(m.pending, sequence)
}
miner := m.miner
currentJob := m.currentJob
prevJob := m.prevJob
m.mu.Unlock()
if !ok || miner == nil {
return
}
latency := uint16(0)
if !ctx.StartedAt.IsZero() {
elapsed := time.Since(ctx.StartedAt).Milliseconds()
if elapsed > int64(^uint16(0)) {
latency = ^uint16(0)
} else {
latency = uint16(elapsed)
}
}
job := currentJob
expired := false
if ctx.JobID != "" && ctx.JobID == prevJob.JobID && ctx.JobID != currentJob.JobID {
job = prevJob
expired = true
}
if accepted {
miner.Success(ctx.RequestID, "OK")
if m.events != nil {
m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Diff: ctx.Diff, Job: &job, Latency: latency, Expired: expired})
}
return
}
miner.ReplyWithError(ctx.RequestID, errorMessage)
if m.events != nil {
m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Diff: ctx.Diff, Job: &job, Error: errorMessage, Latency: latency})
}
}
// OnDisconnect marks the mapper as disconnected.
func (m *SimpleMapper) OnDisconnect() {
if m == nil {
return
}
m.stopped = true
}

View file

@ -0,0 +1,377 @@
package simple
import (
"bufio"
"encoding/json"
"io"
"net"
"sync"
"testing"
"time"
"dappco.re/go/proxy"
"dappco.re/go/proxy/pool"
)
// activeStrategy is a minimal always-active pool strategy stub.
type activeStrategy struct{}

func (a activeStrategy) Connect() {}

func (a activeStrategy) Submit(string, string, string, string) int64 { return 0 }

func (a activeStrategy) Disconnect() {}

func (a activeStrategy) IsActive() bool { return true }

// submitRecordingStrategy counts Submit calls and returns the running count
// as the sequence number.
type submitRecordingStrategy struct {
	submits int // number of Submit calls observed
}

func (s *submitRecordingStrategy) Connect() {}

func (s *submitRecordingStrategy) Submit(string, string, string, string) int64 {
	s.submits++
	return int64(s.submits)
}

func (s *submitRecordingStrategy) Disconnect() {}

func (s *submitRecordingStrategy) IsActive() bool { return true }
// TestSimpleMapper_New_Good verifies NewSimpleMapper stores the id and
// strategy and initialises the pending map.
func TestSimpleMapper_New_Good(t *testing.T) {
	strategy := activeStrategy{}
	mapper := NewSimpleMapper(7, strategy)
	if mapper == nil {
		t.Fatal("expected mapper")
	}
	if mapper.id != 7 {
		t.Fatalf("expected mapper id 7, got %d", mapper.id)
	}
	if mapper.strategy != strategy {
		t.Fatalf("expected strategy to be stored")
	}
	if mapper.pending == nil {
		t.Fatal("expected pending map to be initialised")
	}
}
// TestSimpleSplitter_OnLogin_Good verifies a recently-idled mapper is
// reclaimed on login: the miner receives the mapper's route ID and its last
// job is restored.
func TestSimpleSplitter_OnLogin_Good(t *testing.T) {
	splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
		return activeStrategy{}
	})
	miner := &proxy.Miner{}
	job := proxy.Job{JobID: "job-1", Blob: "blob"}
	mapper := &SimpleMapper{
		id:         7,
		strategy:   activeStrategy{},
		currentJob: job,
		idleAt:     time.Now(),
	}
	splitter.idle[mapper.id] = mapper
	splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
	if miner.RouteID() != mapper.id {
		t.Fatalf("expected reclaimed mapper route id %d, got %d", mapper.id, miner.RouteID())
	}
	if got := miner.CurrentJob().JobID; got != job.JobID {
		t.Fatalf("expected current job to be restored on reuse, got %q", got)
	}
}
// TestSimpleSplitter_OnLogin_Ugly verifies a mapper idle beyond ReuseTimeout
// is not reclaimed: a new mapper is allocated and the expired one stays in
// the idle pool until GC collects it.
func TestSimpleSplitter_OnLogin_Ugly(t *testing.T) {
	splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
		return activeStrategy{}
	})
	miner := &proxy.Miner{}
	expired := &SimpleMapper{
		id:       7,
		strategy: activeStrategy{},
		idleAt:   time.Now().Add(-time.Minute),
	}
	splitter.idle[expired.id] = expired
	splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
	if miner.RouteID() == expired.id {
		t.Fatalf("expected expired mapper not to be reclaimed")
	}
	if miner.RouteID() != 0 {
		t.Fatalf("expected a new mapper to be allocated, got route id %d", miner.RouteID())
	}
	if len(splitter.active) != 1 {
		t.Fatalf("expected one active mapper, got %d", len(splitter.active))
	}
	if len(splitter.idle) != 1 {
		t.Fatalf("expected expired mapper to remain idle until GC, got %d idle mappers", len(splitter.idle))
	}
}
// TestSimpleSplitter_OnSubmit_UsesRouteID_Good verifies that a submit is
// routed by the miner's route ID (mapper id 7) even though the mapper is
// stored under an unrelated map key (99), and that it records a pending entry.
func TestSimpleSplitter_OnSubmit_UsesRouteID_Good(t *testing.T) {
	recorder := &submitRecordingStrategy{}
	s := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, nil)
	m := proxy.NewMiner(discardConn{}, 3333, nil)
	m.SetID(21)
	m.SetRouteID(7)
	routed := &SimpleMapper{
		id:         7,
		miner:      m,
		currentJob: proxy.Job{JobID: "job-1", Blob: "blob", Target: "b88d0600"},
		strategy:   recorder,
		pending:    make(map[int64]submitContext),
	}
	s.active[99] = routed // deliberately not keyed by miner ID
	event := &proxy.SubmitEvent{
		Miner:     m,
		JobID:     "job-1",
		Nonce:     "deadbeef",
		Result:    "hash",
		RequestID: 11,
	}
	s.OnSubmit(event)
	if recorder.submits != 1 {
		t.Fatalf("expected one submit routed by route id, got %d", recorder.submits)
	}
	if len(routed.pending) != 1 {
		t.Fatalf("expected routed submit to create one pending entry, got %d", len(routed.pending))
	}
}
func TestSimpleSplitter_Upstreams_Good(t *testing.T) {
splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
return activeStrategy{}
})
splitter.active[1] = &SimpleMapper{id: 1, strategy: activeStrategy{}}
splitter.idle[2] = &SimpleMapper{id: 2, strategy: activeStrategy{}, idleAt: time.Now()}
stats := splitter.Upstreams()
if stats.Active != 1 {
t.Fatalf("expected one active upstream, got %d", stats.Active)
}
if stats.Sleep != 1 {
t.Fatalf("expected one sleeping upstream, got %d", stats.Sleep)
}
if stats.Error != 0 {
t.Fatalf("expected no error upstreams, got %d", stats.Error)
}
if stats.Total != 2 {
t.Fatalf("expected total upstreams to be 2, got %d", stats.Total)
}
}
func TestSimpleSplitter_Upstreams_Ugly(t *testing.T) {
splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
return activeStrategy{}
})
splitter.active[1] = &SimpleMapper{id: 1, strategy: activeStrategy{}, stopped: true}
splitter.idle[2] = &SimpleMapper{id: 2, strategy: activeStrategy{}, stopped: true, idleAt: time.Now()}
stats := splitter.Upstreams()
if stats.Active != 0 {
t.Fatalf("expected no active upstreams, got %d", stats.Active)
}
if stats.Sleep != 0 {
t.Fatalf("expected no sleeping upstreams, got %d", stats.Sleep)
}
if stats.Error != 2 {
t.Fatalf("expected both upstreams to be counted as error, got %d", stats.Error)
}
if stats.Total != 2 {
t.Fatalf("expected total upstreams to be 2, got %d", stats.Total)
}
}
func TestSimpleSplitter_Upstreams_RecoveryResetsStopped_Good(t *testing.T) {
splitter := NewSimpleSplitter(&proxy.Config{ReuseTimeout: 30}, nil, func(listener pool.StratumListener) pool.Strategy {
return activeStrategy{}
})
mapper := &SimpleMapper{id: 1, strategy: activeStrategy{}, stopped: true}
splitter.active[1] = mapper
before := splitter.Upstreams()
if before.Error != 1 {
t.Fatalf("expected disconnected mapper to count as error, got %+v", before)
}
mapper.OnJob(proxy.Job{JobID: "job-1", Blob: "blob"})
after := splitter.Upstreams()
if after.Active != 1 {
t.Fatalf("expected recovered mapper to count as active, got %+v", after)
}
if after.Error != 0 {
t.Fatalf("expected recovered mapper not to remain in error, got %+v", after)
}
}
// discardConn is a no-op net.Conn for constructing miners in tests without
// real I/O: reads end immediately with io.EOF, writes are swallowed, and all
// deadline/close/address methods succeed trivially.
type discardConn struct{}

func (discardConn) Read([]byte) (int, error) { return 0, io.EOF }
func (discardConn) Write(p []byte) (int, error) { return len(p), nil }
func (discardConn) Close() error { return nil }
func (discardConn) LocalAddr() net.Addr { return nil }
func (discardConn) RemoteAddr() net.Addr { return nil }
func (discardConn) SetDeadline(time.Time) error { return nil }
func (discardConn) SetReadDeadline(time.Time) error { return nil }
func (discardConn) SetWriteDeadline(time.Time) error { return nil }
// TestSimpleMapper_OnResultAccepted_Expired verifies that an accepted share
// whose pending context references the previous job is dispatched with the
// Expired flag set and the old job attached to the event.
func TestSimpleMapper_OnResultAccepted_Expired(t *testing.T) {
	bus := proxy.NewEventBus()
	received := make(chan proxy.Event, 1)
	var deliverOnce sync.Once
	bus.Subscribe(proxy.EventAccept, func(event proxy.Event) {
		deliverOnce.Do(func() { received <- event })
	})
	m := proxy.NewMiner(discardConn{}, 3333, nil)
	m.SetID(1)
	mapper := &SimpleMapper{
		miner:      m,
		currentJob: proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"},
		prevJob:    proxy.Job{JobID: "job-old", Blob: "blob-old", Target: "b88d0600"},
		events:     bus,
		pending: map[int64]submitContext{
			7: {RequestID: 9, StartedAt: time.Now(), JobID: "job-old"},
		},
	}
	mapper.OnResultAccepted(7, true, "")
	select {
	case got := <-received:
		if !got.Expired {
			t.Fatalf("expected expired share to be flagged")
		}
		if got.Job == nil || got.Job.JobID != "job-old" {
			t.Fatalf("expected previous job to be attached, got %+v", got.Job)
		}
	case <-time.After(time.Second):
		t.Fatal("expected accept event")
	}
}
// TestSimpleMapper_OnResultAccepted_CustomDiffUsesEffectiveDifficulty verifies
// that the accept event carries the effective difficulty stored in the pending
// submit context rather than a value derived from the job target.
func TestSimpleMapper_OnResultAccepted_CustomDiffUsesEffectiveDifficulty(t *testing.T) {
	bus := proxy.NewEventBus()
	received := make(chan proxy.Event, 1)
	var deliverOnce sync.Once
	bus.Subscribe(proxy.EventAccept, func(event proxy.Event) {
		deliverOnce.Do(func() { received <- event })
	})
	m := proxy.NewMiner(discardConn{}, 3333, nil)
	m.SetID(2)
	job := proxy.Job{JobID: "job-new", Blob: "blob-new", Target: "b88d0600"}
	mapper := &SimpleMapper{
		miner:      m,
		currentJob: job,
		events:     bus,
		pending: map[int64]submitContext{
			8: {RequestID: 10, Diff: 25000, StartedAt: time.Now(), JobID: "job-new"},
		},
	}
	mapper.OnResultAccepted(8, true, "")
	select {
	case got := <-received:
		if got.Diff != 25000 {
			t.Fatalf("expected effective difficulty 25000, got %d", got.Diff)
		}
	case <-time.After(time.Second):
		t.Fatal("expected accept event")
	}
}
// TestSimpleMapper_OnJob_PreservesPreviousJobForSamePoolSession_Good verifies
// that within one pool session (same ClientID) OnJob rolls the current job
// into prevJob instead of discarding it.
func TestSimpleMapper_OnJob_PreservesPreviousJobForSamePoolSession_Good(t *testing.T) {
	m := &SimpleMapper{
		currentJob: proxy.Job{JobID: "job-1", Blob: "blob-1", ClientID: "session-a"},
	}
	m.OnJob(proxy.Job{JobID: "job-2", Blob: "blob-2", ClientID: "session-a"})
	if m.currentJob.JobID != "job-2" {
		t.Fatalf("expected current job to roll forward, got %q", m.currentJob.JobID)
	}
	if m.prevJob.JobID != "job-1" {
		t.Fatalf("expected previous job to remain available within one pool session, got %q", m.prevJob.JobID)
	}
}
// TestSimpleMapper_OnJob_ResetsPreviousJobAcrossPoolSessions_Ugly verifies
// that a job from a different pool session (new ClientID) wipes the previous
// job history instead of carrying a stale job across sessions.
func TestSimpleMapper_OnJob_ResetsPreviousJobAcrossPoolSessions_Ugly(t *testing.T) {
	m := &SimpleMapper{
		currentJob: proxy.Job{JobID: "job-1", Blob: "blob-1", ClientID: "session-a"},
		prevJob:    proxy.Job{JobID: "job-0", Blob: "blob-0", ClientID: "session-a"},
	}
	m.OnJob(proxy.Job{JobID: "job-2", Blob: "blob-2", ClientID: "session-b"})
	if m.currentJob.JobID != "job-2" {
		t.Fatalf("expected current job to advance after session change, got %q", m.currentJob.JobID)
	}
	if m.prevJob.JobID != "" {
		t.Fatalf("expected previous job history to reset on new pool session, got %q", m.prevJob.JobID)
	}
}
// TestSimpleMapper_Submit_InvalidJob_Good verifies that a submit referencing
// an unknown job ID gets an "Invalid job id" JSON-RPC error reply addressed to
// the original request ID, and that no pending entry is created.
//
// The submit runs in a goroutine because the miner writes its reply over a
// synchronous net.Pipe: the write blocks until this test reads it.
func TestSimpleMapper_Submit_InvalidJob_Good(t *testing.T) {
	minerConn, clientConn := net.Pipe()
	defer minerConn.Close()
	defer clientConn.Close()
	miner := proxy.NewMiner(minerConn, 3333, nil)
	mapper := &SimpleMapper{
		miner:      miner,
		currentJob: proxy.Job{JobID: "job-1", Blob: "blob", Target: "b88d0600"},
		prevJob:    proxy.Job{JobID: "job-0", Blob: "blob", Target: "b88d0600"},
		strategy:   activeStrategy{},
		pending:    make(map[int64]submitContext),
	}
	done := make(chan struct{})
	go func() {
		// "job-missing" matches neither currentJob nor prevJob.
		mapper.Submit(&proxy.SubmitEvent{
			Miner:     miner,
			JobID:     "job-missing",
			Nonce:     "deadbeef",
			Result:    "hash",
			RequestID: 9,
		})
		close(done)
	}()
	// Read one newline-terminated reply from the miner side of the pipe;
	// this unblocks the Submit goroutine's write.
	line, err := bufio.NewReader(clientConn).ReadBytes('\n')
	if err != nil {
		t.Fatalf("read error reply: %v", err)
	}
	<-done
	var payload struct {
		ID    float64 `json:"id"`
		Error struct {
			Message string `json:"message"`
		} `json:"error"`
	}
	if err := json.Unmarshal(line, &payload); err != nil {
		t.Fatalf("unmarshal error reply: %v", err)
	}
	if payload.ID != 9 {
		t.Fatalf("expected request id 9, got %v", payload.ID)
	}
	if payload.Error.Message != "Invalid job id" {
		t.Fatalf("expected invalid job error, got %q", payload.Error.Message)
	}
	if len(mapper.pending) != 0 {
		t.Fatalf("expected invalid submit not to create a pending entry")
	}
}

View file

@ -4,8 +4,8 @@ import (
"sync" "sync"
"time" "time"
"dappco.re/go/core/proxy" "dappco.re/go/proxy"
"dappco.re/go/core/proxy/pool" "dappco.re/go/proxy/pool"
) )
// SimpleMapper holds one outbound pool connection and serves at most one active miner // SimpleMapper holds one outbound pool connection and serves at most one active miner
@ -16,136 +16,30 @@ import (
type SimpleMapper struct { type SimpleMapper struct {
id int64 id int64
miner *proxy.Miner // nil when idle miner *proxy.Miner // nil when idle
strategy pool.Strategy currentJob proxy.Job
events *proxy.EventBus
pending map[int64]simpleSubmitContext
job proxy.Job
prevJob proxy.Job prevJob proxy.Job
strategy pool.Strategy
idleAt time.Time // zero when active idleAt time.Time // zero when active
stopped bool stopped bool
events *proxy.EventBus
pending map[int64]submitContext
mu sync.Mutex mu sync.Mutex
} }
type simpleSubmitContext struct { type submitContext struct {
RequestID int64 RequestID int64
Job proxy.Job Diff uint64
Expired bool StartedAt time.Time
SubmittedAt time.Time JobID string
} }
// NewSimpleMapper stores the mapper ID and strategy. // NewSimpleMapper creates a passthrough mapper for one pool connection.
// //
// mapper := simple.NewSimpleMapper(1, strategy) // m := simple.NewSimpleMapper(7, strategy)
func NewSimpleMapper(id int64, strategy pool.Strategy) *SimpleMapper { func NewSimpleMapper(id int64, strategy pool.Strategy) *SimpleMapper {
return &SimpleMapper{id: id, strategy: strategy, pending: make(map[int64]simpleSubmitContext)} return &SimpleMapper{
} id: id,
strategy: strategy,
func (m *SimpleMapper) OnJob(job proxy.Job) { pending: make(map[int64]submitContext),
if !job.IsValid() {
return
}
m.mu.Lock()
if m.job.IsValid() && m.job.ClientID != "" && m.job.ClientID == job.ClientID {
m.prevJob = m.job
} else {
m.prevJob = proxy.Job{}
}
m.job = job
miner := m.miner
m.mu.Unlock()
if miner != nil {
miner.ForwardJob(job, job.Algo)
} }
} }
func (m *SimpleMapper) JobStatus(id string) (valid bool, expired bool) {
_, valid, expired = m.JobForID(id)
return valid, expired
}
func (m *SimpleMapper) JobForID(id string) (proxy.Job, bool, bool) {
m.mu.Lock()
defer m.mu.Unlock()
if id == "" {
return proxy.Job{}, false, false
}
if id == m.job.JobID {
return m.job, true, false
}
if m.prevJob.IsValid() && m.prevJob.ClientID != "" && id == m.prevJob.JobID {
return m.prevJob, true, true
}
return proxy.Job{}, false, false
}
func (m *SimpleMapper) OnResultAccepted(sequence int64, accepted bool, errorMessage string) {
m.mu.Lock()
context, exists := m.pending[sequence]
miner := m.miner
if !exists {
m.mu.Unlock()
return
}
delete(m.pending, sequence)
m.mu.Unlock()
if miner == nil {
return
}
shareDifficulty := context.Job.DifficultyFromTarget()
if shareDifficulty == 0 {
shareDifficulty = miner.Diff()
}
if accepted {
latency := shareLatency(context.SubmittedAt)
if m.events != nil {
m.events.Dispatch(proxy.Event{Type: proxy.EventAccept, Miner: miner, Job: jobPointer(context.Job), Diff: shareDifficulty, Latency: latency, Expired: context.Expired})
}
miner.Success(context.RequestID, "OK")
return
}
latency := shareLatency(context.SubmittedAt)
if m.events != nil {
m.events.Dispatch(proxy.Event{Type: proxy.EventReject, Miner: miner, Job: jobPointer(context.Job), Diff: shareDifficulty, Error: errorMessage, Latency: latency, Expired: context.Expired})
}
miner.ReplyWithError(context.RequestID, errorMessage)
}
func (m *SimpleMapper) OnDisconnect() {
m.clearPending()
m.stopped = true
}
func (m *SimpleMapper) clearPending() {
m.mu.Lock()
m.pending = make(map[int64]simpleSubmitContext)
m.mu.Unlock()
}
func jobPointer(job proxy.Job) *proxy.Job {
if !job.IsValid() {
return nil
}
jobCopy := job
return &jobCopy
}
func shareLatency(submittedAt time.Time) uint16 {
if submittedAt.IsZero() {
return 0
}
elapsed := time.Since(submittedAt).Milliseconds()
if elapsed <= 0 {
return 0
}
if elapsed > int64(^uint16(0)) {
return ^uint16(0)
}
return uint16(elapsed)
}

View file

@ -1,12 +0,0 @@
package simple
import (
"dappco.re/go/core/proxy"
"dappco.re/go/core/proxy/pool"
)
func init() {
proxy.RegisterSplitterFactory("simple", func(cfg *proxy.Config, events *proxy.EventBus) proxy.Splitter {
return NewSimpleSplitter(cfg, events, pool.NewStrategyFactory(cfg))
})
}

View file

@ -0,0 +1,68 @@
package simple
import (
"testing"
"dappco.re/go/proxy/pool"
)
// reloadableStrategy is a pool.ReloadableStrategy stub that counts how many
// times ReloadPools is invoked; all other methods are inert.
type reloadableStrategy struct {
	reloads int
}

func (s *reloadableStrategy) Connect() {}
func (s *reloadableStrategy) Submit(jobID, nonce, result, algo string) int64 { return 0 }
func (s *reloadableStrategy) Disconnect() {}
func (s *reloadableStrategy) IsActive() bool { return true }
func (s *reloadableStrategy) ReloadPools() { s.reloads++ }

// Compile-time check that the stub satisfies pool.ReloadableStrategy.
var _ pool.ReloadableStrategy = (*reloadableStrategy)(nil)
// TestSimpleSplitter_ReloadPools_Good verifies that ReloadPools forwards the
// reload to an active mapper's reloadable strategy exactly once.
func TestSimpleSplitter_ReloadPools_Good(t *testing.T) {
	reloadable := &reloadableStrategy{}
	s := &SimpleSplitter{
		active: map[int64]*SimpleMapper{1: {strategy: reloadable}},
		idle:   map[int64]*SimpleMapper{},
	}
	s.ReloadPools()
	if reloadable.reloads != 1 {
		t.Fatalf("expected active mapper strategy to reload once, got %d", reloadable.reloads)
	}
}
// TestSimpleSplitter_ReloadPools_Bad ensures ReloadPools tolerates a mapper
// with a nil strategy. The test has no assertions on purpose: passing means
// the call returned without panicking.
func TestSimpleSplitter_ReloadPools_Bad(t *testing.T) {
	splitter := &SimpleSplitter{
		active: map[int64]*SimpleMapper{
			1: {strategy: nil},
		},
		idle: map[int64]*SimpleMapper{},
	}
	// Must not panic on the nil strategy.
	splitter.ReloadPools()
}
// TestSimpleSplitter_ReloadPools_Ugly verifies that ReloadPools reaches both
// the active and the idle mapper pools.
func TestSimpleSplitter_ReloadPools_Ugly(t *testing.T) {
	onActive := &reloadableStrategy{}
	onIdle := &reloadableStrategy{}
	s := &SimpleSplitter{
		active: map[int64]*SimpleMapper{1: {strategy: onActive}},
		idle:   map[int64]*SimpleMapper{2: {strategy: onIdle}},
	}
	s.ReloadPools()
	if onActive.reloads != 1 {
		t.Fatalf("expected active mapper reload, got %d", onActive.reloads)
	}
	if onIdle.reloads != 1 {
		t.Fatalf("expected idle mapper reload, got %d", onIdle.reloads)
	}
}

View file

@ -9,10 +9,9 @@ package simple
import ( import (
"sync" "sync"
"time"
"dappco.re/go/core/proxy" "dappco.re/go/proxy"
"dappco.re/go/core/proxy/pool" "dappco.re/go/proxy/pool"
) )
// SimpleSplitter is the Splitter implementation for simple (passthrough) mode. // SimpleSplitter is the Splitter implementation for simple (passthrough) mode.
@ -20,256 +19,10 @@ import (
// s := simple.NewSimpleSplitter(cfg, eventBus, strategyFactory) // s := simple.NewSimpleSplitter(cfg, eventBus, strategyFactory)
type SimpleSplitter struct { type SimpleSplitter struct {
active map[int64]*SimpleMapper // minerID → mapper active map[int64]*SimpleMapper // minerID → mapper
idle map[int64]*SimpleMapper // mapperID → mapper (reuse pool, keyed by mapper seq) idle map[int64]*SimpleMapper // mapperID → mapper (reuse pool, keyed by mapper ID)
config *proxy.Config config *proxy.Config
events *proxy.EventBus events *proxy.EventBus
strategyFactory pool.StrategyFactory factory pool.StrategyFactory
mu sync.Mutex mu sync.Mutex
mapperSequence int64 // monotonic mapper sequence counter nextMapperID int64 // monotonic mapper ID counter
}
// NewSimpleSplitter creates the passthrough splitter.
//
// s := simple.NewSimpleSplitter(cfg, bus, factory)
func NewSimpleSplitter(cfg *proxy.Config, events *proxy.EventBus, factory pool.StrategyFactory) *SimpleSplitter {
return &SimpleSplitter{
active: make(map[int64]*SimpleMapper),
idle: make(map[int64]*SimpleMapper),
config: cfg,
events: events,
strategyFactory: factory,
}
}
func (s *SimpleSplitter) Connect() {}
func (s *SimpleSplitter) OnLogin(event *proxy.LoginEvent) {
if event == nil || event.Miner == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
timeout := time.Duration(0)
if s.config != nil && s.config.ReuseTimeout > 0 {
timeout = time.Duration(s.config.ReuseTimeout) * time.Second
}
var mapper *SimpleMapper
now := time.Now().UTC()
for mapperID, idleMapper := range s.idle {
if idleMapper == nil || idleMapper.stopped || idleMapper.strategy == nil || !idleMapper.strategy.IsActive() || (timeout > 0 && !idleMapper.idleAt.IsZero() && now.Sub(idleMapper.idleAt) > timeout) {
if idleMapper != nil && idleMapper.strategy != nil {
idleMapper.strategy.Disconnect()
}
delete(s.idle, mapperID)
continue
}
mapper = idleMapper
delete(s.idle, mapperID)
break
}
if mapper == nil {
s.mapperSequence++
var strategy pool.Strategy
mapper = NewSimpleMapper(s.mapperSequence, nil)
mapper.events = s.events
if s.strategyFactory != nil {
strategy = s.strategyFactory(mapper)
}
mapper.strategy = strategy
if mapper.strategy != nil {
mapper.strategy.Connect()
}
} else {
mapper.events = s.events
mapper.clearPending()
}
mapper.miner = event.Miner
mapper.idleAt = time.Time{}
event.Miner.SetRouteID(mapper.id)
s.active[event.Miner.ID()] = mapper
mapper.mu.Lock()
currentJob := mapper.job
mapper.mu.Unlock()
if currentJob.IsValid() {
event.Miner.PrimeJob(currentJob)
}
}
func (s *SimpleSplitter) OnSubmit(event *proxy.SubmitEvent) {
if event == nil || event.Miner == nil {
return
}
s.mu.Lock()
mapper := s.active[event.Miner.ID()]
s.mu.Unlock()
if mapper == nil || mapper.strategy == nil {
return
}
job, valid, expired := mapper.JobForID(event.JobID)
if !valid {
event.Miner.ReplyWithError(event.RequestID, "Invalid job id")
return
}
sequence := mapper.strategy.Submit(event.JobID, event.Nonce, event.Result, event.Algo)
if sequence == 0 {
event.Miner.ReplyWithError(event.RequestID, "Pool unavailable")
return
}
mapper.mu.Lock()
mapper.pending[sequence] = simpleSubmitContext{
RequestID: event.RequestID,
Job: job,
Expired: expired,
SubmittedAt: time.Now().UTC(),
}
mapper.mu.Unlock()
}
func (s *SimpleSplitter) OnClose(event *proxy.CloseEvent) {
if event == nil || event.Miner == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
mapper := s.active[event.Miner.ID()]
if mapper == nil {
return
}
delete(s.active, event.Miner.ID())
mapper.clearPending()
mapper.miner = nil
mapper.idleAt = time.Now().UTC()
if s.config != nil && s.config.ReuseTimeout > 0 {
s.idle[mapper.id] = mapper
return
}
mapper.stopped = true
if mapper.strategy != nil {
mapper.strategy.Disconnect()
}
}
func (s *SimpleSplitter) Tick(ticks uint64) {
if ticks%60 == 0 {
s.GC()
}
}
func (s *SimpleSplitter) GC() {
s.mu.Lock()
defer s.mu.Unlock()
timeout := time.Duration(0)
if s.config != nil && s.config.ReuseTimeout > 0 {
timeout = time.Duration(s.config.ReuseTimeout) * time.Second
}
now := time.Now().UTC()
for mapperID, mapper := range s.idle {
if mapper == nil {
delete(s.idle, mapperID)
continue
}
if mapper.stopped || mapper.strategy == nil || !mapper.strategy.IsActive() || timeout == 0 || (!mapper.idleAt.IsZero() && now.Sub(mapper.idleAt) > timeout) {
if mapper.strategy != nil {
mapper.strategy.Disconnect()
}
delete(s.idle, mapperID)
}
}
}
func (s *SimpleSplitter) Upstreams() proxy.UpstreamStats {
s.mu.Lock()
defer s.mu.Unlock()
stats := proxy.UpstreamStats{
Sleep: uint64(len(s.idle)),
}
for _, mapper := range s.active {
stats.Total++
if mapper.strategy != nil && mapper.strategy.IsActive() {
stats.Active++
} else {
stats.Error++
}
}
stats.Total += uint64(len(s.idle))
return stats
}
func (s *SimpleSplitter) PendingCount() int {
s.mu.Lock()
mapperList := make([]*SimpleMapper, 0, len(s.active)+len(s.idle))
for _, mapper := range s.active {
mapperList = append(mapperList, mapper)
}
for _, mapper := range s.idle {
mapperList = append(mapperList, mapper)
}
s.mu.Unlock()
pending := 0
for _, mapper := range mapperList {
if mapper == nil {
continue
}
mapper.mu.Lock()
pending += len(mapper.pending)
mapper.mu.Unlock()
}
return pending
}
func (s *SimpleSplitter) Disconnect() {
s.mu.Lock()
active := s.active
idle := s.idle
s.active = make(map[int64]*SimpleMapper)
s.idle = make(map[int64]*SimpleMapper)
s.mu.Unlock()
for _, mapper := range active {
if mapper == nil {
continue
}
mapper.mu.Lock()
mapper.stopped = true
strategy := mapper.strategy
mapper.strategy = nil
mapper.miner = nil
mapper.mu.Unlock()
if strategy != nil {
strategy.Disconnect()
}
}
for _, mapper := range idle {
if mapper == nil {
continue
}
mapper.mu.Lock()
mapper.stopped = true
strategy := mapper.strategy
mapper.strategy = nil
mapper.miner = nil
mapper.mu.Unlock()
if strategy != nil {
strategy.Disconnect()
}
}
} }

View file

@ -1,196 +0,0 @@
package simple
import (
"os"
"strings"
"testing"
"time"
"dappco.re/go/core/proxy"
"dappco.re/go/core/proxy/pool"
)
type fakeStrategy struct {
active bool
connects int
disconnects int
}
func (s *fakeStrategy) Connect() {}
func (s *fakeStrategy) Submit(jobID, nonce, result, algo string) int64 { return 1 }
func (s *fakeStrategy) Disconnect() {
s.disconnects++
s.active = false
}
func (s *fakeStrategy) IsActive() bool { return s.active }
func TestSimpleSplitter_OnLogin_Ugly(t *testing.T) {
deadStrategy := &fakeStrategy{active: false}
liveStrategy := &fakeStrategy{active: true}
splitter := &SimpleSplitter{
active: make(map[int64]*SimpleMapper),
idle: map[int64]*SimpleMapper{
1: {
id: 1,
strategy: deadStrategy,
idleAt: time.Now().UTC(),
},
},
config: &proxy.Config{ReuseTimeout: 60},
strategyFactory: func(listener pool.StratumListener) pool.Strategy {
return liveStrategy
},
}
miner := &proxy.Miner{}
splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
if len(splitter.idle) != 0 {
t.Fatalf("expected dead idle mapper to be discarded, got %d idle mappers", len(splitter.idle))
}
if len(splitter.active) != 1 {
t.Fatalf("expected one active mapper, got %d", len(splitter.active))
}
if deadStrategy.disconnects != 1 {
t.Fatalf("expected dead mapper to be disconnected once, got %d", deadStrategy.disconnects)
}
if miner.RouteID() == 0 {
t.Fatal("expected miner to receive a route ID")
}
}
func TestSimpleSplitter_OnLogin_Bad(t *testing.T) {
activeStrategy := &fakeStrategy{active: true}
splitter := &SimpleSplitter{
active: make(map[int64]*SimpleMapper),
idle: map[int64]*SimpleMapper{
1: {
id: 1,
strategy: activeStrategy,
idleAt: time.Now().UTC().Add(-2 * time.Minute),
},
},
config: &proxy.Config{ReuseTimeout: 60},
strategyFactory: func(listener pool.StratumListener) pool.Strategy {
return activeStrategy
},
}
miner := &proxy.Miner{}
splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
if len(splitter.idle) != 0 {
t.Fatalf("expected stale idle mapper to be discarded, got %d idle mappers", len(splitter.idle))
}
if len(splitter.active) != 1 {
t.Fatalf("expected one active mapper, got %d active mappers", len(splitter.active))
}
}
func TestSimpleSplitter_OnClose_Ugly(t *testing.T) {
activeStrategy := &fakeStrategy{active: true}
splitter := &SimpleSplitter{
active: make(map[int64]*SimpleMapper),
idle: make(map[int64]*SimpleMapper),
config: &proxy.Config{ReuseTimeout: 60},
strategyFactory: func(listener pool.StratumListener) pool.Strategy {
return activeStrategy
},
}
miner := &proxy.Miner{}
splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
mapper := splitter.active[miner.ID()]
if mapper == nil {
t.Fatal("expected active mapper")
}
mapper.pending[1] = simpleSubmitContext{RequestID: 42}
splitter.OnClose(&proxy.CloseEvent{Miner: miner})
if len(mapper.pending) != 0 {
t.Fatalf("expected pending submits to be cleared, got %d", len(mapper.pending))
}
if _, exists := splitter.idle[mapper.id]; !exists {
t.Fatal("expected mapper to move to idle pool")
}
}
func TestSimpleMapper_OnResultAccepted_Good(t *testing.T) {
bus := proxy.NewEventBus()
resultCh := make(chan proxy.Event, 1)
bus.Subscribe(proxy.EventAccept, func(event proxy.Event) {
resultCh <- event
})
mapper := &SimpleMapper{
miner: &proxy.Miner{},
events: bus,
pending: make(map[int64]simpleSubmitContext),
job: proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-b", Target: "b88d0600"},
prevJob: proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-a", Target: "b88d0600"},
}
mapper.pending[1] = simpleSubmitContext{
RequestID: 7,
Job: proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-a", Target: "b88d0600"},
SubmittedAt: time.Now().UTC(),
}
mapper.OnResultAccepted(1, true, "")
select {
case event := <-resultCh:
if event.Job == nil || event.Job.JobID != "job-a" {
t.Fatalf("expected submitted job to be reported, got %#v", event.Job)
}
case <-time.After(time.Second):
t.Fatal("expected accept event")
}
}
func TestSimpleMapper_JobForID_BadClientID(t *testing.T) {
mapper := &SimpleMapper{
pending: make(map[int64]simpleSubmitContext),
}
mapper.OnJob(proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-a", ClientID: "pool-a"})
mapper.OnJob(proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-b", ClientID: "pool-b"})
if valid, expired := mapper.JobStatus("job-a"); valid || expired {
t.Fatalf("expected stale job from a different client to be invalid, got valid=%t expired=%t", valid, expired)
}
}
func TestConfigWatcher_Start_Ugly(t *testing.T) {
path := t.TempDir() + "/config.json"
errorValue := os.WriteFile(path, []byte(`{"mode":"simple","workers":"rig-id","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-a:3333","enabled":true}]}`), 0o644)
if errorValue != nil {
t.Fatal(errorValue)
}
watcherTriggered := make(chan struct{}, 1)
watcher := proxy.NewConfigWatcher(path, func(cfg *proxy.Config) {
watcherTriggered <- struct{}{}
})
watcher.Start()
defer watcher.Stop()
select {
case <-watcherTriggered:
t.Fatal("expected watcher to stay quiet until the file changes")
case <-time.After(1200 * time.Millisecond):
}
if errorValue = os.WriteFile(path, []byte(`{"mode":"simple","workers":"rig-id","bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-b:3333","enabled":true}]}`), 0o644); errorValue != nil {
t.Fatal(errorValue)
}
select {
case <-watcherTriggered:
case <-time.After(2 * time.Second):
t.Fatal("expected watcher to observe the modification")
}
}

2007
state_impl.go Normal file

File diff suppressed because it is too large Load diff

137
state_stop_test.go Normal file
View file

@ -0,0 +1,137 @@
package proxy
import (
"net"
"testing"
"time"
)
// TestProxy_Stop_Good verifies that Stop closes every connected miner's
// connection and disconnects the splitter.
func TestProxy_Stop_Good(t *testing.T) {
	poolSide, minerSide := net.Pipe()
	defer poolSide.Close()
	m := NewMiner(minerSide, 3333, nil)
	split := &stubSplitter{}
	p := &Proxy{
		done:     make(chan struct{}),
		miners:   map[int64]*Miner{m.ID(): m},
		splitter: split,
	}
	readErr := make(chan error, 1)
	go func() {
		// Blocks until the miner side of the pipe is closed by Stop.
		one := make([]byte, 1)
		_, err := poolSide.Read(one)
		readErr <- err
	}()
	time.Sleep(10 * time.Millisecond)
	p.Stop()
	select {
	case err := <-readErr:
		if err == nil {
			t.Fatalf("expected miner connection to close during Stop")
		}
	case <-time.After(time.Second):
		t.Fatalf("expected miner connection to close during Stop")
	}
	if !split.disconnected {
		t.Fatalf("expected splitter to be disconnected during Stop")
	}
}
// TestProxy_Stop_Bad ensures Stop on a nil *Proxy receiver is a safe no-op.
// There is nothing to assert: passing means the call did not panic.
func TestProxy_Stop_Bad(t *testing.T) {
	var proxyInstance *Proxy
	proxyInstance.Stop()
}
// TestProxy_Stop_Ugly verifies that Stop is idempotent: calling it twice
// neither panics nor reopens the miner connection.
func TestProxy_Stop_Ugly(t *testing.T) {
	remote, local := net.Pipe()
	defer remote.Close()
	m := NewMiner(local, 3333, nil)
	p := &Proxy{
		done:   make(chan struct{}),
		miners: map[int64]*Miner{m.ID(): m},
	}
	p.Stop()
	p.Stop() // second call must be harmless
	one := make([]byte, 1)
	if _, err := remote.Read(one); err == nil {
		t.Fatalf("expected closed connection after repeated Stop calls")
	}
}
// TestProxy_Stop_WaitsBeforeDisconnectingSubmitPaths verifies the shutdown
// ordering: while submitCount is non-zero, Stop must neither finish nor
// disconnect the splitter; once the in-flight submits drain to zero, both the
// splitter disconnect and Stop itself must complete.
func TestProxy_Stop_WaitsBeforeDisconnectingSubmitPaths(t *testing.T) {
	serverConn, clientConn := net.Pipe()
	defer serverConn.Close()
	miner := NewMiner(clientConn, 3333, nil)
	splitter := &blockingStopSplitter{disconnectedCh: make(chan struct{})}
	proxyInstance := &Proxy{
		done:     make(chan struct{}),
		miners:   map[int64]*Miner{miner.ID(): miner},
		splitter: splitter,
	}
	// Simulate one submit still in flight.
	proxyInstance.submitCount.Store(1)
	stopped := make(chan struct{})
	go func() {
		proxyInstance.Stop()
		close(stopped)
	}()
	// For a short window, neither the splitter disconnect nor Stop's return
	// may happen while the submit is pending.
	select {
	case <-splitter.disconnectedCh:
		t.Fatalf("expected splitter disconnect to wait for submit drain")
	case <-stopped:
		t.Fatalf("expected Stop to keep waiting while submits are in flight")
	case <-time.After(50 * time.Millisecond):
	}
	// Drain the in-flight submit; Stop should now proceed.
	proxyInstance.submitCount.Store(0)
	select {
	case <-splitter.disconnectedCh:
	case <-time.After(time.Second):
		t.Fatalf("expected splitter disconnect after submit drain")
	}
	select {
	case <-stopped:
	case <-time.After(time.Second):
		t.Fatalf("expected Stop to finish after submit drain")
	}
}
// stubSplitter is an inert Splitter that only records whether Disconnect was
// called, so tests can assert that Stop tears down the splitter.
type stubSplitter struct {
	disconnected bool
}

func (s *stubSplitter) Connect() {}
func (s *stubSplitter) OnLogin(event *LoginEvent) {}
func (s *stubSplitter) OnSubmit(event *SubmitEvent) {}
func (s *stubSplitter) OnClose(event *CloseEvent) {}
func (s *stubSplitter) Tick(ticks uint64) {}
func (s *stubSplitter) GC() {}
func (s *stubSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }
func (s *stubSplitter) Disconnect() { s.disconnected = true }
// blockingStopSplitter is an inert Splitter whose Disconnect closes a channel,
// letting tests observe exactly when the proxy disconnects the splitter during
// shutdown. Disconnect must only be called once (a second close would panic).
type blockingStopSplitter struct {
	disconnectedCh chan struct{}
}

func (s *blockingStopSplitter) Connect() {}
func (s *blockingStopSplitter) OnLogin(event *LoginEvent) {}
func (s *blockingStopSplitter) OnSubmit(event *SubmitEvent) {}
func (s *blockingStopSplitter) OnClose(event *CloseEvent) {}
func (s *blockingStopSplitter) Tick(ticks uint64) {}
func (s *blockingStopSplitter) GC() {}
func (s *blockingStopSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }
func (s *blockingStopSplitter) Disconnect() {
	close(s.disconnectedCh)
}

33
state_submit_test.go Normal file
View file

@ -0,0 +1,33 @@
package proxy
import (
"testing"
"time"
)
// TestProxy_Stop_WaitsForSubmitDrain verifies that Stop blocks while
// submitCount is non-zero and returns once the in-flight submits drain.
func TestProxy_Stop_WaitsForSubmitDrain(t *testing.T) {
	proxyInstance := &Proxy{done: make(chan struct{})}
	proxyInstance.submitCount.Store(1) // one submit still in flight
	finished := make(chan struct{})
	go func() {
		proxyInstance.Stop()
		close(finished)
	}()
	select {
	case <-finished:
		t.Fatalf("expected Stop to wait for pending submits")
	case <-time.After(50 * time.Millisecond):
	}
	proxyInstance.submitCount.Store(0) // drain; Stop should now return
	select {
	case <-finished:
	case <-time.After(time.Second):
		t.Fatalf("expected Stop to finish after pending submits drain")
	}
}

180
stats.go
View file

@ -1,8 +1,6 @@
package proxy package proxy
import ( import (
"slices"
"sort"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -11,9 +9,11 @@ import (
// Stats tracks global proxy metrics. Hot-path counters are atomic. Hashrate windows // Stats tracks global proxy metrics. Hot-path counters are atomic. Hashrate windows
// use a ring buffer per window size, advanced by Tick(). // use a ring buffer per window size, advanced by Tick().
// //
// s := proxy.NewStats() // stats := proxy.NewStats()
// bus.Subscribe(proxy.EventAccept, s.OnAccept) // bus.Subscribe(proxy.EventAccept, stats.OnAccept)
// bus.Subscribe(proxy.EventReject, s.OnReject) // bus.Subscribe(proxy.EventReject, stats.OnReject)
// stats.Tick()
// summary := stats.Summary()
type Stats struct { type Stats struct {
accepted atomic.Uint64 accepted atomic.Uint64
rejected atomic.Uint64 rejected atomic.Uint64
@ -21,15 +21,15 @@ type Stats struct {
expired atomic.Uint64 expired atomic.Uint64
hashes atomic.Uint64 // cumulative sum of accepted share difficulties hashes atomic.Uint64 // cumulative sum of accepted share difficulties
connections atomic.Uint64 // total TCP connections accepted (ever) connections atomic.Uint64 // total TCP connections accepted (ever)
miners atomic.Uint64 // current connected miners
maxMiners atomic.Uint64 // peak concurrent miner count maxMiners atomic.Uint64 // peak concurrent miner count
topDifficulties [10]uint64 // top-10 accepted difficulties, sorted descending; guarded by mu topDiff [10]uint64 // top-10 accepted difficulties, sorted descending; guarded by mu
latencySamples []uint16 // pool response latencies in ms; capped at 10000 samples; guarded by mu latency []uint16 // pool response latencies in ms; capped at 10000 samples; guarded by mu
windows [6]tickWindow // one per hashrate reporting period windows [6]tickWindow // one per hashrate reporting period
startTime time.Time startTime time.Time
mu sync.Mutex mu sync.Mutex
} }
// Hashrate window sizes in seconds. Index maps to Stats.windows and SummaryResponse.Hashrate.
const ( const (
HashrateWindow60s = 0 // 1 minute HashrateWindow60s = 0 // 1 minute
HashrateWindow600s = 1 // 10 minutes HashrateWindow600s = 1 // 10 minutes
@ -39,7 +39,9 @@ const (
HashrateWindowAll = 5 // all-time (single accumulator, no window) HashrateWindowAll = 5 // all-time (single accumulator, no window)
) )
// tickWindow is a fixed-capacity ring buffer of per-second difficulty sums. // tickWindow is a fixed-capacity ring buffer of per-second difficulty totals.
//
// window := newTickWindow(60)
type tickWindow struct { type tickWindow struct {
buckets []uint64 buckets []uint64
pos int pos int
@ -48,7 +50,8 @@ type tickWindow struct {
// StatsSummary is the serialisable snapshot returned by Summary(). // StatsSummary is the serialisable snapshot returned by Summary().
// //
// summary := stats.Summary() // summary := proxy.NewStats().Summary()
// _ = summary.Hashrate[0] // 60-second window H/s
type StatsSummary struct { type StatsSummary struct {
Accepted uint64 `json:"accepted"` Accepted uint64 `json:"accepted"`
Rejected uint64 `json:"rejected"` Rejected uint64 `json:"rejected"`
@ -59,160 +62,5 @@ type StatsSummary struct {
AvgLatency uint32 `json:"latency"` // median pool response latency in ms AvgLatency uint32 `json:"latency"` // median pool response latency in ms
Hashrate [6]float64 `json:"hashrate"` // H/s per window (index = HashrateWindow* constants) Hashrate [6]float64 `json:"hashrate"` // H/s per window (index = HashrateWindow* constants)
TopDiff [10]uint64 `json:"best"` TopDiff [10]uint64 `json:"best"`
} CustomDiffStats map[uint64]CustomDiffBucketStats `json:"custom_diff_stats,omitempty"`
// hashrateWindowSizes are the rolling-window lengths in seconds (1m, 10m, 1h, 12h, 24h);
// index order matches the HashrateWindow* constants and Stats.windows.
var hashrateWindowSizes = [5]int{60, 600, 3600, 43200, 86400}
// NewStats allocates the rolling windows and initialises the clock anchor.
//
// s := proxy.NewStats()
func NewStats() *Stats {
	s := &Stats{
		startTime:      time.Now().UTC(),
		latencySamples: make([]uint16, 0, 128),
	}
	for i := range hashrateWindowSizes {
		seconds := hashrateWindowSizes[i]
		s.windows[i] = tickWindow{size: seconds, buckets: make([]uint64, seconds)}
	}
	return s
}
// OnAccept records an accepted share. Adds diff to the current second's bucket in all windows.
//
// stats.OnAccept(proxy.Event{Diff: 100000, Latency: 82})
func (s *Stats) OnAccept(event Event) {
	s.accepted.Add(1)
	s.hashes.Add(event.Diff)
	if event.Expired {
		s.expired.Add(1)
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	// Fold the difficulty into every finite window's current bucket.
	for i := range s.windows[:HashrateWindowAll] {
		window := &s.windows[i]
		window.buckets[window.pos] += event.Diff
	}
	insertTopDiff(&s.topDifficulties, event.Diff)
	if event.Latency > 0 {
		s.latencySamples = appendCappedLatency(s.latencySamples, event.Latency)
	}
}
// OnReject records a rejected share. If e.Error indicates low diff or malformed, increments invalid.
//
// stats.OnReject(proxy.Event{Error: "Low difficulty share"})
func (s *Stats) OnReject(event Event) {
	s.rejected.Add(1)
	if isInvalidShareError(event.Error) {
		s.invalid.Add(1)
	}
	if event.Expired {
		s.expired.Add(1)
	}
	// Latency is only sampled when the pool actually answered.
	if event.Latency == 0 {
		return
	}
	s.mu.Lock()
	s.latencySamples = appendCappedLatency(s.latencySamples, event.Latency)
	s.mu.Unlock()
}
// Tick advances all rolling windows by one second bucket. Called by the proxy tick loop.
//
// stats.Tick()
func (s *Stats) Tick() {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Rotate each finite window one slot forward and clear the new bucket.
	for i := range s.windows[:HashrateWindowAll] {
		window := &s.windows[i]
		window.pos = (window.pos + 1) % window.size
		window.buckets[window.pos] = 0
	}
}
// Summary returns a point-in-time snapshot of all stats fields for API serialisation.
//
// summary := stats.Summary()
func (s *Stats) Summary() StatsSummary {
	s.mu.Lock()
	defer s.mu.Unlock()
	out := StatsSummary{
		Accepted: s.accepted.Load(),
		Rejected: s.rejected.Load(),
		Invalid:  s.invalid.Load(),
		Expired:  s.expired.Load(),
		Hashes:   s.hashes.Load(),
		TopDiff:  s.topDifficulties,
	}
	for i := range hashrateWindowSizes {
		seconds := hashrateWindowSizes[i]
		out.Hashrate[i] = float64(sumBuckets(s.windows[i].buckets)) / float64(seconds)
	}
	uptime := uint64(time.Since(s.startTime).Seconds())
	if uptime > 0 {
		// All-time hashrate uses the cumulative accumulator, not a ring buffer.
		out.Hashrate[HashrateWindowAll] = float64(out.Hashes) / float64(uptime)
		if out.Accepted > 0 {
			out.AvgTime = uint32(uptime / out.Accepted)
		}
	}
	if len(s.latencySamples) > 0 {
		// Median latency; sort a clone so the sample order is preserved.
		values := slices.Clone(s.latencySamples)
		slices.Sort(values)
		out.AvgLatency = uint32(values[len(values)/2])
	}
	return out
}
// appendCappedLatency appends latency to latencies, keeping at most 10000
// samples. Once the cap is reached the oldest sample is discarded in place,
// so the backing array is reused and the slice never grows further.
func appendCappedLatency(latencies []uint16, latency uint16) []uint16 {
	const maxLatencySamples = 10000
	// >= (not ==) so a slice that somehow exceeds the cap still shrinks to it
	// rather than growing without bound.
	if len(latencies) >= maxLatencySamples {
		copy(latencies, latencies[1:])
		latencies[len(latencies)-1] = latency
		return latencies
	}
	return append(latencies, latency)
}
// insertTopDiff inserts difficulty into the descending top-10 array,
// shifting lower entries down one slot. Zero difficulties are ignored.
func insertTopDiff(topDiff *[10]uint64, difficulty uint64) {
	if difficulty == 0 {
		return
	}
	for slot := range topDiff {
		if topDiff[slot] >= difficulty {
			continue
		}
		// Shift the tail down one slot, then claim this position.
		copy(topDiff[slot+1:], topDiff[slot:len(topDiff)-1])
		topDiff[slot] = difficulty
		return
	}
}
// isInvalidShareError reports whether the pool's rejection message denotes a
// genuinely invalid share (as opposed to e.g. a merely stale one).
func isInvalidShareError(message string) bool {
	invalidReasons := [...]string{
		"Low difficulty share",
		"Invalid nonce",
		"Malformed share",
		"Invalid result",
	}
	for _, reason := range invalidReasons {
		if message == reason {
			return true
		}
	}
	return false
}
// sumBuckets returns the sum of every bucket in the window.
func sumBuckets(values []uint64) uint64 {
	var total uint64
	for index := range values {
		total += values[index]
	}
	return total
}

173
stats_test.go Normal file
View file

@ -0,0 +1,173 @@
package proxy
import (
"sync"
"testing"
)
// TestStats_OnAccept_Good verifies that accepted counter, hashes, and topDiff are updated.
//
// stats := proxy.NewStats()
// stats.OnAccept(proxy.Event{Diff: 100000, Latency: 82})
// summary := stats.Summary()
// _ = summary.Accepted // 1
// _ = summary.Hashes // 100000
func TestStats_OnAccept_Good(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 100000, Latency: 82})
	summary := stats.Summary()
	if summary.Accepted != 1 {
		t.Fatalf("expected accepted 1, got %d", summary.Accepted)
	}
	if summary.Hashes != 100000 {
		t.Fatalf("expected hashes 100000, got %d", summary.Hashes)
	}
	if summary.TopDiff[0] != 100000 {
		t.Fatalf("expected top diff 100000, got %d", summary.TopDiff[0])
	}
}

// TestStats_OnAccept_Bad verifies concurrent OnAccept calls do not race.
//
// stats := proxy.NewStats()
// // 100 goroutines each call OnAccept — no data race under -race flag.
func TestStats_OnAccept_Bad(t *testing.T) {
	stats := NewStats()
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(diff uint64) {
			defer wg.Done()
			stats.OnAccept(Event{Diff: diff, Latency: 10})
		}(uint64(i + 1))
	}
	wg.Wait()
	summary := stats.Summary()
	if summary.Accepted != 100 {
		t.Fatalf("expected 100 accepted, got %d", summary.Accepted)
	}
}

// TestStats_OnAccept_Ugly verifies that 15 accepts with varying diffs fill all topDiff slots.
//
// stats := proxy.NewStats()
// // 15 accepts with diffs 1..15 → topDiff[9] is 6 (10th highest), not 0
func TestStats_OnAccept_Ugly(t *testing.T) {
	stats := NewStats()
	for i := 1; i <= 15; i++ {
		stats.OnAccept(Event{Diff: uint64(i)})
	}
	summary := stats.Summary()
	// top 10 should be 15, 14, 13, ..., 6
	if summary.TopDiff[0] != 15 {
		t.Fatalf("expected top diff[0]=15, got %d", summary.TopDiff[0])
	}
	if summary.TopDiff[9] != 6 {
		t.Fatalf("expected top diff[9]=6, got %d", summary.TopDiff[9])
	}
}
// TestStats_OnReject_Good verifies that rejected and invalid counters are updated.
//
// stats := proxy.NewStats()
// stats.OnReject(proxy.Event{Error: "Low difficulty share"})
func TestStats_OnReject_Good(t *testing.T) {
	stats := NewStats()
	// Both messages are in the invalid-share set, so invalid should track rejected.
	stats.OnReject(Event{Error: "Low difficulty share"})
	stats.OnReject(Event{Error: "Malformed share"})
	summary := stats.Summary()
	if summary.Rejected != 2 {
		t.Fatalf("expected two rejected shares, got %d", summary.Rejected)
	}
	if summary.Invalid != 2 {
		t.Fatalf("expected two invalid shares, got %d", summary.Invalid)
	}
}

// TestStats_OnReject_Bad verifies that a non-invalid rejection increments rejected but not invalid.
//
// stats := proxy.NewStats()
// stats.OnReject(proxy.Event{Error: "Stale share"})
func TestStats_OnReject_Bad(t *testing.T) {
	stats := NewStats()
	stats.OnReject(Event{Error: "Stale share"})
	summary := stats.Summary()
	if summary.Rejected != 1 {
		t.Fatalf("expected one rejected, got %d", summary.Rejected)
	}
	if summary.Invalid != 0 {
		t.Fatalf("expected zero invalid for non-invalid reason, got %d", summary.Invalid)
	}
}

// TestStats_OnReject_Ugly verifies an expired accepted share increments both accepted and expired.
//
// stats := proxy.NewStats()
// stats.OnAccept(proxy.Event{Diff: 1000, Expired: true})
func TestStats_OnReject_Ugly(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 1000, Expired: true})
	summary := stats.Summary()
	if summary.Accepted != 1 {
		t.Fatalf("expected accepted 1, got %d", summary.Accepted)
	}
	if summary.Expired != 1 {
		t.Fatalf("expected expired 1, got %d", summary.Expired)
	}
}
// TestStats_Tick_Good verifies that Tick advances the rolling window position.
//
// stats := proxy.NewStats()
// stats.OnAccept(proxy.Event{Diff: 500})
// stats.Tick()
// summary := stats.Summary()
func TestStats_Tick_Good(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 500})
	stats.Tick()
	summary := stats.Summary()
	// After one tick, the hashrate should still include the 500 diff
	if summary.Hashrate[HashrateWindow60s] == 0 {
		t.Fatalf("expected non-zero 60s hashrate after accept and tick")
	}
}

// TestStats_OnLogin_OnClose_Good verifies miner count tracking.
//
// stats := proxy.NewStats()
// stats.OnLogin(proxy.Event{Miner: &proxy.Miner{}})
// stats.OnClose(proxy.Event{Miner: &proxy.Miner{}})
func TestStats_OnLogin_OnClose_Good(t *testing.T) {
	stats := NewStats()
	m := &Miner{}
	stats.OnLogin(Event{Miner: m})
	if got := stats.miners.Load(); got != 1 {
		t.Fatalf("expected 1 miner, got %d", got)
	}
	if got := stats.maxMiners.Load(); got != 1 {
		t.Fatalf("expected max miners 1, got %d", got)
	}
	stats.OnClose(Event{Miner: m})
	if got := stats.miners.Load(); got != 0 {
		t.Fatalf("expected 0 miners after close, got %d", got)
	}
	// Peak count is monotonic: closing a miner must not lower it.
	if got := stats.maxMiners.Load(); got != 1 {
		t.Fatalf("expected max miners to remain 1, got %d", got)
	}
}

View file

@ -1,141 +0,0 @@
package proxy
import "testing"
// TestEventBus_Dispatch_Good verifies a subscribed handler receives the dispatched event.
func TestEventBus_Dispatch_Good(t *testing.T) {
	bus := NewEventBus()
	called := false
	bus.Subscribe(EventLogin, func(event Event) {
		called = event.Miner != nil
	})
	bus.Dispatch(Event{Type: EventLogin, Miner: &Miner{}})
	if !called {
		t.Fatal("expected handler to be called")
	}
}

// TestEventBus_Dispatch_Bad verifies a nil handler subscription does not panic on dispatch.
func TestEventBus_Dispatch_Bad(t *testing.T) {
	bus := NewEventBus()
	bus.Subscribe(EventLogin, nil)
	bus.Dispatch(Event{Type: EventLogin})
}

// TestEventBus_Dispatch_Ugly verifies multiple handlers for the same event all run.
func TestEventBus_Dispatch_Ugly(t *testing.T) {
	bus := NewEventBus()
	count := 0
	bus.Subscribe(EventLogin, func(event Event) { count++ })
	bus.Subscribe(EventLogin, func(event Event) { count++ })
	bus.Dispatch(Event{Type: EventLogin})
	if count != 2 {
		t.Fatalf("expected both handlers to run, got %d", count)
	}
}
// TestStats_Summary_Good verifies accepted shares are reflected in the summary counters.
func TestStats_Summary_Good(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 120, Latency: 80})
	summary := stats.Summary()
	if summary.Accepted != 1 || summary.Hashes != 120 {
		t.Fatalf("unexpected summary: %+v", summary)
	}
}

// TestStats_Summary_Bad verifies a low-difficulty rejection counts as both rejected and invalid.
func TestStats_Summary_Bad(t *testing.T) {
	stats := NewStats()
	stats.OnReject(Event{Error: "Low difficulty share"})
	summary := stats.Summary()
	if summary.Rejected != 1 || summary.Invalid != 1 {
		t.Fatalf("unexpected summary: %+v", summary)
	}
}

// TestStats_Summary_Ugly verifies top-diff ordering is preserved across a tick boundary.
func TestStats_Summary_Ugly(t *testing.T) {
	stats := NewStats()
	stats.OnAccept(Event{Diff: 100, Latency: 10})
	stats.Tick()
	stats.OnAccept(Event{Diff: 200, Latency: 20})
	summary := stats.Summary()
	if summary.TopDiff[0] != 200 || summary.TopDiff[1] != 100 {
		t.Fatalf("unexpected best shares: %+v", summary.TopDiff)
	}
}
// TestWorkers_List_Good verifies rig-id bucketing and accept counting via bus events.
func TestWorkers_List_Good(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByRigID, bus)
	miner := &Miner{id: 1, user: "wallet", rigID: "rig-a", ip: "10.0.0.1"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 600})
	records := workers.List()
	if len(records) != 1 || records[0].Name != "rig-a" || records[0].Accepted != 1 {
		t.Fatalf("unexpected worker records: %+v", records)
	}
}

// TestWorkers_List_Bad verifies WorkersDisabled produces no records at all.
func TestWorkers_List_Bad(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersDisabled, bus)
	bus.Dispatch(Event{Type: EventLogin, Miner: &Miner{id: 1, user: "wallet"}})
	if len(workers.List()) != 0 {
		t.Fatal("expected no worker records when disabled")
	}
}

// TestWorkers_List_Ugly verifies rig-id mode falls back to the user name when
// no rig id is set, and that invalid rejections are counted.
func TestWorkers_List_Ugly(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByRigID, bus)
	miner := &Miner{id: 1, user: "wallet", ip: "10.0.0.1"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	bus.Dispatch(Event{Type: EventReject, Miner: miner, Error: "Low difficulty share"})
	records := workers.List()
	if len(records) != 1 || records[0].Name != "wallet" || records[0].Invalid != 1 {
		t.Fatalf("unexpected worker records: %+v", records)
	}
}
// TestWorkers_CustomDiffStats_Good verifies distinct custom diffs split one user into two buckets.
func TestWorkers_CustomDiffStats_Good(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	workers.SetCustomDiffStats(true)
	firstMiner := &Miner{id: 1, user: "wallet", customDiff: 1000}
	secondMiner := &Miner{id: 2, user: "wallet", customDiff: 2000}
	bus.Dispatch(Event{Type: EventLogin, Miner: firstMiner})
	bus.Dispatch(Event{Type: EventLogin, Miner: secondMiner})
	records := workers.List()
	if len(records) != 2 || records[0].Name == records[1].Name {
		t.Fatalf("expected separate custom-diff buckets, got %+v", records)
	}
}

// TestWorkers_CustomDiffStats_Bad verifies identical custom diffs merge into one bucket.
func TestWorkers_CustomDiffStats_Bad(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	workers.SetCustomDiffStats(true)
	firstMiner := &Miner{id: 1, user: "wallet", customDiff: 1000}
	secondMiner := &Miner{id: 2, user: "wallet", customDiff: 1000}
	bus.Dispatch(Event{Type: EventLogin, Miner: firstMiner})
	bus.Dispatch(Event{Type: EventLogin, Miner: secondMiner})
	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected identical custom-diff bucket to merge, got %+v", records)
	}
}

// TestWorkers_CustomDiffStats_Ugly verifies bucketing ignores custom diff when the feature is off.
func TestWorkers_CustomDiffStats_Ugly(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	firstMiner := &Miner{id: 1, user: "wallet", customDiff: 1000}
	secondMiner := &Miner{id: 2, user: "wallet", customDiff: 2000}
	bus.Dispatch(Event{Type: EventLogin, Miner: firstMiner})
	bus.Dispatch(Event{Type: EventLogin, Miner: secondMiner})
	records := workers.List()
	if len(records) != 1 || records[0].Name != "wallet" {
		t.Fatalf("expected default worker bucketing to ignore custom diff, got %+v", records)
	}
}

View file

@ -1,120 +0,0 @@
package proxy
import (
"crypto/tls"
"strconv"
"strings"
)
// buildTLSConfig translates the proxy's TLSConfig into a *tls.Config,
// leaving the library defaults in place for any field that fails to parse.
func buildTLSConfig(config TLSConfig) *tls.Config {
	out := &tls.Config{}
	versions := parseTLSVersions(config.Protocols)
	if versions != nil {
		out.MinVersion = versions.min
		out.MaxVersion = versions.max
	}
	suites := parseCipherSuites(config.Ciphers)
	if len(suites) > 0 {
		out.CipherSuites = suites
	}
	return out
}
// tlsVersionBounds holds the lowest and highest TLS protocol versions
// recognised from a configured protocol list (tls.VersionTLS10..13).
type tlsVersionBounds struct {
	min uint16
	max uint16
}
// parseTLSVersions derives min/max protocol bounds from a separator-delimited
// version list. Unrecognised tokens are skipped; returns nil when nothing parses.
func parseTLSVersions(value string) *tlsVersionBounds {
	if strings.TrimSpace(value) == "" {
		return nil
	}
	var bounds tlsVersionBounds
	for _, token := range splitTLSList(value) {
		version, recognised := parseTLSVersionToken(token)
		if !recognised {
			continue
		}
		if bounds.min == 0 || version < bounds.min {
			bounds.min = version
		}
		if bounds.max < version {
			bounds.max = version
		}
	}
	// No valid token at all → signal "use defaults".
	if bounds.min == 0 || bounds.max == 0 {
		return nil
	}
	return &bounds
}
// parseTLSVersionToken maps one protocol token ("TLSv1.2", "1.3", "771", ...)
// to a crypto/tls version constant. The second result is false for unknown tokens.
func parseTLSVersionToken(token string) (uint16, bool) {
	normalized := strings.ToLower(strings.TrimSpace(token))
	named := map[string]uint16{
		"tls1.0": tls.VersionTLS10, "tlsv1.0": tls.VersionTLS10, "tls1": tls.VersionTLS10,
		"tlsv1": tls.VersionTLS10, "1.0": tls.VersionTLS10, "tls10": tls.VersionTLS10,
		"tls1.1": tls.VersionTLS11, "tlsv1.1": tls.VersionTLS11, "1.1": tls.VersionTLS11, "tls11": tls.VersionTLS11,
		"tls1.2": tls.VersionTLS12, "tlsv1.2": tls.VersionTLS12, "1.2": tls.VersionTLS12, "tls12": tls.VersionTLS12,
		"tls1.3": tls.VersionTLS13, "tlsv1.3": tls.VersionTLS13, "1.3": tls.VersionTLS13, "tls13": tls.VersionTLS13,
	}
	if version, ok := named[normalized]; ok {
		return version, true
	}
	// Also accept the raw numeric constant (e.g. 771 == TLS 1.2).
	if raw, parseErr := strconv.ParseUint(normalized, 10, 16); parseErr == nil {
		switch uint16(raw) {
		case tls.VersionTLS10, tls.VersionTLS11, tls.VersionTLS12, tls.VersionTLS13:
			return uint16(raw), true
		}
	}
	return 0, false
}
// parseCipherSuites resolves a separator-delimited cipher list (Go or OpenSSL
// names) into crypto/tls suite IDs, silently dropping unknown names.
func parseCipherSuites(value string) []uint16 {
	if strings.TrimSpace(value) == "" {
		return nil
	}
	var selected []uint16
	for _, token := range splitTLSList(value) {
		key := strings.ToUpper(strings.TrimSpace(token))
		suite, known := tlsCipherSuiteNames[key]
		if known {
			selected = append(selected, suite)
		}
	}
	return selected
}
// splitTLSList splits a config value on any of the accepted separators
// (colon, comma, space, semicolon), discarding empty fields.
func splitTLSList(value string) []string {
	isSeparator := func(r rune) bool {
		return r == ':' || r == ',' || r == ' ' || r == ';'
	}
	return strings.FieldsFunc(value, isSeparator)
}
// tlsCipherSuiteNames maps both Go-style (TLS_...) and OpenSSL-style
// (ECDHE-RSA-...) cipher names to crypto/tls suite IDs. Keys are upper-case;
// callers normalise with strings.ToUpper before lookup.
var tlsCipherSuiteNames = map[string]uint16{
	"TLS_RSA_WITH_AES_128_GCM_SHA256":               tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
	"TLS_RSA_WITH_AES_256_GCM_SHA384":               tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":         tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384":         tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256":       tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384":       tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
	"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256":   tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
	"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
	"TLS_AES_128_GCM_SHA256":                        tls.TLS_AES_128_GCM_SHA256,
	"TLS_AES_256_GCM_SHA384":                        tls.TLS_AES_256_GCM_SHA384,
	"TLS_CHACHA20_POLY1305_SHA256":                  tls.TLS_CHACHA20_POLY1305_SHA256,
	"ECDHE-RSA-AES128-GCM-SHA256":                   tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	"ECDHE-RSA-AES256-GCM-SHA384":                   tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	"ECDHE-ECDSA-AES128-GCM-SHA256":                 tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	"ECDHE-ECDSA-AES256-GCM-SHA384":                 tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
	"AES128-GCM-SHA256":                             tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
	"AES256-GCM-SHA384":                             tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
	"ECDHE-RSA-CHACHA20-POLY1305":                   tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
	"ECDHE-ECDSA-CHACHA20-POLY1305":                 tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
	"CHACHA20-POLY1305":                             tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
}

View file

@ -1,48 +0,0 @@
package proxy
import (
"crypto/tls"
"testing"
)
// TestTLSRuntime_buildTLSConfig_Good verifies versions and ciphers parse into the tls.Config.
func TestTLSRuntime_buildTLSConfig_Good(t *testing.T) {
	config := buildTLSConfig(TLSConfig{
		Ciphers:   "ECDHE-RSA-AES128-GCM-SHA256:TLS_AES_128_GCM_SHA256",
		Protocols: "TLSv1.2,TLSv1.3",
	})
	if config.MinVersion != tls.VersionTLS12 {
		t.Fatalf("expected min version TLS1.2, got %d", config.MinVersion)
	}
	if config.MaxVersion != tls.VersionTLS13 {
		t.Fatalf("expected max version TLS1.3, got %d", config.MaxVersion)
	}
	if len(config.CipherSuites) != 2 || config.CipherSuites[0] != tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 || config.CipherSuites[1] != tls.TLS_AES_128_GCM_SHA256 {
		t.Fatalf("unexpected cipher suites: %#v", config.CipherSuites)
	}
}

// TestTLSRuntime_buildTLSConfig_Bad verifies unparseable input leaves library defaults (zero values).
func TestTLSRuntime_buildTLSConfig_Bad(t *testing.T) {
	config := buildTLSConfig(TLSConfig{Protocols: "bogus", Ciphers: "bogus"})
	if config.MinVersion != 0 || config.MaxVersion != 0 {
		t.Fatalf("expected default versions for invalid input, got min=%d max=%d", config.MinVersion, config.MaxVersion)
	}
	if len(config.CipherSuites) != 0 {
		t.Fatalf("expected no cipher suites for invalid input, got %#v", config.CipherSuites)
	}
}

// TestTLSRuntime_buildTLSConfig_Ugly verifies mixed separators and partially-unknown ciphers.
func TestTLSRuntime_buildTLSConfig_Ugly(t *testing.T) {
	config := buildTLSConfig(TLSConfig{Protocols: "1.1:1.2:1.3", Ciphers: "AES128-GCM-SHA256,unknown"})
	if config.MinVersion != tls.VersionTLS11 {
		t.Fatalf("expected min version TLS1.1, got %d", config.MinVersion)
	}
	if config.MaxVersion != tls.VersionTLS13 {
		t.Fatalf("expected max version TLS1.3, got %d", config.MaxVersion)
	}
	if len(config.CipherSuites) != 1 || config.CipherSuites[0] != tls.TLS_RSA_WITH_AES_128_GCM_SHA256 {
		t.Fatalf("unexpected cipher suites: %#v", config.CipherSuites)
	}
}

36
tls_test.go Normal file
View file

@ -0,0 +1,36 @@
package proxy
import (
"crypto/tls"
"testing"
)
// TestTLS_applyTLSCiphers_Good verifies recognised cipher names are applied to the config.
func TestTLS_applyTLSCiphers_Good(t *testing.T) {
	cfg := &tls.Config{}
	applyTLSCiphers(cfg, "ECDHE-RSA-AES128-GCM-SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256")
	if len(cfg.CipherSuites) != 2 {
		t.Fatalf("expected two recognised cipher suites, got %d", len(cfg.CipherSuites))
	}
}

// TestTLS_applyTLSCiphers_Bad verifies unknown cipher names are silently dropped.
func TestTLS_applyTLSCiphers_Bad(t *testing.T) {
	cfg := &tls.Config{}
	applyTLSCiphers(cfg, "made-up-cipher-one:made-up-cipher-two")
	if len(cfg.CipherSuites) != 0 {
		t.Fatalf("expected unknown cipher names to be ignored, got %#v", cfg.CipherSuites)
	}
}

// TestTLS_applyTLSCiphers_Ugly verifies tolerant parsing of mixed separators, casing, and padding.
func TestTLS_applyTLSCiphers_Ugly(t *testing.T) {
	cfg := &tls.Config{}
	applyTLSCiphers(cfg, " aes128-sha | ECDHE-RSA-AES256-GCM-SHA384 ; tls_ecdhe_ecdsa_with_aes_256_gcm_sha384 ")
	if len(cfg.CipherSuites) != 3 {
		t.Fatalf("expected mixed separators and casing to be accepted, got %d", len(cfg.CipherSuites))
	}
}

214
worker.go
View file

@ -1,7 +1,6 @@
package proxy package proxy
import ( import (
"strconv"
"sync" "sync"
"time" "time"
) )
@ -9,19 +8,22 @@ import (
// Workers maintains per-worker aggregate stats. Workers are identified by name, // Workers maintains per-worker aggregate stats. Workers are identified by name,
// derived from the miner's login fields per WorkersMode. // derived from the miner's login fields per WorkersMode.
// //
// w := proxy.NewWorkers(proxy.WorkersByRigID, bus) // workers := proxy.NewWorkers(proxy.WorkersByRigID, bus)
// workers.OnLogin(proxy.Event{Miner: miner})
// records := workers.List()
type Workers struct { type Workers struct {
mode WorkersMode mode WorkersMode
customDiffStats bool
entries []WorkerRecord // ordered by first-seen (stable) entries []WorkerRecord // ordered by first-seen (stable)
nameIndex map[string]int // workerName → entries index nameIndex map[string]int // workerName → entries index
idIndex map[int64]int // minerID → entries index idIndex map[int64]int // minerID → entries index
subscribed bool
mu sync.RWMutex mu sync.RWMutex
} }
// WorkerRecord is the per-identity aggregate. // WorkerRecord is the per-identity aggregate with rolling hashrate windows.
// //
// hr60 := record.Hashrate(60) // record := proxy.WorkerRecord{Name: "rig-alpha", Accepted: 10, Hashes: 500000}
// hr60 := record.Hashrate(60) // H/s over the last 60 seconds
type WorkerRecord struct { type WorkerRecord struct {
Name string Name string
LastIP string LastIP string
@ -33,205 +35,3 @@ type WorkerRecord struct {
LastHashAt time.Time LastHashAt time.Time
windows [5]tickWindow // 60s, 600s, 3600s, 12h, 24h windows [5]tickWindow // 60s, 600s, 3600s, 12h, 24h
} }
// Hashrate returns the H/s for a given window (seconds: 60, 600, 3600, 43200, 86400).
// Returns 0 for a nil receiver or an unsupported window size.
//
// hr60 := record.Hashrate(60)
func (r *WorkerRecord) Hashrate(seconds int) float64 {
	// Nil-safe: TestWorker_Hashrate_Ugly calls this on a nil *WorkerRecord
	// and expects 0; without this guard the r.windows access panics.
	if r == nil {
		return 0
	}
	for index, windowSize := range hashrateWindowSizes {
		if windowSize == seconds {
			return float64(sumBuckets(r.windows[index].buckets)) / float64(seconds)
		}
	}
	// Unsupported window (including 0 and negatives) never matches a size.
	return 0
}
// NewWorkers creates the worker aggregate and subscribes it to the event bus.
//
// w := proxy.NewWorkers(proxy.WorkersByRigID, bus)
func NewWorkers(mode WorkersMode, bus *EventBus) *Workers {
	w := &Workers{
		mode:      mode,
		entries:   make([]WorkerRecord, 0),
		nameIndex: make(map[string]int),
		idIndex:   make(map[int64]int),
	}
	// A nil bus is allowed: the aggregate then only updates via direct calls.
	if bus == nil {
		return w
	}
	bus.Subscribe(EventLogin, w.onLogin)
	bus.Subscribe(EventAccept, w.onAccept)
	bus.Subscribe(EventReject, w.onReject)
	bus.Subscribe(EventClose, w.onClose)
	return w
}
// SetCustomDiffStats toggles per-custom-difficulty worker bucketing.
//
// workers.SetCustomDiffStats(true)
func (w *Workers) SetCustomDiffStats(enabled bool) {
	if w == nil {
		return
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	w.customDiffStats = enabled
}
// List returns a snapshot of all worker records in first-seen order.
//
// records := workers.List()
func (w *Workers) List() []WorkerRecord {
	w.mu.RLock()
	defer w.mu.RUnlock()
	// Copy under the read lock so callers never alias the live slice.
	snapshot := make([]WorkerRecord, len(w.entries))
	copy(snapshot, w.entries)
	return snapshot
}
// Tick advances all worker hashrate windows. Called by the proxy tick loop every second.
//
// workers.Tick()
func (w *Workers) Tick() {
	w.mu.Lock()
	defer w.mu.Unlock()
	for entryIndex := range w.entries {
		for windowIndex, size := range hashrateWindowSizes {
			// Defensive: entries carry fewer windows (5) than could exist if
			// hashrateWindowSizes ever grew; never index past the array.
			if windowIndex >= len(w.entries[entryIndex].windows) {
				break
			}
			window := &w.entries[entryIndex].windows[windowIndex]
			// Lazily allocate buckets for records created without initialised
			// windows (e.g. zero-value WorkerRecord literals).
			if window.size == 0 {
				window.size = size
				window.buckets = make([]uint64, size)
			}
			// Rotate the ring by one second and clear the new bucket.
			window.pos = (window.pos + 1) % window.size
			window.buckets[window.pos] = 0
		}
	}
}
// onLogin registers (or re-attaches) a miner under its derived worker name,
// creating the record with pre-sized hashrate windows on first sight.
func (w *Workers) onLogin(event Event) {
	if w.mode == WorkersDisabled || event.Miner == nil {
		return
	}
	name := w.workerName(event.Miner)
	if name == "" {
		return
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	index, known := w.nameIndex[name]
	if !known {
		record := WorkerRecord{Name: name}
		for windowIndex := range record.windows {
			if windowIndex >= len(hashrateWindowSizes) {
				break
			}
			seconds := hashrateWindowSizes[windowIndex]
			record.windows[windowIndex] = tickWindow{size: seconds, buckets: make([]uint64, seconds)}
		}
		w.entries = append(w.entries, record)
		index = len(w.entries) - 1
		w.nameIndex[name] = index
	}
	entry := &w.entries[index]
	entry.LastIP = event.Miner.IP()
	entry.Connections++
	w.idIndex[event.Miner.ID()] = index
}
// onAccept folds an accepted share into the miner's worker record.
func (w *Workers) onAccept(event Event) {
	w.updateShare(event, true)
}

// onReject folds a rejected share into the miner's worker record.
func (w *Workers) onReject(event Event) {
	w.updateShare(event, false)
}
// onClose drops the minerID → entry mapping when a miner disconnects.
// The WorkerRecord itself is kept so aggregates survive reconnects.
func (w *Workers) onClose(event Event) {
	if event.Miner == nil {
		return
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	delete(w.idIndex, event.Miner.ID())
}
// updateShare applies one share result to the record of the miner that
// produced it. Shares from miners without a live login mapping are dropped.
func (w *Workers) updateShare(event Event, accepted bool) {
	if w.mode == WorkersDisabled || event.Miner == nil {
		return
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	index, tracked := w.idIndex[event.Miner.ID()]
	if !tracked {
		return
	}
	entry := &w.entries[index]
	if !accepted {
		entry.Rejected++
		if isInvalidShareError(event.Error) {
			entry.Invalid++
		}
		return
	}
	entry.Accepted++
	entry.Hashes += event.Diff
	entry.LastHashAt = time.Now().UTC()
	for windowIndex := range entry.windows {
		window := &entry.windows[windowIndex]
		window.buckets[window.pos] += event.Diff
	}
}
// workerName derives the bucketing key for a miner from the configured mode,
// optionally suffixing "+cd<diff>" when per-custom-diff stats are enabled.
func (w *Workers) workerName(miner *Miner) string {
	if miner == nil {
		return ""
	}
	w.mu.RLock()
	withCustomDiff := w.customDiffStats
	w.mu.RUnlock()
	var name string
	switch w.mode {
	case WorkersByRigID:
		// Fall back to the wallet/user when the miner sent no rig id.
		name = miner.RigID()
		if name == "" {
			name = miner.User()
		}
	case WorkersByUser:
		name = miner.User()
	case WorkersByPass:
		name = miner.Password()
	case WorkersByAgent:
		name = miner.Agent()
	case WorkersByIP:
		name = miner.IP()
	default:
		return ""
	}
	if withCustomDiff && name != "" && miner.CustomDiff() != 0 {
		return name + "+cd" + strconv.FormatUint(miner.CustomDiff(), 10)
	}
	return name
}

164
worker_test.go Normal file
View file

@ -0,0 +1,164 @@
package proxy
import "testing"
// TestWorker_NewWorkers_Good verifies a login event creates one rig-id record.
func TestWorker_NewWorkers_Good(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByRigID, bus)
	miner := &Miner{id: 7, user: "wallet", rigID: "rig-1", ip: "10.0.0.1"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	if records[0].Name != "rig-1" {
		t.Fatalf("expected rig id worker name, got %q", records[0].Name)
	}
	if records[0].Connections != 1 {
		t.Fatalf("expected one connection, got %d", records[0].Connections)
	}
}

// TestWorker_NewWorkers_Bad verifies construction with a nil bus and disabled mode still yields a usable instance.
func TestWorker_NewWorkers_Bad(t *testing.T) {
	workers := NewWorkers(WorkersDisabled, nil)
	if workers == nil {
		t.Fatalf("expected workers instance")
	}
	if got := workers.List(); len(got) != 0 {
		t.Fatalf("expected no worker records, got %d", len(got))
	}
}

// TestWorker_NewWorkers_Ugly verifies a redundant bindEvents call does not double-subscribe handlers.
func TestWorker_NewWorkers_Ugly(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	workers.bindEvents(bus)
	miner := &Miner{id: 11, user: "wallet", ip: "10.0.0.2"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	// A double subscription would have counted this login twice.
	if records[0].Connections != 1 {
		t.Fatalf("expected a single subscription path, got %d connections", records[0].Connections)
	}
}
// TestWorker_Hashrate_Good verifies that recording an accepted share produces a nonzero
// hashrate reading from the 60-second window.
//
// record := proxy.WorkerRecord{}
// record.Hashrate(60) // > 0.0 after an accepted share
func TestWorker_Hashrate_Good(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	miner := &Miner{id: 100, user: "hashtest", ip: "10.0.0.10"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 50000})
	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	hr := records[0].Hashrate(60)
	if hr <= 0 {
		t.Fatalf("expected nonzero hashrate for 60-second window after accept, got %f", hr)
	}
}

// TestWorker_Hashrate_Bad verifies that an invalid window size returns 0.
//
// record := proxy.WorkerRecord{}
// record.Hashrate(999) // 0.0 (unsupported window)
func TestWorker_Hashrate_Bad(t *testing.T) {
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	miner := &Miner{id: 101, user: "hashtest-bad", ip: "10.0.0.11"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	bus.Dispatch(Event{Type: EventAccept, Miner: miner, Diff: 50000})
	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	// 999, 0, and -1 all fail to match a supported window size.
	hr := records[0].Hashrate(999)
	if hr != 0 {
		t.Fatalf("expected zero hashrate for unsupported window, got %f", hr)
	}
	hrZero := records[0].Hashrate(0)
	if hrZero != 0 {
		t.Fatalf("expected zero hashrate for zero window, got %f", hrZero)
	}
	hrNeg := records[0].Hashrate(-1)
	if hrNeg != 0 {
		t.Fatalf("expected zero hashrate for negative window, got %f", hrNeg)
	}
}

// TestWorker_Hashrate_Ugly verifies that calling Hashrate on a nil record returns 0
// and that a worker with no accepts also returns 0.
//
// var record *proxy.WorkerRecord
// record.Hashrate(60) // 0.0
func TestWorker_Hashrate_Ugly(t *testing.T) {
	// NOTE(review): this requires Hashrate to guard a nil receiver.
	var nilRecord *WorkerRecord
	if hr := nilRecord.Hashrate(60); hr != 0 {
		t.Fatalf("expected zero hashrate for nil record, got %f", hr)
	}
	bus := NewEventBus()
	workers := NewWorkers(WorkersByUser, bus)
	miner := &Miner{id: 102, user: "hashtest-ugly", ip: "10.0.0.12"}
	bus.Dispatch(Event{Type: EventLogin, Miner: miner})
	records := workers.List()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	hr := records[0].Hashrate(60)
	if hr != 0 {
		t.Fatalf("expected zero hashrate for worker with no accepts, got %f", hr)
	}
}
// TestWorker_CustomDiffOrdering_Good verifies the "+diff" login suffix is
// stripped before the worker name is registered and before the miner's user
// is exposed to downstream consumers.
func TestWorker_CustomDiffOrdering_Good(t *testing.T) {
	cfg := &Config{
		Mode:          "nicehash",
		Workers:       WorkersByUser,
		Bind:          []BindAddr{{Host: "127.0.0.1", Port: 3333}},
		Pools:         []PoolConfig{{URL: "pool.example:3333", Enabled: true}},
		CustomDiff:    50000,
		AccessLogFile: "",
	}
	p, result := New(cfg)
	if !result.OK {
		t.Fatalf("expected valid proxy, got error: %v", result.Error)
	}
	miner := &Miner{
		id:   21,
		user: "WALLET+50000",
		ip:   "10.0.0.3",
		conn: noopConn{},
	}
	p.events.Dispatch(Event{Type: EventLogin, Miner: miner})
	records := p.WorkerRecords()
	if len(records) != 1 {
		t.Fatalf("expected one worker record, got %d", len(records))
	}
	if records[0].Name != "WALLET" {
		t.Fatalf("expected custom diff login suffix to be stripped before worker registration, got %q", records[0].Name)
	}
	if miner.User() != "WALLET" {
		t.Fatalf("expected miner user to be stripped before downstream consumers, got %q", miner.User())
	}
}