// Package simple: unit tests for the splitter, mapper, and config watcher.
package simple

import (
	"os"
	"strings"
	"testing"
	"time"

	"dappco.re/go/core/proxy"
	"dappco.re/go/core/proxy/pool"
)

// fakeStrategy is a test double for pool.Strategy. It performs no I/O and
// simply records how it was driven so tests can assert on the interactions.
type fakeStrategy struct {
	active      bool // canned liveness reported by IsActive
	connects    int  // number of Connect calls observed
	disconnects int  // number of Disconnect calls observed
}
func (s *fakeStrategy) Connect() {}
|
|
|
|
func (s *fakeStrategy) Submit(jobID, nonce, result, algo string) int64 { return 1 }
|
|
|
|
func (s *fakeStrategy) Disconnect() {
|
|
s.disconnects++
|
|
s.active = false
|
|
}
|
|
|
|
func (s *fakeStrategy) IsActive() bool { return s.active }
|
|
|
|
func TestSimpleSplitter_OnLogin_Ugly(t *testing.T) {
|
|
deadStrategy := &fakeStrategy{active: false}
|
|
liveStrategy := &fakeStrategy{active: true}
|
|
splitter := &SimpleSplitter{
|
|
active: make(map[int64]*SimpleMapper),
|
|
idle: map[int64]*SimpleMapper{
|
|
1: {
|
|
id: 1,
|
|
strategy: deadStrategy,
|
|
idleAt: time.Now().UTC(),
|
|
},
|
|
},
|
|
config: &proxy.Config{ReuseTimeout: 60},
|
|
strategyFactory: func(listener pool.StratumListener) pool.Strategy {
|
|
return liveStrategy
|
|
},
|
|
}
|
|
|
|
miner := &proxy.Miner{}
|
|
splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
|
|
|
|
if len(splitter.idle) != 0 {
|
|
t.Fatalf("expected dead idle mapper to be discarded, got %d idle mappers", len(splitter.idle))
|
|
}
|
|
if len(splitter.active) != 1 {
|
|
t.Fatalf("expected one active mapper, got %d", len(splitter.active))
|
|
}
|
|
if deadStrategy.disconnects != 1 {
|
|
t.Fatalf("expected dead mapper to be disconnected once, got %d", deadStrategy.disconnects)
|
|
}
|
|
if miner.RouteID() == 0 {
|
|
t.Fatal("expected miner to receive a route ID")
|
|
}
|
|
}
|
|
|
|
func TestSimpleSplitter_OnLogin_Bad(t *testing.T) {
|
|
activeStrategy := &fakeStrategy{active: true}
|
|
splitter := &SimpleSplitter{
|
|
active: make(map[int64]*SimpleMapper),
|
|
idle: map[int64]*SimpleMapper{
|
|
1: {
|
|
id: 1,
|
|
strategy: activeStrategy,
|
|
idleAt: time.Now().UTC().Add(-2 * time.Minute),
|
|
},
|
|
},
|
|
config: &proxy.Config{ReuseTimeout: 60},
|
|
strategyFactory: func(listener pool.StratumListener) pool.Strategy {
|
|
return activeStrategy
|
|
},
|
|
}
|
|
|
|
miner := &proxy.Miner{}
|
|
splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
|
|
|
|
if len(splitter.idle) != 0 {
|
|
t.Fatalf("expected stale idle mapper to be discarded, got %d idle mappers", len(splitter.idle))
|
|
}
|
|
if len(splitter.active) != 1 {
|
|
t.Fatalf("expected one active mapper, got %d active mappers", len(splitter.active))
|
|
}
|
|
}
|
|
|
|
func TestSimpleSplitter_OnClose_Ugly(t *testing.T) {
|
|
activeStrategy := &fakeStrategy{active: true}
|
|
splitter := &SimpleSplitter{
|
|
active: make(map[int64]*SimpleMapper),
|
|
idle: make(map[int64]*SimpleMapper),
|
|
config: &proxy.Config{ReuseTimeout: 60},
|
|
strategyFactory: func(listener pool.StratumListener) pool.Strategy {
|
|
return activeStrategy
|
|
},
|
|
}
|
|
|
|
miner := &proxy.Miner{}
|
|
splitter.OnLogin(&proxy.LoginEvent{Miner: miner})
|
|
|
|
mapper := splitter.active[miner.ID()]
|
|
if mapper == nil {
|
|
t.Fatal("expected active mapper")
|
|
}
|
|
mapper.pending[1] = simpleSubmitContext{RequestID: 42}
|
|
|
|
splitter.OnClose(&proxy.CloseEvent{Miner: miner})
|
|
|
|
if len(mapper.pending) != 0 {
|
|
t.Fatalf("expected pending submits to be cleared, got %d", len(mapper.pending))
|
|
}
|
|
if _, exists := splitter.idle[mapper.id]; !exists {
|
|
t.Fatal("expected mapper to move to idle pool")
|
|
}
|
|
}
|
|
|
|
func TestSimpleMapper_OnResultAccepted_Good(t *testing.T) {
|
|
bus := proxy.NewEventBus()
|
|
resultCh := make(chan proxy.Event, 1)
|
|
bus.Subscribe(proxy.EventAccept, func(event proxy.Event) {
|
|
resultCh <- event
|
|
})
|
|
|
|
mapper := &SimpleMapper{
|
|
miner: &proxy.Miner{},
|
|
events: bus,
|
|
pending: make(map[int64]simpleSubmitContext),
|
|
job: proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-b", Target: "b88d0600"},
|
|
prevJob: proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-a", Target: "b88d0600"},
|
|
}
|
|
mapper.pending[1] = simpleSubmitContext{
|
|
RequestID: 7,
|
|
Job: proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-a", Target: "b88d0600"},
|
|
SubmittedAt: time.Now().UTC(),
|
|
}
|
|
|
|
mapper.OnResultAccepted(1, true, "")
|
|
|
|
select {
|
|
case event := <-resultCh:
|
|
if event.Job == nil || event.Job.JobID != "job-a" {
|
|
t.Fatalf("expected submitted job to be reported, got %#v", event.Job)
|
|
}
|
|
case <-time.After(time.Second):
|
|
t.Fatal("expected accept event")
|
|
}
|
|
}
|
|
|
|
func TestSimpleMapper_JobForID_BadClientID(t *testing.T) {
|
|
mapper := &SimpleMapper{
|
|
pending: make(map[int64]simpleSubmitContext),
|
|
}
|
|
mapper.OnJob(proxy.Job{Blob: strings.Repeat("1", 160), JobID: "job-a", ClientID: "pool-a"})
|
|
mapper.OnJob(proxy.Job{Blob: strings.Repeat("0", 160), JobID: "job-b", ClientID: "pool-b"})
|
|
|
|
if valid, expired := mapper.JobStatus("job-a"); valid || expired {
|
|
t.Fatalf("expected stale job from a different client to be invalid, got valid=%t expired=%t", valid, expired)
|
|
}
|
|
}
|
|
|
|
func TestConfigWatcher_Start_Ugly(t *testing.T) {
|
|
path := t.TempDir() + "/config.json"
|
|
errorValue := os.WriteFile(path, []byte(`{"bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-a:3333","enabled":true}]}`), 0o644)
|
|
if errorValue != nil {
|
|
t.Fatal(errorValue)
|
|
}
|
|
|
|
watcherTriggered := make(chan struct{}, 1)
|
|
watcher := proxy.NewConfigWatcher(path, func(cfg *proxy.Config) {
|
|
watcherTriggered <- struct{}{}
|
|
})
|
|
watcher.Start()
|
|
defer watcher.Stop()
|
|
|
|
select {
|
|
case <-watcherTriggered:
|
|
t.Fatal("expected watcher to stay quiet until the file changes")
|
|
case <-time.After(1200 * time.Millisecond):
|
|
}
|
|
|
|
if errorValue = os.WriteFile(path, []byte(`{"bind":[{"host":"127.0.0.1","port":3333}],"pools":[{"url":"pool-b:3333","enabled":true}]}`), 0o644); errorValue != nil {
|
|
t.Fatal(errorValue)
|
|
}
|
|
|
|
select {
|
|
case <-watcherTriggered:
|
|
case <-time.After(2 * time.Second):
|
|
t.Fatal("expected watcher to observe the modification")
|
|
}
|
|
}
|