Merge pull request '[agent/codex:gpt-5.4-mini] Read docs/RFC.md and docs/specs/core/go/RFC.md fully. Find f...' (#3) from agent/read-docs-rfc-md-and-docs-specs-core-go into dev

This commit is contained in:
Virgil 2026-04-04 18:15:54 +00:00
commit a4f2176e34
5 changed files with 47 additions and 47 deletions

View file

@@ -14,14 +14,14 @@ import (
// strategy := pool.NewFailoverStrategy(cfg.Pools, listener, cfg)
// strategy.Connect()
type FailoverStrategy struct {
pools []proxy.PoolConfig
current int
client *StratumClient
listener StratumListener
config *proxy.Config
closed bool
running bool
mu sync.Mutex
pools []proxy.PoolConfig
current int
client *StratumClient
listener StratumListener
configuration *proxy.Config
closed bool
running bool
mu sync.Mutex
}
// StrategyFactory creates a new FailoverStrategy for a given StratumListener.
@@ -56,9 +56,9 @@ func NewStrategyFactory(cfg *proxy.Config) StrategyFactory {
// strategy := pool.NewFailoverStrategy([]proxy.PoolConfig{{URL: "pool.lthn.io:3333", Enabled: true}}, listener, cfg)
func NewFailoverStrategy(pools []proxy.PoolConfig, listener StratumListener, cfg *proxy.Config) *FailoverStrategy {
return &FailoverStrategy{
pools: append([]proxy.PoolConfig(nil), pools...),
listener: listener,
config: cfg,
pools: append([]proxy.PoolConfig(nil), pools...),
listener: listener,
configuration: cfg,
}
}
@@ -87,8 +87,8 @@ func (s *FailoverStrategy) connectFrom(start int) {
}()
pools := s.pools
if s.config != nil && len(s.config.Pools) > 0 {
pools = s.config.Pools
if s.configuration != nil && len(s.configuration.Pools) > 0 {
pools = s.configuration.Pools
}
if len(pools) == 0 {
return
@@ -96,12 +96,12 @@ func (s *FailoverStrategy) connectFrom(start int) {
retries := 1
pause := time.Duration(0)
if s.config != nil {
if s.config.Retries > 0 {
retries = s.config.Retries
if s.configuration != nil {
if s.configuration.Retries > 0 {
retries = s.configuration.Retries
}
if s.config.RetryPause > 0 {
pause = time.Duration(s.config.RetryPause) * time.Second
if s.configuration.RetryPause > 0 {
pause = time.Duration(s.configuration.RetryPause) * time.Second
}
}

View file

@@ -111,10 +111,10 @@ type ConfigWatcher struct {
// rl := proxy.NewRateLimiter(cfg.RateLimit)
// if !rl.Allow("1.2.3.4") { conn.Close(); return }
type RateLimiter struct {
config RateLimit
buckets map[string]*tokenBucket
banned map[string]time.Time
mu sync.Mutex
configuration RateLimit
buckets map[string]*tokenBucket
banned map[string]time.Time
mu sync.Mutex
}
// tokenBucket is a simple token bucket for one IP.

View file

@@ -11,9 +11,9 @@ import (
// rl := proxy.NewRateLimiter(proxy.RateLimit{MaxConnectionsPerMinute: 30, BanDurationSeconds: 300})
func NewRateLimiter(config RateLimit) *RateLimiter {
return &RateLimiter{
config: config,
buckets: make(map[string]*tokenBucket),
banned: make(map[string]time.Time),
configuration: config,
buckets: make(map[string]*tokenBucket),
banned: make(map[string]time.Time),
}
}
@@ -26,7 +26,7 @@ func (rateLimiter *RateLimiter) SetConfig(config RateLimit) {
}
rateLimiter.mu.Lock()
rateLimiter.config = config
rateLimiter.configuration = config
rateLimiter.mu.Unlock()
}
@@ -44,7 +44,7 @@ func (rateLimiter *RateLimiter) Allow(ip string) bool {
rateLimiter.mu.Lock()
defer rateLimiter.mu.Unlock()
if rateLimiter.config.MaxConnectionsPerMinute <= 0 {
if rateLimiter.configuration.MaxConnectionsPerMinute <= 0 {
return true
}
@@ -58,7 +58,7 @@ func (rateLimiter *RateLimiter) Allow(ip string) bool {
bucket, exists := rateLimiter.buckets[host]
if !exists {
bucket = &tokenBucket{
tokens: rateLimiter.config.MaxConnectionsPerMinute,
tokens: rateLimiter.configuration.MaxConnectionsPerMinute,
lastRefill: now,
}
rateLimiter.buckets[host] = bucket
@@ -66,8 +66,8 @@ func (rateLimiter *RateLimiter) Allow(ip string) bool {
rateLimiter.refillBucket(bucket, now)
if bucket.tokens <= 0 {
if rateLimiter.config.BanDurationSeconds > 0 {
rateLimiter.banned[host] = now.Add(time.Duration(rateLimiter.config.BanDurationSeconds) * time.Second)
if rateLimiter.configuration.BanDurationSeconds > 0 {
rateLimiter.banned[host] = now.Add(time.Duration(rateLimiter.configuration.BanDurationSeconds) * time.Second)
}
return false
}
@@ -88,7 +88,7 @@ func (rateLimiter *RateLimiter) Tick() {
rateLimiter.mu.Lock()
defer rateLimiter.mu.Unlock()
if rateLimiter.config.MaxConnectionsPerMinute <= 0 {
if rateLimiter.configuration.MaxConnectionsPerMinute <= 0 {
return
}
@@ -104,11 +104,11 @@ func (rateLimiter *RateLimiter) Tick() {
}
func (rateLimiter *RateLimiter) refillBucket(bucket *tokenBucket, now time.Time) {
if bucket == nil || rateLimiter.config.MaxConnectionsPerMinute <= 0 {
if bucket == nil || rateLimiter.configuration.MaxConnectionsPerMinute <= 0 {
return
}
refillEvery := time.Minute / time.Duration(rateLimiter.config.MaxConnectionsPerMinute)
refillEvery := time.Minute / time.Duration(rateLimiter.configuration.MaxConnectionsPerMinute)
if refillEvery <= 0 {
refillEvery = time.Second
}
@@ -120,8 +120,8 @@ func (rateLimiter *RateLimiter) refillBucket(bucket *tokenBucket, now time.Time)
tokensToAdd := int(elapsed / refillEvery)
bucket.tokens += tokensToAdd
if bucket.tokens > rateLimiter.config.MaxConnectionsPerMinute {
bucket.tokens = rateLimiter.config.MaxConnectionsPerMinute
if bucket.tokens > rateLimiter.configuration.MaxConnectionsPerMinute {
bucket.tokens = rateLimiter.configuration.MaxConnectionsPerMinute
}
bucket.lastRefill = bucket.lastRefill.Add(time.Duration(tokensToAdd) * refillEvery)
}

View file

@@ -21,7 +21,7 @@ import (
type SimpleSplitter struct {
active map[int64]*SimpleMapper // minerID → mapper
idle map[int64]*SimpleMapper // mapperID → mapper (reuse pool, keyed by mapper seq)
config *proxy.Config
configuration *proxy.Config
events *proxy.EventBus
strategyFactory pool.StrategyFactory
mu sync.Mutex
@@ -35,7 +35,7 @@ func NewSimpleSplitter(cfg *proxy.Config, events *proxy.EventBus, factory pool.S
return &SimpleSplitter{
active: make(map[int64]*SimpleMapper),
idle: make(map[int64]*SimpleMapper),
config: cfg,
configuration: cfg,
events: events,
strategyFactory: factory,
}
@@ -52,8 +52,8 @@ func (s *SimpleSplitter) OnLogin(event *proxy.LoginEvent) {
defer s.mu.Unlock()
timeout := time.Duration(0)
if s.config != nil && s.config.ReuseTimeout > 0 {
timeout = time.Duration(s.config.ReuseTimeout) * time.Second
if s.configuration != nil && s.configuration.ReuseTimeout > 0 {
timeout = time.Duration(s.configuration.ReuseTimeout) * time.Second
}
var mapper *SimpleMapper
@@ -152,7 +152,7 @@ func (s *SimpleSplitter) OnClose(event *proxy.CloseEvent) {
mapper.clearPending()
mapper.miner = nil
mapper.idleAt = time.Now().UTC()
if s.config != nil && s.config.ReuseTimeout > 0 {
if s.configuration != nil && s.configuration.ReuseTimeout > 0 {
s.idle[mapper.id] = mapper
return
}
@@ -174,8 +174,8 @@ func (s *SimpleSplitter) GC() {
defer s.mu.Unlock()
timeout := time.Duration(0)
if s.config != nil && s.config.ReuseTimeout > 0 {
timeout = time.Duration(s.config.ReuseTimeout) * time.Second
if s.configuration != nil && s.configuration.ReuseTimeout > 0 {
timeout = time.Duration(s.configuration.ReuseTimeout) * time.Second
}
now := time.Now().UTC()

View file

@@ -39,7 +39,7 @@ func TestSimpleSplitter_OnLogin_Ugly(t *testing.T) {
idleAt: time.Now().UTC(),
},
},
config: &proxy.Config{ReuseTimeout: 60},
configuration: &proxy.Config{ReuseTimeout: 60},
strategyFactory: func(listener pool.StratumListener) pool.Strategy {
return liveStrategy
},
@@ -73,7 +73,7 @@ func TestSimpleSplitter_OnLogin_Bad(t *testing.T) {
idleAt: time.Now().UTC().Add(-2 * time.Minute),
},
},
config: &proxy.Config{ReuseTimeout: 60},
configuration: &proxy.Config{ReuseTimeout: 60},
strategyFactory: func(listener pool.StratumListener) pool.Strategy {
return activeStrategy
},
@@ -93,9 +93,9 @@ func TestSimpleSplitter_OnLogin_Bad(t *testing.T) {
func TestSimpleSplitter_OnClose_Ugly(t *testing.T) {
activeStrategy := &fakeStrategy{active: true}
splitter := &SimpleSplitter{
active: make(map[int64]*SimpleMapper),
idle: make(map[int64]*SimpleMapper),
config: &proxy.Config{ReuseTimeout: 60},
active: make(map[int64]*SimpleMapper),
idle: make(map[int64]*SimpleMapper),
configuration: &proxy.Config{ReuseTimeout: 60},
strategyFactory: func(listener pool.StratumListener) pool.Strategy {
return activeStrategy
},