go-proxy/proxy_runtime.go

package proxy

import (
	"net"
	"sync/atomic"
	"time"
)

// proxyMinerCount is package-level, so it tracks connected miners across every
// Proxy instance in the process, not per instance.
var proxyMinerCount atomic.Uint64

// New creates and wires all subsystems but does not start the tick loop or TCP listeners.
//
//	p, errorValue := proxy.New(cfg)
func New(cfg *Config) (*Proxy, error) {
	if errorValue := cfg.Validate(); errorValue != nil {
		return nil, errorValue
	}
	events := NewEventBus()
	stats := NewStats()
	workers := NewWorkers(cfg.Workers, events)
	proxyValue := &Proxy{
		config:   cfg,
		splitter: noopSplitter{},
		stats:    stats,
		workers:  workers,
		events:   events,
		done:     make(chan struct{}),
	}
	events.Subscribe(EventAccept, stats.OnAccept)
	events.Subscribe(EventReject, stats.OnReject)
	events.Subscribe(EventLogin, func(event Event) {
		stats.connections.Add(1)
		current := proxyMinerCount.Add(1)
		// Lock-free high-water mark: retry until the stored maximum is already
		// at least current, or the compare-and-swap succeeds.
		for {
			maximum := stats.maxMiners.Load()
			if current <= maximum || stats.maxMiners.CompareAndSwap(maximum, current) {
				break
			}
		}
	})
	events.Subscribe(EventClose, func(event Event) {
		// Adding ^uint64(0) wraps around and decrements the counter by one;
		// the Load guard is a best-effort protection against underflow when
		// closes outnumber logins.
		if proxyMinerCount.Load() > 0 {
			proxyMinerCount.Add(^uint64(0))
		}
	})
	events.Subscribe(EventLogin, NewCustomDiff(cfg.CustomDiff).OnLogin)
	return proxyValue, nil
}
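
// Typical lifecycle, as suggested by the doc comments on New, Start and Stop
// (illustrative only; the Config literal assumes Bind is a slice of listen
// addresses, matching its use in Start, and is not a confirmed definition):
//
//	cfg := &Config{Bind: []string{"0.0.0.0:3333"}}
//	p, errorValue := proxy.New(cfg)
//	if errorValue != nil {
//		return errorValue
//	}
//	p.Start()
//	defer p.Stop()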

// Start begins the TCP listener(s), pool connections, and tick loop.
//
//	p.Start()
func (p *Proxy) Start() {
	if p.splitter != nil {
		p.splitter.Connect()
	}
	p.ticker = time.NewTicker(time.Second)
	for _, bind := range p.config.Bind {
		server, errorValue := NewServer(bind, nil, NewRateLimiter(p.config.RateLimit), p.acceptConn)
		if errorValue != nil {
			// A bind that fails to listen is skipped; the remaining binds still start.
			continue
		}
		p.servers = append(p.servers, server)
		server.Start()
	}
	// One-second tick loop: drives stats, worker bookkeeping and the splitter
	// until Stop closes p.done.
	go func() {
		var ticks uint64
		for {
			select {
			case <-p.ticker.C:
				ticks++
				p.stats.Tick()
				p.workers.Tick()
				if p.splitter != nil {
					p.splitter.Tick(ticks)
				}
			case <-p.done:
				return
			}
		}
	}()
}
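
// noopSplitter below is the default splitter wired in New. Its method set
// suggests a splitter contract roughly like the following sketch (inferred
// from the methods; the real interface is defined elsewhere in the package
// and may differ):
//
//	type Splitter interface {
//		Connect()
//		OnLogin(*LoginEvent)
//		OnSubmit(*SubmitEvent)
//		OnClose(*CloseEvent)
//		Tick(uint64)
//		GC()
//		Upstreams() UpstreamStats
//	}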

type noopSplitter struct{}

func (noopSplitter) Connect() {}
func (noopSplitter) OnLogin(event *LoginEvent) {}
func (noopSplitter) OnSubmit(event *SubmitEvent) {}
func (noopSplitter) OnClose(event *CloseEvent) {}
func (noopSplitter) Tick(ticks uint64) {}
func (noopSplitter) GC() {}
func (noopSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }

// Stop shuts down all subsystems cleanly.
//
//	p.Stop()
func (p *Proxy) Stop() {
	if p.ticker != nil {
		p.ticker.Stop()
	}
	for _, server := range p.servers {
		server.Stop()
	}
	if p.watcher != nil {
		p.watcher.Stop()
	}
	// Close p.done only if it has not been closed yet, so a repeated Stop
	// does not panic.
	select {
	case <-p.done:
	default:
		close(p.done)
	}
}
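
// The select/default guard above makes sequential repeat calls to Stop safe,
// but two goroutines stopping at the same moment could both reach close(p.done)
// and panic. If concurrent shutdown were a requirement, the usual alternative
// is a sync.Once (a sketch, not what this file does):
//
//	type Proxy struct {
//		// ...existing fields...
//		stopOnce sync.Once
//	}
//
//	func (p *Proxy) Stop() {
//		p.stopOnce.Do(func() { close(p.done) })
//	}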

// Reload replaces the live config. Only the pointer is swapped: listeners
// started by Start keep the bind addresses they were created with.
//
//	p.Reload(newCfg)
func (p *Proxy) Reload(cfg *Config) {
	if cfg != nil {
		p.config = cfg
	}
}

// Summary returns the aggregated statistics snapshot.
func (p *Proxy) Summary() StatsSummary {
	return p.stats.Summary()
}

// Workers returns the known worker records.
func (p *Proxy) Workers() []WorkerRecord {
	return p.workers.List()
}

// CurrentMiners returns the number of currently connected miners.
func (p *Proxy) CurrentMiners() uint64 {
	return proxyMinerCount.Load()
}

// MaxMiners returns the highest concurrent miner count observed.
func (p *Proxy) MaxMiners() uint64 {
	return p.stats.maxMiners.Load()
}

// Mode returns the configured proxy mode, or "" when unconfigured.
func (p *Proxy) Mode() string {
	if p == nil || p.config == nil {
		return ""
	}
	return p.config.Mode
}

// WorkersMode returns the configured workers mode, or "" when unconfigured.
func (p *Proxy) WorkersMode() string {
	if p == nil || p.config == nil {
		return ""
	}
	return string(p.config.Workers)
}

// Upstreams returns the splitter's upstream statistics.
func (p *Proxy) Upstreams() UpstreamStats {
	if p == nil || p.splitter == nil {
		return UpstreamStats{}
	}
	return p.splitter.Upstreams()
}

// acceptConn is the per-connection callback handed to each Server: it wraps
// the accepted connection in a Miner and dispatches a login event.
func (p *Proxy) acceptConn(conn net.Conn, localPort uint16) {
	miner := NewMiner(conn, localPort, nil)
	p.events.Dispatch(Event{Type: EventLogin, Miner: miner})
}
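
// A caller-side sketch of exposing these accessors for monitoring (the HTTP
// wiring and field names here are assumptions, not part of this package):
//
//	http.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
//		_ = json.NewEncoder(w).Encode(map[string]any{
//			"mode":       p.Mode(),
//			"miners":     p.CurrentMiners(),
//			"max_miners": p.MaxMiners(),
//			"upstreams":  p.Upstreams(),
//		})
//	})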