go-proxy/proxy_runtime.go
Virgil 465ea38308 feat(proxy): honour TLS config and pool keepalive
Co-Authored-By: Virgil <virgil@lethean.io>
2026-04-04 11:16:29 +00:00

279 lines
6 KiB
Go

package proxy
import (
"crypto/tls"
"net"
"sync/atomic"
"time"
)
// proxyMinerCount is a live gauge of currently connected miners.
// NOTE(review): package-level, so the count is shared across every Proxy
// instance in the process — confirm only one Proxy runs per process.
var proxyMinerCount atomic.Uint64
// New creates and wires all subsystems but does not start the tick loop or
// TCP listeners. It validates cfg, constructs the event bus, stats,
// custom-diff, workers and splitter subsystems, and registers the event
// handlers that keep the miner map and connection counters in sync.
//
//	p, err := proxy.New(cfg)
func New(cfg *Config) (*Proxy, error) {
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	events := NewEventBus()
	stats := NewStats()
	customDiff := NewCustomDiff(cfg.CustomDiff)
	events.Subscribe(EventLogin, customDiff.OnLogin)
	workers := NewWorkers(cfg.Workers, events)
	splitter := newSplitter(cfg, events)
	proxyValue := &Proxy{
		config:      cfg,
		customDiff:  customDiff,
		splitter:    splitter,
		stats:       stats,
		workers:     workers,
		events:      events,
		miners:      make(map[int64]*Miner),
		rateLimiter: NewRateLimiter(cfg.RateLimit),
		done:        make(chan struct{}),
	}
	subscribeAccessLog(events, cfg.AccessLogFile)
	subscribeShareLog(events, cfg.ShareLogFile)
	events.Subscribe(EventLogin, func(event Event) {
		if event.Miner != nil {
			proxyValue.minerMu.Lock()
			proxyValue.miners[event.Miner.ID()] = event.Miner
			proxyValue.minerMu.Unlock()
		}
		stats.connections.Add(1)
		// Bump the live-miner gauge, then raise the high-water mark with a
		// CAS loop so concurrent logins never lose an update.
		// NOTE(review): proxyMinerCount is package-level, so the gauge is
		// shared by all Proxy instances in the process.
		current := proxyMinerCount.Add(1)
		for {
			maximum := stats.maxMiners.Load()
			if current <= maximum || stats.maxMiners.CompareAndSwap(maximum, current) {
				break
			}
		}
	})
	events.Subscribe(EventClose, func(event Event) {
		if event.Miner != nil {
			proxyValue.minerMu.Lock()
			delete(proxyValue.miners, event.Miner.ID())
			proxyValue.minerMu.Unlock()
		}
		// Decrement the gauge without underflowing. The previous
		// Load-then-Add pair was not atomic: two concurrent closes could
		// both observe 1 and both decrement, wrapping the uint64 around.
		// A CAS loop makes the zero check and the decrement one atomic step.
		for {
			current := proxyMinerCount.Load()
			if current == 0 {
				break
			}
			if proxyMinerCount.CompareAndSwap(current, current-1) {
				break
			}
		}
	})
	events.Subscribe(EventAccept, stats.OnAccept)
	events.Subscribe(EventReject, stats.OnReject)
	if splitter != nil {
		// Bridge generic bus events into the splitter's typed callbacks.
		events.Subscribe(EventLogin, func(event Event) {
			splitter.OnLogin(&LoginEvent{Miner: event.Miner})
		})
		events.Subscribe(EventClose, func(event Event) {
			splitter.OnClose(&CloseEvent{Miner: event.Miner})
		})
	}
	// Optional hot-reload of the config file it was loaded from.
	if cfg.Watch && cfg.sourcePath != "" {
		proxyValue.watcher = NewConfigWatcher(cfg.sourcePath, proxyValue.Reload)
		proxyValue.watcher.Start()
	}
	return proxyValue, nil
}
// Start begins the TCP listener(s), pool connections, and tick loop.
//
// Start blocks until Stop is called: after spawning the ticker goroutine it
// waits on p.done. If a TLS keypair cannot be loaded or a listener cannot be
// created, it tears everything down via Stop and returns.
// NOTE(review): those errors are silently discarded — consider logging them
// or returning an error in a future API revision.
//
// p.Start()
func (p *Proxy) Start() {
// Connect upstream pool(s) before accepting miner connections.
if p.splitter != nil {
p.splitter.Connect()
}
// One-second heartbeat drives stats, workers, rate limiter and splitter.
p.ticker = time.NewTicker(time.Second)
for _, bind := range p.config.Bind {
var tlsConfig *tls.Config
// TLS applies only when both the bind point and the global TLS section enable it.
if bind.TLS && p.config.TLS.Enabled {
certificate, errorValue := tls.LoadX509KeyPair(p.config.TLS.CertFile, p.config.TLS.KeyFile)
if errorValue == nil {
tlsConfig = buildTLSConfig(p.config.TLS)
tlsConfig.Certificates = []tls.Certificate{certificate}
} else {
// Keypair load failed: abort startup entirely.
p.Stop()
return
}
}
server, errorValue := NewServer(bind, tlsConfig, p.rateLimiter, p.acceptConn)
if errorValue != nil {
// Listener creation failed: abort startup entirely.
p.Stop()
return
}
p.servers = append(p.servers, server)
server.Start()
}
if p.config != nil && p.config.HTTP.Enabled {
startHTTPServer(p)
}
// Tick loop: runs until Stop closes p.done.
go func() {
var ticks uint64
for {
select {
case <-p.ticker.C:
ticks++
p.stats.Tick()
p.workers.Tick()
if p.rateLimiter != nil {
p.rateLimiter.Tick()
}
if p.splitter != nil {
p.splitter.Tick(ticks)
}
case <-p.done:
return
}
}
}()
// Block the caller until shutdown.
<-p.done
}
// noopSplitter is the do-nothing Splitter used when no upstream splitting is
// configured: every callback is a no-op and Upstreams reports empty stats.
type noopSplitter struct{}

func (noopSplitter) Connect()              {}
func (noopSplitter) OnLogin(*LoginEvent)   {}
func (noopSplitter) OnSubmit(*SubmitEvent) {}
func (noopSplitter) OnClose(*CloseEvent)   {}
func (noopSplitter) Tick(uint64)           {}
func (noopSplitter) GC()                   {}

// Upstreams always reports the zero value: there are no upstreams to track.
func (noopSplitter) Upstreams() UpstreamStats { return UpstreamStats{} }

// noopSplitterFactory matches the splitter factory signature while always
// producing the do-nothing implementation; both arguments are ignored.
func noopSplitterFactory(*Config, *EventBus) Splitter {
	return noopSplitter{}
}
// Stop shuts down all subsystems cleanly: the ticker, every TCP server, the
// HTTP server, the config watcher, and finally the done channel, which
// unblocks Start.
//
// Calling Stop after a previous Stop is tolerated by the select guard below.
// NOTE(review): that guard is not atomic — two goroutines calling Stop
// concurrently could both reach close(p.done) and panic; confirm Stop is
// invoked from a single goroutine, or protect it with a sync.Once field.
//
// p.Stop()
func (p *Proxy) Stop() {
if p.ticker != nil {
p.ticker.Stop()
}
for _, server := range p.servers {
server.Stop()
}
stopHTTPServer(p)
if p.watcher != nil {
p.watcher.Stop()
}
// Close done only if it has not been closed already (best-effort idempotence).
select {
case <-p.done:
default:
close(p.done)
}
}
// Reload replaces the live config in place and pushes the new custom-diff and
// rate-limit settings into their subsystems. A nil cfg is ignored.
//
// p.Reload(newCfg)
func (p *Proxy) Reload(cfg *Config) {
	if cfg == nil {
		return
	}
	if p.config == nil {
		p.config = cfg
	} else {
		// Overwrite the existing struct but keep its on-disk source path,
		// which the incoming config does not carry.
		keptPath := p.config.sourcePath
		*p.config = *cfg
		p.config.sourcePath = keptPath
	}
	if p.customDiff != nil {
		p.customDiff.SetGlobalDiff(p.config.CustomDiff)
	}
	if p.rateLimiter != nil {
		p.rateLimiter.SetConfig(p.config.RateLimit)
	}
}
// Summary returns a snapshot of aggregate statistics; the zero value is
// returned when the proxy or its stats subsystem is absent.
func (p *Proxy) Summary() StatsSummary {
	if p != nil && p.stats != nil {
		return p.stats.Summary()
	}
	return StatsSummary{}
}
// Workers lists the currently known worker records, or nil when the proxy or
// its worker registry is unavailable.
func (p *Proxy) Workers() []WorkerRecord {
	if p != nil && p.workers != nil {
		return p.workers.List()
	}
	return nil
}
// Miners returns a point-in-time copy of all connected miners. The slice is
// freshly allocated, so callers may retain it without holding the lock.
func (p *Proxy) Miners() []*Miner {
	if p == nil {
		return nil
	}
	p.minerMu.RLock()
	defer p.minerMu.RUnlock()
	snapshot := make([]*Miner, 0, len(p.miners))
	for _, m := range p.miners {
		snapshot = append(snapshot, m)
	}
	return snapshot
}
// CurrentMiners reports the number of miners connected right now.
// Safe on a nil receiver (p is never dereferenced).
// NOTE(review): backed by the package-level proxyMinerCount, so the value is
// shared by every Proxy instance in the process.
func (p *Proxy) CurrentMiners() uint64 {
return proxyMinerCount.Load()
}
// MaxMiners reports the high-water mark of concurrently connected miners, or
// zero when the proxy or its stats subsystem is absent.
func (p *Proxy) MaxMiners() uint64 {
	if p != nil && p.stats != nil {
		return p.stats.maxMiners.Load()
	}
	return 0
}
// Mode returns the configured proxy mode, or the empty string when the proxy
// or its config is absent.
func (p *Proxy) Mode() string {
	if p != nil && p.config != nil {
		return p.config.Mode
	}
	return ""
}
// HTTPConfig returns the HTTP API configuration by value; the zero value is
// returned when the proxy or its config is absent.
func (p *Proxy) HTTPConfig() HTTPConfig {
	if p != nil && p.config != nil {
		return p.config.HTTP
	}
	return HTTPConfig{}
}
// WorkersMode returns the configured workers mode as a plain string, or the
// empty string when the proxy or its config is absent.
func (p *Proxy) WorkersMode() string {
	if p != nil && p.config != nil {
		return string(p.config.Workers)
	}
	return ""
}
// Upstreams returns per-upstream statistics from the splitter; the zero value
// is returned when the proxy or its splitter is absent.
func (p *Proxy) Upstreams() UpstreamStats {
	if p != nil && p.splitter != nil {
		return p.splitter.Upstreams()
	}
	return UpstreamStats{}
}
// acceptConn wires a freshly accepted connection into a new Miner and starts
// it. It is installed as the accept callback for every Server in Start.
func (p *Proxy) acceptConn(conn net.Conn, localPort uint16) {
	// A non-nil (empty) tls.Config is passed to NewMiner when the accepted
	// connection is TLS-wrapped — presumably just a marker; the config
	// carries no settings. TODO confirm NewMiner only nil-checks it.
	var minerTLS *tls.Config
	if _, isTLS := conn.(*tls.Conn); isTLS {
		minerTLS = &tls.Config{}
	}
	m := NewMiner(conn, localPort, minerTLS)
	m.events = p.events
	m.splitter = p.splitter
	if cfg := p.config; cfg != nil {
		m.accessPassword = cfg.AccessPassword
		m.algoExtension = cfg.AlgoExtension
	}
	m.Start()
}