Mining/pkg/mining/container.go
Virgil 262987c10a
Some checks are pending
Security Scan / security (push) Waiting to run
Test / test (push) Waiting to run
AX cleanup for mining service names
2026-04-04 05:53:48 +00:00

254 lines
8 KiB
Go

package mining
import (
"context"
"sync"
"forge.lthn.ai/Snider/Mining/pkg/database"
"forge.lthn.ai/Snider/Mining/pkg/logging"
)
// ContainerConfig holds the settings used to build a Container.
//
// Typical usage:
//
//	containerConfig := mining.DefaultContainerConfig()
//	containerConfig.ListenAddr = ":8080"
//	containerConfig.SimulationMode = true
//
// Literal forms:
//
//	ContainerConfig{ListenAddr: ":9090", DisplayAddr: "localhost:9090", SwaggerNamespace: "/api/v1/mining"}
//	ContainerConfig{SimulationMode: true, Database: database.Config{Enabled: false}}
type ContainerConfig struct {
	// Database configures hashrate persistence; when Enabled is false the
	// container substitutes a no-op store (see Initialize).
	Database database.Config
	// ListenAddr is the address the service listens on (e.g. ":9090").
	ListenAddr string
	// DisplayAddr is the address presented to users (e.g. "localhost:9090").
	DisplayAddr string
	// SwaggerNamespace is the URL prefix for the mining API (e.g. "/api/v1/mining").
	SwaggerNamespace string
	// SimulationMode selects the simulated miner manager instead of the real one.
	SimulationMode bool
}
// DefaultContainerConfig returns the standard configuration: database
// persistence enabled with 30-day retention, the API on :9090 (displayed as
// localhost:9090 under the /api/v1/mining namespace), and simulation off.
//
//	containerConfig := mining.DefaultContainerConfig()
//	containerConfig.ListenAddr = ":8080"
//	container := NewContainer(containerConfig)
func DefaultContainerConfig() ContainerConfig {
	cfg := ContainerConfig{
		ListenAddr:       ":9090",
		DisplayAddr:      "localhost:9090",
		SwaggerNamespace: "/api/v1/mining",
		SimulationMode:   false,
	}
	cfg.Database = database.Config{
		Enabled:       true,
		RetentionDays: 30,
	}
	return cfg
}
// Container wires together the mining service's dependencies (manager,
// profiles, node service, event hub, hashrate store) behind one lifecycle.
//
//	container := NewContainer(DefaultContainerConfig())
//	container.Initialize(ctx); container.Start(ctx); defer container.Shutdown(ctx)
type Container struct {
	// config is the immutable configuration supplied to NewContainer.
	config ContainerConfig
	// mutex guards all mutable fields below.
	mutex sync.RWMutex
	// manager runs miners; a simulated implementation in SimulationMode.
	manager ManagerInterface
	// profileManager persists named miner profiles.
	profileManager *ProfileManager
	// nodeService provides remote peer commands; nil when unavailable.
	nodeService *NodeService
	// eventHub streams miner lifecycle events to /ws/events clients.
	eventHub *EventHub
	// service is the externally-managed HTTP service, if any.
	service *Service
	// hashrateStore records hashrate samples (no-op store when DB disabled).
	hashrateStore database.HashrateStore
	// initialized is set once Initialize succeeds.
	initialized bool
	// transportStarted records that the node transport was started by Start.
	transportStarted bool
	// shutdownCh is closed exactly once when Shutdown completes.
	shutdownCh chan struct{}
}
// NewContainer builds an uninitialized Container from config.
// Call Initialize (then Start) before using its services.
//
//	container := NewContainer(DefaultContainerConfig())
//	container.Initialize(ctx)
func NewContainer(config ContainerConfig) *Container {
	c := &Container{config: config}
	c.shutdownCh = make(chan struct{})
	return c
}
// Initialize wires up the container's services: the hashrate database (or a
// no-op store when disabled), the profile manager, the miner manager (real or
// simulated), the optional node service, and the event hub. It must succeed
// exactly once before Start; a second call returns an error.
//
//	if err := container.Initialize(ctx); err != nil { return err }
//	container.Start(ctx)
func (container *Container) Initialize(ctx context.Context) error {
	container.mutex.Lock()
	defer container.mutex.Unlock()
	if container.initialized {
		return ErrInternal("container already initialized")
	}
	// Persistence backs HTTP handlers such as GET /api/v1/mining/status; when
	// disabled we substitute a no-op store so consumers never see a nil store.
	if container.config.Database.Enabled {
		if err := database.Initialize(container.config.Database); err != nil {
			return ErrInternal("failed to initialize database").WithCause(err)
		}
		container.hashrateStore = database.DefaultStore()
		logging.Info("database initialized", logging.Fields{"retention_days": container.config.Database.RetentionDays})
	} else {
		container.hashrateStore = database.NopStore()
		logging.Info("database disabled, using no-op store", nil)
	}
	// Profiles keep POST /api/v1/mining/profiles working even without XDG storage.
	profileManager, err := NewProfileManager()
	if err != nil {
		return ErrInternal("failed to initialize profile manager").WithCause(err)
	}
	container.profileManager = profileManager
	// Simulation mode keeps `mining serve` and `mining remote status` pointed
	// at simulated miners during local development.
	if container.config.SimulationMode {
		container.manager = NewManagerForSimulation()
	} else {
		container.manager = NewManager()
	}
	// The node service enables remote peer commands such as
	// `mining remote status peer-19f3`. It is optional: failure only warns.
	container.nodeService, err = NewNodeService()
	if err != nil {
		// Discard the value returned alongside a non-nil error: by Go
		// convention it must not be used, and the rest of the container
		// treats a nil nodeService as "local-only" (see Start/Shutdown).
		container.nodeService = nil
		logging.Warn("node service unavailable", logging.Fields{"error": err})
	}
	// The event hub powers GET /ws/events for miner start/stop watchers.
	container.eventHub = NewEventHub()
	// Only the concrete Manager knows how to publish lifecycle events.
	if concreteManager, ok := container.manager.(*Manager); ok {
		concreteManager.SetEventHub(container.eventHub)
	}
	container.initialized = true
	logging.Info("service container initialized", nil)
	return nil
}
// Start launches the container's background work: the event hub loop that
// serves `/ws/events` clients and, when a node service exists, the peer
// transport used by `mining remote connect peer-19f3`. Initialize must have
// succeeded first; transport failures are logged but non-fatal.
//
//	if err := container.Start(ctx); err != nil { return err }
func (container *Container) Start(ctx context.Context) error {
	// A full write lock is required here: transportStarted is mutated below,
	// and writing it under RLock would be a data race with a concurrent
	// Start (RLock admits multiple holders) and with Shutdown's reader.
	container.mutex.Lock()
	defer container.mutex.Unlock()
	if !container.initialized {
		return ErrInternal("container not initialized")
	}
	// Run keeps `/ws/events` clients connected while the API serves requests.
	go container.eventHub.Run()
	if container.nodeService != nil {
		if err := container.nodeService.StartTransport(); err != nil {
			logging.Warn("failed to start node transport", logging.Fields{"error": err})
		} else {
			// Remember success so Shutdown knows to stop the transport.
			container.transportStarted = true
		}
	}
	logging.Info("service container started", nil)
	return nil
}
// Shutdown tears down the container's services in reverse start order: node
// transport, event hub, miner manager, then the database. It is idempotent —
// calls after the first (or before Initialize) return nil immediately.
// Errors are collected and the first one is returned wrapped.
//
//	defer container.Shutdown(ctx) // safe to call multiple times
func (container *Container) Shutdown(ctx context.Context) error {
	container.mutex.Lock()
	defer container.mutex.Unlock()
	// The initialized guard doubles as the idempotency check, and ensures the
	// close(shutdownCh) below runs at most once.
	if !container.initialized {
		return nil
	}
	logging.Info("shutting down service container", nil)
	var errs []error
	// container.service is stopped by the caller so `mining serve` can close
	// the HTTP server and shell together.
	if container.service != nil {
		// Service shutdown is handled externally
	}
	// Tear down peer connectivity only if Start actually brought it up.
	if container.nodeService != nil && container.transportStarted {
		if err := container.nodeService.StopTransport(); err != nil {
			errs = append(errs, ErrInternal("node transport shutdown failed").WithCause(err))
		}
		container.transportStarted = false
	}
	// Close `/ws/events` listeners before process exit.
	if container.eventHub != nil {
		container.eventHub.Stop()
	}
	// Stop miners started through `mining start xmrig`; only the concrete
	// Manager exposes Stop.
	if concreteManager, ok := container.manager.(*Manager); ok {
		concreteManager.Stop()
	}
	// Flush the hashrate store used by GET /api/v1/mining/miners/xmrig/history.
	// NOTE(review): called even when config.Database.Enabled is false —
	// presumably database.Close is a no-op when never initialized; confirm.
	if err := database.Close(); err != nil {
		errs = append(errs, ErrInternal("database shutdown failed").WithCause(err))
	}
	container.initialized = false
	// Wake every ShutdownCh() waiter; guarded above so this never double-closes.
	close(container.shutdownCh)
	if len(errs) > 0 {
		// Only the first error is propagated; the rest were already recorded.
		return ErrInternal("shutdown completed with errors").WithCause(errs[0])
	}
	logging.Info("service container shutdown complete", nil)
	return nil
}
// Manager returns the miner manager under a read lock.
//
//	miner := container.Manager()
//	miner.StartMiner(ctx, "xmrig", config)
func (container *Container) Manager() ManagerInterface {
	container.mutex.RLock()
	manager := container.manager
	container.mutex.RUnlock()
	return manager
}
// ProfileManager returns the profile manager under a read lock.
//
//	profileManager := container.ProfileManager()
//	profileManager.SaveProfile("eth-main", config)
func (container *Container) ProfileManager() *ProfileManager {
	container.mutex.RLock()
	profiles := container.profileManager
	container.mutex.RUnlock()
	return profiles
}
// NodeService returns the node service under a read lock; it is nil when
// `mining remote status` should stay local-only.
//
//	nodeService := container.NodeService()
//	nodeService.GetPeers()
func (container *Container) NodeService() *NodeService {
	container.mutex.RLock()
	node := container.nodeService
	container.mutex.RUnlock()
	return node
}
// EventHub returns the event hub under a read lock.
//
//	eventHub := container.EventHub()
//	eventHub.Broadcast(event)
func (container *Container) EventHub() *EventHub {
	container.mutex.RLock()
	hub := container.eventHub
	container.mutex.RUnlock()
	return hub
}
// HashrateStore returns the hashrate store under a read lock.
//
//	store := container.HashrateStore()
//	store.RecordHashrate("xmrig", 1234.5)
func (container *Container) HashrateStore() database.HashrateStore {
	container.mutex.RLock()
	store := container.hashrateStore
	container.mutex.RUnlock()
	return store
}
// SetHashrateStore replaces the hashrate store under a write lock.
//
//	container.SetHashrateStore(database.NopStore()) // injects a no-op store in tests for GET /api/v1/mining/status.
func (container *Container) SetHashrateStore(store database.HashrateStore) {
	container.mutex.Lock()
	container.hashrateStore = store
	container.mutex.Unlock()
}
// ShutdownCh returns a channel closed when Shutdown completes. No lock is
// taken: the channel is assigned once in NewContainer and never replaced.
//
//	<-container.ShutdownCh() // blocks until `mining serve` finishes shutting down.
func (container *Container) ShutdownCh() <-chan struct{} {
	ch := container.shutdownCh
	return ch
}
// IsInitialized reports, under a read lock, whether Initialize has succeeded.
//
//	if container.IsInitialized() { container.Start(ctx) }
func (container *Container) IsInitialized() bool {
	container.mutex.RLock()
	ready := container.initialized
	container.mutex.RUnlock()
	return ready
}