Mining/pkg/mining/container.go
Claude d8dffa398b
Some checks are pending
Security Scan / security (push) Waiting to run
Test / test (push) Waiting to run
ax(mining): replace prose field comments with usage examples in ContainerConfig
ContainerConfig had five "X is the Y" prose field comments that restated
the type signature (AX §2 violation). Container had three section-divider
labels ("Core services", "Database store", "Initialization state") that
describe what is self-evident from the field names. Both patterns are
explicitly banned — delete prose, show usage.

Co-Authored-By: Charon <charon@lethean.io>
2026-04-02 16:47:16 +01:00

255 lines
6.6 KiB
Go

package mining
import (
"context"
"sync"
"forge.lthn.ai/Snider/Mining/pkg/database"
"forge.lthn.ai/Snider/Mining/pkg/logging"
)
// ContainerConfig carries the settings consumed by NewContainer and
// Container.Initialize.
//
//	containerConfig := mining.DefaultContainerConfig()
//	containerConfig.ListenAddr = ":8080"
//	containerConfig.SimulationMode = true
//
//	ContainerConfig{ListenAddr: ":9090", DisplayAddr: "localhost:9090", SwaggerNamespace: "/api/v1/mining"}
//	ContainerConfig{SimulationMode: true, Database: database.Config{Enabled: false}}
type ContainerConfig struct {
	Database         database.Config // Database.Enabled == false selects a no-op hashrate store
	ListenAddr       string
	DisplayAddr      string // presumably the host:port shown to users — verify against consumers
	SwaggerNamespace string
	SimulationMode   bool // true makes Initialize build the manager via NewManagerForSimulation()
}
// containerConfig := mining.DefaultContainerConfig()
// containerConfig.ListenAddr = ":8080"
// container := NewContainer(containerConfig)
func DefaultContainerConfig() ContainerConfig {
	cfg := ContainerConfig{
		ListenAddr:       ":9090",
		DisplayAddr:      "localhost:9090",
		SwaggerNamespace: "/api/v1/mining",
		// SimulationMode defaults to false (the zero value): real miners.
	}
	cfg.Database = database.Config{
		Enabled:       true,
		RetentionDays: 30,
	}
	return cfg
}
// Container wires the mining services together and owns their lifecycle.
//
//	container := NewContainer(DefaultContainerConfig())
//	container.Initialize(ctx); container.Start(ctx); defer container.Shutdown(ctx)
type Container struct {
	config           ContainerConfig
	mutex            sync.RWMutex // guards all mutable fields below
	manager          ManagerInterface
	profileManager   *ProfileManager
	nodeService      *NodeService // may stay nil; Initialize treats its failure as non-fatal
	eventHub         *EventHub
	service          *Service // shutdown is handled externally (see Shutdown step 1)
	hashrateStore    database.HashrateStore
	initialized      bool // set by Initialize, cleared by Shutdown (makes Shutdown idempotent)
	transportStarted bool // true only after nodeService.StartTransport() succeeded
	shutdownCh       chan struct{} // closed exactly once, at the end of Shutdown
}
// container := NewContainer(DefaultContainerConfig())
// container.Initialize(ctx)
func NewContainer(config ContainerConfig) *Container {
	c := new(Container)
	c.config = config
	c.shutdownCh = make(chan struct{})
	return c
}
// if err := container.Initialize(ctx); err != nil { return err }
// container.Start(ctx)
func (c *Container) Initialize(ctx context.Context) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if c.initialized {
		return ErrInternal("container already initialized")
	}

	// 1. Database (optional): pick the real store or a no-op stand-in.
	if c.config.Database.Enabled {
		if err := database.Initialize(c.config.Database); err != nil {
			return ErrInternal("failed to initialize database").WithCause(err)
		}
		c.hashrateStore = database.DefaultStore()
		logging.Info("database initialized", logging.Fields{"retention_days": c.config.Database.RetentionDays})
	} else {
		c.hashrateStore = database.NopStore()
		logging.Info("database disabled, using no-op store", nil)
	}

	// 2. Profile manager — failure here aborts initialization.
	pm, err := NewProfileManager()
	if err != nil {
		return ErrInternal("failed to initialize profile manager").WithCause(err)
	}
	c.profileManager = pm

	// 3. Miner manager: simulation mode swaps in the simulated backend.
	if c.config.SimulationMode {
		c.manager = NewManagerForSimulation()
	} else {
		c.manager = NewManager()
	}

	// 4. Node service (optional P2P) — a failure only logs a warning,
	// and the returned value is stored regardless, matching best-effort use.
	ns, nsErr := NewNodeService()
	c.nodeService = ns
	if nsErr != nil {
		logging.Warn("node service unavailable", logging.Fields{"error": nsErr})
	}

	// 5. Event hub for WebSocket, wired into the concrete manager if present.
	c.eventHub = NewEventHub()
	if mgr, ok := c.manager.(*Manager); ok {
		mgr.SetEventHub(c.eventHub)
	}

	c.initialized = true
	logging.Info("service container initialized", nil)
	return nil
}
// if err := container.Start(ctx); err != nil { return err }
func (c *Container) Start(ctx context.Context) error {
	// Write lock, not RLock: transportStarted is mutated below. The previous
	// RLock allowed two concurrent Start calls (or Start racing any other
	// read-lock holder) to data-race on that field.
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if !c.initialized {
		return ErrInternal("container not initialized")
	}

	// Run the WebSocket event hub in the background.
	go c.eventHub.Run()

	// Node transport is best-effort: failure degrades P2P instead of aborting.
	if c.nodeService != nil {
		if err := c.nodeService.StartTransport(); err != nil {
			logging.Warn("failed to start node transport", logging.Fields{"error": err})
		} else {
			c.transportStarted = true
		}
	}

	logging.Info("service container started", nil)
	return nil
}
// defer container.Shutdown(ctx) // safe to call multiple times
func (c *Container) Shutdown(ctx context.Context) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	// Idempotent: once initialized is cleared, later calls are no-ops.
	if !c.initialized {
		return nil
	}
	logging.Info("shutting down service container", nil)

	var failures []error

	// 1. HTTP service: its shutdown is owned by the caller, nothing to do here.

	// 2. Node transport, but only if Start actually brought it up.
	if c.nodeService != nil && c.transportStarted {
		if err := c.nodeService.StopTransport(); err != nil {
			failures = append(failures, ErrInternal("node transport shutdown failed").WithCause(err))
		}
		c.transportStarted = false
	}

	// 3. Event hub.
	if c.eventHub != nil {
		c.eventHub.Stop()
	}

	// 4. Miner manager — only the concrete type exposes Stop.
	if mgr, ok := c.manager.(*Manager); ok {
		mgr.Stop()
	}

	// 5. Database.
	if err := database.Close(); err != nil {
		failures = append(failures, ErrInternal("database shutdown failed").WithCause(err))
	}

	c.initialized = false
	close(c.shutdownCh)

	if len(failures) > 0 {
		// Only the first failure is surfaced as the cause.
		return ErrInternal("shutdown completed with errors").WithCause(failures[0])
	}
	logging.Info("service container shutdown complete", nil)
	return nil
}
// miner := container.Manager()
// miner.StartMiner(ctx, "xmrig", config)
func (c *Container) Manager() ManagerInterface {
	c.mutex.RLock()
	m := c.manager
	c.mutex.RUnlock()
	return m
}
// profileManager := container.ProfileManager()
// profileManager.SaveProfile("eth-main", config)
func (c *Container) ProfileManager() *ProfileManager {
	c.mutex.RLock()
	pm := c.profileManager
	c.mutex.RUnlock()
	return pm
}
// nodeService := container.NodeService() // nil if P2P is unavailable
// nodeService.GetPeers()
func (c *Container) NodeService() *NodeService {
	c.mutex.RLock()
	ns := c.nodeService
	c.mutex.RUnlock()
	return ns
}
// eventHub := container.EventHub()
// eventHub.Broadcast(event)
func (c *Container) EventHub() *EventHub {
	c.mutex.RLock()
	hub := c.eventHub
	c.mutex.RUnlock()
	return hub
}
// store := container.HashrateStore()
// store.RecordHashrate("xmrig", 1234.5)
func (c *Container) HashrateStore() database.HashrateStore {
	c.mutex.RLock()
	store := c.hashrateStore
	c.mutex.RUnlock()
	return store
}
// container.SetHashrateStore(database.NopStore()) // inject no-op store in tests
func (c *Container) SetHashrateStore(store database.HashrateStore) {
	c.mutex.Lock()
	c.hashrateStore = store
	c.mutex.Unlock()
}
// <-container.ShutdownCh() // blocks until shutdown is complete
//
// Safe without the mutex: shutdownCh is assigned once in NewContainer and
// never reassigned, so this read cannot race with a write.
func (c *Container) ShutdownCh() <-chan struct{} {
	return c.shutdownCh
}
// if container.IsInitialized() { container.Start(ctx) }
func (c *Container) IsInitialized() bool {
	c.mutex.RLock()
	ready := c.initialized
	c.mutex.RUnlock()
	return ready
}