AX Principle 2 — comments must show HOW with real values, not restate what the type signature already says. The Container struct comment and Initialize method comment both described what the code does in prose; replaced with concrete call-site examples. Co-Authored-By: Charon <charon@lethean.io>
264 lines
6.3 KiB
Go
264 lines
6.3 KiB
Go
package mining
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"sync"
|
|
|
|
"forge.lthn.ai/Snider/Mining/pkg/database"
|
|
"forge.lthn.ai/Snider/Mining/pkg/logging"
|
|
)
|
|
|
|
// ContainerConfig holds configuration for the service container.
// Populate via DefaultContainerConfig() and override fields as needed:
//
//	cfg := DefaultContainerConfig()
//	cfg.ListenAddr = ":8080"
type ContainerConfig struct {
	// Database configures the optional hashrate store. When
	// Database.Enabled is false, Initialize skips database setup and
	// installs a no-op store instead.
	Database database.Config

	// ListenAddr is the address to listen on (e.g., ":9090")
	ListenAddr string

	// DisplayAddr is the address shown in Swagger docs (e.g., "localhost:9090")
	DisplayAddr string

	// SwaggerNamespace is the API path prefix (e.g., "/api/v1/mining")
	SwaggerNamespace string

	// SimulationMode selects NewManagerForSimulation() instead of
	// NewManager() during Initialize, for testing without real miners.
	SimulationMode bool
}
|
|
|
|
// DefaultContainerConfig returns sensible defaults for the container.
|
|
func DefaultContainerConfig() ContainerConfig {
|
|
return ContainerConfig{
|
|
Database: database.Config{
|
|
Enabled: true,
|
|
RetentionDays: 30,
|
|
},
|
|
ListenAddr: ":9090",
|
|
DisplayAddr: "localhost:9090",
|
|
SwaggerNamespace: "/api/v1/mining",
|
|
SimulationMode: false,
|
|
}
|
|
}
|
|
|
|
// Container wires the mining services together and manages their
// lifecycle (Initialize → Start → Shutdown). Typical usage:
//
//	c := NewContainer(DefaultContainerConfig())
//	c.Initialize(ctx); c.Start(ctx); defer c.Shutdown(ctx)
type Container struct {
	config ContainerConfig
	// mu guards all mutable fields below; accessors take RLock, lifecycle
	// methods take the write lock.
	mu sync.RWMutex

	// Core services, created during Initialize.
	manager ManagerInterface
	profileManager *ProfileManager
	nodeService *NodeService // may stay nil if P2P is unavailable
	eventHub *EventHub
	service *Service

	// Database store (interface for testing); a no-op store when the
	// database is disabled.
	hashrateStore database.HashrateStore

	// Initialization state
	initialized bool
	transportStarted bool
	// shutdownCh is created in NewContainer and closed once by Shutdown.
	shutdownCh chan struct{}
}
|
|
|
|
// NewContainer creates a new service container with the given configuration.
|
|
func NewContainer(config ContainerConfig) *Container {
|
|
return &Container{
|
|
config: config,
|
|
shutdownCh: make(chan struct{}),
|
|
}
|
|
}
|
|
|
|
// if err := container.Initialize(ctx); err != nil { return err }
|
|
// container.Start(ctx)
|
|
func (c *Container) Initialize(ctx context.Context) error {
|
|
c.mu.Lock()
|
|
defer c.mu.Unlock()
|
|
|
|
if c.initialized {
|
|
return fmt.Errorf("container already initialized")
|
|
}
|
|
|
|
// 1. Initialize database (optional)
|
|
if c.config.Database.Enabled {
|
|
if err := database.Initialize(c.config.Database); err != nil {
|
|
return fmt.Errorf("failed to initialize database: %w", err)
|
|
}
|
|
c.hashrateStore = database.DefaultStore()
|
|
logging.Info("database initialized", logging.Fields{"retention_days": c.config.Database.RetentionDays})
|
|
} else {
|
|
c.hashrateStore = database.NopStore()
|
|
logging.Info("database disabled, using no-op store", nil)
|
|
}
|
|
|
|
// 2. Initialize profile manager
|
|
var err error
|
|
c.profileManager, err = NewProfileManager()
|
|
if err != nil {
|
|
return fmt.Errorf("failed to initialize profile manager: %w", err)
|
|
}
|
|
|
|
// 3. Initialize miner manager
|
|
if c.config.SimulationMode {
|
|
c.manager = NewManagerForSimulation()
|
|
} else {
|
|
c.manager = NewManager()
|
|
}
|
|
|
|
// 4. Initialize node service (optional - P2P features)
|
|
c.nodeService, err = NewNodeService()
|
|
if err != nil {
|
|
logging.Warn("node service unavailable", logging.Fields{"error": err})
|
|
// Continue without node service - P2P features will be unavailable
|
|
}
|
|
|
|
// 5. Initialize event hub for WebSocket
|
|
c.eventHub = NewEventHub()
|
|
|
|
// Wire up event hub to manager
|
|
if concreteManager, ok := c.manager.(*Manager); ok {
|
|
concreteManager.SetEventHub(c.eventHub)
|
|
}
|
|
|
|
c.initialized = true
|
|
logging.Info("service container initialized", nil)
|
|
return nil
|
|
}
|
|
|
|
// Start begins all background services.
|
|
func (c *Container) Start(ctx context.Context) error {
|
|
c.mu.RLock()
|
|
defer c.mu.RUnlock()
|
|
|
|
if !c.initialized {
|
|
return fmt.Errorf("container not initialized")
|
|
}
|
|
|
|
// Start event hub
|
|
go c.eventHub.Run()
|
|
|
|
// Start node transport if available
|
|
if c.nodeService != nil {
|
|
if err := c.nodeService.StartTransport(); err != nil {
|
|
logging.Warn("failed to start node transport", logging.Fields{"error": err})
|
|
} else {
|
|
c.transportStarted = true
|
|
}
|
|
}
|
|
|
|
logging.Info("service container started", nil)
|
|
return nil
|
|
}
|
|
|
|
// Shutdown gracefully stops all services in reverse order.
|
|
func (c *Container) Shutdown(ctx context.Context) error {
|
|
c.mu.Lock()
|
|
defer c.mu.Unlock()
|
|
|
|
if !c.initialized {
|
|
return nil
|
|
}
|
|
|
|
logging.Info("shutting down service container", nil)
|
|
|
|
var errs []error
|
|
|
|
// 1. Stop service (HTTP server)
|
|
if c.service != nil {
|
|
// Service shutdown is handled externally
|
|
}
|
|
|
|
// 2. Stop node transport (only if it was started)
|
|
if c.nodeService != nil && c.transportStarted {
|
|
if err := c.nodeService.StopTransport(); err != nil {
|
|
errs = append(errs, fmt.Errorf("node transport: %w", err))
|
|
}
|
|
c.transportStarted = false
|
|
}
|
|
|
|
// 3. Stop event hub
|
|
if c.eventHub != nil {
|
|
c.eventHub.Stop()
|
|
}
|
|
|
|
// 4. Stop miner manager
|
|
if concreteManager, ok := c.manager.(*Manager); ok {
|
|
concreteManager.Stop()
|
|
}
|
|
|
|
// 5. Close database
|
|
if err := database.Close(); err != nil {
|
|
errs = append(errs, fmt.Errorf("database: %w", err))
|
|
}
|
|
|
|
c.initialized = false
|
|
close(c.shutdownCh)
|
|
|
|
if len(errs) > 0 {
|
|
return fmt.Errorf("shutdown errors: %v", errs)
|
|
}
|
|
|
|
logging.Info("service container shutdown complete", nil)
|
|
return nil
|
|
}
|
|
|
|
// miner := container.Manager()
|
|
// miner.StartMiner(ctx, "xmrig", config)
|
|
func (c *Container) Manager() ManagerInterface {
|
|
c.mu.RLock()
|
|
defer c.mu.RUnlock()
|
|
return c.manager
|
|
}
|
|
|
|
// pm := container.ProfileManager()
|
|
// pm.SaveProfile("eth-main", config)
|
|
func (c *Container) ProfileManager() *ProfileManager {
|
|
c.mu.RLock()
|
|
defer c.mu.RUnlock()
|
|
return c.profileManager
|
|
}
|
|
|
|
// ns := container.NodeService() // nil if P2P is unavailable
|
|
// ns.GetPeers()
|
|
func (c *Container) NodeService() *NodeService {
|
|
c.mu.RLock()
|
|
defer c.mu.RUnlock()
|
|
return c.nodeService
|
|
}
|
|
|
|
// hub := container.EventHub()
|
|
// hub.Broadcast(event)
|
|
func (c *Container) EventHub() *EventHub {
|
|
c.mu.RLock()
|
|
defer c.mu.RUnlock()
|
|
return c.eventHub
|
|
}
|
|
|
|
// store := container.HashrateStore()
|
|
// store.RecordHashrate("xmrig", 1234.5)
|
|
func (c *Container) HashrateStore() database.HashrateStore {
|
|
c.mu.RLock()
|
|
defer c.mu.RUnlock()
|
|
return c.hashrateStore
|
|
}
|
|
|
|
// container.SetHashrateStore(database.NopStore()) // inject no-op store in tests
|
|
func (c *Container) SetHashrateStore(store database.HashrateStore) {
|
|
c.mu.Lock()
|
|
defer c.mu.Unlock()
|
|
c.hashrateStore = store
|
|
}
|
|
|
|
// ShutdownCh returns a channel that is closed when Shutdown completes.
// No lock is needed: shutdownCh is assigned once in NewContainer and
// never reassigned afterward.
//
//	<-container.ShutdownCh() // blocks until shutdown is complete
func (c *Container) ShutdownCh() <-chan struct{} {
	return c.shutdownCh
}
|
|
|
|
// if container.IsInitialized() { container.Start(ctx) }
|
|
func (c *Container) IsInitialized() bool {
|
|
c.mu.RLock()
|
|
defer c.mu.RUnlock()
|
|
return c.initialized
|
|
}
|