Mining/pkg/mining/manager.go
Virgil 9ed6c33c42
Some checks are pending
Security Scan / security (push) Waiting to run
Test / test (push) Waiting to run
ax(mining): standardize command and repository names
Co-authored-by: Virgil <virgil@lethean.io>
2026-04-04 07:08:47 +00:00

809 lines
25 KiB
Go

package mining
import (
	"bytes"
	"context"
	"net"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"forge.lthn.ai/Snider/Mining/pkg/database"
	"forge.lthn.ai/Snider/Mining/pkg/logging"
)
// equalFold reports whether left and right are equal under Unicode
// simple case-folding.
//
//	equalFold("xmrig", "XMRig")       // true
//	equalFold("tt-miner", "TT-Miner") // true
func equalFold(left, right string) bool {
	leftBytes := []byte(left)
	rightBytes := []byte(right)
	return bytes.EqualFold(leftBytes, rightBytes)
}
// hasPrefix reports whether input begins with prefix.
//
//	hasPrefix("xmrig-rx0", "xmrig")   // true
//	hasPrefix("ttminer-rtx", "xmrig") // false
func hasPrefix(input, prefix string) bool {
	// strings.HasPrefix is the standard-library equivalent of the previous
	// manual length check plus slice comparison.
	return strings.HasPrefix(input, prefix)
}
// containsStr reports whether needle occurs anywhere within haystack.
// An empty needle is always contained, matching the original behavior.
//
//	containsStr("peer not found", "not found") // true
//	containsStr("connection ok", "not found")  // false
func containsStr(haystack, needle string) bool {
	// strings.Contains replaces the previous hand-rolled O(n*m) scan with the
	// optimized standard-library substring search; semantics are identical,
	// including the empty-needle and needle-longer-than-haystack cases.
	return strings.Contains(haystack, needle)
}
// instanceNameRegex matches every character NOT allowed in a miner instance
// name (letters, digits, underscore, slash, hyphen); disallowed characters
// are replaced with "_".
//
//	instanceNameRegex.ReplaceAllString("my algo!", "_") // "my_algo_"
var instanceNameRegex = regexp.MustCompile(`[^a-zA-Z0-9_/-]`)
// ManagerInterface abstracts the mining Manager so callers can substitute a
// mock or alternative implementation.
//
//	var managerInterface ManagerInterface = mining.NewManager()
//	miner, err := managerInterface.StartMiner(ctx, "xmrig", &mining.Config{Algo: "rx/0"})
//	defer managerInterface.Stop()
type ManagerInterface interface {
	// StartMiner creates and starts a miner of minerType with config.
	StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error)
	// StopMiner stops the named instance (exact name, or prefix match).
	StopMiner(ctx context.Context, name string) error
	// GetMiner returns the running miner with the exact instance name.
	GetMiner(name string) (Miner, error)
	// ListMiners returns all currently running miner instances.
	ListMiners() []Miner
	// ListAvailableMiners returns the miner types this build supports.
	ListAvailableMiners() []AvailableMiner
	// GetMinerHashrateHistory returns the in-memory hashrate samples for name.
	GetMinerHashrateHistory(name string) ([]HashratePoint, error)
	// UninstallMiner stops all instances of minerType and removes its files
	// and config entry.
	UninstallMiner(ctx context.Context, minerType string) error
	// Stop shuts down all miners and background goroutines.
	Stop()
}
// Manager owns the set of running miner instances and the background
// goroutines (stats collection, hourly database cleanup) that service them.
//
//	manager := mining.NewManager()
//	defer manager.Stop()
//	miner, err := manager.StartMiner(ctx, "xmrig", &mining.Config{Algo: "rx/0"})
type Manager struct {
	miners            map[string]Miner // running instances keyed by instance name; guarded by mutex
	mutex             sync.RWMutex     // guards miners (and the snapshot reads in collectMinerStats)
	stopChan          chan struct{}    // closed exactly once by Stop to end background goroutines
	stopOnce          sync.Once        // ensures the Stop shutdown sequence runs only once
	waitGroup         sync.WaitGroup   // tracks background goroutines so Stop can wait for them
	databaseEnabled   bool             // set by initDatabase from miners.json Database.Enabled
	databaseRetention int              // retention in days; initDatabase defaults it to 30 when 0
	eventHub          *EventHub        // optional broadcast hub; guarded by eventHubMutex
	eventHubMutex     sync.RWMutex     // guards eventHub
}
// SetEventHub wires an EventHub into the manager so miner lifecycle and
// stats events can be broadcast to subscribers.
//
//	manager.SetEventHub(eventHub)
func (manager *Manager) SetEventHub(eventHub *EventHub) {
	manager.eventHubMutex.Lock()
	manager.eventHub = eventHub
	manager.eventHubMutex.Unlock()
}
// emitEvent broadcasts a typed event to the configured hub, if one is set.
// The hub pointer is copied under the read lock and the broadcast happens
// outside it, so a slow subscriber cannot block SetEventHub.
//
//	manager.emitEvent(EventMinerStarted, MinerEventData{Name: instanceName})
func (manager *Manager) emitEvent(eventType EventType, data interface{}) {
	manager.eventHubMutex.RLock()
	hub := manager.eventHub
	manager.eventHubMutex.RUnlock()
	if hub == nil {
		return
	}
	hub.Broadcast(NewEvent(eventType, data))
}
// Compile-time assertion that *Manager satisfies ManagerInterface.
var _ ManagerInterface = (*Manager)(nil)
// NewManager builds a fully-initialized miner Manager: it syncs miners.json
// with the available miner list, enables database persistence when
// configured, autostarts flagged miners, and begins periodic stats
// collection. Call Stop to shut everything down.
//
//	manager := mining.NewManager()
//	defer manager.Stop() // stops miner goroutines and the hourly database cleanup loop
func NewManager() *Manager {
	m := &Manager{
		miners:   make(map[string]Miner),
		stopChan: make(chan struct{}),
	}
	// Order matters: config sync and database init must complete before any
	// miner autostarts or stats collection begins.
	m.syncMinersConfig()
	m.initDatabase()
	m.autostartMiners()
	m.startStatsCollection()
	return m
}
// NewManagerForSimulation builds a Manager that only runs stats collection:
// no config sync, no database persistence, and no autostart. Intended for
// driving simulated miners.
//
//	manager := mining.NewManagerForSimulation()
//	manager.StartMiner(ctx, "xmrig", &Config{Algo: "rx/0"})
func NewManagerForSimulation() *Manager {
	simulationManager := &Manager{
		miners:   make(map[string]Miner),
		stopChan: make(chan struct{}),
	}
	simulationManager.startStatsCollection()
	return simulationManager
}
// initDatabase reads the miners config and, when Database.Enabled is true,
// brings up the persistence layer and starts the hourly cleanup loop. On any
// failure it logs a warning and leaves persistence disabled rather than
// aborting manager construction.
func (manager *Manager) initDatabase() {
	minersConfig, err := LoadMinersConfig()
	if err != nil {
		logging.Warn("could not load config for database init", logging.Fields{"error": err})
		return
	}
	manager.databaseEnabled = minersConfig.Database.Enabled
	manager.databaseRetention = minersConfig.Database.RetentionDays
	// An unset retention (0) falls back to 30 days.
	if manager.databaseRetention == 0 {
		manager.databaseRetention = 30
	}
	if !manager.databaseEnabled {
		logging.Debug("database persistence is disabled")
		return
	}
	databaseConfig := database.Config{
		Enabled:       true,
		RetentionDays: manager.databaseRetention,
	}
	// Initialization failure downgrades to disabled so the rest of the
	// manager keeps working without persistence.
	if err := database.Initialize(databaseConfig); err != nil {
		logging.Warn("failed to initialize database", logging.Fields{"error": err})
		manager.databaseEnabled = false
		return
	}
	logging.Info("database persistence enabled", logging.Fields{"retention_days": manager.databaseRetention})
	// Kick off the hourly retention cleanup now that the database is up.
	manager.startDBCleanup()
}
// startDBCleanup launches a background goroutine that runs
// database.Cleanup(manager.databaseRetention) immediately and then once per
// hour, until stopChan is closed.
func (manager *Manager) startDBCleanup() {
	manager.waitGroup.Add(1)
	go func() {
		defer manager.waitGroup.Done()
		defer func() {
			if recovered := recover(); recovered != nil {
				logging.Error("panic in database cleanup goroutine", logging.Fields{"panic": recovered})
			}
		}()
		// Shared between the startup run and the hourly ticker runs.
		runCleanup := func() {
			if err := database.Cleanup(manager.databaseRetention); err != nil {
				logging.Warn("database cleanup failed", logging.Fields{"error": err})
			}
		}
		hourlyTicker := time.NewTicker(time.Hour)
		defer hourlyTicker.Stop()
		runCleanup()
		for {
			select {
			case <-hourlyTicker.C:
				runCleanup()
			case <-manager.stopChan:
				return
			}
		}
	}()
}
// syncMinersConfig makes sure every available miner has an entry in the
// miners config; missing miners are appended disabled (Autostart: false,
// Config: nil) and the file is saved only when something actually changed.
func (manager *Manager) syncMinersConfig() {
	minersConfig, err := LoadMinersConfig()
	if err != nil {
		logging.Warn("could not load miners config for sync", logging.Fields{"error": err})
		return
	}
	// hasEntry reports whether minerType already appears in the config,
	// compared case-insensitively.
	hasEntry := func(minerType string) bool {
		for _, configured := range minersConfig.Miners {
			if equalFold(configured.MinerType, minerType) {
				return true
			}
		}
		return false
	}
	changed := false
	for _, available := range manager.ListAvailableMiners() {
		if hasEntry(available.Name) {
			continue
		}
		minersConfig.Miners = append(minersConfig.Miners, MinerAutostartConfig{
			MinerType: available.Name,
			Autostart: false,
			Config:    nil, // keep the new miner disabled until the user saves a profile
		})
		changed = true
		logging.Info("added default config for missing miner", logging.Fields{"miner": available.Name})
	}
	if !changed {
		return
	}
	if err := SaveMinersConfig(minersConfig); err != nil {
		logging.Warn("failed to save updated miners config", logging.Fields{"error": err})
	}
}
// autostartMiners starts every config entry flagged Autostart that also has
// a saved Config, using context.Background() for each start. Individual
// failures are logged and do not stop the remaining autostarts.
func (manager *Manager) autostartMiners() {
	minersConfig, err := LoadMinersConfig()
	if err != nil {
		logging.Warn("could not load miners config for autostart", logging.Fields{"error": err})
		return
	}
	for _, entry := range minersConfig.Miners {
		if !entry.Autostart || entry.Config == nil {
			continue
		}
		logging.Info("autostarting miner", logging.Fields{"type": entry.MinerType})
		if _, err := manager.StartMiner(context.Background(), entry.MinerType, entry.Config); err != nil {
			logging.Error("failed to autostart miner", logging.Fields{"type": entry.MinerType, "error": err})
		}
	}
}
// findAvailablePort asks the OS for a free TCP port on localhost by binding
// to port 0 and reading back the assigned port.
//
//	port, err := findAvailablePort()
//	if err != nil { return 0, err }
//	config.HTTPPort = port
//
// NOTE: the listener is closed before the caller binds the port, so another
// process could grab it in between — an inherent TOCTOU of this technique.
func findAvailablePort() (int, error) {
	// net.Listen resolves "localhost:0" itself, replacing the previous
	// explicit ResolveTCPAddr + ListenTCP pair with the same result.
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return 0, err
	}
	defer listener.Close()
	return listener.Addr().(*net.TCPAddr).Port, nil
}
// StartMiner creates and starts a miner of minerType with config.
//
// The instance is named "<miner>-<sanitized algo>" when config.Algo is set
// (e.g. "xmrig-rx_0"), otherwise "<miner>-<UnixNano%1000>". It emits
// EventMinerStarting before Start, EventMinerError on failure, and
// EventMinerStarted on success; on success the config is also persisted with
// Autostart=true.
//
// Returns ctx.Err() when ctx is already cancelled, ErrMinerExists when an
// instance with the derived name is already running, ErrInvalidConfig for an
// out-of-range HTTPPort, ErrInternal when no API port could be allocated, or
// the error from miner.Start.
func (manager *Manager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
	// Honor an already-cancelled context before taking the manager lock.
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	manager.mutex.Lock()
	defer manager.mutex.Unlock()
	// A nil config is treated as all-defaults rather than an error.
	if config == nil {
		config = &Config{}
	}
	miner, err := CreateMiner(minerType)
	if err != nil {
		return nil, err
	}
	instanceName := miner.GetName()
	if config.Algo != "" {
		// e.g. "rx/0" -> "rx_0": a stable, filesystem-safe suffix.
		sanitizedAlgo := instanceNameRegex.ReplaceAllString(config.Algo, "_")
		instanceName = instanceName + "-" + sanitizedAlgo
	} else {
		// No algo: fall back to a pseudo-unique numeric suffix (0-999).
		instanceName = instanceName + "-" + strconv.FormatInt(time.Now().UnixNano()%1000, 10)
	}
	if _, exists := manager.miners[instanceName]; exists {
		return nil, ErrMinerExists(instanceName)
	}
	// A user-supplied HTTPPort must be in the unprivileged range.
	if config.HTTPPort != 0 {
		if config.HTTPPort < 1024 || config.HTTPPort > 65535 {
			return nil, ErrInvalidConfig("HTTPPort must be between 1024 and 65535, got " + strconv.Itoa(config.HTTPPort))
		}
	}
	// A fresh OS-assigned port for the miner's API listener.
	apiPort, err := findAvailablePort()
	if err != nil {
		return nil, ErrInternal("failed to find an available port for the miner API").WithCause(err)
	}
	if config.HTTPPort == 0 {
		config.HTTPPort = apiPort
	}
	// Propagate the instance name and API port onto the concrete miner type.
	if xmrigMiner, ok := miner.(*XMRigMiner); ok {
		xmrigMiner.Name = instanceName
		if xmrigMiner.API != nil {
			xmrigMiner.API.ListenPort = apiPort
		}
	}
	if ttMiner, ok := miner.(*TTMiner); ok {
		ttMiner.Name = instanceName
		if ttMiner.API != nil {
			ttMiner.API.ListenPort = apiPort
		}
	}
	// Announce the start attempt before invoking the miner binary.
	manager.emitEvent(EventMinerStarting, MinerEventData{
		Name: instanceName,
	})
	if err := miner.Start(config); err != nil {
		// Report the failure to subscribers before returning it.
		manager.emitEvent(EventMinerError, MinerEventData{
			Name:  instanceName,
			Error: err.Error(),
		})
		return nil, err
	}
	manager.miners[instanceName] = miner
	// Persist the last-used config so the miner autostarts next launch;
	// failure to save is non-fatal.
	if err := manager.updateMinerConfig(minerType, true, config); err != nil {
		logging.Warn("failed to save miner config for autostart", logging.Fields{"error": err})
	}
	logMessage := "CryptoCurrency Miner started: " + miner.GetName() + " (Binary: " + miner.GetBinaryPath() + ")"
	logToSyslog(logMessage)
	// Mark the miner as running for websocket clients.
	manager.emitEvent(EventMinerStarted, MinerEventData{
		Name: instanceName,
	})
	RecordMinerStart()
	return miner, nil
}
// UninstallMiner stops every running instance of minerType, removes its
// installed files, and deletes its entry from the miners config.
//
// Returns ctx.Err() when ctx is already cancelled, the CreateMiner error for
// an unknown type, or ErrInternal wrapping the file-removal failure.
func (manager *Manager) UninstallMiner(ctx context.Context, minerType string) error {
	// Honor an already-cancelled context before taking the lock.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	manager.mutex.Lock()
	// Collect every matching instance under the lock; both concrete miner
	// types are matched case-insensitively on their executable name.
	minersToDelete := make([]string, 0)
	minersToStop := make([]Miner, 0)
	for name, runningMiner := range manager.miners {
		if xmrigInstance, ok := runningMiner.(*XMRigMiner); ok && equalFold(xmrigInstance.ExecutableName, minerType) {
			minersToStop = append(minersToStop, runningMiner)
			minersToDelete = append(minersToDelete, name)
		}
		if ttInstance, ok := runningMiner.(*TTMiner); ok && equalFold(ttInstance.ExecutableName, minerType) {
			minersToStop = append(minersToStop, runningMiner)
			minersToDelete = append(minersToDelete, name)
		}
	}
	// Remove map entries before stopping, so Stop can block below without
	// the manager lock being held.
	for _, name := range minersToDelete {
		delete(manager.miners, name)
	}
	manager.mutex.Unlock()
	// Stop outside the lock so one slow shutdown does not block other
	// manager calls; individual failures are logged, not fatal.
	for i, miner := range minersToStop {
		if err := miner.Stop(); err != nil {
			logging.Warn("failed to stop running miner during uninstall", logging.Fields{"miner": minersToDelete[i], "error": err})
		}
	}
	// A throwaway instance is created purely to reach its Uninstall routine.
	miner, err := CreateMiner(minerType)
	if err != nil {
		return err
	}
	if err := miner.Uninstall(); err != nil {
		return ErrInternal("failed to uninstall miner files").WithCause(err)
	}
	// Finally drop the config entry so the miner no longer autostarts.
	return UpdateMinersConfig(func(minersConfig *MinersConfig) error {
		var updatedMiners []MinerAutostartConfig
		for _, minerConfig := range minersConfig.Miners {
			if !equalFold(minerConfig.MinerType, minerType) {
				updatedMiners = append(updatedMiners, minerConfig)
			}
		}
		minersConfig.Miners = updatedMiners
		return nil
	})
}
// updateMinerConfig persists the autostart flag and last-used config for
// minerType, appending a new entry when no existing one matches
// (case-insensitively).
//
//	manager.updateMinerConfig("xmrig", true, config)
func (manager *Manager) updateMinerConfig(minerType string, autostart bool, config *Config) error {
	return UpdateMinersConfig(func(minersConfig *MinersConfig) error {
		for index := range minersConfig.Miners {
			if !equalFold(minersConfig.Miners[index].MinerType, minerType) {
				continue
			}
			minersConfig.Miners[index].Autostart = autostart
			minersConfig.Miners[index].Config = config
			return nil
		}
		// No existing entry: create one.
		minersConfig.Miners = append(minersConfig.Miners, MinerAutostartConfig{
			MinerType: minerType,
			Autostart: autostart,
			Config:    config,
		})
		return nil
	})
}
// StopMiner stops the named miner instance and removes it from the manager
// map. The name is matched exactly first, then as a prefix (so "xmrig" can
// stop "xmrig-rx_0"). The map entry is removed even when Stop fails — for
// example after an external kill — so manager state stays accurate.
//
// Returns ctx.Err() when ctx is already cancelled, ErrMinerNotFound when no
// instance matches, or the Stop error for a genuine stop failure.
func (manager *Manager) StopMiner(ctx context.Context, name string) error {
	// Honor an already-cancelled context before taking the lock.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	manager.mutex.Lock()
	defer manager.mutex.Unlock()
	miner, exists := manager.miners[name]
	if !exists {
		// Fall back to a prefix match on the instance key.
		for minerKey, candidate := range manager.miners {
			if hasPrefix(minerKey, name) {
				miner = candidate
				name = minerKey
				exists = true
				break
			}
		}
	}
	if !exists {
		return ErrMinerNotFound(name)
	}
	// Tell websocket clients shutdown has started.
	manager.emitEvent(EventMinerStopping, MinerEventData{
		Name: name,
	})
	stopErr := miner.Stop()
	// Remove the entry even on failure so stale miners do not linger.
	delete(manager.miners, name)
	// "miner is not running" counts as success: the goal state (stopped) is
	// already reached. Compute the check once instead of duplicating the
	// string comparison. NOTE(review): matching by error text is fragile —
	// a sentinel error would be safer; kept to preserve behavior.
	stopFailed := stopErr != nil && stopErr.Error() != "miner is not running"
	reason := "stopped"
	if stopFailed {
		reason = stopErr.Error()
	}
	// Confirm the final stop reason to subscribers.
	manager.emitEvent(EventMinerStopped, MinerEventData{
		Name:   name,
		Reason: reason,
	})
	if stopFailed {
		return stopErr
	}
	RecordMinerStop()
	return nil
}
// GetMiner looks up a running miner by its exact instance name; it returns
// ErrMinerNotFound when no such miner is registered.
//
//	miner, err := manager.GetMiner("xmrig-randomx")
//	if err != nil { /* miner not found */ }
func (manager *Manager) GetMiner(name string) (Miner, error) {
	manager.mutex.RLock()
	defer manager.mutex.RUnlock()
	if miner, ok := manager.miners[name]; ok {
		return miner, nil
	}
	return nil, ErrMinerNotFound(name)
}
// ListMiners returns a snapshot slice of every currently running miner
// instance (order is unspecified, as map iteration is randomized).
//
//	for _, miner := range manager.ListMiners() { logging.Info(miner.GetName()) }
func (manager *Manager) ListMiners() []Miner {
	manager.mutex.RLock()
	defer manager.mutex.RUnlock()
	snapshot := make([]Miner, 0, len(manager.miners))
	for _, runningMiner := range manager.miners {
		snapshot = append(snapshot, runningMiner)
	}
	return snapshot
}
// RegisterMiner adds an externally-constructed miner (e.g. a simulated one)
// to the manager under its own name; it returns ErrMinerExists when the
// name is already taken.
//
//	simulatedMiner := NewSimulatedMiner(SimulatedMinerConfig{Name: "sim-rx0"})
//	if err := manager.RegisterMiner(simulatedMiner); err != nil { return err }
func (manager *Manager) RegisterMiner(miner Miner) error {
	name := miner.GetName()
	manager.mutex.Lock()
	if _, exists := manager.miners[name]; exists {
		manager.mutex.Unlock()
		return ErrMinerExists(name)
	}
	manager.miners[name] = miner
	manager.mutex.Unlock()
	logging.Info("registered miner", logging.Fields{"name": name})
	// Emit outside the lock; use MinerEventData for consistency with
	// StartMiner/StopMiner instead of the previous ad-hoc map payload.
	manager.emitEvent(EventMinerStarted, MinerEventData{
		Name: name,
	})
	return nil
}
// ListAvailableMiners reports the miner types this build knows how to
// install and run, with human-readable descriptions.
//
//	for _, availableMiner := range manager.ListAvailableMiners() { logging.Info(availableMiner.Name, nil) }
func (manager *Manager) ListAvailableMiners() []AvailableMiner {
	xmrig := AvailableMiner{
		Name:        "xmrig",
		Description: "XMRig is a high performance, open source, cross platform RandomX, KawPow, CryptoNight and AstroBWT CPU/GPU miner and RandomX benchmark.",
	}
	ttMiner := AvailableMiner{
		Name:        "tt-miner",
		Description: "TT-Miner is a high performance NVIDIA GPU miner for various algorithms including Ethash, KawPow, ProgPow, and more. Requires CUDA.",
	}
	return []AvailableMiner{xmrig, ttMiner}
}
// startStatsCollection launches the background goroutine that polls every
// running miner each HighResolutionInterval until stopChan is closed.
// NewManager and NewManagerForSimulation both call this once.
func (manager *Manager) startStatsCollection() {
	manager.waitGroup.Add(1)
	go func() {
		defer manager.waitGroup.Done()
		defer func() {
			if recovered := recover(); recovered != nil {
				logging.Error("panic in stats collection goroutine", logging.Fields{"panic": recovered})
			}
		}()
		statsTicker := time.NewTicker(HighResolutionInterval)
		defer statsTicker.Stop()
		for {
			select {
			case <-manager.stopChan:
				return
			case <-statsTicker.C:
				manager.collectMinerStats()
			}
		}
	}()
}
// statsCollectionTimeout bounds each miner API call and each database write
// made during stats collection, e.g.
// ctx, cancel := context.WithTimeout(ctx, statsCollectionTimeout).
const statsCollectionTimeout = 5 * time.Second
// collectMinerStats polls every running miner once, in parallel; the stats
// ticker in startStatsCollection drives it.
func (manager *Manager) collectMinerStats() {
	// Snapshot the miner set under the read lock to keep lock hold time short.
	manager.mutex.RLock()
	if len(manager.miners) == 0 {
		manager.mutex.RUnlock()
		return
	}
	type minerInfo struct {
		miner     Miner
		minerType string
	}
	miners := make([]minerInfo, 0, len(manager.miners))
	for _, miner := range manager.miners {
		// GetType() gives the canonical miner type used for database tagging.
		miners = append(miners, minerInfo{miner: miner, minerType: miner.GetType()})
	}
	databaseEnabled := manager.databaseEnabled // copied so the lock is not held during collection
	manager.mutex.RUnlock()
	now := time.Now()
	// Fan out one goroutine per miner; each recovers its own panics so a
	// single misbehaving miner cannot kill the whole collection cycle.
	var collectionWaitGroup sync.WaitGroup
	for _, entry := range miners {
		collectionWaitGroup.Add(1)
		go func(miner Miner, minerType string) {
			defer collectionWaitGroup.Done()
			defer func() {
				if r := recover(); r != nil {
					logging.Error("panic in single miner stats collection", logging.Fields{
						"panic": r,
						"miner": miner.GetName(),
					})
				}
			}()
			manager.collectSingleMinerStats(miner, minerType, now, databaseEnabled)
		}(entry.miner, entry.minerType)
	}
	collectionWaitGroup.Wait()
}
// statsRetryCount is the number of extra attempts after the first failed
// stats poll, so up to statsRetryCount+1 calls are made per cycle.
const statsRetryCount = 2

// statsRetryDelay is the pause between consecutive retry attempts.
const statsRetryDelay = 500 * time.Millisecond
// collectSingleMinerStats polls one miner (retrying transient failures),
// records the sample in the miner's rolling in-memory history, optionally
// persists it to the database, and emits an EventMinerStats event.
func (manager *Manager) collectSingleMinerStats(miner Miner, minerType string, now time.Time, databaseEnabled bool) {
	minerName := miner.GetName()
	var stats *PerformanceMetrics
	var lastErr error
	// Up to statsRetryCount+1 attempts; each call is bounded by
	// statsCollectionTimeout so an unresponsive miner API cannot hang us.
	for attempt := 0; attempt <= statsRetryCount; attempt++ {
		ctx, cancel := context.WithTimeout(context.Background(), statsCollectionTimeout)
		stats, lastErr = miner.GetStats(ctx)
		cancel() // release the timeout context immediately
		if lastErr == nil {
			break // success
		}
		// Retries are logged at debug level to avoid noise.
		if attempt < statsRetryCount {
			logging.Debug("retrying stats collection", logging.Fields{
				"miner":   minerName,
				"attempt": attempt + 1,
				"error":   lastErr.Error(),
			})
			time.Sleep(statsRetryDelay)
		}
	}
	if lastErr != nil {
		logging.Error("failed to get miner stats after retries", logging.Fields{
			"miner":   minerName,
			"error":   lastErr.Error(),
			"retries": statsRetryCount,
		})
		RecordStatsCollection(true, true)
		return
	}
	// NOTE(review): lastErr is always nil here, and the second argument is
	// hard-coded false even when retries happened — confirm the intended
	// RecordStatsCollection(success, retried) semantics against its definition.
	RecordStatsCollection(stats != nil && lastErr == nil, false)
	point := HashratePoint{
		Timestamp: now,
		Hashrate:  stats.Hashrate,
	}
	// Rolling in-memory window; AddHashratePoint and ReduceHashrateHistory
	// must be thread-safe because collections run concurrently per miner.
	miner.AddHashratePoint(point)
	miner.ReduceHashrateHistory(now)
	// Persist the sample when database persistence is enabled.
	if databaseEnabled {
		databasePoint := database.HashratePoint{
			Timestamp: point.Timestamp,
			Hashrate:  point.Hashrate,
		}
		// Bounded write so a stalled database cannot block the collector.
		databaseWriteContext, databaseCancel := context.WithTimeout(context.Background(), statsCollectionTimeout)
		if err := database.InsertHashratePoint(databaseWriteContext, minerName, minerType, databasePoint, database.ResolutionHigh); err != nil {
			logging.Warn("failed to persist hashrate", logging.Fields{"miner": minerName, "error": err})
		}
		databaseCancel()
	}
	// Push the fresh sample to real-time WebSocket subscribers.
	manager.emitEvent(EventMinerStats, MinerStatsData{
		Name:        minerName,
		Hashrate:    stats.Hashrate,
		Shares:      stats.Shares,
		Rejected:    stats.Rejected,
		Uptime:      stats.Uptime,
		Algorithm:   stats.Algorithm,
		DiffCurrent: stats.DiffCurrent,
	})
}
// GetMinerHashrateHistory returns the in-memory hashrate samples for the
// exactly-named miner, or ErrMinerNotFound when it is not running.
//
//	points, err := manager.GetMinerHashrateHistory("xmrig")
//	for _, point := range points { logging.Info("hashrate", logging.Fields{"time": point.Timestamp, "rate": point.Hashrate}) }
func (manager *Manager) GetMinerHashrateHistory(name string) ([]HashratePoint, error) {
	manager.mutex.RLock()
	defer manager.mutex.RUnlock()
	if miner, ok := manager.miners[name]; ok {
		return miner.GetHashrateHistory(), nil
	}
	return nil, ErrMinerNotFound(name)
}
// ShutdownTimeout caps how long Stop waits for background goroutines to
// finish before giving up and logging a warning.
const ShutdownTimeout = 10 * time.Second
// Stop shuts the manager down exactly once: it stops all running miners,
// signals background goroutines via stopChan, waits up to ShutdownTimeout
// for them, then closes the database when persistence was enabled.
// Subsequent calls are no-ops thanks to stopOnce.
func (manager *Manager) Stop() {
	manager.stopOnce.Do(func() {
		// Stop miners while holding the lock; note a slow miner.Stop here
		// blocks other manager calls until the loop finishes.
		manager.mutex.Lock()
		for name, miner := range manager.miners {
			if err := miner.Stop(); err != nil {
				logging.Warn("failed to stop miner", logging.Fields{"miner": name, "error": err})
			}
		}
		manager.mutex.Unlock()
		// Closing stopChan ends the stats and cleanup goroutines.
		close(manager.stopChan)
		// Wait for goroutines, but never hang shutdown forever.
		done := make(chan struct{})
		go func() {
			manager.waitGroup.Wait()
			close(done)
		}()
		select {
		case <-done:
			logging.Info("all goroutines stopped gracefully")
		case <-time.After(ShutdownTimeout):
			logging.Warn("shutdown timeout - some goroutines may not have stopped")
		}
		// Close the database last so in-flight writes can finish first.
		if manager.databaseEnabled {
			if err := database.Close(); err != nil {
				logging.Warn("failed to close database", logging.Fields{"error": err})
			}
		}
	})
}
// GetMinerHistoricalStats returns aggregate persisted stats for one miner,
// or ErrDatabaseError when persistence is off.
//
//	stats, err := manager.GetMinerHistoricalStats("xmrig")
//	if err == nil { logging.Info("stats", logging.Fields{"average": stats.AverageRate}) }
func (manager *Manager) GetMinerHistoricalStats(minerName string) (*database.HashrateStats, error) {
	if manager.databaseEnabled {
		return database.GetHashrateStats(minerName)
	}
	return nil, ErrDatabaseError("database persistence is disabled")
}
// GetMinerHistoricalHashrate returns the high-resolution persisted samples
// for minerName between since and until, converted to mining.HashratePoint.
// Returns ErrDatabaseError when persistence is off.
//
//	points, err := manager.GetMinerHistoricalHashrate("xmrig", time.Now().Add(-1*time.Hour), time.Now())
func (manager *Manager) GetMinerHistoricalHashrate(minerName string, since, until time.Time) ([]HashratePoint, error) {
	if !manager.databaseEnabled {
		return nil, ErrDatabaseError("database persistence is disabled")
	}
	databasePoints, err := database.GetHashrateHistory(minerName, database.ResolutionHigh, since, until)
	if err != nil {
		return nil, err
	}
	// Convert the database representation to the mining package's type.
	converted := make([]HashratePoint, 0, len(databasePoints))
	for _, databasePoint := range databasePoints {
		converted = append(converted, HashratePoint{
			Timestamp: databasePoint.Timestamp,
			Hashrate:  databasePoint.Hashrate,
		})
	}
	return converted, nil
}
// GetAllMinerHistoricalStats returns persisted aggregate stats for every
// known miner, or ErrDatabaseError when persistence is off.
//
//	allStats, err := manager.GetAllMinerHistoricalStats()
//	for _, stats := range allStats { logging.Info("stats", logging.Fields{"miner": stats.MinerName, "average": stats.AverageRate}) }
func (manager *Manager) GetAllMinerHistoricalStats() ([]database.HashrateStats, error) {
	if manager.databaseEnabled {
		return database.GetAllMinerStats()
	}
	return nil, ErrDatabaseError("database persistence is disabled")
}
// IsDatabaseEnabled reports whether hashrate persistence was enabled during
// initialization.
// NOTE(review): the field is read without holding mutex; this looks safe
// because initDatabase writes it during construction, but confirm no writes
// happen after goroutines start.
func (manager *Manager) IsDatabaseEnabled() bool {
	return manager.databaseEnabled
}