feat: extract mining engine from Mining repo

XMRig/TTMiner mining engine with profile management, stats collection,
circuit breakers, event system, supervisor, and SQLite persistence.
P2P node service stubbed (moved to core/go-p2p).

Ported from github.com/Snider/Mining/pkg/{mining,database,logging}

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Claude 2026-02-16 15:47:12 +00:00
commit e9cab59ada
No known key found for this signature in database
GPG key ID: AF404715446AEB41
66 changed files with 16330 additions and 0 deletions

184
database/database.go Normal file
View file

@ -0,0 +1,184 @@
package database
import (
"database/sql"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/adrg/xdg"
_ "github.com/mattn/go-sqlite3"
)
// DB is the global database instance
var (
db *sql.DB
dbMu sync.RWMutex
)
// Config holds database configuration options for the mining stats store.
// The zero value disables persistence entirely (Enabled defaults to false).
type Config struct {
	// Enabled determines if database persistence is active.
	// When false, Initialize is a no-op and all store calls silently skip.
	Enabled bool `json:"enabled"`
	// Path is the database file path (optional; when empty the XDG
	// default under lethean-desktop is used — see defaultDBPath).
	Path string `json:"path,omitempty"`
	// RetentionDays is how long to keep historical data (default 30).
	// Enforced by Cleanup, not automatically.
	RetentionDays int `json:"retentionDays,omitempty"`
}
// defaultConfig builds the configuration used when the caller supplies
// none: persistence on, default file location, 30-day retention.
func defaultConfig() Config {
	cfg := Config{}
	cfg.Enabled = true
	cfg.Path = "" // empty means "use defaultDBPath"
	cfg.RetentionDays = 30
	return cfg
}
// defaultDBPath resolves the standard location for the mining database
// under the XDG data home, creating the application data directory if
// it does not yet exist.
func defaultDBPath() (string, error) {
	dir := filepath.Join(xdg.DataHome, "lethean-desktop")
	if mkErr := os.MkdirAll(dir, 0755); mkErr != nil {
		return "", fmt.Errorf("failed to create data directory: %w", mkErr)
	}
	return filepath.Join(dir, "mining.db"), nil
}
// Initialize opens the database connection and creates tables.
// A no-op when cfg.Enabled is false. Safe to call again after Close
// (e.g. on app restart); if a connection is already open it is closed
// first so the handle is not leaked.
func Initialize(cfg Config) error {
	dbMu.Lock()
	defer dbMu.Unlock()
	if !cfg.Enabled {
		return nil
	}
	// Re-initialization: close any previously opened connection before
	// replacing the global, otherwise the old handle is leaked.
	if db != nil {
		db.Close()
		db = nil
	}
	dbPath := cfg.Path
	if dbPath == "" {
		var err error
		dbPath, err = defaultDBPath()
		if err != nil {
			return err
		}
	}
	var err error
	// WAL journal allows readers to proceed alongside the single writer;
	// busy timeout avoids immediate SQLITE_BUSY under contention.
	db, err = sql.Open("sqlite3", dbPath+"?_journal=WAL&_timeout=5000")
	if err != nil {
		return fmt.Errorf("failed to open database: %w", err)
	}
	// Set connection pool settings.
	db.SetMaxOpenConns(1) // SQLite only supports one writer
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(time.Hour)
	// Create tables.
	if err := createTables(); err != nil {
		// Nil out global before closing to prevent use of closed connection.
		closingDB := db
		db = nil
		closingDB.Close()
		return fmt.Errorf("failed to create tables: %w", err)
	}
	return nil
}
// Close shuts down the global database handle, if one is open.
// Safe to call multiple times; subsequent calls return nil.
func Close() error {
	dbMu.Lock()
	defer dbMu.Unlock()
	if db == nil {
		return nil
	}
	// Clear the global before closing so no caller can grab the
	// about-to-be-closed handle once the lock is released.
	conn := db
	db = nil
	return conn.Close()
}
// isInitialized reports whether a live database handle is available.
func isInitialized() bool {
	dbMu.RLock()
	ready := db != nil
	dbMu.RUnlock()
	return ready
}
// createTables creates all required database tables and indexes.
// Idempotent: every statement uses IF NOT EXISTS, so it is safe to run
// against an existing database file (e.g. after an app restart).
// Caller must hold dbMu with db non-nil (Initialize does both).
func createTables() error {
	schema := `
-- Hashrate history table for storing miner performance data
CREATE TABLE IF NOT EXISTS hashrate_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
miner_name TEXT NOT NULL,
miner_type TEXT NOT NULL,
timestamp DATETIME NOT NULL,
hashrate INTEGER NOT NULL,
resolution TEXT NOT NULL DEFAULT 'high',
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
-- Index for efficient queries by miner and time range
CREATE INDEX IF NOT EXISTS idx_hashrate_miner_time
ON hashrate_history(miner_name, timestamp DESC);
-- Index for cleanup queries
CREATE INDEX IF NOT EXISTS idx_hashrate_resolution_time
ON hashrate_history(resolution, timestamp);
-- Miner sessions table for tracking uptime
CREATE TABLE IF NOT EXISTS miner_sessions (
id INTEGER PRIMARY KEY AUTOINCREMENT,
miner_name TEXT NOT NULL,
miner_type TEXT NOT NULL,
started_at DATETIME NOT NULL,
stopped_at DATETIME,
total_shares INTEGER DEFAULT 0,
rejected_shares INTEGER DEFAULT 0,
average_hashrate INTEGER DEFAULT 0
);
-- Index for session queries
CREATE INDEX IF NOT EXISTS idx_sessions_miner
ON miner_sessions(miner_name, started_at DESC);
`
	// SQLite executes the whole multi-statement script in one call.
	_, err := db.Exec(schema)
	return err
}
// Cleanup removes old data based on retention settings.
// Deletes hashrate_history rows whose timestamp is older than
// retentionDays days; miner_sessions rows are intentionally untouched.
// A no-op when persistence is disabled (db == nil).
// Note: dbMu.RLock guards the db pointer only; SQLite itself serializes
// the write (MaxOpenConns is 1).
func Cleanup(retentionDays int) error {
	dbMu.RLock()
	defer dbMu.RUnlock()
	if db == nil {
		return nil
	}
	// Everything strictly before this instant is eligible for deletion.
	cutoff := time.Now().AddDate(0, 0, -retentionDays)
	_, err := db.Exec(`
DELETE FROM hashrate_history
WHERE timestamp < ?
`, cutoff)
	return err
}
// vacuumDB compacts the database file, reclaiming space freed by
// Cleanup deletions. A no-op when persistence is disabled.
func vacuumDB() error {
	dbMu.RLock()
	defer dbMu.RUnlock()
	if db == nil {
		return nil
	}
	if _, err := db.Exec("VACUUM"); err != nil {
		return err
	}
	return nil
}

View file

@ -0,0 +1,277 @@
package database
import (
"os"
"path/filepath"
"sync"
"testing"
"time"
)
// setupRaceTestDB creates a fresh database for race testing
func setupRaceTestDB(t *testing.T) func() {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, "race_test.db")
cfg := Config{
Enabled: true,
Path: dbPath,
RetentionDays: 7,
}
if err := Initialize(cfg); err != nil {
t.Fatalf("Failed to initialize database: %v", err)
}
return func() {
Close()
os.Remove(dbPath)
}
}
// TestConcurrentHashrateInserts verifies that concurrent inserts
// don't cause race conditions.
func TestConcurrentHashrateInserts(t *testing.T) {
	teardown := setupRaceTestDB(t)
	defer teardown()

	var wg sync.WaitGroup
	// Ten writers, each owning one synthetic miner, 100 points apiece.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			name := "miner" + string(rune('A'+idx))
			for j := 0; j < 100; j++ {
				p := HashratePoint{
					Timestamp: time.Now().Add(time.Duration(-j) * time.Second),
					Hashrate:  1000 + idx*100 + j,
				}
				if err := InsertHashratePoint(nil, name, "xmrig", p, ResolutionHigh); err != nil {
					t.Errorf("Insert error for %s: %v", name, err)
				}
			}
		}(i)
	}
	wg.Wait()

	// Every miner must have at least one persisted point.
	for i := 0; i < 10; i++ {
		name := "miner" + string(rune('A'+i))
		history, err := GetHashrateHistory(name, ResolutionHigh, time.Now().Add(-2*time.Minute), time.Now())
		if err != nil {
			t.Errorf("Failed to get history for %s: %v", name, err)
		}
		if len(history) == 0 {
			t.Errorf("Expected history for %s, got none", name)
		}
	}
}
// TestConcurrentInsertAndQuery verifies that concurrent reads and writes
// don't cause race conditions. Run under `go test -race` for the check
// to be meaningful.
func TestConcurrentInsertAndQuery(t *testing.T) {
	cleanup := setupRaceTestDB(t)
	defer cleanup()
	var wg sync.WaitGroup
	stop := make(chan struct{})
	// Writer goroutine: inserts points at ~1ms pace until stop is closed.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; ; i++ {
			select {
			case <-stop:
				return
			default:
				point := HashratePoint{
					Timestamp: time.Now(),
					Hashrate:  1000 + i,
				}
				// Error intentionally ignored: this test only probes for races.
				InsertHashratePoint(nil, "concurrent-test", "xmrig", point, ResolutionHigh)
				time.Sleep(time.Millisecond)
			}
		}
	}()
	// Multiple reader goroutines querying while the writer runs.
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 50; j++ {
				select {
				case <-stop:
					return
				default:
					GetHashrateHistory("concurrent-test", ResolutionHigh, time.Now().Add(-time.Hour), time.Now())
					time.Sleep(2 * time.Millisecond)
				}
			}
		}()
	}
	// Let it run for a bit, then signal every goroutine to finish.
	time.Sleep(200 * time.Millisecond)
	close(stop)
	wg.Wait()
	// Test passes if no race detector warnings
}
// TestConcurrentInsertAndCleanup verifies that cleanup doesn't race
// with ongoing inserts. Run under `go test -race` for the check to be
// meaningful.
func TestConcurrentInsertAndCleanup(t *testing.T) {
	cleanup := setupRaceTestDB(t)
	defer cleanup()
	var wg sync.WaitGroup
	stop := make(chan struct{})
	// Continuous inserts: alternate stale rows (eligible for deletion
	// under the 7-day retention below) with fresh ones, so Cleanup
	// always has work to do while inserts are in flight.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; ; i++ {
			select {
			case <-stop:
				return
			default:
				// Insert some old data and some new data
				oldPoint := HashratePoint{
					Timestamp: time.Now().AddDate(0, 0, -10), // 10 days old
					Hashrate:  500 + i,
				}
				InsertHashratePoint(nil, "cleanup-test", "xmrig", oldPoint, ResolutionHigh)
				newPoint := HashratePoint{
					Timestamp: time.Now(),
					Hashrate:  1000 + i,
				}
				InsertHashratePoint(nil, "cleanup-test", "xmrig", newPoint, ResolutionHigh)
				time.Sleep(time.Millisecond)
			}
		}
	}()
	// Periodic cleanup running concurrently with the writer.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 10; i++ {
			select {
			case <-stop:
				return
			default:
				Cleanup(7) // 7 day retention
				time.Sleep(20 * time.Millisecond)
			}
		}
	}()
	// Let it run, then stop both goroutines and wait for them to exit.
	time.Sleep(200 * time.Millisecond)
	close(stop)
	wg.Wait()
	// Test passes if no race detector warnings
}
// TestConcurrentStats verifies that GetHashrateStats can be called
// concurrently without race conditions. Run under `go test -race`
// for the check to be meaningful.
func TestConcurrentStats(t *testing.T) {
	cleanup := setupRaceTestDB(t)
	defer cleanup()
	// Seed 100 points for one miner so the stats query has data to aggregate.
	minerName := "stats-test"
	for i := 0; i < 100; i++ {
		point := HashratePoint{
			Timestamp: time.Now().Add(time.Duration(-i) * time.Second),
			Hashrate:  1000 + i*10,
		}
		InsertHashratePoint(nil, minerName, "xmrig", point, ResolutionHigh)
	}
	var wg sync.WaitGroup
	// 20 goroutines hammering the stats query concurrently.
	// (The original also had an empty `if stats != nil && ...` branch —
	// dead code flagged by staticcheck SA9003 — removed here.)
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 50; j++ {
				if _, err := GetHashrateStats(minerName); err != nil {
					t.Errorf("Stats error: %v", err)
				}
			}
		}()
	}
	wg.Wait()
	// Test passes if no race detector warnings
}
// TestConcurrentGetAllStats verifies that GetAllMinerStats can be called
// concurrently without race conditions, including while new rows are
// being inserted. Run under `go test -race`.
func TestConcurrentGetAllStats(t *testing.T) {
	cleanup := setupRaceTestDB(t)
	defer cleanup()
	// Seed data for five miners so the GROUP BY query has real work.
	for m := 0; m < 5; m++ {
		minerName := "all-stats-" + string(rune('A'+m))
		for i := 0; i < 50; i++ {
			point := HashratePoint{
				Timestamp: time.Now().Add(time.Duration(-i) * time.Second),
				Hashrate:  1000 + m*100 + i,
			}
			InsertHashratePoint(nil, minerName, "xmrig", point, ResolutionHigh)
		}
	}
	var wg sync.WaitGroup
	// Multiple goroutines querying all stats in parallel.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 30; j++ {
				_, err := GetAllMinerStats()
				if err != nil {
					t.Errorf("GetAllMinerStats error: %v", err)
				}
			}
		}()
	}
	// Concurrent inserts for a fresh miner while the readers run.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 50; i++ {
			point := HashratePoint{
				Timestamp: time.Now(),
				Hashrate:  2000 + i,
			}
			InsertHashratePoint(nil, "all-stats-new", "xmrig", point, ResolutionHigh)
		}
	}()
	wg.Wait()
	// Test passes if no race detector warnings
}

497
database/database_test.go Normal file
View file

@ -0,0 +1,497 @@
package database
import (
"os"
"path/filepath"
"testing"
"time"
)
func setupTestDB(t *testing.T) func() {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, "test.db")
cfg := Config{
Enabled: true,
Path: dbPath,
RetentionDays: 7,
}
if err := Initialize(cfg); err != nil {
t.Fatalf("Failed to initialize database: %v", err)
}
return func() {
Close()
os.Remove(dbPath)
}
}
// TestInitialize verifies the database comes up via the standard fixture.
func TestInitialize(t *testing.T) {
	cleanup := setupTestDB(t)
	defer cleanup()
	// Use the package's own accessor instead of poking at dbMu/db directly;
	// it performs the same locked nil-check.
	if !isInitialized() {
		t.Error("Database should be initialized")
	}
}
func TestInitialize_Disabled(t *testing.T) {
cfg := Config{
Enabled: false,
}
if err := Initialize(cfg); err != nil {
t.Errorf("Initialize with disabled should not error: %v", err)
}
}
// TestClose checks that closing an open database succeeds.
// (The deferred teardown calls Close a second time, exercising the
// "already closed" path as well.)
func TestClose(t *testing.T) {
	teardown := setupTestDB(t)
	defer teardown()
	err := Close()
	if err != nil {
		t.Errorf("Close failed: %v", err)
	}
}
// TestHashrateStorage round-trips hashrate points through the store.
func TestHashrateStorage(t *testing.T) {
	teardown := setupTestDB(t)
	defer teardown()

	const minerName = "test-miner"
	const minerType = "xmrig"
	now := time.Now()

	// Three samples at -5, -4 and -3 minutes.
	for i, hr := range []int{1000, 1100, 1200} {
		p := HashratePoint{
			Timestamp: now.Add(time.Duration(i-5) * time.Minute),
			Hashrate:  hr,
		}
		if err := InsertHashratePoint(nil, minerName, minerType, p, ResolutionHigh); err != nil {
			t.Fatalf("Failed to store hashrate point: %v", err)
		}
	}

	// All three must come back for a window covering the last 10 minutes.
	got, err := GetHashrateHistory(minerName, ResolutionHigh, now.Add(-10*time.Minute), now)
	if err != nil {
		t.Fatalf("Failed to get hashrate history: %v", err)
	}
	if len(got) != 3 {
		t.Errorf("Expected 3 points, got %d", len(got))
	}
}
// TestGetHashrateStats checks the aggregate calculations
// (count/avg/max/min) over a known set of samples.
func TestGetHashrateStats(t *testing.T) {
	teardown := setupTestDB(t)
	defer teardown()

	const name = "stats-test-miner"
	now := time.Now()

	// Samples 500, 1000, 1500 at -2, -1 and 0 minutes.
	for i, hr := range []int{500, 1000, 1500} {
		p := HashratePoint{
			Timestamp: now.Add(time.Duration(i-2) * time.Minute),
			Hashrate:  hr,
		}
		if err := InsertHashratePoint(nil, name, "xmrig", p, ResolutionHigh); err != nil {
			t.Fatalf("Failed to store point: %v", err)
		}
	}

	stats, err := GetHashrateStats(name)
	if err != nil {
		t.Fatalf("Failed to get stats: %v", err)
	}
	if stats.TotalPoints != 3 {
		t.Errorf("Expected 3 total points, got %d", stats.TotalPoints)
	}
	// Average should be (500+1000+1500)/3 = 1000.
	if stats.AverageRate != 1000 {
		t.Errorf("Expected average rate 1000, got %d", stats.AverageRate)
	}
	if stats.MaxRate != 1500 {
		t.Errorf("Expected max rate 1500, got %d", stats.MaxRate)
	}
	if stats.MinRate != 500 {
		t.Errorf("Expected min rate 500, got %d", stats.MinRate)
	}
}
// TestDefaultConfig pins all documented defaults of defaultConfig.
func TestDefaultConfig(t *testing.T) {
	cfg := defaultConfig()
	if !cfg.Enabled {
		t.Error("Default config should have Enabled=true")
	}
	// Empty Path means "use defaultDBPath"; pin that too so a future
	// change to the default location is caught.
	if cfg.Path != "" {
		t.Errorf("Expected default path to be empty, got %q", cfg.Path)
	}
	if cfg.RetentionDays != 30 {
		t.Errorf("Expected default retention 30, got %d", cfg.RetentionDays)
	}
}
// TestCleanupRetention verifies that Cleanup deletes only rows older
// than the retention window and leaves newer rows intact.
func TestCleanupRetention(t *testing.T) {
	cleanup := setupTestDB(t)
	defer cleanup()
	minerName := "retention-test"
	minerType := "xmrig"
	now := time.Now()
	// Insert data at various ages:
	// - 35 days old (should be deleted with 30-day retention)
	// - 25 days old (should be kept with 30-day retention)
	// - 5 days old (should be kept)
	oldPoint := HashratePoint{
		Timestamp: now.AddDate(0, 0, -35),
		Hashrate:  100,
	}
	midPoint := HashratePoint{
		Timestamp: now.AddDate(0, 0, -25),
		Hashrate:  200,
	}
	newPoint := HashratePoint{
		Timestamp: now.AddDate(0, 0, -5),
		Hashrate:  300,
	}
	// Insert all points
	if err := InsertHashratePoint(nil, minerName, minerType, oldPoint, ResolutionHigh); err != nil {
		t.Fatalf("Failed to insert old point: %v", err)
	}
	if err := InsertHashratePoint(nil, minerName, minerType, midPoint, ResolutionHigh); err != nil {
		t.Fatalf("Failed to insert mid point: %v", err)
	}
	if err := InsertHashratePoint(nil, minerName, minerType, newPoint, ResolutionHigh); err != nil {
		t.Fatalf("Failed to insert new point: %v", err)
	}
	// Verify all 3 points exist before running cleanup.
	history, err := GetHashrateHistory(minerName, ResolutionHigh, now.AddDate(0, 0, -40), now)
	if err != nil {
		t.Fatalf("Failed to get history before cleanup: %v", err)
	}
	if len(history) != 3 {
		t.Errorf("Expected 3 points before cleanup, got %d", len(history))
	}
	// Run cleanup with 30-day retention
	if err := Cleanup(30); err != nil {
		t.Fatalf("Cleanup failed: %v", err)
	}
	// Verify only 2 points remain (35-day old point should be deleted)
	history, err = GetHashrateHistory(minerName, ResolutionHigh, now.AddDate(0, 0, -40), now)
	if err != nil {
		t.Fatalf("Failed to get history after cleanup: %v", err)
	}
	if len(history) != 2 {
		t.Errorf("Expected 2 points after cleanup, got %d", len(history))
	}
	// Verify the remaining points are the mid and new ones by checking
	// the deleted point's distinctive hashrate value is gone.
	for _, point := range history {
		if point.Hashrate == 100 {
			t.Error("Old point (100 H/s) should have been deleted")
		}
	}
}
// TestGetHashrateHistoryTimeRange verifies that the since/until window
// is inclusive on both ends and filters correctly.
func TestGetHashrateHistoryTimeRange(t *testing.T) {
	cleanup := setupTestDB(t)
	defer cleanup()
	minerName := "timerange-test"
	minerType := "xmrig"
	now := time.Now()
	// Insert points at specific times, two minutes apart.
	times := []time.Duration{
		-10 * time.Minute,
		-8 * time.Minute,
		-6 * time.Minute,
		-4 * time.Minute,
		-2 * time.Minute,
	}
	for i, offset := range times {
		point := HashratePoint{
			Timestamp: now.Add(offset),
			Hashrate:  1000 + i*100,
		}
		if err := InsertHashratePoint(nil, minerName, minerType, point, ResolutionHigh); err != nil {
			t.Fatalf("Failed to insert point: %v", err)
		}
	}
	// Query for middle range (should get 3 points: -8, -6, -4 minutes)
	since := now.Add(-9 * time.Minute)
	until := now.Add(-3 * time.Minute)
	history, err := GetHashrateHistory(minerName, ResolutionHigh, since, until)
	if err != nil {
		t.Fatalf("Failed to get history: %v", err)
	}
	if len(history) != 3 {
		t.Errorf("Expected 3 points in range, got %d", len(history))
	}
	// Query boundary condition - a window starting exactly on a stored
	// timestamp, one second wide, must still include that point.
	exactSince := now.Add(-6 * time.Minute)
	exactUntil := now.Add(-6 * time.Minute).Add(time.Second)
	history, err = GetHashrateHistory(minerName, ResolutionHigh, exactSince, exactUntil)
	if err != nil {
		t.Fatalf("Failed to get exact history: %v", err)
	}
	// Should get at least 1 point
	if len(history) < 1 {
		t.Error("Expected at least 1 point at exact boundary")
	}
}
// TestMultipleMinerStats verifies that GetAllMinerStats groups rows per
// miner and computes each miner's average independently.
func TestMultipleMinerStats(t *testing.T) {
	cleanup := setupTestDB(t)
	defer cleanup()
	now := time.Now()
	// Create data for multiple miners, each with a distinct rate band.
	miners := []struct {
		name      string
		hashrates []int
	}{
		{"miner-A", []int{1000, 1100, 1200}},
		{"miner-B", []int{2000, 2100, 2200}},
		{"miner-C", []int{3000, 3100, 3200}},
	}
	for _, m := range miners {
		for i, hr := range m.hashrates {
			point := HashratePoint{
				Timestamp: now.Add(time.Duration(-i) * time.Minute),
				Hashrate:  hr,
			}
			if err := InsertHashratePoint(nil, m.name, "xmrig", point, ResolutionHigh); err != nil {
				t.Fatalf("Failed to insert point for %s: %v", m.name, err)
			}
		}
	}
	// Get all miner stats
	allStats, err := GetAllMinerStats()
	if err != nil {
		t.Fatalf("Failed to get all stats: %v", err)
	}
	if len(allStats) != 3 {
		t.Errorf("Expected stats for 3 miners, got %d", len(allStats))
	}
	// Index by name since result ordering is by miner_name, not insertion.
	statsMap := make(map[string]HashrateStats)
	for _, s := range allStats {
		statsMap[s.MinerName] = s
	}
	// Check miner-A: avg = (1000+1100+1200)/3 = 1100
	if s, ok := statsMap["miner-A"]; ok {
		if s.AverageRate != 1100 {
			t.Errorf("miner-A: expected avg 1100, got %d", s.AverageRate)
		}
	} else {
		t.Error("miner-A stats not found")
	}
	// Check miner-C: avg = (3000+3100+3200)/3 = 3100
	if s, ok := statsMap["miner-C"]; ok {
		if s.AverageRate != 3100 {
			t.Errorf("miner-C: expected avg 3100, got %d", s.AverageRate)
		}
	} else {
		t.Error("miner-C stats not found")
	}
}
// TestIsInitialized walks the full lifecycle: not ready before
// Initialize, ready after it, and not ready again once closed.
func TestIsInitialized(t *testing.T) {
	Close() // Ensure clean state
	if got := isInitialized(); got {
		t.Error("Should not be initialized before Initialize()")
	}
	teardown := setupTestDB(t)
	defer teardown()
	if got := isInitialized(); !got {
		t.Error("Should be initialized after Initialize()")
	}
	Close()
	if got := isInitialized(); got {
		t.Error("Should not be initialized after Close()")
	}
}
// TestSchemaCreation verifies that createTables produced the expected
// tables and indexes by inspecting sqlite_master directly.
// Table-driven to avoid repeating the same lookup four times.
func TestSchemaCreation(t *testing.T) {
	cleanup := setupTestDB(t)
	defer cleanup()
	// Hold the read lock while touching the global handle directly.
	dbMu.RLock()
	defer dbMu.RUnlock()
	objects := []struct {
		kind string // sqlite_master "type" column: table or index
		name string
	}{
		{"table", "hashrate_history"},
		{"table", "miner_sessions"},
		{"index", "idx_hashrate_miner_time"},
		{"index", "idx_sessions_miner"},
	}
	for _, obj := range objects {
		var name string
		err := db.QueryRow(
			"SELECT name FROM sqlite_master WHERE type=? AND name=?",
			obj.kind, obj.name,
		).Scan(&name)
		if err != nil {
			t.Errorf("%s %s should exist: %v", obj.kind, obj.name, err)
		}
	}
}
// TestReInitializeExistingDB verifies that data written before a
// Close/Initialize cycle (simulating an app restart) survives on disk.
func TestReInitializeExistingDB(t *testing.T) {
	tmpDir := t.TempDir()
	dbPath := filepath.Join(tmpDir, "reinit_test.db")
	cfg := Config{
		Enabled:       true,
		Path:          dbPath,
		RetentionDays: 7,
	}
	// First initialization
	if err := Initialize(cfg); err != nil {
		t.Fatalf("First initialization failed: %v", err)
	}
	// Insert some data with a distinctive hashrate to check for later.
	minerName := "reinit-test-miner"
	point := HashratePoint{
		Timestamp: time.Now(),
		Hashrate:  1234,
	}
	if err := InsertHashratePoint(nil, minerName, "xmrig", point, ResolutionHigh); err != nil {
		t.Fatalf("Failed to insert point: %v", err)
	}
	// Close and re-initialize (simulates app restart)
	if err := Close(); err != nil {
		t.Fatalf("Close failed: %v", err)
	}
	// Re-initialize with same path; createTables is idempotent so the
	// existing schema is preserved.
	if err := Initialize(cfg); err != nil {
		t.Fatalf("Re-initialization failed: %v", err)
	}
	defer func() {
		Close()
		os.Remove(dbPath)
	}()
	// Verify data persisted across the restart.
	history, err := GetHashrateHistory(minerName, ResolutionHigh, time.Now().Add(-time.Hour), time.Now().Add(time.Hour))
	if err != nil {
		t.Fatalf("Failed to get history after reinit: %v", err)
	}
	if len(history) != 1 {
		t.Errorf("Expected 1 point after reinit, got %d", len(history))
	}
	if len(history) > 0 && history[0].Hashrate != 1234 {
		t.Errorf("Expected hashrate 1234, got %d", history[0].Hashrate)
	}
}
// TestConcurrentDatabaseAccess hammers the store with parallel reads and
// writes and fails on any error reported by either path.
func TestConcurrentDatabaseAccess(t *testing.T) {
	cleanup := setupTestDB(t)
	defer cleanup()
	const numGoroutines = 10
	const numOpsPerGoroutine = 20
	done := make(chan bool, numGoroutines)
	// Each iteration can report up to TWO errors (one from the write and
	// one from the read), so size the channel to the worst case; the
	// original capacity of numGoroutines*numOpsPerGoroutine could fill
	// up and deadlock the senders.
	errors := make(chan error, 2*numGoroutines*numOpsPerGoroutine)
	now := time.Now()
	// Launch multiple goroutines doing concurrent reads/writes.
	for i := 0; i < numGoroutines; i++ {
		go func(id int) {
			minerName := "concurrent-miner-" + string(rune('A'+id))
			for j := 0; j < numOpsPerGoroutine; j++ {
				// Write
				point := HashratePoint{
					Timestamp: now.Add(time.Duration(-j) * time.Second),
					Hashrate:  1000 + j,
				}
				if err := InsertHashratePoint(nil, minerName, "xmrig", point, ResolutionHigh); err != nil {
					errors <- err
				}
				// Read
				_, err := GetHashrateHistory(minerName, ResolutionHigh, now.Add(-time.Hour), now)
				if err != nil {
					errors <- err
				}
			}
			done <- true
		}(i)
	}
	// Wait for all goroutines before closing the error channel.
	for i := 0; i < numGoroutines; i++ {
		<-done
	}
	close(errors)
	// Report every collected error plus a summary count.
	var errCount int
	for err := range errors {
		t.Errorf("Concurrent access error: %v", err)
		errCount++
	}
	if errCount > 0 {
		t.Errorf("Got %d errors during concurrent access", errCount)
	}
}

233
database/hashrate.go Normal file
View file

@ -0,0 +1,233 @@
package database
import (
"context"
"fmt"
"time"
"forge.lthn.ai/core/mining/logging"
)
// parseSQLiteTimestamp parses timestamp strings from SQLite which may use
// various formats. Logs a warning if parsing fails and returns zero time.
func parseSQLiteTimestamp(s string) time.Time {
	if s == "" {
		return time.Time{}
	}
	// SQLite has no dedicated timestamp type; drivers and CURRENT_TIMESTAMP
	// defaults emit a handful of layouts, so try each in turn.
	for _, layout := range []string{
		"2006-01-02 15:04:05.999999999-07:00",
		time.RFC3339Nano,
		time.RFC3339,
		"2006-01-02 15:04:05",
		"2006-01-02T15:04:05Z",
	} {
		parsed, parseErr := time.Parse(layout, s)
		if parseErr == nil {
			return parsed
		}
	}
	logging.Warn("failed to parse timestamp from database", logging.Fields{"timestamp": s})
	return time.Time{}
}
// Resolution indicates the data resolution type of stored hashrate samples.
type Resolution string

const (
	// ResolutionHigh is raw sampling data.
	ResolutionHigh Resolution = "high" // 10-second intervals
	// ResolutionLow is downsampled data.
	ResolutionLow Resolution = "low" // 1-minute averages
)

// HashratePoint represents a single hashrate measurement.
type HashratePoint struct {
	// Timestamp is when the sample was taken.
	Timestamp time.Time `json:"timestamp"`
	// Hashrate is the measured rate (H/s).
	Hashrate int `json:"hashrate"`
}

// dbInsertTimeout is the maximum time to wait for a database insert operation
// when the caller does not supply a context of its own.
const dbInsertTimeout = 5 * time.Second
// InsertHashratePoint stores a hashrate measurement in the database.
// If ctx is nil, a default timeout context will be used. A no-op
// returning nil when persistence is disabled.
func InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
	dbMu.RLock()
	defer dbMu.RUnlock()
	if db == nil {
		// Persistence disabled: silently skip.
		return nil
	}
	if ctx == nil {
		// No caller-supplied deadline; bound the write ourselves.
		timeoutCtx, cancel := context.WithTimeout(context.Background(), dbInsertTimeout)
		defer cancel()
		ctx = timeoutCtx
	}
	_, err := db.ExecContext(ctx, `
INSERT INTO hashrate_history (miner_name, miner_type, timestamp, hashrate, resolution)
VALUES (?, ?, ?, ?, ?)
`, minerName, minerType, point.Timestamp, point.Hashrate, string(resolution))
	return err
}
// GetHashrateHistory retrieves hashrate history for a miner within a
// time range (both bounds inclusive), ordered oldest first. Returns
// (nil, nil) when persistence is disabled.
func GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
	dbMu.RLock()
	defer dbMu.RUnlock()
	if db == nil {
		return nil, nil
	}
	rows, err := db.Query(`
SELECT timestamp, hashrate
FROM hashrate_history
WHERE miner_name = ?
AND resolution = ?
AND timestamp >= ?
AND timestamp <= ?
ORDER BY timestamp ASC
`, minerName, string(resolution), since, until)
	if err != nil {
		return nil, fmt.Errorf("failed to query hashrate history: %w", err)
	}
	defer rows.Close()
	var history []HashratePoint
	for rows.Next() {
		var p HashratePoint
		if scanErr := rows.Scan(&p.Timestamp, &p.Hashrate); scanErr != nil {
			return nil, fmt.Errorf("failed to scan row: %w", scanErr)
		}
		history = append(history, p)
	}
	return history, rows.Err()
}
// HashrateStats holds aggregated hashrate statistics for a single miner,
// as returned by GetHashrateStats and GetAllMinerStats.
// (Doc comment previously started with "GetHashrateStats", misnaming the type.)
type HashrateStats struct {
	MinerName string `json:"minerName"`
	// TotalPoints is the number of stored samples for this miner.
	TotalPoints int `json:"totalPoints"`
	// AverageRate is the mean hashrate, truncated to an integer.
	AverageRate int `json:"averageRate"`
	MaxRate int `json:"maxRate"`
	MinRate int `json:"minRate"`
	// FirstSeen/LastSeen are the oldest and newest sample timestamps;
	// zero time if the stored value could not be parsed.
	FirstSeen time.Time `json:"firstSeen"`
	LastSeen time.Time `json:"lastSeen"`
}
// GetHashrateStats retrieves aggregated stats for a miner. Returns
// (nil, nil) when persistence is disabled or the miner has no data.
func GetHashrateStats(minerName string) (*HashrateStats, error) {
	dbMu.RLock()
	defer dbMu.RUnlock()
	if db == nil {
		return nil, nil
	}
	// First check if there are any rows for this miner so "no data"
	// (nil stats) can be distinguished from a zero-valued aggregate.
	var count int
	err := db.QueryRow(`SELECT COUNT(*) FROM hashrate_history WHERE miner_name = ?`, minerName).Scan(&count)
	if err != nil {
		return nil, err
	}
	// No data for this miner
	if count == 0 {
		return nil, nil
	}
	var stats HashrateStats
	stats.MinerName = minerName
	// SQLite returns timestamps as strings and AVG as float64, so scan
	// into intermediates and convert after the error check.
	var firstSeenStr, lastSeenStr string
	var avgRate float64
	err = db.QueryRow(`
SELECT
COUNT(*),
COALESCE(AVG(hashrate), 0),
COALESCE(MAX(hashrate), 0),
COALESCE(MIN(hashrate), 0),
MIN(timestamp),
MAX(timestamp)
FROM hashrate_history
WHERE miner_name = ?
`, minerName).Scan(
		&stats.TotalPoints,
		&avgRate,
		&stats.MaxRate,
		&stats.MinRate,
		&firstSeenStr,
		&lastSeenStr,
	)
	// Check the scan error BEFORE deriving fields from its outputs
	// (the original assigned AverageRate ahead of this check).
	if err != nil {
		return nil, err
	}
	stats.AverageRate = int(avgRate)
	// Parse timestamps using helper that logs errors
	stats.FirstSeen = parseSQLiteTimestamp(firstSeenStr)
	stats.LastSeen = parseSQLiteTimestamp(lastSeenStr)
	return &stats, nil
}
// GetAllMinerStats retrieves aggregated stats for every miner with
// stored data, ordered by miner name. Returns (nil, nil) when
// persistence is disabled.
func GetAllMinerStats() ([]HashrateStats, error) {
	dbMu.RLock()
	defer dbMu.RUnlock()
	if db == nil {
		return nil, nil
	}
	rows, err := db.Query(`
SELECT
miner_name,
COUNT(*),
COALESCE(AVG(hashrate), 0),
COALESCE(MAX(hashrate), 0),
COALESCE(MIN(hashrate), 0),
MIN(timestamp),
MAX(timestamp)
FROM hashrate_history
GROUP BY miner_name
ORDER BY miner_name
`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var result []HashrateStats
	for rows.Next() {
		var (
			entry    HashrateStats
			firstStr string
			lastStr  string
			avg      float64
		)
		scanErr := rows.Scan(
			&entry.MinerName,
			&entry.TotalPoints,
			&avg,
			&entry.MaxRate,
			&entry.MinRate,
			&firstStr,
			&lastStr,
		)
		if scanErr != nil {
			return nil, scanErr
		}
		entry.AverageRate = int(avg)
		// Timestamps arrive as strings; the helper tolerates the driver's formats.
		entry.FirstSeen = parseSQLiteTimestamp(firstStr)
		entry.LastSeen = parseSQLiteTimestamp(lastStr)
		result = append(result, entry)
	}
	return result, rows.Err()
}

95
database/interface.go Normal file
View file

@ -0,0 +1,95 @@
package database
import (
"context"
"time"
)
// HashrateStore defines the interface for hashrate data persistence.
// This interface allows for dependency injection and easier testing.
// Two implementations exist in this package: DefaultStore (delegates to
// the package-level functions and the global connection) and NopStore
// (discards everything).
type HashrateStore interface {
	// InsertHashratePoint stores a hashrate measurement.
	// If ctx is nil, a default timeout will be used.
	InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error
	// GetHashrateHistory retrieves hashrate history for a miner within a time range.
	GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error)
	// GetHashrateStats retrieves aggregated statistics for a specific miner.
	GetHashrateStats(minerName string) (*HashrateStats, error)
	// GetAllMinerStats retrieves statistics for all miners.
	GetAllMinerStats() ([]HashrateStats, error)
	// Cleanup removes old data based on retention settings.
	Cleanup(retentionDays int) error
	// Close closes the store and releases resources.
	Close() error
}
// defaultStore implements HashrateStore using the global database connection.
// This provides backward compatibility while allowing interface-based usage.
// Every method is a thin delegation to the package-level function of the
// same name.
type defaultStore struct{}

// DefaultStore returns a HashrateStore that uses the global database connection.
// This is useful for gradual migration from package-level functions to
// interface-based usage.
func DefaultStore() HashrateStore {
	return &defaultStore{}
}

// InsertHashratePoint delegates to the package-level InsertHashratePoint.
func (s *defaultStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
	return InsertHashratePoint(ctx, minerName, minerType, point, resolution)
}

// GetHashrateHistory delegates to the package-level GetHashrateHistory.
func (s *defaultStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
	return GetHashrateHistory(minerName, resolution, since, until)
}

// GetHashrateStats delegates to the package-level GetHashrateStats.
func (s *defaultStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
	return GetHashrateStats(minerName)
}

// GetAllMinerStats delegates to the package-level GetAllMinerStats.
func (s *defaultStore) GetAllMinerStats() ([]HashrateStats, error) {
	return GetAllMinerStats()
}

// Cleanup delegates to the package-level Cleanup.
func (s *defaultStore) Cleanup(retentionDays int) error {
	return Cleanup(retentionDays)
}

// Close delegates to the package-level Close.
func (s *defaultStore) Close() error {
	return Close()
}
// NopStore returns a HashrateStore that does nothing.
// Useful for testing or when database is disabled: every method
// succeeds and returns zero values.
func NopStore() HashrateStore {
	return &nopStore{}
}

// nopStore is the no-op HashrateStore implementation behind NopStore.
type nopStore struct{}

// InsertHashratePoint discards the measurement and reports success.
func (s *nopStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
	return nil
}

// GetHashrateHistory always returns no data and no error.
func (s *nopStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
	return nil, nil
}

// GetHashrateStats always returns no stats and no error.
func (s *nopStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
	return nil, nil
}

// GetAllMinerStats always returns no stats and no error.
func (s *nopStore) GetAllMinerStats() ([]HashrateStats, error) {
	return nil, nil
}

// Cleanup is a no-op.
func (s *nopStore) Cleanup(retentionDays int) error {
	return nil
}

// Close is a no-op.
func (s *nopStore) Close() error {
	return nil
}

204
database/interface_test.go Normal file
View file

@ -0,0 +1,204 @@
package database
import (
"context"
"testing"
"time"
)
// TestDefaultStore exercises every HashrateStore method through the
// defaultStore delegating implementation against a real test database.
func TestDefaultStore(t *testing.T) {
	cleanup := setupTestDB(t)
	defer cleanup()
	store := DefaultStore()
	// Test InsertHashratePoint (nil ctx exercises the default-timeout path).
	point := HashratePoint{
		Timestamp: time.Now(),
		Hashrate:  1500,
	}
	if err := store.InsertHashratePoint(nil, "interface-test", "xmrig", point, ResolutionHigh); err != nil {
		t.Fatalf("InsertHashratePoint failed: %v", err)
	}
	// Test GetHashrateHistory
	history, err := store.GetHashrateHistory("interface-test", ResolutionHigh, time.Now().Add(-time.Hour), time.Now().Add(time.Hour))
	if err != nil {
		t.Fatalf("GetHashrateHistory failed: %v", err)
	}
	if len(history) != 1 {
		t.Errorf("Expected 1 point, got %d", len(history))
	}
	// Test GetHashrateStats
	stats, err := store.GetHashrateStats("interface-test")
	if err != nil {
		t.Fatalf("GetHashrateStats failed: %v", err)
	}
	if stats == nil {
		t.Fatal("Expected non-nil stats")
	}
	if stats.TotalPoints != 1 {
		t.Errorf("Expected 1 total point, got %d", stats.TotalPoints)
	}
	// Test GetAllMinerStats
	allStats, err := store.GetAllMinerStats()
	if err != nil {
		t.Fatalf("GetAllMinerStats failed: %v", err)
	}
	if len(allStats) != 1 {
		t.Errorf("Expected 1 miner in stats, got %d", len(allStats))
	}
	// Test Cleanup (the just-inserted point is recent, so nothing is deleted)
	if err := store.Cleanup(30); err != nil {
		t.Fatalf("Cleanup failed: %v", err)
	}
}
// TestDefaultStore_WithContext verifies inserts honour a caller-supplied
// context instead of falling back to the default timeout.
func TestDefaultStore_WithContext(t *testing.T) {
	teardown := setupTestDB(t)
	defer teardown()
	store := DefaultStore()
	sample := HashratePoint{Timestamp: time.Now(), Hashrate: 2000}
	if err := store.InsertHashratePoint(context.Background(), "ctx-test", "xmrig", sample, ResolutionHigh); err != nil {
		t.Fatalf("InsertHashratePoint with context failed: %v", err)
	}
	got, err := store.GetHashrateHistory("ctx-test", ResolutionHigh, time.Now().Add(-time.Hour), time.Now().Add(time.Hour))
	if err != nil {
		t.Fatalf("GetHashrateHistory failed: %v", err)
	}
	if len(got) != 1 {
		t.Errorf("Expected 1 point, got %d", len(got))
	}
}
// TestNopStore verifies the no-op HashrateStore implementation: every write
// method reports success, every read method returns a nil result with no
// error, and Cleanup/Close succeed.
func TestNopStore(t *testing.T) {
	store := NopStore()
	// Use a real context instead of nil: nil contexts violate the context
	// package contract even for implementations that ignore the argument
	// (previously this test passed nil).
	ctx := context.Background()

	point := HashratePoint{
		Timestamp: time.Now(),
		Hashrate:  1000,
	}
	if err := store.InsertHashratePoint(ctx, "test", "xmrig", point, ResolutionHigh); err != nil {
		t.Errorf("NopStore InsertHashratePoint should not error: %v", err)
	}

	// Reads return nil slices/pointers — not empty values — by design.
	history, err := store.GetHashrateHistory("test", ResolutionHigh, time.Now().Add(-time.Hour), time.Now())
	if err != nil {
		t.Errorf("NopStore GetHashrateHistory should not error: %v", err)
	}
	if history != nil {
		t.Errorf("NopStore GetHashrateHistory should return nil, got %v", history)
	}

	stats, err := store.GetHashrateStats("test")
	if err != nil {
		t.Errorf("NopStore GetHashrateStats should not error: %v", err)
	}
	if stats != nil {
		t.Errorf("NopStore GetHashrateStats should return nil, got %v", stats)
	}

	allStats, err := store.GetAllMinerStats()
	if err != nil {
		t.Errorf("NopStore GetAllMinerStats should not error: %v", err)
	}
	if allStats != nil {
		t.Errorf("NopStore GetAllMinerStats should return nil, got %v", allStats)
	}

	if err := store.Cleanup(30); err != nil {
		t.Errorf("NopStore Cleanup should not error: %v", err)
	}
	if err := store.Close(); err != nil {
		t.Errorf("NopStore Close should not error: %v", err)
	}
}
// TestInterfaceCompatibility is a compile-time guarantee that both store
// constructors and both concrete types satisfy the HashrateStore interface.
// Assignment to an interface-typed variable fails to compile if any
// implementation drifts from the interface.
func TestInterfaceCompatibility(t *testing.T) {
	var store HashrateStore
	store = DefaultStore()
	store = NopStore()
	store = &defaultStore{}
	store = &nopStore{}
	_ = store
}
// TestDefaultStore_ContextCancellation documents how the default store
// behaves when handed an already-cancelled context. Whether the SQLite
// driver observes cancellation is driver-dependent, so both outcomes are
// logged rather than asserted.
func TestDefaultStore_ContextCancellation(t *testing.T) {
	cleanup := setupTestDB(t)
	defer cleanup()

	store := DefaultStore()

	// Cancel immediately so the context is already dead before the insert.
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	sample := HashratePoint{Timestamp: time.Now(), Hashrate: 1000}
	switch err := store.InsertHashratePoint(ctx, "cancel-test", "xmrig", sample, ResolutionHigh); {
	case err == nil:
		t.Log("InsertHashratePoint with cancelled context succeeded (SQLite may not check context)")
	default:
		t.Logf("InsertHashratePoint with cancelled context: %v (expected)", err)
	}
}
// TestDefaultStore_ContextTimeout documents how the default store behaves
// when its context deadline has already expired. As with cancellation,
// whether the SQLite driver honors the deadline is driver-dependent, so the
// outcome is logged rather than asserted.
func TestDefaultStore_ContextTimeout(t *testing.T) {
	cleanup := setupTestDB(t)
	defer cleanup()

	store := DefaultStore()

	// A 1ns deadline plus a 1ms sleep guarantees the context has expired
	// before the insert is attempted.
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
	defer cancel()
	time.Sleep(1 * time.Millisecond)

	sample := HashratePoint{Timestamp: time.Now(), Hashrate: 1000}
	switch err := store.InsertHashratePoint(ctx, "timeout-test", "xmrig", sample, ResolutionHigh); {
	case err == nil:
		t.Log("InsertHashratePoint with expired context succeeded (SQLite may not check context)")
	default:
		t.Logf("InsertHashratePoint with expired context: %v (expected)", err)
	}
}
// TestNopStore_WithContext verifies that the no-op store succeeds even when
// given a cancelled context — the nop implementation ignores its context
// argument entirely.
func TestNopStore_WithContext(t *testing.T) {
	store := NopStore()

	// Cancel up front; the nop store must not care.
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	sample := HashratePoint{Timestamp: time.Now(), Hashrate: 1000}
	if err := store.InsertHashratePoint(ctx, "nop-cancel-test", "xmrig", sample, ResolutionHigh); err != nil {
		t.Errorf("NopStore should succeed even with cancelled context: %v", err)
	}
}

5
database/session.go Normal file
View file

@ -0,0 +1,5 @@
package database
// This file previously contained session tracking functions.
// Session tracking is not currently integrated into the mining manager.
// The database schema still supports sessions for future use.

946
docs/docs.go Normal file
View file

@ -0,0 +1,946 @@
// Package docs Code generated by swaggo/swag. DO NOT EDIT
package docs
import "github.com/swaggo/swag"
const docTemplate = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{escape .Description}}",
"title": "{{.Title}}",
"contact": {},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/doctor": {
"post": {
"description": "Performs a live check on all available miners to verify their installation status, version, and path.",
"produces": [
"application/json"
],
"tags": [
"system"
],
"summary": "Check miner installations",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/mining.SystemInfo"
}
}
}
}
},
"/info": {
"get": {
"description": "Retrieves live installation details for all miners, along with system information.",
"produces": [
"application/json"
],
"tags": [
"system"
],
"summary": "Get live miner installation information",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/mining.SystemInfo"
}
},
"500": {
"description": "Internal server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/miners": {
"get": {
"description": "Get a list of all running miners",
"produces": [
"application/json"
],
"tags": [
"miners"
],
"summary": "List all running miners",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/mining.XMRigMiner"
}
}
}
}
}
},
"/miners/available": {
"get": {
"description": "Get a list of all available miners",
"produces": [
"application/json"
],
"tags": [
"miners"
],
"summary": "List all available miners",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/mining.AvailableMiner"
}
}
}
}
}
},
"/miners/{miner_name}": {
"delete": {
"description": "Stop a running miner by its name",
"produces": [
"application/json"
],
"tags": [
"miners"
],
"summary": "Stop a running miner",
"parameters": [
{
"type": "string",
"description": "Miner Name",
"name": "miner_name",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/miners/{miner_name}/hashrate-history": {
"get": {
"description": "Get historical hashrate data for a running miner",
"produces": [
"application/json"
],
"tags": [
"miners"
],
"summary": "Get miner hashrate history",
"parameters": [
{
"type": "string",
"description": "Miner Name",
"name": "miner_name",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/mining.HashratePoint"
}
}
}
}
}
},
"/miners/{miner_name}/logs": {
"get": {
"description": "Get the captured stdout/stderr output from a running miner",
"produces": [
"application/json"
],
"tags": [
"miners"
],
"summary": "Get miner log output",
"parameters": [
{
"type": "string",
"description": "Miner Name",
"name": "miner_name",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
},
"/miners/{miner_name}/stats": {
"get": {
"description": "Get statistics for a running miner",
"produces": [
"application/json"
],
"tags": [
"miners"
],
"summary": "Get miner stats",
"parameters": [
{
"type": "string",
"description": "Miner Name",
"name": "miner_name",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/mining.PerformanceMetrics"
}
}
}
}
},
"/miners/{miner_type}/install": {
"post": {
"description": "Install a new miner or update an existing one.",
"produces": [
"application/json"
],
"tags": [
"miners"
],
"summary": "Install or update a miner",
"parameters": [
{
"type": "string",
"description": "Miner Type to install/update",
"name": "miner_type",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/miners/{miner_type}/uninstall": {
"delete": {
"description": "Removes all files for a specific miner.",
"produces": [
"application/json"
],
"tags": [
"miners"
],
"summary": "Uninstall a miner",
"parameters": [
{
"type": "string",
"description": "Miner Type to uninstall",
"name": "miner_type",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/profiles": {
"get": {
"description": "Get a list of all saved mining profiles",
"produces": [
"application/json"
],
"tags": [
"profiles"
],
"summary": "List all mining profiles",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/mining.MiningProfile"
}
}
}
}
},
"post": {
"description": "Create and save a new mining profile",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"profiles"
],
"summary": "Create a new mining profile",
"parameters": [
{
"description": "Mining Profile",
"name": "profile",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/mining.MiningProfile"
}
}
],
"responses": {
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/mining.MiningProfile"
}
}
}
}
},
"/profiles/{id}": {
"get": {
"description": "Get a mining profile by its ID",
"produces": [
"application/json"
],
"tags": [
"profiles"
],
"summary": "Get a specific mining profile",
"parameters": [
{
"type": "string",
"description": "Profile ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/mining.MiningProfile"
}
}
}
},
"put": {
"description": "Update an existing mining profile",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"profiles"
],
"summary": "Update a mining profile",
"parameters": [
{
"type": "string",
"description": "Profile ID",
"name": "id",
"in": "path",
"required": true
},
{
"description": "Updated Mining Profile",
"name": "profile",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/mining.MiningProfile"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/mining.MiningProfile"
}
}
}
},
"delete": {
"description": "Delete a mining profile by its ID",
"produces": [
"application/json"
],
"tags": [
"profiles"
],
"summary": "Delete a mining profile",
"parameters": [
{
"type": "string",
"description": "Profile ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/profiles/{id}/start": {
"post": {
"description": "Start a new miner with the configuration from a saved profile",
"produces": [
"application/json"
],
"tags": [
"profiles"
],
"summary": "Start a new miner using a profile",
"parameters": [
{
"type": "string",
"description": "Profile ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/mining.XMRigMiner"
}
}
}
}
},
"/update": {
"post": {
"description": "Checks if any installed miners have a new version available for download.",
"produces": [
"application/json"
],
"tags": [
"system"
],
"summary": "Check for miner updates",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
}
},
"definitions": {
"mining.API": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"listenHost": {
"type": "string"
},
"listenPort": {
"type": "integer"
}
}
},
"mining.AvailableMiner": {
"type": "object",
"properties": {
"description": {
"type": "string"
},
"name": {
"type": "string"
}
}
},
"mining.HashratePoint": {
"type": "object",
"properties": {
"hashrate": {
"type": "integer"
},
"timestamp": {
"type": "string"
}
}
},
"mining.InstallationDetails": {
"type": "object",
"properties": {
"config_path": {
"description": "Add path to the miner-specific config",
"type": "string"
},
"is_installed": {
"type": "boolean"
},
"miner_binary": {
"type": "string"
},
"path": {
"type": "string"
},
"version": {
"type": "string"
}
}
},
"mining.MiningProfile": {
"type": "object",
"properties": {
"config": {
"description": "The raw JSON config for the specific miner",
"type": "object"
},
"id": {
"type": "string"
},
"minerType": {
"description": "e.g., \"xmrig\", \"ttminer\"",
"type": "string"
},
"name": {
"type": "string"
}
}
},
"mining.PerformanceMetrics": {
"type": "object",
"properties": {
"algorithm": {
"type": "string"
},
"extraData": {
"type": "object",
"additionalProperties": true
},
"hashrate": {
"type": "integer"
},
"lastShare": {
"type": "integer"
},
"rejected": {
"type": "integer"
},
"shares": {
"type": "integer"
},
"uptime": {
"type": "integer"
}
}
},
"mining.SystemInfo": {
"type": "object",
"properties": {
"architecture": {
"type": "string"
},
"available_cpu_cores": {
"type": "integer"
},
"go_version": {
"type": "string"
},
"installed_miners_info": {
"type": "array",
"items": {
"$ref": "#/definitions/mining.InstallationDetails"
}
},
"os": {
"type": "string"
},
"timestamp": {
"type": "string"
},
"total_system_ram_gb": {
"type": "number"
}
}
},
"mining.XMRigMiner": {
"type": "object",
"properties": {
"api": {
"$ref": "#/definitions/mining.API"
},
"configPath": {
"type": "string"
},
"full_stats": {
"$ref": "#/definitions/mining.XMRigSummary"
},
"hashrateHistory": {
"type": "array",
"items": {
"$ref": "#/definitions/mining.HashratePoint"
}
},
"lowResHashrateHistory": {
"type": "array",
"items": {
"$ref": "#/definitions/mining.HashratePoint"
}
},
"miner_binary": {
"type": "string"
},
"name": {
"type": "string"
},
"path": {
"type": "string"
},
"running": {
"type": "boolean"
},
"url": {
"type": "string"
},
"version": {
"type": "string"
}
}
},
"mining.XMRigSummary": {
"type": "object",
"properties": {
"algo": {
"type": "string"
},
"algorithms": {
"type": "array",
"items": {
"type": "string"
}
},
"connection": {
"type": "object",
"properties": {
"accepted": {
"type": "integer"
},
"algo": {
"type": "string"
},
"avg_time": {
"type": "integer"
},
"avg_time_ms": {
"type": "integer"
},
"diff": {
"type": "integer"
},
"failures": {
"type": "integer"
},
"hashes_total": {
"type": "integer"
},
"ip": {
"type": "string"
},
"ping": {
"type": "integer"
},
"pool": {
"type": "string"
},
"rejected": {
"type": "integer"
},
"tls": {
"type": "string"
},
"tls-fingerprint": {
"type": "string"
},
"uptime": {
"type": "integer"
},
"uptime_ms": {
"type": "integer"
}
}
},
"cpu": {
"type": "object",
"properties": {
"64_bit": {
"type": "boolean"
},
"aes": {
"type": "boolean"
},
"arch": {
"type": "string"
},
"assembly": {
"type": "string"
},
"avx2": {
"type": "boolean"
},
"backend": {
"type": "string"
},
"brand": {
"type": "string"
},
"cores": {
"type": "integer"
},
"family": {
"type": "integer"
},
"flags": {
"type": "array",
"items": {
"type": "string"
}
},
"l2": {
"type": "integer"
},
"l3": {
"type": "integer"
},
"model": {
"type": "integer"
},
"msr": {
"type": "string"
},
"nodes": {
"type": "integer"
},
"packages": {
"type": "integer"
},
"proc_info": {
"type": "integer"
},
"stepping": {
"type": "integer"
},
"threads": {
"type": "integer"
},
"x64": {
"type": "boolean"
}
}
},
"donate_level": {
"type": "integer"
},
"features": {
"type": "array",
"items": {
"type": "string"
}
},
"hashrate": {
"type": "object",
"properties": {
"highest": {
"type": "number"
},
"total": {
"type": "array",
"items": {
"type": "number"
}
}
}
},
"hugepages": {
"type": "array",
"items": {
"type": "integer"
}
},
"id": {
"type": "string"
},
"kind": {
"type": "string"
},
"paused": {
"type": "boolean"
},
"resources": {
"type": "object",
"properties": {
"hardware_concurrency": {
"type": "integer"
},
"load_average": {
"type": "array",
"items": {
"type": "number"
}
},
"memory": {
"type": "object",
"properties": {
"free": {
"type": "integer"
},
"resident_set_memory": {
"type": "integer"
},
"total": {
"type": "integer"
}
}
}
}
},
"restricted": {
"type": "boolean"
},
"results": {
"type": "object",
"properties": {
"avg_time": {
"type": "integer"
},
"avg_time_ms": {
"type": "integer"
},
"best": {
"type": "array",
"items": {
"type": "integer"
}
},
"diff_current": {
"type": "integer"
},
"hashes_total": {
"type": "integer"
},
"shares_good": {
"type": "integer"
},
"shares_total": {
"type": "integer"
}
}
},
"ua": {
"type": "string"
},
"uptime": {
"type": "integer"
},
"version": {
"type": "string"
},
"worker_id": {
"type": "string"
}
}
}
}
}`
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = &swag.Spec{
Version: "1.0",
Host: "localhost:8080",
BasePath: "/api/v1/mining",
Schemes: []string{},
Title: "Mining API",
Description: "This is a sample server for a mining application.",
InfoInstanceName: "swagger",
SwaggerTemplate: docTemplate,
LeftDelim: "{{",
RightDelim: "}}",
}
func init() {
swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo)
}

70
go.mod Normal file
View file

@ -0,0 +1,70 @@
module forge.lthn.ai/core/mining
go 1.25.5
require (
github.com/Masterminds/semver/v3 v3.4.0
github.com/adrg/xdg v0.5.3
github.com/ckanthony/gin-mcp v0.0.0-20251107113615-3c631c4fa9f4
github.com/gin-contrib/cors v1.7.6
github.com/gin-gonic/gin v1.11.0
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/mattn/go-sqlite3 v1.14.34
github.com/shirou/gopsutil/v4 v4.26.1
github.com/swaggo/files v1.0.1
github.com/swaggo/gin-swagger v1.6.1
github.com/swaggo/swag v1.16.6
)
require (
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/bytedance/sonic v1.14.0 // indirect
github.com/bytedance/sonic/loader v0.3.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/ebitengine/purego v0.9.1 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/spec v0.20.4 // indirect
github.com/go-openapi/swag v0.19.15 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/goccy/go-yaml v1.18.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/tklauser/go-sysconf v0.3.16 // indirect
github.com/tklauser/numcpus v0.11.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.3.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.uber.org/mock v0.5.0 // indirect
golang.org/x/arch v0.20.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/tools v0.34.0 // indirect
google.golang.org/protobuf v1.36.9 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

207
go.sum Normal file
View file

@ -0,0 +1,207 @@
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ=
github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA=
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/ckanthony/gin-mcp v0.0.0-20251107113615-3c631c4fa9f4 h1:V0tltxRKT8DZRXcn2ErLy4alznOBzWWmx4gnQbic9jE=
github.com/ckanthony/gin-mcp v0.0.0-20251107113615-3c631c4fa9f4/go.mod h1:eaCpaNzFM2bfCUXMPxbLFwI/ar67gAaVTNrltASGeoc=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/gin-contrib/cors v1.7.6 h1:3gQ8GMzs1Ylpf70y8bMw4fVpycXIeX1ZemuSQIsnQQY=
github.com/gin-contrib/cors v1.7.6/go.mod h1:Ulcl+xN4jel9t1Ry8vqph23a60FwH9xVLd+3ykmTjOk=
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.34 h1:3NtcvcUnFBPsuRcno8pUtupspG/GM+9nZ88zgJcp6Zk=
github.com/mattn/go-sqlite3 v1.14.34/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/shirou/gopsutil/v4 v4.26.1 h1:TOkEyriIXk2HX9d4isZJtbjXbEjf5qyKPAzbzY0JWSo=
github.com/shirou/gopsutil/v4 v4.26.1/go.mod h1:medLI9/UNAb0dOI9Q3/7yWSqKkj00u+1tgY8nvv41pc=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE=
github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg=
github.com/swaggo/gin-swagger v1.6.1 h1:Ri06G4gc9N4t4k8hekMigJ9zKTFSlqj/9paAQCQs7cY=
github.com/swaggo/gin-swagger v1.6.1/go.mod h1:LQ+hJStHakCWRiK/YNYtJOu4mR2FP+pxLnILT/qNiTw=
github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI=
github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg=
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA=
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c=
golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

284
logging/logger.go Normal file
View file

@ -0,0 +1,284 @@
// Package logging provides structured logging with log levels and fields.
package logging
import (
	"fmt"
	"io"
	"os"
	"sort"
	"strings"
	"sync"
	"time"
)
// Level represents the severity of a log message. Levels are ordered by
// increasing severity; a Logger emits a message only when the message level
// is at or above the logger's configured minimum.
type Level int

const (
	// LevelDebug is the most verbose log level.
	LevelDebug Level = iota
	// LevelInfo is for general informational messages.
	LevelInfo
	// LevelWarn is for warning messages.
	LevelWarn
	// LevelError is for error messages.
	LevelError
)
// String returns the string representation of the log level, or "UNKNOWN"
// for values outside the defined range.
func (l Level) String() string {
	names := [...]string{"DEBUG", "INFO", "WARN", "ERROR"}
	if l < 0 || int(l) >= len(names) {
		return "UNKNOWN"
	}
	return names[l]
}
// Logger provides structured logging with configurable output and level.
// The mutex serializes level updates and writes to output, so a single
// Logger is safe for concurrent use.
type Logger struct {
	mu        sync.Mutex // guards level and serializes writes to output
	output    io.Writer  // destination for formatted log lines
	level     Level      // minimum level that will be emitted
	component string     // optional tag rendered as "[component]" on each line
}
// Config holds configuration for creating a new Logger.
type Config struct {
	Output    io.Writer // destination for log lines; nil falls back to os.Stderr in New
	Level     Level     // minimum level to emit
	Component string    // optional component tag for every line
}
// DefaultConfig returns the default logger configuration: Info-level
// logging to stderr with no component tag.
func DefaultConfig() Config {
	return Config{Output: os.Stderr, Level: LevelInfo}
}
// New creates a new Logger with the given configuration. A nil Output is
// replaced with os.Stderr so the returned logger is always usable.
func New(cfg Config) *Logger {
	out := cfg.Output
	if out == nil {
		out = os.Stderr
	}
	return &Logger{output: out, level: cfg.Level, component: cfg.Component}
}
// WithComponent returns a new Logger with the specified component name.
// The child shares the parent's output writer and inherits its current
// level, but carries its own mutex.
//
// Fix: the parent's mutex is held while its fields are copied; the original
// read l.level and l.output unlocked, racing a concurrent SetLevel.
func (l *Logger) WithComponent(component string) *Logger {
	l.mu.Lock()
	defer l.mu.Unlock()
	return &Logger{
		output:    l.output,
		level:     l.level,
		component: component,
	}
}
// SetLevel sets the minimum log level; messages below it are discarded.
func (l *Logger) SetLevel(level Level) {
	l.mu.Lock()
	l.level = level
	l.mu.Unlock()
}
// GetLevel returns the current minimum log level.
func (l *Logger) GetLevel() Level {
	l.mu.Lock()
	lvl := l.level
	l.mu.Unlock()
	return lvl
}
// Fields represents key-value pairs for structured logging. Values are
// rendered with fmt's %v verb when the line is formatted.
type Fields map[string]interface{}
// log writes a log message at the specified level. The line format is:
//
//	2006/01/02 15:04:05 [LEVEL] [component] msg | k1=v1 k2=v2
//
// The mutex is held for the full format-and-write so concurrent callers
// cannot interleave output on the shared writer.
//
// Fix: field keys are sorted before rendering; the original ranged over the
// map directly, producing a random field order on every call.
func (l *Logger) log(level Level, msg string, fields Fields) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if level < l.level {
		return
	}
	// Build the log line
	var sb strings.Builder
	timestamp := time.Now().Format("2006/01/02 15:04:05")
	sb.WriteString(timestamp)
	sb.WriteString(" [")
	sb.WriteString(level.String())
	sb.WriteString("]")
	if l.component != "" {
		sb.WriteString(" [")
		sb.WriteString(l.component)
		sb.WriteString("]")
	}
	sb.WriteString(" ")
	sb.WriteString(msg)
	// Add fields if present, in deterministic (sorted) key order.
	if len(fields) > 0 {
		sb.WriteString(" |")
		keys := make([]string, 0, len(fields))
		for k := range fields {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			sb.WriteString(" ")
			sb.WriteString(k)
			sb.WriteString("=")
			sb.WriteString(fmt.Sprintf("%v", fields[k]))
		}
	}
	sb.WriteString("\n")
	fmt.Fprint(l.output, sb.String())
}
// Debug logs a debug message.
func (l *Logger) Debug(msg string, fields ...Fields) {
	merged := mergeFields(fields)
	l.log(LevelDebug, msg, merged)
}

// Info logs an informational message.
func (l *Logger) Info(msg string, fields ...Fields) {
	merged := mergeFields(fields)
	l.log(LevelInfo, msg, merged)
}

// Warn logs a warning message.
func (l *Logger) Warn(msg string, fields ...Fields) {
	merged := mergeFields(fields)
	l.log(LevelWarn, msg, merged)
}

// Error logs an error message.
func (l *Logger) Error(msg string, fields ...Fields) {
	merged := mergeFields(fields)
	l.log(LevelError, msg, merged)
}
// Debugf logs a formatted debug message.
func (l *Logger) Debugf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	l.log(LevelDebug, msg, nil)
}

// Infof logs a formatted informational message.
func (l *Logger) Infof(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	l.log(LevelInfo, msg, nil)
}

// Warnf logs a formatted warning message.
func (l *Logger) Warnf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	l.log(LevelWarn, msg, nil)
}

// Errorf logs a formatted error message.
func (l *Logger) Errorf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	l.log(LevelError, msg, nil)
}
// mergeFields combines multiple Fields maps into one. Keys in later maps
// override the same keys in earlier maps. A nil result (meaning "no
// fields") is returned when the slice is empty.
func mergeFields(fields []Fields) Fields {
	if len(fields) == 0 {
		return nil
	}
	merged := Fields{}
	for _, m := range fields {
		for key, val := range m {
			merged[key] = val
		}
	}
	return merged
}
// --- Global logger for convenience ---

// globalLogger is the process-wide default logger; the pointer itself is
// guarded by globalMu (the Logger has its own internal mutex).
var (
	globalLogger = New(DefaultConfig())
	globalMu     sync.RWMutex
)
// SetGlobal sets the global logger instance.
func SetGlobal(l *Logger) {
	globalMu.Lock()
	globalLogger = l
	globalMu.Unlock()
}
// GetGlobal returns the global logger instance.
func GetGlobal() *Logger {
	globalMu.RLock()
	logger := globalLogger
	globalMu.RUnlock()
	return logger
}
// SetGlobalLevel sets the log level of the global logger.
func SetGlobalLevel(level Level) {
	GetGlobal().SetLevel(level)
}
// Global convenience functions that use the global logger

// Debug logs a debug message using the global logger.
func Debug(msg string, fields ...Fields) {
	logger := GetGlobal()
	logger.Debug(msg, fields...)
}

// Info logs an informational message using the global logger.
func Info(msg string, fields ...Fields) {
	logger := GetGlobal()
	logger.Info(msg, fields...)
}

// Warn logs a warning message using the global logger.
func Warn(msg string, fields ...Fields) {
	logger := GetGlobal()
	logger.Warn(msg, fields...)
}

// Error logs an error message using the global logger.
func Error(msg string, fields ...Fields) {
	logger := GetGlobal()
	logger.Error(msg, fields...)
}
// Debugf logs a formatted debug message using the global logger.
func Debugf(format string, args ...interface{}) {
	logger := GetGlobal()
	logger.Debugf(format, args...)
}

// Infof logs a formatted informational message using the global logger.
func Infof(format string, args ...interface{}) {
	logger := GetGlobal()
	logger.Infof(format, args...)
}

// Warnf logs a formatted warning message using the global logger.
func Warnf(format string, args ...interface{}) {
	logger := GetGlobal()
	logger.Warnf(format, args...)
}

// Errorf logs a formatted error message using the global logger.
func Errorf(format string, args ...interface{}) {
	logger := GetGlobal()
	logger.Errorf(format, args...)
}
// ParseLevel parses a string into a log level. Matching is case-insensitive
// and "WARNING" is accepted as an alias for "WARN". Unknown strings return
// LevelInfo along with an error.
func ParseLevel(s string) (Level, error) {
	levels := map[string]Level{
		"DEBUG":   LevelDebug,
		"INFO":    LevelInfo,
		"WARN":    LevelWarn,
		"WARNING": LevelWarn,
		"ERROR":   LevelError,
	}
	if lvl, ok := levels[strings.ToUpper(s)]; ok {
		return lvl, nil
	}
	return LevelInfo, fmt.Errorf("unknown log level: %s", s)
}

262
logging/logger_test.go Normal file
View file

@ -0,0 +1,262 @@
package logging
import (
"bytes"
"strings"
"testing"
)
// TestLoggerLevels verifies level filtering: at LevelInfo, Debug output is
// suppressed while Info/Warn/Error lines are written with their level tags.
func TestLoggerLevels(t *testing.T) {
	var buf bytes.Buffer
	logger := New(Config{
		Output: &buf,
		Level:  LevelInfo,
	})
	// Debug should not appear at Info level
	logger.Debug("debug message")
	if buf.Len() > 0 {
		t.Error("Debug message should not appear at Info level")
	}
	// Info should appear
	logger.Info("info message")
	if !strings.Contains(buf.String(), "[INFO]") {
		t.Error("Info message should appear")
	}
	if !strings.Contains(buf.String(), "info message") {
		t.Error("Info message content should appear")
	}
	buf.Reset()
	// Warn should appear
	logger.Warn("warn message")
	if !strings.Contains(buf.String(), "[WARN]") {
		t.Error("Warn message should appear")
	}
	buf.Reset()
	// Error should appear
	logger.Error("error message")
	if !strings.Contains(buf.String(), "[ERROR]") {
		t.Error("Error message should appear")
	}
}
// TestLoggerDebugLevel verifies debug output is emitted when the logger's
// minimum level is LevelDebug.
func TestLoggerDebugLevel(t *testing.T) {
	var buf bytes.Buffer
	logger := New(Config{
		Output: &buf,
		Level:  LevelDebug,
	})
	logger.Debug("debug message")
	if !strings.Contains(buf.String(), "[DEBUG]") {
		t.Error("Debug message should appear at Debug level")
	}
}

// TestLoggerWithFields verifies structured fields are rendered as
// "key=value" pairs on the log line. Only substring presence is asserted,
// so the test does not depend on field ordering.
func TestLoggerWithFields(t *testing.T) {
	var buf bytes.Buffer
	logger := New(Config{
		Output: &buf,
		Level:  LevelInfo,
	})
	logger.Info("test message", Fields{"key": "value", "num": 42})
	output := buf.String()
	if !strings.Contains(output, "key=value") {
		t.Error("Field key=value should appear")
	}
	if !strings.Contains(output, "num=42") {
		t.Error("Field num=42 should appear")
	}
}
// TestLoggerWithComponent verifies the configured component tag is rendered
// as "[TestComponent]" on each line.
func TestLoggerWithComponent(t *testing.T) {
	var buf bytes.Buffer
	logger := New(Config{
		Output:    &buf,
		Level:     LevelInfo,
		Component: "TestComponent",
	})
	logger.Info("test message")
	output := buf.String()
	if !strings.Contains(output, "[TestComponent]") {
		t.Error("Component name should appear in log")
	}
}

// TestLoggerDerivedComponent verifies a WithComponent child logger writes to
// the parent's output with its own component tag.
func TestLoggerDerivedComponent(t *testing.T) {
	var buf bytes.Buffer
	parent := New(Config{
		Output: &buf,
		Level:  LevelInfo,
	})
	child := parent.WithComponent("ChildComponent")
	child.Info("child message")
	output := buf.String()
	if !strings.Contains(output, "[ChildComponent]") {
		t.Error("Derived component name should appear")
	}
}

// TestLoggerFormatted verifies the printf-style Infof variant formats its
// arguments into the message.
func TestLoggerFormatted(t *testing.T) {
	var buf bytes.Buffer
	logger := New(Config{
		Output: &buf,
		Level:  LevelInfo,
	})
	logger.Infof("formatted %s %d", "string", 123)
	output := buf.String()
	if !strings.Contains(output, "formatted string 123") {
		t.Errorf("Formatted message should appear, got: %s", output)
	}
}
// TestSetLevel verifies the level can be lowered after construction and that
// GetLevel reflects the change.
func TestSetLevel(t *testing.T) {
	var buf bytes.Buffer
	logger := New(Config{
		Output: &buf,
		Level:  LevelError,
	})
	// Info should not appear at Error level
	logger.Info("should not appear")
	if buf.Len() > 0 {
		t.Error("Info should not appear at Error level")
	}
	// Change to Info level
	logger.SetLevel(LevelInfo)
	logger.Info("should appear now")
	if !strings.Contains(buf.String(), "should appear now") {
		t.Error("Info should appear after level change")
	}
	// Verify GetLevel
	if logger.GetLevel() != LevelInfo {
		t.Error("GetLevel should return LevelInfo")
	}
}

// TestParseLevel covers every accepted spelling (including the WARNING
// alias and lowercase forms) plus the error path for unknown input.
func TestParseLevel(t *testing.T) {
	tests := []struct {
		input    string
		expected Level
		wantErr  bool
	}{
		{"DEBUG", LevelDebug, false},
		{"debug", LevelDebug, false},
		{"INFO", LevelInfo, false},
		{"info", LevelInfo, false},
		{"WARN", LevelWarn, false},
		{"WARNING", LevelWarn, false},
		{"ERROR", LevelError, false},
		{"error", LevelError, false},
		{"invalid", LevelInfo, true},
	}
	for _, tt := range tests {
		t.Run(tt.input, func(t *testing.T) {
			level, err := ParseLevel(tt.input)
			if tt.wantErr && err == nil {
				t.Error("Expected error but got none")
			}
			if !tt.wantErr && err != nil {
				t.Errorf("Unexpected error: %v", err)
			}
			if !tt.wantErr && level != tt.expected {
				t.Errorf("Expected %v, got %v", tt.expected, level)
			}
		})
	}
}
// TestGlobalLogger exercises the package-level logger: SetGlobal swaps the
// instance, SetGlobalLevel filters output. NOTE: this test mutates shared
// global state, so it restores the default logger at the end; it is not
// safe to run in parallel with other tests that use the global logger.
func TestGlobalLogger(t *testing.T) {
	var buf bytes.Buffer
	logger := New(Config{
		Output: &buf,
		Level:  LevelInfo,
	})
	SetGlobal(logger)
	Info("global test")
	if !strings.Contains(buf.String(), "global test") {
		t.Error("Global logger should write message")
	}
	buf.Reset()
	SetGlobalLevel(LevelError)
	Info("should not appear")
	if buf.Len() > 0 {
		t.Error("Info should not appear at Error level")
	}
	// Reset to default for other tests
	SetGlobal(New(DefaultConfig()))
}

// TestLevelString checks each defined level's name and the "UNKNOWN"
// fallback for out-of-range values.
func TestLevelString(t *testing.T) {
	tests := []struct {
		level    Level
		expected string
	}{
		{LevelDebug, "DEBUG"},
		{LevelInfo, "INFO"},
		{LevelWarn, "WARN"},
		{LevelError, "ERROR"},
		{Level(99), "UNKNOWN"},
	}
	for _, tt := range tests {
		if got := tt.level.String(); got != tt.expected {
			t.Errorf("Level(%d).String() = %s, want %s", tt.level, got, tt.expected)
		}
	}
}
// TestMergeFields covers the merge helper: nil/empty input returns nil,
// single and multiple maps are preserved, and later maps win on key clash.
func TestMergeFields(t *testing.T) {
	// Empty fields
	result := mergeFields(nil)
	if result != nil {
		t.Error("nil input should return nil")
	}
	result = mergeFields([]Fields{})
	if result != nil {
		t.Error("empty input should return nil")
	}
	// Single fields
	result = mergeFields([]Fields{{"key": "value"}})
	if result["key"] != "value" {
		t.Error("Single field should be preserved")
	}
	// Multiple fields
	result = mergeFields([]Fields{
		{"key1": "value1"},
		{"key2": "value2"},
	})
	if result["key1"] != "value1" || result["key2"] != "value2" {
		t.Error("Multiple fields should be merged")
	}
	// Override
	result = mergeFields([]Fields{
		{"key": "value1"},
		{"key": "value2"},
	})
	if result["key"] != "value2" {
		t.Error("Later fields should override earlier ones")
	}
}

274
mining/auth.go Normal file
View file

@ -0,0 +1,274 @@
package mining
import (
"crypto/md5"
"crypto/rand"
"crypto/subtle"
"encoding/hex"
"fmt"
"net/http"
"os"
"strings"
"sync"
"time"
"forge.lthn.ai/core/mining/logging"
"github.com/gin-gonic/gin"
)
// AuthConfig holds authentication configuration for the mining API.
type AuthConfig struct {
	// Enabled determines if authentication is required
	Enabled bool
	// Username for basic/digest auth
	Username string
	// Password for basic/digest auth
	Password string
	// Realm for digest auth (also reported in the WWW-Authenticate challenge)
	Realm string
	// NonceExpiry is how long a nonce is valid before it is discarded
	NonceExpiry time.Duration
}
// DefaultAuthConfig returns the default auth configuration.
// Auth is disabled by default for local development; credentials are empty,
// the realm is "Mining API" and nonces expire after five minutes.
func DefaultAuthConfig() AuthConfig {
	return AuthConfig{
		Realm:       "Mining API",
		NonceExpiry: 5 * time.Minute,
	}
}
// AuthConfigFromEnv creates auth config from environment variables.
// Set MINING_API_AUTH=true to enable, MINING_API_USER and MINING_API_PASS
// for credentials, and optionally MINING_API_REALM to override the realm.
// If auth is requested but either credential is missing, auth stays
// disabled and a warning is logged.
func AuthConfigFromEnv() AuthConfig {
	cfg := DefaultAuthConfig()
	if os.Getenv("MINING_API_AUTH") == "true" {
		cfg.Username = os.Getenv("MINING_API_USER")
		cfg.Password = os.Getenv("MINING_API_PASS")
		cfg.Enabled = cfg.Username != "" && cfg.Password != ""
		if !cfg.Enabled {
			logging.Warn("API auth enabled but credentials not set", logging.Fields{
				"hint": "Set MINING_API_USER and MINING_API_PASS environment variables",
			})
		}
	}
	if realm := os.Getenv("MINING_API_REALM"); realm != "" {
		cfg.Realm = realm
	}
	return cfg
}
// DigestAuth implements HTTP Digest Authentication middleware
// (with Basic authentication accepted as a fallback).
type DigestAuth struct {
	config   AuthConfig
	nonces   sync.Map      // map[string]time.Time for nonce expiry tracking
	stopChan chan struct{} // closed by Stop to end the cleanup goroutine
	stopOnce sync.Once     // ensures stopChan is closed at most once
}
// NewDigestAuth creates a new digest auth middleware and starts a
// background goroutine that reaps expired nonces until Stop is called.
func NewDigestAuth(config AuthConfig) *DigestAuth {
	auth := &DigestAuth{
		config:   config,
		stopChan: make(chan struct{}),
	}
	go auth.cleanupNonces()
	return auth
}
// Stop gracefully shuts down the DigestAuth, stopping the cleanup goroutine.
// Safe to call multiple times.
func (da *DigestAuth) Stop() {
	da.stopOnce.Do(func() { close(da.stopChan) })
}
// Middleware returns a Gin middleware that enforces authentication when
// enabled. Digest credentials are tried first, then Basic as a fallback;
// any request without valid credentials receives a 401 with a fresh digest
// challenge.
func (da *DigestAuth) Middleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		if !da.config.Enabled {
			c.Next()
			return
		}
		header := c.GetHeader("Authorization")
		authorized := false
		switch {
		case header == "":
			// no credentials supplied; fall through to the challenge
		case strings.HasPrefix(header, "Digest "):
			authorized = da.validateDigest(c, header)
		case strings.HasPrefix(header, "Basic "):
			authorized = da.validateBasic(c, header)
		}
		if authorized {
			c.Next()
			return
		}
		da.sendChallenge(c)
	}
}
// sendChallenge sends a 401 response with a digest auth challenge. The
// freshly generated nonce is recorded so a follow-up request can present it.
func (da *DigestAuth) sendChallenge(c *gin.Context) {
	nonce := da.generateNonce()
	da.nonces.Store(nonce, time.Now())
	c.Header("WWW-Authenticate", fmt.Sprintf(
		`Digest realm="%s", qop="auth", nonce="%s", opaque="%s"`,
		da.config.Realm, nonce, da.generateOpaque(),
	))
	c.AbortWithStatusJSON(http.StatusUnauthorized, APIError{
		Code:       "AUTH_REQUIRED",
		Message:    "Authentication required",
		Suggestion: "Provide valid credentials using Digest or Basic authentication",
	})
}
// validateDigest validates a digest auth header against the configured
// credentials. It checks, in order: that the nonce was issued by this
// server and has not expired, that the username matches, and that the
// client's response hash matches the expected RFC 2617 computation.
//
// NOTE(review): a nonce stays valid until its expiry and the nc (nonce
// count) is not tracked, so a captured request can be replayed within the
// expiry window — confirm whether replay protection is required here.
func (da *DigestAuth) validateDigest(c *gin.Context, authHeader string) bool {
	params := parseDigestParams(authHeader[7:]) // Skip "Digest "
	nonce := params["nonce"]
	if nonce == "" {
		return false
	}
	// Check nonce validity: it must exist in the issued set and be younger
	// than the configured expiry.
	if storedTime, ok := da.nonces.Load(nonce); ok {
		if time.Since(storedTime.(time.Time)) > da.config.NonceExpiry {
			da.nonces.Delete(nonce)
			return false
		}
	} else {
		return false
	}
	// Validate username with constant-time comparison to prevent timing attacks
	if subtle.ConstantTimeCompare([]byte(params["username"]), []byte(da.config.Username)) != 1 {
		return false
	}
	// Calculate expected response: HA1 covers credentials+realm, HA2 covers
	// method+URI; with qop=auth the client's nc/cnonce are mixed in too.
	ha1 := md5Hash(fmt.Sprintf("%s:%s:%s", da.config.Username, da.config.Realm, da.config.Password))
	ha2 := md5Hash(fmt.Sprintf("%s:%s", c.Request.Method, params["uri"]))
	var expectedResponse string
	if params["qop"] == "auth" {
		expectedResponse = md5Hash(fmt.Sprintf("%s:%s:%s:%s:%s:%s",
			ha1, nonce, params["nc"], params["cnonce"], params["qop"], ha2))
	} else {
		expectedResponse = md5Hash(fmt.Sprintf("%s:%s:%s", ha1, nonce, ha2))
	}
	// Constant-time comparison to prevent timing attacks
	return subtle.ConstantTimeCompare([]byte(expectedResponse), []byte(params["response"])) == 1
}
// validateBasic validates a basic auth header against the configured
// credentials. The authHeader argument is unused because Gin's
// Request.BasicAuth performs the decoding; it is kept for signature
// symmetry with validateDigest.
func (da *DigestAuth) validateBasic(c *gin.Context, authHeader string) bool {
	user, pass, ok := c.Request.BasicAuth()
	if !ok {
		return false
	}
	// Compare both fields unconditionally with constant-time comparisons so
	// response timing does not reveal which credential was wrong.
	userOK := subtle.ConstantTimeCompare([]byte(user), []byte(da.config.Username))
	passOK := subtle.ConstantTimeCompare([]byte(pass), []byte(da.config.Password))
	return userOK == 1 && passOK == 1
}
// generateNonce creates a cryptographically random 16-byte nonce encoded as
// hex. If crypto/rand fails (which should never happen on a properly
// configured system) it degrades to a time-derived nonce rather than
// refusing all requests.
func (da *DigestAuth) generateNonce() string {
	var buf [16]byte
	if _, err := rand.Read(buf[:]); err != nil {
		return hex.EncodeToString([]byte(fmt.Sprintf("%d", time.Now().UnixNano())))
	}
	return hex.EncodeToString(buf[:])
}
// generateOpaque creates an opaque value for the digest challenge.
// It is derived only from the realm, so it is constant for the lifetime of
// the configuration; validateDigest does not check it on later requests.
func (da *DigestAuth) generateOpaque() string {
	return md5Hash(da.config.Realm)
}
// cleanupNonces removes expired nonces periodically. The sweep interval is
// the configured NonceExpiry (defaulting to five minutes when unset), and
// the loop exits when Stop closes stopChan.
func (da *DigestAuth) cleanupNonces() {
	interval := da.config.NonceExpiry
	if interval <= 0 {
		interval = 5 * time.Minute // Default if not set
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-da.stopChan:
			return
		case <-ticker.C:
			da.nonces.Range(func(key, value interface{}) bool {
				if time.Since(value.(time.Time)) > da.config.NonceExpiry {
					da.nonces.Delete(key)
				}
				return true
			})
		}
	}
}
// parseDigestParams parses the comma-separated key=value parameters of a
// Digest Authorization header value (the portion after the "Digest "
// prefix). Surrounding double quotes are stripped from values and
// parameters without an "=" are ignored.
//
// Limitation: quoted values that themselves contain a comma are split
// incorrectly; the parameters this package reads (username, realm, nonce,
// uri, qop, nc, cnonce, response) do not normally contain commas.
func parseDigestParams(header string) map[string]string {
	params := make(map[string]string)
	for _, part := range strings.Split(header, ",") {
		// Split each "key=value" on the first "=" only; values such as
		// base64 payloads may legitimately contain further "=" signs.
		key, value, found := strings.Cut(strings.TrimSpace(part), "=")
		if !found {
			continue
		}
		key = strings.TrimSpace(key)
		// Remove surrounding whitespace, then quotes
		value = strings.Trim(strings.TrimSpace(value), `"`)
		params[key] = value
	}
	return params
}
// md5Hash returns the lowercase hexadecimal MD5 digest of s.
// MD5 is mandated here by the HTTP Digest authentication scheme; it is not
// used as a general-purpose secure hash.
func md5Hash(s string) string {
	digest := md5.New()
	digest.Write([]byte(s)) // hash.Hash.Write never returns an error
	return hex.EncodeToString(digest.Sum(nil))
}

604
mining/auth_test.go Normal file
View file

@ -0,0 +1,604 @@
package mining
import (
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/gin-gonic/gin"
)
// Put Gin into test mode once for the whole package so handlers do not log
// debug output during tests.
func init() {
	gin.SetMode(gin.TestMode)
}

// TestDefaultAuthConfig pins the documented defaults: auth disabled, empty
// credentials, "Mining API" realm, five-minute nonce expiry.
func TestDefaultAuthConfig(t *testing.T) {
	cfg := DefaultAuthConfig()
	if cfg.Enabled {
		t.Error("expected Enabled to be false by default")
	}
	if cfg.Username != "" {
		t.Error("expected Username to be empty by default")
	}
	if cfg.Password != "" {
		t.Error("expected Password to be empty by default")
	}
	if cfg.Realm != "Mining API" {
		t.Errorf("expected Realm to be 'Mining API', got %s", cfg.Realm)
	}
	if cfg.NonceExpiry != 5*time.Minute {
		t.Errorf("expected NonceExpiry to be 5 minutes, got %v", cfg.NonceExpiry)
	}
}
// TestAuthConfigFromEnv exercises each environment-variable path of
// AuthConfigFromEnv. The original environment is restored via os.Setenv;
// note this leaves formerly-unset variables set to "", which Getenv treats
// identically to unset.
func TestAuthConfigFromEnv(t *testing.T) {
	// Save original env
	origAuth := os.Getenv("MINING_API_AUTH")
	origUser := os.Getenv("MINING_API_USER")
	origPass := os.Getenv("MINING_API_PASS")
	origRealm := os.Getenv("MINING_API_REALM")
	defer func() {
		os.Setenv("MINING_API_AUTH", origAuth)
		os.Setenv("MINING_API_USER", origUser)
		os.Setenv("MINING_API_PASS", origPass)
		os.Setenv("MINING_API_REALM", origRealm)
	}()
	t.Run("auth disabled by default", func(t *testing.T) {
		os.Setenv("MINING_API_AUTH", "")
		cfg := AuthConfigFromEnv()
		if cfg.Enabled {
			t.Error("expected Enabled to be false when env not set")
		}
	})
	t.Run("auth enabled with valid credentials", func(t *testing.T) {
		os.Setenv("MINING_API_AUTH", "true")
		os.Setenv("MINING_API_USER", "testuser")
		os.Setenv("MINING_API_PASS", "testpass")
		cfg := AuthConfigFromEnv()
		if !cfg.Enabled {
			t.Error("expected Enabled to be true")
		}
		if cfg.Username != "testuser" {
			t.Errorf("expected Username 'testuser', got %s", cfg.Username)
		}
		if cfg.Password != "testpass" {
			t.Errorf("expected Password 'testpass', got %s", cfg.Password)
		}
	})
	t.Run("auth disabled if credentials missing", func(t *testing.T) {
		os.Setenv("MINING_API_AUTH", "true")
		os.Setenv("MINING_API_USER", "")
		os.Setenv("MINING_API_PASS", "")
		cfg := AuthConfigFromEnv()
		if cfg.Enabled {
			t.Error("expected Enabled to be false when credentials missing")
		}
	})
	t.Run("custom realm", func(t *testing.T) {
		os.Setenv("MINING_API_AUTH", "")
		os.Setenv("MINING_API_REALM", "Custom Realm")
		cfg := AuthConfigFromEnv()
		if cfg.Realm != "Custom Realm" {
			t.Errorf("expected Realm 'Custom Realm', got %s", cfg.Realm)
		}
	})
}
// TestNewDigestAuth verifies construction succeeds and can be cleanly
// stopped.
func TestNewDigestAuth(t *testing.T) {
	cfg := AuthConfig{
		Enabled:     true,
		Username:    "user",
		Password:    "pass",
		Realm:       "Test",
		NonceExpiry: time.Second,
	}
	da := NewDigestAuth(cfg)
	if da == nil {
		t.Fatal("expected non-nil DigestAuth")
	}
	// Cleanup
	da.Stop()
}

// TestDigestAuthStop verifies Stop is idempotent (guarded by sync.Once).
func TestDigestAuthStop(t *testing.T) {
	cfg := DefaultAuthConfig()
	da := NewDigestAuth(cfg)
	// Should not panic when called multiple times
	da.Stop()
	da.Stop()
	da.Stop()
}

// TestMiddlewareAuthDisabled verifies requests pass straight through when
// authentication is disabled.
func TestMiddlewareAuthDisabled(t *testing.T) {
	cfg := AuthConfig{Enabled: false}
	da := NewDigestAuth(cfg)
	defer da.Stop()
	router := gin.New()
	router.Use(da.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "success")
	})
	req := httptest.NewRequest("GET", "/test", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("expected status 200, got %d", w.Code)
	}
	if w.Body.String() != "success" {
		t.Errorf("expected body 'success', got %s", w.Body.String())
	}
}
// TestMiddlewareNoAuth verifies an unauthenticated request gets a 401 with
// a Digest challenge carrying the configured realm.
func TestMiddlewareNoAuth(t *testing.T) {
	cfg := AuthConfig{
		Enabled:     true,
		Username:    "user",
		Password:    "pass",
		Realm:       "Test",
		NonceExpiry: 5 * time.Minute,
	}
	da := NewDigestAuth(cfg)
	defer da.Stop()
	router := gin.New()
	router.Use(da.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "success")
	})
	req := httptest.NewRequest("GET", "/test", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusUnauthorized {
		t.Errorf("expected status 401, got %d", w.Code)
	}
	wwwAuth := w.Header().Get("WWW-Authenticate")
	if wwwAuth == "" {
		t.Error("expected WWW-Authenticate header")
	}
	if !authTestContains(wwwAuth, "Digest") {
		t.Error("expected Digest challenge in WWW-Authenticate")
	}
	if !authTestContains(wwwAuth, `realm="Test"`) {
		t.Error("expected realm in WWW-Authenticate")
	}
}

// TestMiddlewareBasicAuthValid verifies the Basic-auth fallback admits
// correct credentials.
func TestMiddlewareBasicAuthValid(t *testing.T) {
	cfg := AuthConfig{
		Enabled:     true,
		Username:    "user",
		Password:    "pass",
		Realm:       "Test",
		NonceExpiry: 5 * time.Minute,
	}
	da := NewDigestAuth(cfg)
	defer da.Stop()
	router := gin.New()
	router.Use(da.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "success")
	})
	req := httptest.NewRequest("GET", "/test", nil)
	req.SetBasicAuth("user", "pass")
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("expected status 200, got %d", w.Code)
	}
}
// TestMiddlewareBasicAuthInvalid verifies every combination of wrong or
// empty Basic credentials is rejected with 401.
func TestMiddlewareBasicAuthInvalid(t *testing.T) {
	cfg := AuthConfig{
		Enabled:     true,
		Username:    "user",
		Password:    "pass",
		Realm:       "Test",
		NonceExpiry: 5 * time.Minute,
	}
	da := NewDigestAuth(cfg)
	defer da.Stop()
	router := gin.New()
	router.Use(da.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "success")
	})
	testCases := []struct {
		name     string
		user     string
		password string
	}{
		{"wrong user", "wronguser", "pass"},
		{"wrong password", "user", "wrongpass"},
		{"both wrong", "wronguser", "wrongpass"},
		{"empty user", "", "pass"},
		{"empty password", "user", ""},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			req := httptest.NewRequest("GET", "/test", nil)
			req.SetBasicAuth(tc.user, tc.password)
			w := httptest.NewRecorder()
			router.ServeHTTP(w, req)
			if w.Code != http.StatusUnauthorized {
				t.Errorf("expected status 401, got %d", w.Code)
			}
		})
	}
}
// TestMiddlewareDigestAuthValid performs a full digest handshake: the first
// request harvests a server-issued nonce from the 401 challenge, then a
// second request computes the RFC 2617 qop=auth response hash and must be
// admitted.
func TestMiddlewareDigestAuthValid(t *testing.T) {
	cfg := AuthConfig{
		Enabled:     true,
		Username:    "testuser",
		Password:    "testpass",
		Realm:       "Test Realm",
		NonceExpiry: 5 * time.Minute,
	}
	da := NewDigestAuth(cfg)
	defer da.Stop()
	router := gin.New()
	router.Use(da.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "success")
	})
	// First request to get nonce
	req := httptest.NewRequest("GET", "/test", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusUnauthorized {
		t.Fatalf("expected 401 to get nonce, got %d", w.Code)
	}
	wwwAuth := w.Header().Get("WWW-Authenticate")
	params := parseDigestParams(wwwAuth[7:]) // Skip "Digest "
	nonce := params["nonce"]
	if nonce == "" {
		t.Fatal("nonce not found in challenge")
	}
	// Build digest auth response
	uri := "/test"
	nc := "00000001"
	cnonce := "abc123"
	qop := "auth"
	ha1 := md5Hash(fmt.Sprintf("%s:%s:%s", cfg.Username, cfg.Realm, cfg.Password))
	ha2 := md5Hash(fmt.Sprintf("GET:%s", uri))
	response := md5Hash(fmt.Sprintf("%s:%s:%s:%s:%s:%s", ha1, nonce, nc, cnonce, qop, ha2))
	authHeader := fmt.Sprintf(
		`Digest username="%s", realm="%s", nonce="%s", uri="%s", qop=%s, nc=%s, cnonce="%s", response="%s"`,
		cfg.Username, cfg.Realm, nonce, uri, qop, nc, cnonce, response,
	)
	// Second request with digest auth
	req2 := httptest.NewRequest("GET", "/test", nil)
	req2.Header.Set("Authorization", authHeader)
	w2 := httptest.NewRecorder()
	router.ServeHTTP(w2, req2)
	if w2.Code != http.StatusOK {
		t.Errorf("expected status 200, got %d; body: %s", w2.Code, w2.Body.String())
	}
}
func TestMiddlewareDigestAuthInvalidNonce(t *testing.T) {
cfg := AuthConfig{
Enabled: true,
Username: "user",
Password: "pass",
Realm: "Test",
NonceExpiry: 5 * time.Minute,
}
da := NewDigestAuth(cfg)
defer da.Stop()
router := gin.New()
router.Use(da.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "success")
})
// Try with a fake nonce that was never issued
authHeader := `Digest username="user", realm="Test", nonce="fakenonce123", uri="/test", qop=auth, nc=00000001, cnonce="abc", response="xxx"`
req := httptest.NewRequest("GET", "/test", nil)
req.Header.Set("Authorization", authHeader)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != http.StatusUnauthorized {
t.Errorf("expected status 401 for invalid nonce, got %d", w.Code)
}
}
// TestMiddlewareDigestAuthExpiredNonce obtains a nonce with a very short
// expiry (50ms), waits past it, and checks that a response built on the
// stale nonce is rejected. Timing-sensitive: relies on real sleeps.
func TestMiddlewareDigestAuthExpiredNonce(t *testing.T) {
	cfg := AuthConfig{
		Enabled:     true,
		Username:    "user",
		Password:    "pass",
		Realm:       "Test",
		NonceExpiry: 50 * time.Millisecond, // Very short for testing
	}
	da := NewDigestAuth(cfg)
	defer da.Stop()
	router := gin.New()
	router.Use(da.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "success")
	})
	// Get a valid nonce
	req := httptest.NewRequest("GET", "/test", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	wwwAuth := w.Header().Get("WWW-Authenticate")
	params := parseDigestParams(wwwAuth[7:])
	nonce := params["nonce"]
	// Wait for nonce to expire
	time.Sleep(100 * time.Millisecond)
	// Try to use expired nonce. Note this builds the legacy (no-qop)
	// digest form: response = MD5(HA1:nonce:HA2).
	uri := "/test"
	ha1 := md5Hash(fmt.Sprintf("%s:%s:%s", cfg.Username, cfg.Realm, cfg.Password))
	ha2 := md5Hash(fmt.Sprintf("GET:%s", uri))
	response := md5Hash(fmt.Sprintf("%s:%s:%s", ha1, nonce, ha2))
	authHeader := fmt.Sprintf(
		`Digest username="%s", realm="%s", nonce="%s", uri="%s", response="%s"`,
		cfg.Username, cfg.Realm, nonce, uri, response,
	)
	req2 := httptest.NewRequest("GET", "/test", nil)
	req2.Header.Set("Authorization", authHeader)
	w2 := httptest.NewRecorder()
	router.ServeHTTP(w2, req2)
	if w2.Code != http.StatusUnauthorized {
		t.Errorf("expected status 401 for expired nonce, got %d", w2.Code)
	}
}
// TestParseDigestParams table-tests parseDigestParams against quoted,
// unquoted, whitespace-padded, and empty inputs.
//
// Fix over the original: the old loop only checked that every expected
// key/value pair was present, so spurious extra parameters in the result
// went undetected. We now also require the result to have exactly the
// expected number of entries.
func TestParseDigestParams(t *testing.T) {
	testCases := []struct {
		name     string
		input    string
		expected map[string]string
	}{
		{
			name:  "basic params",
			input: `username="john", realm="test"`,
			expected: map[string]string{
				"username": "john",
				"realm":    "test",
			},
		},
		{
			name:  "params with spaces",
			input: ` username = "john" , realm = "test" `,
			expected: map[string]string{
				"username": "john",
				"realm":    "test",
			},
		},
		{
			name:  "unquoted values",
			input: `qop=auth, nc=00000001`,
			expected: map[string]string{
				"qop": "auth",
				"nc":  "00000001",
			},
		},
		{
			name:  "full digest header",
			input: `username="user", realm="Test", nonce="abc123", uri="/api", qop=auth, nc=00000001, cnonce="xyz", response="hash"`,
			expected: map[string]string{
				"username": "user",
				"realm":    "Test",
				"nonce":    "abc123",
				"uri":      "/api",
				"qop":      "auth",
				"nc":       "00000001",
				"cnonce":   "xyz",
				"response": "hash",
			},
		},
		{
			name:     "empty string",
			input:    "",
			expected: map[string]string{},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := parseDigestParams(tc.input)
			// Exact match: every expected pair present AND no extras.
			if len(result) != len(tc.expected) {
				t.Errorf("expected %d params, got %d (%v)", len(tc.expected), len(result), result)
			}
			for key, expectedVal := range tc.expected {
				if result[key] != expectedVal {
					t.Errorf("key %s: expected %s, got %s", key, expectedVal, result[key])
				}
			}
		})
	}
}
// TestMd5Hash checks md5Hash against known MD5 digests, including the
// empty string and a credential-style triple.
func TestMd5Hash(t *testing.T) {
	sum := func(s string) string {
		h := md5.Sum([]byte(s))
		return hex.EncodeToString(h[:])
	}
	cases := map[string]string{
		"hello":               "5d41402abc4b2a76b9719d911017c592",
		"":                    "d41d8cd98f00b204e9800998ecf8427e",
		"user:realm:password": sum("user:realm:password"),
	}
	for input, want := range cases {
		input, want := input, want
		t.Run(input, func(t *testing.T) {
			if got := md5Hash(input); got != want {
				t.Errorf("expected %s, got %s", want, got)
			}
		})
	}
}
func TestNonceGeneration(t *testing.T) {
cfg := DefaultAuthConfig()
da := NewDigestAuth(cfg)
defer da.Stop()
nonces := make(map[string]bool)
for i := 0; i < 100; i++ {
nonce := da.generateNonce()
if len(nonce) != 32 { // 16 bytes = 32 hex chars
t.Errorf("expected nonce length 32, got %d", len(nonce))
}
if nonces[nonce] {
t.Error("duplicate nonce generated")
}
nonces[nonce] = true
}
}
func TestOpaqueGeneration(t *testing.T) {
cfg := AuthConfig{Realm: "TestRealm"}
da := NewDigestAuth(cfg)
defer da.Stop()
opaque1 := da.generateOpaque()
opaque2 := da.generateOpaque()
// Same realm should produce same opaque
if opaque1 != opaque2 {
t.Error("opaque should be consistent for same realm")
}
// Should be MD5 of realm
expected := md5Hash("TestRealm")
if opaque1 != expected {
t.Errorf("expected opaque %s, got %s", expected, opaque1)
}
}
// TestNonceCleanup verifies that stored nonces are removed after
// NonceExpiry (50ms here) — presumably by a background cleanup goroutine
// started in NewDigestAuth and stopped by da.Stop(); confirm against the
// DigestAuth implementation. Timing-sensitive: sleeps 150ms before the
// final check.
func TestNonceCleanup(t *testing.T) {
	cfg := AuthConfig{
		Enabled:     true,
		Username:    "user",
		Password:    "pass",
		Realm:       "Test",
		NonceExpiry: 50 * time.Millisecond,
	}
	da := NewDigestAuth(cfg)
	defer da.Stop()
	// Store a nonce directly, stamped with the current time.
	nonce := da.generateNonce()
	da.nonces.Store(nonce, time.Now())
	// Verify it exists
	if _, ok := da.nonces.Load(nonce); !ok {
		t.Error("nonce should exist immediately after storing")
	}
	// Wait for cleanup (2x expiry to be safe)
	time.Sleep(150 * time.Millisecond)
	// Verify it was cleaned up
	if _, ok := da.nonces.Load(nonce); ok {
		t.Error("expired nonce should have been cleaned up")
	}
}
// authTestContains reports whether substr occurs in s. It is a local,
// dependency-free substring check used by the auth tests; an empty
// substr is always considered present.
func authTestContains(s, substr string) bool {
	if len(substr) == 0 {
		return true
	}
	for end := len(substr); end <= len(s); end++ {
		if s[end-len(substr):end] == substr {
			return true
		}
	}
	return false
}
// Benchmark tests

// BenchmarkMd5Hash measures the cost of a single md5Hash call on a short
// credential-style string (the HA1 input shape).
func BenchmarkMd5Hash(b *testing.B) {
	input := "user:realm:password"
	for i := 0; i < b.N; i++ {
		md5Hash(input)
	}
}

// BenchmarkNonceGeneration measures nonce generation throughput.
func BenchmarkNonceGeneration(b *testing.B) {
	cfg := DefaultAuthConfig()
	da := NewDigestAuth(cfg)
	defer da.Stop()
	for i := 0; i < b.N; i++ {
		da.generateNonce()
	}
}

// BenchmarkBasicAuthValidation measures a full middleware round-trip with
// a valid Basic Authorization header. Router/request setup is excluded
// from the timing via b.ResetTimer; only the recorder allocation and
// ServeHTTP call are measured per iteration.
func BenchmarkBasicAuthValidation(b *testing.B) {
	cfg := AuthConfig{
		Enabled:     true,
		Username:    "user",
		Password:    "pass",
		Realm:       "Test",
		NonceExpiry: 5 * time.Minute,
	}
	da := NewDigestAuth(cfg)
	defer da.Stop()
	router := gin.New()
	router.Use(da.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.Status(http.StatusOK)
	})
	req := httptest.NewRequest("GET", "/test", nil)
	req.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("user:pass")))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		w := httptest.NewRecorder()
		router.ServeHTTP(w, req)
	}
}

55
mining/bufpool.go Normal file
View file

@ -0,0 +1,55 @@
package mining
import (
"bytes"
"encoding/json"
"sync"
)
// bufferPool provides reusable byte buffers for JSON encoding, cutting
// allocation churn in hot paths such as WebSocket event serialization.
var bufferPool = sync.Pool{
	New: func() interface{} {
		return bytes.NewBuffer(make([]byte, 0, 1024))
	},
}

// getBuffer hands out a reset buffer from the pool.
func getBuffer() *bytes.Buffer {
	b := bufferPool.Get().(*bytes.Buffer)
	b.Reset()
	return b
}

// putBuffer returns a buffer to the pool, discarding oversized ones
// (>64KB) so one large payload cannot pin memory indefinitely.
func putBuffer(buf *bytes.Buffer) {
	if buf.Cap() > 65536 {
		return
	}
	bufferPool.Put(buf)
}

// MarshalJSON encodes v to JSON via a pooled buffer. HTML escaping is
// disabled. The returned slice is an independent copy, safe to use after
// the pooled buffer is recycled.
func MarshalJSON(v interface{}) ([]byte, error) {
	buf := getBuffer()
	defer putBuffer(buf)

	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(v); err != nil {
		return nil, err
	}

	// Encoder.Encode appends a trailing newline; drop it so the output
	// shape matches json.Marshal.
	encoded := bytes.TrimSuffix(buf.Bytes(), []byte("\n"))

	// Detach from the pooled buffer before it is reused.
	return append([]byte(nil), encoded...), nil
}

246
mining/circuit_breaker.go Normal file
View file

@ -0,0 +1,246 @@
package mining
import (
"errors"
"sync"
"time"
"forge.lthn.ai/core/mining/logging"
)
// CircuitState represents the state of a circuit breaker.
type CircuitState int

const (
	// CircuitClosed means the circuit is functioning normally
	CircuitClosed CircuitState = iota
	// CircuitOpen means the circuit has tripped and requests are being rejected
	CircuitOpen
	// CircuitHalfOpen means the circuit is testing if the service has recovered
	CircuitHalfOpen
)

// String returns a human-readable name for the state; any value outside
// the defined range stringifies as "unknown".
func (s CircuitState) String() string {
	if s < CircuitClosed || s > CircuitHalfOpen {
		return "unknown"
	}
	return [...]string{"closed", "open", "half-open"}[s]
}
// CircuitBreakerConfig holds configuration for a circuit breaker
type CircuitBreakerConfig struct {
// FailureThreshold is the number of failures before opening the circuit
FailureThreshold int
// ResetTimeout is how long to wait before attempting recovery
ResetTimeout time.Duration
// SuccessThreshold is the number of successes needed in half-open state to close
SuccessThreshold int
}
// DefaultCircuitBreakerConfig returns sensible defaults
func DefaultCircuitBreakerConfig() CircuitBreakerConfig {
return CircuitBreakerConfig{
FailureThreshold: 3,
ResetTimeout: 30 * time.Second,
SuccessThreshold: 1,
}
}
// CircuitBreaker implements the circuit breaker pattern: after
// FailureThreshold consecutive failures it rejects calls, then after
// ResetTimeout it lets probes through (half-open) until SuccessThreshold
// successes close it again. The last successful result is cached and can
// be served while the circuit is open.
type CircuitBreaker struct {
	name   string
	config CircuitBreakerConfig
	// state/failures/successes/lastFailure are all guarded by mu.
	state       CircuitState
	failures    int
	successes   int
	lastFailure time.Time
	mu          sync.RWMutex
	// cachedResult holds the most recent successful result; it is served
	// as a fallback while the circuit is open (see Execute/GetCached).
	cachedResult interface{}
	// cachedErr is only ever written (set to nil) in recordSuccess and
	// never read back. NOTE(review): dead field, candidate for removal.
	cachedErr     error
	lastCacheTime time.Time
	cacheDuration time.Duration
}

// ErrCircuitOpen is returned when the circuit is open and no fresh
// cached result is available to serve instead.
var ErrCircuitOpen = errors.New("circuit breaker is open")
// NewCircuitBreaker creates a closed circuit breaker with the given name
// and configuration. Successful results are cached for five minutes.
func NewCircuitBreaker(name string, config CircuitBreakerConfig) *CircuitBreaker {
	cb := &CircuitBreaker{
		name:          name,
		config:        config,
		state:         CircuitClosed,
		cacheDuration: 5 * time.Minute, // Cache successful results for 5 minutes
	}
	return cb
}
// State returns the current circuit state under a read lock.
func (cb *CircuitBreaker) State() CircuitState {
	cb.mu.RLock()
	defer cb.mu.RUnlock()
	return cb.state
}
// Execute runs fn with circuit breaker protection. While the circuit is
// open, a fresh cached result (if any) is returned with a nil error
// instead of calling fn; otherwise ErrCircuitOpen is returned. The
// outcome of fn is recorded to drive state transitions.
//
// Fix over the original: the state string for the debug log was read
// from cb.state AFTER cb.mu.RUnlock(), which races with concurrent state
// transitions; it is now captured while the lock is still held.
func (cb *CircuitBreaker) Execute(fn func() (interface{}, error)) (interface{}, error) {
	// Check if we should allow this request
	if !cb.allowRequest() {
		// Return cached result if available and still fresh.
		cb.mu.RLock()
		if cb.cachedResult != nil && time.Since(cb.lastCacheTime) < cb.cacheDuration {
			result := cb.cachedResult
			state := cb.state.String() // capture under lock (race fix)
			cb.mu.RUnlock()
			logging.Debug("circuit breaker returning cached result", logging.Fields{
				"name":  cb.name,
				"state": state,
			})
			return result, nil
		}
		cb.mu.RUnlock()
		return nil, ErrCircuitOpen
	}
	// Execute the function and record the result.
	result, err := fn()
	if err != nil {
		cb.recordFailure()
	} else {
		cb.recordSuccess(result)
	}
	return result, err
}
// allowRequest decides whether a call may proceed, and performs the
// open -> half-open transition once ResetTimeout has elapsed since the
// last failure. It takes the write lock because it can mutate state.
func (cb *CircuitBreaker) allowRequest() bool {
	cb.mu.Lock()
	defer cb.mu.Unlock()
	switch cb.state {
	case CircuitClosed:
		return true
	case CircuitOpen:
		// Check if we should transition to half-open
		if time.Since(cb.lastFailure) > cb.config.ResetTimeout {
			cb.state = CircuitHalfOpen
			cb.successes = 0
			logging.Info("circuit breaker transitioning to half-open", logging.Fields{
				"name": cb.name,
			})
			return true
		}
		return false
	case CircuitHalfOpen:
		// Allow probe requests through. NOTE(review): every concurrent
		// caller is admitted here — there is no single-probe limiting.
		return true
	default:
		return false
	}
}
// recordFailure counts a failed call: in the closed state it opens the
// circuit once FailureThreshold is reached; in the half-open state a
// single failed probe reopens the circuit immediately.
func (cb *CircuitBreaker) recordFailure() {
	cb.mu.Lock()
	defer cb.mu.Unlock()
	cb.failures++
	cb.lastFailure = time.Now()
	switch cb.state {
	case CircuitClosed:
		if cb.failures >= cb.config.FailureThreshold {
			cb.state = CircuitOpen
			logging.Warn("circuit breaker opened", logging.Fields{
				"name":     cb.name,
				"failures": cb.failures,
			})
		}
	case CircuitHalfOpen:
		// Probe failed, back to open
		cb.state = CircuitOpen
		logging.Warn("circuit breaker probe failed, reopening", logging.Fields{
			"name": cb.name,
		})
	}
}
// recordSuccess caches the result and advances recovery: it resets the
// failure counter when closed, and closes the circuit once
// SuccessThreshold probes have succeeded in the half-open state.
func (cb *CircuitBreaker) recordSuccess(result interface{}) {
	cb.mu.Lock()
	defer cb.mu.Unlock()
	// Cache the successful result. A nil result is stored but will never
	// be served, since cache readers require cachedResult != nil.
	cb.cachedResult = result
	cb.lastCacheTime = time.Now()
	cb.cachedErr = nil
	switch cb.state {
	case CircuitClosed:
		// Reset failure count on success
		cb.failures = 0
	case CircuitHalfOpen:
		cb.successes++
		if cb.successes >= cb.config.SuccessThreshold {
			cb.state = CircuitClosed
			cb.failures = 0
			logging.Info("circuit breaker closed after successful probe", logging.Fields{
				"name": cb.name,
			})
		}
	}
}
// Reset manually forces the breaker back to the closed state and clears
// both the failure and success counters.
func (cb *CircuitBreaker) Reset() {
	cb.mu.Lock()
	defer cb.mu.Unlock()
	cb.state, cb.failures, cb.successes = CircuitClosed, 0, 0
	logging.Debug("circuit breaker manually reset", logging.Fields{
		"name": cb.name,
	})
}
// GetCached returns the last successful result, if one exists and is
// still inside the cache window; the boolean reports availability.
func (cb *CircuitBreaker) GetCached() (interface{}, bool) {
	cb.mu.RLock()
	defer cb.mu.RUnlock()
	fresh := time.Since(cb.lastCacheTime) < cb.cacheDuration
	if cb.cachedResult == nil || !fresh {
		return nil, false
	}
	return cb.cachedResult, true
}
// Global circuit breaker for GitHub API, created lazily exactly once via
// sync.Once.
var (
	githubCircuitBreaker     *CircuitBreaker
	githubCircuitBreakerOnce sync.Once
)

// getGitHubCircuitBreaker returns the shared GitHub API circuit breaker,
// configured to open after 3 failures, retry after 1 minute, and close
// again on a single successful probe.
func getGitHubCircuitBreaker() *CircuitBreaker {
	githubCircuitBreakerOnce.Do(func() {
		githubCircuitBreaker = NewCircuitBreaker("github-api", CircuitBreakerConfig{
			FailureThreshold: 3,
			ResetTimeout:     60 * time.Second, // Wait 1 minute before retrying
			SuccessThreshold: 1,
		})
	})
	return githubCircuitBreaker
}

View file

@ -0,0 +1,334 @@
package mining
import (
"errors"
"sync"
"testing"
"time"
)
// TestCircuitBreakerDefaultConfig pins the documented defaults: open
// after 3 failures, probe after 30s, close after 1 success.
func TestCircuitBreakerDefaultConfig(t *testing.T) {
	cfg := DefaultCircuitBreakerConfig()
	if cfg.FailureThreshold != 3 {
		t.Errorf("expected FailureThreshold 3, got %d", cfg.FailureThreshold)
	}
	if cfg.ResetTimeout != 30*time.Second {
		t.Errorf("expected ResetTimeout 30s, got %v", cfg.ResetTimeout)
	}
	if cfg.SuccessThreshold != 1 {
		t.Errorf("expected SuccessThreshold 1, got %d", cfg.SuccessThreshold)
	}
}

// TestCircuitBreakerStateString covers every named state plus an
// out-of-range value, which must stringify as "unknown".
func TestCircuitBreakerStateString(t *testing.T) {
	tests := []struct {
		state    CircuitState
		expected string
	}{
		{CircuitClosed, "closed"},
		{CircuitOpen, "open"},
		{CircuitHalfOpen, "half-open"},
		{CircuitState(99), "unknown"},
	}
	for _, tt := range tests {
		if got := tt.state.String(); got != tt.expected {
			t.Errorf("state %d: expected %s, got %s", tt.state, tt.expected, got)
		}
	}
}

// TestCircuitBreakerClosed verifies the breaker starts closed and stays
// closed across a successful Execute, returning fn's result unchanged.
func TestCircuitBreakerClosed(t *testing.T) {
	cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
	if cb.State() != CircuitClosed {
		t.Error("expected initial state to be closed")
	}
	// Successful execution
	result, err := cb.Execute(func() (interface{}, error) {
		return "success", nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if result != "success" {
		t.Errorf("expected 'success', got %v", result)
	}
	if cb.State() != CircuitClosed {
		t.Error("state should still be closed after success")
	}
}
// TestCircuitBreakerOpensAfterFailures drives the breaker to its failure
// threshold (2) and asserts the closed -> open transition happens exactly
// at the threshold, not before.
func TestCircuitBreakerOpensAfterFailures(t *testing.T) {
	cfg := CircuitBreakerConfig{
		FailureThreshold: 2,
		ResetTimeout:     time.Minute,
		SuccessThreshold: 1,
	}
	cb := NewCircuitBreaker("test", cfg)
	testErr := errors.New("test error")
	// First failure
	_, err := cb.Execute(func() (interface{}, error) {
		return nil, testErr
	})
	// Execute returns fn's error unwrapped, so identity comparison works.
	if err != testErr {
		t.Errorf("expected test error, got %v", err)
	}
	if cb.State() != CircuitClosed {
		t.Error("should still be closed after 1 failure")
	}
	// Second failure - should open circuit
	_, err = cb.Execute(func() (interface{}, error) {
		return nil, testErr
	})
	if err != testErr {
		t.Errorf("expected test error, got %v", err)
	}
	if cb.State() != CircuitOpen {
		t.Error("should be open after 2 failures")
	}
}

// TestCircuitBreakerRejectsWhenOpen confirms that an open breaker (long
// reset timeout, no cached success) short-circuits with ErrCircuitOpen
// and never invokes the supplied function.
func TestCircuitBreakerRejectsWhenOpen(t *testing.T) {
	cfg := CircuitBreakerConfig{
		FailureThreshold: 1,
		ResetTimeout:     time.Hour, // Long timeout to keep circuit open
		SuccessThreshold: 1,
	}
	cb := NewCircuitBreaker("test", cfg)
	// Open the circuit
	cb.Execute(func() (interface{}, error) {
		return nil, errors.New("fail")
	})
	if cb.State() != CircuitOpen {
		t.Fatal("circuit should be open")
	}
	// Next request should be rejected
	called := false
	_, err := cb.Execute(func() (interface{}, error) {
		called = true
		return "should not run", nil
	})
	if called {
		t.Error("function should not have been called when circuit is open")
	}
	if err != ErrCircuitOpen {
		t.Errorf("expected ErrCircuitOpen, got %v", err)
	}
}
// TestCircuitBreakerTransitionsToHalfOpen opens the breaker, waits past
// the 50ms reset timeout, and checks the next call is let through as a
// probe and closes the circuit on success. Timing-sensitive (real sleep).
func TestCircuitBreakerTransitionsToHalfOpen(t *testing.T) {
	cfg := CircuitBreakerConfig{
		FailureThreshold: 1,
		ResetTimeout:     50 * time.Millisecond,
		SuccessThreshold: 1,
	}
	cb := NewCircuitBreaker("test", cfg)
	// Open the circuit
	cb.Execute(func() (interface{}, error) {
		return nil, errors.New("fail")
	})
	if cb.State() != CircuitOpen {
		t.Fatal("circuit should be open")
	}
	// Wait for reset timeout
	time.Sleep(100 * time.Millisecond)
	// Next request should transition to half-open and execute
	result, err := cb.Execute(func() (interface{}, error) {
		return "probe success", nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if result != "probe success" {
		t.Errorf("expected 'probe success', got %v", result)
	}
	if cb.State() != CircuitClosed {
		t.Error("should be closed after successful probe")
	}
}

// TestCircuitBreakerHalfOpenFailureReopens checks that a failed probe
// sends the breaker straight back to open. Timing-sensitive (real sleep).
func TestCircuitBreakerHalfOpenFailureReopens(t *testing.T) {
	cfg := CircuitBreakerConfig{
		FailureThreshold: 1,
		ResetTimeout:     50 * time.Millisecond,
		SuccessThreshold: 1,
	}
	cb := NewCircuitBreaker("test", cfg)
	// Open the circuit
	cb.Execute(func() (interface{}, error) {
		return nil, errors.New("fail")
	})
	// Wait for reset timeout
	time.Sleep(100 * time.Millisecond)
	// Probe fails
	cb.Execute(func() (interface{}, error) {
		return nil, errors.New("probe failed")
	})
	if cb.State() != CircuitOpen {
		t.Error("should be open after probe failure")
	}
}
// TestCircuitBreakerCaching verifies that a previously successful result
// is served (with a nil error) while the circuit is open, instead of
// calling the function or returning ErrCircuitOpen.
func TestCircuitBreakerCaching(t *testing.T) {
	cfg := CircuitBreakerConfig{
		FailureThreshold: 1,
		ResetTimeout:     time.Hour,
		SuccessThreshold: 1,
	}
	cb := NewCircuitBreaker("test", cfg)
	// Successful call - caches result
	result, err := cb.Execute(func() (interface{}, error) {
		return "cached value", nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if result != "cached value" {
		t.Fatalf("expected 'cached value', got %v", result)
	}
	// Open the circuit
	cb.Execute(func() (interface{}, error) {
		return nil, errors.New("fail")
	})
	// Should return cached value when circuit is open
	result, err = cb.Execute(func() (interface{}, error) {
		return "should not run", nil
	})
	if err != nil {
		t.Errorf("expected cached result, got error: %v", err)
	}
	if result != "cached value" {
		t.Errorf("expected 'cached value', got %v", result)
	}
}

// TestCircuitBreakerGetCached checks that GetCached reports no value
// before any success and returns the stored value afterwards.
func TestCircuitBreakerGetCached(t *testing.T) {
	cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
	// No cache initially
	_, ok := cb.GetCached()
	if ok {
		t.Error("expected no cached value initially")
	}
	// Cache a value
	cb.Execute(func() (interface{}, error) {
		return "test value", nil
	})
	cached, ok := cb.GetCached()
	if !ok {
		t.Error("expected cached value")
	}
	if cached != "test value" {
		t.Errorf("expected 'test value', got %v", cached)
	}
}
// TestCircuitBreakerReset opens the breaker and checks that a manual
// Reset returns it to the closed state.
func TestCircuitBreakerReset(t *testing.T) {
	cfg := CircuitBreakerConfig{
		FailureThreshold: 1,
		ResetTimeout:     time.Hour,
		SuccessThreshold: 1,
	}
	cb := NewCircuitBreaker("test", cfg)
	// Open the circuit
	cb.Execute(func() (interface{}, error) {
		return nil, errors.New("fail")
	})
	if cb.State() != CircuitOpen {
		t.Fatal("circuit should be open")
	}
	// Manual reset
	cb.Reset()
	if cb.State() != CircuitClosed {
		t.Error("circuit should be closed after reset")
	}
}

// TestCircuitBreakerConcurrency hammers Execute from 100 goroutines with
// a mix of failures and successes. It asserts nothing about the final
// state — only that nothing panics (and, under -race, that there are no
// data races).
func TestCircuitBreakerConcurrency(t *testing.T) {
	cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			cb.Execute(func() (interface{}, error) {
				if n%3 == 0 {
					return nil, errors.New("fail")
				}
				return "success", nil
			})
		}(i)
	}
	wg.Wait()
	// Just verify no panics occurred
	_ = cb.State()
}
// TestGetGitHubCircuitBreaker verifies the lazily-created GitHub breaker
// is a process-wide singleton with the expected name.
func TestGetGitHubCircuitBreaker(t *testing.T) {
	cb1 := getGitHubCircuitBreaker()
	cb2 := getGitHubCircuitBreaker()
	if cb1 != cb2 {
		t.Error("expected singleton circuit breaker")
	}
	if cb1.name != "github-api" {
		t.Errorf("expected name 'github-api', got %s", cb1.name)
	}
}

// Benchmark tests

// BenchmarkCircuitBreakerExecute measures the per-call overhead of a
// closed breaker wrapping a trivially successful function.
func BenchmarkCircuitBreakerExecute(b *testing.B) {
	cb := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig())
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cb.Execute(func() (interface{}, error) {
			return "result", nil
		})
	}
}

// BenchmarkCircuitBreakerConcurrent measures the same path under
// parallel load, lock contention included.
func BenchmarkCircuitBreakerConcurrent(b *testing.B) {
	cb := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig())
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			cb.Execute(func() (interface{}, error) {
				return "result", nil
			})
		}
	})
}

20
mining/component.go Normal file
View file

@ -0,0 +1,20 @@
package mining
import (
"embed"
"io/fs"
"net/http"
)
// componentFS embeds the web component assets found under component/.
// The go:embed directive must stay immediately attached to this var.
//
//go:embed component/*
var componentFS embed.FS

// GetComponentFS returns the embedded file system containing the web component.
// This allows the component to be served even when the package is used as a module.
// The returned http.FileSystem is rooted at component/, so files are
// addressed without the directory prefix.
func GetComponentFS() (http.FileSystem, error) {
	sub, err := fs.Sub(componentFS, "component")
	if err != nil {
		return nil, err
	}
	return http.FS(sub), nil
}

File diff suppressed because one or more lines are too long

158
mining/config_manager.go Normal file
View file

@ -0,0 +1,158 @@
package mining
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/adrg/xdg"
)
// configMu protects concurrent access to config file operations across
// LoadMinersConfig, SaveMinersConfig, and UpdateMinersConfig.
var configMu sync.RWMutex

// MinerAutostartConfig represents the configuration for a single miner's autostart settings.
type MinerAutostartConfig struct {
	MinerType string  `json:"minerType"`
	Autostart bool    `json:"autostart"`
	Config    *Config `json:"config,omitempty"` // Store the last used config
}

// DatabaseConfig holds configuration for SQLite database persistence.
type DatabaseConfig struct {
	// Enabled determines if database persistence is active (default: true)
	Enabled bool `json:"enabled"`
	// RetentionDays is how long to keep historical data (default: 30)
	RetentionDays int `json:"retentionDays,omitempty"`
}

// defaultDatabaseConfig returns the default database configuration:
// persistence enabled with 30-day retention.
func defaultDatabaseConfig() DatabaseConfig {
	return DatabaseConfig{
		Enabled:       true,
		RetentionDays: 30,
	}
}

// MinersConfig represents the overall configuration for all miners,
// including autostart settings and database persistence options.
type MinersConfig struct {
	Miners   []MinerAutostartConfig `json:"miners"`
	Database DatabaseConfig         `json:"database"`
}

// getMinersConfigPath returns the path to the miners configuration file,
// resolved under the XDG config directory
// (lethean-desktop/miners/config.json).
func getMinersConfigPath() (string, error) {
	return xdg.ConfigFile("lethean-desktop/miners/config.json")
}
// LoadMinersConfig loads the miners configuration from the file system.
// A missing file yields an empty config with database defaults.
//
// Fix over the original: backwards-compatible database defaulting was
// keyed on RetentionDays == 0, which silently overwrote an explicit
// {"database": {"enabled": false}} (no retentionDays) back to enabled.
// Full defaults now apply only when the "database" key is absent.
func LoadMinersConfig() (*MinersConfig, error) {
	configMu.RLock()
	defer configMu.RUnlock()
	configPath, err := getMinersConfigPath()
	if err != nil {
		return nil, fmt.Errorf("could not determine miners config path: %w", err)
	}
	data, err := os.ReadFile(configPath)
	if err != nil {
		if os.IsNotExist(err) {
			// Return empty config with defaults if file doesn't exist
			return &MinersConfig{
				Miners:   []MinerAutostartConfig{},
				Database: defaultDatabaseConfig(),
			}, nil
		}
		return nil, fmt.Errorf("failed to read miners config file: %w", err)
	}
	var cfg MinersConfig
	if err := json.Unmarshal(data, &cfg); err != nil {
		return nil, fmt.Errorf("failed to unmarshal miners config: %w", err)
	}
	// Probe for presence of the "database" key: full defaults only when
	// it is missing; otherwise just backfill a zero RetentionDays so an
	// explicit Enabled=false is preserved.
	var probe struct {
		Database json.RawMessage `json:"database"`
	}
	if json.Unmarshal(data, &probe) != nil || len(probe.Database) == 0 || string(probe.Database) == "null" {
		cfg.Database = defaultDatabaseConfig()
	} else if cfg.Database.RetentionDays == 0 {
		cfg.Database.RetentionDays = defaultDatabaseConfig().RetentionDays
	}
	return &cfg, nil
}
// SaveMinersConfig persists the miners configuration under the write
// lock. The write is atomic (temp file + rename via AtomicWriteFile)
// with owner-only file permissions.
func SaveMinersConfig(cfg *MinersConfig) error {
	configMu.Lock()
	defer configMu.Unlock()

	path, err := getMinersConfigPath()
	if err != nil {
		return fmt.Errorf("could not determine miners config path: %w", err)
	}
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return fmt.Errorf("failed to create config directory: %w", err)
	}
	payload, err := json.MarshalIndent(cfg, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal miners config: %w", err)
	}
	return AtomicWriteFile(path, payload, 0600)
}
// UpdateMinersConfig atomically loads, modifies (via fn), and saves the
// miners config while holding the write lock for the whole cycle,
// preventing read-modify-write races with concurrent callers.
//
// Fix over the original (same defect as in LoadMinersConfig): database
// defaults were applied whenever RetentionDays == 0, clobbering an
// explicit {"database": {"enabled": false}}. Full defaults now apply
// only when the "database" key is absent from the file.
func UpdateMinersConfig(fn func(*MinersConfig) error) error {
	configMu.Lock()
	defer configMu.Unlock()
	configPath, err := getMinersConfigPath()
	if err != nil {
		return fmt.Errorf("could not determine miners config path: %w", err)
	}
	// Load current config; a missing file starts from fresh defaults.
	var cfg MinersConfig
	data, err := os.ReadFile(configPath)
	if err != nil {
		if os.IsNotExist(err) {
			cfg = MinersConfig{
				Miners:   []MinerAutostartConfig{},
				Database: defaultDatabaseConfig(),
			}
		} else {
			return fmt.Errorf("failed to read miners config file: %w", err)
		}
	} else {
		if err := json.Unmarshal(data, &cfg); err != nil {
			return fmt.Errorf("failed to unmarshal miners config: %w", err)
		}
		// Presence probe: full defaults only when "database" is missing;
		// otherwise only backfill a zero RetentionDays.
		var probe struct {
			Database json.RawMessage `json:"database"`
		}
		if json.Unmarshal(data, &probe) != nil || len(probe.Database) == 0 || string(probe.Database) == "null" {
			cfg.Database = defaultDatabaseConfig()
		} else if cfg.Database.RetentionDays == 0 {
			cfg.Database.RetentionDays = defaultDatabaseConfig().RetentionDays
		}
	}
	// Apply the caller's modification.
	if err := fn(&cfg); err != nil {
		return err
	}
	// Save atomically (temp file + rename).
	dir := filepath.Dir(configPath)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create config directory: %w", err)
	}
	newData, err := json.MarshalIndent(cfg, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal miners config: %w", err)
	}
	return AtomicWriteFile(configPath, newData, 0600)
}

259
mining/container.go Normal file
View file

@ -0,0 +1,259 @@
package mining
import (
"context"
"fmt"
"sync"
"forge.lthn.ai/core/mining/database"
"forge.lthn.ai/core/mining/logging"
)
// ContainerConfig holds configuration for the service container.
type ContainerConfig struct {
	// Database configuration
	Database database.Config
	// ListenAddr is the address to listen on (e.g., ":9090")
	ListenAddr string
	// DisplayAddr is the address shown in Swagger docs
	DisplayAddr string
	// SwaggerNamespace is the API path prefix
	SwaggerNamespace string
	// SimulationMode enables simulation mode for testing
	SimulationMode bool
}

// DefaultContainerConfig returns sensible defaults for the container:
// database persistence on with 30-day retention, listening on :9090,
// API served under /api/v1/mining, simulation mode off.
func DefaultContainerConfig() ContainerConfig {
	return ContainerConfig{
		Database: database.Config{
			Enabled:       true,
			RetentionDays: 30,
		},
		ListenAddr:       ":9090",
		DisplayAddr:      "localhost:9090",
		SwaggerNamespace: "/api/v1/mining",
		SimulationMode:   false,
	}
}
// Container manages the lifecycle of all services.
// It provides centralized initialization, dependency injection, and
// graceful shutdown. Mutable fields are guarded by mu.
type Container struct {
	config ContainerConfig
	mu     sync.RWMutex
	// Core services
	manager        ManagerInterface
	profileManager *ProfileManager
	nodeService    *NodeService
	eventHub       *EventHub
	service        *Service
	// Database store (interface for testing)
	hashrateStore database.HashrateStore
	// Initialization state
	initialized bool
	// transportStarted records whether the node transport actually
	// started, so Shutdown only stops what was started.
	transportStarted bool
	// shutdownCh is closed exactly once when Shutdown completes.
	shutdownCh chan struct{}
}

// NewContainer creates a new service container with the given
// configuration. Initialize and Start must be called before use.
func NewContainer(config ContainerConfig) *Container {
	return &Container{
		config:     config,
		shutdownCh: make(chan struct{}),
	}
}
// Initialize sets up all services in dependency order: database (or a
// no-op store when disabled), profile manager, miner manager (real or
// simulated), the optional P2P node service, and the WebSocket event hub.
// Database and profile-manager failures abort initialization; a node
// service failure is logged and tolerated. A second call fails.
// This should be called before Start().
func (c *Container) Initialize(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.initialized {
		return fmt.Errorf("container already initialized")
	}
	// 1. Initialize database (optional)
	if c.config.Database.Enabled {
		if err := database.Initialize(c.config.Database); err != nil {
			return fmt.Errorf("failed to initialize database: %w", err)
		}
		c.hashrateStore = database.DefaultStore()
		logging.Info("database initialized", logging.Fields{"retention_days": c.config.Database.RetentionDays})
	} else {
		c.hashrateStore = database.NopStore()
		logging.Info("database disabled, using no-op store", nil)
	}
	// 2. Initialize profile manager
	var err error
	c.profileManager, err = NewProfileManager()
	if err != nil {
		return fmt.Errorf("failed to initialize profile manager: %w", err)
	}
	// 3. Initialize miner manager
	if c.config.SimulationMode {
		c.manager = NewManagerForSimulation()
	} else {
		c.manager = NewManager()
	}
	// 4. Initialize node service (optional - P2P features)
	c.nodeService, err = NewNodeService()
	if err != nil {
		logging.Warn("node service unavailable", logging.Fields{"error": err})
		// Continue without node service - P2P features will be unavailable
	}
	// 5. Initialize event hub for WebSocket
	c.eventHub = NewEventHub()
	// Wire up event hub to manager (only the concrete *Manager supports it)
	if mgr, ok := c.manager.(*Manager); ok {
		mgr.SetEventHub(c.eventHub)
	}
	c.initialized = true
	logging.Info("service container initialized", nil)
	return nil
}
// Start begins all background services: the event hub goroutine and, if
// a node service is available, its transport. A transport failure is
// logged but non-fatal.
//
// Fix over the original: this method mutates c.transportStarted, but it
// held only c.mu.RLock(), making that write a data race with concurrent
// Start/Shutdown callers. It now takes the full write lock.
func (c *Container) Start(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.initialized {
		return fmt.Errorf("container not initialized")
	}
	// Start event hub
	go c.eventHub.Run()
	// Start node transport if available
	if c.nodeService != nil {
		if err := c.nodeService.StartTransport(); err != nil {
			logging.Warn("failed to start node transport", logging.Fields{"error": err})
		} else {
			c.transportStarted = true
		}
	}
	logging.Info("service container started", nil)
	return nil
}
// Shutdown gracefully stops all services in reverse initialization
// order, collecting (rather than short-circuiting on) errors. It is a
// no-op when not initialized, which also makes a second call safe:
// shutdownCh is closed exactly once.
func (c *Container) Shutdown(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.initialized {
		return nil
	}
	logging.Info("shutting down service container", nil)
	var errs []error
	// 1. Stop service (HTTP server)
	if c.service != nil {
		// Service shutdown is handled externally
		// NOTE(review): intentionally empty — placeholder branch.
	}
	// 2. Stop node transport (only if it was started)
	if c.nodeService != nil && c.transportStarted {
		if err := c.nodeService.StopTransport(); err != nil {
			errs = append(errs, fmt.Errorf("node transport: %w", err))
		}
		c.transportStarted = false
	}
	// 3. Stop event hub
	if c.eventHub != nil {
		c.eventHub.Stop()
	}
	// 4. Stop miner manager (only the concrete *Manager has Stop)
	if mgr, ok := c.manager.(*Manager); ok {
		mgr.Stop()
	}
	// 5. Close database
	if err := database.Close(); err != nil {
		errs = append(errs, fmt.Errorf("database: %w", err))
	}
	c.initialized = false
	close(c.shutdownCh)
	if len(errs) > 0 {
		return fmt.Errorf("shutdown errors: %v", errs)
	}
	logging.Info("service container shutdown complete", nil)
	return nil
}
// Manager returns the miner manager.
func (c *Container) Manager() ManagerInterface {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.manager
}

// ProfileManager returns the profile manager.
func (c *Container) ProfileManager() *ProfileManager {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.profileManager
}

// NodeService returns the node service (may be nil if P2P is unavailable).
func (c *Container) NodeService() *NodeService {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.nodeService
}

// EventHub returns the event hub for WebSocket connections.
func (c *Container) EventHub() *EventHub {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.eventHub
}

// HashrateStore returns the hashrate store interface.
func (c *Container) HashrateStore() database.HashrateStore {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.hashrateStore
}

// SetHashrateStore allows injecting a custom hashrate store (useful for testing).
func (c *Container) SetHashrateStore(store database.HashrateStore) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.hashrateStore = store
}

// ShutdownCh returns a channel that's closed when shutdown is complete.
// No lock needed: the channel is set once in NewContainer and never
// reassigned.
func (c *Container) ShutdownCh() <-chan struct{} {
	return c.shutdownCh
}

// IsInitialized returns true if the container has been initialized.
func (c *Container) IsInitialized() bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.initialized
}

316
mining/container_test.go Normal file
View file

@ -0,0 +1,316 @@
package mining
import (
"context"
"os"
"path/filepath"
"testing"
"time"
"forge.lthn.ai/core/mining/database"
)
func setupContainerTestEnv(t *testing.T) func() {
tmpDir := t.TempDir()
os.Setenv("XDG_CONFIG_HOME", filepath.Join(tmpDir, "config"))
os.Setenv("XDG_DATA_HOME", filepath.Join(tmpDir, "data"))
return func() {
os.Unsetenv("XDG_CONFIG_HOME")
os.Unsetenv("XDG_DATA_HOME")
}
}
func TestNewContainer(t *testing.T) {
config := DefaultContainerConfig()
container := NewContainer(config)
if container == nil {
t.Fatal("NewContainer returned nil")
}
if container.IsInitialized() {
t.Error("Container should not be initialized before Initialize() is called")
}
}
func TestDefaultContainerConfig(t *testing.T) {
	cfg := DefaultContainerConfig()
	if !cfg.Database.Enabled {
		t.Error("Database should be enabled by default")
	}
	if got := cfg.Database.RetentionDays; got != 30 {
		t.Errorf("Expected 30 retention days, got %d", got)
	}
	if addr := cfg.ListenAddr; addr != ":9090" {
		t.Errorf("Expected :9090, got %s", addr)
	}
	if cfg.SimulationMode {
		t.Error("SimulationMode should be false by default")
	}
}
func TestContainer_Initialize(t *testing.T) {
	cleanup := setupContainerTestEnv(t)
	defer cleanup()
	cfg := DefaultContainerConfig()
	cfg.Database.Enabled = true
	cfg.Database.Path = filepath.Join(t.TempDir(), "test.db")
	cfg.SimulationMode = true // Use simulation mode for faster tests
	c := NewContainer(cfg)
	ctx := context.Background()
	if err := c.Initialize(ctx); err != nil {
		t.Fatalf("Initialize failed: %v", err)
	}
	if !c.IsInitialized() {
		t.Error("Container should be initialized after Initialize()")
	}
	// Verify services are available
	if c.Manager() == nil {
		t.Error("Manager should not be nil after initialization")
	}
	if c.ProfileManager() == nil {
		t.Error("ProfileManager should not be nil after initialization")
	}
	if c.EventHub() == nil {
		t.Error("EventHub should not be nil after initialization")
	}
	if c.HashrateStore() == nil {
		t.Error("HashrateStore should not be nil after initialization")
	}
	// Cleanup
	if err := c.Shutdown(ctx); err != nil {
		t.Errorf("Shutdown failed: %v", err)
	}
}
func TestContainer_InitializeTwice(t *testing.T) {
	cleanup := setupContainerTestEnv(t)
	defer cleanup()
	cfg := DefaultContainerConfig()
	cfg.Database.Enabled = false
	cfg.SimulationMode = true
	c := NewContainer(cfg)
	ctx := context.Background()
	if err := c.Initialize(ctx); err != nil {
		t.Fatalf("First Initialize failed: %v", err)
	}
	// Second initialization should fail
	if err := c.Initialize(ctx); err == nil {
		t.Error("Second Initialize should fail")
	}
	c.Shutdown(ctx)
}
// TestContainer_DatabaseDisabled verifies that disabling the database makes
// the container fall back to a no-op hashrate store that accepts writes.
func TestContainer_DatabaseDisabled(t *testing.T) {
	cleanup := setupContainerTestEnv(t)
	defer cleanup()
	config := DefaultContainerConfig()
	config.Database.Enabled = false
	config.SimulationMode = true
	container := NewContainer(config)
	ctx := context.Background()
	if err := container.Initialize(ctx); err != nil {
		t.Fatalf("Initialize failed: %v", err)
	}
	// Should use NopStore when database is disabled
	store := container.HashrateStore()
	if store == nil {
		t.Fatal("HashrateStore should not be nil")
	}
	// NopStore should accept inserts without error
	point := database.HashratePoint{
		Timestamp: time.Now(),
		Hashrate:  1000,
	}
	// Pass a real context instead of nil: context-aware APIs must never
	// receive a nil Context (use context.Background/TODO instead).
	if err := store.InsertHashratePoint(ctx, "test", "xmrig", point, database.ResolutionHigh); err != nil {
		t.Errorf("NopStore insert should not fail: %v", err)
	}
	container.Shutdown(ctx)
}
func TestContainer_SetHashrateStore(t *testing.T) {
	cleanup := setupContainerTestEnv(t)
	defer cleanup()
	cfg := DefaultContainerConfig()
	cfg.Database.Enabled = false
	cfg.SimulationMode = true
	c := NewContainer(cfg)
	ctx := context.Background()
	if err := c.Initialize(ctx); err != nil {
		t.Fatalf("Initialize failed: %v", err)
	}
	// Inject custom store
	custom := database.NopStore()
	c.SetHashrateStore(custom)
	if c.HashrateStore() != custom {
		t.Error("SetHashrateStore should update the store")
	}
	c.Shutdown(ctx)
}
func TestContainer_StartWithoutInitialize(t *testing.T) {
config := DefaultContainerConfig()
container := NewContainer(config)
ctx := context.Background()
if err := container.Start(ctx); err == nil {
t.Error("Start should fail if Initialize was not called")
}
}
func TestContainer_ShutdownWithoutInitialize(t *testing.T) {
config := DefaultContainerConfig()
container := NewContainer(config)
ctx := context.Background()
// Shutdown on uninitialized container should not error
if err := container.Shutdown(ctx); err != nil {
t.Errorf("Shutdown on uninitialized container should not error: %v", err)
}
}
func TestContainer_ShutdownChannel(t *testing.T) {
	cleanup := setupContainerTestEnv(t)
	defer cleanup()
	cfg := DefaultContainerConfig()
	cfg.Database.Enabled = false
	cfg.SimulationMode = true
	c := NewContainer(cfg)
	ctx := context.Background()
	if err := c.Initialize(ctx); err != nil {
		t.Fatalf("Initialize failed: %v", err)
	}
	ch := c.ShutdownCh()
	// Channel should be open before shutdown
	select {
	case <-ch:
		t.Error("ShutdownCh should not be closed before Shutdown()")
	default:
		// Expected
	}
	if err := c.Shutdown(ctx); err != nil {
		t.Errorf("Shutdown failed: %v", err)
	}
	// Channel should be closed after shutdown
	select {
	case <-ch:
		// Expected
	case <-time.After(time.Second):
		t.Error("ShutdownCh should be closed after Shutdown()")
	}
}
func TestContainer_InitializeWithCancelledContext(t *testing.T) {
	cleanup := setupContainerTestEnv(t)
	defer cleanup()
	cfg := DefaultContainerConfig()
	cfg.Database.Enabled = false
	cfg.SimulationMode = true
	c := NewContainer(cfg)
	// Use a pre-cancelled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	// Initialize should still succeed (context is checked at operation start)
	// But operations that check context should respect cancellation
	if err := c.Initialize(ctx); err != nil {
		// This is acceptable - initialization may fail with cancelled context
		t.Logf("Initialize with cancelled context: %v (acceptable)", err)
	}
	// Cleanup if initialized
	if c.IsInitialized() {
		c.Shutdown(context.Background())
	}
}
func TestContainer_ShutdownWithTimeout(t *testing.T) {
	cleanup := setupContainerTestEnv(t)
	defer cleanup()
	cfg := DefaultContainerConfig()
	cfg.Database.Enabled = false
	cfg.SimulationMode = true
	c := NewContainer(cfg)
	if err := c.Initialize(context.Background()); err != nil {
		t.Fatalf("Initialize failed: %v", err)
	}
	// Use a context with very short timeout
	timeoutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	// Shutdown should still complete (cleanup is fast without real miners)
	if err := c.Shutdown(timeoutCtx); err != nil {
		t.Logf("Shutdown with timeout: %v (may be acceptable)", err)
	}
}
func TestContainer_DoubleShutdown(t *testing.T) {
	cleanup := setupContainerTestEnv(t)
	defer cleanup()
	cfg := DefaultContainerConfig()
	cfg.Database.Enabled = false
	cfg.SimulationMode = true
	c := NewContainer(cfg)
	ctx := context.Background()
	if err := c.Initialize(ctx); err != nil {
		t.Fatalf("Initialize failed: %v", err)
	}
	// First shutdown
	if err := c.Shutdown(ctx); err != nil {
		t.Errorf("First shutdown failed: %v", err)
	}
	// Second shutdown should not panic or error
	if err := c.Shutdown(ctx); err != nil {
		t.Logf("Second shutdown returned: %v (expected no-op)", err)
	}
}

127
mining/dual_mining_test.go Normal file
View file

@ -0,0 +1,127 @@
package mining
import (
	"context"
	"strings"
	"testing"
	"time"
)
// TestDualMiningCPUAndGPU tests running CPU and GPU mining together.
// This is an integration test: it requires XMRig installed and a GPU with
// OpenCL support, connects to a real pool, and mines for ~20 seconds.
// It is skipped under -short and when XMRig is not installed.
func TestDualMiningCPUAndGPU(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping dual mining test in short mode")
	}
	miner := NewXMRigMiner()
	details, err := miner.CheckInstallation()
	if err != nil || !details.IsInstalled {
		t.Skip("XMRig not installed, skipping dual mining test")
	}
	manager := NewManager()
	defer manager.Stop()
	// Dual mining config:
	// - CPU: 25% threads on RandomX
	// - GPU: OpenCL device 0 (discrete GPU, not iGPU)
	config := &Config{
		Pool:              "stratum+tcp://pool.supportxmr.com:3333",
		Wallet:            "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
		Algo:              "rx/0",
		CPUMaxThreadsHint: 25, // 25% CPU
		// GPU config - explicit device selection required!
		GPUEnabled: true,
		OpenCL:     true, // AMD GPU
		Devices:    "0",  // Device 0 only - user must pick
	}
	minerInstance, err := manager.StartMiner(context.Background(), "xmrig", config)
	if err != nil {
		t.Fatalf("Failed to start dual miner: %v", err)
	}
	t.Logf("Started dual miner: %s", minerInstance.GetName())
	// Let it warm up. Fixed sleep: stats are unreliable immediately after start.
	time.Sleep(20 * time.Second)
	// Get stats (best-effort: failures are logged, not fatal).
	stats, err := minerInstance.GetStats(context.Background())
	if err != nil {
		t.Logf("Warning: couldn't get stats: %v", err)
	} else {
		t.Logf("Hashrate: %d H/s, Shares: %d, Algo: %s",
			stats.Hashrate, stats.Shares, stats.Algorithm)
	}
	// Check logs for GPU initialization
	logs := minerInstance.GetLogs()
	gpuFound := false
	for _, line := range logs {
		if contains(line, "OpenCL") || contains(line, "GPU") {
			gpuFound = true
			t.Logf("GPU log: %s", line)
		}
	}
	if !gpuFound {
		t.Log("No GPU-related log lines found - GPU may not be mining")
	}
	// Clean up. NOTE(review): StopMiner's error is intentionally ignored here;
	// manager.Stop (deferred above) is the backstop.
	manager.StopMiner(context.Background(), minerInstance.GetName())
}
// TestGPUDeviceSelection tests that GPU mining requires explicit device selection.
// It monkey-patches getXMRigConfigPath so the generated config lands in a temp
// dir, then generates a config with GPUEnabled but no Devices value.
// NOTE(review): the test only logs the expectation; it never parses the
// generated JSON to assert the GPU was actually disabled — consider adding
// that assertion. Also prefer filepath.Join over string concatenation below.
func TestGPUDeviceSelection(t *testing.T) {
	tmpDir := t.TempDir()
	miner := &XMRigMiner{
		BaseMiner: BaseMiner{
			Name: "xmrig-device-test",
			API: &API{
				Enabled:    true,
				ListenHost: "127.0.0.1",
				ListenPort: 54321,
			},
		},
	}
	// Swap out the config-path resolver; restored via defer.
	origGetPath := getXMRigConfigPath
	getXMRigConfigPath = func(name string) (string, error) {
		return tmpDir + "/" + name + ".json", nil
	}
	defer func() { getXMRigConfigPath = origGetPath }()
	// Config WITHOUT device selection - GPU should be disabled
	configNoDevice := &Config{
		Pool:   "stratum+tcp://pool.supportxmr.com:3333",
		Wallet: "test_wallet",
		Algo:   "rx/0",
		GPUEnabled: true,
		OpenCL:     true,
		// NO Devices specified!
	}
	err := miner.createConfig(configNoDevice)
	if err != nil {
		t.Fatalf("Failed to create config: %v", err)
	}
	// GPU should be disabled because no device was specified
	t.Log("Config without explicit device - GPU should be disabled (safe default)")
}
// contains reports whether substr is within s.
// Replaces the previous hand-rolled scan with the standard library's
// strings.Contains, which has identical semantics (including empty-string
// cases: every string contains "").
func contains(s, substr string) bool {
	return strings.Contains(s, substr)
}
// containsAt reports whether substr occurs in s at or after index start.
func containsAt(s, substr string, start int) bool {
	limit := len(s) - len(substr)
	for i := start; i <= limit; i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}

248
mining/errors.go Normal file
View file

@ -0,0 +1,248 @@
package mining
import (
"fmt"
"net/http"
)
// Error codes for the mining package.
// These machine-readable identifiers travel in MiningError.Code and are
// surfaced to API clients, so treat them as part of the public contract.
const (
	ErrCodeMinerNotFound      = "MINER_NOT_FOUND"
	ErrCodeMinerExists        = "MINER_EXISTS"
	ErrCodeMinerNotRunning    = "MINER_NOT_RUNNING"
	ErrCodeInstallFailed      = "INSTALL_FAILED"
	ErrCodeStartFailed        = "START_FAILED"
	ErrCodeStopFailed         = "STOP_FAILED"
	ErrCodeInvalidConfig      = "INVALID_CONFIG"
	ErrCodeInvalidInput       = "INVALID_INPUT"
	ErrCodeUnsupportedMiner   = "UNSUPPORTED_MINER"
	ErrCodeNotSupported       = "NOT_SUPPORTED"
	ErrCodeConnectionFailed   = "CONNECTION_FAILED"
	ErrCodeServiceUnavailable = "SERVICE_UNAVAILABLE"
	ErrCodeTimeout            = "TIMEOUT"
	ErrCodeDatabaseError      = "DATABASE_ERROR"
	ErrCodeProfileNotFound    = "PROFILE_NOT_FOUND"
	ErrCodeProfileExists      = "PROFILE_EXISTS"
	ErrCodeInternalError      = "INTERNAL_ERROR"
	// Deprecated: ErrCodeInternal is an alias of ErrCodeInternalError;
	// prefer the canonical name in new code.
	ErrCodeInternal = "INTERNAL_ERROR"
)

// MiningError is a structured error type for the mining package.
// It implements error and Unwrap, so errors.Is/errors.As traverse the
// Cause chain; StatusCode maps it onto an HTTP response.
type MiningError struct {
	Code       string // Machine-readable error code
	Message    string // Human-readable message
	Details    string // Technical details (for debugging)
	Suggestion string // What to do next
	Retryable  bool   // Can the client retry?
	HTTPStatus int    // HTTP status code to return
	Cause      error  // Underlying error
}
// Error implements the error interface, rendering "CODE: message" and
// appending the underlying cause when one is attached.
func (e *MiningError) Error() string {
	if e.Cause == nil {
		return fmt.Sprintf("%s: %s", e.Code, e.Message)
	}
	return fmt.Sprintf("%s: %s (%v)", e.Code, e.Message, e.Cause)
}
// Unwrap returns the underlying error (nil when none), enabling
// errors.Is / errors.As traversal through the Cause chain.
func (e *MiningError) Unwrap() error {
	return e.Cause
}

// WithCause adds an underlying error.
// Mutates the receiver in place and returns it so calls can be chained.
func (e *MiningError) WithCause(err error) *MiningError {
	e.Cause = err
	return e
}

// WithDetails adds technical details. Mutates and returns the receiver for chaining.
func (e *MiningError) WithDetails(details string) *MiningError {
	e.Details = details
	return e
}

// WithSuggestion adds a suggestion for the user. Mutates and returns the receiver for chaining.
func (e *MiningError) WithSuggestion(suggestion string) *MiningError {
	e.Suggestion = suggestion
	return e
}

// IsRetryable returns whether the error is retryable.
func (e *MiningError) IsRetryable() bool {
	return e.Retryable
}
// StatusCode returns the HTTP status code for this error.
// An unset (zero) HTTPStatus defaults to 500 Internal Server Error.
func (e *MiningError) StatusCode() int {
	if status := e.HTTPStatus; status != 0 {
		return status
	}
	return http.StatusInternalServerError
}
// NewMiningError creates a new MiningError with the given code and message.
// The HTTP status defaults to 500; use the Err* constructors for errors
// with more specific status codes.
func NewMiningError(code, message string) *MiningError {
	e := &MiningError{
		HTTPStatus: http.StatusInternalServerError,
		Code:       code,
		Message:    message,
	}
	return e
}
// Predefined error constructors for common errors
// ErrMinerNotFound creates a miner not found error (HTTP 404, not retryable).
func ErrMinerNotFound(name string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeMinerNotFound,
		HTTPStatus: http.StatusNotFound,
		Retryable:  false,
	}
	e.Message = fmt.Sprintf("miner '%s' not found", name)
	e.Suggestion = "Check that the miner name is correct and that it is running"
	return e
}
// ErrMinerExists creates a miner already exists error (HTTP 409, not retryable).
func ErrMinerExists(name string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeMinerExists,
		HTTPStatus: http.StatusConflict,
		Retryable:  false,
	}
	e.Message = fmt.Sprintf("miner '%s' is already running", name)
	e.Suggestion = "Stop the existing miner first or use a different configuration"
	return e
}
// ErrMinerNotRunning creates a miner not running error (HTTP 400, not retryable).
func ErrMinerNotRunning(name string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeMinerNotRunning,
		HTTPStatus: http.StatusBadRequest,
		Retryable:  false,
	}
	e.Message = fmt.Sprintf("miner '%s' is not running", name)
	e.Suggestion = "Start the miner first before performing this operation"
	return e
}
// ErrInstallFailed creates an installation failed error (HTTP 500, retryable).
func ErrInstallFailed(minerType string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeInstallFailed,
		HTTPStatus: http.StatusInternalServerError,
		Retryable:  true,
	}
	e.Message = fmt.Sprintf("failed to install %s", minerType)
	e.Suggestion = "Check your internet connection and try again"
	return e
}
// ErrStartFailed creates a start failed error (HTTP 500, retryable).
func ErrStartFailed(name string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeStartFailed,
		HTTPStatus: http.StatusInternalServerError,
		Retryable:  true,
	}
	e.Message = fmt.Sprintf("failed to start miner '%s'", name)
	e.Suggestion = "Check the miner configuration and logs for details"
	return e
}
// ErrStopFailed creates a stop failed error (HTTP 500, retryable).
func ErrStopFailed(name string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeStopFailed,
		HTTPStatus: http.StatusInternalServerError,
		Retryable:  true,
	}
	e.Message = fmt.Sprintf("failed to stop miner '%s'", name)
	e.Suggestion = "The miner process may need to be terminated manually"
	return e
}
// ErrInvalidConfig creates an invalid configuration error (HTTP 400, not retryable).
func ErrInvalidConfig(reason string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeInvalidConfig,
		HTTPStatus: http.StatusBadRequest,
		Retryable:  false,
	}
	e.Message = fmt.Sprintf("invalid configuration: %s", reason)
	e.Suggestion = "Review the configuration and ensure all required fields are provided"
	return e
}
// ErrUnsupportedMiner creates an unsupported miner type error (HTTP 400, not retryable).
func ErrUnsupportedMiner(minerType string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeUnsupportedMiner,
		HTTPStatus: http.StatusBadRequest,
		Retryable:  false,
	}
	e.Message = fmt.Sprintf("unsupported miner type: %s", minerType)
	e.Suggestion = "Use one of the supported miner types: xmrig, tt-miner"
	return e
}
// ErrConnectionFailed creates a connection failed error (HTTP 503, retryable).
func ErrConnectionFailed(target string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeConnectionFailed,
		HTTPStatus: http.StatusServiceUnavailable,
		Retryable:  true,
	}
	e.Message = fmt.Sprintf("failed to connect to %s", target)
	e.Suggestion = "Check network connectivity and try again"
	return e
}
// ErrTimeout creates a timeout error (HTTP 504, retryable).
func ErrTimeout(operation string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeTimeout,
		HTTPStatus: http.StatusGatewayTimeout,
		Retryable:  true,
	}
	e.Message = fmt.Sprintf("operation timed out: %s", operation)
	e.Suggestion = "The operation is taking longer than expected, try again later"
	return e
}
// ErrDatabaseError creates a database error (HTTP 500, retryable).
func ErrDatabaseError(operation string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeDatabaseError,
		HTTPStatus: http.StatusInternalServerError,
		Retryable:  true,
	}
	e.Message = fmt.Sprintf("database error during %s", operation)
	e.Suggestion = "This may be a temporary issue, try again"
	return e
}
// ErrProfileNotFound creates a profile not found error (HTTP 404, not retryable).
func ErrProfileNotFound(id string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeProfileNotFound,
		HTTPStatus: http.StatusNotFound,
		Retryable:  false,
	}
	e.Message = fmt.Sprintf("profile '%s' not found", id)
	e.Suggestion = "Check that the profile ID is correct"
	return e
}
// ErrProfileExists creates a profile already exists error (HTTP 409, not retryable).
func ErrProfileExists(name string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeProfileExists,
		HTTPStatus: http.StatusConflict,
		Retryable:  false,
	}
	e.Message = fmt.Sprintf("profile '%s' already exists", name)
	e.Suggestion = "Use a different name or update the existing profile"
	return e
}
// ErrInternal creates a generic internal error (HTTP 500, retryable).
func ErrInternal(message string) *MiningError {
	e := &MiningError{
		Code:       ErrCodeInternalError,
		HTTPStatus: http.StatusInternalServerError,
		Retryable:  true,
	}
	e.Message = message
	e.Suggestion = "Please report this issue if it persists"
	return e
}

151
mining/errors_test.go Normal file
View file

@ -0,0 +1,151 @@
package mining
import (
"errors"
"net/http"
"testing"
)
func TestMiningError_Error(t *testing.T) {
	e := NewMiningError(ErrCodeMinerNotFound, "miner not found")
	want := "MINER_NOT_FOUND: miner not found"
	if got := e.Error(); got != want {
		t.Errorf("Expected %q, got %q", want, got)
	}
}
func TestMiningError_ErrorWithCause(t *testing.T) {
	root := errors.New("underlying error")
	e := NewMiningError(ErrCodeStartFailed, "failed to start").WithCause(root)
	// Should include cause in error message
	if e.Cause != root {
		t.Error("Cause was not set")
	}
	// Should be unwrappable
	if errors.Unwrap(e) != root {
		t.Error("Unwrap did not return cause")
	}
}
func TestMiningError_WithDetails(t *testing.T) {
	const details = "port must be between 1024 and 65535"
	e := NewMiningError(ErrCodeInvalidConfig, "invalid config").WithDetails(details)
	if e.Details != details {
		t.Errorf("Details not set correctly: %s", e.Details)
	}
}
func TestMiningError_WithSuggestion(t *testing.T) {
	const hint = "check your network"
	e := NewMiningError(ErrCodeConnectionFailed, "connection failed").WithSuggestion(hint)
	if e.Suggestion != hint {
		t.Errorf("Suggestion not set correctly: %s", e.Suggestion)
	}
}
func TestMiningError_StatusCode(t *testing.T) {
	cases := []struct {
		name     string
		err      *MiningError
		expected int
	}{
		{"default", NewMiningError("TEST", "test"), http.StatusInternalServerError},
		{"not found", ErrMinerNotFound("test"), http.StatusNotFound},
		{"conflict", ErrMinerExists("test"), http.StatusConflict},
		{"bad request", ErrInvalidConfig("bad"), http.StatusBadRequest},
		{"service unavailable", ErrConnectionFailed("pool"), http.StatusServiceUnavailable},
		{"timeout", ErrTimeout("operation"), http.StatusGatewayTimeout},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.err.StatusCode(); got != tc.expected {
				t.Errorf("Expected status %d, got %d", tc.expected, got)
			}
		})
	}
}
func TestMiningError_IsRetryable(t *testing.T) {
	cases := []struct {
		name      string
		err       *MiningError
		retryable bool
	}{
		{"not found", ErrMinerNotFound("test"), false},
		{"exists", ErrMinerExists("test"), false},
		{"invalid config", ErrInvalidConfig("bad"), false},
		{"install failed", ErrInstallFailed("xmrig"), true},
		{"start failed", ErrStartFailed("test"), true},
		{"connection failed", ErrConnectionFailed("pool"), true},
		{"timeout", ErrTimeout("operation"), true},
		{"database error", ErrDatabaseError("query"), true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.err.IsRetryable(); got != tc.retryable {
				t.Errorf("Expected retryable=%v, got %v", tc.retryable, got)
			}
		})
	}
}
func TestPredefinedErrors(t *testing.T) {
	cases := []struct {
		name string
		err  *MiningError
		code string
	}{
		{"ErrMinerNotFound", ErrMinerNotFound("test"), ErrCodeMinerNotFound},
		{"ErrMinerExists", ErrMinerExists("test"), ErrCodeMinerExists},
		{"ErrMinerNotRunning", ErrMinerNotRunning("test"), ErrCodeMinerNotRunning},
		{"ErrInstallFailed", ErrInstallFailed("xmrig"), ErrCodeInstallFailed},
		{"ErrStartFailed", ErrStartFailed("test"), ErrCodeStartFailed},
		{"ErrStopFailed", ErrStopFailed("test"), ErrCodeStopFailed},
		{"ErrInvalidConfig", ErrInvalidConfig("bad port"), ErrCodeInvalidConfig},
		{"ErrUnsupportedMiner", ErrUnsupportedMiner("unknown"), ErrCodeUnsupportedMiner},
		{"ErrConnectionFailed", ErrConnectionFailed("pool:3333"), ErrCodeConnectionFailed},
		{"ErrTimeout", ErrTimeout("GetStats"), ErrCodeTimeout},
		{"ErrDatabaseError", ErrDatabaseError("insert"), ErrCodeDatabaseError},
		{"ErrProfileNotFound", ErrProfileNotFound("abc123"), ErrCodeProfileNotFound},
		{"ErrProfileExists", ErrProfileExists("My Profile"), ErrCodeProfileExists},
		{"ErrInternal", ErrInternal("unexpected error"), ErrCodeInternalError},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if tc.err.Code != tc.code {
				t.Errorf("Expected code %s, got %s", tc.code, tc.err.Code)
			}
			if tc.err.Message == "" {
				t.Error("Message should not be empty")
			}
		})
	}
}
func TestMiningError_Chaining(t *testing.T) {
	root := errors.New("network timeout")
	e := ErrConnectionFailed("pool:3333").
		WithCause(root).
		WithDetails("timeout after 30s").
		WithSuggestion("check firewall settings")
	if e.Code != ErrCodeConnectionFailed {
		t.Errorf("Code changed: %s", e.Code)
	}
	if e.Cause != root {
		t.Error("Cause not set")
	}
	if e.Details != "timeout after 30s" {
		t.Errorf("Details not set: %s", e.Details)
	}
	if e.Suggestion != "check firewall settings" {
		t.Errorf("Suggestion not set: %s", e.Suggestion)
	}
}

423
mining/events.go Normal file
View file

@ -0,0 +1,423 @@
package mining
import (
"encoding/json"
"sync"
"time"
"forge.lthn.ai/core/mining/logging"
"github.com/gorilla/websocket"
)
// EventType represents the type of mining event.
// Values are dot-separated strings (e.g. "miner.started") sent verbatim
// to WebSocket clients in the Event.Type field.
type EventType string

const (
	// Miner lifecycle events
	EventMinerStarting  EventType = "miner.starting"
	EventMinerStarted   EventType = "miner.started"
	EventMinerStopping  EventType = "miner.stopping"
	EventMinerStopped   EventType = "miner.stopped"
	EventMinerStats     EventType = "miner.stats"
	EventMinerError     EventType = "miner.error"
	EventMinerConnected EventType = "miner.connected"
	// System events
	EventPong      EventType = "pong"
	EventStateSync EventType = "state.sync" // Initial state on connect/reconnect
)
// Event represents a mining event that can be broadcast to clients.
// It is serialized to JSON as-is; Data is typically MinerStatsData,
// MinerEventData, or an arbitrary map.
type Event struct {
	Type      EventType   `json:"type"`
	Timestamp time.Time   `json:"timestamp"`
	Data      interface{} `json:"data,omitempty"`
}

// MinerStatsData contains stats data for a miner event.
type MinerStatsData struct {
	Name        string `json:"name"`
	Hashrate    int    `json:"hashrate"`
	Shares      int    `json:"shares"`
	Rejected    int    `json:"rejected"`
	Uptime      int    `json:"uptime"`
	Algorithm   string `json:"algorithm,omitempty"`
	DiffCurrent int    `json:"diffCurrent,omitempty"`
}

// MinerEventData contains basic miner event data.
type MinerEventData struct {
	Name      string `json:"name"`
	ProfileID string `json:"profileId,omitempty"`
	Reason    string `json:"reason,omitempty"`
	Error     string `json:"error,omitempty"`
	Pool      string `json:"pool,omitempty"`
}

// wsClient represents a WebSocket client connection.
// Outbound messages are queued on send; the hub closes send (via safeClose)
// to tell writePump to shut the connection down.
type wsClient struct {
	conn      *websocket.Conn
	send      chan []byte
	hub       *EventHub
	miners    map[string]bool // subscribed miners, "*" for all
	minersMu  sync.RWMutex    // protects miners map from concurrent access
	closeOnce sync.Once
}
// safeClose closes the send channel exactly once, so concurrent callers
// cannot trigger a double-close panic.
func (c *wsClient) safeClose() {
	c.closeOnce.Do(func() { close(c.send) })
}
// StateProvider is a function that returns the current state for sync.
// It is invoked once per newly registered client to build the state.sync event.
type StateProvider func() interface{}

// EventHub manages WebSocket connections and event broadcasting.
// The clients map is owned by Run's loop; mu additionally guards it for
// readers such as ClientCount and ServeWs.
type EventHub struct {
	// Registered clients
	clients map[*wsClient]bool
	// Inbound events to broadcast
	broadcast chan Event
	// Register requests from clients
	register chan *wsClient
	// Unregister requests from clients
	unregister chan *wsClient
	// Mutex for thread-safe access
	mu sync.RWMutex
	// Stop signal
	stop chan struct{}
	// Ensure Stop() is called only once
	stopOnce sync.Once
	// Connection limits
	maxConnections int
	// State provider for sync on connect
	stateProvider StateProvider
}
// DefaultMaxConnections is the default maximum WebSocket connections
// an EventHub accepts before ServeWs rejects new clients.
const DefaultMaxConnections = 100
// NewEventHub creates a new EventHub with default settings
// (connection limit of DefaultMaxConnections).
func NewEventHub() *EventHub {
	return NewEventHubWithOptions(DefaultMaxConnections)
}
// NewEventHubWithOptions creates a new EventHub with a custom connection
// limit. Non-positive limits fall back to DefaultMaxConnections.
func NewEventHubWithOptions(maxConnections int) *EventHub {
	limit := maxConnections
	if limit <= 0 {
		limit = DefaultMaxConnections
	}
	return &EventHub{
		clients:        make(map[*wsClient]bool),
		broadcast:      make(chan Event, 256),
		register:       make(chan *wsClient, 16),
		unregister:     make(chan *wsClient, 16), // Buffered to prevent goroutine leaks on shutdown
		stop:           make(chan struct{}),
		maxConnections: limit,
	}
}
// Run starts the EventHub's main loop. It serializes all register,
// unregister, and broadcast traffic and blocks until Stop is called;
// start it in its own goroutine.
//
// Fix: the original logged len(h.clients) AFTER releasing h.mu on both the
// register and unregister paths — a data race with concurrent map writers.
// The count is now snapshotted while the lock is held.
func (h *EventHub) Run() {
	for {
		select {
		case <-h.stop:
			// Shutdown: close every client's send channel and drop it.
			h.mu.Lock()
			for client := range h.clients {
				client.safeClose()
				delete(h.clients, client)
			}
			h.mu.Unlock()
			return
		case client := <-h.register:
			h.mu.Lock()
			h.clients[client] = true
			stateProvider := h.stateProvider
			total := len(h.clients) // snapshot under lock; reading after Unlock races
			h.mu.Unlock()
			logging.Debug("client connected", logging.Fields{"total": total})
			// NOTE(review): the unregister path calls RecordWSConnection(false)
			// but there is no matching RecordWSConnection(true) here — confirm
			// the increment happens at the upgrade site, or the gauge drifts.
			// Send initial state sync if provider is set.
			if stateProvider != nil {
				go func(c *wsClient) {
					defer func() {
						if r := recover(); r != nil {
							logging.Error("panic in state sync goroutine", logging.Fields{"panic": r})
						}
					}()
					state := stateProvider()
					if state == nil {
						return
					}
					event := Event{
						Type:      EventStateSync,
						Timestamp: time.Now(),
						Data:      state,
					}
					data, err := MarshalJSON(event)
					if err != nil {
						logging.Error("failed to marshal state sync", logging.Fields{"error": err})
						return
					}
					select {
					case c.send <- data:
					default:
						// Client buffer full; drop the sync rather than block forever.
					}
				}(client)
			}
		case client := <-h.unregister:
			h.mu.Lock()
			if _, ok := h.clients[client]; ok {
				delete(h.clients, client)
				client.safeClose()
				// Decrement WebSocket connection metrics
				RecordWSConnection(false)
			}
			total := len(h.clients) // snapshot under lock; reading after Unlock races
			h.mu.Unlock()
			logging.Debug("client disconnected", logging.Fields{"total": total})
		case event := <-h.broadcast:
			data, err := MarshalJSON(event)
			if err != nil {
				logging.Error("failed to marshal event", logging.Fields{"error": err})
				continue
			}
			h.mu.RLock()
			for client := range h.clients {
				// Check if client is subscribed to this miner
				if h.shouldSendToClient(client, event) {
					select {
					case client.send <- data:
					default:
						// Client buffer full: hand off the unregister on a new
						// goroutine so the hub loop never blocks on itself.
						go func(c *wsClient) {
							h.unregister <- c
						}(client)
					}
				}
			}
			h.mu.RUnlock()
		}
	}
}
// shouldSendToClient reports whether event should be delivered to client,
// based on the client's miner subscription set. Events without a miner
// name (system events) are always delivered.
func (h *EventHub) shouldSendToClient(client *wsClient, event Event) bool {
	// Always send pong and system events.
	if event.Type == EventPong {
		return true
	}
	// Check miner subscription for miner events (protected by mutex).
	client.minersMu.RLock()
	defer client.minersMu.RUnlock()
	// len of a nil map is 0, so one check covers both "nil" and "empty":
	// no subscription filter means send everything.
	if len(client.miners) == 0 {
		return true
	}
	// Check for wildcard subscription.
	if client.miners["*"] {
		return true
	}
	// Extract miner name from event data.
	minerName := ""
	switch data := event.Data.(type) {
	case MinerStatsData:
		minerName = data.Name
	case MinerEventData:
		minerName = data.Name
	case map[string]interface{}:
		if name, ok := data["name"].(string); ok {
			minerName = name
		}
	}
	if minerName == "" {
		// Non-miner event, send to all.
		return true
	}
	return client.miners[minerName]
}
// Stop shuts down the EventHub; safe to call multiple times because the
// stop channel is closed through a sync.Once.
func (h *EventHub) Stop() {
	h.stopOnce.Do(func() { close(h.stop) })
}
// SetStateProvider sets the function that provides current state for new clients.
func (h *EventHub) SetStateProvider(provider StateProvider) {
	h.mu.Lock()
	h.stateProvider = provider
	h.mu.Unlock()
}
// Broadcast sends an event to all subscribed clients.
// A zero Timestamp is filled in with the current time. The send is
// non-blocking: if the hub's broadcast buffer is full the event is dropped
// with a warning rather than stalling the caller.
func (h *EventHub) Broadcast(event Event) {
	if event.Timestamp.IsZero() {
		event.Timestamp = time.Now()
	}
	select {
	case h.broadcast <- event:
	default:
		logging.Warn("broadcast channel full, dropping event", logging.Fields{"type": event.Type})
	}
}
// ClientCount returns the number of connected clients.
// Safe for concurrent use: the clients map is read under the hub's RLock.
func (h *EventHub) ClientCount() int {
	h.mu.RLock()
	defer h.mu.RUnlock()
	return len(h.clients)
}
// NewEvent creates a new event of the given type, stamped with the current time.
func NewEvent(eventType EventType, data interface{}) Event {
	e := Event{Type: eventType, Data: data}
	e.Timestamp = time.Now()
	return e
}
// writePump pumps messages from the hub to the websocket connection.
// It is the only place in this file that writes to conn, keeping writes on
// a single goroutine. Every write gets a 10s deadline; a ping is sent every
// 30s to keep the connection alive. On exit the connection is closed, which
// in turn unblocks readPump.
func (c *wsClient) writePump() {
	ticker := time.NewTicker(30 * time.Second)
	defer func() {
		ticker.Stop()
		c.conn.Close()
	}()
	for {
		select {
		case message, ok := <-c.send:
			c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
			if !ok {
				// Hub closed the channel
				c.conn.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}
			w, err := c.conn.NextWriter(websocket.TextMessage)
			if err != nil {
				return
			}
			if _, err := w.Write(message); err != nil {
				logging.Debug("WebSocket write error", logging.Fields{"error": err})
				return
			}
			if err := w.Close(); err != nil {
				return
			}
		case <-ticker.C:
			c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
			if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
				return
			}
		}
	}
}
// readPump pumps messages from the websocket connection to the hub.
// Inbound frames are limited to 512 bytes and must arrive within a 60s
// window that is refreshed on each pong. Recognized client messages:
// "subscribe" (replaces the miner filter set) and "ping" (answered with a
// pong event). On exit the client is unregistered and the connection closed.
func (c *wsClient) readPump() {
	defer func() {
		c.hub.unregister <- c
		c.conn.Close()
	}()
	c.conn.SetReadLimit(512)
	c.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
	c.conn.SetPongHandler(func(string) error {
		// Pong received: extend the read deadline.
		c.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
		return nil
	})
	for {
		_, message, err := c.conn.ReadMessage()
		if err != nil {
			// Only log unexpected closes; going-away/abnormal are routine.
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
				logging.Debug("WebSocket error", logging.Fields{"error": err})
			}
			break
		}
		// Parse client message; malformed JSON is silently ignored.
		var msg struct {
			Type   string   `json:"type"`
			Miners []string `json:"miners,omitempty"`
		}
		if err := json.Unmarshal(message, &msg); err != nil {
			continue
		}
		switch msg.Type {
		case "subscribe":
			// Update miner subscription (protected by mutex)
			c.minersMu.Lock()
			c.miners = make(map[string]bool)
			for _, m := range msg.Miners {
				c.miners[m] = true
			}
			c.minersMu.Unlock()
			logging.Debug("client subscribed to miners", logging.Fields{"miners": msg.Miners})
		case "ping":
			// Respond with pong.
			// NOTE(review): this broadcasts the pong to every connected client
			// rather than replying only to the sender — confirm intended.
			c.hub.Broadcast(Event{
				Type:      EventPong,
				Timestamp: time.Now(),
			})
		}
	}
}
// ServeWs handles websocket requests from clients.
// Returns false if the connection was rejected due to limits.
// New clients are subscribed to all miners ("*") by default.
// NOTE(review): the limit check below is check-then-act — two concurrent
// upgrades can both pass and briefly exceed maxConnections; enforcing the
// limit inside Run's register branch would close that race.
func (h *EventHub) ServeWs(conn *websocket.Conn) bool {
	// Check connection limit
	h.mu.RLock()
	currentCount := len(h.clients)
	h.mu.RUnlock()
	if currentCount >= h.maxConnections {
		logging.Warn("connection rejected: limit reached", logging.Fields{"current": currentCount, "max": h.maxConnections})
		conn.WriteMessage(websocket.CloseMessage,
			websocket.FormatCloseMessage(websocket.CloseTryAgainLater, "connection limit reached"))
		conn.Close()
		return false
	}
	client := &wsClient{
		conn:   conn,
		send:   make(chan []byte, 256),
		hub:    h,
		miners: map[string]bool{"*": true}, // Subscribe to all by default
	}
	h.register <- client
	// Start read/write pumps
	go client.writePump()
	go client.readPump()
	return true
}

201
mining/events_test.go Normal file
View file

@ -0,0 +1,201 @@
package mining
import (
"encoding/json"
"sync"
"testing"
"time"
"github.com/gorilla/websocket"
)
func TestNewEventHub(t *testing.T) {
	h := NewEventHub()
	if h == nil {
		t.Fatal("NewEventHub returned nil")
	}
	if h.clients == nil {
		t.Error("clients map should be initialized")
	}
	if h.maxConnections != DefaultMaxConnections {
		t.Errorf("Expected maxConnections %d, got %d", DefaultMaxConnections, h.maxConnections)
	}
}
// TestNewEventHubWithOptions verifies an explicit connection limit is honored
// and that non-positive limits fall back to the default.
func TestNewEventHubWithOptions(t *testing.T) {
	if h := NewEventHubWithOptions(50); h.maxConnections != 50 {
		t.Errorf("Expected maxConnections 50, got %d", h.maxConnections)
	}
	// Non-positive values must fall back to the default limit.
	if h := NewEventHubWithOptions(0); h.maxConnections != DefaultMaxConnections {
		t.Errorf("Expected default maxConnections for 0, got %d", h.maxConnections)
	}
	if h := NewEventHubWithOptions(-1); h.maxConnections != DefaultMaxConnections {
		t.Errorf("Expected default maxConnections for -1, got %d", h.maxConnections)
	}
}
// TestEventHubBroadcast ensures Broadcast does not block when no clients are
// connected: events with no subscribers must be dropped, not queued forever.
func TestEventHubBroadcast(t *testing.T) {
	hub := NewEventHub()
	go hub.Run()
	defer hub.Stop()
	// Create an event
	event := Event{
		Type:      EventMinerStarted,
		Timestamp: time.Now(),
		Data:      MinerEventData{Name: "test-miner"},
	}
	// Broadcast should not block even with no clients
	done := make(chan struct{})
	go func() {
		hub.Broadcast(event)
		close(done)
	}()
	// One second is a generous upper bound; a correct Broadcast returns
	// almost immediately.
	select {
	case <-done:
		// Success
	case <-time.After(time.Second):
		t.Error("Broadcast blocked unexpectedly")
	}
}
// TestEventHubClientCount checks that a freshly started hub reports zero
// connected clients.
func TestEventHubClientCount(t *testing.T) {
	hub := NewEventHub()
	go hub.Run()
	defer hub.Stop()

	if n := hub.ClientCount(); n != 0 {
		t.Errorf("Expected 0 clients, got %d", n)
	}
}
// TestEventHubStop verifies Stop terminates the hub without panicking.
// NOTE(review): the recover only catches panics on this test goroutine; a
// panic inside hub.Run's goroutine would crash the test process instead.
func TestEventHubStop(t *testing.T) {
	hub := NewEventHub()
	go hub.Run()
	// Stop should not panic
	defer func() {
		if r := recover(); r != nil {
			t.Errorf("Stop panicked: %v", r)
		}
	}()
	hub.Stop()
	// Give time for cleanup
	time.Sleep(50 * time.Millisecond)
}
// TestNewEvent verifies the NewEvent constructor stamps the event type, a
// non-zero timestamp, and carries the payload through unchanged.
func TestNewEvent(t *testing.T) {
	payload := MinerEventData{Name: "test-miner"}
	event := NewEvent(EventMinerStarted, payload)

	if event.Type != EventMinerStarted {
		t.Errorf("Expected type %s, got %s", EventMinerStarted, event.Type)
	}
	if event.Timestamp.IsZero() {
		t.Error("Timestamp should not be zero")
	}

	got, ok := event.Data.(MinerEventData)
	if !ok {
		t.Error("Data should be MinerEventData")
	}
	if got.Name != "test-miner" {
		t.Errorf("Expected miner name 'test-miner', got '%s'", got.Name)
	}
}
// TestEventJSON verifies an Event round-trips through JSON encoding.
// After unmarshalling, Data becomes a generic map rather than the concrete
// MinerStatsData, so only the Type field is asserted.
func TestEventJSON(t *testing.T) {
	event := Event{
		Type:      EventMinerStats,
		Timestamp: time.Now(),
		Data: MinerStatsData{
			Name:     "test-miner",
			Hashrate: 1000,
			Shares:   10,
			Rejected: 1,
			Uptime:   3600,
		},
	}
	data, err := json.Marshal(event)
	if err != nil {
		t.Fatalf("Failed to marshal event: %v", err)
	}
	var decoded Event
	if err := json.Unmarshal(data, &decoded); err != nil {
		t.Fatalf("Failed to unmarshal event: %v", err)
	}
	if decoded.Type != EventMinerStats {
		t.Errorf("Expected type %s, got %s", EventMinerStats, decoded.Type)
	}
}
// TestSetStateProvider verifies that registering a state provider does not
// invoke it immediately; the hub is expected to call it only when a client
// connects. The mutex guards `called` against a racing hub goroutine.
func TestSetStateProvider(t *testing.T) {
	hub := NewEventHub()
	go hub.Run()
	defer hub.Stop()
	called := false
	var mu sync.Mutex
	provider := func() interface{} {
		mu.Lock()
		called = true
		mu.Unlock()
		return map[string]string{"status": "ok"}
	}
	hub.SetStateProvider(provider)
	// The provider should be set but not called until a client connects
	mu.Lock()
	wasCalled := called
	mu.Unlock()
	if wasCalled {
		t.Error("Provider should not be called until client connects")
	}
}
// MockWebSocketConn provides a minimal mock for testing.
// NOTE(review): currently unused by any test in this file; kept as
// scaffolding for future write-path tests. Because it embeds websocket.Conn
// and contains a sync.Mutex, values of this type must not be copied.
type MockWebSocketConn struct {
	websocket.Conn
	written [][]byte // frames captured by a future mock write path
	mu      sync.Mutex
}
// TestEventTypes asserts that every declared event type constant has a
// non-empty string value.
func TestEventTypes(t *testing.T) {
	for _, et := range []EventType{
		EventMinerStarting,
		EventMinerStarted,
		EventMinerStopping,
		EventMinerStopped,
		EventMinerStats,
		EventMinerError,
		EventMinerConnected,
		EventPong,
		EventStateSync,
	} {
		if et == "" {
			t.Error("Event type should not be empty")
		}
	}
}

57
mining/file_utils.go Normal file
View file

@ -0,0 +1,57 @@
package mining
import (
"fmt"
"os"
"path/filepath"
)
// AtomicWriteFile writes data to a file atomically by writing to a temp file
// first, syncing to disk, then renaming to the target path. This prevents
// corruption if the process is interrupted during write.
func AtomicWriteFile(path string, data []byte, perm os.FileMode) error {
dir := filepath.Dir(path)
// Create temp file in the same directory for atomic rename
tmpFile, err := os.CreateTemp(dir, ".tmp-*")
if err != nil {
return fmt.Errorf("failed to create temp file: %w", err)
}
tmpPath := tmpFile.Name()
// Clean up temp file on error
success := false
defer func() {
if !success {
os.Remove(tmpPath)
}
}()
if _, err := tmpFile.Write(data); err != nil {
tmpFile.Close()
return fmt.Errorf("failed to write temp file: %w", err)
}
// Sync to ensure data is flushed to disk before rename
if err := tmpFile.Sync(); err != nil {
tmpFile.Close()
return fmt.Errorf("failed to sync temp file: %w", err)
}
if err := tmpFile.Close(); err != nil {
return fmt.Errorf("failed to close temp file: %w", err)
}
// Set permissions before rename
if err := os.Chmod(tmpPath, perm); err != nil {
return fmt.Errorf("failed to set file permissions: %w", err)
}
// Atomic rename (on POSIX systems)
if err := os.Rename(tmpPath, path); err != nil {
return fmt.Errorf("failed to rename temp file: %w", err)
}
success = true
return nil
}

775
mining/manager.go Normal file
View file

@ -0,0 +1,775 @@
package mining
import (
"context"
"fmt"
"net"
"regexp"
"strings"
"sync"
"time"
"forge.lthn.ai/core/mining/database"
"forge.lthn.ai/core/mining/logging"
)
// instanceNameRegex matches characters that are NOT allowed in a miner
// instance name; StartMiner replaces matches with '_' before embedding the
// value in an instance identifier.
// NOTE(review): '/' is permitted by this character class — confirm instance
// names are never used directly as filesystem paths, or tighten the set.
var instanceNameRegex = regexp.MustCompile(`[^a-zA-Z0-9_/-]`)
// ManagerInterface defines the contract for a miner manager.
type ManagerInterface interface {
	// StartMiner creates and starts a miner of the given type, returning
	// the running instance.
	StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error)
	// StopMiner stops the named miner and removes it from the manager.
	StopMiner(ctx context.Context, name string) error
	// GetMiner returns the running miner with the given instance name.
	GetMiner(name string) (Miner, error)
	// ListMiners returns all currently managed miners.
	ListMiners() []Miner
	// ListAvailableMiners returns the miner types that can be started.
	ListAvailableMiners() []AvailableMiner
	// GetMinerHashrateHistory returns the in-memory hashrate history for a miner.
	GetMinerHashrateHistory(name string) ([]HashratePoint, error)
	// UninstallMiner stops running instances of the type and removes the
	// miner's files and configuration.
	UninstallMiner(ctx context.Context, minerType string) error
	// Stop shuts down all miners and background goroutines.
	Stop()
}
// Manager handles the lifecycle and operations of multiple miners.
// Exported methods are intended to be safe for concurrent use.
type Manager struct {
	miners      map[string]Miner // running miners keyed by instance name; guarded by mu
	mu          sync.RWMutex     // guards miners
	stopChan    chan struct{}    // closed by Stop to terminate background goroutines
	stopOnce    sync.Once        // makes Stop idempotent
	waitGroup   sync.WaitGroup   // tracks background goroutines for shutdown
	dbEnabled   bool             // whether SQLite persistence is active; set during initDatabase
	dbRetention int              // days of history to keep in the database
	eventHub    *EventHub        // optional hub for broadcasting miner events
	eventHubMu  sync.RWMutex     // Separate mutex for eventHub to avoid deadlock with main mu
}
// SetEventHub installs the event hub used to broadcast miner events.
// It takes only eventHubMu, so it is safe to call at any time.
func (m *Manager) SetEventHub(hub *EventHub) {
	m.eventHubMu.Lock()
	m.eventHub = hub
	m.eventHubMu.Unlock()
}
// emitEvent broadcasts an event when an event hub is configured and is a
// no-op otherwise. It only takes eventHubMu (never m.mu), so it is safe to
// call while holding the main lock; the hub pointer is snapshotted and the
// lock released before Broadcast runs.
func (m *Manager) emitEvent(eventType EventType, data interface{}) {
	m.eventHubMu.RLock()
	hub := m.eventHub
	m.eventHubMu.RUnlock()
	if hub == nil {
		return
	}
	hub.Broadcast(NewEvent(eventType, data))
}
// Compile-time check that *Manager satisfies ManagerInterface.
var _ ManagerInterface = (*Manager)(nil)
// NewManager creates a new miner manager and autostarts miners based on config.
// Construction has side effects: the miners config file is synced with the
// list of available miners, the SQLite database is initialized (when enabled),
// autostart-flagged miners are launched, and the background stats-collection
// goroutine is started. Call Stop to release these resources.
func NewManager() *Manager {
	m := &Manager{
		miners:    make(map[string]Miner),
		stopChan:  make(chan struct{}),
		waitGroup: sync.WaitGroup{},
	}
	m.syncMinersConfig() // Ensure config file is populated
	m.initDatabase()
	m.autostartMiners()
	m.startStatsCollection()
	return m
}
// NewManagerForSimulation creates a manager for simulation mode.
// It skips config sync, database initialization, and autostart so no real
// miner processes or files are touched; only the stats-collection goroutine
// runs. Suitable for UI testing.
func NewManagerForSimulation() *Manager {
	mgr := &Manager{
		miners:   make(map[string]Miner),
		stopChan: make(chan struct{}),
	}
	mgr.startStatsCollection()
	return mgr
}
// initDatabase initializes the SQLite database based on the miners config.
// On any failure persistence is disabled (m.dbEnabled = false) and the
// manager continues without history; errors are logged, never returned.
func (m *Manager) initDatabase() {
	cfg, err := LoadMinersConfig()
	if err != nil {
		logging.Warn("could not load config for database init", logging.Fields{"error": err})
		return
	}
	m.dbEnabled = cfg.Database.Enabled
	m.dbRetention = cfg.Database.RetentionDays
	// Default retention window when the config leaves it unset.
	if m.dbRetention == 0 {
		m.dbRetention = 30
	}
	if !m.dbEnabled {
		logging.Debug("database persistence is disabled")
		return
	}
	dbCfg := database.Config{
		Enabled:       true,
		RetentionDays: m.dbRetention,
	}
	if err := database.Initialize(dbCfg); err != nil {
		logging.Warn("failed to initialize database", logging.Fields{"error": err})
		m.dbEnabled = false
		return
	}
	logging.Info("database persistence enabled", logging.Fields{"retention_days": m.dbRetention})
	// Start periodic cleanup
	m.startDBCleanup()
}
// startDBCleanup starts a goroutine that periodically prunes database rows
// older than the retention window. It runs once immediately, then hourly,
// until stopChan is closed. Registered with waitGroup so Stop can await it.
func (m *Manager) startDBCleanup() {
	m.waitGroup.Add(1)
	go func() {
		defer m.waitGroup.Done()
		// A panic during cleanup must not take down the process.
		defer func() {
			if r := recover(); r != nil {
				logging.Error("panic in database cleanup goroutine", logging.Fields{"panic": r})
			}
		}()
		// Run cleanup once per hour
		ticker := time.NewTicker(time.Hour)
		defer ticker.Stop()
		// Run initial cleanup
		if err := database.Cleanup(m.dbRetention); err != nil {
			logging.Warn("database cleanup failed", logging.Fields{"error": err})
		}
		for {
			select {
			case <-ticker.C:
				if err := database.Cleanup(m.dbRetention); err != nil {
					logging.Warn("database cleanup failed", logging.Fields{"error": err})
				}
			case <-m.stopChan:
				return
			}
		}
	}()
}
// syncMinersConfig ensures the miners.json config file has entries for all
// available miners, appending a disabled default entry for any miner type
// that is missing. Failures are logged and otherwise ignored (best-effort).
func (m *Manager) syncMinersConfig() {
	cfg, err := LoadMinersConfig()
	if err != nil {
		logging.Warn("could not load miners config for sync", logging.Fields{"error": err})
		return
	}
	// Index configured miner types once (case-insensitive; type names are
	// ASCII identifiers) instead of rescanning the config slice for every
	// available miner — O(n+m) instead of O(n*m).
	configured := make(map[string]struct{}, len(cfg.Miners))
	for _, mc := range cfg.Miners {
		configured[strings.ToLower(mc.MinerType)] = struct{}{}
	}
	configUpdated := false
	for _, availableMiner := range m.ListAvailableMiners() {
		if _, ok := configured[strings.ToLower(availableMiner.Name)]; ok {
			continue
		}
		cfg.Miners = append(cfg.Miners, MinerAutostartConfig{
			MinerType: availableMiner.Name,
			Autostart: false,
			Config:    nil, // No default config
		})
		configUpdated = true
		logging.Info("added default config for missing miner", logging.Fields{"miner": availableMiner.Name})
	}
	if configUpdated {
		if err := SaveMinersConfig(cfg); err != nil {
			logging.Warn("failed to save updated miners config", logging.Fields{"error": err})
		}
	}
}
// autostartMiners loads the miners config and starts any miners marked for
// autostart. A miner is eligible only when it also has a saved Config; a
// failure to start one miner is logged and does not block the others.
func (m *Manager) autostartMiners() {
	cfg, err := LoadMinersConfig()
	if err != nil {
		logging.Warn("could not load miners config for autostart", logging.Fields{"error": err})
		return
	}
	for _, minerCfg := range cfg.Miners {
		if minerCfg.Autostart && minerCfg.Config != nil {
			logging.Info("autostarting miner", logging.Fields{"type": minerCfg.MinerType})
			if _, err := m.StartMiner(context.Background(), minerCfg.MinerType, minerCfg.Config); err != nil {
				logging.Error("failed to autostart miner", logging.Fields{"type": minerCfg.MinerType, "error": err})
			}
		}
	}
}
// findAvailablePort asks the OS for a free TCP port by binding to port 0 on
// localhost, then releasing the listener and returning the assigned port.
// The port is only *likely* free: another process may claim it between this
// call and the caller binding it.
func findAvailablePort() (int, error) {
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}
// StartMiner starts a new miner and saves its configuration.
// The context can cancel the operation before the lock is taken; once
// startup begins it runs to completion. The started miner is registered in
// m.miners under a derived instance name and returned to the caller.
func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
	// Check for cancellation before acquiring lock
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	if config == nil {
		config = &Config{}
	}
	miner, err := CreateMiner(minerType)
	if err != nil {
		return nil, err
	}
	// Derive a unique instance name: "<type>-<algo>" when an algorithm is
	// configured, otherwise "<type>-<time-derived suffix>".
	instanceName := miner.GetName()
	if config.Algo != "" {
		// Sanitize algo to prevent directory traversal or invalid filenames
		sanitizedAlgo := instanceNameRegex.ReplaceAllString(config.Algo, "_")
		instanceName = fmt.Sprintf("%s-%s", instanceName, sanitizedAlgo)
	} else {
		// NOTE(review): UnixNano()%1000 yields only 1000 distinct suffixes;
		// rapid starts of the same type could collide and be rejected below.
		instanceName = fmt.Sprintf("%s-%d", instanceName, time.Now().UnixNano()%1000)
	}
	if _, exists := m.miners[instanceName]; exists {
		return nil, fmt.Errorf("a miner with a similar configuration is already running: %s", instanceName)
	}
	// Validate user-provided HTTPPort if specified
	if config.HTTPPort != 0 {
		if config.HTTPPort < 1024 || config.HTTPPort > 65535 {
			return nil, fmt.Errorf("HTTPPort must be between 1024 and 65535, got %d", config.HTTPPort)
		}
	}
	// The miner's own API always gets a fresh OS-assigned port; the user's
	// HTTPPort defaults to that same port when left unset.
	apiPort, err := findAvailablePort()
	if err != nil {
		return nil, fmt.Errorf("failed to find an available port for the miner API: %w", err)
	}
	if config.HTTPPort == 0 {
		config.HTTPPort = apiPort
	}
	// Apply the instance name and API port to the concrete miner type.
	if xmrigMiner, ok := miner.(*XMRigMiner); ok {
		xmrigMiner.Name = instanceName
		if xmrigMiner.API != nil {
			xmrigMiner.API.ListenPort = apiPort
		}
	}
	if ttMiner, ok := miner.(*TTMiner); ok {
		ttMiner.Name = instanceName
		if ttMiner.API != nil {
			ttMiner.API.ListenPort = apiPort
		}
	}
	// Emit starting event before actually starting
	m.emitEvent(EventMinerStarting, MinerEventData{
		Name: instanceName,
	})
	if err := miner.Start(config); err != nil {
		// Emit error event
		m.emitEvent(EventMinerError, MinerEventData{
			Name:  instanceName,
			Error: err.Error(),
		})
		return nil, err
	}
	m.miners[instanceName] = miner
	// Persist the config so this miner autostarts on the next launch;
	// failure to persist does not fail the start.
	if err := m.updateMinerConfig(minerType, true, config); err != nil {
		logging.Warn("failed to save miner config for autostart", logging.Fields{"error": err})
	}
	logMessage := fmt.Sprintf("CryptoCurrency Miner started: %s (Binary: %s)", miner.GetName(), miner.GetBinaryPath())
	logToSyslog(logMessage)
	// Emit started event
	m.emitEvent(EventMinerStarted, MinerEventData{
		Name: instanceName,
	})
	RecordMinerStart()
	return miner, nil
}
// UninstallMiner stops any running instances of the given miner type,
// removes the miner's installed files, and deletes its entry from the
// persisted config. The context can cancel the operation before work begins.
func (m *Manager) UninstallMiner(ctx context.Context, minerType string) error {
	// Check for cancellation before acquiring lock
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	m.mu.Lock()
	// Collect miners to stop and delete (can't modify map during iteration)
	minersToDelete := make([]string, 0)
	minersToStop := make([]Miner, 0)
	for name, runningMiner := range m.miners {
		// Match either concrete miner implementation by executable name.
		if rm, ok := runningMiner.(*XMRigMiner); ok && strings.EqualFold(rm.ExecutableName, minerType) {
			minersToStop = append(minersToStop, runningMiner)
			minersToDelete = append(minersToDelete, name)
		}
		if rm, ok := runningMiner.(*TTMiner); ok && strings.EqualFold(rm.ExecutableName, minerType) {
			minersToStop = append(minersToStop, runningMiner)
			minersToDelete = append(minersToDelete, name)
		}
	}
	// Delete from map first, then release lock before stopping (Stop may block)
	for _, name := range minersToDelete {
		delete(m.miners, name)
	}
	m.mu.Unlock()
	// Stop miners outside the lock to avoid blocking
	for i, miner := range minersToStop {
		if err := miner.Stop(); err != nil {
			logging.Warn("failed to stop running miner during uninstall", logging.Fields{"miner": minersToDelete[i], "error": err})
		}
	}
	// A fresh instance is created only to locate and remove installed files.
	miner, err := CreateMiner(minerType)
	if err != nil {
		return err
	}
	if err := miner.Uninstall(); err != nil {
		return fmt.Errorf("failed to uninstall miner files: %w", err)
	}
	// Finally, drop the miner's entry from the persisted config.
	return UpdateMinersConfig(func(cfg *MinersConfig) error {
		var updatedMiners []MinerAutostartConfig
		for _, minerCfg := range cfg.Miners {
			if !strings.EqualFold(minerCfg.MinerType, minerType) {
				updatedMiners = append(updatedMiners, minerCfg)
			}
		}
		cfg.Miners = updatedMiners
		return nil
	})
}
// updateMinerConfig persists the autostart flag and last-used config for the
// given miner type, appending a new entry when none exists yet.
func (m *Manager) updateMinerConfig(minerType string, autostart bool, config *Config) error {
	return UpdateMinersConfig(func(cfg *MinersConfig) error {
		for i := range cfg.Miners {
			if strings.EqualFold(cfg.Miners[i].MinerType, minerType) {
				cfg.Miners[i].Autostart = autostart
				cfg.Miners[i].Config = config
				return nil
			}
		}
		// No existing entry for this type: record a new one.
		cfg.Miners = append(cfg.Miners, MinerAutostartConfig{
			MinerType: minerType,
			Autostart: autostart,
			Config:    config,
		})
		return nil
	})
}
// StopMiner stops a running miner and removes it from the manager.
// If the miner is already stopped it is still removed, so stale entries do
// not accumulate. The context can be used to cancel the operation.
func (m *Manager) StopMiner(ctx context.Context, name string) error {
	// Honor cancellation before taking the lock.
	if err := ctx.Err(); err != nil {
		return err
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	miner, exists := m.miners[name]
	if !exists {
		// Fall back to prefix matching so callers may pass a base name
		// (e.g. "xmrig") for an instance named "xmrig-<algo>".
		for k, v := range m.miners {
			if strings.HasPrefix(k, name) {
				miner, name, exists = v, k, true
				break
			}
		}
	}
	if !exists {
		return fmt.Errorf("miner not found: %s", name)
	}

	m.emitEvent(EventMinerStopping, MinerEventData{Name: name})

	// Try to stop the miner but always drop it from the map; this also
	// cleans up miners that crashed or were killed externally.
	stopErr := miner.Stop()
	delete(m.miners, name)

	// "miner is not running" is treated as success: the desired state
	// (stopped) was already reached.
	notRunning := stopErr != nil && stopErr.Error() == "miner is not running"

	reason := "stopped"
	if stopErr != nil && !notRunning {
		reason = stopErr.Error()
	}
	m.emitEvent(EventMinerStopped, MinerEventData{Name: name, Reason: reason})

	if stopErr != nil && !notRunning {
		return stopErr
	}
	RecordMinerStop()
	return nil
}
// GetMiner retrieves a running miner by its exact instance name.
func (m *Manager) GetMiner(name string) (Miner, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if miner, ok := m.miners[name]; ok {
		return miner, nil
	}
	return nil, fmt.Errorf("miner not found: %s", name)
}
// ListMiners returns a snapshot slice of all running miners, in no
// particular order.
func (m *Manager) ListMiners() []Miner {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]Miner, 0, len(m.miners))
	for _, mn := range m.miners {
		out = append(out, mn)
	}
	return out
}
// RegisterMiner registers an already-started miner with the manager.
// This is useful for simulated miners or externally managed miners.
// Returns an error if a miner with the same name is already registered.
func (m *Manager) RegisterMiner(miner Miner) error {
	name := miner.GetName()
	m.mu.Lock()
	if _, exists := m.miners[name]; exists {
		m.mu.Unlock()
		return fmt.Errorf("miner %s is already registered", name)
	}
	m.miners[name] = miner
	m.mu.Unlock()
	logging.Info("registered miner", logging.Fields{"name": name})
	// Emit miner started event (outside lock). Uses MinerEventData so the
	// payload shape matches the started/stopped events emitted by
	// StartMiner and StopMiner, instead of the ad-hoc map used previously.
	m.emitEvent(EventMinerStarted, MinerEventData{
		Name: name,
	})
	return nil
}
// ListAvailableMiners returns the static catalog of miner types this manager
// knows how to install and run.
func (m *Manager) ListAvailableMiners() []AvailableMiner {
	available := []AvailableMiner{
		{
			Name:        "xmrig",
			Description: "XMRig is a high performance, open source, cross platform RandomX, KawPow, CryptoNight and AstroBWT CPU/GPU miner and RandomX benchmark.",
		},
		{
			Name:        "tt-miner",
			Description: "TT-Miner is a high performance NVIDIA GPU miner for various algorithms including Ethash, KawPow, ProgPow, and more. Requires CUDA.",
		},
	}
	return available
}
// startStatsCollection starts a background goroutine that polls stats from
// all active miners every HighResolutionInterval until stopChan is closed.
// Registered with waitGroup so Stop can await it during shutdown.
func (m *Manager) startStatsCollection() {
	m.waitGroup.Add(1)
	go func() {
		defer m.waitGroup.Done()
		// A panic during collection must not take down the process.
		defer func() {
			if r := recover(); r != nil {
				logging.Error("panic in stats collection goroutine", logging.Fields{"panic": r})
			}
		}()
		ticker := time.NewTicker(HighResolutionInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				m.collectMinerStats()
			case <-m.stopChan:
				return
			}
		}
	}()
}
// statsCollectionTimeout is the maximum time to wait for stats from a single
// miner; it bounds each GetStats API call and each database write.
const statsCollectionTimeout = 5 * time.Second
// collectMinerStats iterates through active miners and collects their stats.
// Stats are collected in parallel to reduce overall collection time; the
// miner map is only snapshotted under the read lock, so slow miner APIs do
// not block other manager operations.
func (m *Manager) collectMinerStats() {
	// Take a snapshot of miners under read lock - minimize lock duration
	m.mu.RLock()
	if len(m.miners) == 0 {
		m.mu.RUnlock()
		return
	}
	type minerInfo struct {
		miner     Miner
		minerType string
	}
	miners := make([]minerInfo, 0, len(m.miners))
	for _, miner := range m.miners {
		// Use the miner's GetType() method for proper type identification
		miners = append(miners, minerInfo{miner: miner, minerType: miner.GetType()})
	}
	dbEnabled := m.dbEnabled // Copy to avoid holding lock
	m.mu.RUnlock()
	now := time.Now()
	// Collect stats from all miners in parallel
	var wg sync.WaitGroup
	for _, mi := range miners {
		wg.Add(1)
		go func(miner Miner, minerType string) {
			defer wg.Done()
			// Isolate panics per miner so one bad API cannot abort the sweep.
			defer func() {
				if r := recover(); r != nil {
					logging.Error("panic in single miner stats collection", logging.Fields{
						"panic": r,
						"miner": miner.GetName(),
					})
				}
			}()
			m.collectSingleMinerStats(miner, minerType, now, dbEnabled)
		}(mi.miner, mi.minerType)
	}
	wg.Wait()
}
// statsRetryCount is the number of retries (in addition to the first
// attempt) for transient stats failures.
const statsRetryCount = 2

// statsRetryDelay is the delay between stats collection retries.
const statsRetryDelay = 500 * time.Millisecond
// collectSingleMinerStats collects stats from one miner with retry logic,
// appends the sample to the miner's in-memory history, optionally persists
// it to the database, and emits a stats event. Called concurrently for each
// miner by collectMinerStats.
func (m *Manager) collectSingleMinerStats(miner Miner, minerType string, now time.Time, dbEnabled bool) {
	minerName := miner.GetName()
	var stats *PerformanceMetrics
	var lastErr error
	// Retry loop for transient failures
	for attempt := 0; attempt <= statsRetryCount; attempt++ {
		// Use context with timeout to prevent hanging on unresponsive miner APIs
		ctx, cancel := context.WithTimeout(context.Background(), statsCollectionTimeout)
		stats, lastErr = miner.GetStats(ctx)
		cancel() // Release context immediately
		if lastErr == nil {
			break // Success
		}
		// Log retry attempts at debug level
		if attempt < statsRetryCount {
			logging.Debug("retrying stats collection", logging.Fields{
				"miner":   minerName,
				"attempt": attempt + 1,
				"error":   lastErr.Error(),
			})
			time.Sleep(statsRetryDelay)
		}
	}
	if lastErr != nil {
		logging.Error("failed to get miner stats after retries", logging.Fields{
			"miner":   minerName,
			"error":   lastErr.Error(),
			"retries": statsRetryCount,
		})
		RecordStatsCollection(true, true)
		return
	}
	// Record stats collection (retried if we did any retries)
	// NOTE(review): the second argument is always false here even when
	// retries occurred, and at this point stats != nil && lastErr == nil is
	// necessarily true — confirm RecordStatsCollection's intended semantics.
	RecordStatsCollection(stats != nil && lastErr == nil, false)
	point := HashratePoint{
		Timestamp: now,
		Hashrate:  stats.Hashrate,
	}
	// Add to in-memory history (rolling window)
	// Note: AddHashratePoint and ReduceHashrateHistory must be thread-safe
	miner.AddHashratePoint(point)
	miner.ReduceHashrateHistory(now)
	// Persist to database if enabled
	if dbEnabled {
		dbPoint := database.HashratePoint{
			Timestamp: point.Timestamp,
			Hashrate:  point.Hashrate,
		}
		// Create a new context for DB writes (original context is from retry loop)
		dbCtx, dbCancel := context.WithTimeout(context.Background(), statsCollectionTimeout)
		if err := database.InsertHashratePoint(dbCtx, minerName, minerType, dbPoint, database.ResolutionHigh); err != nil {
			logging.Warn("failed to persist hashrate", logging.Fields{"miner": minerName, "error": err})
		}
		dbCancel()
	}
	// Emit stats event for real-time WebSocket updates
	m.emitEvent(EventMinerStats, MinerStatsData{
		Name:        minerName,
		Hashrate:    stats.Hashrate,
		Shares:      stats.Shares,
		Rejected:    stats.Rejected,
		Uptime:      stats.Uptime,
		Algorithm:   stats.Algorithm,
		DiffCurrent: stats.DiffCurrent,
	})
}
// GetMinerHashrateHistory returns the in-memory hashrate history for the
// named miner.
func (m *Manager) GetMinerHashrateHistory(name string) ([]HashratePoint, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if miner, ok := m.miners[name]; ok {
		return miner.GetHashrateHistory(), nil
	}
	return nil, fmt.Errorf("miner not found: %s", name)
}
// ShutdownTimeout is the maximum time Stop waits for background goroutines
// to finish before giving up and continuing shutdown.
const ShutdownTimeout = 10 * time.Second
// Stop stops all running miners, background goroutines, and closes resources.
// Safe to call multiple times - subsequent calls are no-ops.
// Shutdown order: miners are stopped under the lock, then stopChan is closed
// to end background goroutines (awaited with ShutdownTimeout), and the
// database handle is closed last so in-flight writes can finish.
func (m *Manager) Stop() {
	m.stopOnce.Do(func() {
		// Stop all running miners first
		m.mu.Lock()
		for name, miner := range m.miners {
			if err := miner.Stop(); err != nil {
				logging.Warn("failed to stop miner", logging.Fields{"miner": name, "error": err})
			}
		}
		m.mu.Unlock()
		close(m.stopChan)
		// Wait for goroutines with timeout
		done := make(chan struct{})
		go func() {
			m.waitGroup.Wait()
			close(done)
		}()
		select {
		case <-done:
			logging.Info("all goroutines stopped gracefully")
		case <-time.After(ShutdownTimeout):
			logging.Warn("shutdown timeout - some goroutines may not have stopped")
		}
		// Close the database
		if m.dbEnabled {
			if err := database.Close(); err != nil {
				logging.Warn("failed to close database", logging.Fields{"error": err})
			}
		}
	})
}
// GetMinerHistoricalStats returns aggregate historical stats for a miner
// from the database. Fails when persistence is disabled.
func (m *Manager) GetMinerHistoricalStats(minerName string) (*database.HashrateStats, error) {
	if m.dbEnabled {
		return database.GetHashrateStats(minerName)
	}
	return nil, fmt.Errorf("database persistence is disabled")
}
// GetMinerHistoricalHashrate returns historical hashrate samples for a miner
// between since and until, converted from the database's point type to the
// mining package's HashratePoint.
func (m *Manager) GetMinerHistoricalHashrate(minerName string, since, until time.Time) ([]HashratePoint, error) {
	if !m.dbEnabled {
		return nil, fmt.Errorf("database persistence is disabled")
	}
	raw, err := database.GetHashrateHistory(minerName, database.ResolutionHigh, since, until)
	if err != nil {
		return nil, err
	}
	out := make([]HashratePoint, 0, len(raw))
	for _, p := range raw {
		out = append(out, HashratePoint{
			Timestamp: p.Timestamp,
			Hashrate:  p.Hashrate,
		})
	}
	return out, nil
}
// GetAllMinerHistoricalStats returns historical stats for every miner known
// to the database. Fails when persistence is disabled.
func (m *Manager) GetAllMinerHistoricalStats() ([]database.HashrateStats, error) {
	if m.dbEnabled {
		return database.GetAllMinerStats()
	}
	return nil, fmt.Errorf("database persistence is disabled")
}
// IsDatabaseEnabled returns whether database persistence is enabled.
// Reads dbEnabled without locking; the field is written only during
// construction (initDatabase), before concurrent access begins.
func (m *Manager) IsDatabaseEnabled() bool {
	return m.dbEnabled
}

View file

@ -0,0 +1,5 @@
package mining
// This file is intentionally left with only a package declaration
// to resolve a redeclaration error. The ManagerInterface is defined
// in manager.go.

314
mining/manager_race_test.go Normal file
View file

@ -0,0 +1,314 @@
package mining
import (
"context"
"sync"
"testing"
"time"
)
// TestConcurrentStartMultipleMiners verifies that concurrent StartMiner calls
// with different algorithms create unique miners without race conditions.
// Failures surface via the -race detector rather than assertions; start
// errors are expected (e.g. port conflicts) and only logged.
func TestConcurrentStartMultipleMiners(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()
	var wg sync.WaitGroup
	errors := make(chan error, 10)
	// Try to start 10 miners concurrently with different algos
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			config := &Config{
				HTTPPort: 10000 + index,
				Pool:     "test:1234",
				Wallet:   "testwallet",
				Algo:     "algo" + string(rune('A'+index)), // algoA, algoB, etc.
			}
			_, err := m.StartMiner(context.Background(), "xmrig", config)
			if err != nil {
				errors <- err
			}
		}(i)
	}
	wg.Wait()
	close(errors)
	// Collect errors
	var errCount int
	for err := range errors {
		t.Logf("Concurrent start error: %v", err)
		errCount++
	}
	// Some failures are expected due to port conflicts, but shouldn't crash
	t.Logf("Started miners with %d errors out of 10 attempts", errCount)
	// Verify no data races occurred (test passes if no race detector warnings)
}
// TestConcurrentStartDuplicateMiner verifies that starting the same miner
// concurrently results in only one success: identical Algo values produce
// the same instance name, which the manager rejects as a duplicate.
func TestConcurrentStartDuplicateMiner(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()
	var wg sync.WaitGroup
	successes := make(chan struct{}, 10)
	failures := make(chan error, 10)
	// Try to start the same miner 10 times concurrently
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			config := &Config{
				HTTPPort: 11000,
				Pool:     "test:1234",
				Wallet:   "testwallet",
				Algo:     "duplicate_test", // Same algo = same instance name
			}
			_, err := m.StartMiner(context.Background(), "xmrig", config)
			if err != nil {
				failures <- err
			} else {
				successes <- struct{}{}
			}
		}()
	}
	wg.Wait()
	close(successes)
	close(failures)
	successCount := len(successes)
	failureCount := len(failures)
	t.Logf("Duplicate miner test: %d successes, %d failures", successCount, failureCount)
	// Only one should succeed (or zero if there's a timing issue)
	if successCount > 1 {
		t.Errorf("Expected at most 1 success for duplicate miner, got %d", successCount)
	}
}
// TestConcurrentStartStop verifies that starting and stopping miners
// concurrently doesn't cause race conditions. Errors from StartMiner and
// StopMiner are deliberately ignored — only race-detector cleanliness is
// being tested here.
func TestConcurrentStartStop(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()
	var wg sync.WaitGroup
	// Start some miners
	for i := 0; i < 5; i++ {
		config := &Config{
			HTTPPort: 12000 + i,
			Pool:     "test:1234",
			Wallet:   "testwallet",
			Algo:     "startstop" + string(rune('A'+i)),
		}
		_, err := m.StartMiner(context.Background(), "xmrig", config)
		if err != nil {
			t.Logf("Setup error (may be expected): %v", err)
		}
	}
	// Give miners time to start
	time.Sleep(100 * time.Millisecond)
	// Now concurrently start new ones and stop existing ones
	for i := 0; i < 10; i++ {
		wg.Add(2)
		// Start a new miner
		go func(index int) {
			defer wg.Done()
			config := &Config{
				HTTPPort: 12100 + index,
				Pool:     "test:1234",
				Wallet:   "testwallet",
				Algo:     "new" + string(rune('A'+index)),
			}
			m.StartMiner(context.Background(), "xmrig", config)
		}(i)
		// Stop a miner
		go func(index int) {
			defer wg.Done()
			minerName := "xmrig-startstop" + string(rune('A'+index%5))
			m.StopMiner(context.Background(), minerName)
		}(i)
	}
	wg.Wait()
	// Test passes if no race detector warnings
}
// TestConcurrentListMiners verifies that listing miners while modifying
// the miner map doesn't cause race conditions. One goroutine lists in a
// tight loop while another starts miners; the 2s context bounds the test.
func TestConcurrentListMiners(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()
	var wg sync.WaitGroup
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	// Continuously list miners
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-ctx.Done():
				return
			default:
				miners := m.ListMiners()
				_ = len(miners) // Use the result
			}
		}
	}()
	// Continuously start miners
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 20; i++ {
			select {
			case <-ctx.Done():
				return
			default:
				config := &Config{
					HTTPPort: 13000 + i,
					Pool:     "test:1234",
					Wallet:   "testwallet",
					Algo:     "list" + string(rune('A'+i%26)),
				}
				m.StartMiner(context.Background(), "xmrig", config)
				time.Sleep(10 * time.Millisecond)
			}
		}
	}()
	wg.Wait()
	// Test passes if no race detector warnings
}
// TestConcurrentGetMiner verifies that getting a miner while others
// are being started/stopped doesn't cause race conditions. The test is
// skipped when the initial miner cannot be started in this environment.
func TestConcurrentGetMiner(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()
	// Start a miner first
	config := &Config{
		HTTPPort: 14000,
		Pool:     "test:1234",
		Wallet:   "testwallet",
		Algo:     "gettest",
	}
	miner, err := m.StartMiner(context.Background(), "xmrig", config)
	if err != nil {
		t.Skipf("Could not start test miner: %v", err)
	}
	minerName := miner.GetName()
	var wg sync.WaitGroup
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	// Continuously get the miner from five reader goroutines
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-ctx.Done():
					return
				default:
					m.GetMiner(minerName)
					time.Sleep(time.Millisecond)
				}
			}
		}()
	}
	// Start more miners in parallel
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 10; i++ {
			select {
			case <-ctx.Done():
				return
			default:
				config := &Config{
					HTTPPort: 14100 + i,
					Pool:     "test:1234",
					Wallet:   "testwallet",
					Algo:     "parallel" + string(rune('A'+i)),
				}
				m.StartMiner(context.Background(), "xmrig", config)
			}
		}
	}()
	wg.Wait()
	// Test passes if no race detector warnings
}
// TestConcurrentStatsCollection verifies that stats collection
// doesn't race with miner operations: one goroutine polls GetStats over a
// ListMiners snapshot (mimicking the manager's background collector) while
// another stops the miners out from under it.
func TestConcurrentStatsCollection(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()
	// Start some miners
	for i := 0; i < 3; i++ {
		config := &Config{
			HTTPPort: 15000 + i,
			Pool:     "test:1234",
			Wallet:   "testwallet",
			Algo:     "stats" + string(rune('A'+i)),
		}
		m.StartMiner(context.Background(), "xmrig", config)
	}
	var wg sync.WaitGroup
	// Simulate stats collection (normally done by background goroutine)
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 50; i++ {
			miners := m.ListMiners()
			for _, miner := range miners {
				miner.GetStats(context.Background())
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()
	// Concurrently stop miners
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(100 * time.Millisecond) // Let stats collection start
		for _, name := range []string{"xmrig-statsA", "xmrig-statsB", "xmrig-statsC"} {
			m.StopMiner(context.Background(), name)
			time.Sleep(50 * time.Millisecond)
		}
	}()
	wg.Wait()
	// Test passes if no race detector warnings
}

140
mining/manager_test.go Normal file
View file

@ -0,0 +1,140 @@
package mining
import (
"context"
"os"
"path/filepath"
"runtime"
"testing"
)
// setupTestManager creates a new Manager and a dummy miner executable for tests.
// The dummy executable's directory is prepended to PATH for the duration of
// the test via t.Setenv, which restores the original value automatically and
// guards against accidental use together with t.Parallel — replacing the
// previous manual os.Setenv save/restore dance.
func setupTestManager(t *testing.T) *Manager {
	t.Helper()

	dummyDir := t.TempDir()
	executableName := "miner"
	if runtime.GOOS == "windows" {
		executableName += ".exe"
	}
	dummyPath := filepath.Join(dummyDir, executableName)

	// Create a stand-in script that only prints a version banner and exits,
	// so installation checks can "run" the miner without real mining.
	var script []byte
	if runtime.GOOS == "windows" {
		script = []byte("@echo off\necho XMRig 6.24.0\n")
	} else {
		script = []byte("#!/bin/sh\necho 'XMRig 6.24.0'\n")
	}
	if err := os.WriteFile(dummyPath, script, 0755); err != nil {
		t.Fatalf("Failed to create dummy miner executable: %v", err)
	}

	// Prepend the dummy directory to the PATH; t.Setenv registers the cleanup.
	t.Setenv("PATH", dummyDir+string(os.PathListSeparator)+os.Getenv("PATH"))

	return NewManager()
}
// TestStartMiner_Good exercises the happy path; currently disabled because it
// would launch a real miner process.
func TestStartMiner_Good(t *testing.T) {
	t.Skip("Skipping test that runs miner process as per request")
}

// TestStartMiner_Bad checks that unsupported miner types are rejected.
func TestStartMiner_Bad(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()

	cfg := &Config{
		HTTPPort: 9001, // Use a different port to avoid conflict
		Pool:     "test:1234",
		Wallet:   "testwallet",
	}

	// Case 2: Attempt to start an unsupported miner
	if _, err := m.StartMiner(context.Background(), "unsupported", cfg); err == nil {
		t.Error("Expected an error when starting an unsupported miner, but got nil")
	}
}

// TestStartMiner_Ugly is disabled: it would spawn a miner process.
func TestStartMiner_Ugly(t *testing.T) {
	t.Skip("Skipping test that runs miner process")
}

// TestStopMiner_Good is disabled: it would spawn a miner process.
func TestStopMiner_Good(t *testing.T) {
	t.Skip("Skipping test that runs miner process")
}

// TestStopMiner_Bad checks that stopping an unknown miner reports an error.
func TestStopMiner_Bad(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()

	// Case 2: Attempt to stop a non-existent miner
	if err := m.StopMiner(context.Background(), "nonexistent"); err == nil {
		t.Error("Expected an error when stopping a non-existent miner, but got nil")
	}
}
// TestGetMiner_Good verifies lookup of a miner that was injected directly
// into the manager's registry.
func TestGetMiner_Good(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()

	// Case 1: Get an existing miner (manually injected).
	// StartMiner would normally name it "xmrig"; since we inject it
	// ourselves we pick an explicit test name.
	injected := NewXMRigMiner()
	injected.Name = "xmrig-test"

	m.mu.Lock()
	m.miners["xmrig-test"] = injected
	m.mu.Unlock()

	got, err := m.GetMiner("xmrig-test")
	if err != nil {
		t.Fatalf("Expected to get miner, but got error: %v", err)
	}
	if name := got.GetName(); name != "xmrig-test" {
		t.Errorf("Expected to get miner 'xmrig-test', but got %s", name)
	}
}

// TestGetMiner_Bad verifies that unknown names produce an error.
func TestGetMiner_Bad(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()

	// Case 2: Attempt to get a non-existent miner
	if _, err := m.GetMiner("nonexistent"); err == nil {
		t.Error("Expected an error when getting a non-existent miner, but got nil")
	}
}
// TestListMiners_Good verifies that ListMiners reflects a newly injected miner.
func TestListMiners_Good(t *testing.T) {
	m := setupTestManager(t)
	defer m.Stop()

	// Baseline count may be non-zero if the manager autostarted miners.
	before := len(m.ListMiners())

	// Case 2: List miners when not empty (manually injected)
	injected := NewXMRigMiner()
	injected.Name = "xmrig-test"
	m.mu.Lock()
	m.miners["xmrig-test"] = injected
	m.mu.Unlock()

	after := m.ListMiners()
	if want := before + 1; len(after) != want {
		t.Errorf("Expected %d miners, but got %d", want, len(after))
	}
}

169
mining/metrics.go Normal file
View file

@ -0,0 +1,169 @@
package mining
import (
"sync"
"sync/atomic"
"time"
)
// Metrics provides simple instrumentation counters for the mining package.
// All counters are lock-free atomics and safe for concurrent use.
// These can be exposed via Prometheus or other metrics systems in the future.
type Metrics struct {
	// API metrics
	RequestsTotal   atomic.Int64      // total API requests observed
	RequestsErrored atomic.Int64      // subset of requests that ended in error
	RequestLatency  *LatencyHistogram // rolling window of request latencies
	// Miner metrics
	MinersStarted atomic.Int64
	MinersStopped atomic.Int64
	MinersErrored atomic.Int64
	// Stats collection metrics
	StatsCollected atomic.Int64
	StatsRetried   atomic.Int64
	StatsFailed    atomic.Int64
	// WebSocket metrics
	WSConnections atomic.Int64 // gauge: incremented on connect, decremented on disconnect
	WSMessages    atomic.Int64
	// P2P metrics
	P2PMessagesSent     atomic.Int64
	P2PMessagesReceived atomic.Int64
	// NOTE(review): P2PConnectionsTotal is never updated in this file —
	// confirm the P2P layer increments it.
	P2PConnectionsTotal atomic.Int64
}
// LatencyHistogram tracks request latencies with basic percentile support.
type LatencyHistogram struct {
mu sync.Mutex
samples []time.Duration
maxSize int
}
// NewLatencyHistogram creates a new latency histogram with a maximum sample size.
func NewLatencyHistogram(maxSize int) *LatencyHistogram {
return &LatencyHistogram{
samples: make([]time.Duration, 0, maxSize),
maxSize: maxSize,
}
}
// Record adds a latency sample.
func (h *LatencyHistogram) Record(d time.Duration) {
h.mu.Lock()
defer h.mu.Unlock()
if len(h.samples) >= h.maxSize {
// Ring buffer behavior - overwrite oldest
copy(h.samples, h.samples[1:])
h.samples = h.samples[:len(h.samples)-1]
}
h.samples = append(h.samples, d)
}
// Average returns the average latency.
func (h *LatencyHistogram) Average() time.Duration {
h.mu.Lock()
defer h.mu.Unlock()
if len(h.samples) == 0 {
return 0
}
var total time.Duration
for _, d := range h.samples {
total += d
}
return total / time.Duration(len(h.samples))
}
// Count returns the number of samples.
func (h *LatencyHistogram) Count() int {
h.mu.Lock()
defer h.mu.Unlock()
return len(h.samples)
}
// DefaultMetrics is the process-wide metrics instance used by the
// package-level Record* helpers below.
var DefaultMetrics = &Metrics{
	RequestLatency: NewLatencyHistogram(1000),
}

// RecordRequest records an API request, its error outcome, and its latency.
func RecordRequest(errored bool, latency time.Duration) {
	m := DefaultMetrics
	m.RequestsTotal.Add(1)
	if errored {
		m.RequestsErrored.Add(1)
	}
	m.RequestLatency.Record(latency)
}

// RecordMinerStart records a miner start event.
func RecordMinerStart() { DefaultMetrics.MinersStarted.Add(1) }

// RecordMinerStop records a miner stop event.
func RecordMinerStop() { DefaultMetrics.MinersStopped.Add(1) }

// RecordMinerError records a miner error event.
func RecordMinerError() { DefaultMetrics.MinersErrored.Add(1) }

// RecordStatsCollection records one stats-collection attempt and whether it
// was retried and/or ultimately failed.
func RecordStatsCollection(retried bool, failed bool) {
	m := DefaultMetrics
	m.StatsCollected.Add(1)
	if retried {
		m.StatsRetried.Add(1)
	}
	if failed {
		m.StatsFailed.Add(1)
	}
}

// RecordWSConnection adjusts the live WebSocket connection gauge:
// +1 on connect, -1 on disconnect.
func RecordWSConnection(connected bool) {
	delta := int64(-1)
	if connected {
		delta = 1
	}
	DefaultMetrics.WSConnections.Add(delta)
}

// RecordWSMessage records a WebSocket message.
func RecordWSMessage() { DefaultMetrics.WSMessages.Add(1) }

// RecordP2PMessage records a P2P message in the sent or received direction.
func RecordP2PMessage(sent bool) {
	if sent {
		DefaultMetrics.P2PMessagesSent.Add(1)
	} else {
		DefaultMetrics.P2PMessagesReceived.Add(1)
	}
}
// GetMetricsSnapshot returns a point-in-time snapshot of all counters in
// DefaultMetrics, keyed by metric name. Values are int64 counts, except the
// latency entries: the rolling average in milliseconds and the number of
// samples backing it. Uses `any` (alias of interface{}) per modern Go idiom;
// the map type is unchanged for callers.
func GetMetricsSnapshot() map[string]any {
	m := DefaultMetrics
	return map[string]any{
		"requests_total":          m.RequestsTotal.Load(),
		"requests_errored":        m.RequestsErrored.Load(),
		"request_latency_avg_ms":  m.RequestLatency.Average().Milliseconds(),
		"request_latency_samples": m.RequestLatency.Count(),
		"miners_started":          m.MinersStarted.Load(),
		"miners_stopped":          m.MinersStopped.Load(),
		"miners_errored":          m.MinersErrored.Load(),
		"stats_collected":         m.StatsCollected.Load(),
		"stats_retried":           m.StatsRetried.Load(),
		"stats_failed":            m.StatsFailed.Load(),
		"ws_connections":          m.WSConnections.Load(),
		"ws_messages":             m.WSMessages.Load(),
		"p2p_messages_sent":       m.P2PMessagesSent.Load(),
		"p2p_messages_received":   m.P2PMessagesReceived.Load(),
	}
}

635
mining/miner.go Normal file
View file

@ -0,0 +1,635 @@
package mining
import (
"archive/tar"
"archive/zip"
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"forge.lthn.ai/core/mining/logging"
"github.com/adrg/xdg"
)
// LogBuffer is a thread-safe ring buffer for capturing miner output.
type LogBuffer struct {
lines []string
maxLines int
mu sync.RWMutex
}
// NewLogBuffer creates a new log buffer with the specified max lines.
func NewLogBuffer(maxLines int) *LogBuffer {
return &LogBuffer{
lines: make([]string, 0, maxLines),
maxLines: maxLines,
}
}
// maxLineLength is the maximum length of a single log line to prevent memory bloat.
const maxLineLength = 2000
// Write implements io.Writer for capturing output.
func (lb *LogBuffer) Write(p []byte) (n int, err error) {
lb.mu.Lock()
defer lb.mu.Unlock()
// Split input into lines
text := string(p)
newLines := strings.Split(text, "\n")
for _, line := range newLines {
if line == "" {
continue
}
// Truncate excessively long lines to prevent memory bloat
if len(line) > maxLineLength {
line = line[:maxLineLength] + "... [truncated]"
}
// Add timestamp prefix
timestampedLine := fmt.Sprintf("[%s] %s", time.Now().Format("15:04:05"), line)
lb.lines = append(lb.lines, timestampedLine)
// Trim if over max - force reallocation to release memory
if len(lb.lines) > lb.maxLines {
newSlice := make([]string, lb.maxLines)
copy(newSlice, lb.lines[len(lb.lines)-lb.maxLines:])
lb.lines = newSlice
}
}
return len(p), nil
}
// GetLines returns all captured log lines.
func (lb *LogBuffer) GetLines() []string {
lb.mu.RLock()
defer lb.mu.RUnlock()
result := make([]string, len(lb.lines))
copy(result, lb.lines)
return result
}
// Clear clears the log buffer.
func (lb *LogBuffer) Clear() {
lb.mu.Lock()
defer lb.mu.Unlock()
lb.lines = lb.lines[:0]
}
// BaseMiner provides a foundation for specific miner implementations.
// It owns the miner process handle, log capture, and hashrate history;
// mutable fields are guarded by mu.
type BaseMiner struct {
	Name      string `json:"name"`
	MinerType string `json:"miner_type"` // Type identifier (e.g., "xmrig", "tt-miner")
	Version   string `json:"version"`
	URL       string `json:"url"`
	Path      string `json:"path"`
	// MinerBinary is the resolved absolute path of the executable,
	// populated by CheckInstallation.
	MinerBinary    string `json:"miner_binary"`
	ExecutableName string `json:"-"` // stable binary base name; used to derive install paths
	Running        bool   `json:"running"`
	ConfigPath     string `json:"configPath"`
	API            *API   `json:"api"`
	mu             sync.RWMutex
	cmd            *exec.Cmd
	stdinPipe      io.WriteCloser `json:"-"` // open while the process runs; used by WriteStdin
	// HashrateHistory holds recent high-resolution samples; older samples
	// are averaged into LowResHashrateHistory by ReduceHashrateHistory.
	HashrateHistory       []HashratePoint `json:"hashrateHistory"`
	LowResHashrateHistory []HashratePoint `json:"lowResHashrateHistory"`
	LastLowResAggregation time.Time       `json:"-"` // throttle marker for ReduceHashrateHistory
	LogBuffer             *LogBuffer      `json:"-"` // captures process stdout/stderr; may be nil
}
// GetType returns the miner type identifier.
// NOTE(review): reads MinerType without holding mu — assumes the field is
// set once at construction and never mutated afterwards; confirm.
func (b *BaseMiner) GetType() string {
	return b.MinerType
}

// GetName returns the name of the miner (read under the lock, since the
// name may be assigned after construction).
func (b *BaseMiner) GetName() string {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.Name
}

// GetPath returns the base installation directory for the miner type.
// It uses the stable ExecutableName field to ensure the correct path.
// Preference order: XDG data dir ("lethean-desktop/miners/<name>"), then
// ~/.lethean-desktop/miners/<name>; returns "" only when the home
// directory cannot be determined either.
func (b *BaseMiner) GetPath() string {
	dataPath, err := xdg.DataFile(fmt.Sprintf("lethean-desktop/miners/%s", b.ExecutableName))
	if err != nil {
		// Fall back to a dotted directory under $HOME when XDG lookup fails.
		home, err := os.UserHomeDir()
		if err != nil {
			return ""
		}
		return filepath.Join(home, ".lethean-desktop", "miners", b.ExecutableName)
	}
	return dataPath
}

// GetBinaryPath returns the full path to the miner's executable file,
// as last resolved by CheckInstallation/findMinerBinary (may be empty
// before the first resolution).
func (b *BaseMiner) GetBinaryPath() string {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.MinerBinary
}
// Stop terminates the miner process gracefully.
// It first tries SIGTERM to allow cleanup, then SIGKILL if needed.
// Safe against concurrent Stop calls: the first caller flips Running to
// false under the lock, so later callers get "miner is not running".
func (b *BaseMiner) Stop() error {
	b.mu.Lock()
	if !b.Running || b.cmd == nil {
		b.mu.Unlock()
		return errors.New("miner is not running")
	}
	// Close stdin pipe if open
	if b.stdinPipe != nil {
		b.stdinPipe.Close()
		b.stdinPipe = nil
	}
	// Capture cmd locally to avoid race with Wait() goroutine
	cmd := b.cmd
	process := cmd.Process
	// Mark as not running immediately to prevent concurrent Stop() calls
	b.Running = false
	b.cmd = nil
	b.mu.Unlock()
	// Try graceful shutdown with SIGTERM first (Unix only)
	if runtime.GOOS != "windows" {
		if err := process.Signal(syscall.SIGTERM); err == nil {
			// Wait up to 3 seconds for graceful shutdown
			done := make(chan struct{})
			go func() {
				process.Wait()
				close(done)
			}()
			select {
			case <-done:
				return nil
			case <-time.After(3 * time.Second):
				// Process didn't exit gracefully, force kill below.
				// NOTE(review): the goroutine above still owns the Wait;
				// the second Wait further down will then return an error,
				// which is intentionally ignored — the goroutine reaps
				// the child either way.
			}
		}
	}
	// Force kill and wait for process to exit
	if err := process.Kill(); err != nil {
		return err
	}
	// Wait for process to fully terminate to avoid zombies
	// (error deliberately ignored; see note above about double Wait).
	process.Wait()
	return nil
}
// stdinWriteTimeout is the maximum time to wait for stdin write to complete.
const stdinWriteTimeout = 5 * time.Second

// WriteStdin sends input to the miner's stdin (for console commands).
// A trailing newline is appended when missing so the miner sees a complete
// command. The write is bounded by stdinWriteTimeout; on timeout the writer
// goroutine still exits cleanly (buffered channel + non-blocking send), so
// no goroutine is leaked even if the pipe stays blocked.
func (b *BaseMiner) WriteStdin(input string) error {
	// Snapshot pipe and running flag under the read lock so a concurrent
	// Stop() cannot nil the pipe out from under us mid-check.
	b.mu.RLock()
	stdinPipe := b.stdinPipe
	running := b.Running
	b.mu.RUnlock()
	if !running || stdinPipe == nil {
		return errors.New("miner is not running or stdin not available")
	}
	// Append newline if not present
	if !strings.HasSuffix(input, "\n") {
		input += "\n"
	}
	// Write with timeout to prevent blocking indefinitely.
	// Use buffered channel size 1 so goroutine can exit even if we don't read the result.
	done := make(chan error, 1)
	go func() {
		_, err := stdinPipe.Write([]byte(input))
		// Non-blocking send - if timeout already fired, this won't block
		select {
		case done <- err:
		default:
			// Timeout already occurred, goroutine exits cleanly
		}
	}()
	select {
	case err := <-done:
		return err
	case <-time.After(stdinWriteTimeout):
		return errors.New("stdin write timeout: miner may be unresponsive")
	}
}
// Uninstall removes the miner's entire installation directory,
// including every downloaded release beneath it.
func (b *BaseMiner) Uninstall() error {
	installDir := b.GetPath()
	return os.RemoveAll(installDir)
}
// InstallFromURL handles the generic download and extraction process for a miner.
// The release archive is streamed to a temp file, then extracted into
// GetPath(): ".zip" URLs go through unzip, anything else is assumed to be a
// gzip tarball and goes through untar. The temp file is removed on all paths.
func (b *BaseMiner) InstallFromURL(url string) error {
	tmpfile, err := os.CreateTemp("", b.ExecutableName+"-")
	if err != nil {
		return err
	}
	defer os.Remove(tmpfile.Name())
	defer tmpfile.Close()
	resp, err := getHTTPClient().Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		_, _ = io.Copy(io.Discard, resp.Body) // Drain body to allow connection reuse (error ignored intentionally)
		return fmt.Errorf("failed to download release: unexpected status code %d", resp.StatusCode)
	}
	if _, err := io.Copy(tmpfile, resp.Body); err != nil {
		// Drain remaining body to allow connection reuse (error ignored intentionally)
		_, _ = io.Copy(io.Discard, resp.Body)
		return err
	}
	baseInstallPath := b.GetPath()
	if err := os.MkdirAll(baseInstallPath, 0755); err != nil {
		return err
	}
	// Choose the extractor by URL suffix; the archive is re-opened by name
	// while tmpfile's handle is still open, which Go's file sharing allows.
	if strings.HasSuffix(url, ".zip") {
		err = b.unzip(tmpfile.Name(), baseInstallPath)
	} else {
		err = b.untar(tmpfile.Name(), baseInstallPath)
	}
	if err != nil {
		return fmt.Errorf("failed to extract miner: %w", err)
	}
	return nil
}
// parseVersion parses a dotted version string (e.g., "6.24.0") into its
// numeric components for comparison. Any non-numeric component causes the
// whole version to be treated as "very old" ([]int{0}).
func parseVersion(v string) []int {
	fields := strings.Split(v, ".")
	nums := make([]int, len(fields))
	for i, field := range fields {
		n, err := strconv.Atoi(field)
		if err != nil {
			return []int{0} // malformed version, treat as very old
		}
		nums[i] = n
	}
	return nums
}

// compareVersions compares two parsed versions component-wise.
// It returns 1 if v1 > v2, -1 if v1 < v2, and 0 when they are equal;
// when the shared components match, the longer version is considered newer.
func compareVersions(v1, v2 []int) int {
	for i := 0; i < len(v1) && i < len(v2); i++ {
		switch {
		case v1[i] > v2[i]:
			return 1
		case v1[i] < v2[i]:
			return -1
		}
	}
	switch {
	case len(v1) > len(v2):
		return 1
	case len(v1) < len(v2):
		return -1
	default:
		return 0
	}
}
// findMinerBinary searches for the miner's executable file.
// Search order:
//  1. versioned subdirectories of GetPath() named "<exe>-<version>", probing
//     only the highest version found;
//  2. the system PATH.
// Returns the absolute path on success, or an error listing every location
// that was checked.
func (b *BaseMiner) findMinerBinary() (string, error) {
	executableName := b.ExecutableName
	if runtime.GOOS == "windows" {
		executableName += ".exe"
	}
	baseInstallPath := b.GetPath()
	searchedPaths := []string{}
	var highestVersion []int
	var highestVersionDir string
	// 1. Check the standard installation directory first
	if _, err := os.Stat(baseInstallPath); err == nil {
		dirs, err := os.ReadDir(baseInstallPath)
		if err == nil {
			for _, d := range dirs {
				if d.IsDir() && strings.HasPrefix(d.Name(), b.ExecutableName+"-") {
					// Extract version string, e.g., "xmrig-6.24.0" -> "6.24.0"
					versionStr := strings.TrimPrefix(d.Name(), b.ExecutableName+"-")
					currentVersion := parseVersion(versionStr)
					if highestVersionDir == "" || compareVersions(currentVersion, highestVersion) > 0 {
						highestVersion = currentVersion
						highestVersionDir = d.Name()
					}
					versionedPath := filepath.Join(baseInstallPath, d.Name())
					fullPath := filepath.Join(versionedPath, executableName)
					searchedPaths = append(searchedPaths, fullPath)
				}
			}
		}
		// Only the single highest-versioned directory is probed for the
		// binary; older installs are ignored even if they contain one.
		if highestVersionDir != "" {
			fullPath := filepath.Join(baseInstallPath, highestVersionDir, executableName)
			if _, err := os.Stat(fullPath); err == nil {
				logging.Debug("found miner binary at highest versioned path", logging.Fields{"path": fullPath})
				return fullPath, nil
			}
		}
	}
	// 2. Fallback to searching the system PATH
	path, err := exec.LookPath(executableName)
	if err == nil {
		absPath, err := filepath.Abs(path)
		if err != nil {
			return "", fmt.Errorf("failed to get absolute path for '%s': %w", path, err)
		}
		logging.Debug("found miner binary in system PATH", logging.Fields{"path": absPath})
		return absPath, nil
	}
	// If not found, return a detailed error
	return "", fmt.Errorf("miner executable '%s' not found. Searched in: %s and system PATH", executableName, strings.Join(searchedPaths, ", "))
}
// CheckInstallation verifies if the miner is installed correctly.
// It locates the binary, probes it with "--version", caches the results on
// the BaseMiner under the write lock — the previous implementation mutated
// MinerBinary/Path/Version unlocked, racing with readers such as
// GetBinaryPath — and returns the installation details.
func (b *BaseMiner) CheckInstallation() (*InstallationDetails, error) {
	binaryPath, err := b.findMinerBinary()
	if err != nil {
		return &InstallationDetails{IsInstalled: false}, err
	}

	// Probe the binary before taking the lock so the mutex is not held
	// across a subprocess execution.
	version := "Unknown (could not run executable)"
	cmd := exec.Command(binaryPath, "--version")
	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err == nil {
		// Expected banner shape: "<name> <version> ..." (e.g. "XMRig 6.24.0").
		if fields := strings.Fields(out.String()); len(fields) >= 2 {
			version = fields[1]
		} else {
			version = "Unknown (could not parse version)"
		}
	}

	installDir := filepath.Dir(binaryPath)

	// Cache the discovery on the miner under the write lock.
	b.mu.Lock()
	b.MinerBinary = binaryPath
	b.Path = installDir
	b.Version = version
	b.mu.Unlock()

	return &InstallationDetails{
		IsInstalled: true,
		MinerBinary: binaryPath,
		Path:        installDir,
		Version:     version,
	}, nil
}
// GetHashrateHistory returns the low-resolution history followed by the
// high-resolution history as one combined slice (a copy; callers may
// modify it freely).
func (b *BaseMiner) GetHashrateHistory() []HashratePoint {
	b.mu.RLock()
	defer b.mu.RUnlock()
	combined := make([]HashratePoint, 0, len(b.LowResHashrateHistory)+len(b.HashrateHistory))
	combined = append(combined, b.LowResHashrateHistory...)
	return append(combined, b.HashrateHistory...)
}

// AddHashratePoint appends a new high-resolution hashrate measurement.
func (b *BaseMiner) AddHashratePoint(point HashratePoint) {
	b.mu.Lock()
	b.HashrateHistory = append(b.HashrateHistory, point)
	b.mu.Unlock()
}

// GetHighResHistoryLength returns the number of high-resolution hashrate points.
func (b *BaseMiner) GetHighResHistoryLength() int {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return len(b.HashrateHistory)
}

// GetLowResHistoryLength returns the number of low-resolution hashrate points.
func (b *BaseMiner) GetLowResHistoryLength() int {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return len(b.LowResHashrateHistory)
}

// GetLogs returns a snapshot of the captured miner output, or an empty
// slice when no log buffer has been attached yet.
func (b *BaseMiner) GetLogs() []string {
	b.mu.RLock()
	buf := b.LogBuffer
	b.mu.RUnlock()
	if buf == nil {
		return []string{}
	}
	return buf.GetLines()
}
// ReduceHashrateHistory aggregates and trims hashrate data.
// High-resolution points older than HighResolutionDuration are averaged into
// per-LowResolutionInterval buckets and appended to the low-resolution
// history, which itself is trimmed to LowResHistoryRetention. The whole pass
// is throttled to run at most once per LowResolutionInterval.
func (b *BaseMiner) ReduceHashrateHistory(now time.Time) {
	b.mu.Lock()
	defer b.mu.Unlock()
	// Throttle: skip if an aggregation ran within the last interval.
	if !b.LastLowResAggregation.IsZero() && now.Sub(b.LastLowResAggregation) < LowResolutionInterval {
		return
	}
	// Partition high-res points into "old enough to aggregate" vs "keep raw".
	var pointsToAggregate []HashratePoint
	var newHighResHistory []HashratePoint
	cutoff := now.Add(-HighResolutionDuration)
	for _, p := range b.HashrateHistory {
		if p.Timestamp.Before(cutoff) {
			pointsToAggregate = append(pointsToAggregate, p)
		} else {
			newHighResHistory = append(newHighResHistory, p)
		}
	}
	// Force reallocation if significantly oversized to free memory
	if cap(b.HashrateHistory) > 1000 && len(newHighResHistory) < cap(b.HashrateHistory)/2 {
		trimmed := make([]HashratePoint, len(newHighResHistory))
		copy(trimmed, newHighResHistory)
		b.HashrateHistory = trimmed
	} else {
		b.HashrateHistory = newHighResHistory
	}
	if len(pointsToAggregate) == 0 {
		b.LastLowResAggregation = now
		return
	}
	// Bucket the aged points by LowResolutionInterval and average each
	// bucket into a single low-resolution point.
	minuteGroups := make(map[time.Time][]int)
	for _, p := range pointsToAggregate {
		minute := p.Timestamp.Truncate(LowResolutionInterval)
		minuteGroups[minute] = append(minuteGroups[minute], p.Hashrate)
	}
	var newLowResPoints []HashratePoint
	for minute, hashrates := range minuteGroups {
		if len(hashrates) > 0 {
			totalHashrate := 0
			for _, hr := range hashrates {
				totalHashrate += hr
			}
			avgHashrate := totalHashrate / len(hashrates)
			newLowResPoints = append(newLowResPoints, HashratePoint{Timestamp: minute, Hashrate: avgHashrate})
		}
	}
	// Map iteration order is random; sort buckets chronologically before
	// appending so the low-res history stays time-ordered.
	sort.Slice(newLowResPoints, func(i, j int) bool {
		return newLowResPoints[i].Timestamp.Before(newLowResPoints[j].Timestamp)
	})
	b.LowResHashrateHistory = append(b.LowResHashrateHistory, newLowResPoints...)
	// Drop low-res points older than the retention window. Relies on the
	// slice being time-ordered; when no point is within the window the whole
	// slice is discarded via the last-index branch below.
	lowResCutoff := now.Add(-LowResHistoryRetention)
	firstValidLowResIndex := 0
	for i, p := range b.LowResHashrateHistory {
		if p.Timestamp.After(lowResCutoff) || p.Timestamp.Equal(lowResCutoff) {
			firstValidLowResIndex = i
			break
		}
		if i == len(b.LowResHashrateHistory)-1 {
			firstValidLowResIndex = len(b.LowResHashrateHistory)
		}
	}
	// Force reallocation if significantly oversized to free memory
	newLowResLen := len(b.LowResHashrateHistory) - firstValidLowResIndex
	if cap(b.LowResHashrateHistory) > 1000 && newLowResLen < cap(b.LowResHashrateHistory)/2 {
		trimmed := make([]HashratePoint, newLowResLen)
		copy(trimmed, b.LowResHashrateHistory[firstValidLowResIndex:])
		b.LowResHashrateHistory = trimmed
	} else {
		b.LowResHashrateHistory = b.LowResHashrateHistory[firstValidLowResIndex:]
	}
	b.LastLowResAggregation = now
}
// unzip extracts a zip archive from src into dest.
// Every entry path is joined to dest and validated to remain inside it,
// blocking zip-slip path traversal. Directory entries are created; regular
// files are written with the archive's mode bits.
func (b *BaseMiner) unzip(src, dest string) error {
	r, err := zip.OpenReader(src)
	if err != nil {
		return err
	}
	defer r.Close()
	for _, f := range r.File {
		fpath := filepath.Join(dest, f.Name)
		// Reject entries that would escape dest (e.g. "../../etc/passwd").
		if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) {
			return fmt.Errorf("%s: illegal file path", fpath)
		}
		if f.FileInfo().IsDir() {
			if err := os.MkdirAll(fpath, os.ModePerm); err != nil {
				return fmt.Errorf("failed to create directory %s: %w", fpath, err)
			}
			continue
		}
		if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
			return err
		}
		outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
		if err != nil {
			return err
		}
		rc, err := f.Open()
		if err != nil {
			outFile.Close()
			return err
		}
		// Close both handles explicitly (not deferred) so they are released
		// every iteration, then propagate any copy error.
		_, err = io.Copy(outFile, rc)
		outFile.Close()
		rc.Close()
		if err != nil {
			return err
		}
	}
	return nil
}
// untar extracts a gzip-compressed tar archive from src into dest.
// Entry paths are validated against dest to block path traversal; only
// directories and regular files are materialized — other entry types
// (symlinks, devices, ...) are silently skipped.
func (b *BaseMiner) untar(src, dest string) error {
	file, err := os.Open(src)
	if err != nil {
		return err
	}
	defer file.Close()
	gzr, err := gzip.NewReader(file)
	if err != nil {
		return err
	}
	defer gzr.Close()
	tr := tar.NewReader(gzr)
	for {
		header, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		target := filepath.Join(dest, header.Name)
		// Reject entries that would land outside dest.
		if !strings.HasPrefix(target, filepath.Clean(dest)+string(os.PathSeparator)) {
			return fmt.Errorf("%s: illegal file path in archive", header.Name)
		}
		switch header.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(target, 0755); err != nil {
				return err
			}
		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
				return err
			}
			// NOTE(review): header.Mode is cast directly to FileMode —
			// assumes it carries only permission bits; header.FileInfo().Mode()
			// is the canonical conversion. Confirm before changing.
			f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode))
			if err != nil {
				return err
			}
			if _, err := io.Copy(f, tr); err != nil {
				f.Close()
				return err
			}
			f.Close()
		}
	}
}

139
mining/miner_factory.go Normal file
View file

@ -0,0 +1,139 @@
package mining
import (
"fmt"
"strings"
"sync"
)
// MinerConstructor is a function that creates a new miner instance.
type MinerConstructor func() Miner

// MinerFactory handles miner instantiation and registration.
// Both maps are keyed by lowercased names and guarded by mu.
type MinerFactory struct {
	mu           sync.RWMutex
	constructors map[string]MinerConstructor
	aliases      map[string]string // maps aliases to canonical names
}

// globalFactory is the default factory instance backing the package-level
// helpers (CreateMiner, IsMinerSupported, ListMinerTypes, ...).
var globalFactory = NewMinerFactory()

// NewMinerFactory creates a new MinerFactory with default miners registered.
func NewMinerFactory() *MinerFactory {
	f := &MinerFactory{
		constructors: make(map[string]MinerConstructor),
		aliases:      make(map[string]string),
	}
	f.registerDefaults()
	return f
}

// registerDefaults registers all built-in miners.
func (f *MinerFactory) registerDefaults() {
	// XMRig miner (CPU/GPU RandomX, Cryptonight, etc.)
	f.Register("xmrig", func() Miner { return NewXMRigMiner() })
	// TT-Miner (GPU Kawpow, etc.)
	f.Register("tt-miner", func() Miner { return NewTTMiner() })
	f.RegisterAlias("ttminer", "tt-miner")
	// Simulated miner for testing and development
	f.Register(MinerTypeSimulated, func() Miner {
		return NewSimulatedMiner(SimulatedMinerConfig{
			Name:         "simulated-miner",
			Algorithm:    "rx/0",
			BaseHashrate: 1000,
			Variance:     0.1,
		})
	})
}
// Register adds a miner constructor to the factory under a
// case-insensitive name.
func (f *MinerFactory) Register(name string, constructor MinerConstructor) {
	key := strings.ToLower(name)
	f.mu.Lock()
	f.constructors[key] = constructor
	f.mu.Unlock()
}

// RegisterAlias adds an alternate case-insensitive name for an existing
// miner type.
func (f *MinerFactory) RegisterAlias(alias, canonicalName string) {
	aliasKey := strings.ToLower(alias)
	canonicalKey := strings.ToLower(canonicalName)
	f.mu.Lock()
	f.aliases[aliasKey] = canonicalKey
	f.mu.Unlock()
}

// Create instantiates a miner of the specified type, resolving aliases to
// their canonical name first.
func (f *MinerFactory) Create(minerType string) (Miner, error) {
	f.mu.RLock()
	defer f.mu.RUnlock()
	key := strings.ToLower(minerType)
	if canonical, ok := f.aliases[key]; ok {
		key = canonical
	}
	if constructor, ok := f.constructors[key]; ok {
		return constructor(), nil
	}
	return nil, fmt.Errorf("unsupported miner type: %s", minerType)
}

// IsSupported reports whether a miner type (or one of its aliases) is registered.
func (f *MinerFactory) IsSupported(minerType string) bool {
	f.mu.RLock()
	defer f.mu.RUnlock()
	key := strings.ToLower(minerType)
	if canonical, ok := f.aliases[key]; ok {
		key = canonical
	}
	_, ok := f.constructors[key]
	return ok
}

// ListTypes returns the canonical names of all registered miner types;
// aliases are excluded. Order is unspecified (map iteration).
func (f *MinerFactory) ListTypes() []string {
	f.mu.RLock()
	defer f.mu.RUnlock()
	names := make([]string, 0, len(f.constructors))
	for name := range f.constructors {
		names = append(names, name)
	}
	return names
}
// --- Global factory functions for convenience ---
// Each helper delegates to globalFactory, the package-wide default instance.

// CreateMiner creates a miner using the global factory.
func CreateMiner(minerType string) (Miner, error) {
	return globalFactory.Create(minerType)
}

// IsMinerSupported checks if a miner type is supported using the global factory.
func IsMinerSupported(minerType string) bool {
	return globalFactory.IsSupported(minerType)
}

// ListMinerTypes returns all registered miner types from the global factory.
func ListMinerTypes() []string {
	return globalFactory.ListTypes()
}

// RegisterMinerType adds a miner constructor to the global factory.
func RegisterMinerType(name string, constructor MinerConstructor) {
	globalFactory.Register(name, constructor)
}

// RegisterMinerAlias adds an alias to the global factory.
func RegisterMinerAlias(alias, canonicalName string) {
	globalFactory.RegisterAlias(alias, canonicalName)
}

View file

@ -0,0 +1,155 @@
package mining
import (
"testing"
)
// TestMinerFactory_Create covers canonical names, case-insensitivity,
// aliases, and rejection of unknown types.
func TestMinerFactory_Create(t *testing.T) {
	factory := NewMinerFactory()
	cases := []struct {
		name      string
		minerType string
		wantErr   bool
	}{
		{"xmrig lowercase", "xmrig", false},
		{"xmrig uppercase", "XMRIG", false},
		{"xmrig mixed case", "XmRig", false},
		{"tt-miner", "tt-miner", false},
		{"ttminer alias", "ttminer", false},
		{"unknown type", "unknown", true},
		{"empty type", "", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			miner, err := factory.Create(tc.minerType)
			if tc.wantErr {
				if err == nil {
					t.Errorf("Create(%q) expected error, got nil", tc.minerType)
				}
				return
			}
			if err != nil {
				t.Errorf("Create(%q) unexpected error: %v", tc.minerType, err)
			}
			if miner == nil {
				t.Errorf("Create(%q) returned nil miner", tc.minerType)
			}
		})
	}
}

// TestMinerFactory_IsSupported checks direct registrations and alias lookups.
func TestMinerFactory_IsSupported(t *testing.T) {
	factory := NewMinerFactory()
	cases := []struct {
		minerType string
		want      bool
	}{
		{"xmrig", true},
		{"tt-miner", true},
		{"ttminer", true}, // alias
		{"unknown", false},
		{"", false},
	}
	for _, tc := range cases {
		t.Run(tc.minerType, func(t *testing.T) {
			if got := factory.IsSupported(tc.minerType); got != tc.want {
				t.Errorf("IsSupported(%q) = %v, want %v", tc.minerType, got, tc.want)
			}
		})
	}
}
// TestMinerFactory_ListTypes verifies the canonical types are reported.
func TestMinerFactory_ListTypes(t *testing.T) {
	factory := NewMinerFactory()
	types := factory.ListTypes()
	if len(types) < 2 {
		t.Errorf("ListTypes() returned %d types, expected at least 2", len(types))
	}
	seen := make(map[string]bool, len(types))
	for _, typ := range types {
		seen[typ] = true
	}
	for _, want := range []string{"xmrig", "tt-miner"} {
		if !seen[want] {
			t.Errorf("ListTypes() missing expected type %q", want)
		}
	}
}

// TestMinerFactory_Register verifies custom registration end to end:
// the type becomes supported and its constructor is actually invoked.
func TestMinerFactory_Register(t *testing.T) {
	factory := NewMinerFactory()
	invoked := false
	factory.Register("custom-miner", func() Miner {
		invoked = true
		return NewXMRigMiner() // Return something valid for testing
	})
	if !factory.IsSupported("custom-miner") {
		t.Error("custom-miner should be supported after registration")
	}
	if _, err := factory.Create("custom-miner"); err != nil {
		t.Errorf("Create custom-miner failed: %v", err)
	}
	if !invoked {
		t.Error("custom constructor was not called")
	}
}
// TestMinerFactory_RegisterAlias verifies that a custom alias resolves to
// its canonical type.
func TestMinerFactory_RegisterAlias(t *testing.T) {
	factory := NewMinerFactory()
	factory.RegisterAlias("x", "xmrig")
	if !factory.IsSupported("x") {
		t.Error("alias 'x' should be supported")
	}
	miner, err := factory.Create("x")
	if err != nil {
		t.Errorf("Create with alias failed: %v", err)
	}
	if miner == nil {
		t.Error("Create with alias returned nil miner")
	}
}

// TestGlobalFactory_CreateMiner exercises the package-level convenience API.
func TestGlobalFactory_CreateMiner(t *testing.T) {
	miner, err := CreateMiner("xmrig")
	if err != nil {
		t.Errorf("CreateMiner failed: %v", err)
	}
	if miner == nil {
		t.Error("CreateMiner returned nil")
	}
}

// TestGlobalFactory_IsMinerSupported checks both outcomes of the global lookup.
func TestGlobalFactory_IsMinerSupported(t *testing.T) {
	if !IsMinerSupported("xmrig") {
		t.Error("xmrig should be supported")
	}
	if IsMinerSupported("nosuchminer") {
		t.Error("nosuchminer should not be supported")
	}
}

// TestGlobalFactory_ListMinerTypes checks the global type listing.
func TestGlobalFactory_ListMinerTypes(t *testing.T) {
	types := ListMinerTypes()
	if len(types) < 2 {
		t.Errorf("ListMinerTypes() returned %d types, expected at least 2", len(types))
	}
}

355
mining/mining.go Normal file
View file

@ -0,0 +1,355 @@
package mining
import (
"context"
"fmt"
"strings"
"time"
)
// Hashrate-history tuning knobs shared by all miners (see
// BaseMiner.ReduceHashrateHistory for how they interact).
const (
	HighResolutionDuration = 5 * time.Minute // raw samples older than this are aggregated
	// HighResolutionInterval is presumably the raw sampling cadence —
	// NOTE(review): not referenced in this file; confirm against the stats collector.
	HighResolutionInterval = 10 * time.Second
	LowResolutionInterval  = 1 * time.Minute // bucket size for aggregated samples (also throttles aggregation)
	LowResHistoryRetention = 24 * time.Hour  // how long aggregated samples are kept
)
// Miner defines the standard interface for a cryptocurrency miner.
// The interface is logically grouped into focused capabilities:
//
// Lifecycle - Installation and process management:
//   - Install, Uninstall, Start, Stop
//
// Stats - Performance metrics collection:
//   - GetStats
//
// Info - Miner identification and installation details:
//   - GetType, GetName, GetPath, GetBinaryPath, CheckInstallation, GetLatestVersion
//
// History - Hashrate history management:
//   - GetHashrateHistory, AddHashratePoint, ReduceHashrateHistory
//
// IO - Interactive input/output:
//   - GetLogs, WriteStdin
type Miner interface {
	// Lifecycle operations
	Install() error
	Uninstall() error
	Start(config *Config) error
	Stop() error
	// Stats operations: poll the running miner for current performance data.
	GetStats(ctx context.Context) (*PerformanceMetrics, error)
	// Info operations
	GetType() string // Returns miner type identifier (e.g., "xmrig", "tt-miner")
	GetName() string
	GetPath() string
	GetBinaryPath() string
	CheckInstallation() (*InstallationDetails, error)
	GetLatestVersion() (string, error)
	// History operations (see the package constants above for windowing)
	GetHashrateHistory() []HashratePoint
	AddHashratePoint(point HashratePoint)
	ReduceHashrateHistory(now time.Time)
	// IO operations
	GetLogs() []string
	WriteStdin(input string) error
}
// InstallationDetails contains information about an installed miner.
type InstallationDetails struct {
	IsInstalled bool   `json:"is_installed"` // whether the miner binary is present
	Version     string `json:"version"`      // detected miner version
	Path        string `json:"path"`         // installation directory
	MinerBinary string `json:"miner_binary"` // path to the miner executable
	ConfigPath  string `json:"config_path,omitempty"` // Add path to the miner-specific config
}
// SystemInfo provides general system and miner installation information,
// a point-in-time snapshot of the host and all detected miners.
type SystemInfo struct {
	Timestamp           time.Time              `json:"timestamp"`            // when the snapshot was taken
	OS                  string                 `json:"os"`                   // host operating system
	Architecture        string                 `json:"architecture"`         // host CPU architecture
	GoVersion           string                 `json:"go_version"`           // runtime Go version
	AvailableCPUCores   int                    `json:"available_cpu_cores"`
	TotalSystemRAMGB    float64                `json:"total_system_ram_gb"`
	InstalledMinersInfo []*InstallationDetails `json:"installed_miners_info"` // one entry per known miner type
}
// Config represents the configuration for a miner. Fields map onto the
// miner's command-line flags / JSON config, so string fields are validated
// against shell metacharacters before use (see Validate).
type Config struct {
	// Core connection settings.
	Miner     string `json:"miner"`
	Pool      string `json:"pool"`
	Wallet    string `json:"wallet"`
	Threads   int    `json:"threads"`
	TLS       bool   `json:"tls"`
	HugePages bool   `json:"hugePages"`
	Algo      string `json:"algo,omitempty"`
	Coin      string `json:"coin,omitempty"`
	Password  string `json:"password,omitempty"`
	UserPass  string `json:"userPass,omitempty"`
	Proxy     string `json:"proxy,omitempty"`
	Keepalive bool   `json:"keepalive,omitempty"`
	Nicehash  bool   `json:"nicehash,omitempty"`
	RigID     string `json:"rigId,omitempty"`
	// NOTE: field name typo ("Singerprint") kept for backward compatibility
	// with existing callers; the JSON tag is spelled correctly.
	TLSSingerprint  string `json:"tlsFingerprint,omitempty"`
	Retries         int    `json:"retries,omitempty"`
	RetryPause      int    `json:"retryPause,omitempty"`
	UserAgent       string `json:"userAgent,omitempty"`
	DonateLevel     int    `json:"donateLevel,omitempty"`
	DonateOverProxy bool   `json:"donateOverProxy,omitempty"`
	// CPU backend tuning.
	NoCPU             bool   `json:"noCpu,omitempty"`
	CPUAffinity       string `json:"cpuAffinity,omitempty"`
	AV                int    `json:"av,omitempty"`
	CPUPriority       int    `json:"cpuPriority,omitempty"`
	CPUMaxThreadsHint int    `json:"cpuMaxThreadsHint,omitempty"`
	CPUMemoryPool     int    `json:"cpuMemoryPool,omitempty"`
	CPUNoYield        bool   `json:"cpuNoYield,omitempty"`
	HugepageSize      int    `json:"hugepageSize,omitempty"`
	HugePagesJIT      bool   `json:"hugePagesJIT,omitempty"`
	ASM               string `json:"asm,omitempty"`
	Argon2Impl        string `json:"argon2Impl,omitempty"`
	// RandomX tuning.
	RandomXInit     int    `json:"randomXInit,omitempty"`
	RandomXNoNUMA   bool   `json:"randomXNoNuma,omitempty"`
	RandomXMode     string `json:"randomXMode,omitempty"`
	RandomX1GBPages bool   `json:"randomX1GBPages,omitempty"`
	RandomXWrmsr    string `json:"randomXWrmsr,omitempty"`
	RandomXNoRdmsr  bool   `json:"randomXNoRdmsr,omitempty"`
	RandomXCacheQoS bool   `json:"randomXCacheQoS,omitempty"`
	// Miner HTTP API.
	APIWorkerID      string `json:"apiWorkerId,omitempty"`
	APIID            string `json:"apiId,omitempty"`
	HTTPHost         string `json:"httpHost,omitempty"`
	HTTPPort         int    `json:"httpPort,omitempty"`
	HTTPAccessToken  string `json:"httpAccessToken,omitempty"`
	HTTPNoRestricted bool   `json:"httpNoRestricted,omitempty"`
	// Logging / process behavior.
	Syslog          bool   `json:"syslog,omitempty"`
	LogFile         string `json:"logFile,omitempty"`
	PrintTime       int    `json:"printTime,omitempty"`
	HealthPrintTime int    `json:"healthPrintTime,omitempty"`
	NoColor         bool   `json:"noColor,omitempty"`
	Verbose         bool   `json:"verbose,omitempty"`
	LogOutput       bool   `json:"logOutput,omitempty"`
	Background      bool   `json:"background,omitempty"`
	Title           string `json:"title,omitempty"`
	NoTitle         bool   `json:"noTitle,omitempty"`
	PauseOnBattery  bool   `json:"pauseOnBattery,omitempty"`
	PauseOnActive   int    `json:"pauseOnActive,omitempty"`
	// Benchmark / self-test modes.
	Stress bool   `json:"stress,omitempty"`
	Bench  string `json:"bench,omitempty"`
	Submit bool   `json:"submit,omitempty"`
	Verify string `json:"verify,omitempty"`
	Seed   string `json:"seed,omitempty"`
	Hash   string `json:"hash,omitempty"`
	NoDMI  bool   `json:"noDMI,omitempty"`
	// GPU-specific options (for XMRig dual CPU+GPU mining)
	GPUEnabled   bool   `json:"gpuEnabled,omitempty"`   // Enable GPU mining
	GPUPool      string `json:"gpuPool,omitempty"`      // Separate pool for GPU (can differ from CPU)
	GPUWallet    string `json:"gpuWallet,omitempty"`    // Wallet for GPU pool (defaults to main Wallet)
	GPUAlgo      string `json:"gpuAlgo,omitempty"`      // Algorithm for GPU (e.g., "kawpow", "ethash")
	GPUPassword  string `json:"gpuPassword,omitempty"`  // Password for GPU pool
	GPUIntensity int    `json:"gpuIntensity,omitempty"` // GPU mining intensity (0-100)
	GPUThreads   int    `json:"gpuThreads,omitempty"`   // GPU threads per card
	Devices      string `json:"devices,omitempty"`      // GPU device selection (e.g., "0,1,2")
	OpenCL       bool   `json:"opencl,omitempty"`       // Enable OpenCL (AMD/Intel GPUs)
	CUDA         bool   `json:"cuda,omitempty"`         // Enable CUDA (NVIDIA GPUs)
	Intensity    int    `json:"intensity,omitempty"`    // Mining intensity for GPU miners
	CLIArgs      string `json:"cliArgs,omitempty"`      // Additional CLI arguments
}

// Validate checks the Config for common errors and security issues.
// Returns nil if valid, otherwise returns a descriptive error.
//
// Fix over the original: the GPU-side fields (GPUPool, GPUWallet, GPUAlgo,
// GPUThreads) reach the same command line as their CPU counterparts but were
// never validated; they now receive the same checks.
func (c *Config) Validate() error {
	// Pool URL validation: reject shell metacharacters outright.
	if c.Pool != "" && containsShellChars(c.Pool) {
		return fmt.Errorf("pool URL contains invalid characters")
	}
	if c.GPUPool != "" && containsShellChars(c.GPUPool) {
		return fmt.Errorf("GPU pool URL contains invalid characters")
	}
	// Wallet validation (basic alphanumeric + special chars allowed in addresses).
	// Most wallet addresses are 40-128 chars; 256 is a generous cap.
	if c.Wallet != "" {
		if containsShellChars(c.Wallet) {
			return fmt.Errorf("wallet address contains invalid characters")
		}
		if len(c.Wallet) > 256 {
			return fmt.Errorf("wallet address too long (max 256 chars)")
		}
	}
	if c.GPUWallet != "" {
		if containsShellChars(c.GPUWallet) {
			return fmt.Errorf("GPU wallet address contains invalid characters")
		}
		if len(c.GPUWallet) > 256 {
			return fmt.Errorf("GPU wallet address too long (max 256 chars)")
		}
	}
	// Thread count validation (CPU and GPU held to the same bounds).
	if c.Threads < 0 {
		return fmt.Errorf("threads cannot be negative")
	}
	if c.Threads > 1024 {
		return fmt.Errorf("threads value too high (max 1024)")
	}
	if c.GPUThreads < 0 {
		return fmt.Errorf("GPU threads cannot be negative")
	}
	if c.GPUThreads > 1024 {
		return fmt.Errorf("GPU threads value too high (max 1024)")
	}
	// Algorithm validation (alphanumeric, dash, slash, underscore).
	if c.Algo != "" && !isValidAlgo(c.Algo) {
		return fmt.Errorf("algorithm name contains invalid characters")
	}
	if c.GPUAlgo != "" && !isValidAlgo(c.GPUAlgo) {
		return fmt.Errorf("GPU algorithm name contains invalid characters")
	}
	// Intensity validation
	if c.Intensity < 0 || c.Intensity > 100 {
		return fmt.Errorf("intensity must be between 0 and 100")
	}
	if c.GPUIntensity < 0 || c.GPUIntensity > 100 {
		return fmt.Errorf("GPU intensity must be between 0 and 100")
	}
	// Donate level validation
	if c.DonateLevel < 0 || c.DonateLevel > 100 {
		return fmt.Errorf("donate level must be between 0 and 100")
	}
	// CLIArgs validation - check for shell metacharacters and cap length.
	if c.CLIArgs != "" {
		if containsShellChars(c.CLIArgs) {
			return fmt.Errorf("CLI arguments contain invalid characters")
		}
		if len(c.CLIArgs) > 1024 {
			return fmt.Errorf("CLI arguments too long (max 1024 chars)")
		}
	}
	return nil
}

// shellMetaChars is the set of characters rejected by containsShellChars:
// none of them are legitimate in pool URLs, wallet addresses, or CLI args,
// and all could enable command injection if they reached a shell.
const shellMetaChars = ";|&`$(){}<>\n\r\\'\"!"

// containsShellChars reports whether s contains any shell metacharacter.
// Single pass via strings.ContainsAny instead of one Contains per character.
func containsShellChars(s string) bool {
	return strings.ContainsAny(s, shellMetaChars)
}

// isValidAlgo checks if an algorithm name contains only valid characters
// ([A-Za-z0-9], '-', '/', '_').
func isValidAlgo(algo string) bool {
	for _, r := range algo {
		switch {
		case r >= 'a' && r <= 'z',
			r >= 'A' && r <= 'Z',
			r >= '0' && r <= '9',
			r == '-', r == '/', r == '_':
			// valid character
		default:
			return false
		}
	}
	return true
}
// PerformanceMetrics represents the performance metrics for a miner,
// normalized across miner implementations.
type PerformanceMetrics struct {
	Hashrate      int                    `json:"hashrate"`  // current hashrate (H/s presumed — confirm per miner)
	Shares        int                    `json:"shares"`    // accepted shares
	Rejected      int                    `json:"rejected"`  // rejected shares
	Uptime        int                    `json:"uptime"`    // uptime (seconds presumed — confirm per miner)
	LastShare     int64                  `json:"lastShare"` // timestamp of the last share (epoch presumed)
	Algorithm     string                 `json:"algorithm"`
	AvgDifficulty int                    `json:"avgDifficulty"` // Average difficulty per accepted share (HashesTotal/SharesGood)
	DiffCurrent   int                    `json:"diffCurrent"`   // Current job difficulty from pool
	ExtraData     map[string]interface{} `json:"extraData,omitempty"` // miner-specific extras not covered above
}
// HashratePoint represents a single hashrate measurement at a specific time.
// Sequences of these form the history returned by Miner.GetHashrateHistory.
type HashratePoint struct {
	Timestamp time.Time `json:"timestamp"` // when the sample was taken
	Hashrate  int       `json:"hashrate"`  // measured hashrate at that instant
}
// API represents the miner's API configuration (the HTTP endpoint the miner
// itself exposes for stats polling).
type API struct {
	Enabled    bool   `json:"enabled"`    // whether the miner's HTTP API is turned on
	ListenHost string `json:"listenHost"` // bind address for the miner API
	ListenPort int    `json:"listenPort"` // bind port for the miner API
}
// XMRigSummary represents the full JSON response from the XMRig API
// (the summary endpoint). Field names and tags mirror XMRig's wire format
// exactly; do not rename tags.
type XMRigSummary struct {
	ID         string `json:"id"`
	WorkerID   string `json:"worker_id"`
	Uptime     int    `json:"uptime"`
	Restricted bool   `json:"restricted"`
	// Resources describes host memory and load as seen by the miner process.
	Resources struct {
		Memory struct {
			Free              int64 `json:"free"`
			Total             int64 `json:"total"`
			ResidentSetMemory int64 `json:"resident_set_memory"`
		} `json:"memory"`
		LoadAverage         []float64 `json:"load_average"`
		HardwareConcurrency int       `json:"hardware_concurrency"`
	} `json:"resources"`
	Features []string `json:"features"`
	// Results aggregates share-submission outcomes for the session.
	Results struct {
		DiffCurrent int   `json:"diff_current"`
		SharesGood  int   `json:"shares_good"`
		SharesTotal int   `json:"shares_total"`
		AvgTime     int   `json:"avg_time"`
		AvgTimeMS   int   `json:"avg_time_ms"`
		HashesTotal int   `json:"hashes_total"`
		Best        []int `json:"best"`
	} `json:"results"`
	Algo string `json:"algo"`
	// Connection describes the current pool connection.
	Connection struct {
		Pool           string `json:"pool"`
		IP             string `json:"ip"`
		Uptime         int    `json:"uptime"`
		UptimeMS       int    `json:"uptime_ms"`
		Ping           int    `json:"ping"`
		Failures       int    `json:"failures"`
		TLS            string `json:"tls"`
		TLSFingerprint string `json:"tls-fingerprint"`
		Algo           string `json:"algo"`
		Diff           int    `json:"diff"`
		Accepted       int    `json:"accepted"`
		Rejected       int    `json:"rejected"`
		AvgTime        int    `json:"avg_time"`
		AvgTimeMS      int    `json:"avg_time_ms"`
		HashesTotal    int    `json:"hashes_total"`
	} `json:"connection"`
	Version string `json:"version"`
	Kind    string `json:"kind"`
	UA      string `json:"ua"`
	// CPU is XMRig's detected CPU topology and feature set.
	CPU struct {
		Brand    string   `json:"brand"`
		Family   int      `json:"family"`
		Model    int      `json:"model"`
		Stepping int      `json:"stepping"`
		ProcInfo int      `json:"proc_info"`
		AES      bool     `json:"aes"`
		AVX2     bool     `json:"avx2"`
		X64      bool     `json:"x64"`
		Is64Bit  bool     `json:"64_bit"`
		L2       int      `json:"l2"`
		L3       int      `json:"l3"`
		Cores    int      `json:"cores"`
		Threads  int      `json:"threads"`
		Packages int      `json:"packages"`
		Nodes    int      `json:"nodes"`
		Backend  string   `json:"backend"`
		MSR      string   `json:"msr"`
		Assembly string   `json:"assembly"`
		Arch     string   `json:"arch"`
		Flags    []string `json:"flags"`
	} `json:"cpu"`
	DonateLevel int      `json:"donate_level"`
	Paused      bool     `json:"paused"`
	Algorithms  []string `json:"algorithms"`
	// Hashrate holds the rolling hashrate totals reported by XMRig.
	Hashrate struct {
		Total   []float64 `json:"total"`
		Highest float64   `json:"highest"`
	} `json:"hashrate"`
	Hugepages []int `json:"hugepages"`
}
// AvailableMiner represents a miner that is available for use, as surfaced
// to the UI by Manager.ListAvailableMiners.
type AvailableMiner struct {
	Name        string `json:"name"`        // miner type identifier (e.g., "xmrig")
	Description string `json:"description"` // human-readable description
}

36
mining/mining_profile.go Normal file
View file

@ -0,0 +1,36 @@
package mining
import (
"errors"
)
// RawConfig is a raw encoded JSON value, mirroring json.RawMessage: it lets
// decoding be deferred or a precomputed encoding be passed through verbatim.
// It is declared as a local []byte type (rather than json.RawMessage) to
// avoid swagger parsing issues with the json package.
type RawConfig []byte

// MiningProfile represents a saved configuration for running a specific miner.
// It decouples the UI from the underlying miner's specific config structure.
type MiningProfile struct {
	ID        string    `json:"id"`
	Name      string    `json:"name"`
	MinerType string    `json:"minerType"` // e.g., "xmrig", "ttminer"
	Config    RawConfig `json:"config" swaggertype:"object"` // The raw JSON config for the specific miner
}

// MarshalJSON returns the stored bytes unchanged, or the JSON null literal
// when the value is nil.
func (m RawConfig) MarshalJSON() ([]byte, error) {
	if m != nil {
		return m, nil
	}
	return []byte("null"), nil
}

// UnmarshalJSON sets *m to a copy of data so the caller's buffer can be
// reused without corrupting the stored config.
func (m *RawConfig) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("RawConfig: UnmarshalJSON on nil pointer")
	}
	buf := make([]byte, len(data))
	copy(buf, data)
	*m = buf
	return nil
}

60
mining/mining_test.go Normal file
View file

@ -0,0 +1,60 @@
package mining
import (
"testing"
)
// TestNewManager verifies that NewManager constructs a manager with an
// initialized miners map.
func TestNewManager(t *testing.T) {
	manager := NewManager()
	// Guard against nil BEFORE deferring Stop or touching fields. The
	// original deferred manager.Stop() ahead of the nil check, so a nil
	// manager would panic before the check could report the failure.
	if manager == nil {
		t.Fatal("NewManager returned nil")
	}
	defer manager.Stop()
	if manager.miners == nil {
		t.Error("Manager miners map is nil")
	}
}
// TestStartAndStopMiner is intentionally skipped: exercising Start/Stop
// would spawn a real miner process, which is unsuitable for unit tests.
func TestStartAndStopMiner(t *testing.T) {
	t.Skip("Skipping test that attempts to run miner process")
}
// TestGetNonExistentMiner ensures GetMiner reports an error for unknown IDs.
func TestGetNonExistentMiner(t *testing.T) {
	manager := NewManager()
	defer manager.Stop()
	if _, err := manager.GetMiner("non-existent"); err == nil {
		t.Error("Expected error for getting non-existent miner")
	}
}
// TestListMiners checks that ListMiners returns a non-nil slice. The count
// is not asserted because autostart may pre-populate running miners.
func TestListMiners(t *testing.T) {
	manager := NewManager()
	defer manager.Stop()
	if miners := manager.ListMiners(); miners == nil {
		t.Error("ListMiners returned nil")
	}
}
// TestListAvailableMiners checks that at least one miner type is advertised.
func TestListAvailableMiners(t *testing.T) {
	manager := NewManager()
	defer manager.Stop()
	if available := manager.ListAvailableMiners(); len(available) == 0 {
		t.Error("Expected at least one available miner")
	}
}
// TestGetVersion ensures the package reports a non-empty version string.
func TestGetVersion(t *testing.T) {
	if GetVersion() == "" {
		t.Error("Version is empty")
	}
}

29
mining/node_service.go Normal file
View file

@ -0,0 +1,29 @@
package mining
import (
"fmt"
"github.com/gin-gonic/gin"
)
// NodeService handles P2P node-related API endpoints.
// This is a stub — the full implementation lives in core/go-p2p.
// When P2P is needed, inject a concrete NodeService via the container.
type NodeService struct{}

// NewNodeService returns an error because P2P node support has moved to core/go-p2p.
// Callers (Container, Service) handle nil NodeService gracefully.
func NewNodeService() (*NodeService, error) {
	err := fmt.Errorf("P2P node service not available (moved to core/go-p2p)")
	return nil, err
}

// SetupRoutes is a no-op stub: no routes are registered.
func (ns *NodeService) SetupRoutes(router *gin.RouterGroup) {
}

// StartTransport always fails: no transport exists in this stub.
func (ns *NodeService) StartTransport() error {
	return fmt.Errorf("P2P transport not available")
}

// StopTransport is a no-op stub; there is nothing to shut down.
func (ns *NodeService) StopTransport() error {
	return nil
}

164
mining/profile_manager.go Normal file
View file

@ -0,0 +1,164 @@
package mining
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/adrg/xdg"
"github.com/google/uuid"
)
// profileConfigFileName is the on-disk file name for persisted profiles,
// resolved under the XDG config directory for lethean-desktop.
const profileConfigFileName = "mining_profiles.json"

// ProfileManager handles CRUD operations for MiningProfiles.
type ProfileManager struct {
	mu         sync.RWMutex              // guards profiles
	profiles   map[string]*MiningProfile // in-memory store keyed by profile ID
	configPath string                    // resolved path of the JSON persistence file
}
// NewProfileManager creates and initializes a new ProfileManager. A missing
// profiles file is treated as an empty store; any other load failure is an
// error.
func NewProfileManager() (*ProfileManager, error) {
	configPath, err := xdg.ConfigFile(filepath.Join("lethean-desktop", profileConfigFileName))
	if err != nil {
		return nil, fmt.Errorf("could not resolve config path: %w", err)
	}
	pm := &ProfileManager{
		configPath: configPath,
		profiles:   map[string]*MiningProfile{},
	}
	err = pm.loadProfiles()
	if err != nil && !os.IsNotExist(err) {
		return nil, fmt.Errorf("could not load profiles: %w", err)
	}
	return pm, nil
}
// loadProfiles reads the profiles from the JSON file into memory, replacing
// the current in-memory set. It returns the underlying read error (which may
// satisfy os.IsNotExist) or a JSON decode error.
func (pm *ProfileManager) loadProfiles() error {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	data, err := os.ReadFile(pm.configPath)
	if err != nil {
		return err
	}
	var profiles []*MiningProfile
	if err := json.Unmarshal(data, &profiles); err != nil {
		return err
	}
	pm.profiles = make(map[string]*MiningProfile, len(profiles))
	for _, p := range profiles {
		// Skip null entries: a hand-edited or partially corrupted file
		// containing "null" elements would otherwise panic on p.ID.
		if p == nil {
			continue
		}
		pm.profiles[p.ID] = p
	}
	return nil
}
// saveProfiles writes the current profiles from memory to the JSON file.
// Internal method: the caller must hold the appropriate lock on pm.mu.
// Persistence is atomic (temp file, sync, rename) via AtomicWriteFile.
func (pm *ProfileManager) saveProfiles() error {
	list := make([]*MiningProfile, 0, len(pm.profiles))
	for _, profile := range pm.profiles {
		list = append(list, profile)
	}
	data, err := json.MarshalIndent(list, "", " ")
	if err != nil {
		return err
	}
	return AtomicWriteFile(pm.configPath, data, 0600)
}
// CreateProfile adds a new profile and saves it. A fresh UUID is always
// assigned; if persisting fails, the in-memory insert is rolled back.
func (pm *ProfileManager) CreateProfile(profile *MiningProfile) (*MiningProfile, error) {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	id := uuid.New().String()
	profile.ID = id
	pm.profiles[id] = profile
	if err := pm.saveProfiles(); err != nil {
		delete(pm.profiles, id) // roll back the failed insert
		return nil, err
	}
	return profile, nil
}
// GetProfile retrieves a profile by its ID; the second return value reports
// whether it exists.
func (pm *ProfileManager) GetProfile(id string) (*MiningProfile, bool) {
	pm.mu.RLock()
	defer pm.mu.RUnlock()
	p, ok := pm.profiles[id]
	return p, ok
}
// GetAllProfiles returns a list of all profiles in unspecified order.
func (pm *ProfileManager) GetAllProfiles() []*MiningProfile {
	pm.mu.RLock()
	defer pm.mu.RUnlock()
	all := make([]*MiningProfile, 0, len(pm.profiles))
	for _, profile := range pm.profiles {
		all = append(all, profile)
	}
	return all
}
// UpdateProfile modifies an existing profile. If persisting fails, the
// previous version is restored in memory.
func (pm *ProfileManager) UpdateProfile(profile *MiningProfile) error {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	previous, ok := pm.profiles[profile.ID]
	if !ok {
		return fmt.Errorf("profile with ID %s not found", profile.ID)
	}
	pm.profiles[profile.ID] = profile
	if err := pm.saveProfiles(); err != nil {
		pm.profiles[profile.ID] = previous // roll back on save failure
		return fmt.Errorf("failed to save profile: %w", err)
	}
	return nil
}
// DeleteProfile removes a profile by its ID. If persisting fails, the
// removed profile is restored in memory.
func (pm *ProfileManager) DeleteProfile(id string) error {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	removed, ok := pm.profiles[id]
	if !ok {
		return fmt.Errorf("profile with ID %s not found", id)
	}
	delete(pm.profiles, id)
	if err := pm.saveProfiles(); err != nil {
		pm.profiles[id] = removed // roll back on save failure
		return fmt.Errorf("failed to delete profile: %w", err)
	}
	return nil
}

View file

@ -0,0 +1,365 @@
package mining
import (
"encoding/json"
"os"
"path/filepath"
"sync"
"testing"
)
// setupTestProfileManager creates a ProfileManager backed by a temp config
// path. Uses t.TempDir (automatic cleanup, per-test isolation) instead of
// MkdirTemp + manual removal; the returned cleanup func is retained as a
// no-op for signature compatibility with existing callers. Marked t.Helper
// so failures are attributed to the calling test.
func setupTestProfileManager(t *testing.T) (*ProfileManager, func()) {
	t.Helper()
	configPath := filepath.Join(t.TempDir(), "mining_profiles.json")
	pm := &ProfileManager{
		profiles:   make(map[string]*MiningProfile),
		configPath: configPath,
	}
	return pm, func() {}
}
// TestProfileManagerCreate verifies CreateProfile assigns an ID, preserves
// the supplied fields, and stores the profile for later retrieval.
func TestProfileManagerCreate(t *testing.T) {
	pm, cleanup := setupTestProfileManager(t)
	defer cleanup()
	created, err := pm.CreateProfile(&MiningProfile{
		Name:      "Test Profile",
		MinerType: "xmrig",
		Config:    RawConfig(`{"pool": "test.pool.com:3333"}`),
	})
	if err != nil {
		t.Fatalf("failed to create profile: %v", err)
	}
	if created.ID == "" {
		t.Error("created profile should have an ID")
	}
	if created.Name != "Test Profile" {
		t.Errorf("expected name 'Test Profile', got '%s'", created.Name)
	}
	retrieved, exists := pm.GetProfile(created.ID)
	if !exists {
		t.Error("profile should exist after creation")
	}
	if retrieved.Name != created.Name {
		t.Errorf("retrieved name doesn't match: expected '%s', got '%s'", created.Name, retrieved.Name)
	}
}
// TestProfileManagerGet covers lookups of both missing and existing IDs.
func TestProfileManagerGet(t *testing.T) {
	pm, cleanup := setupTestProfileManager(t)
	defer cleanup()
	if _, exists := pm.GetProfile("non-existent-id"); exists {
		t.Error("GetProfile should return false for non-existent ID")
	}
	created, _ := pm.CreateProfile(&MiningProfile{Name: "Get Test", MinerType: "xmrig"})
	retrieved, exists := pm.GetProfile(created.ID)
	if !exists {
		t.Error("GetProfile should return true for existing ID")
	}
	if retrieved.ID != created.ID {
		t.Error("GetProfile returned wrong profile")
	}
}
// TestProfileManagerGetAll verifies the listing is empty initially and grows
// as profiles are created.
func TestProfileManagerGetAll(t *testing.T) {
	pm, cleanup := setupTestProfileManager(t)
	defer cleanup()
	if profiles := pm.GetAllProfiles(); len(profiles) != 0 {
		t.Errorf("expected 0 profiles initially, got %d", len(profiles))
	}
	for i := 0; i < 3; i++ {
		// The original dropped this error; a failed create would surface
		// later only as a confusing count mismatch.
		if _, err := pm.CreateProfile(&MiningProfile{
			Name:      "Profile",
			MinerType: "xmrig",
		}); err != nil {
			t.Fatalf("failed to create profile: %v", err)
		}
	}
	if profiles := pm.GetAllProfiles(); len(profiles) != 3 {
		t.Errorf("expected 3 profiles, got %d", len(profiles))
	}
}
// TestProfileManagerUpdate covers updating a missing profile (error) and a
// successful field update on an existing one.
func TestProfileManagerUpdate(t *testing.T) {
	pm, cleanup := setupTestProfileManager(t)
	defer cleanup()
	if err := pm.UpdateProfile(&MiningProfile{ID: "non-existent"}); err == nil {
		t.Error("UpdateProfile should fail for non-existent profile")
	}
	created, _ := pm.CreateProfile(&MiningProfile{Name: "Original Name", MinerType: "xmrig"})
	created.Name = "Updated Name"
	created.MinerType = "ttminer"
	if err := pm.UpdateProfile(created); err != nil {
		t.Fatalf("failed to update profile: %v", err)
	}
	retrieved, _ := pm.GetProfile(created.ID)
	if retrieved.Name != "Updated Name" {
		t.Errorf("expected name 'Updated Name', got '%s'", retrieved.Name)
	}
	if retrieved.MinerType != "ttminer" {
		t.Errorf("expected miner type 'ttminer', got '%s'", retrieved.MinerType)
	}
}
// TestProfileManagerDelete covers deleting a missing profile (error) and a
// successful delete of an existing one.
func TestProfileManagerDelete(t *testing.T) {
	pm, cleanup := setupTestProfileManager(t)
	defer cleanup()
	if err := pm.DeleteProfile("non-existent"); err == nil {
		t.Error("DeleteProfile should fail for non-existent profile")
	}
	created, _ := pm.CreateProfile(&MiningProfile{Name: "Delete Me", MinerType: "xmrig"})
	if err := pm.DeleteProfile(created.ID); err != nil {
		t.Fatalf("failed to delete profile: %v", err)
	}
	if _, exists := pm.GetProfile(created.ID); exists {
		t.Error("profile should not exist after deletion")
	}
}
func TestProfileManagerPersistence(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "profile-persist-test")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
configPath := filepath.Join(tmpDir, "mining_profiles.json")
// Create first manager and add profile
pm1 := &ProfileManager{
profiles: make(map[string]*MiningProfile),
configPath: configPath,
}
profile := &MiningProfile{
Name: "Persistent Profile",
MinerType: "xmrig",
Config: RawConfig(`{"pool": "persist.pool.com"}`),
}
created, err := pm1.CreateProfile(profile)
if err != nil {
t.Fatalf("failed to create profile: %v", err)
}
// Create second manager with same path - should load existing profile
pm2 := &ProfileManager{
profiles: make(map[string]*MiningProfile),
configPath: configPath,
}
err = pm2.loadProfiles()
if err != nil {
t.Fatalf("failed to load profiles: %v", err)
}
// Verify profile persisted
loaded, exists := pm2.GetProfile(created.ID)
if !exists {
t.Fatal("profile should be loaded from file")
}
if loaded.Name != "Persistent Profile" {
t.Errorf("expected name 'Persistent Profile', got '%s'", loaded.Name)
}
}
// TestProfileManagerConcurrency exercises concurrent creates and reads;
// run with -race to catch locking regressions.
func TestProfileManagerConcurrency(t *testing.T) {
	pm, cleanup := setupTestProfileManager(t)
	defer cleanup()
	var wg sync.WaitGroup
	const numGoroutines = 10
	// Concurrent creates. The original ignored CreateProfile's error and
	// passed an unused loop-index argument; errors are now reported so a
	// count mismatch below has a visible cause.
	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if _, err := pm.CreateProfile(&MiningProfile{
				Name:      "Concurrent Profile",
				MinerType: "xmrig",
			}); err != nil {
				t.Errorf("CreateProfile failed: %v", err)
			}
		}()
	}
	wg.Wait()
	if profiles := pm.GetAllProfiles(); len(profiles) != numGoroutines {
		t.Errorf("expected %d profiles, got %d", numGoroutines, len(profiles))
	}
	// Concurrent reads
	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			pm.GetAllProfiles()
		}()
	}
	wg.Wait()
}
// TestProfileManagerInvalidJSON verifies loadProfiles rejects a corrupted
// profiles file. Uses t.TempDir instead of MkdirTemp + manual cleanup.
func TestProfileManagerInvalidJSON(t *testing.T) {
	configPath := filepath.Join(t.TempDir(), "mining_profiles.json")
	if err := os.WriteFile(configPath, []byte("invalid json{{{"), 0644); err != nil {
		t.Fatalf("failed to write invalid JSON: %v", err)
	}
	pm := &ProfileManager{
		profiles:   make(map[string]*MiningProfile),
		configPath: configPath,
	}
	if err := pm.loadProfiles(); err == nil {
		t.Error("loadProfiles should fail with invalid JSON")
	}
}
// TestProfileManagerFileNotFound verifies loadProfiles surfaces a
// not-exist error for a missing profiles file.
func TestProfileManagerFileNotFound(t *testing.T) {
	pm := &ProfileManager{
		profiles:   make(map[string]*MiningProfile),
		configPath: "/non/existent/path/profiles.json",
	}
	err := pm.loadProfiles()
	switch {
	case err == nil:
		t.Error("loadProfiles should fail when file not found")
	case !os.IsNotExist(err):
		t.Errorf("expected 'file not found' error, got: %v", err)
	}
}
// TestProfileManagerCreateRollback verifies the in-memory insert is undone
// when persisting a new profile fails (unwritable path).
func TestProfileManagerCreateRollback(t *testing.T) {
	pm := &ProfileManager{
		profiles:   make(map[string]*MiningProfile),
		configPath: "/invalid/path/that/cannot/be/written/profiles.json",
	}
	if _, err := pm.CreateProfile(&MiningProfile{Name: "Rollback Test", MinerType: "xmrig"}); err == nil {
		t.Error("CreateProfile should fail when save fails")
	}
	if remaining := pm.GetAllProfiles(); len(remaining) != 0 {
		t.Error("failed create should rollback - no profile should be in memory")
	}
}
// TestProfileManagerConfigWithData verifies the raw JSON config survives a
// create/get round trip intact.
func TestProfileManagerConfigWithData(t *testing.T) {
	pm, cleanup := setupTestProfileManager(t)
	defer cleanup()
	profile := &MiningProfile{
		Name:      "Config Test",
		MinerType: "xmrig",
		Config: RawConfig(`{
		"pool": "pool.example.com:3333",
		"wallet": "wallet123",
		"threads": 4,
		"algorithm": "rx/0"
	}`),
	}
	created, err := pm.CreateProfile(profile)
	if err != nil {
		t.Fatalf("failed to create profile: %v", err)
	}
	retrieved, _ := pm.GetProfile(created.ID)
	var parsedConfig map[string]interface{}
	if err := json.Unmarshal(retrieved.Config, &parsedConfig); err != nil {
		t.Fatalf("failed to parse config: %v", err)
	}
	if parsedConfig["pool"] != "pool.example.com:3333" {
		t.Error("config pool value not preserved")
	}
	// Checked type assertion: the original's bare .(float64) would panic
	// (instead of failing the test) if the value round-tripped wrongly.
	if threads, ok := parsedConfig["threads"].(float64); !ok || threads != 4 {
		t.Error("config threads value not preserved")
	}
}

119
mining/ratelimiter.go Normal file
View file

@ -0,0 +1,119 @@
package mining
import (
"net/http"
"sync"
"time"
"github.com/gin-gonic/gin"
)
// RateLimiter provides token bucket rate limiting per IP address.
// A background goroutine evicts idle clients; call Stop to terminate it.
type RateLimiter struct {
	requestsPerSecond int                         // steady-state refill rate (tokens per second)
	burst             int                         // bucket capacity (max tokens per client)
	clients           map[string]*rateLimitClient // per-IP bucket state
	mu                sync.RWMutex                // guards clients and stopped
	stopChan          chan struct{}               // closed by Stop to end cleanupLoop
	stopped           bool                        // set once Stop has closed stopChan
}

// rateLimitClient holds the token bucket state for a single client IP.
type rateLimitClient struct {
	tokens    float64   // tokens currently available (fractional between refills)
	lastCheck time.Time // last refill time / last request seen
}
// NewRateLimiter creates a new rate limiter with the specified limits and
// starts its background cleanup goroutine (stopped via Stop).
func NewRateLimiter(requestsPerSecond, burst int) *RateLimiter {
	limiter := &RateLimiter{
		requestsPerSecond: requestsPerSecond,
		burst:             burst,
		clients:           map[string]*rateLimitClient{},
		stopChan:          make(chan struct{}),
	}
	go limiter.cleanupLoop()
	return limiter
}
// cleanupLoop removes stale clients once a minute until Stop is called.
func (rl *RateLimiter) cleanupLoop() {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			rl.cleanup()
		case <-rl.stopChan:
			return
		}
	}
}
// cleanup drops clients whose last request was more than 5 minutes ago,
// bounding memory growth for one-off callers.
func (rl *RateLimiter) cleanup() {
	rl.mu.Lock()
	defer rl.mu.Unlock()
	cutoff := time.Now().Add(-5 * time.Minute)
	for ip, client := range rl.clients {
		if client.lastCheck.Before(cutoff) {
			delete(rl.clients, ip)
		}
	}
}
// Stop stops the rate limiter's cleanup goroutine. Safe to call repeatedly;
// only the first call closes the channel.
func (rl *RateLimiter) Stop() {
	rl.mu.Lock()
	defer rl.mu.Unlock()
	if rl.stopped {
		return
	}
	rl.stopped = true
	close(rl.stopChan)
}
// Middleware returns a Gin middleware handler enforcing the per-IP token
// bucket: requests with no available token get 429 and are aborted.
func (rl *RateLimiter) Middleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		ip := c.ClientIP()
		now := time.Now()

		rl.mu.Lock()
		client, ok := rl.clients[ip]
		if !ok {
			client = &rateLimitClient{tokens: float64(rl.burst), lastCheck: now}
			rl.clients[ip] = client
		}
		// Token bucket: refill proportionally to elapsed time, capped at burst.
		refill := now.Sub(client.lastCheck).Seconds() * float64(rl.requestsPerSecond)
		client.tokens += refill
		if cap := float64(rl.burst); client.tokens > cap {
			client.tokens = cap
		}
		client.lastCheck = now
		allowed := client.tokens >= 1
		if allowed {
			client.tokens--
		}
		rl.mu.Unlock()

		if !allowed {
			respondWithError(c, http.StatusTooManyRequests, "RATE_LIMITED",
				"too many requests", "rate limit exceeded")
			c.Abort()
			return
		}
		c.Next()
	}
}
// ClientCount returns the number of tracked clients (for testing/monitoring).
func (rl *RateLimiter) ClientCount() int {
	rl.mu.RLock()
	n := len(rl.clients)
	rl.mu.RUnlock()
	return n
}

194
mining/ratelimiter_test.go Normal file
View file

@ -0,0 +1,194 @@
package mining
import (
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/gin-gonic/gin"
)
// TestNewRateLimiter verifies the constructor wires up the configured limits.
func TestNewRateLimiter(t *testing.T) {
	rl := NewRateLimiter(10, 20)
	if rl == nil {
		t.Fatal("NewRateLimiter returned nil")
	}
	defer rl.Stop()
	if got := rl.requestsPerSecond; got != 10 {
		t.Errorf("Expected requestsPerSecond 10, got %d", got)
	}
	if got := rl.burst; got != 20 {
		t.Errorf("Expected burst 20, got %d", got)
	}
}
// TestRateLimiterStop ensures Stop never panics, including on a second call.
func TestRateLimiterStop(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			t.Errorf("Stop panicked: %v", r)
		}
	}()
	rl := NewRateLimiter(10, 20)
	rl.Stop()
	rl.Stop() // idempotent: second call must be a no-op
}
// TestRateLimiterMiddleware verifies the burst allowance and the 429 once
// the bucket is exhausted.
func TestRateLimiterMiddleware(t *testing.T) {
	gin.SetMode(gin.TestMode)
	rl := NewRateLimiter(10, 5) // 10 req/s, burst of 5
	defer rl.Stop()
	router := gin.New()
	router.Use(rl.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "ok")
	})
	send := func() int {
		req := httptest.NewRequest("GET", "/test", nil)
		req.RemoteAddr = "192.168.1.1:12345"
		w := httptest.NewRecorder()
		router.ServeHTTP(w, req)
		return w.Code
	}
	// The whole burst should pass.
	for i := 0; i < 5; i++ {
		if code := send(); code != http.StatusOK {
			t.Errorf("Request %d: expected 200, got %d", i+1, code)
		}
	}
	// One past the burst must be rejected.
	if code := send(); code != http.StatusTooManyRequests {
		t.Errorf("Expected 429 Too Many Requests, got %d", code)
	}
}
// TestRateLimiterDifferentIPs verifies buckets are isolated per client IP.
func TestRateLimiterDifferentIPs(t *testing.T) {
	gin.SetMode(gin.TestMode)
	rl := NewRateLimiter(10, 2) // 10 req/s, burst of 2
	defer rl.Stop()
	router := gin.New()
	router.Use(rl.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "ok")
	})
	sendFrom := func(addr string) int {
		req := httptest.NewRequest("GET", "/test", nil)
		req.RemoteAddr = addr
		w := httptest.NewRecorder()
		router.ServeHTTP(w, req)
		return w.Code
	}
	// Exhaust the burst for the first IP.
	for i := 0; i < 2; i++ {
		sendFrom("192.168.1.1:12345")
	}
	if code := sendFrom("192.168.1.1:12345"); code != http.StatusTooManyRequests {
		t.Errorf("IP1 should be rate limited, got %d", code)
	}
	// A different IP has its own fresh bucket.
	if code := sendFrom("192.168.1.2:12345"); code != http.StatusOK {
		t.Errorf("IP2 should not be rate limited, got %d", code)
	}
}
func TestRateLimiterClientCount(t *testing.T) {
rl := NewRateLimiter(10, 5)
defer rl.Stop()
gin.SetMode(gin.TestMode)
router := gin.New()
router.Use(rl.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "ok")
})
// Initial count should be 0
if count := rl.ClientCount(); count != 0 {
t.Errorf("Expected 0 clients, got %d", count)
}
// Make a request
req := httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.1:12345"
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
// Should have 1 client now
if count := rl.ClientCount(); count != 1 {
t.Errorf("Expected 1 client, got %d", count)
}
// Make request from different IP
req = httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.2:12345"
w = httptest.NewRecorder()
router.ServeHTTP(w, req)
// Should have 2 clients now
if count := rl.ClientCount(); count != 2 {
t.Errorf("Expected 2 clients, got %d", count)
}
}
// TestRateLimiterTokenRefill verifies that tokens refill over real elapsed
// time. NOTE: this test is wall-clock dependent (20ms sleep for a 10ms
// refill); on a heavily loaded CI host the margin may need widening.
func TestRateLimiterTokenRefill(t *testing.T) {
	gin.SetMode(gin.TestMode)
	rl := NewRateLimiter(100, 1) // 100 req/s, burst of 1 (refills quickly)
	defer rl.Stop()
	router := gin.New()
	router.Use(rl.Middleware())
	router.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "ok")
	})
	// First request succeeds
	req := httptest.NewRequest("GET", "/test", nil)
	req.RemoteAddr = "192.168.1.1:12345"
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("First request should succeed, got %d", w.Code)
	}
	// Second request should fail (burst exhausted)
	req = httptest.NewRequest("GET", "/test", nil)
	req.RemoteAddr = "192.168.1.1:12345"
	w = httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusTooManyRequests {
		t.Errorf("Second request should be rate limited, got %d", w.Code)
	}
	// Wait for token to refill (at 100 req/s, 1 token takes 10ms)
	time.Sleep(20 * time.Millisecond)
	// Third request should succeed (token refilled)
	req = httptest.NewRequest("GET", "/test", nil)
	req.RemoteAddr = "192.168.1.1:12345"
	w = httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Third request should succeed after refill, got %d", w.Code)
	}
}

158
mining/repository.go Normal file
View file

@ -0,0 +1,158 @@
package mining
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
)
// Repository defines a generic interface for data persistence.
// Implementations can store data in files, databases, etc.
// T is the value type being persisted; implementations decide how it is
// serialized.
type Repository[T any] interface {
	// Load reads data from the repository.
	Load() (T, error)
	// Save writes data to the repository.
	Save(data T) error
	// Update atomically loads, modifies, and saves data. The callback
	// receives a pointer to the loaded value; returning an error aborts
	// the update without saving.
	Update(fn func(*T) error) error
}
// FileRepository provides atomic file-based persistence for JSON data.
// It uses atomic writes (temp file + rename) to prevent corruption.
// The zero value is not usable; construct with NewFileRepository.
type FileRepository[T any] struct {
	mu       sync.RWMutex // guards all file I/O through this repository
	path     string       // JSON file backing this repository; fixed at construction
	defaults func() T     // optional factory used when the file is missing
}
// FileRepositoryOption configures a FileRepository during construction
// (functional-options pattern).
type FileRepositoryOption[T any] func(*FileRepository[T])
// WithDefaults sets the default value factory for when the file doesn't exist.
// Load and Update then start from factory() instead of the zero value.
func WithDefaults[T any](factory func() T) FileRepositoryOption[T] {
	return func(repo *FileRepository[T]) {
		repo.defaults = factory
	}
}
// NewFileRepository creates a new file-based repository.
// path is where the JSON document lives; opts customize behavior.
func NewFileRepository[T any](path string, opts ...FileRepositoryOption[T]) *FileRepository[T] {
	repo := &FileRepository[T]{path: path}
	for _, apply := range opts {
		apply(repo)
	}
	return repo
}
// Load reads and deserializes data from the file.
// Returns defaults if file doesn't exist.
func (r *FileRepository[T]) Load() (T, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	var value T
	raw, err := os.ReadFile(r.path)
	switch {
	case os.IsNotExist(err):
		// A missing file is not an error: hand back the configured
		// defaults, or the zero value when none were provided.
		if r.defaults != nil {
			return r.defaults(), nil
		}
		return value, nil
	case err != nil:
		return value, fmt.Errorf("failed to read file: %w", err)
	}
	if err := json.Unmarshal(raw, &value); err != nil {
		return value, fmt.Errorf("failed to unmarshal data: %w", err)
	}
	return value, nil
}
// Save serializes and writes data to the file atomically.
// It acquires the write lock and delegates the actual work to saveUnlocked.
func (r *FileRepository[T]) Save(data T) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.saveUnlocked(data)
}
// saveUnlocked saves data without acquiring the lock (caller must hold lock).
// It ensures the parent directory exists, marshals the value as indented
// JSON, and writes via AtomicWriteFile so readers never see a torn file.
func (r *FileRepository[T]) saveUnlocked(data T) error {
	if err := os.MkdirAll(filepath.Dir(r.path), 0755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}
	encoded, err := json.MarshalIndent(data, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal data: %w", err)
	}
	return AtomicWriteFile(r.path, encoded, 0600)
}
// Update atomically loads, modifies, and saves data.
// The modification function receives a pointer to the data.
func (r *FileRepository[T]) Update(fn func(*T) error) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	// Read the current state while holding the write lock so no other
	// Save or Update can interleave between our load and our save.
	var current T
	raw, readErr := os.ReadFile(r.path)
	switch {
	case readErr == nil:
		if err := json.Unmarshal(raw, &current); err != nil {
			return fmt.Errorf("failed to unmarshal data: %w", err)
		}
	case os.IsNotExist(readErr):
		// Missing file: start from the configured defaults (or zero value).
		if r.defaults != nil {
			current = r.defaults()
		}
	default:
		return fmt.Errorf("failed to read file: %w", readErr)
	}
	// Let the caller mutate the value; an error aborts without saving.
	if err := fn(&current); err != nil {
		return err
	}
	return r.saveUnlocked(current)
}
// Path returns the file path of this repository.
// The path is fixed at construction time, so no locking is required.
func (r *FileRepository[T]) Path() string {
	return r.path
}
// Exists returns true if the repository file exists.
func (r *FileRepository[T]) Exists() bool {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if _, err := os.Stat(r.path); err != nil {
		return false
	}
	return true
}
// Delete removes the repository file.
// Deleting a file that does not exist is a no-op, not an error.
func (r *FileRepository[T]) Delete() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if err := os.Remove(r.path); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

401
mining/repository_test.go Normal file
View file

@ -0,0 +1,401 @@
package mining
import (
	"errors"
	"os"
	"path/filepath"
	"runtime"
	"testing"
)
// testData is a small JSON-serializable fixture used across the
// FileRepository tests.
type testData struct {
	Name  string `json:"name"`
	Value int    `json:"value"`
}
// TestFileRepository_Load covers the four load scenarios: missing file,
// missing file with defaults, a valid file, and corrupt JSON.
func TestFileRepository_Load(t *testing.T) {
	t.Run("NonExistentFile", func(t *testing.T) {
		target := filepath.Join(t.TempDir(), "nonexistent.json")
		repo := NewFileRepository[testData](target)
		got, err := repo.Load()
		if err != nil {
			t.Fatalf("Load should not error for non-existent file: %v", err)
		}
		// Without defaults the zero value comes back.
		if got.Name != "" || got.Value != 0 {
			t.Error("Expected zero value for non-existent file")
		}
	})
	t.Run("NonExistentFileWithDefaults", func(t *testing.T) {
		target := filepath.Join(t.TempDir(), "nonexistent.json")
		repo := NewFileRepository[testData](target, WithDefaults(func() testData {
			return testData{Name: "default", Value: 42}
		}))
		got, err := repo.Load()
		if err != nil {
			t.Fatalf("Load should not error: %v", err)
		}
		if got.Name != "default" || got.Value != 42 {
			t.Errorf("Expected default values, got %+v", got)
		}
	})
	t.Run("ExistingFile", func(t *testing.T) {
		target := filepath.Join(t.TempDir(), "test.json")
		// Seed the file with known content.
		if err := os.WriteFile(target, []byte(`{"name":"test","value":123}`), 0600); err != nil {
			t.Fatalf("Failed to write test file: %v", err)
		}
		got, err := NewFileRepository[testData](target).Load()
		if err != nil {
			t.Fatalf("Load failed: %v", err)
		}
		if got.Name != "test" || got.Value != 123 {
			t.Errorf("Unexpected data: %+v", got)
		}
	})
	t.Run("InvalidJSON", func(t *testing.T) {
		target := filepath.Join(t.TempDir(), "invalid.json")
		if err := os.WriteFile(target, []byte(`{invalid json}`), 0600); err != nil {
			t.Fatalf("Failed to write test file: %v", err)
		}
		if _, err := NewFileRepository[testData](target).Load(); err == nil {
			t.Error("Expected error for invalid JSON")
		}
	})
}
// TestFileRepository_Save covers creating a file (including its parent
// directory) and overwriting existing content.
func TestFileRepository_Save(t *testing.T) {
	t.Run("NewFile", func(t *testing.T) {
		// A nested path exercises automatic directory creation.
		target := filepath.Join(t.TempDir(), "subdir", "new.json")
		repo := NewFileRepository[testData](target)
		if err := repo.Save(testData{Name: "saved", Value: 456}); err != nil {
			t.Fatalf("Save failed: %v", err)
		}
		if !repo.Exists() {
			t.Error("File should exist after save")
		}
		// Round-trip the content back out.
		got, err := repo.Load()
		if err != nil {
			t.Fatalf("Load after save failed: %v", err)
		}
		if got.Name != "saved" || got.Value != 456 {
			t.Errorf("Unexpected loaded data: %+v", got)
		}
	})
	t.Run("OverwriteExisting", func(t *testing.T) {
		target := filepath.Join(t.TempDir(), "existing.json")
		repo := NewFileRepository[testData](target)
		if err := repo.Save(testData{Name: "first", Value: 1}); err != nil {
			t.Fatalf("First save failed: %v", err)
		}
		if err := repo.Save(testData{Name: "second", Value: 2}); err != nil {
			t.Fatalf("Second save failed: %v", err)
		}
		// The second write must fully replace the first.
		got, err := repo.Load()
		if err != nil {
			t.Fatalf("Load failed: %v", err)
		}
		if got.Name != "second" || got.Value != 2 {
			t.Errorf("Expected overwritten data, got: %+v", got)
		}
	})
}
// TestFileRepository_Update covers in-place mutation, starting from
// defaults when the file is missing, and rollback when the callback errors.
func TestFileRepository_Update(t *testing.T) {
	t.Run("UpdateExisting", func(t *testing.T) {
		target := filepath.Join(t.TempDir(), "update.json")
		repo := NewFileRepository[testData](target)
		if err := repo.Save(testData{Name: "initial", Value: 10}); err != nil {
			t.Fatalf("Initial save failed: %v", err)
		}
		if err := repo.Update(func(data *testData) error {
			data.Value += 5
			return nil
		}); err != nil {
			t.Fatalf("Update failed: %v", err)
		}
		got, err := repo.Load()
		if err != nil {
			t.Fatalf("Load failed: %v", err)
		}
		if got.Value != 15 {
			t.Errorf("Expected value 15, got %d", got.Value)
		}
	})
	t.Run("UpdateNonExistentWithDefaults", func(t *testing.T) {
		target := filepath.Join(t.TempDir(), "new.json")
		repo := NewFileRepository[testData](target, WithDefaults(func() testData {
			return testData{Name: "default", Value: 100}
		}))
		if err := repo.Update(func(data *testData) error {
			data.Value *= 2
			return nil
		}); err != nil {
			t.Fatalf("Update failed: %v", err)
		}
		// 100 (the default) doubled proves the update started from defaults.
		got, err := repo.Load()
		if err != nil {
			t.Fatalf("Load failed: %v", err)
		}
		if got.Value != 200 {
			t.Errorf("Expected value 200, got %d", got.Value)
		}
	})
	t.Run("UpdateWithError", func(t *testing.T) {
		target := filepath.Join(t.TempDir(), "error.json")
		repo := NewFileRepository[testData](target)
		if err := repo.Save(testData{Name: "test", Value: 1}); err != nil {
			t.Fatalf("Initial save failed: %v", err)
		}
		testErr := errors.New("update error")
		err := repo.Update(func(data *testData) error {
			data.Value = 999 // This change should not be saved
			return testErr
		})
		if err != testErr {
			t.Errorf("Expected test error, got: %v", err)
		}
		// The on-disk value must be untouched after the failed update.
		got, err := repo.Load()
		if err != nil {
			t.Fatalf("Load failed: %v", err)
		}
		if got.Value != 1 {
			t.Errorf("Expected value 1 (unchanged), got %d", got.Value)
		}
	})
}
// TestFileRepository_Delete verifies deletion removes the file and that
// deleting an already-missing file is not an error.
func TestFileRepository_Delete(t *testing.T) {
	target := filepath.Join(t.TempDir(), "delete.json")
	repo := NewFileRepository[testData](target)
	if err := repo.Save(testData{Name: "temp", Value: 1}); err != nil {
		t.Fatalf("Save failed: %v", err)
	}
	if !repo.Exists() {
		t.Error("File should exist after save")
	}
	if err := repo.Delete(); err != nil {
		t.Fatalf("Delete failed: %v", err)
	}
	if repo.Exists() {
		t.Error("File should not exist after delete")
	}
	// A second delete hits the not-exist path and must stay silent.
	if err := repo.Delete(); err != nil {
		t.Errorf("Delete non-existent should not error: %v", err)
	}
}
// TestFileRepository_Path verifies the accessor echoes the constructor path.
func TestFileRepository_Path(t *testing.T) {
	const want = "/some/path/config.json"
	repo := NewFileRepository[testData](want)
	if got := repo.Path(); got != want {
		t.Errorf("Expected path %s, got %s", want, got)
	}
}
// TestFileRepository_UpdateWithLoadError verifies Update refuses to run
// its callback when the existing file cannot be parsed.
func TestFileRepository_UpdateWithLoadError(t *testing.T) {
	target := filepath.Join(t.TempDir(), "corrupt.json")
	// Plant unparseable JSON so the initial load inside Update fails.
	if err := os.WriteFile(target, []byte(`{invalid}`), 0600); err != nil {
		t.Fatalf("Failed to write corrupt file: %v", err)
	}
	repo := NewFileRepository[testData](target)
	err := repo.Update(func(data *testData) error {
		data.Value = 999
		return nil
	})
	if err == nil {
		t.Error("Expected error for corrupt file during Update")
	}
}
// TestFileRepository_SaveToReadOnlyDirectory verifies Save surfaces a
// permission error when the target directory is not writable.
func TestFileRepository_SaveToReadOnlyDirectory(t *testing.T) {
	// Unix directory permission bits are not enforced the same way on
	// Windows, so the 0555 trick below would not make the directory
	// read-only there and the test would fail spuriously.
	if runtime.GOOS == "windows" {
		t.Skip("Test skipped on Windows: directory permissions not enforced")
	}
	if os.Getuid() == 0 {
		t.Skip("Test skipped when running as root")
	}
	tmpDir := t.TempDir()
	readOnlyDir := filepath.Join(tmpDir, "readonly")
	if err := os.Mkdir(readOnlyDir, 0555); err != nil {
		t.Fatalf("Failed to create readonly dir: %v", err)
	}
	// Restore permissions so t.TempDir cleanup can remove the directory;
	// best-effort, so the error is deliberately ignored.
	defer os.Chmod(readOnlyDir, 0755)
	path := filepath.Join(readOnlyDir, "test.json")
	repo := NewFileRepository[testData](path)
	// Save should fail due to permission denied.
	if err := repo.Save(testData{Name: "test", Value: 1}); err == nil {
		t.Error("Expected error when saving to read-only directory")
	}
}
// TestFileRepository_DeleteNonExistent verifies Delete is a no-op for a
// file that was never created.
func TestFileRepository_DeleteNonExistent(t *testing.T) {
	target := filepath.Join(t.TempDir(), "nonexistent.json")
	repo := NewFileRepository[testData](target)
	if err := repo.Delete(); err != nil {
		t.Errorf("Delete on non-existent file should not error: %v", err)
	}
}
// TestFileRepository_ExistsOnInvalidPath verifies Exists reports false
// for a path that cannot exist.
func TestFileRepository_ExistsOnInvalidPath(t *testing.T) {
	repo := NewFileRepository[testData]("/nonexistent/path/to/file.json")
	if repo.Exists() {
		t.Error("Exists should return false for invalid path")
	}
}
// TestFileRepository_ConcurrentUpdates verifies that parallel Updates
// serialize correctly: ten concurrent increments must yield exactly ten.
func TestFileRepository_ConcurrentUpdates(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "concurrent.json")
	repo := NewFileRepository[testData](path, WithDefaults(func() testData {
		return testData{Name: "initial", Value: 0}
	}))
	// Run several updates in parallel; each increments Value by one.
	const numUpdates = 10
	done := make(chan struct{})
	for i := 0; i < numUpdates; i++ {
		go func() {
			defer func() { done <- struct{}{} }()
			// Report failures via t.Errorf (safe from goroutines) instead
			// of just logging them: a failed update would otherwise surface
			// only as a confusing count mismatch in the final assertion.
			if err := repo.Update(func(data *testData) error {
				data.Value++
				return nil
			}); err != nil {
				t.Errorf("concurrent update failed: %v", err)
			}
		}()
	}
	// Wait for all updates to finish.
	for i := 0; i < numUpdates; i++ {
		<-done
	}
	// Verify the final value equals the number of updates.
	data, err := repo.Load()
	if err != nil {
		t.Fatalf("Load failed: %v", err)
	}
	if data.Value != numUpdates {
		t.Errorf("Expected value %d after concurrent updates, got %d", numUpdates, data.Value)
	}
}
// TestFileRepository_SliceData exercises the repository with a slice type
// parameter: save, load, and append via Update.
func TestFileRepository_SliceData(t *testing.T) {
	type item struct {
		ID   string `json:"id"`
		Name string `json:"name"`
	}
	target := filepath.Join(t.TempDir(), "items.json")
	repo := NewFileRepository[[]item](target, WithDefaults(func() []item {
		return []item{}
	}))
	seed := []item{
		{ID: "1", Name: "First"},
		{ID: "2", Name: "Second"},
	}
	if err := repo.Save(seed); err != nil {
		t.Fatalf("Save failed: %v", err)
	}
	got, err := repo.Load()
	if err != nil {
		t.Fatalf("Load failed: %v", err)
	}
	if len(got) != 2 {
		t.Errorf("Expected 2 items, got %d", len(got))
	}
	// Append a third element through the atomic update path.
	if err := repo.Update(func(data *[]item) error {
		*data = append(*data, item{ID: "3", Name: "Third"})
		return nil
	}); err != nil {
		t.Fatalf("Update failed: %v", err)
	}
	got, _ = repo.Load()
	if len(got) != 3 {
		t.Errorf("Expected 3 items after update, got %d", len(got))
	}
}

1415
mining/service.go Normal file

File diff suppressed because it is too large Load diff

226
mining/service_test.go Normal file
View file

@ -0,0 +1,226 @@
package mining
import (
"context"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/gin-gonic/gin"
)
// MockMiner is a mock implementation of the Miner interface for testing.
// Each FooFunc field, when set, supplies the behavior of the corresponding
// Miner method; calling a method whose field is unset panics with a nil
// dereference (except GetType, which has a "mock" fallback).
type MockMiner struct {
	InstallFunc               func() error
	UninstallFunc             func() error
	StartFunc                 func(config *Config) error
	StopFunc                  func() error
	GetStatsFunc              func(ctx context.Context) (*PerformanceMetrics, error)
	GetTypeFunc               func() string
	GetNameFunc               func() string
	GetPathFunc               func() string
	GetBinaryPathFunc         func() string
	CheckInstallationFunc     func() (*InstallationDetails, error)
	GetLatestVersionFunc      func() (string, error)
	GetHashrateHistoryFunc    func() []HashratePoint
	AddHashratePointFunc      func(point HashratePoint)
	ReduceHashrateHistoryFunc func(now time.Time)
	GetLogsFunc               func() []string
	WriteStdinFunc            func(input string) error
}
// The following methods satisfy the Miner interface by delegating to the
// corresponding FooFunc field; tests inject behavior by assigning those
// fields.
func (m *MockMiner) Install() error { return m.InstallFunc() }
func (m *MockMiner) Uninstall() error { return m.UninstallFunc() }
func (m *MockMiner) Start(config *Config) error { return m.StartFunc(config) }
func (m *MockMiner) Stop() error { return m.StopFunc() }
func (m *MockMiner) GetStats(ctx context.Context) (*PerformanceMetrics, error) {
	return m.GetStatsFunc(ctx)
}

// GetType is the one accessor with a built-in fallback, so callers that
// never configure GetTypeFunc still get a sensible type name.
func (m *MockMiner) GetType() string {
	if m.GetTypeFunc != nil {
		return m.GetTypeFunc()
	}
	return "mock"
}
func (m *MockMiner) GetName() string { return m.GetNameFunc() }
func (m *MockMiner) GetPath() string { return m.GetPathFunc() }
func (m *MockMiner) GetBinaryPath() string { return m.GetBinaryPathFunc() }
func (m *MockMiner) CheckInstallation() (*InstallationDetails, error) {
	return m.CheckInstallationFunc()
}
func (m *MockMiner) GetLatestVersion() (string, error) { return m.GetLatestVersionFunc() }
func (m *MockMiner) GetHashrateHistory() []HashratePoint { return m.GetHashrateHistoryFunc() }
func (m *MockMiner) AddHashratePoint(point HashratePoint) { m.AddHashratePointFunc(point) }
func (m *MockMiner) ReduceHashrateHistory(now time.Time) { m.ReduceHashrateHistoryFunc(now) }
func (m *MockMiner) GetLogs() []string { return m.GetLogsFunc() }
func (m *MockMiner) WriteStdin(input string) error { return m.WriteStdinFunc(input) }
// MockManager is a mock implementation of the Manager for testing.
// As with MockMiner, each FooFunc field supplies the behavior of the
// corresponding method.
type MockManager struct {
	ListMinersFunc              func() []Miner
	ListAvailableMinersFunc     func() []AvailableMiner
	StartMinerFunc              func(ctx context.Context, minerType string, config *Config) (Miner, error)
	StopMinerFunc               func(ctx context.Context, minerName string) error
	GetMinerFunc                func(minerName string) (Miner, error)
	GetMinerHashrateHistoryFunc func(minerName string) ([]HashratePoint, error)
	UninstallMinerFunc          func(ctx context.Context, minerType string) error
	StopFunc                    func()
}
// Delegating implementations of the manager interface.
func (m *MockManager) ListMiners() []Miner { return m.ListMinersFunc() }
func (m *MockManager) ListAvailableMiners() []AvailableMiner { return m.ListAvailableMinersFunc() }
func (m *MockManager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
	return m.StartMinerFunc(ctx, minerType, config)
}
func (m *MockManager) StopMiner(ctx context.Context, minerName string) error {
	return m.StopMinerFunc(ctx, minerName)
}
func (m *MockManager) GetMiner(minerName string) (Miner, error) {
	return m.GetMinerFunc(minerName)
}
func (m *MockManager) GetMinerHashrateHistory(minerName string) ([]HashratePoint, error) {
	return m.GetMinerHashrateHistoryFunc(minerName)
}
func (m *MockManager) UninstallMiner(ctx context.Context, minerType string) error {
	return m.UninstallMinerFunc(ctx, minerType)
}
func (m *MockManager) Stop() { m.StopFunc() }

// Compile-time check that MockManager satisfies ManagerInterface.
var _ ManagerInterface = (*MockManager)(nil)
// setupTestRouter builds a gin engine wired to a MockManager whose hooks
// all succeed with empty results. Tests override individual hooks on the
// returned manager to drive specific scenarios.
func setupTestRouter() (*gin.Engine, *MockManager) {
	gin.SetMode(gin.TestMode)
	router := gin.Default()
	manager := &MockManager{
		ListMinersFunc:          func() []Miner { return []Miner{} },
		ListAvailableMinersFunc: func() []AvailableMiner { return []AvailableMiner{} },
		StartMinerFunc: func(ctx context.Context, minerType string, config *Config) (Miner, error) {
			return nil, nil
		},
		StopMinerFunc: func(ctx context.Context, minerName string) error { return nil },
		GetMinerFunc:  func(minerName string) (Miner, error) { return nil, nil },
		GetMinerHashrateHistoryFunc: func(minerName string) ([]HashratePoint, error) {
			return nil, nil
		},
		UninstallMinerFunc: func(ctx context.Context, minerType string) error { return nil },
		StopFunc:           func() {},
	}
	svc := &Service{
		Manager:       manager,
		Router:        router,
		APIBasePath:   "/",
		SwaggerUIPath: "/swagger",
	}
	svc.SetupRoutes()
	return router, manager
}
// TestHandleListMiners verifies the miner-list endpoint returns 200 when
// the manager reports a running miner.
func TestHandleListMiners(t *testing.T) {
	router, manager := setupTestRouter()
	manager.ListMinersFunc = func() []Miner {
		return []Miner{&XMRigMiner{BaseMiner: BaseMiner{Name: "test-miner"}}}
	}
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest("GET", "/miners", nil)
	router.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Errorf("expected status %d, got %d", http.StatusOK, rec.Code)
	}
}
// TestHandleGetInfo verifies the info endpoint responds with 200.
func TestHandleGetInfo(t *testing.T) {
	router, _ := setupTestRouter()
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest("GET", "/info", nil)
	router.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Errorf("expected status %d, got %d", http.StatusOK, rec.Code)
	}
}
// TestHandleDoctor verifies the doctor endpoint responds with 200 when
// the manager reports an available miner.
func TestHandleDoctor(t *testing.T) {
	router, manager := setupTestRouter()
	manager.ListAvailableMinersFunc = func() []AvailableMiner {
		return []AvailableMiner{{Name: "xmrig"}}
	}
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest("POST", "/doctor", nil)
	router.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Errorf("expected status %d, got %d", http.StatusOK, rec.Code)
	}
}
// TestHandleInstallMiner verifies the install endpoint is routed.
// 200 and 500 are both acceptable here: installation may legitimately
// fail in the test environment (no network, no binaries).
func TestHandleInstallMiner(t *testing.T) {
	router, _ := setupTestRouter()
	req, _ := http.NewRequest("POST", "/miners/xmrig/install", nil)
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK && rec.Code != http.StatusInternalServerError {
		t.Errorf("expected status 200 or 500, got %d", rec.Code)
	}
}
// TestHandleStopMiner verifies the stop endpoint returns 200 when the
// manager stops the miner without error.
func TestHandleStopMiner(t *testing.T) {
	router, manager := setupTestRouter()
	manager.StopMinerFunc = func(ctx context.Context, minerName string) error {
		return nil
	}
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest("DELETE", "/miners/test-miner", nil)
	router.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Errorf("expected status %d, got %d", http.StatusOK, rec.Code)
	}
}
// TestHandleGetMinerStats verifies the stats endpoint returns 200 when
// the mock miner reports metrics.
func TestHandleGetMinerStats(t *testing.T) {
	router, manager := setupTestRouter()
	manager.GetMinerFunc = func(minerName string) (Miner, error) {
		stub := &MockMiner{
			GetStatsFunc: func(ctx context.Context) (*PerformanceMetrics, error) {
				return &PerformanceMetrics{Hashrate: 100}, nil
			},
			GetLogsFunc: func() []string { return []string{} },
		}
		return stub, nil
	}
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest("GET", "/miners/test-miner/stats", nil)
	router.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Errorf("expected status %d, got %d", http.StatusOK, rec.Code)
	}
}
// TestHandleGetMinerHashrateHistory verifies the history endpoint returns
// 200 when the manager has data.
func TestHandleGetMinerHashrateHistory(t *testing.T) {
	router, manager := setupTestRouter()
	manager.GetMinerHashrateHistoryFunc = func(minerName string) ([]HashratePoint, error) {
		return []HashratePoint{{Timestamp: time.Now(), Hashrate: 100}}, nil
	}
	rec := httptest.NewRecorder()
	req, _ := http.NewRequest("GET", "/miners/test-miner/hashrate-history", nil)
	router.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Errorf("expected status %d, got %d", http.StatusOK, rec.Code)
	}
}

225
mining/settings_manager.go Normal file
View file

@ -0,0 +1,225 @@
package mining
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/adrg/xdg"
)
// settingsFileName is the file name under the app's XDG config directory.
const settingsFileName = "settings.json"

// WindowState stores the last window position and size so the UI can be
// restored where the user left it.
type WindowState struct {
	X         int  `json:"x"`
	Y         int  `json:"y"`
	Width     int  `json:"width"`
	Height    int  `json:"height"`
	Maximized bool `json:"maximized"`
}

// MinerDefaults stores default configuration for miners.
type MinerDefaults struct {
	DefaultPool          string `json:"defaultPool,omitempty"`
	DefaultWallet        string `json:"defaultWallet,omitempty"`
	DefaultAlgorithm     string `json:"defaultAlgorithm,omitempty"`
	CPUMaxThreadsHint    int    `json:"cpuMaxThreadsHint,omitempty"`    // Default CPU throttle percentage
	CPUThrottleThreshold int    `json:"cpuThrottleThreshold,omitempty"` // Throttle when CPU exceeds this %
}
// AppSettings stores application-wide settings.
// It is serialized as JSON by SettingsManager; the field tags define the
// on-disk schema, so renaming a tag is a breaking change for saved files.
type AppSettings struct {
	// Window settings
	Window WindowState `json:"window"`
	// Behavior settings
	StartOnBoot       bool `json:"startOnBoot"`
	MinimizeToTray    bool `json:"minimizeToTray"`
	StartMinimized    bool `json:"startMinimized"`
	AutostartMiners   bool `json:"autostartMiners"`
	ShowNotifications bool `json:"showNotifications"`
	// Mining settings
	MinerDefaults MinerDefaults `json:"minerDefaults"`
	PauseOnBattery bool `json:"pauseOnBattery"`
	PauseOnUserActive bool `json:"pauseOnUserActive"`
	PauseOnUserActiveDelay int `json:"pauseOnUserActiveDelay"` // Seconds of inactivity before resuming
	// Performance settings
	EnableCPUThrottle bool `json:"enableCpuThrottle"`
	CPUThrottlePercent int `json:"cpuThrottlePercent"` // Target max CPU % when throttling
	CPUMonitorInterval int `json:"cpuMonitorInterval"` // Seconds between CPU checks
	AutoThrottleOnHighTemp bool `json:"autoThrottleOnHighTemp"` // Throttle when CPU temp is high
	// Theme
	Theme string `json:"theme"` // "light", "dark", "system"
}
// DefaultSettings returns sensible defaults for app settings.
// These are the values used on first run, before a settings file exists.
func DefaultSettings() *AppSettings {
	s := AppSettings{}
	// Window: a comfortable default size; position is left to the OS.
	s.Window = WindowState{Width: 1400, Height: 900}
	// Behavior: stay unobtrusive but visible.
	s.MinimizeToTray = true
	s.ShowNotifications = true
	// Mining: conservative CPU usage out of the box.
	s.MinerDefaults = MinerDefaults{
		CPUMaxThreadsHint:    50, // Default to 50% CPU
		CPUThrottleThreshold: 80, // Throttle if CPU > 80%
	}
	s.PauseOnBattery = true
	s.PauseOnUserActiveDelay = 60
	// Performance: throttling is available but off by default.
	s.CPUThrottlePercent = 70
	s.CPUMonitorInterval = 5
	s.Theme = "system"
	return &s
}
// SettingsManager handles loading and saving app settings.
// All access goes through the RWMutex, so a single instance is safe to
// share between goroutines.
type SettingsManager struct {
	mu           sync.RWMutex // guards settings and all file I/O
	settings     *AppSettings // current in-memory settings
	settingsPath string       // JSON file the settings persist to
}
// NewSettingsManager creates a new settings manager.
// It resolves the settings path under the XDG config directory, then loads
// the existing file; on first run it writes the defaults to disk instead.
func NewSettingsManager() (*SettingsManager, error) {
	path, err := xdg.ConfigFile(filepath.Join("lethean-desktop", settingsFileName))
	if err != nil {
		return nil, fmt.Errorf("could not resolve settings path: %w", err)
	}
	sm := &SettingsManager{settings: DefaultSettings(), settingsPath: path}
	switch loadErr := sm.Load(); {
	case loadErr == nil:
		// Existing settings loaded successfully.
	case os.IsNotExist(loadErr):
		// First run: persist the defaults so the file exists next time.
		if saveErr := sm.Save(); saveErr != nil {
			return nil, fmt.Errorf("could not save default settings: %w", saveErr)
		}
	default:
		return nil, fmt.Errorf("could not load settings: %w", loadErr)
	}
	return sm, nil
}
// Load reads settings from disk.
// On success the in-memory settings are replaced wholesale; on any error
// (including a missing file) the current settings are left untouched.
func (sm *SettingsManager) Load() error {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	raw, err := os.ReadFile(sm.settingsPath)
	if err != nil {
		return err
	}
	loaded := AppSettings{}
	if err := json.Unmarshal(raw, &loaded); err != nil {
		return err
	}
	sm.settings = &loaded
	return nil
}
// Save writes settings to disk as indented JSON with owner-only permissions.
func (sm *SettingsManager) Save() error {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	payload, err := json.MarshalIndent(sm.settings, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(sm.settingsPath, payload, 0600)
}
// Get returns a copy of the current settings.
// A copy is returned so callers cannot mutate the manager's state without
// going through Update (which also persists the change).
func (sm *SettingsManager) Get() *AppSettings {
	sm.mu.RLock()
	defer sm.mu.RUnlock()
	// Renamed from `copy`, which shadowed the predeclared builtin.
	snapshot := *sm.settings
	return &snapshot
}
// Update applies changes to settings and saves.
// The mutation and the write happen under a single lock acquisition, so
// concurrent Updates serialize cleanly.
func (sm *SettingsManager) Update(fn func(*AppSettings)) error {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	fn(sm.settings)
	payload, err := json.MarshalIndent(sm.settings, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(sm.settingsPath, payload, 0600)
}
// UpdateWindowState saves the current window state.
func (sm *SettingsManager) UpdateWindowState(x, y, width, height int, maximized bool) error {
	return sm.Update(func(s *AppSettings) {
		s.Window = WindowState{
			X:         x,
			Y:         y,
			Width:     width,
			Height:    height,
			Maximized: maximized,
		}
	})
}
// GetWindowState returns the saved window state.
// Only the read lock is taken; the returned struct is a copy.
func (sm *SettingsManager) GetWindowState() WindowState {
	sm.mu.RLock()
	defer sm.mu.RUnlock()
	return sm.settings.Window
}
// SetStartOnBoot enables/disables start on boot and persists the change.
func (sm *SettingsManager) SetStartOnBoot(enabled bool) error {
	return sm.Update(func(s *AppSettings) {
		s.StartOnBoot = enabled
	})
}
// SetAutostartMiners enables/disables miner autostart and persists the change.
func (sm *SettingsManager) SetAutostartMiners(enabled bool) error {
	return sm.Update(func(s *AppSettings) {
		s.AutostartMiners = enabled
	})
}
// SetCPUThrottle configures CPU throttling.
// percent is applied only when it falls in (0, 100]; out-of-range values
// leave the previous percentage in place.
func (sm *SettingsManager) SetCPUThrottle(enabled bool, percent int) error {
	return sm.Update(func(s *AppSettings) {
		s.EnableCPUThrottle = enabled
		if percent <= 0 || percent > 100 {
			return
		}
		s.CPUThrottlePercent = percent
	})
}
// SetMinerDefaults updates default miner configuration, replacing the
// whole MinerDefaults struct, and persists the change.
func (sm *SettingsManager) SetMinerDefaults(defaults MinerDefaults) error {
	return sm.Update(func(s *AppSettings) {
		s.MinerDefaults = defaults
	})
}

View file

@ -0,0 +1,211 @@
package mining
import (
"os"
"path/filepath"
"testing"
)
// TestSettingsManager_DefaultSettings pins the key first-run defaults.
func TestSettingsManager_DefaultSettings(t *testing.T) {
	got := DefaultSettings()
	if got.Window.Width != 1400 {
		t.Errorf("Expected default width 1400, got %d", got.Window.Width)
	}
	if got.Window.Height != 900 {
		t.Errorf("Expected default height 900, got %d", got.Window.Height)
	}
	if got.MinerDefaults.CPUMaxThreadsHint != 50 {
		t.Errorf("Expected default CPU hint 50, got %d", got.MinerDefaults.CPUMaxThreadsHint)
	}
	if got.MinerDefaults.CPUThrottleThreshold != 80 {
		t.Errorf("Expected default throttle threshold 80, got %d", got.MinerDefaults.CPUThrottleThreshold)
	}
	if !got.PauseOnBattery {
		t.Error("Expected PauseOnBattery to be true by default")
	}
}
// TestSettingsManager_SaveAndLoad round-trips modified settings through
// disk using a second manager instance.
func TestSettingsManager_SaveAndLoad(t *testing.T) {
	settingsPath := filepath.Join(t.TempDir(), "settings.json")
	writer := &SettingsManager{
		settings:     DefaultSettings(),
		settingsPath: settingsPath,
	}
	// Mutate a spread of fields so the round trip is meaningful.
	writer.settings.Window.Width = 1920
	writer.settings.Window.Height = 1080
	writer.settings.StartOnBoot = true
	writer.settings.AutostartMiners = true
	writer.settings.CPUThrottlePercent = 50
	if err := writer.Save(); err != nil {
		t.Fatalf("Failed to save settings: %v", err)
	}
	if _, err := os.Stat(settingsPath); os.IsNotExist(err) {
		t.Fatal("Settings file was not created")
	}
	// A fresh manager starting from defaults must pick up the saved values.
	reader := &SettingsManager{
		settings:     DefaultSettings(),
		settingsPath: settingsPath,
	}
	if err := reader.Load(); err != nil {
		t.Fatalf("Failed to load settings: %v", err)
	}
	if reader.settings.Window.Width != 1920 {
		t.Errorf("Expected width 1920, got %d", reader.settings.Window.Width)
	}
	if reader.settings.Window.Height != 1080 {
		t.Errorf("Expected height 1080, got %d", reader.settings.Window.Height)
	}
	if !reader.settings.StartOnBoot {
		t.Error("Expected StartOnBoot to be true")
	}
	if !reader.settings.AutostartMiners {
		t.Error("Expected AutostartMiners to be true")
	}
	if reader.settings.CPUThrottlePercent != 50 {
		t.Errorf("Expected CPUThrottlePercent 50, got %d", reader.settings.CPUThrottlePercent)
	}
}
// TestSettingsManager_UpdateWindowState verifies the window-state helper
// records all four geometry values.
func TestSettingsManager_UpdateWindowState(t *testing.T) {
	sm := &SettingsManager{
		settings:     DefaultSettings(),
		settingsPath: filepath.Join(t.TempDir(), "settings.json"),
	}
	if err := sm.UpdateWindowState(100, 200, 800, 600, false); err != nil {
		t.Fatalf("Failed to update window state: %v", err)
	}
	got := sm.GetWindowState()
	if got.X != 100 {
		t.Errorf("Expected X 100, got %d", got.X)
	}
	if got.Y != 200 {
		t.Errorf("Expected Y 200, got %d", got.Y)
	}
	if got.Width != 800 {
		t.Errorf("Expected Width 800, got %d", got.Width)
	}
	if got.Height != 600 {
		t.Errorf("Expected Height 600, got %d", got.Height)
	}
}
// TestSettingsManager_SetCPUThrottle verifies the throttle setter applies
// valid percentages and ignores out-of-range ones.
func TestSettingsManager_SetCPUThrottle(t *testing.T) {
	sm := &SettingsManager{
		settings:     DefaultSettings(),
		settingsPath: filepath.Join(t.TempDir(), "settings.json"),
	}
	// Enable with a valid percentage.
	if err := sm.SetCPUThrottle(true, 30); err != nil {
		t.Fatalf("Failed to set CPU throttle: %v", err)
	}
	got := sm.Get()
	if !got.EnableCPUThrottle {
		t.Error("Expected EnableCPUThrottle to be true")
	}
	if got.CPUThrottlePercent != 30 {
		t.Errorf("Expected CPUThrottlePercent 30, got %d", got.CPUThrottlePercent)
	}
	// An out-of-range percentage must leave the previous value in place.
	if err := sm.SetCPUThrottle(true, 150); err != nil {
		t.Fatalf("Failed to set CPU throttle: %v", err)
	}
	got = sm.Get()
	if got.CPUThrottlePercent != 30 { // Should remain unchanged
		t.Errorf("Expected CPUThrottlePercent to remain 30, got %d", got.CPUThrottlePercent)
	}
}
// TestSettingsManager_SetMinerDefaults verifies miner defaults are stored
// wholesale.
func TestSettingsManager_SetMinerDefaults(t *testing.T) {
	sm := &SettingsManager{
		settings:     DefaultSettings(),
		settingsPath: filepath.Join(t.TempDir(), "settings.json"),
	}
	want := MinerDefaults{
		DefaultPool:          "stratum+tcp://pool.example.com:3333",
		DefaultWallet:        "wallet123",
		DefaultAlgorithm:     "rx/0",
		CPUMaxThreadsHint:    25,
		CPUThrottleThreshold: 90,
	}
	if err := sm.SetMinerDefaults(want); err != nil {
		t.Fatalf("Failed to set miner defaults: %v", err)
	}
	got := sm.Get()
	if got.MinerDefaults.DefaultPool != "stratum+tcp://pool.example.com:3333" {
		t.Errorf("Expected pool to be set, got %s", got.MinerDefaults.DefaultPool)
	}
	if got.MinerDefaults.CPUMaxThreadsHint != 25 {
		t.Errorf("Expected CPUMaxThreadsHint 25, got %d", got.MinerDefaults.CPUMaxThreadsHint)
	}
}
// TestSettingsManager_ConcurrentAccess hammers the settings manager with
// concurrent Get and UpdateWindowState calls; run with -race to detect data
// races. The final width check only sanity-bounds the last write (800+n for
// n in 0..9), since goroutine completion order is unspecified.
func TestSettingsManager_ConcurrentAccess(t *testing.T) {
	tmpDir := t.TempDir()
	settingsPath := filepath.Join(tmpDir, "settings.json")
	sm := &SettingsManager{
		settings:     DefaultSettings(),
		settingsPath: settingsPath,
	}
	// Concurrent reads and writes
	done := make(chan bool)
	for i := 0; i < 10; i++ {
		go func(n int) {
			for j := 0; j < 100; j++ {
				_ = sm.Get()
				sm.UpdateWindowState(n*10, n*10, 800+n, 600+n, false)
			}
			done <- true
		}(i)
	}
	// Wait for all goroutines
	for i := 0; i < 10; i++ {
		<-done
	}
	// Should complete without race conditions
	state := sm.GetWindowState()
	if state.Width < 800 || state.Width > 900 {
		t.Errorf("Unexpected width after concurrent access: %d", state.Width)
	}
}

457
mining/simulated_miner.go Normal file
View file

@ -0,0 +1,457 @@
package mining
import (
"context"
"fmt"
"math"
"math/rand"
"sync"
"time"
)
// MinerTypeSimulated is the registry type identifier for the mock miner
// implementation used for UI testing without real mining hardware.
const MinerTypeSimulated = "simulated"
// SimulatedMiner is a mock miner that generates realistic-looking stats for UI testing.
// It implements the same surface as real miners (Start/Stop, stats, logs,
// hashrate history) but produces synthetic data in a background goroutine.
type SimulatedMiner struct {
	// Exported fields for JSON serialization
	Name            string              `json:"name"`
	MinerType       string              `json:"miner_type"`
	Version         string              `json:"version"`
	URL             string              `json:"url"`
	Path            string              `json:"path"`
	MinerBinary     string              `json:"miner_binary"`
	Running         bool                `json:"running"`
	Algorithm       string              `json:"algorithm"`
	HashrateHistory []HashratePoint     `json:"hashrateHistory"`
	LowResHistory   []HashratePoint     `json:"lowResHashrateHistory"`
	Stats           *PerformanceMetrics `json:"stats,omitempty"`
	FullStats       *XMRigSummary       `json:"full_stats,omitempty"` // XMRig-compatible format for UI

	// Internal fields (not exported)
	baseHashrate   int           // target hashrate before ramp-up/noise, in H/s
	peakHashrate   int           // highest hashrate observed since Start
	variance       float64       // random noise amplitude as a fraction (e.g. 0.1 = ±10%)
	startTime      time.Time     // when Start was last called; basis for uptime
	shares         int           // accepted shares since Start
	rejected       int           // rejected shares since Start
	logs           []string      // rolling log buffer, capped at 100 lines
	mu             sync.RWMutex  // guards all mutable state above
	stopChan       chan struct{} // closed by Stop to end the simulation goroutine
	poolName       string        // simulated pool address reported in stats/logs
	difficultyBase int           // base difficulty used to derive randomized values
}
// SimulatedMinerConfig holds configuration for creating a simulated miner.
// Zero values for Variance, PoolName, and Difficulty are replaced with
// defaults by NewSimulatedMiner.
type SimulatedMinerConfig struct {
	Name         string  // Miner instance name (e.g., "sim-xmrig-001")
	Algorithm    string  // Algorithm name (e.g., "rx/0", "kawpow", "ethash")
	BaseHashrate int     // Base hashrate in H/s
	Variance     float64 // Variance as percentage (0.0-0.2 for 20% variance)
	PoolName     string  // Simulated pool name
	Difficulty   int     // Base difficulty
}
// NewSimulatedMiner builds a SimulatedMiner from config, substituting
// defaults for any unset optional fields: 10% variance, a placeholder pool
// address, and a base difficulty of 10000.
func NewSimulatedMiner(config SimulatedMinerConfig) *SimulatedMiner {
	variance := config.Variance
	if variance <= 0 {
		variance = 0.1 // Default 10% variance
	}
	pool := config.PoolName
	if pool == "" {
		pool = "sim-pool.example.com:3333"
	}
	difficulty := config.Difficulty
	if difficulty <= 0 {
		difficulty = 10000
	}
	miner := &SimulatedMiner{
		Name:            config.Name,
		MinerType:       MinerTypeSimulated,
		Version:         "1.0.0-simulated",
		URL:             "https://github.com/simulated/miner",
		Path:            "/simulated/miner",
		MinerBinary:     "/simulated/miner/sim-miner",
		Algorithm:       config.Algorithm,
		HashrateHistory: make([]HashratePoint, 0),
		LowResHistory:   make([]HashratePoint, 0),
		baseHashrate:    config.BaseHashrate,
		variance:        variance,
		poolName:        pool,
		difficultyBase:  difficulty,
		logs:            make([]string, 0),
	}
	return miner
}
// GetType reports this miner's type identifier ("simulated").
func (m *SimulatedMiner) GetType() string {
	minerType := m.MinerType
	return minerType
}
// Install is a no-op for simulated miners; there is nothing to download
// or place on disk. It always succeeds.
func (m *SimulatedMiner) Install() error {
	return nil
}
// Uninstall is a no-op for simulated miners; no files exist to remove.
// It always succeeds.
func (m *SimulatedMiner) Uninstall() error {
	return nil
}
// Start begins the simulated mining process, resetting counters and history
// and launching the background simulation goroutine. The config argument is
// accepted for interface compatibility but is not consulted. Returns an
// error if the miner is already running.
func (m *SimulatedMiner) Start(config *Config) error {
	m.mu.Lock()
	if m.Running {
		m.mu.Unlock()
		return fmt.Errorf("simulated miner %s is already running", m.Name)
	}
	now := time.Now()
	m.Running = true
	m.startTime = now
	m.shares = 0
	m.rejected = 0
	m.stopChan = make(chan struct{})
	m.HashrateHistory = make([]HashratePoint, 0)
	m.LowResHistory = make([]HashratePoint, 0)
	stamp := now.Format("15:04:05")
	m.logs = []string{
		fmt.Sprintf("[%s] Simulated miner starting...", stamp),
		fmt.Sprintf("[%s] Connecting to %s", stamp, m.poolName),
		fmt.Sprintf("[%s] Pool connected, algorithm: %s", stamp, m.Algorithm),
	}
	m.mu.Unlock()

	// Drive the stats simulation until Stop closes stopChan.
	go m.runSimulation()
	return nil
}
// Stop halts the simulated miner by closing the stop channel, which ends the
// background simulation goroutine. Returns an error if the miner is not
// currently running.
func (m *SimulatedMiner) Stop() error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if !m.Running {
		return fmt.Errorf("simulated miner %s is not running", m.Name)
	}
	close(m.stopChan)
	m.Running = false

	stamp := time.Now().Format("15:04:05")
	m.logs = append(m.logs, fmt.Sprintf("[%s] Miner stopped", stamp))
	return nil
}
// runSimulation is the background loop driving the fake miner: it appends a
// hashrate sample on every high-resolution tick, occasionally records a
// share with a randomized interval, and exits when stopChan is closed.
func (m *SimulatedMiner) runSimulation() {
	hashTicker := time.NewTicker(HighResolutionInterval)
	defer hashTicker.Stop()

	shareTicker := time.NewTicker(time.Duration(5+rand.Intn(10)) * time.Second)
	defer shareTicker.Stop()

	for {
		select {
		case <-m.stopChan:
			return
		case <-hashTicker.C:
			m.updateHashrate()
		case <-shareTicker.C:
			m.simulateShare()
			// Pick a fresh random delay before the next simulated share.
			shareTicker.Reset(time.Duration(5+rand.Intn(15)) * time.Second)
		}
	}
}
// updateHashrate appends a new simulated hashrate sample and refreshes the
// exported Stats and FullStats structures.
//
// The sample combines a 30-second ramp-up factor, a slow sine-wave drift,
// and uniform random noise scaled by the configured variance. The method
// also tracks the peak hashrate and trims the high-resolution history to
// the last HighResolutionDuration.
//
// Fix: rand.Intn panics when its argument is <= 0; difficultyBase can
// legitimately be 1 (NewSimulatedMiner only defaults values <= 0), so the
// half-difficulty spread is clamped to at least 1.
func (m *SimulatedMiner) updateHashrate() {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Generate hashrate with variance and smooth transitions
	now := time.Now()
	uptime := now.Sub(m.startTime).Seconds()
	// Ramp up period (first 30 seconds)
	rampFactor := math.Min(1.0, uptime/30.0)
	// Add some sine wave variation for realistic fluctuation
	sineVariation := math.Sin(uptime/10) * 0.05
	// Random noise
	noise := (rand.Float64() - 0.5) * 2 * m.variance
	// Calculate final hashrate
	hashrate := int(float64(m.baseHashrate) * rampFactor * (1.0 + sineVariation + noise))
	if hashrate < 0 {
		hashrate = 0
	}
	point := HashratePoint{
		Timestamp: now,
		Hashrate:  hashrate,
	}
	m.HashrateHistory = append(m.HashrateHistory, point)
	// Track peak hashrate
	if hashrate > m.peakHashrate {
		m.peakHashrate = hashrate
	}
	// Update stats for JSON serialization
	uptimeInt := int(uptime)
	// Clamp the jitter spread: rand.Intn panics on a non-positive argument.
	halfDiff := m.difficultyBase / 2
	if halfDiff < 1 {
		halfDiff = 1
	}
	diffCurrent := m.difficultyBase + rand.Intn(halfDiff)
	m.Stats = &PerformanceMetrics{
		Hashrate:      hashrate,
		Shares:        m.shares,
		Rejected:      m.rejected,
		Uptime:        uptimeInt,
		Algorithm:     m.Algorithm,
		AvgDifficulty: m.difficultyBase,
		DiffCurrent:   diffCurrent,
	}
	// Update XMRig-compatible full_stats for UI
	m.FullStats = &XMRigSummary{
		ID:       m.Name,
		WorkerID: m.Name,
		Uptime:   uptimeInt,
		Algo:     m.Algorithm,
		Version:  m.Version,
	}
	m.FullStats.Hashrate.Total = []float64{float64(hashrate)}
	m.FullStats.Hashrate.Highest = float64(m.peakHashrate)
	m.FullStats.Results.SharesGood = m.shares
	m.FullStats.Results.SharesTotal = m.shares + m.rejected
	m.FullStats.Results.DiffCurrent = diffCurrent
	m.FullStats.Results.AvgTime = 15 + rand.Intn(10) // Simulated avg share time
	m.FullStats.Results.HashesTotal = m.shares * diffCurrent
	m.FullStats.Connection.Pool = m.poolName
	m.FullStats.Connection.Uptime = uptimeInt
	m.FullStats.Connection.Diff = diffCurrent
	m.FullStats.Connection.Accepted = m.shares
	m.FullStats.Connection.Rejected = m.rejected
	m.FullStats.Connection.Algo = m.Algorithm
	m.FullStats.Connection.Ping = 50 + rand.Intn(50)
	// Trim high-res history to last 5 minutes
	cutoff := now.Add(-HighResolutionDuration)
	for len(m.HashrateHistory) > 0 && m.HashrateHistory[0].Timestamp.Before(cutoff) {
		m.HashrateHistory = m.HashrateHistory[1:]
	}
}
// simulateShare records one simulated share result: roughly 2% of shares are
// rejected as stale, the rest are accepted with a randomized difficulty.
// The log buffer is capped at the most recent 100 lines.
//
// Fix: rand.Intn panics when its argument is <= 0, so the half-difficulty
// spread is clamped to at least 1 (difficultyBase may be 1).
func (m *SimulatedMiner) simulateShare() {
	m.mu.Lock()
	defer m.mu.Unlock()
	// 2% chance of rejected share
	if rand.Float64() < 0.02 {
		m.rejected++
		m.logs = append(m.logs, fmt.Sprintf("[%s] Share rejected (stale)", time.Now().Format("15:04:05")))
	} else {
		m.shares++
		// Clamp the jitter spread: rand.Intn panics on a non-positive argument.
		halfDiff := m.difficultyBase / 2
		if halfDiff < 1 {
			halfDiff = 1
		}
		diff := m.difficultyBase + rand.Intn(halfDiff)
		m.logs = append(m.logs, fmt.Sprintf("[%s] Share accepted (%d/%d) diff %d", time.Now().Format("15:04:05"), m.shares, m.rejected, diff))
	}
	// Keep last 100 log lines
	if len(m.logs) > 100 {
		m.logs = m.logs[len(m.logs)-100:]
	}
}
// GetStats returns a snapshot of the simulated miner's performance metrics,
// using the most recent hashrate sample and randomized difficulty jitter.
// ExtraData carries the pool name and a "simulated" marker. Returns an error
// when the miner is not running. The ctx parameter is unused (accepted for
// interface compatibility).
//
// Fix: rand.Intn panics when its argument is <= 0, so the quarter- and
// half-difficulty spreads are clamped to at least 1.
func (m *SimulatedMiner) GetStats(ctx context.Context) (*PerformanceMetrics, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if !m.Running {
		return nil, fmt.Errorf("simulated miner %s is not running", m.Name)
	}
	// Calculate current hashrate from recent history
	var hashrate int
	if len(m.HashrateHistory) > 0 {
		hashrate = m.HashrateHistory[len(m.HashrateHistory)-1].Hashrate
	}
	uptime := int(time.Since(m.startTime).Seconds())
	// Clamp jitter spreads: rand.Intn panics on a non-positive argument,
	// and difficultyBase may legitimately be as small as 1.
	quarterDiff := m.difficultyBase / 4
	if quarterDiff < 1 {
		quarterDiff = 1
	}
	halfDiff := m.difficultyBase / 2
	if halfDiff < 1 {
		halfDiff = 1
	}
	// Calculate average difficulty
	avgDiff := m.difficultyBase
	if m.shares > 0 {
		avgDiff = m.difficultyBase + rand.Intn(quarterDiff)
	}
	return &PerformanceMetrics{
		Hashrate:      hashrate,
		Shares:        m.shares,
		Rejected:      m.rejected,
		Uptime:        uptime,
		LastShare:     time.Now().Unix() - int64(rand.Intn(30)),
		Algorithm:     m.Algorithm,
		AvgDifficulty: avgDiff,
		DiffCurrent:   m.difficultyBase + rand.Intn(halfDiff),
		ExtraData: map[string]interface{}{
			"pool":      m.poolName,
			"simulated": true,
		},
	}, nil
}
// GetName reports the instance name this miner was created with.
func (m *SimulatedMiner) GetName() string {
	name := m.Name
	return name
}
// GetPath reports the placeholder installation path for this simulated miner.
func (m *SimulatedMiner) GetPath() string {
	path := m.Path
	return path
}
// GetBinaryPath reports the placeholder binary path for this simulated miner.
func (m *SimulatedMiner) GetBinaryPath() string {
	binary := m.MinerBinary
	return binary
}
// CheckInstallation always reports the simulated miner as installed, with
// fixed placeholder version and paths. It never returns an error.
func (m *SimulatedMiner) CheckInstallation() (*InstallationDetails, error) {
	details := &InstallationDetails{
		IsInstalled: true,
		Version:     "1.0.0-simulated",
		Path:        "/simulated/miner",
		MinerBinary: "simulated-miner",
		ConfigPath:  "/simulated/config.json",
	}
	return details, nil
}
// GetLatestVersion reports a fixed placeholder version string; simulated
// miners have no upstream releases to query.
func (m *SimulatedMiner) GetLatestVersion() (string, error) {
	const version = "1.0.0-simulated"
	return version, nil
}
// GetHashrateHistory returns a defensive copy of the high-resolution
// hashrate history, safe for the caller to retain or mutate.
func (m *SimulatedMiner) GetHashrateHistory() []HashratePoint {
	m.mu.RLock()
	defer m.mu.RUnlock()
	snapshot := append([]HashratePoint(nil), m.HashrateHistory...)
	return snapshot
}
// AddHashratePoint appends one sample to the high-resolution hashrate
// history under the miner's lock.
func (m *SimulatedMiner) AddHashratePoint(point HashratePoint) {
	m.mu.Lock()
	m.HashrateHistory = append(m.HashrateHistory, point)
	m.mu.Unlock()
}
// ReduceHashrateHistory compacts the hashrate history (called by the
// manager): high-resolution points older than HighResolutionDuration are
// collapsed into a single averaged low-resolution point stamped with the
// newest aged point's timestamp, and low-resolution points older than
// LowResHistoryRetention are discarded.
func (m *SimulatedMiner) ReduceHashrateHistory(now time.Time) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Partition high-res points into those that aged out and those to keep.
	highResCutoff := now.Add(-HighResolutionDuration)
	kept := make([]HashratePoint, 0)
	var aged []HashratePoint
	for _, p := range m.HashrateHistory {
		if p.Timestamp.Before(highResCutoff) {
			aged = append(aged, p)
		} else {
			kept = append(kept, p)
		}
	}
	m.HashrateHistory = kept

	// Collapse the aged points into one averaged low-resolution sample.
	if n := len(aged); n > 0 {
		total := 0
		for _, p := range aged {
			total += p.Hashrate
		}
		m.LowResHistory = append(m.LowResHistory, HashratePoint{
			Timestamp: aged[n-1].Timestamp,
			Hashrate:  total / n,
		})
	}

	// Drop low-resolution samples beyond the retention window.
	lowResCutoff := now.Add(-LowResHistoryRetention)
	retained := make([]HashratePoint, 0)
	for _, p := range m.LowResHistory {
		if !p.Timestamp.Before(lowResCutoff) {
			retained = append(retained, p)
		}
	}
	m.LowResHistory = retained
}
// GetLogs returns a defensive copy of the simulated log buffer, safe for the
// caller to retain or mutate.
func (m *SimulatedMiner) GetLogs() []string {
	m.mu.RLock()
	defer m.mu.RUnlock()
	snapshot := append([]string(nil), m.logs...)
	return snapshot
}
// WriteStdin simulates sending a line to the miner's stdin by recording it
// in the log buffer. Returns an error when the miner is not running.
func (m *SimulatedMiner) WriteStdin(input string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if !m.Running {
		return fmt.Errorf("simulated miner %s is not running", m.Name)
	}
	stamp := time.Now().Format("15:04:05")
	m.logs = append(m.logs, fmt.Sprintf("[%s] stdin: %s", stamp, input))
	return nil
}
// SimulatedMinerPresets provides common presets for simulated miners.
// Keys are preset names; BaseHashrate is in H/s. The Name field is left
// empty so callers set it per instance before passing to NewSimulatedMiner.
var SimulatedMinerPresets = map[string]SimulatedMinerConfig{
	// CPU RandomX presets at increasing hashrates.
	"cpu-low": {
		Algorithm:    "rx/0",
		BaseHashrate: 500,
		Variance:     0.15,
		PoolName:     "pool.hashvault.pro:443",
		Difficulty:   50000,
	},
	"cpu-medium": {
		Algorithm:    "rx/0",
		BaseHashrate: 5000,
		Variance:     0.10,
		PoolName:     "pool.hashvault.pro:443",
		Difficulty:   100000,
	},
	"cpu-high": {
		Algorithm:    "rx/0",
		BaseHashrate: 15000,
		Variance:     0.08,
		PoolName:     "pool.hashvault.pro:443",
		Difficulty:   200000,
	},
	// GPU presets with lower variance (GPU clocks are steadier).
	"gpu-ethash": {
		Algorithm:    "ethash",
		BaseHashrate: 30000000, // 30 MH/s
		Variance:     0.05,
		PoolName:     "eth.2miners.com:2020",
		Difficulty:   4000000000,
	},
	"gpu-kawpow": {
		Algorithm:    "kawpow",
		BaseHashrate: 15000000, // 15 MH/s
		Variance:     0.06,
		PoolName:     "rvn.2miners.com:6060",
		Difficulty:   1000000000,
	},
}

57
mining/stats_collector.go Normal file
View file

@ -0,0 +1,57 @@
package mining
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
)
// StatsCollector defines the interface for collecting miner statistics.
// This allows different miner types to implement their own stats collection logic
// while sharing common HTTP fetching infrastructure.
type StatsCollector interface {
	// CollectStats fetches and returns performance metrics from the miner.
	// Implementations should honor ctx cancellation.
	CollectStats(ctx context.Context) (*PerformanceMetrics, error)
}
// HTTPStatsConfig holds configuration for HTTP-based stats collection.
// Host and Port identify the miner's local API; Endpoint is the path
// appended to the base URL.
type HTTPStatsConfig struct {
	Host     string // API host, typically 127.0.0.1
	Port     int    // API port; zero is treated as "no API" by FetchJSONStats
	Endpoint string // e.g., "/2/summary" for XMRig, "/summary" for TT-Miner
}
// FetchJSONStats performs an HTTP GET request against the miner's local API
// and decodes the JSON response into target.
// This is a common helper for HTTP-based miner stats collection.
// The caller must provide the target struct to decode into.
//
// Returns an error when the port is zero (API disabled/unknown), the request
// fails, the status is not 200, or the body cannot be decoded. The body is
// drained on every path so the keep-alive connection can be reused.
func FetchJSONStats[T any](ctx context.Context, config HTTPStatsConfig, target *T) error {
	if config.Port == 0 {
		return fmt.Errorf("API port is zero")
	}
	url := fmt.Sprintf("http://%s:%d%s", config.Host, config.Port, config.Endpoint)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := getHTTPClient().Do(req)
	if err != nil {
		return fmt.Errorf("HTTP request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		io.Copy(io.Discard, resp.Body) // Drain body to allow connection reuse
		return fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}
	if err := json.NewDecoder(resp.Body).Decode(target); err != nil {
		return fmt.Errorf("failed to decode response: %w", err)
	}
	// Drain any trailing bytes the decoder did not consume, again so the
	// transport can reuse the connection.
	io.Copy(io.Discard, resp.Body)
	return nil
}

View file

@ -0,0 +1,140 @@
package mining
import (
"context"
"encoding/json"
"net"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestFetchJSONStats covers the three main paths of FetchJSONStats: a
// successful fetch+decode against a local httptest server, the zero-port
// guard, and context cancellation against an unreachable port.
func TestFetchJSONStats(t *testing.T) {
	t.Run("SuccessfulFetch", func(t *testing.T) {
		// Create a test server
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/test/endpoint" {
				t.Errorf("Unexpected path: %s", r.URL.Path)
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(map[string]interface{}{
				"value": 42,
				"name":  "test",
			})
		}))
		defer server.Close()
		// Get port from server listener so the config can point at it.
		addr := server.Listener.Addr().(*net.TCPAddr)
		config := HTTPStatsConfig{
			Host:     "127.0.0.1",
			Port:     addr.Port,
			Endpoint: "/test/endpoint",
		}
		var result struct {
			Value int    `json:"value"`
			Name  string `json:"name"`
		}
		ctx := context.Background()
		err := FetchJSONStats(ctx, config, &result)
		if err != nil {
			t.Fatalf("FetchJSONStats failed: %v", err)
		}
		if result.Value != 42 {
			t.Errorf("Expected value 42, got %d", result.Value)
		}
		if result.Name != "test" {
			t.Errorf("Expected name 'test', got '%s'", result.Name)
		}
	})
	t.Run("ZeroPort", func(t *testing.T) {
		// Port 0 must be rejected before any network I/O happens.
		config := HTTPStatsConfig{
			Host:     "localhost",
			Port:     0,
			Endpoint: "/test",
		}
		var result map[string]interface{}
		err := FetchJSONStats(context.Background(), config, &result)
		if err == nil {
			t.Error("Expected error for zero port")
		}
	})
	t.Run("ContextCancellation", func(t *testing.T) {
		config := HTTPStatsConfig{
			Host:     "127.0.0.1",
			Port:     12345, // Intentionally wrong port to trigger connection timeout
			Endpoint: "/test",
		}
		// 1ms deadline: the context expires before the dial can complete.
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
		defer cancel()
		var result map[string]interface{}
		err := FetchJSONStats(ctx, config, &result)
		if err == nil {
			t.Error("Expected error for cancelled context")
		}
	})
}
func TestMinerTypeRegistry(t *testing.T) {
t.Run("KnownTypes", func(t *testing.T) {
if !IsMinerSupported(MinerTypeXMRig) {
t.Error("xmrig should be a known miner type")
}
if !IsMinerSupported(MinerTypeTTMiner) {
t.Error("tt-miner should be a known miner type")
}
if !IsMinerSupported(MinerTypeSimulated) {
t.Error("simulated should be a known miner type")
}
})
t.Run("UnknownType", func(t *testing.T) {
if IsMinerSupported("unknown-miner") {
t.Error("unknown-miner should not be a known miner type")
}
})
t.Run("ListMinerTypes", func(t *testing.T) {
types := ListMinerTypes()
if len(types) == 0 {
t.Error("ListMinerTypes should return registered types")
}
})
}
func TestGetType(t *testing.T) {
t.Run("XMRigMiner", func(t *testing.T) {
miner := NewXMRigMiner()
if miner.GetType() != MinerTypeXMRig {
t.Errorf("Expected type %s, got %s", MinerTypeXMRig, miner.GetType())
}
})
t.Run("TTMiner", func(t *testing.T) {
miner := NewTTMiner()
if miner.GetType() != MinerTypeTTMiner {
t.Errorf("Expected type %s, got %s", MinerTypeTTMiner, miner.GetType())
}
})
t.Run("SimulatedMiner", func(t *testing.T) {
miner := NewSimulatedMiner(SimulatedMinerConfig{
Name: "test-sim",
Algorithm: "rx/0",
BaseHashrate: 1000,
})
if miner.GetType() != MinerTypeSimulated {
t.Errorf("Expected type %s, got %s", MinerTypeSimulated, miner.GetType())
}
})
}

203
mining/supervisor.go Normal file
View file

@ -0,0 +1,203 @@
package mining
import (
"context"
"sync"
"time"
"forge.lthn.ai/core/mining/logging"
)
// TaskFunc is a function that can be supervised. It should run until it
// completes or ctx is cancelled; the supervisor restarts it when it returns
// or panics.
type TaskFunc func(ctx context.Context)
// SupervisedTask represents a background task with restart capability.
type SupervisedTask struct {
	name          string             // task identifier used in logs and lookups
	task          TaskFunc           // the function to run and restart
	restartDelay  time.Duration      // wait between an exit and the next start
	maxRestarts   int                // restart budget; negative means unlimited (see startTask)
	restartCount  int                // number of restarts performed so far
	running       bool               // whether the task goroutine is active
	lastStartTime time.Time          // when the task was last (re)started
	cancel        context.CancelFunc // cancels this task's derived context
	mu            sync.Mutex         // guards the mutable fields above
}
// TaskSupervisor manages background tasks with automatic restart on failure.
// All tasks share a root context that Stop cancels; wg tracks the task
// goroutines so Stop can wait for them to exit.
type TaskSupervisor struct {
	tasks   map[string]*SupervisedTask // registered tasks by name
	ctx     context.Context            // root context for all task contexts
	cancel  context.CancelFunc         // cancels ctx; invoked by Stop
	wg      sync.WaitGroup             // one entry per running task goroutine
	mu      sync.RWMutex               // guards tasks and started
	started bool                       // true after Start, false after Stop
}
// NewTaskSupervisor creates a supervisor with its own root context; Stop
// cancels that context to shut down every supervised task.
func NewTaskSupervisor() *TaskSupervisor {
	ctx, cancel := context.WithCancel(context.Background())
	supervisor := &TaskSupervisor{
		tasks:  make(map[string]*SupervisedTask),
		ctx:    ctx,
		cancel: cancel,
	}
	return supervisor
}
// RegisterTask registers a task for supervision under the given name.
// The task will be automatically restarted if it exits or panics, waiting
// restartDelay between attempts, up to maxRestarts times (a negative
// maxRestarts allows unlimited restarts). Registering the same name again
// replaces the previous entry.
func (s *TaskSupervisor) RegisterTask(name string, task TaskFunc, restartDelay time.Duration, maxRestarts int) {
	entry := &SupervisedTask{
		name:         name,
		task:         task,
		restartDelay: restartDelay,
		maxRestarts:  maxRestarts,
	}
	s.mu.Lock()
	s.tasks[name] = entry
	s.mu.Unlock()
}
// Start launches every registered task. Calling Start while the supervisor
// is already running is a no-op.
func (s *TaskSupervisor) Start() {
	s.mu.Lock()
	alreadyStarted := s.started
	s.started = true
	s.mu.Unlock()
	if alreadyStarted {
		return
	}

	s.mu.RLock()
	defer s.mu.RUnlock()
	for name, st := range s.tasks {
		s.startTask(name, st)
	}
}
// startTask starts a single supervised task in its own goroutine.
//
// The goroutine runs the task in a loop: each iteration executes the task
// with panic recovery, then either restarts it after restartDelay or gives
// up once restartCount exceeds maxRestarts (a negative maxRestarts means
// restart forever). The loop exits immediately when the supervisor's root
// context is cancelled. Note that the per-task context (taskCtx) is created
// once here and reused across restarts.
func (s *TaskSupervisor) startTask(name string, st *SupervisedTask) {
	st.mu.Lock()
	// Guard against double-starting the same task.
	if st.running {
		st.mu.Unlock()
		return
	}
	st.running = true
	st.lastStartTime = time.Now()
	taskCtx, taskCancel := context.WithCancel(s.ctx)
	st.cancel = taskCancel
	st.mu.Unlock()
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for {
			// Bail out promptly if the supervisor is shutting down.
			select {
			case <-s.ctx.Done():
				return
			default:
			}
			// Run the task with panic recovery
			func() {
				defer func() {
					if r := recover(); r != nil {
						logging.Error("supervised task panicked", logging.Fields{
							"task":  name,
							"panic": r,
						})
					}
				}()
				st.task(taskCtx)
			}()
			// Check if we should restart
			st.mu.Lock()
			st.restartCount++
			shouldRestart := st.restartCount <= st.maxRestarts || st.maxRestarts < 0
			restartDelay := st.restartDelay
			st.mu.Unlock()
			if !shouldRestart {
				logging.Warn("supervised task reached max restarts", logging.Fields{
					"task":       name,
					"maxRestart": st.maxRestarts,
				})
				return
			}
			// Wait out the restart delay, but abort if the supervisor stops.
			select {
			case <-s.ctx.Done():
				return
			case <-time.After(restartDelay):
				logging.Info("restarting supervised task", logging.Fields{
					"task":         name,
					"restartCount": st.restartCount,
				})
			}
		}
	}()
	logging.Info("started supervised task", logging.Fields{"task": name})
}
// Stop cancels the supervisor's root context, waits for every task goroutine
// to exit, and marks all tasks as not running.
func (s *TaskSupervisor) Stop() {
	s.cancel()
	s.wg.Wait()

	s.mu.Lock()
	s.started = false
	for _, st := range s.tasks {
		st.mu.Lock()
		st.running = false
		st.mu.Unlock()
	}
	s.mu.Unlock()

	logging.Info("task supervisor stopped")
}
// GetTaskStatus reports whether the named task is running and how many times
// it has been restarted; found is false when no such task is registered.
func (s *TaskSupervisor) GetTaskStatus(name string) (running bool, restartCount int, found bool) {
	s.mu.RLock()
	st := s.tasks[name]
	s.mu.RUnlock()
	if st == nil {
		return false, 0, false
	}
	st.mu.Lock()
	defer st.mu.Unlock()
	return st.running, st.restartCount, true
}
// GetAllTaskStatuses returns a point-in-time snapshot of every registered
// task's status, keyed by task name.
func (s *TaskSupervisor) GetAllTaskStatuses() map[string]TaskStatus {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make(map[string]TaskStatus, len(s.tasks))
	for name, st := range s.tasks {
		st.mu.Lock()
		status := TaskStatus{
			Name:         name,
			Running:      st.running,
			RestartCount: st.restartCount,
			LastStart:    st.lastStartTime,
		}
		st.mu.Unlock()
		out[name] = status
	}
	return out
}
// TaskStatus contains the status of a supervised task as reported by
// GetAllTaskStatuses; it is a snapshot, not a live view.
type TaskStatus struct {
	Name         string    `json:"name"`
	Running      bool      `json:"running"`
	RestartCount int       `json:"restartCount"`
	LastStart    time.Time `json:"lastStart"`
}

33
mining/syslog_unix.go Normal file
View file

@ -0,0 +1,33 @@
//go:build !windows
package mining
import (
"log/syslog"
"forge.lthn.ai/core/mining/logging"
)
// syslogWriter is the process-wide syslog connection; nil when the local
// syslog daemon could not be reached at startup.
var syslogWriter *syslog.Writer

// init connects to syslog once at package load. On failure it logs a
// warning and leaves syslogWriter nil so logToSyslog falls back to the
// standard logger.
func init() {
	// Initialize syslog writer globally.
	// LOG_NOTICE is for normal but significant condition.
	// LOG_DAEMON is for system daemons.
	// "mining-service" is the tag for the log messages.
	var err error
	syslogWriter, err = syslog.New(syslog.LOG_NOTICE|syslog.LOG_DAEMON, "mining-service")
	if err != nil {
		logging.Warn("failed to connect to syslog, syslog logging disabled", logging.Fields{"error": err})
		syslogWriter = nil // Ensure it's nil on failure
	}
}
// logToSyslog emits message at NOTICE level via syslog when the connection
// is available; otherwise it falls back to the standard structured logger.
func logToSyslog(message string) {
	if syslogWriter == nil {
		logging.Info(message)
		return
	}
	_ = syslogWriter.Notice(message)
}

15
mining/syslog_windows.go Normal file
View file

@ -0,0 +1,15 @@
//go:build windows
package mining
import (
"forge.lthn.ai/core/mining/logging"
)
// On Windows, syslog is not available. We'll use a dummy implementation
// that logs to the standard logger.

// logToSyslog logs a message to the standard logger, mimicking the syslog function's signature.
// It keeps call sites identical across platforms (see syslog_unix.go).
func logToSyslog(message string) {
	logging.Info(message)
}

314
mining/throttle_test.go Normal file
View file

@ -0,0 +1,314 @@
package mining
import (
"context"
"runtime"
"testing"
"time"
"github.com/shirou/gopsutil/v4/cpu"
"github.com/shirou/gopsutil/v4/process"
)
// TestCPUThrottleSingleMiner tests that a single miner respects CPU throttle settings.
// Integration test: requires an installed XMRig binary and connects to a real
// pool; skipped in -short mode or when XMRig is absent.
func TestCPUThrottleSingleMiner(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping CPU throttle test in short mode")
	}
	miner := NewXMRigMiner()
	details, err := miner.CheckInstallation()
	if err != nil || !details.IsInstalled {
		t.Skip("XMRig not installed, skipping throttle test")
	}
	// Use simulation manager to avoid autostart conflicts
	manager := NewManagerForSimulation()
	defer manager.Stop()
	// Configure miner to use only 10% of CPU
	config := &Config{
		Pool:              "stratum+tcp://pool.supportxmr.com:3333",
		Wallet:            "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
		CPUMaxThreadsHint: 10, // 10% CPU usage
		Algo:              "throttle-single",
	}
	minerInstance, err := manager.StartMiner(context.Background(), "xmrig", config)
	if err != nil {
		t.Fatalf("Failed to start miner: %v", err)
	}
	t.Logf("Started miner: %s", minerInstance.GetName())
	// Let miner warm up
	time.Sleep(15 * time.Second)
	// Measure CPU usage
	avgCPU := measureCPUUsage(t, 10*time.Second)
	t.Logf("Configured: 10%% CPU, Measured: %.1f%% CPU", avgCPU)
	// Allow 15% margin (10% target + 5% tolerance)
	// NOTE(review): the assertion threshold is 25%, looser than the comment
	// suggests — presumably to absorb background system load.
	if avgCPU > 25 {
		t.Errorf("CPU usage %.1f%% exceeds expected ~10%% (with tolerance)", avgCPU)
	}
	manager.StopMiner(context.Background(), minerInstance.GetName())
}
// TestCPUThrottleDualMiners tests that two miners together respect combined CPU limits.
// Integration test: requires an installed XMRig binary and real pool
// connectivity; skipped in -short mode or when XMRig is absent.
func TestCPUThrottleDualMiners(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping CPU throttle test in short mode")
	}
	miner1 := NewXMRigMiner()
	details, err := miner1.CheckInstallation()
	if err != nil || !details.IsInstalled {
		t.Skip("XMRig not installed, skipping throttle test")
	}
	// Use simulation manager to avoid autostart conflicts
	manager := NewManagerForSimulation()
	defer manager.Stop()
	// Start first miner at 10% CPU with RandomX
	config1 := &Config{
		Pool:              "stratum+tcp://pool.supportxmr.com:3333",
		Wallet:            "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
		CPUMaxThreadsHint: 10,
		Algo:              "throttle-dual-1",
	}
	miner1Instance, err := manager.StartMiner(context.Background(), "xmrig", config1)
	if err != nil {
		t.Fatalf("Failed to start first miner: %v", err)
	}
	t.Logf("Started miner 1: %s", miner1Instance.GetName())
	// Start second miner at 10% CPU with different algo
	config2 := &Config{
		Pool:              "stratum+tcp://pool.supportxmr.com:5555",
		Wallet:            "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
		CPUMaxThreadsHint: 10,
		Algo:              "throttle-dual-2",
	}
	miner2Instance, err := manager.StartMiner(context.Background(), "xmrig", config2)
	if err != nil {
		t.Fatalf("Failed to start second miner: %v", err)
	}
	t.Logf("Started miner 2: %s", miner2Instance.GetName())
	// Let miners warm up
	time.Sleep(20 * time.Second)
	// Verify both miners are running
	miners := manager.ListMiners()
	if len(miners) != 2 {
		t.Fatalf("Expected 2 miners running, got %d", len(miners))
	}
	// Measure combined CPU usage
	avgCPU := measureCPUUsage(t, 15*time.Second)
	t.Logf("Configured: 2x10%% CPU, Measured: %.1f%% CPU", avgCPU)
	// Combined should be ~20% with tolerance (threshold doubled to 40% to
	// absorb background load, matching the single-miner test's margin).
	if avgCPU > 40 {
		t.Errorf("Combined CPU usage %.1f%% exceeds expected ~20%% (with tolerance)", avgCPU)
	}
	// Clean up
	manager.StopMiner(context.Background(), miner1Instance.GetName())
	manager.StopMiner(context.Background(), miner2Instance.GetName())
}
// TestCPUThrottleThreadCount tests thread-based CPU limiting.
// With one mining thread on an N-core machine, system CPU should stay near
// 100/N percent; the assertion allows a 50% margin. Integration test:
// requires XMRig and pool connectivity; skipped in -short mode.
func TestCPUThrottleThreadCount(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping CPU throttle test in short mode")
	}
	miner := NewXMRigMiner()
	details, err := miner.CheckInstallation()
	if err != nil || !details.IsInstalled {
		t.Skip("XMRig not installed, skipping throttle test")
	}
	// Use simulation manager to avoid autostart conflicts
	manager := NewManagerForSimulation()
	defer manager.Stop()
	numCPU := runtime.NumCPU()
	targetThreads := 1                                                             // Use only 1 thread
	expectedMaxCPU := float64(100) / float64(numCPU) * float64(targetThreads) * 1.5 // 50% tolerance
	config := &Config{
		Pool:    "stratum+tcp://pool.supportxmr.com:3333",
		Wallet:  "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
		Threads: targetThreads,
		Algo:    "throttle-thread",
	}
	minerInstance, err := manager.StartMiner(context.Background(), "xmrig", config)
	if err != nil {
		t.Fatalf("Failed to start miner: %v", err)
	}
	t.Logf("Started miner: %s", minerInstance.GetName())
	defer manager.StopMiner(context.Background(), minerInstance.GetName())
	// Let miner warm up
	time.Sleep(15 * time.Second)
	avgCPU := measureCPUUsage(t, 10*time.Second)
	t.Logf("CPUs: %d, Threads: %d, Expected max: %.1f%%, Measured: %.1f%%",
		numCPU, targetThreads, expectedMaxCPU, avgCPU)
	if avgCPU > expectedMaxCPU {
		t.Errorf("CPU usage %.1f%% exceeds expected max %.1f%% for %d thread(s)",
			avgCPU, expectedMaxCPU, targetThreads)
	}
}
// TestMinerResourceIsolation tests that miners don't interfere with each other:
// two throttled miners started side by side should both keep producing a
// non-zero hashrate. Integration test: requires XMRig and pool connectivity;
// skipped in -short mode.
func TestMinerResourceIsolation(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping resource isolation test in short mode")
	}
	miner := NewXMRigMiner()
	details, err := miner.CheckInstallation()
	if err != nil || !details.IsInstalled {
		t.Skip("XMRig not installed, skipping test")
	}
	// Use simulation manager to avoid autostart conflicts
	manager := NewManagerForSimulation()
	defer manager.Stop()
	// Start first miner
	config1 := &Config{
		Pool:              "stratum+tcp://pool.supportxmr.com:3333",
		Wallet:            "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
		CPUMaxThreadsHint: 25,
		Algo:              "isolation-1",
	}
	miner1, err := manager.StartMiner(context.Background(), "xmrig", config1)
	if err != nil {
		t.Fatalf("Failed to start miner 1: %v", err)
	}
	time.Sleep(10 * time.Second)
	// Get baseline hashrate for miner 1 alone (stats may not be ready yet,
	// so a failure here is only logged, not fatal).
	stats1Alone, err := miner1.GetStats(context.Background())
	if err != nil {
		t.Logf("Warning: couldn't get stats for miner 1: %v", err)
	}
	baselineHashrate := 0
	if stats1Alone != nil {
		baselineHashrate = stats1Alone.Hashrate
	}
	// Start second miner
	config2 := &Config{
		Pool:              "stratum+tcp://pool.supportxmr.com:5555",
		Wallet:            "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
		CPUMaxThreadsHint: 25,
		Algo:              "isolation-2",
	}
	miner2, err := manager.StartMiner(context.Background(), "xmrig", config2)
	if err != nil {
		t.Fatalf("Failed to start miner 2: %v", err)
	}
	time.Sleep(15 * time.Second)
	// Check both miners are running and producing hashrate
	stats1, err := miner1.GetStats(context.Background())
	if err != nil {
		t.Logf("Warning: couldn't get stats for miner 1: %v", err)
	}
	stats2, err := miner2.GetStats(context.Background())
	if err != nil {
		t.Logf("Warning: couldn't get stats for miner 2: %v", err)
	}
	t.Logf("Miner 1 baseline: %d H/s, with miner 2: %d H/s", baselineHashrate, getHashrate(stats1))
	t.Logf("Miner 2 hashrate: %d H/s", getHashrate(stats2))
	// Both miners should be producing some hashrate
	if stats1 != nil && stats1.Hashrate == 0 {
		t.Error("Miner 1 has zero hashrate")
	}
	if stats2 != nil && stats2.Hashrate == 0 {
		t.Error("Miner 2 has zero hashrate")
	}
	// Clean up
	manager.StopMiner(context.Background(), miner1.GetName())
	manager.StopMiner(context.Background(), miner2.GetName())
}
// measureCPUUsage samples system-wide CPU utilisation once per second for
// roughly the given duration and returns the mean percentage.
//
// Fix: failed samples were previously skipped but still counted in the
// divisor, biasing the reported average low. Failed samples are now excluded
// from the mean, and zero successful samples yields 0 instead of a
// misleading quotient.
func measureCPUUsage(t *testing.T, duration time.Duration) float64 {
	t.Helper()
	samples := int(duration.Seconds())
	if samples < 1 {
		samples = 1
	}
	var totalCPU float64
	succeeded := 0
	for i := 0; i < samples; i++ {
		// cpu.Percent blocks for the interval, pacing the loop.
		percentages, err := cpu.Percent(time.Second, false)
		if err != nil {
			t.Logf("Warning: failed to get CPU percentage: %v", err)
			continue
		}
		if len(percentages) > 0 {
			totalCPU += percentages[0]
			succeeded++
		}
	}
	if succeeded == 0 {
		return 0
	}
	return totalCPU / float64(succeeded)
}
// measureProcessCPU samples the CPU usage of a specific process once per
// second for roughly the given duration and returns the mean percentage.
// Returns 0 when the process handle cannot be obtained or no sample succeeds.
//
// Fixes: (1) failed samples were counted in the divisor, biasing the average
// low — they are now excluded; (2) the error path skipped the one-second
// sleep, busy-looping and finishing far before the requested duration.
func measureProcessCPU(t *testing.T, pid int32, duration time.Duration) float64 {
	t.Helper()
	proc, err := process.NewProcess(pid)
	if err != nil {
		t.Logf("Warning: failed to get process: %v", err)
		return 0
	}
	samples := int(duration.Seconds())
	if samples < 1 {
		samples = 1
	}
	var totalCPU float64
	succeeded := 0
	for i := 0; i < samples; i++ {
		pct, err := proc.CPUPercent()
		if err == nil {
			totalCPU += pct
			succeeded++
		}
		// Always pace the loop so the measurement spans the full duration.
		time.Sleep(time.Second)
	}
	if succeeded == 0 {
		return 0
	}
	return totalCPU / float64(succeeded)
}
// getHashrate safely extracts the hashrate from a possibly-nil metrics
// snapshot, returning 0 for nil.
func getHashrate(stats *PerformanceMetrics) int {
	if stats != nil {
		return stats.Hashrate
	}
	return 0
}

187
mining/ttminer.go Normal file
View file

@ -0,0 +1,187 @@
package mining
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
)
// TTMiner represents a TT-Miner (GPU miner), embedding the BaseMiner for common functionality.
type TTMiner struct {
	BaseMiner
	// FullStats holds the most recent raw API summary.
	FullStats *TTMinerSummary `json:"-"` // Excluded from JSON to prevent race during marshaling
}
// TTMinerSummary represents the stats response from TT-Miner API.
// Field names mirror the miner's JSON payload; hashrates are in H/s as
// reported by the API.
type TTMinerSummary struct {
	Name    string `json:"name"`
	Version string `json:"version"`
	Uptime  int    `json:"uptime"`
	Algo    string `json:"algo"`
	// GPUs lists per-device telemetry.
	GPUs []struct {
		Name      string  `json:"name"`
		ID        int     `json:"id"`
		Hashrate  float64 `json:"hashrate"`
		Temp      int     `json:"temp"`
		Fan       int     `json:"fan"`
		Power     int     `json:"power"`
		Accepted  int     `json:"accepted"`
		Rejected  int     `json:"rejected"`
		Intensity float64 `json:"intensity"`
	} `json:"gpus"`
	// Results aggregates share statistics across all devices.
	Results struct {
		SharesGood  int `json:"shares_good"`
		SharesTotal int `json:"shares_total"`
		AvgTime     int `json:"avg_time"`
	} `json:"results"`
	// Connection describes the current pool link.
	Connection struct {
		Pool string `json:"pool"`
		Ping int    `json:"ping"`
		Diff int    `json:"diff"`
	} `json:"connection"`
	// Hashrate carries the combined rate series and the observed peak.
	Hashrate struct {
		Total   []float64 `json:"total"`
		Highest float64   `json:"highest"`
	} `json:"hashrate"`
}
// MinerTypeTTMiner is the type identifier for TT-Miner miners.
// NewTTMiner assigns it to BaseMiner.MinerType.
const MinerTypeTTMiner = "tt-miner"
// NewTTMiner creates a new TT-Miner instance with default settings:
// the local API enabled on 127.0.0.1:4068 (TT-Miner's default port),
// empty hashrate histories, and a 500-line rolling log buffer.
func NewTTMiner() *TTMiner {
	base := BaseMiner{
		Name:           "tt-miner",
		MinerType:      MinerTypeTTMiner,
		ExecutableName: "TT-Miner",
		Version:        "latest",
		URL:            "https://github.com/TrailingStop/TT-Miner-release",
		API: &API{
			Enabled:    true,
			ListenHost: "127.0.0.1",
			ListenPort: 4068, // TT-Miner default port
		},
		HashrateHistory:       make([]HashratePoint, 0),
		LowResHashrateHistory: make([]HashratePoint, 0),
		LastLowResAggregation: time.Now(),
		LogBuffer:             NewLogBuffer(500), // keep last 500 lines
	}
	return &TTMiner{BaseMiner: base}
}
// getTTMinerConfigPath returns the platform-specific location of the
// tt-miner config file: $HOME/.config/lethean-desktop/tt-miner.json.
func getTTMinerConfigPath() (string, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}
	return filepath.Join(home, ".config", "lethean-desktop", "tt-miner.json"), nil
}
// GetLatestVersion fetches the latest version of TT-Miner from the GitHub API.
// It returns the latest release tag of the TrailingStop/TT-Miner-release repository.
func (m *TTMiner) GetLatestVersion() (string, error) {
	return FetchLatestGitHubVersion("TrailingStop", "TT-Miner-release")
}
// Install determines the correct download URL for the latest version of
// TT-Miner, installs it via the BaseMiner's InstallFromURL, and then
// verifies the result with CheckInstallation. Only Windows (.zip) and
// Linux (.tar.gz) builds are published upstream.
func (m *TTMiner) Install() error {
	version, err := m.GetLatestVersion()
	if err != nil {
		return err
	}
	m.Version = version
	var archive string
	switch runtime.GOOS {
	case "windows":
		archive = fmt.Sprintf("TT-Miner-%s.zip", version)
	case "linux":
		archive = fmt.Sprintf("TT-Miner-%s.tar.gz", version)
	default:
		return errors.New("TT-Miner is only available for Windows and Linux (requires CUDA)")
	}
	url := fmt.Sprintf("https://github.com/TrailingStop/TT-Miner-release/releases/download/%s/%s", version, archive)
	if err := m.InstallFromURL(url); err != nil {
		return err
	}
	// Verify the freshly extracted installation.
	if _, err = m.CheckInstallation(); err != nil {
		return fmt.Errorf("failed to verify installation after extraction: %w", err)
	}
	return nil
}
// Uninstall removes all files related to the TT-Miner: its specific config
// file (best effort) and, via the BaseMiner, the installation directory.
func (m *TTMiner) Uninstall() error {
	if configPath, err := getTTMinerConfigPath(); err == nil {
		// Best effort; a missing file is not an error.
		os.Remove(configPath)
	}
	return m.BaseMiner.Uninstall()
}
// CheckInstallation verifies if the TT-Miner is installed correctly.
// It locates the binary, probes it with "--version" to extract a version
// string, resolves the config path, and caches binary path / install path /
// version on the miner before returning the details.
// Thread-safe: properly locks before modifying shared fields.
func (m *TTMiner) CheckInstallation() (*InstallationDetails, error) {
	binaryPath, err := m.findMinerBinary()
	if err != nil {
		return &InstallationDetails{IsInstalled: false}, err
	}
	// Run version command before acquiring lock (I/O operation)
	cmd := exec.Command(binaryPath, "--version")
	var out bytes.Buffer
	cmd.Stdout = &out
	var version string
	if err := cmd.Run(); err != nil {
		// A failed probe does not make the installation invalid; record a placeholder.
		version = "Unknown (could not run executable)"
	} else {
		// Parse version from output: prefer the second whitespace-separated
		// field, falling back to the first when only one is present.
		output := strings.TrimSpace(out.String())
		fields := strings.Fields(output)
		if len(fields) >= 2 {
			version = fields[1]
		} else if len(fields) >= 1 {
			version = fields[0]
		} else {
			version = "Unknown (could not parse version)"
		}
	}
	// Get the config path using the helper; failure here is non-fatal.
	configPath, err := getTTMinerConfigPath()
	if err != nil {
		configPath = "Error: Could not determine config path"
	}
	// Update shared fields under lock
	m.mu.Lock()
	m.MinerBinary = binaryPath
	m.Path = filepath.Dir(binaryPath)
	m.Version = version
	m.mu.Unlock()
	return &InstallationDetails{
		IsInstalled: true,
		MinerBinary: binaryPath,
		Path:        filepath.Dir(binaryPath),
		Version:     version,
		ConfigPath:  configPath,
	}, nil
}

235
mining/ttminer_start.go Normal file
View file

@ -0,0 +1,235 @@
package mining
import (
"errors"
"fmt"
"io"
"os"
"os/exec"
"strings"
"time"
"forge.lthn.ai/core/mining/logging"
)
// Start launches the TT-Miner with the given configuration.
//
// The installation is verified lazily on first start. Process stdout/stderr
// are captured into the miner's LogBuffer (when present) and, if
// config.LogOutput is set, mirrored to the console. A watchdog goroutine
// reaps the process and clears the running state when it exits.
//
// Fix: previously a nil LogBuffer combined with config.LogOutput placed a
// nil writer inside io.MultiWriter, which panics on the first write; output
// wiring now guards against a nil buffer.
func (m *TTMiner) Start(config *Config) error {
	// Check installation BEFORE acquiring lock (CheckInstallation takes its own locks)
	m.mu.RLock()
	needsInstallCheck := m.MinerBinary == ""
	m.mu.RUnlock()
	if needsInstallCheck {
		if _, err := m.CheckInstallation(); err != nil {
			return err // Propagate the detailed error from CheckInstallation
		}
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.Running {
		return errors.New("miner is already running")
	}
	if m.API != nil && config.HTTPPort != 0 {
		m.API.ListenPort = config.HTTPPort
	} else if m.API != nil && m.API.ListenPort == 0 {
		return errors.New("miner API port not assigned")
	}
	// Build command line arguments for TT-Miner
	args := m.buildArgs(config)
	logging.Info("executing TT-Miner command", logging.Fields{"binary": m.MinerBinary, "args": strings.Join(args, " ")})
	m.cmd = exec.Command(m.MinerBinary, args...)
	// Create stdin pipe for console commands
	stdinPipe, err := m.cmd.StdinPipe()
	if err != nil {
		return fmt.Errorf("failed to create stdin pipe: %w", err)
	}
	m.stdinPipe = stdinPipe
	// Wire process output; never hand io.MultiWriter a nil writer.
	switch {
	case m.LogBuffer != nil && config.LogOutput:
		m.cmd.Stdout = io.MultiWriter(m.LogBuffer, os.Stdout)
		m.cmd.Stderr = io.MultiWriter(m.LogBuffer, os.Stderr)
	case m.LogBuffer != nil:
		m.cmd.Stdout = m.LogBuffer
		m.cmd.Stderr = m.LogBuffer
	case config.LogOutput:
		m.cmd.Stdout = os.Stdout
		m.cmd.Stderr = os.Stderr
	}
	if err := m.cmd.Start(); err != nil {
		stdinPipe.Close()
		return fmt.Errorf("failed to start TT-Miner: %w", err)
	}
	m.Running = true
	// Capture cmd locally to avoid race with Stop()
	cmd := m.cmd
	go func() {
		// Use a channel to detect if Wait() completes
		done := make(chan error, 1)
		go func() {
			done <- cmd.Wait()
		}()
		// Wait with timeout to prevent goroutine leak on zombie processes
		var err error
		select {
		case err = <-done:
			// Normal exit
		case <-time.After(5 * time.Minute):
			// Process didn't exit after 5 minutes - force cleanup
			logging.Warn("TT-Miner process wait timeout, forcing cleanup")
			if cmd.Process != nil {
				cmd.Process.Kill()
			}
			// Wait for inner goroutine with secondary timeout to prevent leak
			select {
			case err = <-done:
				// Inner goroutine completed
			case <-time.After(10 * time.Second):
				logging.Error("TT-Miner process cleanup timed out after kill", logging.Fields{"miner": m.Name})
				err = nil
			}
		}
		m.mu.Lock()
		// Only clear if this is still the same command (not restarted)
		if m.cmd == cmd {
			m.Running = false
			m.cmd = nil
		}
		m.mu.Unlock()
		if err != nil {
			logging.Debug("TT-Miner exited with error", logging.Fields{"error": err})
		} else {
			logging.Debug("TT-Miner exited normally")
		}
	}()
	return nil
}
// buildArgs assembles the TT-Miner command line from the profile config:
// pool (-P), user (-u), password (-p, defaulting to "x"), algorithm (-a),
// API bind address (-b), device list (-d), intensity (-i), plus any
// validated extra CLI arguments.
func (m *TTMiner) buildArgs(config *Config) []string {
	args := make([]string, 0, 16)
	if config.Pool != "" {
		args = append(args, "-P", config.Pool)
	}
	if config.Wallet != "" {
		args = append(args, "-u", config.Wallet)
	}
	password := config.Password
	if password == "" {
		password = "x"
	}
	args = append(args, "-p", password)
	if config.Algo != "" {
		args = append(args, "-a", config.Algo)
	}
	// Bind the stats API so GetStats can reach the miner.
	if m.API != nil && m.API.Enabled {
		args = append(args, "-b", fmt.Sprintf("%s:%d", m.API.ListenHost, m.API.ListenPort))
	}
	if config.Devices != "" {
		args = append(args, "-d", config.Devices)
	}
	if config.Intensity > 0 {
		args = append(args, "-i", fmt.Sprintf("%d", config.Intensity))
	}
	addTTMinerCliArgs(config, &args)
	return args
}
// addTTMinerCliArgs appends the extra whitespace-separated CLI arguments
// from config.CLIArgs, dropping (and logging) any that fail validation.
func addTTMinerCliArgs(config *Config, args *[]string) {
	if config.CLIArgs == "" {
		return
	}
	for _, extra := range strings.Fields(config.CLIArgs) {
		if !isValidCLIArg(extra) {
			logging.Warn("skipping invalid CLI argument", logging.Fields{"arg": extra})
			continue
		}
		*args = append(*args, extra)
	}
}
// isValidCLIArg validates CLI arguments to prevent injection or dangerous
// patterns, combining an allowlist (dash-prefixed flags or metacharacter-free
// values) with a blocklist of security-sensitive flags.
func isValidCLIArg(arg string) bool {
	if strings.TrimSpace(arg) == "" {
		return false
	}
	if !strings.HasPrefix(arg, "-") {
		// A bare value (e.g. the "3" in "-i 3") is fine as long as it
		// carries no shell metacharacters.
		return isValidArgValue(arg)
	}
	// Flags themselves must also be free of shell metacharacters.
	if !isValidArgValue(arg) {
		return false
	}
	// Reject flags that could override security-related settings.
	blockedPrefixes := []string{
		"--api-access-token", "--api-worker-id", // TT-Miner API settings
		"--config",    // Could load arbitrary config
		"--log-file",  // Could write to arbitrary locations
		"--coin-file", // Could load arbitrary coin configs
		"-o", "--out", // Output redirection
	}
	lower := strings.ToLower(arg)
	for _, blocked := range blockedPrefixes {
		if lower == blocked || strings.HasPrefix(lower, blocked+"=") {
			return false
		}
	}
	return true
}
// isValidArgValue reports whether arg is free of shell metacharacters and
// other command-injection patterns. Every blocked pattern is a single
// character, so one ContainsAny scan covers them all.
func isValidArgValue(arg string) bool {
	const dangerous = ";|&`$(){}<>\n\r\\'\"!"
	return !strings.ContainsAny(arg, dangerous)
}

66
mining/ttminer_stats.go Normal file
View file

@ -0,0 +1,66 @@
package mining
import (
"context"
"errors"
)
// GetStats retrieves performance metrics from the TT-Miner API.
// It snapshots connection details under a read lock, releases the lock
// before the HTTP round-trip, caches the full summary on the miner, and
// converts it to the common PerformanceMetrics shape.
func (m *TTMiner) GetStats(ctx context.Context) (*PerformanceMetrics, error) {
	// Read state under RLock, then release before HTTP call
	m.mu.RLock()
	if !m.Running {
		m.mu.RUnlock()
		return nil, errors.New("miner is not running")
	}
	if m.API == nil || m.API.ListenPort == 0 {
		m.mu.RUnlock()
		return nil, errors.New("miner API not configured or port is zero")
	}
	config := HTTPStatsConfig{
		Host:     m.API.ListenHost,
		Port:     m.API.ListenPort,
		Endpoint: "/summary",
	}
	m.mu.RUnlock()
	// Create request with context and timeout
	reqCtx, cancel := context.WithTimeout(ctx, statsTimeout)
	defer cancel()
	// Use the common HTTP stats fetcher
	var summary TTMinerSummary
	if err := FetchJSONStats(reqCtx, config, &summary); err != nil {
		return nil, err
	}
	// Store the full summary in the miner struct (requires lock)
	m.mu.Lock()
	m.FullStats = &summary
	m.mu.Unlock()
	// Calculate total hashrate: prefer the miner-reported total, otherwise
	// sum the per-GPU figures.
	var totalHashrate float64
	if len(summary.Hashrate.Total) > 0 {
		totalHashrate = summary.Hashrate.Total[0]
	} else {
		// Sum individual GPU hashrates
		for _, gpu := range summary.GPUs {
			totalHashrate += gpu.Hashrate
		}
	}
	// For TT-Miner, we use the connection difficulty as both current and avg
	// since TT-Miner doesn't expose per-share difficulty data
	diffCurrent := summary.Connection.Diff
	return &PerformanceMetrics{
		Hashrate:      int(totalHashrate),
		Shares:        summary.Results.SharesGood,
		Rejected:      summary.Results.SharesTotal - summary.Results.SharesGood, // rejected = total - good
		Uptime:        summary.Uptime,
		Algorithm:     summary.Algo,
		AvgDifficulty: diffCurrent, // Use pool diff as approximation
		DiffCurrent:   diffCurrent,
	}, nil
}

89
mining/version.go Normal file
View file

@ -0,0 +1,89 @@
package mining
import (
"encoding/json"
"fmt"
"io"
"net/http"
)
// Build metadata. The values below are compile-time defaults; release
// builds presumably overwrite them via -ldflags — TODO confirm against the
// build scripts.
var (
	version = "dev"
	commit  = "none"
	date    = "unknown"
)

// GetVersion returns the version of the application.
func GetVersion() string {
	return version
}

// GetCommit returns the git commit hash recorded at build time.
func GetCommit() string {
	return commit
}

// GetBuildDate returns the build date string recorded at build time.
func GetBuildDate() string {
	return date
}
// GitHubRelease represents the subset of a GitHub release API response
// that this package consumes.
type GitHubRelease struct {
	TagName string `json:"tag_name"` // release tag, used as the version string
	Name    string `json:"name"`     // human-readable release title
}
// FetchLatestGitHubVersion fetches the latest release version from a GitHub repository.
// It takes the repository owner and name (e.g. "xmrig", "xmrig") and returns the tag name.
// Uses a circuit breaker to prevent cascading failures when the GitHub API is
// unavailable; while the circuit is open, the last cached tag is returned if present.
//
// Fix: the open-circuit sentinel is now matched with errors.Is instead of ==,
// so a wrapped ErrCircuitOpen is still recognized, and the type-mismatch error
// uses errors.New (fmt.Errorf with no verbs was flagged by go vet).
func FetchLatestGitHubVersion(owner, repo string) (string, error) {
	cb := getGitHubCircuitBreaker()
	result, err := cb.Execute(func() (interface{}, error) {
		return fetchGitHubVersionDirect(owner, repo)
	})
	if err != nil {
		// If circuit is open, try to return cached value with warning
		if errors.Is(err, ErrCircuitOpen) {
			if cached, ok := cb.GetCached(); ok {
				if tagName, ok := cached.(string); ok {
					return tagName, nil
				}
			}
			return "", fmt.Errorf("github API unavailable (circuit breaker open): %w", err)
		}
		return "", err
	}
	tagName, ok := result.(string)
	if !ok {
		return "", errors.New("unexpected result type from circuit breaker")
	}
	return tagName, nil
}
// fetchGitHubVersionDirect performs the raw GitHub "latest release" API call;
// FetchLatestGitHubVersion wraps it with a circuit breaker. It returns the
// release's tag name.
func fetchGitHubVersionDirect(owner, repo string) (string, error) {
	endpoint := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo)
	resp, err := getHTTPClient().Get(endpoint)
	if err != nil {
		return "", fmt.Errorf("failed to fetch version: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Drain the body so the underlying connection can be reused.
		io.Copy(io.Discard, resp.Body)
		return "", fmt.Errorf("failed to get latest release: unexpected status code %d", resp.StatusCode)
	}
	var release GitHubRelease
	if decodeErr := json.NewDecoder(resp.Body).Decode(&release); decodeErr != nil {
		return "", fmt.Errorf("failed to decode release: %w", decodeErr)
	}
	return release.TagName, nil
}

199
mining/xmrig.go Normal file
View file

@ -0,0 +1,199 @@
package mining
import (
"bytes"
"errors"
"fmt"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/adrg/xdg"
)
// XMRigMiner represents an XMRig miner, embedding the BaseMiner for common functionality.
type XMRigMiner struct {
	BaseMiner
	// FullStats holds the most recent stats summary from the miner's API.
	// Excluded from JSON to prevent a data race during marshaling.
	FullStats *XMRigSummary `json:"-"`
}
// Package-wide HTTP client with connection pooling, shared by outbound
// requests (e.g. the GitHub release lookups). Guarded by httpClientMu so
// tests can swap it safely.
var (
	httpClient = &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 10,
			IdleConnTimeout:     90 * time.Second,
		},
	}
	httpClientMu sync.RWMutex
)

// getHTTPClient returns the HTTP client with proper synchronization.
func getHTTPClient() *http.Client {
	httpClientMu.RLock()
	defer httpClientMu.RUnlock()
	return httpClient
}

// setHTTPClient sets the HTTP client (for testing).
func setHTTPClient(client *http.Client) {
	httpClientMu.Lock()
	defer httpClientMu.Unlock()
	httpClient = client
}
// MinerTypeXMRig is the type identifier for XMRig miners.
// Note: This type now supports the Miner Platform binary ("miner") as the default;
// NewXMRigMiner assigns it to BaseMiner.MinerType.
const MinerTypeXMRig = "xmrig"
// NewXMRigMiner creates a new XMRig miner instance with default settings.
// The executable name defaults to "miner" (Miner Platform); the API is
// enabled on 127.0.0.1 with the port assigned later, histories start empty,
// and a 500-line rolling log buffer is attached. No remote download URL is
// set — this build is local-only.
func NewXMRigMiner() *XMRigMiner {
	base := BaseMiner{
		Name:           "miner",
		MinerType:      MinerTypeXMRig,
		ExecutableName: "miner",
		Version:        "latest",
		URL:            "", // Local build only - no remote download
		API: &API{
			Enabled:    true,
			ListenHost: "127.0.0.1",
		},
		HashrateHistory:       make([]HashratePoint, 0),
		LowResHashrateHistory: make([]HashratePoint, 0),
		LastLowResAggregation: time.Now(),
		LogBuffer:             NewLogBuffer(500), // keep last 500 lines
	}
	return &XMRigMiner{BaseMiner: base}
}
// getXMRigConfigPath returns the platform-specific path for the xmrig.json
// file. A non-empty instanceName other than "xmrig" yields an
// instance-specific file (e.g. xmrig-78.json). Declared as a variable so
// tests can override it.
var getXMRigConfigPath = func(instanceName string) (string, error) {
	fileName := "xmrig.json"
	if instanceName != "" && instanceName != "xmrig" {
		// Instance-specific config file (e.g. xmrig-78.json).
		fileName = instanceName + ".json"
	}
	if path, err := xdg.ConfigFile("lethean-desktop/" + fileName); err == nil {
		return path, nil
	}
	// Fallback for non-XDG environments or when XDG variables are not set.
	home, homeErr := os.UserHomeDir()
	if homeErr != nil {
		return "", homeErr
	}
	return filepath.Join(home, ".config", "lethean-desktop", fileName), nil
}
// GetLatestVersion fetches the latest version of XMRig from the GitHub API.
// It returns the latest release tag of the xmrig/xmrig repository.
func (m *XMRigMiner) GetLatestVersion() (string, error) {
	return FetchLatestGitHubVersion("xmrig", "xmrig")
}
// Install determines the correct download URL for the latest version of
// XMRig for the current OS (windows/linux/darwin), installs it via the
// BaseMiner's InstallFromURL, and verifies the result with CheckInstallation.
func (m *XMRigMiner) Install() error {
	version, err := m.GetLatestVersion()
	if err != nil {
		return err
	}
	m.Version = version
	// Archive name suffix per OS; release tags carry a leading "v" that the
	// archive file names drop.
	var suffix string
	switch runtime.GOOS {
	case "windows":
		suffix = "windows-x64.zip"
	case "linux":
		suffix = "linux-static-x64.tar.gz"
	case "darwin":
		suffix = "macos-x64.tar.gz"
	default:
		return errors.New("unsupported operating system")
	}
	url := fmt.Sprintf("https://github.com/xmrig/xmrig/releases/download/%s/xmrig-%s-%s",
		version, strings.TrimPrefix(version, "v"), suffix)
	if err := m.InstallFromURL(url); err != nil {
		return err
	}
	// Verify the freshly extracted installation.
	if _, err = m.CheckInstallation(); err != nil {
		return fmt.Errorf("failed to verify installation after extraction: %w", err)
	}
	return nil
}
// Uninstall removes all files related to the XMRig miner: the
// instance-specific config file (best effort) and, via the BaseMiner,
// the installation directory.
func (m *XMRigMiner) Uninstall() error {
	if configPath, err := getXMRigConfigPath(m.Name); err == nil {
		// Best effort; a missing file is not an error.
		os.Remove(configPath)
	}
	return m.BaseMiner.Uninstall()
}
// CheckInstallation verifies if the XMRig miner is installed correctly.
// It locates the binary, probes it with "--version" to extract a version
// string, resolves the instance-specific config path, and caches binary
// path / install path / version on the miner before returning the details.
// Thread-safe: properly locks before modifying shared fields.
func (m *XMRigMiner) CheckInstallation() (*InstallationDetails, error) {
	binaryPath, err := m.findMinerBinary()
	if err != nil {
		return &InstallationDetails{IsInstalled: false}, err
	}
	// Run version command before acquiring lock (I/O operation)
	cmd := exec.Command(binaryPath, "--version")
	var out bytes.Buffer
	cmd.Stdout = &out
	var version string
	if err := cmd.Run(); err != nil {
		// A failed probe does not make the installation invalid; record a placeholder.
		version = "Unknown (could not run executable)"
	} else {
		// The version is expected as the second whitespace-separated field.
		fields := strings.Fields(out.String())
		if len(fields) >= 2 {
			version = fields[1]
		} else {
			version = "Unknown (could not parse version)"
		}
	}
	// Get the config path using the helper (use instance name if set)
	m.mu.RLock()
	instanceName := m.Name
	m.mu.RUnlock()
	configPath, err := getXMRigConfigPath(instanceName)
	if err != nil {
		// Log the error but don't fail CheckInstallation if config path can't be determined
		configPath = "Error: Could not determine config path"
	}
	// Update shared fields under lock
	m.mu.Lock()
	m.MinerBinary = binaryPath
	m.Path = filepath.Dir(binaryPath)
	m.Version = version
	m.mu.Unlock()
	return &InstallationDetails{
		IsInstalled: true,
		MinerBinary: binaryPath,
		Path:        filepath.Dir(binaryPath),
		Version:     version,
		ConfigPath:  configPath,
	}, nil
}

241
mining/xmrig_gpu_test.go Normal file
View file

@ -0,0 +1,241 @@
package mining
import (
"encoding/json"
"os"
"path/filepath"
"testing"
)
// TestXMRigDualMiningConfig verifies that createConfig emits a two-pool
// config (CPU + GPU on different pools/algos) with CUDA enabled, OpenCL
// disabled, and the CPU throttling hint preserved.
//
// Fix: previously the map/array type assertions were unchecked, so a
// malformed config panicked the test run instead of failing it cleanly.
func TestXMRigDualMiningConfig(t *testing.T) {
	// Create a temp directory for the config
	tmpDir := t.TempDir()
	miner := &XMRigMiner{
		BaseMiner: BaseMiner{
			Name: "xmrig-dual-test",
			API: &API{
				Enabled:    true,
				ListenHost: "127.0.0.1",
				ListenPort: 12345,
			},
		},
	}
	// Temporarily override config path
	origGetPath := getXMRigConfigPath
	getXMRigConfigPath = func(name string) (string, error) {
		return filepath.Join(tmpDir, name+".json"), nil
	}
	defer func() { getXMRigConfigPath = origGetPath }()
	// Config with CPU mining rx/0 and GPU mining kawpow on different pools
	config := &Config{
		// CPU config
		Pool:              "stratum+tcp://pool.supportxmr.com:3333",
		Wallet:            "cpu_wallet_address",
		Algo:              "rx/0",
		CPUMaxThreadsHint: 50,
		// GPU config - separate pool and algo
		// MUST specify Devices explicitly - no auto-picking!
		GPUEnabled: true,
		GPUPool:    "stratum+tcp://ravencoin.pool.com:3333",
		GPUWallet:  "gpu_wallet_address",
		GPUAlgo:    "kawpow",
		CUDA:       true, // NVIDIA
		OpenCL:     false,
		Devices:    "0", // Explicit device selection required
	}
	if err := miner.createConfig(config); err != nil {
		t.Fatalf("Failed to create config: %v", err)
	}
	// Read and parse the generated config
	data, err := os.ReadFile(miner.ConfigPath)
	if err != nil {
		t.Fatalf("Failed to read config: %v", err)
	}
	var generatedConfig map[string]interface{}
	if err := json.Unmarshal(data, &generatedConfig); err != nil {
		t.Fatalf("Failed to parse config: %v", err)
	}
	// asMap fails the test (instead of panicking) when a section is missing
	// or not a JSON object.
	asMap := func(v interface{}, what string) map[string]interface{} {
		t.Helper()
		m, ok := v.(map[string]interface{})
		if !ok {
			t.Fatalf("%s missing or not an object: %T", what, v)
		}
		return m
	}
	// Verify pools
	pools, ok := generatedConfig["pools"].([]interface{})
	if !ok {
		t.Fatal("pools not found in config")
	}
	if len(pools) != 2 {
		t.Fatalf("Expected 2 pools (CPU + GPU), got %d", len(pools))
	}
	// Verify CPU pool
	cpuPool := asMap(pools[0], "pools[0]")
	if cpuPool["url"] != "stratum+tcp://pool.supportxmr.com:3333" {
		t.Errorf("CPU pool URL mismatch: %v", cpuPool["url"])
	}
	if cpuPool["user"] != "cpu_wallet_address" {
		t.Errorf("CPU wallet mismatch: %v", cpuPool["user"])
	}
	if cpuPool["algo"] != "rx/0" {
		t.Errorf("CPU algo mismatch: %v", cpuPool["algo"])
	}
	// Verify GPU pool
	gpuPool := asMap(pools[1], "pools[1]")
	if gpuPool["url"] != "stratum+tcp://ravencoin.pool.com:3333" {
		t.Errorf("GPU pool URL mismatch: %v", gpuPool["url"])
	}
	if gpuPool["user"] != "gpu_wallet_address" {
		t.Errorf("GPU wallet mismatch: %v", gpuPool["user"])
	}
	if gpuPool["algo"] != "kawpow" {
		t.Errorf("GPU algo mismatch: %v", gpuPool["algo"])
	}
	// Verify CUDA enabled, OpenCL disabled
	cuda := asMap(generatedConfig["cuda"], "cuda")
	if cuda["enabled"] != true {
		t.Error("CUDA should be enabled")
	}
	opencl := asMap(generatedConfig["opencl"], "opencl")
	if opencl["enabled"] != false {
		t.Error("OpenCL should be disabled")
	}
	// Verify CPU config
	cpu := asMap(generatedConfig["cpu"], "cpu")
	if cpu["enabled"] != true {
		t.Error("CPU should be enabled")
	}
	if cpu["max-threads-hint"] != float64(50) {
		t.Errorf("CPU max-threads-hint mismatch: %v", cpu["max-threads-hint"])
	}
	t.Logf("Generated dual-mining config:\n%s", string(data))
}
// TestXMRigGPUOnlyConfig verifies that a GPU-only profile (NoCPU, both
// OpenCL and CUDA requested with explicit devices) enables both GPU backends.
//
// Fix: the json.Unmarshal error was silently ignored and the section type
// assertions were unchecked; both now fail the test cleanly.
func TestXMRigGPUOnlyConfig(t *testing.T) {
	tmpDir := t.TempDir()
	miner := &XMRigMiner{
		BaseMiner: BaseMiner{
			Name: "xmrig-gpu-only",
			API: &API{
				Enabled:    true,
				ListenHost: "127.0.0.1",
				ListenPort: 12346,
			},
		},
	}
	origGetPath := getXMRigConfigPath
	getXMRigConfigPath = func(name string) (string, error) {
		return filepath.Join(tmpDir, name+".json"), nil
	}
	defer func() { getXMRigConfigPath = origGetPath }()
	// GPU-only config using same pool for simplicity
	// MUST specify Devices explicitly - no auto-picking!
	config := &Config{
		Pool:       "stratum+tcp://pool.supportxmr.com:3333",
		Wallet:     "test_wallet",
		Algo:       "rx/0",
		NoCPU:      true, // Disable CPU
		GPUEnabled: true,
		OpenCL:     true,  // AMD GPU
		CUDA:       true,  // Also NVIDIA
		Devices:    "0,1", // Explicit device selection required
	}
	if err := miner.createConfig(config); err != nil {
		t.Fatalf("Failed to create config: %v", err)
	}
	data, err := os.ReadFile(miner.ConfigPath)
	if err != nil {
		t.Fatalf("Failed to read config: %v", err)
	}
	var generatedConfig map[string]interface{}
	if err := json.Unmarshal(data, &generatedConfig); err != nil {
		t.Fatalf("Failed to parse config: %v", err)
	}
	// Both GPU backends should be enabled
	cuda, ok := generatedConfig["cuda"].(map[string]interface{})
	if !ok {
		t.Fatal("cuda section missing or not an object")
	}
	opencl, ok := generatedConfig["opencl"].(map[string]interface{})
	if !ok {
		t.Fatal("opencl section missing or not an object")
	}
	if cuda["enabled"] != true {
		t.Error("CUDA should be enabled")
	}
	if opencl["enabled"] != true {
		t.Error("OpenCL should be enabled")
	}
	t.Logf("Generated GPU config:\n%s", string(data))
}
// TestXMRigCPUOnlyConfig verifies that a CPU-only profile (GPUEnabled left
// false) disables both GPU backends and emits exactly one pool.
//
// Fix: the json.Unmarshal error was silently ignored and the section type
// assertions were unchecked; both now fail the test cleanly.
func TestXMRigCPUOnlyConfig(t *testing.T) {
	tmpDir := t.TempDir()
	miner := &XMRigMiner{
		BaseMiner: BaseMiner{
			Name: "xmrig-cpu-only",
			API: &API{
				Enabled:    true,
				ListenHost: "127.0.0.1",
				ListenPort: 12347,
			},
		},
	}
	origGetPath := getXMRigConfigPath
	getXMRigConfigPath = func(name string) (string, error) {
		return filepath.Join(tmpDir, name+".json"), nil
	}
	defer func() { getXMRigConfigPath = origGetPath }()
	// CPU-only config (GPUEnabled defaults to false)
	config := &Config{
		Pool:   "stratum+tcp://pool.supportxmr.com:3333",
		Wallet: "test_wallet",
		Algo:   "rx/0",
	}
	if err := miner.createConfig(config); err != nil {
		t.Fatalf("Failed to create config: %v", err)
	}
	data, err := os.ReadFile(miner.ConfigPath)
	if err != nil {
		t.Fatalf("Failed to read config: %v", err)
	}
	var generatedConfig map[string]interface{}
	if err := json.Unmarshal(data, &generatedConfig); err != nil {
		t.Fatalf("Failed to parse config: %v", err)
	}
	// GPU backends should be disabled
	cuda, ok := generatedConfig["cuda"].(map[string]interface{})
	if !ok {
		t.Fatal("cuda section missing or not an object")
	}
	opencl, ok := generatedConfig["opencl"].(map[string]interface{})
	if !ok {
		t.Fatal("opencl section missing or not an object")
	}
	if cuda["enabled"] != false {
		t.Error("CUDA should be disabled for CPU-only config")
	}
	if opencl["enabled"] != false {
		t.Error("OpenCL should be disabled for CPU-only config")
	}
	// Should only have 1 pool
	pools, ok := generatedConfig["pools"].([]interface{})
	if !ok {
		t.Fatal("pools not found in config")
	}
	if len(pools) != 1 {
		t.Errorf("Expected 1 pool for CPU-only, got %d", len(pools))
	}
	t.Logf("Generated CPU-only config:\n%s", string(data))
}

312
mining/xmrig_start.go Normal file
View file

@ -0,0 +1,312 @@
package mining
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"forge.lthn.ai/core/mining/logging"
)
// Start launches the XMRig miner with the specified configuration.
//
// If config supplies both a pool and a wallet, a fresh instance config file
// is generated; otherwise an existing config file must already be present.
// Process output is captured into the LogBuffer (when present) and, if
// config.LogOutput is set, mirrored to the console. A watchdog goroutine
// reaps the process and clears the running state when it exits.
//
// Fixes: (1) a nil LogBuffer combined with config.LogOutput previously put
// a nil writer inside io.MultiWriter, panicking on first write; (2) a failed
// start previously deleted m.ConfigPath even when it was a pre-existing user
// config rather than one this call generated.
func (m *XMRigMiner) Start(config *Config) error {
	// Check installation BEFORE acquiring lock (CheckInstallation takes its own locks)
	m.mu.RLock()
	needsInstallCheck := m.MinerBinary == ""
	m.mu.RUnlock()
	if needsInstallCheck {
		if _, err := m.CheckInstallation(); err != nil {
			return err // Propagate the detailed error from CheckInstallation
		}
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.Running {
		return errors.New("miner is already running")
	}
	if m.API != nil && config.HTTPPort != 0 {
		m.API.ListenPort = config.HTTPPort
	} else if m.API != nil && m.API.ListenPort == 0 {
		return errors.New("miner API port not assigned")
	}
	// Track whether this call generated the config file, so cleanup on a
	// failed start never deletes a pre-existing user config.
	createdConfig := false
	if config.Pool != "" && config.Wallet != "" {
		if err := m.createConfig(config); err != nil {
			return err
		}
		createdConfig = true
	} else {
		// Use the centralized helper to get the instance-specific config path
		configPath, err := getXMRigConfigPath(m.Name)
		if err != nil {
			return fmt.Errorf("could not determine config file path: %w", err)
		}
		m.ConfigPath = configPath
		if _, err := os.Stat(m.ConfigPath); os.IsNotExist(err) {
			return errors.New("config file does not exist and no pool/wallet provided to create one")
		}
	}
	args := []string{"-c", m.ConfigPath}
	if m.API != nil && m.API.Enabled {
		args = append(args, "--http-host", m.API.ListenHost, "--http-port", fmt.Sprintf("%d", m.API.ListenPort))
	}
	addCliArgs(config, &args)
	logging.Info("executing miner command", logging.Fields{"binary": m.MinerBinary, "args": strings.Join(args, " ")})
	m.cmd = exec.Command(m.MinerBinary, args...)
	// Create stdin pipe for console commands
	stdinPipe, err := m.cmd.StdinPipe()
	if err != nil {
		return fmt.Errorf("failed to create stdin pipe: %w", err)
	}
	m.stdinPipe = stdinPipe
	// Wire process output; never hand io.MultiWriter a nil writer.
	switch {
	case m.LogBuffer != nil && config.LogOutput:
		m.cmd.Stdout = io.MultiWriter(m.LogBuffer, os.Stdout)
		m.cmd.Stderr = io.MultiWriter(m.LogBuffer, os.Stderr)
	case m.LogBuffer != nil:
		m.cmd.Stdout = m.LogBuffer
		m.cmd.Stderr = m.LogBuffer
	case config.LogOutput:
		m.cmd.Stdout = os.Stdout
		m.cmd.Stderr = os.Stderr
	}
	if err := m.cmd.Start(); err != nil {
		stdinPipe.Close()
		// Clean up config file on failed start, but only if we wrote it.
		if createdConfig && m.ConfigPath != "" {
			os.Remove(m.ConfigPath)
		}
		return fmt.Errorf("failed to start miner: %w", err)
	}
	m.Running = true
	// Capture cmd locally to avoid race with Stop()
	cmd := m.cmd
	minerName := m.Name // Capture name for logging
	go func() {
		// Use a channel to detect if Wait() completes
		done := make(chan struct{})
		var waitErr error
		go func() {
			waitErr = cmd.Wait()
			close(done)
		}()
		// Wait with timeout to prevent goroutine leak on zombie processes
		select {
		case <-done:
			// Normal exit - log the exit status
			if waitErr != nil {
				logging.Info("miner process exited", logging.Fields{
					"miner": minerName,
					"error": waitErr.Error(),
				})
			} else {
				logging.Info("miner process exited normally", logging.Fields{
					"miner": minerName,
				})
			}
		case <-time.After(5 * time.Minute):
			// Process didn't exit after 5 minutes - force cleanup
			logging.Warn("miner process wait timeout, forcing cleanup", logging.Fields{"miner": minerName})
			if cmd.Process != nil {
				cmd.Process.Kill()
			}
			// Wait with timeout to prevent goroutine leak if Wait() never returns
			select {
			case <-done:
				// Inner goroutine completed
			case <-time.After(10 * time.Second):
				logging.Error("process cleanup timed out after kill", logging.Fields{"miner": minerName})
			}
		}
		m.mu.Lock()
		// Only clear if this is still the same command (not restarted)
		if m.cmd == cmd {
			m.Running = false
			m.cmd = nil
		}
		m.mu.Unlock()
	}()
	return nil
}
// Stop terminates the miner process via the BaseMiner and then removes the
// instance-specific config file (best effort).
func (m *XMRigMiner) Stop() error {
	if err := m.BaseMiner.Stop(); err != nil {
		return err
	}
	if m.ConfigPath != "" {
		// Best effort; a missing file is not an error.
		os.Remove(m.ConfigPath)
	}
	return nil
}
// addCliArgs appends command line arguments derived from the config:
// pool (-o), wallet (-u), thread count (-t), --no-huge-pages when huge
// pages are off, --tls when requested, and always a 1% donate level.
func addCliArgs(config *Config, args *[]string) {
	type flagSpec struct {
		when bool
		vals []string
	}
	for _, f := range []flagSpec{
		{config.Pool != "", []string{"-o", config.Pool}},
		{config.Wallet != "", []string{"-u", config.Wallet}},
		{config.Threads != 0, []string{"-t", fmt.Sprintf("%d", config.Threads)}},
		{!config.HugePages, []string{"--no-huge-pages"}},
		{config.TLS, []string{"--tls"}},
		{true, []string{"--donate-level", "1"}},
	} {
		if f.when {
			*args = append(*args, f.vals...)
		}
	}
}
// createConfig creates a JSON configuration file for the XMRig miner at the
// instance-specific path and records that path on m.ConfigPath.
// It builds the pools array (CPU pool first, optional separate GPU pool),
// the CPU section (threads / max-threads-hint / priority), and the OpenCL
// and CUDA sections. GPU backends are only enabled when GPUEnabled, the
// matching backend flag, AND an explicit Devices list are all set — there
// is deliberately no device auto-picking. The file is written with 0600
// permissions since it contains wallet credentials.
func (m *XMRigMiner) createConfig(config *Config) error {
	// Use the centralized helper to get the instance-specific config path
	configPath, err := getXMRigConfigPath(m.Name)
	if err != nil {
		return err
	}
	m.ConfigPath = configPath
	if err := os.MkdirAll(filepath.Dir(m.ConfigPath), 0755); err != nil {
		return err
	}
	// API listen address; port 0 acts as a placeholder when no API is set.
	apiListen := "127.0.0.1:0"
	if m.API != nil {
		apiListen = fmt.Sprintf("%s:%d", m.API.ListenHost, m.API.ListenPort)
	}
	cpuConfig := map[string]interface{}{
		"enabled":    true,
		"huge-pages": config.HugePages,
	}
	// Set thread count or max-threads-hint for CPU throttling
	if config.Threads > 0 {
		cpuConfig["threads"] = config.Threads
	}
	if config.CPUMaxThreadsHint > 0 {
		cpuConfig["max-threads-hint"] = config.CPUMaxThreadsHint
	}
	if config.CPUPriority > 0 {
		cpuConfig["priority"] = config.CPUPriority
	}
	// Build pools array - CPU pool first
	cpuPool := map[string]interface{}{
		"url":       config.Pool,
		"user":      config.Wallet,
		"pass":      "x",
		"keepalive": true,
		"tls":       config.TLS,
	}
	// Add algo or coin (coin takes precedence for algorithm auto-detection)
	if config.Coin != "" {
		cpuPool["coin"] = config.Coin
	} else if config.Algo != "" {
		cpuPool["algo"] = config.Algo
	}
	pools := []map[string]interface{}{cpuPool}
	// Add separate GPU pool if configured
	if config.GPUEnabled && config.GPUPool != "" {
		gpuWallet := config.GPUWallet
		if gpuWallet == "" {
			gpuWallet = config.Wallet // Default to main wallet
		}
		gpuPass := config.GPUPassword
		if gpuPass == "" {
			gpuPass = "x"
		}
		gpuPool := map[string]interface{}{
			"url":       config.GPUPool,
			"user":      gpuWallet,
			"pass":      gpuPass,
			"keepalive": true,
		}
		// Add GPU algo (typically etchash, ethash, kawpow, progpowz for GPU mining)
		if config.GPUAlgo != "" {
			gpuPool["algo"] = config.GPUAlgo
		}
		pools = append(pools, gpuPool)
	}
	// Build OpenCL (AMD/Intel GPU) config
	// GPU mining requires explicit device selection - no auto-picking
	openclConfig := map[string]interface{}{
		"enabled": config.GPUEnabled && config.OpenCL && config.Devices != "",
	}
	if config.GPUEnabled && config.OpenCL && config.Devices != "" {
		// User must explicitly specify devices (e.g., "0" or "0,1")
		openclConfig["devices"] = config.Devices
		if config.GPUIntensity > 0 {
			openclConfig["intensity"] = config.GPUIntensity
		}
		if config.GPUThreads > 0 {
			openclConfig["threads"] = config.GPUThreads
		}
	}
	// Build CUDA (NVIDIA GPU) config
	// GPU mining requires explicit device selection - no auto-picking
	cudaConfig := map[string]interface{}{
		"enabled": config.GPUEnabled && config.CUDA && config.Devices != "",
	}
	if config.GPUEnabled && config.CUDA && config.Devices != "" {
		// User must explicitly specify devices (e.g., "0" or "0,1")
		cudaConfig["devices"] = config.Devices
		if config.GPUIntensity > 0 {
			cudaConfig["intensity"] = config.GPUIntensity
		}
		if config.GPUThreads > 0 {
			cudaConfig["threads"] = config.GPUThreads
		}
	}
	c := map[string]interface{}{
		"api": map[string]interface{}{
			"enabled":    m.API != nil && m.API.Enabled,
			"listen":     apiListen,
			"restricted": true,
		},
		"pools":            pools,
		"cpu":              cpuConfig,
		"opencl":           openclConfig,
		"cuda":             cudaConfig,
		"pause-on-active":  config.PauseOnActive,
		"pause-on-battery": config.PauseOnBattery,
	}
	data, err := json.MarshalIndent(c, "", " ")
	if err != nil {
		return err
	}
	// 0600: the file contains wallet addresses/credentials.
	return os.WriteFile(m.ConfigPath, data, 0600)
}

66
mining/xmrig_stats.go Normal file
View file

@ -0,0 +1,66 @@
package mining
import (
"context"
"errors"
"time"
)
// statsTimeout is the timeout for stats HTTP requests (shorter than the
// general HTTP timeout, so a hung miner API cannot stall stats polling).
const statsTimeout = 5 * time.Second
// GetStats retrieves the performance statistics from the running XMRig miner.
//
// It snapshots the state it needs under the read lock, releases the lock
// before performing the HTTP request, then stores the full summary back on
// the miner under the write lock.
func (m *XMRigMiner) GetStats(ctx context.Context) (*PerformanceMetrics, error) {
	// Snapshot under RLock; never hold the lock across network I/O.
	m.mu.RLock()
	running := m.Running
	apiReady := m.API != nil && m.API.ListenPort != 0
	var statsCfg HTTPStatsConfig
	if apiReady {
		statsCfg = HTTPStatsConfig{
			Host:     m.API.ListenHost,
			Port:     m.API.ListenPort,
			Endpoint: "/2/summary",
		}
	}
	m.mu.RUnlock()

	if !running {
		return nil, errors.New("miner is not running")
	}
	if !apiReady {
		return nil, errors.New("miner API not configured or port is zero")
	}

	// Bound the request with the dedicated (shorter) stats timeout.
	reqCtx, cancel := context.WithTimeout(ctx, statsTimeout)
	defer cancel()

	// Fetch and decode the summary via the common HTTP stats helper.
	var summary XMRigSummary
	if err := FetchJSONStats(reqCtx, statsCfg, &summary); err != nil {
		return nil, err
	}

	// Cache the full summary on the miner (requires the write lock).
	m.mu.Lock()
	m.FullStats = &summary
	m.mu.Unlock()

	hashrate := 0
	if totals := summary.Hashrate.Total; len(totals) > 0 {
		hashrate = int(totals[0])
	}

	// Average difficulty per accepted share; zero when nothing was accepted.
	avgDifficulty := 0
	if good := summary.Results.SharesGood; good > 0 {
		avgDifficulty = summary.Results.HashesTotal / good
	}

	return &PerformanceMetrics{
		Hashrate:      hashrate,
		Shares:        summary.Results.SharesGood,
		Rejected:      summary.Results.SharesTotal - summary.Results.SharesGood,
		Uptime:        summary.Uptime,
		Algorithm:     summary.Algo,
		AvgDifficulty: avgDifficulty,
		DiffCurrent:   summary.Results.DiffCurrent,
	}, nil
}

266
mining/xmrig_test.go Normal file
View file

@ -0,0 +1,266 @@
package mining
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
)
// MockRoundTripper is a mock implementation of http.RoundTripper for testing.
type MockRoundTripper func(req *http.Request) *http.Response
// RoundTrip executes a single HTTP transaction, returning a Response for the given Request.
func (f MockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
return f(req), nil
}
// newTestClient returns an *http.Client whose Transport is replaced by fn
// so that tests never make real network calls.
func newTestClient(fn MockRoundTripper) *http.Client {
	client := &http.Client{}
	client.Transport = fn
	return client
}
// helper function to create a temporary directory for testing
func tempDir(t *testing.T) string {
dir, err := os.MkdirTemp("", "test")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
t.Cleanup(func() { os.RemoveAll(dir) })
return dir
}
// TestNewXMRigMiner_Good checks the defaults populated by NewXMRigMiner.
func TestNewXMRigMiner_Good(t *testing.T) {
	m := NewXMRigMiner()
	if m == nil {
		t.Fatal("NewXMRigMiner returned nil")
	}
	if got := m.Name; got != "miner" {
		t.Errorf("Expected miner name to be 'miner', got '%s'", got)
	}
	if got := m.Version; got != "latest" {
		t.Errorf("Expected miner version to be 'latest', got '%s'", got)
	}
	if !m.API.Enabled {
		t.Error("Expected API to be enabled by default")
	}
}
// TestXMRigMiner_GetName_Good verifies GetName reports the default name.
func TestXMRigMiner_GetName_Good(t *testing.T) {
	m := NewXMRigMiner()
	got := m.GetName()
	if got != "miner" {
		t.Errorf("Expected GetName() to return 'miner', got '%s'", got)
	}
}
// TestXMRigMiner_GetLatestVersion_Good verifies the GitHub release tag is
// parsed from a mocked API response.
func TestXMRigMiner_GetLatestVersion_Good(t *testing.T) {
	const releaseURL = "https://api.github.com/repos/xmrig/xmrig/releases/latest"

	prevClient := getHTTPClient()
	defer setHTTPClient(prevClient)
	setHTTPClient(newTestClient(func(req *http.Request) *http.Response {
		// Serve the release JSON only for the expected URL; 404 otherwise.
		body := `{"tag_name": "v6.18.0"}`
		status := http.StatusOK
		if req.URL.String() != releaseURL {
			body = "Not Found"
			status = http.StatusNotFound
		}
		return &http.Response{
			StatusCode: status,
			Body:       io.NopCloser(strings.NewReader(body)),
			Header:     make(http.Header),
		}
	}))

	version, err := NewXMRigMiner().GetLatestVersion()
	if err != nil {
		t.Fatalf("GetLatestVersion() returned an error: %v", err)
	}
	if version != "v6.18.0" {
		t.Errorf("Expected version 'v6.18.0', got '%s'", version)
	}
}
// TestXMRigMiner_GetLatestVersion_Bad verifies an HTTP failure surfaces as
// an error from GetLatestVersion.
func TestXMRigMiner_GetLatestVersion_Bad(t *testing.T) {
	prevClient := getHTTPClient()
	defer setHTTPClient(prevClient)
	// Every request gets a 404, regardless of URL.
	setHTTPClient(newTestClient(func(_ *http.Request) *http.Response {
		return &http.Response{
			StatusCode: http.StatusNotFound,
			Body:       io.NopCloser(strings.NewReader("Not Found")),
			Header:     make(http.Header),
		}
	}))

	if _, err := NewXMRigMiner().GetLatestVersion(); err == nil {
		t.Fatalf("GetLatestVersion() did not return an error")
	}
}
// TestXMRigMiner_Start_Stop_Good is intentionally skipped: it would launch
// a real miner process, which is not acceptable in automated test runs.
func TestXMRigMiner_Start_Stop_Good(t *testing.T) {
	t.Skip("Skipping test that runs miner process as per request")
}
// TestXMRigMiner_Start_Stop_Bad is intentionally skipped for the same
// reason as the Good variant: it would spawn a miner process.
func TestXMRigMiner_Start_Stop_Bad(t *testing.T) {
	t.Skip("Skipping test that attempts to spawn miner process")
}
// TestXMRigMiner_CheckInstallation verifies that a binary found on PATH is
// detected and its reported version parsed.
func TestXMRigMiner_CheckInstallation(t *testing.T) {
	binDir := t.TempDir()

	// Use "miner" since that's what NewXMRigMiner() sets as ExecutableName.
	exeName := "miner"
	script := "#!/bin/sh\necho 'XMRig 6.24.0'\n"
	if runtime.GOOS == "windows" {
		exeName += ".exe"
		// Dummy batch file that prints a version string.
		script = "@echo off\necho XMRig 6.24.0\n"
	}
	exePath := filepath.Join(binDir, exeName)
	if err := os.WriteFile(exePath, []byte(script), 0755); err != nil {
		t.Fatalf("failed to create dummy executable: %v", err)
	}

	// Prepend binDir to PATH so findMinerBinary can find it.
	prevPath := os.Getenv("PATH")
	t.Cleanup(func() { os.Setenv("PATH", prevPath) })
	os.Setenv("PATH", binDir+string(os.PathListSeparator)+prevPath)

	m := NewXMRigMiner()
	m.MinerBinary = "" // clear any binary path to force the PATH search

	details, err := m.CheckInstallation()
	if err != nil {
		t.Fatalf("CheckInstallation failed: %v", err)
	}
	if !details.IsInstalled {
		t.Error("Expected IsInstalled to be true")
	}
	if details.Version != "6.24.0" {
		t.Errorf("Expected version '6.24.0', got '%s'", details.Version)
	}
	// Windows may canonicalize the path (e.g. 8.3 names), so comparing the
	// base name is safer than full path equality.
	if got := filepath.Base(details.MinerBinary); got != exeName {
		t.Errorf("Expected binary name '%s', got '%s'", exeName, got)
	}
}
// TestXMRigMiner_GetStats_Good verifies that GetStats maps an XMRig
// /2/summary response into PerformanceMetrics (hashrate truncation,
// rejected = total - good, uptime, algorithm).
func TestXMRigMiner_GetStats_Good(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		summary := XMRigSummary{
			Hashrate: struct {
				Total   []float64 `json:"total"`
				Highest float64   `json:"highest"`
			}{Total: []float64{123.45}, Highest: 130.0},
			Results: struct {
				DiffCurrent int   `json:"diff_current"`
				SharesGood  int   `json:"shares_good"`
				SharesTotal int   `json:"shares_total"`
				AvgTime     int   `json:"avg_time"`
				AvgTimeMS   int   `json:"avg_time_ms"`
				HashesTotal int   `json:"hashes_total"`
				Best        []int `json:"best"`
			}{SharesGood: 10, SharesTotal: 12},
			Uptime: 600,
			Algo:   "rx/0",
		}
		// Surface encode failures instead of silently ignoring them.
		if err := json.NewEncoder(w).Encode(summary); err != nil {
			t.Errorf("failed to encode summary: %v", err)
		}
	}))
	defer server.Close()

	originalHTTPClient := getHTTPClient()
	setHTTPClient(server.Client())
	defer setHTTPClient(originalHTTPClient)

	miner := NewXMRigMiner()
	miner.Running = true // Mock running state

	// Point the miner's API at the test server. The listener address is
	// "host:port"; split it once (the previous version contained two dead
	// assignments that were immediately overwritten).
	parts := strings.Split(server.Listener.Addr().String(), ":")
	miner.API.ListenHost = parts[0]
	if _, err := fmt.Sscanf(parts[1], "%d", &miner.API.ListenPort); err != nil {
		t.Fatalf("failed to parse test server port: %v", err)
	}

	stats, err := miner.GetStats(context.Background())
	if err != nil {
		t.Fatalf("GetStats() returned an error: %v", err)
	}
	if stats.Hashrate != 123 {
		t.Errorf("Expected hashrate 123, got %d", stats.Hashrate)
	}
	if stats.Shares != 10 {
		t.Errorf("Expected 10 shares, got %d", stats.Shares)
	}
	if stats.Rejected != 2 {
		t.Errorf("Expected 2 rejected shares, got %d", stats.Rejected)
	}
	if stats.Uptime != 600 {
		t.Errorf("Expected uptime 600, got %d", stats.Uptime)
	}
	if stats.Algorithm != "rx/0" {
		t.Errorf("Expected algorithm 'rx/0', got '%s'", stats.Algorithm)
	}
}
// TestXMRigMiner_GetStats_Bad verifies GetStats fails when no API server
// is listening at the configured address.
func TestXMRigMiner_GetStats_Bad(t *testing.T) {
	// Don't start a server, so the API call will fail.
	m := NewXMRigMiner()
	m.Running = true // Mock running state
	m.API.ListenHost = "127.0.0.1"
	m.API.ListenPort = 9999 // A port that is unlikely to be in use

	if _, err := m.GetStats(context.Background()); err == nil {
		t.Fatalf("GetStats() did not return an error")
	}
}
// TestXMRigMiner_HashrateHistory_Good exercises hashrate history collection
// and the high- to low-resolution reduction.
func TestXMRigMiner_HashrateHistory_Good(t *testing.T) {
	m := NewXMRigMiner()
	base := time.Now()

	// Seed ten high-resolution points, one second apart.
	for i := 0; i < 10; i++ {
		point := HashratePoint{
			Timestamp: base.Add(time.Duration(i) * time.Second),
			Hashrate:  100 + i,
		}
		m.AddHashratePoint(point)
	}
	if got := len(m.GetHashrateHistory()); got != 10 {
		t.Fatalf("Expected 10 hashrate points, got %d", got)
	}

	// Move time forward so some points become eligible for reduction.
	m.ReduceHashrateHistory(base.Add(HighResolutionDuration + 30*time.Second))

	// After reduction the high-res window shrinks and low-res fills in.
	if n := m.GetHighResHistoryLength(); n >= 10 {
		t.Errorf("High-res history not reduced, size: %d", n)
	}
	if m.GetLowResHistoryLength() == 0 {
		t.Error("Low-res history not populated")
	}
	if len(m.GetHashrateHistory()) == 0 {
		t.Error("GetHashrateHistory returned empty slice after reduction")
	}
}