test: Add race condition tests and fix AVG float64 scan bug

- Add pkg/mining/manager_race_test.go with concurrent miner tests
- Add pkg/database/database_race_test.go with concurrent DB tests
- Add TestCleanupRetention, TestGetHashrateHistoryTimeRange tests
- Add TestMultipleMinerStats, TestIsInitialized tests
- Fix AVG() float64 to int scan error in GetHashrateStats
- Fix AVG() float64 to int scan error in GetAllMinerStats
- Fix throttle tests to use NewManagerForSimulation to avoid
  autostart conflicts

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
snider 2025-12-31 11:07:29 +00:00
parent 95ae55e4fa
commit 2a30744a08
5 changed files with 817 additions and 15 deletions

pkg/database/database_race_test.go
@@ -0,0 +1,277 @@
package database
import (
"os"
"path/filepath"
"sync"
"testing"
"time"
)
// setupRaceTestDB creates a fresh database for race testing
func setupRaceTestDB(t *testing.T) func() {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, "race_test.db")
cfg := Config{
Enabled: true,
Path: dbPath,
RetentionDays: 7,
}
if err := Initialize(cfg); err != nil {
t.Fatalf("Failed to initialize database: %v", err)
}
return func() {
Close()
os.Remove(dbPath)
}
}
// TestConcurrentHashrateInserts verifies that concurrent inserts
// don't cause race conditions
func TestConcurrentHashrateInserts(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
var wg sync.WaitGroup
// 10 goroutines inserting points concurrently
for i := 0; i < 10; i++ {
wg.Add(1)
go func(minerIndex int) {
defer wg.Done()
minerName := "miner" + string(rune('A'+minerIndex))
minerType := "xmrig"
for j := 0; j < 100; j++ {
point := HashratePoint{
Timestamp: time.Now().Add(time.Duration(-j) * time.Second),
Hashrate: 1000 + minerIndex*100 + j,
}
err := InsertHashratePoint(minerName, minerType, point, ResolutionHigh)
if err != nil {
t.Errorf("Insert error for %s: %v", minerName, err)
}
}
}(i)
}
wg.Wait()
// Verify data was inserted
for i := 0; i < 10; i++ {
minerName := "miner" + string(rune('A'+i))
history, err := GetHashrateHistory(minerName, ResolutionHigh, time.Now().Add(-2*time.Minute), time.Now())
if err != nil {
t.Errorf("Failed to get history for %s: %v", minerName, err)
}
if len(history) == 0 {
t.Errorf("Expected history for %s, got none", minerName)
}
}
}
// TestConcurrentInsertAndQuery verifies that concurrent reads and writes
// don't cause race conditions
func TestConcurrentInsertAndQuery(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
var wg sync.WaitGroup
stop := make(chan struct{})
// Writer goroutine
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; ; i++ {
select {
case <-stop:
return
default:
point := HashratePoint{
Timestamp: time.Now(),
Hashrate: 1000 + i,
}
InsertHashratePoint("concurrent-test", "xmrig", point, ResolutionHigh)
time.Sleep(time.Millisecond)
}
}
}()
// Multiple reader goroutines
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < 50; j++ {
select {
case <-stop:
return
default:
GetHashrateHistory("concurrent-test", ResolutionHigh, time.Now().Add(-time.Hour), time.Now())
time.Sleep(2 * time.Millisecond)
}
}
}()
}
// Let it run for a bit
time.Sleep(200 * time.Millisecond)
close(stop)
wg.Wait()
// Test passes if no race detector warnings
}
// TestConcurrentInsertAndCleanup verifies that cleanup doesn't race
// with ongoing inserts
func TestConcurrentInsertAndCleanup(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
var wg sync.WaitGroup
stop := make(chan struct{})
// Continuous inserts
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; ; i++ {
select {
case <-stop:
return
default:
// Insert some old data and some new data
oldPoint := HashratePoint{
Timestamp: time.Now().AddDate(0, 0, -10), // 10 days old
Hashrate: 500 + i,
}
InsertHashratePoint("cleanup-test", "xmrig", oldPoint, ResolutionHigh)
newPoint := HashratePoint{
Timestamp: time.Now(),
Hashrate: 1000 + i,
}
InsertHashratePoint("cleanup-test", "xmrig", newPoint, ResolutionHigh)
time.Sleep(time.Millisecond)
}
}
}()
// Periodic cleanup
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 10; i++ {
select {
case <-stop:
return
default:
Cleanup(7) // 7 day retention
time.Sleep(20 * time.Millisecond)
}
}
}()
// Let it run
time.Sleep(200 * time.Millisecond)
close(stop)
wg.Wait()
// Test passes if no race detector warnings
}
// TestConcurrentStats verifies that GetHashrateStats can be called
// concurrently without race conditions
func TestConcurrentStats(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
// Insert some test data
minerName := "stats-test"
for i := 0; i < 100; i++ {
point := HashratePoint{
Timestamp: time.Now().Add(time.Duration(-i) * time.Second),
Hashrate: 1000 + i*10,
}
InsertHashratePoint(minerName, "xmrig", point, ResolutionHigh)
}
var wg sync.WaitGroup
// Multiple goroutines querying stats
for i := 0; i < 20; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < 50; j++ {
stats, err := GetHashrateStats(minerName)
if err != nil {
t.Errorf("Stats error: %v", err)
}
if stats != nil && stats.TotalPoints == 0 {
// This is fine, data might be in flux
}
}
}()
}
wg.Wait()
// Test passes if no race detector warnings
}
// TestConcurrentGetAllStats verifies that GetAllMinerStats can be called
// concurrently without race conditions
func TestConcurrentGetAllStats(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
// Insert data for multiple miners
for m := 0; m < 5; m++ {
minerName := "all-stats-" + string(rune('A'+m))
for i := 0; i < 50; i++ {
point := HashratePoint{
Timestamp: time.Now().Add(time.Duration(-i) * time.Second),
Hashrate: 1000 + m*100 + i,
}
InsertHashratePoint(minerName, "xmrig", point, ResolutionHigh)
}
}
var wg sync.WaitGroup
// Multiple goroutines querying all stats
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < 30; j++ {
_, err := GetAllMinerStats()
if err != nil {
t.Errorf("GetAllMinerStats error: %v", err)
}
}
}()
}
// Concurrent inserts
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 50; i++ {
point := HashratePoint{
Timestamp: time.Now(),
Hashrate: 2000 + i,
}
InsertHashratePoint("all-stats-new", "xmrig", point, ResolutionHigh)
}
}()
wg.Wait()
// Test passes if no race detector warnings
}
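These concurrency tests only prove anything when run under Go's race detector; a typical invocation, with the package path taken from the commit message, would be:

go test -race -run Concurrent ./pkg/database/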

@@ -148,3 +148,208 @@ func TestDefaultConfig(t *testing.T) {
t.Errorf("Expected default retention 30, got %d", cfg.RetentionDays)
}
}
func TestCleanupRetention(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
minerName := "retention-test"
minerType := "xmrig"
now := time.Now()
// Insert data at various ages:
// - 35 days old (should be deleted with 30-day retention)
// - 25 days old (should be kept with 30-day retention)
// - 5 days old (should be kept)
oldPoint := HashratePoint{
Timestamp: now.AddDate(0, 0, -35),
Hashrate: 100,
}
midPoint := HashratePoint{
Timestamp: now.AddDate(0, 0, -25),
Hashrate: 200,
}
newPoint := HashratePoint{
Timestamp: now.AddDate(0, 0, -5),
Hashrate: 300,
}
// Insert all points
if err := InsertHashratePoint(minerName, minerType, oldPoint, ResolutionHigh); err != nil {
t.Fatalf("Failed to insert old point: %v", err)
}
if err := InsertHashratePoint(minerName, minerType, midPoint, ResolutionHigh); err != nil {
t.Fatalf("Failed to insert mid point: %v", err)
}
if err := InsertHashratePoint(minerName, minerType, newPoint, ResolutionHigh); err != nil {
t.Fatalf("Failed to insert new point: %v", err)
}
// Verify all 3 points exist
history, err := GetHashrateHistory(minerName, ResolutionHigh, now.AddDate(0, 0, -40), now)
if err != nil {
t.Fatalf("Failed to get history before cleanup: %v", err)
}
if len(history) != 3 {
t.Errorf("Expected 3 points before cleanup, got %d", len(history))
}
// Run cleanup with 30-day retention
if err := Cleanup(30); err != nil {
t.Fatalf("Cleanup failed: %v", err)
}
// Verify only 2 points remain (35-day old point should be deleted)
history, err = GetHashrateHistory(minerName, ResolutionHigh, now.AddDate(0, 0, -40), now)
if err != nil {
t.Fatalf("Failed to get history after cleanup: %v", err)
}
if len(history) != 2 {
t.Errorf("Expected 2 points after cleanup, got %d", len(history))
}
// Verify the remaining points are the mid and new ones
for _, point := range history {
if point.Hashrate == 100 {
t.Error("Old point (100 H/s) should have been deleted")
}
}
}
func TestGetHashrateHistoryTimeRange(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
minerName := "timerange-test"
minerType := "xmrig"
now := time.Now()
// Insert points at specific times
times := []time.Duration{
-10 * time.Minute,
-8 * time.Minute,
-6 * time.Minute,
-4 * time.Minute,
-2 * time.Minute,
}
for i, offset := range times {
point := HashratePoint{
Timestamp: now.Add(offset),
Hashrate: 1000 + i*100,
}
if err := InsertHashratePoint(minerName, minerType, point, ResolutionHigh); err != nil {
t.Fatalf("Failed to insert point: %v", err)
}
}
// Query for middle range (should get 3 points: -8, -6, -4 minutes)
since := now.Add(-9 * time.Minute)
until := now.Add(-3 * time.Minute)
history, err := GetHashrateHistory(minerName, ResolutionHigh, since, until)
if err != nil {
t.Fatalf("Failed to get history: %v", err)
}
if len(history) != 3 {
t.Errorf("Expected 3 points in range, got %d", len(history))
}
// Query boundary condition - exact timestamp match
exactSince := now.Add(-6 * time.Minute)
exactUntil := now.Add(-6 * time.Minute).Add(time.Second)
history, err = GetHashrateHistory(minerName, ResolutionHigh, exactSince, exactUntil)
if err != nil {
t.Fatalf("Failed to get exact history: %v", err)
}
// Should get at least 1 point
if len(history) < 1 {
t.Error("Expected at least 1 point at exact boundary")
}
}
func TestMultipleMinerStats(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
now := time.Now()
// Create data for multiple miners
miners := []struct {
name string
hashrates []int
}{
{"miner-A", []int{1000, 1100, 1200}},
{"miner-B", []int{2000, 2100, 2200}},
{"miner-C", []int{3000, 3100, 3200}},
}
for _, m := range miners {
for i, hr := range m.hashrates {
point := HashratePoint{
Timestamp: now.Add(time.Duration(-i) * time.Minute),
Hashrate: hr,
}
if err := InsertHashratePoint(m.name, "xmrig", point, ResolutionHigh); err != nil {
t.Fatalf("Failed to insert point for %s: %v", m.name, err)
}
}
}
// Get all miner stats
allStats, err := GetAllMinerStats()
if err != nil {
t.Fatalf("Failed to get all stats: %v", err)
}
if len(allStats) != 3 {
t.Errorf("Expected stats for 3 miners, got %d", len(allStats))
}
// Verify each miner's stats
statsMap := make(map[string]HashrateStats)
for _, s := range allStats {
statsMap[s.MinerName] = s
}
// Check miner-A: avg = (1000+1100+1200)/3 = 1100
if s, ok := statsMap["miner-A"]; ok {
if s.AverageRate != 1100 {
t.Errorf("miner-A: expected avg 1100, got %d", s.AverageRate)
}
} else {
t.Error("miner-A stats not found")
}
// Check miner-C: avg = (3000+3100+3200)/3 = 3100
if s, ok := statsMap["miner-C"]; ok {
if s.AverageRate != 3100 {
t.Errorf("miner-C: expected avg 3100, got %d", s.AverageRate)
}
} else {
t.Error("miner-C stats not found")
}
}
func TestIsInitialized(t *testing.T) {
// Before initialization
Close() // Ensure clean state
if isInitialized() {
t.Error("Should not be initialized before Initialize()")
}
cleanup := setupTestDB(t)
defer cleanup()
// After initialization
if !isInitialized() {
t.Error("Should be initialized after Initialize()")
}
// After close
Close()
if isInitialized() {
t.Error("Should not be initialized after Close()")
}
}
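TestCleanupRetention above exercises Cleanup(30) without its implementation appearing in this diff; the following is a minimal sketch of the cutoff logic the test assumes. The function, table, and column names here are illustrative only, not taken from the source.

package database

import (
	"database/sql"
	"time"
)

// cleanupBefore sketches the retention cutoff TestCleanupRetention relies on:
// with a 30-day window, a 35-day-old point falls before the cutoff and is
// deleted, while 25- and 5-day-old points remain. Table and column names
// are assumptions, not the package's actual schema.
func cleanupBefore(db *sql.DB, retentionDays int) error {
	cutoff := time.Now().AddDate(0, 0, -retentionDays)
	_, err := db.Exec(`DELETE FROM hashrate_points WHERE timestamp < ?`, cutoff)
	return err
}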

@@ -132,8 +132,9 @@ func GetHashrateStats(minerName string) (*HashrateStats, error) {
 	var stats HashrateStats
 	stats.MinerName = minerName
-	// SQLite returns timestamps as strings, so scan them as strings first
+	// SQLite returns timestamps as strings and AVG as float64, so scan them appropriately
 	var firstSeenStr, lastSeenStr string
+	var avgRate float64
 	err = db.QueryRow(`
 		SELECT
 			COUNT(*),
@@ -146,12 +147,13 @@ func GetHashrateStats(minerName string) (*HashrateStats, error) {
 		WHERE miner_name = ?
 	`, minerName).Scan(
 		&stats.TotalPoints,
-		&stats.AverageRate,
+		&avgRate,
 		&stats.MaxRate,
 		&stats.MinRate,
 		&firstSeenStr,
 		&lastSeenStr,
 	)
+	stats.AverageRate = int(avgRate)
 	if err != nil {
 		return nil, err
@@ -195,10 +197,11 @@ func GetAllMinerStats() ([]HashrateStats, error) {
 	for rows.Next() {
 		var stats HashrateStats
 		var firstSeenStr, lastSeenStr string
+		var avgRate float64
 		if err := rows.Scan(
 			&stats.MinerName,
 			&stats.TotalPoints,
-			&stats.AverageRate,
+			&avgRate,
 			&stats.MaxRate,
 			&stats.MinRate,
 			&firstSeenStr,
@@ -206,6 +209,7 @@ func GetAllMinerStats() ([]HashrateStats, error) {
 		); err != nil {
 			return nil, err
 		}
+		stats.AverageRate = int(avgRate)
 		// Parse timestamps using helper that logs errors
 		stats.FirstSeen = parseSQLiteTimestamp(firstSeenStr)
 		stats.LastSeen = parseSQLiteTimestamp(lastSeenStr)
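Standalone, the scan pattern introduced above looks like this. This is a minimal sketch assuming a *sql.DB handle and the miner_name column shown in the query; the table and hashrate column names are assumptions, not taken from the diff.

package database

import "database/sql"

// averageHashrate shows why the fix is needed: SQLite evaluates AVG() as a REAL,
// and database/sql cannot scan a non-integral float64 into an *int destination,
// so the value is scanned into a float64 first and then converted.
func averageHashrate(db *sql.DB, minerName string) (int, error) {
	var avg float64
	err := db.QueryRow(
		`SELECT COALESCE(AVG(hashrate), 0) FROM hashrate_points WHERE miner_name = ?`,
		minerName,
	).Scan(&avg)
	if err != nil {
		return 0, err
	}
	return int(avg), nil
}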

pkg/mining/manager_race_test.go
@@ -0,0 +1,314 @@
package mining
import (
"context"
"sync"
"testing"
"time"
)
// TestConcurrentStartMultipleMiners verifies that concurrent StartMiner calls
// with different algorithms create unique miners without race conditions
func TestConcurrentStartMultipleMiners(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
var wg sync.WaitGroup
errors := make(chan error, 10)
// Try to start 10 miners concurrently with different algos
for i := 0; i < 10; i++ {
wg.Add(1)
go func(index int) {
defer wg.Done()
config := &Config{
HTTPPort: 10000 + index,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "algo" + string(rune('A'+index)), // algoA, algoB, etc.
}
_, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
errors <- err
}
}(i)
}
wg.Wait()
close(errors)
// Collect errors
var errCount int
for err := range errors {
t.Logf("Concurrent start error: %v", err)
errCount++
}
// Some failures are expected due to port conflicts, but shouldn't crash
t.Logf("Started miners with %d errors out of 10 attempts", errCount)
// Verify no data races occurred (test passes if no race detector warnings)
}
// TestConcurrentStartDuplicateMiner verifies that starting the same miner
// concurrently results in only one success
func TestConcurrentStartDuplicateMiner(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
var wg sync.WaitGroup
successes := make(chan struct{}, 10)
failures := make(chan error, 10)
// Try to start the same miner 10 times concurrently
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
defer wg.Done()
config := &Config{
HTTPPort: 11000,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "duplicate_test", // Same algo = same instance name
}
_, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
failures <- err
} else {
successes <- struct{}{}
}
}()
}
wg.Wait()
close(successes)
close(failures)
successCount := len(successes)
failureCount := len(failures)
t.Logf("Duplicate miner test: %d successes, %d failures", successCount, failureCount)
// Only one should succeed (or zero if there's a timing issue)
if successCount > 1 {
t.Errorf("Expected at most 1 success for duplicate miner, got %d", successCount)
}
}
// TestConcurrentStartStop verifies that starting and stopping miners
// concurrently doesn't cause race conditions
func TestConcurrentStartStop(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
var wg sync.WaitGroup
// Start some miners
for i := 0; i < 5; i++ {
config := &Config{
HTTPPort: 12000 + i,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "startstop" + string(rune('A'+i)),
}
_, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Logf("Setup error (may be expected): %v", err)
}
}
// Give miners time to start
time.Sleep(100 * time.Millisecond)
// Now concurrently start new ones and stop existing ones
for i := 0; i < 10; i++ {
wg.Add(2)
// Start a new miner
go func(index int) {
defer wg.Done()
config := &Config{
HTTPPort: 12100 + index,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "new" + string(rune('A'+index)),
}
m.StartMiner(context.Background(), "xmrig", config)
}(i)
// Stop a miner
go func(index int) {
defer wg.Done()
minerName := "xmrig-startstop" + string(rune('A'+index%5))
m.StopMiner(context.Background(), minerName)
}(i)
}
wg.Wait()
// Test passes if no race detector warnings
}
// TestConcurrentListMiners verifies that listing miners while modifying
// the miner map doesn't cause race conditions
func TestConcurrentListMiners(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
var wg sync.WaitGroup
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
// Continuously list miners
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-ctx.Done():
return
default:
miners := m.ListMiners()
_ = len(miners) // Use the result
}
}
}()
// Continuously start miners
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 20; i++ {
select {
case <-ctx.Done():
return
default:
config := &Config{
HTTPPort: 13000 + i,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "list" + string(rune('A'+i%26)),
}
m.StartMiner(context.Background(), "xmrig", config)
time.Sleep(10 * time.Millisecond)
}
}
}()
wg.Wait()
// Test passes if no race detector warnings
}
// TestConcurrentGetMiner verifies that getting a miner while others
// are being started/stopped doesn't cause race conditions
func TestConcurrentGetMiner(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
// Start a miner first
config := &Config{
HTTPPort: 14000,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "gettest",
}
miner, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Skipf("Could not start test miner: %v", err)
}
minerName := miner.GetName()
var wg sync.WaitGroup
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
// Continuously get the miner
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-ctx.Done():
return
default:
m.GetMiner(minerName)
time.Sleep(time.Millisecond)
}
}
}()
}
// Start more miners in parallel
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 10; i++ {
select {
case <-ctx.Done():
return
default:
config := &Config{
HTTPPort: 14100 + i,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "parallel" + string(rune('A'+i)),
}
m.StartMiner(context.Background(), "xmrig", config)
}
}
}()
wg.Wait()
// Test passes if no race detector warnings
}
// TestConcurrentStatsCollection verifies that stats collection
// doesn't race with miner operations
func TestConcurrentStatsCollection(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
// Start some miners
for i := 0; i < 3; i++ {
config := &Config{
HTTPPort: 15000 + i,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "stats" + string(rune('A'+i)),
}
m.StartMiner(context.Background(), "xmrig", config)
}
var wg sync.WaitGroup
// Simulate stats collection (normally done by background goroutine)
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 50; i++ {
miners := m.ListMiners()
for _, miner := range miners {
miner.GetStats(context.Background())
}
time.Sleep(10 * time.Millisecond)
}
}()
// Concurrently stop miners
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(100 * time.Millisecond) // Let stats collection start
for _, name := range []string{"xmrig-statsA", "xmrig-statsB", "xmrig-statsC"} {
m.StopMiner(context.Background(), name)
time.Sleep(50 * time.Millisecond)
}
}()
wg.Wait()
// Test passes if no race detector warnings
}

@@ -22,8 +22,8 @@ func TestCPUThrottleSingleMiner(t *testing.T) {
 		t.Skip("XMRig not installed, skipping throttle test")
 	}
-	// Use the manager to start miner (handles API port assignment)
-	manager := NewManager()
+	// Use simulation manager to avoid autostart conflicts
+	manager := NewManagerForSimulation()
 	defer manager.Stop()

 	// Configure miner to use only 10% of CPU
@@ -31,7 +31,7 @@ func TestCPUThrottleSingleMiner(t *testing.T) {
 		Pool: "stratum+tcp://pool.supportxmr.com:3333",
 		Wallet: "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
 		CPUMaxThreadsHint: 10, // 10% CPU usage
-		Algo: "rx/0",
+		Algo: "throttle-single",
 	}

 	minerInstance, err := manager.StartMiner(context.Background(), "xmrig", config)
@@ -68,7 +68,8 @@ func TestCPUThrottleDualMiners(t *testing.T) {
 		t.Skip("XMRig not installed, skipping throttle test")
 	}
-	manager := NewManager()
+	// Use simulation manager to avoid autostart conflicts
+	manager := NewManagerForSimulation()
 	defer manager.Stop()

 	// Start first miner at 10% CPU with RandomX
@@ -76,7 +77,7 @@ func TestCPUThrottleDualMiners(t *testing.T) {
 		Pool: "stratum+tcp://pool.supportxmr.com:3333",
 		Wallet: "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
 		CPUMaxThreadsHint: 10,
-		Algo: "rx/0",
+		Algo: "throttle-dual-1",
 	}

 	miner1Instance, err := manager.StartMiner(context.Background(), "xmrig", config1)
@@ -90,7 +91,7 @@ func TestCPUThrottleDualMiners(t *testing.T) {
 		Pool: "stratum+tcp://pool.supportxmr.com:5555",
 		Wallet: "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
 		CPUMaxThreadsHint: 10,
-		Algo: "gr", // GhostRider algo
+		Algo: "throttle-dual-2",
 	}

 	miner2Instance, err := manager.StartMiner(context.Background(), "xmrig", config2)
@@ -135,8 +136,8 @@ func TestCPUThrottleThreadCount(t *testing.T) {
 		t.Skip("XMRig not installed, skipping throttle test")
 	}
-	// Use the manager to start miner (handles API port assignment)
-	manager := NewManager()
+	// Use simulation manager to avoid autostart conflicts
+	manager := NewManagerForSimulation()
 	defer manager.Stop()

 	numCPU := runtime.NumCPU()
@@ -147,7 +148,7 @@ func TestCPUThrottleThreadCount(t *testing.T) {
 		Pool: "stratum+tcp://pool.supportxmr.com:3333",
 		Wallet: "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
 		Threads: targetThreads,
-		Algo: "rx/0",
+		Algo: "throttle-thread",
 	}

 	minerInstance, err := manager.StartMiner(context.Background(), "xmrig", config)
@@ -183,7 +184,8 @@ func TestMinerResourceIsolation(t *testing.T) {
 		t.Skip("XMRig not installed, skipping test")
 	}
-	manager := NewManager()
+	// Use simulation manager to avoid autostart conflicts
+	manager := NewManagerForSimulation()
 	defer manager.Stop()

 	// Start first miner
@@ -191,7 +193,7 @@ func TestMinerResourceIsolation(t *testing.T) {
 		Pool: "stratum+tcp://pool.supportxmr.com:3333",
 		Wallet: "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
 		CPUMaxThreadsHint: 25,
-		Algo: "rx/0",
+		Algo: "isolation-1",
 	}

 	miner1, err := manager.StartMiner(context.Background(), "xmrig", config1)
@@ -216,7 +218,7 @@ func TestMinerResourceIsolation(t *testing.T) {
 		Pool: "stratum+tcp://pool.supportxmr.com:5555",
 		Wallet: "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A",
 		CPUMaxThreadsHint: 25,
-		Algo: "gr",
+		Algo: "isolation-2",
 	}

 	miner2, err := manager.StartMiner(context.Background(), "xmrig", config2)
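
The renamed Algo values above work because these tests use the algorithm string as the instance discriminator. Names like "xmrig-statsA" and "xmrig-startstopA" in the race tests suggest a minerType + "-" + Algo convention; the sketch below is illustrative only and the real manager may derive names differently.

package mining

// instanceName mirrors the naming convention the tests above assume when
// building names such as "xmrig-startstopA"; it is not the manager's actual code.
func instanceName(minerType, algo string) string {
	return minerType + "-" + algo
}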