feat: Add context propagation, state sync, and tests

- Add context.Context to ManagerInterface methods (StartMiner, StopMiner, UninstallMiner)
- Add WebSocket state sync on client connect (sends current miner states)
- Add EventStateSync event type and SetStateProvider method
- Add manager lifecycle tests (idempotent stop, context cancellation, shutdown timeout)
- Add database tests (initialization, hashrate storage, stats)
- Add EventHub tests (creation, broadcast, client count, state provider)
- Update all test files for new context-aware API

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: snider
Date:   2025-12-31 10:10:39 +00:00
Parent: 0c8b2d999b
Commit: b454bbd6d6

10 changed files with 633 additions and 52 deletions
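Reviewer note: a minimal caller sketch of the new context-aware ManagerInterface. Only the method signatures and the Config fields come from this diff; the import path, pool, and wallet values are placeholders, not part of the commit.

```go
package main

import (
	"context"
	"log"
	"time"

	"example.com/miner-daemon/pkg/mining" // placeholder import path
)

func startAndStop(m mining.ManagerInterface) {
	// Bound the start call; StartMiner returns ctx.Err() if ctx is already done.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cfg := &mining.Config{
		HTTPPort: 9000, // placeholder values
		Pool:     "pool.example.com:3333",
		Wallet:   "WALLET_ADDRESS",
	}

	miner, err := m.StartMiner(ctx, "xmrig", cfg)
	if err != nil {
		log.Fatalf("start failed: %v", err)
	}

	// Stop with a fresh context so a cancelled request context does not block cleanup.
	if err := m.StopMiner(context.Background(), miner.GetName()); err != nil {
		log.Printf("stop failed: %v", err)
	}
}
```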

@@ -0,0 +1,150 @@
package database
import (
"os"
"path/filepath"
"testing"
"time"
)
func setupTestDB(t *testing.T) func() {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, "test.db")
cfg := Config{
Enabled: true,
Path: dbPath,
RetentionDays: 7,
}
if err := Initialize(cfg); err != nil {
t.Fatalf("Failed to initialize database: %v", err)
}
return func() {
Close()
os.Remove(dbPath)
}
}
func TestInitialize(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
// Database should be initialized
dbMu.RLock()
initialized := db != nil
dbMu.RUnlock()
if !initialized {
t.Error("Database should be initialized")
}
}
func TestInitialize_Disabled(t *testing.T) {
cfg := Config{
Enabled: false,
}
if err := Initialize(cfg); err != nil {
t.Errorf("Initialize with disabled should not error: %v", err)
}
}
func TestClose(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
// Close should not error
if err := Close(); err != nil {
t.Errorf("Close failed: %v", err)
}
}
func TestHashrateStorage(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
// Store some hashrate data
minerName := "test-miner"
minerType := "xmrig"
now := time.Now()
points := []HashratePoint{
{Timestamp: now.Add(-5 * time.Minute), Hashrate: 1000},
{Timestamp: now.Add(-4 * time.Minute), Hashrate: 1100},
{Timestamp: now.Add(-3 * time.Minute), Hashrate: 1200},
}
for _, p := range points {
if err := InsertHashratePoint(minerName, minerType, p, ResolutionHigh); err != nil {
t.Fatalf("Failed to store hashrate point: %v", err)
}
}
// Retrieve the data
retrieved, err := GetHashrateHistory(minerName, ResolutionHigh, now.Add(-10*time.Minute), now)
if err != nil {
t.Fatalf("Failed to get hashrate history: %v", err)
}
if len(retrieved) != 3 {
t.Errorf("Expected 3 points, got %d", len(retrieved))
}
}
func TestGetHashrateStats(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
minerName := "stats-test-miner"
minerType := "xmrig"
now := time.Now()
// Store some test data
points := []HashratePoint{
{Timestamp: now.Add(-2 * time.Minute), Hashrate: 500},
{Timestamp: now.Add(-1 * time.Minute), Hashrate: 1000},
{Timestamp: now, Hashrate: 1500},
}
for _, p := range points {
if err := InsertHashratePoint(minerName, minerType, p, ResolutionHigh); err != nil {
t.Fatalf("Failed to store point: %v", err)
}
}
stats, err := GetHashrateStats(minerName)
if err != nil {
t.Fatalf("Failed to get stats: %v", err)
}
if stats.TotalPoints != 3 {
t.Errorf("Expected 3 total points, got %d", stats.TotalPoints)
}
// Average should be (500+1000+1500)/3 = 1000
if stats.AverageRate != 1000 {
t.Errorf("Expected average rate 1000, got %d", stats.AverageRate)
}
if stats.MaxRate != 1500 {
t.Errorf("Expected max rate 1500, got %d", stats.MaxRate)
}
if stats.MinRate != 500 {
t.Errorf("Expected min rate 500, got %d", stats.MinRate)
}
}
func TestDefaultConfig(t *testing.T) {
cfg := defaultConfig()
if !cfg.Enabled {
t.Error("Default config should have Enabled=true")
}
if cfg.RetentionDays != 30 {
t.Errorf("Expected default retention 30, got %d", cfg.RetentionDays)
}
}

@@ -37,7 +37,7 @@ func TestDualMiningCPUAndGPU(t *testing.T) {
Devices: "0", // Device 0 only - user must pick
}
minerInstance, err := manager.StartMiner("xmrig", config)
minerInstance, err := manager.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Fatalf("Failed to start dual miner: %v", err)
}
@@ -70,7 +70,7 @@ func TestDualMiningCPUAndGPU(t *testing.T) {
}
// Clean up
manager.StopMiner(minerInstance.GetName())
manager.StopMiner(context.Background(), minerInstance.GetName())
}
// TestGPUDeviceSelection tests that GPU mining requires explicit device selection

@@ -24,6 +24,7 @@ const (
// System events
EventPong EventType = "pong"
EventStateSync EventType = "state.sync" // Initial state on connect/reconnect
)
// Event represents a mining event that can be broadcast to clients
@@ -62,6 +63,9 @@ type wsClient struct {
closeOnce sync.Once
}
// StateProvider is a function that returns the current state for sync
type StateProvider func() interface{}
// EventHub manages WebSocket connections and event broadcasting
type EventHub struct {
// Registered clients
@@ -84,6 +88,9 @@ type EventHub struct {
// Connection limits
maxConnections int
// State provider for sync on connect
stateProvider StateProvider
}
// DefaultMaxConnections is the default maximum WebSocket connections
@@ -126,9 +133,34 @@ func (h *EventHub) Run() {
case client := <-h.register:
h.mu.Lock()
h.clients[client] = true
stateProvider := h.stateProvider
h.mu.Unlock()
log.Printf("[EventHub] Client connected (total: %d)", len(h.clients))
// Send initial state sync if provider is set
if stateProvider != nil {
go func(c *wsClient) {
state := stateProvider()
if state != nil {
event := Event{
Type: EventStateSync,
Timestamp: time.Now(),
Data: state,
}
data, err := json.Marshal(event)
if err != nil {
log.Printf("[EventHub] Failed to marshal state sync: %v", err)
return
}
select {
case c.send <- data:
default:
// Client buffer full
}
}
}(client)
}
case client := <-h.unregister:
h.mu.Lock()
if _, ok := h.clients[client]; ok {
@@ -208,6 +240,13 @@ func (h *EventHub) Stop() {
close(h.stop)
}
// SetStateProvider sets the function that provides current state for new clients
func (h *EventHub) SetStateProvider(provider StateProvider) {
h.mu.Lock()
defer h.mu.Unlock()
h.stateProvider = provider
}
// Broadcast sends an event to all subscribed clients
func (h *EventHub) Broadcast(event Event) {
if event.Timestamp.IsZero() {

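A sketch of the client side of the new state sync. Assumptions not shown in this diff: the WebSocket endpoint path and lowercase `type`/`timestamp`/`data` JSON tags on Event. On connect, the hub pushes a `state.sync` event built by the StateProvider wired up in NewService (see the service diff below).

```go
package main

import (
	"encoding/json"
	"log"
	"time"

	"github.com/gorilla/websocket"
)

// wireEvent mirrors the assumed JSON shape of mining.Event.
type wireEvent struct {
	Type      string          `json:"type"`
	Timestamp time.Time       `json:"timestamp"`
	Data      json.RawMessage `json:"data"`
}

func main() {
	// Endpoint path is a placeholder; use the daemon's real WebSocket route.
	conn, _, err := websocket.DefaultDialer.Dial("ws://127.0.0.1:8080/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			return
		}
		var ev wireEvent
		if err := json.Unmarshal(msg, &ev); err != nil {
			continue
		}
		if ev.Type == "state.sync" {
			// Data carries the {"miners": [...]} payload built by the StateProvider.
			log.Printf("initial miner state: %s", ev.Data)
		}
	}
}
```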
pkg/mining/events_test.go (new file, 201 lines)

@@ -0,0 +1,201 @@
package mining
import (
"encoding/json"
"sync"
"testing"
"time"
"github.com/gorilla/websocket"
)
func TestNewEventHub(t *testing.T) {
hub := NewEventHub()
if hub == nil {
t.Fatal("NewEventHub returned nil")
}
if hub.clients == nil {
t.Error("clients map should be initialized")
}
if hub.maxConnections != DefaultMaxConnections {
t.Errorf("Expected maxConnections %d, got %d", DefaultMaxConnections, hub.maxConnections)
}
}
func TestNewEventHubWithOptions(t *testing.T) {
hub := NewEventHubWithOptions(50)
if hub.maxConnections != 50 {
t.Errorf("Expected maxConnections 50, got %d", hub.maxConnections)
}
// Test with invalid value
hub2 := NewEventHubWithOptions(0)
if hub2.maxConnections != DefaultMaxConnections {
t.Errorf("Expected default maxConnections for 0, got %d", hub2.maxConnections)
}
hub3 := NewEventHubWithOptions(-1)
if hub3.maxConnections != DefaultMaxConnections {
t.Errorf("Expected default maxConnections for -1, got %d", hub3.maxConnections)
}
}
func TestEventHubBroadcast(t *testing.T) {
hub := NewEventHub()
go hub.Run()
defer hub.Stop()
// Create an event
event := Event{
Type: EventMinerStarted,
Timestamp: time.Now(),
Data: MinerEventData{Name: "test-miner"},
}
// Broadcast should not block even with no clients
done := make(chan struct{})
go func() {
hub.Broadcast(event)
close(done)
}()
select {
case <-done:
// Success
case <-time.After(time.Second):
t.Error("Broadcast blocked unexpectedly")
}
}
func TestEventHubClientCount(t *testing.T) {
hub := NewEventHub()
go hub.Run()
defer hub.Stop()
// Initial count should be 0
if count := hub.ClientCount(); count != 0 {
t.Errorf("Expected 0 clients, got %d", count)
}
}
func TestEventHubStop(t *testing.T) {
hub := NewEventHub()
go hub.Run()
// Stop should not panic
defer func() {
if r := recover(); r != nil {
t.Errorf("Stop panicked: %v", r)
}
}()
hub.Stop()
// Give time for cleanup
time.Sleep(50 * time.Millisecond)
}
func TestNewEvent(t *testing.T) {
data := MinerEventData{Name: "test-miner"}
event := NewEvent(EventMinerStarted, data)
if event.Type != EventMinerStarted {
t.Errorf("Expected type %s, got %s", EventMinerStarted, event.Type)
}
if event.Timestamp.IsZero() {
t.Error("Timestamp should not be zero")
}
eventData, ok := event.Data.(MinerEventData)
if !ok {
t.Error("Data should be MinerEventData")
}
if eventData.Name != "test-miner" {
t.Errorf("Expected miner name 'test-miner', got '%s'", eventData.Name)
}
}
func TestEventJSON(t *testing.T) {
event := Event{
Type: EventMinerStats,
Timestamp: time.Now(),
Data: MinerStatsData{
Name: "test-miner",
Hashrate: 1000,
Shares: 10,
Rejected: 1,
Uptime: 3600,
},
}
data, err := json.Marshal(event)
if err != nil {
t.Fatalf("Failed to marshal event: %v", err)
}
var decoded Event
if err := json.Unmarshal(data, &decoded); err != nil {
t.Fatalf("Failed to unmarshal event: %v", err)
}
if decoded.Type != EventMinerStats {
t.Errorf("Expected type %s, got %s", EventMinerStats, decoded.Type)
}
}
func TestSetStateProvider(t *testing.T) {
hub := NewEventHub()
go hub.Run()
defer hub.Stop()
called := false
var mu sync.Mutex
provider := func() interface{} {
mu.Lock()
called = true
mu.Unlock()
return map[string]string{"status": "ok"}
}
hub.SetStateProvider(provider)
// The provider should be set but not called until a client connects
mu.Lock()
wasCalled := called
mu.Unlock()
if wasCalled {
t.Error("Provider should not be called until client connects")
}
}
// MockWebSocketConn provides a minimal mock for testing
type MockWebSocketConn struct {
websocket.Conn
written [][]byte
mu sync.Mutex
}
func TestEventTypes(t *testing.T) {
types := []EventType{
EventMinerStarting,
EventMinerStarted,
EventMinerStopping,
EventMinerStopped,
EventMinerStats,
EventMinerError,
EventMinerConnected,
EventPong,
EventStateSync,
}
for _, et := range types {
if et == "" {
t.Error("Event type should not be empty")
}
}
}

@@ -19,13 +19,13 @@ var instanceNameRegex = regexp.MustCompile(`[^a-zA-Z0-9_/-]`)
// ManagerInterface defines the contract for a miner manager.
type ManagerInterface interface {
StartMiner(minerType string, config *Config) (Miner, error)
StopMiner(name string) error
StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error)
StopMiner(ctx context.Context, name string) error
GetMiner(name string) (Miner, error)
ListMiners() []Miner
ListAvailableMiners() []AvailableMiner
GetMinerHashrateHistory(name string) ([]HashratePoint, error)
UninstallMiner(minerType string) error
UninstallMiner(ctx context.Context, minerType string) error
Stop()
}
@@ -201,7 +201,7 @@ func (m *Manager) autostartMiners() {
for _, minerCfg := range cfg.Miners {
if minerCfg.Autostart && minerCfg.Config != nil {
log.Printf("Autostarting miner: %s", minerCfg.MinerType)
if _, err := m.StartMiner(minerCfg.MinerType, minerCfg.Config); err != nil {
if _, err := m.StartMiner(context.Background(), minerCfg.MinerType, minerCfg.Config); err != nil {
log.Printf("Failed to autostart miner %s: %v", minerCfg.MinerType, err)
}
}
@@ -223,7 +223,15 @@ func findAvailablePort() (int, error) {
}
// StartMiner starts a new miner and saves its configuration.
func (m *Manager) StartMiner(minerType string, config *Config) (Miner, error) {
// The context can be used to cancel the operation.
func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
// Check for cancellation before acquiring lock
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
m.mu.Lock()
defer m.mu.Unlock()
@@ -314,7 +322,15 @@ func (m *Manager) StartMiner(minerType string, config *Config) (Miner, error) {
}
// UninstallMiner stops, uninstalls, and removes a miner's configuration.
func (m *Manager) UninstallMiner(minerType string) error {
// The context can be used to cancel the operation.
func (m *Manager) UninstallMiner(ctx context.Context, minerType string) error {
// Check for cancellation before acquiring lock
select {
case <-ctx.Done():
return ctx.Err()
default:
}
m.mu.Lock()
// Collect miners to stop and delete (can't modify map during iteration)
minersToDelete := make([]string, 0)
@@ -394,7 +410,15 @@ func (m *Manager) updateMinerConfig(minerType string, autostart bool, config *Co
// StopMiner stops a running miner and removes it from the manager.
// If the miner is already stopped, it will still be removed from the manager.
func (m *Manager) StopMiner(name string) error {
// The context can be used to cancel the operation.
func (m *Manager) StopMiner(ctx context.Context, name string) error {
// Check for cancellation before acquiring lock
select {
case <-ctx.Done():
return ctx.Err()
default:
}
m.mu.Lock()
defer m.mu.Unlock()

@@ -1,10 +1,12 @@
package mining
import (
"context"
"os"
"path/filepath"
"runtime"
"testing"
"time"
)
// setupTestManager creates a new Manager and a dummy executable for tests.
@@ -51,7 +53,7 @@ func TestStartMiner_Good(t *testing.T) {
}
// Case 1: Successfully start a supported miner
miner, err := m.StartMiner("xmrig", config)
miner, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Fatalf("Expected to start miner, but got error: %v", err)
}
@@ -74,7 +76,7 @@ func TestStartMiner_Bad(t *testing.T) {
}
// Case 2: Attempt to start an unsupported miner
_, err := m.StartMiner("unsupported", config)
_, err := m.StartMiner(context.Background(), "unsupported", config)
if err == nil {
t.Error("Expected an error when starting an unsupported miner, but got nil")
}
@@ -90,12 +92,12 @@ func TestStartMiner_Ugly(t *testing.T) {
Wallet: "testwallet",
}
// Case 1: Successfully start a supported miner
_, err := m.StartMiner("xmrig", config)
_, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Fatalf("Expected to start miner, but got error: %v", err)
}
// Case 3: Attempt to start a duplicate miner
_, err = m.StartMiner("xmrig", config)
_, err = m.StartMiner(context.Background(), "xmrig", config)
if err == nil {
t.Error("Expected an error when starting a duplicate miner, but got nil")
}
@@ -113,8 +115,8 @@ func TestStopMiner_Good(t *testing.T) {
}
// Case 1: Stop a running miner
miner, _ := m.StartMiner("xmrig", config)
err := m.StopMiner(miner.GetName())
miner, _ := m.StartMiner(context.Background(), "xmrig", config)
err := m.StopMiner(context.Background(), miner.GetName())
if err != nil {
t.Fatalf("Expected to stop miner, but got error: %v", err)
}
@@ -128,7 +130,7 @@ func TestStopMiner_Bad(t *testing.T) {
defer m.Stop()
// Case 2: Attempt to stop a non-existent miner
err := m.StopMiner("nonexistent")
err := m.StopMiner(context.Background(), "nonexistent")
if err == nil {
t.Error("Expected an error when stopping a non-existent miner, but got nil")
}
@@ -146,7 +148,7 @@ func TestGetMiner_Good(t *testing.T) {
}
// Case 1: Get an existing miner
startedMiner, _ := m.StartMiner("xmrig", config)
startedMiner, _ := m.StartMiner(context.Background(), "xmrig", config)
retrievedMiner, err := m.GetMiner(startedMiner.GetName())
if err != nil {
t.Fatalf("Expected to get miner, but got error: %v", err)
@@ -184,9 +186,138 @@ func TestListMiners_Good(t *testing.T) {
Pool: "test:1234",
Wallet: "testwallet",
}
_, _ = m.StartMiner("xmrig", config)
_, _ = m.StartMiner(context.Background(), "xmrig", config)
miners = m.ListMiners()
if len(miners) != 1 {
t.Errorf("Expected 1 miner, but got %d", len(miners))
}
}
// TestManagerStop_Idempotent tests that Stop() can be called multiple times safely
func TestManagerStop_Idempotent(t *testing.T) {
m := setupTestManager(t)
// Start a miner
config := &Config{
HTTPPort: 9010,
Pool: "test:1234",
Wallet: "testwallet",
}
_, _ = m.StartMiner(context.Background(), "xmrig", config)
// Call Stop() multiple times - should not panic
defer func() {
if r := recover(); r != nil {
t.Errorf("Stop() panicked: %v", r)
}
}()
m.Stop()
m.Stop()
m.Stop()
// If we got here without panicking, the test passes
}
// TestStartMiner_CancelledContext tests that StartMiner respects context cancellation
func TestStartMiner_CancelledContext(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
ctx, cancel := context.WithCancel(context.Background())
cancel() // Cancel immediately
config := &Config{
HTTPPort: 9011,
Pool: "test:1234",
Wallet: "testwallet",
}
_, err := m.StartMiner(ctx, "xmrig", config)
if err == nil {
t.Error("Expected error when starting miner with cancelled context")
}
if err != context.Canceled {
t.Errorf("Expected context.Canceled error, got: %v", err)
}
}
// TestStopMiner_CancelledContext tests that StopMiner respects context cancellation
func TestStopMiner_CancelledContext(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
ctx, cancel := context.WithCancel(context.Background())
cancel() // Cancel immediately
err := m.StopMiner(ctx, "nonexistent")
if err == nil {
t.Error("Expected error when stopping miner with cancelled context")
}
if err != context.Canceled {
t.Errorf("Expected context.Canceled error, got: %v", err)
}
}
// TestManagerEventHub tests that SetEventHub works correctly
func TestManagerEventHub(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
eventHub := NewEventHub()
go eventHub.Run()
defer eventHub.Stop()
m.SetEventHub(eventHub)
// Get initial miner count (may have autostarted miners)
initialCount := len(m.ListMiners())
// Start a miner - should emit events
config := &Config{
HTTPPort: 9012,
Pool: "test:1234",
Wallet: "testwallet",
}
_, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Fatalf("Failed to start miner: %v", err)
}
// Give time for events to be processed
time.Sleep(50 * time.Millisecond)
// Verify miner count increased by 1
miners := m.ListMiners()
if len(miners) != initialCount+1 {
t.Errorf("Expected %d miners, got %d", initialCount+1, len(miners))
}
}
// TestManagerShutdownTimeout tests the graceful shutdown timeout
func TestManagerShutdownTimeout(t *testing.T) {
m := setupTestManager(t)
// Start a miner
config := &Config{
HTTPPort: 9013,
Pool: "test:1234",
Wallet: "testwallet",
}
_, _ = m.StartMiner(context.Background(), "xmrig", config)
// Stop should complete within a reasonable time
done := make(chan struct{})
go func() {
m.Stop()
close(done)
}()
select {
case <-done:
// Success - stopped in time
case <-time.After(15 * time.Second):
t.Error("Manager.Stop() took too long - possible shutdown issue")
}
}

@@ -1,6 +1,7 @@
package mining
import (
"context"
"testing"
)
@@ -26,7 +27,7 @@ func TestStartAndStopMiner(t *testing.T) {
// but we can test the manager's behavior.
// This will fail because the miner executable is not present,
// which is expected in a test environment.
_, err := manager.StartMiner("xmrig", config)
_, err := manager.StartMiner(context.Background(), "xmrig", config)
if err == nil {
t.Log("StartMiner did not fail as expected in test environment")
}

@@ -235,6 +235,33 @@ func NewService(manager ManagerInterface, listenAddr string, displayAddr string,
mgr.SetEventHub(eventHub)
}
// Set up state provider for WebSocket state sync on reconnect
eventHub.SetStateProvider(func() interface{} {
miners := manager.ListMiners()
if len(miners) == 0 {
return nil
}
// Return current state of all miners
state := make([]map[string]interface{}, 0, len(miners))
for _, miner := range miners {
stats, _ := miner.GetStats(context.Background())
minerState := map[string]interface{}{
"name": miner.GetName(),
"status": "running",
}
if stats != nil {
minerState["hashrate"] = stats.Hashrate
minerState["shares"] = stats.Shares
minerState["rejected"] = stats.Rejected
minerState["uptime"] = stats.Uptime
}
state = append(state, minerState)
}
return map[string]interface{}{
"miners": state,
}
})
return &Service{
Manager: manager,
ProfileManager: profileManager,
@@ -567,7 +594,7 @@ func (s *Service) handleUpdateCheck(c *gin.Context) {
// @Router /miners/{miner_type}/uninstall [delete]
func (s *Service) handleUninstallMiner(c *gin.Context) {
minerType := c.Param("miner_name")
if err := s.Manager.UninstallMiner(minerType); err != nil {
if err := s.Manager.UninstallMiner(c.Request.Context(), minerType); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
@@ -662,7 +689,7 @@ func (s *Service) handleStartMinerWithProfile(c *gin.Context) {
return
}
miner, err := s.Manager.StartMiner(profile.MinerType, &config)
miner, err := s.Manager.StartMiner(c.Request.Context(), profile.MinerType, &config)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
@@ -680,7 +707,7 @@ func (s *Service) handleStartMinerWithProfile(c *gin.Context) {
// @Router /miners/{miner_name} [delete]
func (s *Service) handleStopMiner(c *gin.Context) {
minerName := c.Param("miner_name")
if err := s.Manager.StopMiner(minerName); err != nil {
if err := s.Manager.StopMiner(c.Request.Context(), minerName); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}

@@ -53,27 +53,31 @@ func (m *MockMiner) WriteStdin(input string) error { return m.WriteStdinF
type MockManager struct {
ListMinersFunc func() []Miner
ListAvailableMinersFunc func() []AvailableMiner
StartMinerFunc func(minerType string, config *Config) (Miner, error)
StopMinerFunc func(minerName string) error
StartMinerFunc func(ctx context.Context, minerType string, config *Config) (Miner, error)
StopMinerFunc func(ctx context.Context, minerName string) error
GetMinerFunc func(minerName string) (Miner, error)
GetMinerHashrateHistoryFunc func(minerName string) ([]HashratePoint, error)
UninstallMinerFunc func(minerType string) error
UninstallMinerFunc func(ctx context.Context, minerType string) error
StopFunc func()
}
func (m *MockManager) ListMiners() []Miner { return m.ListMinersFunc() }
func (m *MockManager) ListAvailableMiners() []AvailableMiner { return m.ListAvailableMinersFunc() }
func (m *MockManager) StartMiner(minerType string, config *Config) (Miner, error) {
return m.StartMinerFunc(minerType, config)
func (m *MockManager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
return m.StartMinerFunc(ctx, minerType, config)
}
func (m *MockManager) StopMiner(ctx context.Context, minerName string) error {
return m.StopMinerFunc(ctx, minerName)
}
func (m *MockManager) StopMiner(minerName string) error { return m.StopMinerFunc(minerName) }
func (m *MockManager) GetMiner(minerName string) (Miner, error) {
return m.GetMinerFunc(minerName)
}
func (m *MockManager) GetMinerHashrateHistory(minerName string) ([]HashratePoint, error) {
return m.GetMinerHashrateHistoryFunc(minerName)
}
func (m *MockManager) UninstallMiner(minerType string) error { return m.UninstallMinerFunc(minerType) }
func (m *MockManager) UninstallMiner(ctx context.Context, minerType string) error {
return m.UninstallMinerFunc(ctx, minerType)
}
func (m *MockManager) Stop() { m.StopFunc() }
var _ ManagerInterface = (*MockManager)(nil)
@@ -84,11 +88,15 @@ func setupTestRouter() (*gin.Engine, *MockManager) {
mockManager := &MockManager{
ListMinersFunc: func() []Miner { return []Miner{} },
ListAvailableMinersFunc: func() []AvailableMiner { return []AvailableMiner{} },
StartMinerFunc: func(minerType string, config *Config) (Miner, error) { return nil, nil },
StopMinerFunc: func(minerName string) error { return nil },
StartMinerFunc: func(ctx context.Context, minerType string, config *Config) (Miner, error) {
return nil, nil
},
StopMinerFunc: func(ctx context.Context, minerName string) error { return nil },
GetMinerFunc: func(minerName string) (Miner, error) { return nil, nil },
GetMinerHashrateHistoryFunc: func(minerName string) ([]HashratePoint, error) { return nil, nil },
UninstallMinerFunc: func(minerType string) error { return nil },
GetMinerHashrateHistoryFunc: func(minerName string) ([]HashratePoint, error) {
return nil, nil
},
UninstallMinerFunc: func(ctx context.Context, minerType string) error { return nil },
StopFunc: func() {},
}
service := &Service{
@@ -162,7 +170,7 @@ func TestHandleInstallMiner(t *testing.T) {
func TestHandleStopMiner(t *testing.T) {
router, mockManager := setupTestRouter()
mockManager.StopMinerFunc = func(minerName string) error {
mockManager.StopMinerFunc = func(ctx context.Context, minerName string) error {
return nil
}

@@ -34,7 +34,7 @@ func TestCPUThrottleSingleMiner(t *testing.T) {
Algo: "rx/0",
}
minerInstance, err := manager.StartMiner("xmrig", config)
minerInstance, err := manager.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Fatalf("Failed to start miner: %v", err)
}
@@ -53,7 +53,7 @@ func TestCPUThrottleSingleMiner(t *testing.T) {
t.Errorf("CPU usage %.1f%% exceeds expected ~10%% (with tolerance)", avgCPU)
}
manager.StopMiner(minerInstance.GetName())
manager.StopMiner(context.Background(), minerInstance.GetName())
}
// TestCPUThrottleDualMiners tests that two miners together respect combined CPU limits
@@ -79,7 +79,7 @@ func TestCPUThrottleDualMiners(t *testing.T) {
Algo: "rx/0",
}
miner1Instance, err := manager.StartMiner("xmrig", config1)
miner1Instance, err := manager.StartMiner(context.Background(), "xmrig", config1)
if err != nil {
t.Fatalf("Failed to start first miner: %v", err)
}
@@ -93,7 +93,7 @@ func TestCPUThrottleDualMiners(t *testing.T) {
Algo: "gr", // GhostRider algo
}
miner2Instance, err := manager.StartMiner("xmrig", config2)
miner2Instance, err := manager.StartMiner(context.Background(), "xmrig", config2)
if err != nil {
t.Fatalf("Failed to start second miner: %v", err)
}
@@ -119,8 +119,8 @@ func TestCPUThrottleDualMiners(t *testing.T) {
}
// Clean up
manager.StopMiner(miner1Instance.GetName())
manager.StopMiner(miner2Instance.GetName())
manager.StopMiner(context.Background(), miner1Instance.GetName())
manager.StopMiner(context.Background(), miner2Instance.GetName())
}
// TestCPUThrottleThreadCount tests thread-based CPU limiting
@@ -150,12 +150,12 @@ func TestCPUThrottleThreadCount(t *testing.T) {
Algo: "rx/0",
}
minerInstance, err := manager.StartMiner("xmrig", config)
minerInstance, err := manager.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Fatalf("Failed to start miner: %v", err)
}
t.Logf("Started miner: %s", minerInstance.GetName())
defer manager.StopMiner(minerInstance.GetName())
defer manager.StopMiner(context.Background(), minerInstance.GetName())
// Let miner warm up
time.Sleep(15 * time.Second)
@@ -194,7 +194,7 @@ func TestMinerResourceIsolation(t *testing.T) {
Algo: "rx/0",
}
miner1, err := manager.StartMiner("xmrig", config1)
miner1, err := manager.StartMiner(context.Background(), "xmrig", config1)
if err != nil {
t.Fatalf("Failed to start miner 1: %v", err)
}
@@ -219,7 +219,7 @@ func TestMinerResourceIsolation(t *testing.T) {
Algo: "gr",
}
miner2, err := manager.StartMiner("xmrig", config2)
miner2, err := manager.StartMiner(context.Background(), "xmrig", config2)
if err != nil {
t.Fatalf("Failed to start miner 2: %v", err)
}
@@ -248,8 +248,8 @@ func TestMinerResourceIsolation(t *testing.T) {
}
// Clean up
manager.StopMiner(miner1.GetName())
manager.StopMiner(miner2.GetName())
manager.StopMiner(context.Background(), miner1.GetName())
manager.StopMiner(context.Background(), miner2.GetName())
}
// measureCPUUsage measures average CPU usage over a duration