diff --git a/pkg/database/database_test.go b/pkg/database/database_test.go index 7ad2bee..fa2bc1f 100644 --- a/pkg/database/database_test.go +++ b/pkg/database/database_test.go @@ -11,13 +11,13 @@ func setupTestDB(t *testing.T) func() { tmpDir := t.TempDir() dbPath := filepath.Join(tmpDir, "test.db") - cfg := Config{ + config := Config{ Enabled: true, Path: dbPath, RetentionDays: 7, } - if err := Initialize(cfg); err != nil { + if err := Initialize(config); err != nil { t.Fatalf("Failed to initialize database: %v", err) } @@ -42,11 +42,11 @@ func TestDatabase_Initialize_Good(t *testing.T) { } func TestDatabase_Initialize_Bad(t *testing.T) { - cfg := Config{ + config := Config{ Enabled: false, } - if err := Initialize(cfg); err != nil { + if err := Initialize(config); err != nil { t.Errorf("Initialize with disabled should not error: %v", err) } } @@ -138,14 +138,14 @@ func TestGetHashrateStats(t *testing.T) { } func TestDefaultConfig(t *testing.T) { - cfg := defaultConfig() + config := defaultConfig() - if !cfg.Enabled { + if !config.Enabled { t.Error("Default config should have Enabled=true") } - if cfg.RetentionDays != 30 { - t.Errorf("Expected default retention 30, got %d", cfg.RetentionDays) + if config.RetentionDays != 30 { + t.Errorf("Expected default retention 30, got %d", config.RetentionDays) } } @@ -286,10 +286,10 @@ func TestMultipleMinerStats(t *testing.T) { } for _, m := range miners { - for i, hr := range m.hashrates { + for i, rate := range m.hashrates { point := HashratePoint{ Timestamp: now.Add(time.Duration(-i) * time.Minute), - Hashrate: hr, + Hashrate: rate, } if err := InsertHashratePoint(nil, m.name, "xmrig", point, ResolutionHigh); err != nil { t.Fatalf("Failed to insert point for %s: %v", m.name, err) @@ -392,14 +392,14 @@ func TestReInitializeExistingDB(t *testing.T) { tmpDir := t.TempDir() dbPath := filepath.Join(tmpDir, "reinit_test.db") - cfg := Config{ + config := Config{ Enabled: true, Path: dbPath, RetentionDays: 7, } // First 
initialization - if err := Initialize(cfg); err != nil { + if err := Initialize(config); err != nil { t.Fatalf("First initialization failed: %v", err) } @@ -419,7 +419,7 @@ func TestReInitializeExistingDB(t *testing.T) { } // Re-initialize with same path - if err := Initialize(cfg); err != nil { + if err := Initialize(config); err != nil { t.Fatalf("Re-initialization failed: %v", err) } defer func() { diff --git a/pkg/mining/circuit_breaker_test.go b/pkg/mining/circuit_breaker_test.go index 9b8cd5d..9e1705e 100644 --- a/pkg/mining/circuit_breaker_test.go +++ b/pkg/mining/circuit_breaker_test.go @@ -51,13 +51,13 @@ func TestCircuitBreaker_StateString_Ugly(t *testing.T) { // cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig()) // result, err := cb.Execute(func() (interface{}, error) { return "success", nil }) func TestCircuitBreaker_Execute_Good(t *testing.T) { - cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig()) + breaker := NewCircuitBreaker("test", DefaultCircuitBreakerConfig()) - if cb.State() != CircuitClosed { + if breaker.State() != CircuitClosed { t.Error("expected initial state to be closed") } - result, err := cb.Execute(func() (interface{}, error) { + result, err := breaker.Execute(func() (interface{}, error) { return "success", nil }) @@ -67,7 +67,7 @@ func TestCircuitBreaker_Execute_Good(t *testing.T) { if result != "success" { t.Errorf("expected 'success', got %v", result) } - if cb.State() != CircuitClosed { + if breaker.State() != CircuitClosed { t.Error("state should still be closed after success") } } @@ -79,27 +79,27 @@ func TestCircuitBreaker_Execute_Bad(t *testing.T) { ResetTimeout: time.Minute, SuccessThreshold: 1, } - cb := NewCircuitBreaker("test", configuration) + breaker := NewCircuitBreaker("test", configuration) testErr := errors.New("test error") - _, err := cb.Execute(func() (interface{}, error) { + _, err := breaker.Execute(func() (interface{}, error) { return nil, testErr }) if err != testErr { t.Errorf("expected 
test error, got %v", err) } - if cb.State() != CircuitClosed { + if breaker.State() != CircuitClosed { t.Error("should still be closed after 1 failure") } - _, err = cb.Execute(func() (interface{}, error) { + _, err = breaker.Execute(func() (interface{}, error) { return nil, testErr }) if err != testErr { t.Errorf("expected test error, got %v", err) } - if cb.State() != CircuitOpen { + if breaker.State() != CircuitOpen { t.Error("should be open after 2 failures") } } @@ -111,18 +111,18 @@ func TestCircuitBreaker_Execute_Ugly(t *testing.T) { ResetTimeout: time.Hour, SuccessThreshold: 1, } - cb := NewCircuitBreaker("test", configuration) + breaker := NewCircuitBreaker("test", configuration) - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { //nolint:errcheck return nil, errors.New("fail") }) - if cb.State() != CircuitOpen { + if breaker.State() != CircuitOpen { t.Fatal("circuit should be open") } called := false - _, err := cb.Execute(func() (interface{}, error) { + _, err := breaker.Execute(func() (interface{}, error) { called = true return "should not run", nil }) @@ -142,19 +142,19 @@ func TestCircuitBreaker_HalfOpen_Good(t *testing.T) { ResetTimeout: 50 * time.Millisecond, SuccessThreshold: 1, } - cb := NewCircuitBreaker("test", configuration) + breaker := NewCircuitBreaker("test", configuration) - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { //nolint:errcheck return nil, errors.New("fail") }) - if cb.State() != CircuitOpen { + if breaker.State() != CircuitOpen { t.Fatal("circuit should be open") } time.Sleep(100 * time.Millisecond) - result, err := cb.Execute(func() (interface{}, error) { + result, err := breaker.Execute(func() (interface{}, error) { return "probe success", nil }) @@ -164,7 +164,7 @@ func TestCircuitBreaker_HalfOpen_Good(t *testing.T) { if result != "probe success" { t.Errorf("expected 'probe success', got %v", result) } - if 
cb.State() != CircuitClosed { + if breaker.State() != CircuitClosed { t.Error("should be closed after successful probe") } } @@ -176,19 +176,19 @@ func TestCircuitBreaker_HalfOpen_Bad(t *testing.T) { ResetTimeout: 50 * time.Millisecond, SuccessThreshold: 1, } - cb := NewCircuitBreaker("test", configuration) + breaker := NewCircuitBreaker("test", configuration) - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { //nolint:errcheck return nil, errors.New("fail") }) time.Sleep(100 * time.Millisecond) - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { //nolint:errcheck return nil, errors.New("probe failed") }) - if cb.State() != CircuitOpen { + if breaker.State() != CircuitOpen { t.Error("should be open after probe failure") } } @@ -200,9 +200,9 @@ func TestCircuitBreaker_Caching_Good(t *testing.T) { ResetTimeout: time.Hour, SuccessThreshold: 1, } - cb := NewCircuitBreaker("test", configuration) + breaker := NewCircuitBreaker("test", configuration) - result, err := cb.Execute(func() (interface{}, error) { + result, err := breaker.Execute(func() (interface{}, error) { return "cached value", nil }) if err != nil { @@ -212,11 +212,11 @@ func TestCircuitBreaker_Caching_Good(t *testing.T) { t.Fatalf("expected 'cached value', got %v", result) } - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { //nolint:errcheck return nil, errors.New("fail") }) - result, err = cb.Execute(func() (interface{}, error) { + result, err = breaker.Execute(func() (interface{}, error) { return "should not run", nil }) @@ -230,18 +230,18 @@ func TestCircuitBreaker_Caching_Good(t *testing.T) { // cb.Execute(successFn) → result, ok := cb.GetCached() → ok == true func TestCircuitBreaker_GetCached_Good(t *testing.T) { - cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig()) + breaker := NewCircuitBreaker("test", 
DefaultCircuitBreakerConfig()) - _, ok := cb.GetCached() + _, ok := breaker.GetCached() if ok { t.Error("expected no cached value initially") } - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { //nolint:errcheck return "test value", nil }) - cached, ok := cb.GetCached() + cached, ok := breaker.GetCached() if !ok { t.Error("expected cached value") } @@ -252,9 +252,9 @@ func TestCircuitBreaker_GetCached_Good(t *testing.T) { // cb := NewCircuitBreaker(...) → _, ok := cb.GetCached() → ok == false func TestCircuitBreaker_GetCached_Bad(t *testing.T) { - cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig()) + breaker := NewCircuitBreaker("test", DefaultCircuitBreakerConfig()) - _, ok := cb.GetCached() + _, ok := breaker.GetCached() if ok { t.Error("expected no cached value on fresh circuit breaker") } @@ -267,33 +267,33 @@ func TestCircuitBreaker_Reset_Good(t *testing.T) { ResetTimeout: time.Hour, SuccessThreshold: 1, } - cb := NewCircuitBreaker("test", configuration) + breaker := NewCircuitBreaker("test", configuration) - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { //nolint:errcheck return nil, errors.New("fail") }) - if cb.State() != CircuitOpen { + if breaker.State() != CircuitOpen { t.Fatal("circuit should be open") } - cb.Reset() + breaker.Reset() - if cb.State() != CircuitClosed { + if breaker.State() != CircuitClosed { t.Error("circuit should be closed after reset") } } // 100 goroutines concurrently call cb.Execute — no race condition or panic should occur. 
func TestCircuitBreaker_Concurrency_Ugly(t *testing.T) { - cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig()) + breaker := NewCircuitBreaker("test", DefaultCircuitBreakerConfig()) var waitGroup sync.WaitGroup for i := 0; i < 100; i++ { waitGroup.Add(1) go func(n int) { defer waitGroup.Done() - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { //nolint:errcheck if n%3 == 0 { return nil, errors.New("fail") } @@ -303,42 +303,42 @@ func TestCircuitBreaker_Concurrency_Ugly(t *testing.T) { } waitGroup.Wait() - _ = cb.State() + _ = breaker.State() } // cb1 := getGitHubCircuitBreaker(); cb2 := getGitHubCircuitBreaker(); cb1 == cb2 func TestCircuitBreaker_GitHubSingleton_Good(t *testing.T) { - cb1 := getGitHubCircuitBreaker() - cb2 := getGitHubCircuitBreaker() + firstBreaker := getGitHubCircuitBreaker() + secondBreaker := getGitHubCircuitBreaker() - if cb1 != cb2 { + if firstBreaker != secondBreaker { t.Error("expected singleton circuit breaker") } - if cb1.name != "github-api" { - t.Errorf("expected name 'github-api', got %s", cb1.name) + if firstBreaker.name != "github-api" { + t.Errorf("expected name 'github-api', got %s", firstBreaker.name) } } // Benchmark tests func BenchmarkCircuitBreakerExecute(b *testing.B) { - cb := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig()) + breaker := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig()) b.ResetTimer() for i := 0; i < b.N; i++ { - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { //nolint:errcheck return "result", nil }) } } func BenchmarkCircuitBreakerConcurrent(b *testing.B) { - cb := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig()) + breaker := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig()) b.RunParallel(func(pb *testing.PB) { for pb.Next() { - cb.Execute(func() (interface{}, error) { //nolint:errcheck + breaker.Execute(func() (interface{}, error) { 
//nolint:errcheck return "result", nil }) } diff --git a/pkg/mining/miner.go b/pkg/mining/miner.go index 60bf186..8edc8f7 100644 --- a/pkg/mining/miner.go +++ b/pkg/mining/miner.go @@ -506,8 +506,8 @@ func (b *BaseMiner) ReduceHashrateHistory(now time.Time) { for minute, hashrates := range minuteGroups { if len(hashrates) > 0 { totalHashrate := 0 - for _, hr := range hashrates { - totalHashrate += hr + for _, rate := range hashrates { + totalHashrate += rate } avgHashrate := totalHashrate / len(hashrates) newLowResPoints = append(newLowResPoints, HashratePoint{Timestamp: minute, Hashrate: avgHashrate}) diff --git a/pkg/mining/profile_manager_test.go b/pkg/mining/profile_manager_test.go index 3716430..1928206 100644 --- a/pkg/mining/profile_manager_test.go +++ b/pkg/mining/profile_manager_test.go @@ -17,7 +17,7 @@ func setupTestProfileManager(t *testing.T) (*ProfileManager, func()) { configPath := filepath.Join(tmpDir, "mining_profiles.json") - pm := &ProfileManager{ + profileManager := &ProfileManager{ profiles: make(map[string]*MiningProfile), configPath: configPath, } @@ -26,11 +26,11 @@ func setupTestProfileManager(t *testing.T) (*ProfileManager, func()) { os.RemoveAll(tmpDir) } - return pm, cleanup + return profileManager, cleanup } func TestProfileManagerCreate(t *testing.T) { - pm, cleanup := setupTestProfileManager(t) + profileManager, cleanup := setupTestProfileManager(t) defer cleanup() profile := &MiningProfile{ @@ -39,7 +39,7 @@ func TestProfileManagerCreate(t *testing.T) { Config: RawConfig(`{"pool": "test.pool.com:3333"}`), } - created, err := pm.CreateProfile(profile) + created, err := profileManager.CreateProfile(profile) if err != nil { t.Fatalf("failed to create profile: %v", err) } @@ -53,7 +53,7 @@ func TestProfileManagerCreate(t *testing.T) { } // Verify it's stored - retrieved, exists := pm.GetProfile(created.ID) + retrieved, exists := profileManager.GetProfile(created.ID) if !exists { t.Error("profile should exist after creation") } @@ -64,11 
+64,11 @@ func TestProfileManagerCreate(t *testing.T) { } func TestProfileManagerGet(t *testing.T) { - pm, cleanup := setupTestProfileManager(t) + profileManager, cleanup := setupTestProfileManager(t) defer cleanup() // Get non-existent profile - _, exists := pm.GetProfile("non-existent-id") + _, exists := profileManager.GetProfile("non-existent-id") if exists { t.Error("GetProfile should return false for non-existent ID") } @@ -78,9 +78,9 @@ func TestProfileManagerGet(t *testing.T) { Name: "Get Test", MinerType: "xmrig", } - created, _ := pm.CreateProfile(profile) + created, _ := profileManager.CreateProfile(profile) - retrieved, exists := pm.GetProfile(created.ID) + retrieved, exists := profileManager.GetProfile(created.ID) if !exists { t.Error("GetProfile should return true for existing ID") } @@ -91,35 +91,35 @@ func TestProfileManagerGet(t *testing.T) { } func TestProfileManagerGetAll(t *testing.T) { - pm, cleanup := setupTestProfileManager(t) + profileManager, cleanup := setupTestProfileManager(t) defer cleanup() // Empty list initially - profiles := pm.GetAllProfiles() + profiles := profileManager.GetAllProfiles() if len(profiles) != 0 { t.Errorf("expected 0 profiles initially, got %d", len(profiles)) } // Create multiple profiles for i := 0; i < 3; i++ { - pm.CreateProfile(&MiningProfile{ + profileManager.CreateProfile(&MiningProfile{ Name: "Profile", MinerType: "xmrig", }) } - profiles = pm.GetAllProfiles() + profiles = profileManager.GetAllProfiles() if len(profiles) != 3 { t.Errorf("expected 3 profiles, got %d", len(profiles)) } } func TestProfileManagerUpdate(t *testing.T) { - pm, cleanup := setupTestProfileManager(t) + profileManager, cleanup := setupTestProfileManager(t) defer cleanup() // Update non-existent profile - err := pm.UpdateProfile(&MiningProfile{ID: "non-existent"}) + err := profileManager.UpdateProfile(&MiningProfile{ID: "non-existent"}) if err == nil { t.Error("UpdateProfile should fail for non-existent profile") } @@ -129,18 +129,18 @@ 
func TestProfileManagerUpdate(t *testing.T) { Name: "Original Name", MinerType: "xmrig", } - created, _ := pm.CreateProfile(profile) + created, _ := profileManager.CreateProfile(profile) // Update it created.Name = "Updated Name" created.MinerType = "ttminer" - err = pm.UpdateProfile(created) + err = profileManager.UpdateProfile(created) if err != nil { t.Fatalf("failed to update profile: %v", err) } // Verify update - retrieved, _ := pm.GetProfile(created.ID) + retrieved, _ := profileManager.GetProfile(created.ID) if retrieved.Name != "Updated Name" { t.Errorf("expected name 'Updated Name', got '%s'", retrieved.Name) } @@ -150,11 +150,11 @@ func TestProfileManagerUpdate(t *testing.T) { } func TestProfileManagerDelete(t *testing.T) { - pm, cleanup := setupTestProfileManager(t) + profileManager, cleanup := setupTestProfileManager(t) defer cleanup() // Delete non-existent profile - err := pm.DeleteProfile("non-existent") + err := profileManager.DeleteProfile("non-existent") if err == nil { t.Error("DeleteProfile should fail for non-existent profile") } @@ -164,15 +164,15 @@ func TestProfileManagerDelete(t *testing.T) { Name: "Delete Me", MinerType: "xmrig", } - created, _ := pm.CreateProfile(profile) + created, _ := profileManager.CreateProfile(profile) - err = pm.DeleteProfile(created.ID) + err = profileManager.DeleteProfile(created.ID) if err != nil { t.Fatalf("failed to delete profile: %v", err) } // Verify deletion - _, exists := pm.GetProfile(created.ID) + _, exists := profileManager.GetProfile(created.ID) if exists { t.Error("profile should not exist after deletion") } @@ -188,7 +188,7 @@ func TestProfileManagerPersistence(t *testing.T) { configPath := filepath.Join(tmpDir, "mining_profiles.json") // Create first manager and add profile - pm1 := &ProfileManager{ + firstProfileManager := &ProfileManager{ profiles: make(map[string]*MiningProfile), configPath: configPath, } @@ -198,23 +198,23 @@ func TestProfileManagerPersistence(t *testing.T) { MinerType: 
"xmrig", Config: RawConfig(`{"pool": "persist.pool.com"}`), } - created, err := pm1.CreateProfile(profile) + created, err := firstProfileManager.CreateProfile(profile) if err != nil { t.Fatalf("failed to create profile: %v", err) } // Create second manager with same path - should load existing profile - pm2 := &ProfileManager{ + secondProfileManager := &ProfileManager{ profiles: make(map[string]*MiningProfile), configPath: configPath, } - err = pm2.loadProfiles() + err = secondProfileManager.loadProfiles() if err != nil { t.Fatalf("failed to load profiles: %v", err) } // Verify profile persisted - loaded, exists := pm2.GetProfile(created.ID) + loaded, exists := secondProfileManager.GetProfile(created.ID) if !exists { t.Fatal("profile should be loaded from file") } @@ -225,39 +225,39 @@ func TestProfileManagerPersistence(t *testing.T) { } func TestProfileManagerConcurrency(t *testing.T) { - pm, cleanup := setupTestProfileManager(t) + profileManager, cleanup := setupTestProfileManager(t) defer cleanup() - var wg sync.WaitGroup + var waitGroup sync.WaitGroup numGoroutines := 10 // Concurrent creates for i := 0; i < numGoroutines; i++ { - wg.Add(1) + waitGroup.Add(1) go func(n int) { - defer wg.Done() - pm.CreateProfile(&MiningProfile{ + defer waitGroup.Done() + profileManager.CreateProfile(&MiningProfile{ Name: "Concurrent Profile", MinerType: "xmrig", }) }(i) } - wg.Wait() + waitGroup.Wait() - profiles := pm.GetAllProfiles() + profiles := profileManager.GetAllProfiles() if len(profiles) != numGoroutines { t.Errorf("expected %d profiles, got %d", numGoroutines, len(profiles)) } // Concurrent reads for i := 0; i < numGoroutines; i++ { - wg.Add(1) + waitGroup.Add(1) go func() { - defer wg.Done() - pm.GetAllProfiles() + defer waitGroup.Done() + profileManager.GetAllProfiles() }() } - wg.Wait() + waitGroup.Wait() } func TestProfileManagerInvalidJSON(t *testing.T) { @@ -275,24 +275,24 @@ func TestProfileManagerInvalidJSON(t *testing.T) { t.Fatalf("failed to write invalid 
JSON: %v", err) } - pm := &ProfileManager{ + profileManager := &ProfileManager{ profiles: make(map[string]*MiningProfile), configPath: configPath, } - err = pm.loadProfiles() + err = profileManager.loadProfiles() if err == nil { t.Error("loadProfiles should fail with invalid JSON") } } func TestProfileManagerFileNotFound(t *testing.T) { - pm := &ProfileManager{ + profileManager := &ProfileManager{ profiles: make(map[string]*MiningProfile), configPath: "/non/existent/path/profiles.json", } - err := pm.loadProfiles() + err := profileManager.loadProfiles() if err == nil { t.Error("loadProfiles should fail when file not found") } @@ -303,7 +303,7 @@ func TestProfileManagerFileNotFound(t *testing.T) { } func TestProfileManagerCreateRollback(t *testing.T) { - pm := &ProfileManager{ + profileManager := &ProfileManager{ profiles: make(map[string]*MiningProfile), configPath: "/invalid/path/that/cannot/be/written/profiles.json", } @@ -313,20 +313,20 @@ func TestProfileManagerCreateRollback(t *testing.T) { MinerType: "xmrig", } - _, err := pm.CreateProfile(profile) + _, err := profileManager.CreateProfile(profile) if err == nil { t.Error("CreateProfile should fail when save fails") } // Verify rollback - profile should not be in memory - profiles := pm.GetAllProfiles() + profiles := profileManager.GetAllProfiles() if len(profiles) != 0 { t.Error("failed create should rollback - no profile should be in memory") } } func TestProfileManagerConfigWithData(t *testing.T) { - pm, cleanup := setupTestProfileManager(t) + profileManager, cleanup := setupTestProfileManager(t) defer cleanup() config := RawConfig(`{ @@ -342,12 +342,12 @@ func TestProfileManagerConfigWithData(t *testing.T) { Config: config, } - created, err := pm.CreateProfile(profile) + created, err := profileManager.CreateProfile(profile) if err != nil { t.Fatalf("failed to create profile: %v", err) } - retrieved, _ := pm.GetProfile(created.ID) + retrieved, _ := profileManager.GetProfile(created.ID) // Parse config to 
verify var parsedConfig map[string]interface{} diff --git a/pkg/mining/ratelimiter_test.go b/pkg/mining/ratelimiter_test.go index 2382a19..cacb55d 100644 --- a/pkg/mining/ratelimiter_test.go +++ b/pkg/mining/ratelimiter_test.go @@ -12,23 +12,23 @@ import ( // rl := NewRateLimiter(10, 20) // rl.Stop() func TestRatelimiter_NewRateLimiter_Good(t *testing.T) { - rl := NewRateLimiter(10, 20) - if rl == nil { + rateLimiter := NewRateLimiter(10, 20) + if rateLimiter == nil { t.Fatal("NewRateLimiter returned nil") } - defer rl.Stop() + defer rateLimiter.Stop() - if rl.requestsPerSecond != 10 { + if rateLimiter.requestsPerSecond != 10 { - t.Errorf("Expected requestsPerSecond 10, got %d", rl.requestsPerSecond) + t.Errorf("Expected requestsPerSecond 10, got %d", rateLimiter.requestsPerSecond) } - if rl.burst != 20 { + if rateLimiter.burst != 20 { - t.Errorf("Expected burst 20, got %d", rl.burst) + t.Errorf("Expected burst 20, got %d", rateLimiter.burst) } } // rl.Stop() // idempotent — calling twice must not panic func TestRatelimiter_Stop_Good(t *testing.T) { - rl := NewRateLimiter(10, 20) + rateLimiter := NewRateLimiter(10, 20) defer func() { if r := recover(); r != nil { @@ -36,18 +36,18 @@ func TestRatelimiter_Stop_Good(t *testing.T) { } }() - rl.Stop() - rl.Stop() + rateLimiter.Stop() + rateLimiter.Stop() } // router.Use(rl.Middleware()) — allows requests within burst, rejects beyond func TestRatelimiter_Middleware_Good(t *testing.T) { gin.SetMode(gin.TestMode) - rl := NewRateLimiter(10, 5) - defer rl.Stop() + rateLimiter := NewRateLimiter(10, 5) + defer rateLimiter.Stop() router := gin.New() - router.Use(rl.Middleware()) + router.Use(rateLimiter.Middleware()) router.GET("/test", func(c *gin.Context) { c.String(http.StatusOK, "ok") }) @@ -67,11 +67,11 @@ func TestRatelimiter_Middleware_Good(t *testing.T) { // router.Use(rl.Middleware()) — rejects the 6th request after burst is exhausted func TestRatelimiter_Middleware_Bad(t *testing.T) { gin.SetMode(gin.TestMode) - rl := NewRateLimiter(10, 5) - defer rl.Stop() + rateLimiter := NewRateLimiter(10, 5) + defer rateLimiter.Stop() router := gin.New() - 
router.Use(rl.Middleware()) + router.Use(rateLimiter.Middleware()) router.GET("/test", func(c *gin.Context) { c.String(http.StatusOK, "ok") }) @@ -96,11 +96,11 @@ func TestRatelimiter_Middleware_Bad(t *testing.T) { // router.Use(rl.Middleware()) — rate limit per IP; exhausted IP1 does not affect IP2 func TestRatelimiter_Middleware_Ugly(t *testing.T) { gin.SetMode(gin.TestMode) - rl := NewRateLimiter(10, 2) - defer rl.Stop() + rateLimiter := NewRateLimiter(10, 2) + defer rateLimiter.Stop() router := gin.New() - router.Use(rl.Middleware()) + router.Use(rateLimiter.Middleware()) router.GET("/test", func(c *gin.Context) { c.String(http.StatusOK, "ok") }) @@ -131,17 +131,17 @@ func TestRatelimiter_Middleware_Ugly(t *testing.T) { // count := rl.ClientCount() // returns number of tracked IPs func TestRatelimiter_ClientCount_Good(t *testing.T) { - rl := NewRateLimiter(10, 5) - defer rl.Stop() + rateLimiter := NewRateLimiter(10, 5) + defer rateLimiter.Stop() gin.SetMode(gin.TestMode) router := gin.New() - router.Use(rl.Middleware()) + router.Use(rateLimiter.Middleware()) router.GET("/test", func(c *gin.Context) { c.String(http.StatusOK, "ok") }) - if count := rl.ClientCount(); count != 0 { + if count := rateLimiter.ClientCount(); count != 0 { t.Errorf("Expected 0 clients, got %d", count) } @@ -150,7 +150,7 @@ func TestRatelimiter_ClientCount_Good(t *testing.T) { w := httptest.NewRecorder() router.ServeHTTP(w, req) - if count := rl.ClientCount(); count != 1 { + if count := rateLimiter.ClientCount(); count != 1 { t.Errorf("Expected 1 client, got %d", count) } @@ -159,7 +159,7 @@ func TestRatelimiter_ClientCount_Good(t *testing.T) { w = httptest.NewRecorder() router.ServeHTTP(w, req) - if count := rl.ClientCount(); count != 2 { + if count := rateLimiter.ClientCount(); count != 2 { t.Errorf("Expected 2 clients, got %d", count) } } @@ -167,11 +167,11 @@ func TestRatelimiter_ClientCount_Good(t *testing.T) { // rl.Middleware() — token refills at requestsPerSecond rate; request 
succeeds after wait func TestRatelimiter_TokenRefill_Good(t *testing.T) { gin.SetMode(gin.TestMode) - rl := NewRateLimiter(100, 1) - defer rl.Stop() + rateLimiter := NewRateLimiter(100, 1) + defer rateLimiter.Stop() router := gin.New() - router.Use(rl.Middleware()) + router.Use(rateLimiter.Middleware()) router.GET("/test", func(c *gin.Context) { c.String(http.StatusOK, "ok") }) diff --git a/pkg/mining/settings_manager_test.go b/pkg/mining/settings_manager_test.go index 02d6f5c..aa43728 100644 --- a/pkg/mining/settings_manager_test.go +++ b/pkg/mining/settings_manager_test.go @@ -30,18 +30,18 @@ func TestSettingsManager_SaveAndLoad_Good(t *testing.T) { tmpDir := t.TempDir() settingsPath := filepath.Join(tmpDir, "settings.json") - sm := &SettingsManager{ + settingsManager := &SettingsManager{ settings: DefaultSettings(), settingsPath: settingsPath, } - sm.settings.Window.Width = 1920 - sm.settings.Window.Height = 1080 - sm.settings.StartOnBoot = true - sm.settings.AutostartMiners = true - sm.settings.CPUThrottlePercent = 50 + settingsManager.settings.Window.Width = 1920 + settingsManager.settings.Window.Height = 1080 + settingsManager.settings.StartOnBoot = true + settingsManager.settings.AutostartMiners = true + settingsManager.settings.CPUThrottlePercent = 50 - err := sm.Save() + err := settingsManager.Save() if err != nil { t.Fatalf("Failed to save settings: %v", err) } @@ -50,40 +50,40 @@ func TestSettingsManager_SaveAndLoad_Good(t *testing.T) { t.Fatal("Settings file was not created") } - sm2 := &SettingsManager{ + loadedSettingsManager := &SettingsManager{ settings: DefaultSettings(), settingsPath: settingsPath, } - err = sm2.Load() + err = loadedSettingsManager.Load() if err != nil { t.Fatalf("Failed to load settings: %v", err) } - if sm2.settings.Window.Width != 1920 { - t.Errorf("Expected width 1920, got %d", sm2.settings.Window.Width) + if loadedSettingsManager.settings.Window.Width != 1920 { + t.Errorf("Expected width 1920, got %d", 
loadedSettingsManager.settings.Window.Width) } - if sm2.settings.Window.Height != 1080 { - t.Errorf("Expected height 1080, got %d", sm2.settings.Window.Height) + if loadedSettingsManager.settings.Window.Height != 1080 { + t.Errorf("Expected height 1080, got %d", loadedSettingsManager.settings.Window.Height) } - if !sm2.settings.StartOnBoot { + if !loadedSettingsManager.settings.StartOnBoot { t.Error("Expected StartOnBoot to be true") } - if !sm2.settings.AutostartMiners { + if !loadedSettingsManager.settings.AutostartMiners { t.Error("Expected AutostartMiners to be true") } - if sm2.settings.CPUThrottlePercent != 50 { - t.Errorf("Expected CPUThrottlePercent 50, got %d", sm2.settings.CPUThrottlePercent) + if loadedSettingsManager.settings.CPUThrottlePercent != 50 { + t.Errorf("Expected CPUThrottlePercent 50, got %d", loadedSettingsManager.settings.CPUThrottlePercent) } } func TestSettingsManager_SaveAndLoad_Bad(t *testing.T) { // Load from a path that does not exist — must return an error. 
- sm := &SettingsManager{ + settingsManager := &SettingsManager{ settings: DefaultSettings(), settingsPath: filepath.Join(t.TempDir(), "does_not_exist", "settings.json"), } - if err := sm.Load(); err == nil { + if err := settingsManager.Load(); err == nil { t.Error("Expected error loading from missing path, got nil") } } @@ -92,17 +92,17 @@ func TestSettingsManager_UpdateWindowState_Good(t *testing.T) { tmpDir := t.TempDir() settingsPath := filepath.Join(tmpDir, "settings.json") - sm := &SettingsManager{ + settingsManager := &SettingsManager{ settings: DefaultSettings(), settingsPath: settingsPath, } - err := sm.UpdateWindowState(100, 200, 800, 600, false) + err := settingsManager.UpdateWindowState(100, 200, 800, 600, false) if err != nil { t.Fatalf("Failed to update window state: %v", err) } - state := sm.GetWindowState() + state := settingsManager.GetWindowState() if state.X != 100 { t.Errorf("Expected X 100, got %d", state.X) } @@ -121,17 +121,17 @@ func TestSettingsManager_SetCPUThrottle_Good(t *testing.T) { tmpDir := t.TempDir() settingsPath := filepath.Join(tmpDir, "settings.json") - sm := &SettingsManager{ + settingsManager := &SettingsManager{ settings: DefaultSettings(), settingsPath: settingsPath, } - err := sm.SetCPUThrottle(true, 30) + err := settingsManager.SetCPUThrottle(true, 30) if err != nil { t.Fatalf("Failed to set CPU throttle: %v", err) } - settings := sm.Get() + settings := settingsManager.Get() if !settings.EnableCPUThrottle { t.Error("Expected EnableCPUThrottle to be true") } @@ -145,20 +145,20 @@ func TestSettingsManager_SetCPUThrottle_Bad(t *testing.T) { tmpDir := t.TempDir() settingsPath := filepath.Join(tmpDir, "settings.json") - sm := &SettingsManager{ + settingsManager := &SettingsManager{ settings: DefaultSettings(), settingsPath: settingsPath, } - if err := sm.SetCPUThrottle(true, 30); err != nil { + if err := settingsManager.SetCPUThrottle(true, 30); err != nil { t.Fatalf("Setup: failed to set initial throttle: %v", err) } - if err 
:= sm.SetCPUThrottle(true, 150); err != nil { + if err := settingsManager.SetCPUThrottle(true, 150); err != nil { t.Fatalf("Expected no error on invalid percent, got: %v", err) } - settings := sm.Get() + settings := settingsManager.Get() if settings.CPUThrottlePercent != 30 { t.Errorf("Expected CPUThrottlePercent to remain 30 after invalid input, got %d", settings.CPUThrottlePercent) } @@ -168,7 +168,7 @@ func TestSettingsManager_SetMinerDefaults_Good(t *testing.T) { tmpDir := t.TempDir() settingsPath := filepath.Join(tmpDir, "settings.json") - sm := &SettingsManager{ + settingsManager := &SettingsManager{ settings: DefaultSettings(), settingsPath: settingsPath, } @@ -181,12 +181,12 @@ func TestSettingsManager_SetMinerDefaults_Good(t *testing.T) { CPUThrottleThreshold: 90, } - err := sm.SetMinerDefaults(minerDefaults) + err := settingsManager.SetMinerDefaults(minerDefaults) if err != nil { t.Fatalf("Failed to set miner defaults: %v", err) } - settings := sm.Get() + settings := settingsManager.Get() if settings.MinerDefaults.DefaultPool != "stratum+tcp://pool.example.com:3333" { t.Errorf("Expected pool to be set, got %s", settings.MinerDefaults.DefaultPool) } @@ -200,7 +200,7 @@ func TestSettingsManager_ConcurrentAccess_Ugly(t *testing.T) { tmpDir := t.TempDir() settingsPath := filepath.Join(tmpDir, "settings.json") - sm := &SettingsManager{ + settingsManager := &SettingsManager{ settings: DefaultSettings(), settingsPath: settingsPath, } @@ -209,8 +209,8 @@ func TestSettingsManager_ConcurrentAccess_Ugly(t *testing.T) { for i := 0; i < 10; i++ { go func(n int) { for j := 0; j < 100; j++ { - _ = sm.Get() - sm.UpdateWindowState(n*10, n*10, 800+n, 600+n, false) + _ = settingsManager.Get() + settingsManager.UpdateWindowState(n*10, n*10, 800+n, 600+n, false) } done <- true }(i) @@ -220,7 +220,7 @@ func TestSettingsManager_ConcurrentAccess_Ugly(t *testing.T) { <-done } - state := sm.GetWindowState() + state := settingsManager.GetWindowState() if state.Width < 800 || 
state.Width > 900 { t.Errorf("Unexpected width after concurrent access: %d", state.Width) }