diff --git a/cmd/mining/cmd/remote.go b/cmd/mining/cmd/remote.go index 7ea5ac5..b73f043 100644 --- a/cmd/mining/cmd/remote.go +++ b/cmd/mining/cmd/remote.go @@ -29,7 +29,7 @@ var remoteStatusCmd = &cobra.Command{ Use: "status [peer-id]", Short: "Get mining status from remote peers", Long: `Display mining statistics from all connected peers or a specific peer.`, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(command *cobra.Command, args []string) error { remoteController, err := getController() if err != nil { return err @@ -38,17 +38,17 @@ var remoteStatusCmd = &cobra.Command{ if len(args) > 0 { // remote status peer-19f3 shows that peer's stats. peerID := args[0] - peer := findPeerByPartialID(peerID) - if peer == nil { + selectedPeer := findPeerByPartialID(peerID) + if selectedPeer == nil { return fmt.Errorf("peer not found: %s", peerID) } - stats, err := remoteController.GetRemoteStats(peer.ID) + stats, err := remoteController.GetRemoteStats(selectedPeer.ID) if err != nil { return fmt.Errorf("failed to get stats: %w", err) } - printPeerStats(peer, stats) + printPeerStats(selectedPeer, stats) } else { // remote status peer-19f3 shows one peer, while `remote status` shows the fleet. 
allStats := remoteController.GetAllStats() @@ -61,9 +61,9 @@ var remoteStatusCmd = &cobra.Command{ var totalHashrate float64 for peerID, stats := range allStats { - peer := peerRegistry.GetPeer(peerID) - if peer != nil { - printPeerStats(peer, stats) + selectedPeer := peerRegistry.GetPeer(peerID) + if selectedPeer != nil { + printPeerStats(selectedPeer, stats) for _, miner := range stats.Miners { totalHashrate += miner.Hashrate } @@ -84,16 +84,16 @@ var remoteStartCmd = &cobra.Command{ Short: "Start miner on remote peer", Long: `Start a miner on a remote peer using a profile.`, Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - minerType, _ := cmd.Flags().GetString("type") + RunE: func(command *cobra.Command, args []string) error { + minerType, _ := command.Flags().GetString("type") if minerType == "" { return fmt.Errorf("--type is required, for example `xmrig` or `tt-miner`") } - profileID, _ := cmd.Flags().GetString("profile") + profileID, _ := command.Flags().GetString("profile") peerID := args[0] - peer := findPeerByPartialID(peerID) - if peer == nil { + selectedPeer := findPeerByPartialID(peerID) + if selectedPeer == nil { return fmt.Errorf("peer not found: %s", peerID) } @@ -102,8 +102,8 @@ var remoteStartCmd = &cobra.Command{ return err } - fmt.Printf("Starting %s miner on %s with profile %s...\n", minerType, peer.Name, profileID) - if err := remoteController.StartRemoteMiner(peer.ID, minerType, profileID, nil); err != nil { + fmt.Printf("Starting %s miner on %s with profile %s...\n", minerType, selectedPeer.Name, profileID) + if err := remoteController.StartRemoteMiner(selectedPeer.ID, minerType, profileID, nil); err != nil { return fmt.Errorf("failed to start miner: %w", err) } @@ -118,10 +118,10 @@ var remoteStopCmd = &cobra.Command{ Short: "Stop miner on remote peer", Long: `Stop a running miner on a remote peer.`, Args: cobra.MinimumNArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(command 
*cobra.Command, args []string) error { peerID := args[0] - peer := findPeerByPartialID(peerID) - if peer == nil { + selectedPeer := findPeerByPartialID(peerID) + if selectedPeer == nil { return fmt.Errorf("peer not found: %s", peerID) } @@ -129,7 +129,7 @@ var remoteStopCmd = &cobra.Command{ if len(args) > 1 { minerName = args[1] } else { - minerName, _ = cmd.Flags().GetString("miner") + minerName, _ = command.Flags().GetString("miner") } if minerName == "" { @@ -141,8 +141,8 @@ var remoteStopCmd = &cobra.Command{ return err } - fmt.Printf("Stopping miner %s on %s...\n", minerName, peer.Name) - if err := remoteController.StopRemoteMiner(peer.ID, minerName); err != nil { + fmt.Printf("Stopping miner %s on %s...\n", minerName, selectedPeer.Name) + if err := remoteController.StopRemoteMiner(selectedPeer.ID, minerName); err != nil { return fmt.Errorf("failed to stop miner: %w", err) } @@ -157,13 +157,13 @@ var remoteLogsCmd = &cobra.Command{ Short: "Get console logs from remote miner", Long: `Retrieve console output logs from a miner running on a remote peer.`, Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(command *cobra.Command, args []string) error { peerID := args[0] minerName := args[1] - lines, _ := cmd.Flags().GetInt("lines") + lines, _ := command.Flags().GetInt("lines") - peer := findPeerByPartialID(peerID) - if peer == nil { + selectedPeer := findPeerByPartialID(peerID) + if selectedPeer == nil { return fmt.Errorf("peer not found: %s", peerID) } @@ -172,12 +172,12 @@ var remoteLogsCmd = &cobra.Command{ return err } - logLines, err := remoteController.GetRemoteLogs(peer.ID, minerName, lines) + logLines, err := remoteController.GetRemoteLogs(selectedPeer.ID, minerName, lines) if err != nil { return fmt.Errorf("failed to get logs: %w", err) } - fmt.Printf("Logs from %s on %s (%d lines):\n", minerName, peer.Name, len(logLines)) + fmt.Printf("Logs from %s on %s (%d lines):\n", minerName, selectedPeer.Name, 
len(logLines)) fmt.Println("────────────────────────────────────") for _, line := range logLines { fmt.Println(line) @@ -193,10 +193,10 @@ var remoteConnectCmd = &cobra.Command{ Short: "Connect to a remote peer", Long: `Establish a WebSocket connection to a registered peer.`, Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(command *cobra.Command, args []string) error { peerID := args[0] - peer := findPeerByPartialID(peerID) - if peer == nil { + selectedPeer := findPeerByPartialID(peerID) + if selectedPeer == nil { return fmt.Errorf("peer not found: %s", peerID) } @@ -205,8 +205,8 @@ var remoteConnectCmd = &cobra.Command{ return err } - fmt.Printf("Connecting to %s at %s...\n", peer.Name, peer.Address) - if err := remoteController.ConnectToPeer(peer.ID); err != nil { + fmt.Printf("Connecting to %s at %s...\n", selectedPeer.Name, selectedPeer.Address) + if err := remoteController.ConnectToPeer(selectedPeer.ID); err != nil { return fmt.Errorf("failed to connect: %w", err) } @@ -221,10 +221,10 @@ var remoteDisconnectCmd = &cobra.Command{ Short: "Disconnect from a remote peer", Long: `Close the connection to a peer.`, Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(command *cobra.Command, args []string) error { peerID := args[0] - peer := findPeerByPartialID(peerID) - if peer == nil { + selectedPeer := findPeerByPartialID(peerID) + if selectedPeer == nil { return fmt.Errorf("peer not found: %s", peerID) } @@ -233,8 +233,8 @@ var remoteDisconnectCmd = &cobra.Command{ return err } - fmt.Printf("Disconnecting from %s...\n", peer.Name) - if err := remoteController.DisconnectFromPeer(peer.ID); err != nil { + fmt.Printf("Disconnecting from %s...\n", selectedPeer.Name) + if err := remoteController.DisconnectFromPeer(selectedPeer.ID); err != nil { return fmt.Errorf("failed to disconnect: %w", err) } @@ -249,12 +249,12 @@ var remotePingCmd = &cobra.Command{ Short: "Ping a remote peer", 
Long: `Send a ping to a peer and measure round-trip latency.`, Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - count, _ := cmd.Flags().GetInt("count") + RunE: func(command *cobra.Command, args []string) error { + count, _ := command.Flags().GetInt("count") peerID := args[0] - peer := findPeerByPartialID(peerID) - if peer == nil { + selectedPeer := findPeerByPartialID(peerID) + if selectedPeer == nil { return fmt.Errorf("peer not found: %s", peerID) } @@ -263,13 +263,13 @@ var remotePingCmd = &cobra.Command{ return err } - fmt.Printf("Pinging %s (%s)...\n", peer.Name, peer.Address) + fmt.Printf("Pinging %s (%s)...\n", selectedPeer.Name, selectedPeer.Address) var totalRoundTripMillis float64 var successfulPings int for i := 0; i < count; i++ { - rtt, err := remoteController.PingPeer(peer.ID) + rtt, err := remoteController.PingPeer(selectedPeer.ID) if err != nil { fmt.Printf(" Ping %d: timeout\n", i+1) continue diff --git a/cmd/mining/cmd/serve.go b/cmd/mining/cmd/serve.go index 4d8232a..f03a719 100644 --- a/cmd/mining/cmd/serve.go +++ b/cmd/mining/cmd/serve.go @@ -27,7 +27,7 @@ var serveCmd = &cobra.Command{ Use: "serve", Short: "Start the mining service and interactive shell", Long: `Start the mining service, which provides a RESTful API for managing miners, and an interactive shell for CLI commands.`, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(command *cobra.Command, args []string) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -42,7 +42,7 @@ var serveCmd = &cobra.Command{ displayAddress := fmt.Sprintf("%s:%d", displayHostName, port) listenAddress := fmt.Sprintf("%s:%d", host, port) - // manager := getManager() shares the same miner lifecycle state across CLI commands. + // manager := getManager() shares the same miner lifecycle state across `mining start`, `mining stop`, and `mining serve`. 
manager := getManager() service, err := mining.NewService(manager, listenAddress, displayAddress, namespace) @@ -71,27 +71,27 @@ var serveCmd = &cobra.Command{ scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { - line := scanner.Text() - if line == "" { + inputLine := scanner.Text() + if inputLine == "" { fmt.Print(">> ") continue } - if strings.ToLower(line) == "exit" || strings.ToLower(line) == "quit" { + if strings.ToLower(inputLine) == "exit" || strings.ToLower(inputLine) == "quit" { fmt.Println("Exiting...") cancel() return } - parts := strings.Fields(line) - if len(parts) == 0 { + tokens := strings.Fields(inputLine) + if len(tokens) == 0 { fmt.Print(">> ") continue } - command := strings.ToLower(parts[0]) - commandArgs := parts[1:] + shellCommand := strings.ToLower(tokens[0]) + commandArgs := tokens[1:] - switch command { + switch shellCommand { case "start": if len(commandArgs) < 3 { fmt.Println("Usage: start ") @@ -128,7 +128,7 @@ var serveCmd = &cobra.Command{ LogOutput: true, } - // config.Validate() // rejects malformed pool and wallet values before the miner starts. + // config.Validate() rejects malformed pool and wallet values before the miner starts. if err := config.Validate(); err != nil { fmt.Fprintf(os.Stderr, "Error: Invalid configuration: %v\n", err) fmt.Print(">> ") @@ -187,7 +187,7 @@ var serveCmd = &cobra.Command{ } } default: - fmt.Fprintf(os.Stderr, "Unknown command: %s. Only 'start', 'status', 'stop', 'list' are directly supported in this shell.\n", command) + fmt.Fprintf(os.Stderr, "Unknown command: %s. 
Only 'start', 'status', 'stop', 'list' are directly supported in this shell.\n", shellCommand) fmt.Fprintf(os.Stderr, "For other commands, please run them directly from your terminal, for example `mining doctor`.\n") } fmt.Print(">> ") diff --git a/cmd/mining/cmd/start.go b/cmd/mining/cmd/start.go index d68aca4..07520c9 100644 --- a/cmd/mining/cmd/start.go +++ b/cmd/mining/cmd/start.go @@ -19,7 +19,7 @@ var startCmd = &cobra.Command{ Short: "Start a new miner", Long: `Start a new miner with the specified configuration.`, Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(command *cobra.Command, args []string) error { minerType := args[0] config := &mining.Config{ Pool: poolAddress, diff --git a/pkg/mining/container.go b/pkg/mining/container.go index 8b211cf..8ecdb00 100644 --- a/pkg/mining/container.go +++ b/pkg/mining/container.go @@ -40,17 +40,17 @@ func DefaultContainerConfig() ContainerConfig { // container := NewContainer(DefaultContainerConfig()) // container.Initialize(ctx); container.Start(ctx); defer container.Shutdown(ctx) type Container struct { - config ContainerConfig - mutex sync.RWMutex - manager ManagerInterface - profileManager *ProfileManager - nodeService *NodeService - eventHub *EventHub - service *Service - hashrateStore database.HashrateStore - initialized bool + config ContainerConfig + mutex sync.RWMutex + manager ManagerInterface + profileManager *ProfileManager + nodeService *NodeService + eventHub *EventHub + service *Service + hashrateStore database.HashrateStore + initialized bool transportStarted bool - shutdownCh chan struct{} + shutdownCh chan struct{} } // container := NewContainer(DefaultContainerConfig()) @@ -64,78 +64,77 @@ func NewContainer(config ContainerConfig) *Container { // if err := container.Initialize(ctx); err != nil { return err } // container.Start(ctx) -func (c *Container) Initialize(ctx context.Context) error { - c.mutex.Lock() - defer c.mutex.Unlock() +func (container 
*Container) Initialize(ctx context.Context) error { + container.mutex.Lock() + defer container.mutex.Unlock() - if c.initialized { + if container.initialized { return ErrInternal("container already initialized") } - // 1. Initialize database (optional) - if c.config.Database.Enabled { - if err := database.Initialize(c.config.Database); err != nil { + // database.Initialize(container.config.Database) enables HTTP handlers like GET /api/v1/mining/status to persist hashrate data. + if container.config.Database.Enabled { + if err := database.Initialize(container.config.Database); err != nil { return ErrInternal("failed to initialize database").WithCause(err) } - c.hashrateStore = database.DefaultStore() - logging.Info("database initialized", logging.Fields{"retention_days": c.config.Database.RetentionDays}) + container.hashrateStore = database.DefaultStore() + logging.Info("database initialized", logging.Fields{"retention_days": container.config.Database.RetentionDays}) } else { - c.hashrateStore = database.NopStore() + container.hashrateStore = database.NopStore() logging.Info("database disabled, using no-op store", nil) } - // 2. Initialize profile manager var err error - c.profileManager, err = NewProfileManager() + // container.profileManager backs POST /api/v1/mining/profiles; unlike the Service fallback, a NewProfileManager() failure here aborts Initialize with ErrInternal. + container.profileManager, err = NewProfileManager() if err != nil { return ErrInternal("failed to initialize profile manager").WithCause(err) } - // 3. Initialize miner manager - if c.config.SimulationMode { - c.manager = NewManagerForSimulation() + // NewManagerForSimulation() keeps `mining serve` and `mining remote status` pointed at simulated miners during local development. + if container.config.SimulationMode { + container.manager = NewManagerForSimulation() } else { - c.manager = NewManager() + container.manager = NewManager() } - // 4.
Initialize node service (optional - P2P features) - c.nodeService, err = NewNodeService() + // nodeService, err := NewNodeService() enables remote peer commands such as `mining remote status peer-19f3`. + container.nodeService, err = NewNodeService() if err != nil { logging.Warn("node service unavailable", logging.Fields{"error": err}) - // Continue without node service - P2P features will be unavailable } - // 5. Initialize event hub for WebSocket - c.eventHub = NewEventHub() + // NewEventHub() powers GET /ws/events for browsers that watch miner start and stop events. + container.eventHub = NewEventHub() - // Wire up event hub to manager - if concreteManager, ok := c.manager.(*Manager); ok { - concreteManager.SetEventHub(c.eventHub) + // concreteManager.SetEventHub(container.eventHub) lets GET /ws/events stream miner lifecycle updates. + if concreteManager, ok := container.manager.(*Manager); ok { + concreteManager.SetEventHub(container.eventHub) } - c.initialized = true + container.initialized = true logging.Info("service container initialized", nil) return nil } // if err := container.Start(ctx); err != nil { return err } -func (c *Container) Start(ctx context.Context) error { - c.mutex.RLock() - defer c.mutex.RUnlock() +func (container *Container) Start(ctx context.Context) error { + container.mutex.RLock() + defer container.mutex.RUnlock() - if !c.initialized { + if !container.initialized { return ErrInternal("container not initialized") } - // Start event hub - go c.eventHub.Run() + // container.eventHub.Run() keeps `/ws/events` clients connected while the API is serving requests. + go container.eventHub.Run() - // Start node transport if available - if c.nodeService != nil { - if err := c.nodeService.StartTransport(); err != nil { + // container.nodeService.StartTransport() enables `mining remote connect peer-19f3` when peer transport is configured. 
+ if container.nodeService != nil { + if err := container.nodeService.StartTransport(); err != nil { logging.Warn("failed to start node transport", logging.Fields{"error": err}) } else { - c.transportStarted = true + container.transportStarted = true } } @@ -144,11 +143,11 @@ func (c *Container) Start(ctx context.Context) error { } // defer container.Shutdown(ctx) // safe to call multiple times -func (c *Container) Shutdown(ctx context.Context) error { - c.mutex.Lock() - defer c.mutex.Unlock() +func (container *Container) Shutdown(ctx context.Context) error { + container.mutex.Lock() + defer container.mutex.Unlock() - if !c.initialized { + if !container.initialized { return nil } @@ -156,36 +155,36 @@ func (c *Container) Shutdown(ctx context.Context) error { var errs []error - // 1. Stop service (HTTP server) - if c.service != nil { + // container.service is stopped by the caller so `mining serve` can close the HTTP server and shell together. + if container.service != nil { // Service shutdown is handled externally } - // 2. Stop node transport (only if it was started) - if c.nodeService != nil && c.transportStarted { - if err := c.nodeService.StopTransport(); err != nil { + // container.nodeService.StopTransport() tears down peer connectivity after `mining remote connect` sessions finish. + if container.nodeService != nil && container.transportStarted { + if err := container.nodeService.StopTransport(); err != nil { errs = append(errs, ErrInternal("node transport shutdown failed").WithCause(err)) } - c.transportStarted = false + container.transportStarted = false } - // 3. Stop event hub - if c.eventHub != nil { - c.eventHub.Stop() + // container.eventHub.Stop() closes `/ws/events` listeners before process exit. + if container.eventHub != nil { + container.eventHub.Stop() } - // 4. Stop miner manager - if concreteManager, ok := c.manager.(*Manager); ok { + // concreteManager.Stop() stops miners started through `mining start xmrig`. 
+ if concreteManager, ok := container.manager.(*Manager); ok { concreteManager.Stop() } - // 5. Close database + // database.Close() flushes the hashrate store used by GET /api/v1/mining/miners/xmrig/history. if err := database.Close(); err != nil { errs = append(errs, ErrInternal("database shutdown failed").WithCause(err)) } - c.initialized = false - close(c.shutdownCh) + container.initialized = false + close(container.shutdownCh) if len(errs) > 0 { return ErrInternal("shutdown completed with errors").WithCause(errs[0]) @@ -197,59 +196,59 @@ func (c *Container) Shutdown(ctx context.Context) error { // miner := container.Manager() // miner.StartMiner(ctx, "xmrig", config) -func (c *Container) Manager() ManagerInterface { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.manager +func (container *Container) Manager() ManagerInterface { + container.mutex.RLock() + defer container.mutex.RUnlock() + return container.manager } // profileManager := container.ProfileManager() // profileManager.SaveProfile("eth-main", config) -func (c *Container) ProfileManager() *ProfileManager { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.profileManager +func (container *Container) ProfileManager() *ProfileManager { + container.mutex.RLock() + defer container.mutex.RUnlock() + return container.profileManager } -// nodeService := container.NodeService() // nil if P2P is unavailable +// nodeService := container.NodeService() // nil when `mining remote status` should stay local-only. 
// nodeService.GetPeers() -func (c *Container) NodeService() *NodeService { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.nodeService +func (container *Container) NodeService() *NodeService { + container.mutex.RLock() + defer container.mutex.RUnlock() + return container.nodeService } // eventHub := container.EventHub() // eventHub.Broadcast(event) -func (c *Container) EventHub() *EventHub { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.eventHub +func (container *Container) EventHub() *EventHub { + container.mutex.RLock() + defer container.mutex.RUnlock() + return container.eventHub } // store := container.HashrateStore() // store.RecordHashrate("xmrig", 1234.5) -func (c *Container) HashrateStore() database.HashrateStore { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.hashrateStore +func (container *Container) HashrateStore() database.HashrateStore { + container.mutex.RLock() + defer container.mutex.RUnlock() + return container.hashrateStore } -// container.SetHashrateStore(database.NopStore()) // inject no-op store in tests -func (c *Container) SetHashrateStore(store database.HashrateStore) { - c.mutex.Lock() - defer c.mutex.Unlock() - c.hashrateStore = store +// container.SetHashrateStore(database.NopStore()) // injects a no-op store in tests for GET /api/v1/mining/status. +func (container *Container) SetHashrateStore(store database.HashrateStore) { + container.mutex.Lock() + defer container.mutex.Unlock() + container.hashrateStore = store } -// <-container.ShutdownCh() // blocks until shutdown is complete -func (c *Container) ShutdownCh() <-chan struct{} { - return c.shutdownCh +// <-container.ShutdownCh() // blocks until `mining serve` finishes shutting down. 
+func (container *Container) ShutdownCh() <-chan struct{} { + return container.shutdownCh } // if container.IsInitialized() { container.Start(ctx) } -func (c *Container) IsInitialized() bool { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.initialized +func (container *Container) IsInitialized() bool { + container.mutex.RLock() + defer container.mutex.RUnlock() + return container.initialized } diff --git a/pkg/mining/manager.go b/pkg/mining/manager.go index 81241f1..fb9a6c2 100644 --- a/pkg/mining/manager.go +++ b/pkg/mining/manager.go @@ -141,12 +141,12 @@ func (manager *Manager) initDatabase() { return } - databaseConfiguration := database.Config{ + databaseConfig := database.Config{ Enabled: true, RetentionDays: manager.databaseRetention, } - if err := database.Initialize(databaseConfiguration); err != nil { + if err := database.Initialize(databaseConfig); err != nil { logging.Warn("failed to initialize database", logging.Fields{"error": err}) manager.databaseEnabled = false return @@ -192,7 +192,7 @@ func (manager *Manager) startDBCleanup() { // manager.syncMinersConfig() adds missing entries such as `MinerAutostartConfig{MinerType: "xmrig", Autostart: false}`. 
func (manager *Manager) syncMinersConfig() { - minersConfiguration, err := LoadMinersConfig() + minersConfig, err := LoadMinersConfig() if err != nil { logging.Warn("could not load miners config for sync", logging.Fields{"error": err}) return @@ -202,15 +202,15 @@ func (manager *Manager) syncMinersConfig() { configUpdated := false for _, availableMiner := range availableMiners { - found := false - for _, configuredMiner := range minersConfiguration.Miners { + minerExists := false + for _, configuredMiner := range minersConfig.Miners { if equalFold(configuredMiner.MinerType, availableMiner.Name) { - found = true + minerExists = true break } } - if !found { - minersConfiguration.Miners = append(minersConfiguration.Miners, MinerAutostartConfig{ + if !minerExists { + minersConfig.Miners = append(minersConfig.Miners, MinerAutostartConfig{ MinerType: availableMiner.Name, Autostart: false, Config: nil, // keep the new miner disabled until the user saves a profile @@ -221,7 +221,7 @@ func (manager *Manager) syncMinersConfig() { } if configUpdated { - if err := SaveMinersConfig(minersConfiguration); err != nil { + if err := SaveMinersConfig(minersConfig); err != nil { logging.Warn("failed to save updated miners config", logging.Fields{"error": err}) } } diff --git a/pkg/mining/service.go b/pkg/mining/service.go index a93f7a1..7d62042 100644 --- a/pkg/mining/service.go +++ b/pkg/mining/service.go @@ -388,18 +388,16 @@ func NewService(manager ManagerInterface, listenAddress string, displayAddress s profileManager, err := NewProfileManager() if err != nil { logging.Warn("failed to initialize profile manager", logging.Fields{"error": err}) - // Continue without profile manager - profile features will be degraded - // Create a minimal in-memory profile manager as fallback + // profileManager = &ProfileManager{profiles: map[string]*MiningProfile{}} keeps POST /api/v1/mining/profiles working even without XDG storage. 
profileManager = &ProfileManager{ profiles: make(map[string]*MiningProfile), } } - // NewNodeService() // falls back to miner-only API mode when XDG paths are unavailable + // nodeService, err := NewNodeService() keeps GET /api/v1/mining/status available even when peer transport is unavailable. nodeService, err := NewNodeService() if err != nil { logging.Warn("failed to initialize node service", logging.Fields{"error": err}) - // Continue without node service - P2P features will be unavailable } // NewEventHub() // broadcasts miner events to /ws/events clients @@ -411,7 +409,7 @@ func NewService(manager ManagerInterface, listenAddress string, displayAddress s concreteManager.SetEventHub(eventHub) } - // eventHub.SetStateProvider(...) // returns running miner state after a reconnect + // eventHub.SetStateProvider(...) returns running miner state after a reconnect to GET /ws/events. eventHub.SetStateProvider(func() interface{} { miners := manager.ListMiners() if len(miners) == 0 { @@ -438,7 +436,7 @@ func NewService(manager ManagerInterface, listenAddress string, displayAddress s } }) - // AuthConfigFromEnv() // enables digest auth when MINING_API_USERNAME and MINING_API_PASSWORD are set + // AuthConfigFromEnv() enables digest auth for requests like GET /api/v1/mining/status when credentials are present. authConfig := AuthConfigFromEnv() var auth *DigestAuth if authConfig.Enabled { @@ -467,11 +465,11 @@ func NewService(manager ManagerInterface, listenAddress string, displayAddress s } // service.InitRouter() -// http.Handle("/", service.Router) // embeds the API under a parent HTTP server in Wails +// http.Handle("/", service.Router) // embeds GET /api/v1/mining/status under a parent HTTP server in Wails func (service *Service) InitRouter() { service.Router = gin.Default() - // service.Server.Addr = ":9090" -> serverPort = "9090" for local CORS origins + // service.Server.Addr = ":9090" -> serverPort = "9090" for local CORS origins such as http://localhost:9090. 
serverPort := "9090" // default fallback if service.Server.Addr != "" { if _, port, err := net.SplitHostPort(service.Server.Addr); err == nil && port != "" { @@ -479,7 +477,7 @@ func (service *Service) InitRouter() { } } - // Configure CORS to only allow local origins + // Configure CORS to only allow local origins like http://localhost:4200 and http://wails.localhost. corsConfig := cors.Config{ AllowOrigins: []string{ "http://localhost:4200", // Angular dev server @@ -498,25 +496,25 @@ func (service *Service) InitRouter() { } service.Router.Use(cors.New(corsConfig)) - // service.Router.Use(securityHeadersMiddleware()) // sets security headers on every API response + // service.Router.Use(securityHeadersMiddleware()) // sets security headers on every GET /api/v1/mining/status response. service.Router.Use(securityHeadersMiddleware()) - // service.Router.Use(contentTypeValidationMiddleware()) // rejects POST /miners/xmrig/install without application/json + // service.Router.Use(contentTypeValidationMiddleware()) // rejects POST /api/v1/mining/profiles without application/json. service.Router.Use(contentTypeValidationMiddleware()) - // c.Request.Body = http.MaxBytesReader(..., 1<<20) // caps request bodies at 1 MiB + // c.Request.Body = http.MaxBytesReader(..., 1<<20) // caps bodies for requests like POST /api/v1/mining/profiles. service.Router.Use(func(c *gin.Context) { c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, 1<<20) // 1MB c.Next() }) - // service.Router.Use(csrfMiddleware()) // allows API clients with Authorization or X-Requested-With + // service.Router.Use(csrfMiddleware()) // allows API clients with Authorization or X-Requested-With headers. service.Router.Use(csrfMiddleware()) - // service.Router.Use(requestTimeoutMiddleware(DefaultRequestTimeout)) // aborts stalled requests after 30 seconds + // service.Router.Use(requestTimeoutMiddleware(DefaultRequestTimeout)) // aborts stalled requests like GET /api/v1/mining/history/xmrig. 
service.Router.Use(requestTimeoutMiddleware(DefaultRequestTimeout)) - // service.Router.Use(cacheMiddleware()) // returns Cache-Control: public, max-age=300 for /miners/available + // service.Router.Use(cacheMiddleware()) // returns Cache-Control: public, max-age=300 for GET /api/v1/mining/miners/available. service.Router.Use(cacheMiddleware()) // service.Router.Use(requestIDMiddleware()) // preserves the incoming X-Request-ID or creates a new one