Apply AX naming and comment cleanup

Parent: e3986fb064
Commit: 9102b25f55
16 changed files with 421 additions and 425 deletions
@@ -29,7 +29,7 @@ func validateConfigPath(configPath string) error {
 	return nil
 }
 
-// doctorCmd represents the doctor command
+// doctorCmd.Use == "doctor" and RunE refreshes the local installation cache.
 var doctorCmd = &cobra.Command{
 	Use: "doctor",
 	Short: "Check and refresh the status of installed miners",

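The rewritten comments in this commit follow one convention: the first line states the command's Use string and what its RunE does. A minimal sketch of the convention on a hypothetical command (exampleCmd and its wiring are illustrative, not part of this commit):

// exampleCmd.Use == "example" and RunE prints a greeting.
var exampleCmd = &cobra.Command{
	Use:   "example",
	Short: "Print a greeting",
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Println("hello")
		return nil
	},
}
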
@@ -5,12 +5,12 @@ import (
 	"runtime"
 	"time"
 
-	"github.com/Masterminds/semver/v3"
 	"forge.lthn.ai/Snider/Mining/pkg/mining"
+	"github.com/Masterminds/semver/v3"
 	"github.com/spf13/cobra"
 )
 
-// installCmd represents the install command
+// installCmd.Use == "install [miner_type]" and RunE installs or updates the miner.
 var installCmd = &cobra.Command{
 	Use: "install [miner_type]",
 	Short: "Install or update a miner",
@@ -27,7 +27,7 @@ var installCmd = &cobra.Command{
 			return fmt.Errorf("unknown miner type: %s", minerType)
 		}
 
-		// Check if it's already installed and up-to-date
+		// miner.CheckInstallation() // returns the installed version before deciding whether to update
 		details, err := miner.CheckInstallation()
 		if err == nil && details.IsInstalled {
 			latestVersionStr, err := miner.GetLatestVersion()
@@ -50,7 +50,7 @@ var installCmd = &cobra.Command{
 			return fmt.Errorf("failed to install/update miner: %w", err)
 		}
 
-		// Get fresh details after installation
+		// miner.CheckInstallation() // returns the post-install path and version for the success message
 		finalDetails, err := miner.CheckInstallation()
 		if err != nil {
 			return fmt.Errorf("failed to verify installation: %w", err)
@@ -58,7 +58,7 @@ var installCmd = &cobra.Command{
 
 		fmt.Printf("%s installed successfully to %s (version %s).\n", miner.GetName(), finalDetails.Path, finalDetails.Version)
 
-		// Update the cache after a successful installation
+		// updateDoctorCache() // refreshes the cached installation details after a successful install
 		fmt.Println("Updating installation cache...")
 		if err := updateDoctorCache(); err != nil {
 			fmt.Printf("Warning: failed to update doctor cache: %v\n", err)
@@ -68,7 +68,7 @@ var installCmd = &cobra.Command{
 	},
 }
 
-// updateDoctorCache runs the core logic of the doctor command to refresh the cache.
+// updateDoctorCache() // refreshes the cache used by `mining doctor` and `mining update`.
 func updateDoctorCache() error {
 	manager := getManager()
 	availableMiners := manager.ListAvailableMiners()
@@ -92,7 +92,7 @@ func updateDoctorCache() error {
 		allDetails = append(allDetails, details)
 	}
 
-	// Create the SystemInfo struct that the /info endpoint expects
+	// mining.SystemInfo{Timestamp: time.Now(), OS: runtime.GOOS} // matches the cache shape returned by the doctor command
 	systemInfo := &mining.SystemInfo{
 		Timestamp: time.Now(),
 		OS: runtime.GOOS,

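The install flow above fetches miner.GetLatestVersion() and only reinstalls when the installed version is older. A sketch of how that comparison typically looks with the imported Masterminds/semver package (variable names are illustrative; the repo's actual check may differ):

installed, err := semver.NewVersion(details.Version)
if err != nil {
	return fmt.Errorf("bad installed version: %w", err)
}
latest, err := semver.NewVersion(latestVersionStr)
if err != nil {
	return fmt.Errorf("bad latest version: %w", err)
}
if !installed.LessThan(latest) {
	fmt.Println("Already up to date.")
	return nil
}
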
@@ -6,7 +6,7 @@ import (
 	"github.com/spf13/cobra"
 )
 
-// listCmd represents the list command
+// listCmd.Use == "list" and RunE prints running and available miners.
 var listCmd = &cobra.Command{
 	Use: "list",
 	Short: "List running and available miners",

@@ -22,14 +22,14 @@ var (
 	peerRegistryErr error
 )
 
-// nodeCmd represents the node parent command
+// nodeCmd.Use == "node" and RunE groups identity and P2P subcommands.
 var nodeCmd = &cobra.Command{
 	Use: "node",
 	Short: "Manage P2P node identity and connections",
 	Long: `Manage the node's identity, view status, and control P2P networking.`,
 }
 
-// nodeInitCmd initializes a new node identity
+// nodeInitCmd.Use == "init" and RunE creates a node identity.
 var nodeInitCmd = &cobra.Command{
 	Use: "init",
 	Short: "Initialize node identity",
@@ -43,12 +43,12 @@ This creates the node's cryptographic identity for secure P2P communication.`,
 			return fmt.Errorf("--name is required")
 		}
 
-		nm, err := node.NewNodeManager()
+		nodeManager, err := node.NewNodeManager()
 		if err != nil {
 			return fmt.Errorf("failed to create node manager: %w", err)
 		}
 
-		if nm.HasIdentity() {
+		if nodeManager.HasIdentity() {
 			return fmt.Errorf("node identity already exists. Use 'node reset' to create a new one")
 		}
 
@@ -64,11 +64,11 @@ This creates the node's cryptographic identity for secure P2P communication.`,
 			return fmt.Errorf("invalid role: %s (use controller, worker, or dual)", role)
 		}
 
-		if err := nm.GenerateIdentity(name, nodeRole); err != nil {
+		if err := nodeManager.GenerateIdentity(name, nodeRole); err != nil {
 			return fmt.Errorf("failed to generate identity: %w", err)
 		}
 
-		identity := nm.GetIdentity()
+		identity := nodeManager.GetIdentity()
 		fmt.Println("Node identity created successfully!")
 		fmt.Println()
 		fmt.Printf(" ID: %s\n", identity.ID)
@@ -81,24 +81,24 @@ This creates the node's cryptographic identity for secure P2P communication.`,
 	},
 }
 
-// nodeInfoCmd shows current node identity
+// nodeInfoCmd.Use == "info" and RunE prints the current node identity.
 var nodeInfoCmd = &cobra.Command{
 	Use: "info",
 	Short: "Show node identity and status",
 	Long: `Display the current node's identity, role, and connection status.`,
 	RunE: func(cmd *cobra.Command, args []string) error {
-		nm, err := node.NewNodeManager()
+		nodeManager, err := node.NewNodeManager()
 		if err != nil {
 			return fmt.Errorf("failed to create node manager: %w", err)
 		}
 
-		if !nm.HasIdentity() {
+		if !nodeManager.HasIdentity() {
 			fmt.Println("No node identity found.")
 			fmt.Println("Run 'node init --name <name>' to create one.")
 			return nil
 		}
 
-		identity := nm.GetIdentity()
+		identity := nodeManager.GetIdentity()
 		fmt.Println("Node Identity:")
 		fmt.Println()
 		fmt.Printf(" ID: %s\n", identity.ID)
@@ -107,12 +107,12 @@ var nodeInfoCmd = &cobra.Command{
 		fmt.Printf(" Public Key: %s\n", identity.PublicKey)
 		fmt.Printf(" Created: %s\n", identity.CreatedAt.Format(time.RFC3339))
 
-		// Show peer info if available
-		pr, err := node.NewPeerRegistry()
+		// node.NewPeerRegistry() // loads registered peers to print the connected count
+		peerRegistry, err := node.NewPeerRegistry()
 		if err == nil {
 			fmt.Println()
-			fmt.Printf(" Registered Peers: %d\n", pr.Count())
-			connected := pr.GetConnectedPeers()
+			fmt.Printf(" Registered Peers: %d\n", peerRegistry.Count())
+			connected := peerRegistry.GetConnectedPeers()
 			fmt.Printf(" Connected Peers: %d\n", len(connected))
 		}
 
@@ -120,7 +120,7 @@ var nodeInfoCmd = &cobra.Command{
 	},
 }
 
-// nodeServeCmd starts the P2P server
+// nodeServeCmd.Use == "serve" and RunE starts the P2P server.
 var nodeServeCmd = &cobra.Command{
 	Use: "serve",
 	Short: "Start P2P server for remote connections",
@@ -129,16 +129,16 @@ This allows other nodes to connect, send commands, and receive stats.`,
 	RunE: func(cmd *cobra.Command, args []string) error {
 		listen, _ := cmd.Flags().GetString("listen")
 
-		nm, err := node.NewNodeManager()
+		nodeManager, err := node.NewNodeManager()
 		if err != nil {
 			return fmt.Errorf("failed to create node manager: %w", err)
 		}
 
-		if !nm.HasIdentity() {
+		if !nodeManager.HasIdentity() {
 			return fmt.Errorf("no node identity found. Run 'node init --name <name>' first")
 		}
 
-		pr, err := node.NewPeerRegistry()
+		peerRegistry, err := node.NewPeerRegistry()
 		if err != nil {
 			return fmt.Errorf("failed to create peer registry: %w", err)
 		}
@@ -148,43 +148,43 @@ This allows other nodes to connect, send commands, and receive stats.`,
 			config.ListenAddr = listen
 		}
 
-		transport := node.NewTransport(nm, pr, config)
+		transport := node.NewTransport(nodeManager, peerRegistry, config)
 
-		// Create worker to handle incoming messages
-		worker := node.NewWorker(nm, transport)
+		// node.NewWorker(nodeManager, transport) // handles incoming remote commands
+		worker := node.NewWorker(nodeManager, transport)
 		worker.RegisterWithTransport()
 
 		if err := transport.Start(); err != nil {
 			return fmt.Errorf("failed to start transport: %w", err)
 		}
 
-		identity := nm.GetIdentity()
+		identity := nodeManager.GetIdentity()
 		fmt.Printf("P2P server started on %s\n", config.ListenAddr)
 		fmt.Printf("Node ID: %s (%s)\n", identity.ID, identity.Name)
 		fmt.Printf("Role: %s\n", identity.Role)
 		fmt.Println()
 		fmt.Println("Press Ctrl+C to stop...")
 
-		// Set up signal handling for graceful shutdown (including SIGHUP for terminal disconnect)
-		sigChan := make(chan os.Signal, 1)
-		signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
+		// signalChannel captures Ctrl+C and terminal disconnects for a clean shutdown.
+		signalChannel := make(chan os.Signal, 1)
+		signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
 
-		// Wait for shutdown signal
-		sig := <-sigChan
+		// signalChannel <- os.Interrupt // blocks until shutdown is requested
+		sig := <-signalChannel
 		fmt.Printf("\nReceived signal %v, shutting down...\n", sig)
 
-		// Graceful shutdown: stop transport and cleanup resources
+		// transport.Stop() // stops the socket listener before the peer registry is flushed
 		if err := transport.Stop(); err != nil {
 			fmt.Printf("Warning: error during transport shutdown: %v\n", err)
-			// Force cleanup on Stop() failure
+			// peerRegistry.GetConnectedPeers() // clears connected flags when transport shutdown fails
 			fmt.Println("Forcing resource cleanup...")
-			for _, peer := range pr.GetConnectedPeers() {
-				pr.SetConnected(peer.ID, false)
+			for _, peer := range peerRegistry.GetConnectedPeers() {
+				peerRegistry.SetConnected(peer.ID, false)
			}
 		}
 
-		// Ensure peer registry is flushed to disk
-		if err := pr.Close(); err != nil {
+		// peerRegistry.Close() // flushes peer state to disk during shutdown
+		if err := peerRegistry.Close(); err != nil {
 			fmt.Printf("Warning: error closing peer registry: %v\n", err)
 		}
 
@@ -193,7 +193,7 @@ This allows other nodes to connect, send commands, and receive stats.`,
 	},
 }
 
-// nodeResetCmd deletes the node identity
+// nodeResetCmd.Use == "reset" and RunE deletes the node identity.
 var nodeResetCmd = &cobra.Command{
 	Use: "reset",
 	Short: "Delete node identity and start fresh",
@@ -201,12 +201,12 @@ var nodeResetCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) error {
 		force, _ := cmd.Flags().GetBool("force")
 
-		nm, err := node.NewNodeManager()
+		nodeManager, err := node.NewNodeManager()
 		if err != nil {
 			return fmt.Errorf("failed to create node manager: %w", err)
 		}
 
-		if !nm.HasIdentity() {
+		if !nodeManager.HasIdentity() {
 			fmt.Println("No node identity to reset.")
 			return nil
 		}
@@ -219,7 +219,7 @@ var nodeResetCmd = &cobra.Command{
 			return nil
 		}
 
-		if err := nm.Delete(); err != nil {
+		if err := nodeManager.Delete(); err != nil {
 			return fmt.Errorf("failed to delete identity: %w", err)
 		}
 

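The serve shutdown above pairs signal.Notify with a blocking receive. Since Go 1.16 the standard library offers a context-based equivalent; a sketch of that alternative (not what this commit uses):

ctx, stop := signal.NotifyContext(context.Background(),
	os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
defer stop()

// ... start the transport and worker as above ...

<-ctx.Done() // blocks until one of the registered signals arrives
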
@@ -8,16 +8,15 @@ import (
 	"github.com/spf13/cobra"
 )
 
-// Note: findPeerByPartialID is defined in remote.go and used for peer lookup
-
-// peerCmd represents the peer parent command
+// findPeerByPartialID("a1b2c3") // defined in remote.go and used by peer subcommands
+// peerCmd.Use == "peer" and RunE groups peer-management subcommands.
 var peerCmd = &cobra.Command{
 	Use: "peer",
 	Short: "Manage peer nodes",
 	Long: `Add, remove, and manage connections to peer nodes.`,
 }
 
-// peerAddCmd adds a new peer
+// peerAddCmd.Use == "add" and RunE registers a new peer by address.
 var peerAddCmd = &cobra.Command{
 	Use: "add",
 	Short: "Add a peer node",
@@ -31,16 +30,16 @@ to exchange public keys and establish a secure connection.`,
 			return fmt.Errorf("--address is required")
 		}
 
-		nm, err := getNodeManager()
+		nodeManager, err := getNodeManager()
 		if err != nil {
 			return fmt.Errorf("failed to get node manager: %w", err)
 		}
 
-		if !nm.HasIdentity() {
+		if !nodeManager.HasIdentity() {
 			return fmt.Errorf("no node identity found. Run 'node init' first")
 		}
 
-		pr, err := getPeerRegistry()
+		peerRegistry, err := getPeerRegistry()
 		if err != nil {
 			return fmt.Errorf("failed to get peer registry: %w", err)
 		}
@@ -56,7 +55,7 @@ to exchange public keys and establish a secure connection.`,
 			Score: 50,
 		}
 
-		if err := pr.AddPeer(peer); err != nil {
+		if err := peerRegistry.AddPeer(peer); err != nil {
 			return fmt.Errorf("failed to add peer: %w", err)
 		}
 
@@ -66,18 +65,18 @@ to exchange public keys and establish a secure connection.`,
 	},
 }
 
-// peerListCmd lists all registered peers
+// peerListCmd.Use == "list" and RunE prints registered peers.
 var peerListCmd = &cobra.Command{
 	Use: "list",
 	Short: "List registered peers",
 	Long: `Display all registered peers with their connection status.`,
 	RunE: func(cmd *cobra.Command, args []string) error {
-		pr, err := getPeerRegistry()
+		peerRegistry, err := getPeerRegistry()
 		if err != nil {
 			return fmt.Errorf("failed to get peer registry: %w", err)
 		}
 
-		peers := pr.ListPeers()
+		peers := peerRegistry.ListPeers()
 		if len(peers) == 0 {
 			fmt.Println("No peers registered.")
 			fmt.Println("Use 'peer add --address <host:port> --name <name>' to add one.")
@@ -107,7 +106,7 @@ var peerListCmd = &cobra.Command{
 	},
 }
 
-// peerRemoveCmd removes a peer
+// peerRemoveCmd.Use == "remove <peer-id>" and RunE removes the selected peer.
 var peerRemoveCmd = &cobra.Command{
 	Use: "remove <peer-id>",
 	Short: "Remove a peer from registry",
@@ -121,12 +120,12 @@ var peerRemoveCmd = &cobra.Command{
 			return fmt.Errorf("peer not found: %s", peerID)
 		}
 
-		pr, err := getPeerRegistry()
+		peerRegistry, err := getPeerRegistry()
 		if err != nil {
 			return fmt.Errorf("failed to get peer registry: %w", err)
 		}
 
-		if err := pr.RemovePeer(peer.ID); err != nil {
+		if err := peerRegistry.RemovePeer(peer.ID); err != nil {
 			return fmt.Errorf("failed to remove peer: %w", err)
 		}
 
@@ -135,7 +134,7 @@ var peerRemoveCmd = &cobra.Command{
 	},
 }
 
-// peerPingCmd pings a peer
+// peerPingCmd.Use == "ping <peer-id>" and RunE prints a ping placeholder.
 var peerPingCmd = &cobra.Command{
 	Use: "ping <peer-id>",
 	Short: "Ping a peer and update metrics",
@@ -160,7 +159,7 @@ var peerPingCmd = &cobra.Command{
 	},
 }
 
-// peerOptimalCmd shows the optimal peer based on metrics
+// peerOptimalCmd.Use == "optimal" and RunE prints the best peer by score.
 var peerOptimalCmd = &cobra.Command{
 	Use: "optimal",
 	Short: "Show the optimal peer based on metrics",
@@ -169,18 +168,18 @@ ping latency, hop count, geographic distance, and reliability score.`,
 	RunE: func(cmd *cobra.Command, args []string) error {
 		count, _ := cmd.Flags().GetInt("count")
 
-		pr, err := getPeerRegistry()
+		peerRegistry, err := getPeerRegistry()
 		if err != nil {
 			return fmt.Errorf("failed to get peer registry: %w", err)
 		}
 
-		if pr.Count() == 0 {
+		if peerRegistry.Count() == 0 {
 			fmt.Println("No peers registered.")
 			return nil
 		}
 
 		if count == 1 {
-			peer := pr.SelectOptimalPeer()
+			peer := peerRegistry.SelectOptimalPeer()
 			if peer == nil {
 				fmt.Println("No optimal peer found.")
 				return nil
@@ -194,7 +193,7 @@ ping latency, hop count, geographic distance, and reliability score.`,
 			fmt.Printf(" Geo: %.1f km\n", peer.GeoKM)
 			fmt.Printf(" Score: %.1f\n", peer.Score)
 		} else {
-			peers := pr.SelectNearestPeers(count)
+			peers := peerRegistry.SelectNearestPeers(count)
 			if len(peers) == 0 {
 				fmt.Println("No peers found.")
 				return nil
@@ -215,21 +214,21 @@ ping latency, hop count, geographic distance, and reliability score.`,
 func init() {
 	rootCmd.AddCommand(peerCmd)
 
-	// peer add
+	// rootCmd.AddCommand(peerAddCmd) // exposes `peer add --address 10.0.0.2:9090 --name worker-1`
 	peerCmd.AddCommand(peerAddCmd)
 	peerAddCmd.Flags().StringP("address", "a", "", "Peer address (host:port)")
 	peerAddCmd.Flags().StringP("name", "n", "", "Peer name")
 
-	// peer list
+	// rootCmd.AddCommand(peerListCmd) // exposes `peer list`
 	peerCmd.AddCommand(peerListCmd)
 
-	// peer remove
+	// rootCmd.AddCommand(peerRemoveCmd) // exposes `peer remove <peer-id>`
 	peerCmd.AddCommand(peerRemoveCmd)
 
-	// peer ping
+	// rootCmd.AddCommand(peerPingCmd) // exposes `peer ping <peer-id>`
 	peerCmd.AddCommand(peerPingCmd)
 
-	// peer optimal
+	// rootCmd.AddCommand(peerOptimalCmd) // exposes `peer optimal --count 4`
 	peerCmd.AddCommand(peerOptimalCmd)
 	peerOptimalCmd.Flags().IntP("count", "c", 1, "Number of optimal peers to show")
 }

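The `peer optimal` help text says selection weighs ping latency, hop count, geographic distance, and reliability score. A toy composite of those four inputs (the function and its weights are invented for illustration; the real logic lives behind peerRegistry.SelectOptimalPeer):

// compositeScore is hypothetical; higher is better.
func compositeScore(pingMs float64, hops int, geoKM, reliability float64) float64 {
	return reliability - 0.1*pingMs - 2.0*float64(hops) - 0.01*geoKM
}
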
@@ -17,20 +17,20 @@ var (
 	controllerErr error
 )
 
-// remoteCmd represents the remote parent command
+// remoteCmd.Use == "remote" and RunE groups remote node commands.
 var remoteCmd = &cobra.Command{
 	Use: "remote",
 	Short: "Control remote mining nodes",
 	Long: `Send commands to remote worker nodes and retrieve their status.`,
 }
 
-// remoteStatusCmd shows stats from remote peers
+// remoteStatusCmd.Use == "status [peer-id]" and RunE prints remote stats.
 var remoteStatusCmd = &cobra.Command{
 	Use: "status [peer-id]",
 	Short: "Get mining status from remote peers",
 	Long: `Display mining statistics from all connected peers or a specific peer.`,
 	RunE: func(cmd *cobra.Command, args []string) error {
-		ctrl, err := getController()
+		controller, err := getController()
 		if err != nil {
 			return err
 		}
@@ -43,7 +43,7 @@ var remoteStatusCmd = &cobra.Command{
 				return fmt.Errorf("peer not found: %s", peerID)
 			}
 
-			stats, err := ctrl.GetRemoteStats(peer.ID)
+			stats, err := controller.GetRemoteStats(peer.ID)
 			if err != nil {
 				return fmt.Errorf("failed to get stats: %w", err)
 			}
@@ -51,17 +51,17 @@ var remoteStatusCmd = &cobra.Command{
 			printPeerStats(peer, stats)
 		} else {
 			// Get stats from all peers
-			allStats := ctrl.GetAllStats()
+			allStats := controller.GetAllStats()
 			if len(allStats) == 0 {
 				fmt.Println("No connected peers.")
 				return nil
 			}
 
-			pr, _ := getPeerRegistry()
+			peerRegistry, _ := getPeerRegistry()
 			var totalHashrate float64
 
 			for peerID, stats := range allStats {
-				peer := pr.GetPeer(peerID)
+				peer := peerRegistry.GetPeer(peerID)
 				if peer != nil {
 					printPeerStats(peer, stats)
 					for _, miner := range stats.Miners {
@@ -78,7 +78,7 @@ var remoteStatusCmd = &cobra.Command{
 	},
 }
 
-// remoteStartCmd starts a miner on a remote peer
+// remoteStartCmd.Use == "start <peer-id>" and RunE starts a miner on a peer.
 var remoteStartCmd = &cobra.Command{
 	Use: "start <peer-id>",
 	Short: "Start miner on remote peer",
@@ -97,13 +97,13 @@ var remoteStartCmd = &cobra.Command{
 			return fmt.Errorf("peer not found: %s", peerID)
 		}
 
-		ctrl, err := getController()
+		controller, err := getController()
 		if err != nil {
 			return err
 		}
 
 		fmt.Printf("Starting %s miner on %s with profile %s...\n", minerType, peer.Name, profileID)
-		if err := ctrl.StartRemoteMiner(peer.ID, minerType, profileID, nil); err != nil {
+		if err := controller.StartRemoteMiner(peer.ID, minerType, profileID, nil); err != nil {
 			return fmt.Errorf("failed to start miner: %w", err)
 		}
 
@@ -112,7 +112,7 @@ var remoteStartCmd = &cobra.Command{
 	},
 }
 
-// remoteStopCmd stops a miner on a remote peer
+// remoteStopCmd.Use == "stop <peer-id> [miner-name]" and RunE stops a remote miner.
 var remoteStopCmd = &cobra.Command{
 	Use: "stop <peer-id> [miner-name]",
 	Short: "Stop miner on remote peer",
@@ -136,13 +136,13 @@ var remoteStopCmd = &cobra.Command{
 			return fmt.Errorf("miner name required (as argument or --miner flag)")
 		}
 
-		ctrl, err := getController()
+		controller, err := getController()
 		if err != nil {
 			return err
 		}
 
 		fmt.Printf("Stopping miner %s on %s...\n", minerName, peer.Name)
-		if err := ctrl.StopRemoteMiner(peer.ID, minerName); err != nil {
+		if err := controller.StopRemoteMiner(peer.ID, minerName); err != nil {
 			return fmt.Errorf("failed to stop miner: %w", err)
 		}
 
@@ -151,7 +151,7 @@ var remoteStopCmd = &cobra.Command{
 	},
 }
 
-// remoteLogsCmd gets logs from a remote miner
+// remoteLogsCmd.Use == "logs <peer-id> <miner-name>" and RunE prints remote logs.
 var remoteLogsCmd = &cobra.Command{
 	Use: "logs <peer-id> <miner-name>",
 	Short: "Get console logs from remote miner",
@@ -167,12 +167,12 @@ var remoteLogsCmd = &cobra.Command{
 			return fmt.Errorf("peer not found: %s", peerID)
 		}
 
-		ctrl, err := getController()
+		controller, err := getController()
 		if err != nil {
 			return err
 		}
 
-		logLines, err := ctrl.GetRemoteLogs(peer.ID, minerName, lines)
+		logLines, err := controller.GetRemoteLogs(peer.ID, minerName, lines)
 		if err != nil {
 			return fmt.Errorf("failed to get logs: %w", err)
 		}
@@ -187,7 +187,7 @@ var remoteLogsCmd = &cobra.Command{
 	},
 }
 
-// remoteConnectCmd connects to a peer
+// remoteConnectCmd.Use == "connect <peer-id>" and RunE opens a peer connection.
 var remoteConnectCmd = &cobra.Command{
 	Use: "connect <peer-id>",
 	Short: "Connect to a remote peer",
@@ -200,13 +200,13 @@ var remoteConnectCmd = &cobra.Command{
 			return fmt.Errorf("peer not found: %s", peerID)
 		}
 
-		ctrl, err := getController()
+		controller, err := getController()
 		if err != nil {
 			return err
 		}
 
 		fmt.Printf("Connecting to %s at %s...\n", peer.Name, peer.Address)
-		if err := ctrl.ConnectToPeer(peer.ID); err != nil {
+		if err := controller.ConnectToPeer(peer.ID); err != nil {
 			return fmt.Errorf("failed to connect: %w", err)
 		}
 
@@ -215,7 +215,7 @@ var remoteConnectCmd = &cobra.Command{
 	},
 }
 
-// remoteDisconnectCmd disconnects from a peer
+// remoteDisconnectCmd.Use == "disconnect <peer-id>" and RunE closes a peer connection.
 var remoteDisconnectCmd = &cobra.Command{
 	Use: "disconnect <peer-id>",
 	Short: "Disconnect from a remote peer",
@@ -228,13 +228,13 @@ var remoteDisconnectCmd = &cobra.Command{
 			return fmt.Errorf("peer not found: %s", peerID)
 		}
 
-		ctrl, err := getController()
+		controller, err := getController()
 		if err != nil {
 			return err
 		}
 
 		fmt.Printf("Disconnecting from %s...\n", peer.Name)
-		if err := ctrl.DisconnectFromPeer(peer.ID); err != nil {
+		if err := controller.DisconnectFromPeer(peer.ID); err != nil {
 			return fmt.Errorf("failed to disconnect: %w", err)
 		}
 
@@ -243,7 +243,7 @@ var remoteDisconnectCmd = &cobra.Command{
 	},
 }
 
-// remotePingCmd pings a peer
+// remotePingCmd.Use == "ping <peer-id>" and RunE averages multiple ping samples.
 var remotePingCmd = &cobra.Command{
 	Use: "ping <peer-id>",
 	Short: "Ping a remote peer",
@@ -258,7 +258,7 @@ var remotePingCmd = &cobra.Command{
 			return fmt.Errorf("peer not found: %s", peerID)
 		}
 
-		ctrl, err := getController()
+		controller, err := getController()
 		if err != nil {
 			return err
 		}
@@ -269,7 +269,7 @@ var remotePingCmd = &cobra.Command{
 		var successful int
 
 		for i := 0; i < count; i++ {
-			rtt, err := ctrl.PingPeer(peer.ID)
+			rtt, err := controller.PingPeer(peer.ID)
 			if err != nil {
 				fmt.Printf(" Ping %d: timeout\n", i+1)
 				continue
@@ -296,29 +296,29 @@ var remotePingCmd = &cobra.Command{
 func init() {
 	rootCmd.AddCommand(remoteCmd)
 
-	// remote status
+	// remoteCmd.AddCommand(remoteStatusCmd) // exposes `remote status <peer-id>`
 	remoteCmd.AddCommand(remoteStatusCmd)
 
-	// remote start
+	// remoteCmd.AddCommand(remoteStartCmd) // exposes `remote start <peer-id> --type xmrig --profile default`
 	remoteCmd.AddCommand(remoteStartCmd)
 	remoteStartCmd.Flags().StringP("profile", "p", "", "Profile ID to use for starting the miner")
 	remoteStartCmd.Flags().StringP("type", "t", "", "Miner type (e.g., xmrig, tt-miner)")
 
-	// remote stop
+	// remoteCmd.AddCommand(remoteStopCmd) // exposes `remote stop <peer-id> --miner xmrig-1`
 	remoteCmd.AddCommand(remoteStopCmd)
 	remoteStopCmd.Flags().StringP("miner", "m", "", "Miner name to stop")
 
-	// remote logs
+	// remoteCmd.AddCommand(remoteLogsCmd) // exposes `remote logs <peer-id> <miner-name>`
 	remoteCmd.AddCommand(remoteLogsCmd)
 	remoteLogsCmd.Flags().IntP("lines", "n", 100, "Number of log lines to retrieve")
 
-	// remote connect
+	// remoteCmd.AddCommand(remoteConnectCmd) // exposes `remote connect <peer-id>`
 	remoteCmd.AddCommand(remoteConnectCmd)
 
-	// remote disconnect
+	// remoteCmd.AddCommand(remoteDisconnectCmd) // exposes `remote disconnect <peer-id>`
 	remoteCmd.AddCommand(remoteDisconnectCmd)
 
-	// remote ping
+	// remoteCmd.AddCommand(remotePingCmd) // exposes `remote ping <peer-id>`
 	remoteCmd.AddCommand(remotePingCmd)
 	remotePingCmd.Flags().IntP("count", "c", 4, "Number of pings to send")
 }
@@ -326,46 +326,46 @@ func init() {
 // getController returns or creates the controller instance (thread-safe).
 func getController() (*node.Controller, error) {
 	controllerOnce.Do(func() {
-		nm, err := getNodeManager()
+		nodeManager, err := getNodeManager()
 		if err != nil {
 			controllerErr = fmt.Errorf("failed to get node manager: %w", err)
 			return
 		}
 
-		if !nm.HasIdentity() {
+		if !nodeManager.HasIdentity() {
 			controllerErr = fmt.Errorf("no node identity found. Run 'node init' first")
 			return
 		}
 
-		pr, err := getPeerRegistry()
+		peerRegistry, err := getPeerRegistry()
 		if err != nil {
 			controllerErr = fmt.Errorf("failed to get peer registry: %w", err)
 			return
 		}
 
-		// Initialize transport
+		// node.DefaultTransportConfig() // provides the transport settings used by `remote` commands
 		config := node.DefaultTransportConfig()
-		transport = node.NewTransport(nm, pr, config)
-		controller = node.NewController(nm, pr, transport)
+		transport = node.NewTransport(nodeManager, peerRegistry, config)
+		controller = node.NewController(nodeManager, peerRegistry, transport)
 	})
 	return controller, controllerErr
 }
 
-// findPeerByPartialID finds a peer by full or partial ID.
+// findPeerByPartialID("a1b2c3") // returns the matching peer by full or partial ID.
 func findPeerByPartialID(partialID string) *node.Peer {
-	pr, err := getPeerRegistry()
+	peerRegistry, err := getPeerRegistry()
 	if err != nil {
 		return nil
 	}
 
-	// Try exact match first
-	peer := pr.GetPeer(partialID)
+	// peerRegistry.GetPeer(partialID) // exact match first
+	peer := peerRegistry.GetPeer(partialID)
 	if peer != nil {
 		return peer
 	}
 
-	// Try partial match
-	for _, p := range pr.ListPeers() {
+	// peerRegistry.ListPeers() // then fall back to partial ID or exact name matches
+	for _, p := range peerRegistry.ListPeers() {
 		if strings.HasPrefix(p.ID, partialID) {
 			return p
 		}
@@ -378,7 +378,7 @@ func findPeerByPartialID(partialID string) *node.Peer {
 	return nil
 }
 
-// printPeerStats prints formatted stats for a peer.
+// printPeerStats(peer, stats) // formats the remote stats output for `remote status`.
 func printPeerStats(peer *node.Peer, stats *node.StatsPayload) {
 	fmt.Printf("\n%s (%s)\n", peer.Name, peer.ID[:16])
 	fmt.Printf(" Address: %s\n", peer.Address)
@@ -397,7 +397,7 @@ func printPeerStats(peer *node.Peer, stats *node.StatsPayload) {
 	}
 }
 
-// formatDuration formats a duration into a human-readable string.
+// formatDuration(90*time.Minute) // returns "1h 30m"
 func formatDuration(d time.Duration) string {
 	days := int(d.Hours() / 24)
 	hours := int(d.Hours()) % 24

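getController above memoizes its result with sync.Once, so concurrent `remote` subcommands share one transport and controller. A standalone sketch of the same pattern (the Thing type and newThing constructor are placeholders):

var (
	thingOnce sync.Once
	thing     *Thing
	thingErr  error
)

func getThing() (*Thing, error) {
	thingOnce.Do(func() {
		thing, thingErr = newThing() // runs at most once, even across goroutines
	})
	return thing, thingErr
}
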
@@ -11,7 +11,7 @@ var (
 	manager *mining.Manager
 )
 
-// rootCmd represents the base command when called without any subcommands
+// rootCmd.Use == "mining" and rootCmd.Version prints pkg/mining.GetVersion().
 var rootCmd = &cobra.Command{
 	Use: "mining",
 	Short: "Mining CLI - Manage miners with RESTful control",
@@ -20,8 +20,7 @@ It provides commands to start, stop, list, and manage miners with RESTful contro
 	Version: mining.GetVersion(),
 }
 
-// Execute adds all child commands to the root command and sets flags appropriately.
-// This is called by main.main(). It only needs to happen once to the rootCmd.
+// Execute() // runs the root command from main.main().
 func Execute() error {
 	return rootCmd.Execute()
 }
@@ -30,9 +29,8 @@ func init() {
 	cobra.OnInitialize(initManager)
 }
 
-// initManager initializes the miner manager
+// initManager() // skips simulate so `mining simulate` can create its own manager.
 func initManager() {
-	// Skip for commands that create their own manager (like simulate)
 	if len(os.Args) > 1 && os.Args[1] == "simulate" {
 		return
 	}
@@ -41,7 +39,7 @@ func initManager() {
 	}
 }
 
-// getManager returns the singleton manager instance
+// getManager() // returns the shared manager used by `mining start` and `mining stop`.
 func getManager() *mining.Manager {
 	if manager == nil {
 		manager = mining.NewManager()

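cobra.OnInitialize registers initManager to run before any command's RunE, which is why getManager can assume the manager already exists for every command except simulate. A sketch of that ordering with a hypothetical subcommand:

func init() {
	cobra.OnInitialize(func() { fmt.Println("1: initializers run first") })
	rootCmd.AddCommand(&cobra.Command{
		Use: "demo", // hypothetical
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("2: RunE runs after initialization")
			return nil
		},
	})
}
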
@@ -22,7 +22,7 @@ var (
 	namespace string
 )
 
-// serveCmd represents the serve command
+// serveCmd.Use == "serve" and RunE starts the HTTP API and shell.
 var serveCmd = &cobra.Command{
 	Use: "serve",
 	Short: "Start the mining service and interactive shell",
@@ -42,7 +42,7 @@ var serveCmd = &cobra.Command{
 		displayAddr := fmt.Sprintf("%s:%d", displayHost, port)
 		listenAddr := fmt.Sprintf("%s:%d", host, port)
 
-		// Use the global manager instance initialized by initManager.
+		// getManager() // returns the shared manager used by the long-running service.
 		manager := getManager()
 
 		service, err := mining.NewService(manager, listenAddr, displayAddr, namespace)
@@ -50,7 +50,7 @@ var serveCmd = &cobra.Command{
 			return fmt.Errorf("failed to create new service: %w", err)
 		}
 
-		// Start the server in a goroutine
+		// service.ServiceStartup(ctx) // starts the HTTP server without blocking the shell loop.
 		go func() {
 			if err := service.ServiceStartup(ctx); err != nil {
 				fmt.Fprintf(os.Stderr, "Failed to start service: %v\n", err)
@@ -58,11 +58,11 @@ var serveCmd = &cobra.Command{
 			}
 		}()
 
-		// Handle graceful shutdown on Ctrl+C
-		signalChan := make(chan os.Signal, 1)
-		signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
+		// signalChannel captures Ctrl+C so the service can shut down cleanly.
+		signalChannel := make(chan os.Signal, 1)
+		signal.Notify(signalChannel, syscall.SIGINT, syscall.SIGTERM)
 
-		// Start interactive shell in a goroutine
+		// Start the interactive shell in a goroutine.
 		go func() {
 			fmt.Printf("Mining service started on http://%s:%d\n", displayHost, port)
 			fmt.Printf("Swagger documentation is available at http://%s:%d%s/index.html\n", displayHost, port, service.SwaggerUIPath)
@@ -188,7 +188,7 @@ var serveCmd = &cobra.Command{
 				}
 			default:
 				fmt.Fprintf(os.Stderr, "Unknown command: %s. Only 'start', 'status', 'stop', 'list' are directly supported in this shell.\n", command)
-				fmt.Fprintf(os.Stderr, "For other commands, please run them directly from your terminal (e.g., 'miner-ctrl doctor').\n")
+				fmt.Fprintf(os.Stderr, "For other commands, please run them directly from your terminal (e.g., 'mining doctor').\n")
 			}
 			fmt.Print(">> ")
 		}
@@ -200,7 +200,7 @@ var serveCmd = &cobra.Command{
 		}()
 
 		select {
-		case <-signalChan:
+		case <-signalChannel:
 			fmt.Println("\nReceived shutdown signal, stopping service...")
 			cancel()
 		case <-ctx.Done():

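The interactive shell referenced above dispatches a small command set and reprints the `>> ` prompt after each line. A self-contained sketch of that read-dispatch loop shape (bufio-based; the actual implementation may differ):

scanner := bufio.NewScanner(os.Stdin)
fmt.Print(">> ")
for scanner.Scan() {
	command := strings.TrimSpace(scanner.Text())
	switch command {
	case "list":
		// print running and available miners
	case "start", "status", "stop":
		// dispatch to the matching handler
	default:
		fmt.Fprintf(os.Stderr, "Unknown command: %s\n", command)
	}
	fmt.Print(">> ")
}
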
@@ -21,7 +21,7 @@ var (
 	simAlgorithm string
 )
 
-// simulateCmd represents the simulate command
+// simulateCmd.Use == "simulate" and RunE starts the service with fake miners.
 var simulateCmd = &cobra.Command{
 	Use: "simulate",
 	Short: "Start the service with simulated miners for UI testing",
@@ -31,13 +31,13 @@ without requiring actual mining hardware.
 
 Examples:
   # Start with 3 medium-hashrate CPU miners
-  miner-ctrl simulate --count 3 --preset cpu-medium
+  mining simulate --count 3 --preset cpu-medium
 
   # Start with custom hashrate
-  miner-ctrl simulate --count 2 --hashrate 8000 --algorithm rx/0
+  mining simulate --count 2 --hashrate 8000 --algorithm rx/0
 
   # Start with a mix of presets
-  miner-ctrl simulate --count 1 --preset gpu-ethash
+  mining simulate --count 1 --preset gpu-ethash
 
 Available presets:
   cpu-low - Low-end CPU (500 H/s, rx/0)
@@ -63,17 +63,17 @@ Available presets:
 		// Create a dedicated simulation manager without autostarting real miners.
 		manager := mining.NewManagerForSimulation()
 
-		// Create and start simulated miners
+		// getSimulatedConfig(i) // derives one miner profile per index, e.g. sim-cpu-medium-001
 		for i := 0; i < simCount; i++ {
 			config := getSimulatedConfig(i)
 			simMiner := mining.NewSimulatedMiner(config)
 
-			// Start the simulated miner
+			// simMiner.Start(&mining.Config{}) // uses the simulated miner lifecycle, not a real binary
 			if err := simMiner.Start(&mining.Config{}); err != nil {
 				return fmt.Errorf("failed to start simulated miner %d: %w", i, err)
 			}
 
-			// Register with manager
+			// manager.RegisterMiner(simMiner) // exposes the simulated miner through the shared manager
 			if err := manager.RegisterMiner(simMiner); err != nil {
 				return fmt.Errorf("failed to register simulated miner %d: %w", i, err)
 			}
@@ -88,7 +88,7 @@ Available presets:
 			return fmt.Errorf("failed to create new service: %w", err)
 		}
 
-		// Start the server in a goroutine
+		// service.ServiceStartup(ctx) // starts the API server while the simulation loop keeps running.
 		go func() {
 			if err := service.ServiceStartup(ctx); err != nil {
 				fmt.Fprintf(os.Stderr, "Failed to start service: %v\n", err)
@@ -102,12 +102,12 @@ Available presets:
 		fmt.Printf("\nSimulating %d miner(s). Press Ctrl+C to stop.\n", simCount)
 		fmt.Printf("Note: All data is simulated - no actual mining is occurring.\n\n")
 
-		// Handle graceful shutdown on Ctrl+C
-		signalChan := make(chan os.Signal, 1)
-		signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
+		// signalChannel captures Ctrl+C so simulated miners can stop cleanly.
+		signalChannel := make(chan os.Signal, 1)
+		signal.Notify(signalChannel, syscall.SIGINT, syscall.SIGTERM)
 
 		select {
-		case <-signalChan:
+		case <-signalChannel:
 			fmt.Println("\nReceived shutdown signal, stopping simulation...")
 			cancel()
 		case <-ctx.Done():
@@ -156,7 +156,7 @@ func getSimulatedConfig(index int) mining.SimulatedMinerConfig {
 }
 
 func init() {
-	// Seed random for varied simulation
+	// rand.Seed(time.Now().UnixNano()) // varies simulated hash rates between runs
 	rand.Seed(time.Now().UnixNano())
 
 	simulateCmd.Flags().IntVarP(&simCount, "count", "c", 1, "Number of simulated miners to create")
@@ -164,7 +164,7 @@ func init() {
 	simulateCmd.Flags().IntVar(&simHashrate, "hashrate", 0, "Custom base hashrate (overrides preset)")
 	simulateCmd.Flags().StringVar(&simAlgorithm, "algorithm", "", "Custom algorithm (overrides preset)")
 
-	// Reuse serve command flags
+	// simulateCmd.Flags().StringVar(&host, "host", "127.0.0.1", "Host to listen on") // same listen address as `mining serve`
 	simulateCmd.Flags().StringVar(&host, "host", "127.0.0.1", "Host to listen on")
 	simulateCmd.Flags().IntVarP(&port, "port", "p", 9090, "Port to listen on")
 	simulateCmd.Flags().StringVarP(&namespace, "namespace", "n", "/api/v1/mining", "API namespace")

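A note on the seeding above: rand.Seed is deprecated since Go 1.20, where the global generator is seeded automatically; per-miner jitter can use a local source instead. A sketch of jittered simulated hashrate (the ±10% shape is illustrative, not the repo's simulation code):

// jitter varies a base hashrate by ±10% so simulated stats look alive.
func jitter(rng *rand.Rand, base float64) float64 {
	return base * (0.9 + 0.2*rng.Float64())
}

// usage: rng := rand.New(rand.NewSource(time.Now().UnixNano())); h := jitter(rng, 8000)
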
@@ -13,7 +13,7 @@ var (
 	minerWallet string
 )
 
-// startCmd represents the start command
+// startCmd.Use == "start [miner_name]" and RunE starts the requested miner.
 var startCmd = &cobra.Command{
 	Use: "start [miner_name]",
 	Short: "Start a new miner",

@@ -9,7 +9,7 @@ import (
 	"golang.org/x/text/language"
 )
 
-// statusCmd represents the status command
+// statusCmd.Use == "status [miner_name]" and RunE prints live miner stats.
 var statusCmd = &cobra.Command{
 	Use: "status [miner_name]",
 	Short: "Get status of a running miner",

@@ -7,7 +7,7 @@ import (
 	"github.com/spf13/cobra"
 )
 
-// stopCmd represents the stop command
+// stopCmd.Use == "stop [miner_name]" and RunE stops the named miner.
 var stopCmd = &cobra.Command{
 	Use: "stop [miner_name]",
 	Short: "Stop a running miner",

@@ -7,7 +7,7 @@ import (
 	"github.com/spf13/cobra"
 )
 
-// uninstallCmd represents the uninstall command
+// uninstallCmd.Use == "uninstall [miner_type]" and RunE removes the miner.
 var uninstallCmd = &cobra.Command{
 	Use: "uninstall [miner_type]",
 	Short: "Uninstall a miner",
@@ -15,7 +15,7 @@ var uninstallCmd = &cobra.Command{
 	Args: cobra.ExactArgs(1),
 	RunE: func(cmd *cobra.Command, args []string) error {
 		minerType := args[0]
-		manager := getManager() // Assuming getManager() provides the singleton manager instance
+		manager := getManager() // getManager() returns the shared manager used by `mining uninstall`
 
 		fmt.Printf("Uninstalling %s...\n", minerType)
 		if err := manager.UninstallMiner(context.Background(), minerType); err != nil {
@@ -24,8 +24,7 @@ var uninstallCmd = &cobra.Command{
 
 		fmt.Printf("%s uninstalled successfully.\n", minerType)
 
-		// The doctor cache is implicitly updated by the manager's actions,
-		// but an explicit cache update can still be beneficial.
+		// updateDoctorCache() // refreshes the cached install status after uninstalling a miner
 		fmt.Println("Updating installation cache...")
 		if err := updateDoctorCache(); err != nil {
 			fmt.Printf("Warning: failed to update doctor cache: %v\n", err)

@@ -7,13 +7,13 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/Masterminds/semver/v3"
 	"forge.lthn.ai/Snider/Mining/pkg/mining"
+	"github.com/Masterminds/semver/v3"
 	"github.com/adrg/xdg"
 	"github.com/spf13/cobra"
 )
 
-// validateUpdateConfigPath validates that a config path is within the expected XDG config directory
+// validateUpdateConfigPath("/home/alice/.config/lethean-desktop/miners/config.json") // nil
 func validateUpdateConfigPath(configPath string) error {
 	expectedBase := filepath.Join(xdg.ConfigHome, "lethean-desktop")
 	cleanPath := filepath.Clean(configPath)
@@ -23,7 +23,7 @@ func validateUpdateConfigPath(configPath string) error {
 	return nil
 }
 
-// updateCmd represents the update command
+// updateCmd.Use == "update" and RunE checks cached miner versions for upgrades.
 var updateCmd = &cobra.Command{
 	Use: "update",
 	Short: "Check for updates to installed miners",
@@ -48,7 +48,7 @@ var updateCmd = &cobra.Command{
 		}
 		configPath := strings.TrimSpace(string(configPathBytes))
 
-		// Security: Validate that the config path is within the expected directory
+		// validateUpdateConfigPath("/home/alice/.config/lethean-desktop/miners/config.json") // blocks path traversal
 		if err := validateUpdateConfigPath(configPath); err != nil {
 			return fmt.Errorf("security error: %w", err)
 		}
@@ -58,7 +58,7 @@ var updateCmd = &cobra.Command{
 			return fmt.Errorf("could not read cache file from %s: %w", configPath, err)
 		}
 
-		// Fix: Use SystemInfo type (matches what doctor.go saves)
+		// mining.SystemInfo{} // matches what doctor.go writes to the cache file
 		var systemInfo mining.SystemInfo
 		if err := json.Unmarshal(cacheBytes, &systemInfo); err != nil {
 			return fmt.Errorf("could not parse cache file: %w", err)
@@ -76,7 +76,7 @@ var updateCmd = &cobra.Command{
 			minerName = "xmrig"
 			miner = mining.NewXMRigMiner()
 		} else {
-			continue // Skip unknown miners
+			continue // skip miners that do not have an updater yet
 		}
 
 		fmt.Printf("Checking %s... ", minerName)

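validateUpdateConfigPath anchors the cache path under the XDG config directory before any file read. A sketch of the usual Clean-then-prefix check built from the two lines shown above (the prefix comparison is an assumption; the repo's exact rule may differ):

func validateUpdateConfigPath(configPath string) error {
	expectedBase := filepath.Join(xdg.ConfigHome, "lethean-desktop")
	cleanPath := filepath.Clean(configPath)
	// Reject anything that escapes the base directory, e.g. via "..".
	if !strings.HasPrefix(cleanPath, expectedBase+string(filepath.Separator)) {
		return fmt.Errorf("config path %q is outside %q", cleanPath, expectedBase)
	}
	return nil
}
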
@ -74,22 +74,22 @@ type Manager struct {
|
|||
eventHubMutex sync.RWMutex // Separate mutex for eventHub to avoid deadlock with main mutex
|
||||
}
|
||||
|
||||
// m.SetEventHub(eventHub)
|
||||
func (m *Manager) SetEventHub(hub *EventHub) {
|
||||
m.eventHubMutex.Lock()
|
||||
defer m.eventHubMutex.Unlock()
|
||||
m.eventHub = hub
|
||||
// manager.SetEventHub(eventHub)
|
||||
func (manager *Manager) SetEventHub(eventHub *EventHub) {
|
||||
manager.eventHubMutex.Lock()
|
||||
defer manager.eventHubMutex.Unlock()
|
||||
manager.eventHub = eventHub
|
||||
}
|
||||
|
||||
// m.emitEvent(EventMinerStarted, MinerEventData{Name: instanceName})
|
||||
// m.emitEvent(EventMinerError, MinerEventData{Name: instanceName, Error: err.Error()})
|
||||
func (m *Manager) emitEvent(eventType EventType, data interface{}) {
|
||||
m.eventHubMutex.RLock()
|
||||
hub := m.eventHub
|
||||
m.eventHubMutex.RUnlock()
|
||||
// manager.emitEvent(EventMinerStarted, MinerEventData{Name: instanceName})
|
||||
// manager.emitEvent(EventMinerError, MinerEventData{Name: instanceName, Error: err.Error()})
|
||||
func (manager *Manager) emitEvent(eventType EventType, data interface{}) {
|
||||
manager.eventHubMutex.RLock()
|
||||
eventHub := manager.eventHub
|
||||
manager.eventHubMutex.RUnlock()
|
||||
|
||||
if hub != nil {
|
||||
hub.Broadcast(NewEvent(eventType, data))
|
||||
if eventHub != nil {
|
||||
eventHub.Broadcast(NewEvent(eventType, data))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -122,47 +122,47 @@ func NewManagerForSimulation() *Manager {
|
|||
return manager
|
||||
}
|
||||
|
||||
// m.initDatabase() // NewManager() calls this after loading miners.json, for example with Database.Enabled = true
|
||||
func (m *Manager) initDatabase() {
|
||||
// manager.initDatabase() // NewManager() calls this after loading miners.json, for example with Database.Enabled = true
|
||||
func (manager *Manager) initDatabase() {
|
||||
minersConfiguration, err := LoadMinersConfig()
|
||||
if err != nil {
|
||||
logging.Warn("could not load config for database init", logging.Fields{"error": err})
|
||||
return
|
||||
}
|
||||
|
||||
m.databaseEnabled = minersConfiguration.Database.Enabled
|
||||
m.databaseRetention = minersConfiguration.Database.RetentionDays
|
||||
if m.databaseRetention == 0 {
|
||||
m.databaseRetention = 30
|
||||
manager.databaseEnabled = minersConfiguration.Database.Enabled
|
||||
manager.databaseRetention = minersConfiguration.Database.RetentionDays
|
||||
if manager.databaseRetention == 0 {
|
||||
manager.databaseRetention = 30
|
||||
}
|
||||
|
||||
if !m.databaseEnabled {
|
||||
if !manager.databaseEnabled {
|
||||
logging.Debug("database persistence is disabled")
|
||||
return
|
||||
}
|
||||
|
||||
databaseConfiguration := database.Config{
|
||||
Enabled: true,
|
||||
RetentionDays: m.databaseRetention,
|
||||
RetentionDays: manager.databaseRetention,
|
||||
}
|
||||
|
||||
if err := database.Initialize(databaseConfiguration); err != nil {
|
||||
logging.Warn("failed to initialize database", logging.Fields{"error": err})
|
||||
m.databaseEnabled = false
|
||||
manager.databaseEnabled = false
|
||||
return
|
||||
}
|
||||
|
||||
logging.Info("database persistence enabled", logging.Fields{"retention_days": m.databaseRetention})
|
||||
logging.Info("database persistence enabled", logging.Fields{"retention_days": manager.databaseRetention})
|
||||
|
||||
// m.startDBCleanup() // keeps database.Cleanup(30) running after persistence is enabled
|
||||
m.startDBCleanup()
|
||||
// manager.startDBCleanup() // keeps database.Cleanup(30) running after persistence is enabled
|
||||
manager.startDBCleanup()
|
||||
}
|
||||
|
||||
// m.startDBCleanup() // runs database.Cleanup(30) once an hour after persistence is enabled
|
||||
func (m *Manager) startDBCleanup() {
|
||||
m.waitGroup.Add(1)
|
||||
// manager.startDBCleanup() // runs database.Cleanup(30) once an hour after persistence is enabled
|
||||
func (manager *Manager) startDBCleanup() {
|
||||
manager.waitGroup.Add(1)
|
||||
go func() {
|
||||
defer m.waitGroup.Done()
|
||||
defer manager.waitGroup.Done()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logging.Error("panic in database cleanup goroutine", logging.Fields{"panic": r})
|
||||
|
|
@ -173,32 +173,32 @@ func (m *Manager) startDBCleanup() {
|
|||
defer ticker.Stop()
|
||||
|
||||
// database.Cleanup(30) // removes rows older than 30 days during startup
|
||||
if err := database.Cleanup(m.databaseRetention); err != nil {
|
||||
if err := database.Cleanup(manager.databaseRetention); err != nil {
|
||||
logging.Warn("database cleanup failed", logging.Fields{"error": err})
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err := database.Cleanup(m.databaseRetention); err != nil {
|
||||
if err := database.Cleanup(manager.databaseRetention); err != nil {
|
||||
logging.Warn("database cleanup failed", logging.Fields{"error": err})
|
||||
}
|
||||
case <-m.stopChan:
|
||||
case <-manager.stopChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// m.syncMinersConfig() // when miners.json only contains tt-miner, this adds xmrig with Autostart=false
|
||||
func (m *Manager) syncMinersConfig() {
|
||||
// manager.syncMinersConfig() // when miners.json only contains tt-miner, this adds xmrig with Autostart=false
|
||||
func (manager *Manager) syncMinersConfig() {
|
||||
minersConfiguration, err := LoadMinersConfig()
|
||||
if err != nil {
|
||||
logging.Warn("could not load miners config for sync", logging.Fields{"error": err})
|
||||
return
|
||||
}
|
||||
|
||||
availableMiners := m.ListAvailableMiners()
|
||||
availableMiners := manager.ListAvailableMiners()
|
||||
configUpdated := false
|
||||
|
||||
for _, availableMiner := range availableMiners {
|
||||
|
|
@ -227,8 +227,8 @@ func (m *Manager) syncMinersConfig() {
|
|||
}
|
||||
}
|
||||
|
||||
// m.autostartMiners() // NewManager() uses this to start xmrig when miners.json contains Autostart=true
|
||||
func (m *Manager) autostartMiners() {
|
||||
// manager.autostartMiners() // NewManager() uses this to start xmrig when miners.json contains Autostart=true
|
||||
func (manager *Manager) autostartMiners() {
|
||||
minersConfiguration, err := LoadMinersConfig()
|
||||
if err != nil {
|
||||
logging.Warn("could not load miners config for autostart", logging.Fields{"error": err})
|
||||
|
|
@ -238,7 +238,7 @@ func (m *Manager) autostartMiners() {
|
|||
for _, autostartEntry := range minersConfiguration.Miners {
|
||||
if autostartEntry.Autostart && autostartEntry.Config != nil {
|
||||
logging.Info("autostarting miner", logging.Fields{"type": autostartEntry.MinerType})
|
||||
if _, err := m.StartMiner(context.Background(), autostartEntry.MinerType, autostartEntry.Config); err != nil {
|
||||
if _, err := manager.StartMiner(context.Background(), autostartEntry.MinerType, autostartEntry.Config); err != nil {
|
||||
logging.Error("failed to autostart miner", logging.Fields{"type": autostartEntry.MinerType, "error": err})
|
||||
}
|
||||
}
|
||||
|
|
@ -262,7 +262,7 @@ func findAvailablePort() (int, error) {
|
|||
}
|
||||
|
||||
// miner, err := manager.StartMiner(ctx, "xmrig", &Config{Algo: "rx/0"})
|
||||
func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
|
||||
func (manager *Manager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
|
||||
// Check for cancellation before acquiring lock
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
|
@ -270,8 +270,8 @@ func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Conf
|
|||
default:
|
||||
}
|
||||
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
manager.mutex.Lock()
|
||||
defer manager.mutex.Unlock()
|
||||
|
||||
if config == nil {
|
||||
config = &Config{}
|
||||
|
|
@ -291,7 +291,7 @@ func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Conf
|
|||
instanceName = instanceName + "-" + strconv.FormatInt(time.Now().UnixNano()%1000, 10)
|
||||
}
|
||||
|
||||
if _, exists := m.miners[instanceName]; exists {
|
||||
if _, exists := manager.miners[instanceName]; exists {
|
||||
return nil, ErrMinerExists(instanceName)
|
||||
}
|
||||
|
||||
|
|
@ -323,23 +323,23 @@ func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Conf
|
|||
}
|
||||
}
|
||||
|
||||
// m.emitEvent(EventMinerStarting, MinerEventData{Name: "xmrig-rx_0"}) // fires before miner.Start(config)
|
||||
m.emitEvent(EventMinerStarting, MinerEventData{
|
||||
// manager.emitEvent(EventMinerStarting, MinerEventData{Name: "xmrig-rx_0"}) // fires before miner.Start(config)
|
||||
manager.emitEvent(EventMinerStarting, MinerEventData{
|
||||
Name: instanceName,
|
||||
})
|
||||
|
||||
if err := miner.Start(config); err != nil {
|
||||
// m.emitEvent(EventMinerError, MinerEventData{Name: "xmrig-rx_0", Error: err.Error()})
|
||||
m.emitEvent(EventMinerError, MinerEventData{
|
||||
// manager.emitEvent(EventMinerError, MinerEventData{Name: "xmrig-rx_0", Error: err.Error()})
|
||||
manager.emitEvent(EventMinerError, MinerEventData{
|
||||
Name: instanceName,
|
||||
Error: err.Error(),
|
||||
})
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.miners[instanceName] = miner
|
||||
manager.miners[instanceName] = miner
|
||||
|
||||
if err := m.updateMinerConfig(minerType, true, config); err != nil {
|
||||
if err := manager.updateMinerConfig(minerType, true, config); err != nil {
|
||||
logging.Warn("failed to save miner config for autostart", logging.Fields{"error": err})
|
||||
}
|
||||
|
||||
|
|
@ -347,7 +347,7 @@ func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Conf
|
|||
logToSyslog(logMessage)
|
||||
|
||||
// Emit started event
|
||||
m.emitEvent(EventMinerStarted, MinerEventData{
|
||||
manager.emitEvent(EventMinerStarted, MinerEventData{
|
||||
Name: instanceName,
|
||||
})
|
||||
|
||||
|
|
@ -357,7 +357,7 @@ func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Conf
|
|||
|
||||
// manager.UninstallMiner(ctx, "xmrig") // stops all xmrig instances and removes config
|
||||
// manager.UninstallMiner(ctx, "ttminer") // stops all ttminer instances and removes config
|
||||
func (m *Manager) UninstallMiner(ctx context.Context, minerType string) error {
|
||||
func (manager *Manager) UninstallMiner(ctx context.Context, minerType string) error {
|
||||
// Check for cancellation before acquiring lock
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
|
@ -365,11 +365,11 @@ func (m *Manager) UninstallMiner(ctx context.Context, minerType string) error {
|
|||
default:
|
||||
}
|
||||
|
||||
m.mutex.Lock()
|
||||
manager.mutex.Lock()
|
||||
// Collect miners to stop and delete (can't modify map during iteration)
|
||||
minersToDelete := make([]string, 0)
|
||||
minersToStop := make([]Miner, 0)
|
||||
for name, runningMiner := range m.miners {
|
||||
for name, runningMiner := range manager.miners {
|
||||
if xmrigInstance, ok := runningMiner.(*XMRigMiner); ok && equalFold(xmrigInstance.ExecutableName, minerType) {
|
||||
minersToStop = append(minersToStop, runningMiner)
|
||||
minersToDelete = append(minersToDelete, name)
|
||||
|
|
@ -381,9 +381,9 @@ func (m *Manager) UninstallMiner(ctx context.Context, minerType string) error {
|
|||
}
|
||||
// Delete from map first, then release lock before stopping (Stop may block)
|
||||
for _, name := range minersToDelete {
|
||||
delete(m.miners, name)
|
||||
delete(manager.miners, name)
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
manager.mutex.Unlock()
|
||||
|
||||
// Stop miners outside the lock to avoid blocking
|
||||
for i, miner := range minersToStop {
|
||||
|
|
@@ -413,8 +413,8 @@ func (m *Manager) UninstallMiner(ctx context.Context, minerType string) error {
    })
}

// m.updateMinerConfig("xmrig", true, config) // saves Autostart=true and the last-used config back to miners.json
func (m *Manager) updateMinerConfig(minerType string, autostart bool, config *Config) error {
// manager.updateMinerConfig("xmrig", true, config) // saves Autostart=true and the last-used config back to miners.json
func (manager *Manager) updateMinerConfig(minerType string, autostart bool, config *Config) error {
    return UpdateMinersConfig(func(configuration *MinersConfig) error {
        found := false
        for i, autostartEntry := range configuration.Miners {

@@ -439,7 +439,7 @@ func (m *Manager) updateMinerConfig(minerType string, autostart bool, config *Co

// manager.StopMiner(ctx, "xmrig/monero")
// manager.StopMiner(ctx, "ttminer/rtx4090") // still removes if already stopped
func (m *Manager) StopMiner(ctx context.Context, name string) error {
func (manager *Manager) StopMiner(ctx context.Context, name string) error {
    // Check for cancellation before acquiring lock
    select {
    case <-ctx.Done():

@@ -447,14 +447,14 @@ func (m *Manager) StopMiner(ctx context.Context, name string) error {
    default:
    }

    m.mutex.Lock()
    defer m.mutex.Unlock()
    manager.mutex.Lock()
    defer manager.mutex.Unlock()

    miner, exists := m.miners[name]
    miner, exists := manager.miners[name]
    if !exists {
        for minerKey := range m.miners {
        for minerKey := range manager.miners {
            if hasPrefix(minerKey, name) {
                miner = m.miners[minerKey]
                miner = manager.miners[minerKey]
                name = minerKey
                exists = true
                break

@@ -467,7 +467,7 @@ func (m *Manager) StopMiner(ctx context.Context, name string) error {
    }

    // Emit stopping event
    m.emitEvent(EventMinerStopping, MinerEventData{
    manager.emitEvent(EventMinerStopping, MinerEventData{
        Name: name,
    })

@@ -476,14 +476,14 @@ func (m *Manager) StopMiner(ctx context.Context, name string) error {
    stopErr := miner.Stop()

    // Always remove from map - if it's not running, we still want to clean it up
    delete(m.miners, name)
    delete(manager.miners, name)

    // Emit stopped event
    reason := "stopped"
    if stopErr != nil && stopErr.Error() != "miner is not running" {
        reason = stopErr.Error()
    }
    m.emitEvent(EventMinerStopped, MinerEventData{
    manager.emitEvent(EventMinerStopped, MinerEventData{
        Name:   name,
        Reason: reason,
    })

@@ -497,25 +497,25 @@ func (m *Manager) StopMiner(ctx context.Context, name string) error {
    return nil
}

// miner, err := m.GetMiner("xmrig-randomx") // returns ErrMinerNotFound when the name is missing
// miner, err := manager.GetMiner("xmrig-randomx") // returns ErrMinerNotFound when the name is missing
// if err != nil { /* miner not found */ }
func (m *Manager) GetMiner(name string) (Miner, error) {
    m.mutex.RLock()
    defer m.mutex.RUnlock()
    miner, exists := m.miners[name]
func (manager *Manager) GetMiner(name string) (Miner, error) {
    manager.mutex.RLock()
    defer manager.mutex.RUnlock()
    miner, exists := manager.miners[name]
    if !exists {
        return nil, ErrMinerNotFound(name)
    }
    return miner, nil
}

// miners := m.ListMiners()
// miners := manager.ListMiners()
// for _, miner := range miners { logging.Info(miner.GetName()) }
func (m *Manager) ListMiners() []Miner {
    m.mutex.RLock()
    defer m.mutex.RUnlock()
    miners := make([]Miner, 0, len(m.miners))
    for _, miner := range m.miners {
func (manager *Manager) ListMiners() []Miner {
    manager.mutex.RLock()
    defer manager.mutex.RUnlock()
    miners := make([]Miner, 0, len(manager.miners))
    for _, miner := range manager.miners {
        miners = append(miners, miner)
    }
    return miners

@@ -523,21 +523,21 @@ func (m *Manager) ListMiners() []Miner {

// simulatedMiner := NewSimulatedMiner(SimulatedMinerConfig{Name: "sim-rx0"})
// if err := manager.RegisterMiner(simulatedMiner); err != nil { return err }
func (m *Manager) RegisterMiner(miner Miner) error {
func (manager *Manager) RegisterMiner(miner Miner) error {
    name := miner.GetName()

    m.mutex.Lock()
    if _, exists := m.miners[name]; exists {
        m.mutex.Unlock()
    manager.mutex.Lock()
    if _, exists := manager.miners[name]; exists {
        manager.mutex.Unlock()
        return ErrMinerExists(name)
    }
    m.miners[name] = miner
    m.mutex.Unlock()
    manager.miners[name] = miner
    manager.mutex.Unlock()

    logging.Info("registered miner", logging.Fields{"name": name})

    // Emit miner started event (outside lock)
    m.emitEvent(EventMinerStarted, map[string]interface{}{
    manager.emitEvent(EventMinerStarted, map[string]interface{}{
        "name": name,
    })

@@ -545,7 +545,7 @@ func (m *Manager) RegisterMiner(miner Miner) error {
}

// for _, availableMiner := range manager.ListAvailableMiners() { logging.Info(availableMiner.Name, nil) }
func (m *Manager) ListAvailableMiners() []AvailableMiner {
func (manager *Manager) ListAvailableMiners() []AvailableMiner {
    return []AvailableMiner{
        {
            Name: "xmrig",

@@ -558,11 +558,11 @@ func (m *Manager) ListAvailableMiners() []AvailableMiner {
    }
}

// m.startStatsCollection() // NewManager() uses this to poll each running miner every HighResolutionInterval
func (m *Manager) startStatsCollection() {
    m.waitGroup.Add(1)
// manager.startStatsCollection() // NewManager() uses this to poll each running miner every HighResolutionInterval
func (manager *Manager) startStatsCollection() {
    manager.waitGroup.Add(1)
    go func() {
        defer m.waitGroup.Done()
        defer manager.waitGroup.Done()
        defer func() {
            if r := recover(); r != nil {
                logging.Error("panic in stats collection goroutine", logging.Fields{"panic": r})

@@ -574,8 +574,8 @@ func (m *Manager) startStatsCollection() {
        for {
            select {
            case <-ticker.C:
                m.collectMinerStats()
            case <-m.stopChan:
                manager.collectMinerStats()
            case <-manager.stopChan:
                return
            }
        }

@@ -585,12 +585,12 @@ func (m *Manager) startStatsCollection() {
// ctx, cancel := context.WithTimeout(ctx, statsCollectionTimeout)
const statsCollectionTimeout = 5 * time.Second

// m.collectMinerStats() // the stats ticker calls this to poll all running miners in parallel
func (m *Manager) collectMinerStats() {
// manager.collectMinerStats() // the stats ticker calls this to poll all running miners in parallel
func (manager *Manager) collectMinerStats() {
    // Take a snapshot of miners under read lock - minimize lock duration
    m.mutex.RLock()
    if len(m.miners) == 0 {
        m.mutex.RUnlock()
    manager.mutex.RLock()
    if len(manager.miners) == 0 {
        manager.mutex.RUnlock()
        return
    }

@@ -598,13 +598,13 @@ func (m *Manager) collectMinerStats() {
        miner     Miner
        minerType string
    }
    miners := make([]minerInfo, 0, len(m.miners))
    for _, miner := range m.miners {
    miners := make([]minerInfo, 0, len(manager.miners))
    for _, miner := range manager.miners {
        // Use the miner's GetType() method for proper type identification
        miners = append(miners, minerInfo{miner: miner, minerType: miner.GetType()})
    }
    databaseEnabled := m.databaseEnabled // Copy to avoid holding lock
    m.mutex.RUnlock()
    databaseEnabled := manager.databaseEnabled // Copy to avoid holding lock
    manager.mutex.RUnlock()

    now := time.Now()

@@ -622,7 +622,7 @@ func (m *Manager) collectMinerStats() {
                })
            }
        }()
            m.collectSingleMinerStats(miner, minerType, now, databaseEnabled)
            manager.collectSingleMinerStats(miner, minerType, now, databaseEnabled)
        }(entry.miner, entry.minerType)
    }
    collectionWaitGroup.Wait()

@@ -634,8 +634,8 @@ const statsRetryCount = 2
// time.Sleep(statsRetryDelay) // between retry attempts
const statsRetryDelay = 500 * time.Millisecond

// m.collectSingleMinerStats(miner, "xmrig", time.Now(), true) // retries up to statsRetryCount times; persists to DB if databaseEnabled
func (m *Manager) collectSingleMinerStats(miner Miner, minerType string, now time.Time, databaseEnabled bool) {
// manager.collectSingleMinerStats(miner, "xmrig", time.Now(), true) // retries up to statsRetryCount times; persists to DB if databaseEnabled
func (manager *Manager) collectSingleMinerStats(miner Miner, minerType string, now time.Time, databaseEnabled bool) {
    minerName := miner.GetName()

    var stats *PerformanceMetrics

@@ -701,7 +701,7 @@ func (m *Manager) collectSingleMinerStats(miner Miner, minerType string, now tim
    }

    // Emit stats event for real-time WebSocket updates
    m.emitEvent(EventMinerStats, MinerStatsData{
    manager.emitEvent(EventMinerStats, MinerStatsData{
        Name:     minerName,
        Hashrate: stats.Hashrate,
        Shares:   stats.Shares,

@@ -714,10 +714,10 @@ func (m *Manager) collectSingleMinerStats(miner Miner, minerType string, now tim

// points, err := manager.GetMinerHashrateHistory("xmrig")
// for _, point := range points { logging.Info("hashrate", logging.Fields{"time": point.Timestamp, "rate": point.Hashrate}) }
func (m *Manager) GetMinerHashrateHistory(name string) ([]HashratePoint, error) {
    m.mutex.RLock()
    defer m.mutex.RUnlock()
    miner, exists := m.miners[name]
func (manager *Manager) GetMinerHashrateHistory(name string) ([]HashratePoint, error) {
    manager.mutex.RLock()
    defer manager.mutex.RUnlock()
    miner, exists := manager.miners[name]
    if !exists {
        return nil, ErrMinerNotFound(name)
    }

@@ -728,23 +728,23 @@ func (m *Manager) GetMinerHashrateHistory(name string) ([]HashratePoint, error)
const ShutdownTimeout = 10 * time.Second

// defer manager.Stop() // stops miners, waits for goroutines, and closes the database during shutdown
func (m *Manager) Stop() {
    m.stopOnce.Do(func() {
func (manager *Manager) Stop() {
    manager.stopOnce.Do(func() {
        // Stop all running miners first
        m.mutex.Lock()
        for name, miner := range m.miners {
        manager.mutex.Lock()
        for name, miner := range manager.miners {
            if err := miner.Stop(); err != nil {
                logging.Warn("failed to stop miner", logging.Fields{"miner": name, "error": err})
            }
        }
        m.mutex.Unlock()
        manager.mutex.Unlock()

        close(m.stopChan)
        close(manager.stopChan)

        // Wait for goroutines with timeout
        done := make(chan struct{})
        go func() {
            m.waitGroup.Wait()
            manager.waitGroup.Wait()
            close(done)
        }()

@@ -756,7 +756,7 @@ func (m *Manager) Stop() {
        }

        // Close the database
        if m.databaseEnabled {
        if manager.databaseEnabled {
            if err := database.Close(); err != nil {
                logging.Warn("failed to close database", logging.Fields{"error": err})
            }

@@ -766,16 +766,16 @@ func (m *Manager) Stop() {

// stats, err := manager.GetMinerHistoricalStats("xmrig")
// if err == nil { logging.Info("stats", logging.Fields{"average": stats.AverageRate}) }
func (m *Manager) GetMinerHistoricalStats(minerName string) (*database.HashrateStats, error) {
    if !m.databaseEnabled {
func (manager *Manager) GetMinerHistoricalStats(minerName string) (*database.HashrateStats, error) {
    if !manager.databaseEnabled {
        return nil, ErrDatabaseError("database persistence is disabled")
    }
    return database.GetHashrateStats(minerName)
}

// points, err := manager.GetMinerHistoricalHashrate("xmrig", time.Now().Add(-1*time.Hour), time.Now())
func (m *Manager) GetMinerHistoricalHashrate(minerName string, since, until time.Time) ([]HashratePoint, error) {
    if !m.databaseEnabled {
func (manager *Manager) GetMinerHistoricalHashrate(minerName string, since, until time.Time) ([]HashratePoint, error) {
    if !manager.databaseEnabled {
        return nil, ErrDatabaseError("database persistence is disabled")
    }

@@ -797,14 +797,14 @@ func (m *Manager) GetMinerHistoricalHashrate(minerName string, since, until time

// allStats, err := manager.GetAllMinerHistoricalStats()
// for _, stats := range allStats { logging.Info("stats", logging.Fields{"miner": stats.MinerName, "average": stats.AverageRate}) }
func (m *Manager) GetAllMinerHistoricalStats() ([]database.HashrateStats, error) {
    if !m.databaseEnabled {
func (manager *Manager) GetAllMinerHistoricalStats() ([]database.HashrateStats, error) {
    if !manager.databaseEnabled {
        return nil, ErrDatabaseError("database persistence is disabled")
    }
    return database.GetAllMinerStats()
}

// if manager.IsDatabaseEnabled() { /* persist stats */ }
func (m *Manager) IsDatabaseEnabled() bool {
    return m.databaseEnabled
func (manager *Manager) IsDatabaseEnabled() bool {
    return manager.databaseEnabled
}

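Taken together, the renamed Manager methods above compose into a short lifecycle: start a miner, let the stats goroutine poll it, stop it by full name or prefix, then shut the manager down. A minimal sketch, assuming a zero-argument NewManager() constructor (it is referenced only in the startStatsCollection comment, so the real signature may differ) and an empty Config where a real caller would set pool, wallet, and algorithm fields:

package main

import (
    "context"
    "log"
    "time"

    "forge.lthn.ai/Snider/Mining/pkg/mining" // import path assumed from this repository
)

func main() {
    manager := mining.NewManager() // hypothetical constructor; not shown in this diff
    defer manager.Stop()           // stops miners, waits for goroutines, closes the database

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // StartMiner emits EventMinerStarting/Started and persists the config for autostart.
    miner, err := manager.StartMiner(ctx, "xmrig", &mining.Config{})
    if err != nil {
        log.Fatalf("start failed: %v", err)
    }

    // StopMiner accepts the full instance name or a prefix such as "xmrig".
    if err := manager.StopMiner(ctx, miner.GetName()); err != nil {
        log.Printf("stop failed: %v", err)
    }
}
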
@@ -480,13 +480,13 @@ func NewService(manager ManagerInterface, listenAddr string, displayAddr string,

// service.InitRouter()
// http.Handle("/", service.Router) // embeds the API under a parent HTTP server in Wails
func (s *Service) InitRouter() {
    s.Router = gin.Default()
func (service *Service) InitRouter() {
    service.Router = gin.Default()

    // s.Server.Addr = ":9090" -> serverPort = "9090" for local CORS origins
    // service.Server.Addr = ":9090" -> serverPort = "9090" for local CORS origins
    serverPort := "9090" // default fallback
    if s.Server.Addr != "" {
        if _, port, err := net.SplitHostPort(s.Server.Addr); err == nil && port != "" {
    if service.Server.Addr != "" {
        if _, port, err := net.SplitHostPort(service.Server.Addr); err == nil && port != "" {
            serverPort = port
        }
    }

@@ -508,68 +508,68 @@ func (s *Service) InitRouter() {
        AllowCredentials: true,
        MaxAge:           12 * time.Hour,
    }
    s.Router.Use(cors.New(corsConfig))
    service.Router.Use(cors.New(corsConfig))

    // s.Router.Use(securityHeadersMiddleware()) // sets security headers on every API response
    s.Router.Use(securityHeadersMiddleware())
    // service.Router.Use(securityHeadersMiddleware()) // sets security headers on every API response
    service.Router.Use(securityHeadersMiddleware())

    // s.Router.Use(contentTypeValidationMiddleware()) // rejects POST /miners/xmrig/install without application/json
    s.Router.Use(contentTypeValidationMiddleware())
    // service.Router.Use(contentTypeValidationMiddleware()) // rejects POST /miners/xmrig/install without application/json
    service.Router.Use(contentTypeValidationMiddleware())

    // c.Request.Body = http.MaxBytesReader(..., 1<<20) // caps request bodies at 1 MiB
    s.Router.Use(func(c *gin.Context) {
    service.Router.Use(func(c *gin.Context) {
        c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, 1<<20) // 1MB
        c.Next()
    })

    // s.Router.Use(csrfMiddleware()) // allows API clients with Authorization or X-Requested-With
    s.Router.Use(csrfMiddleware())
    // service.Router.Use(csrfMiddleware()) // allows API clients with Authorization or X-Requested-With
    service.Router.Use(csrfMiddleware())

    // s.Router.Use(requestTimeoutMiddleware(DefaultRequestTimeout)) // aborts stalled requests after 30 seconds
    s.Router.Use(requestTimeoutMiddleware(DefaultRequestTimeout))
    // service.Router.Use(requestTimeoutMiddleware(DefaultRequestTimeout)) // aborts stalled requests after 30 seconds
    service.Router.Use(requestTimeoutMiddleware(DefaultRequestTimeout))

    // s.Router.Use(cacheMiddleware()) // returns Cache-Control: public, max-age=300 for /miners/available
    s.Router.Use(cacheMiddleware())
    // service.Router.Use(cacheMiddleware()) // returns Cache-Control: public, max-age=300 for /miners/available
    service.Router.Use(cacheMiddleware())

    // s.Router.Use(requestIDMiddleware()) // preserves the incoming X-Request-ID or creates a new one
    s.Router.Use(requestIDMiddleware())
    // service.Router.Use(requestIDMiddleware()) // preserves the incoming X-Request-ID or creates a new one
    service.Router.Use(requestIDMiddleware())

    // NewRateLimiter(10, 20) // allows bursts of 20 with a 10 requests/second refill rate
    s.rateLimiter = NewRateLimiter(10, 20)
    s.Router.Use(s.rateLimiter.Middleware())
    service.rateLimiter = NewRateLimiter(10, 20)
    service.Router.Use(service.rateLimiter.Middleware())

    s.SetupRoutes()
    service.SetupRoutes()
}

// service.Stop() // stops rate limiting, auth, event hub, and node transport during shutdown
func (s *Service) Stop() {
    if s.rateLimiter != nil {
        s.rateLimiter.Stop()
func (service *Service) Stop() {
    if service.rateLimiter != nil {
        service.rateLimiter.Stop()
    }
    if s.EventHub != nil {
        s.EventHub.Stop()
    if service.EventHub != nil {
        service.EventHub.Stop()
    }
    if s.auth != nil {
        s.auth.Stop()
    if service.auth != nil {
        service.auth.Stop()
    }
    if s.NodeService != nil {
        if err := s.NodeService.StopTransport(); err != nil {
    if service.NodeService != nil {
        if err := service.NodeService.StopTransport(); err != nil {
            logging.Warn("failed to stop node service transport", logging.Fields{"error": err})
        }
    }
}

// service.ServiceStartup(ctx) // starts the HTTP server and stops it when ctx.Done() fires
func (s *Service) ServiceStartup(ctx context.Context) error {
    s.InitRouter()
    s.Server.Handler = s.Router
func (service *Service) ServiceStartup(ctx context.Context) error {
    service.InitRouter()
    service.Server.Handler = service.Router

    // serverErrorChannel captures ListenAndServe failures without blocking startup
    serverErrorChannel := make(chan error, 1)

    go func() {
        if err := s.Server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
            logging.Error("server error", logging.Fields{"addr": s.Server.Addr, "error": err})
        if err := service.Server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
            logging.Error("server error", logging.Fields{"addr": service.Server.Addr, "error": err})
            serverErrorChannel <- err
        }
        close(serverErrorChannel) // prevent goroutine leak

@@ -577,11 +577,11 @@ func (s *Service) ServiceStartup(ctx context.Context) error {

    go func() {
        <-ctx.Done()
        s.Stop() // Clean up service resources (auth, event hub, node service)
        s.Manager.Stop()
        service.Stop() // Clean up service resources (auth, event hub, node service)
        service.Manager.Stop()
        shutdownContext, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        if err := s.Server.Shutdown(shutdownContext); err != nil {
        if err := service.Server.Shutdown(shutdownContext); err != nil {
            logging.Error("server shutdown error", logging.Fields{"error": err})
        }
    }()

@@ -597,7 +597,7 @@ func (s *Service) ServiceStartup(ctx context.Context) error {
            return nil // Channel closed without error means server shut down
        default:
            // Try to connect to verify server is listening
            conn, err := net.DialTimeout("tcp", s.Server.Addr, 50*time.Millisecond)
            conn, err := net.DialTimeout("tcp", service.Server.Addr, 50*time.Millisecond)
            if err == nil {
                conn.Close()
                return nil // Server is ready

@@ -606,90 +606,90 @@ func (s *Service) ServiceStartup(ctx context.Context) error {
        }
    }

    return ErrInternal("server failed to start listening on " + s.Server.Addr + " within timeout")
    return ErrInternal("server failed to start listening on " + service.Server.Addr + " within timeout")
}

// service.InitRouter()
// service.SetupRoutes() // re-call after adding middleware manually
func (s *Service) SetupRoutes() {
    apiRouterGroup := s.Router.Group(s.APIBasePath)
func (service *Service) SetupRoutes() {
    apiRouterGroup := service.Router.Group(service.APIBasePath)

    // Health endpoints (no auth required for orchestration/monitoring)
    apiRouterGroup.GET("/health", s.handleHealth)
    apiRouterGroup.GET("/ready", s.handleReady)
    apiRouterGroup.GET("/health", service.handleHealth)
    apiRouterGroup.GET("/ready", service.handleReady)

    // Apply authentication middleware if enabled
    if s.auth != nil {
        apiRouterGroup.Use(s.auth.Middleware())
    if service.auth != nil {
        apiRouterGroup.Use(service.auth.Middleware())
    }

    {
        apiRouterGroup.GET("/info", s.handleGetInfo)
        apiRouterGroup.GET("/metrics", s.handleMetrics)
        apiRouterGroup.POST("/doctor", s.handleDoctor)
        apiRouterGroup.POST("/update", s.handleUpdateCheck)
        apiRouterGroup.GET("/info", service.handleGetInfo)
        apiRouterGroup.GET("/metrics", service.handleMetrics)
        apiRouterGroup.POST("/doctor", service.handleDoctor)
        apiRouterGroup.POST("/update", service.handleUpdateCheck)

        minersRouterGroup := apiRouterGroup.Group("/miners")
        {
            minersRouterGroup.GET("", s.handleListMiners)
            minersRouterGroup.GET("/available", s.handleListAvailableMiners)
            minersRouterGroup.POST("/:miner_name/install", s.handleInstallMiner)
            minersRouterGroup.DELETE("/:miner_name/uninstall", s.handleUninstallMiner)
            minersRouterGroup.DELETE("/:miner_name", s.handleStopMiner)
            minersRouterGroup.GET("/:miner_name/stats", s.handleGetMinerStats)
            minersRouterGroup.GET("/:miner_name/hashrate-history", s.handleGetMinerHashrateHistory)
            minersRouterGroup.GET("/:miner_name/logs", s.handleGetMinerLogs)
            minersRouterGroup.POST("/:miner_name/stdin", s.handleMinerStdin)
            minersRouterGroup.GET("", service.handleListMiners)
            minersRouterGroup.GET("/available", service.handleListAvailableMiners)
            minersRouterGroup.POST("/:miner_name/install", service.handleInstallMiner)
            minersRouterGroup.DELETE("/:miner_name/uninstall", service.handleUninstallMiner)
            minersRouterGroup.DELETE("/:miner_name", service.handleStopMiner)
            minersRouterGroup.GET("/:miner_name/stats", service.handleGetMinerStats)
            minersRouterGroup.GET("/:miner_name/hashrate-history", service.handleGetMinerHashrateHistory)
            minersRouterGroup.GET("/:miner_name/logs", service.handleGetMinerLogs)
            minersRouterGroup.POST("/:miner_name/stdin", service.handleMinerStdin)
        }

        // Historical data endpoints (database-backed)
        historyRouterGroup := apiRouterGroup.Group("/history")
        {
            historyRouterGroup.GET("/status", s.handleHistoryStatus)
            historyRouterGroup.GET("/miners", s.handleAllMinersHistoricalStats)
            historyRouterGroup.GET("/miners/:miner_name", s.handleMinerHistoricalStats)
            historyRouterGroup.GET("/miners/:miner_name/hashrate", s.handleMinerHistoricalHashrate)
            historyRouterGroup.GET("/status", service.handleHistoryStatus)
            historyRouterGroup.GET("/miners", service.handleAllMinersHistoricalStats)
            historyRouterGroup.GET("/miners/:miner_name", service.handleMinerHistoricalStats)
            historyRouterGroup.GET("/miners/:miner_name/hashrate", service.handleMinerHistoricalHashrate)
        }

        profilesRouterGroup := apiRouterGroup.Group("/profiles")
        {
            profilesRouterGroup.GET("", s.handleListProfiles)
            profilesRouterGroup.POST("", s.handleCreateProfile)
            profilesRouterGroup.GET("/:id", s.handleGetProfile)
            profilesRouterGroup.PUT("/:id", s.handleUpdateProfile)
            profilesRouterGroup.DELETE("/:id", s.handleDeleteProfile)
            profilesRouterGroup.POST("/:id/start", s.handleStartMinerWithProfile)
            profilesRouterGroup.GET("", service.handleListProfiles)
            profilesRouterGroup.POST("", service.handleCreateProfile)
            profilesRouterGroup.GET("/:id", service.handleGetProfile)
            profilesRouterGroup.PUT("/:id", service.handleUpdateProfile)
            profilesRouterGroup.DELETE("/:id", service.handleDeleteProfile)
            profilesRouterGroup.POST("/:id/start", service.handleStartMinerWithProfile)
        }

        // WebSocket endpoint for real-time events
        websocketRouterGroup := apiRouterGroup.Group("/ws")
        {
            websocketRouterGroup.GET("/events", s.handleWebSocketEvents)
            websocketRouterGroup.GET("/events", service.handleWebSocketEvents)
        }

        // Add P2P node endpoints if node service is available
        if s.NodeService != nil {
            s.NodeService.SetupRoutes(apiRouterGroup)
        if service.NodeService != nil {
            service.NodeService.SetupRoutes(apiRouterGroup)
        }
    }

    // Serve the embedded web component
    componentFS, err := GetComponentFS()
    if err == nil {
        s.Router.StaticFS("/component", componentFS)
        service.Router.StaticFS("/component", componentFS)
    }

    swaggerURL := ginSwagger.URL("http://" + s.DisplayAddr + s.SwaggerUIPath + "/doc.json")
    s.Router.GET(s.SwaggerUIPath+"/*any", ginSwagger.WrapHandler(swaggerFiles.Handler, swaggerURL, ginSwagger.InstanceName(s.SwaggerInstanceName)))
    swaggerURL := ginSwagger.URL("http://" + service.DisplayAddr + service.SwaggerUIPath + "/doc.json")
    service.Router.GET(service.SwaggerUIPath+"/*any", ginSwagger.WrapHandler(swaggerFiles.Handler, swaggerURL, ginSwagger.InstanceName(service.SwaggerInstanceName)))

    // ginmcp.New(s.Router, ...) // exposes the API as MCP tools for Claude or Cursor
    s.mcpServer = ginmcp.New(s.Router, &ginmcp.Config{
    // ginmcp.New(service.Router, ...) // exposes the API as MCP tools for Claude or Cursor
    service.mcpServer = ginmcp.New(service.Router, &ginmcp.Config{
        Name:        "Mining API",
        Description: "Mining dashboard API exposed via Model Context Protocol (MCP)",
        BaseURL:     "http://" + s.DisplayAddr,
        BaseURL:     "http://" + service.DisplayAddr,
    })
    s.mcpServer.Mount(s.APIBasePath + "/mcp")
    logging.Info("MCP server enabled", logging.Fields{"endpoint": s.APIBasePath + "/mcp"})
    service.mcpServer.Mount(service.APIBasePath + "/mcp")
    logging.Info("MCP server enabled", logging.Fields{"endpoint": service.APIBasePath + "/mcp"})
}

// c.JSON(http.StatusOK, HealthResponse{Status: "healthy", Components: map[string]string{"db": "ok"}})

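With SetupRoutes in place, the health endpoint is registered before the auth middleware, so it can be probed without credentials. A quick hedged smoke test against the routes above, assuming the ":9090" fallback port seen in InitRouter and a hypothetical "/api/v1" value for APIBasePath (the real base path is configured on the Service and is not visible in this diff):

package main

import (
    "io"
    "log"
    "net/http"
)

func main() {
    // /health requires no auth, so it doubles as a readiness probe.
    resp, err := http.Get("http://127.0.0.1:9090/api/v1/health")
    if err != nil {
        log.Fatalf("request failed: %v", err)
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        log.Fatalf("read failed: %v", err)
    }
    log.Printf("health: %s %s", resp.Status, body)
}
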
@@ -705,7 +705,7 @@ type HealthResponse struct {
// @Produce json
// @Success 200 {object} HealthResponse
// @Router /health [get]
func (s *Service) handleHealth(c *gin.Context) {
func (service *Service) handleHealth(c *gin.Context) {
    c.JSON(http.StatusOK, HealthResponse{
        Status: "healthy",
    })

@@ -719,36 +719,36 @@ func (s *Service) handleHealth(c *gin.Context) {
// @Success 200 {object} HealthResponse
// @Success 503 {object} HealthResponse
// @Router /ready [get]
func (s *Service) handleReady(c *gin.Context) {
func (service *Service) handleReady(c *gin.Context) {
    components := make(map[string]string)
    allReady := true

    // s.Manager != nil -> "manager": "ready"
    if s.Manager != nil {
    // service.Manager != nil -> "manager": "ready"
    if service.Manager != nil {
        components["manager"] = "ready"
    } else {
        components["manager"] = "not initialized"
        allReady = false
    }

    // s.ProfileManager != nil -> "profiles": "ready"
    if s.ProfileManager != nil {
    // service.ProfileManager != nil -> "profiles": "ready"
    if service.ProfileManager != nil {
        components["profiles"] = "ready"
    } else {
        components["profiles"] = "degraded"
        // keep readiness green when only profile loading is degraded
    }

    // s.EventHub != nil -> "events": "ready"
    if s.EventHub != nil {
    // service.EventHub != nil -> "events": "ready"
    if service.EventHub != nil {
        components["events"] = "ready"
    } else {
        components["events"] = "not initialized"
        allReady = false
    }

    // s.NodeService != nil -> "p2p": "ready"
    if s.NodeService != nil {
    // service.NodeService != nil -> "p2p": "ready"
    if service.NodeService != nil {
        components["p2p"] = "ready"
    } else {
        components["p2p"] = "disabled"

@@ -775,8 +775,8 @@ func (s *Service) handleReady(c *gin.Context) {
// @Success 200 {object} SystemInfo
// @Failure 500 {object} map[string]string "Internal server error"
// @Router /info [get]
func (s *Service) handleGetInfo(c *gin.Context) {
    systemInfo, err := s.updateInstallationCache()
func (service *Service) handleGetInfo(c *gin.Context) {
    systemInfo, err := service.updateInstallationCache()
    if err != nil {
        respondWithMiningError(c, ErrInternal("failed to get system info").WithCause(err))
        return

@@ -784,9 +784,9 @@ func (s *Service) handleGetInfo(c *gin.Context) {
    c.JSON(http.StatusOK, systemInfo)
}

// systemInfo, err := s.updateInstallationCache()
// systemInfo, err := service.updateInstallationCache()
// if err != nil { return ErrInternal("cache update failed").WithCause(err) }
func (s *Service) updateInstallationCache() (*SystemInfo, error) {
func (service *Service) updateInstallationCache() (*SystemInfo, error) {
    // Always create a complete SystemInfo object
    systemInfo := &SystemInfo{
        Timestamp: time.Now(),

@@ -802,7 +802,7 @@ func (s *Service) updateInstallationCache() (*SystemInfo, error) {
        systemInfo.TotalSystemRAMGB = float64(vMem.Total) / (1024 * 1024 * 1024)
    }

    for _, availableMiner := range s.Manager.ListAvailableMiners() {
    for _, availableMiner := range service.Manager.ListAvailableMiners() {
        miner, err := CreateMiner(availableMiner.Name)
        if err != nil {
            continue // Skip unsupported miner types

@@ -842,8 +842,8 @@ func (s *Service) updateInstallationCache() (*SystemInfo, error) {
// @Produce json
// @Success 200 {object} SystemInfo
// @Router /doctor [post]
func (s *Service) handleDoctor(c *gin.Context) {
    systemInfo, err := s.updateInstallationCache()
func (service *Service) handleDoctor(c *gin.Context) {
    systemInfo, err := service.updateInstallationCache()
    if err != nil {
        respondWithMiningError(c, ErrInternal("failed to update cache").WithCause(err))
        return

@@ -858,9 +858,9 @@ func (s *Service) handleDoctor(c *gin.Context) {
// @Produce json
// @Success 200 {object} map[string]string
// @Router /update [post]
func (s *Service) handleUpdateCheck(c *gin.Context) {
func (service *Service) handleUpdateCheck(c *gin.Context) {
    updates := make(map[string]string)
    for _, availableMiner := range s.Manager.ListAvailableMiners() {
    for _, availableMiner := range service.Manager.ListAvailableMiners() {
        miner, err := CreateMiner(availableMiner.Name)
        if err != nil {
            continue // Skip unsupported miner types

@@ -907,13 +907,13 @@ func (s *Service) handleUpdateCheck(c *gin.Context) {
// @Param miner_type path string true "Miner Type to uninstall"
// @Success 200 {object} map[string]string
// @Router /miners/{miner_type}/uninstall [delete]
func (s *Service) handleUninstallMiner(c *gin.Context) {
func (service *Service) handleUninstallMiner(c *gin.Context) {
    minerType := c.Param("miner_name")
    if err := s.Manager.UninstallMiner(c.Request.Context(), minerType); err != nil {
    if err := service.Manager.UninstallMiner(c.Request.Context(), minerType); err != nil {
        respondWithMiningError(c, ErrInternal("failed to uninstall miner").WithCause(err))
        return
    }
    if _, err := s.updateInstallationCache(); err != nil {
    if _, err := service.updateInstallationCache(); err != nil {
        logging.Warn("failed to update cache after uninstall", logging.Fields{"error": err})
    }
    c.JSON(http.StatusOK, gin.H{"status": minerType + " uninstalled successfully."})

@@ -926,8 +926,8 @@ func (s *Service) handleUninstallMiner(c *gin.Context) {
// @Produce json
// @Success 200 {array} XMRigMiner
// @Router /miners [get]
func (s *Service) handleListMiners(c *gin.Context) {
    miners := s.Manager.ListMiners()
func (service *Service) handleListMiners(c *gin.Context) {
    miners := service.Manager.ListMiners()
    c.JSON(http.StatusOK, miners)
}

@@ -938,8 +938,8 @@ func (s *Service) handleListMiners(c *gin.Context) {
// @Produce json
// @Success 200 {array} AvailableMiner
// @Router /miners/available [get]
func (s *Service) handleListAvailableMiners(c *gin.Context) {
    miners := s.Manager.ListAvailableMiners()
func (service *Service) handleListAvailableMiners(c *gin.Context) {
    miners := service.Manager.ListAvailableMiners()
    c.JSON(http.StatusOK, miners)
}

@@ -951,7 +951,7 @@ func (s *Service) handleListAvailableMiners(c *gin.Context) {
// @Param miner_type path string true "Miner Type to install/update"
// @Success 200 {object} map[string]string
// @Router /miners/{miner_type}/install [post]
func (s *Service) handleInstallMiner(c *gin.Context) {
func (service *Service) handleInstallMiner(c *gin.Context) {
    minerType := c.Param("miner_name")
    miner, err := CreateMiner(minerType)
    if err != nil {

@@ -964,7 +964,7 @@ func (s *Service) handleInstallMiner(c *gin.Context) {
        return
    }

    if _, err := s.updateInstallationCache(); err != nil {
    if _, err := service.updateInstallationCache(); err != nil {
        logging.Warn("failed to update cache after install", logging.Fields{"error": err})
    }

@@ -985,9 +985,9 @@ func (s *Service) handleInstallMiner(c *gin.Context) {
// @Param id path string true "Profile ID"
// @Success 200 {object} XMRigMiner
// @Router /profiles/{id}/start [post]
func (s *Service) handleStartMinerWithProfile(c *gin.Context) {
func (service *Service) handleStartMinerWithProfile(c *gin.Context) {
    profileID := c.Param("id")
    profile, exists := s.ProfileManager.GetProfile(profileID)
    profile, exists := service.ProfileManager.GetProfile(profileID)
    if !exists {
        respondWithMiningError(c, ErrProfileNotFound(profileID))
        return

@@ -1005,7 +1005,7 @@ func (s *Service) handleStartMinerWithProfile(c *gin.Context) {
        return
    }

    miner, err := s.Manager.StartMiner(c.Request.Context(), profile.MinerType, &config)
    miner, err := service.Manager.StartMiner(c.Request.Context(), profile.MinerType, &config)
    if err != nil {
        respondWithMiningError(c, ErrStartFailed(profile.Name).WithCause(err))
        return

@@ -1021,9 +1021,9 @@ func (s *Service) handleStartMinerWithProfile(c *gin.Context) {
// @Param miner_name path string true "Miner Name"
// @Success 200 {object} map[string]string
// @Router /miners/{miner_name} [delete]
func (s *Service) handleStopMiner(c *gin.Context) {
func (service *Service) handleStopMiner(c *gin.Context) {
    minerName := c.Param("miner_name")
    if err := s.Manager.StopMiner(c.Request.Context(), minerName); err != nil {
    if err := service.Manager.StopMiner(c.Request.Context(), minerName); err != nil {
        respondWithMiningError(c, ErrStopFailed(minerName).WithCause(err))
        return
    }

@@ -1038,9 +1038,9 @@ func (s *Service) handleStopMiner(c *gin.Context) {
// @Param miner_name path string true "Miner Name"
// @Success 200 {object} PerformanceMetrics
// @Router /miners/{miner_name}/stats [get]
func (s *Service) handleGetMinerStats(c *gin.Context) {
func (service *Service) handleGetMinerStats(c *gin.Context) {
    minerName := c.Param("miner_name")
    miner, err := s.Manager.GetMiner(minerName)
    miner, err := service.Manager.GetMiner(minerName)
    if err != nil {
        respondWithMiningError(c, ErrMinerNotFound(minerName).WithCause(err))
        return

@@ -1061,9 +1061,9 @@ func (s *Service) handleGetMinerStats(c *gin.Context) {
// @Param miner_name path string true "Miner Name"
// @Success 200 {array} HashratePoint
// @Router /miners/{miner_name}/hashrate-history [get]
func (s *Service) handleGetMinerHashrateHistory(c *gin.Context) {
func (service *Service) handleGetMinerHashrateHistory(c *gin.Context) {
    minerName := c.Param("miner_name")
    history, err := s.Manager.GetMinerHashrateHistory(minerName)
    history, err := service.Manager.GetMinerHashrateHistory(minerName)
    if err != nil {
        respondWithMiningError(c, ErrMinerNotFound(minerName).WithCause(err))
        return

@@ -1079,9 +1079,9 @@ func (s *Service) handleGetMinerHashrateHistory(c *gin.Context) {
// @Param miner_name path string true "Miner Name"
// @Success 200 {array} string "Base64 encoded log lines"
// @Router /miners/{miner_name}/logs [get]
func (s *Service) handleGetMinerLogs(c *gin.Context) {
func (service *Service) handleGetMinerLogs(c *gin.Context) {
    minerName := c.Param("miner_name")
    miner, err := s.Manager.GetMiner(minerName)
    miner, err := service.Manager.GetMiner(minerName)
    if err != nil {
        respondWithMiningError(c, ErrMinerNotFound(minerName).WithCause(err))
        return

@@ -1112,9 +1112,9 @@ type StdinInput struct {
// @Failure 400 {object} map[string]string
// @Failure 404 {object} map[string]string
// @Router /miners/{miner_name}/stdin [post]
func (s *Service) handleMinerStdin(c *gin.Context) {
func (service *Service) handleMinerStdin(c *gin.Context) {
    minerName := c.Param("miner_name")
    miner, err := s.Manager.GetMiner(minerName)
    miner, err := service.Manager.GetMiner(minerName)
    if err != nil {
        respondWithError(c, http.StatusNotFound, ErrCodeMinerNotFound, "miner not found", err.Error())
        return

@@ -1141,8 +1141,8 @@ func (s *Service) handleMinerStdin(c *gin.Context) {
// @Produce json
// @Success 200 {array} MiningProfile
// @Router /profiles [get]
func (s *Service) handleListProfiles(c *gin.Context) {
    profiles := s.ProfileManager.GetAllProfiles()
func (service *Service) handleListProfiles(c *gin.Context) {
    profiles := service.ProfileManager.GetAllProfiles()
    c.JSON(http.StatusOK, profiles)
}

@@ -1156,7 +1156,7 @@ func (s *Service) handleListProfiles(c *gin.Context) {
// @Success 201 {object} MiningProfile
// @Failure 400 {object} APIError "Invalid profile data"
// @Router /profiles [post]
func (s *Service) handleCreateProfile(c *gin.Context) {
func (service *Service) handleCreateProfile(c *gin.Context) {
    var profile MiningProfile
    if err := c.ShouldBindJSON(&profile); err != nil {
        respondWithError(c, http.StatusBadRequest, ErrCodeInvalidInput, "invalid profile data", err.Error())

@@ -1173,7 +1173,7 @@ func (s *Service) handleCreateProfile(c *gin.Context) {
        return
    }

    createdProfile, err := s.ProfileManager.CreateProfile(&profile)
    createdProfile, err := service.ProfileManager.CreateProfile(&profile)
    if err != nil {
        respondWithError(c, http.StatusInternalServerError, ErrCodeInternalError, "failed to create profile", err.Error())
        return

@@ -1190,9 +1190,9 @@ func (s *Service) handleCreateProfile(c *gin.Context) {
// @Param id path string true "Profile ID"
// @Success 200 {object} MiningProfile
// @Router /profiles/{id} [get]
func (s *Service) handleGetProfile(c *gin.Context) {
func (service *Service) handleGetProfile(c *gin.Context) {
    profileID := c.Param("id")
    profile, exists := s.ProfileManager.GetProfile(profileID)
    profile, exists := service.ProfileManager.GetProfile(profileID)
    if !exists {
        respondWithError(c, http.StatusNotFound, ErrCodeProfileNotFound, "profile not found", "")
        return

@@ -1211,7 +1211,7 @@ func (s *Service) handleGetProfile(c *gin.Context) {
// @Success 200 {object} MiningProfile
// @Failure 404 {object} APIError "Profile not found"
// @Router /profiles/{id} [put]
func (s *Service) handleUpdateProfile(c *gin.Context) {
func (service *Service) handleUpdateProfile(c *gin.Context) {
    profileID := c.Param("id")
    var profile MiningProfile
    if err := c.ShouldBindJSON(&profile); err != nil {

@@ -1220,7 +1220,7 @@ func (s *Service) handleUpdateProfile(c *gin.Context) {
    }
    profile.ID = profileID

    if err := s.ProfileManager.UpdateProfile(&profile); err != nil {
    if err := service.ProfileManager.UpdateProfile(&profile); err != nil {
        // Check if error is "not found"
        if strings.Contains(err.Error(), "not found") {
            respondWithError(c, http.StatusNotFound, ErrCodeProfileNotFound, "profile not found", err.Error())

@@ -1240,9 +1240,9 @@ func (s *Service) handleUpdateProfile(c *gin.Context) {
// @Param id path string true "Profile ID"
// @Success 200 {object} map[string]string
// @Router /profiles/{id} [delete]
func (s *Service) handleDeleteProfile(c *gin.Context) {
func (service *Service) handleDeleteProfile(c *gin.Context) {
    profileID := c.Param("id")
    if err := s.ProfileManager.DeleteProfile(profileID); err != nil {
    if err := service.ProfileManager.DeleteProfile(profileID); err != nil {
        // Make DELETE idempotent - if profile doesn't exist, still return success
        if strings.Contains(err.Error(), "not found") {
            c.JSON(http.StatusOK, gin.H{"status": "profile deleted"})

@@ -1261,8 +1261,8 @@ func (s *Service) handleDeleteProfile(c *gin.Context) {
// @Produce json
// @Success 200 {object} map[string]interface{}
// @Router /history/status [get]
func (s *Service) handleHistoryStatus(c *gin.Context) {
    if manager, ok := s.Manager.(*Manager); ok {
func (service *Service) handleHistoryStatus(c *gin.Context) {
    if manager, ok := service.Manager.(*Manager); ok {
        c.JSON(http.StatusOK, gin.H{
            "enabled":       manager.IsDatabaseEnabled(),
            "retentionDays": manager.databaseRetention,

@@ -1279,8 +1279,8 @@ func (s *Service) handleHistoryStatus(c *gin.Context) {
// @Produce json
// @Success 200 {array} database.HashrateStats
// @Router /history/miners [get]
func (s *Service) handleAllMinersHistoricalStats(c *gin.Context) {
    manager, ok := s.Manager.(*Manager)
func (service *Service) handleAllMinersHistoricalStats(c *gin.Context) {
    manager, ok := service.Manager.(*Manager)
    if !ok {
        respondWithMiningError(c, ErrInternal("manager type not supported"))
        return

@@ -1303,9 +1303,9 @@ func (s *Service) handleAllMinersHistoricalStats(c *gin.Context) {
// @Param miner_name path string true "Miner Name"
// @Success 200 {object} database.HashrateStats
// @Router /history/miners/{miner_name} [get]
func (s *Service) handleMinerHistoricalStats(c *gin.Context) {
func (service *Service) handleMinerHistoricalStats(c *gin.Context) {
    minerName := c.Param("miner_name")
    manager, ok := s.Manager.(*Manager)
    manager, ok := service.Manager.(*Manager)
    if !ok {
        respondWithMiningError(c, ErrInternal("manager type not supported"))
        return

@@ -1335,9 +1335,9 @@ func (s *Service) handleMinerHistoricalStats(c *gin.Context) {
// @Param until query string false "End time (RFC3339 format)"
// @Success 200 {array} HashratePoint
// @Router /history/miners/{miner_name}/hashrate [get]
func (s *Service) handleMinerHistoricalHashrate(c *gin.Context) {
func (service *Service) handleMinerHistoricalHashrate(c *gin.Context) {
    minerName := c.Param("miner_name")
    manager, ok := s.Manager.(*Manager)
    manager, ok := service.Manager.(*Manager)
    if !ok {
        respondWithMiningError(c, ErrInternal("manager type not supported"))
        return

@@ -1374,7 +1374,7 @@ func (s *Service) handleMinerHistoricalHashrate(c *gin.Context) {
// @Tags websocket
// @Success 101 {string} string "Switching Protocols"
// @Router /ws/events [get]
func (s *Service) handleWebSocketEvents(c *gin.Context) {
func (service *Service) handleWebSocketEvents(c *gin.Context) {
    conn, err := wsUpgrader.Upgrade(c.Writer, c.Request, nil)
    if err != nil {
        logging.Error("failed to upgrade WebSocket connection", logging.Fields{"error": err})

@@ -1383,7 +1383,7 @@ func (s *Service) handleWebSocketEvents(c *gin.Context) {

    logging.Info("new WebSocket connection", logging.Fields{"remote": c.Request.RemoteAddr})
    // Only record connection after successful registration to avoid metrics race
    if s.EventHub.ServeWs(conn) {
    if service.EventHub.ServeWs(conn) {
        RecordWSConnection(true)
    } else {
        logging.Warn("WebSocket connection rejected", logging.Fields{"remote": c.Request.RemoteAddr, "reason": "limit reached"})

@@ -1397,6 +1397,6 @@ func (s *Service) handleWebSocketEvents(c *gin.Context) {
// @Produce json
// @Success 200 {object} map[string]interface{}
// @Router /metrics [get]
func (s *Service) handleMetrics(c *gin.Context) {
func (service *Service) handleMetrics(c *gin.Context) {
    c.JSON(http.StatusOK, GetMetricsSnapshot())
}
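
ServiceStartup ties the pieces together: it builds the router, serves in a background goroutine, installs a shutdown goroutine on ctx.Done(), and only returns once the listener is verified reachable. A hedged sketch of driving it from a signal-aware context (imports: context, os, os/signal); constructing the Service itself is elided because NewService's trailing parameters are truncated in the hunk header above:

// run blocks until an interrupt arrives, then lets ServiceStartup's shutdown
// goroutine stop the service, the manager, and the HTTP server gracefully.
func run(service *Service) error {
    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
    defer stop()

    // Returns once the TCP listener is verified reachable (or on startup error).
    if err := service.ServiceStartup(ctx); err != nil {
        return err
    }

    <-ctx.Done() // ServiceStartup's goroutine now runs Stop(), Manager.Stop(), and Server.Shutdown
    return nil
}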