refactor(ax): tighten remaining naming and docs
Some checks are pending
Security Scan / security (push) Waiting to run
Test / test (push) Waiting to run

Co-authored-by: Virgil <virgil@lethean.io>
This commit is contained in:
Virgil 2026-04-04 05:08:34 +00:00
parent 9102b25f55
commit 976ff0141c
7 changed files with 79 additions and 131 deletions

View file

@ -10,7 +10,6 @@ import (
"github.com/spf13/cobra"
)
// installCmd.Use == "install [miner_type]" and RunE installs or updates the miner.
var installCmd = &cobra.Command{
Use: "install [miner_type]",
Short: "Install or update a miner",
@ -27,14 +26,13 @@ var installCmd = &cobra.Command{
return fmt.Errorf("unknown miner type: %s", minerType)
}
// miner.CheckInstallation() // returns the installed version before deciding whether to update
details, err := miner.CheckInstallation()
if err == nil && details.IsInstalled {
installationDetails, err := miner.CheckInstallation()
if err == nil && installationDetails.IsInstalled {
latestVersionStr, err := miner.GetLatestVersion()
if err == nil {
latestVersion, err := semver.NewVersion(latestVersionStr)
if err == nil {
installedVersion, err := semver.NewVersion(details.Version)
installedVersion, err := semver.NewVersion(installationDetails.Version)
if err == nil && !latestVersion.GreaterThan(installedVersion) {
fmt.Printf("%s is already installed and up to date (version %s).\n", miner.GetName(), installedVersion)
return nil
@ -50,15 +48,13 @@ var installCmd = &cobra.Command{
return fmt.Errorf("failed to install/update miner: %w", err)
}
// miner.CheckInstallation() // returns the post-install path and version for the success message
finalDetails, err := miner.CheckInstallation()
finalInstallationDetails, err := miner.CheckInstallation()
if err != nil {
return fmt.Errorf("failed to verify installation: %w", err)
}
fmt.Printf("%s installed successfully to %s (version %s).\n", miner.GetName(), finalDetails.Path, finalDetails.Version)
fmt.Printf("%s installed successfully to %s (version %s).\n", miner.GetName(), finalInstallationDetails.Path, finalInstallationDetails.Version)
// updateDoctorCache() // refreshes the cached installation details after a successful install
fmt.Println("Updating installation cache...")
if err := updateDoctorCache(); err != nil {
fmt.Printf("Warning: failed to update doctor cache: %v\n", err)
@ -68,7 +64,6 @@ var installCmd = &cobra.Command{
},
}
// updateDoctorCache() // refreshes the cache used by `mining doctor` and `mining update`.
func updateDoctorCache() error {
manager := getManager()
availableMiners := manager.ListAvailableMiners()
@ -87,12 +82,11 @@ func updateDoctorCache() error {
}
details, err := miner.CheckInstallation()
if err != nil {
continue // Ignore errors for this background update
continue
}
allDetails = append(allDetails, details)
}
// mining.SystemInfo{Timestamp: time.Now(), OS: runtime.GOOS} // matches the cache shape returned by the doctor command
systemInfo := &mining.SystemInfo{
Timestamp: time.Now(),
OS: runtime.GOOS,

View file

@ -22,24 +22,22 @@ var (
peerRegistryErr error
)
// nodeCmd.Use == "node" and RunE groups identity and P2P subcommands.
var nodeCmd = &cobra.Command{
Use: "node",
Short: "Manage P2P node identity and connections",
Long: `Manage the node's identity, view status, and control P2P networking.`,
}
// nodeInitCmd.Use == "init" and RunE creates a node identity.
var nodeInitCmd = &cobra.Command{
Use: "init",
Short: "Initialize node identity",
Long: `Initialize a new node identity with X25519 keypair.
This creates the node's cryptographic identity for secure P2P communication.`,
RunE: func(cmd *cobra.Command, args []string) error {
name, _ := cmd.Flags().GetString("name")
role, _ := cmd.Flags().GetString("role")
nodeName, _ := cmd.Flags().GetString("name")
roleName, _ := cmd.Flags().GetString("role")
if name == "" {
if nodeName == "" {
return fmt.Errorf("--name is required")
}
@ -53,7 +51,7 @@ This creates the node's cryptographic identity for secure P2P communication.`,
}
var nodeRole node.NodeRole
switch role {
switch roleName {
case "controller":
nodeRole = node.RoleController
case "worker":
@ -61,10 +59,10 @@ This creates the node's cryptographic identity for secure P2P communication.`,
case "dual", "":
nodeRole = node.RoleDual
default:
return fmt.Errorf("invalid role: %s (use controller, worker, or dual)", role)
return fmt.Errorf("invalid role: %s (use controller, worker, or dual)", roleName)
}
if err := nodeManager.GenerateIdentity(name, nodeRole); err != nil {
if err := nodeManager.GenerateIdentity(nodeName, nodeRole); err != nil {
return fmt.Errorf("failed to generate identity: %w", err)
}
@ -81,7 +79,6 @@ This creates the node's cryptographic identity for secure P2P communication.`,
},
}
// nodeInfoCmd.Use == "info" and RunE prints the current node identity.
var nodeInfoCmd = &cobra.Command{
Use: "info",
Short: "Show node identity and status",
@ -107,7 +104,6 @@ var nodeInfoCmd = &cobra.Command{
fmt.Printf(" Public Key: %s\n", identity.PublicKey)
fmt.Printf(" Created: %s\n", identity.CreatedAt.Format(time.RFC3339))
// node.NewPeerRegistry() // loads registered peers to print the connected count
peerRegistry, err := node.NewPeerRegistry()
if err == nil {
fmt.Println()
@ -120,14 +116,13 @@ var nodeInfoCmd = &cobra.Command{
},
}
// nodeServeCmd.Use == "serve" and RunE starts the P2P server.
var nodeServeCmd = &cobra.Command{
Use: "serve",
Short: "Start P2P server for remote connections",
Long: `Start the P2P WebSocket server to accept connections from other nodes.
This allows other nodes to connect, send commands, and receive stats.`,
RunE: func(cmd *cobra.Command, args []string) error {
listen, _ := cmd.Flags().GetString("listen")
listenAddress, _ := cmd.Flags().GetString("listen")
nodeManager, err := node.NewNodeManager()
if err != nil {
@ -144,13 +139,12 @@ This allows other nodes to connect, send commands, and receive stats.`,
}
config := node.DefaultTransportConfig()
if listen != "" {
config.ListenAddr = listen
if listenAddress != "" {
config.ListenAddr = listenAddress
}
transport := node.NewTransport(nodeManager, peerRegistry, config)
// node.NewWorker(nodeManager, transport) // handles incoming remote commands
worker := node.NewWorker(nodeManager, transport)
worker.RegisterWithTransport()
@ -165,25 +159,20 @@ This allows other nodes to connect, send commands, and receive stats.`,
fmt.Println()
fmt.Println("Press Ctrl+C to stop...")
// signalChannel captures Ctrl+C and terminal disconnects for a clean shutdown.
signalChannel := make(chan os.Signal, 1)
signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
// signalChannel <- os.Interrupt // blocks until shutdown is requested
sig := <-signalChannel
fmt.Printf("\nReceived signal %v, shutting down...\n", sig)
signalValue := <-signalChannel
fmt.Printf("\nReceived signal %v, shutting down...\n", signalValue)
// transport.Stop() // stops the socket listener before the peer registry is flushed
if err := transport.Stop(); err != nil {
fmt.Printf("Warning: error during transport shutdown: %v\n", err)
// peerRegistry.GetConnectedPeers() // clears connected flags when transport shutdown fails
fmt.Println("Forcing resource cleanup...")
for _, peer := range peerRegistry.GetConnectedPeers() {
peerRegistry.SetConnected(peer.ID, false)
}
}
// peerRegistry.Close() // flushes peer state to disk during shutdown
if err := peerRegistry.Close(); err != nil {
fmt.Printf("Warning: error closing peer registry: %v\n", err)
}
@ -193,7 +182,6 @@ This allows other nodes to connect, send commands, and receive stats.`,
},
}
// nodeResetCmd.Use == "reset" and RunE deletes the node identity.
var nodeResetCmd = &cobra.Command{
Use: "reset",
Short: "Delete node identity and start fresh",
@ -232,24 +220,19 @@ var nodeResetCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(nodeCmd)
// node init
nodeCmd.AddCommand(nodeInitCmd)
nodeInitCmd.Flags().StringP("name", "n", "", "Node name (required)")
nodeInitCmd.Flags().StringP("role", "r", "dual", "Node role: controller, worker, or dual (default)")
// node info
nodeCmd.AddCommand(nodeInfoCmd)
// node serve
nodeCmd.AddCommand(nodeServeCmd)
nodeServeCmd.Flags().StringP("listen", "l", ":9091", "Address to listen on")
// node reset
nodeCmd.AddCommand(nodeResetCmd)
nodeResetCmd.Flags().BoolP("force", "f", false, "Force reset without confirmation")
}
// getNodeManager returns the singleton node manager (thread-safe)
func getNodeManager() (*node.NodeManager, error) {
nodeManagerOnce.Do(func() {
nodeManager, nodeManagerErr = node.NewNodeManager()
@ -257,7 +240,6 @@ func getNodeManager() (*node.NodeManager, error) {
return nodeManager, nodeManagerErr
}
// getPeerRegistry returns the singleton peer registry (thread-safe)
func getPeerRegistry() (*node.PeerRegistry, error) {
peerRegistryOnce.Do(func() {
peerRegistry, peerRegistryErr = node.NewPeerRegistry()

View file

@ -8,10 +8,9 @@ import (
)
var (
manager *mining.Manager
sharedManager *mining.Manager
)
// rootCmd.Use == "mining" and rootCmd.Version prints pkg/mining.GetVersion().
var rootCmd = &cobra.Command{
Use: "mining",
Short: "Mining CLI - Manage miners with RESTful control",
@ -20,7 +19,6 @@ It provides commands to start, stop, list, and manage miners with RESTful contro
Version: mining.GetVersion(),
}
// Execute() // runs the root command from main.main().
func Execute() error {
return rootCmd.Execute()
}
@ -29,20 +27,18 @@ func init() {
cobra.OnInitialize(initManager)
}
// initManager() // skips simulate so `mining simulate` can create its own manager.
func initManager() {
if len(os.Args) > 1 && os.Args[1] == "simulate" {
return
}
if manager == nil {
manager = mining.NewManager()
if sharedManager == nil {
sharedManager = mining.NewManager()
}
}
// getManager() // returns the shared manager used by `mining start` and `mining stop`.
func getManager() *mining.Manager {
if manager == nil {
manager = mining.NewManager()
if sharedManager == nil {
sharedManager = mining.NewManager()
}
return manager
return sharedManager
}

View file

@ -30,7 +30,7 @@ type HashrateStore interface {
Close() error
}
// store := database.DefaultStore() // wraps the global database connection
// store := database.DefaultStore()
type defaultStore struct{}
// store := database.DefaultStore()
@ -39,57 +39,57 @@ func DefaultStore() HashrateStore {
return &defaultStore{}
}
func (s *defaultStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
func (store *defaultStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
return InsertHashratePoint(ctx, minerName, minerType, point, resolution)
}
func (s *defaultStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
func (store *defaultStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
return GetHashrateHistory(minerName, resolution, since, until)
}
func (s *defaultStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
func (store *defaultStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
return GetHashrateStats(minerName)
}
func (s *defaultStore) GetAllMinerStats() ([]HashrateStats, error) {
func (store *defaultStore) GetAllMinerStats() ([]HashrateStats, error) {
return GetAllMinerStats()
}
func (s *defaultStore) Cleanup(retentionDays int) error {
func (store *defaultStore) Cleanup(retentionDays int) error {
return Cleanup(retentionDays)
}
func (s *defaultStore) Close() error {
func (store *defaultStore) Close() error {
return Close()
}
// store := database.NopStore() // all methods are no-ops; use when database is disabled
// store := database.NopStore()
func NopStore() HashrateStore {
return &nopStore{}
}
type nopStore struct{}
func (s *nopStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
func (store *nopStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
return nil
}
func (s *nopStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
func (store *nopStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
return nil, nil
}
func (s *nopStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
func (store *nopStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
return nil, nil
}
func (s *nopStore) GetAllMinerStats() ([]HashrateStats, error) {
func (store *nopStore) GetAllMinerStats() ([]HashrateStats, error) {
return nil, nil
}
func (s *nopStore) Cleanup(retentionDays int) error {
func (store *nopStore) Cleanup(retentionDays int) error {
return nil
}
func (s *nopStore) Close() error {
func (store *nopStore) Close() error {
return nil
}

View file

@ -7,73 +7,68 @@ import (
)
func TestLogger_Log_Good(t *testing.T) {
var buf bytes.Buffer
var outputBuffer bytes.Buffer
logger := New(Config{
Output: &buf,
Output: &outputBuffer,
Level: LevelInfo,
})
// Info should appear at Info level
logger.Info("info message")
if !strings.Contains(buf.String(), "[INFO]") {
if !strings.Contains(outputBuffer.String(), "[INFO]") {
t.Error("Info message should appear")
}
if !strings.Contains(buf.String(), "info message") {
if !strings.Contains(outputBuffer.String(), "info message") {
t.Error("Info message content should appear")
}
buf.Reset()
outputBuffer.Reset()
// Warn should appear
logger.Warn("warn message")
if !strings.Contains(buf.String(), "[WARN]") {
if !strings.Contains(outputBuffer.String(), "[WARN]") {
t.Error("Warn message should appear")
}
buf.Reset()
outputBuffer.Reset()
// Error should appear
logger.Error("error message")
if !strings.Contains(buf.String(), "[ERROR]") {
if !strings.Contains(outputBuffer.String(), "[ERROR]") {
t.Error("Error message should appear")
}
}
func TestLogger_Log_Bad(t *testing.T) {
var buf bytes.Buffer
var outputBuffer bytes.Buffer
logger := New(Config{
Output: &buf,
Output: &outputBuffer,
Level: LevelInfo,
})
// Debug should not appear at Info level
logger.Debug("debug message")
if buf.Len() > 0 {
if outputBuffer.Len() > 0 {
t.Error("Debug message should not appear at Info level")
}
}
func TestLogger_Log_Ugly(t *testing.T) {
var buf bytes.Buffer
var outputBuffer bytes.Buffer
logger := New(Config{
Output: &buf,
Output: &outputBuffer,
Level: LevelDebug,
})
// All levels should appear at Debug level
logger.Debug("debug message")
if !strings.Contains(buf.String(), "[DEBUG]") {
if !strings.Contains(outputBuffer.String(), "[DEBUG]") {
t.Error("Debug message should appear at Debug level")
}
}
func TestLogger_WithFields_Good(t *testing.T) {
var buf bytes.Buffer
var outputBuffer bytes.Buffer
logger := New(Config{
Output: &buf,
Output: &outputBuffer,
Level: LevelInfo,
})
logger.Info("test message", Fields{"key": "value", "num": 42})
output := buf.String()
output := outputBuffer.String()
if !strings.Contains(output, "key=value") {
t.Error("Field key=value should appear")
@ -84,15 +79,15 @@ func TestLogger_WithFields_Good(t *testing.T) {
}
func TestLogger_WithComponent_Good(t *testing.T) {
var buf bytes.Buffer
var outputBuffer bytes.Buffer
logger := New(Config{
Output: &buf,
Output: &outputBuffer,
Level: LevelInfo,
Component: "TestComponent",
})
logger.Info("test message")
output := buf.String()
output := outputBuffer.String()
if !strings.Contains(output, "[TestComponent]") {
t.Error("Component name should appear in log")
@ -100,15 +95,15 @@ func TestLogger_WithComponent_Good(t *testing.T) {
}
func TestLogger_DerivedComponent_Good(t *testing.T) {
var buf bytes.Buffer
var outputBuffer bytes.Buffer
parent := New(Config{
Output: &buf,
Output: &outputBuffer,
Level: LevelInfo,
})
child := parent.WithComponent("ChildComponent")
child.Info("child message")
output := buf.String()
output := outputBuffer.String()
if !strings.Contains(output, "[ChildComponent]") {
t.Error("Derived component name should appear")
@ -116,14 +111,14 @@ func TestLogger_DerivedComponent_Good(t *testing.T) {
}
func TestLogger_Formatted_Good(t *testing.T) {
var buf bytes.Buffer
var outputBuffer bytes.Buffer
logger := New(Config{
Output: &buf,
Output: &outputBuffer,
Level: LevelInfo,
})
logger.Infof("formatted %s %d", "string", 123)
output := buf.String()
output := outputBuffer.String()
if !strings.Contains(output, "formatted string 123") {
t.Errorf("Formatted message should appear, got: %s", output)
@ -131,26 +126,23 @@ func TestLogger_Formatted_Good(t *testing.T) {
}
func TestLogger_SetLevel_Good(t *testing.T) {
var buf bytes.Buffer
var outputBuffer bytes.Buffer
logger := New(Config{
Output: &buf,
Output: &outputBuffer,
Level: LevelError,
})
// Info should not appear at Error level
logger.Info("should not appear")
if buf.Len() > 0 {
if outputBuffer.Len() > 0 {
t.Error("Info should not appear at Error level")
}
// Change to Info level
logger.SetLevel(LevelInfo)
logger.Info("should appear now")
if !strings.Contains(buf.String(), "should appear now") {
if !strings.Contains(outputBuffer.String(), "should appear now") {
t.Error("Info should appear after level change")
}
// Verify GetLevel
if logger.GetLevel() != LevelInfo {
t.Error("GetLevel should return LevelInfo")
}
@ -190,27 +182,26 @@ func TestLogger_ParseLevel_Good(t *testing.T) {
}
func TestLogger_GlobalLogger_Good(t *testing.T) {
var buf bytes.Buffer
var outputBuffer bytes.Buffer
logger := New(Config{
Output: &buf,
Output: &outputBuffer,
Level: LevelInfo,
})
SetGlobal(logger)
Info("global test")
if !strings.Contains(buf.String(), "global test") {
if !strings.Contains(outputBuffer.String(), "global test") {
t.Error("Global logger should write message")
}
buf.Reset()
outputBuffer.Reset()
SetGlobalLevel(LevelError)
Info("should not appear")
if buf.Len() > 0 {
if outputBuffer.Len() > 0 {
t.Error("Info should not appear at Error level")
}
// Reset to default for other tests
SetGlobal(New(DefaultConfig()))
}
@ -234,7 +225,6 @@ func TestLogger_LevelString_Good(t *testing.T) {
}
func TestLogger_MergeFields_Good(t *testing.T) {
// Empty fields
result := mergeFields(nil)
if result != nil {
t.Error("nil input should return nil")
@ -245,13 +235,11 @@ func TestLogger_MergeFields_Good(t *testing.T) {
t.Error("empty input should return nil")
}
// Single fields
result = mergeFields([]Fields{{"key": "value"}})
if result["key"] != "value" {
t.Error("Single field should be preserved")
}
// Multiple fields
result = mergeFields([]Fields{
{"key1": "value1"},
{"key2": "value2"},
@ -260,7 +248,6 @@ func TestLogger_MergeFields_Good(t *testing.T) {
t.Error("Multiple fields should be merged")
}
// Override
result = mergeFields([]Fields{
{"key": "value1"},
{"key": "value2"},

View file

@ -29,13 +29,13 @@ const (
// miningError := &MiningError{Code: ErrCodeStartFailed, Message: "failed to start xmrig", HTTPStatus: 500}
// miningError.WithCause(err).WithSuggestion("check logs")
type MiningError struct {
Code string // Machine-readable error code
Message string // Human-readable message
Details string // Technical details (for debugging)
Suggestion string // What to do next
Retryable bool // Can the client retry?
HTTPStatus int // HTTP status code to return
Cause error // Underlying error
Code string
Message string
Details string
Suggestion string
Retryable bool
HTTPStatus int
Cause error
}
// e.Error() // "START_FAILED: failed to start miner 'xmrig' (exec: not found)"

View file

@ -72,8 +72,6 @@ func TestGetChainInfo_Good(t *testing.T) {
}))
defer server.Close()
// info, err := node.GetChainInfo(server.URL)
// if info.Synced { log("height:", info.Height) }
info, err := GetChainInfo(server.URL)
if err != nil {
t.Fatalf("GetChainInfo returned unexpected error: %v", err)
@ -103,7 +101,6 @@ func TestGetChainInfo_Ugly(t *testing.T) {
}))
defer server.Close()
// info, err := node.GetChainInfo(server.URL) — malformed response triggers unmarshal error
_, err := GetChainInfo(server.URL)
if err == nil {
t.Error("expected error when daemon returns malformed JSON")
@ -117,8 +114,6 @@ func TestDiscoverPools_Good(t *testing.T) {
}))
defer server.Close()
// pools := node.DiscoverPools(server.URL)
// for _, pool := range pools { log(pool.Name, pool.Endpoint) }
pools := DiscoverPools(server.URL)
if len(pools) != 1 {
t.Fatalf("expected 1 pool, got %d", len(pools))
@ -138,12 +133,10 @@ func TestDiscoverPools_Bad(t *testing.T) {
func TestDiscoverPools_Ugly(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
// Aliases present but none contain "pool" in comment — result must be empty
w.Write([]byte(`{"result":{"aliases":[{"alias":"gw.lthn","address":"LTHNxyz","comment":"v=lthn1;type=gateway;cap=vpn"}]}}`))
}))
defer server.Close()
// DiscoverPools filters by comment containing "pool"; gateway alias must be skipped
pools := DiscoverPools(server.URL)
if len(pools) != 0 {
t.Errorf("expected 0 pools when no alias comment contains 'pool', got %d", len(pools))
@ -157,8 +150,6 @@ func TestDiscoverGateways_Good(t *testing.T) {
}))
defer server.Close()
// gateways := node.DiscoverGateways(server.URL)
// for _, gw := range gateways { log(gw.Name, gw.Endpoint) }
gateways := DiscoverGateways(server.URL)
if len(gateways) != 1 {
t.Fatalf("expected 1 gateway, got %d", len(gateways))
@ -178,12 +169,10 @@ func TestDiscoverGateways_Bad(t *testing.T) {
func TestDiscoverGateways_Ugly(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
// Alias present but comment lacks "type=gateway" — must be filtered out
w.Write([]byte(`{"result":{"aliases":[{"alias":"pool.lthn","address":"LTHNabc","comment":"v=lthn1;type=pool;cap=pool"}]}}`))
}))
defer server.Close()
// DiscoverGateways filters by comment containing "type=gateway"; pool alias must be skipped
gateways := DiscoverGateways(server.URL)
if len(gateways) != 0 {
t.Errorf("expected 0 gateways when no alias comment contains 'type=gateway', got %d", len(gateways))