ax(batch): replace prose comments with usage examples across all packages

Applies AX principle 2 (Comments as Usage Examples) — removes prose
descriptions that restate the function signature ("returns", "retrieves",
"creates", "wraps", etc.) and keeps or replaces them with concrete usage
examples showing real calls with realistic values.

Co-Authored-By: Charon <charon@lethean.io>
This commit is contained in:
Claude 2026-04-02 18:28:16 +01:00
parent b12db10680
commit 0d1b20e177
No known key found for this signature in database
GPG key ID: AF404715446AEB41
7 changed files with 14 additions and 22 deletions

View file

@ -11,7 +11,6 @@ import (
_ "github.com/mattn/go-sqlite3"
)
// databaseError wraps an error with a scope message for consistent error reporting.
// databaseError("open database", err) // => "database: open database: <cause>"
func databaseError(scope string, cause error) error {
if cause == nil {
@ -33,7 +32,6 @@ func (e *databaseErr) Unwrap() error {
return e.cause
}
// globalDatabase is the global database instance
// db := globalDatabase // check before use; nil means not initialised
var (
globalDatabase *sql.DB
@ -65,7 +63,6 @@ func defaultDBPath() (string, error) {
return filepath.Join(dataDir, "mining.db"), nil
}
// Initialize opens the SQLite database and creates all required tables.
// database.Initialize(database.Config{Enabled: true, Path: "/data/mining.db", RetentionDays: 30})
func Initialize(config Config) error {
databaseMutex.Lock()
@ -121,7 +118,6 @@ func Close() error {
return err
}
// isInitialized reports whether the global database connection is open.
// if isInitialized() { database.Cleanup(30) }
func isInitialized() bool {
databaseMutex.RLock()

View file

@ -51,8 +51,7 @@ type HashratePoint struct {
// ctx, cancel := context.WithTimeout(ctx, dbInsertTimeout) // 5s ceiling for INSERT
const dbInsertTimeout = 5 * time.Second
// InsertHashratePoint stores a hashrate measurement in the database.
// If ctx is nil, a default timeout context will be used.
// database.InsertHashratePoint(ctx, "xmrig", "xmrig", HashratePoint{Timestamp: time.Now(), Hashrate: 1234}, ResolutionHigh)
func InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
databaseMutex.RLock()
defer databaseMutex.RUnlock()
@ -76,7 +75,6 @@ func InsertHashratePoint(ctx context.Context, minerName, minerType string, point
return err
}
// GetHashrateHistory retrieves hashrate history for a miner within a time range.
// points, err := database.GetHashrateHistory("xmrig", database.ResolutionHigh, time.Now().Add(-time.Hour), time.Now())
func GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
databaseMutex.RLock()
@ -112,7 +110,6 @@ func GetHashrateHistory(minerName string, resolution Resolution, since, until ti
return points, rows.Err()
}
// HashrateStats holds aggregated performance data for a single miner.
// stats, err := database.GetHashrateStats("xmrig")
// if stats != nil { logging.Info("stats", logging.Fields{"average": stats.AverageRate}) }
type HashrateStats struct {
@ -125,7 +122,6 @@ type HashrateStats struct {
LastSeen time.Time `json:"lastSeen"`
}
// GetHashrateStats retrieves aggregated statistics for a specific miner.
// stats, err := database.GetHashrateStats("xmrig")
// if stats != nil { logging.Info("stats", logging.Fields{"miner": minerName, "average": stats.AverageRate}) }
func GetHashrateStats(minerName string) (*HashrateStats, error) {
@ -185,7 +181,6 @@ func GetHashrateStats(minerName string) (*HashrateStats, error) {
return &stats, nil
}
// GetAllMinerStats returns aggregated performance data for every known miner.
// allStats, err := database.GetAllMinerStats()
// for _, stats := range allStats { logging.Info("stats", logging.Fields{"miner": stats.MinerName, "average": stats.AverageRate}) }
func GetAllMinerStats() ([]HashrateStats, error) {

View file

@ -1,4 +1,7 @@
// Package logging provides structured logging with log levels and fields.
// logger := logging.New(logging.Config{Level: logging.LevelDebug, Component: "mining"})
// logger.Info("started", logging.Fields{"miner": "xmrig"})
// logging.SetGlobal(logger)
// logging.Info("global log", logging.Fields{"key": "value"})
package logging
import (
@ -62,7 +65,6 @@ func DefaultConfig() Config {
}
}
// New creates a Logger with the given configuration.
// logger := logging.New(logging.Config{Output: os.Stderr, Level: logging.LevelInfo, Component: "mining"})
func New(config Config) *Logger {
if config.Output == nil {
@ -75,7 +77,6 @@ func New(config Config) *Logger {
}
}
// WithComponent returns a child logger scoped to a sub-system.
// child := logger.WithComponent("xmrig")
// child.Info("miner started")
func (logger *Logger) WithComponent(component string) *Logger {
@ -86,7 +87,6 @@ func (logger *Logger) WithComponent(component string) *Logger {
}
}
// SetLevel adjusts the minimum level for subsequent log calls.
// logger.SetLevel(logging.LevelDebug)
func (logger *Logger) SetLevel(level Level) {
logger.mutex.Lock()
@ -94,7 +94,6 @@ func (logger *Logger) SetLevel(level Level) {
logger.level = level
}
// GetLevel returns the current minimum log level.
// current := logger.GetLevel()
// if current == logging.LevelDebug { logger.SetLevel(logging.LevelInfo) }
func (logger *Logger) GetLevel() Level {

View file

@ -584,7 +584,7 @@ func (m *Manager) startStatsCollection() {
}()
}
// statsCollectionTimeout is the maximum time to wait for stats from a single miner.
// ctx, cancel := context.WithTimeout(ctx, statsCollectionTimeout)
const statsCollectionTimeout = 5 * time.Second
// m.collectMinerStats() // called by startStatsCollection ticker; gathers stats from all active miners in parallel
@ -630,10 +630,10 @@ func (m *Manager) collectMinerStats() {
waitGroup.Wait()
}
// statsRetryCount is the number of retries for transient stats failures.
// for attempt := 0; attempt <= statsRetryCount; attempt++ { ... }
const statsRetryCount = 2
// statsRetryDelay is the delay between stats collection retries.
// time.Sleep(statsRetryDelay) // between retry attempts
const statsRetryDelay = 500 * time.Millisecond
// m.collectSingleMinerStats(miner, "xmrig", time.Now(), true) // retries up to statsRetryCount times; persists to DB if databaseEnabled

View file

@ -39,7 +39,7 @@ func NewLogBuffer(maxLines int) *LogBuffer {
}
}
// maxLineLength is the maximum length of a single log line to prevent memory bloat.
// if len(line) > maxLineLength { line = line[:maxLineLength] + "... [truncated]" }
const maxLineLength = 2000
// cmd.Stdout = lb // satisfies io.Writer; timestamps and ring-buffers each line
@ -197,7 +197,7 @@ func (b *BaseMiner) Stop() error {
return nil
}
// stdinWriteTimeout is the maximum time to wait for stdin write to complete.
// case <-time.After(stdinWriteTimeout): return ErrTimeout("stdin write")
const stdinWriteTimeout = 5 * time.Second
// if err := miner.WriteStdin("h"); err != nil { /* miner not running */ }

View file

@ -5,7 +5,7 @@ import (
"time"
)
// statsTimeout is the timeout for stats HTTP requests (shorter than general timeout)
// reqCtx, cancel := context.WithTimeout(ctx, statsTimeout)
const statsTimeout = 5 * time.Second
// metrics, err := miner.GetStats(ctx)

View file

@ -1,4 +1,6 @@
// Package node provides P2P node identity and communication for multi-node mining management.
// manager, _ := node.NewNodeManager()
// manager.GenerateIdentity("my-worker", node.RoleWorker)
// identity := manager.GetIdentity()
package node
import (