Compare commits

..

1 commit

Author SHA1 Message Date
google-labs-jules[bot]
c0775d9d8b Implement STIM bundle decryption and installation
- Updated `Worker.handleDeploy` to handle STIM bundles using `ExtractProfileBundle` and `ExtractMinerBundle`.
- Used `PeerConnection.SharedSecret` as the password for decryption.
- Implemented logic for `BundleProfile`, `BundleMiner`, and `BundleFull`.
- Fixed broken files `pkg/node/dispatcher.go` and `pkg/node/peer.go` to ensure compilation and testing.
- Updated tests in `pkg/node/worker_test.go` and added coverage for deployment logic.
2026-01-06 21:58:30 +00:00
142 changed files with 10743 additions and 16531 deletions

View file

@@ -1,12 +0,0 @@
name: Security Scan
on:
push:
branches: [main, dev, 'feat/*']
pull_request:
branches: [main]
jobs:
security:
uses: core/go-devops/.forgejo/workflows/security-scan.yml@main
secrets: inherit

View file

@@ -1,14 +0,0 @@
name: Test
on:
push:
branches: [main, dev]
pull_request:
branches: [main]
jobs:
test:
uses: core/go-devops/.forgejo/workflows/go-test.yml@main
with:
race: true
coverage: true

View file

@@ -1,251 +0,0 @@
name: Miner Release
on:
push:
tags:
- 'miner-v*'
workflow_dispatch:
inputs:
version:
description: 'Version tag (e.g., 0.1.0)'
required: true
default: '0.1.0'
env:
BUILD_TYPE: Release
jobs:
build-linux:
name: Linux ${{ matrix.arch }}
runs-on: ubuntu-latest
strategy:
matrix:
include:
- arch: x64
cmake_arch: x86_64
- arch: arm64
cmake_arch: aarch64
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install -y \
build-essential \
cmake \
libuv1-dev \
libssl-dev \
libhwloc-dev \
git
- name: Build miner core
working-directory: miner/core
run: |
mkdir -p build && cd build
cmake .. \
-DCMAKE_BUILD_TYPE=$BUILD_TYPE \
-DWITH_OPENCL=OFF \
-DWITH_CUDA=OFF
cmake --build . --config $BUILD_TYPE -j$(nproc)
- name: Build miner proxy
working-directory: miner/proxy
run: |
mkdir -p build && cd build
cmake .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE
cmake --build . --config $BUILD_TYPE -j$(nproc)
- name: Package binaries
run: |
mkdir -p dist
cp miner/core/build/miner dist/
cp miner/proxy/build/miner-proxy dist/
chmod +x dist/*
cd dist
tar -czvf ../miner-linux-${{ matrix.arch }}.tar.gz *
- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: miner-linux-${{ matrix.arch }}
path: miner-linux-${{ matrix.arch }}.tar.gz
build-macos:
name: macOS ${{ matrix.arch }}
runs-on: macos-latest
strategy:
matrix:
include:
- arch: x64
cmake_osx_arch: x86_64
- arch: arm64
cmake_osx_arch: arm64
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Install dependencies
run: |
brew install cmake libuv openssl hwloc
- name: Build miner core
working-directory: miner/core
env:
OSX_ARCH: ${{ matrix.cmake_osx_arch }}
run: |
mkdir -p build && cd build
cmake .. \
-DCMAKE_BUILD_TYPE=$BUILD_TYPE \
-DCMAKE_OSX_ARCHITECTURES=$OSX_ARCH \
-DWITH_OPENCL=OFF \
-DWITH_CUDA=OFF \
-DOPENSSL_ROOT_DIR=$(brew --prefix openssl)
cmake --build . --config $BUILD_TYPE -j$(sysctl -n hw.ncpu)
- name: Build miner proxy
working-directory: miner/proxy
env:
OSX_ARCH: ${{ matrix.cmake_osx_arch }}
run: |
mkdir -p build && cd build
cmake .. \
-DCMAKE_BUILD_TYPE=$BUILD_TYPE \
-DCMAKE_OSX_ARCHITECTURES=$OSX_ARCH \
-DOPENSSL_ROOT_DIR=$(brew --prefix openssl)
cmake --build . --config $BUILD_TYPE -j$(sysctl -n hw.ncpu)
- name: Package binaries
run: |
mkdir -p dist
cp miner/core/build/miner dist/
cp miner/proxy/build/miner-proxy dist/
chmod +x dist/*
cd dist
tar -czvf ../miner-macos-${{ matrix.arch }}.tar.gz *
- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: miner-macos-${{ matrix.arch }}
path: miner-macos-${{ matrix.arch }}.tar.gz
build-windows:
name: Windows x64
runs-on: windows-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Setup MSVC
uses: microsoft/setup-msbuild@v2
- name: Install dependencies
run: |
vcpkg install libuv:x64-windows openssl:x64-windows
- name: Build miner core
working-directory: miner/core
run: |
mkdir build
cd build
cmake .. `
-DCMAKE_BUILD_TYPE=$env:BUILD_TYPE `
-DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake" `
-DWITH_OPENCL=OFF `
-DWITH_CUDA=OFF
cmake --build . --config $env:BUILD_TYPE
- name: Build miner proxy
working-directory: miner/proxy
run: |
mkdir build
cd build
cmake .. `
-DCMAKE_BUILD_TYPE=$env:BUILD_TYPE `
-DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake"
cmake --build . --config $env:BUILD_TYPE
- name: Package binaries
run: |
mkdir dist
Copy-Item miner/core/build/$env:BUILD_TYPE/miner.exe dist/
Copy-Item miner/proxy/build/$env:BUILD_TYPE/miner-proxy.exe dist/
Compress-Archive -Path dist/* -DestinationPath miner-windows-x64.zip
- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: miner-windows-x64
path: miner-windows-x64.zip
release:
name: Create Release
needs: [build-linux, build-macos, build-windows]
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Download all artifacts
uses: actions/download-artifact@v4
with:
path: artifacts
- name: List artifacts
run: ls -la artifacts/*/
- name: Create Release
uses: softprops/action-gh-release@v1
with:
draft: false
prerelease: false
files: |
artifacts/miner-linux-x64/miner-linux-x64.tar.gz
artifacts/miner-linux-arm64/miner-linux-arm64.tar.gz
artifacts/miner-macos-x64/miner-macos-x64.tar.gz
artifacts/miner-macos-arm64/miner-macos-arm64.tar.gz
artifacts/miner-windows-x64/miner-windows-x64.zip
body: |
## Miner Suite
### Downloads
| Platform | Architecture | Download |
|----------|--------------|----------|
| Linux | x64 | `miner-linux-x64.tar.gz` |
| Linux | ARM64 | `miner-linux-arm64.tar.gz` |
| macOS | Intel | `miner-macos-x64.tar.gz` |
| macOS | Apple Silicon | `miner-macos-arm64.tar.gz` |
| Windows | x64 | `miner-windows-x64.zip` |
### Included Binaries
- `miner` - CPU/GPU cryptocurrency miner
- `miner-proxy` - Stratum proxy for mining farms
### Quick Start
```bash
# Extract
tar -xzf miner-linux-x64.tar.gz
# Run miner
./miner -o pool.example.com:3333 -u YOUR_WALLET -p x
# Run proxy
./miner-proxy -o pool.example.com:3333 -u YOUR_WALLET -b 0.0.0.0:3333
```
See [miner/README.md](miner/README.md) for full documentation.
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View file

@@ -1,34 +0,0 @@
# Repository Guidelines
## AX Standard
This repository is being upgraded toward the Core Agent Experience standard defined in `/home/claude/Code/core/plans/rfc/core/RFC-CORE-008-AGENT-EXPERIENCE.md`.
Apply these rules by default:
- Prefer explicit names over short names. Use `manager`, `service`, `config`, and `request` instead of repo-local abbreviations like `mgr`, `svc`, and `cfg`.
- Write comments as concrete usage examples with realistic values.
- Keep paths and filenames descriptive so intent is obvious before opening a file.
- Preserve one-way dependency flow: foundational packages in `pkg/` should not depend on CLI wrappers in `cmd/`.
- Make repeated shapes predictable. Reuse existing config structs, route patterns, and test helpers instead of inventing parallel forms.
## Go Project Shape
- `cmd/mining/` contains Cobra CLI entrypoints.
- `pkg/mining/` contains miner orchestration, REST API, profiles, auth, and service container logic.
- `pkg/node/` contains peer, protocol, transport, and worker logic.
- `pkg/database/` contains persistence and hashrate storage.
- `cmd/desktop/mining-desktop/` contains the Wails desktop app wrapper.
## Working Rules
- Keep changes behavioral-safe unless the task explicitly asks for feature work.
- Prefer focused AX upgrades in high-traffic files such as `pkg/mining/service.go`, `pkg/mining/manager.go`, and `cmd/mining/cmd/*.go`.
- Run `gofmt` on edited Go files.
- Validate targeted changes first with package-level tests before broader runs.
## Verification
- `go test ./pkg/mining/...`
- `go test ./cmd/mining/...`
- `make test-go`

View file

@@ -1,127 +0,0 @@
# Code Complexity and Maintainability Audit
This document analyzes the code quality of the codebase, identifies maintainability issues, and provides recommendations for improvement. The audit focuses on cyclomatic and cognitive complexity, code duplication, and other maintainability metrics.
## 1. God Class: `Manager`
### Finding
The `Manager` struct in `pkg/mining/manager.go` is a "God Class" that violates the Single Responsibility Principle. It handles multiple, unrelated responsibilities, including:
- Miner lifecycle management (`StartMiner`, `StopMiner`)
- Configuration management (`syncMinersConfig`, `updateMinerConfig`)
- Database interactions (`initDatabase`, `startDBCleanup`)
- Statistics collection (`startStatsCollection`, `collectMinerStats`)
This centralization of concerns makes the `Manager` class difficult to understand, test, and maintain. The presence of multiple mutexes (`mu`, `eventHubMu`) to prevent deadlocks is a clear indicator of its high cognitive complexity.
### Recommendation
Refactor the `Manager` class into smaller, more focused components, each with a single responsibility.
- **`MinerRegistry`**: Manages the lifecycle of miner instances.
- **`StatsCollector`**: Gathers and aggregates statistics from miners.
- **`ConfigService`**: Handles loading, saving, and updating miner configurations.
- **`DBManager`**: Manages all database-related operations.
This separation of concerns will improve modularity, reduce complexity, and make the system easier to reason about and test.
## 2. Code Duplication: Miner Installation
### Finding
The `Install` and `CheckInstallation` methods in `pkg/mining/xmrig.go` and `pkg/mining/ttminer.go` contain nearly identical logic for downloading, extracting, and verifying miner installations. This copy-paste pattern violates the DRY (Don't Repeat Yourself) principle and creates a significant maintenance burden. Any change to the installation process must be manually duplicated across all miner implementations.
### Recommendation
Refactor the duplicated logic into the `BaseMiner` struct using the **Template Method Pattern**. The base struct will define the skeleton of the installation algorithm, while subclasses will override specific steps (like providing the download URL format) that vary between miners.
#### Example
The `BaseMiner` can provide a generic `Install` method that relies on a new, unexported method, `getDownloadURL`, which each miner implementation must provide.
**`pkg/mining/miner.go` (BaseMiner)**
```go
// Install orchestrates the download and extraction process.
func (b *BaseMiner) Install() error {
version, err := b.GetLatestVersion()
if err != nil {
return err
}
b.Version = version
url, err := b.getDownloadURL(version)
if err != nil {
return err
}
return b.InstallFromURL(url)
}
// getDownloadURL is a template method to be implemented by subclasses.
func (b *BaseMiner) getDownloadURL(version string) (string, error) {
// This will be overridden by specific miner types
return "", errors.New("getDownloadURL not implemented")
}
```
**`pkg/mining/xmrig.go` (XMRigMiner)**
```go
// getDownloadURL implements the template method for XMRig.
func (m *XMRigMiner) getDownloadURL(version string) (string, error) {
v := strings.TrimPrefix(version, "v")
switch runtime.GOOS {
case "windows":
return fmt.Sprintf("https://.../xmrig-%s-windows-x64.zip", v), nil
case "linux":
return fmt.Sprintf("https://.../xmrig-%s-linux-static-x64.tar.gz", v), nil
default:
return "", errors.New("unsupported OS")
}
}
```
## 3. Long and Complex Methods
### Finding
Several methods in the codebase are overly long and have high cognitive complexity, making them difficult to read, understand, and maintain.
- **`manager.StartMiner`**: This method is responsible for creating, configuring, and starting a miner. It mixes validation, port finding, instance name generation, and state management, making it hard to follow.
- **`manager.collectMinerStats`**: This function orchestrates the parallel collection of stats, but the logic for handling timeouts, retries, and database persistence is deeply nested.
- **`miner.ReduceHashrateHistory`**: The logic for aggregating high-resolution hashrate data into a low-resolution format is convoluted and hard to reason about.
### Recommendation
Apply the **Extract Method** refactoring to break down these long methods into smaller, well-named functions, each with a single, clear purpose.
#### Example: Refactoring `manager.StartMiner`
The `StartMiner` method could be refactored into several smaller helper functions.
**`pkg/mining/manager.go` (Original `StartMiner`)**
```go
func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
// ... (20+ lines of setup, validation, port finding)
// ... (10+ lines of miner-specific configuration)
// ... (10+ lines of starting and saving logic)
}
```
**`pkg/mining/manager.go` (Refactored `StartMiner`)**
```go
func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
instanceName, err := m.generateInstanceName(minerType, config)
if err != nil {
return nil, err
}
miner, err := m.configureMiner(minerType, instanceName, config)
if err != nil {
return nil, err
}
if err := m.launchAndRegisterMiner(miner, config); err != nil {
return nil, err
}
return miner, nil
}
```

View file

@@ -1,60 +0,0 @@
# Concurrency and Race Condition Audit
## 1. Executive Summary
This audit examined the concurrency safety of the mining operations within the `pkg/mining` package. The assessment involved a combination of automated race detection using `go test -race` and a manual code review of the key components responsible for managing miner lifecycles and statistics collection.
**The primary finding is that the core concurrency logic is well-designed and appears to be free of race conditions.** The code demonstrates a strong understanding of Go's concurrency patterns, with proper use of mutexes to protect shared state.
The most significant risk identified is the **lack of complete test coverage** for code paths that interact with live miner processes. This limitation prevented the Go race detector from analyzing these sections, leaving a gap in the automated verification.
## 2. Methodology
The audit was conducted in two phases:
1. **Automated Race Detection**: The test suite for the `pkg/mining` package was executed with the `-race` flag enabled (`go test -race ./pkg/mining/...`). This tool instrumented the code to detect and report any data races that occurred during the execution of the tests.
2. **Manual Code Review**: A thorough manual inspection of the source code was performed, focusing on `manager.go`, `miner.go`, and the `xmrig` and `ttminer` implementations. The review prioritized areas with shared mutable state, goroutine management, and I/O operations.
## 3. Findings
### 3.1. Automated Race Detection (`go test -race`)
The Go race detector **did not report any race conditions** in the code paths that were executed by the test suite. This provides a good level of confidence in the concurrency safety of the `Manager`'s core logic for adding, removing, and listing miners, as these operations are well-covered by the existing tests.
However, a number of tests related to live miner interaction (e.g., `TestCPUThrottleSingleMiner`) were skipped because they require the `xmrig` binary to be present in the test environment. As a result, the race detector could not analyze the code executed in these tests.
### 3.2. Manual Code Review
The manual review confirmed the findings of the race detector and extended the analysis to the code paths that were not covered by the tests.
#### 3.2.1. `Manager` (`manager.go`)
* **Shared State**: The `miners` map is the primary shared resource.
* **Protection**: A `sync.RWMutex` is used to protect all access to the `miners` map.
* **Analysis**: The `collectMinerStats` function is the most critical concurrent operation. It correctly uses a read lock to create a snapshot of the active miners and then releases the lock before launching concurrent goroutines to collect stats from each miner. This is a robust pattern that minimizes lock contention and delegates thread safety to the individual `Miner` implementations. All other methods on the `Manager` use the mutex correctly.
#### 3.2.2. `BaseMiner` (`miner.go`)
* **Shared State**: The `BaseMiner` struct contains several fields that are accessed and modified concurrently, including `Running`, `cmd`, and `HashrateHistory`.
* **Protection**: A `sync.RWMutex` is used to protect all shared fields.
* **Analysis**: Methods like `Stop`, `AddHashratePoint`, and `ReduceHashrateHistory` correctly acquire and release the mutex. The locking is fine-grained and properly scoped.
#### 3.2.3. `XMRigMiner` and `TTMiner`
* **`GetStats` Method**: This is the most important method for concurrency in the miner implementations. Both `XMRigMiner` and `TTMiner` follow an excellent pattern:
1. Acquire a read lock to safely read the API configuration.
2. Release the lock *before* making the blocking HTTP request.
3. After the request completes, acquire a write lock to update the `FullStats` field.
This prevents holding a lock during a potentially long I/O operation, which is a common cause of performance bottlenecks and deadlocks.
* **`Start` Method**: Both implementations launch a goroutine to wait for the miner process to exit. This goroutine correctly captures a local copy of the `exec.Cmd` pointer. When updating the `Running` and `cmd` fields after the process exits, it checks if the current `m.cmd` is still the same as the one it was started with. This correctly handles the case where a miner might be stopped and restarted quickly, preventing the old goroutine from incorrectly modifying the state of the new process.
## 4. Conclusion and Recommendations
The mining operations in this codebase are implemented with a high degree of concurrency safety. The use of mutexes is consistent and correct, and the patterns used for handling I/O in concurrent contexts are exemplary.
The primary recommendation is to **improve the test coverage** to allow the Go race detector to provide a more complete analysis.
* **Recommendation 1 (High Priority)**: Modify the test suite to use a mock or simulated miner process. The existing tests already use a dummy script for some installation checks. This could be extended to create a mock HTTP server that simulates the miner's API. This would allow the skipped tests to run, enabling the race detector to analyze the `GetStats` methods and other live interaction code paths.
* **Recommendation 2 (Low Priority)**: The `httpClient` in `xmrig.go` is a global variable protected by a mutex. While the default `http.Client` is thread-safe, and the mutex provides protection for testing, it would be slightly cleaner to make the HTTP client a field on the `XMRigMiner` struct. This would avoid the global state and make the dependencies of the miner more explicit. However, this is a minor architectural point and not a critical concurrency issue.
Overall, the risk of race conditions in the current codebase is low, but shoring up the test suite would provide even greater confidence in its robustness.

View file

@@ -1,72 +0,0 @@
# Documentation Audit Report
## README Assessment
| Category | Status | Notes |
|---|---|---|
| **Project Description** | ✅ Pass | The README provides a clear and concise description of the project's purpose. |
| **Quick Start** | ✅ Pass | The "Quick Start" section is excellent, offering a Docker command for immediate setup. |
| **Installation** | ✅ Pass | Multiple installation methods are documented (Docker, CLI, source). |
| **Configuration** | ✅ Pass | Configuration is explained with a clear example of a JSON profile. |
| **Examples** | ✅ Pass | The README includes usage examples for Docker, the CLI, and the web component. |
| **Badges** | ✅ Pass | A comprehensive set of badges is present, covering build status, coverage, and versioning. |
**Overall:** The `README.md` is comprehensive and user-friendly.
## Code Documentation
| Category | Status | Notes |
|---|---|---|
| **Function Docs** | ✅ Pass | Public APIs are well-documented with clear explanations. |
| **Parameter Types** | ✅ Pass | Go's static typing ensures parameter types are documented. |
| **Return Values** | ✅ Pass | Return values are documented in the function comments. |
| **Examples** | ❌ Fail | There are no runnable examples in the Go docstrings. |
| **Outdated Docs** | ✅ Pass | The documentation appears to be up-to-date with the code. |
**Overall:** The code is well-documented, but could be improved by adding runnable examples in the docstrings, which would be automatically included in the GoDoc.
## Architecture Documentation
| Category | Status | Notes |
|---|---|---|
| **System Overview** | ✅ Pass | `docs/ARCHITECTURE.md` provides a high-level overview of the system. |
| **Data Flow** | ✅ Pass | The architecture document includes a sequence diagram illustrating data flow. |
| **Component Diagram** | ✅ Pass | A Mermaid diagram visually represents the system's components. |
| **Decision Records** | ❌ Fail | There are no Architecture Decision Records (ADRs) present. |
**Overall:** The architecture is well-documented, but would benefit from ADRs to track key decisions.
## Developer Documentation
| Category | Status | Notes |
|---|---|---|
| **Contributing Guide** | ✅ Pass | The `README.md` and `docs/DEVELOPMENT.md` provide clear contribution instructions. |
| **Development Setup** | ✅ Pass | Prerequisites and setup steps are documented. |
| **Testing Guide** | ✅ Pass | The `docs/DEVELOPMENT.md` file explains how to run tests. |
| **Code Style** | 🟠 Partial | A formal code style guide is missing, but `make lint` and `make fmt` are provided. |
**Overall:** Developer documentation is good, but a formal style guide would be a useful addition.
## User Documentation
| Category | Status | Notes |
|---|---|---|
| **User Guide** | ✅ Pass | The MkDocs site serves as a comprehensive user guide. |
| **FAQ** | ❌ Fail | A dedicated FAQ section is missing. |
| **Troubleshooting** | ✅ Pass | A troubleshooting guide is available in the documentation. |
| **Changelog** | ✅ Pass | `CHANGELOG.md` is present and well-maintained. |
**Overall:** User documentation is strong, but could be improved with a FAQ section.
## Summary of Documentation Gaps
The following documentation gaps have been identified:
- **Code Documentation:**
- Add runnable examples to Go docstrings to improve GoDoc.
- **Architecture Documentation:**
- Introduce Architecture Decision Records (ADRs) to document key architectural decisions.
- **Developer Documentation:**
- Create a formal code style guide to ensure consistency.
- **User Documentation:**
- Add a Frequently Asked Questions (FAQ) section to the user guide.

View file

@@ -1,49 +0,0 @@
# Error Handling and Logging Audit
## 1. Error Handling
### Exception Handling & Error Recovery
- **Graceful Degradation**: The application demonstrates graceful degradation in `pkg/mining/service.go`, where the `NewService` function continues to operate with a minimal in-memory profile manager if the primary one fails to initialize. This ensures core functionality remains available.
- **Inconsistent Top-Level Handling**: Error handling at the application's entry points is inconsistent.
- In `cmd/desktop/mining-desktop/main.go`, errors from `fs.Sub` and `app.Run` are handled with `log.Fatal`, which abruptly terminates the application without using the project's structured logger.
- In `cmd/mining/main.go`, errors from `cmd.Execute` are printed to `stderr` with `fmt.Fprintf`, and the application exits with a status code of 1. This is a standard CLI pattern but bypasses the custom logging framework.
- **No Retry or Circuit Breaker Patterns**: The codebase does not currently implement explicit retry logic with backoff or circuit breaker patterns for handling failures in external dependencies or services. However, the API error response includes a `Retryable` field, which correctly signals to clients when a retry is appropriate (e.g., for `503 Service Unavailable`).
### User-Facing & API Errors
- **Standard API Error Response**: The API service (`pkg/mining/service.go`) excels at providing consistent, user-friendly error responses.
- It uses a well-defined `APIError` struct that includes a machine-readable `code`, a human-readable `message`, and an optional `suggestion` to guide the user.
- The `respondWithError` and `respondWithMiningError` functions centralize error response logic, ensuring all API errors follow a consistent format.
- **Appropriate HTTP Status Codes**: The API correctly maps application errors to standard HTTP status codes (e.g., `404 Not Found` for missing miners, `400 Bad Request` for invalid input, `500 Internal Server Error` for server-side issues).
- **Controlled Information Leakage**: The `sanitizeErrorDetails` function prevents the leakage of sensitive internal error details in production environments (`GIN_MODE=release`), enhancing security. Debug information is only exposed when `DEBUG_ERRORS` is enabled.
## 2. Logging
### Log Content and Quality
- **Custom Structured Logger**: The project includes a custom logger in `pkg/logging/logger.go` that supports standard log levels (Debug, Info, Warn, Error) and allows for structured logging by attaching key-value fields.
- **No JSON Output**: The logger's output is a custom string format (`timestamp [LEVEL] [component] message | key=value`), not structured JSON. This makes logs less machine-readable and harder to parse, filter, and analyze with modern log management tools.
- **Good Context in Error Logs**: The existing usage of `logging.Error` throughout the `pkg/mining` module is effective, consistently including relevant context (e.g., `miner`, `panic`, `error`) as structured fields.
- **Request Correlation**: The API service (`pkg/mining/service.go`) implements a `requestIDMiddleware` that assigns a unique `X-Request-ID` to each request, which is then included in logs. This is excellent practice for tracing and debugging.
### What is Not Logged
- **No Sensitive Data**: Based on a review of `logging.Error` usage, the application appears to correctly avoid logging sensitive information such as passwords, tokens, or personally identifiable information (PII).
### Inconsistencies
- **Inconsistent Adoption**: The custom logger is not used consistently across the project. The `main` packages for both the desktop and CLI applications (`cmd/desktop/mining-desktop/main.go` and `cmd/mining/main.go`) use the standard `log` and `fmt` packages for error handling, bypassing the structured logger.
- **No Centralized Configuration**: There is no centralized logger initialization in `main` or `root.go`. The global logger is used with its default configuration (Info level, stderr output), and there is no clear mechanism for configuring the log level or output via command-line flags or a configuration file.
## 3. Recommendations
1. **Adopt Structured JSON Logging**: Modify the logger in `pkg/logging/logger.go` to output logs in JSON format. This will significantly improve the logs' utility by making them machine-readable and compatible with log analysis platforms like Splunk, Datadog, or the ELK stack.
2. **Centralize Logger Configuration**:
* In `cmd/mining/cmd/root.go`, add persistent flags for `--log-level` and `--log-format` (e.g., `text`, `json`).
* In an `init` function, parse these flags and configure the global `logging.Logger` instance accordingly.
* Do the same for the desktop application in `cmd/desktop/mining-desktop/main.go`, potentially reading from a configuration file or environment variables.
3. **Standardize on the Global Logger**:
* Replace all instances of `log.Fatal` in `cmd/desktop/mining-desktop/main.go` with `logging.Error` followed by `os.Exit(1)`.
* Replace `fmt.Fprintf(os.Stderr, ...)` in `cmd/mining/main.go` with a call to `logging.Error`.
4. **Enrich API Error Logs**: In `pkg/mining/service.go`, enhance the `respondWithError` function to log every API error it handles using the structured logger. This will ensure that all error conditions, including client-side errors like bad requests, are recorded for monitoring and analysis. Include the `request_id` in every log entry.
5. **Review Log Levels**: Conduct a codebase-wide review of log levels. Ensure that `Debug` is used for verbose, development-time information, `Info` for significant operational events, `Warn` for recoverable issues, and `Error` for critical, action-required failures.

View file

@@ -1,204 +0,0 @@
# Security Audit: Input Validation
This document outlines the findings of a security audit focused on input validation and sanitization within the mining application.
## Input Entry Points Inventory
### API Endpoints
The primary entry points for untrusted input are the API handlers defined in `pkg/mining/service.go`. The following handlers process user-controllable data from URL path parameters, query strings, and request bodies:
- **System & Miner Management:**
- `POST /miners/:miner_name/install`: `miner_name` from path.
- `DELETE /miners/:miner_name/uninstall`: `miner_name` from path.
- `DELETE /miners/:miner_name`: `miner_name` from path.
- `POST /miners/:miner_name/stdin`: `miner_name` from path and JSON body (`input`).
- **Statistics & History:**
- `GET /miners/:miner_name/stats`: `miner_name` from path.
- `GET /miners/:miner_name/hashrate-history`: `miner_name` from path.
- `GET /miners/:miner_name/logs`: `miner_name` from path.
- `GET /history/miners/:miner_name`: `miner_name` from path.
- `GET /history/miners/:miner_name/hashrate`: `miner_name` from path, `since` and `until` from query.
- **Profiles:**
- `POST /profiles`: JSON body (`MiningProfile`).
- `GET /profiles/:id`: `id` from path.
- `PUT /profiles/:id`: `id` from path and JSON body (`MiningProfile`).
- `DELETE /profiles/:id`: `id` from path.
- `POST /profiles/:id/start`: `id` from path.
### WebSocket Events
The WebSocket endpoint provides another significant entry point for untrusted input:
- **`GET /ws/events`**: Establishes a WebSocket connection. While the primary flow is server-to-client, the initial handshake and any client-to-server messages must be considered untrusted input. The `wsUpgrader` in `pkg/mining/service.go` has an origin check, which is a good security measure.
## Validation Gaps Found
The `Config.Validate()` method in `pkg/mining/mining.go` provides a solid baseline for input validation but has several gaps:
### Strengths
- **Core Fields Validated**: The most critical fields for command-line construction (`Pool`, `Wallet`, `Algo`, `CLIArgs`) have validation checks.
- **Denylist for Shell Characters**: The `containsShellChars` function attempts to block a wide range of characters that could be used for shell injection.
- **Range Checks**: Numeric fields like `Threads`, `Intensity`, and `DonateLevel` are correctly checked to ensure they fall within a sane range.
- **Allowlist for Algorithm**: The `isValidAlgo` function uses a strict allowlist for the `Algo` field, which is a security best practice.
### Weaknesses and Gaps
- **Incomplete Field Coverage**: A significant number of fields in the `Config` struct are not validated at all. An attacker could potentially abuse these fields if they are used in command-line arguments or other sensitive operations in the future. Unvalidated fields include:
- `Coin`
- `Password`
- `UserPass`
- `Proxy`
- `RigID`
- `LogFile` (potential for path traversal)
- `CPUAffinity`
- `Devices`
- Many others.
- **Denylist Approach**: The primary validation mechanism, `containsShellChars`, relies on a denylist of dangerous characters. This approach is inherently brittle because it is impossible to foresee all possible malicious inputs. A determined attacker might find ways to bypass the filter using alternative encodings or unlisted characters. An allowlist approach, accepting only known-good characters, is much safer.
- **No Path Traversal Protection**: The `LogFile` field is not validated. An attacker could provide a value like `../../../../etc/passwd` to attempt to write files in arbitrary locations on the filesystem.
- **Inconsistent Numeric Validation**: While some numeric fields are validated, others like `Retries`, `RetryPause`, `CPUPriority`, etc., are not checked for negative values or reasonable upper bounds.
## Injection Vectors Discovered
The primary injection vector discovered is through the `Config.CLIArgs` field, which is used to pass additional command-line arguments to the miner executables.
### XMRig Miner (`pkg/mining/xmrig_start.go`)
- **Unused in `xmrig_start.go`**: The `addCliArgs` function in `xmrig_start.go` does not actually use the `CLIArgs` field. It constructs arguments from other validated fields. This is good, but the presence of the field in the `Config` struct is misleading and could be used in the future, creating a vulnerability if not handled carefully.
### TT-Miner (`pkg/mining/ttminer_start.go`)
- **Direct Command Injection via `CLIArgs`**: The `addTTMinerCliArgs` function directly appends the contents of `Config.CLIArgs` to the command-line arguments. Although it uses a denylist-based `isValidCLIArg` function to filter out some dangerous characters, this approach is not foolproof.
- **Vulnerability**: An attacker can bypass the filter by crafting a malicious string that is not on the denylist but is still interpreted by the shell. For example, if a new shell feature or a different shell is used on the system, the denylist may become ineffective.
- **Example**: While the current filter blocks most common injection techniques, an attacker could still pass arguments that might cause unexpected behavior in the miner, such as `--algo some-exploitable-algo`, if the miner itself has vulnerabilities in how it parses certain arguments.
### Path Traversal in Config File Creation
- **Vulnerability**: The `getXMRigConfigPath` function in `xmrig.go` uses the `instanceName` to construct a config file path. The `instanceName` is derived from the user-provided `config.Algo`. While the `instanceNameRegex` in `manager.go` sanitizes the algorithm name, it still allows forward slashes (`/`).
- **Example**: If an attacker provides a crafted `algo` like `../../../../tmp/myconfig`, the `instanceNameRegex` will not sanitize it, and the application could write a config file to an arbitrary location. This could be used to overwrite critical files or place malicious configuration files in sensitive locations.
## Remediation Recommendations
To address the identified vulnerabilities, the following remediation actions are recommended:
### 1. Strengthen `Config.Validate()` with an Allowlist Approach
Instead of relying on a denylist of dangerous characters, the validation should be updated to use a strict allowlist of known-good characters for each field.
**Code Example (`pkg/mining/mining.go`):**
```go
// isValidInput checks if a string contains only allowed characters.
// This should be used for fields like Wallet, Password, Pool, etc.
func isValidInput(s string, allowedChars string) bool {
for _, r := range s {
if !strings.ContainsRune(allowedChars, r) {
return false
}
}
return true
}
// In Config.Validate():
func (c *Config) Validate() error {
// Example for Wallet field
if c.Wallet != "" {
// Allow alphanumeric, plus common address characters like '-' and '_'
allowedChars := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_"
if !isValidInput(c.Wallet, allowedChars) {
return fmt.Errorf("wallet address contains invalid characters")
}
}
// Apply similar allowlist validation to all other string fields.
// ...
return nil
}
```
### 2. Sanitize File Paths to Prevent Path Traversal
Sanitize any user-controllable input that is used to construct file paths. The `filepath.Clean` function and checks to ensure the path stays within an expected directory are essential.
**Code Example (`pkg/mining/manager.go`):**
```go
import "path/filepath"
// In Manager.StartMiner():
// ...
instanceName := miner.GetName()
if config.Algo != "" {
// Sanitize algo to prevent directory traversal
sanitizedAlgo := instanceNameRegex.ReplaceAllString(config.Algo, "_")
// Also, explicitly remove any path-related characters that the regex might miss
sanitizedAlgo = strings.ReplaceAll(sanitizedAlgo, "/", "")
sanitizedAlgo = strings.ReplaceAll(sanitizedAlgo, "..", "")
instanceName = fmt.Sprintf("%s-%s", instanceName, sanitizedAlgo)
}
// ...
```
### 3. Avoid Passing Raw CLI Arguments to `exec.Command`
The `CLIArgs` field is inherently dangerous. If it must be supported, it should be parsed and validated argument by argument, rather than being passed directly to the shell.
**Code Example (`pkg/mining/ttminer_start.go`):**
```go
// In addTTMinerCliArgs():
func addTTMinerCliArgs(config *Config, args *[]string) {
if config.CLIArgs != "" {
// A safer approach is to define a list of allowed arguments
allowedArgs := map[string]bool{
"--list-devices": true,
"--no-watchdog": true,
// Add other safe, non-sensitive arguments here
}
extraArgs := strings.Fields(config.CLIArgs)
for _, arg := range extraArgs {
if allowedArgs[arg] {
*args = append(*args, arg)
} else {
logging.Warn("skipping potentially unsafe CLI argument", logging.Fields{"arg": arg})
}
}
}
}
```
### 4. Expand Validation Coverage in `Config.Validate()`
All fields in the `Config` struct should have some form of validation. For string fields, this should be allowlist-based character validation. For numeric fields, this should be range checking.
**Code Example (`pkg/mining/mining.go`):**
```go
// In Config.Validate():
// ...
// Example for LogFile
if c.LogFile != "" {
// Basic validation: ensure it's just a filename, not a path
if strings.Contains(c.LogFile, "/") || strings.Contains(c.LogFile, "\\") {
return fmt.Errorf("LogFile cannot be a path")
}
// Use an allowlist for the filename itself
allowedChars := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_."
if !isValidInput(c.LogFile, allowedChars) {
return fmt.Errorf("LogFile contains invalid characters")
}
}
// Example for CPUPriority
if c.CPUPriority < 0 || c.CPUPriority > 5 {
return fmt.Errorf("CPUPriority must be between 0 and 5")
}
// ...
```

View file

@ -1,71 +0,0 @@
# Memory and Resource Management Audit
This audit examines the application's memory and resource management based on a review of the codebase, with a focus on `pkg/mining/manager.go`, `pkg/mining/service.go`, and `pkg/database/database.go`.
## 1. Goroutine Leak Analysis
The application uses several long-running goroutines for background tasks. Overall, goroutine lifecycle management is robust, but there are minor areas for improvement.
### Findings:
- **Stats Collection (`manager.go`):** The `startStatsCollection` goroutine runs in a `for` loop with a `time.Ticker`. It reliably terminates when the `stopChan` is closed during `Manager.Stop()`.
- **Database Cleanup (`manager.go`):** The `startDBCleanup` goroutine also uses a `time.Ticker` and correctly listens for the `stopChan` signal, ensuring it exits cleanly.
- **WebSocket Event Hub (`service.go`):** The `EventHub.Run` method is launched as a goroutine and manages client connections. It terminates when its internal `quit` channel is closed, which is triggered by the `EventHub.Stop()` method.
### Recommendations:
- **No major issues found.** The use of `stopChan` and `sync.WaitGroup` in `Manager` provides a solid foundation for graceful shutdowns.
## 2. Memory Leak Analysis
The primary areas of concern for memory leaks are in-memory data structures that could grow indefinitely.
### Findings:
- **`Manager.miners` Map:** The `miners` map in the `Manager` struct stores active miner processes. Entries are added in `StartMiner` and removed in `StopMiner` and `UninstallMiner`. If a miner process were to crash or become unresponsive without `StopMiner` being called, its entry would persist in the map, causing a minor memory leak.
- **In-Memory Hashrate History:** Each miner maintains an in-memory `HashrateHistory`. The `ReduceHashrateHistory` method is called periodically to trim this data, preventing unbounded growth. This is a good practice.
- **Request Body Size Limit:** The `service.go` file correctly implements a 1MB request body size limit, which helps prevent memory exhaustion from large API requests.
### Recommendations:
- **Implement a health check for miners.** A periodic health check could detect unresponsive miner processes and trigger their removal from the `miners` map, preventing memory leaks from orphaned entries.
## 3. Database Resource Management
The application uses an SQLite database for persisting historical data.
### Findings:
- **Connection Pooling:** The `database.go` file configures the connection pool with `SetMaxOpenConns(1)`. This is appropriate for SQLite's single-writer model and prevents connection-related issues.
- **`hashrate_history` Cleanup:** The `Cleanup` function in `database.go` correctly removes old records from the `hashrate_history` table based on the configured retention period.
- **`miner_sessions` Table:** The `miner_sessions` table tracks miner uptime but has no corresponding cleanup mechanism. This table will grow indefinitely, leading to a gradual increase in database size and a potential performance degradation over time.
### Recommendations:
- **Add a cleanup mechanism for `miner_sessions`.** Extend the `Cleanup` function to also remove old records from the `miner_sessions` table based on the retention period.
## 4. File Handle and Process Management
The application manages external miner processes, which requires careful handling of file descriptors and process handles.
### Findings:
- **Process Lifecycle:** The `Stop` method on miner implementations (`xmrig.go`, `ttminer.go`) is responsible for terminating the `exec.Cmd` process. This appears to be handled correctly.
- **I/O Pipes:** The miner's `stdout`, `stderr`, and `stdin` pipes are created and managed. The code does not show any obvious leaks of these file handles.
### Recommendations:
- **No major issues found.** The process management logic appears to be sound.
## 5. Network Connection Handling
The application's API server and WebSocket endpoint are critical areas for resource management.
### Findings:
- **HTTP Server Timeouts:** The `service.go` file correctly configures `ReadTimeout`, `WriteTimeout`, and `IdleTimeout` for the HTTP server, which is a best practice for preventing slow client attacks and connection exhaustion.
- **WebSocket Connections:** The `wsUpgrader` has a `CheckOrigin` function that restricts connections to `localhost` origins, providing a layer of security. The `EventHub` manages the lifecycle of WebSocket connections.
### Recommendations:
- **No major issues found.** The network connection handling is well-configured.

View file

@ -1,40 +0,0 @@
# Performance Audit Report
This report details the findings of a performance audit conducted on the codebase. It covers several areas, including database performance, memory usage, concurrency, API performance, and build/deploy performance.
## Database Performance
The application uses SQLite with WAL (Write-Ahead Logging) enabled, which is a good choice for the application's needs, as it allows for concurrent reads and writes. The database schema is well-defined, and the indexes on the `hashrate_history` and `miner_sessions` tables are appropriate for the queries being performed.
- **N+1 Queries:** No evidence of N+1 queries was found. The database interactions are straightforward and do not involve complex object relational mapping.
- **Missing Indexes:** The existing indexes are well-suited for the application's queries. No missing indexes were identified.
- **Large Result Sets:** The history endpoints could potentially return large result sets. Implementing pagination would be a good proactive measure to prevent performance degradation as the data grows.
- **Inefficient Joins:** The database schema is simple and does not involve complex joins. No inefficient joins were identified.
- **Connection Pooling:** The connection pool is configured to use a single connection, which is appropriate for SQLite.
## Memory Usage
- **Memory Leaks:** No obvious memory leaks were identified. The application's memory usage appears to be stable.
- **Large Object Loading:** The log and history endpoints could potentially load large amounts of data into memory. Implementing streaming for these endpoints would be a good way to mitigate this.
- **Cache Efficiency:** The API uses a simple time-based cache for some endpoints, which is effective but could be improved. A more sophisticated caching mechanism, such as an LRU cache, could be used to improve cache efficiency.
- **Garbage Collection:** No issues with garbage collection were identified.
## Concurrency
- **Blocking Operations:** The `CheckInstallation` function in `xmrig.go` shells out to the command line, which is a blocking operation. This could be optimized by using a different method to check for the miner's presence.
- **Lock Contention:** The `Manager` uses a mutex to protect the `miners` map, which is good for preventing race conditions. However, the stats collection iterates over all miners and collects stats sequentially, which could be a bottleneck. This could be improved by collecting stats in parallel.
- **Thread Pool Sizing:** The application does not use a thread pool.
- **Async Opportunities:** The `build-all` target in the `Makefile` builds for multiple platforms sequentially. This could be parallelized to reduce build times. Similarly, the `before` hook in `.goreleaser.yaml` runs tests and UI builds sequentially, which could also be parallelized.
## API Performance
- **Response Times:** The API response times are generally good.
- **Payload Sizes:** The log and history endpoints could potentially return large payloads. Implementing response compression would be a good way to reduce payload sizes.
- **Caching Headers:** The API uses `Cache-Control` headers, which is good.
- **Rate Limiting:** The API has rate limiting in place, which is good.
## Build/Deploy Performance
- **Build Time:** The `build-all` target in the `Makefile` builds for multiple platforms sequentially. This could be parallelized to reduce build times. The `before` hook in `.goreleaser.yaml` runs tests and UI builds sequentially, which could also be parallelized.
- **Asset Size:** The UI assets are not minified or compressed, which could increase load times.
- **Cold Start:** The application has a fast cold start time.

View file

@ -1,72 +0,0 @@
# Mining Protocol Security Audit: AUDIT-PROTOCOL.md
## 1. Stratum Protocol Security
**Findings:**
- **Insecure Default Connections:** The miner defaults to `stratum+tcp`, transmitting data in plaintext. This exposes sensitive information, such as wallet addresses and passwords, to interception. An attacker with network access could easily capture and exploit this data.
- **Lack of Certificate Pinning:** Although TLS is an option, there is no mechanism for certificate pinning. Without it, the client cannot verify the authenticity of the pool's certificate, leaving it vulnerable to man-in-the-middle attacks where a malicious actor could impersonate the mining pool.
- **Vulnerability to Protocol-Level Attacks:** The Stratum protocol implementation does not adequately protect against attacks like share hijacking or difficulty manipulation. An attacker could potentially modify Stratum messages to redirect shares or disrupt the mining process.
**Recommendations:**
- **Enforce TLS by Default:** Mandate the use of `stratum+ssl` to ensure all communication between the miner and the pool is encrypted.
- **Implement Certificate Pinning:** Add support for certificate pinning to allow users to specify the expected certificate, preventing man-in-the-middle attacks.
- **Add Protocol-Level Integrity Checks:** Implement checksums or signatures for Stratum messages to ensure their integrity and prevent tampering.
## 2. Pool Authentication
**Findings:**
- **Credentials in Plaintext:** Authentication credentials, including the worker's username and password, are sent in plaintext over unencrypted connections. This makes them highly susceptible to theft.
- **Weak Default Password:** The `config.json` file stores the password as `"x"`, which is a weak default. While users can change this, there is no enforcement of strong password policies.
- **Risk of Brute-Force Attacks:** The absence of rate limiting or account lockout mechanisms on the pool side exposes the authentication process to brute-force attacks, where an attacker could repeatedly guess passwords until they gain access.
**Recommendations:**
- **Mandate Encrypted Authentication:** Require all authentication attempts to be transmitted over a TLS-encrypted connection.
- **Enforce Strong Password Policies:** Encourage the use of strong, unique passwords and consider implementing a password strength meter.
- **Implement Secure Authentication Mechanisms:** Support more secure authentication methods, such as token-based authentication, to reduce the reliance on passwords.
## 3. Share Validation
**Findings:**
- **Lack of Share Signatures:** Shares submitted by the miner are not cryptographically signed, making it possible for an attacker to intercept and modify them. This could lead to share stealing, where an attacker redirects a legitimate miner's work to their own account.
- **Vulnerability to Replay Attacks:** There is no protection against replay attacks, where an attacker could resubmit old shares. While pools may have some defenses, the client-side implementation lacks measures to prevent this.
**Recommendations:**
- **Implement Share Signing:** Introduce a mechanism for miners to sign each share with a unique key, allowing the pool to verify its authenticity.
- **Add Nonces to Shares:** Include a unique, single-use nonce in each share submission to prevent replay attacks.
## 4. Block Template Handling
**Findings:**
- **Centralized Block Template Distribution:** The miner relies on a centralized pool for block templates, creating a single point of failure. If the pool is compromised, an attacker could distribute malicious or inefficient templates.
- **No Template Validation:** The miner does not independently validate the block templates received from the pool. This makes it vulnerable to block withholding attacks, where a malicious pool sends invalid templates, causing the miner to waste resources on unsolvable blocks.
**Recommendations:**
- **Support Decentralized Template Distribution:** Explore decentralized alternatives for block template distribution to reduce reliance on a single pool.
- **Implement Independent Template Validation:** Add a mechanism for the miner to validate block templates against the network's consensus rules before starting to mine.
## 5. Network Message Validation
**Findings:**
- **Insufficient Input Sanitization:** Network messages from the pool are not consistently sanitized, creating a risk of denial-of-service attacks. An attacker could send malformed messages to crash the miner.
- **Lack of Rate Limiting:** The client does not implement rate limiting for incoming messages, making it vulnerable to flooding attacks that could overwhelm its resources.
**Recommendations:**
- **Implement Robust Message Sanitization:** Sanitize all incoming network messages to ensure they conform to the expected format and do not contain malicious payloads.
- **Add Rate Limiting:** Introduce rate limiting for incoming messages to prevent a single source from overwhelming the miner.

View file

@ -1,44 +0,0 @@
# Security Audit: Secrets & Configuration
This document outlines the findings of a security audit focused on exposed secrets and insecure configurations.
## 1. Secret Detection
### 1.1. Hardcoded Credentials & Sensitive Information
- **Placeholder Wallet Addresses:**
- `miner/core/src/config.json`: Contains the placeholder `"YOUR_WALLET_ADDRESS"`.
- `miner/proxy/src/config.json`: Contains the placeholder `"YOUR_WALLET"`.
- `miner/core/doc/api/1/config.json`: Contains a hardcoded wallet address.
- **Default Passwords:**
- `miner/core/src/config.json`: The `"pass"` field is set to `"x"`.
- `miner/proxy/src/config.json`: The `"pass"` field is set to `"x"`.
- `miner/core/doc/api/1/config.json`: The `"pass"` field is set to `"x"`.
- **Placeholder API Tokens:**
- `miner/core/doc/api/1/config.json`: The `"access-token"` is set to the placeholder `"TOKEN"`.
## 2. Configuration Security
### 2.1. Insecure Default Configurations
- **`null` API Access Tokens:**
- `miner/core/src/config.json`: The `http.access-token` is `null` by default. If the HTTP API is enabled without setting a token, it could allow unauthorized access.
- `miner/proxy/src/config.json`: The `http.access-token` is `null` by default, posing a similar risk.
- **TLS Disabled by Default:**
- `miner/core/src/config.json`: The `tls.enabled` flag is `false` by default. If services are exposed, communication would be unencrypted.
- `miner/proxy/src/config.json`: While `tls.enabled` is `true`, the `cert` and `cert_key` fields are `null`, preventing a secure TLS connection from being established.
### 2.2. Verbose Error Messages
No instances of overly verbose error messages leaking sensitive information were identified during this audit.
### 2.3. CORS Policy
The CORS policy could not be audited as it was not explicitly defined in the scanned files.
### 2.4. Security Headers
No security headers (e.g., CSP, HSTS) were identified in the configuration files.

View file

@ -1,127 +0,0 @@
# Test Coverage and Quality Audit
## 1. Coverage Analysis
### Line Coverage
- **Overall Line Coverage: 44.4%**
The overall test coverage for the project is **44.4%**, which is below the recommended minimum of 80%. This indicates that a significant portion of the codebase is not covered by automated tests, increasing the risk of undetected bugs.
### Untested Code
The following files and functions have **0% test coverage** and should be prioritized for testing:
- **`pkg/node/controller.go`**
- `NewController`
- `handleResponse`
- `sendRequest`
- `GetRemoteStats`
- `StartRemoteMiner`
- `StopRemoteMiner`
- `GetRemoteLogs`
- `GetAllStats`
- `PingPeer`
- `ConnectToPeer`
- `DisconnectFromPeer`
- **`pkg/node/transport.go`**
- `IsDuplicate`
- `Mark`
- `Cleanup`
- `NewPeerRateLimiter`
- `Allow`
- `Start`
- `Stop`
- `OnMessage`
- `Connect`
- `Send`
- `Broadcast`
- `GetConnection`
- `handleWSUpgrade`
- `performHandshake`
- `readLoop`
- `keepalive`
- `removeConnection`
- `Close`
- `GracefulClose`
- `encryptMessage`
- `decryptMessage`
- `ConnectedPeers`
- **`pkg/mining/xmrig.go`**
- `Uninstall`
- **`pkg/node/dispatcher.go`**
- `DispatchUEPS`
- **`pkg/node/identity.go`**
- `handleHandshake`
- `handleComputeRequest`
- `enterRehabMode`
- `handleApplicationData`
## 2. Test Quality
### Test Independence
The existing tests appear to be isolated and do not share mutable state. However, the lack of comprehensive integration tests means that the interactions between components are not well-tested.
### Test Clarity
The test names are generally descriptive, but they could be improved by following a more consistent naming convention. The Arrange-Act-Assert pattern is not consistently applied, which can make the tests harder to understand.
### Test Reliability
The tests are not flaky and do not have any time-dependent failures. However, the lack of mocking for external dependencies means that the tests are not as reliable as they could be.
## 3. Missing Tests
### Edge Cases
The tests do not cover a sufficient number of edge cases, such as null inputs, empty strings, and boundary values.
### Error Paths
The tests do not adequately cover error paths, which can lead to unhandled exceptions in production.
### Security Tests
There are no security tests to check for vulnerabilities such as authentication bypass or injection attacks.
### Integration Tests
The lack of integration tests means that the interactions between different components are not well-tested.
## 4. Suggested Tests to Add
### `pkg/node/controller.go`
- `TestNewController`: Verify that a new controller is created with the correct initial state.
- `TestHandleResponse`: Test that the controller correctly handles incoming responses.
- `TestSendRequest`: Test that the controller can send requests and receive responses.
- `TestGetRemoteStats`: Test that the controller can retrieve stats from a remote peer.
- `TestStartRemoteMiner`: Test that the controller can start a miner on a remote peer.
- `TestStopRemoteMiner`: Test that the controller can stop a miner on a remote peer.
- `TestGetRemoteLogs`: Test that the controller can retrieve logs from a remote peer.
- `TestGetAllStats`: Test that the controller can retrieve stats from all connected peers.
- `TestPingPeer`: Test that the controller can ping a remote peer.
- `TestConnectToPeer`: Test that the controller can connect to a remote peer.
- `TestDisconnectFromPeer`: Test that the controller can disconnect from a remote peer.
### `pkg/node/transport.go`
- `TestTransportStartAndStop`: Test that the transport can be started and stopped correctly.
- `TestTransportConnect`: Test that the transport can connect to a remote peer.
- `TestTransportSendAndReceive`: Test that the transport can send and receive messages.
- `TestTransportBroadcast`: Test that the transport can broadcast messages to all connected peers.
- `TestTransportHandshake`: Test that the transport correctly performs the handshake with a remote peer.
- `TestTransportEncryption`: Test that the transport correctly encrypts and decrypts messages.
### `pkg/mining/xmrig.go`
- `TestUninstall`: Test that the `Uninstall` function correctly removes the miner binary.
### `pkg/node/dispatcher.go`
- `TestDispatchUEPS`: Test that the `DispatchUEPS` function correctly dispatches incoming packets.

View file

@ -1,4 +1,4 @@
.PHONY: all build test clean install run demo help lint fmt vet docs install-swag dev package e2e e2e-ui e2e-api test-cpp test-cpp-core test-cpp-proxy build-cpp-tests build-miner build-miner-core build-miner-proxy build-miner-all
.PHONY: all build test clean install run demo help lint fmt vet docs install-swag dev package e2e e2e-ui e2e-api test-cpp test-cpp-core test-cpp-proxy build-cpp-tests
# Variables
BINARY_NAME=miner-ctrl
@ -66,34 +66,6 @@ build-cpp-tests-proxy:
$(CMAKE) -DBUILD_TESTS=ON .. && \
$(CMAKE) --build . --target unit_tests integration_tests --parallel
# Build miner binaries (release builds)
build-miner: build-miner-core build-miner-proxy
@echo "Miner binaries built successfully"
# Build miner core (CPU/GPU miner)
build-miner-core:
@echo "Building miner core..."
@mkdir -p $(MINER_CORE_BUILD_DIR)
@cd $(MINER_CORE_BUILD_DIR) && \
$(CMAKE) -DCMAKE_BUILD_TYPE=Release .. && \
$(CMAKE) --build . --config Release --parallel
# Build miner proxy
build-miner-proxy:
@echo "Building miner proxy..."
@mkdir -p $(MINER_PROXY_BUILD_DIR)
@cd $(MINER_PROXY_BUILD_DIR) && \
$(CMAKE) -DCMAKE_BUILD_TYPE=Release .. && \
$(CMAKE) --build . --config Release --parallel
# Build all miner components and package
build-miner-all: build-miner
@echo "Packaging miner binaries..."
@mkdir -p dist/miner
@cp $(MINER_CORE_BUILD_DIR)/miner dist/miner/ 2>/dev/null || true
@cp $(MINER_PROXY_BUILD_DIR)/miner-proxy dist/miner/ 2>/dev/null || true
@echo "Miner binaries available in dist/miner/"
# Run C++ tests (builds first if needed)
test-cpp: test-cpp-proxy
@echo "All C++ tests completed"
@ -205,41 +177,30 @@ e2e-api: build
# Help
help:
@echo "Available targets:"
@echo ""
@echo "Go Application:"
@echo " all - Run tests and build"
@echo " build - Build the CLI binary"
@echo " build-all - Build for multiple platforms"
@echo " install - Install the binary"
@echo " run - Build and run the CLI"
@echo " dev - Start the development server with docs and build"
@echo ""
@echo "Miner (C++ Binaries):"
@echo " build-miner - Build miner core and proxy"
@echo " build-miner-core - Build miner core only"
@echo " build-miner-proxy - Build miner proxy only"
@echo " build-miner-all - Build and package all miner binaries"
@echo ""
@echo "Testing:"
@echo " test - Run all tests (Go + C++)"
@echo " test-go - Run Go tests only"
@echo " test-cpp - Run C++ tests (proxy)"
@echo " test-cpp-core - Run miner/core C++ tests"
@echo " test-cpp-proxy- Run miner/proxy C++ tests"
@echo " coverage - Run tests with coverage report"
@echo " e2e - Run E2E tests with Playwright"
@echo " e2e-ui - Open Playwright UI for interactive testing"
@echo " e2e-api - Run API-only E2E tests"
@echo ""
@echo "Code Quality:"
@echo " fmt - Format code"
@echo " vet - Run go vet"
@echo " lint - Run linters"
@echo " tidy - Tidy dependencies"
@echo ""
@echo "Other:"
@echo " clean - Clean all build artifacts"
@echo " deps - Download dependencies"
@echo " docs - Generate Swagger documentation"
@echo " package - Create local distribution packages"
@echo " help - Show this help message"
@echo " all - Run tests and build"
@echo " build - Build the CLI binary"
@echo " build-all - Build for multiple platforms"
@echo " install - Install the binary"
@echo " test - Run all tests (Go + C++)"
@echo " test-go - Run Go tests only"
@echo " test-cpp - Run C++ tests (core + proxy)"
@echo " test-cpp-core - Run miner/core C++ tests"
@echo " test-cpp-proxy - Run miner/proxy C++ tests"
@echo " build-cpp-tests - Build all C++ tests"
@echo " coverage - Run tests with coverage report"
@echo " demo - Run the demo"
@echo " run - Build and run the CLI"
@echo " clean - Clean build artifacts (including C++ builds)"
@echo " fmt - Format code"
@echo " vet - Run go vet"
@echo " lint - Run linters"
@echo " tidy - Tidy dependencies"
@echo " deps - Download dependencies"
@echo " docs - Generate Swagger documentation"
@echo " install-swag- Install the swag CLI"
@echo " package - Create local distribution packages using GoReleaser"
@echo " dev - Start the development server with docs and build"
@echo " e2e - Run E2E tests with Playwright"
@echo " e2e-ui - Open Playwright UI for interactive testing"
@echo " e2e-api - Run API-only E2E tests"
@echo " help - Show this help message"

View file

@ -108,43 +108,25 @@ wails3 build
```
Mining/
├── cmd/
│ ├── mining/ # CLI application (miner-ctrl)
│ ├── mining/ # CLI application
│ └── desktop/ # Wails desktop app
├── pkg/mining/ # Core Go package
│ ├── mining.go # Interfaces and types
│ ├── manager.go # Miner lifecycle management
│ ├── service.go # RESTful API (Gin)
│ ├── xmrig.go # XMRig implementation
│ └── profile_manager.go # Profile persistence
├── miner/ # Standalone C++ mining tools
├── core/ # CPU/GPU miner binary
├── proxy/ # Stratum proxy for farms
├── cuda/ # CUDA plugin for NVIDIA
└── README.md # Miner documentation
├── miner/core/ # Modified XMRig with algorithm support
└── src/
├── backend/opencl/ # OpenCL GPU kernels
├── backend/cuda/ # CUDA GPU kernels
└── crypto/ # Algorithm implementations
└── ui/ # Angular 20+ web dashboard
└── src/app/
├── components/ # Reusable UI components
└── pages/ # Route pages
```
## Standalone Miner Tools
The `miner/` directory contains standalone C++ mining programs that can be used independently without the GUI:
```bash
# Build miner binaries
make build-miner
# Or build individually
make build-miner-core # CPU/GPU miner
make build-miner-proxy # Stratum proxy
# Run directly
./miner/core/build/miner -o pool.example.com:3333 -u WALLET -p x
./miner/proxy/build/miner-proxy -o pool.example.com:3333 -b 0.0.0.0:3333
```
Pre-built binaries are available from [Releases](https://github.com/letheanVPN/Mining/releases). See [miner/README.md](miner/README.md) for full documentation.
## API Reference
Base path: `/api/v1/mining`
@ -176,24 +158,26 @@ Swagger UI: `http://localhost:9090/api/v1/mining/swagger/index.html`
### Build Commands
```bash
# Go Backend
# Backend
make build # Build CLI binary
make test # Run all tests (Go + C++)
make test # Run tests with coverage
make dev # Start dev server on :9090
# Miner (C++ Binaries)
make build-miner # Build miner and proxy
make build-miner-all # Build and package to dist/miner/
# Frontend
cd ui
npm install
npm run build # Build web component
npm test # Run unit tests
npm test # Run unit tests (36 specs)
# Desktop
cd cmd/desktop/mining-desktop
wails3 build # Build native app
# Miner Core (GPU support)
cd miner/core
mkdir build && cd build
cmake .. -DWITH_OPENCL=ON -DWITH_CUDA=ON
make -j$(nproc)
```
## Configuration

File diff suppressed because it is too large Load diff

View file

@ -26,7 +26,7 @@
"private": true,
"dependencies": {
"@angular/common": "^20.3.0",
"@angular/compiler": "^20.3.16",
"@angular/compiler": "^20.3.0",
"@angular/core": "^20.3.0",
"@angular/forms": "^20.3.0",
"@angular/platform-browser": "^20.3.0",
@ -41,7 +41,7 @@
},
"devDependencies": {
"@angular/build": "^20.3.6",
"@angular/cli": "^20.3.13",
"@angular/cli": "^20.3.6",
"@angular/compiler-cli": "^20.3.0",
"@types/express": "^5.0.1",
"@types/jasmine": "~5.1.0",

View file

@ -3,12 +3,12 @@ module mining-desktop
go 1.25.0
require (
forge.lthn.ai/Snider/Mining v0.0.0
github.com/Snider/Mining v0.0.0
github.com/shirou/gopsutil/v4 v4.25.10
github.com/wailsapp/wails/v3 v3.0.0-alpha.54
)
replace forge.lthn.ai/Snider/Mining => ../../..
replace github.com/Snider/Mining => ../../..
require (
dario.cat/mergo v1.0.1 // indirect
@ -16,9 +16,9 @@ require (
github.com/Masterminds/semver/v3 v3.3.1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
forge.lthn.ai/Snider/Borg v0.2.1 // indirect
forge.lthn.ai/Snider/Enchantrix v0.0.4 // indirect
forge.lthn.ai/Snider/Poindexter v0.0.2 // indirect
github.com/Snider/Borg v0.0.2 // indirect
github.com/Snider/Enchantrix v0.0.2 // indirect
github.com/Snider/Poindexter v0.0.0-20251229183216-e182d4f49741 // indirect
github.com/adrg/xdg v0.5.3 // indirect
github.com/bep/debounce v1.2.1 // indirect
github.com/bytedance/sonic v1.14.0 // indirect
@ -74,8 +74,8 @@ require (
github.com/pjbgf/sha1cd v0.3.2 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/quic-go/quic-go v0.57.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/samber/lo v1.49.1 // indirect
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
@ -91,9 +91,10 @@ require (
github.com/wailsapp/mimetype v1.4.1 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.uber.org/mock v0.5.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/arch v0.20.0 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/crypto v0.44.0 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.18.0 // indirect

View file

@ -167,10 +167,10 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.57.0 h1:AsSSrrMs4qI/hLrKlTH/TGQeTMY0ib1pAOX7vA3AdqE=
github.com/quic-go/quic-go v0.57.0/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@ -221,8 +221,8 @@ github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c=
@ -230,8 +230,8 @@ golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs=
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@ -278,8 +278,6 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=

View file

@ -17,16 +17,16 @@ import (
var assets embed.FS
func main() {
// miningService := NewMiningService() // powers the Wails dashboard with managers and saved settings.
// Create the mining service
miningService := NewMiningService()
// browserFS, err := fs.Sub(assets, "frontend/dist/browser") // serves the built dashboard from frontend/dist/browser.
// Get the sub-filesystem rooted at frontend/dist/browser
browserFS, err := fs.Sub(assets, "frontend/dist/browser")
if err != nil {
log.Fatal("Failed to create sub-filesystem:", err)
}
// app := application.New(...) // opens the dashboard window and registers the mining service.
// Create a new Wails application
app := application.New(application.Options{
Name: "Mining Dashboard",
Description: "Multi-miner management dashboard",
@ -41,7 +41,7 @@ func main() {
},
})
// windowState := miningService.GetWindowState() // restores the last saved 1400x900 window size when available.
// Get saved window state
windowState := miningService.GetWindowState()
width := windowState.Width
height := windowState.Height
@ -52,7 +52,7 @@ func main() {
height = 900
}
// app.Window.NewWithOptions(...) // uses the restored size and keeps the title bar hidden inset.
// Create the main window with saved dimensions
app.Window.NewWithOptions(application.WebviewWindowOptions{
Title: "Mining Dashboard",
Width: width,
@ -66,7 +66,7 @@ func main() {
URL: "/",
})
// sigChan := make(chan os.Signal, 1) // captures Ctrl+C and SIGTERM for a clean shutdown.
// Handle graceful shutdown
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
@ -75,7 +75,7 @@ func main() {
os.Exit(0)
}()
// app.Run() // keeps the window open until SIGINT or SIGTERM triggers miningService.Shutdown().
// Run the application
if err := app.Run(); err != nil {
log.Fatal(err)
}

View file

@ -6,27 +6,27 @@ import (
"runtime"
"strings"
"forge.lthn.ai/Snider/Mining/pkg/mining"
"github.com/Snider/Mining/pkg/mining"
"github.com/shirou/gopsutil/v4/cpu"
"github.com/shirou/gopsutil/v4/mem"
)
// MiningService exposes mining functionality to the Wails frontend.
type MiningService struct {
manager *mining.Manager
profileManager *mining.ProfileManager
settingsManager *mining.SettingsManager
manager *mining.Manager
profileMgr *mining.ProfileManager
settingsMgr *mining.SettingsManager
}
// NewMiningService creates a new mining service with an initialized manager.
func NewMiningService() *MiningService {
manager := mining.NewManager()
profileManager, _ := mining.NewProfileManager()
settingsManager, _ := mining.NewSettingsManager()
profileMgr, _ := mining.NewProfileManager()
settingsMgr, _ := mining.NewSettingsManager()
return &MiningService{
manager: manager,
profileManager: profileManager,
settingsManager: settingsManager,
manager: manager,
profileMgr: profileMgr,
settingsMgr: settingsMgr,
}
}
@ -64,7 +64,7 @@ type Profile struct {
}
// GetSystemInfo returns system information and installed miners.
func (service *MiningService) GetSystemInfo() (*SystemInfo, error) {
func (s *MiningService) GetSystemInfo() (*SystemInfo, error) {
cpuInfo, _ := cpu.Info()
cpuName := "Unknown"
if len(cpuInfo) > 0 {
@ -78,7 +78,7 @@ func (service *MiningService) GetSystemInfo() (*SystemInfo, error) {
}
miners := []MinerInstallInfo{}
// miner.CheckInstallation() // probes xmrig and tt-miner without launching a process.
// Check installation for each miner type by creating temporary instances
for _, minerType := range []string{"xmrig", "tt-miner"} {
var miner mining.Miner
switch minerType {
@ -115,15 +115,15 @@ func (service *MiningService) GetSystemInfo() (*SystemInfo, error) {
}
// ListMiners returns all running miners.
func (service *MiningService) ListMiners() []MinerStatus {
miners := service.manager.ListMiners()
func (s *MiningService) ListMiners() []MinerStatus {
miners := s.manager.ListMiners()
result := make([]MinerStatus, len(miners))
for i, miner := range miners {
stats, _ := miner.GetStats()
for i, m := range miners {
stats, _ := m.GetStats()
result[i] = MinerStatus{
Name: miner.GetName(),
Running: true, // service.ListMiners() only returns miners that are already running.
MinerType: getMinerType(miner),
Name: m.GetName(),
Running: true, // If it's in the list, it's running
MinerType: getMinerType(m),
Stats: stats,
}
}
@ -131,8 +131,8 @@ func (service *MiningService) ListMiners() []MinerStatus {
}
// getMinerType extracts the miner type from a miner instance.
func getMinerType(miner mining.Miner) string {
name := miner.GetName()
func getMinerType(m mining.Miner) string {
name := m.GetName()
if strings.HasPrefix(name, "xmrig") {
return "xmrig"
}
@ -143,8 +143,8 @@ func getMinerType(miner mining.Miner) string {
}
// StartMiner starts a miner with the given configuration.
func (service *MiningService) StartMiner(minerType string, config *mining.Config) (string, error) {
miner, err := service.manager.StartMiner(minerType, config)
func (s *MiningService) StartMiner(minerType string, config *mining.Config) (string, error) {
miner, err := s.manager.StartMiner(minerType, config)
if err != nil {
return "", err
}
@ -152,11 +152,11 @@ func (service *MiningService) StartMiner(minerType string, config *mining.Config
}
// StartMinerFromProfile starts a miner using a saved profile.
func (service *MiningService) StartMinerFromProfile(profileID string) (string, error) {
if service.profileManager == nil {
func (s *MiningService) StartMinerFromProfile(profileID string) (string, error) {
if s.profileMgr == nil {
return "", fmt.Errorf("profile manager not initialized")
}
profile, ok := service.profileManager.GetProfile(profileID)
profile, ok := s.profileMgr.GetProfile(profileID)
if !ok {
return "", fmt.Errorf("profile not found: %s", profileID)
}
@ -169,7 +169,7 @@ func (service *MiningService) StartMinerFromProfile(profileID string) (string, e
}
}
miner, err := service.manager.StartMiner(profile.MinerType, &config)
miner, err := s.manager.StartMiner(profile.MinerType, &config)
if err != nil {
return "", err
}
@ -177,13 +177,13 @@ func (service *MiningService) StartMinerFromProfile(profileID string) (string, e
}
// StopMiner stops a running miner by name.
func (service *MiningService) StopMiner(name string) error {
return service.manager.StopMiner(name)
func (s *MiningService) StopMiner(name string) error {
return s.manager.StopMiner(name)
}
// GetMinerStats returns stats for a specific miner.
func (service *MiningService) GetMinerStats(name string) (*mining.PerformanceMetrics, error) {
miner, err := service.manager.GetMiner(name)
func (s *MiningService) GetMinerStats(name string) (*mining.PerformanceMetrics, error) {
miner, err := s.manager.GetMiner(name)
if err != nil {
return nil, err
}
@ -191,8 +191,8 @@ func (service *MiningService) GetMinerStats(name string) (*mining.PerformanceMet
}
// GetMinerLogs returns log lines for a specific miner.
func (service *MiningService) GetMinerLogs(name string) ([]string, error) {
miner, err := service.manager.GetMiner(name)
func (s *MiningService) GetMinerLogs(name string) ([]string, error) {
miner, err := s.manager.GetMiner(name)
if err != nil {
return nil, err
}
@ -200,7 +200,7 @@ func (service *MiningService) GetMinerLogs(name string) ([]string, error) {
}
// InstallMiner installs a miner of the given type.
func (service *MiningService) InstallMiner(minerType string) error {
func (s *MiningService) InstallMiner(minerType string) error {
var miner mining.Miner
switch minerType {
case "xmrig":
@ -214,16 +214,16 @@ func (service *MiningService) InstallMiner(minerType string) error {
}
// UninstallMiner uninstalls a miner of the given type.
func (service *MiningService) UninstallMiner(minerType string) error {
return service.manager.UninstallMiner(minerType)
func (s *MiningService) UninstallMiner(minerType string) error {
return s.manager.UninstallMiner(minerType)
}
// GetProfiles returns all saved mining profiles.
func (service *MiningService) GetProfiles() ([]Profile, error) {
if service.profileManager == nil {
func (s *MiningService) GetProfiles() ([]Profile, error) {
if s.profileMgr == nil {
return []Profile{}, nil
}
profiles := service.profileManager.GetAllProfiles()
profiles := s.profileMgr.GetAllProfiles()
result := make([]Profile, len(profiles))
for i, p := range profiles {
@ -243,8 +243,8 @@ func (service *MiningService) GetProfiles() ([]Profile, error) {
}
// CreateProfile creates a new mining profile.
func (service *MiningService) CreateProfile(name, minerType string, config map[string]interface{}) (*Profile, error) {
if service.profileManager == nil {
func (s *MiningService) CreateProfile(name, minerType string, config map[string]interface{}) (*Profile, error) {
if s.profileMgr == nil {
return nil, fmt.Errorf("profile manager not initialized")
}
@ -260,7 +260,7 @@ func (service *MiningService) CreateProfile(name, minerType string, config map[s
Config: mining.RawConfig(configBytes),
}
profile, err := service.profileManager.CreateProfile(newProfile)
profile, err := s.profileMgr.CreateProfile(newProfile)
if err != nil {
return nil, err
}
@ -274,22 +274,22 @@ func (service *MiningService) CreateProfile(name, minerType string, config map[s
}
// DeleteProfile deletes a profile by ID.
func (service *MiningService) DeleteProfile(id string) error {
if service.profileManager == nil {
func (s *MiningService) DeleteProfile(id string) error {
if s.profileMgr == nil {
return nil
}
return service.profileManager.DeleteProfile(id)
return s.profileMgr.DeleteProfile(id)
}
// GetHashrateHistory returns hashrate history for a miner.
func (service *MiningService) GetHashrateHistory(name string) []mining.HashratePoint {
history, _ := service.manager.GetMinerHashrateHistory(name)
func (s *MiningService) GetHashrateHistory(name string) []mining.HashratePoint {
history, _ := s.manager.GetMinerHashrateHistory(name)
return history
}
// SendStdin sends input to a miner's stdin.
func (service *MiningService) SendStdin(name, input string) error {
miner, err := service.manager.GetMiner(name)
func (s *MiningService) SendStdin(name, input string) error {
miner, err := s.manager.GetMiner(name)
if err != nil {
return err
}
@ -297,36 +297,36 @@ func (service *MiningService) SendStdin(name, input string) error {
}
// Shutdown gracefully shuts down all miners.
func (service *MiningService) Shutdown() {
service.manager.Stop()
func (s *MiningService) Shutdown() {
s.manager.Stop()
}
// === Settings Methods ===
// GetSettings returns the current app settings
func (service *MiningService) GetSettings() (*mining.AppSettings, error) {
if service.settingsManager == nil {
func (s *MiningService) GetSettings() (*mining.AppSettings, error) {
if s.settingsMgr == nil {
return mining.DefaultSettings(), nil
}
return service.settingsManager.Get(), nil
return s.settingsMgr.Get(), nil
}
// SaveSettings saves the app settings
func (service *MiningService) SaveSettings(settings *mining.AppSettings) error {
if service.settingsManager == nil {
func (s *MiningService) SaveSettings(settings *mining.AppSettings) error {
if s.settingsMgr == nil {
return fmt.Errorf("settings manager not initialized")
}
return service.settingsManager.Update(func(serviceSettings *mining.AppSettings) {
*serviceSettings = *settings
return s.settingsMgr.Update(func(s *mining.AppSettings) {
*s = *settings
})
}
// SaveWindowState saves the window position and size
func (service *MiningService) SaveWindowState(x, y, width, height int, maximized bool) error {
if service.settingsManager == nil {
func (s *MiningService) SaveWindowState(x, y, width, height int, maximized bool) error {
if s.settingsMgr == nil {
return nil
}
return service.settingsManager.UpdateWindowState(x, y, width, height, maximized)
return s.settingsMgr.UpdateWindowState(x, y, width, height, maximized)
}
// WindowState represents window position and size for the frontend
@ -339,11 +339,11 @@ type WindowState struct {
}
// GetWindowState returns the saved window state
func (service *MiningService) GetWindowState() *WindowState {
if service.settingsManager == nil {
func (s *MiningService) GetWindowState() *WindowState {
if s.settingsMgr == nil {
return &WindowState{Width: 1400, Height: 900}
}
state := service.settingsManager.GetWindowState()
state := s.settingsMgr.GetWindowState()
return &WindowState{
X: state.X,
Y: state.Y,
@ -354,33 +354,33 @@ func (service *MiningService) GetWindowState() *WindowState {
}
// SetStartOnBoot enables/disables start on system boot
func (service *MiningService) SetStartOnBoot(enabled bool) error {
if service.settingsManager == nil {
func (s *MiningService) SetStartOnBoot(enabled bool) error {
if s.settingsMgr == nil {
return nil
}
return service.settingsManager.SetStartOnBoot(enabled)
return s.settingsMgr.SetStartOnBoot(enabled)
}
// SetAutostartMiners enables/disables automatic miner start
func (service *MiningService) SetAutostartMiners(enabled bool) error {
if service.settingsManager == nil {
func (s *MiningService) SetAutostartMiners(enabled bool) error {
if s.settingsMgr == nil {
return nil
}
return service.settingsManager.SetAutostartMiners(enabled)
return s.settingsMgr.SetAutostartMiners(enabled)
}
// SetCPUThrottle configures CPU throttling settings
func (service *MiningService) SetCPUThrottle(enabled bool, maxPercent int) error {
if service.settingsManager == nil {
func (s *MiningService) SetCPUThrottle(enabled bool, maxPercent int) error {
if s.settingsMgr == nil {
return nil
}
return service.settingsManager.SetCPUThrottle(enabled, maxPercent)
return s.settingsMgr.SetCPUThrottle(enabled, maxPercent)
}
// SetMinerDefaults updates default miner configuration
func (service *MiningService) SetMinerDefaults(defaults mining.MinerDefaults) error {
if service.settingsManager == nil {
func (s *MiningService) SetMinerDefaults(defaults mining.MinerDefaults) error {
if s.settingsMgr == nil {
return nil
}
return service.settingsManager.SetMinerDefaults(defaults)
return s.settingsMgr.SetMinerDefaults(defaults)
}

View file

@ -8,23 +8,23 @@ import (
"strings"
"time"
"forge.lthn.ai/Snider/Mining/pkg/mining"
"github.com/Snider/Mining/pkg/mining"
"github.com/adrg/xdg"
"github.com/spf13/cobra"
)
const (
installedMinersPointerFileName = ".installed-miners"
installedMinersCacheFileName = "installed-miners.json"
)
const signpostFilename = ".installed-miners"
// validateInstalledMinerCachePath("/home/alice/.config/lethean-desktop/miners/installed-miners.json") returns nil.
// validateInstalledMinerCachePath("/tmp/installed-miners.json") rejects paths outside XDG_CONFIG_HOME.
func validateInstalledMinerCachePath(cacheFilePath string) error {
// validateConfigPath validates that a config path is within the expected XDG config directory
// This prevents path traversal attacks via manipulated signpost files
func validateConfigPath(configPath string) error {
// Get the expected XDG config base directory
expectedBase := filepath.Join(xdg.ConfigHome, "lethean-desktop")
cleanPath := filepath.Clean(cacheFilePath)
// Clean and resolve the config path
cleanPath := filepath.Clean(configPath)
// Check if the path is within the expected directory
if !strings.HasPrefix(cleanPath, expectedBase+string(os.PathSeparator)) && cleanPath != expectedBase {
return fmt.Errorf("invalid config path: must be within %s", expectedBase)
}
@ -32,12 +32,12 @@ func validateInstalledMinerCachePath(cacheFilePath string) error {
return nil
}
// rootCmd.AddCommand(doctorCmd) exposes `mining doctor`, which refreshes the miner cache and prints the installed miner summary.
// doctorCmd represents the doctor command
var doctorCmd = &cobra.Command{
Use: "doctor",
Short: "Check and refresh the status of installed miners",
Long: `Performs a live check for installed miners, displays their status, and updates the local cache.`,
RunE: func(_ *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println("--- Mining Doctor ---")
fmt.Println("Performing live check and refreshing cache...")
fmt.Println()
@ -45,43 +45,42 @@ var doctorCmd = &cobra.Command{
if err := updateDoctorCache(); err != nil {
return fmt.Errorf("failed to run doctor check: %w", err)
}
// loadAndDisplayInstalledMinerCache() prints the refreshed miner summary after `mining doctor` refreshes the cache.
_, err := loadAndDisplayInstalledMinerCache()
// After updating the cache, display the fresh results
_, err := loadAndDisplayCache()
return err
},
}
func loadAndDisplayInstalledMinerCache() (bool, error) {
func loadAndDisplayCache() (bool, error) {
homeDir, err := os.UserHomeDir()
if err != nil {
return false, fmt.Errorf("could not get home directory: %w", err)
}
installedMinerPointerPath := filepath.Join(homeDir, installedMinersPointerFileName)
signpostPath := filepath.Join(homeDir, signpostFilename)
// os.Stat("/home/alice/.installed-miners") returns os.ErrNotExist before the first `mining install xmrig` run.
if _, err := os.Stat(installedMinerPointerPath); os.IsNotExist(err) {
if _, err := os.Stat(signpostPath); os.IsNotExist(err) {
fmt.Println("No cached data found. Run 'install' for a miner first.")
return false, nil // loadAndDisplayInstalledMinerCache returns false until install writes the first cache file.
return false, nil // No cache to load
}
cachePointerBytes, err := os.ReadFile(installedMinerPointerPath)
configPathBytes, err := os.ReadFile(signpostPath)
if err != nil {
return false, fmt.Errorf("could not read signpost file: %w", err)
}
cacheFilePath := strings.TrimSpace(string(cachePointerBytes))
configPath := strings.TrimSpace(string(configPathBytes))
// validateInstalledMinerCachePath("/home/alice/.config/lethean-desktop/miners/installed-miners.json") blocks path traversal outside XDG_CONFIG_HOME.
if err := validateInstalledMinerCachePath(cacheFilePath); err != nil {
// Security: Validate that the config path is within the expected directory
if err := validateConfigPath(configPath); err != nil {
return false, fmt.Errorf("security error: %w", err)
}
cacheBytes, err := os.ReadFile(cacheFilePath)
cacheBytes, err := os.ReadFile(configPath)
if err != nil {
if os.IsNotExist(err) {
fmt.Println("No cached data found. Run 'install' for a miner first.")
return false, nil
}
return false, fmt.Errorf("could not read cache file from %s: %w", cacheFilePath, err)
return false, fmt.Errorf("could not read cache file from %s: %w", configPath, err)
}
var systemInfo mining.SystemInfo
@ -94,7 +93,7 @@ func loadAndDisplayInstalledMinerCache() (bool, error) {
fmt.Println()
for _, details := range systemInfo.InstalledMinersInfo {
// details.Path = "/home/alice/.local/share/lethean-desktop/miners/xmrig" maps to a friendly miner label like "XMRig".
// Infer miner name from path for display purposes
var minerName string
if details.Path != "" {
if strings.Contains(details.Path, "xmrig") {
@ -105,29 +104,28 @@ func loadAndDisplayInstalledMinerCache() (bool, error) {
} else {
minerName = "Unknown Miner"
}
displayInstalledMinerDetails(minerName, details)
displayDetails(minerName, details)
}
return true, nil
}
func saveInstalledMinerCache(systemInfo *mining.SystemInfo) error {
cacheDirectoryRelativePath := filepath.Join("lethean-desktop", "miners")
cacheDirectoryPath, err := xdg.ConfigFile(cacheDirectoryRelativePath)
func saveResultsToCache(systemInfo *mining.SystemInfo) error {
configDir, err := xdg.ConfigFile("lethean-desktop/miners")
if err != nil {
return fmt.Errorf("could not get config directory: %w", err)
}
if err := os.MkdirAll(cacheDirectoryPath, 0755); err != nil {
if err := os.MkdirAll(configDir, 0755); err != nil {
return fmt.Errorf("could not create config directory: %w", err)
}
cacheFilePath := filepath.Join(cacheDirectoryPath, installedMinersCacheFileName)
configPath := filepath.Join(configDir, "config.json")
data, err := json.MarshalIndent(systemInfo, "", " ")
if err != nil {
return fmt.Errorf("could not marshal cache data: %w", err)
}
if err := os.WriteFile(cacheFilePath, data, 0600); err != nil {
if err := os.WriteFile(configPath, data, 0600); err != nil {
return fmt.Errorf("could not write cache file: %w", err)
}
@ -135,16 +133,16 @@ func saveInstalledMinerCache(systemInfo *mining.SystemInfo) error {
if err != nil {
return fmt.Errorf("could not get home directory for signpost: %w", err)
}
installedMinerPointerPath := filepath.Join(homeDir, installedMinersPointerFileName)
if err := os.WriteFile(installedMinerPointerPath, []byte(cacheFilePath), 0600); err != nil {
signpostPath := filepath.Join(homeDir, signpostFilename)
if err := os.WriteFile(signpostPath, []byte(configPath), 0600); err != nil {
return fmt.Errorf("could not write signpost file: %w", err)
}
fmt.Printf("\n(Cache updated at %s)\n", cacheFilePath)
fmt.Printf("\n(Cache updated at %s)\n", configPath)
return nil
}
func displayInstalledMinerDetails(minerName string, details *mining.InstallationDetails) {
func displayDetails(minerName string, details *mining.InstallationDetails) {
fmt.Printf("--- %s ---\n", minerName)
if details.IsInstalled {
fmt.Printf(" Status: Installed\n")

View file

@ -5,17 +5,18 @@ import (
"runtime"
"time"
"forge.lthn.ai/Snider/Mining/pkg/mining"
"github.com/Masterminds/semver/v3"
"github.com/Snider/Mining/pkg/mining"
"github.com/spf13/cobra"
)
// installCmd represents the install command
var installCmd = &cobra.Command{
Use: "install [miner_type]",
Short: "Install or update a miner",
Long: `Download and install a new miner, or update an existing one to the latest version.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
minerType := args[0]
var miner mining.Miner
@ -26,13 +27,14 @@ var installCmd = &cobra.Command{
return fmt.Errorf("unknown miner type: %s", minerType)
}
installationDetails, err := miner.CheckInstallation()
if err == nil && installationDetails.IsInstalled {
// Check if it's already installed and up-to-date
details, err := miner.CheckInstallation()
if err == nil && details.IsInstalled {
latestVersionStr, err := miner.GetLatestVersion()
if err == nil {
latestVersion, err := semver.NewVersion(latestVersionStr)
if err == nil {
installedVersion, err := semver.NewVersion(installationDetails.Version)
installedVersion, err := semver.NewVersion(details.Version)
if err == nil && !latestVersion.GreaterThan(installedVersion) {
fmt.Printf("%s is already installed and up to date (version %s).\n", miner.GetName(), installedVersion)
return nil
@ -48,13 +50,15 @@ var installCmd = &cobra.Command{
return fmt.Errorf("failed to install/update miner: %w", err)
}
finalInstallationDetails, err := miner.CheckInstallation()
// Get fresh details after installation
finalDetails, err := miner.CheckInstallation()
if err != nil {
return fmt.Errorf("failed to verify installation: %w", err)
}
fmt.Printf("%s installed successfully to %s (version %s).\n", miner.GetName(), finalInstallationDetails.Path, finalInstallationDetails.Version)
fmt.Printf("%s installed successfully to %s (version %s).\n", miner.GetName(), finalDetails.Path, finalDetails.Version)
// Update the cache after a successful installation
fmt.Println("Updating installation cache...")
if err := updateDoctorCache(); err != nil {
fmt.Printf("Warning: failed to update doctor cache: %v\n", err)
@ -64,8 +68,9 @@ var installCmd = &cobra.Command{
},
}
// updateDoctorCache runs the core logic of the doctor command to refresh the cache.
func updateDoctorCache() error {
manager := getSharedManager()
manager := getManager()
availableMiners := manager.ListAvailableMiners()
if len(availableMiners) == 0 {
return nil
@ -82,11 +87,12 @@ func updateDoctorCache() error {
}
details, err := miner.CheckInstallation()
if err != nil {
continue
continue // Ignore errors for this background update
}
allDetails = append(allDetails, details)
}
// Create the SystemInfo struct that the /info endpoint expects
systemInfo := &mining.SystemInfo{
Timestamp: time.Now(),
OS: runtime.GOOS,
@ -96,7 +102,7 @@ func updateDoctorCache() error {
InstalledMinersInfo: allDetails,
}
return saveInstalledMinerCache(systemInfo)
return saveResultsToCache(systemInfo)
}
func init() {

View file

@ -6,15 +6,15 @@ import (
"github.com/spf13/cobra"
)
// mining list prints the running miners and the installable miners.
// listCmd represents the list command
var listCmd = &cobra.Command{
Use: "list",
Short: "List running and available miners",
Long: `List all running miners and their status, as well as all miners that are available to be installed and started.`,
RunE: func(_ *cobra.Command, args []string) error {
manager := getSharedManager()
RunE: func(cmd *cobra.Command, args []string) error {
manager := getManager()
// manager.ListMiners() returns running miners such as xmrig-main.
// List running miners
runningMiners := manager.ListMiners()
fmt.Println("Running Miners:")
if len(runningMiners) == 0 {
@ -29,7 +29,7 @@ var listCmd = &cobra.Command{
fmt.Println()
// manager.ListAvailableMiners() returns installable miners such as xmrig.
// List available miners
availableMiners := manager.ListAvailableMiners()
fmt.Println("Available Miners:")
if len(availableMiners) == 0 {

View file

@ -8,7 +8,7 @@ import (
"syscall"
"time"
"forge.lthn.ai/Snider/Mining/pkg/node"
"github.com/Snider/Mining/pkg/node"
"github.com/spf13/cobra"
)
@ -22,36 +22,38 @@ var (
peerRegistryErr error
)
// nodeCmd represents the node parent command
var nodeCmd = &cobra.Command{
Use: "node",
Short: "Manage P2P node identity and connections",
Long: `Manage the node's identity, view status, and control P2P networking.`,
}
// nodeInitCmd initializes a new node identity
var nodeInitCmd = &cobra.Command{
Use: "init",
Short: "Initialize node identity",
Long: `Initialize a new node identity with X25519 keypair.
This creates the node's cryptographic identity for secure P2P communication.`,
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
nodeName, _ := cobraCommand.Flags().GetString("name")
roleName, _ := cobraCommand.Flags().GetString("role")
RunE: func(cmd *cobra.Command, args []string) error {
name, _ := cmd.Flags().GetString("name")
role, _ := cmd.Flags().GetString("role")
if nodeName == "" {
if name == "" {
return fmt.Errorf("--name is required")
}
nodeManager, err := node.NewNodeManager()
nm, err := node.NewNodeManager()
if err != nil {
return fmt.Errorf("failed to create node manager: %w", err)
}
if nodeManager.HasIdentity() {
if nm.HasIdentity() {
return fmt.Errorf("node identity already exists. Use 'node reset' to create a new one")
}
var nodeRole node.NodeRole
switch roleName {
switch role {
case "controller":
nodeRole = node.RoleController
case "worker":
@ -59,14 +61,14 @@ This creates the node's cryptographic identity for secure P2P communication.`,
case "dual", "":
nodeRole = node.RoleDual
default:
return fmt.Errorf("invalid role: %s (use controller, worker, or dual)", roleName)
return fmt.Errorf("invalid role: %s (use controller, worker, or dual)", role)
}
if err := nodeManager.GenerateIdentity(nodeName, nodeRole); err != nil {
if err := nm.GenerateIdentity(name, nodeRole); err != nil {
return fmt.Errorf("failed to generate identity: %w", err)
}
identity := nodeManager.GetIdentity()
identity := nm.GetIdentity()
fmt.Println("Node identity created successfully!")
fmt.Println()
fmt.Printf(" ID: %s\n", identity.ID)
@ -79,23 +81,24 @@ This creates the node's cryptographic identity for secure P2P communication.`,
},
}
// nodeInfoCmd shows current node identity
var nodeInfoCmd = &cobra.Command{
Use: "info",
Short: "Show node identity and status",
Long: `Display the current node's identity, role, and connection status.`,
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
nodeManager, err := node.NewNodeManager()
RunE: func(cmd *cobra.Command, args []string) error {
nm, err := node.NewNodeManager()
if err != nil {
return fmt.Errorf("failed to create node manager: %w", err)
}
if !nodeManager.HasIdentity() {
if !nm.HasIdentity() {
fmt.Println("No node identity found.")
fmt.Println("Run 'node init --name <name>' to create one.")
return nil
}
identity := nodeManager.GetIdentity()
identity := nm.GetIdentity()
fmt.Println("Node Identity:")
fmt.Println()
fmt.Printf(" ID: %s\n", identity.ID)
@ -104,11 +107,12 @@ var nodeInfoCmd = &cobra.Command{
fmt.Printf(" Public Key: %s\n", identity.PublicKey)
fmt.Printf(" Created: %s\n", identity.CreatedAt.Format(time.RFC3339))
peerRegistry, err := node.NewPeerRegistry()
// Show peer info if available
pr, err := node.NewPeerRegistry()
if err == nil {
fmt.Println()
fmt.Printf(" Registered Peers: %d\n", peerRegistry.Count())
connected := peerRegistry.GetConnectedPeers()
fmt.Printf(" Registered Peers: %d\n", pr.Count())
connected := pr.GetConnectedPeers()
fmt.Printf(" Connected Peers: %d\n", len(connected))
}
@ -116,64 +120,71 @@ var nodeInfoCmd = &cobra.Command{
},
}
// nodeServeCmd starts the P2P server
var nodeServeCmd = &cobra.Command{
Use: "serve",
Short: "Start P2P server for remote connections",
Long: `Start the P2P WebSocket server to accept connections from other nodes.
This allows other nodes to connect, send commands, and receive stats.`,
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
listenAddress, _ := cobraCommand.Flags().GetString("listen")
RunE: func(cmd *cobra.Command, args []string) error {
listen, _ := cmd.Flags().GetString("listen")
nodeManager, err := node.NewNodeManager()
nm, err := node.NewNodeManager()
if err != nil {
return fmt.Errorf("failed to create node manager: %w", err)
}
if !nodeManager.HasIdentity() {
if !nm.HasIdentity() {
return fmt.Errorf("no node identity found. Run 'node init --name <name>' first")
}
peerRegistry, err := node.NewPeerRegistry()
pr, err := node.NewPeerRegistry()
if err != nil {
return fmt.Errorf("failed to create peer registry: %w", err)
}
config := node.DefaultTransportConfig()
if listenAddress != "" {
config.ListenAddr = listenAddress
if listen != "" {
config.ListenAddr = listen
}
transport := node.NewTransport(nodeManager, peerRegistry, config)
transport := node.NewTransport(nm, pr, config)
worker := node.NewWorker(nodeManager, transport)
// Create worker to handle incoming messages
worker := node.NewWorker(nm, transport)
worker.RegisterWithTransport()
if err := transport.Start(); err != nil {
return fmt.Errorf("failed to start transport: %w", err)
}
identity := nodeManager.GetIdentity()
identity := nm.GetIdentity()
fmt.Printf("P2P server started on %s\n", config.ListenAddr)
fmt.Printf("Node ID: %s (%s)\n", identity.ID, identity.Name)
fmt.Printf("Role: %s\n", identity.Role)
fmt.Println()
fmt.Println("Press Ctrl+C to stop...")
signalChannel := make(chan os.Signal, 1)
signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
// Set up signal handling for graceful shutdown (including SIGHUP for terminal disconnect)
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
signalValue := <-signalChannel
fmt.Printf("\nReceived signal %v, shutting down...\n", signalValue)
// Wait for shutdown signal
sig := <-sigChan
fmt.Printf("\nReceived signal %v, shutting down...\n", sig)
// Graceful shutdown: stop transport and cleanup resources
if err := transport.Stop(); err != nil {
fmt.Printf("Warning: error during transport shutdown: %v\n", err)
// Force cleanup on Stop() failure
fmt.Println("Forcing resource cleanup...")
for _, peer := range peerRegistry.GetConnectedPeers() {
peerRegistry.SetConnected(peer.ID, false)
for _, peer := range pr.GetConnectedPeers() {
pr.SetConnected(peer.ID, false)
}
}
if err := peerRegistry.Close(); err != nil {
// Ensure peer registry is flushed to disk
if err := pr.Close(); err != nil {
fmt.Printf("Warning: error closing peer registry: %v\n", err)
}
@ -182,19 +193,20 @@ This allows other nodes to connect, send commands, and receive stats.`,
},
}
// nodeResetCmd deletes the node identity
var nodeResetCmd = &cobra.Command{
Use: "reset",
Short: "Delete node identity and start fresh",
Long: `Remove the current node identity, keys, and all peer data. Use with caution!`,
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
force, _ := cobraCommand.Flags().GetBool("force")
RunE: func(cmd *cobra.Command, args []string) error {
force, _ := cmd.Flags().GetBool("force")
nodeManager, err := node.NewNodeManager()
nm, err := node.NewNodeManager()
if err != nil {
return fmt.Errorf("failed to create node manager: %w", err)
}
if !nodeManager.HasIdentity() {
if !nm.HasIdentity() {
fmt.Println("No node identity to reset.")
return nil
}
@ -207,7 +219,7 @@ var nodeResetCmd = &cobra.Command{
return nil
}
if err := nodeManager.Delete(); err != nil {
if err := nm.Delete(); err != nil {
return fmt.Errorf("failed to delete identity: %w", err)
}
@ -220,19 +232,24 @@ var nodeResetCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(nodeCmd)
// node init
nodeCmd.AddCommand(nodeInitCmd)
nodeInitCmd.Flags().StringP("name", "n", "", "Node name (required)")
nodeInitCmd.Flags().StringP("role", "r", "dual", "Node role: controller, worker, or dual (default)")
// node info
nodeCmd.AddCommand(nodeInfoCmd)
// node serve
nodeCmd.AddCommand(nodeServeCmd)
nodeServeCmd.Flags().StringP("listen", "l", ":9091", "Address to listen on")
// node reset
nodeCmd.AddCommand(nodeResetCmd)
nodeResetCmd.Flags().BoolP("force", "f", false, "Force reset without confirmation")
}
// getNodeManager returns the singleton node manager (thread-safe)
func getNodeManager() (*node.NodeManager, error) {
nodeManagerOnce.Do(func() {
nodeManager, nodeManagerErr = node.NewNodeManager()
@ -240,6 +257,7 @@ func getNodeManager() (*node.NodeManager, error) {
return nodeManager, nodeManagerErr
}
// getPeerRegistry returns the singleton peer registry (thread-safe)
func getPeerRegistry() (*node.PeerRegistry, error) {
peerRegistryOnce.Do(func() {
peerRegistry, peerRegistryErr = node.NewPeerRegistry()

View file

@ -4,46 +4,49 @@ import (
"fmt"
"time"
"forge.lthn.ai/Snider/Mining/pkg/node"
"github.com/Snider/Mining/pkg/node"
"github.com/spf13/cobra"
)
// rootCmd.AddCommand(peerCmd) exposes `peer add`, `peer list`, `peer remove`, `peer ping`, and `peer optimal`.
// Note: findPeerByPartialID is defined in remote.go and used for peer lookup
// peerCmd represents the peer parent command
var peerCmd = &cobra.Command{
Use: "peer",
Short: "Manage peer nodes",
Long: `Add, remove, and manage connections to peer nodes.`,
}
// peer add --address 10.0.0.2:9090 --name worker-1 registers a peer by address.
// peerAddCmd adds a new peer
var peerAddCmd = &cobra.Command{
Use: "add",
Short: "Add a peer node",
Long: `Add a new peer node by address. This will initiate a handshake
to exchange public keys and establish a secure connection.`,
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
address, _ := cobraCommand.Flags().GetString("address")
name, _ := cobraCommand.Flags().GetString("name")
RunE: func(cmd *cobra.Command, args []string) error {
address, _ := cmd.Flags().GetString("address")
name, _ := cmd.Flags().GetString("name")
if address == "" {
return fmt.Errorf("--address is required")
}
nodeManager, err := getNodeManager()
nm, err := getNodeManager()
if err != nil {
return fmt.Errorf("failed to get node manager: %w", err)
}
if !nodeManager.HasIdentity() {
if !nm.HasIdentity() {
return fmt.Errorf("no node identity found. Run 'node init' first")
}
peerRegistry, err := getPeerRegistry()
pr, err := getPeerRegistry()
if err != nil {
return fmt.Errorf("failed to get peer registry: %w", err)
}
// peer := &node.Peer{ID: "pending-1712070000123", Name: "worker-1", Address: "10.0.0.2:9090"} keeps the registry entry aligned with `node serve`.
// For now, just add to registry - actual connection happens with 'node serve'
// In a full implementation, we'd connect here and get the peer's identity
peer := &node.Peer{
ID: fmt.Sprintf("pending-%d", time.Now().UnixNano()),
Name: name,
@ -53,7 +56,7 @@ to exchange public keys and establish a secure connection.`,
Score: 50,
}
if err := peerRegistry.AddPeer(peer); err != nil {
if err := pr.AddPeer(peer); err != nil {
return fmt.Errorf("failed to add peer: %w", err)
}
@ -63,18 +66,18 @@ to exchange public keys and establish a secure connection.`,
},
}
// peer list prints the currently registered peers and their connection status.
// peerListCmd lists all registered peers
var peerListCmd = &cobra.Command{
Use: "list",
Short: "List registered peers",
Long: `Display all registered peers with their connection status.`,
RunE: func(_ *cobra.Command, args []string) error {
peerRegistry, err := getPeerRegistry()
RunE: func(cmd *cobra.Command, args []string) error {
pr, err := getPeerRegistry()
if err != nil {
return fmt.Errorf("failed to get peer registry: %w", err)
}
peers := peerRegistry.ListPeers()
peers := pr.ListPeers()
if len(peers) == 0 {
fmt.Println("No peers registered.")
fmt.Println("Use 'peer add --address <host:port> --name <name>' to add one.")
@ -104,13 +107,13 @@ var peerListCmd = &cobra.Command{
},
}
// peer remove a1b2c3d4e5f6 removes the selected peer from the registry.
// peerRemoveCmd removes a peer
var peerRemoveCmd = &cobra.Command{
Use: "remove <peer-id>",
Short: "Remove a peer from registry",
Long: `Remove a peer node from the registry. This will disconnect if connected.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
peerID := args[0]
peer := findPeerByPartialID(peerID)
@ -118,12 +121,12 @@ var peerRemoveCmd = &cobra.Command{
return fmt.Errorf("peer not found: %s", peerID)
}
peerRegistry, err := getPeerRegistry()
pr, err := getPeerRegistry()
if err != nil {
return fmt.Errorf("failed to get peer registry: %w", err)
}
if err := peerRegistry.RemovePeer(peer.ID); err != nil {
if err := pr.RemovePeer(peer.ID); err != nil {
return fmt.Errorf("failed to remove peer: %w", err)
}
@ -132,13 +135,13 @@ var peerRemoveCmd = &cobra.Command{
},
}
// peer ping a1b2c3d4e5f6 measures the selected peer's reachability.
// peerPingCmd pings a peer
var peerPingCmd = &cobra.Command{
Use: "ping <peer-id>",
Short: "Ping a peer and update metrics",
Long: `Send a ping to a peer and measure round-trip latency.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
peerID := args[0]
peer := findPeerByPartialID(peerID)
@ -151,33 +154,33 @@ var peerPingCmd = &cobra.Command{
}
fmt.Printf("Pinging %s (%s)...\n", peer.Name, peer.Address)
// `node serve` performs the live ping over transport once the peer is connected.
// TODO: Actually send ping via transport
fmt.Println("Ping functionality requires active connection via 'node serve'")
return nil
},
}
// peer optimal --count 4 prints the four best peers by score.
// peerOptimalCmd shows the optimal peer based on metrics
var peerOptimalCmd = &cobra.Command{
Use: "optimal",
Short: "Show the optimal peer based on metrics",
Long: `Use the Poindexter KD-tree to find the best peer based on
ping latency, hop count, geographic distance, and reliability score.`,
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
count, _ := cobraCommand.Flags().GetInt("count")
RunE: func(cmd *cobra.Command, args []string) error {
count, _ := cmd.Flags().GetInt("count")
peerRegistry, err := getPeerRegistry()
pr, err := getPeerRegistry()
if err != nil {
return fmt.Errorf("failed to get peer registry: %w", err)
}
if peerRegistry.Count() == 0 {
if pr.Count() == 0 {
fmt.Println("No peers registered.")
return nil
}
if count == 1 {
peer := peerRegistry.SelectOptimalPeer()
peer := pr.SelectOptimalPeer()
if peer == nil {
fmt.Println("No optimal peer found.")
return nil
@ -191,7 +194,7 @@ ping latency, hop count, geographic distance, and reliability score.`,
fmt.Printf(" Geo: %.1f km\n", peer.GeoKM)
fmt.Printf(" Score: %.1f\n", peer.Score)
} else {
peers := peerRegistry.SelectNearestPeers(count)
peers := pr.SelectNearestPeers(count)
if len(peers) == 0 {
fmt.Println("No peers found.")
return nil
@ -212,21 +215,21 @@ ping latency, hop count, geographic distance, and reliability score.`,
func init() {
rootCmd.AddCommand(peerCmd)
// rootCmd.AddCommand(peerAddCmd) exposes `peer add --address 10.0.0.2:9090 --name worker-1`.
// peer add
peerCmd.AddCommand(peerAddCmd)
peerAddCmd.Flags().StringP("address", "a", "", "Peer address (host:port)")
peerAddCmd.Flags().StringP("name", "n", "", "Peer name")
// rootCmd.AddCommand(peerListCmd) exposes `peer list`.
// peer list
peerCmd.AddCommand(peerListCmd)
// rootCmd.AddCommand(peerRemoveCmd) exposes `peer remove <peer-id>`.
// peer remove
peerCmd.AddCommand(peerRemoveCmd)
// rootCmd.AddCommand(peerPingCmd) exposes `peer ping <peer-id>`.
// peer ping
peerCmd.AddCommand(peerPingCmd)
// rootCmd.AddCommand(peerOptimalCmd) exposes `peer optimal --count 4`.
// peer optimal
peerCmd.AddCommand(peerOptimalCmd)
peerOptimalCmd.Flags().IntP("count", "c", 1, "Number of optimal peers to show")
}

View file

@ -6,64 +6,64 @@ import (
"sync"
"time"
"forge.lthn.ai/Snider/Mining/pkg/node"
"github.com/Snider/Mining/pkg/node"
"github.com/spf13/cobra"
)
var (
cachedRemoteController *node.Controller
cachedPeerTransport *node.Transport
loadRemoteControllerOnce sync.Once
cachedRemoteControllerErr error
controller *node.Controller
transport *node.Transport
controllerOnce sync.Once
controllerErr error
)
// Example: remote status peer-19f3, remote start peer-19f3 --type xmrig, and remote ping peer-19f3 --count 4 live under this command group.
// remoteCmd represents the remote parent command
var remoteCmd = &cobra.Command{
Use: "remote",
Short: "Control remote mining nodes",
Long: `Send commands to remote worker nodes and retrieve their status.`,
}
// Example: remote status peer-19f3 prints stats for one peer, while `remote status` prints the whole fleet.
// remoteStatusCmd shows stats from remote peers
var remoteStatusCmd = &cobra.Command{
Use: "status [peer-id]",
Short: "Get mining status from remote peers",
Long: `Display mining statistics from all connected peers or a specific peer.`,
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
remoteController, err := getController()
RunE: func(cmd *cobra.Command, args []string) error {
ctrl, err := getController()
if err != nil {
return err
}
if len(arguments) > 0 {
// Example: remote status peer-19f3 shows that peer's stats.
peerID := arguments[0]
selectedPeer := findPeerByPartialID(peerID)
if selectedPeer == nil {
if len(args) > 0 {
// Get stats from specific peer
peerID := args[0]
peer := findPeerByPartialID(peerID)
if peer == nil {
return fmt.Errorf("peer not found: %s", peerID)
}
stats, err := remoteController.GetRemoteStats(selectedPeer.ID)
stats, err := ctrl.GetRemoteStats(peer.ID)
if err != nil {
return fmt.Errorf("failed to get stats: %w", err)
}
printPeerStats(selectedPeer, stats)
printPeerStats(peer, stats)
} else {
// Example: remote status peer-19f3 shows one peer, while `remote status` shows the fleet.
allStats := remoteController.GetAllStats()
// Get stats from all peers
allStats := ctrl.GetAllStats()
if len(allStats) == 0 {
fmt.Println("No connected peers.")
return nil
}
peerRegistry, _ := getPeerRegistry()
pr, _ := getPeerRegistry()
var totalHashrate float64
for peerID, stats := range allStats {
selectedPeer := peerRegistry.GetPeer(peerID)
if selectedPeer != nil {
printPeerStats(selectedPeer, stats)
peer := pr.GetPeer(peerID)
if peer != nil {
printPeerStats(peer, stats)
for _, miner := range stats.Miners {
totalHashrate += miner.Hashrate
}
@ -78,32 +78,32 @@ var remoteStatusCmd = &cobra.Command{
},
}
// Example: remote start peer-19f3 --type xmrig --profile default starts a miner on the selected peer.
// remoteStartCmd starts a miner on a remote peer
var remoteStartCmd = &cobra.Command{
Use: "start <peer-id>",
Short: "Start miner on remote peer",
Long: `Start a miner on a remote peer using a profile.`,
Args: cobra.ExactArgs(1),
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
minerType, _ := cobraCommand.Flags().GetString("type")
RunE: func(cmd *cobra.Command, args []string) error {
minerType, _ := cmd.Flags().GetString("type")
if minerType == "" {
return fmt.Errorf("--type is required, for example `xmrig` or `tt-miner`")
return fmt.Errorf("--type is required (e.g., xmrig, tt-miner)")
}
profileID, _ := cobraCommand.Flags().GetString("profile")
profileID, _ := cmd.Flags().GetString("profile")
peerID := arguments[0]
selectedPeer := findPeerByPartialID(peerID)
if selectedPeer == nil {
peerID := args[0]
peer := findPeerByPartialID(peerID)
if peer == nil {
return fmt.Errorf("peer not found: %s", peerID)
}
remoteController, err := getController()
ctrl, err := getController()
if err != nil {
return err
}
fmt.Printf("Starting %s miner on %s with profile %s...\n", minerType, selectedPeer.Name, profileID)
if err := remoteController.StartRemoteMiner(selectedPeer.ID, minerType, profileID, nil); err != nil {
fmt.Printf("Starting %s miner on %s with profile %s...\n", minerType, peer.Name, profileID)
if err := ctrl.StartRemoteMiner(peer.ID, minerType, profileID, nil); err != nil {
return fmt.Errorf("failed to start miner: %w", err)
}
@ -112,37 +112,37 @@ var remoteStartCmd = &cobra.Command{
},
}
// Example: remote stop peer-19f3 xmrig-main stops a named miner on the selected peer.
// remoteStopCmd stops a miner on a remote peer
var remoteStopCmd = &cobra.Command{
Use: "stop <peer-id> [miner-name]",
Short: "Stop miner on remote peer",
Long: `Stop a running miner on a remote peer.`,
Args: cobra.MinimumNArgs(1),
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
peerID := arguments[0]
selectedPeer := findPeerByPartialID(peerID)
if selectedPeer == nil {
RunE: func(cmd *cobra.Command, args []string) error {
peerID := args[0]
peer := findPeerByPartialID(peerID)
if peer == nil {
return fmt.Errorf("peer not found: %s", peerID)
}
minerName := ""
if len(arguments) > 1 {
minerName = arguments[1]
if len(args) > 1 {
minerName = args[1]
} else {
minerName, _ = cobraCommand.Flags().GetString("miner")
minerName, _ = cmd.Flags().GetString("miner")
}
if minerName == "" {
return fmt.Errorf("miner name required (as argument or --miner flag)")
}
remoteController, err := getController()
ctrl, err := getController()
if err != nil {
return err
}
fmt.Printf("Stopping miner %s on %s...\n", minerName, selectedPeer.Name)
if err := remoteController.StopRemoteMiner(selectedPeer.ID, minerName); err != nil {
fmt.Printf("Stopping miner %s on %s...\n", minerName, peer.Name)
if err := ctrl.StopRemoteMiner(peer.ID, minerName); err != nil {
return fmt.Errorf("failed to stop miner: %w", err)
}
@ -151,33 +151,33 @@ var remoteStopCmd = &cobra.Command{
},
}
// Example: remote logs peer-19f3 xmrig-main prints the first 100 log lines for the remote miner.
// remoteLogsCmd gets logs from a remote miner
var remoteLogsCmd = &cobra.Command{
Use: "logs <peer-id> <miner-name>",
Short: "Get console logs from remote miner",
Long: `Retrieve console output logs from a miner running on a remote peer.`,
Args: cobra.ExactArgs(2),
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
peerID := arguments[0]
minerName := arguments[1]
lines, _ := cobraCommand.Flags().GetInt("lines")
RunE: func(cmd *cobra.Command, args []string) error {
peerID := args[0]
minerName := args[1]
lines, _ := cmd.Flags().GetInt("lines")
selectedPeer := findPeerByPartialID(peerID)
if selectedPeer == nil {
peer := findPeerByPartialID(peerID)
if peer == nil {
return fmt.Errorf("peer not found: %s", peerID)
}
remoteController, err := getController()
ctrl, err := getController()
if err != nil {
return err
}
logLines, err := remoteController.GetRemoteLogs(selectedPeer.ID, minerName, lines)
logLines, err := ctrl.GetRemoteLogs(peer.ID, minerName, lines)
if err != nil {
return fmt.Errorf("failed to get logs: %w", err)
}
fmt.Printf("Logs from %s on %s (%d lines):\n", minerName, selectedPeer.Name, len(logLines))
fmt.Printf("Logs from %s on %s (%d lines):\n", minerName, peer.Name, len(logLines))
fmt.Println("────────────────────────────────────")
for _, line := range logLines {
fmt.Println(line)
@ -187,26 +187,26 @@ var remoteLogsCmd = &cobra.Command{
},
}
// Example: remote connect peer-19f3 opens a WebSocket connection to the peer.
// remoteConnectCmd connects to a peer
var remoteConnectCmd = &cobra.Command{
Use: "connect <peer-id>",
Short: "Connect to a remote peer",
Long: `Establish a WebSocket connection to a registered peer.`,
Args: cobra.ExactArgs(1),
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
peerID := arguments[0]
selectedPeer := findPeerByPartialID(peerID)
if selectedPeer == nil {
RunE: func(cmd *cobra.Command, args []string) error {
peerID := args[0]
peer := findPeerByPartialID(peerID)
if peer == nil {
return fmt.Errorf("peer not found: %s", peerID)
}
remoteController, err := getController()
ctrl, err := getController()
if err != nil {
return err
}
fmt.Printf("Connecting to %s at %s...\n", selectedPeer.Name, selectedPeer.Address)
if err := remoteController.ConnectToPeer(selectedPeer.ID); err != nil {
fmt.Printf("Connecting to %s at %s...\n", peer.Name, peer.Address)
if err := ctrl.ConnectToPeer(peer.ID); err != nil {
return fmt.Errorf("failed to connect: %w", err)
}
@ -215,26 +215,26 @@ var remoteConnectCmd = &cobra.Command{
},
}
// Example: remote disconnect peer-19f3 closes the active peer connection.
// remoteDisconnectCmd disconnects from a peer
var remoteDisconnectCmd = &cobra.Command{
Use: "disconnect <peer-id>",
Short: "Disconnect from a remote peer",
Long: `Close the connection to a peer.`,
Args: cobra.ExactArgs(1),
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
peerID := arguments[0]
selectedPeer := findPeerByPartialID(peerID)
if selectedPeer == nil {
RunE: func(cmd *cobra.Command, args []string) error {
peerID := args[0]
peer := findPeerByPartialID(peerID)
if peer == nil {
return fmt.Errorf("peer not found: %s", peerID)
}
remoteController, err := getController()
ctrl, err := getController()
if err != nil {
return err
}
fmt.Printf("Disconnecting from %s...\n", selectedPeer.Name)
if err := remoteController.DisconnectFromPeer(selectedPeer.ID); err != nil {
fmt.Printf("Disconnecting from %s...\n", peer.Name)
if err := ctrl.DisconnectFromPeer(peer.ID); err != nil {
return fmt.Errorf("failed to disconnect: %w", err)
}
@ -243,48 +243,48 @@ var remoteDisconnectCmd = &cobra.Command{
},
}
// Example: remote ping peer-19f3 --count 4 averages four ping samples.
// remotePingCmd pings a peer
var remotePingCmd = &cobra.Command{
Use: "ping <peer-id>",
Short: "Ping a remote peer",
Long: `Send a ping to a peer and measure round-trip latency.`,
Args: cobra.ExactArgs(1),
RunE: func(cobraCommand *cobra.Command, arguments []string) error {
count, _ := cobraCommand.Flags().GetInt("count")
RunE: func(cmd *cobra.Command, args []string) error {
count, _ := cmd.Flags().GetInt("count")
peerID := arguments[0]
selectedPeer := findPeerByPartialID(peerID)
if selectedPeer == nil {
peerID := args[0]
peer := findPeerByPartialID(peerID)
if peer == nil {
return fmt.Errorf("peer not found: %s", peerID)
}
remoteController, err := getController()
ctrl, err := getController()
if err != nil {
return err
}
fmt.Printf("Pinging %s (%s)...\n", selectedPeer.Name, selectedPeer.Address)
fmt.Printf("Pinging %s (%s)...\n", peer.Name, peer.Address)
var totalRoundTripMillis float64
var successfulPings int
var totalRTT float64
var successful int
for i := 0; i < count; i++ {
roundTripMillis, err := remoteController.PingPeer(selectedPeer.ID)
rtt, err := ctrl.PingPeer(peer.ID)
if err != nil {
fmt.Printf(" Ping %d: timeout\n", i+1)
continue
}
fmt.Printf(" Ping %d: %.2f ms\n", i+1, roundTripMillis)
totalRoundTripMillis += roundTripMillis
successfulPings++
fmt.Printf(" Ping %d: %.2f ms\n", i+1, rtt)
totalRTT += rtt
successful++
if i < count-1 {
time.Sleep(time.Second)
}
}
if successfulPings > 0 {
fmt.Printf("\nAverage: %.2f ms (%d/%d successful)\n", totalRoundTripMillis/float64(successfulPings), successfulPings, count)
if successful > 0 {
fmt.Printf("\nAverage: %.2f ms (%d/%d successful)\n", totalRTT/float64(successful), successful, count)
} else {
fmt.Println("\nAll pings failed.")
}
@ -296,88 +296,89 @@ var remotePingCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(remoteCmd)
// Example: remote status peer-19f3 prints one peer, while `remote status` prints the fleet.
// remote status
remoteCmd.AddCommand(remoteStatusCmd)
// Example: remote start peer-19f3 --type xmrig --profile default launches a miner.
// remote start
remoteCmd.AddCommand(remoteStartCmd)
remoteStartCmd.Flags().StringP("profile", "p", "", "Profile ID to start, for example default or office-rig")
remoteStartCmd.Flags().StringP("type", "t", "", "Miner type to start, for example xmrig or tt-miner")
remoteStartCmd.Flags().StringP("profile", "p", "", "Profile ID to use for starting the miner")
remoteStartCmd.Flags().StringP("type", "t", "", "Miner type (e.g., xmrig, tt-miner)")
// Example: remote stop peer-19f3 xmrig-main stops the selected miner.
// remote stop
remoteCmd.AddCommand(remoteStopCmd)
remoteStopCmd.Flags().StringP("miner", "m", "", "Miner name to stop, for example xmrig-main")
remoteStopCmd.Flags().StringP("miner", "m", "", "Miner name to stop")
// Example: remote logs peer-19f3 xmrig-main prints miner logs.
// remote logs
remoteCmd.AddCommand(remoteLogsCmd)
remoteLogsCmd.Flags().IntP("lines", "n", 100, "Number of log lines to retrieve, for example 100")
remoteLogsCmd.Flags().IntP("lines", "n", 100, "Number of log lines to retrieve")
// Example: remote connect peer-19f3 opens the peer connection.
// remote connect
remoteCmd.AddCommand(remoteConnectCmd)
// Example: remote disconnect peer-19f3 closes the peer connection.
// remote disconnect
remoteCmd.AddCommand(remoteDisconnectCmd)
// Example: remote ping peer-19f3 --count 4 measures latency.
// remote ping
remoteCmd.AddCommand(remotePingCmd)
remotePingCmd.Flags().IntP("count", "c", 4, "Number of ping samples to send, for example 4")
remotePingCmd.Flags().IntP("count", "c", 4, "Number of pings to send")
}
// getController() returns the cached controller after `node init` succeeds.
// getController returns or creates the controller instance (thread-safe).
func getController() (*node.Controller, error) {
loadRemoteControllerOnce.Do(func() {
nodeManager, err := getNodeManager()
controllerOnce.Do(func() {
nm, err := getNodeManager()
if err != nil {
cachedRemoteControllerErr = fmt.Errorf("failed to get node manager: %w", err)
controllerErr = fmt.Errorf("failed to get node manager: %w", err)
return
}
if !nodeManager.HasIdentity() {
cachedRemoteControllerErr = fmt.Errorf("no node identity found. Run `node init` first")
if !nm.HasIdentity() {
controllerErr = fmt.Errorf("no node identity found. Run 'node init' first")
return
}
peerRegistry, err := getPeerRegistry()
pr, err := getPeerRegistry()
if err != nil {
cachedRemoteControllerErr = fmt.Errorf("failed to get peer registry: %w", err)
controllerErr = fmt.Errorf("failed to get peer registry: %w", err)
return
}
transportConfig := node.DefaultTransportConfig()
cachedPeerTransport = node.NewTransport(nodeManager, peerRegistry, transportConfig)
cachedRemoteController = node.NewController(nodeManager, peerRegistry, cachedPeerTransport)
// Initialize transport
config := node.DefaultTransportConfig()
transport = node.NewTransport(nm, pr, config)
controller = node.NewController(nm, pr, transport)
})
return cachedRemoteController, cachedRemoteControllerErr
return controller, controllerErr
}
// findPeerByPartialID("peer-19f3") returns the peer whose ID starts with `peer-19f3`.
// findPeerByPartialID finds a peer by full or partial ID.
func findPeerByPartialID(partialID string) *node.Peer {
peerRegistry, err := getPeerRegistry()
pr, err := getPeerRegistry()
if err != nil {
return nil
}
// peerRegistry.GetPeer("peer-19f3") tries the exact peer ID first.
peer := peerRegistry.GetPeer(partialID)
// Try exact match first
peer := pr.GetPeer(partialID)
if peer != nil {
return peer
}
// peerRegistry.ListPeers() falls back to partial IDs such as `peer-19`.
for _, registeredPeer := range peerRegistry.ListPeers() {
if strings.HasPrefix(registeredPeer.ID, partialID) {
return registeredPeer
// Try partial match
for _, p := range pr.ListPeers() {
if strings.HasPrefix(p.ID, partialID) {
return p
}
// strings.EqualFold(registeredPeer.Name, "office-rig") matches peers by display name as well as ID prefix.
if strings.EqualFold(registeredPeer.Name, partialID) {
return registeredPeer
// Also try matching by name
if strings.EqualFold(p.Name, partialID) {
return p
}
}
return nil
}
// printPeerStats(peer, stats) formats the remote stats output for `remote status peer-19f3`.
// printPeerStats prints formatted stats for a peer.
func printPeerStats(peer *node.Peer, stats *node.StatsPayload) {
fmt.Printf("\n%s (%s)\n", peer.Name, peer.ID[:16])
fmt.Printf(" Address: %s\n", peer.Address)
@ -396,11 +397,11 @@ func printPeerStats(peer *node.Peer, stats *node.StatsPayload) {
}
}
// formatDuration(90*time.Minute) // returns "1h 30m"
func formatDuration(duration time.Duration) string {
days := int(duration.Hours() / 24)
hours := int(duration.Hours()) % 24
minutes := int(duration.Minutes()) % 60
// formatDuration formats a duration into a human-readable string.
func formatDuration(d time.Duration) string {
days := int(d.Hours() / 24)
hours := int(d.Hours()) % 24
minutes := int(d.Minutes()) % 60
if days > 0 {
return fmt.Sprintf("%dd %dh %dm", days, hours, minutes)

View file

@ -3,14 +3,15 @@ package cmd
import (
"os"
"forge.lthn.ai/Snider/Mining/pkg/mining"
"github.com/Snider/Mining/pkg/mining"
"github.com/spf13/cobra"
)
var (
sharedMiningManager *mining.Manager
manager *mining.Manager
)
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "mining",
Short: "Mining CLI - Manage miners with RESTful control",
@ -19,26 +20,31 @@ It provides commands to start, stop, list, and manage miners with RESTful contro
Version: mining.GetVersion(),
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() error {
return rootCmd.Execute()
}
func init() {
cobra.OnInitialize(initializeSharedManager)
cobra.OnInitialize(initManager)
}
func initializeSharedManager() {
// initManager initializes the miner manager
func initManager() {
// Skip for commands that create their own manager (like simulate)
if len(os.Args) > 1 && os.Args[1] == "simulate" {
return
}
if sharedMiningManager == nil {
sharedMiningManager = mining.NewManager()
if manager == nil {
manager = mining.NewManager()
}
}
func getSharedManager() *mining.Manager {
if sharedMiningManager == nil {
sharedMiningManager = mining.NewManager()
// getManager returns the singleton manager instance
func getManager() *mining.Manager {
if manager == nil {
manager = mining.NewManager()
}
return sharedMiningManager
return manager
}

View file

@ -10,28 +10,28 @@ import (
"strings"
"syscall"
"forge.lthn.ai/Snider/Mining/pkg/mining"
"github.com/Snider/Mining/pkg/mining"
"github.com/spf13/cobra"
"golang.org/x/text/cases"
"golang.org/x/text/language"
)
var (
serveHost string
servePort int
apiBasePath string
host string
port int
namespace string
)
// Example: mining serve --host 0.0.0.0 --port 9090 exposes GET /api/v1/mining/status and keeps the shell open.
// serveCmd represents the serve command
var serveCmd = &cobra.Command{
Use: "serve",
Short: "Start the mining service and interactive shell",
Long: `Start the mining service, which provides a RESTful API for managing miners, and an interactive shell for CLI commands.`,
RunE: func(_ *cobra.Command, arguments []string) error {
runContext, cancel := context.WithCancel(context.Background())
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
displayHost := serveHost
displayHost := host
if displayHost == "0.0.0.0" {
var err error
displayHost, err = getLocalIP()
@ -39,103 +39,103 @@ var serveCmd = &cobra.Command{
displayHost = "localhost"
}
}
displayAddress := fmt.Sprintf("%s:%d", displayHost, servePort)
listenAddress := fmt.Sprintf("%s:%d", serveHost, servePort)
displayAddr := fmt.Sprintf("%s:%d", displayHost, port)
listenAddr := fmt.Sprintf("%s:%d", host, port)
// Example: sharedManager := getSharedManager() keeps `mining start`, `mining stop`, and `mining serve` pointed at the same miner state.
sharedManager := getSharedManager()
// Use the global manager instance
mgr := getManager() // This ensures we get the manager initialized by initManager
service, err := mining.NewService(sharedManager, listenAddress, displayAddress, apiBasePath)
service, err := mining.NewService(mgr, listenAddr, displayAddr, namespace) // Pass the global manager
if err != nil {
return fmt.Errorf("failed to create new service: %w", err)
}
// Example: service.ServiceStartup(runContext) serves GET /api/v1/mining/status while stdin stays open.
// Start the server in a goroutine
go func() {
if err := service.ServiceStartup(runContext); err != nil {
if err := service.ServiceStartup(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Failed to start service: %v\n", err)
cancel()
}
}()
// Example: shutdownSignal := make(chan os.Signal, 1) captures Ctrl+C and SIGTERM for graceful shutdown.
shutdownSignal := make(chan os.Signal, 1)
signal.Notify(shutdownSignal, syscall.SIGINT, syscall.SIGTERM)
// Handle graceful shutdown on Ctrl+C
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
// Example: the prompt loop below keeps `>>` visible while the API serves requests.
// Start interactive shell in a goroutine
go func() {
fmt.Printf("Mining service started on http://%s:%d\n", displayHost, servePort)
fmt.Printf("Swagger documentation is available at http://%s:%d%s/index.html\n", displayHost, servePort, service.SwaggerUIPath)
fmt.Printf("Mining service started on http://%s:%d\n", displayHost, port)
fmt.Printf("Swagger documentation is available at http://%s:%d%s/index.html\n", displayHost, port, service.SwaggerUIPath)
fmt.Println("Entering interactive shell. Type 'exit' or 'quit' to stop.")
fmt.Print(">> ")
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
inputLine := scanner.Text()
if inputLine == "" {
line := scanner.Text()
if line == "" {
fmt.Print(">> ")
continue
}
if strings.ToLower(inputLine) == "exit" || strings.ToLower(inputLine) == "quit" {
if strings.ToLower(line) == "exit" || strings.ToLower(line) == "quit" {
fmt.Println("Exiting...")
cancel()
return
}
inputTokens := strings.Fields(inputLine)
if len(inputTokens) == 0 {
parts := strings.Fields(line)
if len(parts) == 0 {
fmt.Print(">> ")
continue
}
shellCommand := strings.ToLower(inputTokens[0])
commandArgs := inputTokens[1:]
command := strings.ToLower(parts[0])
cmdArgs := parts[1:]
switch shellCommand {
switch command {
case "start":
if len(commandArgs) < 3 {
if len(cmdArgs) < 3 {
fmt.Println("Usage: start <miner_type> <pool> <wallet>")
fmt.Println("Example: start xmrig stratum+tcp://pool.example.com:3333 YOUR_WALLET_ADDRESS")
} else {
minerType := commandArgs[0]
poolURL := commandArgs[1]
walletAddress := commandArgs[2]
minerType := cmdArgs[0]
pool := cmdArgs[1]
wallet := cmdArgs[2]
// Example: poolURL := "stratum+tcp://pool.example.com:3333" passes Config.Validate().
if !strings.HasPrefix(poolURL, "stratum+tcp://") &&
!strings.HasPrefix(poolURL, "stratum+ssl://") &&
!strings.HasPrefix(poolURL, "stratum://") {
// Validate pool URL format
if !strings.HasPrefix(pool, "stratum+tcp://") &&
!strings.HasPrefix(pool, "stratum+ssl://") &&
!strings.HasPrefix(pool, "stratum://") {
fmt.Fprintf(os.Stderr, "Error: Invalid pool URL (must start with stratum+tcp://, stratum+ssl://, or stratum://)\n")
fmt.Print(">> ")
continue
}
if len(poolURL) > 256 {
if len(pool) > 256 {
fmt.Fprintf(os.Stderr, "Error: Pool URL too long (max 256 chars)\n")
fmt.Print(">> ")
continue
}
// Example: walletAddress := "44Affq5kSiGBoZ..." keeps the wallet under the 256-character limit.
if len(walletAddress) > 256 {
// Validate wallet address length
if len(wallet) > 256 {
fmt.Fprintf(os.Stderr, "Error: Wallet address too long (max 256 chars)\n")
fmt.Print(">> ")
continue
}
minerConfig := &mining.Config{
Pool: poolURL,
Wallet: walletAddress,
config := &mining.Config{
Pool: pool,
Wallet: wallet,
LogOutput: true,
}
// Example: minerConfig.Validate() rejects malformed pool and wallet values before the miner starts.
if err := minerConfig.Validate(); err != nil {
// Validate config before starting
if err := config.Validate(); err != nil {
fmt.Fprintf(os.Stderr, "Error: Invalid configuration: %v\n", err)
fmt.Print(">> ")
continue
}
miner, err := sharedManager.StartMiner(context.Background(), minerType, minerConfig)
miner, err := mgr.StartMiner(context.Background(), minerType, config)
if err != nil {
fmt.Fprintf(os.Stderr, "Error starting miner: %v\n", err)
} else {
@ -143,11 +143,11 @@ var serveCmd = &cobra.Command{
}
}
case "status":
if len(commandArgs) < 1 {
fmt.Println("Error: status command requires miner name, for example `status xmrig`")
if len(cmdArgs) < 1 {
fmt.Println("Error: status command requires miner name (e.g., 'status xmrig')")
} else {
minerName := commandArgs[0]
miner, err := sharedManager.GetMiner(minerName)
minerName := cmdArgs[0]
miner, err := mgr.GetMiner(minerName)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting miner status: %v\n", err)
} else {
@ -165,11 +165,11 @@ var serveCmd = &cobra.Command{
}
}
case "stop":
if len(commandArgs) < 1 {
fmt.Println("Error: stop command requires miner name, for example `stop xmrig`")
if len(cmdArgs) < 1 {
fmt.Println("Error: stop command requires miner name (e.g., 'stop xmrig')")
} else {
minerName := commandArgs[0]
err := sharedManager.StopMiner(context.Background(), minerName)
minerName := cmdArgs[0]
err := mgr.StopMiner(context.Background(), minerName)
if err != nil {
fmt.Fprintf(os.Stderr, "Error stopping miner: %v\n", err)
} else {
@ -177,7 +177,7 @@ var serveCmd = &cobra.Command{
}
}
case "list":
miners := sharedManager.ListMiners()
miners := mgr.ListMiners()
if len(miners) == 0 {
fmt.Println("No miners currently running.")
} else {
@ -187,27 +187,27 @@ var serveCmd = &cobra.Command{
}
}
default:
fmt.Fprintf(os.Stderr, "Unknown command: %s. Only 'start', 'status', 'stop', 'list' are directly supported in this shell.\n", shellCommand)
fmt.Fprintf(os.Stderr, "For other commands, please run them directly from your terminal, for example `mining doctor`.\n")
fmt.Fprintf(os.Stderr, "Unknown command: %s. Only 'start', 'status', 'stop', 'list' are directly supported in this shell.\n", command)
fmt.Fprintf(os.Stderr, "For other commands, please run them directly from your terminal (e.g., 'miner-ctrl doctor').\n")
}
fmt.Print(">> ")
}
// Example: scanner.Err() reports a closed stdin pipe when the terminal exits.
// Check for scanner errors (I/O issues)
if err := scanner.Err(); err != nil {
fmt.Fprintf(os.Stderr, "Error reading input: %v\n", err)
}
}()
select {
case <-shutdownSignal:
case <-signalChan:
fmt.Println("\nReceived shutdown signal, stopping service...")
cancel()
case <-runContext.Done():
case <-ctx.Done():
}
// Example: sharedManager.Stop() stops miner goroutines and closes the shared service state before exit.
sharedManager.Stop()
// Explicit cleanup of manager resources
mgr.Stop()
fmt.Println("Mining service stopped.")
return nil
@ -215,9 +215,9 @@ var serveCmd = &cobra.Command{
}
func init() {
serveCmd.Flags().StringVar(&serveHost, "host", "127.0.0.1", "Host to bind the API server, for example 127.0.0.1 or 0.0.0.0")
serveCmd.Flags().IntVarP(&servePort, "port", "p", 9090, "Port to bind the API server, for example 9090")
serveCmd.Flags().StringVarP(&apiBasePath, "namespace", "n", "/api/v1/mining", "API base path, for example /api/v1/mining")
serveCmd.Flags().StringVar(&host, "host", "127.0.0.1", "Host to listen on")
serveCmd.Flags().IntVarP(&port, "port", "p", 9090, "Port to listen on")
serveCmd.Flags().StringVarP(&namespace, "namespace", "n", "/api/v1/mining", "API namespace for the swagger UI")
rootCmd.AddCommand(serveCmd)
}

View file

@ -10,18 +10,18 @@ import (
"syscall"
"time"
"forge.lthn.ai/Snider/Mining/pkg/mining"
"github.com/Snider/Mining/pkg/mining"
"github.com/spf13/cobra"
)
var (
simulatedMinerCount int
simulationPreset string
simulationHashrate int
simulationAlgorithm string
simCount int
simPreset string
simHashrate int
simAlgorithm string
)
// Example: mining simulate --count 3 --preset cpu-medium starts the API with fake miners.
// simulateCmd represents the simulate command
var simulateCmd = &cobra.Command{
Use: "simulate",
Short: "Start the service with simulated miners for UI testing",
@ -31,13 +31,13 @@ without requiring actual mining hardware.
Examples:
# Start with 3 medium-hashrate CPU miners
mining simulate --count 3 --preset cpu-medium
miner-ctrl simulate --count 3 --preset cpu-medium
# Start with custom hashrate
mining simulate --count 2 --hashrate 8000 --algorithm rx/0
miner-ctrl simulate --count 2 --hashrate 8000 --algorithm rx/0
# Start with a mix of presets
mining simulate --count 1 --preset gpu-ethash
miner-ctrl simulate --count 1 --preset gpu-ethash
Available presets:
cpu-low - Low-end CPU (500 H/s, rx/0)
@ -45,11 +45,11 @@ Available presets:
cpu-high - High-end CPU (15 kH/s, rx/0)
gpu-ethash - GPU mining ETH (30 MH/s, ethash)
gpu-kawpow - GPU mining RVN (15 MH/s, kawpow)`,
RunE: func(_ *cobra.Command, arguments []string) error {
runContext, cancel := context.WithCancel(context.Background())
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
displayHost := serveHost
displayHost := host
if displayHost == "0.0.0.0" {
var err error
displayHost, err = getLocalIP()
@ -57,65 +57,65 @@ Available presets:
displayHost = "localhost"
}
}
displayAddress := fmt.Sprintf("%s:%d", displayHost, servePort)
listenAddress := fmt.Sprintf("%s:%d", serveHost, servePort)
displayAddr := fmt.Sprintf("%s:%d", displayHost, port)
listenAddr := fmt.Sprintf("%s:%d", host, port)
// Example: simulatedManager := mining.NewManagerForSimulation() keeps fake miners isolated from the real autostart state.
simulatedManager := mining.NewManagerForSimulation()
// Create a new manager for simulation (skips autostart of real miners)
mgr := mining.NewManagerForSimulation()
// Example: getSimulatedConfig(0) returns a config such as sim-cpu-medium-001.
for i := 0; i < simulatedMinerCount; i++ {
simulatedConfig := getSimulatedConfig(i)
simulatedMiner := mining.NewSimulatedMiner(simulatedConfig)
// Create and start simulated miners
for i := 0; i < simCount; i++ {
config := getSimulatedConfig(i)
simMiner := mining.NewSimulatedMiner(config)
// Example: simulatedMiner.Start(&mining.Config{}) starts the fake miner lifecycle without launching a real binary.
if err := simulatedMiner.Start(&mining.Config{}); err != nil {
// Start the simulated miner
if err := simMiner.Start(&mining.Config{}); err != nil {
return fmt.Errorf("failed to start simulated miner %d: %w", i, err)
}
// Example: simulatedManager.RegisterMiner(simulatedMiner) makes the simulated miner visible to `mining serve`.
if err := simulatedManager.RegisterMiner(simulatedMiner); err != nil {
// Register with manager
if err := mgr.RegisterMiner(simMiner); err != nil {
return fmt.Errorf("failed to register simulated miner %d: %w", i, err)
}
fmt.Printf("Started simulated miner: %s (%s, ~%d H/s)\n",
simulatedConfig.Name, simulatedConfig.Algorithm, simulatedConfig.BaseHashrate)
config.Name, config.Algorithm, config.BaseHashrate)
}
// Example: service, err := mining.NewService(simulatedManager, listenAddress, displayAddress, apiBasePath) serves the simulator on http://127.0.0.1:9090.
service, err := mining.NewService(simulatedManager, listenAddress, displayAddress, apiBasePath)
// Create and start the service
service, err := mining.NewService(mgr, listenAddr, displayAddr, namespace)
if err != nil {
return fmt.Errorf("failed to create new service: %w", err)
}
// Example: service.ServiceStartup(runContext) starts the API server while the simulation loop keeps running.
// Start the server in a goroutine
go func() {
if err := service.ServiceStartup(runContext); err != nil {
if err := service.ServiceStartup(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Failed to start service: %v\n", err)
cancel()
}
}()
fmt.Printf("\n=== SIMULATION MODE ===\n")
fmt.Printf("Mining service started on http://%s:%d\n", displayHost, servePort)
fmt.Printf("Swagger documentation is available at http://%s:%d%s/swagger/index.html\n", displayHost, servePort, apiBasePath)
fmt.Printf("\nSimulating %d miner(s). Press Ctrl+C to stop.\n", simulatedMinerCount)
fmt.Printf("Mining service started on http://%s:%d\n", displayHost, port)
fmt.Printf("Swagger documentation is available at http://%s:%d%s/swagger/index.html\n", displayHost, port, namespace)
fmt.Printf("\nSimulating %d miner(s). Press Ctrl+C to stop.\n", simCount)
fmt.Printf("Note: All data is simulated - no actual mining is occurring.\n\n")
// Example: shutdownSignal := make(chan os.Signal, 1) captures Ctrl+C and SIGTERM for graceful shutdown.
shutdownSignal := make(chan os.Signal, 1)
signal.Notify(shutdownSignal, syscall.SIGINT, syscall.SIGTERM)
// Handle graceful shutdown on Ctrl+C
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
select {
case <-shutdownSignal:
case <-signalChan:
fmt.Println("\nReceived shutdown signal, stopping simulation...")
cancel()
case <-runContext.Done():
case <-ctx.Done():
}
// Example: for _, miner := range simulatedManager.ListMiners() { simulatedManager.StopMiner(context.Background(), miner.GetName()) } stops every simulated miner before exit.
for _, miner := range simulatedManager.ListMiners() {
simulatedManager.StopMiner(context.Background(), miner.GetName())
// Stop all simulated miners
for _, miner := range mgr.ListMiners() {
mgr.StopMiner(context.Background(), miner.GetName())
}
fmt.Println("Simulation stopped.")
@ -123,52 +123,56 @@ Available presets:
},
}
// getSimulatedConfig(0) returns a simulated miner such as `sim-cpu-medium-001` with preset-driven hashrate and algorithm defaults.
// getSimulatedConfig returns configuration for a simulated miner based on flags.
func getSimulatedConfig(index int) mining.SimulatedMinerConfig {
// Example: name := fmt.Sprintf("sim-cpu-medium-001", index+1) keeps simulated miner names predictable.
name := fmt.Sprintf("sim-%s-%03d", simulationPreset, index+1)
// Generate unique name
name := fmt.Sprintf("sim-%s-%03d", simPreset, index+1)
var simulatedConfig mining.SimulatedMinerConfig
if preset, ok := mining.SimulatedMinerPresets[simulationPreset]; ok {
simulatedConfig = preset
// Start with preset if specified
var config mining.SimulatedMinerConfig
if preset, ok := mining.SimulatedMinerPresets[simPreset]; ok {
config = preset
} else {
simulatedConfig = mining.SimulatedMinerPresets["cpu-medium"]
// Default preset
config = mining.SimulatedMinerPresets["cpu-medium"]
}
simulatedConfig.Name = name
config.Name = name
// Example: simulationHashrate = 8000 overrides the preset with a custom 8 kH/s baseline.
if simulationHashrate > 0 {
simulatedConfig.BaseHashrate = simulationHashrate
// Override with custom values if provided
if simHashrate > 0 {
config.BaseHashrate = simHashrate
}
// Example: simulationAlgorithm = "rx/0" swaps the preset algorithm before the miner starts.
if simulationAlgorithm != "" {
simulatedConfig.Algorithm = simulationAlgorithm
if simAlgorithm != "" {
config.Algorithm = simAlgorithm
}
// Add some variance between miners
variance := 0.1 + rand.Float64()*0.1 // 10-20% variance
simulatedConfig.BaseHashrate = int(float64(simulatedConfig.BaseHashrate) * (0.9 + rand.Float64()*0.2))
simulatedConfig.Variance = variance
config.BaseHashrate = int(float64(config.BaseHashrate) * (0.9 + rand.Float64()*0.2))
config.Variance = variance
return simulatedConfig
return config
}
func init() {
// Seed random for varied simulation
rand.Seed(time.Now().UnixNano())
simulateCmd.Flags().IntVarP(&simulatedMinerCount, "count", "c", 1, "Number of simulated miners to create")
simulateCmd.Flags().StringVar(&simulationPreset, "preset", "cpu-medium", "Miner preset (cpu-low, cpu-medium, cpu-high, gpu-ethash, gpu-kawpow)")
simulateCmd.Flags().IntVar(&simulationHashrate, "hashrate", 0, "Custom base hashrate (overrides preset)")
simulateCmd.Flags().StringVar(&simulationAlgorithm, "algorithm", "", "Custom algorithm (overrides preset)")
simulateCmd.Flags().IntVarP(&simCount, "count", "c", 1, "Number of simulated miners to create")
simulateCmd.Flags().StringVar(&simPreset, "preset", "cpu-medium", "Miner preset (cpu-low, cpu-medium, cpu-high, gpu-ethash, gpu-kawpow)")
simulateCmd.Flags().IntVar(&simHashrate, "hashrate", 0, "Custom base hashrate (overrides preset)")
simulateCmd.Flags().StringVar(&simAlgorithm, "algorithm", "", "Custom algorithm (overrides preset)")
simulateCmd.Flags().StringVar(&serveHost, "host", "127.0.0.1", "Host to bind the simulation API server, for example 127.0.0.1 or 0.0.0.0")
simulateCmd.Flags().IntVarP(&servePort, "port", "p", 9090, "Port to bind the simulation API server, for example 9090")
simulateCmd.Flags().StringVarP(&apiBasePath, "namespace", "n", "/api/v1/mining", "Simulation API base path, for example /api/v1/mining")
// Reuse serve command flags
simulateCmd.Flags().StringVar(&host, "host", "127.0.0.1", "Host to listen on")
simulateCmd.Flags().IntVarP(&port, "port", "p", 9090, "Port to listen on")
simulateCmd.Flags().StringVarP(&namespace, "namespace", "n", "/api/v1/mining", "API namespace")
rootCmd.AddCommand(simulateCmd)
}
// formatHashrate(1250) // returns "1.25 kH/s" for display in the simulation summary.
// Helper function to format hashrate
func formatHashrate(h int) string {
if h >= 1000000000 {
return strconv.FormatFloat(float64(h)/1000000000, 'f', 2, 64) + " GH/s"

View file

@ -4,29 +4,29 @@ import (
"context"
"fmt"
"forge.lthn.ai/Snider/Mining/pkg/mining"
"github.com/Snider/Mining/pkg/mining"
"github.com/spf13/cobra"
)
var (
poolURL string
walletAddress string
minerPool string
minerWallet string
)
// Example: mining start xmrig --pool stratum+tcp://pool.example.com:3333 --wallet 44Affq5kSiGBoZ... starts a miner with explicit pool and wallet values.
// startCmd represents the start command
var startCmd = &cobra.Command{
Use: "start <miner-type>",
Use: "start [miner_name]",
Short: "Start a new miner",
Long: `Start a miner with an explicit pool URL and wallet address.`,
Long: `Start a new miner with the specified configuration.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
minerType := args[0]
startConfig := &mining.Config{
Pool: poolURL,
Wallet: walletAddress,
config := &mining.Config{
Pool: minerPool,
Wallet: minerWallet,
}
miner, err := getSharedManager().StartMiner(context.Background(), minerType, startConfig)
miner, err := getManager().StartMiner(context.Background(), minerType, config)
if err != nil {
return fmt.Errorf("failed to start miner: %w", err)
}
@ -39,8 +39,8 @@ var startCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(startCmd)
startCmd.Flags().StringVarP(&poolURL, "pool", "p", "", "Mining pool URL, for example stratum+tcp://pool.example.com:3333")
startCmd.Flags().StringVarP(&walletAddress, "wallet", "w", "", "Wallet address, for example 44Affq5kSiGBoZ...")
startCmd.Flags().StringVarP(&minerPool, "pool", "p", "", "Mining pool address (required)")
startCmd.Flags().StringVarP(&minerWallet, "wallet", "w", "", "Wallet address (required)")
_ = startCmd.MarkFlagRequired("pool")
_ = startCmd.MarkFlagRequired("wallet")
}

View file

@ -9,17 +9,17 @@ import (
"golang.org/x/text/language"
)
// mining status xmrig-rx_0 prints live stats for a running miner.
// statusCmd represents the status command
var statusCmd = &cobra.Command{
Use: "status [miner_name]",
Short: "Get status of a running miner",
Long: `Get detailed status information for a specific running miner.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
minerName := args[0]
manager := getSharedManager()
mgr := getManager()
miner, err := manager.GetMiner(minerName)
miner, err := mgr.GetMiner(minerName)
if err != nil {
return fmt.Errorf("failed to get miner: %w", err)
}

View file

@ -7,17 +7,17 @@ import (
"github.com/spf13/cobra"
)
// mining stop xmrig-rx_0 stops the named miner.
// stopCmd represents the stop command
var stopCmd = &cobra.Command{
Use: "stop [miner_name]",
Short: "Stop a running miner",
Long: `Stop a running miner by its name.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
minerName := args[0]
manager := getSharedManager()
mgr := getManager()
if err := manager.StopMiner(context.Background(), minerName); err != nil {
if err := mgr.StopMiner(context.Background(), minerName); err != nil {
return fmt.Errorf("failed to stop miner: %w", err)
}

View file

@ -7,15 +7,15 @@ import (
"github.com/spf13/cobra"
)
// mining uninstall xmrig removes the miner and refreshes the cache.
// uninstallCmd represents the uninstall command
var uninstallCmd = &cobra.Command{
Use: "uninstall [miner_type]",
Short: "Uninstall a miner",
Long: `Stops the miner if it is running, removes all associated files, and updates the configuration.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
minerType := args[0]
manager := getSharedManager() // getSharedManager() keeps `mining uninstall xmrig` on the same shared miner state as `mining serve`.
manager := getManager() // Assuming getManager() provides the singleton manager instance
fmt.Printf("Uninstalling %s...\n", minerType)
if err := manager.UninstallMiner(context.Background(), minerType); err != nil {
@ -24,7 +24,8 @@ var uninstallCmd = &cobra.Command{
fmt.Printf("%s uninstalled successfully.\n", minerType)
// updateDoctorCache() // refreshes the cached install status after uninstalling a miner
// The doctor cache is implicitly updated by the manager's actions,
// but an explicit cache update can still be beneficial.
fmt.Println("Updating installation cache...")
if err := updateDoctorCache(); err != nil {
fmt.Printf("Warning: failed to update doctor cache: %v\n", err)

View file

@ -7,59 +7,58 @@ import (
"path/filepath"
"strings"
"forge.lthn.ai/Snider/Mining/pkg/mining"
"github.com/Masterminds/semver/v3"
"github.com/Snider/Mining/pkg/mining"
"github.com/adrg/xdg"
"github.com/spf13/cobra"
)
// validateUpdateCacheFilePath("/home/alice/.config/lethean-desktop/miners/installed-miners.json") returns nil.
// validateUpdateCacheFilePath("/tmp/installed-miners.json") rejects paths outside XDG_CONFIG_HOME.
func validateUpdateCacheFilePath(cacheFilePath string) error {
// validateUpdateConfigPath validates that a config path is within the expected XDG config directory
func validateUpdateConfigPath(configPath string) error {
expectedBase := filepath.Join(xdg.ConfigHome, "lethean-desktop")
cleanPath := filepath.Clean(cacheFilePath)
cleanPath := filepath.Clean(configPath)
if !strings.HasPrefix(cleanPath, expectedBase+string(os.PathSeparator)) && cleanPath != expectedBase {
return fmt.Errorf("invalid config path: must be within %s", expectedBase)
}
return nil
}
// rootCmd.AddCommand(updateCmd) exposes `mining update`, which compares the cached miner version against the latest release.
// updateCmd represents the update command
var updateCmd = &cobra.Command{
Use: "update",
Short: "Check for updates to installed miners",
Long: `Checks for new versions of all installed miners and notifies you if an update is available.`,
RunE: func(_ *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println("Checking for updates...")
homeDir, err := os.UserHomeDir()
if err != nil {
return fmt.Errorf("could not get home directory: %w", err)
}
installedMinerPointerPath := filepath.Join(homeDir, installedMinersPointerFileName)
signpostPath := filepath.Join(homeDir, signpostFilename)
if _, err := os.Stat(installedMinerPointerPath); os.IsNotExist(err) {
if _, err := os.Stat(signpostPath); os.IsNotExist(err) {
fmt.Println("No miners installed yet. Run 'doctor' or 'install' first.")
return nil
}
cachePointerBytes, err := os.ReadFile(installedMinerPointerPath)
configPathBytes, err := os.ReadFile(signpostPath)
if err != nil {
return fmt.Errorf("could not read signpost file: %w", err)
}
cacheFilePath := strings.TrimSpace(string(cachePointerBytes))
configPath := strings.TrimSpace(string(configPathBytes))
// validateUpdateCacheFilePath("/home/alice/.config/lethean-desktop/miners/installed-miners.json") blocks path traversal outside XDG_CONFIG_HOME.
if err := validateUpdateCacheFilePath(cacheFilePath); err != nil {
// Security: Validate that the config path is within the expected directory
if err := validateUpdateConfigPath(configPath); err != nil {
return fmt.Errorf("security error: %w", err)
}
cacheBytes, err := os.ReadFile(cacheFilePath)
cacheBytes, err := os.ReadFile(configPath)
if err != nil {
return fmt.Errorf("could not read cache file from %s: %w", cacheFilePath, err)
return fmt.Errorf("could not read cache file from %s: %w", configPath, err)
}
// mining.SystemInfo{} matches the JSON shape that `mining doctor` writes to /home/alice/.config/lethean-desktop/miners/installed-miners.json.
// Fix: Use SystemInfo type (matches what doctor.go saves)
var systemInfo mining.SystemInfo
if err := json.Unmarshal(cacheBytes, &systemInfo); err != nil {
return fmt.Errorf("could not parse cache file: %w", err)
@ -77,7 +76,7 @@ var updateCmd = &cobra.Command{
minerName = "xmrig"
miner = mining.NewXMRigMiner()
} else {
continue // skip miners that do not have an updater yet
continue // Skip unknown miners
}
fmt.Printf("Checking %s... ", minerName)

View file

@ -4,8 +4,8 @@ import (
"fmt"
"os"
"forge.lthn.ai/Snider/Mining/cmd/mining/cmd"
_ "forge.lthn.ai/Snider/Mining/docs"
"github.com/Snider/Mining/cmd/mining/cmd"
_ "github.com/Snider/Mining/docs"
)
// @title Mining API
@ -14,7 +14,7 @@ import (
// @host localhost:8080
// @BasePath /api/v1/mining
func main() {
// go run ./cmd/mining starts the HTTP API by default when no subcommand is provided.
// If no command is provided, default to "serve"
if len(os.Args) == 1 {
os.Args = append(os.Args, "serve")
}

77
go.mod
View file

@ -1,46 +1,42 @@
module forge.lthn.ai/Snider/Mining
module github.com/Snider/Mining
go 1.25.0
require (
forge.lthn.ai/Snider/Borg v0.2.1
forge.lthn.ai/Snider/Poindexter v0.0.2
github.com/Masterminds/semver/v3 v3.4.0
github.com/Masterminds/semver/v3 v3.3.1
github.com/Snider/Borg v0.0.2
github.com/Snider/Poindexter v0.0.0-20251229183216-e182d4f49741
github.com/adrg/xdg v0.5.3
github.com/ckanthony/gin-mcp v0.0.0-20251107113615-3c631c4fa9f4
github.com/gin-contrib/cors v1.7.6
github.com/gin-gonic/gin v1.11.0
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/mattn/go-sqlite3 v1.14.34
github.com/shirou/gopsutil/v4 v4.26.1
github.com/spf13/cobra v1.10.2
github.com/mattn/go-sqlite3 v1.14.32
github.com/shirou/gopsutil/v4 v4.25.10
github.com/spf13/cobra v1.10.1
github.com/swaggo/files v1.0.1
github.com/swaggo/gin-swagger v1.6.1
github.com/swaggo/gin-swagger v1.6.0
github.com/swaggo/swag v1.16.6
golang.org/x/text v0.34.0
)
require (
forge.lthn.ai/Snider/Enchantrix v0.0.4 // indirect
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic v1.15.0 // indirect
github.com/bytedance/sonic/loader v0.5.0 // indirect
github.com/cloudflare/circl v1.6.3 // indirect
github.com/Snider/Enchantrix v0.0.2 // indirect
github.com/bytedance/sonic v1.14.0 // indirect
github.com/bytedance/sonic/loader v0.3.0 // indirect
github.com/ckanthony/gin-mcp v0.0.0-20251107113615-3c631c4fa9f4 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/ebitengine/purego v0.9.1 // indirect
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
github.com/gin-contrib/gzip v1.2.5 // indirect
github.com/ebitengine/purego v0.9.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.22.4 // indirect
github.com/go-openapi/jsonpointer v0.22.1 // indirect
github.com/go-openapi/jsonreference v0.21.2 // indirect
github.com/go-openapi/spec v0.22.0 // indirect
github.com/go-openapi/swag/conv v0.25.1 // indirect
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
github.com/go-openapi/swag/jsonname v0.25.1 // indirect
github.com/go-openapi/swag/jsonutils v0.25.1 // indirect
github.com/go-openapi/swag/loading v0.25.1 // indirect
github.com/go-openapi/swag/stringutils v0.25.1 // indirect
@ -48,39 +44,38 @@ require (
github.com/go-openapi/swag/yamlutils v0.25.1 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.30.1 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/goccy/go-yaml v1.19.2 // indirect
github.com/goccy/go-yaml v1.18.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.18.4 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/quic-go/quic-go v0.59.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/tklauser/go-sysconf v0.3.16 // indirect
github.com/tklauser/numcpus v0.11.0 // indirect
github.com/spf13/pflag v1.0.9 // indirect
github.com/tklauser/go-sysconf v0.3.15 // indirect
github.com/tklauser/numcpus v0.10.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.3.1 // indirect
github.com/ugorji/go/codec v1.3.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.uber.org/mock v0.6.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/arch v0.23.0 // indirect
golang.org/x/crypto v0.48.0 // indirect
golang.org/x/mod v0.33.0 // indirect
golang.org/x/net v0.50.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.41.0 // indirect
golang.org/x/tools v0.42.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
golang.org/x/arch v0.20.0 // indirect
golang.org/x/crypto v0.44.0 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/tools v0.38.0 // indirect
google.golang.org/protobuf v1.36.9 // indirect
)

157
go.sum
View file

@ -1,42 +1,39 @@
forge.lthn.ai/Snider/Borg v0.2.1 h1:Uf/YtUJLL8jlxTCjvP4J+5GHe3LLeALGtbh7zj8d8Qc=
forge.lthn.ai/Snider/Borg v0.2.1/go.mod h1:MVfolb7F6/A2LOIijcbBhWImu5db5NSMcSjvShMoMCA=
forge.lthn.ai/Snider/Enchantrix v0.0.4 h1:biwpix/bdedfyc0iVeK15awhhJKH6TEMYOTXzHXx5TI=
forge.lthn.ai/Snider/Enchantrix v0.0.4/go.mod h1:OGCwuVeZPq3OPe2h6TX/ZbgEjHU6B7owpIBeXQGbSe0=
forge.lthn.ai/Snider/Poindexter v0.0.2 h1:XXzSKFjO6MeftQAnB9qR+IkOTp9f57Tg4sIx8Qzi/II=
forge.lthn.ai/Snider/Poindexter v0.0.2/go.mod h1:ddzGia98k3HKkR0gl58IDzqz+MmgW2cQJOCNLfuWPpo=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/Snider/Borg v0.0.2 h1:B/kWoRkcOHu/f772+vCgNWCVT8I1N/yPwLs/2RCGW0E=
github.com/Snider/Borg v0.0.2/go.mod h1:sV4xlUbC3vdWi1eLFnOgd62FcEpg6bRVKrauonvWYNs=
github.com/Snider/Enchantrix v0.0.2 h1:ExZQiBhfS/p/AHFTKhY80TOd+BXZjK95EzByAEgwvjs=
github.com/Snider/Enchantrix v0.0.2/go.mod h1:CtFcLAvnDT1KcuF1JBb/DJj0KplY8jHryO06KzQ1hsQ=
github.com/Snider/Poindexter v0.0.0-20251229183216-e182d4f49741 h1:bWKpK7msUmlhG+ZzekG6VgLt57dCWc0BZQJ8tUR1UKY=
github.com/Snider/Poindexter v0.0.0-20251229183216-e182d4f49741/go.mod h1:nhgkbg4zWA4AS2Ga3RmcvdsyiI9TdxvSqe5EVBSb3Hk=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE=
github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k=
github.com/bytedance/sonic/loader v0.5.0 h1:gXH3KVnatgY7loH5/TkeVyXPfESoqSBSBEiDd5VjlgE=
github.com/bytedance/sonic/loader v0.5.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ=
github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA=
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/ckanthony/gin-mcp v0.0.0-20251107113615-3c631c4fa9f4 h1:V0tltxRKT8DZRXcn2ErLy4alznOBzWWmx4gnQbic9jE=
github.com/ckanthony/gin-mcp v0.0.0-20251107113615-3c631c4fa9f4/go.mod h1:eaCpaNzFM2bfCUXMPxbLFwI/ar67gAaVTNrltASGeoc=
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM=
github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/ebitengine/purego v0.9.0 h1:mh0zpKBIXDceC63hpvPuGLiJ8ZAa3DfrFTudmfi8A4k=
github.com/ebitengine/purego v0.9.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/gin-contrib/cors v1.7.6 h1:3gQ8GMzs1Ylpf70y8bMw4fVpycXIeX1ZemuSQIsnQQY=
github.com/gin-contrib/cors v1.7.6/go.mod h1:Ulcl+xN4jel9t1Ry8vqph23a60FwH9xVLd+3ykmTjOk=
github.com/gin-contrib/gzip v1.2.5 h1:fIZs0S+l17pIu1P5XRJOo/YNqfIuPCrZZ3TWB7pjckI=
github.com/gin-contrib/gzip v1.2.5/go.mod h1:aomRgR7ftdZV3uWY0gW/m8rChfxau0n8YVvwlOHONzw=
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
@ -44,8 +41,8 @@ github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU=
github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ=
github.com/go-openapi/spec v0.22.0 h1:xT/EsX4frL3U09QviRIZXvkh80yibxQmtoEvyqug0Tw=
@ -53,8 +50,8 @@ github.com/go-openapi/spec v0.22.0/go.mod h1:K0FhKxkez8YNS94XzF8YKEMULbFrRw4m15i
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
github.com/go-openapi/swag/conv v0.25.1 h1:+9o8YUg6QuqqBM5X6rYL/p1dpWeZRhoIt9x7CCP+he0=
github.com/go-openapi/swag/conv v0.25.1/go.mod h1:Z1mFEGPfyIKPu0806khI3zF+/EUXde+fdeksUl2NiDs=
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU=
github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo=
github.com/go-openapi/swag/jsonutils v0.25.1 h1:AihLHaD0brrkJoMqEZOBNzTLnk81Kg9cWr+SPtxtgl8=
github.com/go-openapi/swag/jsonutils v0.25.1/go.mod h1:JpEkAjxQXpiaHmRO04N1zE4qbUEg3b7Udll7AMGTNOo=
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1 h1:DSQGcdB6G0N9c/KhtpYc71PzzGEIc/fZ1no35x4/XBY=
@ -67,20 +64,19 @@ github.com/go-openapi/swag/typeutils v0.25.1 h1:rD/9HsEQieewNt6/k+JBwkxuAHktFtH3
github.com/go-openapi/swag/typeutils v0.25.1/go.mod h1:9McMC/oCdS4BKwk2shEB7x17P6HmMmA6dQRtAkSnNb8=
github.com/go-openapi/swag/yamlutils v0.25.1 h1:mry5ez8joJwzvMbaTGLhw8pXUnhDK91oSJLDPF1bmGk=
github.com/go-openapi/swag/yamlutils v0.25.1/go.mod h1:cm9ywbzncy3y6uPm/97ysW8+wZ09qsks+9RS8fLWKqg=
github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy06ntQJp0BBvFG0w=
github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM=
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM=
github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -92,8 +88,6 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@ -102,12 +96,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k=
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.34 h1:3NtcvcUnFBPsuRcno8pUtupspG/GM+9nZ88zgJcp6Zk=
github.com/mattn/go-sqlite3 v1.14.34/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -115,79 +109,75 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shirou/gopsutil/v4 v4.26.1 h1:TOkEyriIXk2HX9d4isZJtbjXbEjf5qyKPAzbzY0JWSo=
github.com/shirou/gopsutil/v4 v4.26.1/go.mod h1:medLI9/UNAb0dOI9Q3/7yWSqKkj00u+1tgY8nvv41pc=
github.com/shirou/gopsutil/v4 v4.25.10 h1:at8lk/5T1OgtuCp+AwrDofFRjnvosn0nkN2OLQ6g8tA=
github.com/shirou/gopsutil/v4 v4.25.10/go.mod h1:+kSwyC8DRUD9XXEHCAFjK+0nuArFJM0lva+StQAcskM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE=
github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg=
github.com/swaggo/gin-swagger v1.6.1 h1:Ri06G4gc9N4t4k8hekMigJ9zKTFSlqj/9paAQCQs7cY=
github.com/swaggo/gin-swagger v1.6.1/go.mod h1:LQ+hJStHakCWRiK/YNYtJOu4mR2FP+pxLnILT/qNiTw=
github.com/swaggo/gin-swagger v1.6.0 h1:y8sxvQ3E20/RCyrXeFfg60r6H0Z+SwpTjMYsMm+zy8M=
github.com/swaggo/gin-swagger v1.6.0/go.mod h1:BG00cCEy294xtVpyIAHG6+e2Qzj/xKlRdOqDkvq0uzo=
github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI=
github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg=
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY=
github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA=
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/arch v0.23.0 h1:lKF64A2jF6Zd8L0knGltUnegD62JMFBiCPBmQpToHhg=
golang.org/x/arch v0.23.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A=
golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c=
golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -199,8 +189,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@ -208,16 +198,17 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View file

@ -1,289 +0,0 @@
# Lethean Miner Suite
[![License: EUPL-1.2](https://img.shields.io/badge/License-EUPL--1.2-blue.svg)](https://opensource.org/license/eupl-1-2)
[![Platform](https://img.shields.io/badge/platform-linux%20%7C%20macos%20%7C%20windows%20%7C%20freebsd-lightgrey.svg)](https://github.com/letheanVPN/Mining/releases)
High-performance cryptocurrency mining tools. These standalone C++ programs can be used independently or managed through the Mining Platform GUI.
## Components
| Component | Description | Binary |
|-----------|-------------|--------|
| [**core**](core/) | CPU/GPU miner with full algorithm support | `miner` |
| [**proxy**](proxy/) | Stratum proxy for mining farms (100K+ connections) | `miner-proxy` |
| [**cuda**](cuda/) | CUDA plugin for NVIDIA GPUs | `libminer-cuda.so` |
| [**config**](config/) | Configuration generator tool | `miner-config` |
| [**workers**](workers/) | Worker management utilities | `miner-workers` |
| [**heatmap**](heatmap/) | Hardware temperature visualization | `miner-heatmap` |
## Supported Algorithms
### CPU Mining
| Algorithm | Coins |
|-----------|-------|
| RandomX | Monero (XMR), Lethean (LTHN), Wownero (WOW) |
| CryptoNight | Various CN variants |
| GhostRider | Raptoreum (RTM) |
| Argon2 | Chukwa, Ninja |
### GPU Mining (OpenCL/CUDA)
| Algorithm | Coins |
|-----------|-------|
| RandomX | Monero, Lethean |
| KawPow | Ravencoin (RVN), Neoxa |
| ETChash | Ethereum Classic (ETC) |
| ProgPowZ | Zano (ZANO) |
| Blake3 | Decred (DCR) |
## Quick Start
### Download Pre-built Binaries
Download from [Releases](https://github.com/letheanVPN/Mining/releases):
- `miner-linux-x64.tar.gz` - Linux x86_64
- `miner-linux-arm64.tar.gz` - Linux ARM64
- `miner-macos-x64.tar.gz` - macOS Intel
- `miner-macos-arm64.tar.gz` - macOS Apple Silicon
- `miner-windows-x64.zip` - Windows x64
### Run the Miner
```bash
# Basic CPU mining
./miner -o pool.example.com:3333 -u YOUR_WALLET -p x
# With config file (recommended)
./miner -c config.json
# CPU + GPU mining
./miner -c config.json --opencl --cuda
# Show help
./miner --help
```
### Run the Proxy
```bash
# Start proxy for mining farm
./miner-proxy -o pool.example.com:3333 -u YOUR_WALLET -b 0.0.0.0:3333
# With config file
./miner-proxy -c proxy-config.json
```
## Building from Source
### Prerequisites
**All Platforms:**
- CMake 3.10+
- C++11 compatible compiler
- libuv
- OpenSSL (for TLS support)
**Linux:**
```bash
sudo apt-get install build-essential cmake libuv1-dev libssl-dev libhwloc-dev
```
**macOS:**
```bash
brew install cmake libuv openssl hwloc
```
**Windows:**
- Visual Studio 2019+ with C++ workload
- vcpkg for dependencies
### Build Miner Core
```bash
cd core
mkdir build && cd build
# Standard build
cmake ..
cmake --build . --config Release -j$(nproc)
# With GPU support
cmake .. -DWITH_OPENCL=ON -DWITH_CUDA=ON
# Static build (portable)
cmake .. -DBUILD_STATIC=ON
# Minimal build (RandomX only)
cmake .. -DWITH_ARGON2=OFF -DWITH_KAWPOW=OFF -DWITH_GHOSTRIDER=OFF
```
### Build Proxy
```bash
cd proxy
mkdir build && cd build
cmake ..
cmake --build . --config Release -j$(nproc)
```
### Build All Components
From the repository root:
```bash
make build-miner # Build miner core
make build-miner-proxy # Build proxy
make build-miner-all # Build all components
```
## Configuration
### Miner Config (config.json)
```json
{
"autosave": true,
"cpu": true,
"opencl": false,
"cuda": false,
"pools": [
{
"url": "stratum+tcp://pool.example.com:3333",
"user": "YOUR_WALLET",
"pass": "x",
"keepalive": true,
"tls": false
}
],
"http": {
"enabled": true,
"host": "127.0.0.1",
"port": 8080,
"access-token": null
}
}
```
### Proxy Config (proxy-config.json)
```json
{
"mode": "nicehash",
"pools": [
{
"url": "stratum+tcp://pool.example.com:3333",
"user": "YOUR_WALLET",
"pass": "x"
}
],
"bind": [
{
"host": "0.0.0.0",
"port": 3333
}
],
"http": {
"enabled": true,
"host": "127.0.0.1",
"port": 8081
}
}
```
## HTTP API
Both miner and proxy expose HTTP APIs for monitoring and control.
### Miner API (default: http://127.0.0.1:8080)
| Endpoint | Description |
|----------|-------------|
| `GET /1/summary` | Mining statistics |
| `GET /1/threads` | Per-thread hashrates |
| `GET /1/config` | Current configuration |
| `PUT /1/config` | Update configuration |
### Proxy API (default: http://127.0.0.1:8081)
| Endpoint | Description |
|----------|-------------|
| `GET /1/summary` | Proxy statistics |
| `GET /1/workers` | Connected workers |
| `GET /1/config` | Current configuration |
## Performance Tuning
### CPU Mining
```bash
# Enable huge pages (Linux)
sudo sysctl -w vm.nr_hugepages=1280
# Or permanent (add to /etc/sysctl.conf)
echo "vm.nr_hugepages=1280" | sudo tee -a /etc/sysctl.conf
# Enable 1GB pages (better performance)
sudo ./scripts/enable_1gb_pages.sh
```
### GPU Mining
```bash
# AMD GPUs - increase virtual memory
# Add to /etc/security/limits.conf:
# * soft memlock unlimited
# * hard memlock unlimited
# NVIDIA GPUs - optimize power
nvidia-smi -pl 120 # Set power limit
```
## Testing
```bash
# Run miner tests
cd core/build
ctest --output-on-failure
# Run proxy tests
cd proxy/build
./tests/unit_tests
./tests/integration_tests
```
## Directory Structure
```
miner/
├── core/ # Main miner (CPU/GPU)
│ ├── src/
│ │ ├── backend/ # CPU, OpenCL, CUDA backends
│ │ ├── crypto/ # Algorithm implementations
│ │ ├── base/ # Network, I/O, utilities
│ │ └── core/ # Configuration, controller
│ ├── scripts/ # Build and setup scripts
│ └── CMakeLists.txt
├── proxy/ # Stratum proxy
│ ├── src/
│ │ ├── proxy/ # Proxy core (splitters, events)
│ │ └── base/ # Shared base code
│ ├── tests/ # Unit and integration tests
│ └── CMakeLists.txt
├── cuda/ # CUDA plugin
├── config/ # Config generator
├── workers/ # Worker utilities
├── heatmap/ # Temperature visualization
├── deps/ # Dependency build scripts
└── README.md # This file
```
## License
Copyright (c) 2025 Lethean <https://lethean.io>
Licensed under the European Union Public License 1.2 (EUPL-1.2).
See [LICENSE](../LICENSE) for details.
## Related Projects
- [Mining Platform](../) - GUI management platform
- [Lethean](https://lethean.io) - Lethean Network

View file

@ -42,7 +42,7 @@
"history": "4.7.2",
"html-webpack-plugin": "3.1.0",
"immutability-helper": "2.6.6",
"lodash": "4.17.23",
"lodash": "4.17.15",
"random-id": "0.0.2",
"react": "16.2.0",
"react-autosize-textarea": "3.0.2",

View file

@ -1,192 +1,38 @@
# Miner
# XMRig
High-performance, cross-platform CPU/GPU cryptocurrency miner supporting RandomX, KawPow, CryptoNight, GhostRider, ETChash, ProgPowZ, and Blake3 algorithms.
[![Github All Releases](https://img.shields.io/github/downloads/xmrig/xmrig/total.svg)](https://github.com/xmrig/xmrig/releases)
[![GitHub release](https://img.shields.io/github/release/xmrig/xmrig/all.svg)](https://github.com/xmrig/xmrig/releases)
[![GitHub Release Date](https://img.shields.io/github/release-date/xmrig/xmrig.svg)](https://github.com/xmrig/xmrig/releases)
[![GitHub license](https://img.shields.io/github/license/xmrig/xmrig.svg)](https://github.com/xmrig/xmrig/blob/master/LICENSE)
[![GitHub stars](https://img.shields.io/github/stars/xmrig/xmrig.svg)](https://github.com/xmrig/xmrig/stargazers)
[![GitHub forks](https://img.shields.io/github/forks/xmrig/xmrig.svg)](https://github.com/xmrig/xmrig/network)
## Features
XMRig is a high performance, open source, cross platform RandomX, KawPow, CryptoNight and [GhostRider](https://github.com/xmrig/xmrig/tree/master/src/crypto/ghostrider#readme) unified CPU/GPU miner and [RandomX benchmark](https://xmrig.com/benchmark). Official binaries are available for Windows, Linux, macOS and FreeBSD.
### Mining Backends
## Mining backends
- **CPU** (x86/x64/ARMv7/ARMv8/RISC-V)
- **OpenCL** for AMD GPUs
- **CUDA** for NVIDIA GPUs via external [CUDA plugin](../cuda/)
- **OpenCL** for AMD GPUs.
- **CUDA** for NVIDIA GPUs via external [CUDA plugin](https://github.com/xmrig/xmrig-cuda).
### Supported Algorithms
## Download
* **[Binary releases](https://github.com/xmrig/xmrig/releases)**
* **[Build from source](https://xmrig.com/docs/miner/build)**
| Algorithm | Variants | CPU | GPU |
|-----------|----------|-----|-----|
| RandomX | rx/0, rx/wow, rx/arq, rx/graft, rx/sfx, rx/keva | Yes | Yes |
| CryptoNight | cn/0-2, cn-lite, cn-heavy, cn-pico | Yes | Yes |
| GhostRider | gr | Yes | No |
| Argon2 | chukwa, chukwa2, ninja | Yes | No |
| KawPow | kawpow | No | Yes |
| ETChash | etchash, ethash | No | Yes |
| ProgPowZ | progpowz | No | Yes |
| Blake3 | blake3 | Yes | Yes |
## Usage
The preferred way to configure the miner is the [JSON config file](https://xmrig.com/docs/miner/config) as it is more flexible and human friendly. The [command line interface](https://xmrig.com/docs/miner/command-line-options) does not cover all features, such as mining profiles for different algorithms. Important options can be changed during runtime without miner restart by editing the config file or executing [API](https://xmrig.com/docs/miner/api) calls.
## Quick Start
* **[Wizard](https://xmrig.com/wizard)** helps you create initial configuration for the miner.
* **[Workers](http://workers.xmrig.info)** helps manage your miners via HTTP API.
### Download
## Donations
* Default donation 1% (1 minute in 100 minutes) can be increased via option `donate-level` or disabled in source code.
* XMR: `48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD`
Pre-built binaries are available from [Releases](https://github.com/letheanVPN/Mining/releases).
## Developers
* **[xmrig](https://github.com/xmrig)**
* **[sech1](https://github.com/SChernykh)**
### Usage
```bash
# Basic CPU mining to a pool
./miner -o pool.example.com:3333 -u YOUR_WALLET -p x
# With JSON config (recommended)
./miner -c config.json
# Enable GPU mining
./miner -c config.json --opencl --cuda
# Benchmark mode
./miner --bench=1M
# Show all options
./miner --help
```
### Configuration
The recommended way to configure the miner is via JSON config file:
```json
{
"autosave": true,
"cpu": true,
"opencl": false,
"cuda": false,
"pools": [
{
"url": "stratum+tcp://pool.example.com:3333",
"user": "YOUR_WALLET",
"pass": "x",
"keepalive": true
}
]
}
```
## Building from Source
### Dependencies
**Linux (Ubuntu/Debian):**
```bash
sudo apt-get install git build-essential cmake libuv1-dev libssl-dev libhwloc-dev
```
**Linux (Fedora/RHEL):**
```bash
sudo dnf install git cmake gcc gcc-c++ libuv-devel openssl-devel hwloc-devel
```
**macOS:**
```bash
brew install cmake libuv openssl hwloc
```
**Windows:**
- Visual Studio 2019 or later
- CMake 3.10+
- vcpkg for dependencies
### Build Commands
```bash
mkdir build && cd build
# Standard build
cmake ..
cmake --build . --config Release
# With GPU support
cmake .. -DWITH_OPENCL=ON -DWITH_CUDA=ON
# Static binary
cmake .. -DBUILD_STATIC=ON
# Debug build
cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_DEBUG_LOG=ON
# Minimal build (reduce binary size)
cmake .. -DWITH_KAWPOW=OFF -DWITH_GHOSTRIDER=OFF -DWITH_ARGON2=OFF
```
### CMake Options
| Option | Default | Description |
|--------|---------|-------------|
| `WITH_HWLOC` | ON | Hardware topology support |
| `WITH_RANDOMX` | ON | RandomX algorithms |
| `WITH_ARGON2` | ON | Argon2 algorithms |
| `WITH_KAWPOW` | ON | KawPow (GPU only) |
| `WITH_ETCHASH` | ON | ETChash/Ethash (GPU only) |
| `WITH_PROGPOWZ` | ON | ProgPowZ (GPU only) |
| `WITH_BLAKE3DCR` | ON | Blake3 for Decred |
| `WITH_GHOSTRIDER` | ON | GhostRider algorithm |
| `WITH_OPENCL` | ON | AMD GPU support |
| `WITH_CUDA` | ON | NVIDIA GPU support |
| `WITH_HTTP` | ON | HTTP API |
| `WITH_TLS` | ON | SSL/TLS support |
| `WITH_ASM` | ON | Assembly optimizations |
| `WITH_MSR` | ON | MSR mod for CPU tuning |
| `BUILD_STATIC` | OFF | Static binary |
| `BUILD_TESTS` | OFF | Build unit tests |
## HTTP API
When built with `-DWITH_HTTP=ON`, the miner exposes a REST API:
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/1/summary` | GET | Mining statistics |
| `/1/threads` | GET | Per-thread details |
| `/1/config` | GET | Current configuration |
| `/1/config` | PUT | Update configuration |
Example:
```bash
curl http://127.0.0.1:8080/1/summary
```
## Performance Optimization
### Huge Pages (Linux)
```bash
# Temporary
sudo sysctl -w vm.nr_hugepages=1280
# Permanent
echo "vm.nr_hugepages=1280" | sudo tee -a /etc/sysctl.conf
# 1GB pages (best performance)
sudo ./scripts/enable_1gb_pages.sh
```
### MSR Mod (Intel/AMD CPUs)
The miner can automatically apply MSR tweaks for better RandomX performance. Requires root/admin privileges.
```bash
sudo ./miner -c config.json
```
## Testing
```bash
# Build with tests
cmake .. -DBUILD_TESTS=ON
cmake --build .
# Run tests
ctest --output-on-failure
```
## License
Copyright (c) 2025 Lethean <https://lethean.io>
Licensed under the European Union Public License 1.2 (EUPL-1.2).
## Contacts
* support@xmrig.com
* [reddit](https://www.reddit.com/user/XMRig/)
* [twitter](https://twitter.com/xmrig_dev)

View file

@ -22,15 +22,6 @@ else()
list(FILTER MINER_SOURCES EXCLUDE REGEX ".*_win\\.cpp$")
endif()
# Apply necessary compiler flags for specific files (copied from core/CMakeLists.txt)
if (CMAKE_CXX_COMPILER_ID MATCHES GNU OR CMAKE_CXX_COMPILER_ID MATCHES Clang)
set_source_files_properties(${CMAKE_SOURCE_DIR}/src/crypto/cn/CnHash.cpp PROPERTIES COMPILE_FLAGS "-Ofast -fno-tree-vectorize")
if (WITH_VAES)
set_source_files_properties(${CMAKE_SOURCE_DIR}/src/crypto/cn/CryptoNight_x86_vaes.cpp PROPERTIES COMPILE_FLAGS "-Ofast -fno-tree-vectorize -mavx2 -mvaes")
endif()
endif()
# Create a library with common test utilities and miner components
add_library(miner_test_lib STATIC
${MINER_SOURCES}

View file

@ -1,216 +1,105 @@
# Miner Proxy
# XMRig Proxy
[![Github All Releases](https://img.shields.io/github/downloads/xmrig/xmrig-proxy/total.svg)](https://github.com/xmrig/xmrig-proxy/releases)
[![GitHub release](https://img.shields.io/github/release/xmrig/xmrig-proxy/all.svg)](https://github.com/xmrig/xmrig-proxy/releases)
[![GitHub Release Date](https://img.shields.io/github/release-date-pre/xmrig/xmrig-proxy.svg)](https://github.com/xmrig/xmrig-proxy/releases)
[![GitHub license](https://img.shields.io/github/license/xmrig/xmrig-proxy.svg)](https://github.com/xmrig/xmrig-proxy/blob/master/LICENSE)
[![GitHub stars](https://img.shields.io/github/stars/xmrig/xmrig-proxy.svg)](https://github.com/xmrig/xmrig-proxy/stargazers)
[![GitHub forks](https://img.shields.io/github/forks/xmrig/xmrig-proxy.svg)](https://github.com/xmrig/xmrig-proxy/network)
High-performance stratum protocol proxy for cryptocurrency mining farms. Efficiently manages 100K+ miner connections while maintaining minimal pool-side connections through nonce splitting.
This is an extremely high-performance proxy for the CryptoNote stratum protocol (including Monero and others).
It can efficiently manage over 100K connections on an inexpensive, low-memory virtual machine (with just 1024 MB of RAM).
The proxy significantly reduces the number of connections to the pool, decreasing 100,000 workers down to just 391 on the pool side.
The codebase is shared with the [XMRig](https://github.com/xmrig/xmrig) miner.
## Features
## Compatibility
Compatible with any pool and any miner that supports NiceHash.
- Handle 100K+ concurrent miner connections
- Reduce pool connections (100,000 miners → ~400 pool connections)
- NiceHash compatibility mode
- TLS/SSL support for secure connections
- HTTP API for monitoring
- Low memory footprint (~1GB RAM for 100K connections)
## Quick Start
### Download
Pre-built binaries are available from [Releases](https://github.com/letheanVPN/Mining/releases).
### Usage
```bash
# Basic usage
./miner-proxy -o pool.example.com:3333 -u YOUR_WALLET -b 0.0.0.0:3333
# With config file (recommended)
./miner-proxy -c config.json
# Test configuration
./miner-proxy --dry-run -c config.json
# Show all options
./miner-proxy --help
```
### Command Line Options
## Why?
This proxy is designed to handle donation traffic from XMRig. No other solution works well with high connection and disconnection rates.
## Download
* Binary releases: https://github.com/xmrig/xmrig-proxy/releases
* Git tree: https://github.com/xmrig/xmrig-proxy.git
* Clone with `git clone https://github.com/xmrig/xmrig-proxy.git` :hammer: [Build instructions](https://xmrig.com/docs/proxy).
## Usage
:boom: If you are using Linux and need to manage over **1000 connections**, you must [increase the limits on open files](https://github.com/xmrig/xmrig-proxy/wiki/Ubuntu-setup).
### Options
```
Network:
-o, --url=URL URL of mining server
-a, --algo=ALGO mining algorithm
-a, --algo=ALGO mining algorithm https://xmrig.com/docs/algorithms
--coin=COIN specify coin instead of algorithm
-u, --user=USERNAME username for mining server
-p, --pass=PASSWORD password for mining server
-k, --keepalive send keepalive packets
--tls enable SSL/TLS support
-O, --userpass=U:P username:password pair for mining server
-x, --proxy=HOST:PORT connect through a SOCKS5 proxy
-k, --keepalive send keepalived packet for prevent timeout (needs pool support)
--rig-id=ID rig identifier for pool-side statistics (needs pool support)
--tls enable SSL/TLS support (needs pool support)
--tls-fingerprint=HEX pool TLS certificate fingerprint for strict certificate pinning
--dns-ipv6 prefer IPv6 records from DNS responses
--dns-ttl=N N seconds (default: 30) TTL for internal DNS cache
--daemon use daemon RPC instead of pool for solo mining
--daemon-zmq-port daemon's zmq-pub port number (only use it if daemon has it enabled)
--daemon-poll-interval=N daemon poll interval in milliseconds (default: 1000)
--daemon-job-timeout=N daemon job timeout in milliseconds (default: 15000)
--self-select=URL self-select block templates from URL
--submit-to-origin also submit solution back to self-select URL
-r, --retries=N number of times to retry before switch to backup server (default: 5)
-R, --retry-pause=N time to pause between retries (default: 5)
--user-agent set custom user-agent string for pool
--donate-level=N donate level, default 0%%
Proxy:
-b, --bind=ADDR bind to specified address (e.g., "0.0.0.0:3333")
-m, --mode=MODE proxy mode: nicehash (default) or simple
--custom-diff=N override pool difficulty
--access-password=P password to restrict proxy access
Options:
-b, --bind=ADDR bind to specified address, example "0.0.0.0:3333"
-m, --mode=MODE proxy mode, nicehash (default) or simple
--custom-diff=N override pool diff
--custom-diff-stats calculate stats using custom diff shares instead of pool shares
--reuse-timeout=N timeout in seconds for reuse pool connections in simple mode
--no-workers disable per worker statistics
--access-password=P set password to restrict connections to the proxy
--no-algo-ext disable "algo" protocol extension
API:
--api-worker-id=ID custom worker-id for API
--api-id=ID custom instance ID for API
--http-host=HOST bind host for HTTP API (default: 127.0.0.1)
--http-port=N bind port for HTTP API
--http-access-token=T access token for HTTP API
--http-no-restricted enable full remote access to HTTP API (only if access token set)
TLS:
--tls-bind=ADDR bind with TLS enabled
--tls-cert=FILE TLS certificate file (PEM)
--tls-cert-key=FILE TLS private key file (PEM)
--tls-bind=ADDR bind to specified address with enabled TLS
--tls-gen=HOSTNAME generate TLS certificate for specific hostname
--tls-cert=FILE load TLS certificate chain from a file in the PEM format
--tls-cert-key=FILE load TLS certificate private key from a file in the PEM format
--tls-dhparam=FILE load DH parameters for DHE ciphers from a file in the PEM format
--tls-protocols=N enable specified TLS protocols, example: "TLSv1 TLSv1.1 TLSv1.2 TLSv1.3"
--tls-ciphers=S set list of available ciphers (TLSv1.2 and below)
--tls-ciphersuites=S set list of available TLSv1.3 ciphersuites
Logging:
-l, --log-file=FILE log all output to file
-A, --access-log-file=FILE log worker access to file
-l, --log-file=FILE log all output to a file
-A --access-log-file=FILE log all workers access to a file
--no-color disable colored output
--verbose verbose output
Misc:
-c, --config=FILE load JSON configuration file
-B, --background run in background
-V, --version show version
-h, --help show help
-c, --config=FILE load a JSON-format configuration file
-B, --background run the proxy in the background
-V, --version output version information and exit
-h, --help display this help and exit
--dry-run test configuration and exit
```
## Configuration
## Donations
### JSON Config (config.json)
The default donation fee is 2%, which can be reduced to 1% or completely disabled using the `donate-level` option. This fee applies only when you utilize more than 256 miners.
```json
{
"mode": "nicehash",
"pools": [
{
"url": "stratum+tcp://pool.example.com:3333",
"user": "YOUR_WALLET",
"pass": "x",
"keepalive": true
}
],
"bind": [
{
"host": "0.0.0.0",
"port": 3333
},
{
"host": "0.0.0.0",
"port": 3334,
"tls": true
}
],
"http": {
"enabled": true,
"host": "127.0.0.1",
"port": 8081,
"access-token": "your-secret-token"
},
"tls": {
"cert": "/path/to/cert.pem",
"cert-key": "/path/to/key.pem"
},
"access-password": null,
"workers": true,
"verbose": false
}
```
* XMR: `48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD`
### Proxy Modes
**NiceHash Mode** (default): Full nonce splitting for maximum efficiency
- Best for large farms with many workers
- Each worker gets unique nonce space
- Maximum reduction in pool connections
**Simple Mode**: Direct passthrough with shared connections
- Simpler setup
- Workers share pool connections
- Good for smaller setups
## Building from Source
### Dependencies
**Linux (Ubuntu/Debian):**
```bash
sudo apt-get install build-essential cmake libuv1-dev libssl-dev
```
**macOS:**
```bash
brew install cmake libuv openssl
```
### Build
```bash
mkdir build && cd build
cmake ..
cmake --build . --config Release
# With debug logging
cmake .. -DWITH_DEBUG_LOG=ON
```
### CMake Options
| Option | Default | Description |
|--------|---------|-------------|
| `WITH_TLS` | ON | SSL/TLS support |
| `WITH_HTTP` | ON | HTTP API |
| `WITH_DEBUG_LOG` | OFF | Debug logging |
| `BUILD_TESTS` | ON | Build unit tests |
## HTTP API
| Endpoint | Description |
|----------|-------------|
| `GET /1/summary` | Proxy statistics |
| `GET /1/workers` | Connected workers list |
| `GET /1/config` | Current configuration |
Example:
```bash
curl http://127.0.0.1:8081/1/summary
```
## High Connection Setup (Linux)
For 1000+ connections, increase file descriptor limits:
```bash
# /etc/security/limits.conf
* soft nofile 1000000
* hard nofile 1000000
# /etc/sysctl.conf
fs.file-max = 1000000
net.core.somaxconn = 65535
net.ipv4.tcp_max_syn_backlog = 65535
```
Then apply:
```bash
sudo sysctl -p
```
## Testing
```bash
cd build
# Run all tests
ctest --output-on-failure
# Run specific test suites
./tests/unit_tests
./tests/integration_tests
# Run with verbose output
./tests/unit_tests --gtest_verbose
```
## License
Copyright (c) 2025 Lethean <https://lethean.io>
Licensed under the European Union Public License 1.2 (EUPL-1.2).
## Contacts
* support@xmrig.com
* [X](https://x.com/xmrig_dev)

File diff suppressed because it is too large Load diff

View file

@ -57,7 +57,7 @@
"sass-loader": "^11.0.1",
"webpack": "^5.35.1",
"webpack-cli": "^4.6.0",
"webpack-dev-server": "^5.2.2",
"webpack-dev-server": "^3.11.2",
"webpack-subresource-integrity": "^1.5.2",
"whatwg-fetch": "^3.6.2"
}

View file

@ -2,6 +2,7 @@ package database
import (
"database/sql"
"fmt"
"os"
"path/filepath"
"sync"
@ -11,41 +12,23 @@ import (
_ "github.com/mattn/go-sqlite3"
)
// databaseError("open database", err) // => "database: open database: <cause>"
func databaseError(scope string, cause error) error {
if cause == nil {
return nil
}
return &databaseErr{scope: scope, cause: cause}
}
type databaseErr struct {
scope string
cause error
}
func (e *databaseErr) Error() string {
return "database: " + e.scope + ": " + e.cause.Error()
}
func (e *databaseErr) Unwrap() error {
return e.cause
}
// database := globalDatabase // check before use; nil means not initialised
// DB is the global database instance
var (
globalDatabase *sql.DB
databaseMutex sync.RWMutex
db *sql.DB
dbMu sync.RWMutex
)
// database.Config{Enabled: true, Path: "/data/mining.db", RetentionDays: 30}
// Config holds database configuration options
type Config struct {
Enabled bool `json:"enabled"`
Path string `json:"path,omitempty"`
RetentionDays int `json:"retentionDays,omitempty"`
// Enabled determines if database persistence is active
Enabled bool `json:"enabled"`
// Path is the database file path (optional, uses default if empty)
Path string `json:"path,omitempty"`
// RetentionDays is how long to keep historical data (default 30)
RetentionDays int `json:"retentionDays,omitempty"`
}
// configuration := defaultConfig() // Config{Enabled: true, RetentionDays: 30}
// defaultConfig returns the default database configuration
func defaultConfig() Config {
return Config{
Enabled: true,
@ -54,25 +37,25 @@ func defaultConfig() Config {
}
}
// path, err := defaultDBPath() // "~/.local/share/lethean-desktop/mining.db"
// defaultDBPath returns the default database file path
func defaultDBPath() (string, error) {
dataDir := filepath.Join(xdg.DataHome, "lethean-desktop")
if err := os.MkdirAll(dataDir, 0755); err != nil {
return "", databaseError("create data directory", err)
return "", fmt.Errorf("failed to create data directory: %w", err)
}
return filepath.Join(dataDir, "mining.db"), nil
}
// database.Initialize(database.Config{Enabled: true, Path: "/data/mining.db", RetentionDays: 30})
func Initialize(config Config) error {
databaseMutex.Lock()
defer databaseMutex.Unlock()
// Initialize opens the database connection and creates tables
func Initialize(cfg Config) error {
dbMu.Lock()
defer dbMu.Unlock()
if !config.Enabled {
if !cfg.Enabled {
return nil
}
dbPath := config.Path
dbPath := cfg.Path
if dbPath == "" {
var err error
dbPath, err = defaultDBPath()
@ -82,47 +65,50 @@ func Initialize(config Config) error {
}
var err error
globalDatabase, err = sql.Open("sqlite3", dbPath+"?_journal=WAL&_timeout=5000")
db, err = sql.Open("sqlite3", dbPath+"?_journal=WAL&_timeout=5000")
if err != nil {
return databaseError("open database", err)
return fmt.Errorf("failed to open database: %w", err)
}
globalDatabase.SetMaxOpenConns(1) // SQLite only supports one writer
globalDatabase.SetMaxIdleConns(1)
globalDatabase.SetConnMaxLifetime(time.Hour)
// Set connection pool settings
db.SetMaxOpenConns(1) // SQLite only supports one writer
db.SetMaxIdleConns(1)
db.SetConnMaxLifetime(time.Hour)
// Create tables
if err := createTables(); err != nil {
closingDB := globalDatabase
globalDatabase = nil
// Nil out global before closing to prevent use of closed connection
closingDB := db
db = nil
closingDB.Close()
return databaseError("create tables", err)
return fmt.Errorf("failed to create tables: %w", err)
}
return nil
}
// if err := database.Close(); err != nil { logging.Warn("close failed", ...) }
// Close closes the database connection
func Close() error {
databaseMutex.Lock()
defer databaseMutex.Unlock()
dbMu.Lock()
defer dbMu.Unlock()
if globalDatabase == nil {
if db == nil {
return nil
}
err := globalDatabase.Close()
globalDatabase = nil
err := db.Close()
db = nil
return err
}
// if isInitialized() { database.Cleanup(30) }
// isInitialized returns true if the database is ready
func isInitialized() bool {
databaseMutex.RLock()
defer databaseMutex.RUnlock()
return globalDatabase != nil
dbMu.RLock()
defer dbMu.RUnlock()
return db != nil
}
// if err := createTables(); err != nil { return databaseError("create tables", err) }
// createTables creates all required database tables
func createTables() error {
schema := `
-- Hashrate history table for storing miner performance data
@ -161,22 +147,22 @@ func createTables() error {
ON miner_sessions(miner_name, started_at DESC);
`
_, err := globalDatabase.Exec(schema)
_, err := db.Exec(schema)
return err
}
// database.Cleanup(30) // remove hashrate rows older than 30 days
// Cleanup removes old data based on retention settings
func Cleanup(retentionDays int) error {
databaseMutex.RLock()
defer databaseMutex.RUnlock()
dbMu.RLock()
defer dbMu.RUnlock()
if globalDatabase == nil {
if db == nil {
return nil
}
cutoff := time.Now().AddDate(0, 0, -retentionDays)
_, err := globalDatabase.Exec(`
_, err := db.Exec(`
DELETE FROM hashrate_history
WHERE timestamp < ?
`, cutoff)
@ -184,15 +170,15 @@ func Cleanup(retentionDays int) error {
return err
}
// if err := vacuumDB(); err != nil { logging.Warn("vacuum failed", ...) }
// vacuumDB optimizes the database file size
func vacuumDB() error {
databaseMutex.RLock()
defer databaseMutex.RUnlock()
dbMu.RLock()
defer dbMu.RUnlock()
if globalDatabase == nil {
if db == nil {
return nil
}
_, err := globalDatabase.Exec("VACUUM")
_, err := db.Exec("VACUUM")
return err
}

View file

@ -8,19 +8,18 @@ import (
"time"
)
// cleanup := setupRaceTestDB(t)
// defer cleanup()
// setupRaceTestDB creates a fresh database for race testing
func setupRaceTestDB(t *testing.T) func() {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, "race_test.db")
config := Config{
cfg := Config{
Enabled: true,
Path: dbPath,
RetentionDays: 7,
}
if err := Initialize(config); err != nil {
if err := Initialize(cfg); err != nil {
t.Fatalf("Failed to initialize database: %v", err)
}
@ -30,18 +29,19 @@ func setupRaceTestDB(t *testing.T) func() {
}
}
// 10 goroutines × 100 inserts each → no race detector warnings
func TestDatabaseRace_ConcurrentHashrateInserts_Ugly(t *testing.T) {
// TestConcurrentHashrateInserts verifies that concurrent inserts
// don't cause race conditions
func TestConcurrentHashrateInserts(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
// 10 goroutines inserting points concurrently
for i := 0; i < 10; i++ {
waitGroup.Add(1)
wg.Add(1)
go func(minerIndex int) {
defer waitGroup.Done()
defer wg.Done()
minerName := "miner" + string(rune('A'+minerIndex))
minerType := "xmrig"
@ -58,7 +58,7 @@ func TestDatabaseRace_ConcurrentHashrateInserts_Ugly(t *testing.T) {
}(i)
}
waitGroup.Wait()
wg.Wait()
// Verify data was inserted
for i := 0; i < 10; i++ {
@ -73,18 +73,19 @@ func TestDatabaseRace_ConcurrentHashrateInserts_Ugly(t *testing.T) {
}
}
// 1 writer + 5 readers concurrently → no race detector warnings
func TestDatabaseRace_ConcurrentInsertAndQuery_Ugly(t *testing.T) {
// TestConcurrentInsertAndQuery verifies that concurrent reads and writes
// don't cause race conditions
func TestConcurrentInsertAndQuery(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
stop := make(chan struct{})
// Writer goroutine
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for i := 0; ; i++ {
select {
case <-stop:
@ -102,9 +103,9 @@ func TestDatabaseRace_ConcurrentInsertAndQuery_Ugly(t *testing.T) {
// Multiple reader goroutines
for i := 0; i < 5; i++ {
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for j := 0; j < 50; j++ {
select {
case <-stop:
@ -120,23 +121,24 @@ func TestDatabaseRace_ConcurrentInsertAndQuery_Ugly(t *testing.T) {
// Let it run for a bit
time.Sleep(200 * time.Millisecond)
close(stop)
waitGroup.Wait()
wg.Wait()
// Test passes if no race detector warnings
}
// inserts (old + new data) + periodic Cleanup(7) → no race detector warnings
func TestDatabaseRace_ConcurrentInsertAndCleanup_Ugly(t *testing.T) {
// TestConcurrentInsertAndCleanup verifies that cleanup doesn't race
// with ongoing inserts
func TestConcurrentInsertAndCleanup(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
stop := make(chan struct{})
// Continuous inserts
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for i := 0; ; i++ {
select {
case <-stop:
@ -160,9 +162,9 @@ func TestDatabaseRace_ConcurrentInsertAndCleanup_Ugly(t *testing.T) {
}()
// Periodic cleanup
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for i := 0; i < 10; i++ {
select {
case <-stop:
@ -177,13 +179,14 @@ func TestDatabaseRace_ConcurrentInsertAndCleanup_Ugly(t *testing.T) {
// Let it run
time.Sleep(200 * time.Millisecond)
close(stop)
waitGroup.Wait()
wg.Wait()
// Test passes if no race detector warnings
}
// 20 goroutines × 50 GetHashrateStats calls → no race detector warnings
func TestDatabaseRace_ConcurrentStats_Ugly(t *testing.T) {
// TestConcurrentStats verifies that GetHashrateStats can be called
// concurrently without race conditions
func TestConcurrentStats(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
@ -197,13 +200,13 @@ func TestDatabaseRace_ConcurrentStats_Ugly(t *testing.T) {
InsertHashratePoint(nil, minerName, "xmrig", point, ResolutionHigh)
}
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
// Multiple goroutines querying stats
for i := 0; i < 20; i++ {
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for j := 0; j < 50; j++ {
stats, err := GetHashrateStats(minerName)
if err != nil {
@ -216,13 +219,14 @@ func TestDatabaseRace_ConcurrentStats_Ugly(t *testing.T) {
}()
}
waitGroup.Wait()
wg.Wait()
// Test passes if no race detector warnings
}
// 10 readers + 1 writer concurrently on GetAllMinerStats → no race detector warnings
func TestDatabaseRace_ConcurrentGetAllStats_Ugly(t *testing.T) {
// TestConcurrentGetAllStats verifies that GetAllMinerStats can be called
// concurrently without race conditions
func TestConcurrentGetAllStats(t *testing.T) {
cleanup := setupRaceTestDB(t)
defer cleanup()
@ -238,13 +242,13 @@ func TestDatabaseRace_ConcurrentGetAllStats_Ugly(t *testing.T) {
}
}
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
// Multiple goroutines querying all stats
for i := 0; i < 10; i++ {
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for j := 0; j < 30; j++ {
_, err := GetAllMinerStats()
if err != nil {
@ -255,9 +259,9 @@ func TestDatabaseRace_ConcurrentGetAllStats_Ugly(t *testing.T) {
}
// Concurrent inserts
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for i := 0; i < 50; i++ {
point := HashratePoint{
Timestamp: time.Now(),
@ -267,7 +271,7 @@ func TestDatabaseRace_ConcurrentGetAllStats_Ugly(t *testing.T) {
}
}()
waitGroup.Wait()
wg.Wait()
// Test passes if no race detector warnings
}

View file

@ -11,13 +11,13 @@ func setupTestDB(t *testing.T) func() {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, "test.db")
config := Config{
cfg := Config{
Enabled: true,
Path: dbPath,
RetentionDays: 7,
}
if err := Initialize(config); err != nil {
if err := Initialize(cfg); err != nil {
t.Fatalf("Failed to initialize database: %v", err)
}
@ -27,31 +27,31 @@ func setupTestDB(t *testing.T) func() {
}
}
func TestDatabase_Initialize_Good(t *testing.T) {
func TestInitialize(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
// Database should be initialized
databaseMutex.RLock()
initialized := globalDatabase != nil
databaseMutex.RUnlock()
dbMu.RLock()
initialized := db != nil
dbMu.RUnlock()
if !initialized {
t.Error("Database should be initialized")
}
}
func TestDatabase_Initialize_Bad(t *testing.T) {
config := Config{
func TestInitialize_Disabled(t *testing.T) {
cfg := Config{
Enabled: false,
}
if err := Initialize(config); err != nil {
if err := Initialize(cfg); err != nil {
t.Errorf("Initialize with disabled should not error: %v", err)
}
}
func TestDatabase_Close_Good(t *testing.T) {
func TestClose(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
@ -61,7 +61,7 @@ func TestDatabase_Close_Good(t *testing.T) {
}
}
func TestDatabase_HashrateStorage_Good(t *testing.T) {
func TestHashrateStorage(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
@ -76,8 +76,8 @@ func TestDatabase_HashrateStorage_Good(t *testing.T) {
{Timestamp: now.Add(-3 * time.Minute), Hashrate: 1200},
}
for _, point := range points {
if err := InsertHashratePoint(nil, minerName, minerType, point, ResolutionHigh); err != nil {
for _, p := range points {
if err := InsertHashratePoint(nil, minerName, minerType, p, ResolutionHigh); err != nil {
t.Fatalf("Failed to store hashrate point: %v", err)
}
}
@ -93,7 +93,7 @@ func TestDatabase_HashrateStorage_Good(t *testing.T) {
}
}
func TestDatabase_GetHashrateStats_Good(t *testing.T) {
func TestGetHashrateStats(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
@ -108,8 +108,8 @@ func TestDatabase_GetHashrateStats_Good(t *testing.T) {
{Timestamp: now, Hashrate: 1500},
}
for _, point := range points {
if err := InsertHashratePoint(nil, minerName, minerType, point, ResolutionHigh); err != nil {
for _, p := range points {
if err := InsertHashratePoint(nil, minerName, minerType, p, ResolutionHigh); err != nil {
t.Fatalf("Failed to store point: %v", err)
}
}
@ -137,19 +137,19 @@ func TestDatabase_GetHashrateStats_Good(t *testing.T) {
}
}
func TestDatabase_DefaultConfig_Good(t *testing.T) {
config := defaultConfig()
func TestDefaultConfig(t *testing.T) {
cfg := defaultConfig()
if !config.Enabled {
if !cfg.Enabled {
t.Error("Default config should have Enabled=true")
}
if config.RetentionDays != 30 {
t.Errorf("Expected default retention 30, got %d", config.RetentionDays)
if cfg.RetentionDays != 30 {
t.Errorf("Expected default retention 30, got %d", cfg.RetentionDays)
}
}
func TestDatabase_CleanupRetention_Good(t *testing.T) {
func TestCleanupRetention(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
@ -216,7 +216,7 @@ func TestDatabase_CleanupRetention_Good(t *testing.T) {
}
}
func TestDatabase_GetHashrateHistoryTimeRange_Good(t *testing.T) {
func TestGetHashrateHistoryTimeRange(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
@ -269,7 +269,7 @@ func TestDatabase_GetHashrateHistoryTimeRange_Good(t *testing.T) {
}
}
func TestDatabase_MultipleMinerStats_Good(t *testing.T) {
func TestMultipleMinerStats(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
@ -285,14 +285,14 @@ func TestDatabase_MultipleMinerStats_Good(t *testing.T) {
{"miner-C", []int{3000, 3100, 3200}},
}
for _, minerEntry := range miners {
for i, rate := range minerEntry.hashrates {
for _, m := range miners {
for i, hr := range m.hashrates {
point := HashratePoint{
Timestamp: now.Add(time.Duration(-i) * time.Minute),
Hashrate: rate,
Hashrate: hr,
}
if err := InsertHashratePoint(nil, minerEntry.name, "xmrig", point, ResolutionHigh); err != nil {
t.Fatalf("Failed to insert point for %s: %v", minerEntry.name, err)
if err := InsertHashratePoint(nil, m.name, "xmrig", point, ResolutionHigh); err != nil {
t.Fatalf("Failed to insert point for %s: %v", m.name, err)
}
}
}
@ -309,30 +309,30 @@ func TestDatabase_MultipleMinerStats_Good(t *testing.T) {
// Verify each miner's stats
statsMap := make(map[string]HashrateStats)
for _, statEntry := range allStats {
statsMap[statEntry.MinerName] = statEntry
for _, s := range allStats {
statsMap[s.MinerName] = s
}
// Check miner-A: avg = (1000+1100+1200)/3 = 1100
if minerStats, ok := statsMap["miner-A"]; ok {
if minerStats.AverageRate != 1100 {
t.Errorf("miner-A: expected avg 1100, got %d", minerStats.AverageRate)
if s, ok := statsMap["miner-A"]; ok {
if s.AverageRate != 1100 {
t.Errorf("miner-A: expected avg 1100, got %d", s.AverageRate)
}
} else {
t.Error("miner-A stats not found")
}
// Check miner-C: avg = (3000+3100+3200)/3 = 3100
if minerStats, ok := statsMap["miner-C"]; ok {
if minerStats.AverageRate != 3100 {
t.Errorf("miner-C: expected avg 3100, got %d", minerStats.AverageRate)
if s, ok := statsMap["miner-C"]; ok {
if s.AverageRate != 3100 {
t.Errorf("miner-C: expected avg 3100, got %d", s.AverageRate)
}
} else {
t.Error("miner-C stats not found")
}
}
func TestDatabase_IsInitialized_Good(t *testing.T) {
func TestIsInitialized(t *testing.T) {
// Before initialization
Close() // Ensure clean state
if isInitialized() {
@ -354,52 +354,52 @@ func TestDatabase_IsInitialized_Good(t *testing.T) {
}
}
func TestDatabase_SchemaCreation_Good(t *testing.T) {
func TestSchemaCreation(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
// Verify tables exist by querying sqlite_master
databaseMutex.RLock()
defer databaseMutex.RUnlock()
dbMu.RLock()
defer dbMu.RUnlock()
// Check hashrate_history table
var tableName string
err := globalDatabase.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name='hashrate_history'").Scan(&tableName)
err := db.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name='hashrate_history'").Scan(&tableName)
if err != nil {
t.Errorf("hashrate_history table should exist: %v", err)
}
// Check miner_sessions table
err = globalDatabase.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name='miner_sessions'").Scan(&tableName)
err = db.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name='miner_sessions'").Scan(&tableName)
if err != nil {
t.Errorf("miner_sessions table should exist: %v", err)
}
// Verify indexes exist
var indexName string
err = globalDatabase.QueryRow("SELECT name FROM sqlite_master WHERE type='index' AND name='idx_hashrate_miner_time'").Scan(&indexName)
err = db.QueryRow("SELECT name FROM sqlite_master WHERE type='index' AND name='idx_hashrate_miner_time'").Scan(&indexName)
if err != nil {
t.Errorf("idx_hashrate_miner_time index should exist: %v", err)
}
err = globalDatabase.QueryRow("SELECT name FROM sqlite_master WHERE type='index' AND name='idx_sessions_miner'").Scan(&indexName)
err = db.QueryRow("SELECT name FROM sqlite_master WHERE type='index' AND name='idx_sessions_miner'").Scan(&indexName)
if err != nil {
t.Errorf("idx_sessions_miner index should exist: %v", err)
}
}
func TestDatabase_ReInitializeExistingDB_Good(t *testing.T) {
func TestReInitializeExistingDB(t *testing.T) {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, "reinit_test.db")
config := Config{
cfg := Config{
Enabled: true,
Path: dbPath,
RetentionDays: 7,
}
// First initialization
if err := Initialize(config); err != nil {
if err := Initialize(cfg); err != nil {
t.Fatalf("First initialization failed: %v", err)
}
@ -419,7 +419,7 @@ func TestDatabase_ReInitializeExistingDB_Good(t *testing.T) {
}
// Re-initialize with same path
if err := Initialize(config); err != nil {
if err := Initialize(cfg); err != nil {
t.Fatalf("Re-initialization failed: %v", err)
}
defer func() {
@ -442,7 +442,7 @@ func TestDatabase_ReInitializeExistingDB_Good(t *testing.T) {
}
}
func TestDatabase_ConcurrentAccess_Ugly(t *testing.T) {
func TestConcurrentDatabaseAccess(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()

View file

@ -2,15 +2,16 @@ package database
import (
"context"
"fmt"
"time"
"forge.lthn.ai/Snider/Mining/pkg/logging"
"github.com/Snider/Mining/pkg/logging"
)
// t := parseSQLiteTimestamp("2006-01-02 15:04:05") // time.Time{...}
// t := parseSQLiteTimestamp("") // time.Time{} (zero)
func parseSQLiteTimestamp(raw string) time.Time {
if raw == "" {
// parseSQLiteTimestamp parses timestamp strings from SQLite which may use various formats.
// Logs a warning if parsing fails and returns zero time.
func parseSQLiteTimestamp(s string) time.Time {
if s == "" {
return time.Time{}
}
@ -24,17 +25,16 @@ func parseSQLiteTimestamp(raw string) time.Time {
}
for _, format := range formats {
if parsed, err := time.Parse(format, raw); err == nil {
return parsed
if t, err := time.Parse(format, s); err == nil {
return t
}
}
logging.Warn("failed to parse timestamp from database", logging.Fields{"timestamp": raw})
logging.Warn("failed to parse timestamp from database", logging.Fields{"timestamp": s})
return time.Time{}
}
// database.ResolutionHigh // "high" — 10-second intervals
// database.ResolutionLow // "low" — 1-minute averages
// Resolution indicates the data resolution type
type Resolution string
const (
@ -42,21 +42,22 @@ const (
ResolutionLow Resolution = "low" // 1-minute averages
)
// point := database.HashratePoint{Timestamp: time.Now(), Hashrate: 1234}
// HashratePoint represents a single hashrate measurement
type HashratePoint struct {
Timestamp time.Time `json:"timestamp"`
Hashrate int `json:"hashrate"`
}
// ctx, cancel := context.WithTimeout(ctx, dbInsertTimeout) // 5s ceiling for INSERT
// dbInsertTimeout is the maximum time to wait for a database insert operation
const dbInsertTimeout = 5 * time.Second
// database.InsertHashratePoint(ctx, "xmrig", "xmrig", HashratePoint{Timestamp: time.Now(), Hashrate: 1234}, ResolutionHigh)
// InsertHashratePoint stores a hashrate measurement in the database.
// If ctx is nil, a default timeout context will be used.
func InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
databaseMutex.RLock()
defer databaseMutex.RUnlock()
dbMu.RLock()
defer dbMu.RUnlock()
if globalDatabase == nil {
if db == nil {
return nil // DB not enabled, silently skip
}
@ -67,7 +68,7 @@ func InsertHashratePoint(ctx context.Context, minerName, minerType string, point
defer cancel()
}
_, err := globalDatabase.ExecContext(ctx, `
_, err := db.ExecContext(ctx, `
INSERT INTO hashrate_history (miner_name, miner_type, timestamp, hashrate, resolution)
VALUES (?, ?, ?, ?, ?)
`, minerName, minerType, point.Timestamp, point.Hashrate, string(resolution))
@ -75,16 +76,16 @@ func InsertHashratePoint(ctx context.Context, minerName, minerType string, point
return err
}
// points, err := database.GetHashrateHistory("xmrig", database.ResolutionHigh, time.Now().Add(-time.Hour), time.Now())
// GetHashrateHistory retrieves hashrate history for a miner within a time range
func GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
databaseMutex.RLock()
defer databaseMutex.RUnlock()
dbMu.RLock()
defer dbMu.RUnlock()
if globalDatabase == nil {
if db == nil {
return nil, nil
}
rows, err := globalDatabase.Query(`
rows, err := db.Query(`
SELECT timestamp, hashrate
FROM hashrate_history
WHERE miner_name = ?
@ -94,7 +95,7 @@ func GetHashrateHistory(minerName string, resolution Resolution, since, until ti
ORDER BY timestamp ASC
`, minerName, string(resolution), since, until)
if err != nil {
return nil, databaseError("query hashrate history", err)
return nil, fmt.Errorf("failed to query hashrate history: %w", err)
}
defer rows.Close()
@ -102,7 +103,7 @@ func GetHashrateHistory(minerName string, resolution Resolution, since, until ti
for rows.Next() {
var point HashratePoint
if err := rows.Scan(&point.Timestamp, &point.Hashrate); err != nil {
return nil, databaseError("scan row", err)
return nil, fmt.Errorf("failed to scan row: %w", err)
}
points = append(points, point)
}
@ -110,8 +111,7 @@ func GetHashrateHistory(minerName string, resolution Resolution, since, until ti
return points, rows.Err()
}
// stats, err := database.GetHashrateStats("xmrig")
// if stats != nil { logging.Info("stats", logging.Fields{"average": stats.AverageRate}) }
// GetHashrateStats retrieves aggregated stats for a miner
type HashrateStats struct {
MinerName string `json:"minerName"`
TotalPoints int `json:"totalPoints"`
@ -122,19 +122,17 @@ type HashrateStats struct {
LastSeen time.Time `json:"lastSeen"`
}
// stats, err := database.GetHashrateStats("xmrig")
// if stats != nil { logging.Info("stats", logging.Fields{"miner": minerName, "average": stats.AverageRate}) }
func GetHashrateStats(minerName string) (*HashrateStats, error) {
databaseMutex.RLock()
defer databaseMutex.RUnlock()
dbMu.RLock()
defer dbMu.RUnlock()
if globalDatabase == nil {
if db == nil {
return nil, nil
}
// First check if there are any rows for this miner
var count int
err := globalDatabase.QueryRow(`SELECT COUNT(*) FROM hashrate_history WHERE miner_name = ?`, minerName).Scan(&count)
err := db.QueryRow(`SELECT COUNT(*) FROM hashrate_history WHERE miner_name = ?`, minerName).Scan(&count)
if err != nil {
return nil, err
}
@ -150,7 +148,7 @@ func GetHashrateStats(minerName string) (*HashrateStats, error) {
// SQLite returns timestamps as strings and AVG as float64, so scan them appropriately
var firstSeenStr, lastSeenStr string
var avgRate float64
err = globalDatabase.QueryRow(`
err = db.QueryRow(`
SELECT
COUNT(*),
COALESCE(AVG(hashrate), 0),
@ -181,17 +179,16 @@ func GetHashrateStats(minerName string) (*HashrateStats, error) {
return &stats, nil
}
// allStats, err := database.GetAllMinerStats()
// for _, stats := range allStats { logging.Info("stats", logging.Fields{"miner": stats.MinerName, "average": stats.AverageRate}) }
// GetAllMinerStats retrieves stats for all miners
func GetAllMinerStats() ([]HashrateStats, error) {
databaseMutex.RLock()
defer databaseMutex.RUnlock()
dbMu.RLock()
defer dbMu.RUnlock()
if globalDatabase == nil {
if db == nil {
return nil, nil
}
rows, err := globalDatabase.Query(`
rows, err := db.Query(`
SELECT
miner_name,
COUNT(*),

View file

@ -5,91 +5,91 @@ import (
"time"
)
// var store database.HashrateStore = database.DefaultStore()
// store.InsertHashratePoint(ctx, "xmrig", "xmrig", point, database.ResolutionHigh)
// store.Cleanup(30)
// HashrateStore defines the interface for hashrate data persistence.
// This interface allows for dependency injection and easier testing.
type HashrateStore interface {
// store.InsertHashratePoint(ctx, "xmrig", "xmrig", HashratePoint{Timestamp: time.Now(), Hashrate: 1234}, ResolutionHigh)
// InsertHashratePoint stores a hashrate measurement.
// If ctx is nil, a default timeout will be used.
InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error
// points, err := store.GetHashrateHistory("xmrig", ResolutionHigh, time.Now().Add(-time.Hour), time.Now())
// GetHashrateHistory retrieves hashrate history for a miner within a time range.
GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error)
// stats, err := store.GetHashrateStats("xmrig")
// if stats != nil { logging.Info("stats", logging.Fields{"average": stats.AverageRate}) }
// GetHashrateStats retrieves aggregated statistics for a specific miner.
GetHashrateStats(minerName string) (*HashrateStats, error)
// allStats, err := store.GetAllMinerStats()
// for _, stats := range allStats { logging.Info("stats", logging.Fields{"miner": stats.MinerName}) }
// GetAllMinerStats retrieves statistics for all miners.
GetAllMinerStats() ([]HashrateStats, error)
// store.Cleanup(30) // remove data older than 30 days
// Cleanup removes old data based on retention settings.
Cleanup(retentionDays int) error
// store.Close()
// Close closes the store and releases resources.
Close() error
}
// store := database.DefaultStore()
// defaultStore implements HashrateStore using the global database connection.
// This provides backward compatibility while allowing interface-based usage.
type defaultStore struct{}
// store := database.DefaultStore()
// store.GetHashrateStats("xmrig")
// DefaultStore returns a HashrateStore that uses the global database connection.
// This is useful for gradual migration from package-level functions to interface-based usage.
func DefaultStore() HashrateStore {
return &defaultStore{}
}
func (store *defaultStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
func (s *defaultStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
return InsertHashratePoint(ctx, minerName, minerType, point, resolution)
}
func (store *defaultStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
func (s *defaultStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
return GetHashrateHistory(minerName, resolution, since, until)
}
func (store *defaultStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
func (s *defaultStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
return GetHashrateStats(minerName)
}
func (store *defaultStore) GetAllMinerStats() ([]HashrateStats, error) {
func (s *defaultStore) GetAllMinerStats() ([]HashrateStats, error) {
return GetAllMinerStats()
}
func (store *defaultStore) Cleanup(retentionDays int) error {
func (s *defaultStore) Cleanup(retentionDays int) error {
return Cleanup(retentionDays)
}
func (store *defaultStore) Close() error {
func (s *defaultStore) Close() error {
return Close()
}
// store := database.NopStore()
// NopStore returns a HashrateStore that does nothing.
// Useful for testing or when database is disabled.
func NopStore() HashrateStore {
return &nopStore{}
}
type nopStore struct{}
func (store *nopStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
func (s *nopStore) InsertHashratePoint(ctx context.Context, minerName, minerType string, point HashratePoint, resolution Resolution) error {
return nil
}
func (store *nopStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
func (s *nopStore) GetHashrateHistory(minerName string, resolution Resolution, since, until time.Time) ([]HashratePoint, error) {
return nil, nil
}
func (store *nopStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
func (s *nopStore) GetHashrateStats(minerName string) (*HashrateStats, error) {
return nil, nil
}
func (store *nopStore) GetAllMinerStats() ([]HashrateStats, error) {
func (s *nopStore) GetAllMinerStats() ([]HashrateStats, error) {
return nil, nil
}
func (store *nopStore) Cleanup(retentionDays int) error {
func (s *nopStore) Cleanup(retentionDays int) error {
return nil
}
func (store *nopStore) Close() error {
func (s *nopStore) Close() error {
return nil
}

View file

@ -6,7 +6,7 @@ import (
"time"
)
func TestInterface_DefaultStore_Good(t *testing.T) {
func TestDefaultStore(t *testing.T) {
cleanup := setupTestDB(t)
defer cleanup()
@ -81,7 +81,7 @@ func TestDefaultStore_WithContext(t *testing.T) {
}
}
func TestInterface_NopStore_Good(t *testing.T) {
func TestNopStore(t *testing.T) {
store := NopStore()
// All operations should succeed without error
@ -126,8 +126,7 @@ func TestInterface_NopStore_Good(t *testing.T) {
}
}
// var _ HashrateStore = DefaultStore()
// var _ HashrateStore = NopStore()
// TestInterfaceCompatibility ensures all implementations satisfy HashrateStore
func TestInterfaceCompatibility(t *testing.T) {
var _ HashrateStore = DefaultStore()
var _ HashrateStore = NopStore()

View file

@ -1,7 +1,4 @@
// logger := logging.New(logging.Config{Level: logging.LevelDebug, Component: "mining"})
// logger.Info("started", logging.Fields{"miner": "xmrig"})
// logging.SetGlobal(logger)
// logging.Info("global log", logging.Fields{"key": "value"})
// Package logging provides structured logging with log levels and fields.
package logging
import (
@ -13,20 +10,23 @@ import (
"time"
)
// if level >= logging.LevelWarn { alertOps() }
// Level represents the severity of a log message.
type Level int
const (
LevelDebug Level = iota // logger.SetLevel(logging.LevelDebug)
LevelInfo // logger.SetLevel(logging.LevelInfo)
LevelWarn // logger.SetLevel(logging.LevelWarn)
LevelError // logger.SetLevel(logging.LevelError)
// LevelDebug is the most verbose log level.
LevelDebug Level = iota
// LevelInfo is for general informational messages.
LevelInfo
// LevelWarn is for warning messages.
LevelWarn
// LevelError is for error messages.
LevelError
)
// logging.LevelDebug.String() // "DEBUG"
// logging.LevelError.String() // "ERROR"
func (level Level) String() string {
switch level {
// String returns the string representation of the log level.
func (l Level) String() string {
switch l {
case LevelDebug:
return "DEBUG"
case LevelInfo:
@ -40,23 +40,22 @@ func (level Level) String() string {
}
}
// logger := logging.New(logging.Config{Level: logging.LevelDebug, Component: "mining"})
// logger.Info("started", logging.Fields{"miner": "xmrig"})
// Logger provides structured logging with configurable output and level.
type Logger struct {
mutex sync.Mutex
mu sync.Mutex
output io.Writer
level Level
component string
}
// logging.Config{Output: os.Stderr, Level: logging.LevelInfo, Component: "mining"}
// Config holds configuration for creating a new Logger.
type Config struct {
Output io.Writer
Level Level
Component string
}
// config := logging.DefaultConfig() // Output: os.Stderr, Level: LevelInfo
// DefaultConfig returns the default logger configuration.
func DefaultConfig() Config {
return Config{
Output: os.Stderr,
@ -65,136 +64,134 @@ func DefaultConfig() Config {
}
}
// logger := logging.New(logging.Config{Output: os.Stderr, Level: logging.LevelInfo, Component: "mining"})
func New(config Config) *Logger {
if config.Output == nil {
config.Output = os.Stderr
// New creates a new Logger with the given configuration.
func New(cfg Config) *Logger {
if cfg.Output == nil {
cfg.Output = os.Stderr
}
return &Logger{
output: config.Output,
level: config.Level,
component: config.Component,
output: cfg.Output,
level: cfg.Level,
component: cfg.Component,
}
}
// child := logger.WithComponent("xmrig")
// child.Info("miner started")
func (logger *Logger) WithComponent(component string) *Logger {
// WithComponent returns a new Logger with the specified component name.
func (l *Logger) WithComponent(component string) *Logger {
return &Logger{
output: logger.output,
level: logger.level,
output: l.output,
level: l.level,
component: component,
}
}
// logger.SetLevel(logging.LevelDebug)
func (logger *Logger) SetLevel(level Level) {
logger.mutex.Lock()
defer logger.mutex.Unlock()
logger.level = level
// SetLevel sets the minimum log level.
func (l *Logger) SetLevel(level Level) {
l.mu.Lock()
defer l.mu.Unlock()
l.level = level
}
// current := logger.GetLevel()
// if current == logging.LevelDebug { logger.SetLevel(logging.LevelInfo) }
func (logger *Logger) GetLevel() Level {
logger.mutex.Lock()
defer logger.mutex.Unlock()
return logger.level
// GetLevel returns the current log level.
func (l *Logger) GetLevel() Level {
l.mu.Lock()
defer l.mu.Unlock()
return l.level
}
// logger.Info("started", logging.Fields{"miner": "xmrig", "pool": "pool.lthn.io"})
// Fields represents key-value pairs for structured logging.
type Fields map[string]interface{}
// logger.log(LevelInfo, "started", Fields{"miner": "xmrig"})
func (logger *Logger) log(level Level, message string, fields Fields) {
logger.mutex.Lock()
defer logger.mutex.Unlock()
// log writes a log message at the specified level.
func (l *Logger) log(level Level, msg string, fields Fields) {
l.mu.Lock()
defer l.mu.Unlock()
if level < logger.level {
if level < l.level {
return
}
// Build the log line
var builder strings.Builder
var sb strings.Builder
timestamp := time.Now().Format("2006/01/02 15:04:05")
builder.WriteString(timestamp)
builder.WriteString(" [")
builder.WriteString(level.String())
builder.WriteString("]")
sb.WriteString(timestamp)
sb.WriteString(" [")
sb.WriteString(level.String())
sb.WriteString("]")
if logger.component != "" {
builder.WriteString(" [")
builder.WriteString(logger.component)
builder.WriteString("]")
if l.component != "" {
sb.WriteString(" [")
sb.WriteString(l.component)
sb.WriteString("]")
}
builder.WriteString(" ")
builder.WriteString(message)
sb.WriteString(" ")
sb.WriteString(msg)
// Add fields if present
if len(fields) > 0 {
builder.WriteString(" |")
for key, value := range fields {
builder.WriteString(" ")
builder.WriteString(key)
builder.WriteString("=")
builder.WriteString(fmt.Sprintf("%v", value))
sb.WriteString(" |")
for k, v := range fields {
sb.WriteString(" ")
sb.WriteString(k)
sb.WriteString("=")
sb.WriteString(fmt.Sprintf("%v", v))
}
}
builder.WriteString("\n")
fmt.Fprint(logger.output, builder.String())
sb.WriteString("\n")
fmt.Fprint(l.output, sb.String())
}
// logger.Debug("hashrate collected", logging.Fields{"rate": 1234})
func (logger *Logger) Debug(message string, fields ...Fields) {
logger.log(LevelDebug, message, mergeFields(fields))
// Debug logs a debug message.
func (l *Logger) Debug(msg string, fields ...Fields) {
l.log(LevelDebug, msg, mergeFields(fields))
}
// logger.Info("miner started", logging.Fields{"miner": "xmrig"})
func (logger *Logger) Info(message string, fields ...Fields) {
logger.log(LevelInfo, message, mergeFields(fields))
// Info logs an informational message.
func (l *Logger) Info(msg string, fields ...Fields) {
l.log(LevelInfo, msg, mergeFields(fields))
}
// logger.Warn("hashrate drop", logging.Fields{"current": 500, "min": 1000})
func (logger *Logger) Warn(message string, fields ...Fields) {
logger.log(LevelWarn, message, mergeFields(fields))
// Warn logs a warning message.
func (l *Logger) Warn(msg string, fields ...Fields) {
l.log(LevelWarn, msg, mergeFields(fields))
}
// logger.Error("miner crashed", logging.Fields{"code": -1, "miner": "xmrig"})
func (logger *Logger) Error(message string, fields ...Fields) {
logger.log(LevelError, message, mergeFields(fields))
// Error logs an error message.
func (l *Logger) Error(msg string, fields ...Fields) {
l.log(LevelError, msg, mergeFields(fields))
}
// logger.Debugf("collected %d hashrate points for %s", len(points), minerName)
func (logger *Logger) Debugf(format string, args ...interface{}) {
logger.log(LevelDebug, fmt.Sprintf(format, args...), nil)
// Debugf logs a formatted debug message.
func (l *Logger) Debugf(format string, args ...interface{}) {
l.log(LevelDebug, fmt.Sprintf(format, args...), nil)
}
// logger.Infof("miner %s started on pool %s", minerName, poolURL)
func (logger *Logger) Infof(format string, args ...interface{}) {
logger.log(LevelInfo, fmt.Sprintf(format, args...), nil)
// Infof logs a formatted informational message.
func (l *Logger) Infof(format string, args ...interface{}) {
l.log(LevelInfo, fmt.Sprintf(format, args...), nil)
}
// logger.Warnf("hashrate %d H/s below minimum %d H/s", current, minimum)
func (logger *Logger) Warnf(format string, args ...interface{}) {
logger.log(LevelWarn, fmt.Sprintf(format, args...), nil)
// Warnf logs a formatted warning message.
func (l *Logger) Warnf(format string, args ...interface{}) {
l.log(LevelWarn, fmt.Sprintf(format, args...), nil)
}
// logger.Errorf("failed to connect to pool %s: %v", poolURL, err)
func (logger *Logger) Errorf(format string, args ...interface{}) {
logger.log(LevelError, fmt.Sprintf(format, args...), nil)
// Errorf logs a formatted error message.
func (l *Logger) Errorf(format string, args ...interface{}) {
l.log(LevelError, fmt.Sprintf(format, args...), nil)
}
// combined := mergeFields([]Fields{{"a": 1}, {"b": 2}}) // Fields{"a": 1, "b": 2}
// mergeFields combines multiple Fields maps into one.
func mergeFields(fields []Fields) Fields {
if len(fields) == 0 {
return nil
}
result := make(Fields)
for _, fieldSet := range fields {
for key, value := range fieldSet {
result[key] = value
for _, f := range fields {
for k, v := range f {
result[k] = v
}
}
return result
@ -204,77 +201,75 @@ func mergeFields(fields []Fields) Fields {
var (
globalLogger = New(DefaultConfig())
globalMutex sync.RWMutex
globalMu sync.RWMutex
)
// logging.SetGlobal(logging.New(logging.Config{Level: logging.LevelDebug}))
func SetGlobal(logger *Logger) {
globalMutex.Lock()
defer globalMutex.Unlock()
globalLogger = logger
// SetGlobal sets the global logger instance.
func SetGlobal(l *Logger) {
globalMu.Lock()
defer globalMu.Unlock()
globalLogger = l
}
// logger := logging.GetGlobal()
// logger.Info("using global logger")
// GetGlobal returns the global logger instance.
func GetGlobal() *Logger {
globalMutex.RLock()
defer globalMutex.RUnlock()
globalMu.RLock()
defer globalMu.RUnlock()
return globalLogger
}
// logging.SetGlobalLevel(logging.LevelDebug) // enable debug logging globally
// SetGlobalLevel sets the log level of the global logger.
func SetGlobalLevel(level Level) {
globalMutex.RLock()
defer globalMutex.RUnlock()
globalMu.RLock()
defer globalMu.RUnlock()
globalLogger.SetLevel(level)
}
// Global convenience functions that use the global logger
// logging.Debug("hashrate collected", logging.Fields{"rate": 1234, "miner": "xmrig"})
func Debug(message string, fields ...Fields) {
GetGlobal().Debug(message, fields...)
// Debug logs a debug message using the global logger.
func Debug(msg string, fields ...Fields) {
GetGlobal().Debug(msg, fields...)
}
// logging.Info("miner started", logging.Fields{"miner": "xmrig", "pool": "pool.lthn.io"})
func Info(message string, fields ...Fields) {
GetGlobal().Info(message, fields...)
// Info logs an informational message using the global logger.
func Info(msg string, fields ...Fields) {
GetGlobal().Info(msg, fields...)
}
// logging.Warn("hashrate dropped below threshold", logging.Fields{"current": 500, "min": 1000})
func Warn(message string, fields ...Fields) {
GetGlobal().Warn(message, fields...)
// Warn logs a warning message using the global logger.
func Warn(msg string, fields ...Fields) {
GetGlobal().Warn(msg, fields...)
}
// logging.Error("miner process exited unexpectedly", logging.Fields{"code": -1})
func Error(message string, fields ...Fields) {
GetGlobal().Error(message, fields...)
// Error logs an error message using the global logger.
func Error(msg string, fields ...Fields) {
GetGlobal().Error(msg, fields...)
}
// logging.Debugf("collected %d hashrate points for %s", len(points), minerName)
// Debugf logs a formatted debug message using the global logger.
func Debugf(format string, args ...interface{}) {
GetGlobal().Debugf(format, args...)
}
// logging.Infof("miner %s started on pool %s", minerName, poolURL)
// Infof logs a formatted informational message using the global logger.
func Infof(format string, args ...interface{}) {
GetGlobal().Infof(format, args...)
}
// logging.Warnf("hashrate %d H/s below minimum %d H/s", current, minimum)
// Warnf logs a formatted warning message using the global logger.
func Warnf(format string, args ...interface{}) {
GetGlobal().Warnf(format, args...)
}
// logging.Errorf("failed to connect to pool %s: %v", poolURL, err)
// Errorf logs a formatted error message using the global logger.
func Errorf(format string, args ...interface{}) {
GetGlobal().Errorf(format, args...)
}
// level, err := logging.ParseLevel("DEBUG") // LevelDebug, nil
// level, err := logging.ParseLevel("nope") // LevelInfo, error
func ParseLevel(input string) (Level, error) {
switch strings.ToUpper(input) {
// ParseLevel parses a string into a log level.
func ParseLevel(s string) (Level, error) {
switch strings.ToUpper(s) {
case "DEBUG":
return LevelDebug, nil
case "INFO":
@ -284,6 +279,6 @@ func ParseLevel(input string) (Level, error) {
case "ERROR":
return LevelError, nil
default:
return LevelInfo, fmt.Errorf("unknown log level: %s", input)
return LevelInfo, fmt.Errorf("unknown log level: %s", s)
}
}

View file

@ -6,69 +6,65 @@ import (
"testing"
)
func TestLogger_Log_Good(t *testing.T) {
var outputBuffer bytes.Buffer
func TestLoggerLevels(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &outputBuffer,
Output: &buf,
Level: LevelInfo,
})
// Debug should not appear at Info level
logger.Debug("debug message")
if buf.Len() > 0 {
t.Error("Debug message should not appear at Info level")
}
// Info should appear
logger.Info("info message")
if !strings.Contains(outputBuffer.String(), "[INFO]") {
if !strings.Contains(buf.String(), "[INFO]") {
t.Error("Info message should appear")
}
if !strings.Contains(outputBuffer.String(), "info message") {
if !strings.Contains(buf.String(), "info message") {
t.Error("Info message content should appear")
}
outputBuffer.Reset()
buf.Reset()
// Warn should appear
logger.Warn("warn message")
if !strings.Contains(outputBuffer.String(), "[WARN]") {
if !strings.Contains(buf.String(), "[WARN]") {
t.Error("Warn message should appear")
}
outputBuffer.Reset()
buf.Reset()
// Error should appear
logger.Error("error message")
if !strings.Contains(outputBuffer.String(), "[ERROR]") {
if !strings.Contains(buf.String(), "[ERROR]") {
t.Error("Error message should appear")
}
}
func TestLogger_Log_Bad(t *testing.T) {
var outputBuffer bytes.Buffer
func TestLoggerDebugLevel(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &outputBuffer,
Level: LevelInfo,
})
logger.Debug("debug message")
if outputBuffer.Len() > 0 {
t.Error("Debug message should not appear at Info level")
}
}
func TestLogger_Log_Ugly(t *testing.T) {
var outputBuffer bytes.Buffer
logger := New(Config{
Output: &outputBuffer,
Output: &buf,
Level: LevelDebug,
})
logger.Debug("debug message")
if !strings.Contains(outputBuffer.String(), "[DEBUG]") {
if !strings.Contains(buf.String(), "[DEBUG]") {
t.Error("Debug message should appear at Debug level")
}
}
func TestLogger_WithFields_Good(t *testing.T) {
var outputBuffer bytes.Buffer
func TestLoggerWithFields(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &outputBuffer,
Output: &buf,
Level: LevelInfo,
})
logger.Info("test message", Fields{"key": "value", "num": 42})
output := outputBuffer.String()
output := buf.String()
if !strings.Contains(output, "key=value") {
t.Error("Field key=value should appear")
@ -78,77 +74,80 @@ func TestLogger_WithFields_Good(t *testing.T) {
}
}
func TestLogger_WithComponent_Good(t *testing.T) {
var outputBuffer bytes.Buffer
func TestLoggerWithComponent(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &outputBuffer,
Output: &buf,
Level: LevelInfo,
Component: "TestComponent",
})
logger.Info("test message")
output := outputBuffer.String()
output := buf.String()
if !strings.Contains(output, "[TestComponent]") {
t.Error("Component name should appear in log")
}
}
func TestLogger_DerivedComponent_Good(t *testing.T) {
var outputBuffer bytes.Buffer
func TestLoggerDerivedComponent(t *testing.T) {
var buf bytes.Buffer
parent := New(Config{
Output: &outputBuffer,
Output: &buf,
Level: LevelInfo,
})
child := parent.WithComponent("ChildComponent")
child.Info("child message")
output := outputBuffer.String()
output := buf.String()
if !strings.Contains(output, "[ChildComponent]") {
t.Error("Derived component name should appear")
}
}
func TestLogger_Formatted_Good(t *testing.T) {
var outputBuffer bytes.Buffer
func TestLoggerFormatted(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &outputBuffer,
Output: &buf,
Level: LevelInfo,
})
logger.Infof("formatted %s %d", "string", 123)
output := outputBuffer.String()
output := buf.String()
if !strings.Contains(output, "formatted string 123") {
t.Errorf("Formatted message should appear, got: %s", output)
}
}
func TestLogger_SetLevel_Good(t *testing.T) {
var outputBuffer bytes.Buffer
func TestSetLevel(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &outputBuffer,
Output: &buf,
Level: LevelError,
})
// Info should not appear at Error level
logger.Info("should not appear")
if outputBuffer.Len() > 0 {
if buf.Len() > 0 {
t.Error("Info should not appear at Error level")
}
// Change to Info level
logger.SetLevel(LevelInfo)
logger.Info("should appear now")
if !strings.Contains(outputBuffer.String(), "should appear now") {
if !strings.Contains(buf.String(), "should appear now") {
t.Error("Info should appear after level change")
}
// Verify GetLevel
if logger.GetLevel() != LevelInfo {
t.Error("GetLevel should return LevelInfo")
}
}
func TestLogger_ParseLevel_Good(t *testing.T) {
func TestParseLevel(t *testing.T) {
tests := []struct {
input string
expected Level
@ -165,47 +164,48 @@ func TestLogger_ParseLevel_Good(t *testing.T) {
{"invalid", LevelInfo, true},
}
for _, testCase := range tests {
t.Run(testCase.input, func(t *testing.T) {
level, err := ParseLevel(testCase.input)
if testCase.wantErr && err == nil {
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
level, err := ParseLevel(tt.input)
if tt.wantErr && err == nil {
t.Error("Expected error but got none")
}
if !testCase.wantErr && err != nil {
if !tt.wantErr && err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !testCase.wantErr && level != testCase.expected {
t.Errorf("Expected %v, got %v", testCase.expected, level)
if !tt.wantErr && level != tt.expected {
t.Errorf("Expected %v, got %v", tt.expected, level)
}
})
}
}
func TestLogger_GlobalLogger_Good(t *testing.T) {
var outputBuffer bytes.Buffer
func TestGlobalLogger(t *testing.T) {
var buf bytes.Buffer
logger := New(Config{
Output: &outputBuffer,
Output: &buf,
Level: LevelInfo,
})
SetGlobal(logger)
Info("global test")
if !strings.Contains(outputBuffer.String(), "global test") {
if !strings.Contains(buf.String(), "global test") {
t.Error("Global logger should write message")
}
outputBuffer.Reset()
buf.Reset()
SetGlobalLevel(LevelError)
Info("should not appear")
if outputBuffer.Len() > 0 {
if buf.Len() > 0 {
t.Error("Info should not appear at Error level")
}
// Reset to default for other tests
SetGlobal(New(DefaultConfig()))
}
func TestLogger_LevelString_Good(t *testing.T) {
func TestLevelString(t *testing.T) {
tests := []struct {
level Level
expected string
@ -217,14 +217,15 @@ func TestLogger_LevelString_Good(t *testing.T) {
{Level(99), "UNKNOWN"},
}
for _, testCase := range tests {
if got := testCase.level.String(); got != testCase.expected {
t.Errorf("Level(%d).String() = %s, want %s", testCase.level, got, testCase.expected)
for _, tt := range tests {
if got := tt.level.String(); got != tt.expected {
t.Errorf("Level(%d).String() = %s, want %s", tt.level, got, tt.expected)
}
}
}
func TestLogger_MergeFields_Good(t *testing.T) {
func TestMergeFields(t *testing.T) {
// Empty fields
result := mergeFields(nil)
if result != nil {
t.Error("nil input should return nil")
@ -235,11 +236,13 @@ func TestLogger_MergeFields_Good(t *testing.T) {
t.Error("empty input should return nil")
}
// Single fields
result = mergeFields([]Fields{{"key": "value"}})
if result["key"] != "value" {
t.Error("Single field should be preserved")
}
// Multiple fields
result = mergeFields([]Fields{
{"key1": "value1"},
{"key2": "value2"},
@ -248,6 +251,7 @@ func TestLogger_MergeFields_Good(t *testing.T) {
t.Error("Multiple fields should be merged")
}
// Override
result = mergeFields([]Fields{
{"key": "value1"},
{"key": "value2"},

View file

@ -1,49 +0,0 @@
package mining
import (
"os"
"path/filepath"
)
// AtomicWriteFile writes data to filePath atomically with the given
// permissions: the bytes go to a temporary file in the destination's
// directory, are fsynced, chmodded, and then renamed over filePath, so a
// reader never observes a partially written file.
//
// Example: AtomicWriteFile("/home/alice/.config/lethean-desktop/miners/installed-miners.json", data, 0600)
func AtomicWriteFile(filePath string, data []byte, perm os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(filePath), ".tmp-*")
	if err != nil {
		return ErrInternal("create temp file").WithCause(err)
	}
	tmpPath := tmp.Name()
	committed := false
	// Remove the temp file on every failure path so no litter is left behind.
	defer func() {
		if !committed {
			os.Remove(tmpPath)
		}
	}()
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return ErrInternal("write temp file").WithCause(err)
	}
	if err := tmp.Sync(); err != nil {
		tmp.Close()
		return ErrInternal("sync temp file").WithCause(err)
	}
	if err := tmp.Close(); err != nil {
		return ErrInternal("close temp file").WithCause(err)
	}
	if err := os.Chmod(tmpPath, perm); err != nil {
		return ErrInternal("set file permissions").WithCause(err)
	}
	if err := os.Rename(tmpPath, filePath); err != nil {
		return ErrInternal("rename temp file").WithCause(err)
	}
	committed = true
	return nil
}

View file

@ -5,27 +5,33 @@ import (
"crypto/rand"
"crypto/subtle"
"encoding/hex"
"fmt"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"forge.lthn.ai/Snider/Mining/pkg/logging"
"github.com/Snider/Mining/pkg/logging"
"github.com/gin-gonic/gin"
)
// AuthConfig holds authentication configuration for the HTTP API.
//
// Example: AuthConfig{Enabled: true, Username: "admin", Password: "secret", Realm: "Mining API"}
type AuthConfig struct {
	// Enabled determines if authentication is required
	Enabled bool
	// Username for basic/digest auth
	Username string
	// Password for basic/digest auth
	Password string
	// Realm for digest auth
	Realm string
	// NonceExpiry is how long a nonce is valid
	NonceExpiry time.Duration
}
// authConfig := DefaultAuthConfig() // Enabled: false, Realm: "Mining API", NonceExpiry: 5m
// DefaultAuthConfig returns the default auth configuration.
// Auth is disabled by default for local development.
func DefaultAuthConfig() AuthConfig {
return AuthConfig{
Enabled: false,
@ -36,109 +42,116 @@ func DefaultAuthConfig() AuthConfig {
}
}
// authConfig := AuthConfigFromEnv() // reads MINING_API_AUTH, MINING_API_USER, MINING_API_PASS, MINING_API_REALM
// AuthConfigFromEnv creates auth config from environment variables.
// Set MINING_API_AUTH=true to enable, MINING_API_USER and MINING_API_PASS for credentials.
func AuthConfigFromEnv() AuthConfig {
authConfig := DefaultAuthConfig()
config := DefaultAuthConfig()
if os.Getenv("MINING_API_AUTH") == "true" {
authConfig.Enabled = true
authConfig.Username = os.Getenv("MINING_API_USER")
authConfig.Password = os.Getenv("MINING_API_PASS")
config.Enabled = true
config.Username = os.Getenv("MINING_API_USER")
config.Password = os.Getenv("MINING_API_PASS")
if authConfig.Username == "" || authConfig.Password == "" {
if config.Username == "" || config.Password == "" {
logging.Warn("API auth enabled but credentials not set", logging.Fields{
"hint": "Set MINING_API_USER and MINING_API_PASS environment variables",
})
authConfig.Enabled = false
config.Enabled = false
}
}
if realm := os.Getenv("MINING_API_REALM"); realm != "" {
authConfig.Realm = realm
config.Realm = realm
}
return authConfig
return config
}
// DigestAuth implements HTTP Digest Authentication middleware.
//
// Example: da := NewDigestAuth(cfg); router.Use(da.Middleware()); defer da.Stop()
type DigestAuth struct {
	config   AuthConfig
	nonces   sync.Map // map[string]time.Time for nonce expiry tracking
	stopChan chan struct{}
	stopOnce sync.Once
}
// digestAuth := NewDigestAuth(AuthConfigFromEnv()); router.Use(digestAuth.Middleware())
// NewDigestAuth creates a new digest auth middleware
func NewDigestAuth(config AuthConfig) *DigestAuth {
digestAuth := &DigestAuth{
config: config,
stopChannel: make(chan struct{}),
da := &DigestAuth{
config: config,
stopChan: make(chan struct{}),
}
// go digestAuth.cleanupNonces() // clears expired nonces every 5 minutes
go digestAuth.cleanupNonces()
return digestAuth
// Start nonce cleanup goroutine
go da.cleanupNonces()
return da
}
// defer digestAuth.Stop() // safe to call multiple times; stops the nonce cleanup goroutine
func (digestAuth *DigestAuth) Stop() {
digestAuth.stopOnce.Do(func() {
close(digestAuth.stopChannel)
// Stop gracefully shuts down the DigestAuth, stopping the cleanup goroutine.
// Safe to call multiple times.
func (da *DigestAuth) Stop() {
da.stopOnce.Do(func() {
close(da.stopChan)
})
}
// router.Use(digestAuth.Middleware()) // enforces Digest or Basic auth on all routes
func (digestAuth *DigestAuth) Middleware() gin.HandlerFunc {
return func(requestContext *gin.Context) {
if !digestAuth.config.Enabled {
requestContext.Next()
// Middleware returns a Gin middleware that enforces digest authentication
func (da *DigestAuth) Middleware() gin.HandlerFunc {
return func(c *gin.Context) {
if !da.config.Enabled {
c.Next()
return
}
authHeader := requestContext.GetHeader("Authorization")
authHeader := c.GetHeader("Authorization")
if authHeader == "" {
digestAuth.sendChallenge(requestContext)
da.sendChallenge(c)
return
}
// Try digest auth first
if strings.HasPrefix(authHeader, "Digest ") {
if digestAuth.validateDigest(requestContext, authHeader) {
requestContext.Next()
if da.validateDigest(c, authHeader) {
c.Next()
return
}
digestAuth.sendChallenge(requestContext)
da.sendChallenge(c)
return
}
// Fall back to basic auth
if strings.HasPrefix(authHeader, "Basic ") {
if digestAuth.validateBasic(requestContext, authHeader) {
requestContext.Next()
if da.validateBasic(c, authHeader) {
c.Next()
return
}
}
digestAuth.sendChallenge(requestContext)
da.sendChallenge(c)
}
}
// digestAuth.sendChallenge(c) // writes WWW-Authenticate header and 401 JSON response
func (digestAuth *DigestAuth) sendChallenge(requestContext *gin.Context) {
nonce := digestAuth.generateNonce()
digestAuth.nonces.Store(nonce, time.Now())
// sendChallenge sends a 401 response with digest auth challenge
func (da *DigestAuth) sendChallenge(c *gin.Context) {
nonce := da.generateNonce()
da.nonces.Store(nonce, time.Now())
challenge := `Digest realm="` + digestAuth.config.Realm + `", qop="auth", nonce="` + nonce + `", opaque="` + digestAuth.generateOpaque() + `"`
challenge := fmt.Sprintf(
`Digest realm="%s", qop="auth", nonce="%s", opaque="%s"`,
da.config.Realm,
nonce,
da.generateOpaque(),
)
requestContext.Header("WWW-Authenticate", challenge)
requestContext.AbortWithStatusJSON(http.StatusUnauthorized, APIError{
c.Header("WWW-Authenticate", challenge)
c.AbortWithStatusJSON(http.StatusUnauthorized, APIError{
Code: "AUTH_REQUIRED",
Message: "Authentication required",
Suggestion: "Provide valid credentials using Digest or Basic authentication",
})
}
// valid := digestAuth.validateDigest(requestContext, requestContext.GetHeader("Authorization"))
func (digestAuth *DigestAuth) validateDigest(requestContext *gin.Context, authHeader string) bool {
// validateDigest validates a digest auth header
func (da *DigestAuth) validateDigest(c *gin.Context, authHeader string) bool {
params := parseDigestParams(authHeader[7:]) // Skip "Digest "
nonce := params["nonce"]
@ -147,9 +160,9 @@ func (digestAuth *DigestAuth) validateDigest(requestContext *gin.Context, authHe
}
// Check nonce validity
if storedTime, ok := digestAuth.nonces.Load(nonce); ok {
if time.Since(storedTime.(time.Time)) > digestAuth.config.NonceExpiry {
digestAuth.nonces.Delete(nonce)
if storedTime, ok := da.nonces.Load(nonce); ok {
if time.Since(storedTime.(time.Time)) > da.config.NonceExpiry {
da.nonces.Delete(nonce)
return false
}
} else {
@ -157,58 +170,60 @@ func (digestAuth *DigestAuth) validateDigest(requestContext *gin.Context, authHe
}
// Validate username with constant-time comparison to prevent timing attacks
if subtle.ConstantTimeCompare([]byte(params["username"]), []byte(digestAuth.config.Username)) != 1 {
if subtle.ConstantTimeCompare([]byte(params["username"]), []byte(da.config.Username)) != 1 {
return false
}
// Calculate expected response
hashA1 := md5Hash(digestAuth.config.Username + ":" + digestAuth.config.Realm + ":" + digestAuth.config.Password)
hashA2 := md5Hash(requestContext.Request.Method + ":" + params["uri"])
ha1 := md5Hash(fmt.Sprintf("%s:%s:%s", da.config.Username, da.config.Realm, da.config.Password))
ha2 := md5Hash(fmt.Sprintf("%s:%s", c.Request.Method, params["uri"]))
var expectedResponse string
if params["qop"] == "auth" {
expectedResponse = md5Hash(hashA1 + ":" + nonce + ":" + params["nc"] + ":" + params["cnonce"] + ":" + params["qop"] + ":" + hashA2)
expectedResponse = md5Hash(fmt.Sprintf("%s:%s:%s:%s:%s:%s",
ha1, nonce, params["nc"], params["cnonce"], params["qop"], ha2))
} else {
expectedResponse = md5Hash(hashA1 + ":" + nonce + ":" + hashA2)
expectedResponse = md5Hash(fmt.Sprintf("%s:%s:%s", ha1, nonce, ha2))
}
// Constant-time comparison to prevent timing attacks
return subtle.ConstantTimeCompare([]byte(expectedResponse), []byte(params["response"])) == 1
}
// valid := digestAuth.validateBasic(requestContext, requestContext.GetHeader("Authorization"))
func (digestAuth *DigestAuth) validateBasic(requestContext *gin.Context, authHeader string) bool {
// validateBasic validates a basic auth header
func (da *DigestAuth) validateBasic(c *gin.Context, authHeader string) bool {
// Gin has built-in basic auth, but we do manual validation for consistency
username, password, ok := requestContext.Request.BasicAuth()
user, pass, ok := c.Request.BasicAuth()
if !ok {
return false
}
userMatch := subtle.ConstantTimeCompare([]byte(username), []byte(digestAuth.config.Username)) == 1
passwordMatch := subtle.ConstantTimeCompare([]byte(password), []byte(digestAuth.config.Password)) == 1
// Constant-time comparison to prevent timing attacks
userMatch := subtle.ConstantTimeCompare([]byte(user), []byte(da.config.Username)) == 1
passMatch := subtle.ConstantTimeCompare([]byte(pass), []byte(da.config.Password)) == 1
return userMatch && passwordMatch
return userMatch && passMatch
}
// nonce := digestAuth.generateNonce() // 32-char hex string, cryptographically random
func (digestAuth *DigestAuth) generateNonce() string {
randomBytes := make([]byte, 16)
if _, err := rand.Read(randomBytes); err != nil {
// generateNonce creates a cryptographically random nonce
func (da *DigestAuth) generateNonce() string {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
// Cryptographic failure is critical - fall back to time-based nonce
// This should never happen on a properly configured system
return hex.EncodeToString([]byte(strconv.FormatInt(time.Now().UnixNano(), 10)))
return hex.EncodeToString([]byte(fmt.Sprintf("%d", time.Now().UnixNano())))
}
return hex.EncodeToString(randomBytes)
return hex.EncodeToString(b)
}
// opaque := digestAuth.generateOpaque() // MD5 of realm, stable per auth instance
func (digestAuth *DigestAuth) generateOpaque() string {
return md5Hash(digestAuth.config.Realm)
// generateOpaque creates an opaque value
func (da *DigestAuth) generateOpaque() string {
return md5Hash(da.config.Realm)
}
// go digestAuth.cleanupNonces() // runs until stopChannel is closed; interval = NonceExpiry
func (digestAuth *DigestAuth) cleanupNonces() {
interval := digestAuth.config.NonceExpiry
// cleanupNonces removes expired nonces periodically
func (da *DigestAuth) cleanupNonces() {
interval := da.config.NonceExpiry
if interval <= 0 {
interval = 5 * time.Minute // Default if not set
}
@ -217,13 +232,13 @@ func (digestAuth *DigestAuth) cleanupNonces() {
for {
select {
case <-digestAuth.stopChannel:
case <-da.stopChan:
return
case <-ticker.C:
now := time.Now()
digestAuth.nonces.Range(func(key, value interface{}) bool {
if now.Sub(value.(time.Time)) > digestAuth.config.NonceExpiry {
digestAuth.nonces.Delete(key)
da.nonces.Range(func(key, value interface{}) bool {
if now.Sub(value.(time.Time)) > da.config.NonceExpiry {
da.nonces.Delete(key)
}
return true
})
@ -231,7 +246,7 @@ func (digestAuth *DigestAuth) cleanupNonces() {
}
}
// parseDigestParams parses the comma-separated key=value parameters from a
// digest auth header (the part after "Digest "). Values may be quoted;
// surrounding quotes are stripped. Malformed parts without '=' are skipped.
// Limitation: a comma inside a quoted value splits the pair (naive split).
//
// Example: parseDigestParams(`nonce="abc", qop=auth`) -> {"nonce": "abc", "qop": "auth"}
func parseDigestParams(header string) map[string]string {
	params := make(map[string]string)
	parts := strings.Split(header, ",")
	for _, part := range parts {
		part = strings.TrimSpace(part)
		kv := strings.SplitN(part, "=", 2)
		if len(kv) != 2 {
			continue
		}
		key := strings.TrimSpace(kv[0])
		value := strings.Trim(strings.TrimSpace(kv[1]), `"`)
		params[key] = value
	}
	return params
}
// md5Hash returns the MD5 digest of s as a lowercase hex string.
// MD5 is required here by the HTTP Digest scheme (RFC 2617); it is not
// used for general-purpose hashing.
func md5Hash(s string) string {
	h := md5.Sum([]byte(s))
	return hex.EncodeToString(h[:])
}

View file

@ -18,70 +18,28 @@ func init() {
gin.SetMode(gin.TestMode)
}
// TestAuth_DefaultAuthConfig_Good — authConfig := DefaultAuthConfig()
func TestAuth_DefaultAuthConfig_Good(t *testing.T) {
authConfig := DefaultAuthConfig()
func TestDefaultAuthConfig(t *testing.T) {
cfg := DefaultAuthConfig()
if authConfig.Enabled {
if cfg.Enabled {
t.Error("expected Enabled to be false by default")
}
if authConfig.Username != "" {
if cfg.Username != "" {
t.Error("expected Username to be empty by default")
}
if authConfig.Password != "" {
if cfg.Password != "" {
t.Error("expected Password to be empty by default")
}
if authConfig.Realm != "Mining API" {
t.Errorf("expected Realm to be 'Mining API', got %s", authConfig.Realm)
if cfg.Realm != "Mining API" {
t.Errorf("expected Realm to be 'Mining API', got %s", cfg.Realm)
}
if authConfig.NonceExpiry != 5*time.Minute {
t.Errorf("expected NonceExpiry to be 5 minutes, got %v", authConfig.NonceExpiry)
if cfg.NonceExpiry != 5*time.Minute {
t.Errorf("expected NonceExpiry to be 5 minutes, got %v", cfg.NonceExpiry)
}
}
// TestAuth_AuthConfigFromEnv_Good — authConfig := AuthConfigFromEnv() with valid credentials
func TestAuth_AuthConfigFromEnv_Good(t *testing.T) {
origAuth := os.Getenv("MINING_API_AUTH")
origUser := os.Getenv("MINING_API_USER")
origPass := os.Getenv("MINING_API_PASS")
origRealm := os.Getenv("MINING_API_REALM")
defer func() {
os.Setenv("MINING_API_AUTH", origAuth)
os.Setenv("MINING_API_USER", origUser)
os.Setenv("MINING_API_PASS", origPass)
os.Setenv("MINING_API_REALM", origRealm)
}()
t.Run("auth enabled with valid credentials", func(t *testing.T) {
os.Setenv("MINING_API_AUTH", "true")
os.Setenv("MINING_API_USER", "testuser")
os.Setenv("MINING_API_PASS", "testpass")
authConfig := AuthConfigFromEnv()
if !authConfig.Enabled {
t.Error("expected Enabled to be true")
}
if authConfig.Username != "testuser" {
t.Errorf("expected Username 'testuser', got %s", authConfig.Username)
}
if authConfig.Password != "testpass" {
t.Errorf("expected Password 'testpass', got %s", authConfig.Password)
}
})
t.Run("custom realm", func(t *testing.T) {
os.Setenv("MINING_API_AUTH", "")
os.Setenv("MINING_API_REALM", "Custom Realm")
authConfig := AuthConfigFromEnv()
if authConfig.Realm != "Custom Realm" {
t.Errorf("expected Realm 'Custom Realm', got %s", authConfig.Realm)
}
})
}
// TestAuth_AuthConfigFromEnv_Bad — AuthConfigFromEnv() with missing credentials disables auth
func TestAuth_AuthConfigFromEnv_Bad(t *testing.T) {
func TestAuthConfigFromEnv(t *testing.T) {
// Save original env
origAuth := os.Getenv("MINING_API_AUTH")
origUser := os.Getenv("MINING_API_USER")
origPass := os.Getenv("MINING_API_PASS")
@ -95,27 +53,53 @@ func TestAuth_AuthConfigFromEnv_Bad(t *testing.T) {
t.Run("auth disabled by default", func(t *testing.T) {
os.Setenv("MINING_API_AUTH", "")
authConfig := AuthConfigFromEnv()
if authConfig.Enabled {
cfg := AuthConfigFromEnv()
if cfg.Enabled {
t.Error("expected Enabled to be false when env not set")
}
})
t.Run("auth enabled with valid credentials", func(t *testing.T) {
os.Setenv("MINING_API_AUTH", "true")
os.Setenv("MINING_API_USER", "testuser")
os.Setenv("MINING_API_PASS", "testpass")
cfg := AuthConfigFromEnv()
if !cfg.Enabled {
t.Error("expected Enabled to be true")
}
if cfg.Username != "testuser" {
t.Errorf("expected Username 'testuser', got %s", cfg.Username)
}
if cfg.Password != "testpass" {
t.Errorf("expected Password 'testpass', got %s", cfg.Password)
}
})
t.Run("auth disabled if credentials missing", func(t *testing.T) {
os.Setenv("MINING_API_AUTH", "true")
os.Setenv("MINING_API_USER", "")
os.Setenv("MINING_API_PASS", "")
authConfig := AuthConfigFromEnv()
if authConfig.Enabled {
cfg := AuthConfigFromEnv()
if cfg.Enabled {
t.Error("expected Enabled to be false when credentials missing")
}
})
t.Run("custom realm", func(t *testing.T) {
os.Setenv("MINING_API_AUTH", "")
os.Setenv("MINING_API_REALM", "Custom Realm")
cfg := AuthConfigFromEnv()
if cfg.Realm != "Custom Realm" {
t.Errorf("expected Realm 'Custom Realm', got %s", cfg.Realm)
}
})
}
// TestAuth_NewDigestAuth_Good — digestAuth := NewDigestAuth(authConfig); defer digestAuth.Stop()
func TestAuth_NewDigestAuth_Good(t *testing.T) {
authConfig := AuthConfig{
func TestNewDigestAuth(t *testing.T) {
cfg := AuthConfig{
Enabled: true,
Username: "user",
Password: "pass",
@ -123,75 +107,74 @@ func TestAuth_NewDigestAuth_Good(t *testing.T) {
NonceExpiry: time.Second,
}
digestAuth := NewDigestAuth(authConfig)
if digestAuth == nil {
da := NewDigestAuth(cfg)
if da == nil {
t.Fatal("expected non-nil DigestAuth")
}
digestAuth.Stop()
// Cleanup
da.Stop()
}
// TestAuth_DigestAuthStop_Ugly — digestAuth.Stop() is safe to call multiple times
func TestAuth_DigestAuthStop_Ugly(t *testing.T) {
authConfig := DefaultAuthConfig()
digestAuth := NewDigestAuth(authConfig)
func TestDigestAuthStop(t *testing.T) {
cfg := DefaultAuthConfig()
da := NewDigestAuth(cfg)
digestAuth.Stop()
digestAuth.Stop()
digestAuth.Stop()
// Should not panic when called multiple times
da.Stop()
da.Stop()
da.Stop()
}
// TestAuth_Middleware_Good — router.Use(digestAuth.Middleware()) passes requests when auth disabled
func TestAuth_Middleware_Good(t *testing.T) {
authConfig := AuthConfig{Enabled: false}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
func TestMiddlewareAuthDisabled(t *testing.T) {
cfg := AuthConfig{Enabled: false}
da := NewDigestAuth(cfg)
defer da.Stop()
router := gin.New()
router.Use(digestAuth.Middleware())
router.Use(da.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "success")
})
request := httptest.NewRequest("GET", "/test", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req := httptest.NewRequest("GET", "/test", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK {
t.Errorf("expected status 200, got %d", recorder.Code)
if w.Code != http.StatusOK {
t.Errorf("expected status 200, got %d", w.Code)
}
if recorder.Body.String() != "success" {
t.Errorf("expected body 'success', got %s", recorder.Body.String())
if w.Body.String() != "success" {
t.Errorf("expected body 'success', got %s", w.Body.String())
}
}
// TestAuth_Middleware_Bad — unauthenticated requests receive 401 with Digest challenge
func TestAuth_Middleware_Bad(t *testing.T) {
authConfig := AuthConfig{
func TestMiddlewareNoAuth(t *testing.T) {
cfg := AuthConfig{
Enabled: true,
Username: "user",
Password: "pass",
Realm: "Test",
NonceExpiry: 5 * time.Minute,
}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
da := NewDigestAuth(cfg)
defer da.Stop()
router := gin.New()
router.Use(digestAuth.Middleware())
router.Use(da.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "success")
})
request := httptest.NewRequest("GET", "/test", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req := httptest.NewRequest("GET", "/test", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusUnauthorized {
t.Errorf("expected status 401, got %d", recorder.Code)
if w.Code != http.StatusUnauthorized {
t.Errorf("expected status 401, got %d", w.Code)
}
wwwAuth := recorder.Header().Get("WWW-Authenticate")
wwwAuth := w.Header().Get("WWW-Authenticate")
if wwwAuth == "" {
t.Error("expected WWW-Authenticate header")
}
@ -203,48 +186,46 @@ func TestAuth_Middleware_Bad(t *testing.T) {
}
}
// TestAuth_BasicAuth_Good — request.SetBasicAuth("user", "pass") succeeds through middleware.
func TestAuth_BasicAuth_Good(t *testing.T) {
authConfig := AuthConfig{
func TestMiddlewareBasicAuthValid(t *testing.T) {
cfg := AuthConfig{
Enabled: true,
Username: "user",
Password: "pass",
Realm: "Test",
NonceExpiry: 5 * time.Minute,
}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
da := NewDigestAuth(cfg)
defer da.Stop()
router := gin.New()
router.Use(digestAuth.Middleware())
router.Use(da.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "success")
})
request := httptest.NewRequest("GET", "/test", nil)
request.SetBasicAuth("user", "pass")
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req := httptest.NewRequest("GET", "/test", nil)
req.SetBasicAuth("user", "pass")
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK {
t.Errorf("expected status 200, got %d", recorder.Code)
if w.Code != http.StatusOK {
t.Errorf("expected status 200, got %d", w.Code)
}
}
// TestAuth_BasicAuth_Bad — wrong credentials return 401
func TestAuth_BasicAuth_Bad(t *testing.T) {
authConfig := AuthConfig{
func TestMiddlewareBasicAuthInvalid(t *testing.T) {
cfg := AuthConfig{
Enabled: true,
Username: "user",
Password: "pass",
Realm: "Test",
NonceExpiry: 5 * time.Minute,
}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
da := NewDigestAuth(cfg)
defer da.Stop()
router := gin.New()
router.Use(digestAuth.Middleware())
router.Use(da.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "success")
})
@ -261,48 +242,47 @@ func TestAuth_BasicAuth_Bad(t *testing.T) {
{"empty password", "user", ""},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
request := httptest.NewRequest("GET", "/test", nil)
request.SetBasicAuth(testCase.user, testCase.password)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
req := httptest.NewRequest("GET", "/test", nil)
req.SetBasicAuth(tc.user, tc.password)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusUnauthorized {
t.Errorf("expected status 401, got %d", recorder.Code)
if w.Code != http.StatusUnauthorized {
t.Errorf("expected status 401, got %d", w.Code)
}
})
}
}
// TestAuth_DigestAuth_Good — full digest auth handshake succeeds
func TestAuth_DigestAuth_Good(t *testing.T) {
authConfig := AuthConfig{
func TestMiddlewareDigestAuthValid(t *testing.T) {
cfg := AuthConfig{
Enabled: true,
Username: "testuser",
Password: "testpass",
Realm: "Test Realm",
NonceExpiry: 5 * time.Minute,
}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
da := NewDigestAuth(cfg)
defer da.Stop()
router := gin.New()
router.Use(digestAuth.Middleware())
router.Use(da.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "success")
})
// First request to get nonce
request := httptest.NewRequest("GET", "/test", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req := httptest.NewRequest("GET", "/test", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusUnauthorized {
t.Fatalf("expected 401 to get nonce, got %d", recorder.Code)
if w.Code != http.StatusUnauthorized {
t.Fatalf("expected 401 to get nonce, got %d", w.Code)
}
wwwAuth := recorder.Header().Get("WWW-Authenticate")
wwwAuth := w.Header().Get("WWW-Authenticate")
params := parseDigestParams(wwwAuth[7:]) // Skip "Digest "
nonce := params["nonce"]
@ -312,84 +292,82 @@ func TestAuth_DigestAuth_Good(t *testing.T) {
// Build digest auth response
uri := "/test"
nonceCount := "00000001"
nc := "00000001"
cnonce := "abc123"
qop := "auth"
ha1 := md5Hash(fmt.Sprintf("%s:%s:%s", authConfig.Username, authConfig.Realm, authConfig.Password))
ha1 := md5Hash(fmt.Sprintf("%s:%s:%s", cfg.Username, cfg.Realm, cfg.Password))
ha2 := md5Hash(fmt.Sprintf("GET:%s", uri))
response := md5Hash(fmt.Sprintf("%s:%s:%s:%s:%s:%s", ha1, nonce, nonceCount, cnonce, qop, ha2))
response := md5Hash(fmt.Sprintf("%s:%s:%s:%s:%s:%s", ha1, nonce, nc, cnonce, qop, ha2))
authHeader := fmt.Sprintf(
`Digest username="%s", realm="%s", nonce="%s", uri="%s", qop=%s, nc=%s, cnonce="%s", response="%s"`,
authConfig.Username, authConfig.Realm, nonce, uri, qop, nonceCount, cnonce, response,
cfg.Username, cfg.Realm, nonce, uri, qop, nc, cnonce, response,
)
// Second request with digest auth
secondRequest := httptest.NewRequest("GET", "/test", nil)
secondRequest.Header.Set("Authorization", authHeader)
authRecorder := httptest.NewRecorder()
router.ServeHTTP(authRecorder, secondRequest)
req2 := httptest.NewRequest("GET", "/test", nil)
req2.Header.Set("Authorization", authHeader)
w2 := httptest.NewRecorder()
router.ServeHTTP(w2, req2)
if authRecorder.Code != http.StatusOK {
t.Errorf("expected status 200, got %d; body: %s", authRecorder.Code, authRecorder.Body.String())
if w2.Code != http.StatusOK {
t.Errorf("expected status 200, got %d; body: %s", w2.Code, w2.Body.String())
}
}
// TestAuth_DigestAuth_Bad — invalid nonce returns 401
func TestAuth_DigestAuth_Bad(t *testing.T) {
authConfig := AuthConfig{
func TestMiddlewareDigestAuthInvalidNonce(t *testing.T) {
cfg := AuthConfig{
Enabled: true,
Username: "user",
Password: "pass",
Realm: "Test",
NonceExpiry: 5 * time.Minute,
}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
da := NewDigestAuth(cfg)
defer da.Stop()
router := gin.New()
router.Use(digestAuth.Middleware())
router.Use(da.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "success")
})
// Try with a fake nonce that was never issued
authHeader := `Digest username="user", realm="Test", nonce="fakenonce123", uri="/test", qop=auth, nc=00000001, cnonce="abc", response="xxx"`
request := httptest.NewRequest("GET", "/test", nil)
request.Header.Set("Authorization", authHeader)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req := httptest.NewRequest("GET", "/test", nil)
req.Header.Set("Authorization", authHeader)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusUnauthorized {
t.Errorf("expected status 401 for invalid nonce, got %d", recorder.Code)
if w.Code != http.StatusUnauthorized {
t.Errorf("expected status 401 for invalid nonce, got %d", w.Code)
}
}
// TestAuth_DigestAuth_Ugly — expired nonce returns 401
func TestAuth_DigestAuth_Ugly(t *testing.T) {
authConfig := AuthConfig{
func TestMiddlewareDigestAuthExpiredNonce(t *testing.T) {
cfg := AuthConfig{
Enabled: true,
Username: "user",
Password: "pass",
Realm: "Test",
NonceExpiry: 50 * time.Millisecond, // Very short for testing
}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
da := NewDigestAuth(cfg)
defer da.Stop()
router := gin.New()
router.Use(digestAuth.Middleware())
router.Use(da.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "success")
})
// Get a valid nonce
request := httptest.NewRequest("GET", "/test", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req := httptest.NewRequest("GET", "/test", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
wwwAuth := recorder.Header().Get("WWW-Authenticate")
wwwAuth := w.Header().Get("WWW-Authenticate")
params := parseDigestParams(wwwAuth[7:])
nonce := params["nonce"]
@ -398,27 +376,26 @@ func TestAuth_DigestAuth_Ugly(t *testing.T) {
// Try to use expired nonce
uri := "/test"
ha1 := md5Hash(fmt.Sprintf("%s:%s:%s", authConfig.Username, authConfig.Realm, authConfig.Password))
ha1 := md5Hash(fmt.Sprintf("%s:%s:%s", cfg.Username, cfg.Realm, cfg.Password))
ha2 := md5Hash(fmt.Sprintf("GET:%s", uri))
response := md5Hash(fmt.Sprintf("%s:%s:%s", ha1, nonce, ha2))
authHeader := fmt.Sprintf(
`Digest username="%s", realm="%s", nonce="%s", uri="%s", response="%s"`,
authConfig.Username, authConfig.Realm, nonce, uri, response,
cfg.Username, cfg.Realm, nonce, uri, response,
)
secondRequest := httptest.NewRequest("GET", "/test", nil)
secondRequest.Header.Set("Authorization", authHeader)
req2 := httptest.NewRequest("GET", "/test", nil)
req2.Header.Set("Authorization", authHeader)
w2 := httptest.NewRecorder()
router.ServeHTTP(w2, secondRequest)
router.ServeHTTP(w2, req2)
if w2.Code != http.StatusUnauthorized {
t.Errorf("expected status 401 for expired nonce, got %d", w2.Code)
}
}
// TestAuth_ParseDigestParams_Good — parseDigestParams(authHeader[7:]) returns expected map
func TestAuth_ParseDigestParams_Good(t *testing.T) {
func TestParseDigestParams(t *testing.T) {
testCases := []struct {
name string
input string
@ -462,12 +439,17 @@ func TestAuth_ParseDigestParams_Good(t *testing.T) {
"response": "hash",
},
},
{
name: "empty string",
input: "",
expected: map[string]string{},
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
result := parseDigestParams(testCase.input)
for key, expectedVal := range testCase.expected {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := parseDigestParams(tc.input)
for key, expectedVal := range tc.expected {
if result[key] != expectedVal {
t.Errorf("key %s: expected %s, got %s", key, expectedVal, result[key])
}
@ -476,55 +458,37 @@ func TestAuth_ParseDigestParams_Good(t *testing.T) {
}
}
// TestAuth_ParseDigestParams_Ugly — parseDigestParams("") returns empty map
func TestAuth_ParseDigestParams_Ugly(t *testing.T) {
result := parseDigestParams("")
if len(result) != 0 {
t.Errorf("expected empty map for empty input, got %v", result)
}
}
// TestAuth_Md5Hash_Good — md5Hash("hello") == "5d41402abc4b2a76b9719d911017c592"
func TestAuth_Md5Hash_Good(t *testing.T) {
func TestMd5Hash(t *testing.T) {
testCases := []struct {
input string
expected string
}{
{"hello", "5d41402abc4b2a76b9719d911017c592"},
{"", "d41d8cd98f00b204e9800998ecf8427e"},
{"user:realm:password", func() string {
hash := md5.Sum([]byte("user:realm:password"))
return hex.EncodeToString(hash[:])
h := md5.Sum([]byte("user:realm:password"))
return hex.EncodeToString(h[:])
}()},
}
for _, testCase := range testCases {
t.Run(testCase.input, func(t *testing.T) {
result := md5Hash(testCase.input)
if result != testCase.expected {
t.Errorf("expected %s, got %s", testCase.expected, result)
for _, tc := range testCases {
t.Run(tc.input, func(t *testing.T) {
result := md5Hash(tc.input)
if result != tc.expected {
t.Errorf("expected %s, got %s", tc.expected, result)
}
})
}
}
// TestAuth_Md5Hash_Ugly — md5Hash("") returns known empty-string MD5
func TestAuth_Md5Hash_Ugly(t *testing.T) {
result := md5Hash("")
expected := "d41d8cd98f00b204e9800998ecf8427e"
if result != expected {
t.Errorf("expected %s, got %s", expected, result)
}
}
// TestAuth_NonceGeneration_Good — digestAuth.generateNonce() returns 32-char hex, no duplicates
func TestAuth_NonceGeneration_Good(t *testing.T) {
authConfig := DefaultAuthConfig()
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
func TestNonceGeneration(t *testing.T) {
cfg := DefaultAuthConfig()
da := NewDigestAuth(cfg)
defer da.Stop()
nonces := make(map[string]bool)
for i := 0; i < 100; i++ {
nonce := digestAuth.generateNonce()
nonce := da.generateNonce()
if len(nonce) != 32 { // 16 bytes = 32 hex chars
t.Errorf("expected nonce length 32, got %d", len(nonce))
}
@ -535,14 +499,13 @@ func TestAuth_NonceGeneration_Good(t *testing.T) {
}
}
// TestAuth_OpaqueGeneration_Good — digestAuth.generateOpaque() is stable per realm
func TestAuth_OpaqueGeneration_Good(t *testing.T) {
authConfig := AuthConfig{Realm: "TestRealm"}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
func TestOpaqueGeneration(t *testing.T) {
cfg := AuthConfig{Realm: "TestRealm"}
da := NewDigestAuth(cfg)
defer da.Stop()
opaque1 := digestAuth.generateOpaque()
opaque2 := digestAuth.generateOpaque()
opaque1 := da.generateOpaque()
opaque2 := da.generateOpaque()
// Same realm should produce same opaque
if opaque1 != opaque2 {
@ -556,24 +519,23 @@ func TestAuth_OpaqueGeneration_Good(t *testing.T) {
}
}
// TestAuth_NonceCleanup_Ugly — expired nonces are removed by background goroutine
func TestAuth_NonceCleanup_Ugly(t *testing.T) {
authConfig := AuthConfig{
func TestNonceCleanup(t *testing.T) {
cfg := AuthConfig{
Enabled: true,
Username: "user",
Password: "pass",
Realm: "Test",
NonceExpiry: 50 * time.Millisecond,
}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
da := NewDigestAuth(cfg)
defer da.Stop()
// Store a nonce
nonce := digestAuth.generateNonce()
digestAuth.nonces.Store(nonce, time.Now())
nonce := da.generateNonce()
da.nonces.Store(nonce, time.Now())
// Verify it exists
if _, ok := digestAuth.nonces.Load(nonce); !ok {
if _, ok := da.nonces.Load(nonce); !ok {
t.Error("nonce should exist immediately after storing")
}
@ -581,11 +543,12 @@ func TestAuth_NonceCleanup_Ugly(t *testing.T) {
time.Sleep(150 * time.Millisecond)
// Verify it was cleaned up
if _, ok := digestAuth.nonces.Load(nonce); ok {
if _, ok := da.nonces.Load(nonce); ok {
t.Error("expired nonce should have been cleaned up")
}
}
// Helper function
func authTestContains(s, substr string) bool {
for i := 0; i <= len(s)-len(substr); i++ {
if s[i:i+len(substr)] == substr {
@ -595,6 +558,7 @@ func authTestContains(s, substr string) bool {
return false
}
// Benchmark tests
func BenchmarkMd5Hash(b *testing.B) {
input := "user:realm:password"
for i := 0; i < b.N; i++ {
@ -603,38 +567,38 @@ func BenchmarkMd5Hash(b *testing.B) {
}
func BenchmarkNonceGeneration(b *testing.B) {
authConfig := DefaultAuthConfig()
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
cfg := DefaultAuthConfig()
da := NewDigestAuth(cfg)
defer da.Stop()
for i := 0; i < b.N; i++ {
digestAuth.generateNonce()
da.generateNonce()
}
}
func BenchmarkBasicAuthValidation(b *testing.B) {
authConfig := AuthConfig{
cfg := AuthConfig{
Enabled: true,
Username: "user",
Password: "pass",
Realm: "Test",
NonceExpiry: 5 * time.Minute,
}
digestAuth := NewDigestAuth(authConfig)
defer digestAuth.Stop()
da := NewDigestAuth(cfg)
defer da.Stop()
router := gin.New()
router.Use(digestAuth.Middleware())
router.Use(da.Middleware())
router.GET("/test", func(c *gin.Context) {
c.Status(http.StatusOK)
})
request := httptest.NewRequest("GET", "/test", nil)
request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("user:pass")))
req := httptest.NewRequest("GET", "/test", nil)
req.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("user:pass")))
b.ResetTimer()
for i := 0; i < b.N; i++ {
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
}
}

View file

@ -6,53 +6,49 @@ import (
"sync"
)
// buffer := jsonBufferPool.Get().(*bytes.Buffer)
// buffer.Reset()
// defer jsonBufferPool.Put(buffer)
var jsonBufferPool = sync.Pool{
New: func() any {
// bufferPool provides reusable byte buffers for JSON encoding.
// This reduces allocation overhead in hot paths like WebSocket event serialization.
var bufferPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(make([]byte, 0, 1024))
},
}
// buffer := acquireJSONBuffer()
// defer releaseJSONBuffer(buffer)
func acquireJSONBuffer() *bytes.Buffer {
buffer := jsonBufferPool.Get().(*bytes.Buffer)
buffer.Reset()
return buffer
// getBuffer retrieves a buffer from the pool.
func getBuffer() *bytes.Buffer {
buf := bufferPool.Get().(*bytes.Buffer)
buf.Reset()
return buf
}
// releaseJSONBuffer(buffer) returns the buffer to the pool when it stays under 64 KB.
func releaseJSONBuffer(buffer *bytes.Buffer) {
if buffer.Cap() <= 65536 {
jsonBufferPool.Put(buffer)
// putBuffer returns a buffer to the pool.
func putBuffer(buf *bytes.Buffer) {
// Don't pool buffers that grew too large (>64KB)
if buf.Cap() <= 65536 {
bufferPool.Put(buf)
}
}
// UnmarshalJSON(data, &message)
func UnmarshalJSON(data []byte, target interface{}) error {
return json.Unmarshal(data, target)
}
// MarshalJSON encodes a value to JSON using a pooled buffer.
// Returns a copy of the encoded bytes (safe to use after the function returns).
func MarshalJSON(v interface{}) ([]byte, error) {
buf := getBuffer()
defer putBuffer(buf)
// data, err := MarshalJSON(stats) // safe to keep after the call returns.
func MarshalJSON(value interface{}) ([]byte, error) {
buffer := acquireJSONBuffer()
defer releaseJSONBuffer(buffer)
encoder := json.NewEncoder(buffer)
// Keep characters like < and > unchanged so API responses match json.Marshal.
encoder.SetEscapeHTML(false)
if err := encoder.Encode(value); err != nil {
enc := json.NewEncoder(buf)
// Don't escape HTML characters (matches json.Marshal behavior for these use cases)
enc.SetEscapeHTML(false)
if err := enc.Encode(v); err != nil {
return nil, err
}
// json.Encoder.Encode adds a newline; trim it so callers get compact JSON.
data := buffer.Bytes()
// json.Encoder.Encode adds a newline; remove it to match json.Marshal
data := buf.Bytes()
if len(data) > 0 && data[len(data)-1] == '\n' {
data = data[:len(data)-1]
}
// Return a copy since the buffer will be reused
result := make([]byte, len(data))
copy(result, data)
return result, nil

View file

@ -1,69 +0,0 @@
package mining
import (
"testing"
)
func TestBufpool_MarshalJSON_Good(t *testing.T) {
data := map[string]string{"key": "value"}
result, err := MarshalJSON(data)
if err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if len(result) == 0 {
t.Fatal("expected non-empty result")
}
}
func TestBufpool_MarshalJSON_Bad(t *testing.T) {
// Channels cannot be marshalled
result, err := MarshalJSON(make(chan int))
if err == nil {
t.Fatal("expected error for non-marshallable type")
}
if result != nil {
t.Fatalf("expected nil result, got %v", result)
}
}
func TestBufpool_MarshalJSON_Ugly(t *testing.T) {
// nil value should produce "null"
result, err := MarshalJSON(nil)
if err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if string(result) != "null" {
t.Fatalf("expected \"null\", got %q", string(result))
}
}
func TestBufpool_UnmarshalJSON_Good(t *testing.T) {
var target map[string]string
err := UnmarshalJSON([]byte(`{"key":"value"}`), &target)
if err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if target["key"] != "value" {
t.Fatalf("expected value \"value\", got %q", target["key"])
}
}
func TestBufpool_UnmarshalJSON_Bad(t *testing.T) {
var target map[string]string
err := UnmarshalJSON([]byte(`not valid json`), &target)
if err == nil {
t.Fatal("expected error for invalid JSON")
}
}
func TestBufpool_UnmarshalJSON_Ugly(t *testing.T) {
// Empty JSON object into a map
var target map[string]string
err := UnmarshalJSON([]byte(`{}`), &target)
if err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if len(target) != 0 {
t.Fatalf("expected empty map, got %d entries", len(target))
}
}

View file

@ -1,20 +1,22 @@
package mining
import (
"errors"
"sync"
"time"
"forge.lthn.ai/Snider/Mining/pkg/logging"
"github.com/Snider/Mining/pkg/logging"
)
// if cb.State() == CircuitClosed { /* requests are flowing normally */ }
// if cb.State() == CircuitOpen { /* circuit tripped; requests are rejected */ }
// if cb.State() == CircuitHalfOpen { /* probe request allowed; awaiting SuccessThreshold */ }
// CircuitState represents the state of a circuit breaker
type CircuitState int
const (
CircuitClosed CircuitState = iota
// CircuitClosed means the circuit is functioning normally
CircuitClosed CircuitState = iota
// CircuitOpen means the circuit has tripped and requests are being rejected
CircuitOpen
// CircuitHalfOpen means the circuit is testing if the service has recovered
CircuitHalfOpen
)
@ -31,15 +33,17 @@ func (s CircuitState) String() string {
}
}
// CircuitBreakerConfig{FailureThreshold: 3, ResetTimeout: 30 * time.Second, SuccessThreshold: 1}
// CircuitBreakerConfig holds configuration for a circuit breaker
type CircuitBreakerConfig struct {
// FailureThreshold is the number of failures before opening the circuit
FailureThreshold int
ResetTimeout time.Duration
// ResetTimeout is how long to wait before attempting recovery
ResetTimeout time.Duration
// SuccessThreshold is the number of successes needed in half-open state to close
SuccessThreshold int
}
// configuration := DefaultCircuitBreakerConfig()
// breaker := NewCircuitBreaker("github-api", configuration)
// DefaultCircuitBreakerConfig returns sensible defaults
func DefaultCircuitBreakerConfig() CircuitBreakerConfig {
return CircuitBreakerConfig{
FailureThreshold: 3,
@ -48,8 +52,7 @@ func DefaultCircuitBreakerConfig() CircuitBreakerConfig {
}
}
// breaker := NewCircuitBreaker("github-api", DefaultCircuitBreakerConfig())
// result, err := cb.Execute(func() (interface{}, error) { return fetchStats(ctx) })
// CircuitBreaker implements the circuit breaker pattern
type CircuitBreaker struct {
name string
config CircuitBreakerConfig
@ -57,17 +60,17 @@ type CircuitBreaker struct {
failures int
successes int
lastFailure time.Time
mutex sync.RWMutex
mu sync.RWMutex
cachedResult interface{}
cachedErr error
lastCacheTime time.Time
cacheDuration time.Duration
}
// if err == ErrCircuitOpen { /* fallback to cached result */ }
var ErrCircuitOpen = NewMiningError(ErrCodeServiceUnavailable, "circuit breaker is open")
// ErrCircuitOpen is returned when the circuit is open
var ErrCircuitOpen = errors.New("circuit breaker is open")
// breaker := NewCircuitBreaker("github-api", DefaultCircuitBreakerConfig())
// NewCircuitBreaker creates a new circuit breaker
func NewCircuitBreaker(name string, config CircuitBreakerConfig) *CircuitBreaker {
return &CircuitBreaker{
name: name,
@ -77,56 +80,61 @@ func NewCircuitBreaker(name string, config CircuitBreakerConfig) *CircuitBreaker
}
}
// if circuitBreaker.State() == CircuitOpen { return nil, ErrCircuitOpen }
func (circuitBreaker *CircuitBreaker) State() CircuitState {
circuitBreaker.mutex.RLock()
defer circuitBreaker.mutex.RUnlock()
return circuitBreaker.state
// State returns the current circuit state
func (cb *CircuitBreaker) State() CircuitState {
cb.mu.RLock()
defer cb.mu.RUnlock()
return cb.state
}
// result, err := circuitBreaker.Execute(func() (interface{}, error) { return fetchStats(ctx) })
func (circuitBreaker *CircuitBreaker) Execute(operation func() (interface{}, error)) (interface{}, error) {
if !circuitBreaker.allowRequest() {
circuitBreaker.mutex.RLock()
if circuitBreaker.cachedResult != nil && time.Since(circuitBreaker.lastCacheTime) < circuitBreaker.cacheDuration {
result := circuitBreaker.cachedResult
circuitBreaker.mutex.RUnlock()
// Execute runs the given function with circuit breaker protection
func (cb *CircuitBreaker) Execute(fn func() (interface{}, error)) (interface{}, error) {
// Check if we should allow this request
if !cb.allowRequest() {
// Return cached result if available
cb.mu.RLock()
if cb.cachedResult != nil && time.Since(cb.lastCacheTime) < cb.cacheDuration {
result := cb.cachedResult
cb.mu.RUnlock()
logging.Debug("circuit breaker returning cached result", logging.Fields{
"name": circuitBreaker.name,
"state": circuitBreaker.state.String(),
"name": cb.name,
"state": cb.state.String(),
})
return result, nil
}
circuitBreaker.mutex.RUnlock()
cb.mu.RUnlock()
return nil, ErrCircuitOpen
}
result, err := operation()
// Execute the function
result, err := fn()
// Record the result
if err != nil {
circuitBreaker.recordFailure()
cb.recordFailure()
} else {
circuitBreaker.recordSuccess(result)
cb.recordSuccess(result)
}
return result, err
}
// if circuitBreaker.allowRequest() { /* execute the function */ }
func (circuitBreaker *CircuitBreaker) allowRequest() bool {
circuitBreaker.mutex.Lock()
defer circuitBreaker.mutex.Unlock()
// allowRequest checks if a request should be allowed through
func (cb *CircuitBreaker) allowRequest() bool {
cb.mu.Lock()
defer cb.mu.Unlock()
switch circuitBreaker.state {
switch cb.state {
case CircuitClosed:
return true
case CircuitOpen:
if time.Since(circuitBreaker.lastFailure) > circuitBreaker.config.ResetTimeout {
circuitBreaker.state = CircuitHalfOpen
circuitBreaker.successes = 0
// Check if we should transition to half-open
if time.Since(cb.lastFailure) > cb.config.ResetTimeout {
cb.state = CircuitHalfOpen
cb.successes = 0
logging.Info("circuit breaker transitioning to half-open", logging.Fields{
"name": circuitBreaker.name,
"name": cb.name,
})
return true
}
@ -141,79 +149,80 @@ func (circuitBreaker *CircuitBreaker) allowRequest() bool {
}
}
// circuitBreaker.recordFailure() // increments failures; opens circuit after FailureThreshold is reached
func (circuitBreaker *CircuitBreaker) recordFailure() {
circuitBreaker.mutex.Lock()
defer circuitBreaker.mutex.Unlock()
// recordFailure records a failed request
func (cb *CircuitBreaker) recordFailure() {
cb.mu.Lock()
defer cb.mu.Unlock()
circuitBreaker.failures++
circuitBreaker.lastFailure = time.Now()
cb.failures++
cb.lastFailure = time.Now()
switch circuitBreaker.state {
switch cb.state {
case CircuitClosed:
if circuitBreaker.failures >= circuitBreaker.config.FailureThreshold {
circuitBreaker.state = CircuitOpen
if cb.failures >= cb.config.FailureThreshold {
cb.state = CircuitOpen
logging.Warn("circuit breaker opened", logging.Fields{
"name": circuitBreaker.name,
"failures": circuitBreaker.failures,
"name": cb.name,
"failures": cb.failures,
})
}
case CircuitHalfOpen:
// Probe failed, back to open
circuitBreaker.state = CircuitOpen
cb.state = CircuitOpen
logging.Warn("circuit breaker probe failed, reopening", logging.Fields{
"name": circuitBreaker.name,
"name": cb.name,
})
}
}
// circuitBreaker.recordSuccess(stats) // caches result, resets failures; in HalfOpen closes the circuit after SuccessThreshold
func (circuitBreaker *CircuitBreaker) recordSuccess(result interface{}) {
circuitBreaker.mutex.Lock()
defer circuitBreaker.mutex.Unlock()
// recordSuccess records a successful request
func (cb *CircuitBreaker) recordSuccess(result interface{}) {
cb.mu.Lock()
defer cb.mu.Unlock()
circuitBreaker.cachedResult = result
circuitBreaker.lastCacheTime = time.Now()
circuitBreaker.cachedErr = nil
// Cache the successful result
cb.cachedResult = result
cb.lastCacheTime = time.Now()
cb.cachedErr = nil
switch circuitBreaker.state {
switch cb.state {
case CircuitClosed:
// Reset failure count on success
circuitBreaker.failures = 0
cb.failures = 0
case CircuitHalfOpen:
circuitBreaker.successes++
if circuitBreaker.successes >= circuitBreaker.config.SuccessThreshold {
circuitBreaker.state = CircuitClosed
circuitBreaker.failures = 0
cb.successes++
if cb.successes >= cb.config.SuccessThreshold {
cb.state = CircuitClosed
cb.failures = 0
logging.Info("circuit breaker closed after successful probe", logging.Fields{
"name": circuitBreaker.name,
"name": cb.name,
})
}
}
}
// circuitBreaker.Reset() // force closed state after maintenance window
func (circuitBreaker *CircuitBreaker) Reset() {
circuitBreaker.mutex.Lock()
defer circuitBreaker.mutex.Unlock()
// Reset manually resets the circuit breaker to closed state
func (cb *CircuitBreaker) Reset() {
cb.mu.Lock()
defer cb.mu.Unlock()
circuitBreaker.state = CircuitClosed
circuitBreaker.failures = 0
circuitBreaker.successes = 0
cb.state = CircuitClosed
cb.failures = 0
cb.successes = 0
logging.Debug("circuit breaker manually reset", logging.Fields{
"name": circuitBreaker.name,
"name": cb.name,
})
}
// if result, ok := circuitBreaker.GetCached(); ok { return result, nil }
func (circuitBreaker *CircuitBreaker) GetCached() (interface{}, bool) {
circuitBreaker.mutex.RLock()
defer circuitBreaker.mutex.RUnlock()
// GetCached returns the cached result if available
func (cb *CircuitBreaker) GetCached() (interface{}, bool) {
cb.mu.RLock()
defer cb.mu.RUnlock()
if circuitBreaker.cachedResult != nil && time.Since(circuitBreaker.lastCacheTime) < circuitBreaker.cacheDuration {
return circuitBreaker.cachedResult, true
if cb.cachedResult != nil && time.Since(cb.lastCacheTime) < cb.cacheDuration {
return cb.cachedResult, true
}
return nil, false
}
@ -224,8 +233,7 @@ var (
githubCircuitBreakerOnce sync.Once
)
// breaker := getGitHubCircuitBreaker()
// result, err := breaker.Execute(func() (interface{}, error) { return fetchLatestVersion(ctx) })
// getGitHubCircuitBreaker returns the shared GitHub API circuit breaker
func getGitHubCircuitBreaker() *CircuitBreaker {
githubCircuitBreakerOnce.Do(func() {
githubCircuitBreaker = NewCircuitBreaker("github-api", CircuitBreakerConfig{

View file

@ -7,24 +7,21 @@ import (
"time"
)
// configuration := DefaultCircuitBreakerConfig()
// configuration.FailureThreshold == 3
func TestCircuitBreaker_DefaultConfig_Good(t *testing.T) {
configuration := DefaultCircuitBreakerConfig()
func TestCircuitBreakerDefaultConfig(t *testing.T) {
cfg := DefaultCircuitBreakerConfig()
if configuration.FailureThreshold != 3 {
t.Errorf("expected FailureThreshold 3, got %d", configuration.FailureThreshold)
if cfg.FailureThreshold != 3 {
t.Errorf("expected FailureThreshold 3, got %d", cfg.FailureThreshold)
}
if configuration.ResetTimeout != 30*time.Second {
t.Errorf("expected ResetTimeout 30s, got %v", configuration.ResetTimeout)
if cfg.ResetTimeout != 30*time.Second {
t.Errorf("expected ResetTimeout 30s, got %v", cfg.ResetTimeout)
}
if configuration.SuccessThreshold != 1 {
t.Errorf("expected SuccessThreshold 1, got %d", configuration.SuccessThreshold)
if cfg.SuccessThreshold != 1 {
t.Errorf("expected SuccessThreshold 1, got %d", cfg.SuccessThreshold)
}
}
// CircuitClosed.String() == "closed"
func TestCircuitBreaker_StateString_Good(t *testing.T) {
func TestCircuitBreakerStateString(t *testing.T) {
tests := []struct {
state CircuitState
expected string
@ -32,32 +29,25 @@ func TestCircuitBreaker_StateString_Good(t *testing.T) {
{CircuitClosed, "closed"},
{CircuitOpen, "open"},
{CircuitHalfOpen, "half-open"},
{CircuitState(99), "unknown"},
}
for _, testCase := range tests {
if got := testCase.state.String(); got != testCase.expected {
t.Errorf("state %d: expected %s, got %s", testCase.state, testCase.expected, got)
for _, tt := range tests {
if got := tt.state.String(); got != tt.expected {
t.Errorf("state %d: expected %s, got %s", tt.state, tt.expected, got)
}
}
}
// CircuitState(99).String() == "unknown"
func TestCircuitBreaker_StateString_Ugly(t *testing.T) {
if got := CircuitState(99).String(); got != "unknown" {
t.Errorf("expected 'unknown' for unknown state, got %s", got)
}
}
func TestCircuitBreakerClosed(t *testing.T) {
cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
// breaker := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
// result, err := breaker.Execute(func() (interface{}, error) { return "success", nil })
func TestCircuitBreaker_Execute_Good(t *testing.T) {
breaker := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
if breaker.State() != CircuitClosed {
if cb.State() != CircuitClosed {
t.Error("expected initial state to be closed")
}
result, err := breaker.Execute(func() (interface{}, error) {
// Successful execution
result, err := cb.Execute(func() (interface{}, error) {
return "success", nil
})
@ -67,62 +57,64 @@ func TestCircuitBreaker_Execute_Good(t *testing.T) {
if result != "success" {
t.Errorf("expected 'success', got %v", result)
}
if breaker.State() != CircuitClosed {
if cb.State() != CircuitClosed {
t.Error("state should still be closed after success")
}
}
// cb.Execute(failingFn) × FailureThreshold → cb.State() == CircuitOpen
func TestCircuitBreaker_Execute_Bad(t *testing.T) {
configuration := CircuitBreakerConfig{
func TestCircuitBreakerOpensAfterFailures(t *testing.T) {
cfg := CircuitBreakerConfig{
FailureThreshold: 2,
ResetTimeout: time.Minute,
SuccessThreshold: 1,
}
breaker := NewCircuitBreaker("test", configuration)
cb := NewCircuitBreaker("test", cfg)
testErr := errors.New("test error")
_, err := breaker.Execute(func() (interface{}, error) {
// First failure
_, err := cb.Execute(func() (interface{}, error) {
return nil, testErr
})
if err != testErr {
t.Errorf("expected test error, got %v", err)
}
if breaker.State() != CircuitClosed {
if cb.State() != CircuitClosed {
t.Error("should still be closed after 1 failure")
}
_, err = breaker.Execute(func() (interface{}, error) {
// Second failure - should open circuit
_, err = cb.Execute(func() (interface{}, error) {
return nil, testErr
})
if err != testErr {
t.Errorf("expected test error, got %v", err)
}
if breaker.State() != CircuitOpen {
if cb.State() != CircuitOpen {
t.Error("should be open after 2 failures")
}
}
// cb (open) → cb.Execute(fn) returns ErrCircuitOpen without calling fn
func TestCircuitBreaker_Execute_Ugly(t *testing.T) {
configuration := CircuitBreakerConfig{
func TestCircuitBreakerRejectsWhenOpen(t *testing.T) {
cfg := CircuitBreakerConfig{
FailureThreshold: 1,
ResetTimeout: time.Hour,
ResetTimeout: time.Hour, // Long timeout to keep circuit open
SuccessThreshold: 1,
}
breaker := NewCircuitBreaker("test", configuration)
cb := NewCircuitBreaker("test", cfg)
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
// Open the circuit
cb.Execute(func() (interface{}, error) {
return nil, errors.New("fail")
})
if breaker.State() != CircuitOpen {
if cb.State() != CircuitOpen {
t.Fatal("circuit should be open")
}
// Next request should be rejected
called := false
_, err := breaker.Execute(func() (interface{}, error) {
_, err := cb.Execute(func() (interface{}, error) {
called = true
return "should not run", nil
})
@ -135,26 +127,28 @@ func TestCircuitBreaker_Execute_Ugly(t *testing.T) {
}
}
// cb (open) → sleep(ResetTimeout) → cb.Execute(successFn) → cb.State() == CircuitClosed
func TestCircuitBreaker_HalfOpen_Good(t *testing.T) {
configuration := CircuitBreakerConfig{
func TestCircuitBreakerTransitionsToHalfOpen(t *testing.T) {
cfg := CircuitBreakerConfig{
FailureThreshold: 1,
ResetTimeout: 50 * time.Millisecond,
SuccessThreshold: 1,
}
breaker := NewCircuitBreaker("test", configuration)
cb := NewCircuitBreaker("test", cfg)
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
// Open the circuit
cb.Execute(func() (interface{}, error) {
return nil, errors.New("fail")
})
if breaker.State() != CircuitOpen {
if cb.State() != CircuitOpen {
t.Fatal("circuit should be open")
}
// Wait for reset timeout
time.Sleep(100 * time.Millisecond)
result, err := breaker.Execute(func() (interface{}, error) {
// Next request should transition to half-open and execute
result, err := cb.Execute(func() (interface{}, error) {
return "probe success", nil
})
@ -164,45 +158,47 @@ func TestCircuitBreaker_HalfOpen_Good(t *testing.T) {
if result != "probe success" {
t.Errorf("expected 'probe success', got %v", result)
}
if breaker.State() != CircuitClosed {
if cb.State() != CircuitClosed {
t.Error("should be closed after successful probe")
}
}
// cb (half-open) → cb.Execute(failFn) → cb.State() == CircuitOpen
func TestCircuitBreaker_HalfOpen_Bad(t *testing.T) {
configuration := CircuitBreakerConfig{
func TestCircuitBreakerHalfOpenFailureReopens(t *testing.T) {
cfg := CircuitBreakerConfig{
FailureThreshold: 1,
ResetTimeout: 50 * time.Millisecond,
SuccessThreshold: 1,
}
breaker := NewCircuitBreaker("test", configuration)
cb := NewCircuitBreaker("test", cfg)
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
// Open the circuit
cb.Execute(func() (interface{}, error) {
return nil, errors.New("fail")
})
// Wait for reset timeout
time.Sleep(100 * time.Millisecond)
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
// Probe fails
cb.Execute(func() (interface{}, error) {
return nil, errors.New("probe failed")
})
if breaker.State() != CircuitOpen {
if cb.State() != CircuitOpen {
t.Error("should be open after probe failure")
}
}
// cb.Execute(successFn) → opens circuit → cb.Execute(anyFn) returns cached value
func TestCircuitBreaker_Caching_Good(t *testing.T) {
configuration := CircuitBreakerConfig{
func TestCircuitBreakerCaching(t *testing.T) {
cfg := CircuitBreakerConfig{
FailureThreshold: 1,
ResetTimeout: time.Hour,
SuccessThreshold: 1,
}
breaker := NewCircuitBreaker("test", configuration)
cb := NewCircuitBreaker("test", cfg)
result, err := breaker.Execute(func() (interface{}, error) {
// Successful call - caches result
result, err := cb.Execute(func() (interface{}, error) {
return "cached value", nil
})
if err != nil {
@ -212,11 +208,13 @@ func TestCircuitBreaker_Caching_Good(t *testing.T) {
t.Fatalf("expected 'cached value', got %v", result)
}
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
// Open the circuit
cb.Execute(func() (interface{}, error) {
return nil, errors.New("fail")
})
result, err = breaker.Execute(func() (interface{}, error) {
// Should return cached value when circuit is open
result, err = cb.Execute(func() (interface{}, error) {
return "should not run", nil
})
@ -228,20 +226,21 @@ func TestCircuitBreaker_Caching_Good(t *testing.T) {
}
}
// breaker.Execute(successFn) → result, ok := breaker.GetCached() → ok == true
func TestCircuitBreaker_GetCached_Good(t *testing.T) {
breaker := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
func TestCircuitBreakerGetCached(t *testing.T) {
cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
_, ok := breaker.GetCached()
// No cache initially
_, ok := cb.GetCached()
if ok {
t.Error("expected no cached value initially")
}
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
// Cache a value
cb.Execute(func() (interface{}, error) {
return "test value", nil
})
cached, ok := breaker.GetCached()
cached, ok := cb.GetCached()
if !ok {
t.Error("expected cached value")
}
@ -250,50 +249,40 @@ func TestCircuitBreaker_GetCached_Good(t *testing.T) {
}
}
// breaker := NewCircuitBreaker(...) → _, ok := breaker.GetCached() → ok == false
func TestCircuitBreaker_GetCached_Bad(t *testing.T) {
breaker := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
_, ok := breaker.GetCached()
if ok {
t.Error("expected no cached value on fresh circuit breaker")
}
}
// cb (open) → cb.Reset() → cb.State() == CircuitClosed
func TestCircuitBreaker_Reset_Good(t *testing.T) {
configuration := CircuitBreakerConfig{
func TestCircuitBreakerReset(t *testing.T) {
cfg := CircuitBreakerConfig{
FailureThreshold: 1,
ResetTimeout: time.Hour,
SuccessThreshold: 1,
}
breaker := NewCircuitBreaker("test", configuration)
cb := NewCircuitBreaker("test", cfg)
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
// Open the circuit
cb.Execute(func() (interface{}, error) {
return nil, errors.New("fail")
})
if breaker.State() != CircuitOpen {
if cb.State() != CircuitOpen {
t.Fatal("circuit should be open")
}
breaker.Reset()
// Manual reset
cb.Reset()
if breaker.State() != CircuitClosed {
if cb.State() != CircuitClosed {
t.Error("circuit should be closed after reset")
}
}
// 100 goroutines concurrently call cb.Execute — no race condition or panic should occur.
func TestCircuitBreaker_Concurrency_Ugly(t *testing.T) {
breaker := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
func TestCircuitBreakerConcurrency(t *testing.T) {
cb := NewCircuitBreaker("test", DefaultCircuitBreakerConfig())
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
for i := 0; i < 100; i++ {
waitGroup.Add(1)
wg.Add(1)
go func(n int) {
defer waitGroup.Done()
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
defer wg.Done()
cb.Execute(func() (interface{}, error) {
if n%3 == 0 {
return nil, errors.New("fail")
}
@ -301,44 +290,43 @@ func TestCircuitBreaker_Concurrency_Ugly(t *testing.T) {
})
}(i)
}
waitGroup.Wait()
wg.Wait()
_ = breaker.State()
// Just verify no panics occurred
_ = cb.State()
}
// cb1 := getGitHubCircuitBreaker(); cb2 := getGitHubCircuitBreaker(); cb1 == cb2
func TestCircuitBreaker_GitHubSingleton_Good(t *testing.T) {
firstBreaker := getGitHubCircuitBreaker()
secondBreaker := getGitHubCircuitBreaker()
func TestGetGitHubCircuitBreaker(t *testing.T) {
cb1 := getGitHubCircuitBreaker()
cb2 := getGitHubCircuitBreaker()
if firstBreaker != secondBreaker {
if cb1 != cb2 {
t.Error("expected singleton circuit breaker")
}
if firstBreaker.name != "github-api" {
t.Errorf("expected name 'github-api', got %s", firstBreaker.name)
if cb1.name != "github-api" {
t.Errorf("expected name 'github-api', got %s", cb1.name)
}
}
// Benchmark tests
func BenchmarkCircuitBreakerExecute(b *testing.B) {
breaker := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig())
cb := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig())
b.ResetTimer()
for i := 0; i < b.N; i++ {
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
cb.Execute(func() (interface{}, error) {
return "result", nil
})
}
}
func BenchmarkCircuitBreakerConcurrent(b *testing.B) {
breaker := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig())
cb := NewCircuitBreaker("bench", DefaultCircuitBreakerConfig())
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
breaker.Execute(func() (interface{}, error) { //nolint:errcheck
cb.Execute(func() (interface{}, error) {
return "result", nil
})
}

View file

@ -9,9 +9,8 @@ import (
//go:embed component/*
var componentFS embed.FS
// fs, err := GetComponentFS()
// if err != nil { return err }
// router.StaticFS("/component", fs)
// GetComponentFS returns the embedded file system containing the web component.
// This allows the component to be served even when the package is used as a module.
func GetComponentFS() (http.FileSystem, error) {
sub, err := fs.Sub(componentFS, "component")
if err != nil {

View file

@ -2,6 +2,7 @@ package mining
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
@ -9,25 +10,25 @@ import (
"github.com/adrg/xdg"
)
// minersConfigMutex protects concurrent access to miners config file operations.
var minersConfigMutex sync.RWMutex
// configMu protects concurrent access to config file operations
var configMu sync.RWMutex
// MinerAutostartConfig{MinerType: "xmrig", Autostart: true, Config: &config}
// MinerAutostartConfig represents the configuration for a single miner's autostart settings.
type MinerAutostartConfig struct {
MinerType string `json:"minerType"`
Autostart bool `json:"autostart"`
Config *Config `json:"config,omitempty"` // Store the last used config
}
// DatabaseConfig{Enabled: true, RetentionDays: 30}
// DatabaseConfig holds configuration for SQLite database persistence.
type DatabaseConfig struct {
// minersConfig.Database.Enabled = false // disable hashrate persistence entirely
// Enabled determines if database persistence is active (default: true)
Enabled bool `json:"enabled"`
// minersConfig.Database.RetentionDays = 90 // purge rows older than 90 days (0 → defaults to 30)
// RetentionDays is how long to keep historical data (default: 30)
RetentionDays int `json:"retentionDays,omitempty"`
}
// defaultDatabaseConfig() // DatabaseConfig{Enabled: true, RetentionDays: 30}
// defaultDatabaseConfig returns the default database configuration.
func defaultDatabaseConfig() DatabaseConfig {
return DatabaseConfig{
Enabled: true,
@ -35,30 +36,28 @@ func defaultDatabaseConfig() DatabaseConfig {
}
}
// MinersConfig{Miners: []MinerAutostartConfig{}, Database: defaultDatabaseConfig()}
// MinersConfig represents the overall configuration for all miners, including autostart settings.
type MinersConfig struct {
Miners []MinerAutostartConfig `json:"miners"`
Database DatabaseConfig `json:"database"`
}
// minersConfigPath, err := getMinersConfigPath() // "/home/alice/.config/lethean-desktop/miners/config.json"
// getMinersConfigPath returns the path to the miners configuration file.
func getMinersConfigPath() (string, error) {
return xdg.ConfigFile("lethean-desktop/miners/config.json")
}
// minersConfig, err := LoadMinersConfig()
// if err != nil { return err }
// minersConfig.Database.Enabled = false
// LoadMinersConfig loads the miners configuration from the file system.
func LoadMinersConfig() (*MinersConfig, error) {
minersConfigMutex.RLock()
defer minersConfigMutex.RUnlock()
configMu.RLock()
defer configMu.RUnlock()
minersConfigPath, err := getMinersConfigPath()
configPath, err := getMinersConfigPath()
if err != nil {
return nil, ErrInternal("could not determine miners config path").WithCause(err)
return nil, fmt.Errorf("could not determine miners config path: %w", err)
}
data, err := os.ReadFile(minersConfigPath)
data, err := os.ReadFile(configPath)
if err != nil {
if os.IsNotExist(err) {
// Return empty config with defaults if file doesn't exist
@ -67,92 +66,93 @@ func LoadMinersConfig() (*MinersConfig, error) {
Database: defaultDatabaseConfig(),
}, nil
}
return nil, ErrInternal("failed to read miners config file").WithCause(err)
return nil, fmt.Errorf("failed to read miners config file: %w", err)
}
var minersConfig MinersConfig
if err := json.Unmarshal(data, &minersConfig); err != nil {
return nil, ErrInternal("failed to unmarshal miners config").WithCause(err)
var cfg MinersConfig
if err := json.Unmarshal(data, &cfg); err != nil {
return nil, fmt.Errorf("failed to unmarshal miners config: %w", err)
}
// Apply default database config if not set (for backwards compatibility)
if minersConfig.Database.RetentionDays == 0 {
minersConfig.Database = defaultDatabaseConfig()
if cfg.Database.RetentionDays == 0 {
cfg.Database = defaultDatabaseConfig()
}
return &minersConfig, nil
return &cfg, nil
}
// minersConfig.Database.RetentionDays = 60
// if err := SaveMinersConfig(minersConfig); err != nil { return err }
func SaveMinersConfig(minersConfig *MinersConfig) error {
minersConfigMutex.Lock()
defer minersConfigMutex.Unlock()
// SaveMinersConfig saves the miners configuration to the file system.
// Uses atomic write pattern: write to temp file, then rename.
func SaveMinersConfig(cfg *MinersConfig) error {
configMu.Lock()
defer configMu.Unlock()
minersConfigPath, err := getMinersConfigPath()
configPath, err := getMinersConfigPath()
if err != nil {
return ErrInternal("could not determine miners config path").WithCause(err)
return fmt.Errorf("could not determine miners config path: %w", err)
}
dir := filepath.Dir(minersConfigPath)
dir := filepath.Dir(configPath)
if err := os.MkdirAll(dir, 0755); err != nil {
return ErrInternal("failed to create config directory").WithCause(err)
return fmt.Errorf("failed to create config directory: %w", err)
}
data, err := json.MarshalIndent(minersConfig, "", " ")
data, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
return ErrInternal("failed to marshal miners config").WithCause(err)
return fmt.Errorf("failed to marshal miners config: %w", err)
}
return AtomicWriteFile(minersConfigPath, data, 0600)
return AtomicWriteFile(configPath, data, 0600)
}
// UpdateMinersConfig(func(minersConfig *MinersConfig) error { minersConfig.Miners = append(minersConfig.Miners, entry); return nil })
func UpdateMinersConfig(modifier func(*MinersConfig) error) error {
minersConfigMutex.Lock()
defer minersConfigMutex.Unlock()
// UpdateMinersConfig atomically loads, modifies, and saves the miners config.
// This prevents race conditions in read-modify-write operations.
func UpdateMinersConfig(fn func(*MinersConfig) error) error {
configMu.Lock()
defer configMu.Unlock()
minersConfigPath, err := getMinersConfigPath()
configPath, err := getMinersConfigPath()
if err != nil {
return ErrInternal("could not determine miners config path").WithCause(err)
return fmt.Errorf("could not determine miners config path: %w", err)
}
// Load current config
var minersConfig MinersConfig
data, err := os.ReadFile(minersConfigPath)
var cfg MinersConfig
data, err := os.ReadFile(configPath)
if err != nil {
if os.IsNotExist(err) {
minersConfig = MinersConfig{
cfg = MinersConfig{
Miners: []MinerAutostartConfig{},
Database: defaultDatabaseConfig(),
}
} else {
return ErrInternal("failed to read miners config file").WithCause(err)
return fmt.Errorf("failed to read miners config file: %w", err)
}
} else {
if err := json.Unmarshal(data, &minersConfig); err != nil {
return ErrInternal("failed to unmarshal miners config").WithCause(err)
if err := json.Unmarshal(data, &cfg); err != nil {
return fmt.Errorf("failed to unmarshal miners config: %w", err)
}
if minersConfig.Database.RetentionDays == 0 {
minersConfig.Database = defaultDatabaseConfig()
if cfg.Database.RetentionDays == 0 {
cfg.Database = defaultDatabaseConfig()
}
}
// Apply the modification
if err := modifier(&minersConfig); err != nil {
if err := fn(&cfg); err != nil {
return err
}
// Save atomically
dir := filepath.Dir(minersConfigPath)
dir := filepath.Dir(configPath)
if err := os.MkdirAll(dir, 0755); err != nil {
return ErrInternal("failed to create config directory").WithCause(err)
return fmt.Errorf("failed to create config directory: %w", err)
}
newData, err := json.MarshalIndent(minersConfig, "", " ")
newData, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
return ErrInternal("failed to marshal miners config").WithCause(err)
return fmt.Errorf("failed to marshal miners config: %w", err)
}
return AtomicWriteFile(minersConfigPath, newData, 0600)
return AtomicWriteFile(configPath, newData, 0600)
}

View file

@ -2,28 +2,32 @@ package mining
import (
"context"
"fmt"
"sync"
"forge.lthn.ai/Snider/Mining/pkg/database"
"forge.lthn.ai/Snider/Mining/pkg/logging"
"github.com/Snider/Mining/pkg/database"
"github.com/Snider/Mining/pkg/logging"
)
// containerConfig := mining.DefaultContainerConfig()
// containerConfig.ListenAddr = ":8080"
// containerConfig.SimulationMode = true
// ContainerConfig{ListenAddr: ":9090", DisplayAddr: "localhost:9090", SwaggerNamespace: "/api/v1/mining"}
// ContainerConfig{SimulationMode: true, Database: database.Config{Enabled: false}}
// ContainerConfig holds configuration for the service container.
type ContainerConfig struct {
Database database.Config
ListenAddr string
DisplayAddr string
// Database configuration
Database database.Config
// ListenAddr is the address to listen on (e.g., ":9090")
ListenAddr string
// DisplayAddr is the address shown in Swagger docs
DisplayAddr string
// SwaggerNamespace is the API path prefix
SwaggerNamespace string
SimulationMode bool
// SimulationMode enables simulation mode for testing
SimulationMode bool
}
// containerConfig := mining.DefaultContainerConfig()
// containerConfig.ListenAddr = ":8080"
// container := NewContainer(containerConfig)
// DefaultContainerConfig returns sensible defaults for the container.
func DefaultContainerConfig() ContainerConfig {
return ContainerConfig{
Database: database.Config{
@ -37,104 +41,110 @@ func DefaultContainerConfig() ContainerConfig {
}
}
// container := NewContainer(DefaultContainerConfig())
// container.Initialize(ctx); container.Start(ctx); defer container.Shutdown(ctx)
// Container manages the lifecycle of all services.
// It provides centralized initialization, dependency injection, and graceful shutdown.
type Container struct {
config ContainerConfig
mutex sync.RWMutex
manager ManagerInterface
profileManager *ProfileManager
nodeService *NodeService
eventHub *EventHub
service *Service
hashrateStore database.HashrateStore
config ContainerConfig
mu sync.RWMutex
// Core services
manager ManagerInterface
profileManager *ProfileManager
nodeService *NodeService
eventHub *EventHub
service *Service
// Database store (interface for testing)
hashrateStore database.HashrateStore
// Initialization state
initialized bool
transportStarted bool
shutdownChannel chan struct{}
shutdownCh chan struct{}
}
// container := NewContainer(DefaultContainerConfig())
// container.Initialize(ctx)
// NewContainer creates a new service container with the given configuration.
func NewContainer(config ContainerConfig) *Container {
return &Container{
config: config,
shutdownChannel: make(chan struct{}),
config: config,
shutdownCh: make(chan struct{}),
}
}
// if err := container.Initialize(ctx); err != nil { return err }
// container.Start(ctx)
func (container *Container) Initialize(ctx context.Context) error {
container.mutex.Lock()
defer container.mutex.Unlock()
// Initialize sets up all services in the correct order.
// This should be called before Start().
func (c *Container) Initialize(ctx context.Context) error {
c.mu.Lock()
defer c.mu.Unlock()
if container.initialized {
return ErrInternal("container already initialized")
if c.initialized {
return fmt.Errorf("container already initialized")
}
// database.Initialize(container.config.Database) enables HTTP handlers like GET /api/v1/mining/status to persist hashrate data.
if container.config.Database.Enabled {
if err := database.Initialize(container.config.Database); err != nil {
return ErrInternal("failed to initialize database").WithCause(err)
// 1. Initialize database (optional)
if c.config.Database.Enabled {
if err := database.Initialize(c.config.Database); err != nil {
return fmt.Errorf("failed to initialize database: %w", err)
}
container.hashrateStore = database.DefaultStore()
logging.Info("database initialized", logging.Fields{"retention_days": container.config.Database.RetentionDays})
c.hashrateStore = database.DefaultStore()
logging.Info("database initialized", logging.Fields{"retention_days": c.config.Database.RetentionDays})
} else {
container.hashrateStore = database.NopStore()
c.hashrateStore = database.NopStore()
logging.Info("database disabled, using no-op store", nil)
}
// 2. Initialize profile manager
var err error
// profileManager, err := NewProfileManager() keeps POST /api/v1/mining/profiles working even without XDG storage.
container.profileManager, err = NewProfileManager()
c.profileManager, err = NewProfileManager()
if err != nil {
return ErrInternal("failed to initialize profile manager").WithCause(err)
return fmt.Errorf("failed to initialize profile manager: %w", err)
}
// NewManagerForSimulation() keeps `mining serve` and `mining remote status` pointed at simulated miners during local development.
if container.config.SimulationMode {
container.manager = NewManagerForSimulation()
// 3. Initialize miner manager
if c.config.SimulationMode {
c.manager = NewManagerForSimulation()
} else {
container.manager = NewManager()
c.manager = NewManager()
}
// nodeService, err := NewNodeService() enables remote peer commands such as `mining remote status peer-19f3`.
container.nodeService, err = NewNodeService()
// 4. Initialize node service (optional - P2P features)
c.nodeService, err = NewNodeService()
if err != nil {
logging.Warn("node service unavailable", logging.Fields{"error": err})
// Continue without node service - P2P features will be unavailable
}
// NewEventHub() powers GET /ws/events for browsers that watch miner start and stop events.
container.eventHub = NewEventHub()
// 5. Initialize event hub for WebSocket
c.eventHub = NewEventHub()
// concreteManager.SetEventHub(container.eventHub) lets GET /ws/events stream miner lifecycle updates.
if concreteManager, ok := container.manager.(*Manager); ok {
concreteManager.SetEventHub(container.eventHub)
// Wire up event hub to manager
if mgr, ok := c.manager.(*Manager); ok {
mgr.SetEventHub(c.eventHub)
}
container.initialized = true
c.initialized = true
logging.Info("service container initialized", nil)
return nil
}
// if err := container.Start(ctx); err != nil { return err }
func (container *Container) Start(ctx context.Context) error {
container.mutex.RLock()
defer container.mutex.RUnlock()
// Start begins all background services.
func (c *Container) Start(ctx context.Context) error {
c.mu.RLock()
defer c.mu.RUnlock()
if !container.initialized {
return ErrInternal("container not initialized")
if !c.initialized {
return fmt.Errorf("container not initialized")
}
// container.eventHub.Run() keeps `/ws/events` clients connected while the API is serving requests.
go container.eventHub.Run()
// Start event hub
go c.eventHub.Run()
// container.nodeService.StartTransport() enables `mining remote connect peer-19f3` when peer transport is configured.
if container.nodeService != nil {
if err := container.nodeService.StartTransport(); err != nil {
// Start node transport if available
if c.nodeService != nil {
if err := c.nodeService.StartTransport(); err != nil {
logging.Warn("failed to start node transport", logging.Fields{"error": err})
} else {
container.transportStarted = true
c.transportStarted = true
}
}
@ -142,12 +152,12 @@ func (container *Container) Start(ctx context.Context) error {
return nil
}
// defer container.Shutdown(ctx) // safe to call multiple times
func (container *Container) Shutdown(ctx context.Context) error {
container.mutex.Lock()
defer container.mutex.Unlock()
// Shutdown gracefully stops all services in reverse order.
func (c *Container) Shutdown(ctx context.Context) error {
c.mu.Lock()
defer c.mu.Unlock()
if !container.initialized {
if !c.initialized {
return nil
}
@ -155,100 +165,95 @@ func (container *Container) Shutdown(ctx context.Context) error {
var errs []error
// container.service is stopped by the caller so `mining serve` can close the HTTP server and shell together.
if container.service != nil {
// 1. Stop service (HTTP server)
if c.service != nil {
// Service shutdown is handled externally
}
// container.nodeService.StopTransport() tears down peer connectivity after `mining remote connect` sessions finish.
if container.nodeService != nil && container.transportStarted {
if err := container.nodeService.StopTransport(); err != nil {
errs = append(errs, ErrInternal("node transport shutdown failed").WithCause(err))
// 2. Stop node transport (only if it was started)
if c.nodeService != nil && c.transportStarted {
if err := c.nodeService.StopTransport(); err != nil {
errs = append(errs, fmt.Errorf("node transport: %w", err))
}
container.transportStarted = false
c.transportStarted = false
}
// container.eventHub.Stop() closes `/ws/events` listeners before process exit.
if container.eventHub != nil {
container.eventHub.Stop()
// 3. Stop event hub
if c.eventHub != nil {
c.eventHub.Stop()
}
// concreteManager.Stop() stops miners started through `mining start xmrig`.
if concreteManager, ok := container.manager.(*Manager); ok {
concreteManager.Stop()
// 4. Stop miner manager
if mgr, ok := c.manager.(*Manager); ok {
mgr.Stop()
}
// database.Close() flushes the hashrate store used by GET /api/v1/mining/miners/xmrig/history.
// 5. Close database
if err := database.Close(); err != nil {
errs = append(errs, ErrInternal("database shutdown failed").WithCause(err))
errs = append(errs, fmt.Errorf("database: %w", err))
}
container.initialized = false
close(container.shutdownChannel)
c.initialized = false
close(c.shutdownCh)
if len(errs) > 0 {
return ErrInternal("shutdown completed with errors").WithCause(errs[0])
return fmt.Errorf("shutdown errors: %v", errs)
}
logging.Info("service container shutdown complete", nil)
return nil
}
// miner := container.Manager()
// miner.StartMiner(ctx, "xmrig", config)
func (container *Container) Manager() ManagerInterface {
container.mutex.RLock()
defer container.mutex.RUnlock()
return container.manager
// Manager returns the miner manager.
func (c *Container) Manager() ManagerInterface {
c.mu.RLock()
defer c.mu.RUnlock()
return c.manager
}
// profileManager := container.ProfileManager()
// profileManager.SaveProfile("eth-main", config)
func (container *Container) ProfileManager() *ProfileManager {
container.mutex.RLock()
defer container.mutex.RUnlock()
return container.profileManager
// ProfileManager returns the profile manager.
func (c *Container) ProfileManager() *ProfileManager {
c.mu.RLock()
defer c.mu.RUnlock()
return c.profileManager
}
// nodeService := container.NodeService() // nil when `mining remote status` should stay local-only.
// nodeService.GetPeers()
func (container *Container) NodeService() *NodeService {
container.mutex.RLock()
defer container.mutex.RUnlock()
return container.nodeService
// NodeService returns the node service (may be nil if P2P is unavailable).
func (c *Container) NodeService() *NodeService {
c.mu.RLock()
defer c.mu.RUnlock()
return c.nodeService
}
// eventHub := container.EventHub()
// eventHub.Broadcast(event)
func (container *Container) EventHub() *EventHub {
container.mutex.RLock()
defer container.mutex.RUnlock()
return container.eventHub
// EventHub returns the event hub for WebSocket connections.
func (c *Container) EventHub() *EventHub {
c.mu.RLock()
defer c.mu.RUnlock()
return c.eventHub
}
// store := container.HashrateStore()
// store.RecordHashrate("xmrig", 1234.5)
func (container *Container) HashrateStore() database.HashrateStore {
container.mutex.RLock()
defer container.mutex.RUnlock()
return container.hashrateStore
// HashrateStore returns the hashrate store interface.
func (c *Container) HashrateStore() database.HashrateStore {
c.mu.RLock()
defer c.mu.RUnlock()
return c.hashrateStore
}
// container.SetHashrateStore(database.NopStore()) // injects a no-op store in tests for GET /api/v1/mining/status.
func (container *Container) SetHashrateStore(store database.HashrateStore) {
container.mutex.Lock()
defer container.mutex.Unlock()
container.hashrateStore = store
// SetHashrateStore allows injecting a custom hashrate store (useful for testing).
func (c *Container) SetHashrateStore(store database.HashrateStore) {
c.mu.Lock()
defer c.mu.Unlock()
c.hashrateStore = store
}
// <-container.ShutdownCh() // blocks until `mining serve` finishes shutting down.
func (container *Container) ShutdownCh() <-chan struct{} {
return container.shutdownChannel
// ShutdownCh returns a channel that's closed when shutdown is complete.
func (c *Container) ShutdownCh() <-chan struct{} {
return c.shutdownCh
}
// if container.IsInitialized() { container.Start(ctx) }
func (container *Container) IsInitialized() bool {
container.mutex.RLock()
defer container.mutex.RUnlock()
return container.initialized
// IsInitialized returns true if the container has been initialized.
func (c *Container) IsInitialized() bool {
c.mu.RLock()
defer c.mu.RUnlock()
return c.initialized
}

View file

@ -7,7 +7,7 @@ import (
"testing"
"time"
"forge.lthn.ai/Snider/Mining/pkg/database"
"github.com/Snider/Mining/pkg/database"
)
func setupContainerTestEnv(t *testing.T) func() {
@ -20,7 +20,7 @@ func setupContainerTestEnv(t *testing.T) func() {
}
}
func TestContainer_NewContainer_Good(t *testing.T) {
func TestNewContainer(t *testing.T) {
config := DefaultContainerConfig()
container := NewContainer(config)
@ -33,7 +33,7 @@ func TestContainer_NewContainer_Good(t *testing.T) {
}
}
func TestContainer_DefaultContainerConfig_Good(t *testing.T) {
func TestDefaultContainerConfig(t *testing.T) {
config := DefaultContainerConfig()
if !config.Database.Enabled {
@ -53,7 +53,7 @@ func TestContainer_DefaultContainerConfig_Good(t *testing.T) {
}
}
func TestContainer_Initialize_Good(t *testing.T) {
func TestContainer_Initialize(t *testing.T) {
cleanup := setupContainerTestEnv(t)
defer cleanup()
@ -96,7 +96,7 @@ func TestContainer_Initialize_Good(t *testing.T) {
}
}
func TestContainer_InitializeTwice_Bad(t *testing.T) {
func TestContainer_InitializeTwice(t *testing.T) {
cleanup := setupContainerTestEnv(t)
defer cleanup()
@ -119,7 +119,7 @@ func TestContainer_InitializeTwice_Bad(t *testing.T) {
container.Shutdown(ctx)
}
func TestContainer_DatabaseDisabled_Good(t *testing.T) {
func TestContainer_DatabaseDisabled(t *testing.T) {
cleanup := setupContainerTestEnv(t)
defer cleanup()
@ -152,7 +152,7 @@ func TestContainer_DatabaseDisabled_Good(t *testing.T) {
container.Shutdown(ctx)
}
func TestContainer_SetHashrateStore_Good(t *testing.T) {
func TestContainer_SetHashrateStore(t *testing.T) {
cleanup := setupContainerTestEnv(t)
defer cleanup()
@ -178,7 +178,7 @@ func TestContainer_SetHashrateStore_Good(t *testing.T) {
container.Shutdown(ctx)
}
func TestContainer_StartWithoutInitialize_Bad(t *testing.T) {
func TestContainer_StartWithoutInitialize(t *testing.T) {
config := DefaultContainerConfig()
container := NewContainer(config)
ctx := context.Background()
@ -188,7 +188,7 @@ func TestContainer_StartWithoutInitialize_Bad(t *testing.T) {
}
}
func TestContainer_ShutdownWithoutInitialize_Good(t *testing.T) {
func TestContainer_ShutdownWithoutInitialize(t *testing.T) {
config := DefaultContainerConfig()
container := NewContainer(config)
ctx := context.Background()
@ -199,7 +199,7 @@ func TestContainer_ShutdownWithoutInitialize_Good(t *testing.T) {
}
}
func TestContainer_ShutdownChannel_Good(t *testing.T) {
func TestContainer_ShutdownChannel(t *testing.T) {
cleanup := setupContainerTestEnv(t)
defer cleanup()
@ -237,7 +237,7 @@ func TestContainer_ShutdownChannel_Good(t *testing.T) {
}
}
func TestContainer_InitializeWithCancelledContext_Ugly(t *testing.T) {
func TestContainer_InitializeWithCancelledContext(t *testing.T) {
cleanup := setupContainerTestEnv(t)
defer cleanup()
@ -264,7 +264,7 @@ func TestContainer_InitializeWithCancelledContext_Ugly(t *testing.T) {
}
}
func TestContainer_ShutdownWithTimeout_Ugly(t *testing.T) {
func TestContainer_ShutdownWithTimeout(t *testing.T) {
cleanup := setupContainerTestEnv(t)
defer cleanup()
@ -289,7 +289,7 @@ func TestContainer_ShutdownWithTimeout_Ugly(t *testing.T) {
}
}
func TestContainer_DoubleShutdown_Ugly(t *testing.T) {
func TestContainer_DoubleShutdown(t *testing.T) {
cleanup := setupContainerTestEnv(t)
defer cleanup()

View file

@ -6,8 +6,9 @@ import (
"time"
)
// manager.StartMiner(ctx, "xmrig", &Config{GPUEnabled: true, OpenCL: true, Devices: "0"})
func TestDualMining_CPUAndGPU_Good(t *testing.T) {
// TestDualMiningCPUAndGPU tests running CPU and GPU mining together
// This test requires XMRig installed and a GPU with OpenCL support
func TestDualMiningCPUAndGPU(t *testing.T) {
if testing.Short() {
t.Skip("Skipping dual mining test in short mode")
}
@ -72,8 +73,8 @@ func TestDualMining_CPUAndGPU_Good(t *testing.T) {
manager.StopMiner(context.Background(), minerInstance.GetName())
}
// miner.createConfig(&Config{GPUEnabled: true, OpenCL: true}) // no Devices → GPU disabled
func TestDualMining_GPUDeviceSelection_Bad(t *testing.T) {
// TestGPUDeviceSelection tests that GPU mining requires explicit device selection
func TestGPUDeviceSelection(t *testing.T) {
tmpDir := t.TempDir()
miner := &XMRigMiner{

View file

@ -1,11 +1,11 @@
package mining
import (
"fmt"
"net/http"
)
// respondWithError(requestContext, http.StatusNotFound, ErrCodeMinerNotFound, "xmrig not found", err.Error())
// respondWithError(requestContext, http.StatusConflict, ErrCodeMinerExists, "xmrig already running", "")
// Error codes for the mining package
const (
ErrCodeMinerNotFound = "MINER_NOT_FOUND"
ErrCodeMinerExists = "MINER_EXISTS"
@ -24,57 +24,57 @@ const (
ErrCodeProfileNotFound = "PROFILE_NOT_FOUND"
ErrCodeProfileExists = "PROFILE_EXISTS"
ErrCodeInternalError = "INTERNAL_ERROR"
ErrCodeInternal = "INTERNAL_ERROR" // Alias for consistency
)
// miningError := &MiningError{Code: ErrCodeStartFailed, Message: "failed to start xmrig", HTTPStatus: 500}
// miningError.WithCause(err).WithSuggestion("check logs")
// MiningError is a structured error type for the mining package
type MiningError struct {
Code string
Message string
Details string
Suggestion string
Retryable bool
HTTPStatus int
Cause error
Code string // Machine-readable error code
Message string // Human-readable message
Details string // Technical details (for debugging)
Suggestion string // What to do next
Retryable bool // Can the client retry?
HTTPStatus int // HTTP status code to return
Cause error // Underlying error
}
// e.Error() // "START_FAILED: failed to start miner 'xmrig' (exec: not found)"
// Error implements the error interface
func (e *MiningError) Error() string {
if e.Cause != nil {
return e.Code + ": " + e.Message + " (" + e.Cause.Error() + ")"
return fmt.Sprintf("%s: %s (%v)", e.Code, e.Message, e.Cause)
}
return e.Code + ": " + e.Message
return fmt.Sprintf("%s: %s", e.Code, e.Message)
}
// errors.As(e.Unwrap(), &target) // unwrap to inspect the underlying cause
// Unwrap returns the underlying error
func (e *MiningError) Unwrap() error {
return e.Cause
}
// return ErrStartFailed("xmrig").WithCause(err)
// WithCause adds an underlying error
func (e *MiningError) WithCause(err error) *MiningError {
e.Cause = err
return e
}
// return ErrInternal("nil response").WithDetails("stats API returned null body")
// WithDetails adds technical details
func (e *MiningError) WithDetails(details string) *MiningError {
e.Details = details
return e
}
// return ErrInstallFailed("xmrig").WithSuggestion("ensure curl is installed")
// WithSuggestion adds a suggestion for the user
func (e *MiningError) WithSuggestion(suggestion string) *MiningError {
e.Suggestion = suggestion
return e
}
// if e.IsRetryable() { time.Sleep(backoff); retry() }
// IsRetryable returns whether the error is retryable
func (e *MiningError) IsRetryable() bool {
return e.Retryable
}
// requestContext.JSON(e.StatusCode(), e) // 404 for not-found, 500 for internal errors
// StatusCode returns the HTTP status code for this error
func (e *MiningError) StatusCode() int {
if e.HTTPStatus == 0 {
return http.StatusInternalServerError
@ -82,7 +82,7 @@ func (e *MiningError) StatusCode() int {
return e.HTTPStatus
}
// NewMiningError("START_FAILED", "failed to start xmrig").WithCause(err)
// NewMiningError creates a new MiningError
func NewMiningError(code, message string) *MiningError {
return &MiningError{
Code: code,
@ -91,150 +91,152 @@ func NewMiningError(code, message string) *MiningError {
}
}
// return ErrMinerNotFound("xmrig").WithCause(err)
// Predefined error constructors for common errors
// ErrMinerNotFound creates a miner not found error
func ErrMinerNotFound(name string) *MiningError {
return &MiningError{
Code: ErrCodeMinerNotFound,
Message: "miner '" + name + "' not found",
Message: fmt.Sprintf("miner '%s' not found", name),
Suggestion: "Check that the miner name is correct and that it is running",
Retryable: false,
HTTPStatus: http.StatusNotFound,
}
}
// return ErrMinerExists("xmrig")
// ErrMinerExists creates a miner already exists error
func ErrMinerExists(name string) *MiningError {
return &MiningError{
Code: ErrCodeMinerExists,
Message: "miner '" + name + "' is already running",
Message: fmt.Sprintf("miner '%s' is already running", name),
Suggestion: "Stop the existing miner first or use a different configuration",
Retryable: false,
HTTPStatus: http.StatusConflict,
}
}
// return ErrMinerNotRunning("xmrig")
// ErrMinerNotRunning creates a miner not running error
func ErrMinerNotRunning(name string) *MiningError {
return &MiningError{
Code: ErrCodeMinerNotRunning,
Message: "miner '" + name + "' is not running",
Message: fmt.Sprintf("miner '%s' is not running", name),
Suggestion: "Start the miner first before performing this operation",
Retryable: false,
HTTPStatus: http.StatusBadRequest,
}
}
// return ErrInstallFailed("xmrig").WithCause(err)
// ErrInstallFailed creates an installation failed error
func ErrInstallFailed(minerType string) *MiningError {
return &MiningError{
Code: ErrCodeInstallFailed,
Message: "failed to install " + minerType,
Message: fmt.Sprintf("failed to install %s", minerType),
Suggestion: "Check your internet connection and try again",
Retryable: true,
HTTPStatus: http.StatusInternalServerError,
}
}
// return ErrStartFailed("xmrig").WithCause(err)
// ErrStartFailed creates a start failed error
func ErrStartFailed(name string) *MiningError {
return &MiningError{
Code: ErrCodeStartFailed,
Message: "failed to start miner '" + name + "'",
Message: fmt.Sprintf("failed to start miner '%s'", name),
Suggestion: "Check the miner configuration and logs for details",
Retryable: true,
HTTPStatus: http.StatusInternalServerError,
}
}
// ErrStopFailed creates a stop failed error.
//
//	return ErrStopFailed("xmrig").WithCause(err)
func ErrStopFailed(name string) *MiningError {
	// Duplicate Message field (diff artifact) removed; fmt.Sprintf form kept.
	return &MiningError{
		Code:       ErrCodeStopFailed,
		Message:    fmt.Sprintf("failed to stop miner '%s'", name),
		Suggestion: "The miner process may need to be terminated manually",
		Retryable:  true,
		HTTPStatus: http.StatusInternalServerError,
	}
}
// ErrInvalidConfig creates an invalid configuration error.
//
//	return ErrInvalidConfig("wallet address is required")
func ErrInvalidConfig(reason string) *MiningError {
	// Duplicate Message field (diff artifact) removed; fmt.Sprintf form kept.
	return &MiningError{
		Code:       ErrCodeInvalidConfig,
		Message:    fmt.Sprintf("invalid configuration: %s", reason),
		Suggestion: "Review the configuration and ensure all required fields are provided",
		Retryable:  false,
		HTTPStatus: http.StatusBadRequest,
	}
}
// ErrUnsupportedMiner creates an unsupported miner type error.
//
//	return ErrUnsupportedMiner("nicehash")
func ErrUnsupportedMiner(minerType string) *MiningError {
	// Duplicate Message field (diff artifact) removed; fmt.Sprintf form kept.
	return &MiningError{
		Code:       ErrCodeUnsupportedMiner,
		Message:    fmt.Sprintf("unsupported miner type: %s", minerType),
		Suggestion: "Use one of the supported miner types: xmrig, tt-miner",
		Retryable:  false,
		HTTPStatus: http.StatusBadRequest,
	}
}
// ErrConnectionFailed creates a connection failed error.
//
//	return ErrConnectionFailed("pool.minexmr.com:4444").WithCause(err)
func ErrConnectionFailed(target string) *MiningError {
	// Duplicate Message field (diff artifact) removed; fmt.Sprintf form kept.
	return &MiningError{
		Code:       ErrCodeConnectionFailed,
		Message:    fmt.Sprintf("failed to connect to %s", target),
		Suggestion: "Check network connectivity and try again",
		Retryable:  true,
		HTTPStatus: http.StatusServiceUnavailable,
	}
}
// ErrTimeout creates a timeout error.
//
//	return ErrTimeout("stats poll")
func ErrTimeout(operation string) *MiningError {
	// Duplicate Message field (diff artifact) removed; fmt.Sprintf form kept.
	return &MiningError{
		Code:       ErrCodeTimeout,
		Message:    fmt.Sprintf("operation timed out: %s", operation),
		Suggestion: "The operation is taking longer than expected, try again later",
		Retryable:  true,
		HTTPStatus: http.StatusGatewayTimeout,
	}
}
// ErrDatabaseError creates a database error.
//
//	return ErrDatabaseError("save profile")
func ErrDatabaseError(operation string) *MiningError {
	// Duplicate Message field (diff artifact) removed; fmt.Sprintf form kept.
	return &MiningError{
		Code:       ErrCodeDatabaseError,
		Message:    fmt.Sprintf("database error during %s", operation),
		Suggestion: "This may be a temporary issue, try again",
		Retryable:  true,
		HTTPStatus: http.StatusInternalServerError,
	}
}
// ErrProfileNotFound creates a profile not found error.
//
//	return ErrProfileNotFound("default-xmr")
func ErrProfileNotFound(id string) *MiningError {
	// Duplicate Message field (diff artifact) removed; fmt.Sprintf form kept.
	return &MiningError{
		Code:       ErrCodeProfileNotFound,
		Message:    fmt.Sprintf("profile '%s' not found", id),
		Suggestion: "Check that the profile ID is correct",
		Retryable:  false,
		HTTPStatus: http.StatusNotFound,
	}
}
// ErrProfileExists creates a profile already exists error.
//
//	return ErrProfileExists("default-xmr")
func ErrProfileExists(name string) *MiningError {
	// Duplicate Message field (diff artifact) removed; fmt.Sprintf form kept.
	return &MiningError{
		Code:       ErrCodeProfileExists,
		Message:    fmt.Sprintf("profile '%s' already exists", name),
		Suggestion: "Use a different name or update the existing profile",
		Retryable:  false,
		HTTPStatus: http.StatusConflict,
	}
}
// return ErrInternal("unexpected nil response from stats API")
// ErrInternal creates a generic internal error
func ErrInternal(message string) *MiningError {
return &MiningError{
Code: ErrCodeInternalError,

View file

@ -1,13 +1,12 @@
package mining
import (
"errors"
"net/http"
"testing"
)
// err := NewMiningError(ErrCodeMinerNotFound, "miner not found")
// err.Error() == "MINER_NOT_FOUND: miner not found"
func TestErrors_Error_Good(t *testing.T) {
func TestMiningError_Error(t *testing.T) {
err := NewMiningError(ErrCodeMinerNotFound, "miner not found")
expected := "MINER_NOT_FOUND: miner not found"
if err.Error() != expected {
@ -15,24 +14,22 @@ func TestErrors_Error_Good(t *testing.T) {
}
}
// err := NewMiningError("START_FAILED", "failed to start").WithCause(cause)
// err.Unwrap() == cause
func TestErrors_Error_Bad(t *testing.T) {
cause := ErrInternal("underlying error")
func TestMiningError_ErrorWithCause(t *testing.T) {
cause := errors.New("underlying error")
err := NewMiningError(ErrCodeStartFailed, "failed to start").WithCause(cause)
// Should include cause in error message
if err.Cause != cause {
t.Error("Cause was not set")
}
if err.Unwrap() != cause {
// Should be unwrappable
if errors.Unwrap(err) != cause {
t.Error("Unwrap did not return cause")
}
}
// err := NewMiningError("INVALID_CONFIG", "invalid config").WithDetails("port must be between 1024 and 65535")
// err.Details == "port must be between 1024 and 65535"
func TestErrors_WithDetails_Good(t *testing.T) {
func TestMiningError_WithDetails(t *testing.T) {
err := NewMiningError(ErrCodeInvalidConfig, "invalid config").
WithDetails("port must be between 1024 and 65535")
@ -41,9 +38,7 @@ func TestErrors_WithDetails_Good(t *testing.T) {
}
}
// err := ErrConnectionFailed("pool").WithSuggestion("check your network")
// err.Suggestion == "check your network"
func TestErrors_WithSuggestion_Good(t *testing.T) {
func TestMiningError_WithSuggestion(t *testing.T) {
err := NewMiningError(ErrCodeConnectionFailed, "connection failed").
WithSuggestion("check your network")
@ -52,10 +47,8 @@ func TestErrors_WithSuggestion_Good(t *testing.T) {
}
}
// ErrMinerNotFound("test").StatusCode() == 404
// ErrMinerExists("test").StatusCode() == 409
func TestErrors_StatusCode_Good(t *testing.T) {
cases := []struct {
func TestMiningError_StatusCode(t *testing.T) {
tests := []struct {
name string
err *MiningError
expected int
@ -68,19 +61,17 @@ func TestErrors_StatusCode_Good(t *testing.T) {
{"timeout", ErrTimeout("operation"), http.StatusGatewayTimeout},
}
for _, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) {
if testCase.err.StatusCode() != testCase.expected {
t.Errorf("Expected status %d, got %d", testCase.expected, testCase.err.StatusCode())
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.err.StatusCode() != tt.expected {
t.Errorf("Expected status %d, got %d", tt.expected, tt.err.StatusCode())
}
})
}
}
// ErrMinerNotFound("test").IsRetryable() == false
// ErrInstallFailed("xmrig").IsRetryable() == true
func TestErrors_IsRetryable_Good(t *testing.T) {
cases := []struct {
func TestMiningError_IsRetryable(t *testing.T) {
tests := []struct {
name string
err *MiningError
retryable bool
@ -95,19 +86,17 @@ func TestErrors_IsRetryable_Good(t *testing.T) {
{"database error", ErrDatabaseError("query"), true},
}
for _, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) {
if testCase.err.IsRetryable() != testCase.retryable {
t.Errorf("Expected retryable=%v, got %v", testCase.retryable, testCase.err.IsRetryable())
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.err.IsRetryable() != tt.retryable {
t.Errorf("Expected retryable=%v, got %v", tt.retryable, tt.err.IsRetryable())
}
})
}
}
// ErrMinerNotFound("test").Code == ErrCodeMinerNotFound
// ErrStartFailed("test").Code == ErrCodeStartFailed
func TestErrors_PredefinedErrors_Good(t *testing.T) {
cases := []struct {
func TestPredefinedErrors(t *testing.T) {
tests := []struct {
name string
err *MiningError
code string
@ -128,22 +117,20 @@ func TestErrors_PredefinedErrors_Good(t *testing.T) {
{"ErrInternal", ErrInternal("unexpected error"), ErrCodeInternalError},
}
for _, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) {
if testCase.err.Code != testCase.code {
t.Errorf("Expected code %s, got %s", testCase.code, testCase.err.Code)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.err.Code != tt.code {
t.Errorf("Expected code %s, got %s", tt.code, tt.err.Code)
}
if testCase.err.Message == "" {
if tt.err.Message == "" {
t.Error("Message should not be empty")
}
})
}
}
// ErrConnectionFailed("pool:3333").WithCause(cause).WithDetails("timeout").WithSuggestion("check firewall")
// err.Code == ErrCodeConnectionFailed && err.Cause == cause && err.Details == "timeout"
func TestErrors_Chaining_Ugly(t *testing.T) {
cause := ErrTimeout("network timeout")
func TestMiningError_Chaining(t *testing.T) {
cause := errors.New("network timeout")
err := ErrConnectionFailed("pool:3333").
WithCause(cause).
WithDetails("timeout after 30s").

View file

@ -1,15 +1,15 @@
package mining
import (
"encoding/json"
"sync"
"time"
"forge.lthn.ai/Snider/Mining/pkg/logging"
"github.com/Snider/Mining/pkg/logging"
"github.com/gorilla/websocket"
)
// hub.Broadcast(NewEvent(EventMinerStarted, MinerEventData{Name: "xmrig"}))
// hub.Broadcast(NewEvent(EventMinerStats, MinerStatsData{Name: "xmrig", Hashrate: 1200}))
// EventType represents the type of mining event
type EventType string
const (
@ -27,14 +27,14 @@ const (
EventStateSync EventType = "state.sync" // Initial state on connect/reconnect
)
// hub.Broadcast(Event{Type: EventMinerStarted, Data: MinerEventData{Name: "xmrig"}})
// Event represents a mining event that can be broadcast to clients
type Event struct {
Type EventType `json:"type"`
Timestamp time.Time `json:"timestamp"`
Data interface{} `json:"data,omitempty"`
}
// hub.Broadcast(NewEvent(EventMinerStats, MinerStatsData{Name: "xmrig", Hashrate: 1200, Shares: 5}))
// MinerStatsData contains stats data for a miner event
type MinerStatsData struct {
Name string `json:"name"`
Hashrate int `json:"hashrate"`
@ -45,7 +45,7 @@ type MinerStatsData struct {
DiffCurrent int `json:"diffCurrent,omitempty"`
}
// hub.Broadcast(NewEvent(EventMinerStarted, MinerEventData{Name: "xmrig", ProfileID: "default"}))
// MinerEventData contains basic miner event data
type MinerEventData struct {
Name string `json:"name"`
ProfileID string `json:"profileId,omitempty"`
@ -54,50 +54,65 @@ type MinerEventData struct {
Pool string `json:"pool,omitempty"`
}
// wsClient represents a single WebSocket client connection registered
// with an EventHub (created by ServeWs for each upgraded connection).
// The diff render left both old and new field lists in the struct body,
// which duplicates every field; collapsed to the current (minersMu) form.
type wsClient struct {
	conn      *websocket.Conn
	send      chan []byte
	hub       *EventHub
	miners    map[string]bool // subscribed miners, "*" for all
	minersMu  sync.RWMutex    // protects miners map from concurrent access
	closeOnce sync.Once       // guarantees send is closed exactly once
}
// client.safeClose() // safe to call from multiple goroutines; channel closed exactly once
func (client *wsClient) safeClose() {
client.closeOnce.Do(func() {
close(client.send)
// safeClose closes the send channel exactly once to prevent panic on double close
func (c *wsClient) safeClose() {
c.closeOnce.Do(func() {
close(c.send)
})
}
// hub.SetStateProvider(func() interface{} { return manager.GetState() })
// StateProvider is a function that returns the current state for sync
type StateProvider func() interface{}
// EventHub manages WebSocket connections and event broadcasting.
//
//	hub := NewEventHub(); go hub.Run(); hub.Broadcast(NewEvent(EventMinerStarted, data))
//
// The diff render left both the old and new field lists in the struct
// body, duplicating every field; collapsed to the current (mu) form.
type EventHub struct {
	// Registered clients
	clients map[*wsClient]bool
	// Inbound events to broadcast
	broadcast chan Event
	// Register requests from clients
	register chan *wsClient
	// Unregister requests from clients
	unregister chan *wsClient
	// Mutex for thread-safe access
	mu sync.RWMutex
	// Stop signal
	stop chan struct{}
	// Ensure Stop() is called only once
	stopOnce sync.Once
	// Connection limits
	maxConnections int
	// State provider for sync on connect
	stateProvider StateProvider
}
// DefaultMaxConnections is the default maximum number of concurrent
// WebSocket clients an EventHub will accept; override it with
// NewEventHubWithOptions.
const DefaultMaxConnections = 100

// NewEventHub creates a new EventHub with default settings.
//
//	hub := NewEventHub()
//	go hub.Run()
func NewEventHub() *EventHub {
	return NewEventHubWithOptions(DefaultMaxConnections)
}
// hub := NewEventHubWithOptions(50)
// go hub.Run()
// NewEventHubWithOptions creates a new EventHub with custom settings
func NewEventHubWithOptions(maxConnections int) *EventHub {
if maxConnections <= 0 {
maxConnections = DefaultMaxConnections
@ -112,31 +127,30 @@ func NewEventHubWithOptions(maxConnections int) *EventHub {
}
}
// hub := NewEventHub()
// go hub.Run() // blocks until hub.Stop() is called
func (hub *EventHub) Run() {
// Run starts the EventHub's main loop
func (h *EventHub) Run() {
for {
select {
case <-hub.stop:
case <-h.stop:
// Close all client connections
hub.mutex.Lock()
for client := range hub.clients {
h.mu.Lock()
for client := range h.clients {
client.safeClose()
delete(hub.clients, client)
delete(h.clients, client)
}
hub.mutex.Unlock()
h.mu.Unlock()
return
case client := <-hub.register:
hub.mutex.Lock()
hub.clients[client] = true
stateProvider := hub.stateProvider
hub.mutex.Unlock()
logging.Debug("client connected", logging.Fields{"total": len(hub.clients)})
case client := <-h.register:
h.mu.Lock()
h.clients[client] = true
stateProvider := h.stateProvider
h.mu.Unlock()
logging.Debug("client connected", logging.Fields{"total": len(h.clients)})
// Send initial state sync if provider is set
if stateProvider != nil {
go func(wsconn *wsClient) {
go func(c *wsClient) {
defer func() {
if r := recover(); r != nil {
logging.Error("panic in state sync goroutine", logging.Fields{"panic": r})
@ -155,7 +169,7 @@ func (hub *EventHub) Run() {
return
}
select {
case wsconn.send <- data:
case c.send <- data:
default:
// Client buffer full
}
@ -163,53 +177,53 @@ func (hub *EventHub) Run() {
}(client)
}
case client := <-hub.unregister:
hub.mutex.Lock()
if _, ok := hub.clients[client]; ok {
delete(hub.clients, client)
case client := <-h.unregister:
h.mu.Lock()
if _, ok := h.clients[client]; ok {
delete(h.clients, client)
client.safeClose()
// Decrement WebSocket connection metrics
RecordWSConnection(false)
}
hub.mutex.Unlock()
logging.Debug("client disconnected", logging.Fields{"total": len(hub.clients)})
h.mu.Unlock()
logging.Debug("client disconnected", logging.Fields{"total": len(h.clients)})
case event := <-hub.broadcast:
case event := <-h.broadcast:
data, err := MarshalJSON(event)
if err != nil {
logging.Error("failed to marshal event", logging.Fields{"error": err})
continue
}
hub.mutex.RLock()
for client := range hub.clients {
h.mu.RLock()
for client := range h.clients {
// Check if client is subscribed to this miner
if hub.shouldSendToClient(client, event) {
if h.shouldSendToClient(client, event) {
select {
case client.send <- data:
default:
// Client buffer full, close connection
go func(wsconn *wsClient) {
hub.unregister <- wsconn
go func(c *wsClient) {
h.unregister <- c
}(client)
}
}
}
hub.mutex.RUnlock()
h.mu.RUnlock()
}
}
}
// if hub.shouldSendToClient(client, event) { client.send <- data }
func (hub *EventHub) shouldSendToClient(client *wsClient, event Event) bool {
// shouldSendToClient checks if an event should be sent to a client
func (h *EventHub) shouldSendToClient(client *wsClient, event Event) bool {
// Always send pong and system events
if event.Type == EventPong {
return true
}
// Check miner subscription for miner events (protected by mutex)
client.minersMutex.RLock()
defer client.minersMutex.RUnlock()
client.minersMu.RLock()
defer client.minersMu.RUnlock()
if client.miners == nil || len(client.miners) == 0 {
// No subscription filter, send all
@ -242,40 +256,40 @@ func (hub *EventHub) shouldSendToClient(client *wsClient, event Event) bool {
return client.miners[minerName]
}
// hub.Stop() // safe to call multiple times; closes all client connections
func (hub *EventHub) Stop() {
hub.stopOnce.Do(func() {
close(hub.stop)
// Stop stops the EventHub (safe to call multiple times)
func (h *EventHub) Stop() {
h.stopOnce.Do(func() {
close(h.stop)
})
}
// hub.SetStateProvider(func() interface{} { return manager.GetState() })
func (hub *EventHub) SetStateProvider(provider StateProvider) {
hub.mutex.Lock()
defer hub.mutex.Unlock()
hub.stateProvider = provider
// SetStateProvider sets the function that provides current state for new clients
func (h *EventHub) SetStateProvider(provider StateProvider) {
h.mu.Lock()
defer h.mu.Unlock()
h.stateProvider = provider
}
// hub.Broadcast(NewEvent(EventMinerStats, statsData))
func (hub *EventHub) Broadcast(event Event) {
// Broadcast sends an event to all subscribed clients
func (h *EventHub) Broadcast(event Event) {
if event.Timestamp.IsZero() {
event.Timestamp = time.Now()
}
select {
case hub.broadcast <- event:
case h.broadcast <- event:
default:
logging.Warn("broadcast channel full, dropping event", logging.Fields{"type": event.Type})
}
}
// if hub.ClientCount() == 0 { return } // skip broadcast when no listeners
func (hub *EventHub) ClientCount() int {
hub.mutex.RLock()
defer hub.mutex.RUnlock()
return len(hub.clients)
// ClientCount returns the number of connected clients
func (h *EventHub) ClientCount() int {
h.mu.RLock()
defer h.mu.RUnlock()
return len(h.clients)
}
// hub.Broadcast(NewEvent(EventMinerStarted, MinerEventData{Name: "xmrig"}))
// NewEvent creates a new event with the current timestamp
func NewEvent(eventType EventType, data interface{}) Event {
return Event{
Type: eventType,
@ -284,25 +298,25 @@ func NewEvent(eventType EventType, data interface{}) Event {
}
}
// go client.writePump() // started by ServeWs; writes hub events to conn, sends ping every 30s
func (client *wsClient) writePump() {
// writePump pumps messages from the hub to the websocket connection
func (c *wsClient) writePump() {
ticker := time.NewTicker(30 * time.Second)
defer func() {
ticker.Stop()
client.conn.Close()
c.conn.Close()
}()
for {
select {
case message, ok := <-client.send:
client.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
case message, ok := <-c.send:
c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
if !ok {
// Hub closed the channel
client.conn.WriteMessage(websocket.CloseMessage, []byte{})
c.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
w, err := client.conn.NextWriter(websocket.TextMessage)
w, err := c.conn.NextWriter(websocket.TextMessage)
if err != nil {
return
}
@ -316,30 +330,30 @@ func (client *wsClient) writePump() {
}
case <-ticker.C:
client.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
if err := client.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
return
}
}
}
}
// go client.readPump() // started by ServeWs; reads subscribe/ping messages from conn, unregisters on close
func (client *wsClient) readPump() {
// readPump pumps messages from the websocket connection to the hub
func (c *wsClient) readPump() {
defer func() {
client.hub.unregister <- client
client.conn.Close()
c.hub.unregister <- c
c.conn.Close()
}()
client.conn.SetReadLimit(512)
client.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
client.conn.SetPongHandler(func(string) error {
client.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
c.conn.SetReadLimit(512)
c.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
c.conn.SetPongHandler(func(string) error {
c.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
return nil
})
for {
_, message, err := client.conn.ReadMessage()
_, message, err := c.conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
logging.Debug("WebSocket error", logging.Fields{"error": err})
@ -348,28 +362,28 @@ func (client *wsClient) readPump() {
}
// Parse client message
var clientMessage struct {
var msg struct {
Type string `json:"type"`
Miners []string `json:"miners,omitempty"`
}
if err := UnmarshalJSON(message, &clientMessage); err != nil {
if err := json.Unmarshal(message, &msg); err != nil {
continue
}
switch clientMessage.Type {
switch msg.Type {
case "subscribe":
// Update miner subscription (protected by mutex)
client.minersMutex.Lock()
client.miners = make(map[string]bool)
for _, minerName := range clientMessage.Miners {
client.miners[minerName] = true
c.minersMu.Lock()
c.miners = make(map[string]bool)
for _, m := range msg.Miners {
c.miners[m] = true
}
client.minersMutex.Unlock()
logging.Debug("client subscribed to miners", logging.Fields{"miners": clientMessage.Miners})
c.minersMu.Unlock()
logging.Debug("client subscribed to miners", logging.Fields{"miners": msg.Miners})
case "ping":
// Respond with pong
client.hub.Broadcast(Event{
c.hub.Broadcast(Event{
Type: EventPong,
Timestamp: time.Now(),
})
@ -377,15 +391,16 @@ func (client *wsClient) readPump() {
}
}
// if !hub.ServeWs(conn) { requestContext.JSON(http.StatusServiceUnavailable, gin.H{"error": "connection limit reached"}) }
func (hub *EventHub) ServeWs(conn *websocket.Conn) bool {
// ServeWs handles websocket requests from clients.
// Returns false if the connection was rejected due to limits.
func (h *EventHub) ServeWs(conn *websocket.Conn) bool {
// Check connection limit
hub.mutex.RLock()
currentCount := len(hub.clients)
hub.mutex.RUnlock()
h.mu.RLock()
currentCount := len(h.clients)
h.mu.RUnlock()
if currentCount >= hub.maxConnections {
logging.Warn("connection rejected: limit reached", logging.Fields{"current": currentCount, "max": hub.maxConnections})
if currentCount >= h.maxConnections {
logging.Warn("connection rejected: limit reached", logging.Fields{"current": currentCount, "max": h.maxConnections})
conn.WriteMessage(websocket.CloseMessage,
websocket.FormatCloseMessage(websocket.CloseTryAgainLater, "connection limit reached"))
conn.Close()
@ -395,11 +410,11 @@ func (hub *EventHub) ServeWs(conn *websocket.Conn) bool {
client := &wsClient{
conn: conn,
send: make(chan []byte, 256),
hub: hub,
hub: h,
miners: map[string]bool{"*": true}, // Subscribe to all by default
}
hub.register <- client
h.register <- client
// Start read/write pumps
go client.writePump()

View file

@ -9,7 +9,7 @@ import (
"github.com/gorilla/websocket"
)
func TestEvents_NewEventHub_Good(t *testing.T) {
func TestNewEventHub(t *testing.T) {
hub := NewEventHub()
if hub == nil {
t.Fatal("NewEventHub returned nil")
@ -24,7 +24,7 @@ func TestEvents_NewEventHub_Good(t *testing.T) {
}
}
func TestEvents_NewEventHubWithOptions_Good(t *testing.T) {
func TestNewEventHubWithOptions(t *testing.T) {
hub := NewEventHubWithOptions(50)
if hub.maxConnections != 50 {
t.Errorf("Expected maxConnections 50, got %d", hub.maxConnections)
@ -42,7 +42,7 @@ func TestEvents_NewEventHubWithOptions_Good(t *testing.T) {
}
}
func TestEvents_Broadcast_Good(t *testing.T) {
func TestEventHubBroadcast(t *testing.T) {
hub := NewEventHub()
go hub.Run()
defer hub.Stop()
@ -69,7 +69,7 @@ func TestEvents_Broadcast_Good(t *testing.T) {
}
}
func TestEvents_ClientCount_Good(t *testing.T) {
func TestEventHubClientCount(t *testing.T) {
hub := NewEventHub()
go hub.Run()
defer hub.Stop()
@ -80,7 +80,7 @@ func TestEvents_ClientCount_Good(t *testing.T) {
}
}
func TestEvents_Stop_Good(t *testing.T) {
func TestEventHubStop(t *testing.T) {
hub := NewEventHub()
go hub.Run()
@ -97,7 +97,7 @@ func TestEvents_Stop_Good(t *testing.T) {
time.Sleep(50 * time.Millisecond)
}
func TestEvents_NewEvent_Good(t *testing.T) {
func TestNewEvent(t *testing.T) {
data := MinerEventData{Name: "test-miner"}
event := NewEvent(EventMinerStarted, data)
@ -118,7 +118,7 @@ func TestEvents_NewEvent_Good(t *testing.T) {
}
}
func TestEvents_JSON_Good(t *testing.T) {
func TestEventJSON(t *testing.T) {
event := Event{
Type: EventMinerStats,
Timestamp: time.Now(),
@ -146,26 +146,27 @@ func TestEvents_JSON_Good(t *testing.T) {
}
}
func TestEvents_SetStateProvider_Good(t *testing.T) {
func TestSetStateProvider(t *testing.T) {
hub := NewEventHub()
go hub.Run()
defer hub.Stop()
called := false
var mutex sync.Mutex
var mu sync.Mutex
provider := func() interface{} {
mutex.Lock()
mu.Lock()
called = true
mutex.Unlock()
mu.Unlock()
return map[string]string{"status": "ok"}
}
hub.SetStateProvider(provider)
mutex.Lock()
// The provider should be set but not called until a client connects
mu.Lock()
wasCalled := called
mutex.Unlock()
mu.Unlock()
if wasCalled {
t.Error("Provider should not be called until client connects")
@ -179,7 +180,7 @@ type MockWebSocketConn struct {
mu sync.Mutex
}
func TestEvents_EventTypes_Good(t *testing.T) {
func TestEventTypes(t *testing.T) {
types := []EventType{
EventMinerStarting,
EventMinerStarted,
@ -192,8 +193,8 @@ func TestEvents_EventTypes_Good(t *testing.T) {
EventStateSync,
}
for _, eventType := range types {
if eventType == "" {
for _, et := range types {
if et == "" {
t.Error("Event type should not be empty")
}
}

57
pkg/mining/file_utils.go Normal file
View file

@ -0,0 +1,57 @@
package mining
import (
"fmt"
"os"
"path/filepath"
)
// AtomicWriteFile writes data to a file atomically by writing to a temp file
// first, syncing to disk, then renaming to the target path. This prevents
// corruption if the process is interrupted during write.
func AtomicWriteFile(path string, data []byte, perm os.FileMode) error {
dir := filepath.Dir(path)
// Create temp file in the same directory for atomic rename
tmpFile, err := os.CreateTemp(dir, ".tmp-*")
if err != nil {
return fmt.Errorf("failed to create temp file: %w", err)
}
tmpPath := tmpFile.Name()
// Clean up temp file on error
success := false
defer func() {
if !success {
os.Remove(tmpPath)
}
}()
if _, err := tmpFile.Write(data); err != nil {
tmpFile.Close()
return fmt.Errorf("failed to write temp file: %w", err)
}
// Sync to ensure data is flushed to disk before rename
if err := tmpFile.Sync(); err != nil {
tmpFile.Close()
return fmt.Errorf("failed to sync temp file: %w", err)
}
if err := tmpFile.Close(); err != nil {
return fmt.Errorf("failed to close temp file: %w", err)
}
// Set permissions before rename
if err := os.Chmod(tmpPath, perm); err != nil {
return fmt.Errorf("failed to set file permissions: %w", err)
}
// Atomic rename (on POSIX systems)
if err := os.Rename(tmpPath, path); err != nil {
return fmt.Errorf("failed to rename temp file: %w", err)
}
success = true
return nil
}

View file

@ -1,57 +0,0 @@
package mining
import (
"context"
"io"
"net/http"
"strconv"
)
// StatsCollector is implemented by miner integrations that can report
// runtime performance metrics.
//
//	var collector StatsCollector = &XMRigStatsCollector{...}
//	metrics, err := collector.CollectStats(ctx)
type StatsCollector interface {
	// CollectStats gathers the miner's current performance metrics,
	// honoring cancellation via ctx.
	CollectStats(ctx context.Context) (*PerformanceMetrics, error)
}
// HTTPStatsConfig describes the local HTTP endpoint a miner exposes
// its stats on.
//
//	config := HTTPStatsConfig{Host: "127.0.0.1", Port: 8080, Endpoint: "/2/summary"}
type HTTPStatsConfig struct {
	Host     string // API host, e.g. "127.0.0.1"
	Port     int    // API port; zero is rejected by FetchJSONStats
	Endpoint string // URL path of the JSON summary, e.g. "/2/summary"
}
// FetchJSONStats performs a GET against a miner's local HTTP API and
// decodes the JSON response body into target.
//
//	var summary XMRigSummary
//	if err := FetchJSONStats(ctx, HTTPStatsConfig{Host: "127.0.0.1", Port: 8080, Endpoint: "/2/summary"}, &summary); err != nil { return err }
func FetchJSONStats[T any](ctx context.Context, config HTTPStatsConfig, target *T) error {
	if config.Port == 0 {
		return ErrInternal("API port is zero")
	}

	endpoint := "http://" + config.Host + ":" + strconv.Itoa(config.Port) + config.Endpoint
	req, err := http.NewRequestWithContext(ctx, "GET", endpoint, nil)
	if err != nil {
		return ErrInternal("failed to create request").WithCause(err)
	}

	resp, err := getHTTPClient().Do(req)
	if err != nil {
		return ErrInternal("HTTP request failed").WithCause(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Drain body to allow connection reuse
		io.Copy(io.Discard, resp.Body)
		return ErrInternal("unexpected status code: " + strconv.Itoa(resp.StatusCode))
	}

	payload, err := io.ReadAll(resp.Body)
	if err != nil {
		return ErrInternal("failed to read response body").WithCause(err)
	}
	if err = UnmarshalJSON(payload, target); err != nil {
		return ErrInternal("failed to decode response").WithCause(err)
	}
	return nil
}

View file

@ -1,116 +0,0 @@
// Copyright (c) 2017-2026 Lethean (https://lt.hn)
// SPDX-License-Identifier: EUPL-1.2
package mining
// Lethean network mining presets.
// These provide default configurations for mining LTHN on testnet and mainnet.

// LetheanTestnetPool is the stratum endpoint of the public LTHN testnet pool.
//
//	config := mining.LetheanTestnetConfig("iTHN...")
const LetheanTestnetPool = "stratum+tcp://pool.lthn.io:5555"

// LetheanMainnetPool is the stratum endpoint of the public LTHN mainnet pool.
//
//	config := mining.LetheanMainnetConfig("iTHNabc123...")
const LetheanMainnetPool = "stratum+tcp://pool.lthn.io:3333"

// LetheanSoloTestnet is the local testnet daemon RPC endpoint used for solo mining.
//
//	config := mining.LetheanSoloConfig("iTHNabc123...", true)
const LetheanSoloTestnet = "http://127.0.0.1:46941"

// LetheanSoloMainnet is the local mainnet daemon RPC endpoint used for solo mining.
//
//	config := mining.LetheanSoloConfig("iTHNabc123...", false)
const LetheanSoloMainnet = "http://127.0.0.1:36941"
// LetheanTestnetConfig returns the default miner configuration for the
// public LTHN testnet pool, using the given wallet address.
//
//	config := mining.LetheanTestnetConfig("iTHNabc123...")
func LetheanTestnetConfig(walletAddress string) *Config {
	cfg := &Config{
		Pool:   LetheanTestnetPool,
		Wallet: walletAddress,
		Algo:   "progpowz",
		Coin:   "lethean",
	}
	cfg.Threads = 0 // auto-detect
	cfg.DonateLevel = 0
	cfg.Keepalive = true
	cfg.TLS = false
	cfg.HTTPHost = "127.0.0.1"
	cfg.HTTPPort = 37420
	cfg.CPUMaxThreadsHint = 50 // use 50% of cores by default
	return cfg
}
// LetheanMainnetConfig returns the default miner configuration for the
// public LTHN mainnet pool, using the given wallet address.
//
//	config := mining.LetheanMainnetConfig("iTHNabc123...")
func LetheanMainnetConfig(walletAddress string) *Config {
	cfg := &Config{
		Pool:   LetheanMainnetPool,
		Wallet: walletAddress,
		Algo:   "progpowz",
		Coin:   "lethean",
	}
	cfg.Threads = 0
	cfg.DonateLevel = 0
	cfg.Keepalive = true
	cfg.TLS = false
	cfg.HTTPHost = "127.0.0.1"
	cfg.HTTPPort = 37420
	cfg.CPUMaxThreadsHint = 75
	return cfg
}
// LetheanSoloConfig returns a configuration for solo mining against a
// local Lethean daemon (testnet or mainnet), using all CPU cores.
//
//	config := mining.LetheanSoloConfig("iTHNabc123...", true)
func LetheanSoloConfig(walletAddress string, testnet bool) *Config {
	// Pick the daemon RPC endpoint for the requested network.
	endpoint := LetheanSoloMainnet
	if testnet {
		endpoint = LetheanSoloTestnet
	}
	cfg := &Config{
		Pool:        endpoint,
		Wallet:      walletAddress,
		Algo:        "progpowz",
		Coin:        "lethean",
		Threads:     0,
		DonateLevel: 0,
		Keepalive:   true,
		HTTPHost:    "127.0.0.1",
		HTTPPort:    37420,
	}
	cfg.CPUMaxThreadsHint = 100 // solo: use all cores
	return cfg
}
// LetheanDefaultProfile builds a ready-to-save MiningProfile for the
// requested network, wrapping the corresponding pool configuration.
//
//	profile := mining.LetheanDefaultProfile("iTHNabc123...", true)
func LetheanDefaultProfile(walletAddress string, testnet bool) MiningProfile {
	var (
		name string
		cfg  *Config
	)
	if testnet {
		name = "Lethean Testnet"
		cfg = LetheanTestnetConfig(walletAddress)
	} else {
		name = "Lethean Mainnet"
		cfg = LetheanMainnetConfig(walletAddress)
	}
	// Marshal error deliberately ignored (best effort), matching the
	// original behavior.
	raw, _ := MarshalJSON(cfg)
	return MiningProfile{
		Name:      name,
		MinerType: "xmrig",
		Config:    RawConfig(raw),
	}
}
// LetheanDualMiningProfile builds a MiningProfile for running LTHN
// alongside another miner, capping the LTHN miner at 25% of CPU cores.
//
//	profile := mining.LetheanDualMiningProfile("iTHN...", "4...", true)
//
// NOTE(review): xmrAddress is currently unused — the generated config
// only carries the LTHN wallet. Confirm whether the XMR side is
// configured elsewhere or this parameter should feed a second pool/
// wallet entry.
func LetheanDualMiningProfile(lthnAddress, xmrAddress string, testnet bool) MiningProfile {
	// Select the pool endpoint for the requested network.
	pool := LetheanMainnetPool
	if testnet {
		pool = LetheanTestnetPool
	}
	config := &Config{
		Pool:              pool,
		Wallet:            lthnAddress,
		Algo:              "progpowz",
		Coin:              "lethean",
		DonateLevel:       0,
		Keepalive:         true,
		CPUMaxThreadsHint: 25,
		HTTPHost:          "127.0.0.1",
		HTTPPort:          37420,
	}
	// Marshal error deliberately ignored (best effort), as in the other
	// profile builders in this file.
	configJSON, _ := MarshalJSON(config)
	return MiningProfile{
		Name:      "LTHN + XMR Dual Mining",
		MinerType: "xmrig",
		Config:    RawConfig(configJSON),
	}
}

View file

@ -1,76 +0,0 @@
// Copyright (c) 2017-2026 Lethean (https://lt.hn)
// SPDX-License-Identifier: EUPL-1.2
package mining
import (
"strings"
"testing"
)
const testAddress = "iTHNUNiuu3VP1yy8xH2y5iQaABKXurdjqZmzFiBiyR4dKG3j6534e9jMriY6SM7PH8NibVwVWW1DWJfQEWnSjS8n3Wgx86pQpY"
// TestLetheanTestnetConfig_Good verifies the testnet preset's pool,
// algorithm, coin, wallet and donation defaults.
func TestLetheanTestnetConfig_Good(t *testing.T) {
	cfg := LetheanTestnetConfig(testAddress)

	if got := cfg.Pool; got != LetheanTestnetPool {
		t.Errorf("pool: got %s, want %s", got, LetheanTestnetPool)
	}
	if got := cfg.Algo; got != "progpowz" {
		t.Errorf("algo: got %s, want progpowz", got)
	}
	if got := cfg.Coin; got != "lethean" {
		t.Errorf("coin: got %s, want lethean", got)
	}
	if cfg.Wallet != testAddress {
		t.Error("wallet address mismatch")
	}
	if got := cfg.DonateLevel; got != 0 {
		t.Errorf("donate: got %d, want 0", got)
	}
}
// TestLetheanMainnetConfig_Good verifies the mainnet preset's pool and
// CPU thread hint.
func TestLetheanMainnetConfig_Good(t *testing.T) {
	cfg := LetheanMainnetConfig(testAddress)

	if got := cfg.Pool; got != LetheanMainnetPool {
		t.Errorf("pool: got %s, want %s", got, LetheanMainnetPool)
	}
	if got := cfg.CPUMaxThreadsHint; got != 75 {
		t.Errorf("threads hint: got %d, want 75", got)
	}
}
// TestLetheanSoloConfig_Good checks the solo-testnet pool and full-CPU hint.
func TestLetheanSoloConfig_Good(t *testing.T) {
	cfg := LetheanSoloConfig(testAddress, true)
	if got := cfg.Pool; got != LetheanSoloTestnet {
		t.Errorf("pool: got %s, want %s", got, LetheanSoloTestnet)
	}
	if got := cfg.CPUMaxThreadsHint; got != 100 {
		t.Errorf("threads hint: got %d, want 100 (solo)", got)
	}
}
// TestLetheanDefaultProfile_Good verifies the testnet profile metadata and
// that the marshalled config payload carries the progpowz algorithm.
func TestLetheanDefaultProfile_Good(t *testing.T) {
	profile := LetheanDefaultProfile(testAddress, true)
	if profile.Name != "Lethean Testnet" {
		t.Errorf("name: got %s, want Lethean Testnet", profile.Name)
	}
	if profile.MinerType != "xmrig" {
		t.Errorf("miner: got %s, want xmrig", profile.MinerType)
	}
	if len(profile.Config) == 0 {
		t.Error("config should not be empty")
	}
	// Config is RawConfig (JSON bytes) — verify it contains progpowz.
	// Report the whole payload on failure: the previous configStr[:50]
	// slice panicked whenever the config was shorter than 50 bytes.
	configStr := string(profile.Config)
	if !strings.Contains(configStr, "progpowz") {
		t.Errorf("config should contain progpowz, got: %s", configStr)
	}
}
// TestLetheanDualMiningProfile_Good checks the dual-mining CPU hint survives
// config marshalling.
func TestLetheanDualMiningProfile_Good(t *testing.T) {
	profile := LetheanDualMiningProfile(testAddress, "4FAKEXMR", true)
	raw := string(profile.Config)
	if !strings.Contains(raw, "25") {
		t.Errorf("dual mining config should have 25%% cpu hint")
	}
}

View file

@ -1,217 +1,186 @@
package mining
import (
"bytes"
"context"
"fmt"
"net"
"regexp"
"strconv"
"strings"
"sync"
"time"
"forge.lthn.ai/Snider/Mining/pkg/database"
"forge.lthn.ai/Snider/Mining/pkg/logging"
"github.com/Snider/Mining/pkg/database"
"github.com/Snider/Mining/pkg/logging"
)
// equalFold reports whether two strings are equal under Unicode case-folding.
//
//	equalFold("xmrig", "XMRig")       // true
//	equalFold("tt-miner", "TT-Miner") // true
func equalFold(left, right string) bool {
	l, r := []byte(left), []byte(right)
	return bytes.EqualFold(l, r)
}
// hasPrefix reports whether input begins with prefix.
//
//	hasPrefix("xmrig-rx0", "xmrig")   // true
//	hasPrefix("ttminer-rtx", "xmrig") // false
func hasPrefix(input, prefix string) bool {
	// Delegate to the standard library instead of hand-rolled length checks
	// and slicing; behavior is identical, including the empty-prefix case.
	return strings.HasPrefix(input, prefix)
}
// containsStr reports whether needle occurs anywhere within haystack.
// An empty needle always matches, mirroring strings.Contains.
//
//	containsStr("peer not found", "not found") // true
//	containsStr("connection ok", "not found")  // false
func containsStr(haystack, needle string) bool {
	// Replace the hand-rolled O(len(haystack)*len(needle)) sliding-window
	// scan with the standard library's optimized substring search; the
	// result is identical for every input, including empty strings.
	return strings.Contains(haystack, needle)
}
// safeName := instanceNameRegex.ReplaceAllString("my algo!", "_") // `my_algo_`
// sanitizeInstanceName ensures the instance name only contains safe characters.
var instanceNameRegex = regexp.MustCompile(`[^a-zA-Z0-9_/-]`)
// var managerInterface ManagerInterface = mining.NewManager()
// miner, err := managerInterface.StartMiner(ctx, "xmrig", &mining.Config{Algo: "rx/0"})
// defer managerInterface.Stop()
// ManagerInterface defines the contract for a miner manager.
type ManagerInterface interface {
StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error)
StopMiner(ctx context.Context, minerName string) error
GetMiner(minerName string) (Miner, error)
StopMiner(ctx context.Context, name string) error
GetMiner(name string) (Miner, error)
ListMiners() []Miner
ListAvailableMiners() []AvailableMiner
GetMinerHashrateHistory(minerName string) ([]HashratePoint, error)
GetMinerHashrateHistory(name string) ([]HashratePoint, error)
UninstallMiner(ctx context.Context, minerType string) error
Stop()
}
// manager := mining.NewManager()
// defer manager.Stop()
// miner, err := manager.StartMiner(ctx, "xmrig", &mining.Config{Algo: "rx/0"})
// Manager handles the lifecycle and operations of multiple miners.
type Manager struct {
miners map[string]Miner
mutex sync.RWMutex
stopChannel chan struct{}
stopOnce sync.Once
waitGroup sync.WaitGroup
databaseEnabled bool
databaseRetention int
eventHub *EventHub
eventHubMutex sync.RWMutex
miners map[string]Miner
mu sync.RWMutex
stopChan chan struct{}
stopOnce sync.Once
waitGroup sync.WaitGroup
dbEnabled bool
dbRetention int
eventHub *EventHub
eventHubMu sync.RWMutex // Separate mutex for eventHub to avoid deadlock with main mu
}
// manager.SetEventHub(eventHub)
func (manager *Manager) SetEventHub(eventHub *EventHub) {
manager.eventHubMutex.Lock()
defer manager.eventHubMutex.Unlock()
manager.eventHub = eventHub
// SetEventHub sets the event hub for broadcasting miner events
func (m *Manager) SetEventHub(hub *EventHub) {
m.eventHubMu.Lock()
defer m.eventHubMu.Unlock()
m.eventHub = hub
}
// manager.emitEvent(EventMinerStarted, MinerEventData{Name: instanceName})
// manager.emitEvent(EventMinerError, MinerEventData{Name: instanceName, Error: err.Error()})
func (manager *Manager) emitEvent(eventType EventType, data interface{}) {
manager.eventHubMutex.RLock()
eventHub := manager.eventHub
manager.eventHubMutex.RUnlock()
// emitEvent broadcasts an event if an event hub is configured
// Uses separate eventHubMu to avoid deadlock when called while holding m.mu
func (m *Manager) emitEvent(eventType EventType, data interface{}) {
m.eventHubMu.RLock()
hub := m.eventHub
m.eventHubMu.RUnlock()
if eventHub != nil {
eventHub.Broadcast(NewEvent(eventType, data))
if hub != nil {
hub.Broadcast(NewEvent(eventType, data))
}
}
var _ ManagerInterface = (*Manager)(nil)
// manager := mining.NewManager()
// defer manager.Stop() // stops miner goroutines and the hourly database cleanup loop
// NewManager creates a new miner manager and autostarts miners based on config.
func NewManager() *Manager {
manager := &Manager{
miners: make(map[string]Miner),
stopChannel: make(chan struct{}),
waitGroup: sync.WaitGroup{},
m := &Manager{
miners: make(map[string]Miner),
stopChan: make(chan struct{}),
waitGroup: sync.WaitGroup{},
}
manager.syncMinersConfig()
manager.initDatabase()
manager.autostartMiners()
manager.startStatsCollection()
return manager
m.syncMinersConfig() // Ensure config file is populated
m.initDatabase()
m.autostartMiners()
m.startStatsCollection()
return m
}
// manager := mining.NewManagerForSimulation()
// manager.StartMiner(ctx, "xmrig", &Config{Algo: "rx/0"})
// NewManagerForSimulation creates a manager for simulation mode.
// It skips autostarting real miners and config sync, suitable for UI testing.
func NewManagerForSimulation() *Manager {
manager := &Manager{
miners: make(map[string]Miner),
stopChannel: make(chan struct{}),
waitGroup: sync.WaitGroup{},
m := &Manager{
miners: make(map[string]Miner),
stopChan: make(chan struct{}),
waitGroup: sync.WaitGroup{},
}
manager.startStatsCollection()
return manager
// Skip syncMinersConfig and autostartMiners for simulation
m.startStatsCollection()
return m
}
// manager.initDatabase() reads `~/.config/lethean-desktop/miners.json` and enables database persistence when `Database.Enabled` is true.
func (manager *Manager) initDatabase() {
minersConfig, err := LoadMinersConfig()
// initDatabase initializes the SQLite database based on config.
func (m *Manager) initDatabase() {
cfg, err := LoadMinersConfig()
if err != nil {
logging.Warn("could not load config for database init", logging.Fields{"error": err})
return
}
manager.databaseEnabled = minersConfig.Database.Enabled
manager.databaseRetention = minersConfig.Database.RetentionDays
if manager.databaseRetention == 0 {
manager.databaseRetention = 30
m.dbEnabled = cfg.Database.Enabled
m.dbRetention = cfg.Database.RetentionDays
if m.dbRetention == 0 {
m.dbRetention = 30
}
if !manager.databaseEnabled {
if !m.dbEnabled {
logging.Debug("database persistence is disabled")
return
}
databaseConfig := database.Config{
dbCfg := database.Config{
Enabled: true,
RetentionDays: manager.databaseRetention,
RetentionDays: m.dbRetention,
}
if err := database.Initialize(databaseConfig); err != nil {
if err := database.Initialize(dbCfg); err != nil {
logging.Warn("failed to initialize database", logging.Fields{"error": err})
manager.databaseEnabled = false
m.dbEnabled = false
return
}
logging.Info("database persistence enabled", logging.Fields{"retention_days": manager.databaseRetention})
logging.Info("database persistence enabled", logging.Fields{"retention_days": m.dbRetention})
// manager.startDBCleanup() calls database.Cleanup(30) at startup and every hour after NewManager enables persistence.
manager.startDBCleanup()
// Start periodic cleanup
m.startDBCleanup()
}
// manager.startDBCleanup() calls database.Cleanup(manager.databaseRetention) at startup and every hour.
func (manager *Manager) startDBCleanup() {
manager.waitGroup.Add(1)
// startDBCleanup starts a goroutine that periodically cleans old data.
func (m *Manager) startDBCleanup() {
m.waitGroup.Add(1)
go func() {
defer manager.waitGroup.Done()
defer m.waitGroup.Done()
defer func() {
if r := recover(); r != nil {
logging.Error("panic in database cleanup goroutine", logging.Fields{"panic": r})
}
}()
cleanupTicker := time.NewTicker(time.Hour)
defer cleanupTicker.Stop()
// Run cleanup once per hour
ticker := time.NewTicker(time.Hour)
defer ticker.Stop()
if err := database.Cleanup(manager.databaseRetention); err != nil {
// Run initial cleanup
if err := database.Cleanup(m.dbRetention); err != nil {
logging.Warn("database cleanup failed", logging.Fields{"error": err})
}
for {
select {
case <-cleanupTicker.C:
if err := database.Cleanup(manager.databaseRetention); err != nil {
case <-ticker.C:
if err := database.Cleanup(m.dbRetention); err != nil {
logging.Warn("database cleanup failed", logging.Fields{"error": err})
}
case <-manager.stopChannel:
case <-m.stopChan:
return
}
}
}()
}
// manager.syncMinersConfig() appends `MinerAutostartConfig{MinerType: "xmrig", Autostart: false}` to miners.json when a miner is missing.
func (manager *Manager) syncMinersConfig() {
minersConfig, err := LoadMinersConfig()
// syncMinersConfig ensures the miners.json config file has entries for all available miners.
func (m *Manager) syncMinersConfig() {
cfg, err := LoadMinersConfig()
if err != nil {
logging.Warn("could not load miners config for sync", logging.Fields{"error": err})
return
}
availableMiners := manager.ListAvailableMiners()
availableMiners := m.ListAvailableMiners()
configUpdated := false
for _, availableMiner := range availableMiners {
minerExists := false
for _, configuredMiner := range minersConfig.Miners {
if equalFold(configuredMiner.MinerType, availableMiner.Name) {
minerExists = true
found := false
for _, configuredMiner := range cfg.Miners {
if strings.EqualFold(configuredMiner.MinerType, availableMiner.Name) {
found = true
break
}
}
if !minerExists {
minersConfig.Miners = append(minersConfig.Miners, MinerAutostartConfig{
if !found {
cfg.Miners = append(cfg.Miners, MinerAutostartConfig{
MinerType: availableMiner.Name,
Autostart: false,
Config: nil, // keep the new miner disabled until the user saves a profile
Config: nil, // No default config
})
configUpdated = true
logging.Info("added default config for missing miner", logging.Fields{"miner": availableMiner.Name})
@ -219,59 +188,56 @@ func (manager *Manager) syncMinersConfig() {
}
if configUpdated {
if err := SaveMinersConfig(minersConfig); err != nil {
if err := SaveMinersConfig(cfg); err != nil {
logging.Warn("failed to save updated miners config", logging.Fields{"error": err})
}
}
}
// manager.autostartMiners() starts entries with `Autostart: true` from `context.Background()`.
func (manager *Manager) autostartMiners() {
minersConfig, err := LoadMinersConfig()
// autostartMiners loads the miners config and starts any miners marked for autostart.
func (m *Manager) autostartMiners() {
cfg, err := LoadMinersConfig()
if err != nil {
logging.Warn("could not load miners config for autostart", logging.Fields{"error": err})
return
}
for _, minerConfig := range minersConfig.Miners {
if minerConfig.Autostart && minerConfig.Config != nil {
logging.Info("autostarting miner", logging.Fields{"type": minerConfig.MinerType})
if _, err := manager.StartMiner(context.Background(), minerConfig.MinerType, minerConfig.Config); err != nil {
logging.Error("failed to autostart miner", logging.Fields{"type": minerConfig.MinerType, "error": err})
for _, minerCfg := range cfg.Miners {
if minerCfg.Autostart && minerCfg.Config != nil {
logging.Info("autostarting miner", logging.Fields{"type": minerCfg.MinerType})
if _, err := m.StartMiner(context.Background(), minerCfg.MinerType, minerCfg.Config); err != nil {
logging.Error("failed to autostart miner", logging.Fields{"type": minerCfg.MinerType, "error": err})
}
}
}
}
// port, err := findAvailablePort()
// if err != nil { return 0, err }
// config.HTTPPort = port
// findAvailablePort finds an available TCP port on the local machine.
func findAvailablePort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, err
}
listener, err := net.ListenTCP("tcp", addr)
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer listener.Close()
return listener.Addr().(*net.TCPAddr).Port, nil
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}
// ctx, cancel := context.WithCancel(context.Background())
// cancel()
// _, err := manager.StartMiner(ctx, "xmrig", &Config{Algo: "rx/0"}) // returns context.Canceled before locking
func (manager *Manager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
// ctx, cancel := context.WithCancel(context.Background()); cancel(); manager.StartMiner(ctx, "xmrig", &Config{Algo: "rx/0"}) returns context.Canceled before locking.
// StartMiner starts a new miner and saves its configuration.
// The context can be used to cancel the operation.
func (m *Manager) StartMiner(ctx context.Context, minerType string, config *Config) (Miner, error) {
// Check for cancellation before acquiring lock
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
manager.mutex.Lock()
defer manager.mutex.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
if config == nil {
config = &Config{}
@ -284,27 +250,27 @@ func (manager *Manager) StartMiner(ctx context.Context, minerType string, config
instanceName := miner.GetName()
if config.Algo != "" {
// sanitizedAlgo := instanceNameRegex.ReplaceAllString("rx/0", "_") // "rx_0"
// Sanitize algo to prevent directory traversal or invalid filenames
sanitizedAlgo := instanceNameRegex.ReplaceAllString(config.Algo, "_")
instanceName = instanceName + "-" + sanitizedAlgo
instanceName = fmt.Sprintf("%s-%s", instanceName, sanitizedAlgo)
} else {
instanceName = instanceName + "-" + strconv.FormatInt(time.Now().UnixNano()%1000, 10)
instanceName = fmt.Sprintf("%s-%d", instanceName, time.Now().UnixNano()%1000)
}
if _, exists := manager.miners[instanceName]; exists {
return nil, ErrMinerExists(instanceName)
if _, exists := m.miners[instanceName]; exists {
return nil, fmt.Errorf("a miner with a similar configuration is already running: %s", instanceName)
}
// config.HTTPPort = 3333 keeps the miner API on a user-supplied port between 1024 and 65535.
// Validate user-provided HTTPPort if specified
if config.HTTPPort != 0 {
if config.HTTPPort < 1024 || config.HTTPPort > 65535 {
return nil, ErrInvalidConfig("HTTPPort must be between 1024 and 65535, got " + strconv.Itoa(config.HTTPPort))
return nil, fmt.Errorf("HTTPPort must be between 1024 and 65535, got %d", config.HTTPPort)
}
}
apiPort, err := findAvailablePort()
if err != nil {
return nil, ErrInternal("failed to find an available port for the miner API").WithCause(err)
return nil, fmt.Errorf("failed to find an available port for the miner API: %w", err)
}
if config.HTTPPort == 0 {
config.HTTPPort = apiPort
@ -323,31 +289,31 @@ func (manager *Manager) StartMiner(ctx context.Context, minerType string, config
}
}
// manager.emitEvent(EventMinerStarting, MinerEventData{Name: "xmrig-rx_0"}) fires before miner.Start(config).
manager.emitEvent(EventMinerStarting, MinerEventData{
// Emit starting event before actually starting
m.emitEvent(EventMinerStarting, MinerEventData{
Name: instanceName,
})
if err := miner.Start(config); err != nil {
// manager.emitEvent(EventMinerError, MinerEventData{Name: "xmrig-rx_0", Error: err.Error()}) reports the failure before returning it.
manager.emitEvent(EventMinerError, MinerEventData{
// Emit error event
m.emitEvent(EventMinerError, MinerEventData{
Name: instanceName,
Error: err.Error(),
})
return nil, err
}
manager.miners[instanceName] = miner
m.miners[instanceName] = miner
if err := manager.updateMinerConfig(minerType, true, config); err != nil {
if err := m.updateMinerConfig(minerType, true, config); err != nil {
logging.Warn("failed to save miner config for autostart", logging.Fields{"error": err})
}
logMessage := "CryptoCurrency Miner started: " + miner.GetName() + " (Binary: " + miner.GetBinaryPath() + ")"
logMessage := fmt.Sprintf("CryptoCurrency Miner started: %s (Binary: %s)", miner.GetName(), miner.GetBinaryPath())
logToSyslog(logMessage)
// manager.emitEvent(EventMinerStarted, MinerEventData{Name: "xmrig-rx_0"}) marks the miner as running for websocket clients.
manager.emitEvent(EventMinerStarted, MinerEventData{
// Emit started event
m.emitEvent(EventMinerStarted, MinerEventData{
Name: instanceName,
})
@ -355,37 +321,37 @@ func (manager *Manager) StartMiner(ctx context.Context, minerType string, config
return miner, nil
}
// manager.UninstallMiner(ctx, "xmrig") stops all xmrig instances and removes the matching config entry.
// manager.UninstallMiner(ctx, "ttminer") stops all ttminer instances and removes the matching config entry.
func (manager *Manager) UninstallMiner(ctx context.Context, minerType string) error {
// ctx, cancel := context.WithCancel(context.Background()); cancel(); manager.UninstallMiner(ctx, "xmrig") returns context.Canceled before locking.
// UninstallMiner stops, uninstalls, and removes a miner's configuration.
// The context can be used to cancel the operation.
func (m *Manager) UninstallMiner(ctx context.Context, minerType string) error {
// Check for cancellation before acquiring lock
select {
case <-ctx.Done():
return ctx.Err()
default:
}
manager.mutex.Lock()
// manager.UninstallMiner(ctx, "xmrig") collects every running xmrig instance before removing it from the map.
m.mu.Lock()
// Collect miners to stop and delete (can't modify map during iteration)
minersToDelete := make([]string, 0)
minersToStop := make([]Miner, 0)
for name, runningMiner := range manager.miners {
if xmrigInstance, ok := runningMiner.(*XMRigMiner); ok && equalFold(xmrigInstance.ExecutableName, minerType) {
for name, runningMiner := range m.miners {
if rm, ok := runningMiner.(*XMRigMiner); ok && strings.EqualFold(rm.ExecutableName, minerType) {
minersToStop = append(minersToStop, runningMiner)
minersToDelete = append(minersToDelete, name)
}
if ttInstance, ok := runningMiner.(*TTMiner); ok && equalFold(ttInstance.ExecutableName, minerType) {
if rm, ok := runningMiner.(*TTMiner); ok && strings.EqualFold(rm.ExecutableName, minerType) {
minersToStop = append(minersToStop, runningMiner)
minersToDelete = append(minersToDelete, name)
}
}
// delete(manager.miners, "xmrig-rx_0") happens before stopping miners so Stop can block without holding the lock.
// Delete from map first, then release lock before stopping (Stop may block)
for _, name := range minersToDelete {
delete(manager.miners, name)
delete(m.miners, name)
}
manager.mutex.Unlock()
m.mu.Unlock()
// miner.Stop() runs outside the lock so one slow uninstall does not block other manager calls.
// Stop miners outside the lock to avoid blocking
for i, miner := range minersToStop {
if err := miner.Stop(); err != nil {
logging.Warn("failed to stop running miner during uninstall", logging.Fields{"miner": minersToDelete[i], "error": err})
@ -398,36 +364,36 @@ func (manager *Manager) UninstallMiner(ctx context.Context, minerType string) er
}
if err := miner.Uninstall(); err != nil {
return ErrInternal("failed to uninstall miner files").WithCause(err)
return fmt.Errorf("failed to uninstall miner files: %w", err)
}
return UpdateMinersConfig(func(minersConfig *MinersConfig) error {
return UpdateMinersConfig(func(cfg *MinersConfig) error {
var updatedMiners []MinerAutostartConfig
for _, minerConfig := range minersConfig.Miners {
if !equalFold(minerConfig.MinerType, minerType) {
updatedMiners = append(updatedMiners, minerConfig)
for _, minerCfg := range cfg.Miners {
if !strings.EqualFold(minerCfg.MinerType, minerType) {
updatedMiners = append(updatedMiners, minerCfg)
}
}
minersConfig.Miners = updatedMiners
cfg.Miners = updatedMiners
return nil
})
}
// manager.updateMinerConfig("xmrig", true, config) // saves Autostart=true and the last-used config back to miners.json
func (manager *Manager) updateMinerConfig(minerType string, autostart bool, config *Config) error {
return UpdateMinersConfig(func(minersConfig *MinersConfig) error {
minerFound := false
for i, minerConfig := range minersConfig.Miners {
if equalFold(minerConfig.MinerType, minerType) {
minersConfig.Miners[i].Autostart = autostart
minersConfig.Miners[i].Config = config
minerFound = true
// updateMinerConfig saves the autostart and last-used config for a miner.
func (m *Manager) updateMinerConfig(minerType string, autostart bool, config *Config) error {
return UpdateMinersConfig(func(cfg *MinersConfig) error {
found := false
for i, minerCfg := range cfg.Miners {
if strings.EqualFold(minerCfg.MinerType, minerType) {
cfg.Miners[i].Autostart = autostart
cfg.Miners[i].Config = config
found = true
break
}
}
if !minerFound {
minersConfig.Miners = append(minersConfig.Miners, MinerAutostartConfig{
if !found {
cfg.Miners = append(cfg.Miners, MinerAutostartConfig{
MinerType: minerType,
Autostart: autostart,
Config: config,
@ -437,25 +403,26 @@ func (manager *Manager) updateMinerConfig(minerType string, autostart bool, conf
})
}
// manager.StopMiner(ctx, "xmrig/monero") stops the matching miner instance and removes it from the manager map.
// manager.StopMiner(ctx, "ttminer/rtx4090") still removes the entry when the miner has already stopped.
func (manager *Manager) StopMiner(ctx context.Context, minerName string) error {
// ctx, cancel := context.WithCancel(context.Background()); cancel(); manager.StopMiner(ctx, "xmrig-rx_0") returns context.Canceled before locking.
// StopMiner stops a running miner and removes it from the manager.
// If the miner is already stopped, it will still be removed from the manager.
// The context can be used to cancel the operation.
func (m *Manager) StopMiner(ctx context.Context, name string) error {
// Check for cancellation before acquiring lock
select {
case <-ctx.Done():
return ctx.Err()
default:
}
manager.mutex.Lock()
defer manager.mutex.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
miner, exists := manager.miners[minerName]
miner, exists := m.miners[name]
if !exists {
for minerKey := range manager.miners {
if hasPrefix(minerKey, minerName) {
miner = manager.miners[minerKey]
minerName = minerKey
for k := range m.miners {
if strings.HasPrefix(k, name) {
miner = m.miners[k]
name = k
exists = true
break
}
@ -463,31 +430,32 @@ func (manager *Manager) StopMiner(ctx context.Context, minerName string) error {
}
if !exists {
return ErrMinerNotFound(minerName)
return fmt.Errorf("miner not found: %s", name)
}
// manager.emitEvent(EventMinerStopping, MinerEventData{Name: "xmrig-rx_0"}) tells websocket clients shutdown has started.
manager.emitEvent(EventMinerStopping, MinerEventData{
Name: minerName,
// Emit stopping event
m.emitEvent(EventMinerStopping, MinerEventData{
Name: name,
})
// stopErr := miner.Stop() may fail after an external kill, but cleanup continues so the manager state stays accurate.
// Try to stop the miner, but always remove it from the map
// This handles the case where a miner crashed or was killed externally
stopErr := miner.Stop()
// delete(manager.miners, "xmrig-rx_0") removes stale entries even when the process has already exited.
delete(manager.miners, minerName)
// Always remove from map - if it's not running, we still want to clean it up
delete(m.miners, name)
// manager.emitEvent(EventMinerStopped, MinerEventData{Name: "xmrig-rx_0", Reason: "stopped"}) confirms the final stop reason.
// Emit stopped event
reason := "stopped"
if stopErr != nil && stopErr.Error() != "miner is not running" {
reason = stopErr.Error()
}
manager.emitEvent(EventMinerStopped, MinerEventData{
Name: minerName,
m.emitEvent(EventMinerStopped, MinerEventData{
Name: name,
Reason: reason,
})
// stopErr = errors.New("permission denied") still returns the stop failure after the manager removes the stale entry.
// Only return error if it wasn't just "miner is not running"
if stopErr != nil && stopErr.Error() != "miner is not running" {
return stopErr
}
@ -496,55 +464,53 @@ func (manager *Manager) StopMiner(ctx context.Context, minerName string) error {
return nil
}
// miner, err := manager.GetMiner("xmrig-randomx") // returns ErrMinerNotFound when the name is missing
// if err != nil { /* miner not found */ }
func (manager *Manager) GetMiner(name string) (Miner, error) {
manager.mutex.RLock()
defer manager.mutex.RUnlock()
miner, exists := manager.miners[name]
// GetMiner retrieves a running miner by its name.
func (m *Manager) GetMiner(name string) (Miner, error) {
m.mu.RLock()
defer m.mu.RUnlock()
miner, exists := m.miners[name]
if !exists {
return nil, ErrMinerNotFound(name)
return nil, fmt.Errorf("miner not found: %s", name)
}
return miner, nil
}
// miners := manager.ListMiners()
// for _, miner := range miners { logging.Info(miner.GetName()) }
func (manager *Manager) ListMiners() []Miner {
manager.mutex.RLock()
defer manager.mutex.RUnlock()
miners := make([]Miner, 0, len(manager.miners))
for _, miner := range manager.miners {
// ListMiners returns a slice of all running miners.
func (m *Manager) ListMiners() []Miner {
m.mu.RLock()
defer m.mu.RUnlock()
miners := make([]Miner, 0, len(m.miners))
for _, miner := range m.miners {
miners = append(miners, miner)
}
return miners
}
// simulatedMiner := NewSimulatedMiner(SimulatedMinerConfig{Name: "sim-rx0"})
// if err := manager.RegisterMiner(simulatedMiner); err != nil { return err }
func (manager *Manager) RegisterMiner(miner Miner) error {
minerName := miner.GetName()
// RegisterMiner registers an already-started miner with the manager.
// This is useful for simulated miners or externally managed miners.
func (m *Manager) RegisterMiner(miner Miner) error {
name := miner.GetName()
manager.mutex.Lock()
if _, exists := manager.miners[minerName]; exists {
manager.mutex.Unlock()
return ErrMinerExists(minerName)
m.mu.Lock()
if _, exists := m.miners[name]; exists {
m.mu.Unlock()
return fmt.Errorf("miner %s is already registered", name)
}
manager.miners[minerName] = miner
manager.mutex.Unlock()
m.miners[name] = miner
m.mu.Unlock()
logging.Info("registered miner", logging.Fields{"miner_name": minerName})
logging.Info("registered miner", logging.Fields{"name": name})
// Emit miner started event (outside lock) with the shared event payload shape.
manager.emitEvent(EventMinerStarted, MinerEventData{
Name: minerName,
// Emit miner started event (outside lock)
m.emitEvent(EventMinerStarted, map[string]interface{}{
"name": name,
})
return nil
}
// for _, availableMiner := range manager.ListAvailableMiners() { logging.Info(availableMiner.Name, nil) }
func (manager *Manager) ListAvailableMiners() []AvailableMiner {
// ListAvailableMiners returns a list of available miners that can be started.
func (m *Manager) ListAvailableMiners() []AvailableMiner {
return []AvailableMiner{
{
Name: "xmrig",
@ -557,11 +523,11 @@ func (manager *Manager) ListAvailableMiners() []AvailableMiner {
}
}
// manager.startStatsCollection() // NewManager() uses this to poll each running miner every HighResolutionInterval
func (manager *Manager) startStatsCollection() {
manager.waitGroup.Add(1)
// startStatsCollection starts a goroutine to periodically collect stats from active miners.
func (m *Manager) startStatsCollection() {
m.waitGroup.Add(1)
go func() {
defer manager.waitGroup.Done()
defer m.waitGroup.Done()
defer func() {
if r := recover(); r != nil {
logging.Error("panic in stats collection goroutine", logging.Fields{"panic": r})
@ -573,23 +539,24 @@ func (manager *Manager) startStatsCollection() {
for {
select {
case <-ticker.C:
manager.collectMinerStats()
case <-manager.stopChannel:
m.collectMinerStats()
case <-m.stopChan:
return
}
}
}()
}
// ctx, cancel := context.WithTimeout(ctx, statsCollectionTimeout)
// statsCollectionTimeout is the maximum time to wait for stats from a single miner.
const statsCollectionTimeout = 5 * time.Second
// manager.collectMinerStats() // the stats ticker calls this to poll all running miners in parallel
func (manager *Manager) collectMinerStats() {
// collectMinerStats iterates through active miners and collects their stats.
// Stats are collected in parallel to reduce overall collection time.
func (m *Manager) collectMinerStats() {
// Take a snapshot of miners under read lock - minimize lock duration
manager.mutex.RLock()
if len(manager.miners) == 0 {
manager.mutex.RUnlock()
m.mu.RLock()
if len(m.miners) == 0 {
m.mu.RUnlock()
return
}
@ -597,22 +564,22 @@ func (manager *Manager) collectMinerStats() {
miner Miner
minerType string
}
miners := make([]minerInfo, 0, len(manager.miners))
for _, miner := range manager.miners {
miners := make([]minerInfo, 0, len(m.miners))
for _, miner := range m.miners {
// Use the miner's GetType() method for proper type identification
miners = append(miners, minerInfo{miner: miner, minerType: miner.GetType()})
}
databaseEnabled := manager.databaseEnabled // Copy to avoid holding lock
manager.mutex.RUnlock()
dbEnabled := m.dbEnabled // Copy to avoid holding lock
m.mu.RUnlock()
now := time.Now()
// Collect stats from all miners in parallel
var collectionWaitGroup sync.WaitGroup
for _, entry := range miners {
collectionWaitGroup.Add(1)
var wg sync.WaitGroup
for _, mi := range miners {
wg.Add(1)
go func(miner Miner, minerType string) {
defer collectionWaitGroup.Done()
defer wg.Done()
defer func() {
if r := recover(); r != nil {
logging.Error("panic in single miner stats collection", logging.Fields{
@ -621,20 +588,21 @@ func (manager *Manager) collectMinerStats() {
})
}
}()
manager.collectSingleMinerStats(miner, minerType, now, databaseEnabled)
}(entry.miner, entry.minerType)
m.collectSingleMinerStats(miner, minerType, now, dbEnabled)
}(mi.miner, mi.minerType)
}
collectionWaitGroup.Wait()
wg.Wait()
}
// for attempt := 0; attempt <= statsRetryCount; attempt++ { ... }
// statsRetryCount is the number of retries for transient stats failures.
const statsRetryCount = 2
// time.Sleep(statsRetryDelay) // between retry attempts
// statsRetryDelay is the delay between stats collection retries.
const statsRetryDelay = 500 * time.Millisecond
// manager.collectSingleMinerStats(miner, "xmrig", time.Now(), true) // retries up to statsRetryCount times; persists to DB if databaseEnabled
func (manager *Manager) collectSingleMinerStats(miner Miner, minerType string, now time.Time, databaseEnabled bool) {
// collectSingleMinerStats collects stats from a single miner with retry logic.
// This is called concurrently for each miner.
func (m *Manager) collectSingleMinerStats(miner Miner, minerType string, now time.Time, dbEnabled bool) {
minerName := miner.GetName()
var stats *PerformanceMetrics
@ -686,21 +654,21 @@ func (manager *Manager) collectSingleMinerStats(miner Miner, minerType string, n
miner.ReduceHashrateHistory(now)
// Persist to database if enabled
if databaseEnabled {
databasePoint := database.HashratePoint{
if dbEnabled {
dbPoint := database.HashratePoint{
Timestamp: point.Timestamp,
Hashrate: point.Hashrate,
}
// database.InsertHashratePoint(ctx, "xmrig-rx_0", databasePoint, database.ResolutionHigh) // persists a single sample
databaseWriteContext, databaseCancel := context.WithTimeout(context.Background(), statsCollectionTimeout)
if err := database.InsertHashratePoint(databaseWriteContext, minerName, minerType, databasePoint, database.ResolutionHigh); err != nil {
// Create a new context for DB writes (original context is from retry loop)
dbCtx, dbCancel := context.WithTimeout(context.Background(), statsCollectionTimeout)
if err := database.InsertHashratePoint(dbCtx, minerName, minerType, dbPoint, database.ResolutionHigh); err != nil {
logging.Warn("failed to persist hashrate", logging.Fields{"miner": minerName, "error": err})
}
databaseCancel()
dbCancel()
}
// Emit stats event for real-time WebSocket updates
manager.emitEvent(EventMinerStats, MinerStatsData{
m.emitEvent(EventMinerStats, MinerStatsData{
Name: minerName,
Hashrate: stats.Hashrate,
Shares: stats.Shares,
@ -711,39 +679,39 @@ func (manager *Manager) collectSingleMinerStats(miner Miner, minerType string, n
})
}
// points, err := manager.GetMinerHashrateHistory("xmrig")
// for _, point := range points { logging.Info("hashrate", logging.Fields{"time": point.Timestamp, "rate": point.Hashrate}) }
func (manager *Manager) GetMinerHashrateHistory(minerName string) ([]HashratePoint, error) {
manager.mutex.RLock()
defer manager.mutex.RUnlock()
miner, exists := manager.miners[minerName]
// GetMinerHashrateHistory returns the hashrate history for a specific miner.
func (m *Manager) GetMinerHashrateHistory(name string) ([]HashratePoint, error) {
m.mu.RLock()
defer m.mu.RUnlock()
miner, exists := m.miners[name]
if !exists {
return nil, ErrMinerNotFound(minerName)
return nil, fmt.Errorf("miner not found: %s", name)
}
return miner.GetHashrateHistory(), nil
}
// ctx, cancel := context.WithTimeout(context.Background(), ShutdownTimeout)
// ShutdownTimeout is the maximum time to wait for goroutines during shutdown
const ShutdownTimeout = 10 * time.Second
// defer manager.Stop() // stops miners, waits for goroutines, and closes the database during shutdown
func (manager *Manager) Stop() {
manager.stopOnce.Do(func() {
// Stop stops all running miners, background goroutines, and closes resources.
// Safe to call multiple times - subsequent calls are no-ops.
func (m *Manager) Stop() {
m.stopOnce.Do(func() {
// Stop all running miners first
manager.mutex.Lock()
for name, miner := range manager.miners {
m.mu.Lock()
for name, miner := range m.miners {
if err := miner.Stop(); err != nil {
logging.Warn("failed to stop miner", logging.Fields{"miner": name, "error": err})
}
}
manager.mutex.Unlock()
m.mu.Unlock()
close(manager.stopChannel)
close(m.stopChan)
// Wait for goroutines with timeout
done := make(chan struct{})
go func() {
manager.waitGroup.Wait()
m.waitGroup.Wait()
close(done)
}()
@ -755,7 +723,7 @@ func (manager *Manager) Stop() {
}
// Close the database
if manager.databaseEnabled {
if m.dbEnabled {
if err := database.Close(); err != nil {
logging.Warn("failed to close database", logging.Fields{"error": err})
}
@ -763,47 +731,45 @@ func (manager *Manager) Stop() {
})
}
// stats, err := manager.GetMinerHistoricalStats("xmrig")
// if err == nil { logging.Info("stats", logging.Fields{"average": stats.AverageRate}) }
func (manager *Manager) GetMinerHistoricalStats(minerName string) (*database.HashrateStats, error) {
if !manager.databaseEnabled {
return nil, ErrDatabaseError("database persistence is disabled")
// GetMinerHistoricalStats returns historical stats from the database for a miner.
func (m *Manager) GetMinerHistoricalStats(minerName string) (*database.HashrateStats, error) {
if !m.dbEnabled {
return nil, fmt.Errorf("database persistence is disabled")
}
return database.GetHashrateStats(minerName)
}
// points, err := manager.GetMinerHistoricalHashrate("xmrig", time.Now().Add(-1*time.Hour), time.Now())
func (manager *Manager) GetMinerHistoricalHashrate(minerName string, since, until time.Time) ([]HashratePoint, error) {
if !manager.databaseEnabled {
return nil, ErrDatabaseError("database persistence is disabled")
// GetMinerHistoricalHashrate returns historical hashrate data from the database.
func (m *Manager) GetMinerHistoricalHashrate(minerName string, since, until time.Time) ([]HashratePoint, error) {
if !m.dbEnabled {
return nil, fmt.Errorf("database persistence is disabled")
}
databasePoints, err := database.GetHashrateHistory(minerName, database.ResolutionHigh, since, until)
dbPoints, err := database.GetHashrateHistory(minerName, database.ResolutionHigh, since, until)
if err != nil {
return nil, err
}
// Convert database points to mining points
points := make([]HashratePoint, len(databasePoints))
for i, databasePoint := range databasePoints {
points := make([]HashratePoint, len(dbPoints))
for i, p := range dbPoints {
points[i] = HashratePoint{
Timestamp: databasePoint.Timestamp,
Hashrate: databasePoint.Hashrate,
Timestamp: p.Timestamp,
Hashrate: p.Hashrate,
}
}
return points, nil
}
// allStats, err := manager.GetAllMinerHistoricalStats()
// for _, stats := range allStats { logging.Info("stats", logging.Fields{"miner": stats.MinerName, "average": stats.AverageRate}) }
func (manager *Manager) GetAllMinerHistoricalStats() ([]database.HashrateStats, error) {
if !manager.databaseEnabled {
return nil, ErrDatabaseError("database persistence is disabled")
// GetAllMinerHistoricalStats returns historical stats for all miners from the database.
func (m *Manager) GetAllMinerHistoricalStats() ([]database.HashrateStats, error) {
if !m.dbEnabled {
return nil, fmt.Errorf("database persistence is disabled")
}
return database.GetAllMinerStats()
}
// if manager.IsDatabaseEnabled() { /* persist stats */ }
func (manager *Manager) IsDatabaseEnabled() bool {
return manager.databaseEnabled
// IsDatabaseEnabled returns whether database persistence is enabled.
func (m *Manager) IsDatabaseEnabled() bool {
return m.dbEnabled
}

View file

@ -9,63 +9,69 @@ import (
// TestConcurrentStartMultipleMiners verifies that concurrent StartMiner calls
// with different algorithms create unique miners without race conditions
func TestManagerRace_ConcurrentStartMultipleMiners_Ugly(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
func TestConcurrentStartMultipleMiners(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
errors := make(chan error, 10)
// Try to start 10 miners concurrently with different algos
for i := 0; i < 10; i++ {
waitGroup.Add(1)
wg.Add(1)
go func(index int) {
defer waitGroup.Done()
defer wg.Done()
config := &Config{
HTTPPort: 10000 + index,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "algo" + string(rune('A'+index)),
Algo: "algo" + string(rune('A'+index)), // algoA, algoB, etc.
}
_, err := manager.StartMiner(context.Background(), "xmrig", config)
_, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
errors <- err
}
}(i)
}
waitGroup.Wait()
wg.Wait()
close(errors)
// Collect errors
var errCount int
for err := range errors {
t.Logf("Concurrent start error: %v", err)
errCount++
}
// Some failures are expected due to port conflicts, but shouldn't crash
t.Logf("Started miners with %d errors out of 10 attempts", errCount)
// Verify no data races occurred (test passes if no race detector warnings)
}
// TestConcurrentStartDuplicateMiner verifies that starting the same miner
// concurrently results in only one success
func TestManagerRace_ConcurrentStartDuplicateMiner_Ugly(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
func TestConcurrentStartDuplicateMiner(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
successes := make(chan struct{}, 10)
failures := make(chan error, 10)
// Try to start the same miner 10 times concurrently
for i := 0; i < 10; i++ {
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
config := &Config{
HTTPPort: 11000,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "duplicate_test",
Algo: "duplicate_test", // Same algo = same instance name
}
_, err := manager.StartMiner(context.Background(), "xmrig", config)
_, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
failures <- err
} else {
@ -74,7 +80,7 @@ func TestManagerRace_ConcurrentStartDuplicateMiner_Ugly(t *testing.T) {
}()
}
waitGroup.Wait()
wg.Wait()
close(successes)
close(failures)
@ -91,12 +97,13 @@ func TestManagerRace_ConcurrentStartDuplicateMiner_Ugly(t *testing.T) {
// TestConcurrentStartStop verifies that starting and stopping miners
// concurrently doesn't cause race conditions
func TestManagerRace_ConcurrentStartStop_Ugly(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
func TestConcurrentStartStop(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
// Start some miners
for i := 0; i < 5; i++ {
config := &Config{
HTTPPort: 12000 + i,
@ -104,65 +111,73 @@ func TestManagerRace_ConcurrentStartStop_Ugly(t *testing.T) {
Wallet: "testwallet",
Algo: "startstop" + string(rune('A'+i)),
}
_, err := manager.StartMiner(context.Background(), "xmrig", config)
_, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Logf("Setup error (may be expected): %v", err)
}
}
// Give miners time to start
time.Sleep(100 * time.Millisecond)
// Now concurrently start new ones and stop existing ones
for i := 0; i < 10; i++ {
waitGroup.Add(2)
wg.Add(2)
// Start a new miner
go func(index int) {
defer waitGroup.Done()
defer wg.Done()
config := &Config{
HTTPPort: 12100 + index,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "new" + string(rune('A'+index)),
}
manager.StartMiner(context.Background(), "xmrig", config)
m.StartMiner(context.Background(), "xmrig", config)
}(i)
// Stop a miner
go func(index int) {
defer waitGroup.Done()
defer wg.Done()
minerName := "xmrig-startstop" + string(rune('A'+index%5))
manager.StopMiner(context.Background(), minerName)
m.StopMiner(context.Background(), minerName)
}(i)
}
waitGroup.Wait()
wg.Wait()
// Test passes if no race detector warnings
}
// TestConcurrentListMiners verifies that listing miners while modifying
// the miner map doesn't cause race conditions
func TestManagerRace_ConcurrentListMiners_Ugly(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
func TestConcurrentListMiners(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
waitGroup.Add(1)
// Continuously list miners
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for {
select {
case <-ctx.Done():
return
default:
miners := manager.ListMiners()
_ = len(miners)
miners := m.ListMiners()
_ = len(miners) // Use the result
}
}
}()
waitGroup.Add(1)
// Continuously start miners
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for i := 0; i < 20; i++ {
select {
case <-ctx.Done():
@ -174,56 +189,61 @@ func TestManagerRace_ConcurrentListMiners_Ugly(t *testing.T) {
Wallet: "testwallet",
Algo: "list" + string(rune('A'+i%26)),
}
manager.StartMiner(context.Background(), "xmrig", config)
m.StartMiner(context.Background(), "xmrig", config)
time.Sleep(10 * time.Millisecond)
}
}
}()
waitGroup.Wait()
wg.Wait()
// Test passes if no race detector warnings
}
// TestConcurrentGetMiner verifies that getting a miner while others
// are being started/stopped doesn't cause race conditions
func TestManagerRace_ConcurrentGetMiner_Ugly(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
func TestConcurrentGetMiner(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
// Start a miner first
config := &Config{
HTTPPort: 14000,
Pool: "test:1234",
Wallet: "testwallet",
Algo: "gettest",
}
miner, err := manager.StartMiner(context.Background(), "xmrig", config)
miner, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Skipf("Could not start test miner: %v", err)
}
minerName := miner.GetName()
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
// Continuously get the miner
for i := 0; i < 5; i++ {
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for {
select {
case <-ctx.Done():
return
default:
manager.GetMiner(minerName)
m.GetMiner(minerName)
time.Sleep(time.Millisecond)
}
}
}()
}
waitGroup.Add(1)
// Start more miners in parallel
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for i := 0; i < 10; i++ {
select {
case <-ctx.Done():
@ -235,20 +255,23 @@ func TestManagerRace_ConcurrentGetMiner_Ugly(t *testing.T) {
Wallet: "testwallet",
Algo: "parallel" + string(rune('A'+i)),
}
manager.StartMiner(context.Background(), "xmrig", config)
m.StartMiner(context.Background(), "xmrig", config)
}
}
}()
waitGroup.Wait()
wg.Wait()
// Test passes if no race detector warnings
}
// TestConcurrentStatsCollection verifies that stats collection
// doesn't race with miner operations
func TestManagerRace_ConcurrentStatsCollection_Ugly(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
func TestConcurrentStatsCollection(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
// Start some miners
for i := 0; i < 3; i++ {
config := &Config{
HTTPPort: 15000 + i,
@ -256,16 +279,17 @@ func TestManagerRace_ConcurrentStatsCollection_Ugly(t *testing.T) {
Wallet: "testwallet",
Algo: "stats" + string(rune('A'+i)),
}
manager.StartMiner(context.Background(), "xmrig", config)
m.StartMiner(context.Background(), "xmrig", config)
}
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
waitGroup.Add(1)
// Simulate stats collection (normally done by background goroutine)
wg.Add(1)
go func() {
defer waitGroup.Done()
defer wg.Done()
for i := 0; i < 50; i++ {
miners := manager.ListMiners()
miners := m.ListMiners()
for _, miner := range miners {
miner.GetStats(context.Background())
}
@ -273,15 +297,18 @@ func TestManagerRace_ConcurrentStatsCollection_Ugly(t *testing.T) {
}
}()
waitGroup.Add(1)
// Concurrently stop miners
wg.Add(1)
go func() {
defer waitGroup.Done()
time.Sleep(100 * time.Millisecond)
defer wg.Done()
time.Sleep(100 * time.Millisecond) // Let stats collection start
for _, name := range []string{"xmrig-statsA", "xmrig-statsB", "xmrig-statsC"} {
manager.StopMiner(context.Background(), name)
m.StopMiner(context.Background(), name)
time.Sleep(50 * time.Millisecond)
}
}()
waitGroup.Wait()
wg.Wait()
// Test passes if no race detector warnings
}

View file

@ -6,10 +6,11 @@ import (
"path/filepath"
"runtime"
"testing"
"time"
)
// manager := setupTestManager(t)
// defer manager.Stop()
// setupTestManager creates a new Manager and a dummy executable for tests.
// It also temporarily modifies the PATH to include the dummy executable's directory.
func setupTestManager(t *testing.T) *Manager {
dummyDir := t.TempDir()
executableName := "miner"
@ -18,12 +19,12 @@ func setupTestManager(t *testing.T) *Manager {
}
dummyPath := filepath.Join(dummyDir, executableName)
// Create a script that prints version and exits
// Create a script that does nothing but exit, to simulate the miner executable
var script []byte
if runtime.GOOS == "windows" {
script = []byte("@echo off\necho XMRig 6.24.0\n")
script = []byte("@echo off\r\nexit 0")
} else {
script = []byte("#!/bin/sh\necho 'XMRig 6.24.0'\n")
script = []byte("#!/bin/sh\nexit 0")
}
if err := os.WriteFile(dummyPath, script, 0755); err != nil {
@ -40,93 +41,284 @@ func setupTestManager(t *testing.T) *Manager {
return NewManager()
}
// miner, err := manager.StartMiner(ctx, "xmrig", config)
// TestStartMiner tests the StartMiner function
func TestStartMiner_Good(t *testing.T) {
t.Skip("Skipping test that runs miner process as per request")
}
func TestStartMiner_Bad(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
m := setupTestManager(t)
defer m.Stop()
config := &Config{
HTTPPort: 9001,
HTTPPort: 9001, // Use a different port to avoid conflict
Pool: "test:1234",
Wallet: "testwallet",
}
_, err := manager.StartMiner(context.Background(), "unsupported", config)
// Case 1: Successfully start a supported miner
miner, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Fatalf("Expected to start miner, but got error: %v", err)
}
if miner == nil {
t.Fatal("Expected miner to be non-nil, but it was")
}
if _, exists := m.miners[miner.GetName()]; !exists {
t.Errorf("Miner %s was not added to the manager's list", miner.GetName())
}
}
func TestStartMiner_Bad(t *testing.T) {
m := setupTestManager(t)
defer m.Stop()
config := &Config{
HTTPPort: 9001, // Use a different port to avoid conflict
Pool: "test:1234",
Wallet: "testwallet",
}
// Case 2: Attempt to start an unsupported miner
_, err := m.StartMiner(context.Background(), "unsupported", config)
if err == nil {
t.Error("Expected an error when starting an unsupported miner, but got nil")
}
}
func TestStartMiner_Ugly(t *testing.T) {
t.Skip("Skipping test that runs miner process")
m := setupTestManager(t)
defer m.Stop()
// Use an algorithm to get consistent instance naming (xmrig-test_algo)
// Without algo, each start gets a random suffix and won't be detected as duplicate
config := &Config{
HTTPPort: 9001, // Use a different port to avoid conflict
Pool: "test:1234",
Wallet: "testwallet",
Algo: "test_algo", // Consistent algo = consistent instance name
}
// Case 1: Successfully start a supported miner
_, err := m.StartMiner(context.Background(), "xmrig", config)
if err != nil {
t.Fatalf("Expected to start miner, but got error: %v", err)
}
// Case 3: Attempt to start a duplicate miner (same algo = same instance name)
_, err = m.StartMiner(context.Background(), "xmrig", config)
if err == nil {
t.Error("Expected an error when starting a duplicate miner, but got nil")
}
}
// err := manager.StopMiner(ctx, "xmrig") // stops and removes the named miner
// TestStopMiner tests the StopMiner function
func TestStopMiner_Good(t *testing.T) {
t.Skip("Skipping test that runs miner process")
m := setupTestManager(t)
defer m.Stop()
config := &Config{
HTTPPort: 9002,
Pool: "test:1234",
Wallet: "testwallet",
}
// Case 1: Stop a running miner
miner, _ := m.StartMiner(context.Background(), "xmrig", config)
err := m.StopMiner(context.Background(), miner.GetName())
if err != nil {
t.Fatalf("Expected to stop miner, but got error: %v", err)
}
if _, exists := m.miners[miner.GetName()]; exists {
t.Errorf("Miner %s was not removed from the manager's list", miner.GetName())
}
}
func TestStopMiner_Bad(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
m := setupTestManager(t)
defer m.Stop()
err := manager.StopMiner(context.Background(), "nonexistent")
// Case 2: Attempt to stop a non-existent miner
err := m.StopMiner(context.Background(), "nonexistent")
if err == nil {
t.Error("Expected an error when stopping a non-existent miner, but got nil")
}
}
// miner, err := manager.GetMiner("xmrig"); miner.GetName() == "xmrig"
// TestGetMiner tests the GetMiner function
func TestGetMiner_Good(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
m := setupTestManager(t)
defer m.Stop()
miner := NewXMRigMiner()
miner.Name = "xmrig-test"
manager.mutex.Lock()
manager.miners["xmrig-test"] = miner
manager.mutex.Unlock()
config := &Config{
HTTPPort: 9003,
Pool: "test:1234",
Wallet: "testwallet",
}
retrievedMiner, err := manager.GetMiner("xmrig-test")
// Case 1: Get an existing miner
startedMiner, _ := m.StartMiner(context.Background(), "xmrig", config)
retrievedMiner, err := m.GetMiner(startedMiner.GetName())
if err != nil {
t.Fatalf("Expected to get miner, but got error: %v", err)
}
if retrievedMiner.GetName() != "xmrig-test" {
t.Errorf("Expected to get miner 'xmrig-test', but got %s", retrievedMiner.GetName())
if retrievedMiner.GetName() != startedMiner.GetName() {
t.Errorf("Expected to get miner %s, but got %s", startedMiner.GetName(), retrievedMiner.GetName())
}
}
func TestGetMiner_Bad(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
m := setupTestManager(t)
defer m.Stop()
_, err := manager.GetMiner("nonexistent")
// Case 2: Attempt to get a non-existent miner
_, err := m.GetMiner("nonexistent")
if err == nil {
t.Error("Expected an error when getting a non-existent miner, but got nil")
}
}
// miners := manager.ListMiners(); len(miners) == 1 after injecting one miner
// TestListMiners tests the ListMiners function
func TestListMiners_Good(t *testing.T) {
manager := setupTestManager(t)
defer manager.Stop()
m := setupTestManager(t)
defer m.Stop()
initialMiners := manager.ListMiners()
// Get initial count (may include autostarted miners from config)
initialMiners := m.ListMiners()
initialCount := len(initialMiners)
miner := NewXMRigMiner()
miner.Name = "xmrig-test"
manager.mutex.Lock()
manager.miners["xmrig-test"] = miner
manager.mutex.Unlock()
finalMiners := manager.ListMiners()
expectedCount := initialCount + 1
if len(finalMiners) != expectedCount {
t.Errorf("Expected %d miners, but got %d", expectedCount, len(finalMiners))
// Case 2: List miners after starting one - should have one more
config := &Config{
HTTPPort: 9004,
Pool: "test:1234",
Wallet: "testwallet",
}
_, _ = m.StartMiner(context.Background(), "xmrig", config)
miners := m.ListMiners()
if len(miners) != initialCount+1 {
t.Errorf("Expected %d miners (initial %d + 1), but got %d", initialCount+1, initialCount, len(miners))
}
}
// TestManagerStop_Idempotent verifies that calling Stop() repeatedly is safe
// and never panics, even after a miner has been started.
func TestManagerStop_Idempotent(t *testing.T) {
	mgr := setupTestManager(t)

	// Kick off one miner so the first Stop() has real work to do.
	cfg := &Config{
		HTTPPort: 9010,
		Pool:     "test:1234",
		Wallet:   "testwallet",
	}
	_, _ = mgr.StartMiner(context.Background(), "xmrig", cfg)

	// Any panic raised by the repeated Stop() calls fails the test.
	defer func() {
		if r := recover(); r != nil {
			t.Errorf("Stop() panicked: %v", r)
		}
	}()
	for i := 0; i < 3; i++ {
		mgr.Stop()
	}
	// Reaching this point without a panic means Stop() is idempotent.
}
// TestStartMiner_CancelledContext ensures StartMiner honours an
// already-cancelled context by returning context.Canceled.
func TestStartMiner_CancelledContext(t *testing.T) {
	mgr := setupTestManager(t)
	defer mgr.Stop()

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // the context is dead before StartMiner is ever invoked

	cfg := &Config{
		HTTPPort: 9011,
		Pool:     "test:1234",
		Wallet:   "testwallet",
	}
	_, err := mgr.StartMiner(ctx, "xmrig", cfg)
	if err == nil {
		t.Error("Expected error when starting miner with cancelled context")
	}
	if err != context.Canceled {
		t.Errorf("Expected context.Canceled error, got: %v", err)
	}
}
// TestStopMiner_CancelledContext ensures StopMiner honours an
// already-cancelled context by returning context.Canceled.
func TestStopMiner_CancelledContext(t *testing.T) {
	mgr := setupTestManager(t)
	defer mgr.Stop()

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // the context is dead before StopMiner is ever invoked

	err := mgr.StopMiner(ctx, "nonexistent")
	if err == nil {
		t.Error("Expected error when stopping miner with cancelled context")
	}
	if err != context.Canceled {
		t.Errorf("Expected context.Canceled error, got: %v", err)
	}
}
// TestManagerEventHub verifies that a Manager wired to an EventHub can still
// start miners and that the visible miner count grows by exactly one.
func TestManagerEventHub(t *testing.T) {
	mgr := setupTestManager(t)
	defer mgr.Stop()

	hub := NewEventHub()
	go hub.Run()
	defer hub.Stop()
	mgr.SetEventHub(hub)

	// Record how many miners exist before we start ours; config autostart may
	// already have created some.
	before := len(mgr.ListMiners())

	cfg := &Config{
		HTTPPort: 9012,
		Pool:     "test:1234",
		Wallet:   "testwallet",
	}
	if _, err := mgr.StartMiner(context.Background(), "xmrig", cfg); err != nil {
		t.Fatalf("Failed to start miner: %v", err)
	}

	// Allow asynchronous event delivery a moment to run.
	time.Sleep(50 * time.Millisecond)

	if got := len(mgr.ListMiners()); got != before+1 {
		t.Errorf("Expected %d miners, got %d", before+1, got)
	}
}
// TestManagerShutdownTimeout verifies that Manager.Stop() completes within a
// bounded amount of time instead of hanging on goroutine cleanup.
func TestManagerShutdownTimeout(t *testing.T) {
	mgr := setupTestManager(t)

	cfg := &Config{
		HTTPPort: 9013,
		Pool:     "test:1234",
		Wallet:   "testwallet",
	}
	_, _ = mgr.StartMiner(context.Background(), "xmrig", cfg)

	// Run Stop() in the background and race it against a generous deadline.
	stopped := make(chan struct{})
	go func() {
		mgr.Stop()
		close(stopped)
	}()

	select {
	case <-stopped:
		// Shutdown completed promptly.
	case <-time.After(15 * time.Second):
		t.Error("Manager.Stop() took too long - possible shutdown issue")
	}
}

View file

@ -6,8 +6,8 @@ import (
"time"
)
// snapshot := mining.GetMetricsSnapshot()
// snapshot["miners_started"].(int64)
// Metrics provides simple instrumentation counters for the mining package.
// These can be exposed via Prometheus or other metrics systems in the future.
type Metrics struct {
// API metrics
RequestsTotal atomic.Int64
@ -34,15 +34,14 @@ type Metrics struct {
P2PConnectionsTotal atomic.Int64
}
// histogram := mining.NewLatencyHistogram(1000)
// histogram.Record(42 * time.Millisecond)
// LatencyHistogram tracks request latencies with basic percentile support.
type LatencyHistogram struct {
mutex sync.Mutex
mu sync.Mutex
samples []time.Duration
maxSize int
}
// histogram := mining.NewLatencyHistogram(1000) // retain up to 1000 latency samples
// NewLatencyHistogram creates a new latency histogram with a maximum sample size.
func NewLatencyHistogram(maxSize int) *LatencyHistogram {
return &LatencyHistogram{
samples: make([]time.Duration, 0, maxSize),
@ -50,50 +49,48 @@ func NewLatencyHistogram(maxSize int) *LatencyHistogram {
}
}
// h.Record(42 * time.Millisecond) // call after each request completes
func (histogram *LatencyHistogram) Record(duration time.Duration) {
histogram.mutex.Lock()
defer histogram.mutex.Unlock()
// Record adds a latency sample.
func (h *LatencyHistogram) Record(d time.Duration) {
h.mu.Lock()
defer h.mu.Unlock()
if len(histogram.samples) >= histogram.maxSize {
if len(h.samples) >= h.maxSize {
// Ring buffer behavior - overwrite oldest
copy(histogram.samples, histogram.samples[1:])
histogram.samples = histogram.samples[:len(histogram.samples)-1]
copy(h.samples, h.samples[1:])
h.samples = h.samples[:len(h.samples)-1]
}
histogram.samples = append(histogram.samples, duration)
h.samples = append(h.samples, d)
}
// avg := h.Average() // returns 0 if no samples recorded
func (histogram *LatencyHistogram) Average() time.Duration {
histogram.mutex.Lock()
defer histogram.mutex.Unlock()
// Average returns the average latency.
func (h *LatencyHistogram) Average() time.Duration {
h.mu.Lock()
defer h.mu.Unlock()
if len(histogram.samples) == 0 {
if len(h.samples) == 0 {
return 0
}
var total time.Duration
for _, sample := range histogram.samples {
total += sample
for _, d := range h.samples {
total += d
}
return total / time.Duration(len(histogram.samples))
return total / time.Duration(len(h.samples))
}
// if h.Count() == 0 { return } // guard before calling Average()
func (histogram *LatencyHistogram) Count() int {
histogram.mutex.Lock()
defer histogram.mutex.Unlock()
return len(histogram.samples)
// Count returns the number of samples.
func (h *LatencyHistogram) Count() int {
h.mu.Lock()
defer h.mu.Unlock()
return len(h.samples)
}
// mining.DefaultMetrics.MinersStarted.Load()
// mining.DefaultMetrics.RequestLatency.Average()
// DefaultMetrics is the global metrics instance.
var DefaultMetrics = &Metrics{
RequestLatency: NewLatencyHistogram(1000),
}
// RecordRequest(true, 42*time.Millisecond) // errored request
// RecordRequest(false, 5*time.Millisecond) // successful request
// RecordRequest records an API request.
func RecordRequest(errored bool, latency time.Duration) {
DefaultMetrics.RequestsTotal.Add(1)
if errored {
@ -102,24 +99,22 @@ func RecordRequest(errored bool, latency time.Duration) {
DefaultMetrics.RequestLatency.Record(latency)
}
// RecordMinerStart() // call after miner.Start() succeeds
// RecordMinerStart records a miner start event.
func RecordMinerStart() {
DefaultMetrics.MinersStarted.Add(1)
}
// RecordMinerStop() // call after miner.Stop() completes
// RecordMinerStop records a miner stop event.
func RecordMinerStop() {
DefaultMetrics.MinersStopped.Add(1)
}
// RecordMinerError() // call when a miner crashes or fails to respond
// RecordMinerError records a miner error event.
func RecordMinerError() {
DefaultMetrics.MinersErrored.Add(1)
}
// RecordStatsCollection(false, false) // successful first-attempt collection
// RecordStatsCollection(true, false) // succeeded after retry
// RecordStatsCollection(true, true) // failed after retry
// RecordStatsCollection records a stats collection event.
func RecordStatsCollection(retried bool, failed bool) {
DefaultMetrics.StatsCollected.Add(1)
if retried {
@ -130,8 +125,7 @@ func RecordStatsCollection(retried bool, failed bool) {
}
}
// RecordWSConnection(true) // client connected
// RecordWSConnection(false) // client disconnected
// RecordWSConnection increments or decrements WebSocket connection count.
func RecordWSConnection(connected bool) {
if connected {
DefaultMetrics.WSConnections.Add(1)
@ -140,13 +134,12 @@ func RecordWSConnection(connected bool) {
}
}
// RecordWSMessage() // call after broadcasting an event to clients
// RecordWSMessage records a WebSocket message.
func RecordWSMessage() {
DefaultMetrics.WSMessages.Add(1)
}
// RecordP2PMessage(true) // outbound message dispatched
// RecordP2PMessage(false) // inbound message received
// RecordP2PMessage records a P2P message.
func RecordP2PMessage(sent bool) {
if sent {
DefaultMetrics.P2PMessagesSent.Add(1)
@ -155,8 +148,7 @@ func RecordP2PMessage(sent bool) {
}
}
// snapshot := mining.GetMetricsSnapshot()
// snapshot["requests_total"].(int64)
// GetMetricsSnapshot returns a snapshot of current metrics.
func GetMetricsSnapshot() map[string]interface{} {
return map[string]interface{}{
"requests_total": DefaultMetrics.RequestsTotal.Load(),

View file

@ -1,77 +0,0 @@
package mining
import (
"testing"
"time"
)
// TestMetrics_LatencyHistogram_Good checks that recorded samples are both
// counted and averaged correctly.
func TestMetrics_LatencyHistogram_Good(t *testing.T) {
	h := NewLatencyHistogram(10)
	h.Record(42 * time.Millisecond)
	h.Record(58 * time.Millisecond)
	if n := h.Count(); n != 2 {
		t.Fatalf("expected count 2, got %d", n)
	}
	// (42ms + 58ms) / 2 == 50ms
	if avg := h.Average(); avg != 50*time.Millisecond {
		t.Fatalf("expected average 50ms, got %v", avg)
	}
}
// TestMetrics_LatencyHistogram_Bad verifies empty-histogram behaviour: both
// Average and Count report zero when nothing has been recorded.
func TestMetrics_LatencyHistogram_Bad(t *testing.T) {
	h := NewLatencyHistogram(10)
	// Nothing recorded yet, so the average must be the zero duration.
	if avg := h.Average(); avg != 0 {
		t.Fatalf("expected 0 average for empty histogram, got %v", avg)
	}
	if n := h.Count(); n != 0 {
		t.Fatalf("expected 0 count, got %d", n)
	}
}
// TestMetrics_LatencyHistogram_Ugly exercises ring-buffer eviction: recording
// one more sample than the configured capacity must drop the oldest entry.
func TestMetrics_LatencyHistogram_Ugly(t *testing.T) {
	h := NewLatencyHistogram(2)
	for _, d := range []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} {
		h.Record(d)
	}
	if n := h.Count(); n != 2 {
		t.Fatalf("expected count 2 after overflow, got %d", n)
	}
	// With the 10ms sample evicted, only 20ms and 30ms remain.
	if avg := h.Average(); avg != 25*time.Millisecond {
		t.Fatalf("expected average 25ms (20+30)/2, got %v", avg)
	}
}
// TestMetrics_RecordRequest_Good confirms that a successful request bumps the
// global request counter by exactly one.
func TestMetrics_RecordRequest_Good(t *testing.T) {
	before := DefaultMetrics.RequestsTotal.Load()
	RecordRequest(false, 5*time.Millisecond)
	if after := DefaultMetrics.RequestsTotal.Load(); after != before+1 {
		t.Fatalf("expected total to increase by 1, got %d -> %d", before, after)
	}
}
// TestMetrics_RecordRequest_Bad confirms that an errored request bumps the
// global error counter by exactly one.
func TestMetrics_RecordRequest_Bad(t *testing.T) {
	errsBefore := DefaultMetrics.RequestsErrored.Load()
	RecordRequest(true, 5*time.Millisecond)
	if errsAfter := DefaultMetrics.RequestsErrored.Load(); errsAfter != errsBefore+1 {
		t.Fatalf("expected errored to increase by 1, got %d -> %d", errsBefore, errsAfter)
	}
}
// TestMetrics_RecordRequest_Ugly sanity-checks the metrics snapshot map: it
// must be non-nil and expose the requests_total key.
func TestMetrics_RecordRequest_Ugly(t *testing.T) {
	snap := GetMetricsSnapshot()
	if snap == nil {
		t.Fatal("expected non-nil snapshot")
	}
	if _, ok := snap["requests_total"]; !ok {
		t.Fatal("expected requests_total in snapshot")
	}
}

View file

@ -5,6 +5,8 @@ import (
"archive/zip"
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"net/http"
"os"
@ -18,20 +20,18 @@ import (
"syscall"
"time"
"forge.lthn.ai/Snider/Mining/pkg/logging"
"github.com/Snider/Mining/pkg/logging"
"github.com/adrg/xdg"
)
// buffer := NewLogBuffer(500)
// command.Stdout = buffer // satisfies io.Writer; ring-buffers miner output
// LogBuffer is a thread-safe ring buffer for capturing miner output.
type LogBuffer struct {
lines []string
maxLines int
mutex sync.RWMutex
mu sync.RWMutex
}
// buffer := NewLogBuffer(500)
// command.Stdout = buffer
// NewLogBuffer creates a new log buffer with the specified max lines.
func NewLogBuffer(maxLines int) *LogBuffer {
return &LogBuffer{
lines: make([]string, 0, maxLines),
@ -39,13 +39,13 @@ func NewLogBuffer(maxLines int) *LogBuffer {
}
}
// if len(line) > maxLineLength { line = line[:maxLineLength] + "... [truncated]" }
// maxLineLength is the maximum length of a single log line to prevent memory bloat.
const maxLineLength = 2000
// command.Stdout = lb // satisfies io.Writer; timestamps and ring-buffers each line
func (logBuffer *LogBuffer) Write(p []byte) (n int, err error) {
logBuffer.mutex.Lock()
defer logBuffer.mutex.Unlock()
// Write implements io.Writer for capturing output.
func (lb *LogBuffer) Write(p []byte) (n int, err error) {
lb.mu.Lock()
defer lb.mu.Unlock()
// Split input into lines
text := string(p)
@ -60,41 +60,39 @@ func (logBuffer *LogBuffer) Write(p []byte) (n int, err error) {
line = line[:maxLineLength] + "... [truncated]"
}
// Add timestamp prefix
timestampedLine := "[" + time.Now().Format("15:04:05") + "] " + line
logBuffer.lines = append(logBuffer.lines, timestampedLine)
timestampedLine := fmt.Sprintf("[%s] %s", time.Now().Format("15:04:05"), line)
lb.lines = append(lb.lines, timestampedLine)
// Trim if over max - force reallocation to release memory
if len(logBuffer.lines) > logBuffer.maxLines {
newSlice := make([]string, logBuffer.maxLines)
copy(newSlice, logBuffer.lines[len(logBuffer.lines)-logBuffer.maxLines:])
logBuffer.lines = newSlice
if len(lb.lines) > lb.maxLines {
newSlice := make([]string, lb.maxLines)
copy(newSlice, lb.lines[len(lb.lines)-lb.maxLines:])
lb.lines = newSlice
}
}
return len(p), nil
}
// lines := logBuffer.GetLines()
// response.Logs = lines[max(0, len(lines)-100):]
func (logBuffer *LogBuffer) GetLines() []string {
logBuffer.mutex.RLock()
defer logBuffer.mutex.RUnlock()
result := make([]string, len(logBuffer.lines))
copy(result, logBuffer.lines)
// GetLines returns all captured log lines.
func (lb *LogBuffer) GetLines() []string {
lb.mu.RLock()
defer lb.mu.RUnlock()
result := make([]string, len(lb.lines))
copy(result, lb.lines)
return result
}
// logBuffer.Clear() // called on miner Stop() to release memory
func (logBuffer *LogBuffer) Clear() {
logBuffer.mutex.Lock()
defer logBuffer.mutex.Unlock()
logBuffer.lines = logBuffer.lines[:0]
// Clear clears the log buffer.
func (lb *LogBuffer) Clear() {
lb.mu.Lock()
defer lb.mu.Unlock()
lb.lines = lb.lines[:0]
}
// type XMRigMiner struct { BaseMiner }
// func NewXMRigMiner() *XMRigMiner { return &XMRigMiner{BaseMiner: BaseMiner{MinerType: "xmrig"}} }
// BaseMiner provides a foundation for specific miner implementations.
type BaseMiner struct {
Name string `json:"name"`
MinerType string `json:"miner_type"` // Type identifier such as `xmrig` or `tt-miner`.
MinerType string `json:"miner_type"` // Type identifier (e.g., "xmrig", "tt-miner")
Version string `json:"version"`
URL string `json:"url"`
Path string `json:"path"`
@ -103,8 +101,8 @@ type BaseMiner struct {
Running bool `json:"running"`
ConfigPath string `json:"configPath"`
API *API `json:"api"`
mutex sync.RWMutex
command *exec.Cmd
mu sync.RWMutex
cmd *exec.Cmd
stdinPipe io.WriteCloser `json:"-"`
HashrateHistory []HashratePoint `json:"hashrateHistory"`
LowResHashrateHistory []HashratePoint `json:"lowResHashrateHistory"`
@ -112,21 +110,22 @@ type BaseMiner struct {
LogBuffer *LogBuffer `json:"-"`
}
// minerType := miner.GetType() // returns values such as `xmrig` or `tt-miner`.
// GetType returns the miner type identifier.
func (b *BaseMiner) GetType() string {
return b.MinerType
}
// name := miner.GetName() // returns values such as `xmrig-randomx` or `tt-miner-kawpow`.
// GetName returns the name of the miner.
func (b *BaseMiner) GetName() string {
b.mutex.RLock()
defer b.mutex.RUnlock()
b.mu.RLock()
defer b.mu.RUnlock()
return b.Name
}
// path := miner.GetPath() // returns paths such as `/home/alice/.local/share/lethean-desktop/miners/xmrig`.
// GetPath returns the base installation directory for the miner type.
// It uses the stable ExecutableName field to ensure the correct path.
func (b *BaseMiner) GetPath() string {
dataPath, err := xdg.DataFile("lethean-desktop/miners/" + b.ExecutableName)
dataPath, err := xdg.DataFile(fmt.Sprintf("lethean-desktop/miners/%s", b.ExecutableName))
if err != nil {
home, err := os.UserHomeDir()
if err != nil {
@ -137,20 +136,21 @@ func (b *BaseMiner) GetPath() string {
return dataPath
}
// binary := miner.GetBinaryPath() // returns paths such as `/home/alice/.local/share/lethean-desktop/miners/xmrig/xmrig`.
// GetBinaryPath returns the full path to the miner's executable file.
func (b *BaseMiner) GetBinaryPath() string {
b.mutex.RLock()
defer b.mutex.RUnlock()
b.mu.RLock()
defer b.mu.RUnlock()
return b.MinerBinary
}
// if err := miner.Stop(); err != nil { log.Warn("stop failed", ...) }
// Stop terminates the miner process gracefully.
// It first tries SIGTERM to allow cleanup, then SIGKILL if needed.
func (b *BaseMiner) Stop() error {
b.mutex.Lock()
b.mu.Lock()
if !b.Running || b.command == nil {
b.mutex.Unlock()
return ErrMinerNotRunning(b.Name)
if !b.Running || b.cmd == nil {
b.mu.Unlock()
return errors.New("miner is not running")
}
// Close stdin pipe if open
@ -159,14 +159,14 @@ func (b *BaseMiner) Stop() error {
b.stdinPipe = nil
}
// Capture command locally to avoid race with the Wait() goroutine.
command := b.command
process := command.Process
// Capture cmd locally to avoid race with Wait() goroutine
cmd := b.cmd
process := cmd.Process
// Mark as not running immediately to prevent concurrent Stop() calls
b.Running = false
b.command = nil
b.mutex.Unlock()
b.cmd = nil
b.mu.Unlock()
// Try graceful shutdown with SIGTERM first (Unix only)
if runtime.GOOS != "windows" {
@ -197,18 +197,18 @@ func (b *BaseMiner) Stop() error {
return nil
}
// case <-time.After(stdinWriteTimeout): return ErrTimeout("stdin write")
// stdinWriteTimeout is the maximum time to wait for stdin write to complete.
const stdinWriteTimeout = 5 * time.Second
// if err := miner.WriteStdin("h"); err != nil { /* miner not running */ }
// WriteStdin sends input to the miner's stdin (for console commands).
func (b *BaseMiner) WriteStdin(input string) error {
b.mutex.RLock()
b.mu.RLock()
stdinPipe := b.stdinPipe
running := b.Running
b.mutex.RUnlock()
b.mu.RUnlock()
if !running || stdinPipe == nil {
return ErrMinerNotRunning(b.Name)
return errors.New("miner is not running or stdin not available")
}
// Append newline if not present
@ -233,16 +233,16 @@ func (b *BaseMiner) WriteStdin(input string) error {
case err := <-done:
return err
case <-time.After(stdinWriteTimeout):
return ErrTimeout("stdin write: miner may be unresponsive")
return errors.New("stdin write timeout: miner may be unresponsive")
}
}
// if err := miner.Uninstall(); err != nil { return err }
// Uninstall removes all files related to the miner.
func (b *BaseMiner) Uninstall() error {
return os.RemoveAll(b.GetPath())
}
// if err := b.InstallFromURL("https://github.com/xmrig/xmrig/releases/download/v6.22.1/xmrig-6.22.1-linux-static-x64.tar.gz"); err != nil { return err }
// InstallFromURL handles the generic download and extraction process for a miner.
func (b *BaseMiner) InstallFromURL(url string) error {
tmpfile, err := os.CreateTemp("", b.ExecutableName+"-")
if err != nil {
@ -251,20 +251,20 @@ func (b *BaseMiner) InstallFromURL(url string) error {
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()
response, err := getHTTPClient().Get(url)
resp, err := getHTTPClient().Get(url)
if err != nil {
return err
}
defer response.Body.Close()
defer resp.Body.Close()
if response.StatusCode != http.StatusOK {
_, _ = io.Copy(io.Discard, response.Body) // Drain body to allow connection reuse (error ignored intentionally)
return ErrInstallFailed(b.ExecutableName).WithDetails("unexpected status code " + strconv.Itoa(response.StatusCode))
if resp.StatusCode != http.StatusOK {
_, _ = io.Copy(io.Discard, resp.Body) // Drain body to allow connection reuse (error ignored intentionally)
return fmt.Errorf("failed to download release: unexpected status code %d", resp.StatusCode)
}
if _, err := io.Copy(tmpfile, response.Body); err != nil {
if _, err := io.Copy(tmpfile, resp.Body); err != nil {
// Drain remaining body to allow connection reuse (error ignored intentionally)
_, _ = io.Copy(io.Discard, response.Body)
_, _ = io.Copy(io.Discard, resp.Body)
return err
}
@ -279,20 +279,18 @@ func (b *BaseMiner) InstallFromURL(url string) error {
err = b.untar(tmpfile.Name(), baseInstallPath)
}
if err != nil {
return ErrInstallFailed(b.ExecutableName).WithCause(err).WithDetails("failed to extract archive")
return fmt.Errorf("failed to extract miner: %w", err)
}
return nil
}
// current := parseVersion("6.24.0") // [6, 24, 0]
// previous := parseVersion("5.0.1") // [5, 0, 1]
// if compareVersions(current, previous) > 0 { /* current is newer */ }
func parseVersion(versionString string) []int {
parts := strings.Split(versionString, ".")
// parseVersion parses a version string (e.g., "6.24.0") into a slice of integers for comparison.
func parseVersion(v string) []int {
parts := strings.Split(v, ".")
intParts := make([]int, len(parts))
for i, part := range parts {
val, err := strconv.Atoi(part)
for i, p := range parts {
val, err := strconv.Atoi(p)
if err != nil {
return []int{0} // Malformed version, treat as very old
}
@ -301,14 +299,14 @@ func parseVersion(versionString string) []int {
return intParts
}
// if compareVersions(parseVersion("6.24.0"), parseVersion("5.0.1")) > 0 { /* installed is newer, skip update */ }
// compareVersions compares two version slices. Returns 1 if v1 > v2, -1 if v1 < v2, 0 if equal.
func compareVersions(v1, v2 []int) int {
minimumLength := len(v1)
if len(v2) < minimumLength {
minimumLength = len(v2)
minLen := len(v1)
if len(v2) < minLen {
minLen = len(v2)
}
for i := 0; i < minimumLength; i++ {
for i := 0; i < minLen; i++ {
if v1[i] > v2[i] {
return 1
}
@ -326,8 +324,8 @@ func compareVersions(v1, v2 []int) int {
return 0
}
// path, err := b.findMinerBinary()
// // path == "/home/user/.local/share/lethean-desktop/miners/xmrig/xmrig-6.24.0/xmrig"
// findMinerBinary searches for the miner's executable file.
// It returns the absolute path to the executable if found, prioritizing the highest versioned installation.
func (b *BaseMiner) findMinerBinary() (string, error) {
executableName := b.ExecutableName
if runtime.GOOS == "windows" {
@ -344,17 +342,17 @@ func (b *BaseMiner) findMinerBinary() (string, error) {
if _, err := os.Stat(baseInstallPath); err == nil {
dirs, err := os.ReadDir(baseInstallPath)
if err == nil {
for _, entry := range dirs {
if entry.IsDir() && strings.HasPrefix(entry.Name(), b.ExecutableName+"-") {
// Extract the version suffix from a directory name such as `xmrig-6.24.0`.
versionStr := strings.TrimPrefix(entry.Name(), b.ExecutableName+"-")
for _, d := range dirs {
if d.IsDir() && strings.HasPrefix(d.Name(), b.ExecutableName+"-") {
// Extract version string, e.g., "xmrig-6.24.0" -> "6.24.0"
versionStr := strings.TrimPrefix(d.Name(), b.ExecutableName+"-")
currentVersion := parseVersion(versionStr)
if highestVersionDir == "" || compareVersions(currentVersion, highestVersion) > 0 {
highestVersion = currentVersion
highestVersionDir = entry.Name()
highestVersionDir = d.Name()
}
versionedPath := filepath.Join(baseInstallPath, entry.Name())
versionedPath := filepath.Join(baseInstallPath, d.Name())
fullPath := filepath.Join(versionedPath, executableName)
searchedPaths = append(searchedPaths, fullPath)
}
@ -375,18 +373,17 @@ func (b *BaseMiner) findMinerBinary() (string, error) {
if err == nil {
absPath, err := filepath.Abs(path)
if err != nil {
return "", ErrInternal("failed to get absolute path for '" + path + "'").WithCause(err)
return "", fmt.Errorf("failed to get absolute path for '%s': %w", path, err)
}
logging.Debug("found miner binary in system PATH", logging.Fields{"path": absPath})
return absPath, nil
}
// If not found, return a detailed error
return "", ErrMinerNotFound(executableName).WithDetails("searched in: " + strings.Join(searchedPaths, ", ") + " and system PATH")
return "", fmt.Errorf("miner executable '%s' not found. Searched in: %s and system PATH", executableName, strings.Join(searchedPaths, ", "))
}
// details, err := miner.CheckInstallation()
// if !details.IsInstalled { logging.Warn("xmrig not found", logging.Fields{"error": err}) }
// CheckInstallation verifies if the miner is installed correctly.
func (b *BaseMiner) CheckInstallation() (*InstallationDetails, error) {
binaryPath, err := b.findMinerBinary()
if err != nil {
@ -396,13 +393,13 @@ func (b *BaseMiner) CheckInstallation() (*InstallationDetails, error) {
b.MinerBinary = binaryPath
b.Path = filepath.Dir(binaryPath)
command := exec.Command(binaryPath, "--version")
var output bytes.Buffer
command.Stdout = &output
if err := command.Run(); err != nil {
cmd := exec.Command(binaryPath, "--version")
var out bytes.Buffer
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
b.Version = "Unknown (could not run executable)"
} else {
fields := strings.Fields(output.String())
fields := strings.Fields(out.String())
if len(fields) >= 2 {
b.Version = fields[1]
} else {
@ -418,43 +415,42 @@ func (b *BaseMiner) CheckInstallation() (*InstallationDetails, error) {
}, nil
}
// points := miner.GetHashrateHistory() // low-res (24h) + high-res (5min) points in chronological order
// GetHashrateHistory returns the combined hashrate history.
func (b *BaseMiner) GetHashrateHistory() []HashratePoint {
b.mutex.RLock()
defer b.mutex.RUnlock()
b.mu.RLock()
defer b.mu.RUnlock()
combinedHistory := make([]HashratePoint, 0, len(b.LowResHashrateHistory)+len(b.HashrateHistory))
combinedHistory = append(combinedHistory, b.LowResHashrateHistory...)
combinedHistory = append(combinedHistory, b.HashrateHistory...)
return combinedHistory
}
// miner.AddHashratePoint(HashratePoint{Timestamp: time.Now(), Hashrate: 1234.5})
// AddHashratePoint adds a new hashrate measurement.
func (b *BaseMiner) AddHashratePoint(point HashratePoint) {
b.mutex.Lock()
defer b.mutex.Unlock()
b.mu.Lock()
defer b.mu.Unlock()
b.HashrateHistory = append(b.HashrateHistory, point)
}
// count := miner.GetHighResHistoryLength() // 0..30 points (last 5 min at 10s resolution)
// GetHighResHistoryLength returns the number of high-resolution hashrate points.
func (b *BaseMiner) GetHighResHistoryLength() int {
b.mutex.RLock()
defer b.mutex.RUnlock()
b.mu.RLock()
defer b.mu.RUnlock()
return len(b.HashrateHistory)
}
// count := miner.GetLowResHistoryLength() // 0..1440 points (last 24h at 1min resolution)
// GetLowResHistoryLength returns the number of low-resolution hashrate points.
func (b *BaseMiner) GetLowResHistoryLength() int {
b.mutex.RLock()
defer b.mutex.RUnlock()
b.mu.RLock()
defer b.mu.RUnlock()
return len(b.LowResHashrateHistory)
}
// lines := miner.GetLogs()
// response.Logs = lines[max(0, len(lines)-100):]
// GetLogs returns the captured log output from the miner process.
func (b *BaseMiner) GetLogs() []string {
b.mutex.RLock()
b.mu.RLock()
logBuffer := b.LogBuffer
b.mutex.RUnlock()
b.mu.RUnlock()
if logBuffer == nil {
return []string{}
@ -462,10 +458,10 @@ func (b *BaseMiner) GetLogs() []string {
return logBuffer.GetLines()
}
// miner.ReduceHashrateHistory(time.Now()) // aggregates high-res points older than 5 min into 1-min low-res buckets; trims low-res to 24h
// ReduceHashrateHistory aggregates and trims hashrate data.
func (b *BaseMiner) ReduceHashrateHistory(now time.Time) {
b.mutex.Lock()
defer b.mutex.Unlock()
b.mu.Lock()
defer b.mu.Unlock()
if !b.LastLowResAggregation.IsZero() && now.Sub(b.LastLowResAggregation) < LowResolutionInterval {
return
@ -475,11 +471,11 @@ func (b *BaseMiner) ReduceHashrateHistory(now time.Time) {
var newHighResHistory []HashratePoint
cutoff := now.Add(-HighResolutionDuration)
for _, point := range b.HashrateHistory {
if point.Timestamp.Before(cutoff) {
pointsToAggregate = append(pointsToAggregate, point)
for _, p := range b.HashrateHistory {
if p.Timestamp.Before(cutoff) {
pointsToAggregate = append(pointsToAggregate, p)
} else {
newHighResHistory = append(newHighResHistory, point)
newHighResHistory = append(newHighResHistory, p)
}
}
// Force reallocation if significantly oversized to free memory
@ -497,17 +493,17 @@ func (b *BaseMiner) ReduceHashrateHistory(now time.Time) {
}
minuteGroups := make(map[time.Time][]int)
for _, point := range pointsToAggregate {
minute := point.Timestamp.Truncate(LowResolutionInterval)
minuteGroups[minute] = append(minuteGroups[minute], point.Hashrate)
for _, p := range pointsToAggregate {
minute := p.Timestamp.Truncate(LowResolutionInterval)
minuteGroups[minute] = append(minuteGroups[minute], p.Hashrate)
}
var newLowResPoints []HashratePoint
for minute, hashrates := range minuteGroups {
if len(hashrates) > 0 {
totalHashrate := 0
for _, rate := range hashrates {
totalHashrate += rate
for _, hr := range hashrates {
totalHashrate += hr
}
avgHashrate := totalHashrate / len(hashrates)
newLowResPoints = append(newLowResPoints, HashratePoint{Timestamp: minute, Hashrate: avgHashrate})
@ -522,8 +518,8 @@ func (b *BaseMiner) ReduceHashrateHistory(now time.Time) {
lowResCutoff := now.Add(-LowResHistoryRetention)
firstValidLowResIndex := 0
for i, point := range b.LowResHashrateHistory {
if point.Timestamp.After(lowResCutoff) || point.Timestamp.Equal(lowResCutoff) {
for i, p := range b.LowResHashrateHistory {
if p.Timestamp.After(lowResCutoff) || p.Timestamp.Equal(lowResCutoff) {
firstValidLowResIndex = i
break
}
@ -544,41 +540,41 @@ func (b *BaseMiner) ReduceHashrateHistory(now time.Time) {
b.LastLowResAggregation = now
}
// b.unzip("/tmp/xmrig-linux-x64.zip", "/home/alice/.local/share/lethean-desktop/miners/xmrig")
func (b *BaseMiner) unzip(sourceArchivePath, destinationDirectoryPath string) error {
zipReader, err := zip.OpenReader(sourceArchivePath)
// unzip extracts a zip archive.
func (b *BaseMiner) unzip(src, dest string) error {
r, err := zip.OpenReader(src)
if err != nil {
return err
}
defer zipReader.Close()
defer r.Close()
for _, zipEntry := range zipReader.File {
entryPath := filepath.Join(destinationDirectoryPath, zipEntry.Name)
if !strings.HasPrefix(entryPath, filepath.Clean(destinationDirectoryPath)+string(os.PathSeparator)) {
return ErrInternal("illegal file path in archive").WithDetails(entryPath)
for _, f := range r.File {
fpath := filepath.Join(dest, f.Name)
if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) {
return fmt.Errorf("%s: illegal file path", fpath)
}
if zipEntry.FileInfo().IsDir() {
if err := os.MkdirAll(entryPath, os.ModePerm); err != nil {
return ErrInternal("failed to create directory").WithCause(err).WithDetails(entryPath)
if f.FileInfo().IsDir() {
if err := os.MkdirAll(fpath, os.ModePerm); err != nil {
return fmt.Errorf("failed to create directory %s: %w", fpath, err)
}
continue
}
if err = os.MkdirAll(filepath.Dir(entryPath), os.ModePerm); err != nil {
if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
return err
}
outFile, err := os.OpenFile(entryPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, zipEntry.Mode())
outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
entryReader, err := zipEntry.Open()
rc, err := f.Open()
if err != nil {
outFile.Close()
return err
}
_, err = io.Copy(outFile, entryReader)
_, err = io.Copy(outFile, rc)
outFile.Close()
entryReader.Close()
rc.Close()
if err != nil {
return err
}
@ -586,24 +582,24 @@ func (b *BaseMiner) unzip(sourceArchivePath, destinationDirectoryPath string) er
return nil
}
// b.untar("/tmp/xmrig-linux-x64.tar.gz", "/home/alice/.local/share/lethean-desktop/miners/xmrig")
func (b *BaseMiner) untar(sourceArchivePath, destinationDirectoryPath string) error {
file, err := os.Open(sourceArchivePath)
// untar extracts a tar.gz archive.
func (b *BaseMiner) untar(src, dest string) error {
file, err := os.Open(src)
if err != nil {
return err
}
defer file.Close()
gzipReader, err := gzip.NewReader(file)
gzr, err := gzip.NewReader(file)
if err != nil {
return err
}
defer gzipReader.Close()
defer gzr.Close()
tarReader := tar.NewReader(gzipReader)
tr := tar.NewReader(gzr)
for {
header, err := tarReader.Next()
header, err := tr.Next()
if err == io.EOF {
return nil
}
@ -611,9 +607,9 @@ func (b *BaseMiner) untar(sourceArchivePath, destinationDirectoryPath string) er
return err
}
target := filepath.Join(destinationDirectoryPath, header.Name)
if !strings.HasPrefix(target, filepath.Clean(destinationDirectoryPath)+string(os.PathSeparator)) {
return ErrInternal("illegal file path in archive").WithDetails(header.Name)
target := filepath.Join(dest, header.Name)
if !strings.HasPrefix(target, filepath.Clean(dest)+string(os.PathSeparator)) {
return fmt.Errorf("%s: illegal file path in archive", header.Name)
}
switch header.Typeflag {
@ -625,15 +621,15 @@ func (b *BaseMiner) untar(sourceArchivePath, destinationDirectoryPath string) er
if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
return err
}
outputFile, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode))
f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode))
if err != nil {
return err
}
if _, err := io.Copy(outputFile, tarReader); err != nil {
outputFile.Close()
if _, err := io.Copy(f, tr); err != nil {
f.Close()
return err
}
outputFile.Close()
f.Close()
}
}
}

View file

@ -1,57 +1,45 @@
package mining
import (
"fmt"
"strings"
"sync"
"unicode"
)
// toLowerASCII("XMRig") == "xmrig"
// toLowerASCII("TT-Miner") == "tt-miner"
func toLowerASCII(input string) string {
runes := []rune(input)
for i, character := range runes {
runes[i] = unicode.ToLower(character)
}
return string(runes)
}
// f.Register("custom", func() Miner { return NewCustomMiner() })
// MinerConstructor is a function that creates a new miner instance
type MinerConstructor func() Miner
// factory := NewMinerFactory()
// factory.Register("xmrig", func() Miner { return NewXMRigMiner() })
// MinerFactory handles miner instantiation and registration
type MinerFactory struct {
mutex sync.RWMutex
mu sync.RWMutex
constructors map[string]MinerConstructor
aliases map[string]string // maps aliases to canonical names
}
// globalFactory := NewMinerFactory()
// miner, err := globalFactory.Create("xmrig")
// globalFactory is the default factory instance
var globalFactory = NewMinerFactory()
// factory := NewMinerFactory()
// factory.Register("xmrig", func() Miner { return NewXMRigMiner() })
// NewMinerFactory creates a new MinerFactory with default miners registered
func NewMinerFactory() *MinerFactory {
factory := &MinerFactory{
f := &MinerFactory{
constructors: make(map[string]MinerConstructor),
aliases: make(map[string]string),
}
factory.registerDefaults()
return factory
f.registerDefaults()
return f
}
// factory.registerDefaults() // called in NewMinerFactory(); adds xmrig, tt-miner, simulated to constructors
func (factory *MinerFactory) registerDefaults() {
// registerDefaults registers all built-in miners
func (f *MinerFactory) registerDefaults() {
// XMRig miner (CPU/GPU RandomX, Cryptonight, etc.)
factory.Register("xmrig", func() Miner { return NewXMRigMiner() })
f.Register("xmrig", func() Miner { return NewXMRigMiner() })
// TT-Miner (GPU Kawpow, etc.)
factory.Register("tt-miner", func() Miner { return NewTTMiner() })
factory.RegisterAlias("ttminer", "tt-miner")
f.Register("tt-miner", func() Miner { return NewTTMiner() })
f.RegisterAlias("ttminer", "tt-miner")
// Simulated miner for testing and development
factory.Register(MinerTypeSimulated, func() Miner {
f.Register(MinerTypeSimulated, func() Miner {
return NewSimulatedMiner(SimulatedMinerConfig{
Name: "simulated-miner",
Algorithm: "rx/0",
@ -61,63 +49,63 @@ func (factory *MinerFactory) registerDefaults() {
})
}
// factory.Register("xmrig", func() Miner { return NewXMRigMiner() })
func (factory *MinerFactory) Register(name string, constructor MinerConstructor) {
factory.mutex.Lock()
defer factory.mutex.Unlock()
factory.constructors[toLowerASCII(name)] = constructor
// Register adds a miner constructor to the factory
func (f *MinerFactory) Register(name string, constructor MinerConstructor) {
f.mu.Lock()
defer f.mu.Unlock()
f.constructors[strings.ToLower(name)] = constructor
}
// factory.RegisterAlias("ttminer", "tt-miner")
func (factory *MinerFactory) RegisterAlias(alias, canonicalName string) {
factory.mutex.Lock()
defer factory.mutex.Unlock()
factory.aliases[toLowerASCII(alias)] = toLowerASCII(canonicalName)
// RegisterAlias adds an alias for an existing miner type
func (f *MinerFactory) RegisterAlias(alias, canonicalName string) {
f.mu.Lock()
defer f.mu.Unlock()
f.aliases[strings.ToLower(alias)] = strings.ToLower(canonicalName)
}
// miner, err := factory.Create("xmrig")
func (factory *MinerFactory) Create(minerType string) (Miner, error) {
factory.mutex.RLock()
defer factory.mutex.RUnlock()
// Create instantiates a miner of the specified type
func (f *MinerFactory) Create(minerType string) (Miner, error) {
f.mu.RLock()
defer f.mu.RUnlock()
name := toLowerASCII(minerType)
name := strings.ToLower(minerType)
// Check for alias first
if canonical, ok := factory.aliases[name]; ok {
if canonical, ok := f.aliases[name]; ok {
name = canonical
}
constructor, ok := factory.constructors[name]
constructor, ok := f.constructors[name]
if !ok {
return nil, ErrUnsupportedMiner(minerType)
return nil, fmt.Errorf("unsupported miner type: %s", minerType)
}
return constructor(), nil
}
// if factory.IsSupported("tt-miner") { ... }
func (factory *MinerFactory) IsSupported(minerType string) bool {
factory.mutex.RLock()
defer factory.mutex.RUnlock()
// IsSupported checks if a miner type is registered
func (f *MinerFactory) IsSupported(minerType string) bool {
f.mu.RLock()
defer f.mu.RUnlock()
name := toLowerASCII(minerType)
name := strings.ToLower(minerType)
// Check alias
if canonical, ok := factory.aliases[name]; ok {
if canonical, ok := f.aliases[name]; ok {
name = canonical
}
_, ok := factory.constructors[name]
_, ok := f.constructors[name]
return ok
}
// types := factory.ListTypes() // ["xmrig", "tt-miner", "simulated"]
func (factory *MinerFactory) ListTypes() []string {
factory.mutex.RLock()
defer factory.mutex.RUnlock()
// ListTypes returns all registered miner type names (excluding aliases)
func (f *MinerFactory) ListTypes() []string {
f.mu.RLock()
defer f.mu.RUnlock()
types := make([]string, 0, len(factory.constructors))
for name := range factory.constructors {
types := make([]string, 0, len(f.constructors))
for name := range f.constructors {
types = append(types, name)
}
return types
@ -125,27 +113,27 @@ func (factory *MinerFactory) ListTypes() []string {
// --- Global factory functions for convenience ---
// miner, err := CreateMiner("xmrig")
// CreateMiner creates a miner using the global factory
func CreateMiner(minerType string) (Miner, error) {
return globalFactory.Create(minerType)
}
// if IsMinerSupported("tt-miner") { ... }
// IsMinerSupported checks if a miner type is supported using the global factory
func IsMinerSupported(minerType string) bool {
return globalFactory.IsSupported(minerType)
}
// types := ListMinerTypes() // ["xmrig", "tt-miner", "simulated"]
// ListMinerTypes returns all registered miner types from the global factory
func ListMinerTypes() []string {
return globalFactory.ListTypes()
}
// RegisterMinerType("custom", func() Miner { return NewCustomMiner() })
// RegisterMinerType adds a miner constructor to the global factory
func RegisterMinerType(name string, constructor MinerConstructor) {
globalFactory.Register(name, constructor)
}
// RegisterMinerAlias("ttminer", "tt-miner")
// RegisterMinerAlias adds an alias to the global factory
func RegisterMinerAlias(alias, canonicalName string) {
globalFactory.RegisterAlias(alias, canonicalName)
}

View file

@ -4,7 +4,7 @@ import (
"testing"
)
func TestMinerFactory_Create_Good(t *testing.T) {
func TestMinerFactory_Create(t *testing.T) {
factory := NewMinerFactory()
tests := []struct {
@ -21,26 +21,26 @@ func TestMinerFactory_Create_Good(t *testing.T) {
{"empty type", "", true},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
miner, err := factory.Create(testCase.minerType)
if testCase.wantErr {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
miner, err := factory.Create(tt.minerType)
if tt.wantErr {
if err == nil {
t.Errorf("Create(%q) expected error, got nil", testCase.minerType)
t.Errorf("Create(%q) expected error, got nil", tt.minerType)
}
} else {
if err != nil {
t.Errorf("Create(%q) unexpected error: %v", testCase.minerType, err)
t.Errorf("Create(%q) unexpected error: %v", tt.minerType, err)
}
if miner == nil {
t.Errorf("Create(%q) returned nil miner", testCase.minerType)
t.Errorf("Create(%q) returned nil miner", tt.minerType)
}
}
})
}
}
func TestMinerFactory_IsSupported_Good(t *testing.T) {
func TestMinerFactory_IsSupported(t *testing.T) {
factory := NewMinerFactory()
tests := []struct {
@ -54,16 +54,16 @@ func TestMinerFactory_IsSupported_Good(t *testing.T) {
{"", false},
}
for _, testCase := range tests {
t.Run(testCase.minerType, func(t *testing.T) {
if got := factory.IsSupported(testCase.minerType); got != testCase.want {
t.Errorf("IsSupported(%q) = %v, want %v", testCase.minerType, got, testCase.want)
for _, tt := range tests {
t.Run(tt.minerType, func(t *testing.T) {
if got := factory.IsSupported(tt.minerType); got != tt.want {
t.Errorf("IsSupported(%q) = %v, want %v", tt.minerType, got, tt.want)
}
})
}
}
func TestMinerFactory_ListTypes_Good(t *testing.T) {
func TestMinerFactory_ListTypes(t *testing.T) {
factory := NewMinerFactory()
types := factory.ListTypes()
@ -73,8 +73,8 @@ func TestMinerFactory_ListTypes_Good(t *testing.T) {
// Check that expected types are present
typeMap := make(map[string]bool)
for _, minerType := range types {
typeMap[minerType] = true
for _, typ := range types {
typeMap[typ] = true
}
expectedTypes := []string{"xmrig", "tt-miner"}
@ -85,7 +85,7 @@ func TestMinerFactory_ListTypes_Good(t *testing.T) {
}
}
func TestMinerFactory_Register_Good(t *testing.T) {
func TestMinerFactory_Register(t *testing.T) {
factory := NewMinerFactory()
// Register a custom miner type
@ -108,7 +108,7 @@ func TestMinerFactory_Register_Good(t *testing.T) {
}
}
func TestMinerFactory_RegisterAlias_Good(t *testing.T) {
func TestMinerFactory_RegisterAlias(t *testing.T) {
factory := NewMinerFactory()
// Register an alias for xmrig
@ -127,7 +127,7 @@ func TestMinerFactory_RegisterAlias_Good(t *testing.T) {
}
}
func TestGlobalFactory_CreateMiner_Good(t *testing.T) {
func TestGlobalFactory_CreateMiner(t *testing.T) {
// Test global convenience functions
miner, err := CreateMiner("xmrig")
if err != nil {
@ -138,7 +138,7 @@ func TestGlobalFactory_CreateMiner_Good(t *testing.T) {
}
}
func TestGlobalFactory_IsMinerSupported_Good(t *testing.T) {
func TestGlobalFactory_IsMinerSupported(t *testing.T) {
if !IsMinerSupported("xmrig") {
t.Error("xmrig should be supported")
}
@ -147,7 +147,7 @@ func TestGlobalFactory_IsMinerSupported_Good(t *testing.T) {
}
}
func TestGlobalFactory_ListMinerTypes_Good(t *testing.T) {
func TestGlobalFactory_ListMinerTypes(t *testing.T) {
types := ListMinerTypes()
if len(types) < 2 {
t.Errorf("ListMinerTypes() returned %d types, expected at least 2", len(types))

View file

@ -2,6 +2,8 @@ package mining
import (
"context"
"fmt"
"strings"
"time"
)
@ -12,11 +14,23 @@ const (
LowResHistoryRetention = 24 * time.Hour
)
// miner, err := factory.Create("xmrig")
// if err := miner.Install(); err != nil { return err }
// if err := miner.Start(&Config{Algo: "rx/0", Pool: "stratum+tcp://pool.example.com:3333"}); err != nil { return err }
// stats, err := miner.GetStats(ctx)
// defer miner.Stop()
// Miner defines the standard interface for a cryptocurrency miner.
// The interface is logically grouped into focused capabilities:
//
// Lifecycle - Installation and process management:
// - Install, Uninstall, Start, Stop
//
// Stats - Performance metrics collection:
// - GetStats
//
// Info - Miner identification and installation details:
// - GetType, GetName, GetPath, GetBinaryPath, CheckInstallation, GetLatestVersion
//
// History - Hashrate history management:
// - GetHashrateHistory, AddHashratePoint, ReduceHashrateHistory
//
// IO - Interactive input/output:
// - GetLogs, WriteStdin
type Miner interface {
// Lifecycle operations
Install() error
@ -28,7 +42,7 @@ type Miner interface {
GetStats(ctx context.Context) (*PerformanceMetrics, error)
// Info operations
GetType() string // switch miner.GetType() { case "xmrig": ...; case "tt-miner": ... }
GetType() string // Returns miner type identifier (e.g., "xmrig", "tt-miner")
GetName() string
GetPath() string
GetBinaryPath() string
@ -45,17 +59,16 @@ type Miner interface {
WriteStdin(input string) error
}
// details, err := miner.CheckInstallation()
// if details.IsInstalled { logging.Info("installed", logging.Fields{"version": details.Version}) }
// InstallationDetails contains information about an installed miner.
type InstallationDetails struct {
IsInstalled bool `json:"is_installed"`
Version string `json:"version"`
Path string `json:"path"`
MinerBinary string `json:"miner_binary"`
ConfigPath string `json:"config_path,omitempty"` // Example: "/home/alice/.config/xmrig/xmrig.json" stores the miner-specific config path.
ConfigPath string `json:"config_path,omitempty"` // Add path to the miner-specific config
}
// info := SystemInfo{OS: "linux", Architecture: "amd64", AvailableCPUCores: 8}
// SystemInfo provides general system and miner installation information.
type SystemInfo struct {
Timestamp time.Time `json:"timestamp"`
OS string `json:"os"`
@ -66,7 +79,7 @@ type SystemInfo struct {
InstalledMinersInfo []*InstallationDetails `json:"installed_miners_info"`
}
// Config{Miner: "xmrig", Pool: "stratum+tcp://pool.example.com:3333", Wallet: "44Affq5kSiGBoZ...", Threads: 4}
// Config represents the configuration for a miner.
type Config struct {
Miner string `json:"miner"`
Pool string `json:"pool"`
@ -82,7 +95,7 @@ type Config struct {
Keepalive bool `json:"keepalive,omitempty"`
Nicehash bool `json:"nicehash,omitempty"`
RigID string `json:"rigId,omitempty"`
TLSFingerprint string `json:"tlsFingerprint,omitempty"`
TLSSingerprint string `json:"tlsFingerprint,omitempty"`
Retries int `json:"retries,omitempty"`
RetryPause int `json:"retryPause,omitempty"`
UserAgent string `json:"userAgent,omitempty"`
@ -131,109 +144,107 @@ type Config struct {
Seed string `json:"seed,omitempty"`
Hash string `json:"hash,omitempty"`
NoDMI bool `json:"noDMI,omitempty"`
// Example: Config{GPUEnabled: true, GPUPool: "stratum+ssl://gpu.pool.example.com:4444", GPUWallet: "44Affq5kSiGBoZ..."} enables the GPU mining fields.
GPUEnabled bool `json:"gpuEnabled,omitempty"` // Example: true enables GPU mining.
GPUPool string `json:"gpuPool,omitempty"` // Example: "stratum+ssl://gpu.pool.example.com:4444" sends GPU shares to a separate pool.
GPUWallet string `json:"gpuWallet,omitempty"` // Example: "44Affq5kSiGBoZ..." overrides the CPU wallet for GPU shares.
GPUAlgo string `json:"gpuAlgo,omitempty"` // Example: "kawpow" or "ethash" selects the GPU algorithm.
GPUPassword string `json:"gpuPassword,omitempty"` // Example: "worker-1" sets the GPU pool password.
GPUIntensity int `json:"gpuIntensity,omitempty"` // Example: 75 sets GPU mining intensity on a 0-100 scale.
GPUThreads int `json:"gpuThreads,omitempty"` // Example: 2 assigns two GPU threads per card.
Devices string `json:"devices,omitempty"` // Example: "0,1,2" targets the first three GPU devices.
OpenCL bool `json:"opencl,omitempty"` // Example: true enables OpenCL on AMD and Intel GPUs.
CUDA bool `json:"cuda,omitempty"` // Example: true enables CUDA on NVIDIA GPUs.
Intensity int `json:"intensity,omitempty"` // Example: 80 sets the overall GPU mining intensity.
CLIArgs string `json:"cliArgs,omitempty"` // Example: "--api 127.0.0.1:4048" appends extra CLI arguments.
// GPU-specific options (for XMRig dual CPU+GPU mining)
GPUEnabled bool `json:"gpuEnabled,omitempty"` // Enable GPU mining
GPUPool string `json:"gpuPool,omitempty"` // Separate pool for GPU (can differ from CPU)
GPUWallet string `json:"gpuWallet,omitempty"` // Wallet for GPU pool (defaults to main Wallet)
GPUAlgo string `json:"gpuAlgo,omitempty"` // Algorithm for GPU (e.g., "kawpow", "ethash")
GPUPassword string `json:"gpuPassword,omitempty"` // Password for GPU pool
GPUIntensity int `json:"gpuIntensity,omitempty"` // GPU mining intensity (0-100)
GPUThreads int `json:"gpuThreads,omitempty"` // GPU threads per card
Devices string `json:"devices,omitempty"` // GPU device selection (e.g., "0,1,2")
OpenCL bool `json:"opencl,omitempty"` // Enable OpenCL (AMD/Intel GPUs)
CUDA bool `json:"cuda,omitempty"` // Enable CUDA (NVIDIA GPUs)
Intensity int `json:"intensity,omitempty"` // Mining intensity for GPU miners
CLIArgs string `json:"cliArgs,omitempty"` // Additional CLI arguments
}
// Example: if err := config.Validate(); err != nil { return err }
func (config *Config) Validate() error {
// Validate checks the Config for common errors and security issues.
// Returns nil if valid, otherwise returns a descriptive error.
func (c *Config) Validate() error {
// Pool URL validation
if config.Pool != "" {
if c.Pool != "" {
// Block shell metacharacters in pool URL
if containsShellChars(config.Pool) {
return ErrInvalidConfig("pool URL contains invalid characters")
if containsShellChars(c.Pool) {
return fmt.Errorf("pool URL contains invalid characters")
}
}
// Wallet validation (basic alphanumeric + special chars allowed in addresses)
if config.Wallet != "" {
if containsShellChars(config.Wallet) {
return ErrInvalidConfig("wallet address contains invalid characters")
if c.Wallet != "" {
if containsShellChars(c.Wallet) {
return fmt.Errorf("wallet address contains invalid characters")
}
// Most wallet addresses are 40-128 chars
if len(config.Wallet) > 256 {
return ErrInvalidConfig("wallet address too long (max 256 chars)")
if len(c.Wallet) > 256 {
return fmt.Errorf("wallet address too long (max 256 chars)")
}
}
// Thread count validation
if config.Threads < 0 {
return ErrInvalidConfig("threads cannot be negative")
if c.Threads < 0 {
return fmt.Errorf("threads cannot be negative")
}
if config.Threads > 1024 {
return ErrInvalidConfig("threads value too high (max 1024)")
if c.Threads > 1024 {
return fmt.Errorf("threads value too high (max 1024)")
}
// Algorithm validation (alphanumeric, dash, slash)
if config.Algo != "" {
if !isValidAlgo(config.Algo) {
return ErrInvalidConfig("algorithm name contains invalid characters")
if c.Algo != "" {
if !isValidAlgo(c.Algo) {
return fmt.Errorf("algorithm name contains invalid characters")
}
}
// Intensity validation
if config.Intensity < 0 || config.Intensity > 100 {
return ErrInvalidConfig("intensity must be between 0 and 100")
if c.Intensity < 0 || c.Intensity > 100 {
return fmt.Errorf("intensity must be between 0 and 100")
}
if config.GPUIntensity < 0 || config.GPUIntensity > 100 {
return ErrInvalidConfig("GPU intensity must be between 0 and 100")
if c.GPUIntensity < 0 || c.GPUIntensity > 100 {
return fmt.Errorf("GPU intensity must be between 0 and 100")
}
// Donate level validation
if config.DonateLevel < 0 || config.DonateLevel > 100 {
return ErrInvalidConfig("donate level must be between 0 and 100")
if c.DonateLevel < 0 || c.DonateLevel > 100 {
return fmt.Errorf("donate level must be between 0 and 100")
}
// CLIArgs validation - check for shell metacharacters
if config.CLIArgs != "" {
if containsShellChars(config.CLIArgs) {
return ErrInvalidConfig("CLI arguments contain invalid characters")
if c.CLIArgs != "" {
if containsShellChars(c.CLIArgs) {
return fmt.Errorf("CLI arguments contain invalid characters")
}
// Limit length to prevent abuse
if len(config.CLIArgs) > 1024 {
return ErrInvalidConfig("CLI arguments too long (max 1024 chars)")
if len(c.CLIArgs) > 1024 {
return fmt.Errorf("CLI arguments too long (max 1024 chars)")
}
}
return nil
}
// containsShellChars(";echo bad") == true
// containsShellChars("stratum+tcp://pool.example.com:3333") == false
func containsShellChars(input string) bool {
for _, character := range input {
switch character {
case ';', '|', '&', '`', '$', '(', ')', '{', '}', '<', '>', '\n', '\r', '\\', '\'', '"', '!':
// containsShellChars checks for shell metacharacters that could enable injection
func containsShellChars(s string) bool {
dangerous := []string{";", "|", "&", "`", "$", "(", ")", "{", "}", "<", ">", "\n", "\r", "\\", "'", "\"", "!"}
for _, d := range dangerous {
if strings.Contains(s, d) {
return true
}
}
return false
}
// isValidAlgo("rx/0") == true
// isValidAlgo("rx/0;rm -rf /") == false
// isValidAlgo checks if an algorithm name contains only valid characters
func isValidAlgo(algo string) bool {
for _, character := range algo {
if !((character >= 'a' && character <= 'z') || (character >= 'A' && character <= 'Z') || (character >= '0' && character <= '9') || character == '-' || character == '/' || character == '_') {
for _, r := range algo {
if !((r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '-' || r == '/' || r == '_') {
return false
}
}
return true
}
// metrics, err := miner.GetStats(ctx)
// if metrics.Hashrate > 0 { logging.Info("mining", logging.Fields{"hashrate": metrics.Hashrate}) }
// PerformanceMetrics represents the performance metrics for a miner.
type PerformanceMetrics struct {
Hashrate int `json:"hashrate"`
Shares int `json:"shares"`
@ -246,20 +257,20 @@ type PerformanceMetrics struct {
ExtraData map[string]interface{} `json:"extraData,omitempty"`
}
// point := HashratePoint{Timestamp: time.Now(), Hashrate: 1500}
// HashratePoint represents a single hashrate measurement at a specific time.
type HashratePoint struct {
Timestamp time.Time `json:"timestamp"`
Hashrate int `json:"hashrate"`
}
// api := API{Enabled: true, ListenHost: "127.0.0.1", ListenPort: 18080}
// API represents the miner's API configuration.
type API struct {
Enabled bool `json:"enabled"`
ListenHost string `json:"listenHost"`
ListenPort int `json:"listenPort"`
}
// var summary XMRigSummary; UnmarshalJSON(body, &summary); _ = summary.Hashrate.Highest
// XMRigSummary represents the full JSON response from the XMRig API.
type XMRigSummary struct {
ID string `json:"id"`
WorkerID string `json:"worker_id"`
@ -337,7 +348,7 @@ type XMRigSummary struct {
Hugepages []int `json:"hugepages"`
}
// miner := AvailableMiner{Name: "xmrig", Description: "CPU/GPU miner for RandomX"}
// AvailableMiner represents a miner that is available for use.
type AvailableMiner struct {
Name string `json:"name"`
Description string `json:"description"`

View file

@ -1,19 +1,24 @@
package mining
// var rawConfig RawConfig
// _ = json.Unmarshal([]byte(`{"pool":"pool.lthn.io:3333"}`), &rawConfig)
import (
"errors"
)
// RawConfig is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can be used to delay JSON decoding or precompute a JSON encoding.
// We define it as []byte (like json.RawMessage) to avoid swagger parsing issues with the json package.
type RawConfig []byte
// profile := MiningProfile{ID: "abc", Name: "LTHN RandomX", MinerType: "xmrig", Config: rawConfig}
// manager.SaveProfile(profile)
// MiningProfile represents a saved configuration for running a specific miner.
// It decouples the UI from the underlying miner's specific config structure.
type MiningProfile struct {
ID string `json:"id"`
Name string `json:"name"`
MinerType string `json:"minerType"` // Miner type such as `xmrig` or `ttminer`.
Config RawConfig `json:"config" swaggertype:"object"` // Raw JSON config for the selected miner profile.
MinerType string `json:"minerType"` // e.g., "xmrig", "ttminer"
Config RawConfig `json:"config" swaggertype:"object"` // The raw JSON config for the specific miner
}
// data, err := profile.Config.MarshalJSON() // returns the config blob unchanged.
// MarshalJSON returns m as the JSON encoding of m.
func (m RawConfig) MarshalJSON() ([]byte, error) {
if m == nil {
return []byte("null"), nil
@ -21,10 +26,10 @@ func (m RawConfig) MarshalJSON() ([]byte, error) {
return m, nil
}
// if err := json.Unmarshal(rawConfig, &profile.Config); err != nil { ... }
// UnmarshalJSON sets *m to a copy of data.
func (m *RawConfig) UnmarshalJSON(data []byte) error {
if m == nil {
return ErrInternal("RawConfig: UnmarshalJSON on nil pointer")
return errors.New("RawConfig: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil

View file

@ -1,40 +0,0 @@
package mining
import (
"testing"
)
func TestMiningProfile_RawConfig_MarshalJSON_Good(t *testing.T) {
raw := RawConfig([]byte(`{"pool":"pool.lthn.io:3333"}`))
data, err := raw.MarshalJSON()
if err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if string(data) != `{"pool":"pool.lthn.io:3333"}` {
t.Fatalf("unexpected marshal result: %s", string(data))
}
}
func TestMiningProfile_RawConfig_MarshalJSON_Bad(t *testing.T) {
// nil RawConfig should marshal to "null"
var raw RawConfig
data, err := raw.MarshalJSON()
if err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if string(data) != "null" {
t.Fatalf("expected \"null\", got %q", string(data))
}
}
func TestMiningProfile_RawConfig_MarshalJSON_Ugly(t *testing.T) {
// UnmarshalJSON on a non-nil pointer
raw := RawConfig([]byte(`{}`))
err := raw.UnmarshalJSON([]byte(`{"new":"data"}`))
if err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if string(raw) != `{"new":"data"}` {
t.Fatalf("unexpected unmarshal result: %s", string(raw))
}
}

View file

@ -1,11 +1,11 @@
package mining
import (
"context"
"testing"
)
// manager := NewManager(); defer manager.Stop()
func TestMining_NewManager_Good(t *testing.T) {
func TestNewManager(t *testing.T) {
manager := NewManager()
defer manager.Stop()
@ -17,13 +17,29 @@ func TestMining_NewManager_Good(t *testing.T) {
}
}
// manager.StartMiner is integration-only; skipped in unit context
func TestMining_StartAndStop_Ugly(t *testing.T) {
t.Skip("Skipping test that attempts to run miner process")
func TestStartAndStopMiner(t *testing.T) {
manager := NewManager()
defer manager.Stop()
config := &Config{
Pool: "pool.example.com",
Wallet: "wallet123",
}
// We can't fully test StartMiner without a mock miner,
// but we can test the manager's behavior.
// This will fail because the miner executable is not present,
// which is expected in a test environment.
_, err := manager.StartMiner(context.Background(), "xmrig", config)
if err == nil {
t.Log("StartMiner did not fail as expected in test environment")
}
// Since we can't start a miner, we can't test stop either.
// A more complete test suite would use a mock miner.
}
// _, err := manager.GetMiner("non-existent") // err != nil
func TestMining_GetMiner_Bad(t *testing.T) {
func TestGetNonExistentMiner(t *testing.T) {
manager := NewManager()
defer manager.Stop()
@ -33,19 +49,19 @@ func TestMining_GetMiner_Bad(t *testing.T) {
}
}
// miners := manager.ListMiners() // returns non-nil slice
func TestMining_ListMiners_Good(t *testing.T) {
func TestListMiners(t *testing.T) {
manager := NewManager()
defer manager.Stop()
// ListMiners should return a valid slice (may include autostarted miners)
miners := manager.ListMiners()
if miners == nil {
t.Error("ListMiners returned nil")
}
// Note: count may be > 0 if autostart is configured
}
// miners := manager.ListAvailableMiners() // len > 0
func TestMining_ListAvailableMiners_Good(t *testing.T) {
func TestListAvailableMiners(t *testing.T) {
manager := NewManager()
defer manager.Stop()
@ -54,3 +70,10 @@ func TestMining_ListAvailableMiners_Good(t *testing.T) {
t.Error("Expected at least one available miner")
}
}
func TestGetVersion(t *testing.T) {
version := GetVersion()
if version == "" {
t.Error("Version is empty")
}
}

View file

@ -4,13 +4,13 @@ import (
"encoding/json"
"net/http"
"strconv"
"strings"
"forge.lthn.ai/Snider/Mining/pkg/node"
"github.com/Snider/Mining/pkg/node"
"github.com/gin-gonic/gin"
)
// nodeService, err := NewNodeService()
// router.Group("/api/v1/mining").Group("/node").GET("/info", nodeService.handleNodeInfo)
// NodeService handles P2P node-related API endpoints.
type NodeService struct {
nodeManager *node.NodeManager
peerRegistry *node.PeerRegistry
@ -19,83 +19,84 @@ type NodeService struct {
worker *node.Worker
}
// nodeService, err := NewNodeService() // initialises node manager, peer registry, transport, controller, worker
// NewNodeService creates a new NodeService instance.
func NewNodeService() (*NodeService, error) {
nodeManager, err := node.NewNodeManager()
nm, err := node.NewNodeManager()
if err != nil {
return nil, err
}
peerRegistry, err := node.NewPeerRegistry()
pr, err := node.NewPeerRegistry()
if err != nil {
return nil, err
}
transportConfig := node.DefaultTransportConfig()
transport := node.NewTransport(nodeManager, peerRegistry, transportConfig)
config := node.DefaultTransportConfig()
transport := node.NewTransport(nm, pr, config)
nodeService := &NodeService{
nodeManager: nodeManager,
peerRegistry: peerRegistry,
ns := &NodeService{
nodeManager: nm,
peerRegistry: pr,
transport: transport,
}
nodeService.controller = node.NewController(nodeManager, peerRegistry, transport)
nodeService.worker = node.NewWorker(nodeManager, transport)
// Initialize controller and worker
ns.controller = node.NewController(nm, pr, transport)
ns.worker = node.NewWorker(nm, transport)
return nodeService, nil
return ns, nil
}
// router.Group("/api/v1/mining") exposes /node, /peers, and /remote route groups.
func (nodeService *NodeService) SetupRoutes(router *gin.RouterGroup) {
// router.Group("/node").GET("/info", nodeService.handleNodeInfo) exposes node identity and peer counts.
// SetupRoutes configures all node-related API routes.
func (ns *NodeService) SetupRoutes(router *gin.RouterGroup) {
// Node identity endpoints
nodeGroup := router.Group("/node")
{
nodeGroup.GET("/info", nodeService.handleNodeInfo)
nodeGroup.POST("/init", nodeService.handleNodeInit)
nodeGroup.GET("/info", ns.handleNodeInfo)
nodeGroup.POST("/init", ns.handleNodeInit)
}
// router.Group("/peers").POST("", nodeService.handleAddPeer) registers a peer like 10.0.0.2:9090.
// Peer management endpoints
peerGroup := router.Group("/peers")
{
peerGroup.GET("", nodeService.handleListPeers)
peerGroup.POST("", nodeService.handleAddPeer)
peerGroup.GET("/:id", nodeService.handleGetPeer)
peerGroup.DELETE("/:id", nodeService.handleRemovePeer)
peerGroup.POST("/:id/ping", nodeService.handlePingPeer)
peerGroup.POST("/:id/connect", nodeService.handleConnectPeer)
peerGroup.POST("/:id/disconnect", nodeService.handleDisconnectPeer)
peerGroup.GET("", ns.handleListPeers)
peerGroup.POST("", ns.handleAddPeer)
peerGroup.GET("/:id", ns.handleGetPeer)
peerGroup.DELETE("/:id", ns.handleRemovePeer)
peerGroup.POST("/:id/ping", ns.handlePingPeer)
peerGroup.POST("/:id/connect", ns.handleConnectPeer)
peerGroup.POST("/:id/disconnect", ns.handleDisconnectPeer)
// router.Group("/peers/auth/allowlist").POST("", nodeService.handleAddToAllowlist) accepts keys like ed25519:abc...
peerGroup.GET("/auth/mode", nodeService.handleGetAuthMode)
peerGroup.PUT("/auth/mode", nodeService.handleSetAuthMode)
peerGroup.GET("/auth/allowlist", nodeService.handleListAllowlist)
peerGroup.POST("/auth/allowlist", nodeService.handleAddToAllowlist)
peerGroup.DELETE("/auth/allowlist/:key", nodeService.handleRemoveFromAllowlist)
// Allowlist management
peerGroup.GET("/auth/mode", ns.handleGetAuthMode)
peerGroup.PUT("/auth/mode", ns.handleSetAuthMode)
peerGroup.GET("/auth/allowlist", ns.handleListAllowlist)
peerGroup.POST("/auth/allowlist", ns.handleAddToAllowlist)
peerGroup.DELETE("/auth/allowlist/:key", ns.handleRemoveFromAllowlist)
}
// router.Group("/remote").POST("/:peerId/start", nodeService.handleRemoteStart) starts a miner on a peer like peer-123.
// Remote operations endpoints
remoteGroup := router.Group("/remote")
{
remoteGroup.GET("/stats", nodeService.handleRemoteStats)
remoteGroup.GET("/:peerId/stats", nodeService.handlePeerStats)
remoteGroup.POST("/:peerId/start", nodeService.handleRemoteStart)
remoteGroup.POST("/:peerId/stop", nodeService.handleRemoteStop)
remoteGroup.GET("/:peerId/logs/:miner", nodeService.handleRemoteLogs)
remoteGroup.GET("/stats", ns.handleRemoteStats)
remoteGroup.GET("/:peerId/stats", ns.handlePeerStats)
remoteGroup.POST("/:peerId/start", ns.handleRemoteStart)
remoteGroup.POST("/:peerId/stop", ns.handleRemoteStop)
remoteGroup.GET("/:peerId/logs/:miner", ns.handleRemoteLogs)
}
}
// if err := nodeService.StartTransport(); err != nil { log.Fatal(err) } // starts WebSocket listener on configured port
func (nodeService *NodeService) StartTransport() error {
return nodeService.transport.Start()
// StartTransport starts the P2P transport server.
func (ns *NodeService) StartTransport() error {
return ns.transport.Start()
}
// defer nodeService.StopTransport() // gracefully shuts down WebSocket listener and closes peer connections
func (nodeService *NodeService) StopTransport() error {
return nodeService.transport.Stop()
// StopTransport stops the P2P transport server.
func (ns *NodeService) StopTransport() error {
return ns.transport.Stop()
}
// response := NodeInfoResponse{HasIdentity: true, Identity: identity, RegisteredPeers: 3, ConnectedPeers: 1}
// Node Info Response
type NodeInfoResponse struct {
HasIdentity bool `json:"hasIdentity"`
Identity *node.NodeIdentity `json:"identity,omitempty"`
@ -110,21 +111,21 @@ type NodeInfoResponse struct {
// @Produce json
// @Success 200 {object} NodeInfoResponse
// @Router /node/info [get]
func (nodeService *NodeService) handleNodeInfo(requestContext *gin.Context) {
func (ns *NodeService) handleNodeInfo(c *gin.Context) {
response := NodeInfoResponse{
HasIdentity: nodeService.nodeManager.HasIdentity(),
RegisteredPeers: nodeService.peerRegistry.Count(),
ConnectedPeers: len(nodeService.peerRegistry.GetConnectedPeers()),
HasIdentity: ns.nodeManager.HasIdentity(),
RegisteredPeers: ns.peerRegistry.Count(),
ConnectedPeers: len(ns.peerRegistry.GetConnectedPeers()),
}
if nodeService.nodeManager.HasIdentity() {
response.Identity = nodeService.nodeManager.GetIdentity()
if ns.nodeManager.HasIdentity() {
response.Identity = ns.nodeManager.GetIdentity()
}
requestContext.JSON(http.StatusOK, response)
c.JSON(http.StatusOK, response)
}
// POST /node/init {"name": "my-node", "role": "worker"}
// NodeInitRequest is the request body for node initialization.
type NodeInitRequest struct {
Name string `json:"name" binding:"required"`
Role string `json:"role"` // "controller", "worker", or "dual"
@ -139,20 +140,20 @@ type NodeInitRequest struct {
// @Param request body NodeInitRequest true "Node initialization parameters"
// @Success 200 {object} node.NodeIdentity
// @Router /node/init [post]
func (nodeService *NodeService) handleNodeInit(requestContext *gin.Context) {
var request NodeInitRequest
if err := requestContext.ShouldBindJSON(&request); err != nil {
requestContext.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
func (ns *NodeService) handleNodeInit(c *gin.Context) {
var req NodeInitRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
if nodeService.nodeManager.HasIdentity() {
requestContext.JSON(http.StatusConflict, gin.H{"error": "node identity already exists"})
if ns.nodeManager.HasIdentity() {
c.JSON(http.StatusConflict, gin.H{"error": "node identity already exists"})
return
}
role := node.RoleDual
switch request.Role {
switch req.Role {
case "controller":
role = node.RoleController
case "worker":
@ -160,16 +161,16 @@ func (nodeService *NodeService) handleNodeInit(requestContext *gin.Context) {
case "dual", "":
role = node.RoleDual
default:
requestContext.JSON(http.StatusBadRequest, gin.H{"error": "invalid role"})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid role"})
return
}
if err := nodeService.nodeManager.GenerateIdentity(request.Name, role); err != nil {
requestContext.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
if err := ns.nodeManager.GenerateIdentity(req.Name, role); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
requestContext.JSON(http.StatusOK, nodeService.nodeManager.GetIdentity())
c.JSON(http.StatusOK, ns.nodeManager.GetIdentity())
}
// handleListPeers godoc
@ -179,12 +180,12 @@ func (nodeService *NodeService) handleNodeInit(requestContext *gin.Context) {
// @Produce json
// @Success 200 {array} node.Peer
// @Router /peers [get]
func (nodeService *NodeService) handleListPeers(requestContext *gin.Context) {
peers := nodeService.peerRegistry.ListPeers()
requestContext.JSON(http.StatusOK, peers)
func (ns *NodeService) handleListPeers(c *gin.Context) {
peers := ns.peerRegistry.ListPeers()
c.JSON(http.StatusOK, peers)
}
// POST /peers {"address": "10.0.0.2:9090", "name": "worker-1"}
// AddPeerRequest is the request body for adding a peer.
type AddPeerRequest struct {
Address string `json:"address" binding:"required"`
Name string `json:"name"`
@ -199,27 +200,27 @@ type AddPeerRequest struct {
// @Param request body AddPeerRequest true "Peer information"
// @Success 201 {object} node.Peer
// @Router /peers [post]
func (nodeService *NodeService) handleAddPeer(requestContext *gin.Context) {
var request AddPeerRequest
if err := requestContext.ShouldBindJSON(&request); err != nil {
requestContext.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
func (ns *NodeService) handleAddPeer(c *gin.Context) {
var req AddPeerRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
peer := &node.Peer{
ID: "pending-" + request.Address, // Will be updated on handshake
Name: request.Name,
Address: request.Address,
ID: "pending-" + req.Address, // Will be updated on handshake
Name: req.Name,
Address: req.Address,
Role: node.RoleDual,
Score: 50,
}
if err := nodeService.peerRegistry.AddPeer(peer); err != nil {
requestContext.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
if err := ns.peerRegistry.AddPeer(peer); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
requestContext.JSON(http.StatusCreated, peer)
c.JSON(http.StatusCreated, peer)
}
// handleGetPeer godoc
@ -230,14 +231,14 @@ func (nodeService *NodeService) handleAddPeer(requestContext *gin.Context) {
// @Param id path string true "Peer ID"
// @Success 200 {object} node.Peer
// @Router /peers/{id} [get]
func (nodeService *NodeService) handleGetPeer(requestContext *gin.Context) {
peerID := requestContext.Param("id")
peer := nodeService.peerRegistry.GetPeer(peerID)
func (ns *NodeService) handleGetPeer(c *gin.Context) {
peerID := c.Param("id")
peer := ns.peerRegistry.GetPeer(peerID)
if peer == nil {
requestContext.JSON(http.StatusNotFound, gin.H{"error": "peer not found"})
c.JSON(http.StatusNotFound, gin.H{"error": "peer not found"})
return
}
requestContext.JSON(http.StatusOK, peer)
c.JSON(http.StatusOK, peer)
}
// handleRemovePeer godoc
@ -248,13 +249,13 @@ func (nodeService *NodeService) handleGetPeer(requestContext *gin.Context) {
// @Param id path string true "Peer ID"
// @Success 200 {object} map[string]string
// @Router /peers/{id} [delete]
func (nodeService *NodeService) handleRemovePeer(requestContext *gin.Context) {
peerID := requestContext.Param("id")
if err := nodeService.peerRegistry.RemovePeer(peerID); err != nil {
requestContext.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
func (ns *NodeService) handleRemovePeer(c *gin.Context) {
peerID := c.Param("id")
if err := ns.peerRegistry.RemovePeer(peerID); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
requestContext.JSON(http.StatusOK, gin.H{"status": "peer removed"})
c.JSON(http.StatusOK, gin.H{"status": "peer removed"})
}
// handlePingPeer godoc
@ -266,18 +267,18 @@ func (nodeService *NodeService) handleRemovePeer(requestContext *gin.Context) {
// @Success 200 {object} map[string]float64
// @Failure 404 {object} APIError "Peer not found"
// @Router /peers/{id}/ping [post]
func (nodeService *NodeService) handlePingPeer(requestContext *gin.Context) {
peerID := requestContext.Param("id")
rtt, err := nodeService.controller.PingPeer(peerID)
func (ns *NodeService) handlePingPeer(c *gin.Context) {
peerID := c.Param("id")
rtt, err := ns.controller.PingPeer(peerID)
if err != nil {
if containsStr(err.Error(), "not found") || containsStr(err.Error(), "not connected") {
respondWithError(requestContext, http.StatusNotFound, "PEER_NOT_FOUND", "peer not found or not connected", err.Error())
if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "not connected") {
respondWithError(c, http.StatusNotFound, "PEER_NOT_FOUND", "peer not found or not connected", err.Error())
return
}
respondWithError(requestContext, http.StatusInternalServerError, ErrCodeInternalError, "ping failed", err.Error())
respondWithError(c, http.StatusInternalServerError, ErrCodeInternal, "ping failed", err.Error())
return
}
requestContext.JSON(http.StatusOK, gin.H{"rtt_ms": rtt})
c.JSON(http.StatusOK, gin.H{"rtt_ms": rtt})
}
// handleConnectPeer godoc
@ -289,17 +290,17 @@ func (nodeService *NodeService) handlePingPeer(requestContext *gin.Context) {
// @Success 200 {object} map[string]string
// @Failure 404 {object} APIError "Peer not found"
// @Router /peers/{id}/connect [post]
func (nodeService *NodeService) handleConnectPeer(requestContext *gin.Context) {
peerID := requestContext.Param("id")
if err := nodeService.controller.ConnectToPeer(peerID); err != nil {
if containsStr(err.Error(), "not found") {
respondWithError(requestContext, http.StatusNotFound, "PEER_NOT_FOUND", "peer not found", err.Error())
func (ns *NodeService) handleConnectPeer(c *gin.Context) {
peerID := c.Param("id")
if err := ns.controller.ConnectToPeer(peerID); err != nil {
if strings.Contains(err.Error(), "not found") {
respondWithError(c, http.StatusNotFound, "PEER_NOT_FOUND", "peer not found", err.Error())
return
}
respondWithError(requestContext, http.StatusInternalServerError, ErrCodeConnectionFailed, "connection failed", err.Error())
respondWithError(c, http.StatusInternalServerError, ErrCodeConnectionFailed, "connection failed", err.Error())
return
}
requestContext.JSON(http.StatusOK, gin.H{"status": "connected"})
c.JSON(http.StatusOK, gin.H{"status": "connected"})
}
// handleDisconnectPeer godoc
@ -310,18 +311,18 @@ func (nodeService *NodeService) handleConnectPeer(requestContext *gin.Context) {
// @Param id path string true "Peer ID"
// @Success 200 {object} map[string]string
// @Router /peers/{id}/disconnect [post]
func (nodeService *NodeService) handleDisconnectPeer(requestContext *gin.Context) {
peerID := requestContext.Param("id")
if err := nodeService.controller.DisconnectFromPeer(peerID); err != nil {
func (ns *NodeService) handleDisconnectPeer(c *gin.Context) {
peerID := c.Param("id")
if err := ns.controller.DisconnectFromPeer(peerID); err != nil {
// Make disconnect idempotent - if peer not connected, still return success
if containsStr(err.Error(), "not connected") {
requestContext.JSON(http.StatusOK, gin.H{"status": "disconnected"})
if strings.Contains(err.Error(), "not connected") {
c.JSON(http.StatusOK, gin.H{"status": "disconnected"})
return
}
respondWithError(requestContext, http.StatusInternalServerError, ErrCodeInternalError, "disconnect failed", err.Error())
respondWithError(c, http.StatusInternalServerError, ErrCodeInternal, "disconnect failed", err.Error())
return
}
requestContext.JSON(http.StatusOK, gin.H{"status": "disconnected"})
c.JSON(http.StatusOK, gin.H{"status": "disconnected"})
}
// handleRemoteStats godoc
@ -331,9 +332,9 @@ func (nodeService *NodeService) handleDisconnectPeer(requestContext *gin.Context
// @Produce json
// @Success 200 {object} map[string]node.StatsPayload
// @Router /remote/stats [get]
func (nodeService *NodeService) handleRemoteStats(requestContext *gin.Context) {
stats := nodeService.controller.GetAllStats()
requestContext.JSON(http.StatusOK, stats)
func (ns *NodeService) handleRemoteStats(c *gin.Context) {
stats := ns.controller.GetAllStats()
c.JSON(http.StatusOK, stats)
}
// handlePeerStats godoc
@ -344,17 +345,17 @@ func (nodeService *NodeService) handleRemoteStats(requestContext *gin.Context) {
// @Param peerId path string true "Peer ID"
// @Success 200 {object} node.StatsPayload
// @Router /remote/{peerId}/stats [get]
func (nodeService *NodeService) handlePeerStats(requestContext *gin.Context) {
peerID := requestContext.Param("peerId")
stats, err := nodeService.controller.GetRemoteStats(peerID)
func (ns *NodeService) handlePeerStats(c *gin.Context) {
peerID := c.Param("peerId")
stats, err := ns.controller.GetRemoteStats(peerID)
if err != nil {
requestContext.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
requestContext.JSON(http.StatusOK, stats)
c.JSON(http.StatusOK, stats)
}
// POST /remote/{peerId}/start {"minerType": "xmrig", "profileId": "abc123"}
// RemoteStartRequest is the request body for starting a remote miner.
type RemoteStartRequest struct {
MinerType string `json:"minerType" binding:"required"`
ProfileID string `json:"profileId,omitempty"`
@ -371,22 +372,22 @@ type RemoteStartRequest struct {
// @Param request body RemoteStartRequest true "Start parameters"
// @Success 200 {object} map[string]string
// @Router /remote/{peerId}/start [post]
func (nodeService *NodeService) handleRemoteStart(requestContext *gin.Context) {
peerID := requestContext.Param("peerId")
var request RemoteStartRequest
if err := requestContext.ShouldBindJSON(&request); err != nil {
requestContext.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
func (ns *NodeService) handleRemoteStart(c *gin.Context) {
peerID := c.Param("peerId")
var req RemoteStartRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
if err := nodeService.controller.StartRemoteMiner(peerID, request.MinerType, request.ProfileID, request.Config); err != nil {
requestContext.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
if err := ns.controller.StartRemoteMiner(peerID, req.MinerType, req.ProfileID, req.Config); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
requestContext.JSON(http.StatusOK, gin.H{"status": "miner started"})
c.JSON(http.StatusOK, gin.H{"status": "miner started"})
}
// POST /remote/{peerId}/stop {"minerName": "xmrig-main"}

// RemoteStopRequest is the request body for stopping a remote miner.
type RemoteStopRequest struct {
	// MinerName identifies the running miner instance to stop (required).
	MinerName string `json:"minerName" binding:"required"`
}
@ -401,19 +402,19 @@ type RemoteStopRequest struct {
// @Param request body RemoteStopRequest true "Stop parameters"
// @Success 200 {object} map[string]string
// @Router /remote/{peerId}/stop [post]
func (nodeService *NodeService) handleRemoteStop(requestContext *gin.Context) {
peerID := requestContext.Param("peerId")
var request RemoteStopRequest
if err := requestContext.ShouldBindJSON(&request); err != nil {
requestContext.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
func (ns *NodeService) handleRemoteStop(c *gin.Context) {
peerID := c.Param("peerId")
var req RemoteStopRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
if err := nodeService.controller.StopRemoteMiner(peerID, request.MinerName); err != nil {
requestContext.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
if err := ns.controller.StopRemoteMiner(peerID, req.MinerName); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
requestContext.JSON(http.StatusOK, gin.H{"status": "miner stopped"})
c.JSON(http.StatusOK, gin.H{"status": "miner stopped"})
}
// handleRemoteLogs godoc
@ -426,13 +427,13 @@ func (nodeService *NodeService) handleRemoteStop(requestContext *gin.Context) {
// @Param lines query int false "Number of lines (max 10000)" default(100)
// @Success 200 {array} string
// @Router /remote/{peerId}/logs/{miner} [get]
func (nodeService *NodeService) handleRemoteLogs(requestContext *gin.Context) {
peerID := requestContext.Param("peerId")
minerName := requestContext.Param("miner")
func (ns *NodeService) handleRemoteLogs(c *gin.Context) {
peerID := c.Param("peerId")
minerName := c.Param("miner")
lines := 100
const maxLines = 10000 // Prevent resource exhaustion
if linesParam := requestContext.Query("lines"); linesParam != "" {
if parsed, err := strconv.Atoi(linesParam); err == nil && parsed > 0 {
if l := c.Query("lines"); l != "" {
if parsed, err := strconv.Atoi(l); err == nil && parsed > 0 {
lines = parsed
if lines > maxLines {
lines = maxLines
@ -440,15 +441,15 @@ func (nodeService *NodeService) handleRemoteLogs(requestContext *gin.Context) {
}
}
logs, err := nodeService.controller.GetRemoteLogs(peerID, minerName, lines)
logs, err := ns.controller.GetRemoteLogs(peerID, minerName, lines)
if err != nil {
requestContext.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
requestContext.JSON(http.StatusOK, logs)
c.JSON(http.StatusOK, logs)
}
// GET /peers/auth/mode → {"mode": "open"} or {"mode": "allowlist"}

// AuthModeResponse is the response body for the peer auth mode endpoints.
type AuthModeResponse struct {
	// Mode is either "open" or "allowlist".
	Mode string `json:"mode"`
}
@ -460,16 +461,16 @@ type AuthModeResponse struct {
// @Produce json
// @Success 200 {object} AuthModeResponse
// @Router /peers/auth/mode [get]
func (nodeService *NodeService) handleGetAuthMode(requestContext *gin.Context) {
mode := nodeService.peerRegistry.GetAuthMode()
func (ns *NodeService) handleGetAuthMode(c *gin.Context) {
mode := ns.peerRegistry.GetAuthMode()
modeStr := "open"
if mode == node.PeerAuthAllowlist {
modeStr = "allowlist"
}
requestContext.JSON(http.StatusOK, AuthModeResponse{Mode: modeStr})
c.JSON(http.StatusOK, AuthModeResponse{Mode: modeStr})
}
// PUT /peers/auth/mode {"mode": "allowlist"} // or "open"

// SetAuthModeRequest is the request body for setting the peer auth mode.
type SetAuthModeRequest struct {
	// Mode must be "open" or "allowlist" (validated by the handler).
	Mode string `json:"mode" binding:"required"`
}
@ -484,29 +485,29 @@ type SetAuthModeRequest struct {
// @Success 200 {object} AuthModeResponse
// @Failure 400 {object} APIError "Invalid mode"
// @Router /peers/auth/mode [put]
func (nodeService *NodeService) handleSetAuthMode(requestContext *gin.Context) {
var request SetAuthModeRequest
if err := requestContext.ShouldBindJSON(&request); err != nil {
requestContext.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
func (ns *NodeService) handleSetAuthMode(c *gin.Context) {
var req SetAuthModeRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
var mode node.PeerAuthMode
switch request.Mode {
switch req.Mode {
case "open":
mode = node.PeerAuthOpen
case "allowlist":
mode = node.PeerAuthAllowlist
default:
respondWithError(requestContext, http.StatusBadRequest, "INVALID_MODE", "mode must be 'open' or 'allowlist'", "")
respondWithError(c, http.StatusBadRequest, "INVALID_MODE", "mode must be 'open' or 'allowlist'", "")
return
}
nodeService.peerRegistry.SetAuthMode(mode)
requestContext.JSON(http.StatusOK, AuthModeResponse{Mode: request.Mode})
ns.peerRegistry.SetAuthMode(mode)
c.JSON(http.StatusOK, AuthModeResponse{Mode: req.Mode})
}
// GET /peers/auth/allowlist → {"publicKeys": ["ed25519:abc...", "ed25519:def..."]}

// AllowlistResponse is the response body listing allowlisted public keys.
type AllowlistResponse struct {
	PublicKeys []string `json:"publicKeys"`
}
@ -518,12 +519,12 @@ type AllowlistResponse struct {
// @Produce json
// @Success 200 {object} AllowlistResponse
// @Router /peers/auth/allowlist [get]
func (nodeService *NodeService) handleListAllowlist(requestContext *gin.Context) {
keys := nodeService.peerRegistry.ListAllowedPublicKeys()
requestContext.JSON(http.StatusOK, AllowlistResponse{PublicKeys: keys})
func (ns *NodeService) handleListAllowlist(c *gin.Context) {
keys := ns.peerRegistry.ListAllowedPublicKeys()
c.JSON(http.StatusOK, AllowlistResponse{PublicKeys: keys})
}
// POST /peers/auth/allowlist {"publicKey": "ed25519:abc123..."}

// AddAllowlistRequest is the request body for adding a key to the allowlist.
type AddAllowlistRequest struct {
	// PublicKey is the peer public key to allow; the handler rejects keys
	// shorter than 16 characters.
	PublicKey string `json:"publicKey" binding:"required"`
}
@ -538,20 +539,20 @@ type AddAllowlistRequest struct {
// @Success 201 {object} map[string]string
// @Failure 400 {object} APIError "Invalid request"
// @Router /peers/auth/allowlist [post]
func (nodeService *NodeService) handleAddToAllowlist(requestContext *gin.Context) {
var request AddAllowlistRequest
if err := requestContext.ShouldBindJSON(&request); err != nil {
requestContext.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
func (ns *NodeService) handleAddToAllowlist(c *gin.Context) {
var req AddAllowlistRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
if len(request.PublicKey) < 16 {
respondWithError(requestContext, http.StatusBadRequest, "INVALID_KEY", "public key too short", "")
if len(req.PublicKey) < 16 {
respondWithError(c, http.StatusBadRequest, "INVALID_KEY", "public key too short", "")
return
}
nodeService.peerRegistry.AllowPublicKey(request.PublicKey)
requestContext.JSON(http.StatusCreated, gin.H{"status": "added"})
ns.peerRegistry.AllowPublicKey(req.PublicKey)
c.JSON(http.StatusCreated, gin.H{"status": "added"})
}
// handleRemoveFromAllowlist godoc
@ -562,13 +563,13 @@ func (nodeService *NodeService) handleAddToAllowlist(requestContext *gin.Context
// @Param key path string true "Public key to remove (URL-encoded)"
// @Success 200 {object} map[string]string
// @Router /peers/auth/allowlist/{key} [delete]
func (nodeService *NodeService) handleRemoveFromAllowlist(requestContext *gin.Context) {
key := requestContext.Param("key")
func (ns *NodeService) handleRemoveFromAllowlist(c *gin.Context) {
key := c.Param("key")
if key == "" {
respondWithError(requestContext, http.StatusBadRequest, "MISSING_KEY", "public key required", "")
respondWithError(c, http.StatusBadRequest, "MISSING_KEY", "public key required", "")
return
}
nodeService.peerRegistry.RevokePublicKey(key)
requestContext.JSON(http.StatusOK, gin.H{"status": "removed"})
ns.peerRegistry.RevokePublicKey(key)
c.JSON(http.StatusOK, gin.H{"status": "removed"})
}

View file

@ -2,6 +2,7 @@ package mining
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
@ -12,45 +13,41 @@ import (
const profileConfigFileName = "mining_profiles.json"
var profileConfigRelativePath = filepath.Join("lethean-desktop", profileConfigFileName)
// ProfileManager handles CRUD operations for MiningProfiles, persisting them
// to a JSON file under the user's XDG config directory.
//
// Usage:
//
//	pm, err := NewProfileManager()
//	pm.CreateProfile(&MiningProfile{Name: "XMRig CPU", MinerType: "xmrig"})
type ProfileManager struct {
	mu         sync.RWMutex              // guards profiles and serializes disk writes
	profiles   map[string]*MiningProfile // in-memory store keyed by profile ID
	configPath string                    // absolute path to mining_profiles.json
}
// profileManager, err := NewProfileManager()
// if err != nil { return err }
// NewProfileManager creates and initializes a new ProfileManager.
func NewProfileManager() (*ProfileManager, error) {
configPath, err := xdg.ConfigFile(profileConfigRelativePath)
configPath, err := xdg.ConfigFile(filepath.Join("lethean-desktop", profileConfigFileName))
if err != nil {
return nil, ErrInternal("could not resolve config path").WithCause(err)
return nil, fmt.Errorf("could not resolve config path: %w", err)
}
profileManager := &ProfileManager{
pm := &ProfileManager{
profiles: make(map[string]*MiningProfile),
configPath: configPath,
}
if err := profileManager.loadProfiles(); err != nil {
if err := pm.loadProfiles(); err != nil {
// If the file doesn't exist, that's fine, but any other error is a problem.
if !os.IsNotExist(err) {
return nil, ErrInternal("could not load profiles").WithCause(err)
return nil, fmt.Errorf("could not load profiles: %w", err)
}
}
return profileManager, nil
return pm, nil
}
// profileManager.loadProfiles() // call after acquiring profileManager.mutex.Lock() if needed externally
func (profileManager *ProfileManager) loadProfiles() error {
profileManager.mutex.Lock()
defer profileManager.mutex.Unlock()
// loadProfiles reads the profiles from the JSON file into memory.
func (pm *ProfileManager) loadProfiles() error {
pm.mu.Lock()
defer pm.mu.Unlock()
data, err := os.ReadFile(profileManager.configPath)
data, err := os.ReadFile(pm.configPath)
if err != nil {
return err
}
@ -60,108 +57,107 @@ func (profileManager *ProfileManager) loadProfiles() error {
return err
}
profileManager.profiles = make(map[string]*MiningProfile)
for _, profile := range profiles {
profileManager.profiles[profile.ID] = profile
pm.profiles = make(map[string]*MiningProfile)
for _, p := range profiles {
pm.profiles[p.ID] = p
}
return nil
}
// profileManager.saveProfiles() // saves `lethean-desktop/mining_profiles.json` after profile changes.
func (profileManager *ProfileManager) saveProfiles() error {
profileEntries := make([]*MiningProfile, 0, len(profileManager.profiles))
for _, profile := range profileManager.profiles {
profileEntries = append(profileEntries, profile)
// saveProfiles writes the current profiles from memory to the JSON file.
// This is an internal method and assumes the caller holds the appropriate lock.
// Uses atomic write pattern: write to temp file, sync, then rename.
func (pm *ProfileManager) saveProfiles() error {
profileList := make([]*MiningProfile, 0, len(pm.profiles))
for _, p := range pm.profiles {
profileList = append(profileList, p)
}
data, err := json.MarshalIndent(profileEntries, "", " ")
data, err := json.MarshalIndent(profileList, "", " ")
if err != nil {
return err
}
return AtomicWriteFile(profileManager.configPath, data, 0600)
return AtomicWriteFile(pm.configPath, data, 0600)
}
// created, err := profileManager.CreateProfile(&MiningProfile{Name: "XMRig CPU", MinerType: "xmrig"})
func (profileManager *ProfileManager) CreateProfile(profile *MiningProfile) (*MiningProfile, error) {
profileManager.mutex.Lock()
defer profileManager.mutex.Unlock()
// CreateProfile adds a new profile and saves it.
func (pm *ProfileManager) CreateProfile(profile *MiningProfile) (*MiningProfile, error) {
pm.mu.Lock()
defer pm.mu.Unlock()
profile.ID = uuid.New().String()
profileManager.profiles[profile.ID] = profile
pm.profiles[profile.ID] = profile
if err := profileManager.saveProfiles(); err != nil {
if err := pm.saveProfiles(); err != nil {
// Rollback
delete(profileManager.profiles, profile.ID)
delete(pm.profiles, profile.ID)
return nil, err
}
return profile, nil
}
// profile, ok := profileManager.GetProfile("abc-123")
// if !ok { return core.E("profile.Get", "profile not found", nil) }
func (profileManager *ProfileManager) GetProfile(id string) (*MiningProfile, bool) {
profileManager.mutex.RLock()
defer profileManager.mutex.RUnlock()
profile, exists := profileManager.profiles[id]
// GetProfile retrieves a profile by its ID.
func (pm *ProfileManager) GetProfile(id string) (*MiningProfile, bool) {
pm.mu.RLock()
defer pm.mu.RUnlock()
profile, exists := pm.profiles[id]
return profile, exists
}
// profiles := profileManager.GetAllProfiles()
// for _, profile := range profiles { render(profile) }
func (profileManager *ProfileManager) GetAllProfiles() []*MiningProfile {
profileManager.mutex.RLock()
defer profileManager.mutex.RUnlock()
// GetAllProfiles returns a list of all profiles.
func (pm *ProfileManager) GetAllProfiles() []*MiningProfile {
pm.mu.RLock()
defer pm.mu.RUnlock()
profileList := make([]*MiningProfile, 0, len(profileManager.profiles))
for _, profile := range profileManager.profiles {
profileList = append(profileList, profile)
profileList := make([]*MiningProfile, 0, len(pm.profiles))
for _, p := range pm.profiles {
profileList = append(profileList, p)
}
return profileList
}
// profile.Name = "XMRig GPU"
// if err := profileManager.UpdateProfile(profile); err != nil { return err }
func (profileManager *ProfileManager) UpdateProfile(profile *MiningProfile) error {
profileManager.mutex.Lock()
defer profileManager.mutex.Unlock()
// UpdateProfile modifies an existing profile.
func (pm *ProfileManager) UpdateProfile(profile *MiningProfile) error {
pm.mu.Lock()
defer pm.mu.Unlock()
oldProfile, exists := profileManager.profiles[profile.ID]
oldProfile, exists := pm.profiles[profile.ID]
if !exists {
return ErrProfileNotFound(profile.ID)
return fmt.Errorf("profile with ID %s not found", profile.ID)
}
// Update in-memory state
profileManager.profiles[profile.ID] = profile
pm.profiles[profile.ID] = profile
// Save to disk - rollback if save fails
if err := profileManager.saveProfiles(); err != nil {
if err := pm.saveProfiles(); err != nil {
// Restore old profile on save failure
profileManager.profiles[profile.ID] = oldProfile
return ErrInternal("failed to save profile").WithCause(err)
pm.profiles[profile.ID] = oldProfile
return fmt.Errorf("failed to save profile: %w", err)
}
return nil
}
// if err := profileManager.DeleteProfile("abc-123"); err != nil { return err }
func (profileManager *ProfileManager) DeleteProfile(id string) error {
profileManager.mutex.Lock()
defer profileManager.mutex.Unlock()
// DeleteProfile removes a profile by its ID.
func (pm *ProfileManager) DeleteProfile(id string) error {
pm.mu.Lock()
defer pm.mu.Unlock()
profile, exists := profileManager.profiles[id]
profile, exists := pm.profiles[id]
if !exists {
return ErrProfileNotFound(id)
return fmt.Errorf("profile with ID %s not found", id)
}
delete(profileManager.profiles, id)
delete(pm.profiles, id)
// Save to disk - rollback if save fails
if err := profileManager.saveProfiles(); err != nil {
if err := pm.saveProfiles(); err != nil {
// Restore profile on save failure
profileManager.profiles[id] = profile
return ErrInternal("failed to delete profile").WithCause(err)
pm.profiles[id] = profile
return fmt.Errorf("failed to delete profile: %w", err)
}
return nil

View file

@ -17,7 +17,7 @@ func setupTestProfileManager(t *testing.T) (*ProfileManager, func()) {
configPath := filepath.Join(tmpDir, "mining_profiles.json")
profileManager := &ProfileManager{
pm := &ProfileManager{
profiles: make(map[string]*MiningProfile),
configPath: configPath,
}
@ -26,11 +26,11 @@ func setupTestProfileManager(t *testing.T) (*ProfileManager, func()) {
os.RemoveAll(tmpDir)
}
return profileManager, cleanup
return pm, cleanup
}
func TestProfileManager_Create_Good(t *testing.T) {
profileManager, cleanup := setupTestProfileManager(t)
func TestProfileManagerCreate(t *testing.T) {
pm, cleanup := setupTestProfileManager(t)
defer cleanup()
profile := &MiningProfile{
@ -39,7 +39,7 @@ func TestProfileManager_Create_Good(t *testing.T) {
Config: RawConfig(`{"pool": "test.pool.com:3333"}`),
}
created, err := profileManager.CreateProfile(profile)
created, err := pm.CreateProfile(profile)
if err != nil {
t.Fatalf("failed to create profile: %v", err)
}
@ -53,7 +53,7 @@ func TestProfileManager_Create_Good(t *testing.T) {
}
// Verify it's stored
retrieved, exists := profileManager.GetProfile(created.ID)
retrieved, exists := pm.GetProfile(created.ID)
if !exists {
t.Error("profile should exist after creation")
}
@ -63,12 +63,12 @@ func TestProfileManager_Create_Good(t *testing.T) {
}
}
func TestProfileManager_Get_Good(t *testing.T) {
profileManager, cleanup := setupTestProfileManager(t)
func TestProfileManagerGet(t *testing.T) {
pm, cleanup := setupTestProfileManager(t)
defer cleanup()
// Get non-existent profile
_, exists := profileManager.GetProfile("non-existent-id")
_, exists := pm.GetProfile("non-existent-id")
if exists {
t.Error("GetProfile should return false for non-existent ID")
}
@ -78,9 +78,9 @@ func TestProfileManager_Get_Good(t *testing.T) {
Name: "Get Test",
MinerType: "xmrig",
}
created, _ := profileManager.CreateProfile(profile)
created, _ := pm.CreateProfile(profile)
retrieved, exists := profileManager.GetProfile(created.ID)
retrieved, exists := pm.GetProfile(created.ID)
if !exists {
t.Error("GetProfile should return true for existing ID")
}
@ -90,36 +90,36 @@ func TestProfileManager_Get_Good(t *testing.T) {
}
}
func TestProfileManager_GetAll_Good(t *testing.T) {
profileManager, cleanup := setupTestProfileManager(t)
func TestProfileManagerGetAll(t *testing.T) {
pm, cleanup := setupTestProfileManager(t)
defer cleanup()
// Empty list initially
profiles := profileManager.GetAllProfiles()
profiles := pm.GetAllProfiles()
if len(profiles) != 0 {
t.Errorf("expected 0 profiles initially, got %d", len(profiles))
}
// Create multiple profiles
for i := 0; i < 3; i++ {
profileManager.CreateProfile(&MiningProfile{
pm.CreateProfile(&MiningProfile{
Name: "Profile",
MinerType: "xmrig",
})
}
profiles = profileManager.GetAllProfiles()
profiles = pm.GetAllProfiles()
if len(profiles) != 3 {
t.Errorf("expected 3 profiles, got %d", len(profiles))
}
}
func TestProfileManager_Update_Good(t *testing.T) {
profileManager, cleanup := setupTestProfileManager(t)
func TestProfileManagerUpdate(t *testing.T) {
pm, cleanup := setupTestProfileManager(t)
defer cleanup()
// Update non-existent profile
err := profileManager.UpdateProfile(&MiningProfile{ID: "non-existent"})
err := pm.UpdateProfile(&MiningProfile{ID: "non-existent"})
if err == nil {
t.Error("UpdateProfile should fail for non-existent profile")
}
@ -129,18 +129,18 @@ func TestProfileManager_Update_Good(t *testing.T) {
Name: "Original Name",
MinerType: "xmrig",
}
created, _ := profileManager.CreateProfile(profile)
created, _ := pm.CreateProfile(profile)
// Update it
created.Name = "Updated Name"
created.MinerType = "ttminer"
err = profileManager.UpdateProfile(created)
err = pm.UpdateProfile(created)
if err != nil {
t.Fatalf("failed to update profile: %v", err)
}
// Verify update
retrieved, _ := profileManager.GetProfile(created.ID)
retrieved, _ := pm.GetProfile(created.ID)
if retrieved.Name != "Updated Name" {
t.Errorf("expected name 'Updated Name', got '%s'", retrieved.Name)
}
@ -149,12 +149,12 @@ func TestProfileManager_Update_Good(t *testing.T) {
}
}
func TestProfileManager_Delete_Good(t *testing.T) {
profileManager, cleanup := setupTestProfileManager(t)
func TestProfileManagerDelete(t *testing.T) {
pm, cleanup := setupTestProfileManager(t)
defer cleanup()
// Delete non-existent profile
err := profileManager.DeleteProfile("non-existent")
err := pm.DeleteProfile("non-existent")
if err == nil {
t.Error("DeleteProfile should fail for non-existent profile")
}
@ -164,21 +164,21 @@ func TestProfileManager_Delete_Good(t *testing.T) {
Name: "Delete Me",
MinerType: "xmrig",
}
created, _ := profileManager.CreateProfile(profile)
created, _ := pm.CreateProfile(profile)
err = profileManager.DeleteProfile(created.ID)
err = pm.DeleteProfile(created.ID)
if err != nil {
t.Fatalf("failed to delete profile: %v", err)
}
// Verify deletion
_, exists := profileManager.GetProfile(created.ID)
_, exists := pm.GetProfile(created.ID)
if exists {
t.Error("profile should not exist after deletion")
}
}
func TestProfileManager_Persistence_Good(t *testing.T) {
func TestProfileManagerPersistence(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "profile-persist-test")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
@ -188,7 +188,7 @@ func TestProfileManager_Persistence_Good(t *testing.T) {
configPath := filepath.Join(tmpDir, "mining_profiles.json")
// Create first manager and add profile
firstProfileManager := &ProfileManager{
pm1 := &ProfileManager{
profiles: make(map[string]*MiningProfile),
configPath: configPath,
}
@ -198,23 +198,23 @@ func TestProfileManager_Persistence_Good(t *testing.T) {
MinerType: "xmrig",
Config: RawConfig(`{"pool": "persist.pool.com"}`),
}
created, err := firstProfileManager.CreateProfile(profile)
created, err := pm1.CreateProfile(profile)
if err != nil {
t.Fatalf("failed to create profile: %v", err)
}
// Create second manager with same path - should load existing profile
secondProfileManager := &ProfileManager{
pm2 := &ProfileManager{
profiles: make(map[string]*MiningProfile),
configPath: configPath,
}
err = secondProfileManager.loadProfiles()
err = pm2.loadProfiles()
if err != nil {
t.Fatalf("failed to load profiles: %v", err)
}
// Verify profile persisted
loaded, exists := secondProfileManager.GetProfile(created.ID)
loaded, exists := pm2.GetProfile(created.ID)
if !exists {
t.Fatal("profile should be loaded from file")
}
@ -224,43 +224,43 @@ func TestProfileManager_Persistence_Good(t *testing.T) {
}
}
func TestProfileManager_Concurrency_Ugly(t *testing.T) {
profileManager, cleanup := setupTestProfileManager(t)
func TestProfileManagerConcurrency(t *testing.T) {
pm, cleanup := setupTestProfileManager(t)
defer cleanup()
var waitGroup sync.WaitGroup
var wg sync.WaitGroup
numGoroutines := 10
// Concurrent creates
for i := 0; i < numGoroutines; i++ {
waitGroup.Add(1)
wg.Add(1)
go func(n int) {
defer waitGroup.Done()
profileManager.CreateProfile(&MiningProfile{
defer wg.Done()
pm.CreateProfile(&MiningProfile{
Name: "Concurrent Profile",
MinerType: "xmrig",
})
}(i)
}
waitGroup.Wait()
wg.Wait()
profiles := profileManager.GetAllProfiles()
profiles := pm.GetAllProfiles()
if len(profiles) != numGoroutines {
t.Errorf("expected %d profiles, got %d", numGoroutines, len(profiles))
}
// Concurrent reads
for i := 0; i < numGoroutines; i++ {
waitGroup.Add(1)
wg.Add(1)
go func() {
defer waitGroup.Done()
profileManager.GetAllProfiles()
defer wg.Done()
pm.GetAllProfiles()
}()
}
waitGroup.Wait()
wg.Wait()
}
func TestProfileManager_InvalidJSON_Bad(t *testing.T) {
func TestProfileManagerInvalidJSON(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "profile-invalid-test")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
@ -275,24 +275,24 @@ func TestProfileManager_InvalidJSON_Bad(t *testing.T) {
t.Fatalf("failed to write invalid JSON: %v", err)
}
profileManager := &ProfileManager{
pm := &ProfileManager{
profiles: make(map[string]*MiningProfile),
configPath: configPath,
}
err = profileManager.loadProfiles()
err = pm.loadProfiles()
if err == nil {
t.Error("loadProfiles should fail with invalid JSON")
}
}
func TestProfileManager_FileNotFound_Bad(t *testing.T) {
profileManager := &ProfileManager{
func TestProfileManagerFileNotFound(t *testing.T) {
pm := &ProfileManager{
profiles: make(map[string]*MiningProfile),
configPath: "/non/existent/path/profiles.json",
}
err := profileManager.loadProfiles()
err := pm.loadProfiles()
if err == nil {
t.Error("loadProfiles should fail when file not found")
}
@ -302,8 +302,8 @@ func TestProfileManager_FileNotFound_Bad(t *testing.T) {
}
}
func TestProfileManager_CreateRollback_Ugly(t *testing.T) {
profileManager := &ProfileManager{
func TestProfileManagerCreateRollback(t *testing.T) {
pm := &ProfileManager{
profiles: make(map[string]*MiningProfile),
configPath: "/invalid/path/that/cannot/be/written/profiles.json",
}
@ -313,20 +313,20 @@ func TestProfileManager_CreateRollback_Ugly(t *testing.T) {
MinerType: "xmrig",
}
_, err := profileManager.CreateProfile(profile)
_, err := pm.CreateProfile(profile)
if err == nil {
t.Error("CreateProfile should fail when save fails")
}
// Verify rollback - profile should not be in memory
profiles := profileManager.GetAllProfiles()
profiles := pm.GetAllProfiles()
if len(profiles) != 0 {
t.Error("failed create should rollback - no profile should be in memory")
}
}
func TestProfileManager_ConfigWithData_Good(t *testing.T) {
profileManager, cleanup := setupTestProfileManager(t)
func TestProfileManagerConfigWithData(t *testing.T) {
pm, cleanup := setupTestProfileManager(t)
defer cleanup()
config := RawConfig(`{
@ -342,12 +342,12 @@ func TestProfileManager_ConfigWithData_Good(t *testing.T) {
Config: config,
}
created, err := profileManager.CreateProfile(profile)
created, err := pm.CreateProfile(profile)
if err != nil {
t.Fatalf("failed to create profile: %v", err)
}
retrieved, _ := profileManager.GetProfile(created.ID)
retrieved, _ := pm.GetProfile(created.ID)
// Parse config to verify
var parsedConfig map[string]interface{}

View file

@ -8,14 +8,13 @@ import (
"github.com/gin-gonic/gin"
)
// RateLimiter provides token-bucket rate limiting per client IP address.
//
// Usage:
//
//	rl := NewRateLimiter(10, 20) // 10 requests/second, burst of 20
//	defer rl.Stop()
//	router.Use(rl.Middleware())
type RateLimiter struct {
	requestsPerSecond int                         // steady-state refill rate per client
	burst             int                         // maximum tokens a bucket can hold
	clients           map[string]*rateLimitClient // per-IP token buckets
	mu                sync.RWMutex                // guards clients and stopped
	stopChan          chan struct{}               // closed by Stop to end cleanupLoop
	stopped           bool                        // makes Stop idempotent
}
@ -24,98 +23,97 @@ type rateLimitClient struct {
lastCheck time.Time
}
// limiter := NewRateLimiter(10, 20) // 10 requests/second, burst of 20
// defer limiter.Stop()
// NewRateLimiter creates a new rate limiter with the specified limits
func NewRateLimiter(requestsPerSecond, burst int) *RateLimiter {
limiter := &RateLimiter{
rl := &RateLimiter{
requestsPerSecond: requestsPerSecond,
burst: burst,
clients: make(map[string]*rateLimitClient),
stopChannel: make(chan struct{}),
stopChan: make(chan struct{}),
}
// Start cleanup goroutine
go limiter.cleanupLoop()
go rl.cleanupLoop()
return limiter
return rl
}
// go limiter.cleanupLoop() // started by NewRateLimiter; runs until limiter.Stop()
func (limiter *RateLimiter) cleanupLoop() {
// cleanupLoop removes stale clients periodically
func (rl *RateLimiter) cleanupLoop() {
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
for {
select {
case <-limiter.stopChannel:
case <-rl.stopChan:
return
case <-ticker.C:
limiter.cleanup()
rl.cleanup()
}
}
}
// limiter.cleanup() // called every minute by cleanupLoop; evicts IPs idle for >5 minutes
func (limiter *RateLimiter) cleanup() {
limiter.mutex.Lock()
defer limiter.mutex.Unlock()
// cleanup removes clients that haven't made requests in 5 minutes
func (rl *RateLimiter) cleanup() {
rl.mu.Lock()
defer rl.mu.Unlock()
for ip, client := range limiter.clients {
if time.Since(client.lastCheck) > 5*time.Minute {
delete(limiter.clients, ip)
for ip, c := range rl.clients {
if time.Since(c.lastCheck) > 5*time.Minute {
delete(rl.clients, ip)
}
}
}
// limiter.Stop() // call on shutdown to release the cleanup goroutine
func (limiter *RateLimiter) Stop() {
limiter.mutex.Lock()
defer limiter.mutex.Unlock()
// Stop stops the rate limiter's cleanup goroutine
func (rl *RateLimiter) Stop() {
rl.mu.Lock()
defer rl.mu.Unlock()
if !limiter.stopped {
close(limiter.stopChannel)
limiter.stopped = true
if !rl.stopped {
close(rl.stopChan)
rl.stopped = true
}
}
// router.Use(limiter.Middleware()) // install before route handlers
func (limiter *RateLimiter) Middleware() gin.HandlerFunc {
return func(requestContext *gin.Context) {
clientAddress := requestContext.ClientIP()
// Middleware returns a Gin middleware handler for rate limiting
func (rl *RateLimiter) Middleware() gin.HandlerFunc {
return func(c *gin.Context) {
ip := c.ClientIP()
limiter.mutex.Lock()
client, exists := limiter.clients[clientAddress]
rl.mu.Lock()
cl, exists := rl.clients[ip]
if !exists {
client = &rateLimitClient{tokens: float64(limiter.burst), lastCheck: time.Now()}
limiter.clients[clientAddress] = client
cl = &rateLimitClient{tokens: float64(rl.burst), lastCheck: time.Now()}
rl.clients[ip] = cl
}
// Token bucket algorithm
now := time.Now()
elapsed := now.Sub(client.lastCheck).Seconds()
client.tokens += elapsed * float64(limiter.requestsPerSecond)
if client.tokens > float64(limiter.burst) {
client.tokens = float64(limiter.burst)
elapsed := now.Sub(cl.lastCheck).Seconds()
cl.tokens += elapsed * float64(rl.requestsPerSecond)
if cl.tokens > float64(rl.burst) {
cl.tokens = float64(rl.burst)
}
client.lastCheck = now
cl.lastCheck = now
if client.tokens < 1 {
limiter.mutex.Unlock()
respondWithError(requestContext, http.StatusTooManyRequests, "RATE_LIMITED",
if cl.tokens < 1 {
rl.mu.Unlock()
respondWithError(c, http.StatusTooManyRequests, "RATE_LIMITED",
"too many requests", "rate limit exceeded")
requestContext.Abort()
c.Abort()
return
}
client.tokens--
limiter.mutex.Unlock()
requestContext.Next()
cl.tokens--
rl.mu.Unlock()
c.Next()
}
}
// if limiter.ClientCount() == 0 { /* no active clients */ }
func (limiter *RateLimiter) ClientCount() int {
limiter.mutex.RLock()
defer limiter.mutex.RUnlock()
return len(limiter.clients)
// ClientCount returns the number of tracked clients (for testing/monitoring)
func (rl *RateLimiter) ClientCount() int {
rl.mu.RLock()
defer rl.mu.RUnlock()
return len(rl.clients)
}

View file

@ -9,196 +9,186 @@ import (
"github.com/gin-gonic/gin"
)
// rateLimiter := NewRateLimiter(10, 20)
// rateLimiter.Stop()
func TestRatelimiter_NewRateLimiter_Good(t *testing.T) {
rateLimiter := NewRateLimiter(10, 20)
if rateLimiter == nil {
func TestNewRateLimiter(t *testing.T) {
rl := NewRateLimiter(10, 20)
if rl == nil {
t.Fatal("NewRateLimiter returned nil")
}
defer rateLimiter.Stop()
defer rl.Stop()
if rateLimiter.requestsPerSecond != 10 {
t.Errorf("Expected requestsPerSecond 10, got %d", rateLimiter.requestsPerSecond)
if rl.requestsPerSecond != 10 {
t.Errorf("Expected requestsPerSecond 10, got %d", rl.requestsPerSecond)
}
if rateLimiter.burst != 20 {
t.Errorf("Expected burst 20, got %d", rateLimiter.burst)
if rl.burst != 20 {
t.Errorf("Expected burst 20, got %d", rl.burst)
}
}
// rateLimiter.Stop() // idempotent — calling twice must not panic
func TestRatelimiter_Stop_Good(t *testing.T) {
rateLimiter := NewRateLimiter(10, 20)
func TestRateLimiterStop(t *testing.T) {
rl := NewRateLimiter(10, 20)
// Stop should not panic
defer func() {
if r := recover(); r != nil {
t.Errorf("Stop panicked: %v", r)
}
}()
rateLimiter.Stop()
rateLimiter.Stop()
rl.Stop()
// Calling Stop again should not panic (idempotent)
rl.Stop()
}
// router.Use(rl.Middleware()) — allows requests within burst, rejects beyond
func TestRatelimiter_Middleware_Good(t *testing.T) {
func TestRateLimiterMiddleware(t *testing.T) {
gin.SetMode(gin.TestMode)
rateLimiter := NewRateLimiter(10, 5)
defer rateLimiter.Stop()
rl := NewRateLimiter(10, 5) // 10 req/s, burst of 5
defer rl.Stop()
router := gin.New()
router.Use(rateLimiter.Middleware())
router.Use(rl.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "ok")
})
// First 5 requests should succeed (burst)
for i := 0; i < 5; i++ {
request := httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.1:12345"
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req := httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.1:12345"
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK {
t.Errorf("Request %d: expected 200, got %d", i+1, recorder.Code)
if w.Code != http.StatusOK {
t.Errorf("Request %d: expected 200, got %d", i+1, w.Code)
}
}
// 6th request should be rate limited
req := httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.1:12345"
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != http.StatusTooManyRequests {
t.Errorf("Expected 429 Too Many Requests, got %d", w.Code)
}
}
// router.Use(rl.Middleware()) — rejects the 6th request after burst is exhausted
func TestRatelimiter_Middleware_Bad(t *testing.T) {
func TestRateLimiterDifferentIPs(t *testing.T) {
gin.SetMode(gin.TestMode)
rateLimiter := NewRateLimiter(10, 5)
defer rateLimiter.Stop()
rl := NewRateLimiter(10, 2) // 10 req/s, burst of 2
defer rl.Stop()
router := gin.New()
router.Use(rateLimiter.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "ok")
})
for i := 0; i < 5; i++ {
request := httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.1:12345"
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
}
request := httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.1:12345"
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
if recorder.Code != http.StatusTooManyRequests {
t.Errorf("Expected 429 Too Many Requests, got %d", recorder.Code)
}
}
// router.Use(rl.Middleware()) — rate limit per IP; exhausted IP1 does not affect IP2
func TestRatelimiter_Middleware_Ugly(t *testing.T) {
gin.SetMode(gin.TestMode)
rateLimiter := NewRateLimiter(10, 2)
defer rateLimiter.Stop()
router := gin.New()
router.Use(rateLimiter.Middleware())
router.Use(rl.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "ok")
})
// Exhaust rate limit for IP1
for i := 0; i < 2; i++ {
request := httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.1:12345"
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req := httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.1:12345"
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
}
request := httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.1:12345"
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
if recorder.Code != http.StatusTooManyRequests {
t.Errorf("IP1 should be rate limited, got %d", recorder.Code)
// IP1 should be rate limited
req := httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.1:12345"
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != http.StatusTooManyRequests {
t.Errorf("IP1 should be rate limited, got %d", w.Code)
}
request = httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.2:12345"
recorder = httptest.NewRecorder()
router.ServeHTTP(recorder, request)
if recorder.Code != http.StatusOK {
t.Errorf("IP2 should not be rate limited, got %d", recorder.Code)
// IP2 should still be able to make requests
req = httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.2:12345"
w = httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != http.StatusOK {
t.Errorf("IP2 should not be rate limited, got %d", w.Code)
}
}
// count := rl.ClientCount() // returns number of tracked IPs
func TestRatelimiter_ClientCount_Good(t *testing.T) {
rateLimiter := NewRateLimiter(10, 5)
defer rateLimiter.Stop()
func TestRateLimiterClientCount(t *testing.T) {
rl := NewRateLimiter(10, 5)
defer rl.Stop()
gin.SetMode(gin.TestMode)
router := gin.New()
router.Use(rateLimiter.Middleware())
router.Use(rl.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "ok")
})
if count := rateLimiter.ClientCount(); count != 0 {
// Initial count should be 0
if count := rl.ClientCount(); count != 0 {
t.Errorf("Expected 0 clients, got %d", count)
}
request := httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.1:12345"
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
// Make a request
req := httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.1:12345"
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if count := rateLimiter.ClientCount(); count != 1 {
// Should have 1 client now
if count := rl.ClientCount(); count != 1 {
t.Errorf("Expected 1 client, got %d", count)
}
request = httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.2:12345"
recorder = httptest.NewRecorder()
router.ServeHTTP(recorder, request)
// Make request from different IP
req = httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.2:12345"
w = httptest.NewRecorder()
router.ServeHTTP(w, req)
if count := rateLimiter.ClientCount(); count != 2 {
// Should have 2 clients now
if count := rl.ClientCount(); count != 2 {
t.Errorf("Expected 2 clients, got %d", count)
}
}
// rateLimiter.Middleware() — token refills at requestsPerSecond rate; request succeeds after wait
func TestRatelimiter_TokenRefill_Good(t *testing.T) {
func TestRateLimiterTokenRefill(t *testing.T) {
gin.SetMode(gin.TestMode)
rateLimiter := NewRateLimiter(100, 1)
defer rateLimiter.Stop()
rl := NewRateLimiter(100, 1) // 100 req/s, burst of 1 (refills quickly)
defer rl.Stop()
router := gin.New()
router.Use(rateLimiter.Middleware())
router.Use(rl.Middleware())
router.GET("/test", func(c *gin.Context) {
c.String(http.StatusOK, "ok")
})
request := httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.1:12345"
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
if recorder.Code != http.StatusOK {
t.Errorf("First request should succeed, got %d", recorder.Code)
// First request succeeds
req := httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.1:12345"
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != http.StatusOK {
t.Errorf("First request should succeed, got %d", w.Code)
}
request = httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.1:12345"
recorder = httptest.NewRecorder()
router.ServeHTTP(recorder, request)
if recorder.Code != http.StatusTooManyRequests {
t.Errorf("Second request should be rate limited, got %d", recorder.Code)
// Second request should fail (burst exhausted)
req = httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.1:12345"
w = httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != http.StatusTooManyRequests {
t.Errorf("Second request should be rate limited, got %d", w.Code)
}
// Wait for token to refill (at 100 req/s, 1 token takes 10ms)
time.Sleep(20 * time.Millisecond)
request = httptest.NewRequest("GET", "/test", nil)
request.RemoteAddr = "192.168.1.1:12345"
recorder = httptest.NewRecorder()
router.ServeHTTP(recorder, request)
if recorder.Code != http.StatusOK {
t.Errorf("Third request should succeed after refill, got %d", recorder.Code)
// Third request should succeed (token refilled)
req = httptest.NewRequest("GET", "/test", nil)
req.RemoteAddr = "192.168.1.1:12345"
w = httptest.NewRecorder()
router.ServeHTTP(w, req)
if w.Code != http.StatusOK {
t.Errorf("Third request should succeed after refill, got %d", w.Code)
}
}

View file

@ -2,156 +2,155 @@ package mining
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
)
// repo := NewFileRepository[MinersConfig]("/home/alice/.config/lethean-desktop/miners.json")
// data, err := repo.Load()
// Repository defines a generic interface for data persistence.
// Implementations can store data in files, databases, etc.
type Repository[T any] interface {
// data, err := repo.Load() // loads "/home/alice/.config/lethean-desktop/miners.json" into MinersConfig
// Load reads data from the repository
Load() (T, error)
// repo.Save(updatedConfig) persists the updated config back to "/home/alice/.config/lethean-desktop/miners.json"
// Save writes data to the repository
Save(data T) error
// repo.Update(func(configuration *MinersConfig) error { configuration.Miners = append(configuration.Miners, entry); return nil })
Update(modifier func(*T) error) error
// Update atomically loads, modifies, and saves data
Update(fn func(*T) error) error
}
// repo := NewFileRepository[MinersConfig]("/home/alice/.config/lethean-desktop/miners.json", WithDefaults(defaultMinersConfig))
// data, err := repo.Load()
// FileRepository provides atomic file-based persistence for JSON data.
// It uses atomic writes (temp file + rename) to prevent corruption.
type FileRepository[T any] struct {
mutex sync.RWMutex
filePath string
defaultValueProvider func() T
mu sync.RWMutex
path string
defaults func() T
}
// repo := NewFileRepository[MinersConfig]("/home/alice/.config/lethean-desktop/miners.json", WithDefaults(defaultMinersConfig), myOption)
// FileRepositoryOption configures a FileRepository.
type FileRepositoryOption[T any] func(*FileRepository[T])
// repo := NewFileRepository[MinersConfig]("/home/alice/.config/lethean-desktop/miners.json", WithDefaults(defaultMinersConfig))
func WithDefaults[T any](defaultsProvider func() T) FileRepositoryOption[T] {
return func(repo *FileRepository[T]) {
repo.defaultValueProvider = defaultsProvider
// WithDefaults sets the default value factory for when the file doesn't exist.
func WithDefaults[T any](fn func() T) FileRepositoryOption[T] {
return func(r *FileRepository[T]) {
r.defaults = fn
}
}
// repo := NewFileRepository[MinersConfig]("/home/alice/.config/lethean-desktop/miners.json", WithDefaults(defaultMinersConfig))
func NewFileRepository[T any](filePath string, options ...FileRepositoryOption[T]) *FileRepository[T] {
repo := &FileRepository[T]{
filePath: filePath,
// NewFileRepository creates a new file-based repository.
func NewFileRepository[T any](path string, opts ...FileRepositoryOption[T]) *FileRepository[T] {
r := &FileRepository[T]{
path: path,
}
for _, option := range options {
option(repo)
for _, opt := range opts {
opt(r)
}
return repo
return r
}
// data, err := repo.Load()
// if err != nil { return defaults, err }
func (repository *FileRepository[T]) Load() (T, error) {
repository.mutex.RLock()
defer repository.mutex.RUnlock()
// Load reads and deserializes data from the file.
// Returns defaults if file doesn't exist.
func (r *FileRepository[T]) Load() (T, error) {
r.mu.RLock()
defer r.mu.RUnlock()
var result T
data, err := os.ReadFile(repository.filePath)
data, err := os.ReadFile(r.path)
if err != nil {
if os.IsNotExist(err) {
if repository.defaultValueProvider != nil {
return repository.defaultValueProvider(), nil
if r.defaults != nil {
return r.defaults(), nil
}
return result, nil
}
return result, ErrInternal("failed to read file").WithCause(err)
return result, fmt.Errorf("failed to read file: %w", err)
}
if err := json.Unmarshal(data, &result); err != nil {
return result, ErrInternal("failed to unmarshal data").WithCause(err)
return result, fmt.Errorf("failed to unmarshal data: %w", err)
}
return result, nil
}
// if err := repo.Save(updated); err != nil { return ErrInternal("save").WithCause(err) }
func (repository *FileRepository[T]) Save(data T) error {
repository.mutex.Lock()
defer repository.mutex.Unlock()
// Save serializes and writes data to the file atomically.
func (r *FileRepository[T]) Save(data T) error {
r.mu.Lock()
defer r.mu.Unlock()
return repository.saveUnlocked(data)
return r.saveUnlocked(data)
}
// repository.saveUnlocked(updatedConfig) // used by Save() and Update() while the mutex is already held
func (repository *FileRepository[T]) saveUnlocked(data T) error {
dir := filepath.Dir(repository.filePath)
// saveUnlocked saves data without acquiring the lock (caller must hold lock).
func (r *FileRepository[T]) saveUnlocked(data T) error {
dir := filepath.Dir(r.path)
if err := os.MkdirAll(dir, 0755); err != nil {
return ErrInternal("failed to create directory").WithCause(err)
return fmt.Errorf("failed to create directory: %w", err)
}
jsonData, err := json.MarshalIndent(data, "", " ")
if err != nil {
return ErrInternal("failed to marshal data").WithCause(err)
return fmt.Errorf("failed to marshal data: %w", err)
}
return AtomicWriteFile(repository.filePath, jsonData, 0600)
return AtomicWriteFile(r.path, jsonData, 0600)
}
// repo.Update(func(configuration *MinersConfig) error {
// configuration.Miners = append(configuration.Miners, entry)
// return nil
// })
func (repository *FileRepository[T]) Update(modifier func(*T) error) error {
repository.mutex.Lock()
defer repository.mutex.Unlock()
// Update atomically loads, modifies, and saves data.
// The modification function receives a pointer to the data.
func (r *FileRepository[T]) Update(fn func(*T) error) error {
r.mu.Lock()
defer r.mu.Unlock()
// os.ReadFile("/home/alice/.config/lethean-desktop/miners.json") loads the current config before applying the modifier.
// Load current data
var data T
fileData, err := os.ReadFile(repository.filePath)
fileData, err := os.ReadFile(r.path)
if err != nil {
if os.IsNotExist(err) {
if repository.defaultValueProvider != nil {
data = repository.defaultValueProvider()
if r.defaults != nil {
data = r.defaults()
}
} else {
return ErrInternal("failed to read file").WithCause(err)
return fmt.Errorf("failed to read file: %w", err)
}
} else {
if err := json.Unmarshal(fileData, &data); err != nil {
return ErrInternal("failed to unmarshal data").WithCause(err)
return fmt.Errorf("failed to unmarshal data: %w", err)
}
}
// modifier(&data) can append `MinerAutostartConfig{MinerType: "xmrig", Autostart: true}` before saving.
if err := modifier(&data); err != nil {
// Apply modification
if err := fn(&data); err != nil {
return err
}
// repository.saveUnlocked(data) writes the updated JSON atomically to "/home/alice/.config/lethean-desktop/miners.json".
return repository.saveUnlocked(data)
// Save atomically
return r.saveUnlocked(data)
}
// filePath := repo.Path() // filePath == "/home/alice/.config/lethean-desktop/miners.json"
func (repository *FileRepository[T]) Path() string {
return repository.filePath
// Path returns the file path of this repository.
func (r *FileRepository[T]) Path() string {
return r.path
}
// if !repo.Exists() { return defaults, nil }
func (repository *FileRepository[T]) Exists() bool {
repository.mutex.RLock()
defer repository.mutex.RUnlock()
// Exists returns true if the repository file exists.
func (r *FileRepository[T]) Exists() bool {
r.mu.RLock()
defer r.mu.RUnlock()
_, err := os.Stat(repository.filePath)
_, err := os.Stat(r.path)
return err == nil
}
// if err := repo.Delete(); err != nil { return err }
func (repository *FileRepository[T]) Delete() error {
repository.mutex.Lock()
defer repository.mutex.Unlock()
// Delete removes the repository file.
func (r *FileRepository[T]) Delete() error {
r.mu.Lock()
defer r.mu.Unlock()
err := os.Remove(repository.filePath)
err := os.Remove(r.path)
if os.IsNotExist(err) {
return nil
}

View file

@ -12,7 +12,7 @@ type testData struct {
Value int `json:"value"`
}
func TestFileRepository_Load_Good(t *testing.T) {
func TestFileRepository_Load(t *testing.T) {
t.Run("NonExistentFile", func(t *testing.T) {
tmpDir := t.TempDir()
path := filepath.Join(tmpDir, "nonexistent.json")
@ -78,7 +78,7 @@ func TestFileRepository_Load_Good(t *testing.T) {
})
}
func TestFileRepository_Save_Good(t *testing.T) {
func TestFileRepository_Save(t *testing.T) {
t.Run("NewFile", func(t *testing.T) {
tmpDir := t.TempDir()
path := filepath.Join(tmpDir, "subdir", "new.json")
@ -130,7 +130,7 @@ func TestFileRepository_Save_Good(t *testing.T) {
})
}
func TestFileRepository_Update_Good(t *testing.T) {
func TestFileRepository_Update(t *testing.T) {
t.Run("UpdateExisting", func(t *testing.T) {
tmpDir := t.TempDir()
path := filepath.Join(tmpDir, "update.json")
@ -215,7 +215,7 @@ func TestFileRepository_Update_Good(t *testing.T) {
})
}
func TestFileRepository_Delete_Good(t *testing.T) {
func TestFileRepository_Delete(t *testing.T) {
tmpDir := t.TempDir()
path := filepath.Join(tmpDir, "delete.json")
repo := NewFileRepository[testData](path)
@ -244,7 +244,7 @@ func TestFileRepository_Delete_Good(t *testing.T) {
}
}
func TestFileRepository_Path_Good(t *testing.T) {
func TestFileRepository_Path(t *testing.T) {
path := "/some/path/config.json"
repo := NewFileRepository[testData](path)
@ -253,7 +253,7 @@ func TestFileRepository_Path_Good(t *testing.T) {
}
}
func TestFileRepository_UpdateWithLoadError_Bad(t *testing.T) {
func TestFileRepository_UpdateWithLoadError(t *testing.T) {
tmpDir := t.TempDir()
path := filepath.Join(tmpDir, "corrupt.json")
repo := NewFileRepository[testData](path)
@ -273,7 +273,7 @@ func TestFileRepository_UpdateWithLoadError_Bad(t *testing.T) {
}
}
func TestFileRepository_SaveToReadOnlyDirectory_Bad(t *testing.T) {
func TestFileRepository_SaveToReadOnlyDirectory(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("Test skipped when running as root")
}
@ -285,12 +285,6 @@ func TestFileRepository_SaveToReadOnlyDirectory_Bad(t *testing.T) {
}
defer os.Chmod(readOnlyDir, 0755) // Restore permissions for cleanup
probeFilePath := filepath.Join(readOnlyDir, ".permission-probe")
if err := os.WriteFile(probeFilePath, []byte("probe"), 0600); err == nil {
os.Remove(probeFilePath)
t.Skip("filesystem does not enforce read-only directory permissions")
}
path := filepath.Join(readOnlyDir, "test.json")
repo := NewFileRepository[testData](path)
@ -301,7 +295,7 @@ func TestFileRepository_SaveToReadOnlyDirectory_Bad(t *testing.T) {
}
}
func TestFileRepository_DeleteNonExistent_Good(t *testing.T) {
func TestFileRepository_DeleteNonExistent(t *testing.T) {
tmpDir := t.TempDir()
path := filepath.Join(tmpDir, "nonexistent.json")
repo := NewFileRepository[testData](path)
@ -312,7 +306,7 @@ func TestFileRepository_DeleteNonExistent_Good(t *testing.T) {
}
}
func TestFileRepository_ExistsOnInvalidPath_Bad(t *testing.T) {
func TestFileRepository_ExistsOnInvalidPath(t *testing.T) {
// Use a path that definitely doesn't exist
repo := NewFileRepository[testData]("/nonexistent/path/to/file.json")
@ -321,7 +315,7 @@ func TestFileRepository_ExistsOnInvalidPath_Bad(t *testing.T) {
}
}
func TestFileRepository_ConcurrentUpdates_Ugly(t *testing.T) {
func TestFileRepository_ConcurrentUpdates(t *testing.T) {
tmpDir := t.TempDir()
path := filepath.Join(tmpDir, "concurrent.json")
repo := NewFileRepository[testData](path, WithDefaults(func() testData {
@ -360,9 +354,8 @@ func TestFileRepository_ConcurrentUpdates_Ugly(t *testing.T) {
}
}
// repo := NewFileRepository[[]item](path, WithDefaults(func() []item { return []item{} }))
// repo.Save(items); repo.Update(func(data *[]item) error { *data = append(*data, item{...}); return nil })
func TestFileRepository_SliceData_Good(t *testing.T) {
// Test with slice data
func TestFileRepository_SliceData(t *testing.T) {
type item struct {
ID string `json:"id"`
Name string `json:"name"`

File diff suppressed because it is too large Load diff

View file

@ -4,15 +4,13 @@ import (
"context"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/gin-gonic/gin"
)
// mock := &MockMiner{GetNameFunc: func() string { return "test" }}
// mock.GetName() == "test"
// MockMiner is a mock implementation of the Miner interface for testing.
type MockMiner struct {
InstallFunc func() error
UninstallFunc func() error
@ -58,8 +56,7 @@ func (m *MockMiner) ReduceHashrateHistory(now time.Time) { m.ReduceHashrateHist
func (m *MockMiner) GetLogs() []string { return m.GetLogsFunc() }
func (m *MockMiner) WriteStdin(input string) error { return m.WriteStdinFunc(input) }
// mock := &MockManager{ListMinersFunc: func() []Miner { return nil }}
// mock.ListMiners()
// MockManager is a mock implementation of the Manager for testing.
type MockManager struct {
ListMinersFunc func() []Miner
ListAvailableMinersFunc func() []AvailableMiner
@ -119,77 +116,81 @@ func setupTestRouter() (*gin.Engine, *MockManager) {
return router, mockManager
}
func TestService_HandleListMiners_Good(t *testing.T) {
func TestHandleListMiners(t *testing.T) {
router, mockManager := setupTestRouter()
mockManager.ListMinersFunc = func() []Miner {
return []Miner{&XMRigMiner{BaseMiner: BaseMiner{Name: "test-miner"}}}
}
request, _ := http.NewRequest("GET", "/miners", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req, _ := http.NewRequest("GET", "/miners", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, recorder.Code)
if w.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, w.Code)
}
}
func TestService_HandleGetInfo_Good(t *testing.T) {
func TestHandleGetInfo(t *testing.T) {
router, _ := setupTestRouter()
request, _ := http.NewRequest("GET", "/info", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
// Case 1: Successful response
req, _ := http.NewRequest("GET", "/info", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, recorder.Code)
if w.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, w.Code)
}
}
func TestService_HandleDoctor_Good(t *testing.T) {
func TestHandleDoctor(t *testing.T) {
router, mockManager := setupTestRouter()
mockManager.ListAvailableMinersFunc = func() []AvailableMiner {
return []AvailableMiner{{Name: "xmrig"}}
}
request, _ := http.NewRequest("POST", "/doctor", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
// Case 1: Successful response
req, _ := http.NewRequest("POST", "/doctor", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, recorder.Code)
if w.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, w.Code)
}
}
func TestService_HandleInstallMiner_Good(t *testing.T) {
func TestHandleInstallMiner(t *testing.T) {
router, _ := setupTestRouter()
request, _ := http.NewRequest("POST", "/miners/xmrig/install", nil)
request.Header.Set("Content-Type", "application/json")
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
// Test installing a miner
req, _ := http.NewRequest("POST", "/miners/xmrig/install", nil)
req.Header.Set("Content-Type", "application/json")
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK && recorder.Code != http.StatusInternalServerError {
t.Errorf("expected status 200 or 500, got %d", recorder.Code)
// Installation endpoint should be accessible
if w.Code != http.StatusOK && w.Code != http.StatusInternalServerError {
t.Errorf("expected status 200 or 500, got %d", w.Code)
}
}
func TestService_HandleStopMiner_Good(t *testing.T) {
func TestHandleStopMiner(t *testing.T) {
router, mockManager := setupTestRouter()
mockManager.StopMinerFunc = func(ctx context.Context, minerName string) error {
return nil
}
request, _ := http.NewRequest("DELETE", "/miners/test-miner", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req, _ := http.NewRequest("DELETE", "/miners/test-miner", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, recorder.Code)
if w.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, w.Code)
}
}
func TestService_HandleGetMinerStats_Good(t *testing.T) {
func TestHandleGetMinerStats(t *testing.T) {
router, mockManager := setupTestRouter()
mockManager.GetMinerFunc = func(minerName string) (Miner, error) {
return &MockMiner{
@ -200,64 +201,26 @@ func TestService_HandleGetMinerStats_Good(t *testing.T) {
}, nil
}
request, _ := http.NewRequest("GET", "/miners/test-miner/stats", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req, _ := http.NewRequest("GET", "/miners/test-miner/stats", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, recorder.Code)
if w.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, w.Code)
}
}
func TestService_HandleGetMinerHashrateHistory_Good(t *testing.T) {
func TestHandleGetMinerHashrateHistory(t *testing.T) {
router, mockManager := setupTestRouter()
mockManager.GetMinerHashrateHistoryFunc = func(minerName string) ([]HashratePoint, error) {
return []HashratePoint{{Timestamp: time.Now(), Hashrate: 100}}, nil
}
request, _ := http.NewRequest("GET", "/miners/test-miner/hashrate-history", nil)
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
req, _ := http.NewRequest("GET", "/miners/test-miner/hashrate-history", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
if recorder.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, recorder.Code)
}
}
func TestService_GenerateRequestID_Good(t *testing.T) {
firstRequestID := generateRequestID()
secondRequestID := generateRequestID()
if firstRequestID == secondRequestID {
t.Fatalf("expected unique request IDs, got %q twice", firstRequestID)
}
for _, requestID := range []string{firstRequestID, secondRequestID} {
parts := strings.Split(requestID, "-")
if len(parts) != 2 {
t.Fatalf("expected request ID format timestamp-randomhex, got %q", requestID)
}
if len(parts[1]) != 16 {
t.Fatalf("expected 16 hex characters, got %q", parts[1])
}
}
}
func TestService_RequestIDMiddleware_Good(t *testing.T) {
gin.SetMode(gin.TestMode)
router := gin.New()
router.Use(requestIDMiddleware())
router.GET("/health", func(context *gin.Context) {
context.Status(http.StatusOK)
})
request, _ := http.NewRequest(http.MethodGet, "/health", nil)
request.Header.Set("X-Request-ID", "user-supplied-request-id")
responseRecorder := httptest.NewRecorder()
router.ServeHTTP(responseRecorder, request)
if responseRecorder.Header().Get("X-Request-ID") != "user-supplied-request-id" {
t.Fatalf("expected middleware to preserve request ID header, got %q", responseRecorder.Header().Get("X-Request-ID"))
if w.Code != http.StatusOK {
t.Errorf("expected status %d, got %d", http.StatusOK, w.Code)
}
}

View file

@ -2,6 +2,7 @@ package mining
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
@ -11,8 +12,7 @@ import (
const settingsFileName = "settings.json"
// state := settingsManager.GetWindowState()
// if state.Maximized { w.SetSize(state.Width, state.Height) }
// WindowState stores the last window position and size
type WindowState struct {
X int `json:"x"`
Y int `json:"y"`
@ -21,7 +21,7 @@ type WindowState struct {
Maximized bool `json:"maximized"`
}
// settingsManager.SetMinerDefaults(MinerDefaults{DefaultPool: "pool.minexmr.com:4444", CPUMaxThreadsHint: 50})
// MinerDefaults stores default configuration for miners
type MinerDefaults struct {
DefaultPool string `json:"defaultPool,omitempty"`
DefaultWallet string `json:"defaultWallet,omitempty"`
@ -30,8 +30,7 @@ type MinerDefaults struct {
CPUThrottleThreshold int `json:"cpuThrottleThreshold,omitempty"` // Throttle when CPU exceeds this %
}
// settings := settingsManager.Get()
// if settings.Theme == "dark" { applyDarkMode() }
// AppSettings stores application-wide settings
type AppSettings struct {
// Window settings
Window WindowState `json:"window"`
@ -59,8 +58,7 @@ type AppSettings struct {
Theme string `json:"theme"` // "light", "dark", "system"
}
// settings := DefaultSettings()
// settings.Theme = "dark"
// DefaultSettings returns sensible defaults for app settings
func DefaultSettings() *AppSettings {
return &AppSettings{
Window: WindowState{
@ -87,47 +85,45 @@ func DefaultSettings() *AppSettings {
}
}
// settingsManager, err := NewSettingsManager()
// settings := settingsManager.Get()
// SettingsManager handles loading and saving app settings
type SettingsManager struct {
mutex sync.RWMutex
mu sync.RWMutex
settings *AppSettings
settingsPath string
}
// settingsManager, err := NewSettingsManager()
// if err != nil { return err }
// NewSettingsManager creates a new settings manager
func NewSettingsManager() (*SettingsManager, error) {
settingsPath, err := xdg.ConfigFile(filepath.Join("lethean-desktop", settingsFileName))
if err != nil {
return nil, ErrInternal("could not resolve settings path").WithCause(err)
return nil, fmt.Errorf("could not resolve settings path: %w", err)
}
settingsManager := &SettingsManager{
sm := &SettingsManager{
settings: DefaultSettings(),
settingsPath: settingsPath,
}
if err := settingsManager.Load(); err != nil {
if err := sm.Load(); err != nil {
// If file doesn't exist, use defaults and save them
if os.IsNotExist(err) {
if saveErr := settingsManager.Save(); saveErr != nil {
return nil, ErrInternal("could not save default settings").WithCause(saveErr)
if saveErr := sm.Save(); saveErr != nil {
return nil, fmt.Errorf("could not save default settings: %w", saveErr)
}
} else {
return nil, ErrInternal("could not load settings").WithCause(err)
return nil, fmt.Errorf("could not load settings: %w", err)
}
}
return settingsManager, nil
return sm, nil
}
// if err := settingsManager.Load(); err != nil && !os.IsNotExist(err) { return err }
func (settingsManager *SettingsManager) Load() error {
settingsManager.mutex.Lock()
defer settingsManager.mutex.Unlock()
// Load reads settings from disk
func (sm *SettingsManager) Load() error {
sm.mu.Lock()
defer sm.mu.Unlock()
data, err := os.ReadFile(settingsManager.settingsPath)
data, err := os.ReadFile(sm.settingsPath)
if err != nil {
return err
}
@ -137,94 +133,93 @@ func (settingsManager *SettingsManager) Load() error {
return err
}
settingsManager.settings = &settings
sm.settings = &settings
return nil
}
// if err := settingsManager.Save(); err != nil { return err }
func (settingsManager *SettingsManager) Save() error {
settingsManager.mutex.Lock()
defer settingsManager.mutex.Unlock()
// Save writes settings to disk
func (sm *SettingsManager) Save() error {
sm.mu.Lock()
defer sm.mu.Unlock()
data, err := json.MarshalIndent(settingsManager.settings, "", " ")
data, err := json.MarshalIndent(sm.settings, "", " ")
if err != nil {
return err
}
return os.WriteFile(settingsManager.settingsPath, data, 0600)
return os.WriteFile(sm.settingsPath, data, 0600)
}
// settings := settingsManager.Get()
// if settings.Theme == "dark" { ... }
func (settingsManager *SettingsManager) Get() *AppSettings {
settingsManager.mutex.RLock()
defer settingsManager.mutex.RUnlock()
// Get returns a copy of the current settings
func (sm *SettingsManager) Get() *AppSettings {
sm.mu.RLock()
defer sm.mu.RUnlock()
settingsCopy := *settingsManager.settings
return &settingsCopy
// Return a copy to prevent concurrent modification
copy := *sm.settings
return &copy
}
// settingsManager.Update(func(settings *AppSettings) { settings.Theme = "dark" })
func (settingsManager *SettingsManager) Update(modifier func(*AppSettings)) error {
settingsManager.mutex.Lock()
defer settingsManager.mutex.Unlock()
// Update applies changes to settings and saves
func (sm *SettingsManager) Update(fn func(*AppSettings)) error {
sm.mu.Lock()
defer sm.mu.Unlock()
modifier(settingsManager.settings)
fn(sm.settings)
data, err := json.MarshalIndent(settingsManager.settings, "", " ")
data, err := json.MarshalIndent(sm.settings, "", " ")
if err != nil {
return err
}
return os.WriteFile(settingsManager.settingsPath, data, 0600)
return os.WriteFile(sm.settingsPath, data, 0600)
}
// settingsManager.UpdateWindowState(100, 200, 1400, 900, false)
func (settingsManager *SettingsManager) UpdateWindowState(x, y, width, height int, maximized bool) error {
return settingsManager.Update(func(settings *AppSettings) {
settings.Window.X = x
settings.Window.Y = y
settings.Window.Width = width
settings.Window.Height = height
settings.Window.Maximized = maximized
// UpdateWindowState saves the current window state
func (sm *SettingsManager) UpdateWindowState(x, y, width, height int, maximized bool) error {
return sm.Update(func(s *AppSettings) {
s.Window.X = x
s.Window.Y = y
s.Window.Width = width
s.Window.Height = height
s.Window.Maximized = maximized
})
}
// state := settingsManager.GetWindowState()
// if state.Maximized { ... }
func (settingsManager *SettingsManager) GetWindowState() WindowState {
settingsManager.mutex.RLock()
defer settingsManager.mutex.RUnlock()
return settingsManager.settings.Window
// GetWindowState returns the saved window state
func (sm *SettingsManager) GetWindowState() WindowState {
sm.mu.RLock()
defer sm.mu.RUnlock()
return sm.settings.Window
}
// if err := settingsManager.SetStartOnBoot(true); err != nil { return err }
func (settingsManager *SettingsManager) SetStartOnBoot(enabled bool) error {
return settingsManager.Update(func(settings *AppSettings) {
settings.StartOnBoot = enabled
// SetStartOnBoot enables/disables start on boot
func (sm *SettingsManager) SetStartOnBoot(enabled bool) error {
return sm.Update(func(s *AppSettings) {
s.StartOnBoot = enabled
})
}
// if err := settingsManager.SetAutostartMiners(true); err != nil { return err }
func (settingsManager *SettingsManager) SetAutostartMiners(enabled bool) error {
return settingsManager.Update(func(settings *AppSettings) {
settings.AutostartMiners = enabled
// SetAutostartMiners enables/disables miner autostart
func (sm *SettingsManager) SetAutostartMiners(enabled bool) error {
return sm.Update(func(s *AppSettings) {
s.AutostartMiners = enabled
})
}
// if err := settingsManager.SetCPUThrottle(true, 70); err != nil { return err }
func (settingsManager *SettingsManager) SetCPUThrottle(enabled bool, percent int) error {
return settingsManager.Update(func(settings *AppSettings) {
settings.EnableCPUThrottle = enabled
// SetCPUThrottle configures CPU throttling
func (sm *SettingsManager) SetCPUThrottle(enabled bool, percent int) error {
return sm.Update(func(s *AppSettings) {
s.EnableCPUThrottle = enabled
if percent > 0 && percent <= 100 {
settings.CPUThrottlePercent = percent
s.CPUThrottlePercent = percent
}
})
}
// settingsManager.SetMinerDefaults(MinerDefaults{DefaultPool: "pool.minexmr.com:4444", CPUMaxThreadsHint: 50})
func (settingsManager *SettingsManager) SetMinerDefaults(defaults MinerDefaults) error {
return settingsManager.Update(func(settings *AppSettings) {
settings.MinerDefaults = defaults
// SetMinerDefaults updates default miner configuration
func (sm *SettingsManager) SetMinerDefaults(defaults MinerDefaults) error {
return sm.Update(func(s *AppSettings) {
s.MinerDefaults = defaults
})
}

View file

@ -6,7 +6,7 @@ import (
"testing"
)
func TestSettingsManager_DefaultSettings_Good(t *testing.T) {
func TestSettingsManager_DefaultSettings(t *testing.T) {
defaults := DefaultSettings()
if defaults.Window.Width != 1400 {
@ -26,83 +26,78 @@ func TestSettingsManager_DefaultSettings_Good(t *testing.T) {
}
}
func TestSettingsManager_SaveAndLoad_Good(t *testing.T) {
func TestSettingsManager_SaveAndLoad(t *testing.T) {
// Use a temp directory for testing
tmpDir := t.TempDir()
settingsPath := filepath.Join(tmpDir, "settings.json")
settingsManager := &SettingsManager{
// Create settings manager with custom path
sm := &SettingsManager{
settings: DefaultSettings(),
settingsPath: settingsPath,
}
settingsManager.settings.Window.Width = 1920
settingsManager.settings.Window.Height = 1080
settingsManager.settings.StartOnBoot = true
settingsManager.settings.AutostartMiners = true
settingsManager.settings.CPUThrottlePercent = 50
// Modify settings
sm.settings.Window.Width = 1920
sm.settings.Window.Height = 1080
sm.settings.StartOnBoot = true
sm.settings.AutostartMiners = true
sm.settings.CPUThrottlePercent = 50
err := settingsManager.Save()
// Save
err := sm.Save()
if err != nil {
t.Fatalf("Failed to save settings: %v", err)
}
// Verify file exists
if _, err := os.Stat(settingsPath); os.IsNotExist(err) {
t.Fatal("Settings file was not created")
}
loadedSettingsManager := &SettingsManager{
// Create new manager and load
sm2 := &SettingsManager{
settings: DefaultSettings(),
settingsPath: settingsPath,
}
err = loadedSettingsManager.Load()
err = sm2.Load()
if err != nil {
t.Fatalf("Failed to load settings: %v", err)
}
if loadedSettingsManager.settings.Window.Width != 1920 {
t.Errorf("Expected width 1920, got %d", loadedSettingsManager.settings.Window.Width)
// Verify loaded values
if sm2.settings.Window.Width != 1920 {
t.Errorf("Expected width 1920, got %d", sm2.settings.Window.Width)
}
if loadedSettingsManager.settings.Window.Height != 1080 {
t.Errorf("Expected height 1080, got %d", loadedSettingsManager.settings.Window.Height)
if sm2.settings.Window.Height != 1080 {
t.Errorf("Expected height 1080, got %d", sm2.settings.Window.Height)
}
if !loadedSettingsManager.settings.StartOnBoot {
if !sm2.settings.StartOnBoot {
t.Error("Expected StartOnBoot to be true")
}
if !loadedSettingsManager.settings.AutostartMiners {
if !sm2.settings.AutostartMiners {
t.Error("Expected AutostartMiners to be true")
}
if loadedSettingsManager.settings.CPUThrottlePercent != 50 {
t.Errorf("Expected CPUThrottlePercent 50, got %d", loadedSettingsManager.settings.CPUThrottlePercent)
if sm2.settings.CPUThrottlePercent != 50 {
t.Errorf("Expected CPUThrottlePercent 50, got %d", sm2.settings.CPUThrottlePercent)
}
}
// Loading settings from a nonexistent path must surface an error.
func TestSettingsManager_SaveAndLoad_Bad(t *testing.T) {
	missingPath := filepath.Join(t.TempDir(), "does_not_exist", "settings.json")
	sm := &SettingsManager{
		settings:     DefaultSettings(),
		settingsPath: missingPath,
	}
	err := sm.Load()
	if err == nil {
		t.Error("Expected error loading from missing path, got nil")
	}
}
func TestSettingsManager_UpdateWindowState_Good(t *testing.T) {
func TestSettingsManager_UpdateWindowState(t *testing.T) {
tmpDir := t.TempDir()
settingsPath := filepath.Join(tmpDir, "settings.json")
settingsManager := &SettingsManager{
sm := &SettingsManager{
settings: DefaultSettings(),
settingsPath: settingsPath,
}
err := settingsManager.UpdateWindowState(100, 200, 800, 600, false)
err := sm.UpdateWindowState(100, 200, 800, 600, false)
if err != nil {
t.Fatalf("Failed to update window state: %v", err)
}
state := settingsManager.GetWindowState()
state := sm.GetWindowState()
if state.X != 100 {
t.Errorf("Expected X 100, got %d", state.X)
}
@ -117,63 +112,50 @@ func TestSettingsManager_UpdateWindowState_Good(t *testing.T) {
}
}
func TestSettingsManager_SetCPUThrottle_Good(t *testing.T) {
func TestSettingsManager_SetCPUThrottle(t *testing.T) {
tmpDir := t.TempDir()
settingsPath := filepath.Join(tmpDir, "settings.json")
settingsManager := &SettingsManager{
sm := &SettingsManager{
settings: DefaultSettings(),
settingsPath: settingsPath,
}
err := settingsManager.SetCPUThrottle(true, 30)
// Test enabling throttle
err := sm.SetCPUThrottle(true, 30)
if err != nil {
t.Fatalf("Failed to set CPU throttle: %v", err)
}
settings := settingsManager.Get()
settings := sm.Get()
if !settings.EnableCPUThrottle {
t.Error("Expected EnableCPUThrottle to be true")
}
if settings.CPUThrottlePercent != 30 {
t.Errorf("Expected CPUThrottlePercent 30, got %d", settings.CPUThrottlePercent)
}
// Test invalid percentage (should be ignored)
err = sm.SetCPUThrottle(true, 150)
if err != nil {
t.Fatalf("Failed to set CPU throttle: %v", err)
}
settings = sm.Get()
if settings.CPUThrottlePercent != 30 { // Should remain unchanged
t.Errorf("Expected CPUThrottlePercent to remain 30, got %d", settings.CPUThrottlePercent)
}
}
func TestSettingsManager_SetCPUThrottle_Bad(t *testing.T) {
// An out-of-range percentage (>100) must be ignored, leaving the prior value unchanged.
func TestSettingsManager_SetMinerDefaults(t *testing.T) {
tmpDir := t.TempDir()
settingsPath := filepath.Join(tmpDir, "settings.json")
settingsManager := &SettingsManager{
sm := &SettingsManager{
settings: DefaultSettings(),
settingsPath: settingsPath,
}
if err := settingsManager.SetCPUThrottle(true, 30); err != nil {
t.Fatalf("Setup: failed to set initial throttle: %v", err)
}
if err := settingsManager.SetCPUThrottle(true, 150); err != nil {
t.Fatalf("Expected no error on invalid percent, got: %v", err)
}
settings := settingsManager.Get()
if settings.CPUThrottlePercent != 30 {
t.Errorf("Expected CPUThrottlePercent to remain 30 after invalid input, got %d", settings.CPUThrottlePercent)
}
}
func TestSettingsManager_SetMinerDefaults_Good(t *testing.T) {
tmpDir := t.TempDir()
settingsPath := filepath.Join(tmpDir, "settings.json")
settingsManager := &SettingsManager{
settings: DefaultSettings(),
settingsPath: settingsPath,
}
minerDefaults := MinerDefaults{
defaults := MinerDefaults{
DefaultPool: "stratum+tcp://pool.example.com:3333",
DefaultWallet: "wallet123",
DefaultAlgorithm: "rx/0",
@ -181,12 +163,12 @@ func TestSettingsManager_SetMinerDefaults_Good(t *testing.T) {
CPUThrottleThreshold: 90,
}
err := settingsManager.SetMinerDefaults(minerDefaults)
err := sm.SetMinerDefaults(defaults)
if err != nil {
t.Fatalf("Failed to set miner defaults: %v", err)
}
settings := settingsManager.Get()
settings := sm.Get()
if settings.MinerDefaults.DefaultPool != "stratum+tcp://pool.example.com:3333" {
t.Errorf("Expected pool to be set, got %s", settings.MinerDefaults.DefaultPool)
}
@ -195,32 +177,34 @@ func TestSettingsManager_SetMinerDefaults_Good(t *testing.T) {
}
}
func TestSettingsManager_ConcurrentAccess_Ugly(t *testing.T) {
// Ugly: concurrent reads and writes must not race or corrupt state.
func TestSettingsManager_ConcurrentAccess(t *testing.T) {
tmpDir := t.TempDir()
settingsPath := filepath.Join(tmpDir, "settings.json")
settingsManager := &SettingsManager{
sm := &SettingsManager{
settings: DefaultSettings(),
settingsPath: settingsPath,
}
// Concurrent reads and writes
done := make(chan bool)
for i := 0; i < 10; i++ {
go func(n int) {
for j := 0; j < 100; j++ {
_ = settingsManager.Get()
settingsManager.UpdateWindowState(n*10, n*10, 800+n, 600+n, false)
_ = sm.Get()
sm.UpdateWindowState(n*10, n*10, 800+n, 600+n, false)
}
done <- true
}(i)
}
// Wait for all goroutines
for i := 0; i < 10; i++ {
<-done
}
state := settingsManager.GetWindowState()
// Should complete without race conditions
state := sm.GetWindowState()
if state.Width < 800 || state.Width > 900 {
t.Errorf("Unexpected width after concurrent access: %d", state.Width)
}

View file

@ -2,17 +2,17 @@ package mining
import (
"context"
"fmt"
"math"
"math/rand"
"strconv"
"sync"
"time"
)
// factory.Register(MinerTypeSimulated, func() Miner { return NewSimulatedMiner(SimulatedMinerConfig{}) })
// MinerTypeSimulated is the type identifier for simulated miners.
const MinerTypeSimulated = "simulated"
// miner := &SimulatedMiner{Name: "sim-001", MinerType: MinerTypeSimulated, Algorithm: "rx/0"}
// SimulatedMiner is a mock miner that generates realistic-looking stats for UI testing.
type SimulatedMiner struct {
// Exported fields for JSON serialization
Name string `json:"name"`
@ -36,24 +36,23 @@ type SimulatedMiner struct {
shares int
rejected int
logs []string
mutex sync.RWMutex
stopChannel chan struct{}
mu sync.RWMutex
stopChan chan struct{}
poolName string
difficultyBase int
}
// SimulatedMinerConfig{Name: "sim-001", Algorithm: "rx/0", BaseHashrate: 5000, Variance: 0.1}
// SimulatedMinerConfig holds configuration for creating a simulated miner.
type SimulatedMinerConfig struct {
Name string // Miner instance name such as `sim-xmrig-001`.
Algorithm string // Algorithm name such as `rx/0`, `kawpow`, or `ethash`.
Name string // Miner instance name (e.g., "sim-xmrig-001")
Algorithm string // Algorithm name (e.g., "rx/0", "kawpow", "ethash")
BaseHashrate int // Base hashrate in H/s
Variance float64 // Variance as percentage (0.0-0.2 for 20% variance)
PoolName string // Simulated pool name
Difficulty int // Base difficulty
}
// miner := NewSimulatedMiner(SimulatedMinerConfig{Name: "sim-001", Algorithm: "rx/0", BaseHashrate: 5000})
// miner.Start(ctx)
// NewSimulatedMiner creates a new simulated miner instance.
func NewSimulatedMiner(config SimulatedMinerConfig) *SimulatedMiner {
if config.Variance <= 0 {
config.Variance = 0.1 // Default 10% variance
@ -83,43 +82,42 @@ func NewSimulatedMiner(config SimulatedMinerConfig) *SimulatedMiner {
}
}
// GetType returns the miner type identifier, e.g. "simulated".
func (m *SimulatedMiner) GetType() string {
	return m.MinerType
}
// Install is a no-op for simulated miners; it always returns nil.
func (m *SimulatedMiner) Install() error {
	return nil
}
// Uninstall is a no-op for simulated miners; it always returns nil.
func (m *SimulatedMiner) Uninstall() error {
	return nil
}
// if err := miner.Start(config); err != nil { /* already running */ }
// Start begins the simulated mining process.
func (m *SimulatedMiner) Start(config *Config) error {
m.mutex.Lock()
m.mu.Lock()
if m.Running {
m.mutex.Unlock()
return ErrMinerExists(m.Name)
m.mu.Unlock()
return fmt.Errorf("simulated miner %s is already running", m.Name)
}
m.Running = true
m.startTime = time.Now()
m.shares = 0
m.rejected = 0
m.stopChannel = make(chan struct{})
m.stopChan = make(chan struct{})
m.HashrateHistory = make([]HashratePoint, 0)
m.LowResHistory = make([]HashratePoint, 0)
timestamp := time.Now().Format("15:04:05")
m.logs = []string{
"[" + timestamp + "] Simulated miner starting...",
"[" + timestamp + "] Connecting to " + m.poolName,
"[" + timestamp + "] Pool connected, algorithm: " + m.Algorithm,
fmt.Sprintf("[%s] Simulated miner starting...", time.Now().Format("15:04:05")),
fmt.Sprintf("[%s] Connecting to %s", time.Now().Format("15:04:05"), m.poolName),
fmt.Sprintf("[%s] Pool connected, algorithm: %s", time.Now().Format("15:04:05"), m.Algorithm),
}
m.mutex.Unlock()
m.mu.Unlock()
// Start background simulation
go m.runSimulation()
@ -127,22 +125,23 @@ func (m *SimulatedMiner) Start(config *Config) error {
return nil
}
// if err := miner.Stop(); err != nil { /* miner was not running */ }
// Stop stops the simulated miner.
func (m *SimulatedMiner) Stop() error {
m.mutex.Lock()
defer m.mutex.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
if !m.Running {
return ErrMinerNotRunning(m.Name)
return fmt.Errorf("simulated miner %s is not running", m.Name)
}
close(m.stopChannel)
close(m.stopChan)
m.Running = false
m.logs = append(m.logs, "["+time.Now().Format("15:04:05")+"] Miner stopped")
m.logs = append(m.logs, fmt.Sprintf("[%s] Miner stopped", time.Now().Format("15:04:05")))
return nil
}
// runSimulation runs the background simulation loop.
func (m *SimulatedMiner) runSimulation() {
ticker := time.NewTicker(HighResolutionInterval)
defer ticker.Stop()
@ -152,7 +151,7 @@ func (m *SimulatedMiner) runSimulation() {
for {
select {
case <-m.stopChannel:
case <-m.stopChan:
return
case <-ticker.C:
m.updateHashrate()
@ -164,9 +163,10 @@ func (m *SimulatedMiner) runSimulation() {
}
}
// updateHashrate generates a new hashrate value with realistic variation.
func (m *SimulatedMiner) updateHashrate() {
m.mutex.Lock()
defer m.mutex.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
// Generate hashrate with variance and smooth transitions
now := time.Now()
@ -243,18 +243,19 @@ func (m *SimulatedMiner) updateHashrate() {
}
}
// simulateShare simulates finding a share.
func (m *SimulatedMiner) simulateShare() {
m.mutex.Lock()
defer m.mutex.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
// 2% chance of rejected share
if rand.Float64() < 0.02 {
m.rejected++
m.logs = append(m.logs, "["+time.Now().Format("15:04:05")+"] Share rejected (stale)")
m.logs = append(m.logs, fmt.Sprintf("[%s] Share rejected (stale)", time.Now().Format("15:04:05")))
} else {
m.shares++
diff := m.difficultyBase + rand.Intn(m.difficultyBase/2)
m.logs = append(m.logs, "["+time.Now().Format("15:04:05")+"] Share accepted ("+strconv.Itoa(m.shares)+"/"+strconv.Itoa(m.rejected)+") diff "+strconv.Itoa(diff))
m.logs = append(m.logs, fmt.Sprintf("[%s] Share accepted (%d/%d) diff %d", time.Now().Format("15:04:05"), m.shares, m.rejected, diff))
}
// Keep last 100 log lines
@ -263,15 +264,13 @@ func (m *SimulatedMiner) simulateShare() {
}
}
// metrics, err := miner.GetStats(ctx)
// _ = metrics.Hashrate // current H/s
// _ = metrics.Shares // accepted share count
// GetStats returns current performance metrics.
func (m *SimulatedMiner) GetStats(ctx context.Context) (*PerformanceMetrics, error) {
m.mutex.RLock()
defer m.mutex.RUnlock()
m.mu.RLock()
defer m.mu.RUnlock()
if !m.Running {
return nil, ErrMinerNotRunning(m.Name)
return nil, fmt.Errorf("simulated miner %s is not running", m.Name)
}
// Calculate current hashrate from recent history
@ -304,22 +303,22 @@ func (m *SimulatedMiner) GetStats(ctx context.Context) (*PerformanceMetrics, err
}, nil
}
// GetName returns the miner instance name, e.g. "sim-xmrig-001".
func (m *SimulatedMiner) GetName() string {
	return m.Name
}
// GetPath returns the simulated installation path, e.g. "/simulated/miner".
func (m *SimulatedMiner) GetPath() string {
	return m.Path
}
// GetBinaryPath returns the simulated binary path,
// e.g. "/simulated/miner/sim-miner".
func (m *SimulatedMiner) GetBinaryPath() string {
	return m.MinerBinary
}
// details, _ := miner.CheckInstallation() // always reports IsInstalled: true
// CheckInstallation returns simulated installation details.
func (m *SimulatedMiner) CheckInstallation() (*InstallationDetails, error) {
return &InstallationDetails{
IsInstalled: true,
@ -330,32 +329,32 @@ func (m *SimulatedMiner) CheckInstallation() (*InstallationDetails, error) {
}, nil
}
// GetLatestVersion returns a fixed simulated version string. Unlike real
// miner implementations it performs no network call and never returns an
// error.
func (m *SimulatedMiner) GetLatestVersion() (string, error) {
	return "1.0.0-simulated", nil
}
// points := miner.GetHashrateHistory() // snapshot of high-res window (last 5 min)
// GetHashrateHistory returns the hashrate history.
func (m *SimulatedMiner) GetHashrateHistory() []HashratePoint {
m.mutex.RLock()
defer m.mutex.RUnlock()
m.mu.RLock()
defer m.mu.RUnlock()
result := make([]HashratePoint, len(m.HashrateHistory))
copy(result, m.HashrateHistory)
return result
}
// miner.AddHashratePoint(HashratePoint{Timestamp: now, Hashrate: 5000})
// AddHashratePoint adds a point to the history.
func (m *SimulatedMiner) AddHashratePoint(point HashratePoint) {
m.mutex.Lock()
defer m.mutex.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
m.HashrateHistory = append(m.HashrateHistory, point)
}
// manager.ReduceHashrateHistory(miner, time.Now())
// ReduceHashrateHistory reduces the history (called by manager).
func (m *SimulatedMiner) ReduceHashrateHistory(now time.Time) {
m.mutex.Lock()
defer m.mutex.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
// Move old high-res points to low-res
cutoff := now.Add(-HighResolutionDuration)
@ -374,8 +373,8 @@ func (m *SimulatedMiner) ReduceHashrateHistory(now time.Time) {
// Average the old points and add to low-res
if len(toMove) > 0 {
var sum int
for _, point := range toMove {
sum += point.Hashrate
for _, p := range toMove {
sum += p.Hashrate
}
avg := sum / len(toMove)
m.LowResHistory = append(m.LowResHistory, HashratePoint{
@ -395,31 +394,30 @@ func (m *SimulatedMiner) ReduceHashrateHistory(now time.Time) {
m.LowResHistory = newLowRes
}
// logs := miner.GetLogs() // capped at 100 lines, includes share accept/reject events
// GetLogs returns the simulated logs.
func (m *SimulatedMiner) GetLogs() []string {
m.mutex.RLock()
defer m.mutex.RUnlock()
m.mu.RLock()
defer m.mu.RUnlock()
result := make([]string, len(m.logs))
copy(result, m.logs)
return result
}
// if err := miner.WriteStdin("h"); err != nil { /* not running */ }
// WriteStdin simulates stdin input.
func (m *SimulatedMiner) WriteStdin(input string) error {
m.mutex.Lock()
defer m.mutex.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
if !m.Running {
return ErrMinerNotRunning(m.Name)
return fmt.Errorf("simulated miner %s is not running", m.Name)
}
m.logs = append(m.logs, "["+time.Now().Format("15:04:05")+"] stdin: "+input)
m.logs = append(m.logs, fmt.Sprintf("[%s] stdin: %s", time.Now().Format("15:04:05"), input))
return nil
}
// miner := NewSimulatedMiner(SimulatedMinerPresets["cpu-medium"])
// miner := NewSimulatedMiner(SimulatedMinerPresets["gpu-ethash"])
// SimulatedMinerPresets provides common presets for simulated miners.
var SimulatedMinerPresets = map[string]SimulatedMinerConfig{
"cpu-low": {
Algorithm: "rx/0",

View file

@ -0,0 +1,57 @@
package mining
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
)
// StatsCollector defines the interface for collecting miner statistics.
//
// It allows different miner types to implement their own stats collection
// logic while sharing common HTTP fetching infrastructure (see
// FetchJSONStats).
type StatsCollector interface {
	// CollectStats fetches and returns performance metrics from the miner.
	// Implementations should honor ctx for cancellation and timeouts.
	CollectStats(ctx context.Context) (*PerformanceMetrics, error)
}
// HTTPStatsConfig holds configuration for HTTP-based stats collection.
//
// The target URL is built as "http://{Host}:{Port}{Endpoint}"; a zero Port
// is rejected by FetchJSONStats.
type HTTPStatsConfig struct {
	Host     string // Hostname or IP address of the miner's local HTTP API
	Port     int    // API port; must be non-zero
	Endpoint string // e.g., "/2/summary" for XMRig, "/summary" for TT-Miner
}
// FetchJSONStats performs an HTTP GET request against the miner's stats API
// and decodes the JSON response into target.
//
// This is a common helper for HTTP-based miner stats collection; the caller
// must provide the target struct to decode into, matching the miner's
// summary schema.
//
// It returns an error when config.Port is zero, the request cannot be
// built, the HTTP round trip fails, the server responds with a non-200
// status, or the body is not valid JSON for target.
func FetchJSONStats[T any](ctx context.Context, config HTTPStatsConfig, target *T) error {
	if config.Port == 0 {
		return fmt.Errorf("API port is zero")
	}
	url := fmt.Sprintf("http://%s:%d%s", config.Host, config.Port, config.Endpoint)
	// Use the http.MethodGet constant rather than the raw "GET" literal.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := getHTTPClient().Do(req)
	if err != nil {
		return fmt.Errorf("HTTP request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Drain the body (explicitly discarding the copy result) so the
		// transport can reuse the underlying connection.
		_, _ = io.Copy(io.Discard, resp.Body)
		return fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}
	if err := json.NewDecoder(resp.Body).Decode(target); err != nil {
		return fmt.Errorf("failed to decode response: %w", err)
	}
	return nil
}

View file

@ -10,7 +10,7 @@ import (
"time"
)
func TestStatsCollector_FetchJSONStats_Good(t *testing.T) {
func TestFetchJSONStats(t *testing.T) {
t.Run("SuccessfulFetch", func(t *testing.T) {
// Create a test server
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -85,7 +85,7 @@ func TestStatsCollector_FetchJSONStats_Good(t *testing.T) {
})
}
func TestStatsCollector_MinerTypeRegistry_Good(t *testing.T) {
func TestMinerTypeRegistry(t *testing.T) {
t.Run("KnownTypes", func(t *testing.T) {
if !IsMinerSupported(MinerTypeXMRig) {
t.Error("xmrig should be a known miner type")
@ -112,7 +112,7 @@ func TestStatsCollector_MinerTypeRegistry_Good(t *testing.T) {
})
}
func TestStatsCollector_GetType_Good(t *testing.T) {
func TestGetType(t *testing.T) {
t.Run("XMRigMiner", func(t *testing.T) {
miner := NewXMRigMiner()
if miner.GetType() != MinerTypeXMRig {

Some files were not shown because too many files have changed in this diff Show more