fix: Address 16 security findings from parallel code review
Critical fixes (6):
- CRIT-001/002: Add safeKeyPrefix() to prevent panic on short public keys
- CRIT-003/004: Add sync.Once pattern for thread-safe singleton initialization
- CRIT-005: Harden console ANSI parser with length limits and stricter validation
- CRIT-006: Add client-side input validation for profile creation

High priority fixes (10):
- HIGH-001: Add secondary timeout in TTMiner to prevent goroutine leak
- HIGH-002: Verify atomic flag prevents timeout middleware race
- HIGH-004: Add LimitReader (100MB) to prevent decompression bombs
- HIGH-005: Add Lines parameter validation (max 10000) in worker
- HIGH-006: Add TLS 1.2+ config with secure cipher suites
- HIGH-007: Add pool URL format and wallet length validation
- HIGH-008: Add SIGHUP handling and force cleanup on Stop() failure
- HIGH-009: Add WebSocket message size limit and event type validation
- HIGH-010: Refactor to use takeUntil(destroy$) for observable cleanup
- HIGH-011: Add sanitizeErrorDetails() with debug mode control

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
parent ae68119329
commit 4072bdaf0d

15 changed files with 492 additions and 140 deletions
@@ -2,6 +2,10 @@ package cmd
 import (
 	"fmt"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
 	"time"
 
 	"github.com/Snider/Mining/pkg/node"
@@ -9,8 +13,13 @@ import (
 )
 
 var (
-	nodeManager  *node.NodeManager
-	peerRegistry *node.PeerRegistry
+	nodeManager     *node.NodeManager
+	nodeManagerOnce sync.Once
+	nodeManagerErr  error
+
+	peerRegistry     *node.PeerRegistry
+	peerRegistryOnce sync.Once
+	peerRegistryErr  error
 )
 
 // nodeCmd represents the node parent command
@@ -156,8 +165,31 @@ This allows other nodes to connect, send commands, and receive stats.`,
 		fmt.Println()
 		fmt.Println("Press Ctrl+C to stop...")
 
-		// Wait forever (or until signal)
-		select {}
+		// Set up signal handling for graceful shutdown (including SIGHUP for terminal disconnect)
+		sigChan := make(chan os.Signal, 1)
+		signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
+
+		// Wait for shutdown signal
+		sig := <-sigChan
+		fmt.Printf("\nReceived signal %v, shutting down...\n", sig)
+
+		// Graceful shutdown: stop transport and cleanup resources
+		if err := transport.Stop(); err != nil {
+			fmt.Printf("Warning: error during transport shutdown: %v\n", err)
+			// Force cleanup on Stop() failure
+			fmt.Println("Forcing resource cleanup...")
+			for _, peer := range pr.GetConnectedPeers() {
+				pr.SetConnected(peer.ID, false)
+			}
+		}
+
+		// Ensure peer registry is flushed to disk
+		if err := pr.Close(); err != nil {
+			fmt.Printf("Warning: error closing peer registry: %v\n", err)
+		}
+
+		fmt.Println("P2P server stopped.")
+		return nil
 	},
 }
@@ -217,26 +249,18 @@ func init() {
 	nodeResetCmd.Flags().BoolP("force", "f", false, "Force reset without confirmation")
 }
 
-// getNodeManager returns the singleton node manager
+// getNodeManager returns the singleton node manager (thread-safe)
 func getNodeManager() (*node.NodeManager, error) {
-	if nodeManager == nil {
-		var err error
-		nodeManager, err = node.NewNodeManager()
-		if err != nil {
-			return nil, err
-		}
-	}
-	return nodeManager, nil
+	nodeManagerOnce.Do(func() {
+		nodeManager, nodeManagerErr = node.NewNodeManager()
+	})
+	return nodeManager, nodeManagerErr
 }
 
-// getPeerRegistry returns the singleton peer registry
+// getPeerRegistry returns the singleton peer registry (thread-safe)
 func getPeerRegistry() (*node.PeerRegistry, error) {
-	if peerRegistry == nil {
-		var err error
-		peerRegistry, err = node.NewPeerRegistry()
-		if err != nil {
-			return nil, err
-		}
-	}
-	return peerRegistry, nil
+	peerRegistryOnce.Do(func() {
+		peerRegistry, peerRegistryErr = node.NewPeerRegistry()
+	})
+	return peerRegistry, peerRegistryErr
 }
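For reference on the sync.Once change above, a minimal, illustrative test sketch (not part of this commit; it assumes access to the package-level getNodeManager shown in the hunk, plus the standard sync and testing imports):

```go
// Illustrative only: exercises getNodeManager from several goroutines to show
// why sync.Once removes the previous lazy-init race.
func TestGetNodeManagerConcurrent(t *testing.T) {
    const callers = 8
    var wg sync.WaitGroup
    results := make([]*node.NodeManager, callers)

    for i := 0; i < callers; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            nm, err := getNodeManager() // NewNodeManager runs at most once
            if err != nil {
                t.Errorf("getNodeManager: %v", err)
                return
            }
            results[i] = nm
        }(i)
    }
    wg.Wait()

    // Every caller must observe the same instance.
    for i := 1; i < callers; i++ {
        if results[i] != results[0] {
            t.Fatalf("expected a single shared NodeManager instance")
        }
    }
}
```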
@@ -3,6 +3,7 @@ package cmd
 import (
 	"fmt"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/Snider/Mining/pkg/node"
@@ -10,8 +11,10 @@ import (
 )
 
 var (
-	controller *node.Controller
-	transport  *node.Transport
+	controller     *node.Controller
+	transport      *node.Transport
+	controllerOnce sync.Once
+	controllerErr  error
 )
 
 // remoteCmd represents the remote parent command
@@ -320,34 +323,32 @@ func init() {
 	remotePingCmd.Flags().IntP("count", "c", 4, "Number of pings to send")
 }
 
-// getController returns or creates the controller instance.
+// getController returns or creates the controller instance (thread-safe).
 func getController() (*node.Controller, error) {
-	if controller != nil {
-		return controller, nil
-	}
-
-	nm, err := getNodeManager()
-	if err != nil {
-		return nil, fmt.Errorf("failed to get node manager: %w", err)
-	}
-
-	if !nm.HasIdentity() {
-		return nil, fmt.Errorf("no node identity found. Run 'node init' first")
-	}
-
-	pr, err := getPeerRegistry()
-	if err != nil {
-		return nil, fmt.Errorf("failed to get peer registry: %w", err)
-	}
-
-	// Initialize transport if not done
-	if transport == nil {
-		config := node.DefaultTransportConfig()
-		transport = node.NewTransport(nm, pr, config)
-	}
-
-	controller = node.NewController(nm, pr, transport)
-	return controller, nil
+	controllerOnce.Do(func() {
+		nm, err := getNodeManager()
+		if err != nil {
+			controllerErr = fmt.Errorf("failed to get node manager: %w", err)
+			return
+		}
+
+		if !nm.HasIdentity() {
+			controllerErr = fmt.Errorf("no node identity found. Run 'node init' first")
+			return
+		}
+
+		pr, err := getPeerRegistry()
+		if err != nil {
+			controllerErr = fmt.Errorf("failed to get peer registry: %w", err)
+			return
+		}
+
+		// Initialize transport
+		config := node.DefaultTransportConfig()
+		transport = node.NewTransport(nm, pr, config)
+
+		controller = node.NewController(nm, pr, transport)
+	})
+	return controller, controllerErr
 }
 
 // findPeerByPartialID finds a peer by full or partial ID.
@@ -98,11 +98,43 @@ var serveCmd = &cobra.Command{
 				fmt.Println("Example: start xmrig stratum+tcp://pool.example.com:3333 YOUR_WALLET_ADDRESS")
 			} else {
 				minerType := cmdArgs[0]
+				pool := cmdArgs[1]
+				wallet := cmdArgs[2]
+
+				// Validate pool URL format
+				if !strings.HasPrefix(pool, "stratum+tcp://") &&
+					!strings.HasPrefix(pool, "stratum+ssl://") &&
+					!strings.HasPrefix(pool, "stratum://") {
+					fmt.Fprintf(os.Stderr, "Error: Invalid pool URL (must start with stratum+tcp://, stratum+ssl://, or stratum://)\n")
+					fmt.Print(">> ")
+					continue
+				}
+				if len(pool) > 256 {
+					fmt.Fprintf(os.Stderr, "Error: Pool URL too long (max 256 chars)\n")
+					fmt.Print(">> ")
+					continue
+				}
+
+				// Validate wallet address length
+				if len(wallet) > 256 {
+					fmt.Fprintf(os.Stderr, "Error: Wallet address too long (max 256 chars)\n")
+					fmt.Print(">> ")
+					continue
+				}
+
 				config := &mining.Config{
-					Pool:      cmdArgs[1],
-					Wallet:    cmdArgs[2],
+					Pool:      pool,
+					Wallet:    wallet,
 					LogOutput: true,
 				}
+
+				// Validate config before starting
+				if err := config.Validate(); err != nil {
+					fmt.Fprintf(os.Stderr, "Error: Invalid configuration: %v\n", err)
+					fmt.Print(">> ")
+					continue
+				}
+
 				miner, err := mgr.StartMiner(context.Background(), minerType, config)
 				if err != nil {
 					fmt.Fprintf(os.Stderr, "Error starting miner: %v\n", err)
@@ -160,6 +192,11 @@ var serveCmd = &cobra.Command{
 			}
 			fmt.Print(">> ")
 		}
+
+		// Check for scanner errors (I/O issues)
+		if err := scanner.Err(); err != nil {
+			fmt.Fprintf(os.Stderr, "Error reading input: %v\n", err)
+		}
 	}()
 
 	select {
@@ -169,6 +206,9 @@ var serveCmd = &cobra.Command{
 	case <-ctx.Done():
 	}
 
+	// Explicit cleanup of manager resources
+	mgr.Stop()
+
 	fmt.Println("Mining service stopped.")
 	return nil
 },
@@ -209,6 +209,17 @@ func (c *Config) Validate() error {
 		return fmt.Errorf("donate level must be between 0 and 100")
 	}
 
+	// CLIArgs validation - check for shell metacharacters
+	if c.CLIArgs != "" {
+		if containsShellChars(c.CLIArgs) {
+			return fmt.Errorf("CLI arguments contain invalid characters")
+		}
+		// Limit length to prevent abuse
+		if len(c.CLIArgs) > 1024 {
+			return fmt.Errorf("CLI arguments too long (max 1024 chars)")
+		}
+	}
+
 	return nil
 }
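containsShellChars is called by the new CLIArgs check but its definition is not part of this diff. A hedged sketch of what such a helper could look like (hypothetical; the character set and implementation in the repository may differ):

```go
// Hypothetical sketch only - not the repository implementation.
// Flags characters commonly abused for command injection when arguments
// could ever reach a shell. Assumes "strings" is imported.
func containsShellChars(s string) bool {
    const shellChars = ";&|`$(){}<>'\"\\\n\r!"
    return strings.ContainsAny(s, shellChars)
}
```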
@@ -124,12 +124,22 @@ func (pm *ProfileManager) UpdateProfile(profile *MiningProfile) error {
 	pm.mu.Lock()
 	defer pm.mu.Unlock()
 
-	if _, exists := pm.profiles[profile.ID]; !exists {
+	oldProfile, exists := pm.profiles[profile.ID]
+	if !exists {
 		return fmt.Errorf("profile with ID %s not found", profile.ID)
 	}
 
+	// Update in-memory state
 	pm.profiles[profile.ID] = profile
 
-	return pm.saveProfiles()
+	// Save to disk - rollback if save fails
+	if err := pm.saveProfiles(); err != nil {
+		// Restore old profile on save failure
+		pm.profiles[profile.ID] = oldProfile
+		return fmt.Errorf("failed to save profile: %w", err)
+	}
+
+	return nil
 }
 
 // DeleteProfile removes a profile by its ID.
@@ -137,10 +147,18 @@ func (pm *ProfileManager) DeleteProfile(id string) error {
 	pm.mu.Lock()
 	defer pm.mu.Unlock()
 
-	if _, exists := pm.profiles[id]; !exists {
+	profile, exists := pm.profiles[id]
+	if !exists {
 		return fmt.Errorf("profile with ID %s not found", id)
 	}
 	delete(pm.profiles, id)
 
-	return pm.saveProfiles()
+	// Save to disk - rollback if save fails
+	if err := pm.saveProfiles(); err != nil {
+		// Restore profile on save failure
+		pm.profiles[id] = profile
+		return fmt.Errorf("failed to delete profile: %w", err)
+	}
+
+	return nil
 }
@@ -12,6 +12,7 @@ import (
 	"path/filepath"
 	"runtime"
 	"strings"
+	"sync/atomic"
 	"time"
 
 	"github.com/Masterminds/semver/v3"
@@ -55,6 +56,20 @@ type APIError struct {
 	Retryable bool   `json:"retryable"` // Can the client retry?
 }
 
+// debugErrorsEnabled controls whether internal error details are exposed in API responses.
+// In production, this should be false to prevent information disclosure.
+var debugErrorsEnabled = os.Getenv("DEBUG_ERRORS") == "true" || os.Getenv("GIN_MODE") != "release"
+
+// sanitizeErrorDetails filters potentially sensitive information from error details.
+// In production mode (debugErrorsEnabled=false), returns empty string.
+func sanitizeErrorDetails(details string) string {
+	if debugErrorsEnabled {
+		return details
+	}
+	// In production, don't expose internal error details
+	return ""
+}
+
 // Error codes are defined in errors.go
 
 // respondWithError sends a structured error response
@@ -62,7 +77,7 @@ func respondWithError(c *gin.Context, status int, code string, message string, d
 	apiErr := APIError{
 		Code:      code,
 		Message:   message,
-		Details:   details,
+		Details:   sanitizeErrorDetails(details),
 		Retryable: isRetryableError(status),
 	}
@@ -103,7 +118,7 @@ func respondWithMiningError(c *gin.Context, err *MiningError) {
 	apiErr := APIError{
 		Code:       err.Code,
 		Message:    err.Message,
-		Details:    details,
+		Details:    sanitizeErrorDetails(details),
 		Suggestion: err.Suggestion,
 		Retryable:  err.Retryable,
 	}
@@ -329,11 +344,17 @@ func requestTimeoutMiddleware(timeout time.Duration) gin.HandlerFunc {
 		// Replace request context
 		c.Request = c.Request.WithContext(ctx)
 
+		// Use atomic flag to prevent race condition between handler and timeout response
+		// Only one of them should write to the response
+		var responded int32
+
 		// Channel to signal completion
 		done := make(chan struct{})
 
 		go func() {
 			c.Next()
+			// Mark that the handler has completed (and likely written a response)
+			atomic.StoreInt32(&responded, 1)
 			close(done)
 		}()
 
@@ -341,10 +362,12 @@
 		case <-done:
 			// Request completed normally
 		case <-ctx.Done():
-			// Timeout occurred
-			c.Abort()
-			respondWithError(c, http.StatusGatewayTimeout, ErrCodeTimeout,
-				"Request timed out", fmt.Sprintf("Request exceeded %s timeout", timeout))
+			// Timeout occurred - only respond if handler hasn't already
+			if atomic.CompareAndSwapInt32(&responded, 0, 1) {
+				c.Abort()
+				respondWithError(c, http.StatusGatewayTimeout, ErrCodeTimeout,
+					"Request timed out", fmt.Sprintf("Request exceeded %s timeout", timeout))
+			}
 		}
 	}
 }
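The timeout-middleware fix above hinges on atomic.CompareAndSwapInt32 guaranteeing that exactly one of the handler goroutine and the timeout branch writes a response. A self-contained sketch of that single-responder idea, stripped of gin (illustrative only, not repository code):

```go
package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    // Many goroutines race to respond; CompareAndSwapInt32 lets exactly one win.
    var responded int32
    var winners int32
    var wg sync.WaitGroup

    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            if atomic.CompareAndSwapInt32(&responded, 0, 1) {
                atomic.AddInt32(&winners, 1)
            }
        }()
    }
    wg.Wait()

    fmt.Println("responders:", winners) // always prints 1
}
```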
@@ -989,6 +1012,12 @@ func (s *Service) handleStartMinerWithProfile(c *gin.Context) {
 		return
 	}
 
+	// Validate config from profile to prevent shell injection and other issues
+	if err := config.Validate(); err != nil {
+		respondWithMiningError(c, ErrInvalidConfig("profile config validation failed").WithCause(err))
+		return
+	}
+
 	miner, err := s.Manager.StartMiner(c.Request.Context(), profile.MinerType, &config)
 	if err != nil {
 		respondWithMiningError(c, ErrStartFailed(profile.Name).WithCause(err))
@@ -1366,9 +1395,10 @@ func (s *Service) handleWebSocketEvents(c *gin.Context) {
 	}
 
 	logging.Info("new WebSocket connection", logging.Fields{"remote": c.Request.RemoteAddr})
-	RecordWSConnection(true)
-	if !s.EventHub.ServeWs(conn) {
-		RecordWSConnection(false) // Undo increment on rejection
+	// Only record connection after successful registration to avoid metrics race
+	if s.EventHub.ServeWs(conn) {
+		RecordWSConnection(true)
+	} else {
 		logging.Warn("WebSocket connection rejected", logging.Fields{"remote": c.Request.RemoteAddr, "reason": "limit reached"})
 	}
 }
@@ -90,7 +90,14 @@ func (m *TTMiner) Start(config *Config) error {
 		if cmd.Process != nil {
 			cmd.Process.Kill()
 		}
-		err = <-done // Wait for the inner goroutine to finish
+		// Wait for inner goroutine with secondary timeout to prevent leak
+		select {
+		case err = <-done:
+			// Inner goroutine completed
+		case <-time.After(10 * time.Second):
+			logging.Error("TT-Miner process cleanup timed out after kill", logging.Fields{"miner": m.Name})
+			err = nil
+		}
 	}
 
 	m.mu.Lock()
@@ -10,6 +10,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"strings"
 
 	"github.com/Snider/Borg/pkg/datanode"
 	"github.com/Snider/Borg/pkg/tim"
@@ -249,7 +250,14 @@ func createTarball(files map[string][]byte) ([]byte, error) {
 
 // extractTarball extracts a tar archive to a directory, returns first executable found.
 func extractTarball(tarData []byte, destDir string) (string, error) {
-	if err := os.MkdirAll(destDir, 0755); err != nil {
+	// Ensure destDir is an absolute, clean path for security checks
+	absDestDir, err := filepath.Abs(destDir)
+	if err != nil {
+		return "", fmt.Errorf("failed to resolve destination directory: %w", err)
+	}
+	absDestDir = filepath.Clean(absDestDir)
+
+	if err := os.MkdirAll(absDestDir, 0755); err != nil {
 		return "", err
 	}
 
@@ -265,34 +273,65 @@ func extractTarball(tarData []byte, destDir string) (string, error) {
 			return "", err
 		}
 
-		path := filepath.Join(destDir, hdr.Name)
+		// Security: Sanitize the tar entry name to prevent path traversal (Zip Slip)
+		cleanName := filepath.Clean(hdr.Name)
+
+		// Reject absolute paths
+		if filepath.IsAbs(cleanName) {
+			return "", fmt.Errorf("invalid tar entry: absolute path not allowed: %s", hdr.Name)
+		}
+
+		// Reject paths that escape the destination directory
+		if strings.HasPrefix(cleanName, ".."+string(os.PathSeparator)) || cleanName == ".." {
+			return "", fmt.Errorf("invalid tar entry: path traversal attempt: %s", hdr.Name)
+		}
+
+		// Build the full path and verify it's within destDir
+		fullPath := filepath.Join(absDestDir, cleanName)
+		fullPath = filepath.Clean(fullPath)
+
+		// Final security check: ensure the path is still within destDir
+		if !strings.HasPrefix(fullPath, absDestDir+string(os.PathSeparator)) && fullPath != absDestDir {
+			return "", fmt.Errorf("invalid tar entry: path escape attempt: %s", hdr.Name)
+		}
 
 		switch hdr.Typeflag {
 		case tar.TypeDir:
-			if err := os.MkdirAll(path, os.FileMode(hdr.Mode)); err != nil {
+			if err := os.MkdirAll(fullPath, os.FileMode(hdr.Mode)); err != nil {
 				return "", err
 			}
 		case tar.TypeReg:
 			// Ensure parent directory exists
-			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+			if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
 				return "", err
 			}
 
-			f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(hdr.Mode))
+			f, err := os.OpenFile(fullPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(hdr.Mode))
 			if err != nil {
 				return "", err
 			}
 
-			if _, err := io.Copy(f, tr); err != nil {
-				f.Close()
+			// Limit file size to prevent decompression bombs (100MB max per file)
+			const maxFileSize int64 = 100 * 1024 * 1024
+			limitedReader := io.LimitReader(tr, maxFileSize+1)
+			written, err := io.Copy(f, limitedReader)
+			f.Close()
+			if err != nil {
 				return "", err
 			}
-			f.Close()
+			if written > maxFileSize {
+				os.Remove(fullPath)
+				return "", fmt.Errorf("file %s exceeds maximum size of %d bytes", hdr.Name, maxFileSize)
+			}
 
 			// Track first executable
 			if hdr.Mode&0111 != 0 && firstExecutable == "" {
-				firstExecutable = path
+				firstExecutable = fullPath
 			}
+		// Explicitly ignore symlinks and hard links to prevent symlink attacks
+		case tar.TypeSymlink, tar.TypeLink:
+			// Skip symlinks and hard links for security
+			continue
 		}
 	}
 
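The extraction hunk above keeps entries inside the destination with a strings.HasPrefix check on cleaned absolute paths. An equivalent guard is sometimes written with filepath.Rel instead, which sidesteps prefix edge cases such as sibling directories sharing a prefix; a hedged sketch for comparison (not part of this commit):

```go
// Alternative containment check, shown only to illustrate the design choice;
// the commit itself uses the strings.HasPrefix form. Assumes "path/filepath",
// "strings", and "os" are imported.
func isWithinDir(baseDir, target string) bool {
    rel, err := filepath.Rel(baseDir, target)
    if err != nil {
        return false
    }
    // Paths inside baseDir never resolve to ".." or start with "../" after Rel.
    return rel != ".." && !strings.HasPrefix(rel, ".."+string(os.PathSeparator))
}
```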
@@ -56,6 +56,17 @@ const (
 // peerNameRegex validates peer names: alphanumeric, hyphens, underscores, and spaces
 var peerNameRegex = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9\-_ ]{0,62}[a-zA-Z0-9]$|^[a-zA-Z0-9]$`)
 
+// safeKeyPrefix returns a truncated key for logging, handling short keys safely
+func safeKeyPrefix(key string) string {
+	if len(key) >= 16 {
+		return key[:16] + "..."
+	}
+	if len(key) == 0 {
+		return "(empty)"
+	}
+	return key
+}
+
 // validatePeerName checks if a peer name is valid.
 // Peer names must be 1-64 characters, start and end with alphanumeric,
 // and contain only alphanumeric, hyphens, underscores, and spaces.
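Expected behaviour of the new helper on the inputs that previously caused a slice-out-of-range panic (an illustrative snippet, not repository code; assumes it runs inside the same package with "fmt" imported):

```go
// Illustrative only: safeKeyPrefix never slices past the end of the key.
fmt.Println(safeKeyPrefix("0123456789abcdefXYZ")) // "0123456789abcdef..."
fmt.Println(safeKeyPrefix("short"))               // "short" - no panic on short keys
fmt.Println(safeKeyPrefix(""))                    // "(empty)"
```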
@@ -156,7 +167,7 @@ func (r *PeerRegistry) AllowPublicKey(publicKey string) {
 	r.allowedPublicKeyMu.Lock()
 	defer r.allowedPublicKeyMu.Unlock()
 	r.allowedPublicKeys[publicKey] = true
-	logging.Debug("public key added to allowlist", logging.Fields{"key": publicKey[:16] + "..."})
+	logging.Debug("public key added to allowlist", logging.Fields{"key": safeKeyPrefix(publicKey)})
 }
 
 // RevokePublicKey removes a public key from the allowlist.
@@ -164,7 +175,7 @@ func (r *PeerRegistry) RevokePublicKey(publicKey string) {
 	r.allowedPublicKeyMu.Lock()
 	defer r.allowedPublicKeyMu.Unlock()
 	delete(r.allowedPublicKeys, publicKey)
-	logging.Debug("public key removed from allowlist", logging.Fields{"key": publicKey[:16] + "..."})
+	logging.Debug("public key removed from allowlist", logging.Fields{"key": safeKeyPrefix(publicKey)})
 }
 
 // IsPublicKeyAllowed checks if a public key is in the allowlist.
@@ -2,6 +2,7 @@ package node
 
 import (
 	"context"
+	"crypto/tls"
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
@@ -204,8 +205,36 @@ func (t *Transport) Start() error {
 	mux.HandleFunc(t.config.WSPath, t.handleWSUpgrade)
 
 	t.server = &http.Server{
-		Addr:    t.config.ListenAddr,
-		Handler: mux,
+		Addr:              t.config.ListenAddr,
+		Handler:           mux,
+		ReadTimeout:       30 * time.Second,
+		WriteTimeout:      30 * time.Second,
+		IdleTimeout:       60 * time.Second,
+		ReadHeaderTimeout: 10 * time.Second,
 	}
 
+	// Apply TLS hardening if TLS is enabled
+	if t.config.TLSCertPath != "" && t.config.TLSKeyPath != "" {
+		t.server.TLSConfig = &tls.Config{
+			MinVersion: tls.VersionTLS12,
+			CipherSuites: []uint16{
+				// TLS 1.3 ciphers (automatically used when available)
+				tls.TLS_AES_128_GCM_SHA256,
+				tls.TLS_AES_256_GCM_SHA384,
+				tls.TLS_CHACHA20_POLY1305_SHA256,
+				// TLS 1.2 secure ciphers
+				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+				tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+				tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+				tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+			},
+			CurvePreferences: []tls.CurveID{
+				tls.X25519,
+				tls.CurveP256,
+			},
+		}
+	}
+
 	t.wg.Add(1)
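A quick way to confirm the hardened transport from the client side is to dial it with a matching minimum TLS version and inspect what was negotiated; a minimal, illustrative program (the address is a placeholder and certificate verification details are omitted; not repository code):

```go
package main

import (
    "crypto/tls"
    "log"
)

func main() {
    // Placeholder address - substitute the node's actual listen address.
    conn, err := tls.Dial("tcp", "node.example.com:8443", &tls.Config{
        MinVersion: tls.VersionTLS12, // mirrors the server-side hardening above
    })
    if err != nil {
        log.Fatalf("tls dial: %v", err)
    }
    defer conn.Close()
    log.Printf("negotiated TLS version: 0x%x", conn.ConnectionState().Version)
}
```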
@@ -467,7 +496,7 @@ func (t *Transport) handleWSUpgrade(w http.ResponseWriter, r *http.Request) {
 		logging.Warn("peer connection rejected: not in allowlist", logging.Fields{
 			"peer_id":    payload.Identity.ID,
 			"peer_name":  payload.Identity.Name,
-			"public_key": payload.Identity.PublicKey[:16] + "...",
+			"public_key": safeKeyPrefix(payload.Identity.PublicKey),
 		})
 		// Send rejection before closing
 		identity := t.node.GetIdentity()
@@ -268,6 +268,12 @@ func (w *Worker) handleGetLogs(msg *Message) (*Message, error) {
 		return nil, fmt.Errorf("invalid get logs payload: %w", err)
 	}
 
+	// Validate and limit the Lines parameter to prevent resource exhaustion
+	const maxLogLines = 10000
+	if payload.Lines <= 0 || payload.Lines > maxLogLines {
+		payload.Lines = maxLogLines
+	}
+
 	miner, err := w.minerManager.GetMiner(payload.MinerName)
 	if err != nil {
 		return nil, fmt.Errorf("miner not found: %s", payload.MinerName)
@@ -1,8 +1,9 @@
 import { Injectable, OnDestroy, signal, computed, inject } from '@angular/core';
 import { HttpClient } from '@angular/common/http';
-import { of, forkJoin, Subscription, interval, merge } from 'rxjs';
-import { switchMap, catchError, map, tap, filter, debounceTime } from 'rxjs/operators';
+import { of, forkJoin, Subject, interval, merge } from 'rxjs';
+import { switchMap, catchError, map, tap, filter, debounceTime, takeUntil } from 'rxjs/operators';
 import { WebSocketService, MinerEventData, MinerStatsData } from './websocket.service';
+import { ApiConfigService } from './api-config.service';
 
 // --- Interfaces ---
 export interface InstallationDetails {
@@ -46,11 +47,15 @@ export interface SystemState {
   providedIn: 'root'
 })
 export class MinerService implements OnDestroy {
-  private apiBaseUrl = 'http://localhost:9090/api/v1/mining';
-  private pollingSubscription?: Subscription;
-  private wsSubscriptions: Subscription[] = [];
+  private readonly apiConfig = inject(ApiConfigService);
+  private destroy$ = new Subject<void>();
   private ws = inject(WebSocketService);
 
+  /** Get the API base URL from configuration */
+  private get apiBaseUrl(): string {
+    return this.apiConfig.apiBaseUrl;
+  }
+
   // --- State Signals ---
   public state = signal<SystemState>({
     needsSetup: false,
@@ -69,7 +74,6 @@ export class MinerService implements OnDestroy {
   // Historical hashrate data from database with configurable time range
   public historicalHashrate = signal<Map<string, HashratePoint[]>>(new Map());
   public selectedTimeRange = signal<number>(60); // Default 60 minutes
-  private historyPollingSubscription?: Subscription;
 
   // Available time ranges in minutes
   public readonly timeRanges = [
@@ -118,9 +122,9 @@
   }
 
   ngOnDestroy(): void {
-    this.stopPolling();
-    this.historyPollingSubscription?.unsubscribe();
-    this.wsSubscriptions.forEach(sub => sub.unsubscribe());
+    // Complete the destroy$ subject to trigger takeUntil cleanup for all subscriptions
+    this.destroy$.next();
+    this.destroy$.complete();
   }
 
   // --- WebSocket Event Subscriptions ---
@@ -128,28 +132,27 @@
   /**
    * Subscribe to WebSocket events for real-time updates.
    * This supplements polling with instant event-driven updates.
+   * All subscriptions use takeUntil(destroy$) for automatic cleanup.
    */
   private subscribeToWebSocketEvents(): void {
     // Listen for miner started/stopped events to refresh the miner list immediately
-    const minerLifecycleEvents = merge(
+    merge(
       this.ws.minerStarted$,
       this.ws.minerStopped$
     ).pipe(
-      debounceTime(500) // Debounce to avoid rapid-fire updates
-    ).subscribe(() => {
-      // Refresh running miners when a miner starts or stops
-      this.getRunningMiners().pipe(
-        catchError(() => of([]))
-      ).subscribe(runningMiners => {
-        this.state.update(s => ({ ...s, runningMiners }));
-        this.updateHashrateHistory(runningMiners);
-      });
-    });
-    this.wsSubscriptions.push(minerLifecycleEvents);
+      takeUntil(this.destroy$),
+      debounceTime(500), // Debounce to avoid rapid-fire updates
+      switchMap(() => this.getRunningMiners().pipe(catchError(() => of([]))))
+    ).subscribe(runningMiners => {
+      this.state.update(s => ({ ...s, runningMiners }));
+      this.updateHashrateHistory(runningMiners);
+    });
 
     // Listen for stats events to update hashrates in real-time
     // This provides more immediate updates than the 5-second polling interval
-    const statsSubscription = this.ws.minerStats$.subscribe((stats: MinerStatsData) => {
+    this.ws.minerStats$.pipe(
+      takeUntil(this.destroy$)
+    ).subscribe((stats: MinerStatsData) => {
       // Update the running miners with fresh hashrate data
       this.state.update(s => {
         const runningMiners = s.runningMiners.map(miner => {
@@ -171,14 +174,14 @@
         return { ...s, runningMiners };
       });
     });
-    this.wsSubscriptions.push(statsSubscription);
 
     // Listen for error events to show notifications
-    const errorSubscription = this.ws.minerError$.subscribe((data: MinerEventData) => {
+    this.ws.minerError$.pipe(
+      takeUntil(this.destroy$)
+    ).subscribe((data: MinerEventData) => {
       console.error(`[MinerService] Miner error for ${data.name}:`, data.error);
       // Notification can be handled by components listening to this event
     });
-    this.wsSubscriptions.push(errorSubscription);
   }
 
   // --- Data Loading and Polling Logic ---
@@ -206,9 +209,11 @@
 
   /**
    * Starts a polling interval to fetch only live data (running miners and hashrates).
+   * Uses takeUntil(destroy$) for automatic cleanup.
    */
   private startPollingLive_Data() {
-    this.pollingSubscription = interval(5000).pipe(
+    interval(5000).pipe(
+      takeUntil(this.destroy$),
       switchMap(() => this.getRunningMiners().pipe(catchError(() => of([]))))
     ).subscribe(runningMiners => {
       this.state.update(s => ({ ...s, runningMiners }));
@@ -219,10 +224,13 @@
   /**
    * Starts a polling interval to fetch historical data from database.
    * Polls every 30 seconds. Initial fetch happens in forceRefreshState after miners are loaded.
+   * Uses takeUntil(destroy$) for automatic cleanup.
    */
   private startPollingHistoricalData() {
     // Poll every 30 seconds (initial fetch happens in forceRefreshState)
-    this.historyPollingSubscription = interval(30000).subscribe(() => {
+    interval(30000).pipe(
+      takeUntil(this.destroy$)
+    ).subscribe(() => {
       this.fetchHistoricalHashrate();
     });
   }
@@ -276,10 +284,6 @@
     this.fetchHistoricalHashrate();
   }
 
-  private stopPolling() {
-    this.pollingSubscription?.unsubscribe();
-  }
-
   /**
    * Refreshes only the list of profiles. Called after create, update, or delete.
    */
@@ -364,6 +364,39 @@ import { interval, Subscription, switchMap } from 'rxjs';
     opacity: 0.5;
     cursor: not-allowed;
   }
+
+  /* ANSI color classes - prevents XSS via inline style injection */
+  .ansi-bold { font-weight: bold; }
+  .ansi-italic { font-style: italic; }
+  .ansi-underline { text-decoration: underline; }
+
+  /* Foreground colors */
+  .ansi-fg-30 { color: #1e1e1e; }
+  .ansi-fg-31 { color: #ef4444; }
+  .ansi-fg-32 { color: #22c55e; }
+  .ansi-fg-33 { color: #eab308; }
+  .ansi-fg-34 { color: #3b82f6; }
+  .ansi-fg-35 { color: #a855f7; }
+  .ansi-fg-36 { color: #06b6d4; }
+  .ansi-fg-37 { color: #e5e5e5; }
+  .ansi-fg-90 { color: #737373; }
+  .ansi-fg-91 { color: #fca5a5; }
+  .ansi-fg-92 { color: #86efac; }
+  .ansi-fg-93 { color: #fde047; }
+  .ansi-fg-94 { color: #93c5fd; }
+  .ansi-fg-95 { color: #d8b4fe; }
+  .ansi-fg-96 { color: #67e8f9; }
+  .ansi-fg-97 { color: #ffffff; }
+
+  /* Background colors */
+  .ansi-bg-40 { background: #1e1e1e; padding: 0 2px; }
+  .ansi-bg-41 { background: #dc2626; padding: 0 2px; }
+  .ansi-bg-42 { background: #16a34a; padding: 0 2px; }
+  .ansi-bg-43 { background: #ca8a04; padding: 0 2px; }
+  .ansi-bg-44 { background: #2563eb; padding: 0 2px; }
+  .ansi-bg-45 { background: #9333ea; padding: 0 2px; }
+  .ansi-bg-46 { background: #0891b2; padding: 0 2px; }
+  .ansi-bg-47 { background: #d4d4d4; padding: 0 2px; }
   `]
 })
 export class ConsoleComponent implements OnInit, OnDestroy, AfterViewChecked {
@@ -497,52 +530,63 @@
     return lower.includes('warn') || lower.includes('timeout') || lower.includes('retry');
   }
 
-  // Convert ANSI escape codes to HTML with CSS styling
+  // Convert ANSI escape codes to HTML with CSS classes
+  // Security model:
+  // 1. Input is HTML-escaped FIRST before any processing (prevents XSS)
+  // 2. Only whitelisted ANSI codes produce output (no arbitrary injection)
+  // 3. Output uses predefined CSS classes only (no inline styles)
+  // 4. Length-limited to prevent DoS
   ansiToHtml(text: string): SafeHtml {
-    // ANSI color codes mapping
-    const colors: { [key: string]: string } = {
-      '30': '#1e1e1e', '31': '#ef4444', '32': '#22c55e', '33': '#eab308',
-      '34': '#3b82f6', '35': '#a855f7', '36': '#06b6d4', '37': '#e5e5e5',
-      '90': '#737373', '91': '#fca5a5', '92': '#86efac', '93': '#fde047',
-      '94': '#93c5fd', '95': '#d8b4fe', '96': '#67e8f9', '97': '#ffffff',
-    };
-    const bgColors: { [key: string]: string } = {
-      '40': '#1e1e1e', '41': '#dc2626', '42': '#16a34a', '43': '#ca8a04',
-      '44': '#2563eb', '45': '#9333ea', '46': '#0891b2', '47': '#d4d4d4',
-    };
+    // Length limit to prevent DoS (10KB per line should be more than enough for logs)
+    const maxLength = 10240;
+    if (text.length > maxLength) {
+      text = text.substring(0, maxLength) + '... [truncated]';
+    }
+
+    // Whitelist of valid ANSI codes - only these will be processed
+    const validFgCodes = new Set(['30', '31', '32', '33', '34', '35', '36', '37',
+      '90', '91', '92', '93', '94', '95', '96', '97']);
+    const validBgCodes = new Set(['40', '41', '42', '43', '44', '45', '46', '47']);
 
     // CRITICAL: Escape HTML FIRST before any processing to prevent XSS
     let html = this.escapeHtml(text);
-    let currentStyles: string[] = [];
 
-    // Process ANSI escape sequences
+    // Process ANSI escape sequences using CSS classes instead of inline styles
+    // The regex only matches valid ANSI SGR sequences (numeric codes followed by 'm')
     html = html.replace(/\x1b\[([0-9;]*)m/g, (_, codes) => {
       if (!codes || codes === '0') {
-        currentStyles = [];
         return '</span>';
       }
 
-      const codeList = codes.split(';');
-      const styles: string[] = [];
-
-      for (const code of codeList) {
-        if (code === '1') styles.push('font-weight:bold');
-        else if (code === '3') styles.push('font-style:italic');
-        else if (code === '4') styles.push('text-decoration:underline');
-        else if (colors[code]) styles.push(`color:${colors[code]}`);
-        else if (bgColors[code]) styles.push(`background:${bgColors[code]};padding:0 2px`);
+      // Validate codes format - must be numeric values separated by semicolons
+      if (!/^[0-9;]+$/.test(codes)) {
+        return ''; // Invalid format, skip entirely
       }
 
-      if (styles.length > 0) {
-        currentStyles = styles;
-        return `<span style="${styles.join(';')}">`;
+      const codeList = codes.split(';');
+      const classes: string[] = [];
+
+      for (const code of codeList) {
+        // Only process whitelisted codes - ignore anything else
+        if (code === '1') classes.push('ansi-bold');
+        else if (code === '3') classes.push('ansi-italic');
+        else if (code === '4') classes.push('ansi-underline');
+        else if (validFgCodes.has(code)) classes.push(`ansi-fg-${code}`);
+        else if (validBgCodes.has(code)) classes.push(`ansi-bg-${code}`);
+        // All other codes are silently ignored for security
+      }
+
+      if (classes.length > 0) {
+        return `<span class="${classes.join(' ')}">`;
       }
       return '';
     });
 
-    // Clean up any unclosed spans
+    // Clean up any unclosed spans (limit to prevent DoS from malformed input)
     const openSpans = (html.match(/<span/g) || []).length;
     const closeSpans = (html.match(/<\/span>/g) || []).length;
-    for (let i = 0; i < openSpans - closeSpans; i++) {
+    const unclosed = Math.min(openSpans - closeSpans, 100); // Cap at 100 to prevent DoS
+    for (let i = 0; i < unclosed; i++) {
       html += '</span>';
     }
 
@@ -62,13 +62,63 @@ export class ProfileCreateComponent {
     this.model.config.hugePages = (event.target as HTMLInputElement).checked;
   }
 
+  /**
+   * Validates input for potential security issues (shell injection, etc.)
+   */
+  private validateInput(value: string, fieldName: string, maxLength: number): string | null {
+    if (!value || value.length === 0) {
+      return `${fieldName} is required`;
+    }
+    if (value.length > maxLength) {
+      return `${fieldName} is too long (max ${maxLength} characters)`;
+    }
+    // Check for shell metacharacters that could enable injection
+    const dangerousChars = /[;&|`$(){}\\<>'\"\n\r!]/;
+    if (dangerousChars.test(value)) {
+      return `${fieldName} contains invalid characters`;
+    }
+    return null;
+  }
+
+  /**
+   * Validates pool URL format
+   */
+  private validatePoolUrl(url: string): string | null {
+    if (!url) {
+      return 'Pool URL is required';
+    }
+    const validPrefixes = ['stratum+tcp://', 'stratum+ssl://', 'stratum://'];
+    if (!validPrefixes.some(prefix => url.startsWith(prefix))) {
+      return 'Pool URL must start with stratum+tcp://, stratum+ssl://, or stratum://';
+    }
+    return this.validateInput(url, 'Pool URL', 256);
+  }
+
   createProfile() {
     this.error = null;
     this.success = null;
 
-    // Basic validation check
-    if (!this.model.name || !this.model.minerType || !this.model.config.pool || !this.model.config.wallet) {
-      this.error = 'Please fill out all required fields.';
+    // Validate all inputs
+    const nameError = this.validateInput(this.model.name, 'Profile name', 100);
+    if (nameError) {
+      this.error = nameError;
       return;
     }
 
+    if (!this.model.minerType) {
+      this.error = 'Please select a miner type';
+      return;
+    }
+
+    const poolError = this.validatePoolUrl(this.model.config.pool);
+    if (poolError) {
+      this.error = poolError;
+      return;
+    }
+
+    const walletError = this.validateInput(this.model.config.wallet, 'Wallet address', 256);
+    if (walletError) {
+      this.error = walletError;
+      return;
+    }
+
@@ -1,6 +1,7 @@
 import { Injectable, signal, computed, OnDestroy, NgZone, inject } from '@angular/core';
 import { Subject, Observable, timer, Subscription, BehaviorSubject } from 'rxjs';
 import { filter, map, share, takeUntil } from 'rxjs/operators';
+import { ApiConfigService } from './api-config.service';
 
 // --- Event Types ---
 export type MiningEventType =
@@ -42,15 +43,28 @@ export interface MiningEvent<T = unknown> {
 
 export type ConnectionState = 'disconnected' | 'connecting' | 'connected' | 'reconnecting';
 
+// Security constants
+const MAX_MESSAGE_SIZE = 1024 * 1024; // 1MB max message size
+const VALID_EVENT_TYPES = new Set<MiningEventType>([
+  'miner.starting', 'miner.started', 'miner.stopping', 'miner.stopped',
+  'miner.stats', 'miner.error', 'miner.connected',
+  'profile.created', 'profile.updated', 'profile.deleted', 'pong'
+]);
+
 @Injectable({
   providedIn: 'root'
 })
 export class WebSocketService implements OnDestroy {
   private ngZone = inject(NgZone);
+  private readonly apiConfig = inject(ApiConfigService);
 
   // WebSocket connection
   private socket: WebSocket | null = null;
-  private wsUrl = 'ws://localhost:9090/api/v1/mining/ws/events';
+
+  /** Get the WebSocket URL from configuration */
+  private get wsUrl(): string {
+    return this.apiConfig.wsUrl;
+  }
 
   // Connection state
   private connectionState = signal<ConnectionState>('disconnected');
@@ -149,7 +163,31 @@ export class WebSocketService implements OnDestroy {
     this.socket.onmessage = (event) => {
       this.ngZone.run(() => {
         try {
-          const data = JSON.parse(event.data) as MiningEvent;
+          // Security: Validate message size to prevent memory exhaustion
+          const rawData = event.data;
+          if (typeof rawData === 'string' && rawData.length > MAX_MESSAGE_SIZE) {
+            console.error('[WebSocket] Message too large, ignoring:', rawData.length);
+            return;
+          }
+
+          const data = JSON.parse(rawData) as MiningEvent;
+
+          // Security: Validate event type is known/expected
+          if (!data.type || !VALID_EVENT_TYPES.has(data.type)) {
+            console.warn('[WebSocket] Unknown event type, ignoring:', data.type);
+            return;
+          }
+
+          // Security: Validate timestamp is reasonable (within 24 hours)
+          if (data.timestamp) {
+            const eventTime = new Date(data.timestamp).getTime();
+            const now = Date.now();
+            if (isNaN(eventTime) || Math.abs(now - eventTime) > 24 * 60 * 60 * 1000) {
+              console.warn('[WebSocket] Invalid timestamp, ignoring');
+              return;
+            }
+          }
+
           this.eventsSubject.next(data);
 
           // Log non-stats events for debugging