forked from Snider/Poindexter
Compare commits
2 commits
api-audit-
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 544a3bf5ad | |||
|
|
fa998619dc |
18 changed files with 990 additions and 245 deletions
33
AUDIT-API.md
33
AUDIT-API.md
|
|
@ -1,33 +0,0 @@
|
||||||
# API Design and Ergonomics Audit
|
|
||||||
|
|
||||||
## Findings
|
|
||||||
|
|
||||||
### 1. "God Class" in `kdtree_analytics.go`
|
|
||||||
|
|
||||||
The file `kdtree_analytics.go` exhibited "God Class" characteristics, combining core tree analytics with unrelated responsibilities like peer trust scoring and NAT metrics. This made the code difficult to maintain and understand.
|
|
||||||
|
|
||||||
### 2. Inconsistent Naming
|
|
||||||
|
|
||||||
The method `ComputeDistanceDistribution` in `kdtree.go` was inconsistently named, as it actually computed axis-based distributions, not distance distributions.
|
|
||||||
|
|
||||||
## Changes Made
|
|
||||||
|
|
||||||
### 1. Decomposed `kdtree_analytics.go`
|
|
||||||
|
|
||||||
To address the "God Class" issue, I decomposed `kdtree_analytics.go` into three distinct files:
|
|
||||||
|
|
||||||
* `kdtree_analytics.go`: Now contains only the core tree analytics.
|
|
||||||
* `peer_trust.go`: Contains the peer trust scoring logic.
|
|
||||||
* `nat_metrics.go`: Contains the NAT-related metrics.
|
|
||||||
|
|
||||||
### 2. Renamed `ComputeDistanceDistribution`
|
|
||||||
|
|
||||||
I renamed the `ComputeDistanceDistribution` method to `ComputeAxisDistributions` to more accurately reflect its functionality.
|
|
||||||
|
|
||||||
### 3. Refactored `kdtree.go`
|
|
||||||
|
|
||||||
I updated `kdtree.go` to use the new, more focused modules. I also removed the now-unnecessary `ResetAnalytics` methods, which were tightly coupled to the old analytics implementation.
|
|
||||||
|
|
||||||
## Conclusion
|
|
||||||
|
|
||||||
These changes improve the API's design and ergonomics by making the code more modular, maintainable, and easier to understand.
|
|
||||||
46
docs/plans/2026-02-16-math-expansion-design.md
Normal file
46
docs/plans/2026-02-16-math-expansion-design.md
Normal file
|
|
@ -0,0 +1,46 @@
|
||||||
|
# Poindexter Math Expansion
|
||||||
|
|
||||||
|
**Date:** 2026-02-16
|
||||||
|
**Status:** Approved
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
Poindexter serves as the math pillar (alongside Borg=data, Enchantrix=encryption) in the Lethean ecosystem. It currently provides KD-Tree spatial queries, 5 distance metrics, sorting utilities, and normalization helpers.
|
||||||
|
|
||||||
|
Analysis of math operations scattered across core/go, core/go-ai, and core/mining revealed common patterns that Poindexter should centralize: descriptive statistics, scaling/interpolation, approximate equality, weighted scoring, and signal generation.
|
||||||
|
|
||||||
|
## New Modules
|
||||||
|
|
||||||
|
### stats.go — Descriptive statistics
|
||||||
|
Sum, Mean, Variance, StdDev, MinMax, IsUnderrepresented.
|
||||||
|
Consumers: ml/coverage.go, lab/handler/chart.go
|
||||||
|
|
||||||
|
### scale.go — Normalization and interpolation
|
||||||
|
Lerp, InverseLerp, Remap, RoundToN, Clamp, MinMaxScale.
|
||||||
|
Consumers: lab/handler/chart.go, i18n/numbers.go
|
||||||
|
|
||||||
|
### epsilon.go — Approximate equality
|
||||||
|
ApproxEqual, ApproxZero.
|
||||||
|
Consumers: ml/exact.go
|
||||||
|
|
||||||
|
### score.go — Weighted composite scoring
|
||||||
|
Factor type, WeightedScore, Ratio, Delta, DeltaPercent.
|
||||||
|
Consumers: ml/heuristic.go, ml/compare.go
|
||||||
|
|
||||||
|
### signal.go — Time-series primitives
|
||||||
|
RampUp, SineWave, Oscillate, Noise (seeded RNG).
|
||||||
|
Consumers: mining/simulated_miner.go
|
||||||
|
|
||||||
|
## Constraints
|
||||||
|
|
||||||
|
- Zero external dependencies (WASM-compilable)
|
||||||
|
- Pure Go, stdlib only (math, math/rand)
|
||||||
|
- Same package (`poindexter`), flat structure
|
||||||
|
- Table-driven tests for every function
|
||||||
|
- No changes to existing files
|
||||||
|
|
||||||
|
## Not In Scope
|
||||||
|
|
||||||
|
- MLX tensor ops (hardware-accelerated, stays in go-ai)
|
||||||
|
- DNS tools migration to go-netops (separate PR)
|
||||||
|
- gonum backend integration (future work)
|
||||||
14
epsilon.go
Normal file
14
epsilon.go
Normal file
|
|
@ -0,0 +1,14 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
import "math"
|
||||||
|
|
||||||
|
// ApproxEqual reports whether a and b differ by strictly less than epsilon.
// The comparison is symmetric in a and b.
func ApproxEqual(a, b, epsilon float64) bool {
	diff := a - b
	if diff < 0 {
		diff = -diff
	}
	return diff < epsilon
}
|
||||||
|
|
||||||
|
// ApproxZero reports whether v lies strictly within epsilon of zero.
func ApproxZero(v, epsilon float64) bool {
	if v < 0 {
		v = -v
	}
	return v < epsilon
}
|
||||||
50
epsilon_test.go
Normal file
50
epsilon_test.go
Normal file
|
|
@ -0,0 +1,50 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
// TestApproxEqual exercises ApproxEqual with a table of positive, negative,
// boundary, and large-epsilon cases. Note "at_boundary" expects false because
// the comparison is strict (<, not <=).
func TestApproxEqual(t *testing.T) {
	tests := []struct {
		name    string
		a, b    float64
		epsilon float64
		want    bool
	}{
		{"equal", 1.0, 1.0, 0.01, true},
		{"close", 1.0, 1.005, 0.01, true},
		{"not_close", 1.0, 1.02, 0.01, false},
		{"negative", -1.0, -1.005, 0.01, true},
		{"zero", 0, 0.0001, 0.001, true},
		{"at_boundary", 1.0, 1.01, 0.01, false},
		{"large_epsilon", 100, 200, 150, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ApproxEqual(tt.a, tt.b, tt.epsilon)
			if got != tt.want {
				t.Errorf("ApproxEqual(%v, %v, %v) = %v, want %v", tt.a, tt.b, tt.epsilon, got, tt.want)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestApproxZero exercises ApproxZero for exact zero, small positive/negative
// values inside epsilon, and a value outside epsilon.
func TestApproxZero(t *testing.T) {
	tests := []struct {
		name    string
		v       float64
		epsilon float64
		want    bool
	}{
		{"zero", 0, 0.01, true},
		{"small_pos", 0.005, 0.01, true},
		{"small_neg", -0.005, 0.01, true},
		{"not_zero", 0.02, 0.01, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ApproxZero(tt.v, tt.epsilon)
			if got != tt.want {
				t.Errorf("ApproxZero(%v, %v) = %v, want %v", tt.v, tt.epsilon, got, tt.want)
			}
		})
	}
}
|
||||||
|
|
@ -564,8 +564,8 @@ func (t *KDTree[T]) GetTopPeers(n int) []PeerStats {
|
||||||
return t.peerAnalytics.GetTopPeers(n)
|
return t.peerAnalytics.GetTopPeers(n)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ComputeAxisDistributions analyzes the distribution of current point coordinates.
|
// ComputeDistanceDistribution analyzes the distribution of current point coordinates.
|
||||||
func (t *KDTree[T]) ComputeAxisDistributions(axisNames []string) []AxisDistribution {
|
func (t *KDTree[T]) ComputeDistanceDistribution(axisNames []string) []AxisDistribution {
|
||||||
return ComputeAxisDistributions(t.points, axisNames)
|
return ComputeAxisDistributions(t.points, axisNames)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -395,6 +395,197 @@ func ComputeAxisDistributions[T any](points []KDPoint[T], axisNames []string) []
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NATRoutingMetrics provides metrics specifically for NAT traversal routing decisions.
// Fields noted as 0-1 are normalized fractions; PeerQualityScore combines these
// into a single composite score.
type NATRoutingMetrics struct {
	// Connectivity score (0-1): higher means better reachability
	ConnectivityScore float64 `json:"connectivityScore"`
	// Symmetry score (0-1): higher means more symmetric NAT (easier to traverse)
	SymmetryScore float64 `json:"symmetryScore"`
	// Relay requirement probability (0-1): likelihood peer needs relay
	RelayProbability float64 `json:"relayProbability"`
	// Direct connection success rate (historical)
	DirectSuccessRate float64 `json:"directSuccessRate"`
	// Average RTT in milliseconds
	AvgRTTMs float64 `json:"avgRttMs"`
	// Jitter (RTT variance) in milliseconds
	JitterMs float64 `json:"jitterMs"`
	// Packet loss rate (0-1)
	PacketLossRate float64 `json:"packetLossRate"`
	// Bandwidth estimate in Mbps
	BandwidthMbps float64 `json:"bandwidthMbps"`
	// NAT type classification; compared against NATTypeClassification values
	NATType string `json:"natType"`
	// Last probe timestamp
	LastProbeAt time.Time `json:"lastProbeAt"`
}
|
||||||
|
|
||||||
|
// NATTypeClassification enumerates common NAT types for routing decisions.
// The string values correspond to the NATRoutingMetrics.NATType field;
// natTypeScore converts each classification into a 0-1 routability score.
type NATTypeClassification string

const (
	NATTypeOpen           NATTypeClassification = "open"            // No NAT / Public IP
	NATTypeFullCone       NATTypeClassification = "full_cone"       // Easy to traverse
	NATTypeRestrictedCone NATTypeClassification = "restricted_cone" // Moderate difficulty
	NATTypePortRestricted NATTypeClassification = "port_restricted" // Harder to traverse
	NATTypeSymmetric      NATTypeClassification = "symmetric"       // Hardest to traverse
	NATTypeSymmetricUDP   NATTypeClassification = "symmetric_udp"   // UDP-only symmetric
	NATTypeUnknown        NATTypeClassification = "unknown"         // Not yet classified
	NATTypeBehindCGNAT    NATTypeClassification = "cgnat"           // Carrier-grade NAT
	NATTypeFirewalled     NATTypeClassification = "firewalled"      // Blocked by firewall
	NATTypeRelayRequired  NATTypeClassification = "relay_required"  // Must use relay
)
|
||||||
|
|
||||||
|
// PeerQualityScore computes a composite quality score for peer selection.
|
||||||
|
// Higher scores indicate better peers for routing.
|
||||||
|
// Weights can be customized; default weights emphasize latency and reliability.
|
||||||
|
func PeerQualityScore(metrics NATRoutingMetrics, weights *QualityWeights) float64 {
|
||||||
|
w := DefaultQualityWeights()
|
||||||
|
if weights != nil {
|
||||||
|
w = *weights
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize metrics to 0-1 scale (higher is better)
|
||||||
|
latencyScore := 1.0 - math.Min(metrics.AvgRTTMs/1000.0, 1.0) // <1000ms is acceptable
|
||||||
|
jitterScore := 1.0 - math.Min(metrics.JitterMs/100.0, 1.0) // <100ms jitter
|
||||||
|
lossScore := 1.0 - metrics.PacketLossRate // 0 loss is best
|
||||||
|
bandwidthScore := math.Min(metrics.BandwidthMbps/100.0, 1.0) // 100Mbps is excellent
|
||||||
|
connectivityScore := metrics.ConnectivityScore // Already 0-1
|
||||||
|
symmetryScore := metrics.SymmetryScore // Already 0-1
|
||||||
|
directScore := metrics.DirectSuccessRate // Already 0-1
|
||||||
|
relayPenalty := 1.0 - metrics.RelayProbability // Prefer non-relay
|
||||||
|
|
||||||
|
// NAT type bonus/penalty
|
||||||
|
natScore := natTypeScore(metrics.NATType)
|
||||||
|
|
||||||
|
// Weighted combination
|
||||||
|
score := (w.Latency*latencyScore +
|
||||||
|
w.Jitter*jitterScore +
|
||||||
|
w.PacketLoss*lossScore +
|
||||||
|
w.Bandwidth*bandwidthScore +
|
||||||
|
w.Connectivity*connectivityScore +
|
||||||
|
w.Symmetry*symmetryScore +
|
||||||
|
w.DirectSuccess*directScore +
|
||||||
|
w.RelayPenalty*relayPenalty +
|
||||||
|
w.NATType*natScore) / w.Total()
|
||||||
|
|
||||||
|
return math.Max(0, math.Min(1, score))
|
||||||
|
}
|
||||||
|
|
||||||
|
// QualityWeights configures the importance of each metric in peer selection.
// Weights are relative: PeerQualityScore divides the weighted sum by Total(),
// so only the ratios between fields matter.
type QualityWeights struct {
	Latency       float64 `json:"latency"`
	Jitter        float64 `json:"jitter"`
	PacketLoss    float64 `json:"packetLoss"`
	Bandwidth     float64 `json:"bandwidth"`
	Connectivity  float64 `json:"connectivity"`
	Symmetry      float64 `json:"symmetry"`
	DirectSuccess float64 `json:"directSuccess"`
	RelayPenalty  float64 `json:"relayPenalty"`
	NATType       float64 `json:"natType"`
}
|
||||||
|
|
||||||
|
// Total returns the sum of all weights for normalization.
|
||||||
|
func (w QualityWeights) Total() float64 {
|
||||||
|
return w.Latency + w.Jitter + w.PacketLoss + w.Bandwidth +
|
||||||
|
w.Connectivity + w.Symmetry + w.DirectSuccess + w.RelayPenalty + w.NATType
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultQualityWeights returns sensible defaults for peer selection,
// weighting latency highest, followed by packet loss, connectivity,
// and direct-connection success.
func DefaultQualityWeights() QualityWeights {
	return QualityWeights{
		Latency:       3.0, // Most important
		Jitter:        1.5,
		PacketLoss:    2.0,
		Bandwidth:     1.0,
		Connectivity:  2.0,
		Symmetry:      1.0,
		DirectSuccess: 2.0,
		RelayPenalty:  1.5,
		NATType:       1.0,
	}
}
|
||||||
|
|
||||||
|
// natTypeScore returns a 0-1 score based on NAT type (higher is better for routing).
|
||||||
|
func natTypeScore(natType string) float64 {
|
||||||
|
switch NATTypeClassification(natType) {
|
||||||
|
case NATTypeOpen:
|
||||||
|
return 1.0
|
||||||
|
case NATTypeFullCone:
|
||||||
|
return 0.9
|
||||||
|
case NATTypeRestrictedCone:
|
||||||
|
return 0.7
|
||||||
|
case NATTypePortRestricted:
|
||||||
|
return 0.5
|
||||||
|
case NATTypeSymmetric:
|
||||||
|
return 0.3
|
||||||
|
case NATTypeSymmetricUDP:
|
||||||
|
return 0.25
|
||||||
|
case NATTypeBehindCGNAT:
|
||||||
|
return 0.2
|
||||||
|
case NATTypeFirewalled:
|
||||||
|
return 0.1
|
||||||
|
case NATTypeRelayRequired:
|
||||||
|
return 0.05
|
||||||
|
default:
|
||||||
|
return 0.4 // Unknown gets middle score
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrustMetrics tracks trust and reputation for peer selection.
// ComputeTrustScore folds these fields into a single 0-1 score.
type TrustMetrics struct {
	// ReputationScore (0-1): aggregated trust score
	ReputationScore float64 `json:"reputationScore"`
	// SuccessfulTransactions: count of successful exchanges
	SuccessfulTransactions int64 `json:"successfulTransactions"`
	// FailedTransactions: count of failed/aborted exchanges
	FailedTransactions int64 `json:"failedTransactions"`
	// AgeSeconds: how long this peer has been known
	AgeSeconds int64 `json:"ageSeconds"`
	// LastSuccessAt: last successful interaction
	LastSuccessAt time.Time `json:"lastSuccessAt"`
	// LastFailureAt: last failed interaction
	LastFailureAt time.Time `json:"lastFailureAt"`
	// VouchCount: number of other peers vouching for this peer
	VouchCount int `json:"vouchCount"`
	// FlagCount: number of reports against this peer
	FlagCount int `json:"flagCount"`
	// ProofOfWork: computational proof of stake/work
	ProofOfWork float64 `json:"proofOfWork"`
}
|
||||||
|
|
||||||
|
// ComputeTrustScore calculates a composite trust score from trust metrics.
|
||||||
|
func ComputeTrustScore(t TrustMetrics) float64 {
|
||||||
|
total := t.SuccessfulTransactions + t.FailedTransactions
|
||||||
|
if total == 0 {
|
||||||
|
// New peer with no history: moderate trust with age bonus
|
||||||
|
ageBonus := math.Min(float64(t.AgeSeconds)/(86400*30), 0.2) // Up to 0.2 for 30 days
|
||||||
|
return 0.5 + ageBonus
|
||||||
|
}
|
||||||
|
|
||||||
|
// Base score from success rate
|
||||||
|
successRate := float64(t.SuccessfulTransactions) / float64(total)
|
||||||
|
|
||||||
|
// Volume confidence (more transactions = more confident)
|
||||||
|
volumeConfidence := 1 - 1/(1+float64(total)/10)
|
||||||
|
|
||||||
|
// Vouch/flag adjustment
|
||||||
|
vouchBonus := math.Min(float64(t.VouchCount)*0.02, 0.15)
|
||||||
|
flagPenalty := math.Min(float64(t.FlagCount)*0.05, 0.3)
|
||||||
|
|
||||||
|
// Recency bonus (recent success = better)
|
||||||
|
recencyBonus := 0.0
|
||||||
|
if !t.LastSuccessAt.IsZero() {
|
||||||
|
hoursSince := time.Since(t.LastSuccessAt).Hours()
|
||||||
|
recencyBonus = 0.1 * math.Exp(-hoursSince/168) // Decays over ~1 week
|
||||||
|
}
|
||||||
|
|
||||||
|
// Proof of work bonus
|
||||||
|
powBonus := math.Min(t.ProofOfWork*0.1, 0.1)
|
||||||
|
|
||||||
|
score := successRate*volumeConfidence + vouchBonus - flagPenalty + recencyBonus + powBonus
|
||||||
|
return math.Max(0, math.Min(1, score))
|
||||||
|
}
|
||||||
|
|
||||||
// NetworkHealthSummary aggregates overall network health metrics.
|
// NetworkHealthSummary aggregates overall network health metrics.
|
||||||
type NetworkHealthSummary struct {
|
type NetworkHealthSummary struct {
|
||||||
TotalPeers int `json:"totalPeers"`
|
TotalPeers int `json:"totalPeers"`
|
||||||
|
|
@ -466,12 +657,6 @@ type FeatureRanges struct {
|
||||||
Ranges []AxisStats `json:"ranges"`
|
Ranges []AxisStats `json:"ranges"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// AxisStats holds statistics for a single axis.
|
|
||||||
type AxisStats struct {
|
|
||||||
Min float64 `json:"min"`
|
|
||||||
Max float64 `json:"max"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultPeerFeatureRanges returns sensible default ranges for peer features.
|
// DefaultPeerFeatureRanges returns sensible default ranges for peer features.
|
||||||
func DefaultPeerFeatureRanges() FeatureRanges {
|
func DefaultPeerFeatureRanges() FeatureRanges {
|
||||||
return FeatureRanges{
|
return FeatureRanges{
|
||||||
|
|
|
||||||
|
|
@ -618,7 +618,7 @@ func TestKDTreeDistanceDistribution(t *testing.T) {
|
||||||
}
|
}
|
||||||
tree, _ := NewKDTree(points)
|
tree, _ := NewKDTree(points)
|
||||||
|
|
||||||
dists := tree.ComputeAxisDistributions([]string{"x", "y"})
|
dists := tree.ComputeDistanceDistribution([]string{"x", "y"})
|
||||||
if len(dists) != 2 {
|
if len(dists) != 2 {
|
||||||
t.Errorf("expected 2 axis distributions, got %d", len(dists))
|
t.Errorf("expected 2 axis distributions, got %d", len(dists))
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -18,6 +18,12 @@ var (
|
||||||
ErrStatsDimMismatch = errors.New("kdtree: stats dimensionality mismatch")
|
ErrStatsDimMismatch = errors.New("kdtree: stats dimensionality mismatch")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// AxisStats holds the min/max observed for a single axis.
|
||||||
|
type AxisStats struct {
|
||||||
|
Min float64
|
||||||
|
Max float64
|
||||||
|
}
|
||||||
|
|
||||||
// NormStats holds per-axis normalisation statistics.
|
// NormStats holds per-axis normalisation statistics.
|
||||||
// For D dimensions, Stats has length D.
|
// For D dimensions, Stats has length D.
|
||||||
type NormStats struct {
|
type NormStats struct {
|
||||||
|
|
|
||||||
142
nat_metrics.go
142
nat_metrics.go
|
|
@ -1,142 +0,0 @@
|
||||||
package poindexter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NATRoutingMetrics provides metrics specifically for NAT traversal routing decisions.
|
|
||||||
type NATRoutingMetrics struct {
|
|
||||||
// Connectivity score (0-1): higher means better reachability
|
|
||||||
ConnectivityScore float64 `json:"connectivityScore"`
|
|
||||||
// Symmetry score (0-1): higher means more symmetric NAT (easier to traverse)
|
|
||||||
SymmetryScore float64 `json:"symmetryScore"`
|
|
||||||
// Relay requirement probability (0-1): likelihood peer needs relay
|
|
||||||
RelayProbability float64 `json:"relayProbability"`
|
|
||||||
// Direct connection success rate (historical)
|
|
||||||
DirectSuccessRate float64 `json:"directSuccessRate"`
|
|
||||||
// Average RTT in milliseconds
|
|
||||||
AvgRTTMs float64 `json:"avgRttMs"`
|
|
||||||
// Jitter (RTT variance) in milliseconds
|
|
||||||
JitterMs float64 `json:"jitterMs"`
|
|
||||||
// Packet loss rate (0-1)
|
|
||||||
PacketLossRate float64 `json:"packetLossRate"`
|
|
||||||
// Bandwidth estimate in Mbps
|
|
||||||
BandwidthMbps float64 `json:"bandwidthMbps"`
|
|
||||||
// NAT type classification
|
|
||||||
NATType string `json:"natType"`
|
|
||||||
// Last probe timestamp
|
|
||||||
LastProbeAt time.Time `json:"lastProbeAt"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NATTypeClassification enumerates common NAT types for routing decisions.
|
|
||||||
type NATTypeClassification string
|
|
||||||
|
|
||||||
const (
|
|
||||||
NATTypeOpen NATTypeClassification = "open" // No NAT / Public IP
|
|
||||||
NATTypeFullCone NATTypeClassification = "full_cone" // Easy to traverse
|
|
||||||
NATTypeRestrictedCone NATTypeClassification = "restricted_cone" // Moderate difficulty
|
|
||||||
NATTypePortRestricted NATTypeClassification = "port_restricted" // Harder to traverse
|
|
||||||
NATTypeSymmetric NATTypeClassification = "symmetric" // Hardest to traverse
|
|
||||||
NATTypeSymmetricUDP NATTypeClassification = "symmetric_udp" // UDP-only symmetric
|
|
||||||
NATTypeUnknown NATTypeClassification = "unknown" // Not yet classified
|
|
||||||
NATTypeBehindCGNAT NATTypeClassification = "cgnat" // Carrier-grade NAT
|
|
||||||
NATTypeFirewalled NATTypeClassification = "firewalled" // Blocked by firewall
|
|
||||||
NATTypeRelayRequired NATTypeClassification = "relay_required" // Must use relay
|
|
||||||
)
|
|
||||||
|
|
||||||
// PeerQualityScore computes a composite quality score for peer selection.
|
|
||||||
// Higher scores indicate better peers for routing.
|
|
||||||
// Weights can be customized; default weights emphasize latency and reliability.
|
|
||||||
func PeerQualityScore(metrics NATRoutingMetrics, weights *QualityWeights) float64 {
|
|
||||||
w := DefaultQualityWeights()
|
|
||||||
if weights != nil {
|
|
||||||
w = *weights
|
|
||||||
}
|
|
||||||
|
|
||||||
// Normalize metrics to 0-1 scale (higher is better)
|
|
||||||
latencyScore := 1.0 - math.Min(metrics.AvgRTTMs/1000.0, 1.0) // <1000ms is acceptable
|
|
||||||
jitterScore := 1.0 - math.Min(metrics.JitterMs/100.0, 1.0) // <100ms jitter
|
|
||||||
lossScore := 1.0 - metrics.PacketLossRate // 0 loss is best
|
|
||||||
bandwidthScore := math.Min(metrics.BandwidthMbps/100.0, 1.0) // 100Mbps is excellent
|
|
||||||
connectivityScore := metrics.ConnectivityScore // Already 0-1
|
|
||||||
symmetryScore := metrics.SymmetryScore // Already 0-1
|
|
||||||
directScore := metrics.DirectSuccessRate // Already 0-1
|
|
||||||
relayPenalty := 1.0 - metrics.RelayProbability // Prefer non-relay
|
|
||||||
|
|
||||||
// NAT type bonus/penalty
|
|
||||||
natScore := natTypeScore(metrics.NATType)
|
|
||||||
|
|
||||||
// Weighted combination
|
|
||||||
score := (w.Latency*latencyScore +
|
|
||||||
w.Jitter*jitterScore +
|
|
||||||
w.PacketLoss*lossScore +
|
|
||||||
w.Bandwidth*bandwidthScore +
|
|
||||||
w.Connectivity*connectivityScore +
|
|
||||||
w.Symmetry*symmetryScore +
|
|
||||||
w.DirectSuccess*directScore +
|
|
||||||
w.RelayPenalty*relayPenalty +
|
|
||||||
w.NATType*natScore) / w.Total()
|
|
||||||
|
|
||||||
return math.Max(0, math.Min(1, score))
|
|
||||||
}
|
|
||||||
|
|
||||||
// QualityWeights configures the importance of each metric in peer selection.
|
|
||||||
type QualityWeights struct {
|
|
||||||
Latency float64 `json:"latency"`
|
|
||||||
Jitter float64 `json:"jitter"`
|
|
||||||
PacketLoss float64 `json:"packetLoss"`
|
|
||||||
Bandwidth float64 `json:"bandwidth"`
|
|
||||||
Connectivity float64 `json:"connectivity"`
|
|
||||||
Symmetry float64 `json:"symmetry"`
|
|
||||||
DirectSuccess float64 `json:"directSuccess"`
|
|
||||||
RelayPenalty float64 `json:"relayPenalty"`
|
|
||||||
NATType float64 `json:"natType"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Total returns the sum of all weights for normalization.
|
|
||||||
func (w QualityWeights) Total() float64 {
|
|
||||||
return w.Latency + w.Jitter + w.PacketLoss + w.Bandwidth +
|
|
||||||
w.Connectivity + w.Symmetry + w.DirectSuccess + w.RelayPenalty + w.NATType
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultQualityWeights returns sensible defaults for peer selection.
|
|
||||||
func DefaultQualityWeights() QualityWeights {
|
|
||||||
return QualityWeights{
|
|
||||||
Latency: 3.0, // Most important
|
|
||||||
Jitter: 1.5,
|
|
||||||
PacketLoss: 2.0,
|
|
||||||
Bandwidth: 1.0,
|
|
||||||
Connectivity: 2.0,
|
|
||||||
Symmetry: 1.0,
|
|
||||||
DirectSuccess: 2.0,
|
|
||||||
RelayPenalty: 1.5,
|
|
||||||
NATType: 1.0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// natTypeScore returns a 0-1 score based on NAT type (higher is better for routing).
|
|
||||||
func natTypeScore(natType string) float64 {
|
|
||||||
switch NATTypeClassification(natType) {
|
|
||||||
case NATTypeOpen:
|
|
||||||
return 1.0
|
|
||||||
case NATTypeFullCone:
|
|
||||||
return 0.9
|
|
||||||
case NATTypeRestrictedCone:
|
|
||||||
return 0.7
|
|
||||||
case NATTypePortRestricted:
|
|
||||||
return 0.5
|
|
||||||
case NATTypeSymmetric:
|
|
||||||
return 0.3
|
|
||||||
case NATTypeSymmetricUDP:
|
|
||||||
return 0.25
|
|
||||||
case NATTypeBehindCGNAT:
|
|
||||||
return 0.2
|
|
||||||
case NATTypeFirewalled:
|
|
||||||
return 0.1
|
|
||||||
case NATTypeRelayRequired:
|
|
||||||
return 0.05
|
|
||||||
default:
|
|
||||||
return 0.4 // Unknown gets middle score
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,61 +0,0 @@
|
||||||
package poindexter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TrustMetrics tracks trust and reputation for peer selection.
|
|
||||||
type TrustMetrics struct {
|
|
||||||
// ReputationScore (0-1): aggregated trust score
|
|
||||||
ReputationScore float64 `json:"reputationScore"`
|
|
||||||
// SuccessfulTransactions: count of successful exchanges
|
|
||||||
SuccessfulTransactions int64 `json:"successfulTransactions"`
|
|
||||||
// FailedTransactions: count of failed/aborted exchanges
|
|
||||||
FailedTransactions int64 `json:"failedTransactions"`
|
|
||||||
// AgeSeconds: how long this peer has been known
|
|
||||||
AgeSeconds int64 `json:"ageSeconds"`
|
|
||||||
// LastSuccessAt: last successful interaction
|
|
||||||
LastSuccessAt time.Time `json:"lastSuccessAt"`
|
|
||||||
// LastFailureAt: last failed interaction
|
|
||||||
LastFailureAt time.Time `json:"lastFailureAt"`
|
|
||||||
// VouchCount: number of other peers vouching for this peer
|
|
||||||
VouchCount int `json:"vouchCount"`
|
|
||||||
// FlagCount: number of reports against this peer
|
|
||||||
FlagCount int `json:"flagCount"`
|
|
||||||
// ProofOfWork: computational proof of stake/work
|
|
||||||
ProofOfWork float64 `json:"proofOfWork"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ComputeTrustScore calculates a composite trust score from trust metrics.
|
|
||||||
func ComputeTrustScore(t TrustMetrics) float64 {
|
|
||||||
total := t.SuccessfulTransactions + t.FailedTransactions
|
|
||||||
if total == 0 {
|
|
||||||
// New peer with no history: moderate trust with age bonus
|
|
||||||
ageBonus := math.Min(float64(t.AgeSeconds)/(86400*30), 0.2) // Up to 0.2 for 30 days
|
|
||||||
return 0.5 + ageBonus
|
|
||||||
}
|
|
||||||
|
|
||||||
// Base score from success rate
|
|
||||||
successRate := float64(t.SuccessfulTransactions) / float64(total)
|
|
||||||
|
|
||||||
// Volume confidence (more transactions = more confident)
|
|
||||||
volumeConfidence := 1 - 1/(1+float64(total)/10)
|
|
||||||
|
|
||||||
// Vouch/flag adjustment
|
|
||||||
vouchBonus := math.Min(float64(t.VouchCount)*0.02, 0.15)
|
|
||||||
flagPenalty := math.Min(float64(t.FlagCount)*0.05, 0.3)
|
|
||||||
|
|
||||||
// Recency bonus (recent success = better)
|
|
||||||
recencyBonus := 0.0
|
|
||||||
if !t.LastSuccessAt.IsZero() {
|
|
||||||
hoursSince := time.Since(t.LastSuccessAt).Hours()
|
|
||||||
recencyBonus = 0.1 * math.Exp(-hoursSince/168) // Decays over ~1 week
|
|
||||||
}
|
|
||||||
|
|
||||||
// Proof of work bonus
|
|
||||||
powBonus := math.Min(t.ProofOfWork*0.1, 0.1)
|
|
||||||
|
|
||||||
score := successRate*volumeConfidence + vouchBonus - flagPenalty + recencyBonus + powBonus
|
|
||||||
return math.Max(0, math.Min(1, score))
|
|
||||||
}
|
|
||||||
61
scale.go
Normal file
61
scale.go
Normal file
|
|
@ -0,0 +1,61 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
import "math"
|
||||||
|
|
||||||
|
// Lerp performs linear interpolation between a and b: t=0 yields a, t=1
// yields b, and t=0.5 yields the midpoint. Values of t outside [0,1]
// extrapolate beyond the endpoints.
func Lerp(t, a, b float64) float64 {
	delta := b - a
	return a + t*delta
}
|
||||||
|
|
||||||
|
// InverseLerp returns where v falls between a and b as a fraction: 0 at a,
// 1 at b. Values of v outside [a, b] produce fractions outside [0, 1].
// Returns 0 when a == b (degenerate range).
func InverseLerp(v, a, b float64) float64 {
	if a == b {
		// Degenerate range: no meaningful position, report the start.
		return 0
	}
	offset := v - a
	span := b - a
	return offset / span
}
|
||||||
|
|
||||||
|
// Remap maps v from the range [inMin, inMax] to [outMin, outMax].
// This is the inlined composition Lerp(InverseLerp(v, inMin, inMax),
// outMin, outMax); a degenerate input range (inMin == inMax) maps to outMin.
func Remap(v, inMin, inMax, outMin, outMax float64) float64 {
	t := 0.0
	if inMin != inMax {
		t = (v - inMin) / (inMax - inMin)
	}
	return outMin + t*(outMax-outMin)
}
|
||||||
|
|
||||||
|
// RoundToN rounds f to the given number of decimal places, with halves
// rounded away from zero (math.Round semantics).
func RoundToN(f float64, decimals int) float64 {
	scale := math.Pow(10, float64(decimals))
	return math.Round(f*scale) / scale
}
|
||||||
|
|
||||||
|
// Clamp restricts v to the range [min, max], returning the nearer bound
// when v falls outside it.
func Clamp(v, min, max float64) float64 {
	switch {
	case v < min:
		return min
	case v > max:
		return max
	default:
		return v
	}
}
|
||||||
|
|
||||||
|
// ClampInt restricts v to the range [min, max], returning the nearer bound
// when v falls outside it.
func ClampInt(v, min, max int) int {
	switch {
	case v < min:
		return min
	case v > max:
		return max
	default:
		return v
	}
}
|
||||||
|
|
||||||
|
// MinMaxScale normalizes v into [0,1] given its range [min, max]: v == min
// maps to 0 and v == max maps to 1. Values outside the range map outside
// [0,1]. Returns 0 when min == max (degenerate range).
func MinMaxScale(v, min, max float64) float64 {
	if max == min {
		// Degenerate range: nothing to scale against.
		return 0
	}
	span := max - min
	return (v - min) / span
}
|
||||||
148
scale_test.go
Normal file
148
scale_test.go
Normal file
|
|
@ -0,0 +1,148 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestLerp checks interpolation at both endpoints, interior fractions, and
// extrapolation beyond t=1, comparing within a 1e-9 tolerance.
func TestLerp(t *testing.T) {
	tests := []struct {
		name     string
		t_, a, b float64
		want     float64
	}{
		{"start", 0, 10, 20, 10},
		{"end", 1, 10, 20, 20},
		{"mid", 0.5, 10, 20, 15},
		{"quarter", 0.25, 0, 100, 25},
		{"extrapolate", 2, 0, 10, 20},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := Lerp(tt.t_, tt.a, tt.b)
			if math.Abs(got-tt.want) > 1e-9 {
				t.Errorf("Lerp(%v, %v, %v) = %v, want %v", tt.t_, tt.a, tt.b, got, tt.want)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestInverseLerp checks both endpoints, the midpoint, and the degenerate
// a == b case (which must return 0 rather than dividing by zero).
func TestInverseLerp(t *testing.T) {
	tests := []struct {
		name    string
		v, a, b float64
		want    float64
	}{
		{"start", 10, 10, 20, 0},
		{"end", 20, 10, 20, 1},
		{"mid", 15, 10, 20, 0.5},
		{"equal_range", 5, 5, 5, 0},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := InverseLerp(tt.v, tt.a, tt.b)
			if math.Abs(got-tt.want) > 1e-9 {
				t.Errorf("InverseLerp(%v, %v, %v) = %v, want %v", tt.v, tt.a, tt.b, got, tt.want)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestRemap covers an identity mapping, a pure scale-up, a reversed output
// range, and a constant offset.
func TestRemap(t *testing.T) {
	tests := []struct {
		name                            string
		v, inMin, inMax, outMin, outMax float64
		want                            float64
	}{
		{"identity", 5, 0, 10, 0, 10, 5},
		{"scale_up", 5, 0, 10, 0, 100, 50},
		{"reverse", 3, 0, 10, 10, 0, 7},
		{"offset", 0, 0, 1, 100, 200, 100},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := Remap(tt.v, tt.inMin, tt.inMax, tt.outMin, tt.outMax)
			if math.Abs(got-tt.want) > 1e-9 {
				t.Errorf("Remap = %v, want %v", got, tt.want)
			}
		})
	}
}
|
||||||
|
|
||||||
|
func TestRoundToN(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
f float64
|
||||||
|
decimals int
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"zero_dec", 3.456, 0, 3},
|
||||||
|
{"one_dec", 3.456, 1, 3.5},
|
||||||
|
{"two_dec", 3.456, 2, 3.46},
|
||||||
|
{"three_dec", 3.4564, 3, 3.456},
|
||||||
|
{"negative", -2.555, 2, -2.56},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := RoundToN(tt.f, tt.decimals)
|
||||||
|
if math.Abs(got-tt.want) > 1e-9 {
|
||||||
|
t.Errorf("RoundToN(%v, %v) = %v, want %v", tt.f, tt.decimals, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClamp(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
v, min, max float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"within", 5, 0, 10, 5},
|
||||||
|
{"below", -5, 0, 10, 0},
|
||||||
|
{"above", 15, 0, 10, 10},
|
||||||
|
{"at_min", 0, 0, 10, 0},
|
||||||
|
{"at_max", 10, 0, 10, 10},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := Clamp(tt.v, tt.min, tt.max)
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("Clamp(%v, %v, %v) = %v, want %v", tt.v, tt.min, tt.max, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClampInt(t *testing.T) {
|
||||||
|
if got := ClampInt(5, 0, 10); got != 5 {
|
||||||
|
t.Errorf("ClampInt(5, 0, 10) = %v, want 5", got)
|
||||||
|
}
|
||||||
|
if got := ClampInt(-1, 0, 10); got != 0 {
|
||||||
|
t.Errorf("ClampInt(-1, 0, 10) = %v, want 0", got)
|
||||||
|
}
|
||||||
|
if got := ClampInt(15, 0, 10); got != 10 {
|
||||||
|
t.Errorf("ClampInt(15, 0, 10) = %v, want 10", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMinMaxScale(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
v, min, max float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"mid", 5, 0, 10, 0.5},
|
||||||
|
{"at_min", 0, 0, 10, 0},
|
||||||
|
{"at_max", 10, 0, 10, 1},
|
||||||
|
{"equal_range", 5, 5, 5, 0},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := MinMaxScale(tt.v, tt.min, tt.max)
|
||||||
|
if math.Abs(got-tt.want) > 1e-9 {
|
||||||
|
t.Errorf("MinMaxScale(%v, %v, %v) = %v, want %v", tt.v, tt.min, tt.max, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
40
score.go
Normal file
40
score.go
Normal file
|
|
@ -0,0 +1,40 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
// Factor is a single value-weight pair used as an input to composite
// scoring: WeightedScore adds Value * Weight to its total for each Factor.
type Factor struct {
	// Value is the measured quantity for this factor.
	Value float64
	// Weight scales Value's contribution; a negative weight subtracts
	// from the composite score.
	Weight float64
}
|
||||||
|
|
||||||
|
// WeightedScore computes the weighted sum of factors.
|
||||||
|
// Each factor contributes Value * Weight to the total.
|
||||||
|
// Returns 0 for empty slices.
|
||||||
|
func WeightedScore(factors []Factor) float64 {
|
||||||
|
var total float64
|
||||||
|
for _, f := range factors {
|
||||||
|
total += f.Value * f.Weight
|
||||||
|
}
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ratio returns part/whole safely. Returns 0 if whole is 0.
func Ratio(part, whole float64) float64 {
	if whole != 0 {
		return part / whole
	}
	return 0
}
|
||||||
|
|
||||||
|
// Delta returns the signed difference newVal - old.
// A positive result means the value increased.
func Delta(old, newVal float64) float64 {
	// Parameter renamed from new_ to newVal: Go naming avoids
	// underscores, and parameter names are not part of the call syntax,
	// so no caller is affected.
	return newVal - old
}
|
||||||
|
|
||||||
|
// DeltaPercent returns the percentage change from old to new_.
|
||||||
|
// Returns 0 if old is 0.
|
||||||
|
func DeltaPercent(old, new_ float64) float64 {
|
||||||
|
if old == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return (new_ - old) / old * 100
|
||||||
|
}
|
||||||
86
score_test.go
Normal file
86
score_test.go
Normal file
|
|
@ -0,0 +1,86 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWeightedScore(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
factors []Factor
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"empty", nil, 0},
|
||||||
|
{"single", []Factor{{Value: 5, Weight: 2}}, 10},
|
||||||
|
{"multiple", []Factor{
|
||||||
|
{Value: 3, Weight: 2}, // 6
|
||||||
|
{Value: 1, Weight: -5}, // -5
|
||||||
|
}, 1},
|
||||||
|
{"lek_heuristic", []Factor{
|
||||||
|
{Value: 2, Weight: 2}, // engagement × 2
|
||||||
|
{Value: 1, Weight: 3}, // creative × 3
|
||||||
|
{Value: 1, Weight: 1.5}, // first person × 1.5
|
||||||
|
{Value: 3, Weight: -5}, // compliance × -5
|
||||||
|
}, -6.5},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := WeightedScore(tt.factors)
|
||||||
|
if math.Abs(got-tt.want) > 1e-9 {
|
||||||
|
t.Errorf("WeightedScore = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRatio(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
part, whole float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"half", 5, 10, 0.5},
|
||||||
|
{"full", 10, 10, 1},
|
||||||
|
{"zero_whole", 5, 0, 0},
|
||||||
|
{"zero_part", 0, 10, 0},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := Ratio(tt.part, tt.whole)
|
||||||
|
if math.Abs(got-tt.want) > 1e-9 {
|
||||||
|
t.Errorf("Ratio(%v, %v) = %v, want %v", tt.part, tt.whole, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDelta(t *testing.T) {
|
||||||
|
if got := Delta(10, 15); got != 5 {
|
||||||
|
t.Errorf("Delta(10, 15) = %v, want 5", got)
|
||||||
|
}
|
||||||
|
if got := Delta(15, 10); got != -5 {
|
||||||
|
t.Errorf("Delta(15, 10) = %v, want -5", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeltaPercent(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
old, new_ float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"increase", 100, 150, 50},
|
||||||
|
{"decrease", 100, 75, -25},
|
||||||
|
{"zero_old", 0, 10, 0},
|
||||||
|
{"no_change", 50, 50, 0},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := DeltaPercent(tt.old, tt.new_)
|
||||||
|
if math.Abs(got-tt.want) > 1e-9 {
|
||||||
|
t.Errorf("DeltaPercent(%v, %v) = %v, want %v", tt.old, tt.new_, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
57
signal.go
Normal file
57
signal.go
Normal file
|
|
@ -0,0 +1,57 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"math/rand"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RampUp returns a linear ramp from 0 to 1 over the given duration.
// The result is clamped to [0, 1].
func RampUp(elapsed, duration float64) float64 {
	// A non-positive duration means the ramp is instantaneous.
	if duration <= 0 {
		return 1
	}
	// Inlined clamp of elapsed/duration to [0, 1].
	frac := elapsed / duration
	switch {
	case frac < 0:
		return 0
	case frac > 1:
		return 1
	default:
		return frac
	}
}
|
||||||
|
|
||||||
|
// SineWave returns a sine value with the given period and amplitude.
|
||||||
|
// Output range is [-amplitude, amplitude].
|
||||||
|
func SineWave(t, period, amplitude float64) float64 {
|
||||||
|
if period == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return math.Sin(t/period*2*math.Pi) * amplitude
|
||||||
|
}
|
||||||
|
|
||||||
|
// Oscillate modulates a base value with a sine wave.
|
||||||
|
// Returns base * (1 + sin(t/period*2π) * amplitude).
|
||||||
|
func Oscillate(base, t, period, amplitude float64) float64 {
|
||||||
|
if period == 0 {
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
return base * (1 + math.Sin(t/period*2*math.Pi)*amplitude)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Noise generates seeded pseudo-random values.
// The zero value is not usable; construct with NewNoise. Two Noise
// instances built from the same seed produce identical sequences.
type Noise struct {
	// rng is the seeded generator all methods draw from.
	rng *rand.Rand
}
|
||||||
|
|
||||||
|
// NewNoise creates a seeded noise generator.
|
||||||
|
func NewNoise(seed int64) *Noise {
|
||||||
|
return &Noise{rng: rand.New(rand.NewSource(seed))}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64 returns a random value in [-variance, variance].
|
||||||
|
func (n *Noise) Float64(variance float64) float64 {
|
||||||
|
return (n.rng.Float64() - 0.5) * 2 * variance
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int returns a random integer in [0, max).
|
||||||
|
// Returns 0 if max <= 0.
|
||||||
|
func (n *Noise) Int(max int) int {
|
||||||
|
if max <= 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return n.rng.Intn(max)
|
||||||
|
}
|
||||||
103
signal_test.go
Normal file
103
signal_test.go
Normal file
|
|
@ -0,0 +1,103 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRampUp(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
elapsed, duration float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"start", 0, 30, 0},
|
||||||
|
{"mid", 15, 30, 0.5},
|
||||||
|
{"end", 30, 30, 1},
|
||||||
|
{"over", 60, 30, 1},
|
||||||
|
{"negative", -5, 30, 0},
|
||||||
|
{"zero_duration", 10, 0, 1},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := RampUp(tt.elapsed, tt.duration)
|
||||||
|
if math.Abs(got-tt.want) > 1e-9 {
|
||||||
|
t.Errorf("RampUp(%v, %v) = %v, want %v", tt.elapsed, tt.duration, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSineWave(t *testing.T) {
|
||||||
|
// At t=0, sin(0) = 0
|
||||||
|
if got := SineWave(0, 10, 5); math.Abs(got) > 1e-9 {
|
||||||
|
t.Errorf("SineWave(0, 10, 5) = %v, want 0", got)
|
||||||
|
}
|
||||||
|
// At t=period/4, sin(π/2) = 1, so result = amplitude
|
||||||
|
got := SineWave(2.5, 10, 5)
|
||||||
|
if math.Abs(got-5) > 1e-9 {
|
||||||
|
t.Errorf("SineWave(2.5, 10, 5) = %v, want 5", got)
|
||||||
|
}
|
||||||
|
// Zero period returns 0
|
||||||
|
if got := SineWave(5, 0, 5); got != 0 {
|
||||||
|
t.Errorf("SineWave(5, 0, 5) = %v, want 0", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOscillate(t *testing.T) {
|
||||||
|
// At t=0, sin(0)=0, so result = base * (1 + 0) = base
|
||||||
|
got := Oscillate(100, 0, 10, 0.05)
|
||||||
|
if math.Abs(got-100) > 1e-9 {
|
||||||
|
t.Errorf("Oscillate(100, 0, 10, 0.05) = %v, want 100", got)
|
||||||
|
}
|
||||||
|
// At t=period/4, sin=1, so result = base * (1 + amplitude)
|
||||||
|
got = Oscillate(100, 2.5, 10, 0.05)
|
||||||
|
if math.Abs(got-105) > 1e-9 {
|
||||||
|
t.Errorf("Oscillate(100, 2.5, 10, 0.05) = %v, want 105", got)
|
||||||
|
}
|
||||||
|
// Zero period returns base
|
||||||
|
if got := Oscillate(100, 5, 0, 0.05); got != 100 {
|
||||||
|
t.Errorf("Oscillate(100, 5, 0, 0.05) = %v, want 100", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNoise(t *testing.T) {
|
||||||
|
n := NewNoise(42)
|
||||||
|
|
||||||
|
// Float64 should be within [-variance, variance]
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
v := n.Float64(0.1)
|
||||||
|
if v < -0.1 || v > 0.1 {
|
||||||
|
t.Fatalf("Float64(0.1) = %v, outside [-0.1, 0.1]", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int should be within [0, max)
|
||||||
|
n2 := NewNoise(42)
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
v := n2.Int(10)
|
||||||
|
if v < 0 || v >= 10 {
|
||||||
|
t.Fatalf("Int(10) = %v, outside [0, 10)", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int with zero max returns 0
|
||||||
|
if got := n.Int(0); got != 0 {
|
||||||
|
t.Errorf("Int(0) = %v, want 0", got)
|
||||||
|
}
|
||||||
|
if got := n.Int(-1); got != 0 {
|
||||||
|
t.Errorf("Int(-1) = %v, want 0", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNoiseDeterministic(t *testing.T) {
|
||||||
|
n1 := NewNoise(123)
|
||||||
|
n2 := NewNoise(123)
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
a := n1.Float64(1.0)
|
||||||
|
b := n2.Float64(1.0)
|
||||||
|
if a != b {
|
||||||
|
t.Fatalf("iteration %d: different values for same seed: %v != %v", i, a, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
63
stats.go
Normal file
63
stats.go
Normal file
|
|
@ -0,0 +1,63 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
import "math"
|
||||||
|
|
||||||
|
// Sum returns the sum of all values. Returns 0 for empty slices.
func Sum(data []float64) float64 {
	total := 0.0
	for _, x := range data {
		total += x
	}
	return total
}
|
||||||
|
|
||||||
|
// Mean returns the arithmetic mean. Returns 0 for empty slices.
func Mean(data []float64) float64 {
	if len(data) == 0 {
		return 0
	}
	// Accumulate then divide; same order of operations as Sum(data)/n.
	var total float64
	for _, v := range data {
		total += v
	}
	return total / float64(len(data))
}
|
||||||
|
|
||||||
|
// Variance returns the population variance. Returns 0 for empty slices.
|
||||||
|
func Variance(data []float64) float64 {
|
||||||
|
if len(data) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
m := Mean(data)
|
||||||
|
var ss float64
|
||||||
|
for _, v := range data {
|
||||||
|
d := v - m
|
||||||
|
ss += d * d
|
||||||
|
}
|
||||||
|
return ss / float64(len(data))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StdDev returns the population standard deviation.
|
||||||
|
func StdDev(data []float64) float64 {
|
||||||
|
return math.Sqrt(Variance(data))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinMax returns the minimum and maximum values.
// Returns (0, 0) for empty slices.
func MinMax(data []float64) (min, max float64) {
	if len(data) == 0 {
		return 0, 0
	}
	min = data[0]
	max = data[0]
	for i := 1; i < len(data); i++ {
		v := data[i]
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
	}
	return min, max
}
|
||||||
|
|
||||||
|
// IsUnderrepresented returns true if val is below threshold fraction of avg.
// For example, IsUnderrepresented(3, 10, 0.5) returns true because 3 < 10*0.5.
func IsUnderrepresented(val, avg, threshold float64) bool {
	cutoff := avg * threshold
	return val < cutoff
}
|
||||||
122
stats_test.go
Normal file
122
stats_test.go
Normal file
|
|
@ -0,0 +1,122 @@
|
||||||
|
package poindexter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSum(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data []float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"empty", nil, 0},
|
||||||
|
{"single", []float64{5}, 5},
|
||||||
|
{"multiple", []float64{1, 2, 3, 4, 5}, 15},
|
||||||
|
{"negative", []float64{-1, -2, 3}, 0},
|
||||||
|
{"floats", []float64{0.1, 0.2, 0.3}, 0.6},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := Sum(tt.data)
|
||||||
|
if math.Abs(got-tt.want) > 1e-9 {
|
||||||
|
t.Errorf("Sum(%v) = %v, want %v", tt.data, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMean(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data []float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"empty", nil, 0},
|
||||||
|
{"single", []float64{5}, 5},
|
||||||
|
{"multiple", []float64{2, 4, 6}, 4},
|
||||||
|
{"floats", []float64{1.5, 2.5}, 2},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := Mean(tt.data)
|
||||||
|
if math.Abs(got-tt.want) > 1e-9 {
|
||||||
|
t.Errorf("Mean(%v) = %v, want %v", tt.data, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestVariance(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data []float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"empty", nil, 0},
|
||||||
|
{"constant", []float64{5, 5, 5}, 0},
|
||||||
|
{"simple", []float64{2, 4, 4, 4, 5, 5, 7, 9}, 4},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := Variance(tt.data)
|
||||||
|
if math.Abs(got-tt.want) > 1e-9 {
|
||||||
|
t.Errorf("Variance(%v) = %v, want %v", tt.data, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStdDev(t *testing.T) {
|
||||||
|
got := StdDev([]float64{2, 4, 4, 4, 5, 5, 7, 9})
|
||||||
|
if math.Abs(got-2) > 1e-9 {
|
||||||
|
t.Errorf("StdDev = %v, want 2", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMinMax(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data []float64
|
||||||
|
wantMin float64
|
||||||
|
wantMax float64
|
||||||
|
}{
|
||||||
|
{"empty", nil, 0, 0},
|
||||||
|
{"single", []float64{3}, 3, 3},
|
||||||
|
{"ordered", []float64{1, 2, 3}, 1, 3},
|
||||||
|
{"reversed", []float64{3, 2, 1}, 1, 3},
|
||||||
|
{"negative", []float64{-5, 0, 5}, -5, 5},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
gotMin, gotMax := MinMax(tt.data)
|
||||||
|
if gotMin != tt.wantMin || gotMax != tt.wantMax {
|
||||||
|
t.Errorf("MinMax(%v) = (%v, %v), want (%v, %v)", tt.data, gotMin, gotMax, tt.wantMin, tt.wantMax)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsUnderrepresented(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
val float64
|
||||||
|
avg float64
|
||||||
|
threshold float64
|
||||||
|
want bool
|
||||||
|
}{
|
||||||
|
{"below", 3, 10, 0.5, true},
|
||||||
|
{"at", 5, 10, 0.5, false},
|
||||||
|
{"above", 7, 10, 0.5, false},
|
||||||
|
{"zero_avg", 0, 0, 0.5, false},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := IsUnderrepresented(tt.val, tt.avg, tt.threshold)
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("IsUnderrepresented(%v, %v, %v) = %v, want %v", tt.val, tt.avg, tt.threshold, got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
Loading…
Add table
Reference in a new issue