forked from Snider/Poindexter

feat: Expose networking analytics for KD-Tree NAT routing

Add comprehensive networking analytics to support peer selection based on performance and trust metrics for KD-Tree based NAT routing:

- Add kdtree_analytics.go with TreeAnalytics, PeerAnalytics, DistributionStats, NATRoutingMetrics, TrustMetrics, and QualityWeights structures
- Track query/insert/delete operations with timing statistics
- Track per-peer selection frequency and average distances
- Add PeerQualityScore() for composite peer ranking
- Add ComputeTrustScore() for reputation-based selection
- Add distribution statistics (min, max, mean, median, percentiles)
- Add feature normalization helpers for multi-dimensional peer data

WASM/TypeScript integration:

- Expose all analytics via WASM bindings
- Update TypeScript definitions with full type coverage
- Update loader.js with new API methods
- Update TypeScript demo to showcase analytics features

Includes comprehensive test coverage for all analytics functionality.

parent 299b01ec73 · commit 4609b7b2bf

7 changed files with 2165 additions and 25 deletions
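For orientation before the diffs, here is a minimal sketch of how the two new scoring helpers combine into a peer-ranking decision. The module import path `example.com/poindexter` is an assumption; the types, fields, and signatures match the Go code in this commit.

```go
package main

import (
	"fmt"

	poindexter "example.com/poindexter" // hypothetical import path
)

func main() {
	// Network-level metrics for one candidate peer (fields from NATRoutingMetrics).
	net := poindexter.NATRoutingMetrics{
		ConnectivityScore: 0.9,
		SymmetryScore:     0.8,
		DirectSuccessRate: 0.95,
		AvgRTTMs:          50,
		JitterMs:          10,
		PacketLossRate:    0.01,
		BandwidthMbps:     100,
		NATType:           string(poindexter.NATTypeFullCone),
	}
	// Reputation history for the same peer (fields from TrustMetrics).
	trust := poindexter.TrustMetrics{
		SuccessfulTransactions: 150,
		FailedTransactions:     3,
		AgeSeconds:             86400 * 30,
		VouchCount:             5,
	}

	// nil weights falls back to DefaultQualityWeights(); both results land in [0, 1].
	quality := poindexter.PeerQualityScore(net, nil)
	trustScore := poindexter.ComputeTrustScore(trust)
	fmt.Printf("quality=%.3f trust=%.3f\n", quality, trustScore)
}
```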
@@ -13,19 +13,159 @@ async function run() {
   console.log('Poindexter (WASM) version:', await px.version());

+  // =========================================================================
+  // Basic KD-Tree operations
+  // =========================================================================
   const tree = await px.newTree(2);
-  await tree.insert({ id: 'a', coords: [0, 0], value: 'A' });
-  await tree.insert({ id: 'b', coords: [1, 0], value: 'B' });
-  await tree.insert({ id: 'c', coords: [0, 1], value: 'C' });
+  await tree.insert({ id: 'peer-a', coords: [0, 0], value: 'Peer A' });
+  await tree.insert({ id: 'peer-b', coords: [1, 0], value: 'Peer B' });
+  await tree.insert({ id: 'peer-c', coords: [0, 1], value: 'Peer C' });
+  await tree.insert({ id: 'peer-d', coords: [0.5, 0.5], value: 'Peer D' });

+  console.log('\n=== Basic Queries ===');
   const nn = await tree.nearest([0.9, 0.1]);
   console.log('Nearest [0.9,0.1]:', nn);

-  const kn = await tree.kNearest([0.9, 0.9], 2);
-  console.log('kNN k=2 [0.9,0.9]:', kn);
+  const kn = await tree.kNearest([0.5, 0.5], 3);
+  console.log('kNN k=3 [0.5,0.5]:', kn);

   const rad = await tree.radius([0, 0], 1.1);
   console.log('Radius r=1.1 [0,0]:', rad);

+  // =========================================================================
+  // Analytics Demo
+  // =========================================================================
+  console.log('\n=== Tree Analytics ===');
+
+  // Perform more queries to generate analytics
+  for (let i = 0; i < 10; i++) {
+    await tree.nearest([Math.random(), Math.random()]);
+  }
+  await tree.kNearest([0.2, 0.8], 2);
+  await tree.kNearest([0.7, 0.3], 2);
+
+  // Get tree-level analytics
+  const analytics = await tree.getAnalytics();
+  console.log('Tree Analytics:', {
+    queryCount: analytics.queryCount,
+    insertCount: analytics.insertCount,
+    avgQueryTimeNs: analytics.avgQueryTimeNs,
+    minQueryTimeNs: analytics.minQueryTimeNs,
+    maxQueryTimeNs: analytics.maxQueryTimeNs,
+  });
+
+  // =========================================================================
+  // Peer Selection Analytics
+  // =========================================================================
+  console.log('\n=== Peer Selection Analytics ===');
+
+  // Get all peer stats
+  const peerStats = await tree.getPeerStats();
+  console.log('All Peer Stats:', peerStats);
+
+  // Get top 3 most frequently selected peers
+  const topPeers = await tree.getTopPeers(3);
+  console.log('Top 3 Peers:', topPeers);
+
+  // =========================================================================
+  // Axis Distribution Analysis
+  // =========================================================================
+  console.log('\n=== Axis Distributions ===');
+
+  const axisDists = await tree.getAxisDistributions(['latency', 'hops']);
+  console.log('Axis Distributions:', axisDists);
+
+  // =========================================================================
+  // NAT Routing / Peer Quality Scoring
+  // =========================================================================
+  console.log('\n=== NAT Routing & Peer Quality ===');
+
+  // Simulate peer network metrics
+  const peerMetrics = {
+    connectivityScore: 0.9,
+    symmetryScore: 0.8,
+    relayProbability: 0.1,
+    directSuccessRate: 0.95,
+    avgRttMs: 50,
+    jitterMs: 10,
+    packetLossRate: 0.01,
+    bandwidthMbps: 100,
+    natType: 'full_cone' as const,
+  };
+
+  const qualityScore = await px.computePeerQualityScore(peerMetrics);
+  console.log('Peer Quality Score (0-1):', qualityScore.toFixed(3));
+
+  // Get default quality weights
+  const defaultWeights = await px.getDefaultQualityWeights();
+  console.log('Default Quality Weights:', defaultWeights);
+
+  // =========================================================================
+  // Trust Score Calculation
+  // =========================================================================
+  console.log('\n=== Trust Score ===');
+
+  const trustMetrics = {
+    reputationScore: 0.8,
+    successfulTransactions: 150,
+    failedTransactions: 3,
+    ageSeconds: 86400 * 30, // 30 days
+    vouchCount: 5,
+    flagCount: 0,
+    proofOfWork: 0.5,
+  };
+
+  const trustScore = await px.computeTrustScore(trustMetrics);
+  console.log('Trust Score (0-1):', trustScore.toFixed(3));
+
+  // =========================================================================
+  // Distribution Statistics
+  // =========================================================================
+  console.log('\n=== Distribution Statistics ===');
+
+  // Simulate some distance measurements
+  const distances = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.8, 1.2];
+  const distStats = await px.computeDistributionStats(distances);
+  console.log('Distance Distribution Stats:', {
+    count: distStats.count,
+    min: distStats.min.toFixed(3),
+    max: distStats.max.toFixed(3),
+    mean: distStats.mean.toFixed(3),
+    median: distStats.median.toFixed(3),
+    stdDev: distStats.stdDev.toFixed(3),
+    p90: distStats.p90.toFixed(3),
+  });
+
+  // =========================================================================
+  // Feature Normalization for KD-Tree
+  // =========================================================================
+  console.log('\n=== Feature Normalization ===');
+
+  // Raw peer features: [latency_ms, hops, geo_km, trust_inv, bw_inv, loss, conn_inv, nat_inv]
+  const rawFeatures = [100, 5, 500, 0.1, 50, 0.02, 5, 0.1];
+
+  // Get default feature ranges
+  const featureRanges = await px.getDefaultPeerFeatureRanges();
+  console.log('Feature Labels:', featureRanges.labels);
+
+  // Normalize features
+  const normalizedFeatures = await px.normalizePeerFeatures(rawFeatures);
+  console.log('Normalized Features:', normalizedFeatures.map((f: number) => f.toFixed(3)));
+
+  // Apply custom weights
+  const customWeights = [1.5, 1.0, 0.5, 1.2, 0.8, 2.0, 1.0, 0.7];
+  const weightedFeatures = await px.weightedPeerFeatures(normalizedFeatures, customWeights);
+  console.log('Weighted Features:', weightedFeatures.map((f: number) => f.toFixed(3)));
+
+  // =========================================================================
+  // Analytics Reset
+  // =========================================================================
+  console.log('\n=== Analytics Reset ===');
+  await tree.resetAnalytics();
+  const resetAnalytics = await tree.getAnalytics();
+  console.log('After Reset - Query Count:', resetAnalytics.queryCount);
+
+  console.log('\n=== Demo Complete ===');
 }

 run().catch((err) => {

kdtree.go (158 changes)

@@ -4,6 +4,7 @@ import (
 	"errors"
 	"math"
 	"sort"
+	"time"
 )

 var (
@@ -218,6 +219,10 @@ type KDTree[T any] struct {
 	idIndex     map[string]int
 	backend     KDBackend
 	backendData any // opaque handle for backend-specific structures (e.g., gonum tree)
+
+	// Analytics tracking (optional, enabled by default)
+	analytics     *TreeAnalytics
+	peerAnalytics *PeerAnalytics
 }

 // NewKDTree builds a KDTree from the given points.
@@ -259,12 +264,14 @@ func NewKDTree[T any](pts []KDPoint[T], opts ...KDOption) (*KDTree[T], error) {
 		backend = BackendLinear // tag not enabled → fallback
 	}
 	t := &KDTree[T]{
-		points:      append([]KDPoint[T](nil), pts...),
-		dim:         dim,
-		metric:      cfg.metric,
-		idIndex:     idIndex,
-		backend:     backend,
-		backendData: backendData,
+		points:        append([]KDPoint[T](nil), pts...),
+		dim:           dim,
+		metric:        cfg.metric,
+		idIndex:       idIndex,
+		backend:       backend,
+		backendData:   backendData,
+		analytics:     NewTreeAnalytics(),
+		peerAnalytics: NewPeerAnalytics(),
 	}
 	return t, nil
 }
@@ -284,12 +291,14 @@ func NewKDTreeFromDim[T any](dim int, opts ...KDOption) (*KDTree[T], error) {
 		backend = BackendLinear
 	}
 	return &KDTree[T]{
-		points:      nil,
-		dim:         dim,
-		metric:      cfg.metric,
-		idIndex:     make(map[string]int),
-		backend:     backend,
-		backendData: nil,
+		points:        nil,
+		dim:           dim,
+		metric:        cfg.metric,
+		idIndex:       make(map[string]int),
+		backend:       backend,
+		backendData:   nil,
+		analytics:     NewTreeAnalytics(),
+		peerAnalytics: NewPeerAnalytics(),
 	}, nil
 }

@@ -305,12 +314,23 @@ func (t *KDTree[T]) Nearest(query []float64) (KDPoint[T], float64, bool) {
 	if len(query) != t.dim || t.Len() == 0 {
 		return KDPoint[T]{}, 0, false
 	}
+	start := time.Now()
+	defer func() {
+		if t.analytics != nil {
+			t.analytics.RecordQuery(time.Since(start).Nanoseconds())
+		}
+	}()

 	// Gonum backend (if available and built)
 	if t.backend == BackendGonum && t.backendData != nil {
 		if idx, dist, ok := gonumNearest[T](t.backendData, query); ok && idx >= 0 && idx < len(t.points) {
-			return t.points[idx], dist, true
+			p := t.points[idx]
+			if t.peerAnalytics != nil {
+				t.peerAnalytics.RecordSelection(p.ID, dist)
+			}
+			return p, dist, true
 		}
-		// fall through to linear scan if backend didn’t return a result
+		// fall through to linear scan if backend didn't return a result
 	}
 	bestIdx := -1
 	bestDist := math.MaxFloat64
@@ -324,7 +344,11 @@ func (t *KDTree[T]) Nearest(query []float64) (KDPoint[T], float64, bool) {
 	if bestIdx < 0 {
 		return KDPoint[T]{}, 0, false
 	}
-	return t.points[bestIdx], bestDist, true
+	p := t.points[bestIdx]
+	if t.peerAnalytics != nil {
+		t.peerAnalytics.RecordSelection(p.ID, bestDist)
+	}
+	return p, bestDist, true
 }

 // KNearest returns up to k nearest neighbors to the query in ascending distance order.
@@ -333,6 +357,13 @@ func (t *KDTree[T]) KNearest(query []float64, k int) ([]KDPoint[T], []float64) {
 	if k <= 0 || len(query) != t.dim || t.Len() == 0 {
 		return nil, nil
 	}
+	start := time.Now()
+	defer func() {
+		if t.analytics != nil {
+			t.analytics.RecordQuery(time.Since(start).Nanoseconds())
+		}
+	}()

 	// Gonum backend path
 	if t.backend == BackendGonum && t.backendData != nil {
 		idxs, dists := gonumKNearest[T](t.backendData, query, k)
@@ -340,6 +371,9 @@ func (t *KDTree[T]) KNearest(query []float64, k int) ([]KDPoint[T], []float64) {
 		neighbors := make([]KDPoint[T], len(idxs))
 		for i := range idxs {
 			neighbors[i] = t.points[idxs[i]]
+			if t.peerAnalytics != nil {
+				t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i])
+			}
 		}
 		return neighbors, dists
 	}
@@ -362,6 +396,9 @@ func (t *KDTree[T]) KNearest(query []float64, k int) ([]KDPoint[T], []float64) {
 	for i := 0; i < k; i++ {
 		neighbors[i] = t.points[tmp[i].idx]
 		dists[i] = tmp[i].dist
+		if t.peerAnalytics != nil {
+			t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i])
+		}
 	}
 	return neighbors, dists
 }
@@ -371,6 +408,13 @@ func (t *KDTree[T]) Radius(query []float64, r float64) ([]KDPoint[T], []float64) {
 	if r < 0 || len(query) != t.dim || t.Len() == 0 {
 		return nil, nil
 	}
+	start := time.Now()
+	defer func() {
+		if t.analytics != nil {
+			t.analytics.RecordQuery(time.Since(start).Nanoseconds())
+		}
+	}()

 	// Gonum backend path
 	if t.backend == BackendGonum && t.backendData != nil {
 		idxs, dists := gonumRadius[T](t.backendData, query, r)
@@ -378,6 +422,9 @@ func (t *KDTree[T]) Radius(query []float64, r float64) ([]KDPoint[T], []float64) {
 		neighbors := make([]KDPoint[T], len(idxs))
 		for i := range idxs {
 			neighbors[i] = t.points[idxs[i]]
+			if t.peerAnalytics != nil {
+				t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i])
+			}
 		}
 		return neighbors, dists
 	}
@@ -402,6 +449,9 @@ func (t *KDTree[T]) Radius(query []float64, r float64) ([]KDPoint[T], []float64) {
 	for i := range sel {
 		neighbors[i] = t.points[sel[i].idx]
 		dists[i] = sel[i].dist
+		if t.peerAnalytics != nil {
+			t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i])
+		}
 	}
 	return neighbors, dists
 }
@@ -421,10 +471,17 @@ func (t *KDTree[T]) Insert(p KDPoint[T]) bool {
 	if p.ID != "" {
 		t.idIndex[p.ID] = len(t.points) - 1
 	}
+	// Record insert in analytics
+	if t.analytics != nil {
+		t.analytics.RecordInsert()
+	}
 	// Rebuild backend if using Gonum
 	if t.backend == BackendGonum && hasGonum() {
 		if bd, err := buildGonumBackend(t.points, t.metric); err == nil {
 			t.backendData = bd
+			if t.analytics != nil {
+				t.analytics.RecordRebuild()
+			}
 		} else {
 			// fallback to linear if rebuild fails
 			t.backend = BackendLinear
@@ -451,10 +508,17 @@ func (t *KDTree[T]) DeleteByID(id string) bool {
 	}
 	t.points = t.points[:last]
 	delete(t.idIndex, id)
+	// Record delete in analytics
+	if t.analytics != nil {
+		t.analytics.RecordDelete()
+	}
 	// Rebuild backend if using Gonum
 	if t.backend == BackendGonum && hasGonum() {
 		if bd, err := buildGonumBackend(t.points, t.metric); err == nil {
 			t.backendData = bd
+			if t.analytics != nil {
+				t.analytics.RecordRebuild()
+			}
 		} else {
 			// fallback to linear if rebuild fails
 			t.backend = BackendLinear
@@ -463,3 +527,67 @@ func (t *KDTree[T]) DeleteByID(id string) bool {
 	}
 	return true
 }
+
+// Analytics returns the tree analytics tracker.
+// Returns nil if analytics tracking is disabled.
+func (t *KDTree[T]) Analytics() *TreeAnalytics {
+	return t.analytics
+}
+
+// PeerAnalytics returns the peer analytics tracker.
+// Returns nil if peer analytics tracking is disabled.
+func (t *KDTree[T]) PeerAnalytics() *PeerAnalytics {
+	return t.peerAnalytics
+}
+
+// GetAnalyticsSnapshot returns a point-in-time snapshot of tree analytics.
+func (t *KDTree[T]) GetAnalyticsSnapshot() TreeAnalyticsSnapshot {
+	if t.analytics == nil {
+		return TreeAnalyticsSnapshot{}
+	}
+	return t.analytics.Snapshot()
+}
+
+// GetPeerStats returns per-peer selection statistics.
+func (t *KDTree[T]) GetPeerStats() []PeerStats {
+	if t.peerAnalytics == nil {
+		return nil
+	}
+	return t.peerAnalytics.GetAllPeerStats()
+}
+
+// GetTopPeers returns the top N most frequently selected peers.
+func (t *KDTree[T]) GetTopPeers(n int) []PeerStats {
+	if t.peerAnalytics == nil {
+		return nil
+	}
+	return t.peerAnalytics.GetTopPeers(n)
+}
+
+// ComputeDistanceDistribution analyzes the distribution of current point coordinates.
+func (t *KDTree[T]) ComputeDistanceDistribution(axisNames []string) []AxisDistribution {
+	return ComputeAxisDistributions(t.points, axisNames)
+}
+
+// ResetAnalytics clears all analytics data.
+func (t *KDTree[T]) ResetAnalytics() {
+	if t.analytics != nil {
+		t.analytics.Reset()
+	}
+	if t.peerAnalytics != nil {
+		t.peerAnalytics.Reset()
+	}
+}
+
+// Points returns a copy of all points in the tree.
+// This is useful for analytics and export operations.
+func (t *KDTree[T]) Points() []KDPoint[T] {
+	result := make([]KDPoint[T], len(t.points))
+	copy(result, t.points)
+	return result
+}
+
+// Backend returns the active backend type.
+func (t *KDTree[T]) Backend() KDBackend {
+	return t.backend
+}
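With the hooks above in place, analytics accrue as a side effect of ordinary tree use. A minimal sketch of the resulting flow, under the same assumed import path (the calls match the diff above):

```go
package main

import (
	"fmt"

	poindexter "example.com/poindexter" // hypothetical import path
)

func main() {
	// Build an empty 2-D tree; analytics trackers are installed by default.
	tree, err := poindexter.NewKDTreeFromDim[string](2)
	if err != nil {
		panic(err)
	}
	tree.Insert(poindexter.KDPoint[string]{ID: "peer-a", Coords: []float64{0, 0}})
	tree.Insert(poindexter.KDPoint[string]{ID: "peer-b", Coords: []float64{1, 0}})

	// Each query records timing on TreeAnalytics and, when a point is
	// returned, a selection on PeerAnalytics.
	if p, dist, ok := tree.Nearest([]float64{0.9, 0.1}); ok {
		fmt.Println("nearest:", p.ID, dist)
	}

	snap := tree.GetAnalyticsSnapshot()
	fmt.Println("queries:", snap.QueryCount, "inserts:", snap.InsertCount)
	fmt.Println("top peer:", tree.GetTopPeers(1))
}
```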

kdtree_analytics.go (700 additions, new file)

@@ -0,0 +1,700 @@
package poindexter

import (
	"math"
	"sort"
	"sync"
	"sync/atomic"
	"time"
)

// TreeAnalytics tracks operational statistics for a KDTree.
// All counters are safe for concurrent reads; use the Reset() method for atomic reset.
type TreeAnalytics struct {
	QueryCount  atomic.Int64 // Total nearest/kNearest/radius queries
	InsertCount atomic.Int64 // Total successful inserts
	DeleteCount atomic.Int64 // Total successful deletes

	// Timing statistics (nanoseconds)
	TotalQueryTimeNs  atomic.Int64
	LastQueryTimeNs   atomic.Int64
	MinQueryTimeNs    atomic.Int64
	MaxQueryTimeNs    atomic.Int64
	LastQueryAt       atomic.Int64 // Unix nanoseconds
	CreatedAt         time.Time
	LastRebuiltAt     atomic.Int64 // Unix nanoseconds (for gonum backend rebuilds)
	BackendRebuildCnt atomic.Int64 // Number of backend rebuilds
}

// NewTreeAnalytics creates a new analytics tracker.
func NewTreeAnalytics() *TreeAnalytics {
	a := &TreeAnalytics{
		CreatedAt: time.Now(),
	}
	a.MinQueryTimeNs.Store(math.MaxInt64)
	return a
}

// RecordQuery records a query operation with timing.
func (a *TreeAnalytics) RecordQuery(durationNs int64) {
	a.QueryCount.Add(1)
	a.TotalQueryTimeNs.Add(durationNs)
	a.LastQueryTimeNs.Store(durationNs)
	a.LastQueryAt.Store(time.Now().UnixNano())

	// Update min/max (best-effort, not strictly atomic)
	for {
		cur := a.MinQueryTimeNs.Load()
		if durationNs >= cur || a.MinQueryTimeNs.CompareAndSwap(cur, durationNs) {
			break
		}
	}
	for {
		cur := a.MaxQueryTimeNs.Load()
		if durationNs <= cur || a.MaxQueryTimeNs.CompareAndSwap(cur, durationNs) {
			break
		}
	}
}

// RecordInsert records a successful insert.
func (a *TreeAnalytics) RecordInsert() {
	a.InsertCount.Add(1)
}

// RecordDelete records a successful delete.
func (a *TreeAnalytics) RecordDelete() {
	a.DeleteCount.Add(1)
}

// RecordRebuild records a backend rebuild.
func (a *TreeAnalytics) RecordRebuild() {
	a.BackendRebuildCnt.Add(1)
	a.LastRebuiltAt.Store(time.Now().UnixNano())
}

// Snapshot returns a point-in-time view of the analytics.
func (a *TreeAnalytics) Snapshot() TreeAnalyticsSnapshot {
	avgNs := int64(0)
	qc := a.QueryCount.Load()
	if qc > 0 {
		avgNs = a.TotalQueryTimeNs.Load() / qc
	}
	minNs := a.MinQueryTimeNs.Load()
	if minNs == math.MaxInt64 {
		minNs = 0
	}
	return TreeAnalyticsSnapshot{
		QueryCount:        qc,
		InsertCount:       a.InsertCount.Load(),
		DeleteCount:       a.DeleteCount.Load(),
		AvgQueryTimeNs:    avgNs,
		MinQueryTimeNs:    minNs,
		MaxQueryTimeNs:    a.MaxQueryTimeNs.Load(),
		LastQueryTimeNs:   a.LastQueryTimeNs.Load(),
		LastQueryAt:       time.Unix(0, a.LastQueryAt.Load()),
		CreatedAt:         a.CreatedAt,
		BackendRebuildCnt: a.BackendRebuildCnt.Load(),
		LastRebuiltAt:     time.Unix(0, a.LastRebuiltAt.Load()),
	}
}

// Reset atomically resets all counters.
func (a *TreeAnalytics) Reset() {
	a.QueryCount.Store(0)
	a.InsertCount.Store(0)
	a.DeleteCount.Store(0)
	a.TotalQueryTimeNs.Store(0)
	a.LastQueryTimeNs.Store(0)
	a.MinQueryTimeNs.Store(math.MaxInt64)
	a.MaxQueryTimeNs.Store(0)
	a.LastQueryAt.Store(0)
	a.BackendRebuildCnt.Store(0)
	a.LastRebuiltAt.Store(0)
}

// TreeAnalyticsSnapshot is an immutable snapshot for JSON serialization.
type TreeAnalyticsSnapshot struct {
	QueryCount        int64     `json:"queryCount"`
	InsertCount       int64     `json:"insertCount"`
	DeleteCount       int64     `json:"deleteCount"`
	AvgQueryTimeNs    int64     `json:"avgQueryTimeNs"`
	MinQueryTimeNs    int64     `json:"minQueryTimeNs"`
	MaxQueryTimeNs    int64     `json:"maxQueryTimeNs"`
	LastQueryTimeNs   int64     `json:"lastQueryTimeNs"`
	LastQueryAt       time.Time `json:"lastQueryAt"`
	CreatedAt         time.Time `json:"createdAt"`
	BackendRebuildCnt int64     `json:"backendRebuildCount"`
	LastRebuiltAt     time.Time `json:"lastRebuiltAt"`
}

// PeerAnalytics tracks per-peer selection statistics for NAT routing optimization.
type PeerAnalytics struct {
	mu sync.RWMutex

	// Per-peer hit counters (peer ID -> selection count)
	hitCounts map[string]*atomic.Int64
	// Per-peer cumulative distance sums for average calculation
	distanceSums map[string]*atomic.Uint64 // stored as bits of float64
	// Last selection time per peer
	lastSelected map[string]*atomic.Int64 // Unix nano
}

// NewPeerAnalytics creates a new peer analytics tracker.
func NewPeerAnalytics() *PeerAnalytics {
	return &PeerAnalytics{
		hitCounts:    make(map[string]*atomic.Int64),
		distanceSums: make(map[string]*atomic.Uint64),
		lastSelected: make(map[string]*atomic.Int64),
	}
}

// RecordSelection records that a peer was selected/returned in a query result.
func (p *PeerAnalytics) RecordSelection(peerID string, distance float64) {
	if peerID == "" {
		return
	}

	p.mu.RLock()
	hc, hok := p.hitCounts[peerID]
	ds, dok := p.distanceSums[peerID]
	ls, lok := p.lastSelected[peerID]
	p.mu.RUnlock()

	if !hok || !dok || !lok {
		p.mu.Lock()
		if _, ok := p.hitCounts[peerID]; !ok {
			p.hitCounts[peerID] = &atomic.Int64{}
		}
		if _, ok := p.distanceSums[peerID]; !ok {
			p.distanceSums[peerID] = &atomic.Uint64{}
		}
		if _, ok := p.lastSelected[peerID]; !ok {
			p.lastSelected[peerID] = &atomic.Int64{}
		}
		hc = p.hitCounts[peerID]
		ds = p.distanceSums[peerID]
		ls = p.lastSelected[peerID]
		p.mu.Unlock()
	}

	hc.Add(1)
	// Atomic float add via CAS
	for {
		old := ds.Load()
		oldF := math.Float64frombits(old)
		newF := oldF + distance
		if ds.CompareAndSwap(old, math.Float64bits(newF)) {
			break
		}
	}
	ls.Store(time.Now().UnixNano())
}

// GetPeerStats returns statistics for a specific peer.
func (p *PeerAnalytics) GetPeerStats(peerID string) PeerStats {
	p.mu.RLock()
	defer p.mu.RUnlock()

	hc, hok := p.hitCounts[peerID]
	ds, dok := p.distanceSums[peerID]
	ls, lok := p.lastSelected[peerID]

	stats := PeerStats{PeerID: peerID}
	if hok {
		stats.SelectionCount = hc.Load()
	}
	if dok && stats.SelectionCount > 0 {
		stats.AvgDistance = math.Float64frombits(ds.Load()) / float64(stats.SelectionCount)
	}
	if lok {
		stats.LastSelectedAt = time.Unix(0, ls.Load())
	}
	return stats
}

// GetAllPeerStats returns statistics for all tracked peers.
func (p *PeerAnalytics) GetAllPeerStats() []PeerStats {
	p.mu.RLock()
	defer p.mu.RUnlock()

	result := make([]PeerStats, 0, len(p.hitCounts))
	for id := range p.hitCounts {
		stats := PeerStats{PeerID: id}
		if hc := p.hitCounts[id]; hc != nil {
			stats.SelectionCount = hc.Load()
		}
		if ds := p.distanceSums[id]; ds != nil && stats.SelectionCount > 0 {
			stats.AvgDistance = math.Float64frombits(ds.Load()) / float64(stats.SelectionCount)
		}
		if ls := p.lastSelected[id]; ls != nil {
			stats.LastSelectedAt = time.Unix(0, ls.Load())
		}
		result = append(result, stats)
	}

	// Sort by selection count descending
	sort.Slice(result, func(i, j int) bool {
		return result[i].SelectionCount > result[j].SelectionCount
	})
	return result
}

// GetTopPeers returns the top N most frequently selected peers.
func (p *PeerAnalytics) GetTopPeers(n int) []PeerStats {
	all := p.GetAllPeerStats()
	if n > len(all) {
		n = len(all)
	}
	return all[:n]
}

// Reset clears all peer analytics data.
func (p *PeerAnalytics) Reset() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.hitCounts = make(map[string]*atomic.Int64)
	p.distanceSums = make(map[string]*atomic.Uint64)
	p.lastSelected = make(map[string]*atomic.Int64)
}

// PeerStats holds statistics for a single peer.
type PeerStats struct {
	PeerID         string    `json:"peerId"`
	SelectionCount int64     `json:"selectionCount"`
	AvgDistance    float64   `json:"avgDistance"`
	LastSelectedAt time.Time `json:"lastSelectedAt"`
}

// DistributionStats provides statistical analysis of distances in query results.
type DistributionStats struct {
	Count      int       `json:"count"`
	Min        float64   `json:"min"`
	Max        float64   `json:"max"`
	Mean       float64   `json:"mean"`
	Median     float64   `json:"median"`
	StdDev     float64   `json:"stdDev"`
	P25        float64   `json:"p25"` // 25th percentile
	P75        float64   `json:"p75"` // 75th percentile
	P90        float64   `json:"p90"` // 90th percentile
	P99        float64   `json:"p99"` // 99th percentile
	Variance   float64   `json:"variance"`
	Skewness   float64   `json:"skewness"`
	SampleSize int       `json:"sampleSize"`
	ComputedAt time.Time `json:"computedAt"`
}

// ComputeDistributionStats calculates distribution statistics from a slice of distances.
func ComputeDistributionStats(distances []float64) DistributionStats {
	n := len(distances)
	if n == 0 {
		return DistributionStats{ComputedAt: time.Now()}
	}

	// Sort for percentile calculations
	sorted := make([]float64, n)
	copy(sorted, distances)
	sort.Float64s(sorted)

	// Basic stats
	min, max := sorted[0], sorted[n-1]
	sum := 0.0
	for _, d := range sorted {
		sum += d
	}
	mean := sum / float64(n)

	// Variance and standard deviation
	sumSqDiff := 0.0
	for _, d := range sorted {
		diff := d - mean
		sumSqDiff += diff * diff
	}
	variance := sumSqDiff / float64(n)
	stdDev := math.Sqrt(variance)

	// Skewness
	skewness := 0.0
	if stdDev > 0 {
		sumCubeDiff := 0.0
		for _, d := range sorted {
			diff := (d - mean) / stdDev
			sumCubeDiff += diff * diff * diff
		}
		skewness = sumCubeDiff / float64(n)
	}

	return DistributionStats{
		Count:      n,
		Min:        min,
		Max:        max,
		Mean:       mean,
		Median:     percentile(sorted, 0.5),
		StdDev:     stdDev,
		P25:        percentile(sorted, 0.25),
		P75:        percentile(sorted, 0.75),
		P90:        percentile(sorted, 0.90),
		P99:        percentile(sorted, 0.99),
		Variance:   variance,
		Skewness:   skewness,
		SampleSize: n,
		ComputedAt: time.Now(),
	}
}

// percentile returns the p-th percentile from a sorted slice.
func percentile(sorted []float64, p float64) float64 {
	if len(sorted) == 0 {
		return 0
	}
	if len(sorted) == 1 {
		return sorted[0]
	}
	idx := p * float64(len(sorted)-1)
	lower := int(idx)
	upper := lower + 1
	if upper >= len(sorted) {
		return sorted[len(sorted)-1]
	}
	frac := idx - float64(lower)
	return sorted[lower]*(1-frac) + sorted[upper]*frac
}

// AxisDistribution provides per-axis (feature) distribution analysis.
type AxisDistribution struct {
	Axis  int               `json:"axis"`
	Name  string            `json:"name,omitempty"`
	Stats DistributionStats `json:"stats"`
}

// ComputeAxisDistributions analyzes the distribution of values along each axis.
func ComputeAxisDistributions[T any](points []KDPoint[T], axisNames []string) []AxisDistribution {
	if len(points) == 0 {
		return nil
	}
	dim := len(points[0].Coords)
	result := make([]AxisDistribution, dim)

	for axis := 0; axis < dim; axis++ {
		values := make([]float64, len(points))
		for i, p := range points {
			if axis < len(p.Coords) {
				values[i] = p.Coords[axis]
			}
		}
		name := ""
		if axis < len(axisNames) {
			name = axisNames[axis]
		}
		result[axis] = AxisDistribution{
			Axis:  axis,
			Name:  name,
			Stats: ComputeDistributionStats(values),
		}
	}
	return result
}

// NATRoutingMetrics provides metrics specifically for NAT traversal routing decisions.
type NATRoutingMetrics struct {
	// Connectivity score (0-1): higher means better reachability
	ConnectivityScore float64 `json:"connectivityScore"`
	// Symmetry score (0-1): higher means more symmetric NAT (easier to traverse)
	SymmetryScore float64 `json:"symmetryScore"`
	// Relay requirement probability (0-1): likelihood peer needs relay
	RelayProbability float64 `json:"relayProbability"`
	// Direct connection success rate (historical)
	DirectSuccessRate float64 `json:"directSuccessRate"`
	// Average RTT in milliseconds
	AvgRTTMs float64 `json:"avgRttMs"`
	// Jitter (RTT variance) in milliseconds
	JitterMs float64 `json:"jitterMs"`
	// Packet loss rate (0-1)
	PacketLossRate float64 `json:"packetLossRate"`
	// Bandwidth estimate in Mbps
	BandwidthMbps float64 `json:"bandwidthMbps"`
	// NAT type classification
	NATType string `json:"natType"`
	// Last probe timestamp
	LastProbeAt time.Time `json:"lastProbeAt"`
}

// NATTypeClassification enumerates common NAT types for routing decisions.
type NATTypeClassification string

const (
	NATTypeOpen           NATTypeClassification = "open"            // No NAT / Public IP
	NATTypeFullCone       NATTypeClassification = "full_cone"       // Easy to traverse
	NATTypeRestrictedCone NATTypeClassification = "restricted_cone" // Moderate difficulty
	NATTypePortRestricted NATTypeClassification = "port_restricted" // Harder to traverse
	NATTypeSymmetric      NATTypeClassification = "symmetric"       // Hardest to traverse
	NATTypeSymmetricUDP   NATTypeClassification = "symmetric_udp"   // UDP-only symmetric
	NATTypeUnknown        NATTypeClassification = "unknown"         // Not yet classified
	NATTypeBehindCGNAT    NATTypeClassification = "cgnat"           // Carrier-grade NAT
	NATTypeFirewalled     NATTypeClassification = "firewalled"      // Blocked by firewall
	NATTypeRelayRequired  NATTypeClassification = "relay_required"  // Must use relay
)

// PeerQualityScore computes a composite quality score for peer selection.
// Higher scores indicate better peers for routing.
// Weights can be customized; default weights emphasize latency and reliability.
func PeerQualityScore(metrics NATRoutingMetrics, weights *QualityWeights) float64 {
	w := DefaultQualityWeights()
	if weights != nil {
		w = *weights
	}

	// Normalize metrics to 0-1 scale (higher is better)
	latencyScore := 1.0 - math.Min(metrics.AvgRTTMs/1000.0, 1.0) // <1000ms is acceptable
	jitterScore := 1.0 - math.Min(metrics.JitterMs/100.0, 1.0)   // <100ms jitter
	lossScore := 1.0 - metrics.PacketLossRate                    // 0 loss is best
	bandwidthScore := math.Min(metrics.BandwidthMbps/100.0, 1.0) // 100Mbps is excellent
	connectivityScore := metrics.ConnectivityScore               // Already 0-1
	symmetryScore := metrics.SymmetryScore                       // Already 0-1
	directScore := metrics.DirectSuccessRate                     // Already 0-1
	relayPenalty := 1.0 - metrics.RelayProbability               // Prefer non-relay

	// NAT type bonus/penalty
	natScore := natTypeScore(metrics.NATType)

	// Weighted combination
	score := (w.Latency*latencyScore +
		w.Jitter*jitterScore +
		w.PacketLoss*lossScore +
		w.Bandwidth*bandwidthScore +
		w.Connectivity*connectivityScore +
		w.Symmetry*symmetryScore +
		w.DirectSuccess*directScore +
		w.RelayPenalty*relayPenalty +
		w.NATType*natScore) / w.Total()

	return math.Max(0, math.Min(1, score))
}

// QualityWeights configures the importance of each metric in peer selection.
type QualityWeights struct {
	Latency       float64 `json:"latency"`
	Jitter        float64 `json:"jitter"`
	PacketLoss    float64 `json:"packetLoss"`
	Bandwidth     float64 `json:"bandwidth"`
	Connectivity  float64 `json:"connectivity"`
	Symmetry      float64 `json:"symmetry"`
	DirectSuccess float64 `json:"directSuccess"`
	RelayPenalty  float64 `json:"relayPenalty"`
	NATType       float64 `json:"natType"`
}

// Total returns the sum of all weights for normalization.
func (w QualityWeights) Total() float64 {
	return w.Latency + w.Jitter + w.PacketLoss + w.Bandwidth +
		w.Connectivity + w.Symmetry + w.DirectSuccess + w.RelayPenalty + w.NATType
}

// DefaultQualityWeights returns sensible defaults for peer selection.
func DefaultQualityWeights() QualityWeights {
	return QualityWeights{
		Latency:       3.0, // Most important
		Jitter:        1.5,
		PacketLoss:    2.0,
		Bandwidth:     1.0,
		Connectivity:  2.0,
		Symmetry:      1.0,
		DirectSuccess: 2.0,
		RelayPenalty:  1.5,
		NATType:       1.0,
	}
}

// natTypeScore returns a 0-1 score based on NAT type (higher is better for routing).
func natTypeScore(natType string) float64 {
	switch NATTypeClassification(natType) {
	case NATTypeOpen:
		return 1.0
	case NATTypeFullCone:
		return 0.9
	case NATTypeRestrictedCone:
		return 0.7
	case NATTypePortRestricted:
		return 0.5
	case NATTypeSymmetric:
		return 0.3
	case NATTypeSymmetricUDP:
		return 0.25
	case NATTypeBehindCGNAT:
		return 0.2
	case NATTypeFirewalled:
		return 0.1
	case NATTypeRelayRequired:
		return 0.05
	default:
		return 0.4 // Unknown gets middle score
	}
}

// TrustMetrics tracks trust and reputation for peer selection.
type TrustMetrics struct {
	// ReputationScore (0-1): aggregated trust score
	ReputationScore float64 `json:"reputationScore"`
	// SuccessfulTransactions: count of successful exchanges
	SuccessfulTransactions int64 `json:"successfulTransactions"`
	// FailedTransactions: count of failed/aborted exchanges
	FailedTransactions int64 `json:"failedTransactions"`
	// AgeSeconds: how long this peer has been known
	AgeSeconds int64 `json:"ageSeconds"`
	// LastSuccessAt: last successful interaction
	LastSuccessAt time.Time `json:"lastSuccessAt"`
	// LastFailureAt: last failed interaction
	LastFailureAt time.Time `json:"lastFailureAt"`
	// VouchCount: number of other peers vouching for this peer
	VouchCount int `json:"vouchCount"`
	// FlagCount: number of reports against this peer
	FlagCount int `json:"flagCount"`
	// ProofOfWork: computational proof of stake/work
	ProofOfWork float64 `json:"proofOfWork"`
}

// ComputeTrustScore calculates a composite trust score from trust metrics.
func ComputeTrustScore(t TrustMetrics) float64 {
	total := t.SuccessfulTransactions + t.FailedTransactions
	if total == 0 {
		// New peer with no history: moderate trust with age bonus
		ageBonus := math.Min(float64(t.AgeSeconds)/(86400*30), 0.2) // Up to 0.2 for 30 days
		return 0.5 + ageBonus
	}

	// Base score from success rate
	successRate := float64(t.SuccessfulTransactions) / float64(total)

	// Volume confidence (more transactions = more confident)
	volumeConfidence := 1 - 1/(1+float64(total)/10)

	// Vouch/flag adjustment
	vouchBonus := math.Min(float64(t.VouchCount)*0.02, 0.15)
	flagPenalty := math.Min(float64(t.FlagCount)*0.05, 0.3)

	// Recency bonus (recent success = better)
	recencyBonus := 0.0
	if !t.LastSuccessAt.IsZero() {
		hoursSince := time.Since(t.LastSuccessAt).Hours()
		recencyBonus = 0.1 * math.Exp(-hoursSince/168) // Decays over ~1 week
	}

	// Proof of work bonus
	powBonus := math.Min(t.ProofOfWork*0.1, 0.1)

	score := successRate*volumeConfidence + vouchBonus - flagPenalty + recencyBonus + powBonus
	return math.Max(0, math.Min(1, score))
}

// NetworkHealthSummary aggregates overall network health metrics.
type NetworkHealthSummary struct {
	TotalPeers        int       `json:"totalPeers"`
	ActivePeers       int       `json:"activePeers"`    // Peers queried recently
	HealthyPeers      int       `json:"healthyPeers"`   // Peers with good metrics
	DegradedPeers     int       `json:"degradedPeers"`  // Peers with some issues
	UnhealthyPeers    int       `json:"unhealthyPeers"` // Peers with poor metrics
	AvgLatencyMs      float64   `json:"avgLatencyMs"`
	MedianLatencyMs   float64   `json:"medianLatencyMs"`
	AvgTrustScore     float64   `json:"avgTrustScore"`
	AvgQualityScore   float64   `json:"avgQualityScore"`
	DirectConnectRate float64   `json:"directConnectRate"` // % of peers directly reachable
	RelayDependency   float64   `json:"relayDependency"`   // % of peers needing relay
	ComputedAt        time.Time `json:"computedAt"`
}

// FeatureVector represents a normalized feature vector for a peer.
// This is the core structure for KD-Tree based peer selection.
type FeatureVector struct {
	PeerID   string    `json:"peerId"`
	Features []float64 `json:"features"`
	Labels   []string  `json:"labels,omitempty"` // Optional feature names
}

// StandardPeerFeatures defines the standard feature set for peer selection.
// These map to dimensions in the KD-Tree.
type StandardPeerFeatures struct {
	LatencyMs       float64 `json:"latencyMs"`       // Lower is better
	HopCount        int     `json:"hopCount"`        // Lower is better
	GeoDistanceKm   float64 `json:"geoDistanceKm"`   // Lower is better
	TrustScore      float64 `json:"trustScore"`      // Higher is better (invert)
	BandwidthMbps   float64 `json:"bandwidthMbps"`   // Higher is better (invert)
	PacketLossRate  float64 `json:"packetLossRate"`  // Lower is better
	ConnectivityPct float64 `json:"connectivityPct"` // Higher is better (invert)
	NATScore        float64 `json:"natScore"`        // Higher is better (invert)
}

// ToFeatureSlice converts structured features to a slice for KD-Tree operations.
// Inversion is handled so that lower distance = better peer.
func (f StandardPeerFeatures) ToFeatureSlice() []float64 {
	return []float64{
		f.LatencyMs,
		float64(f.HopCount),
		f.GeoDistanceKm,
		1 - f.TrustScore,        // Invert: higher trust = lower value
		100 - f.BandwidthMbps,   // Invert: higher bandwidth = lower value (capped at 100)
		f.PacketLossRate,
		100 - f.ConnectivityPct, // Invert: higher connectivity = lower value
		1 - f.NATScore,          // Invert: higher NAT score = lower value
	}
}

// StandardFeatureLabels returns the labels for standard peer features.
func StandardFeatureLabels() []string {
	return []string{
		"latency_ms",
		"hop_count",
		"geo_distance_km",
		"trust_score_inv",
		"bandwidth_inv",
		"packet_loss_rate",
		"connectivity_inv",
		"nat_score_inv",
	}
}

// FeatureRanges defines min/max ranges for feature normalization.
type FeatureRanges struct {
	Ranges []AxisStats `json:"ranges"`
}

// DefaultPeerFeatureRanges returns sensible default ranges for peer features.
func DefaultPeerFeatureRanges() FeatureRanges {
	return FeatureRanges{
		Ranges: []AxisStats{
			{Min: 0, Max: 1000},  // Latency: 0-1000ms
			{Min: 0, Max: 20},    // Hops: 0-20
			{Min: 0, Max: 20000}, // Geo distance: 0-20000km (half Earth circumference)
			{Min: 0, Max: 1},     // Trust score (inverted): 0-1
			{Min: 0, Max: 100},   // Bandwidth (inverted): 0-100Mbps
			{Min: 0, Max: 1},     // Packet loss: 0-100%
			{Min: 0, Max: 100},   // Connectivity (inverted): 0-100%
			{Min: 0, Max: 1},     // NAT score (inverted): 0-1
		},
	}
}

// NormalizePeerFeatures normalizes peer features to [0,1] using provided ranges.
func NormalizePeerFeatures(features []float64, ranges FeatureRanges) []float64 {
	result := make([]float64, len(features))
	for i, v := range features {
		if i < len(ranges.Ranges) {
			result[i] = scale01(v, ranges.Ranges[i].Min, ranges.Ranges[i].Max)
		} else {
			result[i] = v
		}
	}
	return result
}

// WeightedPeerFeatures applies per-feature weights after normalization.
func WeightedPeerFeatures(normalized []float64, weights []float64) []float64 {
	result := make([]float64, len(normalized))
	for i, v := range normalized {
		w := 1.0
		if i < len(weights) {
			w = weights[i]
		}
		result[i] = v * w
	}
	return result
}
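The feature helpers at the end of this file chain into one pipeline from raw peer measurements to KD-tree coordinates. A sketch under the same assumed import path (the weights are illustrative, mirroring the TypeScript demo):

```go
package main

import (
	"fmt"

	poindexter "example.com/poindexter" // hypothetical import path
)

func main() {
	raw := poindexter.StandardPeerFeatures{
		LatencyMs:       100,
		HopCount:        5,
		GeoDistanceKm:   500,
		TrustScore:      0.9,
		BandwidthMbps:   50,
		PacketLossRate:  0.02,
		ConnectivityPct: 95,
		NATScore:        0.9,
	}

	// 1. Invert "higher is better" features so lower always means better.
	features := raw.ToFeatureSlice()
	// 2. Scale each axis to [0,1] using the default ranges.
	normalized := poindexter.NormalizePeerFeatures(features, poindexter.DefaultPeerFeatureRanges())
	// 3. Stretch the axes that should dominate distance comparisons.
	coords := poindexter.WeightedPeerFeatures(normalized, []float64{1.5, 1.0, 0.5, 1.2, 0.8, 2.0, 1.0, 0.7})

	// coords is now suitable as KDPoint.Coords for peer-selection queries.
	fmt.Println(poindexter.StandardFeatureLabels())
	fmt.Println(coords)
}
```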

kdtree_analytics_test.go (662 additions, new file)

@@ -0,0 +1,662 @@
package poindexter

import (
	"math"
	"testing"
	"time"
)

// ============================================================================
// TreeAnalytics Tests
// ============================================================================

func TestNewTreeAnalytics(t *testing.T) {
	a := NewTreeAnalytics()
	if a == nil {
		t.Fatal("NewTreeAnalytics returned nil")
	}
	if a.QueryCount.Load() != 0 {
		t.Errorf("expected QueryCount=0, got %d", a.QueryCount.Load())
	}
	if a.InsertCount.Load() != 0 {
		t.Errorf("expected InsertCount=0, got %d", a.InsertCount.Load())
	}
	if a.CreatedAt.IsZero() {
		t.Error("CreatedAt should not be zero")
	}
}

func TestTreeAnalyticsRecordQuery(t *testing.T) {
	a := NewTreeAnalytics()

	a.RecordQuery(1000) // 1μs
	a.RecordQuery(2000) // 2μs
	a.RecordQuery(500)  // 0.5μs

	if a.QueryCount.Load() != 3 {
		t.Errorf("expected QueryCount=3, got %d", a.QueryCount.Load())
	}
	if a.TotalQueryTimeNs.Load() != 3500 {
		t.Errorf("expected TotalQueryTimeNs=3500, got %d", a.TotalQueryTimeNs.Load())
	}
	if a.MinQueryTimeNs.Load() != 500 {
		t.Errorf("expected MinQueryTimeNs=500, got %d", a.MinQueryTimeNs.Load())
	}
	if a.MaxQueryTimeNs.Load() != 2000 {
		t.Errorf("expected MaxQueryTimeNs=2000, got %d", a.MaxQueryTimeNs.Load())
	}
	if a.LastQueryTimeNs.Load() != 500 {
		t.Errorf("expected LastQueryTimeNs=500, got %d", a.LastQueryTimeNs.Load())
	}
}

func TestTreeAnalyticsSnapshot(t *testing.T) {
	a := NewTreeAnalytics()

	a.RecordQuery(1000)
	a.RecordQuery(3000)
	a.RecordInsert()
	a.RecordInsert()
	a.RecordDelete()
	a.RecordRebuild()

	snap := a.Snapshot()

	if snap.QueryCount != 2 {
		t.Errorf("expected QueryCount=2, got %d", snap.QueryCount)
	}
	if snap.InsertCount != 2 {
		t.Errorf("expected InsertCount=2, got %d", snap.InsertCount)
	}
	if snap.DeleteCount != 1 {
		t.Errorf("expected DeleteCount=1, got %d", snap.DeleteCount)
	}
	if snap.AvgQueryTimeNs != 2000 {
		t.Errorf("expected AvgQueryTimeNs=2000, got %d", snap.AvgQueryTimeNs)
	}
	if snap.MinQueryTimeNs != 1000 {
		t.Errorf("expected MinQueryTimeNs=1000, got %d", snap.MinQueryTimeNs)
	}
	if snap.MaxQueryTimeNs != 3000 {
		t.Errorf("expected MaxQueryTimeNs=3000, got %d", snap.MaxQueryTimeNs)
	}
	if snap.BackendRebuildCnt != 1 {
		t.Errorf("expected BackendRebuildCnt=1, got %d", snap.BackendRebuildCnt)
	}
}

func TestTreeAnalyticsReset(t *testing.T) {
	a := NewTreeAnalytics()

	a.RecordQuery(1000)
	a.RecordInsert()
	a.RecordDelete()

	a.Reset()

	if a.QueryCount.Load() != 0 {
		t.Errorf("expected QueryCount=0 after reset, got %d", a.QueryCount.Load())
	}
	if a.InsertCount.Load() != 0 {
		t.Errorf("expected InsertCount=0 after reset, got %d", a.InsertCount.Load())
	}
	if a.DeleteCount.Load() != 0 {
		t.Errorf("expected DeleteCount=0 after reset, got %d", a.DeleteCount.Load())
	}
}

// ============================================================================
// PeerAnalytics Tests
// ============================================================================

func TestNewPeerAnalytics(t *testing.T) {
	p := NewPeerAnalytics()
	if p == nil {
		t.Fatal("NewPeerAnalytics returned nil")
	}
}

func TestPeerAnalyticsRecordSelection(t *testing.T) {
	p := NewPeerAnalytics()

	p.RecordSelection("peer1", 0.5)
	p.RecordSelection("peer1", 0.3)
	p.RecordSelection("peer2", 1.0)

	stats := p.GetPeerStats("peer1")
	if stats.SelectionCount != 2 {
		t.Errorf("expected peer1 SelectionCount=2, got %d", stats.SelectionCount)
	}
	if math.Abs(stats.AvgDistance-0.4) > 0.001 {
		t.Errorf("expected peer1 AvgDistance~0.4, got %f", stats.AvgDistance)
	}

	stats2 := p.GetPeerStats("peer2")
	if stats2.SelectionCount != 1 {
		t.Errorf("expected peer2 SelectionCount=1, got %d", stats2.SelectionCount)
	}
}

func TestPeerAnalyticsGetAllPeerStats(t *testing.T) {
	p := NewPeerAnalytics()

	p.RecordSelection("peer1", 0.5)
	p.RecordSelection("peer1", 0.5)
	p.RecordSelection("peer2", 1.0)
	p.RecordSelection("peer3", 0.8)
	p.RecordSelection("peer3", 0.8)
	p.RecordSelection("peer3", 0.8)

	all := p.GetAllPeerStats()
	if len(all) != 3 {
		t.Errorf("expected 3 peers, got %d", len(all))
	}

	// Should be sorted by selection count descending
	if all[0].PeerID != "peer3" || all[0].SelectionCount != 3 {
		t.Errorf("expected first peer to be peer3 with count=3, got %s with count=%d",
			all[0].PeerID, all[0].SelectionCount)
	}
}

func TestPeerAnalyticsGetTopPeers(t *testing.T) {
	p := NewPeerAnalytics()

	for i := 0; i < 5; i++ {
		p.RecordSelection("peer1", 0.5)
	}
	for i := 0; i < 3; i++ {
		p.RecordSelection("peer2", 0.3)
	}
	p.RecordSelection("peer3", 0.1)

	top := p.GetTopPeers(2)
	if len(top) != 2 {
		t.Errorf("expected 2 top peers, got %d", len(top))
	}
	if top[0].PeerID != "peer1" {
		t.Errorf("expected top peer to be peer1, got %s", top[0].PeerID)
	}
	if top[1].PeerID != "peer2" {
		t.Errorf("expected second peer to be peer2, got %s", top[1].PeerID)
	}
}

func TestPeerAnalyticsReset(t *testing.T) {
	p := NewPeerAnalytics()

	p.RecordSelection("peer1", 0.5)
	p.Reset()

	stats := p.GetAllPeerStats()
	if len(stats) != 0 {
		t.Errorf("expected 0 peers after reset, got %d", len(stats))
	}
}

// ============================================================================
// DistributionStats Tests
// ============================================================================

func TestComputeDistributionStatsEmpty(t *testing.T) {
	stats := ComputeDistributionStats(nil)
	if stats.Count != 0 {
		t.Errorf("expected Count=0 for empty input, got %d", stats.Count)
	}
}

func TestComputeDistributionStatsSingle(t *testing.T) {
	stats := ComputeDistributionStats([]float64{5.0})
	if stats.Count != 1 {
		t.Errorf("expected Count=1, got %d", stats.Count)
	}
	if stats.Min != 5.0 || stats.Max != 5.0 {
		t.Errorf("expected Min=Max=5.0, got Min=%f, Max=%f", stats.Min, stats.Max)
	}
	if stats.Mean != 5.0 {
		t.Errorf("expected Mean=5.0, got %f", stats.Mean)
	}
	if stats.Median != 5.0 {
		t.Errorf("expected Median=5.0, got %f", stats.Median)
	}
}

func TestComputeDistributionStatsMultiple(t *testing.T) {
	// Values: 1, 2, 3, 4, 5 - mean=3, median=3
	stats := ComputeDistributionStats([]float64{1, 2, 3, 4, 5})

	if stats.Count != 5 {
		t.Errorf("expected Count=5, got %d", stats.Count)
	}
	if stats.Min != 1.0 {
		t.Errorf("expected Min=1.0, got %f", stats.Min)
	}
	if stats.Max != 5.0 {
		t.Errorf("expected Max=5.0, got %f", stats.Max)
	}
	if stats.Mean != 3.0 {
		t.Errorf("expected Mean=3.0, got %f", stats.Mean)
	}
	if stats.Median != 3.0 {
		t.Errorf("expected Median=3.0, got %f", stats.Median)
	}
	// Variance = 2.0 for this dataset
	if math.Abs(stats.Variance-2.0) > 0.001 {
		t.Errorf("expected Variance~2.0, got %f", stats.Variance)
	}
}

func TestComputeDistributionStatsPercentiles(t *testing.T) {
	// 100 values from 0 to 99
	values := make([]float64, 100)
	for i := 0; i < 100; i++ {
		values[i] = float64(i)
	}
	stats := ComputeDistributionStats(values)

	// P25 should be around 24.75, P75 around 74.25
	if math.Abs(stats.P25-24.75) > 0.1 {
		t.Errorf("expected P25~24.75, got %f", stats.P25)
	}
	if math.Abs(stats.P75-74.25) > 0.1 {
		t.Errorf("expected P75~74.25, got %f", stats.P75)
	}
	if math.Abs(stats.P90-89.1) > 0.1 {
		t.Errorf("expected P90~89.1, got %f", stats.P90)
	}
}

// ============================================================================
// AxisDistribution Tests
// ============================================================================

func TestComputeAxisDistributions(t *testing.T) {
	points := []KDPoint[string]{
		{ID: "a", Coords: []float64{1.0, 10.0}},
		{ID: "b", Coords: []float64{2.0, 20.0}},
		{ID: "c", Coords: []float64{3.0, 30.0}},
	}

	dists := ComputeAxisDistributions(points, []string{"x", "y"})

	if len(dists) != 2 {
		t.Errorf("expected 2 axis distributions, got %d", len(dists))
	}

	if dists[0].Axis != 0 || dists[0].Name != "x" {
		t.Errorf("expected first axis=0, name=x, got axis=%d, name=%s", dists[0].Axis, dists[0].Name)
	}
	if dists[0].Stats.Mean != 2.0 {
		t.Errorf("expected axis 0 mean=2.0, got %f", dists[0].Stats.Mean)
	}

	if dists[1].Axis != 1 || dists[1].Name != "y" {
		t.Errorf("expected second axis=1, name=y, got axis=%d, name=%s", dists[1].Axis, dists[1].Name)
	}
	if dists[1].Stats.Mean != 20.0 {
		t.Errorf("expected axis 1 mean=20.0, got %f", dists[1].Stats.Mean)
	}
}

// ============================================================================
// NAT Routing Tests
// ============================================================================

func TestPeerQualityScoreDefaults(t *testing.T) {
	// Perfect peer
	perfect := NATRoutingMetrics{
		ConnectivityScore: 1.0,
		SymmetryScore:     1.0,
		RelayProbability:  0.0,
		DirectSuccessRate: 1.0,
		AvgRTTMs:          10,
		JitterMs:          5,
		PacketLossRate:    0.0,
		BandwidthMbps:     100,
		NATType:           string(NATTypeOpen),
	}
	score := PeerQualityScore(perfect, nil)
	if score < 0.9 {
		t.Errorf("expected perfect peer score > 0.9, got %f", score)
	}

	// Poor peer
	poor := NATRoutingMetrics{
		ConnectivityScore: 0.2,
		SymmetryScore:     0.1,
		RelayProbability:  0.9,
		DirectSuccessRate: 0.1,
		AvgRTTMs:          500,
		JitterMs:          100,
		PacketLossRate:    0.5,
		BandwidthMbps:     1,
		NATType:           string(NATTypeSymmetric),
	}
	poorScore := PeerQualityScore(poor, nil)
	if poorScore > 0.5 {
		t.Errorf("expected poor peer score < 0.5, got %f", poorScore)
	}
	if poorScore >= score {
		t.Error("poor peer should have lower score than perfect peer")
	}
}

func TestPeerQualityScoreCustomWeights(t *testing.T) {
	metrics := NATRoutingMetrics{
		ConnectivityScore: 1.0,
		SymmetryScore:     0.5,
		RelayProbability:  0.0,
		DirectSuccessRate: 1.0,
		AvgRTTMs:          100,
		JitterMs:          10,
		PacketLossRate:    0.01,
		BandwidthMbps:     50,
		NATType:           string(NATTypeFullCone),
	}

	// Weight latency heavily
	latencyWeights := QualityWeights{
		Latency:       10.0,
		Jitter:        1.0,
		PacketLoss:    1.0,
		Bandwidth:     1.0,
		Connectivity:  1.0,
		Symmetry:      1.0,
		DirectSuccess: 1.0,
		RelayPenalty:  1.0,
		NATType:       1.0,
	}
	scoreLatency := PeerQualityScore(metrics, &latencyWeights)

	// Weight bandwidth heavily
	bandwidthWeights := QualityWeights{
		Latency:       1.0,
		Jitter:        1.0,
		PacketLoss:    1.0,
		Bandwidth:     10.0,
		Connectivity:  1.0,
		Symmetry:      1.0,
		DirectSuccess: 1.0,
		RelayPenalty:  1.0,
		NATType:       1.0,
	}
	scoreBandwidth := PeerQualityScore(metrics, &bandwidthWeights)

	// Scores should differ based on weights
	if scoreLatency == scoreBandwidth {
		t.Error("different weights should produce different scores")
	}
}

func TestDefaultQualityWeights(t *testing.T) {
	w := DefaultQualityWeights()
	if w.Latency <= 0 {
		t.Error("Latency weight should be positive")
	}
	if w.Total() <= 0 {
		t.Error("Total weights should be positive")
	}
}

func TestNatTypeScore(t *testing.T) {
	tests := []struct {
		natType  string
		minScore float64
		maxScore float64
	}{
		{string(NATTypeOpen), 0.9, 1.0},
		{string(NATTypeFullCone), 0.8, 1.0},
		{string(NATTypeSymmetric), 0.2, 0.4},
		{string(NATTypeRelayRequired), 0.0, 0.1},
		{"unknown", 0.3, 0.5},
	}

	for _, tc := range tests {
		score := natTypeScore(tc.natType)
		if score < tc.minScore || score > tc.maxScore {
			t.Errorf("natType %s: expected score in [%f, %f], got %f",
				tc.natType, tc.minScore, tc.maxScore, score)
		}
	}
}

// ============================================================================
// Trust Score Tests
// ============================================================================

func TestComputeTrustScoreNewPeer(t *testing.T) {
	// New peer with no history
	metrics := TrustMetrics{
		SuccessfulTransactions: 0,
		FailedTransactions:     0,
		AgeSeconds:             86400, // 1 day old
	}
	score := ComputeTrustScore(metrics)
	// New peer should get moderate trust
	if score < 0.4 || score > 0.7 {
		t.Errorf("expected new peer score in [0.4, 0.7], got %f", score)
	}
}

func TestComputeTrustScoreGoodPeer(t *testing.T) {
	metrics := TrustMetrics{
		SuccessfulTransactions: 100,
		FailedTransactions:     2,
		AgeSeconds:             86400 * 30, // 30 days
		VouchCount:             5,
		FlagCount:              0,
		LastSuccessAt:          time.Now(),
	}
	score := ComputeTrustScore(metrics)
	if score < 0.8 {
		t.Errorf("expected good peer score > 0.8, got %f", score)
	}
}

func TestComputeTrustScoreBadPeer(t *testing.T) {
	metrics := TrustMetrics{
		SuccessfulTransactions: 5,
		FailedTransactions:     20,
		AgeSeconds:             86400,
		VouchCount:             0,
		FlagCount:              10,
	}
	score := ComputeTrustScore(metrics)
	if score > 0.3 {
		t.Errorf("expected bad peer score < 0.3, got %f", score)
	}
}

// ============================================================================
// Feature Normalization Tests
// ============================================================================

func TestStandardPeerFeaturesToSlice(t *testing.T) {
	features := StandardPeerFeatures{
		LatencyMs:       100,
		HopCount:        5,
		GeoDistanceKm:   1000,
		TrustScore:      0.9,
		BandwidthMbps:   50,
		PacketLossRate:  0.01,
		ConnectivityPct: 95,
		NATScore:        0.8,
	}

	slice := features.ToFeatureSlice()
	if len(slice) != 8 {
		t.Errorf("expected 8 features, got %d", len(slice))
	}

	// TrustScore should be inverted (0.9 -> 0.1)
	if math.Abs(slice[3]-0.1) > 0.001 {
		t.Errorf("expected inverted trust score ~0.1, got %f", slice[3])
	}
}

func TestNormalizePeerFeatures(t *testing.T) {
	features := []float64{100, 5, 1000, 0.5, 50, 0.01, 50, 0.5}
	ranges := DefaultPeerFeatureRanges()

	normalized := NormalizePeerFeatures(features, ranges)

	for i, v := range normalized {
		if v < 0 || v > 1 {
			t.Errorf("normalized feature %d out of range [0,1]: %f", i, v)
		}
	}
}

func TestWeightedPeerFeatures(t *testing.T) {
	normalized := []float64{0.5, 0.5, 0.5, 0.5}
|
||||
weights := []float64{1.0, 2.0, 0.5, 1.5}
|
||||
|
||||
weighted := WeightedPeerFeatures(normalized, weights)
|
||||
|
||||
expected := []float64{0.5, 1.0, 0.25, 0.75}
|
||||
for i, v := range weighted {
|
||||
if math.Abs(v-expected[i]) > 0.001 {
|
||||
t.Errorf("weighted feature %d: expected %f, got %f", i, expected[i], v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStandardFeatureLabels(t *testing.T) {
|
||||
labels := StandardFeatureLabels()
|
||||
if len(labels) != 8 {
|
||||
t.Errorf("expected 8 feature labels, got %d", len(labels))
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// KDTree Analytics Integration Tests
|
||||
// ============================================================================
|
||||
|
||||
func TestKDTreeAnalyticsIntegration(t *testing.T) {
|
||||
points := []KDPoint[string]{
|
||||
{ID: "a", Coords: []float64{0, 0}, Value: "A"},
|
||||
{ID: "b", Coords: []float64{1, 1}, Value: "B"},
|
||||
{ID: "c", Coords: []float64{2, 2}, Value: "C"},
|
||||
}
|
||||
tree, err := NewKDTree(points)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check initial analytics
|
||||
if tree.Analytics() == nil {
|
||||
t.Fatal("Analytics should not be nil")
|
||||
}
|
||||
if tree.PeerAnalytics() == nil {
|
||||
t.Fatal("PeerAnalytics should not be nil")
|
||||
}
|
||||
|
||||
// Perform queries
|
||||
tree.Nearest([]float64{0.1, 0.1})
|
||||
tree.Nearest([]float64{0.9, 0.9})
|
||||
tree.KNearest([]float64{0.5, 0.5}, 2)
|
||||
|
||||
snap := tree.GetAnalyticsSnapshot()
|
||||
if snap.QueryCount != 3 {
|
||||
t.Errorf("expected QueryCount=3, got %d", snap.QueryCount)
|
||||
}
|
||||
if snap.InsertCount != 0 {
|
||||
t.Errorf("expected InsertCount=0, got %d", snap.InsertCount)
|
||||
}
|
||||
|
||||
// Check peer stats
|
||||
peerStats := tree.GetPeerStats()
|
||||
if len(peerStats) == 0 {
|
||||
t.Error("expected some peer stats after queries")
|
||||
}
|
||||
|
||||
// Peer 'a' should have been selected for query [0.1, 0.1]
|
||||
var foundA bool
|
||||
for _, ps := range peerStats {
|
||||
if ps.PeerID == "a" && ps.SelectionCount > 0 {
|
||||
foundA = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundA {
|
||||
t.Error("expected peer 'a' to be recorded in analytics")
|
||||
}
|
||||
|
||||
// Test top peers
|
||||
topPeers := tree.GetTopPeers(1)
|
||||
if len(topPeers) != 1 {
|
||||
t.Errorf("expected 1 top peer, got %d", len(topPeers))
|
||||
}
|
||||
|
||||
// Test insert analytics
|
||||
tree.Insert(KDPoint[string]{ID: "d", Coords: []float64{3, 3}, Value: "D"})
|
||||
snap = tree.GetAnalyticsSnapshot()
|
||||
if snap.InsertCount != 1 {
|
||||
t.Errorf("expected InsertCount=1, got %d", snap.InsertCount)
|
||||
}
|
||||
|
||||
// Test delete analytics
|
||||
tree.DeleteByID("d")
|
||||
snap = tree.GetAnalyticsSnapshot()
|
||||
if snap.DeleteCount != 1 {
|
||||
t.Errorf("expected DeleteCount=1, got %d", snap.DeleteCount)
|
||||
}
|
||||
|
||||
// Test reset
|
||||
tree.ResetAnalytics()
|
||||
snap = tree.GetAnalyticsSnapshot()
|
||||
if snap.QueryCount != 0 || snap.InsertCount != 0 || snap.DeleteCount != 0 {
|
||||
t.Error("expected all counts to be 0 after reset")
|
||||
}
|
||||
}
|
||||
|
||||
func TestKDTreeDistanceDistribution(t *testing.T) {
|
||||
points := []KDPoint[string]{
|
||||
{ID: "a", Coords: []float64{0, 10}, Value: "A"},
|
||||
{ID: "b", Coords: []float64{1, 20}, Value: "B"},
|
||||
{ID: "c", Coords: []float64{2, 30}, Value: "C"},
|
||||
}
|
||||
tree, _ := NewKDTree(points)
|
||||
|
||||
dists := tree.ComputeDistanceDistribution([]string{"x", "y"})
|
||||
if len(dists) != 2 {
|
||||
t.Errorf("expected 2 axis distributions, got %d", len(dists))
|
||||
}
|
||||
|
||||
if dists[0].Name != "x" || dists[0].Stats.Mean != 1.0 {
|
||||
t.Errorf("unexpected axis 0 distribution: name=%s, mean=%f",
|
||||
dists[0].Name, dists[0].Stats.Mean)
|
||||
}
|
||||
if dists[1].Name != "y" || dists[1].Stats.Mean != 20.0 {
|
||||
t.Errorf("unexpected axis 1 distribution: name=%s, mean=%f",
|
||||
dists[1].Name, dists[1].Stats.Mean)
|
||||
}
|
||||
}
|
||||
|
||||
func TestKDTreePointsExport(t *testing.T) {
|
||||
points := []KDPoint[string]{
|
||||
{ID: "a", Coords: []float64{0, 0}, Value: "A"},
|
||||
{ID: "b", Coords: []float64{1, 1}, Value: "B"},
|
||||
}
|
||||
tree, _ := NewKDTree(points)
|
||||
|
||||
exported := tree.Points()
|
||||
if len(exported) != 2 {
|
||||
t.Errorf("expected 2 points, got %d", len(exported))
|
||||
}
|
||||
|
||||
// Verify it's a copy, not a reference
|
||||
exported[0].ID = "modified"
|
||||
original := tree.Points()
|
||||
if original[0].ID == "modified" {
|
||||
t.Error("Points() should return a copy, not a reference")
|
||||
}
|
||||
}
|
||||
|
||||
func TestKDTreeBackend(t *testing.T) {
|
||||
tree, _ := NewKDTreeFromDim[string](2)
|
||||
backend := tree.Backend()
|
||||
if backend != BackendLinear && backend != BackendGonum {
|
||||
t.Errorf("unexpected backend: %s", backend)
|
||||
}
|
||||
}
|
||||
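The tests above pin down the scoring contracts end to end. For orientation, here is a minimal TypeScript sketch of the same scoring exercised from JS through the WASM API added in this commit; the `./loader.js` import path and all metric values are illustrative assumptions, not fixtures from the repo:

// Sketch only: score one peer from JS, mirroring the Go tests above.
import { init } from './loader.js'; // assumed import path

const px = await init();

// Roughly the "good" shape from TestPeerQualityScoreDefaults.
const quality = await px.computePeerQualityScore({
  connectivityScore: 0.95,
  symmetryScore: 0.9,
  relayProbability: 0.05,
  directSuccessRate: 0.9,
  avgRttMs: 40,
  jitterMs: 8,
  packetLossRate: 0.01,
  bandwidthMbps: 80,
  natType: 'full_cone',
});

// Roughly TestComputeTrustScoreGoodPeer: long history, few failures.
const trust = await px.computeTrustScore({
  reputationScore: 0.7,
  successfulTransactions: 100,
  failedTransactions: 2,
  ageSeconds: 86400 * 30,
  vouchCount: 5,
  flagCount: 0,
  proofOfWork: 0,
});

console.log({ quality, trust }); // both expected in [0, 1] per the tests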
173 npm/poindexter-wasm/index.d.ts (vendored)
@ -15,7 +15,153 @@ export interface KNearestResult {
  dists: number[];
}

// ============================================================================
// Analytics Types
// ============================================================================

/** Tree operation analytics snapshot */
export interface TreeAnalytics {
  queryCount: number;
  insertCount: number;
  deleteCount: number;
  avgQueryTimeNs: number;
  minQueryTimeNs: number;
  maxQueryTimeNs: number;
  lastQueryTimeNs: number;
  lastQueryAt: number; // Unix milliseconds
  createdAt: number; // Unix milliseconds
  backendRebuildCount: number;
  lastRebuiltAt: number; // Unix milliseconds
}

/** Per-peer selection statistics */
export interface PeerStats {
  peerId: string;
  selectionCount: number;
  avgDistance: number;
  lastSelectedAt: number; // Unix milliseconds
}

/** Statistical distribution analysis */
export interface DistributionStats {
  count: number;
  min: number;
  max: number;
  mean: number;
  median: number;
  stdDev: number;
  p25: number;
  p75: number;
  p90: number;
  p99: number;
  variance: number;
  skewness: number;
  sampleSize?: number;
  computedAt?: number; // Unix milliseconds
}

/** Per-axis distribution in the KD-Tree */
export interface AxisDistribution {
  axis: number;
  name: string;
  stats: DistributionStats;
}

// ============================================================================
// NAT Routing Types
// ============================================================================

/** NAT type classification for routing decisions */
export type NATTypeClassification =
  | 'open'
  | 'full_cone'
  | 'restricted_cone'
  | 'port_restricted'
  | 'symmetric'
  | 'symmetric_udp'
  | 'cgnat'
  | 'firewalled'
  | 'relay_required'
  | 'unknown';

/** Network metrics for NAT routing decisions */
export interface NATRoutingMetrics {
  connectivityScore: number; // 0-1: higher = better reachability
  symmetryScore: number; // 0-1: higher = more symmetric NAT
  relayProbability: number; // 0-1: likelihood peer needs relay
  directSuccessRate: number; // 0-1: historical direct connection success
  avgRttMs: number; // Average RTT in milliseconds
  jitterMs: number; // RTT variance in milliseconds
  packetLossRate: number; // 0-1: packet loss rate
  bandwidthMbps: number; // Bandwidth estimate in Mbps
  natType: NATTypeClassification;
  lastProbeAt?: number; // Unix milliseconds
}

/** Weights for peer quality scoring */
export interface QualityWeights {
  latency: number;
  jitter: number;
  packetLoss: number;
  bandwidth: number;
  connectivity: number;
  symmetry: number;
  directSuccess: number;
  relayPenalty: number;
  natType: number;
}

/** Trust metrics for peer reputation */
export interface TrustMetrics {
  reputationScore: number; // 0-1: aggregated trust score
  successfulTransactions: number;
  failedTransactions: number;
  ageSeconds: number; // How long this peer has been known
  lastSuccessAt?: number; // Unix milliseconds
  lastFailureAt?: number; // Unix milliseconds
  vouchCount: number; // Peers vouching for this peer
  flagCount: number; // Reports against this peer
  proofOfWork: number; // Computational proof of stake/work
}

/** Axis min/max range for normalization */
export interface AxisRange {
  min: number;
  max: number;
}

/** Feature ranges for peer feature normalization */
export interface FeatureRanges {
  ranges: AxisRange[];
  labels?: string[];
}

/** Standard peer features for KD-Tree based selection */
export interface StandardPeerFeatures {
  latencyMs: number;
  hopCount: number;
  geoDistanceKm: number;
  trustScore: number;
  bandwidthMbps: number;
  packetLossRate: number;
  connectivityPct: number;
  natScore: number;
}

/** Export data with all points */
export interface TreeExport {
  dim: number;
  len: number;
  backend: string;
  points: PxPoint[];
}

// ============================================================================
// Tree Interface
// ============================================================================

export interface PxTree {
  // Core operations
  len(): Promise<number>;
  dim(): Promise<number>;
  insert(point: PxPoint): Promise<boolean>;

@ -24,18 +170,45 @@ export interface PxTree {
  kNearest(query: number[], k: number): Promise<KNearestResult>;
  radius(query: number[], r: number): Promise<KNearestResult>;
  exportJSON(): Promise<string>;

  // Analytics operations
  getAnalytics(): Promise<TreeAnalytics>;
  getPeerStats(): Promise<PeerStats[]>;
  getTopPeers(n: number): Promise<PeerStats[]>;
  getAxisDistributions(axisNames?: string[]): Promise<AxisDistribution[]>;
  resetAnalytics(): Promise<boolean>;
}

// ============================================================================
// Init Options
// ============================================================================

export interface InitOptions {
  wasmURL?: string;
  wasmExecURL?: string;
  instantiateWasm?: (source: ArrayBuffer, importObject: WebAssembly.Imports) => Promise<WebAssembly.Instance> | WebAssembly.Instance;
}

// ============================================================================
// Main API
// ============================================================================

export interface PxAPI {
  // Core functions
  version(): Promise<string>;
  hello(name?: string): Promise<string>;
  newTree(dim: number): Promise<PxTree>;

  // Statistics utilities
  computeDistributionStats(distances: number[]): Promise<DistributionStats>;

  // NAT routing / peer quality functions
  computePeerQualityScore(metrics: NATRoutingMetrics, weights?: QualityWeights): Promise<number>;
  computeTrustScore(metrics: TrustMetrics): Promise<number>;
  getDefaultQualityWeights(): Promise<QualityWeights>;
  getDefaultPeerFeatureRanges(): Promise<FeatureRanges>;
  normalizePeerFeatures(features: number[], ranges?: FeatureRanges): Promise<number[]>;
  weightedPeerFeatures(normalized: number[], weights: number[]): Promise<number[]>;
}

export function init(options?: InitOptions): Promise<PxAPI>;
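Together these declarations support a monitor-then-adapt loop on the JS side. A minimal sketch against the PxTree surface above; the tree contents, the axis names, and the `px` handle (obtained from `init()` as in the earlier sketch) are all illustrative:

// Sketch only: reading analytics off a live tree.
const tree = await px.newTree(2); // px: PxAPI handle from init()
await tree.insert({ id: 'peer-a', coords: [0.1, 0.9], value: 'Peer A' });
await tree.nearest([0.2, 0.8]);

const analytics = await tree.getAnalytics();  // TreeAnalytics snapshot
const top = await tree.getTopPeers(5);        // up to 5 PeerStats entries
const axes = await tree.getAxisDistributions(['latency', 'trust']); // axis names are illustrative
await tree.resetAnalytics();                  // e.g. clear counters per epoch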
loader.js
@ -40,6 +40,7 @@ function call(name, ...args) {

class PxTree {
  constructor(treeId) { this.treeId = treeId; }
  // Core operations
  async len() { return call('pxTreeLen', this.treeId); }
  async dim() { return call('pxTreeDim', this.treeId); }
  async insert(point) { return call('pxInsert', this.treeId, point); }

@ -48,6 +49,12 @@ class PxTree {
  async kNearest(query, k) { return call('pxKNearest', this.treeId, query, k); }
  async radius(query, r) { return call('pxRadius', this.treeId, query, r); }
  async exportJSON() { return call('pxExportJSON', this.treeId); }
  // Analytics operations
  async getAnalytics() { return call('pxGetAnalytics', this.treeId); }
  async getPeerStats() { return call('pxGetPeerStats', this.treeId); }
  async getTopPeers(n) { return call('pxGetTopPeers', this.treeId, n); }
  async getAxisDistributions(axisNames) { return call('pxGetAxisDistributions', this.treeId, axisNames); }
  async resetAnalytics() { return call('pxResetAnalytics', this.treeId); }
}

export async function init(options = {}) {

@ -78,12 +85,22 @@ export async function init(options = {}) {
  go.run(result.instance);

  const api = {
    // Core functions
    version: async () => call('pxVersion'),
    hello: async (name) => call('pxHello', name ?? ''),
    newTree: async (dim) => {
      const info = call('pxNewTree', dim);
      return new PxTree(info.treeId);
    },
    // Statistics utilities
    computeDistributionStats: async (distances) => call('pxComputeDistributionStats', distances),
    // NAT routing / peer quality functions
    computePeerQualityScore: async (metrics, weights) => call('pxComputePeerQualityScore', metrics, weights),
    computeTrustScore: async (metrics) => call('pxComputeTrustScore', metrics),
    getDefaultQualityWeights: async () => call('pxGetDefaultQualityWeights'),
    getDefaultPeerFeatureRanges: async () => call('pxGetDefaultPeerFeatureRanges'),
    normalizePeerFeatures: async (features, ranges) => call('pxNormalizePeerFeatures', features, ranges),
    weightedPeerFeatures: async (normalized, weights) => call('pxWeightedPeerFeatures', normalized, weights)
  };

  return api;
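The last four loader methods form a small pipeline: raw measurements are normalized into [0, 1] against per-axis ranges, weighted, and then usable directly as KD-tree coordinates. A sketch under the StandardPeerFeatures ordering; the sample values and per-axis weights are illustrative assumptions, not library defaults:

// Sketch only: feature order per StandardPeerFeatures is (latencyMs,
// hopCount, geoDistanceKm, trustScore, bandwidthMbps, packetLossRate,
// connectivityPct, natScore).
const raw = [120, 6, 800, 0.85, 40, 0.02, 90, 0.7];
const normalized = await px.normalizePeerFeatures(raw);  // default ranges
const axisWeights = [2, 1, 0.5, 1.5, 1, 1, 1, 1];        // illustrative latency emphasis
const coords = await px.weightedPeerFeatures(normalized, axisWeights);

const peerTree = await px.newTree(8);                    // one dimension per feature
await peerTree.insert({ id: 'peer-x', coords, value: 'Peer X' });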
328 wasm/main.go
@ -215,14 +215,318 @@ func exportJSON(_ js.Value, args []js.Value) (any, error) {
	if !ok {
		return nil, fmt.Errorf("unknown treeId %d", id)
	}
	// Previously a naive metrics-only export ({dim, len}), since the KD-tree
	// exposed no point iteration; t.Points() now enables a full export.
	// Export all points
	points := t.Points()
	jsPts := make([]any, len(points))
	for i, p := range points {
		jsPts[i] = map[string]any{"id": p.ID, "coords": p.Coords, "value": p.Value}
	}
	m := map[string]any{
		"dim":     t.Dim(),
		"len":     t.Len(),
		"backend": string(t.Backend()),
		"points":  jsPts,
	}
	b, _ := json.Marshal(m)
	return string(b), nil
}

func getAnalytics(_ js.Value, args []js.Value) (any, error) {
	// getAnalytics(treeId) -> analytics snapshot
	if len(args) < 1 {
		return nil, errors.New("getAnalytics(treeId)")
	}
	id := args[0].Int()
	t, ok := treeRegistry[id]
	if !ok {
		return nil, fmt.Errorf("unknown treeId %d", id)
	}
	snap := t.GetAnalyticsSnapshot()
	return map[string]any{
		"queryCount":          snap.QueryCount,
		"insertCount":         snap.InsertCount,
		"deleteCount":         snap.DeleteCount,
		"avgQueryTimeNs":      snap.AvgQueryTimeNs,
		"minQueryTimeNs":      snap.MinQueryTimeNs,
		"maxQueryTimeNs":      snap.MaxQueryTimeNs,
		"lastQueryTimeNs":     snap.LastQueryTimeNs,
		"lastQueryAt":         snap.LastQueryAt.UnixMilli(),
		"createdAt":           snap.CreatedAt.UnixMilli(),
		"backendRebuildCount": snap.BackendRebuildCnt,
		"lastRebuiltAt":       snap.LastRebuiltAt.UnixMilli(),
	}, nil
}

func getPeerStats(_ js.Value, args []js.Value) (any, error) {
	// getPeerStats(treeId) -> array of peer stats
	if len(args) < 1 {
		return nil, errors.New("getPeerStats(treeId)")
	}
	id := args[0].Int()
	t, ok := treeRegistry[id]
	if !ok {
		return nil, fmt.Errorf("unknown treeId %d", id)
	}
	stats := t.GetPeerStats()
	jsStats := make([]any, len(stats))
	for i, s := range stats {
		jsStats[i] = map[string]any{
			"peerId":         s.PeerID,
			"selectionCount": s.SelectionCount,
			"avgDistance":    s.AvgDistance,
			"lastSelectedAt": s.LastSelectedAt.UnixMilli(),
		}
	}
	return jsStats, nil
}

func getTopPeers(_ js.Value, args []js.Value) (any, error) {
	// getTopPeers(treeId, n) -> array of top n peer stats
	if len(args) < 2 {
		return nil, errors.New("getTopPeers(treeId, n)")
	}
	id := args[0].Int()
	n := args[1].Int()
	t, ok := treeRegistry[id]
	if !ok {
		return nil, fmt.Errorf("unknown treeId %d", id)
	}
	stats := t.GetTopPeers(n)
	jsStats := make([]any, len(stats))
	for i, s := range stats {
		jsStats[i] = map[string]any{
			"peerId":         s.PeerID,
			"selectionCount": s.SelectionCount,
			"avgDistance":    s.AvgDistance,
			"lastSelectedAt": s.LastSelectedAt.UnixMilli(),
		}
	}
	return jsStats, nil
}

func getAxisDistributions(_ js.Value, args []js.Value) (any, error) {
	// getAxisDistributions(treeId, axisNames?: string[]) -> array of axis distribution stats
	if len(args) < 1 {
		return nil, errors.New("getAxisDistributions(treeId)")
	}
	id := args[0].Int()
	t, ok := treeRegistry[id]
	if !ok {
		return nil, fmt.Errorf("unknown treeId %d", id)
	}

	var axisNames []string
	if len(args) > 1 && !args[1].IsUndefined() && !args[1].IsNull() {
		ln := args[1].Length()
		axisNames = make([]string, ln)
		for i := 0; i < ln; i++ {
			axisNames[i] = args[1].Index(i).String()
		}
	}

	dists := t.ComputeDistanceDistribution(axisNames)
	jsDists := make([]any, len(dists))
	for i, d := range dists {
		jsDists[i] = map[string]any{
			"axis": d.Axis,
			"name": d.Name,
			"stats": map[string]any{
				"count":    d.Stats.Count,
				"min":      d.Stats.Min,
				"max":      d.Stats.Max,
				"mean":     d.Stats.Mean,
				"median":   d.Stats.Median,
				"stdDev":   d.Stats.StdDev,
				"p25":      d.Stats.P25,
				"p75":      d.Stats.P75,
				"p90":      d.Stats.P90,
				"p99":      d.Stats.P99,
				"variance": d.Stats.Variance,
				"skewness": d.Stats.Skewness,
			},
		}
	}
	return jsDists, nil
}

func resetAnalytics(_ js.Value, args []js.Value) (any, error) {
	// resetAnalytics(treeId) -> resets all analytics
	if len(args) < 1 {
		return nil, errors.New("resetAnalytics(treeId)")
	}
	id := args[0].Int()
	t, ok := treeRegistry[id]
	if !ok {
		return nil, fmt.Errorf("unknown treeId %d", id)
	}
	t.ResetAnalytics()
	return true, nil
}

func computeDistributionStats(_ js.Value, args []js.Value) (any, error) {
	// computeDistributionStats(distances: number[]) -> distribution stats
	if len(args) < 1 {
		return nil, errors.New("computeDistributionStats(distances)")
	}
	distances, err := getFloatSlice(args[0])
	if err != nil {
		return nil, err
	}
	stats := pd.ComputeDistributionStats(distances)
	return map[string]any{
		"count":      stats.Count,
		"min":        stats.Min,
		"max":        stats.Max,
		"mean":       stats.Mean,
		"median":     stats.Median,
		"stdDev":     stats.StdDev,
		"p25":        stats.P25,
		"p75":        stats.P75,
		"p90":        stats.P90,
		"p99":        stats.P99,
		"variance":   stats.Variance,
		"skewness":   stats.Skewness,
		"sampleSize": stats.SampleSize,
		"computedAt": stats.ComputedAt.UnixMilli(),
	}, nil
}

func computePeerQualityScore(_ js.Value, args []js.Value) (any, error) {
	// computePeerQualityScore(metrics: NATRoutingMetrics, weights?: QualityWeights) -> score
	if len(args) < 1 {
		return nil, errors.New("computePeerQualityScore(metrics)")
	}
	m := args[0]
	metrics := pd.NATRoutingMetrics{
		ConnectivityScore: m.Get("connectivityScore").Float(),
		SymmetryScore:     m.Get("symmetryScore").Float(),
		RelayProbability:  m.Get("relayProbability").Float(),
		DirectSuccessRate: m.Get("directSuccessRate").Float(),
		AvgRTTMs:          m.Get("avgRttMs").Float(),
		JitterMs:          m.Get("jitterMs").Float(),
		PacketLossRate:    m.Get("packetLossRate").Float(),
		BandwidthMbps:     m.Get("bandwidthMbps").Float(),
		NATType:           m.Get("natType").String(),
	}

	var weights *pd.QualityWeights
	if len(args) > 1 && !args[1].IsUndefined() && !args[1].IsNull() {
		w := args[1]
		weights = &pd.QualityWeights{
			Latency:       w.Get("latency").Float(),
			Jitter:        w.Get("jitter").Float(),
			PacketLoss:    w.Get("packetLoss").Float(),
			Bandwidth:     w.Get("bandwidth").Float(),
			Connectivity:  w.Get("connectivity").Float(),
			Symmetry:      w.Get("symmetry").Float(),
			DirectSuccess: w.Get("directSuccess").Float(),
			RelayPenalty:  w.Get("relayPenalty").Float(),
			NATType:       w.Get("natType").Float(),
		}
	}

	score := pd.PeerQualityScore(metrics, weights)
	return score, nil
}

func computeTrustScore(_ js.Value, args []js.Value) (any, error) {
	// computeTrustScore(metrics: TrustMetrics) -> score
	if len(args) < 1 {
		return nil, errors.New("computeTrustScore(metrics)")
	}
	m := args[0]
	metrics := pd.TrustMetrics{
		ReputationScore:        m.Get("reputationScore").Float(),
		SuccessfulTransactions: int64(m.Get("successfulTransactions").Int()),
		FailedTransactions:     int64(m.Get("failedTransactions").Int()),
		AgeSeconds:             int64(m.Get("ageSeconds").Int()),
		VouchCount:             m.Get("vouchCount").Int(),
		FlagCount:              m.Get("flagCount").Int(),
		ProofOfWork:            m.Get("proofOfWork").Float(),
	}

	score := pd.ComputeTrustScore(metrics)
	return score, nil
}

func getDefaultQualityWeights(_ js.Value, _ []js.Value) (any, error) {
	w := pd.DefaultQualityWeights()
	return map[string]any{
		"latency":       w.Latency,
		"jitter":        w.Jitter,
		"packetLoss":    w.PacketLoss,
		"bandwidth":     w.Bandwidth,
		"connectivity":  w.Connectivity,
		"symmetry":      w.Symmetry,
		"directSuccess": w.DirectSuccess,
		"relayPenalty":  w.RelayPenalty,
		"natType":       w.NATType,
	}, nil
}

func getDefaultPeerFeatureRanges(_ js.Value, _ []js.Value) (any, error) {
	ranges := pd.DefaultPeerFeatureRanges()
	jsRanges := make([]any, len(ranges.Ranges))
	for i, r := range ranges.Ranges {
		jsRanges[i] = map[string]any{
			"min": r.Min,
			"max": r.Max,
		}
	}
	return map[string]any{
		"ranges": jsRanges,
		"labels": pd.StandardFeatureLabels(),
	}, nil
}

func normalizePeerFeatures(_ js.Value, args []js.Value) (any, error) {
	// normalizePeerFeatures(features: number[], ranges?: FeatureRanges) -> number[]
	if len(args) < 1 {
		return nil, errors.New("normalizePeerFeatures(features)")
	}
	features, err := getFloatSlice(args[0])
	if err != nil {
		return nil, err
	}

	ranges := pd.DefaultPeerFeatureRanges()
	if len(args) > 1 && !args[1].IsUndefined() && !args[1].IsNull() {
		rangesArg := args[1].Get("ranges")
		if !rangesArg.IsUndefined() && !rangesArg.IsNull() {
			ln := rangesArg.Length()
			ranges.Ranges = make([]pd.AxisStats, ln)
			for i := 0; i < ln; i++ {
				r := rangesArg.Index(i)
				ranges.Ranges[i] = pd.AxisStats{
					Min: r.Get("min").Float(),
					Max: r.Get("max").Float(),
				}
			}
		}
	}

	normalized := pd.NormalizePeerFeatures(features, ranges)
	return normalized, nil
}

func weightedPeerFeatures(_ js.Value, args []js.Value) (any, error) {
	// weightedPeerFeatures(normalized: number[], weights: number[]) -> number[]
	if len(args) < 2 {
		return nil, errors.New("weightedPeerFeatures(normalized, weights)")
	}
	normalized, err := getFloatSlice(args[0])
	if err != nil {
		return nil, err
	}
	weights, err := getFloatSlice(args[1])
	if err != nil {
		return nil, err
	}

	weighted := pd.WeightedPeerFeatures(normalized, weights)
	return weighted, nil
}

func main() {
	// Export core API
	export("pxVersion", version)

@ -237,6 +541,22 @@ func main() {
	export("pxRadius", radius)
	export("pxExportJSON", exportJSON)

	// Export analytics API
	export("pxGetAnalytics", getAnalytics)
	export("pxGetPeerStats", getPeerStats)
	export("pxGetTopPeers", getTopPeers)
	export("pxGetAxisDistributions", getAxisDistributions)
	export("pxResetAnalytics", resetAnalytics)
	export("pxComputeDistributionStats", computeDistributionStats)

	// Export NAT routing / peer quality API
	export("pxComputePeerQualityScore", computePeerQualityScore)
	export("pxComputeTrustScore", computeTrustScore)
	export("pxGetDefaultQualityWeights", getDefaultQualityWeights)
	export("pxGetDefaultPeerFeatureRanges", getDefaultPeerFeatureRanges)
	export("pxNormalizePeerFeatures", normalizePeerFeatures)
	export("pxWeightedPeerFeatures", weightedPeerFeatures)

	// Keep running
	select {}
}
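Finally, pxComputeDistributionStats is usable standalone, without any tree, for example over RTT samples collected elsewhere. A minimal sketch; the sample values are made up:

// Sketch only: distribution stats over raw samples, no tree required.
const stats = await px.computeDistributionStats([12, 18, 25, 31, 44, 52, 80, 120]);
console.log(stats.mean, stats.p90, stats.stdDev); // fields per DistributionStats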