Merge pull request #4 from Snider/claude/expose-networking-analytics-LNUmx

feat: Expose networking analytics for KD-Tree NAT routing
Snider 2025-12-29 18:32:16 +00:00 committed by GitHub
commit e182d4f497
9 changed files with 4430 additions and 25 deletions

dns_tools.go (new file, 1006 additions)

File diff suppressed because it is too large.

dns_tools_test.go (new file, 732 additions)

@@ -0,0 +1,732 @@
package poindexter
import (
"strings"
"testing"
)
// ============================================================================
// External Tool Links Tests
// ============================================================================
func TestGetExternalToolLinks(t *testing.T) {
links := GetExternalToolLinks("example.com")
if links.Target != "example.com" {
t.Errorf("expected target=example.com, got %s", links.Target)
}
if links.Type != "domain" {
t.Errorf("expected type=domain, got %s", links.Type)
}
// Check MXToolbox links
if !strings.Contains(links.MXToolboxDNS, "mxtoolbox.com") {
t.Error("MXToolboxDNS should contain mxtoolbox.com")
}
if !strings.Contains(links.MXToolboxDNS, "example.com") {
t.Error("MXToolboxDNS should contain the domain")
}
if !strings.Contains(links.MXToolboxMX, "mxtoolbox.com") {
t.Error("MXToolboxMX should contain mxtoolbox.com")
}
if !strings.Contains(links.MXToolboxSPF, "spf") {
t.Error("MXToolboxSPF should contain 'spf'")
}
if !strings.Contains(links.MXToolboxDMARC, "dmarc") {
t.Error("MXToolboxDMARC should contain 'dmarc'")
}
// Check DNSChecker links
if !strings.Contains(links.DNSCheckerDNS, "dnschecker.org") {
t.Error("DNSCheckerDNS should contain dnschecker.org")
}
// Check other tools
if !strings.Contains(links.WhoIs, "who.is") {
t.Error("WhoIs should contain who.is")
}
if !strings.Contains(links.SSLLabs, "ssllabs.com") {
t.Error("SSLLabs should contain ssllabs.com")
}
if !strings.Contains(links.VirusTotal, "virustotal.com") {
t.Error("VirusTotal should contain virustotal.com")
}
}
func TestGetExternalToolLinksIP(t *testing.T) {
links := GetExternalToolLinksIP("8.8.8.8")
if links.Target != "8.8.8.8" {
t.Errorf("expected target=8.8.8.8, got %s", links.Target)
}
if links.Type != "ip" {
t.Errorf("expected type=ip, got %s", links.Type)
}
// Check IP-specific links
if !strings.Contains(links.IPInfo, "ipinfo.io") {
t.Error("IPInfo should contain ipinfo.io")
}
if !strings.Contains(links.IPInfo, "8.8.8.8") {
t.Error("IPInfo should contain the IP address")
}
if !strings.Contains(links.AbuseIPDB, "abuseipdb.com") {
t.Error("AbuseIPDB should contain abuseipdb.com")
}
if !strings.Contains(links.Shodan, "shodan.io") {
t.Error("Shodan should contain shodan.io")
}
if !strings.Contains(links.MXToolboxBlacklist, "blacklist") {
t.Error("MXToolboxBlacklist should contain 'blacklist'")
}
}
func TestGetExternalToolLinksEmail(t *testing.T) {
// Test with email address
links := GetExternalToolLinksEmail("test@example.com")
if links.Target != "test@example.com" {
t.Errorf("expected target=test@example.com, got %s", links.Target)
}
if links.Type != "email" {
t.Errorf("expected type=email, got %s", links.Type)
}
// Email tools should use the domain
if !strings.Contains(links.MXToolboxMX, "example.com") {
t.Error("MXToolboxMX should contain the domain from email")
}
if !strings.Contains(links.MXToolboxSPF, "spf") {
t.Error("MXToolboxSPF should contain 'spf'")
}
if !strings.Contains(links.MXToolboxDMARC, "dmarc") {
t.Error("MXToolboxDMARC should contain 'dmarc'")
}
// Test with just domain
links2 := GetExternalToolLinksEmail("example.org")
if links2.Target != "example.org" {
t.Errorf("expected target=example.org, got %s", links2.Target)
}
}
func TestGetExternalToolLinksSpecialChars(t *testing.T) {
// Test URL encoding
links := GetExternalToolLinks("test-domain.example.com")
if !strings.Contains(links.MXToolboxDNS, "test-domain.example.com") {
t.Error("Should handle hyphens in domain")
}
}
// ============================================================================
// DNS Lookup Tests (Unit tests for structure, not network)
// ============================================================================
func TestDNSRecordTypes(t *testing.T) {
types := []DNSRecordType{
DNSRecordA,
DNSRecordAAAA,
DNSRecordMX,
DNSRecordTXT,
DNSRecordNS,
DNSRecordCNAME,
DNSRecordSOA,
DNSRecordPTR,
DNSRecordSRV,
DNSRecordCAA,
}
expected := []string{"A", "AAAA", "MX", "TXT", "NS", "CNAME", "SOA", "PTR", "SRV", "CAA"}
for i, typ := range types {
if string(typ) != expected[i] {
t.Errorf("expected type %s, got %s", expected[i], typ)
}
}
}
func TestDNSRecordTypesExtended(t *testing.T) {
// Test all ClouDNS record types are defined
types := []DNSRecordType{
DNSRecordALIAS,
DNSRecordRP,
DNSRecordSSHFP,
DNSRecordTLSA,
DNSRecordDS,
DNSRecordDNSKEY,
DNSRecordNAPTR,
DNSRecordLOC,
DNSRecordHINFO,
DNSRecordCERT,
DNSRecordSMIMEA,
DNSRecordWR,
DNSRecordSPF,
}
expected := []string{"ALIAS", "RP", "SSHFP", "TLSA", "DS", "DNSKEY", "NAPTR", "LOC", "HINFO", "CERT", "SMIMEA", "WR", "SPF"}
for i, typ := range types {
if string(typ) != expected[i] {
t.Errorf("expected type %s, got %s", expected[i], typ)
}
}
}
func TestGetDNSRecordTypeInfo(t *testing.T) {
info := GetDNSRecordTypeInfo()
if len(info) == 0 {
t.Error("GetDNSRecordTypeInfo should return non-empty list")
}
// Check that common types exist
commonFound := 0
for _, r := range info {
if r.Common {
commonFound++
}
// Each entry should have type, name, and description
if r.Type == "" {
t.Error("Record type should not be empty")
}
if r.Name == "" {
t.Error("Record name should not be empty")
}
if r.Description == "" {
t.Error("Record description should not be empty")
}
}
if commonFound < 10 {
t.Errorf("Expected at least 10 common record types, got %d", commonFound)
}
// Check for specific types
typeMap := make(map[DNSRecordType]DNSRecordTypeInfo)
for _, r := range info {
typeMap[r.Type] = r
}
if _, ok := typeMap[DNSRecordA]; !ok {
t.Error("A record type should be in info")
}
if _, ok := typeMap[DNSRecordALIAS]; !ok {
t.Error("ALIAS record type should be in info")
}
if _, ok := typeMap[DNSRecordTLSA]; !ok {
t.Error("TLSA record type should be in info")
}
if _, ok := typeMap[DNSRecordWR]; !ok {
t.Error("WR (Web Redirect) record type should be in info")
}
}
func TestGetCommonDNSRecordTypes(t *testing.T) {
types := GetCommonDNSRecordTypes()
if len(types) == 0 {
t.Error("GetCommonDNSRecordTypes should return non-empty list")
}
// Check that standard types are present
typeSet := make(map[DNSRecordType]bool)
for _, typ := range types {
typeSet[typ] = true
}
if !typeSet[DNSRecordA] {
t.Error("A record should be in common types")
}
if !typeSet[DNSRecordAAAA] {
t.Error("AAAA record should be in common types")
}
if !typeSet[DNSRecordMX] {
t.Error("MX record should be in common types")
}
if !typeSet[DNSRecordTXT] {
t.Error("TXT record should be in common types")
}
if !typeSet[DNSRecordALIAS] {
t.Error("ALIAS record should be in common types")
}
}
func TestGetAllDNSRecordTypes(t *testing.T) {
types := GetAllDNSRecordTypes()
if len(types) < 20 {
t.Errorf("GetAllDNSRecordTypes should return at least 20 types, got %d", len(types))
}
// Check for ClouDNS-specific types
typeSet := make(map[DNSRecordType]bool)
for _, typ := range types {
typeSet[typ] = true
}
if !typeSet[DNSRecordWR] {
t.Error("WR (Web Redirect) should be in all types")
}
if !typeSet[DNSRecordNAPTR] {
t.Error("NAPTR should be in all types")
}
if !typeSet[DNSRecordDS] {
t.Error("DS should be in all types")
}
}
func TestDNSLookupResultStructure(t *testing.T) {
result := DNSLookupResult{
Domain: "example.com",
QueryType: "A",
Records: []DNSRecord{
{Type: DNSRecordA, Name: "example.com", Value: "93.184.216.34"},
},
LookupTimeMs: 50,
}
if result.Domain != "example.com" {
t.Error("Domain should be set")
}
if len(result.Records) != 1 {
t.Error("Should have 1 record")
}
if result.Records[0].Type != DNSRecordA {
t.Error("Record type should be A")
}
}
func TestCompleteDNSLookupStructure(t *testing.T) {
result := CompleteDNSLookup{
Domain: "example.com",
A: []string{"93.184.216.34"},
AAAA: []string{"2606:2800:220:1:248:1893:25c8:1946"},
MX: []MXRecord{
{Host: "mail.example.com", Priority: 10},
},
NS: []string{"ns1.example.com", "ns2.example.com"},
TXT: []string{"v=spf1 include:_spf.example.com ~all"},
}
if result.Domain != "example.com" {
t.Error("Domain should be set")
}
if len(result.A) != 1 {
t.Error("Should have 1 A record")
}
if len(result.AAAA) != 1 {
t.Error("Should have 1 AAAA record")
}
if len(result.MX) != 1 {
t.Error("Should have 1 MX record")
}
if result.MX[0].Priority != 10 {
t.Error("MX priority should be 10")
}
if len(result.NS) != 2 {
t.Error("Should have 2 NS records")
}
}
// ============================================================================
// RDAP Tests (Unit tests for structure, not network)
// ============================================================================
func TestRDAPResponseStructure(t *testing.T) {
resp := RDAPResponse{
LDHName: "example.com",
Status: []string{"active", "client transfer prohibited"},
Events: []RDAPEvent{
{EventAction: "registration", EventDate: "2020-01-01T00:00:00Z"},
{EventAction: "expiration", EventDate: "2025-01-01T00:00:00Z"},
},
Entities: []RDAPEntity{
{Handle: "REGISTRAR-1", Roles: []string{"registrar"}},
},
Nameservers: []RDAPNs{
{LDHName: "ns1.example.com"},
{LDHName: "ns2.example.com"},
},
}
if resp.LDHName != "example.com" {
t.Error("LDHName should be set")
}
if len(resp.Status) != 2 {
t.Error("Should have 2 status values")
}
if len(resp.Events) != 2 {
t.Error("Should have 2 events")
}
if resp.Events[0].EventAction != "registration" {
t.Error("First event should be registration")
}
if len(resp.Nameservers) != 2 {
t.Error("Should have 2 nameservers")
}
}
func TestParseRDAPResponse(t *testing.T) {
resp := RDAPResponse{
LDHName: "example.com",
Status: []string{"active", "dnssecSigned"},
Events: []RDAPEvent{
{EventAction: "registration", EventDate: "2020-01-01T00:00:00Z"},
{EventAction: "expiration", EventDate: "2025-01-01T00:00:00Z"},
{EventAction: "last changed", EventDate: "2024-06-15T00:00:00Z"},
},
Entities: []RDAPEntity{
{Handle: "REGISTRAR-123", Roles: []string{"registrar"}},
},
Nameservers: []RDAPNs{
{LDHName: "ns1.example.com"},
{LDHName: "ns2.example.com"},
},
}
info := ParseRDAPResponse(resp)
if info.Domain != "example.com" {
t.Errorf("expected domain=example.com, got %s", info.Domain)
}
if info.RegistrationDate != "2020-01-01T00:00:00Z" {
t.Errorf("expected registration date, got %s", info.RegistrationDate)
}
if info.ExpirationDate != "2025-01-01T00:00:00Z" {
t.Errorf("expected expiration date, got %s", info.ExpirationDate)
}
if info.UpdatedDate != "2024-06-15T00:00:00Z" {
t.Errorf("expected updated date, got %s", info.UpdatedDate)
}
if info.Registrar != "REGISTRAR-123" {
t.Errorf("expected registrar, got %s", info.Registrar)
}
if len(info.Nameservers) != 2 {
t.Error("Should have 2 nameservers")
}
if !info.DNSSEC {
t.Error("DNSSEC should be true (detected from status)")
}
}
func TestParseRDAPResponseEmpty(t *testing.T) {
resp := RDAPResponse{
LDHName: "test.com",
}
info := ParseRDAPResponse(resp)
if info.Domain != "test.com" {
t.Error("Domain should be set even with minimal response")
}
if info.DNSSEC {
t.Error("DNSSEC should be false with no status")
}
if len(info.Nameservers) != 0 {
t.Error("Nameservers should be empty")
}
}
// ============================================================================
// RDAP Server Tests
// ============================================================================
func TestRDAPServers(t *testing.T) {
// Check that we have servers for common TLDs
commonTLDs := []string{"com", "net", "org", "io"}
for _, tld := range commonTLDs {
if _, ok := rdapServers[tld]; !ok {
t.Errorf("missing RDAP server for TLD: %s", tld)
}
}
// Check RIRs
rirs := []string{"arin", "ripe", "apnic", "afrinic", "lacnic"}
for _, rir := range rirs {
if _, ok := rdapServers[rir]; !ok {
t.Errorf("missing RDAP server for RIR: %s", rir)
}
}
}
// ============================================================================
// MX Record Tests
// ============================================================================
func TestMXRecordStructure(t *testing.T) {
mx := MXRecord{
Host: "mail.example.com",
Priority: 10,
}
if mx.Host != "mail.example.com" {
t.Error("Host should be set")
}
if mx.Priority != 10 {
t.Error("Priority should be 10")
}
}
// ============================================================================
// SRV Record Tests
// ============================================================================
func TestSRVRecordStructure(t *testing.T) {
srv := SRVRecord{
Target: "sipserver.example.com",
Port: 5060,
Priority: 10,
Weight: 100,
}
if srv.Target != "sipserver.example.com" {
t.Error("Target should be set")
}
if srv.Port != 5060 {
t.Error("Port should be 5060")
}
if srv.Priority != 10 {
t.Error("Priority should be 10")
}
if srv.Weight != 100 {
t.Error("Weight should be 100")
}
}
// ============================================================================
// SOA Record Tests
// ============================================================================
func TestSOARecordStructure(t *testing.T) {
soa := SOARecord{
PrimaryNS: "ns1.example.com",
AdminEmail: "admin.example.com",
Serial: 2024010101,
Refresh: 7200,
Retry: 3600,
Expire: 1209600,
MinTTL: 86400,
}
if soa.PrimaryNS != "ns1.example.com" {
t.Error("PrimaryNS should be set")
}
if soa.Serial != 2024010101 {
t.Error("Serial should match")
}
if soa.Refresh != 7200 {
t.Error("Refresh should be 7200")
}
}
// ============================================================================
// Extended Record Type Structure Tests
// ============================================================================
func TestCAARecordStructure(t *testing.T) {
caa := CAARecord{
Flag: 0,
Tag: "issue",
Value: "letsencrypt.org",
}
if caa.Tag != "issue" {
t.Error("Tag should be 'issue'")
}
if caa.Value != "letsencrypt.org" {
t.Error("Value should be set")
}
}
func TestSSHFPRecordStructure(t *testing.T) {
sshfp := SSHFPRecord{
Algorithm: 4, // Ed25519
FPType: 2, // SHA-256
Fingerprint: "abc123def456",
}
if sshfp.Algorithm != 4 {
t.Error("Algorithm should be 4 (Ed25519)")
}
if sshfp.FPType != 2 {
t.Error("FPType should be 2 (SHA-256)")
}
}
func TestTLSARecordStructure(t *testing.T) {
tlsa := TLSARecord{
Usage: 3, // Domain-issued certificate
Selector: 1, // SubjectPublicKeyInfo
MatchingType: 1, // SHA-256
CertData: "abcd1234",
}
if tlsa.Usage != 3 {
t.Error("Usage should be 3")
}
if tlsa.Selector != 1 {
t.Error("Selector should be 1")
}
}
func TestDSRecordStructure(t *testing.T) {
ds := DSRecord{
KeyTag: 12345,
Algorithm: 13, // ECDSAP256SHA256
DigestType: 2, // SHA-256
Digest: "deadbeef",
}
if ds.KeyTag != 12345 {
t.Error("KeyTag should be 12345")
}
if ds.Algorithm != 13 {
t.Error("Algorithm should be 13")
}
}
func TestNAPTRRecordStructure(t *testing.T) {
naptr := NAPTRRecord{
Order: 100,
Preference: 10,
Flags: "U",
Service: "E2U+sip",
Regexp: "!^.*$!sip:info@example.com!",
Replacement: ".",
}
if naptr.Order != 100 {
t.Error("Order should be 100")
}
if naptr.Service != "E2U+sip" {
t.Error("Service should be E2U+sip")
}
}
func TestRPRecordStructure(t *testing.T) {
rp := RPRecord{
Mailbox: "admin.example.com",
TxtDom: "info.example.com",
}
if rp.Mailbox != "admin.example.com" {
t.Error("Mailbox should be set")
}
}
func TestLOCRecordStructure(t *testing.T) {
loc := LOCRecord{
Latitude: 51.5074,
Longitude: -0.1278,
Altitude: 11,
Size: 10,
HPrecis: 10,
VPrecis: 10,
}
if loc.Latitude < 51.5 || loc.Latitude > 51.6 {
t.Error("Latitude should be near 51.5074")
}
}
func TestALIASRecordStructure(t *testing.T) {
alias := ALIASRecord{
Target: "target.example.com",
}
if alias.Target != "target.example.com" {
t.Error("Target should be set")
}
}
func TestWebRedirectRecordStructure(t *testing.T) {
wr := WebRedirectRecord{
URL: "https://www.example.com",
RedirectType: 301,
Frame: false,
}
if wr.URL != "https://www.example.com" {
t.Error("URL should be set")
}
if wr.RedirectType != 301 {
t.Error("RedirectType should be 301")
}
}
// ============================================================================
// Helper Function Tests
// ============================================================================
func TestIsNoSuchHostError(t *testing.T) {
tests := []struct {
errStr string
expected bool
}{
{"no such host", true},
{"NXDOMAIN", true},
{"not found", true},
{"connection refused", false},
{"timeout", false},
{"", false},
}
for _, tc := range tests {
var err error
if tc.errStr != "" {
err = &testError{msg: tc.errStr}
}
result := isNoSuchHostError(err)
if result != tc.expected {
t.Errorf("isNoSuchHostError(%q) = %v, want %v", tc.errStr, result, tc.expected)
}
}
}
type testError struct {
msg string
}
func (e *testError) Error() string {
return e.msg
}
// ============================================================================
// URL Building Tests
// ============================================================================
func TestBuildRDAPURLs(t *testing.T) {
// These test the URL structure, not actual lookups
// Domain URL
domain := "example.com"
expectedDomainPrefix := "https://rdap.org/domain/"
if !strings.HasPrefix("https://rdap.org/domain/"+domain, expectedDomainPrefix) {
t.Error("Domain URL format is incorrect")
}
// IP URL
ip := "8.8.8.8"
expectedIPPrefix := "https://rdap.org/ip/"
if !strings.HasPrefix("https://rdap.org/ip/"+ip, expectedIPPrefix) {
t.Error("IP URL format is incorrect")
}
// ASN URL
asn := "15169"
expectedASNPrefix := "https://rdap.org/autnum/"
if !strings.HasPrefix("https://rdap.org/autnum/"+asn, expectedASNPrefix) {
t.Error("ASN URL format is incorrect")
}
}
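
dns_tools.go itself is collapsed in this diff, so as a quick orientation here is a minimal sketch of how the helpers exercised by these tests might be consumed. Only the function and field names shown in the tests are taken as given; the import path and the surrounding program are assumptions.

package main

import (
	"fmt"

	"github.com/Snider/poindexter" // import path assumed; adjust to the real module path
)

func main() {
	// External tool links for a domain (fields as exercised in the tests above).
	links := poindexter.GetExternalToolLinks("example.com")
	fmt.Println(links.Target, links.Type)        // "example.com", "domain"
	fmt.Println(links.MXToolboxDNS)              // MXToolbox DNS lookup URL for the domain
	fmt.Println(links.SSLLabs, links.VirusTotal) // TLS and reputation checks

	// Record-type metadata, e.g. to drive a lookup UI.
	for _, info := range poindexter.GetDNSRecordTypeInfo() {
		if info.Common {
			fmt.Printf("%s (%s): %s\n", info.Type, info.Name, info.Description)
		}
	}
}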


@@ -13,19 +13,159 @@ async function run() {
console.log('Poindexter (WASM) version:', await px.version());
// =========================================================================
// Basic KD-Tree operations
// =========================================================================
const tree = await px.newTree(2);
- await tree.insert({ id: 'a', coords: [0, 0], value: 'A' });
- await tree.insert({ id: 'b', coords: [1, 0], value: 'B' });
- await tree.insert({ id: 'c', coords: [0, 1], value: 'C' });
+ await tree.insert({ id: 'peer-a', coords: [0, 0], value: 'Peer A' });
+ await tree.insert({ id: 'peer-b', coords: [1, 0], value: 'Peer B' });
+ await tree.insert({ id: 'peer-c', coords: [0, 1], value: 'Peer C' });
+ await tree.insert({ id: 'peer-d', coords: [0.5, 0.5], value: 'Peer D' });
console.log('\n=== Basic Queries ===');
const nn = await tree.nearest([0.9, 0.1]);
console.log('Nearest [0.9,0.1]:', nn);
- const kn = await tree.kNearest([0.9, 0.9], 2);
- console.log('kNN k=2 [0.9,0.9]:', kn);
+ const kn = await tree.kNearest([0.5, 0.5], 3);
+ console.log('kNN k=3 [0.5,0.5]:', kn);
const rad = await tree.radius([0, 0], 1.1);
console.log('Radius r=1.1 [0,0]:', rad);
// =========================================================================
// Analytics Demo
// =========================================================================
console.log('\n=== Tree Analytics ===');
// Perform more queries to generate analytics
for (let i = 0; i < 10; i++) {
await tree.nearest([Math.random(), Math.random()]);
}
await tree.kNearest([0.2, 0.8], 2);
await tree.kNearest([0.7, 0.3], 2);
// Get tree-level analytics
const analytics = await tree.getAnalytics();
console.log('Tree Analytics:', {
queryCount: analytics.queryCount,
insertCount: analytics.insertCount,
avgQueryTimeNs: analytics.avgQueryTimeNs,
minQueryTimeNs: analytics.minQueryTimeNs,
maxQueryTimeNs: analytics.maxQueryTimeNs,
});
// =========================================================================
// Peer Selection Analytics
// =========================================================================
console.log('\n=== Peer Selection Analytics ===');
// Get all peer stats
const peerStats = await tree.getPeerStats();
console.log('All Peer Stats:', peerStats);
// Get top 3 most frequently selected peers
const topPeers = await tree.getTopPeers(3);
console.log('Top 3 Peers:', topPeers);
// =========================================================================
// Axis Distribution Analysis
// =========================================================================
console.log('\n=== Axis Distributions ===');
const axisDists = await tree.getAxisDistributions(['latency', 'hops']);
console.log('Axis Distributions:', axisDists);
// =========================================================================
// NAT Routing / Peer Quality Scoring
// =========================================================================
console.log('\n=== NAT Routing & Peer Quality ===');
// Simulate peer network metrics
const peerMetrics = {
connectivityScore: 0.9,
symmetryScore: 0.8,
relayProbability: 0.1,
directSuccessRate: 0.95,
avgRttMs: 50,
jitterMs: 10,
packetLossRate: 0.01,
bandwidthMbps: 100,
natType: 'full_cone' as const,
};
const qualityScore = await px.computePeerQualityScore(peerMetrics);
console.log('Peer Quality Score (0-1):', qualityScore.toFixed(3));
// Get default quality weights
const defaultWeights = await px.getDefaultQualityWeights();
console.log('Default Quality Weights:', defaultWeights);
// =========================================================================
// Trust Score Calculation
// =========================================================================
console.log('\n=== Trust Score ===');
const trustMetrics = {
reputationScore: 0.8,
successfulTransactions: 150,
failedTransactions: 3,
ageSeconds: 86400 * 30, // 30 days
vouchCount: 5,
flagCount: 0,
proofOfWork: 0.5,
};
const trustScore = await px.computeTrustScore(trustMetrics);
console.log('Trust Score (0-1):', trustScore.toFixed(3));
// =========================================================================
// Distribution Statistics
// =========================================================================
console.log('\n=== Distribution Statistics ===');
// Simulate some distance measurements
const distances = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.8, 1.2];
const distStats = await px.computeDistributionStats(distances);
console.log('Distance Distribution Stats:', {
count: distStats.count,
min: distStats.min.toFixed(3),
max: distStats.max.toFixed(3),
mean: distStats.mean.toFixed(3),
median: distStats.median.toFixed(3),
stdDev: distStats.stdDev.toFixed(3),
p90: distStats.p90.toFixed(3),
});
// =========================================================================
// Feature Normalization for KD-Tree
// =========================================================================
console.log('\n=== Feature Normalization ===');
// Raw peer features: [latency_ms, hops, geo_km, trust_inv, bw_inv, loss, conn_inv, nat_inv]
const rawFeatures = [100, 5, 500, 0.1, 50, 0.02, 5, 0.1];
// Get default feature ranges
const featureRanges = await px.getDefaultPeerFeatureRanges();
console.log('Feature Labels:', featureRanges.labels);
// Normalize features
const normalizedFeatures = await px.normalizePeerFeatures(rawFeatures);
console.log('Normalized Features:', normalizedFeatures.map((f: number) => f.toFixed(3)));
// Apply custom weights
const customWeights = [1.5, 1.0, 0.5, 1.2, 0.8, 2.0, 1.0, 0.7];
const weightedFeatures = await px.weightedPeerFeatures(normalizedFeatures, customWeights);
console.log('Weighted Features:', weightedFeatures.map((f: number) => f.toFixed(3)));
// =========================================================================
// Analytics Reset
// =========================================================================
console.log('\n=== Analytics Reset ===');
await tree.resetAnalytics();
const resetAnalytics = await tree.getAnalytics();
console.log('After Reset - Query Count:', resetAnalytics.queryCount);
console.log('\n=== Demo Complete ===');
}
run().catch((err) => {

kdtree.go (158 changed lines)

@@ -4,6 +4,7 @@ import (
"errors"
"math"
"sort"
"time"
)
var (
@@ -218,6 +219,10 @@ type KDTree[T any] struct {
idIndex map[string]int
backend KDBackend
backendData any // opaque handle for backend-specific structures (e.g., gonum tree)
// Analytics tracking (optional, enabled by default)
analytics *TreeAnalytics
peerAnalytics *PeerAnalytics
}
// NewKDTree builds a KDTree from the given points.
@@ -259,12 +264,14 @@ func NewKDTree[T any](pts []KDPoint[T], opts ...KDOption) (*KDTree[T], error) {
backend = BackendLinear // tag not enabled → fallback
}
t := &KDTree[T]{
- points: append([]KDPoint[T](nil), pts...),
- dim: dim,
- metric: cfg.metric,
- idIndex: idIndex,
- backend: backend,
- backendData: backendData,
+ points: append([]KDPoint[T](nil), pts...),
+ dim: dim,
+ metric: cfg.metric,
+ idIndex: idIndex,
+ backend: backend,
+ backendData: backendData,
+ analytics: NewTreeAnalytics(),
+ peerAnalytics: NewPeerAnalytics(),
}
return t, nil
}
@@ -284,12 +291,14 @@ func NewKDTreeFromDim[T any](dim int, opts ...KDOption) (*KDTree[T], error) {
backend = BackendLinear
}
return &KDTree[T]{
- points: nil,
- dim: dim,
- metric: cfg.metric,
- idIndex: make(map[string]int),
- backend: backend,
- backendData: nil,
+ points: nil,
+ dim: dim,
+ metric: cfg.metric,
+ idIndex: make(map[string]int),
+ backend: backend,
+ backendData: nil,
+ analytics: NewTreeAnalytics(),
+ peerAnalytics: NewPeerAnalytics(),
}, nil
}
@@ -305,12 +314,23 @@ func (t *KDTree[T]) Nearest(query []float64) (KDPoint[T], float64, bool) {
if len(query) != t.dim || t.Len() == 0 {
return KDPoint[T]{}, 0, false
}
start := time.Now()
defer func() {
if t.analytics != nil {
t.analytics.RecordQuery(time.Since(start).Nanoseconds())
}
}()
// Gonum backend (if available and built)
if t.backend == BackendGonum && t.backendData != nil {
if idx, dist, ok := gonumNearest[T](t.backendData, query); ok && idx >= 0 && idx < len(t.points) {
- return t.points[idx], dist, true
+ p := t.points[idx]
+ if t.peerAnalytics != nil {
+ t.peerAnalytics.RecordSelection(p.ID, dist)
+ }
+ return p, dist, true
}
- // fall through to linear scan if backend didnt return a result
+ // fall through to linear scan if backend didn't return a result
}
bestIdx := -1
bestDist := math.MaxFloat64
@@ -324,7 +344,11 @@ func (t *KDTree[T]) Nearest(query []float64) (KDPoint[T], float64, bool) {
if bestIdx < 0 {
return KDPoint[T]{}, 0, false
}
- return t.points[bestIdx], bestDist, true
+ p := t.points[bestIdx]
+ if t.peerAnalytics != nil {
+ t.peerAnalytics.RecordSelection(p.ID, bestDist)
+ }
+ return p, bestDist, true
}
// KNearest returns up to k nearest neighbors to the query in ascending distance order.
@@ -333,6 +357,13 @@ func (t *KDTree[T]) KNearest(query []float64, k int) ([]KDPoint[T], []float64) {
if k <= 0 || len(query) != t.dim || t.Len() == 0 {
return nil, nil
}
start := time.Now()
defer func() {
if t.analytics != nil {
t.analytics.RecordQuery(time.Since(start).Nanoseconds())
}
}()
// Gonum backend path
if t.backend == BackendGonum && t.backendData != nil {
idxs, dists := gonumKNearest[T](t.backendData, query, k)
@@ -340,6 +371,9 @@ func (t *KDTree[T]) KNearest(query []float64, k int) ([]KDPoint[T], []float64) {
neighbors := make([]KDPoint[T], len(idxs))
for i := range idxs {
neighbors[i] = t.points[idxs[i]]
if t.peerAnalytics != nil {
t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i])
}
}
return neighbors, dists
}
@@ -362,6 +396,9 @@ func (t *KDTree[T]) KNearest(query []float64, k int) ([]KDPoint[T], []float64) {
for i := 0; i < k; i++ {
neighbors[i] = t.points[tmp[i].idx]
dists[i] = tmp[i].dist
if t.peerAnalytics != nil {
t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i])
}
}
return neighbors, dists
}
@@ -371,6 +408,13 @@ func (t *KDTree[T]) Radius(query []float64, r float64) ([]KDPoint[T], []float64)
if r < 0 || len(query) != t.dim || t.Len() == 0 {
return nil, nil
}
start := time.Now()
defer func() {
if t.analytics != nil {
t.analytics.RecordQuery(time.Since(start).Nanoseconds())
}
}()
// Gonum backend path
if t.backend == BackendGonum && t.backendData != nil {
idxs, dists := gonumRadius[T](t.backendData, query, r)
@@ -378,6 +422,9 @@ func (t *KDTree[T]) Radius(query []float64, r float64) ([]KDPoint[T], []float64)
neighbors := make([]KDPoint[T], len(idxs))
for i := range idxs {
neighbors[i] = t.points[idxs[i]]
if t.peerAnalytics != nil {
t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i])
}
}
return neighbors, dists
}
@@ -402,6 +449,9 @@ func (t *KDTree[T]) Radius(query []float64, r float64) ([]KDPoint[T], []float64)
for i := range sel {
neighbors[i] = t.points[sel[i].idx]
dists[i] = sel[i].dist
if t.peerAnalytics != nil {
t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i])
}
}
return neighbors, dists
}
@@ -421,10 +471,17 @@ func (t *KDTree[T]) Insert(p KDPoint[T]) bool {
if p.ID != "" {
t.idIndex[p.ID] = len(t.points) - 1
}
// Record insert in analytics
if t.analytics != nil {
t.analytics.RecordInsert()
}
// Rebuild backend if using Gonum
if t.backend == BackendGonum && hasGonum() {
if bd, err := buildGonumBackend(t.points, t.metric); err == nil {
t.backendData = bd
if t.analytics != nil {
t.analytics.RecordRebuild()
}
} else {
// fallback to linear if rebuild fails
t.backend = BackendLinear
@@ -451,10 +508,17 @@ func (t *KDTree[T]) DeleteByID(id string) bool {
}
t.points = t.points[:last]
delete(t.idIndex, id)
// Record delete in analytics
if t.analytics != nil {
t.analytics.RecordDelete()
}
// Rebuild backend if using Gonum
if t.backend == BackendGonum && hasGonum() {
if bd, err := buildGonumBackend(t.points, t.metric); err == nil {
t.backendData = bd
if t.analytics != nil {
t.analytics.RecordRebuild()
}
} else {
// fallback to linear if rebuild fails
t.backend = BackendLinear
@@ -463,3 +527,67 @@ func (t *KDTree[T]) DeleteByID(id string) bool {
}
return true
}
// Analytics returns the tree analytics tracker.
// Returns nil if analytics tracking is disabled.
func (t *KDTree[T]) Analytics() *TreeAnalytics {
return t.analytics
}
// PeerAnalytics returns the peer analytics tracker.
// Returns nil if peer analytics tracking is disabled.
func (t *KDTree[T]) PeerAnalytics() *PeerAnalytics {
return t.peerAnalytics
}
// GetAnalyticsSnapshot returns a point-in-time snapshot of tree analytics.
func (t *KDTree[T]) GetAnalyticsSnapshot() TreeAnalyticsSnapshot {
if t.analytics == nil {
return TreeAnalyticsSnapshot{}
}
return t.analytics.Snapshot()
}
// GetPeerStats returns per-peer selection statistics.
func (t *KDTree[T]) GetPeerStats() []PeerStats {
if t.peerAnalytics == nil {
return nil
}
return t.peerAnalytics.GetAllPeerStats()
}
// GetTopPeers returns the top N most frequently selected peers.
func (t *KDTree[T]) GetTopPeers(n int) []PeerStats {
if t.peerAnalytics == nil {
return nil
}
return t.peerAnalytics.GetTopPeers(n)
}
// ComputeDistanceDistribution analyzes the distribution of current point coordinates.
func (t *KDTree[T]) ComputeDistanceDistribution(axisNames []string) []AxisDistribution {
return ComputeAxisDistributions(t.points, axisNames)
}
// ResetAnalytics clears all analytics data.
func (t *KDTree[T]) ResetAnalytics() {
if t.analytics != nil {
t.analytics.Reset()
}
if t.peerAnalytics != nil {
t.peerAnalytics.Reset()
}
}
// Points returns a copy of all points in the tree.
// This is useful for analytics and export operations.
func (t *KDTree[T]) Points() []KDPoint[T] {
result := make([]KDPoint[T], len(t.points))
copy(result, t.points)
return result
}
// Backend returns the active backend type.
func (t *KDTree[T]) Backend() KDBackend {
return t.backend
}
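
Taken together, the new methods expose a read-only view of query behaviour on the tree. A minimal sketch of how a caller might poll them, using only identifiers visible in this diff (NewKDTree, KDPoint, Nearest, KNearest, GetAnalyticsSnapshot, GetTopPeers, ResetAnalytics); the import path and the surrounding program are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/Snider/poindexter" // import path assumed; adjust to the real module path
)

func main() {
	tree, err := poindexter.NewKDTree([]poindexter.KDPoint[string]{
		{ID: "peer-a", Coords: []float64{0, 0}},
		{ID: "peer-b", Coords: []float64{1, 0}},
		{ID: "peer-c", Coords: []float64{0, 1}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each Nearest/KNearest/Radius call is now timed and attributed to the
	// peer IDs it returns.
	tree.Nearest([]float64{0.9, 0.1})
	tree.KNearest([]float64{0.5, 0.5}, 2)

	snap := tree.GetAnalyticsSnapshot()
	fmt.Println("queries:", snap.QueryCount, "avg ns:", snap.AvgQueryTimeNs)

	for _, ps := range tree.GetTopPeers(3) {
		fmt.Println(ps.PeerID, ps.SelectionCount, ps.AvgDistance)
	}

	tree.ResetAnalytics() // clears both tree-level and per-peer counters
}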

kdtree_analytics.go (new file, 700 additions)

@@ -0,0 +1,700 @@
package poindexter
import (
"math"
"sort"
"sync"
"sync/atomic"
"time"
)
// TreeAnalytics tracks operational statistics for a KDTree.
// All counters are safe for concurrent reads; use the Reset() method for atomic reset.
type TreeAnalytics struct {
QueryCount atomic.Int64 // Total nearest/kNearest/radius queries
InsertCount atomic.Int64 // Total successful inserts
DeleteCount atomic.Int64 // Total successful deletes
// Timing statistics (nanoseconds)
TotalQueryTimeNs atomic.Int64
LastQueryTimeNs atomic.Int64
MinQueryTimeNs atomic.Int64
MaxQueryTimeNs atomic.Int64
LastQueryAt atomic.Int64 // Unix nanoseconds
CreatedAt time.Time
LastRebuiltAt atomic.Int64 // Unix nanoseconds (for gonum backend rebuilds)
BackendRebuildCnt atomic.Int64 // Number of backend rebuilds
}
// NewTreeAnalytics creates a new analytics tracker.
func NewTreeAnalytics() *TreeAnalytics {
a := &TreeAnalytics{
CreatedAt: time.Now(),
}
a.MinQueryTimeNs.Store(math.MaxInt64)
return a
}
// RecordQuery records a query operation with timing.
func (a *TreeAnalytics) RecordQuery(durationNs int64) {
a.QueryCount.Add(1)
a.TotalQueryTimeNs.Add(durationNs)
a.LastQueryTimeNs.Store(durationNs)
a.LastQueryAt.Store(time.Now().UnixNano())
// Update min/max (best-effort, not strictly atomic)
for {
cur := a.MinQueryTimeNs.Load()
if durationNs >= cur || a.MinQueryTimeNs.CompareAndSwap(cur, durationNs) {
break
}
}
for {
cur := a.MaxQueryTimeNs.Load()
if durationNs <= cur || a.MaxQueryTimeNs.CompareAndSwap(cur, durationNs) {
break
}
}
}
// RecordInsert records a successful insert.
func (a *TreeAnalytics) RecordInsert() {
a.InsertCount.Add(1)
}
// RecordDelete records a successful delete.
func (a *TreeAnalytics) RecordDelete() {
a.DeleteCount.Add(1)
}
// RecordRebuild records a backend rebuild.
func (a *TreeAnalytics) RecordRebuild() {
a.BackendRebuildCnt.Add(1)
a.LastRebuiltAt.Store(time.Now().UnixNano())
}
// Snapshot returns a point-in-time view of the analytics.
func (a *TreeAnalytics) Snapshot() TreeAnalyticsSnapshot {
avgNs := int64(0)
qc := a.QueryCount.Load()
if qc > 0 {
avgNs = a.TotalQueryTimeNs.Load() / qc
}
minNs := a.MinQueryTimeNs.Load()
if minNs == math.MaxInt64 {
minNs = 0
}
return TreeAnalyticsSnapshot{
QueryCount: qc,
InsertCount: a.InsertCount.Load(),
DeleteCount: a.DeleteCount.Load(),
AvgQueryTimeNs: avgNs,
MinQueryTimeNs: minNs,
MaxQueryTimeNs: a.MaxQueryTimeNs.Load(),
LastQueryTimeNs: a.LastQueryTimeNs.Load(),
LastQueryAt: time.Unix(0, a.LastQueryAt.Load()),
CreatedAt: a.CreatedAt,
BackendRebuildCnt: a.BackendRebuildCnt.Load(),
LastRebuiltAt: time.Unix(0, a.LastRebuiltAt.Load()),
}
}
// Reset atomically resets all counters.
func (a *TreeAnalytics) Reset() {
a.QueryCount.Store(0)
a.InsertCount.Store(0)
a.DeleteCount.Store(0)
a.TotalQueryTimeNs.Store(0)
a.LastQueryTimeNs.Store(0)
a.MinQueryTimeNs.Store(math.MaxInt64)
a.MaxQueryTimeNs.Store(0)
a.LastQueryAt.Store(0)
a.BackendRebuildCnt.Store(0)
a.LastRebuiltAt.Store(0)
}
// TreeAnalyticsSnapshot is an immutable snapshot for JSON serialization.
type TreeAnalyticsSnapshot struct {
QueryCount int64 `json:"queryCount"`
InsertCount int64 `json:"insertCount"`
DeleteCount int64 `json:"deleteCount"`
AvgQueryTimeNs int64 `json:"avgQueryTimeNs"`
MinQueryTimeNs int64 `json:"minQueryTimeNs"`
MaxQueryTimeNs int64 `json:"maxQueryTimeNs"`
LastQueryTimeNs int64 `json:"lastQueryTimeNs"`
LastQueryAt time.Time `json:"lastQueryAt"`
CreatedAt time.Time `json:"createdAt"`
BackendRebuildCnt int64 `json:"backendRebuildCount"`
LastRebuiltAt time.Time `json:"lastRebuiltAt"`
}
// PeerAnalytics tracks per-peer selection statistics for NAT routing optimization.
type PeerAnalytics struct {
mu sync.RWMutex
// Per-peer hit counters (peer ID -> selection count)
hitCounts map[string]*atomic.Int64
// Per-peer cumulative distance sums for average calculation
distanceSums map[string]*atomic.Uint64 // stored as bits of float64
// Last selection time per peer
lastSelected map[string]*atomic.Int64 // Unix nano
}
// NewPeerAnalytics creates a new peer analytics tracker.
func NewPeerAnalytics() *PeerAnalytics {
return &PeerAnalytics{
hitCounts: make(map[string]*atomic.Int64),
distanceSums: make(map[string]*atomic.Uint64),
lastSelected: make(map[string]*atomic.Int64),
}
}
// RecordSelection records that a peer was selected/returned in a query result.
func (p *PeerAnalytics) RecordSelection(peerID string, distance float64) {
if peerID == "" {
return
}
p.mu.RLock()
hc, hok := p.hitCounts[peerID]
ds, dok := p.distanceSums[peerID]
ls, lok := p.lastSelected[peerID]
p.mu.RUnlock()
if !hok || !dok || !lok {
p.mu.Lock()
if _, ok := p.hitCounts[peerID]; !ok {
p.hitCounts[peerID] = &atomic.Int64{}
}
if _, ok := p.distanceSums[peerID]; !ok {
p.distanceSums[peerID] = &atomic.Uint64{}
}
if _, ok := p.lastSelected[peerID]; !ok {
p.lastSelected[peerID] = &atomic.Int64{}
}
hc = p.hitCounts[peerID]
ds = p.distanceSums[peerID]
ls = p.lastSelected[peerID]
p.mu.Unlock()
}
hc.Add(1)
// Atomic float add via CAS
for {
old := ds.Load()
oldF := math.Float64frombits(old)
newF := oldF + distance
if ds.CompareAndSwap(old, math.Float64bits(newF)) {
break
}
}
ls.Store(time.Now().UnixNano())
}
// GetPeerStats returns statistics for a specific peer.
func (p *PeerAnalytics) GetPeerStats(peerID string) PeerStats {
p.mu.RLock()
defer p.mu.RUnlock()
hc, hok := p.hitCounts[peerID]
ds, dok := p.distanceSums[peerID]
ls, lok := p.lastSelected[peerID]
stats := PeerStats{PeerID: peerID}
if hok {
stats.SelectionCount = hc.Load()
}
if dok && stats.SelectionCount > 0 {
stats.AvgDistance = math.Float64frombits(ds.Load()) / float64(stats.SelectionCount)
}
if lok {
stats.LastSelectedAt = time.Unix(0, ls.Load())
}
return stats
}
// GetAllPeerStats returns statistics for all tracked peers.
func (p *PeerAnalytics) GetAllPeerStats() []PeerStats {
p.mu.RLock()
defer p.mu.RUnlock()
result := make([]PeerStats, 0, len(p.hitCounts))
for id := range p.hitCounts {
stats := PeerStats{PeerID: id}
if hc := p.hitCounts[id]; hc != nil {
stats.SelectionCount = hc.Load()
}
if ds := p.distanceSums[id]; ds != nil && stats.SelectionCount > 0 {
stats.AvgDistance = math.Float64frombits(ds.Load()) / float64(stats.SelectionCount)
}
if ls := p.lastSelected[id]; ls != nil {
stats.LastSelectedAt = time.Unix(0, ls.Load())
}
result = append(result, stats)
}
// Sort by selection count descending
sort.Slice(result, func(i, j int) bool {
return result[i].SelectionCount > result[j].SelectionCount
})
return result
}
// GetTopPeers returns the top N most frequently selected peers.
func (p *PeerAnalytics) GetTopPeers(n int) []PeerStats {
all := p.GetAllPeerStats()
if n > len(all) {
n = len(all)
}
return all[:n]
}
// Reset clears all peer analytics data.
func (p *PeerAnalytics) Reset() {
p.mu.Lock()
defer p.mu.Unlock()
p.hitCounts = make(map[string]*atomic.Int64)
p.distanceSums = make(map[string]*atomic.Uint64)
p.lastSelected = make(map[string]*atomic.Int64)
}
// PeerStats holds statistics for a single peer.
type PeerStats struct {
PeerID string `json:"peerId"`
SelectionCount int64 `json:"selectionCount"`
AvgDistance float64 `json:"avgDistance"`
LastSelectedAt time.Time `json:"lastSelectedAt"`
}
// DistributionStats provides statistical analysis of distances in query results.
type DistributionStats struct {
Count int `json:"count"`
Min float64 `json:"min"`
Max float64 `json:"max"`
Mean float64 `json:"mean"`
Median float64 `json:"median"`
StdDev float64 `json:"stdDev"`
P25 float64 `json:"p25"` // 25th percentile
P75 float64 `json:"p75"` // 75th percentile
P90 float64 `json:"p90"` // 90th percentile
P99 float64 `json:"p99"` // 99th percentile
Variance float64 `json:"variance"`
Skewness float64 `json:"skewness"`
SampleSize int `json:"sampleSize"`
ComputedAt time.Time `json:"computedAt"`
}
// ComputeDistributionStats calculates distribution statistics from a slice of distances.
func ComputeDistributionStats(distances []float64) DistributionStats {
n := len(distances)
if n == 0 {
return DistributionStats{ComputedAt: time.Now()}
}
// Sort for percentile calculations
sorted := make([]float64, n)
copy(sorted, distances)
sort.Float64s(sorted)
// Basic stats
min, max := sorted[0], sorted[n-1]
sum := 0.0
for _, d := range sorted {
sum += d
}
mean := sum / float64(n)
// Variance and standard deviation
sumSqDiff := 0.0
for _, d := range sorted {
diff := d - mean
sumSqDiff += diff * diff
}
variance := sumSqDiff / float64(n)
stdDev := math.Sqrt(variance)
// Skewness
skewness := 0.0
if stdDev > 0 {
sumCubeDiff := 0.0
for _, d := range sorted {
diff := (d - mean) / stdDev
sumCubeDiff += diff * diff * diff
}
skewness = sumCubeDiff / float64(n)
}
return DistributionStats{
Count: n,
Min: min,
Max: max,
Mean: mean,
Median: percentile(sorted, 0.5),
StdDev: stdDev,
P25: percentile(sorted, 0.25),
P75: percentile(sorted, 0.75),
P90: percentile(sorted, 0.90),
P99: percentile(sorted, 0.99),
Variance: variance,
Skewness: skewness,
SampleSize: n,
ComputedAt: time.Now(),
}
}
// percentile returns the p-th percentile from a sorted slice.
func percentile(sorted []float64, p float64) float64 {
if len(sorted) == 0 {
return 0
}
if len(sorted) == 1 {
return sorted[0]
}
idx := p * float64(len(sorted)-1)
lower := int(idx)
upper := lower + 1
if upper >= len(sorted) {
return sorted[len(sorted)-1]
}
frac := idx - float64(lower)
return sorted[lower]*(1-frac) + sorted[upper]*frac
}
// AxisDistribution provides per-axis (feature) distribution analysis.
type AxisDistribution struct {
Axis int `json:"axis"`
Name string `json:"name,omitempty"`
Stats DistributionStats `json:"stats"`
}
// ComputeAxisDistributions analyzes the distribution of values along each axis.
func ComputeAxisDistributions[T any](points []KDPoint[T], axisNames []string) []AxisDistribution {
if len(points) == 0 {
return nil
}
dim := len(points[0].Coords)
result := make([]AxisDistribution, dim)
for axis := 0; axis < dim; axis++ {
values := make([]float64, len(points))
for i, p := range points {
if axis < len(p.Coords) {
values[i] = p.Coords[axis]
}
}
name := ""
if axis < len(axisNames) {
name = axisNames[axis]
}
result[axis] = AxisDistribution{
Axis: axis,
Name: name,
Stats: ComputeDistributionStats(values),
}
}
return result
}
// NATRoutingMetrics provides metrics specifically for NAT traversal routing decisions.
type NATRoutingMetrics struct {
// Connectivity score (0-1): higher means better reachability
ConnectivityScore float64 `json:"connectivityScore"`
// Symmetry score (0-1): higher means more symmetric NAT (easier to traverse)
SymmetryScore float64 `json:"symmetryScore"`
// Relay requirement probability (0-1): likelihood peer needs relay
RelayProbability float64 `json:"relayProbability"`
// Direct connection success rate (historical)
DirectSuccessRate float64 `json:"directSuccessRate"`
// Average RTT in milliseconds
AvgRTTMs float64 `json:"avgRttMs"`
// Jitter (RTT variance) in milliseconds
JitterMs float64 `json:"jitterMs"`
// Packet loss rate (0-1)
PacketLossRate float64 `json:"packetLossRate"`
// Bandwidth estimate in Mbps
BandwidthMbps float64 `json:"bandwidthMbps"`
// NAT type classification
NATType string `json:"natType"`
// Last probe timestamp
LastProbeAt time.Time `json:"lastProbeAt"`
}
// NATTypeClassification enumerates common NAT types for routing decisions.
type NATTypeClassification string
const (
NATTypeOpen NATTypeClassification = "open" // No NAT / Public IP
NATTypeFullCone NATTypeClassification = "full_cone" // Easy to traverse
NATTypeRestrictedCone NATTypeClassification = "restricted_cone" // Moderate difficulty
NATTypePortRestricted NATTypeClassification = "port_restricted" // Harder to traverse
NATTypeSymmetric NATTypeClassification = "symmetric" // Hardest to traverse
NATTypeSymmetricUDP NATTypeClassification = "symmetric_udp" // UDP-only symmetric
NATTypeUnknown NATTypeClassification = "unknown" // Not yet classified
NATTypeBehindCGNAT NATTypeClassification = "cgnat" // Carrier-grade NAT
NATTypeFirewalled NATTypeClassification = "firewalled" // Blocked by firewall
NATTypeRelayRequired NATTypeClassification = "relay_required" // Must use relay
)
// PeerQualityScore computes a composite quality score for peer selection.
// Higher scores indicate better peers for routing.
// Weights can be customized; default weights emphasize latency and reliability.
func PeerQualityScore(metrics NATRoutingMetrics, weights *QualityWeights) float64 {
w := DefaultQualityWeights()
if weights != nil {
w = *weights
}
// Normalize metrics to 0-1 scale (higher is better)
latencyScore := 1.0 - math.Min(metrics.AvgRTTMs/1000.0, 1.0) // <1000ms is acceptable
jitterScore := 1.0 - math.Min(metrics.JitterMs/100.0, 1.0) // <100ms jitter
lossScore := 1.0 - metrics.PacketLossRate // 0 loss is best
bandwidthScore := math.Min(metrics.BandwidthMbps/100.0, 1.0) // 100Mbps is excellent
connectivityScore := metrics.ConnectivityScore // Already 0-1
symmetryScore := metrics.SymmetryScore // Already 0-1
directScore := metrics.DirectSuccessRate // Already 0-1
relayPenalty := 1.0 - metrics.RelayProbability // Prefer non-relay
// NAT type bonus/penalty
natScore := natTypeScore(metrics.NATType)
// Weighted combination
score := (w.Latency*latencyScore +
w.Jitter*jitterScore +
w.PacketLoss*lossScore +
w.Bandwidth*bandwidthScore +
w.Connectivity*connectivityScore +
w.Symmetry*symmetryScore +
w.DirectSuccess*directScore +
w.RelayPenalty*relayPenalty +
w.NATType*natScore) / w.Total()
return math.Max(0, math.Min(1, score))
}
// QualityWeights configures the importance of each metric in peer selection.
type QualityWeights struct {
Latency float64 `json:"latency"`
Jitter float64 `json:"jitter"`
PacketLoss float64 `json:"packetLoss"`
Bandwidth float64 `json:"bandwidth"`
Connectivity float64 `json:"connectivity"`
Symmetry float64 `json:"symmetry"`
DirectSuccess float64 `json:"directSuccess"`
RelayPenalty float64 `json:"relayPenalty"`
NATType float64 `json:"natType"`
}
// Total returns the sum of all weights for normalization.
func (w QualityWeights) Total() float64 {
return w.Latency + w.Jitter + w.PacketLoss + w.Bandwidth +
w.Connectivity + w.Symmetry + w.DirectSuccess + w.RelayPenalty + w.NATType
}
// DefaultQualityWeights returns sensible defaults for peer selection.
func DefaultQualityWeights() QualityWeights {
return QualityWeights{
Latency: 3.0, // Most important
Jitter: 1.5,
PacketLoss: 2.0,
Bandwidth: 1.0,
Connectivity: 2.0,
Symmetry: 1.0,
DirectSuccess: 2.0,
RelayPenalty: 1.5,
NATType: 1.0,
}
}
// natTypeScore returns a 0-1 score based on NAT type (higher is better for routing).
func natTypeScore(natType string) float64 {
switch NATTypeClassification(natType) {
case NATTypeOpen:
return 1.0
case NATTypeFullCone:
return 0.9
case NATTypeRestrictedCone:
return 0.7
case NATTypePortRestricted:
return 0.5
case NATTypeSymmetric:
return 0.3
case NATTypeSymmetricUDP:
return 0.25
case NATTypeBehindCGNAT:
return 0.2
case NATTypeFirewalled:
return 0.1
case NATTypeRelayRequired:
return 0.05
default:
return 0.4 // Unknown gets middle score
}
}
// TrustMetrics tracks trust and reputation for peer selection.
type TrustMetrics struct {
// ReputationScore (0-1): aggregated trust score
ReputationScore float64 `json:"reputationScore"`
// SuccessfulTransactions: count of successful exchanges
SuccessfulTransactions int64 `json:"successfulTransactions"`
// FailedTransactions: count of failed/aborted exchanges
FailedTransactions int64 `json:"failedTransactions"`
// AgeSeconds: how long this peer has been known
AgeSeconds int64 `json:"ageSeconds"`
// LastSuccessAt: last successful interaction
LastSuccessAt time.Time `json:"lastSuccessAt"`
// LastFailureAt: last failed interaction
LastFailureAt time.Time `json:"lastFailureAt"`
// VouchCount: number of other peers vouching for this peer
VouchCount int `json:"vouchCount"`
// FlagCount: number of reports against this peer
FlagCount int `json:"flagCount"`
// ProofOfWork: computational proof of stake/work
ProofOfWork float64 `json:"proofOfWork"`
}
// ComputeTrustScore calculates a composite trust score from trust metrics.
func ComputeTrustScore(t TrustMetrics) float64 {
total := t.SuccessfulTransactions + t.FailedTransactions
if total == 0 {
// New peer with no history: moderate trust with age bonus
ageBonus := math.Min(float64(t.AgeSeconds)/(86400*30), 0.2) // Up to 0.2 for 30 days
return 0.5 + ageBonus
}
// Base score from success rate
successRate := float64(t.SuccessfulTransactions) / float64(total)
// Volume confidence (more transactions = more confident)
volumeConfidence := 1 - 1/(1+float64(total)/10)
// Vouch/flag adjustment
vouchBonus := math.Min(float64(t.VouchCount)*0.02, 0.15)
flagPenalty := math.Min(float64(t.FlagCount)*0.05, 0.3)
// Recency bonus (recent success = better)
recencyBonus := 0.0
if !t.LastSuccessAt.IsZero() {
hoursSince := time.Since(t.LastSuccessAt).Hours()
recencyBonus = 0.1 * math.Exp(-hoursSince/168) // Decays over ~1 week
}
// Proof of work bonus
powBonus := math.Min(t.ProofOfWork*0.1, 0.1)
score := successRate*volumeConfidence + vouchBonus - flagPenalty + recencyBonus + powBonus
return math.Max(0, math.Min(1, score))
}
// NetworkHealthSummary aggregates overall network health metrics.
type NetworkHealthSummary struct {
TotalPeers int `json:"totalPeers"`
ActivePeers int `json:"activePeers"` // Peers queried recently
HealthyPeers int `json:"healthyPeers"` // Peers with good metrics
DegradedPeers int `json:"degradedPeers"` // Peers with some issues
UnhealthyPeers int `json:"unhealthyPeers"` // Peers with poor metrics
AvgLatencyMs float64 `json:"avgLatencyMs"`
MedianLatencyMs float64 `json:"medianLatencyMs"`
AvgTrustScore float64 `json:"avgTrustScore"`
AvgQualityScore float64 `json:"avgQualityScore"`
DirectConnectRate float64 `json:"directConnectRate"` // % of peers directly reachable
RelayDependency float64 `json:"relayDependency"` // % of peers needing relay
ComputedAt time.Time `json:"computedAt"`
}
// FeatureVector represents a normalized feature vector for a peer.
// This is the core structure for KD-Tree based peer selection.
type FeatureVector struct {
PeerID string `json:"peerId"`
Features []float64 `json:"features"`
Labels []string `json:"labels,omitempty"` // Optional feature names
}
// StandardPeerFeatures defines the standard feature set for peer selection.
// These map to dimensions in the KD-Tree.
type StandardPeerFeatures struct {
LatencyMs float64 `json:"latencyMs"` // Lower is better
HopCount int `json:"hopCount"` // Lower is better
GeoDistanceKm float64 `json:"geoDistanceKm"` // Lower is better
TrustScore float64 `json:"trustScore"` // Higher is better (invert)
BandwidthMbps float64 `json:"bandwidthMbps"` // Higher is better (invert)
PacketLossRate float64 `json:"packetLossRate"` // Lower is better
ConnectivityPct float64 `json:"connectivityPct"` // Higher is better (invert)
NATScore float64 `json:"natScore"` // Higher is better (invert)
}
// ToFeatureSlice converts structured features to a slice for KD-Tree operations.
// Inversion is handled so that lower distance = better peer.
func (f StandardPeerFeatures) ToFeatureSlice() []float64 {
return []float64{
f.LatencyMs,
float64(f.HopCount),
f.GeoDistanceKm,
1 - f.TrustScore, // Invert: higher trust = lower value
100 - f.BandwidthMbps, // Invert: higher bandwidth = lower value (capped at 100)
f.PacketLossRate,
100 - f.ConnectivityPct, // Invert: higher connectivity = lower value
1 - f.NATScore, // Invert: higher NAT score = lower value
}
}
// StandardFeatureLabels returns the labels for standard peer features.
func StandardFeatureLabels() []string {
return []string{
"latency_ms",
"hop_count",
"geo_distance_km",
"trust_score_inv",
"bandwidth_inv",
"packet_loss_rate",
"connectivity_inv",
"nat_score_inv",
}
}
// FeatureRanges defines min/max ranges for feature normalization.
type FeatureRanges struct {
Ranges []AxisStats `json:"ranges"`
}
// DefaultPeerFeatureRanges returns sensible default ranges for peer features.
func DefaultPeerFeatureRanges() FeatureRanges {
return FeatureRanges{
Ranges: []AxisStats{
{Min: 0, Max: 1000}, // Latency: 0-1000ms
{Min: 0, Max: 20}, // Hops: 0-20
{Min: 0, Max: 20000}, // Geo distance: 0-20000km (half Earth circumference)
{Min: 0, Max: 1}, // Trust score (inverted): 0-1
{Min: 0, Max: 100}, // Bandwidth (inverted): 0-100Mbps
{Min: 0, Max: 1}, // Packet loss: 0-100%
{Min: 0, Max: 100}, // Connectivity (inverted): 0-100%
{Min: 0, Max: 1}, // NAT score (inverted): 0-1
},
}
}
// NormalizePeerFeatures normalizes peer features to [0,1] using provided ranges.
func NormalizePeerFeatures(features []float64, ranges FeatureRanges) []float64 {
result := make([]float64, len(features))
for i, v := range features {
if i < len(ranges.Ranges) {
result[i] = scale01(v, ranges.Ranges[i].Min, ranges.Ranges[i].Max)
} else {
result[i] = v
}
}
return result
}
// WeightedPeerFeatures applies per-feature weights after normalization.
func WeightedPeerFeatures(normalized []float64, weights []float64) []float64 {
result := make([]float64, len(normalized))
for i, v := range normalized {
w := 1.0
if i < len(weights) {
w = weights[i]
}
result[i] = v * w
}
return result
}
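
Putting the pieces together, the intended NAT-routing flow is: express raw peer measurements as StandardPeerFeatures, convert and normalise them into KD-tree coordinates, and score candidates with PeerQualityScore and ComputeTrustScore. A minimal sketch using only names defined in this file; the import path and the concrete numbers are illustrative.

package main

import (
	"fmt"

	"github.com/Snider/poindexter" // import path assumed; adjust to the real module path
)

func main() {
	// 1. Raw measurements for one candidate peer.
	feats := poindexter.StandardPeerFeatures{
		LatencyMs:       80,
		HopCount:        6,
		GeoDistanceKm:   1200,
		TrustScore:      0.9,
		BandwidthMbps:   50,
		PacketLossRate:  0.01,
		ConnectivityPct: 95,
		NATScore:        0.8,
	}

	// 2. Convert to a "lower is better" vector, normalise to [0,1] with the
	//    default ranges, then weight the axes before inserting into a KD-tree.
	raw := feats.ToFeatureSlice()
	norm := poindexter.NormalizePeerFeatures(raw, poindexter.DefaultPeerFeatureRanges())
	weighted := poindexter.WeightedPeerFeatures(norm, []float64{3, 1, 0.5, 2, 1, 2, 1, 1})
	fmt.Println("kd-tree coordinates:", weighted)

	// 3. Independently score path quality and trustworthiness.
	quality := poindexter.PeerQualityScore(poindexter.NATRoutingMetrics{
		ConnectivityScore: 0.9,
		SymmetryScore:     0.8,
		RelayProbability:  0.1,
		DirectSuccessRate: 0.95,
		AvgRTTMs:          80,
		JitterMs:          12,
		PacketLossRate:    0.01,
		BandwidthMbps:     50,
		NATType:           string(poindexter.NATTypeFullCone),
	}, nil) // nil -> DefaultQualityWeights

	trust := poindexter.ComputeTrustScore(poindexter.TrustMetrics{
		ReputationScore:        0.8,
		SuccessfulTransactions: 150,
		FailedTransactions:     3,
		AgeSeconds:             86400 * 30,
		VouchCount:             5,
	})
	fmt.Printf("quality=%.3f trust=%.3f\n", quality, trust)
}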

kdtree_analytics_test.go (new file, 662 additions)

@@ -0,0 +1,662 @@
package poindexter
import (
"math"
"testing"
"time"
)
// ============================================================================
// TreeAnalytics Tests
// ============================================================================
func TestNewTreeAnalytics(t *testing.T) {
a := NewTreeAnalytics()
if a == nil {
t.Fatal("NewTreeAnalytics returned nil")
}
if a.QueryCount.Load() != 0 {
t.Errorf("expected QueryCount=0, got %d", a.QueryCount.Load())
}
if a.InsertCount.Load() != 0 {
t.Errorf("expected InsertCount=0, got %d", a.InsertCount.Load())
}
if a.CreatedAt.IsZero() {
t.Error("CreatedAt should not be zero")
}
}
func TestTreeAnalyticsRecordQuery(t *testing.T) {
a := NewTreeAnalytics()
a.RecordQuery(1000) // 1μs
a.RecordQuery(2000) // 2μs
a.RecordQuery(500) // 0.5μs
if a.QueryCount.Load() != 3 {
t.Errorf("expected QueryCount=3, got %d", a.QueryCount.Load())
}
if a.TotalQueryTimeNs.Load() != 3500 {
t.Errorf("expected TotalQueryTimeNs=3500, got %d", a.TotalQueryTimeNs.Load())
}
if a.MinQueryTimeNs.Load() != 500 {
t.Errorf("expected MinQueryTimeNs=500, got %d", a.MinQueryTimeNs.Load())
}
if a.MaxQueryTimeNs.Load() != 2000 {
t.Errorf("expected MaxQueryTimeNs=2000, got %d", a.MaxQueryTimeNs.Load())
}
if a.LastQueryTimeNs.Load() != 500 {
t.Errorf("expected LastQueryTimeNs=500, got %d", a.LastQueryTimeNs.Load())
}
}
func TestTreeAnalyticsSnapshot(t *testing.T) {
a := NewTreeAnalytics()
a.RecordQuery(1000)
a.RecordQuery(3000)
a.RecordInsert()
a.RecordInsert()
a.RecordDelete()
a.RecordRebuild()
snap := a.Snapshot()
if snap.QueryCount != 2 {
t.Errorf("expected QueryCount=2, got %d", snap.QueryCount)
}
if snap.InsertCount != 2 {
t.Errorf("expected InsertCount=2, got %d", snap.InsertCount)
}
if snap.DeleteCount != 1 {
t.Errorf("expected DeleteCount=1, got %d", snap.DeleteCount)
}
if snap.AvgQueryTimeNs != 2000 {
t.Errorf("expected AvgQueryTimeNs=2000, got %d", snap.AvgQueryTimeNs)
}
if snap.MinQueryTimeNs != 1000 {
t.Errorf("expected MinQueryTimeNs=1000, got %d", snap.MinQueryTimeNs)
}
if snap.MaxQueryTimeNs != 3000 {
t.Errorf("expected MaxQueryTimeNs=3000, got %d", snap.MaxQueryTimeNs)
}
if snap.BackendRebuildCnt != 1 {
t.Errorf("expected BackendRebuildCnt=1, got %d", snap.BackendRebuildCnt)
}
}
func TestTreeAnalyticsReset(t *testing.T) {
a := NewTreeAnalytics()
a.RecordQuery(1000)
a.RecordInsert()
a.RecordDelete()
a.Reset()
if a.QueryCount.Load() != 0 {
t.Errorf("expected QueryCount=0 after reset, got %d", a.QueryCount.Load())
}
if a.InsertCount.Load() != 0 {
t.Errorf("expected InsertCount=0 after reset, got %d", a.InsertCount.Load())
}
if a.DeleteCount.Load() != 0 {
t.Errorf("expected DeleteCount=0 after reset, got %d", a.DeleteCount.Load())
}
}
// ============================================================================
// PeerAnalytics Tests
// ============================================================================
func TestNewPeerAnalytics(t *testing.T) {
p := NewPeerAnalytics()
if p == nil {
t.Fatal("NewPeerAnalytics returned nil")
}
}
func TestPeerAnalyticsRecordSelection(t *testing.T) {
p := NewPeerAnalytics()
p.RecordSelection("peer1", 0.5)
p.RecordSelection("peer1", 0.3)
p.RecordSelection("peer2", 1.0)
stats := p.GetPeerStats("peer1")
if stats.SelectionCount != 2 {
t.Errorf("expected peer1 SelectionCount=2, got %d", stats.SelectionCount)
}
if math.Abs(stats.AvgDistance-0.4) > 0.001 {
t.Errorf("expected peer1 AvgDistance~0.4, got %f", stats.AvgDistance)
}
stats2 := p.GetPeerStats("peer2")
if stats2.SelectionCount != 1 {
t.Errorf("expected peer2 SelectionCount=1, got %d", stats2.SelectionCount)
}
}
func TestPeerAnalyticsGetAllPeerStats(t *testing.T) {
p := NewPeerAnalytics()
p.RecordSelection("peer1", 0.5)
p.RecordSelection("peer1", 0.5)
p.RecordSelection("peer2", 1.0)
p.RecordSelection("peer3", 0.8)
p.RecordSelection("peer3", 0.8)
p.RecordSelection("peer3", 0.8)
all := p.GetAllPeerStats()
if len(all) != 3 {
t.Errorf("expected 3 peers, got %d", len(all))
}
// Should be sorted by selection count descending
if all[0].PeerID != "peer3" || all[0].SelectionCount != 3 {
t.Errorf("expected first peer to be peer3 with count=3, got %s with count=%d",
all[0].PeerID, all[0].SelectionCount)
}
}
func TestPeerAnalyticsGetTopPeers(t *testing.T) {
p := NewPeerAnalytics()
for i := 0; i < 5; i++ {
p.RecordSelection("peer1", 0.5)
}
for i := 0; i < 3; i++ {
p.RecordSelection("peer2", 0.3)
}
p.RecordSelection("peer3", 0.1)
top := p.GetTopPeers(2)
if len(top) != 2 {
t.Errorf("expected 2 top peers, got %d", len(top))
}
if top[0].PeerID != "peer1" {
t.Errorf("expected top peer to be peer1, got %s", top[0].PeerID)
}
if top[1].PeerID != "peer2" {
t.Errorf("expected second peer to be peer2, got %s", top[1].PeerID)
}
}
func TestPeerAnalyticsReset(t *testing.T) {
p := NewPeerAnalytics()
p.RecordSelection("peer1", 0.5)
p.Reset()
stats := p.GetAllPeerStats()
if len(stats) != 0 {
t.Errorf("expected 0 peers after reset, got %d", len(stats))
}
}
// ============================================================================
// DistributionStats Tests
// ============================================================================
func TestComputeDistributionStatsEmpty(t *testing.T) {
stats := ComputeDistributionStats(nil)
if stats.Count != 0 {
t.Errorf("expected Count=0 for empty input, got %d", stats.Count)
}
}
func TestComputeDistributionStatsSingle(t *testing.T) {
stats := ComputeDistributionStats([]float64{5.0})
if stats.Count != 1 {
t.Errorf("expected Count=1, got %d", stats.Count)
}
if stats.Min != 5.0 || stats.Max != 5.0 {
t.Errorf("expected Min=Max=5.0, got Min=%f, Max=%f", stats.Min, stats.Max)
}
if stats.Mean != 5.0 {
t.Errorf("expected Mean=5.0, got %f", stats.Mean)
}
if stats.Median != 5.0 {
t.Errorf("expected Median=5.0, got %f", stats.Median)
}
}
func TestComputeDistributionStatsMultiple(t *testing.T) {
// Values: 1, 2, 3, 4, 5 - mean=3, median=3
stats := ComputeDistributionStats([]float64{1, 2, 3, 4, 5})
if stats.Count != 5 {
t.Errorf("expected Count=5, got %d", stats.Count)
}
if stats.Min != 1.0 {
t.Errorf("expected Min=1.0, got %f", stats.Min)
}
if stats.Max != 5.0 {
t.Errorf("expected Max=5.0, got %f", stats.Max)
}
if stats.Mean != 3.0 {
t.Errorf("expected Mean=3.0, got %f", stats.Mean)
}
if stats.Median != 3.0 {
t.Errorf("expected Median=3.0, got %f", stats.Median)
}
// Variance = 2.0 for this dataset
if math.Abs(stats.Variance-2.0) > 0.001 {
t.Errorf("expected Variance~2.0, got %f", stats.Variance)
}
}
func TestComputeDistributionStatsPercentiles(t *testing.T) {
// 100 values from 0 to 99
values := make([]float64, 100)
for i := 0; i < 100; i++ {
values[i] = float64(i)
}
stats := ComputeDistributionStats(values)
// P25 should be around 24.75, P75 around 74.25
if math.Abs(stats.P25-24.75) > 0.1 {
t.Errorf("expected P25~24.75, got %f", stats.P25)
}
if math.Abs(stats.P75-74.25) > 0.1 {
t.Errorf("expected P75~74.25, got %f", stats.P75)
}
if math.Abs(stats.P90-89.1) > 0.1 {
t.Errorf("expected P90~89.1, got %f", stats.P90)
}
}
// ============================================================================
// AxisDistribution Tests
// ============================================================================
func TestComputeAxisDistributions(t *testing.T) {
points := []KDPoint[string]{
{ID: "a", Coords: []float64{1.0, 10.0}},
{ID: "b", Coords: []float64{2.0, 20.0}},
{ID: "c", Coords: []float64{3.0, 30.0}},
}
dists := ComputeAxisDistributions(points, []string{"x", "y"})
if len(dists) != 2 {
t.Errorf("expected 2 axis distributions, got %d", len(dists))
}
if dists[0].Axis != 0 || dists[0].Name != "x" {
t.Errorf("expected first axis=0, name=x, got axis=%d, name=%s", dists[0].Axis, dists[0].Name)
}
if dists[0].Stats.Mean != 2.0 {
t.Errorf("expected axis 0 mean=2.0, got %f", dists[0].Stats.Mean)
}
if dists[1].Axis != 1 || dists[1].Name != "y" {
t.Errorf("expected second axis=1, name=y, got axis=%d, name=%s", dists[1].Axis, dists[1].Name)
}
if dists[1].Stats.Mean != 20.0 {
t.Errorf("expected axis 1 mean=20.0, got %f", dists[1].Stats.Mean)
}
}
// ============================================================================
// NAT Routing Tests
// ============================================================================
func TestPeerQualityScoreDefaults(t *testing.T) {
// Perfect peer
perfect := NATRoutingMetrics{
ConnectivityScore: 1.0,
SymmetryScore: 1.0,
RelayProbability: 0.0,
DirectSuccessRate: 1.0,
AvgRTTMs: 10,
JitterMs: 5,
PacketLossRate: 0.0,
BandwidthMbps: 100,
NATType: string(NATTypeOpen),
}
score := PeerQualityScore(perfect, nil)
if score < 0.9 {
t.Errorf("expected perfect peer score > 0.9, got %f", score)
}
// Poor peer
poor := NATRoutingMetrics{
ConnectivityScore: 0.2,
SymmetryScore: 0.1,
RelayProbability: 0.9,
DirectSuccessRate: 0.1,
AvgRTTMs: 500,
JitterMs: 100,
PacketLossRate: 0.5,
BandwidthMbps: 1,
NATType: string(NATTypeSymmetric),
}
poorScore := PeerQualityScore(poor, nil)
if poorScore > 0.5 {
t.Errorf("expected poor peer score < 0.5, got %f", poorScore)
}
if poorScore >= score {
t.Error("poor peer should have lower score than perfect peer")
}
}
func TestPeerQualityScoreCustomWeights(t *testing.T) {
metrics := NATRoutingMetrics{
ConnectivityScore: 1.0,
SymmetryScore: 0.5,
RelayProbability: 0.0,
DirectSuccessRate: 1.0,
AvgRTTMs: 100,
JitterMs: 10,
PacketLossRate: 0.01,
BandwidthMbps: 50,
NATType: string(NATTypeFullCone),
}
// Weight latency heavily
latencyWeights := QualityWeights{
Latency: 10.0,
Jitter: 1.0,
PacketLoss: 1.0,
Bandwidth: 1.0,
Connectivity: 1.0,
Symmetry: 1.0,
DirectSuccess: 1.0,
RelayPenalty: 1.0,
NATType: 1.0,
}
scoreLatency := PeerQualityScore(metrics, &latencyWeights)
// Weight bandwidth heavily
bandwidthWeights := QualityWeights{
Latency: 1.0,
Jitter: 1.0,
PacketLoss: 1.0,
Bandwidth: 10.0,
Connectivity: 1.0,
Symmetry: 1.0,
DirectSuccess: 1.0,
RelayPenalty: 1.0,
NATType: 1.0,
}
scoreBandwidth := PeerQualityScore(metrics, &bandwidthWeights)
// Scores should differ based on weights
if scoreLatency == scoreBandwidth {
t.Error("different weights should produce different scores")
}
}
func TestDefaultQualityWeights(t *testing.T) {
w := DefaultQualityWeights()
if w.Latency <= 0 {
t.Error("Latency weight should be positive")
}
if w.Total() <= 0 {
t.Error("Total weights should be positive")
}
}
func TestNatTypeScore(t *testing.T) {
tests := []struct {
natType string
minScore float64
maxScore float64
}{
{string(NATTypeOpen), 0.9, 1.0},
{string(NATTypeFullCone), 0.8, 1.0},
{string(NATTypeSymmetric), 0.2, 0.4},
{string(NATTypeRelayRequired), 0.0, 0.1},
{"unknown", 0.3, 0.5},
}
for _, tc := range tests {
score := natTypeScore(tc.natType)
if score < tc.minScore || score > tc.maxScore {
t.Errorf("natType %s: expected score in [%f, %f], got %f",
tc.natType, tc.minScore, tc.maxScore, score)
}
}
}
// ============================================================================
// Trust Score Tests
// ============================================================================
func TestComputeTrustScoreNewPeer(t *testing.T) {
// New peer with no history
metrics := TrustMetrics{
SuccessfulTransactions: 0,
FailedTransactions: 0,
AgeSeconds: 86400, // 1 day old
}
score := ComputeTrustScore(metrics)
// New peer should get moderate trust
if score < 0.4 || score > 0.7 {
t.Errorf("expected new peer score in [0.4, 0.7], got %f", score)
}
}
func TestComputeTrustScoreGoodPeer(t *testing.T) {
metrics := TrustMetrics{
SuccessfulTransactions: 100,
FailedTransactions: 2,
AgeSeconds: 86400 * 30, // 30 days
VouchCount: 5,
FlagCount: 0,
LastSuccessAt: time.Now(),
}
score := ComputeTrustScore(metrics)
if score < 0.8 {
t.Errorf("expected good peer score > 0.8, got %f", score)
}
}
func TestComputeTrustScoreBadPeer(t *testing.T) {
metrics := TrustMetrics{
SuccessfulTransactions: 5,
FailedTransactions: 20,
AgeSeconds: 86400,
VouchCount: 0,
FlagCount: 10,
}
score := ComputeTrustScore(metrics)
if score > 0.3 {
t.Errorf("expected bad peer score < 0.3, got %f", score)
}
}
// ============================================================================
// Feature Normalization Tests
// ============================================================================
func TestStandardPeerFeaturesToSlice(t *testing.T) {
features := StandardPeerFeatures{
LatencyMs: 100,
HopCount: 5,
GeoDistanceKm: 1000,
TrustScore: 0.9,
BandwidthMbps: 50,
PacketLossRate: 0.01,
ConnectivityPct: 95,
NATScore: 0.8,
}
slice := features.ToFeatureSlice()
if len(slice) != 8 {
t.Errorf("expected 8 features, got %d", len(slice))
}
// TrustScore should be inverted (0.9 -> 0.1)
if math.Abs(slice[3]-0.1) > 0.001 {
t.Errorf("expected inverted trust score ~0.1, got %f", slice[3])
}
}
func TestNormalizePeerFeatures(t *testing.T) {
features := []float64{100, 5, 1000, 0.5, 50, 0.01, 50, 0.5}
ranges := DefaultPeerFeatureRanges()
normalized := NormalizePeerFeatures(features, ranges)
for i, v := range normalized {
if v < 0 || v > 1 {
t.Errorf("normalized feature %d out of range [0,1]: %f", i, v)
}
}
}
func TestWeightedPeerFeatures(t *testing.T) {
normalized := []float64{0.5, 0.5, 0.5, 0.5}
weights := []float64{1.0, 2.0, 0.5, 1.5}
weighted := WeightedPeerFeatures(normalized, weights)
expected := []float64{0.5, 1.0, 0.25, 0.75}
for i, v := range weighted {
if math.Abs(v-expected[i]) > 0.001 {
t.Errorf("weighted feature %d: expected %f, got %f", i, expected[i], v)
}
}
}
func TestStandardFeatureLabels(t *testing.T) {
labels := StandardFeatureLabels()
if len(labels) != 8 {
t.Errorf("expected 8 feature labels, got %d", len(labels))
}
}
// ============================================================================
// KDTree Analytics Integration Tests
// ============================================================================
func TestKDTreeAnalyticsIntegration(t *testing.T) {
points := []KDPoint[string]{
{ID: "a", Coords: []float64{0, 0}, Value: "A"},
{ID: "b", Coords: []float64{1, 1}, Value: "B"},
{ID: "c", Coords: []float64{2, 2}, Value: "C"},
}
tree, err := NewKDTree(points)
if err != nil {
t.Fatal(err)
}
// Check initial analytics
if tree.Analytics() == nil {
t.Fatal("Analytics should not be nil")
}
if tree.PeerAnalytics() == nil {
t.Fatal("PeerAnalytics should not be nil")
}
// Perform queries
tree.Nearest([]float64{0.1, 0.1})
tree.Nearest([]float64{0.9, 0.9})
tree.KNearest([]float64{0.5, 0.5}, 2)
snap := tree.GetAnalyticsSnapshot()
if snap.QueryCount != 3 {
t.Errorf("expected QueryCount=3, got %d", snap.QueryCount)
}
if snap.InsertCount != 0 {
t.Errorf("expected InsertCount=0, got %d", snap.InsertCount)
}
// Check peer stats
peerStats := tree.GetPeerStats()
if len(peerStats) == 0 {
t.Error("expected some peer stats after queries")
}
// Peer 'a' should have been selected for query [0.1, 0.1]
var foundA bool
for _, ps := range peerStats {
if ps.PeerID == "a" && ps.SelectionCount > 0 {
foundA = true
break
}
}
if !foundA {
t.Error("expected peer 'a' to be recorded in analytics")
}
// Test top peers
topPeers := tree.GetTopPeers(1)
if len(topPeers) != 1 {
t.Errorf("expected 1 top peer, got %d", len(topPeers))
}
// Test insert analytics
tree.Insert(KDPoint[string]{ID: "d", Coords: []float64{3, 3}, Value: "D"})
snap = tree.GetAnalyticsSnapshot()
if snap.InsertCount != 1 {
t.Errorf("expected InsertCount=1, got %d", snap.InsertCount)
}
// Test delete analytics
tree.DeleteByID("d")
snap = tree.GetAnalyticsSnapshot()
if snap.DeleteCount != 1 {
t.Errorf("expected DeleteCount=1, got %d", snap.DeleteCount)
}
// Test reset
tree.ResetAnalytics()
snap = tree.GetAnalyticsSnapshot()
if snap.QueryCount != 0 || snap.InsertCount != 0 || snap.DeleteCount != 0 {
t.Error("expected all counts to be 0 after reset")
}
}
func TestKDTreeDistanceDistribution(t *testing.T) {
points := []KDPoint[string]{
{ID: "a", Coords: []float64{0, 10}, Value: "A"},
{ID: "b", Coords: []float64{1, 20}, Value: "B"},
{ID: "c", Coords: []float64{2, 30}, Value: "C"},
}
tree, _ := NewKDTree(points)
dists := tree.ComputeDistanceDistribution([]string{"x", "y"})
if len(dists) != 2 {
t.Errorf("expected 2 axis distributions, got %d", len(dists))
}
if dists[0].Name != "x" || dists[0].Stats.Mean != 1.0 {
t.Errorf("unexpected axis 0 distribution: name=%s, mean=%f",
dists[0].Name, dists[0].Stats.Mean)
}
if dists[1].Name != "y" || dists[1].Stats.Mean != 20.0 {
t.Errorf("unexpected axis 1 distribution: name=%s, mean=%f",
dists[1].Name, dists[1].Stats.Mean)
}
}
func TestKDTreePointsExport(t *testing.T) {
points := []KDPoint[string]{
{ID: "a", Coords: []float64{0, 0}, Value: "A"},
{ID: "b", Coords: []float64{1, 1}, Value: "B"},
}
tree, _ := NewKDTree(points)
exported := tree.Points()
if len(exported) != 2 {
t.Errorf("expected 2 points, got %d", len(exported))
}
// Verify it's a copy, not a reference
exported[0].ID = "modified"
original := tree.Points()
if original[0].ID == "modified" {
t.Error("Points() should return a copy, not a reference")
}
}
func TestKDTreeBackend(t *testing.T) {
tree, _ := NewKDTreeFromDim[string](2)
backend := tree.Backend()
if backend != BackendLinear && backend != BackendGonum {
t.Errorf("unexpected backend: %s", backend)
}
}

View file

@ -15,7 +15,153 @@ export interface KNearestResult {
dists: number[];
}
// ============================================================================
// Analytics Types
// ============================================================================
/** Tree operation analytics snapshot */
export interface TreeAnalytics {
queryCount: number;
insertCount: number;
deleteCount: number;
avgQueryTimeNs: number;
minQueryTimeNs: number;
maxQueryTimeNs: number;
lastQueryTimeNs: number;
lastQueryAt: number; // Unix milliseconds
createdAt: number; // Unix milliseconds
backendRebuildCount: number;
lastRebuiltAt: number; // Unix milliseconds
}
/** Per-peer selection statistics */
export interface PeerStats {
peerId: string;
selectionCount: number;
avgDistance: number;
lastSelectedAt: number; // Unix milliseconds
}
/** Statistical distribution analysis */
export interface DistributionStats {
count: number;
min: number;
max: number;
mean: number;
median: number;
stdDev: number;
p25: number;
p75: number;
p90: number;
p99: number;
variance: number;
skewness: number;
sampleSize?: number;
computedAt?: number; // Unix milliseconds
}
/** Per-axis distribution in the KD-Tree */
export interface AxisDistribution {
axis: number;
name: string;
stats: DistributionStats;
}
// ============================================================================
// NAT Routing Types
// ============================================================================
/** NAT type classification for routing decisions */
export type NATTypeClassification =
| 'open'
| 'full_cone'
| 'restricted_cone'
| 'port_restricted'
| 'symmetric'
| 'symmetric_udp'
| 'cgnat'
| 'firewalled'
| 'relay_required'
| 'unknown';
/** Network metrics for NAT routing decisions */
export interface NATRoutingMetrics {
connectivityScore: number; // 0-1: higher = better reachability
symmetryScore: number; // 0-1: higher = more symmetric NAT
relayProbability: number; // 0-1: likelihood peer needs relay
directSuccessRate: number; // 0-1: historical direct connection success
avgRttMs: number; // Average RTT in milliseconds
jitterMs: number; // RTT variance in milliseconds
packetLossRate: number; // 0-1: packet loss rate
bandwidthMbps: number; // Bandwidth estimate in Mbps
natType: NATTypeClassification;
lastProbeAt?: number; // Unix milliseconds
}
/** Weights for peer quality scoring */
export interface QualityWeights {
latency: number;
jitter: number;
packetLoss: number;
bandwidth: number;
connectivity: number;
symmetry: number;
directSuccess: number;
relayPenalty: number;
natType: number;
}
/** Trust metrics for peer reputation */
export interface TrustMetrics {
reputationScore: number; // 0-1: aggregated trust score
successfulTransactions: number;
failedTransactions: number;
ageSeconds: number; // How long this peer has been known
lastSuccessAt?: number; // Unix milliseconds
lastFailureAt?: number; // Unix milliseconds
vouchCount: number; // Peers vouching for this peer
flagCount: number; // Reports against this peer
proofOfWork: number; // Computational proof of stake/work
}
/** Axis min/max range for normalization */
export interface AxisRange {
min: number;
max: number;
}
/** Feature ranges for peer feature normalization */
export interface FeatureRanges {
ranges: AxisRange[];
labels?: string[];
}
/** Standard peer features for KD-Tree based selection */
export interface StandardPeerFeatures {
latencyMs: number;
hopCount: number;
geoDistanceKm: number;
trustScore: number;
bandwidthMbps: number;
packetLossRate: number;
connectivityPct: number;
natScore: number;
}
/** Export data with all points */
export interface TreeExport {
dim: number;
len: number;
backend: string;
points: PxPoint[];
}
// ============================================================================
// Tree Interface
// ============================================================================
export interface PxTree {
// Core operations
len(): Promise<number>;
dim(): Promise<number>;
insert(point: PxPoint): Promise<boolean>;
@ -24,18 +170,377 @@ export interface PxTree {
kNearest(query: number[], k: number): Promise<KNearestResult>;
radius(query: number[], r: number): Promise<KNearestResult>;
exportJSON(): Promise<string>;
// Analytics operations
getAnalytics(): Promise<TreeAnalytics>;
getPeerStats(): Promise<PeerStats[]>;
getTopPeers(n: number): Promise<PeerStats[]>;
getAxisDistributions(axisNames?: string[]): Promise<AxisDistribution[]>;
resetAnalytics(): Promise<boolean>;
}
// ============================================================================
// Init Options
// ============================================================================
export interface InitOptions {
wasmURL?: string;
wasmExecURL?: string;
instantiateWasm?: (source: ArrayBuffer, importObject: WebAssembly.Imports) => Promise<WebAssembly.Instance> | WebAssembly.Instance;
}
// ============================================================================
// DNS Tools Types
// ============================================================================
/** DNS record types - standard and extended (ClouDNS compatible) */
export type DNSRecordType =
// Standard record types
| 'A'
| 'AAAA'
| 'MX'
| 'TXT'
| 'NS'
| 'CNAME'
| 'SOA'
| 'PTR'
| 'SRV'
| 'CAA'
// Additional record types (ClouDNS and others)
| 'ALIAS' // Virtual A record - CNAME-like for apex domain
| 'RP' // Responsible Person
| 'SSHFP' // SSH Fingerprint
| 'TLSA' // DANE TLS Authentication
| 'DS' // DNSSEC Delegation Signer
| 'DNSKEY' // DNSSEC Key
| 'NAPTR' // Naming Authority Pointer
| 'LOC' // Geographic Location
| 'HINFO' // Host Information
| 'CERT' // Certificate record
| 'SMIMEA' // S/MIME Certificate Association
| 'WR' // Web Redirect (ClouDNS specific)
| 'SPF'; // Sender Policy Framework (legacy)
/** DNS record type metadata */
export interface DNSRecordTypeInfo {
type: DNSRecordType;
name: string;
description: string;
rfc?: string;
common: boolean;
}
/** CAA record */
export interface CAARecord {
flag: number;
tag: string; // "issue", "issuewild", "iodef"
value: string;
}
/** SSHFP record */
export interface SSHFPRecord {
algorithm: number; // 1=RSA, 2=DSA, 3=ECDSA, 4=Ed25519
fpType: number; // 1=SHA-1, 2=SHA-256
fingerprint: string;
}
/** TLSA (DANE) record */
export interface TLSARecord {
usage: number; // 0-3: CA constraint, Service cert, Trust anchor, Domain-issued
selector: number; // 0=Full cert, 1=SubjectPublicKeyInfo
matchingType: number; // 0=Exact, 1=SHA-256, 2=SHA-512
certData: string;
}
/** DS (DNSSEC Delegation Signer) record */
export interface DSRecord {
keyTag: number;
algorithm: number;
digestType: number;
digest: string;
}
/** DNSKEY record */
export interface DNSKEYRecord {
flags: number;
protocol: number;
algorithm: number;
publicKey: string;
}
/** NAPTR record */
export interface NAPTRRecord {
order: number;
preference: number;
flags: string;
service: string;
regexp: string;
replacement: string;
}
/** RP (Responsible Person) record */
export interface RPRecord {
mailbox: string; // Email as DNS name (user.domain.com)
txtDom: string; // Domain with TXT record containing more info
}
/** LOC (Location) record */
export interface LOCRecord {
latitude: number;
longitude: number;
altitude: number;
size: number;
hPrecision: number;
vPrecision: number;
}
/** ALIAS record (provider-specific) */
export interface ALIASRecord {
target: string;
}
/** Web Redirect record (ClouDNS specific) */
export interface WebRedirectRecord {
url: string;
redirectType: number; // 301, 302, etc.
frame: boolean; // Frame redirect vs HTTP redirect
}
/** External tool links for domain/IP/email analysis */
export interface ExternalToolLinks {
target: string;
type: 'domain' | 'ip' | 'email';
// MXToolbox links
mxtoolboxDns?: string;
mxtoolboxMx?: string;
mxtoolboxBlacklist?: string;
mxtoolboxSmtp?: string;
mxtoolboxSpf?: string;
mxtoolboxDmarc?: string;
mxtoolboxDkim?: string;
mxtoolboxHttp?: string;
mxtoolboxHttps?: string;
mxtoolboxPing?: string;
mxtoolboxTrace?: string;
mxtoolboxWhois?: string;
mxtoolboxAsn?: string;
// DNSChecker links
dnscheckerDns?: string;
dnscheckerPropagation?: string;
// Other tools
whois?: string;
viewdns?: string;
intodns?: string;
dnsviz?: string;
securitytrails?: string;
shodan?: string;
censys?: string;
builtwith?: string;
ssllabs?: string;
hstsPreload?: string;
hardenize?: string;
// IP-specific tools
ipinfo?: string;
abuseipdb?: string;
virustotal?: string;
threatcrowd?: string;
// Email-specific tools
mailtester?: string;
learndmarc?: string;
}
/** RDAP server registry */
export interface RDAPServers {
tlds: Record<string, string>;
rirs: Record<string, string>;
universal: string;
}
/** RDAP response event */
export interface RDAPEvent {
eventAction: string;
eventDate: string;
eventActor?: string;
}
/** RDAP entity (registrar, registrant, etc.) */
export interface RDAPEntity {
handle?: string;
roles?: string[];
vcardArray?: any[];
entities?: RDAPEntity[];
events?: RDAPEvent[];
}
/** RDAP nameserver */
export interface RDAPNameserver {
ldhName: string;
ipAddresses?: {
v4?: string[];
v6?: string[];
};
}
/** RDAP link */
export interface RDAPLink {
value?: string;
rel?: string;
href?: string;
type?: string;
}
/** RDAP remark/notice */
export interface RDAPRemark {
title?: string;
description?: string[];
links?: RDAPLink[];
}
/** RDAP response (for domain, IP, or ASN lookups) */
export interface RDAPResponse {
// Common fields
handle?: string;
ldhName?: string;
unicodeName?: string;
status?: string[];
events?: RDAPEvent[];
entities?: RDAPEntity[];
nameservers?: RDAPNameserver[];
links?: RDAPLink[];
remarks?: RDAPRemark[];
notices?: RDAPRemark[];
// Network-specific (for IP lookups)
startAddress?: string;
endAddress?: string;
ipVersion?: string;
name?: string;
type?: string;
country?: string;
parentHandle?: string;
// Error fields
errorCode?: number;
title?: string;
description?: string[];
// Metadata
rawJson?: string;
lookupTimeMs: number;
timestamp: string;
error?: string;
}
/** Parsed domain info from RDAP */
export interface ParsedDomainInfo {
domain: string;
registrar?: string;
registrationDate?: string;
expirationDate?: string;
updatedDate?: string;
status?: string[];
nameservers?: string[];
dnssec: boolean;
}
/** DNS lookup result */
export interface DNSLookupResult {
domain: string;
queryType: string;
records: DNSRecord[];
mxRecords?: MXRecord[];
srvRecords?: SRVRecord[];
soaRecord?: SOARecord;
lookupTimeMs: number;
error?: string;
timestamp: string;
}
/** DNS record */
export interface DNSRecord {
type: DNSRecordType;
name: string;
value: string;
ttl?: number;
}
/** MX record */
export interface MXRecord {
host: string;
priority: number;
}
/** SRV record */
export interface SRVRecord {
target: string;
port: number;
priority: number;
weight: number;
}
/** SOA record */
export interface SOARecord {
primaryNs: string;
adminEmail: string;
serial: number;
refresh: number;
retry: number;
expire: number;
minTtl: number;
}
/** Complete DNS lookup result */
export interface CompleteDNSLookup {
domain: string;
a?: string[];
aaaa?: string[];
mx?: MXRecord[];
ns?: string[];
txt?: string[];
cname?: string;
soa?: SOARecord;
lookupTimeMs: number;
errors?: string[];
timestamp: string;
}
// ============================================================================
// Main API
// ============================================================================
export interface PxAPI {
// Core functions
version(): Promise<string>;
hello(name?: string): Promise<string>;
newTree(dim: number): Promise<PxTree>;
// Statistics utilities
computeDistributionStats(distances: number[]): Promise<DistributionStats>;
// NAT routing / peer quality functions
computePeerQualityScore(metrics: NATRoutingMetrics, weights?: QualityWeights): Promise<number>;
computeTrustScore(metrics: TrustMetrics): Promise<number>;
getDefaultQualityWeights(): Promise<QualityWeights>;
getDefaultPeerFeatureRanges(): Promise<FeatureRanges>;
normalizePeerFeatures(features: number[], ranges?: FeatureRanges): Promise<number[]>;
weightedPeerFeatures(normalized: number[], weights: number[]): Promise<number[]>;
// DNS tools
getExternalToolLinks(domain: string): Promise<ExternalToolLinks>;
getExternalToolLinksIP(ip: string): Promise<ExternalToolLinks>;
getExternalToolLinksEmail(emailOrDomain: string): Promise<ExternalToolLinks>;
getRDAPServers(): Promise<RDAPServers>;
buildRDAPDomainURL(domain: string): Promise<string>;
buildRDAPIPURL(ip: string): Promise<string>;
buildRDAPASNURL(asn: string): Promise<string>;
getDNSRecordTypes(): Promise<DNSRecordType[]>;
getDNSRecordTypeInfo(): Promise<DNSRecordTypeInfo[]>;
getCommonDNSRecordTypes(): Promise<DNSRecordType[]>;
}
export function init(options?: InitOptions): Promise<PxAPI>;
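// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the declaration file): driving the
// analytics surface declared in PxAPI/PxTree above. The import path below and
// the {id, coords, value} point shape are assumptions for the sketch (the
// point shape mirrors the Go export format); method names and return shapes
// follow the interfaces in this file.
import { init } from './poindexter.js';

async function demoAnalytics(): Promise<void> {
  const px = await init();
  const tree = await px.newTree(2); // two axes, e.g. latency + packet loss

  // Seed a few peers, then run queries; each query is recorded by the tree.
  await tree.insert({ id: 'peer-a', coords: [12, 0.01], value: 'peer-a' });
  await tree.insert({ id: 'peer-b', coords: [80, 0.2], value: 'peer-b' });
  await tree.kNearest([10, 0.05], 1);
  await tree.kNearest([50, 0.1], 2);

  // Operation counters and query timing.
  const analytics = await tree.getAnalytics();
  console.log('queries:', analytics.queryCount, 'avg ns:', analytics.avgQueryTimeNs);

  // Per-peer selection stats, sorted by selectionCount descending.
  const top = await tree.getTopPeers(5);
  for (const p of top) console.log(p.peerId, p.selectionCount, p.avgDistance);

  // Per-axis distribution of the stored coordinates.
  const axes = await tree.getAxisDistributions(['latencyMs', 'packetLoss']);
  console.log(axes[0]?.stats.mean, axes[1]?.stats.p90);

  await tree.resetAnalytics();
}

demoAnalytics().catch(console.error);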

View file

@ -40,6 +40,7 @@ function call(name, ...args) {
class PxTree {
constructor(treeId) { this.treeId = treeId; }
// Core operations
async len() { return call('pxTreeLen', this.treeId); }
async dim() { return call('pxTreeDim', this.treeId); }
async insert(point) { return call('pxInsert', this.treeId, point); }
@ -48,6 +49,12 @@ class PxTree {
async kNearest(query, k) { return call('pxKNearest', this.treeId, query, k); }
async radius(query, r) { return call('pxRadius', this.treeId, query, r); }
async exportJSON() { return call('pxExportJSON', this.treeId); }
// Analytics operations
async getAnalytics() { return call('pxGetAnalytics', this.treeId); }
async getPeerStats() { return call('pxGetPeerStats', this.treeId); }
async getTopPeers(n) { return call('pxGetTopPeers', this.treeId, n); }
async getAxisDistributions(axisNames) { return call('pxGetAxisDistributions', this.treeId, axisNames); }
async resetAnalytics() { return call('pxResetAnalytics', this.treeId); }
}
export async function init(options = {}) {
@ -78,12 +85,33 @@ export async function init(options = {}) {
go.run(result.instance);
const api = {
// Core functions
version: async () => call('pxVersion'),
hello: async (name) => call('pxHello', name ?? ''),
newTree: async (dim) => {
const info = call('pxNewTree', dim);
return new PxTree(info.treeId);
}
},
// Statistics utilities
computeDistributionStats: async (distances) => call('pxComputeDistributionStats', distances),
// NAT routing / peer quality functions
computePeerQualityScore: async (metrics, weights) => call('pxComputePeerQualityScore', metrics, weights),
computeTrustScore: async (metrics) => call('pxComputeTrustScore', metrics),
getDefaultQualityWeights: async () => call('pxGetDefaultQualityWeights'),
getDefaultPeerFeatureRanges: async () => call('pxGetDefaultPeerFeatureRanges'),
normalizePeerFeatures: async (features, ranges) => call('pxNormalizePeerFeatures', features, ranges),
weightedPeerFeatures: async (normalized, weights) => call('pxWeightedPeerFeatures', normalized, weights),
// DNS tools
getExternalToolLinks: async (domain) => call('pxGetExternalToolLinks', domain),
getExternalToolLinksIP: async (ip) => call('pxGetExternalToolLinksIP', ip),
getExternalToolLinksEmail: async (emailOrDomain) => call('pxGetExternalToolLinksEmail', emailOrDomain),
getRDAPServers: async () => call('pxGetRDAPServers'),
buildRDAPDomainURL: async (domain) => call('pxBuildRDAPDomainURL', domain),
buildRDAPIPURL: async (ip) => call('pxBuildRDAPIPURL', ip),
buildRDAPASNURL: async (asn) => call('pxBuildRDAPASNURL', asn),
getDNSRecordTypes: async () => call('pxGetDNSRecordTypes'),
getDNSRecordTypeInfo: async () => call('pxGetDNSRecordTypeInfo'),
getCommonDNSRecordTypes: async () => call('pxGetCommonDNSRecordTypes')
};
return api;
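// ---------------------------------------------------------------------------
// Usage sketch (illustrative): consuming the DNS-tool wrappers registered on
// the api object above. The import path and the use of fetch as the RDAP
// transport are assumptions; method names and returned field names follow the
// ExternalToolLinks / DNSRecordTypeInfo / RDAPServers declarations.
import { init } from './poindexter.js';

async function demoDnsTools(): Promise<void> {
  const px = await init();

  // Pre-built deep links to external analysis tools for a domain.
  const links = await px.getExternalToolLinks('example.com');
  console.log(links.type, links.mxtoolboxDns, links.ssllabs);

  // Same idea for an IP address (adds IP-specific tools such as ipinfo/abuseipdb).
  const ipLinks = await px.getExternalToolLinksIP('8.8.8.8');
  console.log(ipLinks.ipinfo, ipLinks.abuseipdb);

  // RDAP: the module only builds the lookup URL; performing the HTTP request
  // is left to the caller (fetch is assumed here).
  const rdapUrl = await px.buildRDAPDomainURL('example.com'); // https://rdap.org/domain/example.com
  const rdapJson = await (await fetch(rdapUrl)).json();
  console.log(rdapJson.ldhName ?? rdapJson.handle);

  // Record-type metadata, e.g. to populate a record-type picker.
  const common = await px.getCommonDNSRecordTypes();
  const info = await px.getDNSRecordTypeInfo();
  console.log(common, info.find((i) => i.type === 'CAA')?.description);
}

demoDnsTools().catch(console.error);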

View file

@ -215,14 +215,490 @@ func exportJSON(_ js.Value, args []js.Value) (any, error) {
if !ok {
return nil, fmt.Errorf("unknown treeId %d", id)
}
// naive export: ask for all points by radius from origin with large r; or keep
// internal slice? KDTree doesn't expose iteration, so skip heavy export here.
// Return metrics only for now.
m := map[string]any{"dim": t.Dim(), "len": t.Len()}
// Export all points
points := t.Points()
jsPts := make([]any, len(points))
for i, p := range points {
jsPts[i] = map[string]any{"id": p.ID, "coords": p.Coords, "value": p.Value}
}
m := map[string]any{
"dim": t.Dim(),
"len": t.Len(),
"backend": string(t.Backend()),
"points": jsPts,
}
b, _ := json.Marshal(m)
return string(b), nil
}
func getAnalytics(_ js.Value, args []js.Value) (any, error) {
// getAnalytics(treeId) -> analytics snapshot
if len(args) < 1 {
return nil, errors.New("getAnalytics(treeId)")
}
id := args[0].Int()
t, ok := treeRegistry[id]
if !ok {
return nil, fmt.Errorf("unknown treeId %d", id)
}
snap := t.GetAnalyticsSnapshot()
return map[string]any{
"queryCount": snap.QueryCount,
"insertCount": snap.InsertCount,
"deleteCount": snap.DeleteCount,
"avgQueryTimeNs": snap.AvgQueryTimeNs,
"minQueryTimeNs": snap.MinQueryTimeNs,
"maxQueryTimeNs": snap.MaxQueryTimeNs,
"lastQueryTimeNs": snap.LastQueryTimeNs,
"lastQueryAt": snap.LastQueryAt.UnixMilli(),
"createdAt": snap.CreatedAt.UnixMilli(),
"backendRebuildCount": snap.BackendRebuildCnt,
"lastRebuiltAt": snap.LastRebuiltAt.UnixMilli(),
}, nil
}
func getPeerStats(_ js.Value, args []js.Value) (any, error) {
// getPeerStats(treeId) -> array of peer stats
if len(args) < 1 {
return nil, errors.New("getPeerStats(treeId)")
}
id := args[0].Int()
t, ok := treeRegistry[id]
if !ok {
return nil, fmt.Errorf("unknown treeId %d", id)
}
stats := t.GetPeerStats()
jsStats := make([]any, len(stats))
for i, s := range stats {
jsStats[i] = map[string]any{
"peerId": s.PeerID,
"selectionCount": s.SelectionCount,
"avgDistance": s.AvgDistance,
"lastSelectedAt": s.LastSelectedAt.UnixMilli(),
}
}
return jsStats, nil
}
func getTopPeers(_ js.Value, args []js.Value) (any, error) {
// getTopPeers(treeId, n) -> array of top n peer stats
if len(args) < 2 {
return nil, errors.New("getTopPeers(treeId, n)")
}
id := args[0].Int()
n := args[1].Int()
t, ok := treeRegistry[id]
if !ok {
return nil, fmt.Errorf("unknown treeId %d", id)
}
stats := t.GetTopPeers(n)
jsStats := make([]any, len(stats))
for i, s := range stats {
jsStats[i] = map[string]any{
"peerId": s.PeerID,
"selectionCount": s.SelectionCount,
"avgDistance": s.AvgDistance,
"lastSelectedAt": s.LastSelectedAt.UnixMilli(),
}
}
return jsStats, nil
}
func getAxisDistributions(_ js.Value, args []js.Value) (any, error) {
// getAxisDistributions(treeId, axisNames?: string[]) -> array of axis distribution stats
if len(args) < 1 {
return nil, errors.New("getAxisDistributions(treeId)")
}
id := args[0].Int()
t, ok := treeRegistry[id]
if !ok {
return nil, fmt.Errorf("unknown treeId %d", id)
}
var axisNames []string
if len(args) > 1 && !args[1].IsUndefined() && !args[1].IsNull() {
ln := args[1].Length()
axisNames = make([]string, ln)
for i := 0; i < ln; i++ {
axisNames[i] = args[1].Index(i).String()
}
}
dists := t.ComputeDistanceDistribution(axisNames)
jsDists := make([]any, len(dists))
for i, d := range dists {
jsDists[i] = map[string]any{
"axis": d.Axis,
"name": d.Name,
"stats": map[string]any{
"count": d.Stats.Count,
"min": d.Stats.Min,
"max": d.Stats.Max,
"mean": d.Stats.Mean,
"median": d.Stats.Median,
"stdDev": d.Stats.StdDev,
"p25": d.Stats.P25,
"p75": d.Stats.P75,
"p90": d.Stats.P90,
"p99": d.Stats.P99,
"variance": d.Stats.Variance,
"skewness": d.Stats.Skewness,
},
}
}
return jsDists, nil
}
func resetAnalytics(_ js.Value, args []js.Value) (any, error) {
// resetAnalytics(treeId) -> resets all analytics
if len(args) < 1 {
return nil, errors.New("resetAnalytics(treeId)")
}
id := args[0].Int()
t, ok := treeRegistry[id]
if !ok {
return nil, fmt.Errorf("unknown treeId %d", id)
}
t.ResetAnalytics()
return true, nil
}
func computeDistributionStats(_ js.Value, args []js.Value) (any, error) {
// computeDistributionStats(distances: number[]) -> distribution stats
if len(args) < 1 {
return nil, errors.New("computeDistributionStats(distances)")
}
distances, err := getFloatSlice(args[0])
if err != nil {
return nil, err
}
stats := pd.ComputeDistributionStats(distances)
return map[string]any{
"count": stats.Count,
"min": stats.Min,
"max": stats.Max,
"mean": stats.Mean,
"median": stats.Median,
"stdDev": stats.StdDev,
"p25": stats.P25,
"p75": stats.P75,
"p90": stats.P90,
"p99": stats.P99,
"variance": stats.Variance,
"skewness": stats.Skewness,
"sampleSize": stats.SampleSize,
"computedAt": stats.ComputedAt.UnixMilli(),
}, nil
}
func computePeerQualityScore(_ js.Value, args []js.Value) (any, error) {
// computePeerQualityScore(metrics: NATRoutingMetrics, weights?: QualityWeights) -> score
if len(args) < 1 {
return nil, errors.New("computePeerQualityScore(metrics)")
}
m := args[0]
metrics := pd.NATRoutingMetrics{
ConnectivityScore: m.Get("connectivityScore").Float(),
SymmetryScore: m.Get("symmetryScore").Float(),
RelayProbability: m.Get("relayProbability").Float(),
DirectSuccessRate: m.Get("directSuccessRate").Float(),
AvgRTTMs: m.Get("avgRttMs").Float(),
JitterMs: m.Get("jitterMs").Float(),
PacketLossRate: m.Get("packetLossRate").Float(),
BandwidthMbps: m.Get("bandwidthMbps").Float(),
NATType: m.Get("natType").String(),
}
var weights *pd.QualityWeights
if len(args) > 1 && !args[1].IsUndefined() && !args[1].IsNull() {
w := args[1]
weights = &pd.QualityWeights{
Latency: w.Get("latency").Float(),
Jitter: w.Get("jitter").Float(),
PacketLoss: w.Get("packetLoss").Float(),
Bandwidth: w.Get("bandwidth").Float(),
Connectivity: w.Get("connectivity").Float(),
Symmetry: w.Get("symmetry").Float(),
DirectSuccess: w.Get("directSuccess").Float(),
RelayPenalty: w.Get("relayPenalty").Float(),
NATType: w.Get("natType").Float(),
}
}
score := pd.PeerQualityScore(metrics, weights)
return score, nil
}
func computeTrustScore(_ js.Value, args []js.Value) (any, error) {
// computeTrustScore(metrics: TrustMetrics) -> score
if len(args) < 1 {
return nil, errors.New("computeTrustScore(metrics)")
}
m := args[0]
metrics := pd.TrustMetrics{
ReputationScore: m.Get("reputationScore").Float(),
SuccessfulTransactions: int64(m.Get("successfulTransactions").Int()),
FailedTransactions: int64(m.Get("failedTransactions").Int()),
AgeSeconds: int64(m.Get("ageSeconds").Int()),
VouchCount: m.Get("vouchCount").Int(),
FlagCount: m.Get("flagCount").Int(),
ProofOfWork: m.Get("proofOfWork").Float(),
}
score := pd.ComputeTrustScore(metrics)
return score, nil
}
func getDefaultQualityWeights(_ js.Value, _ []js.Value) (any, error) {
w := pd.DefaultQualityWeights()
return map[string]any{
"latency": w.Latency,
"jitter": w.Jitter,
"packetLoss": w.PacketLoss,
"bandwidth": w.Bandwidth,
"connectivity": w.Connectivity,
"symmetry": w.Symmetry,
"directSuccess": w.DirectSuccess,
"relayPenalty": w.RelayPenalty,
"natType": w.NATType,
}, nil
}
func getDefaultPeerFeatureRanges(_ js.Value, _ []js.Value) (any, error) {
ranges := pd.DefaultPeerFeatureRanges()
jsRanges := make([]any, len(ranges.Ranges))
for i, r := range ranges.Ranges {
jsRanges[i] = map[string]any{
"min": r.Min,
"max": r.Max,
}
}
return map[string]any{
"ranges": jsRanges,
"labels": pd.StandardFeatureLabels(),
}, nil
}
func normalizePeerFeatures(_ js.Value, args []js.Value) (any, error) {
// normalizePeerFeatures(features: number[], ranges?: FeatureRanges) -> number[]
if len(args) < 1 {
return nil, errors.New("normalizePeerFeatures(features)")
}
features, err := getFloatSlice(args[0])
if err != nil {
return nil, err
}
ranges := pd.DefaultPeerFeatureRanges()
if len(args) > 1 && !args[1].IsUndefined() && !args[1].IsNull() {
rangesArg := args[1].Get("ranges")
if !rangesArg.IsUndefined() && !rangesArg.IsNull() {
ln := rangesArg.Length()
ranges.Ranges = make([]pd.AxisStats, ln)
for i := 0; i < ln; i++ {
r := rangesArg.Index(i)
ranges.Ranges[i] = pd.AxisStats{
Min: r.Get("min").Float(),
Max: r.Get("max").Float(),
}
}
}
}
normalized := pd.NormalizePeerFeatures(features, ranges)
return normalized, nil
}
func weightedPeerFeatures(_ js.Value, args []js.Value) (any, error) {
// weightedPeerFeatures(normalized: number[], weights: number[]) -> number[]
if len(args) < 2 {
return nil, errors.New("weightedPeerFeatures(normalized, weights)")
}
normalized, err := getFloatSlice(args[0])
if err != nil {
return nil, err
}
weights, err := getFloatSlice(args[1])
if err != nil {
return nil, err
}
weighted := pd.WeightedPeerFeatures(normalized, weights)
return weighted, nil
}
// ============================================================================
// DNS Tools Functions
// ============================================================================
func getExternalToolLinks(_ js.Value, args []js.Value) (any, error) {
// getExternalToolLinks(domain: string) -> ExternalToolLinks
if len(args) < 1 {
return nil, errors.New("getExternalToolLinks(domain)")
}
domain := args[0].String()
links := pd.GetExternalToolLinks(domain)
return externalToolLinksToJS(links), nil
}
func getExternalToolLinksIP(_ js.Value, args []js.Value) (any, error) {
// getExternalToolLinksIP(ip: string) -> ExternalToolLinks
if len(args) < 1 {
return nil, errors.New("getExternalToolLinksIP(ip)")
}
ip := args[0].String()
links := pd.GetExternalToolLinksIP(ip)
return externalToolLinksToJS(links), nil
}
func getExternalToolLinksEmail(_ js.Value, args []js.Value) (any, error) {
// getExternalToolLinksEmail(emailOrDomain: string) -> ExternalToolLinks
if len(args) < 1 {
return nil, errors.New("getExternalToolLinksEmail(emailOrDomain)")
}
emailOrDomain := args[0].String()
links := pd.GetExternalToolLinksEmail(emailOrDomain)
return externalToolLinksToJS(links), nil
}
func externalToolLinksToJS(links pd.ExternalToolLinks) map[string]any {
return map[string]any{
"target": links.Target,
"type": links.Type,
// MXToolbox
"mxtoolboxDns": links.MXToolboxDNS,
"mxtoolboxMx": links.MXToolboxMX,
"mxtoolboxBlacklist": links.MXToolboxBlacklist,
"mxtoolboxSmtp": links.MXToolboxSMTP,
"mxtoolboxSpf": links.MXToolboxSPF,
"mxtoolboxDmarc": links.MXToolboxDMARC,
"mxtoolboxDkim": links.MXToolboxDKIM,
"mxtoolboxHttp": links.MXToolboxHTTP,
"mxtoolboxHttps": links.MXToolboxHTTPS,
"mxtoolboxPing": links.MXToolboxPing,
"mxtoolboxTrace": links.MXToolboxTrace,
"mxtoolboxWhois": links.MXToolboxWhois,
"mxtoolboxAsn": links.MXToolboxASN,
// DNSChecker
"dnscheckerDns": links.DNSCheckerDNS,
"dnscheckerPropagation": links.DNSCheckerPropagation,
// Other tools
"whois": links.WhoIs,
"viewdns": links.ViewDNS,
"intodns": links.IntoDNS,
"dnsviz": links.DNSViz,
"securitytrails": links.SecurityTrails,
"shodan": links.Shodan,
"censys": links.Censys,
"builtwith": links.BuiltWith,
"ssllabs": links.SSLLabs,
"hstsPreload": links.HSTSPreload,
"hardenize": links.Hardenize,
// IP-specific
"ipinfo": links.IPInfo,
"abuseipdb": links.AbuseIPDB,
"virustotal": links.VirusTotal,
"threatcrowd": links.ThreatCrowd,
// Email-specific
"mailtester": links.MailTester,
"learndmarc": links.LearnDMARC,
}
}
func getRDAPServers(_ js.Value, _ []js.Value) (any, error) {
// Returns a list of known RDAP servers for reference
servers := map[string]any{
"tlds": map[string]string{
"com": "https://rdap.verisign.com/com/v1/",
"net": "https://rdap.verisign.com/net/v1/",
"org": "https://rdap.publicinterestregistry.org/rdap/",
"info": "https://rdap.afilias.net/rdap/info/",
"io": "https://rdap.nic.io/",
"co": "https://rdap.nic.co/",
"dev": "https://rdap.nic.google/",
"app": "https://rdap.nic.google/",
},
"rirs": map[string]string{
"arin": "https://rdap.arin.net/registry/",
"ripe": "https://rdap.db.ripe.net/",
"apnic": "https://rdap.apnic.net/",
"afrinic": "https://rdap.afrinic.net/rdap/",
"lacnic": "https://rdap.lacnic.net/rdap/",
},
"universal": "https://rdap.org/",
}
return servers, nil
}
func buildRDAPDomainURL(_ js.Value, args []js.Value) (any, error) {
// buildRDAPDomainURL(domain: string) -> string
if len(args) < 1 {
return nil, errors.New("buildRDAPDomainURL(domain)")
}
domain := args[0].String()
// Use universal RDAP redirector
return fmt.Sprintf("https://rdap.org/domain/%s", domain), nil
}
func buildRDAPIPURL(_ js.Value, args []js.Value) (any, error) {
// buildRDAPIPURL(ip: string) -> string
if len(args) < 1 {
return nil, errors.New("buildRDAPIPURL(ip)")
}
ip := args[0].String()
return fmt.Sprintf("https://rdap.org/ip/%s", ip), nil
}
func buildRDAPASNURL(_ js.Value, args []js.Value) (any, error) {
// buildRDAPASNURL(asn: string) -> string
if len(args) < 1 {
return nil, errors.New("buildRDAPASNURL(asn)")
}
asn := args[0].String()
// Normalize ASN
asnNum := asn
if len(asn) > 2 && (asn[:2] == "AS" || asn[:2] == "as") {
asnNum = asn[2:]
}
return fmt.Sprintf("https://rdap.org/autnum/%s", asnNum), nil
}
func getDNSRecordTypes(_ js.Value, _ []js.Value) (any, error) {
// Returns all available DNS record types
types := pd.GetAllDNSRecordTypes()
result := make([]string, len(types))
for i, t := range types {
result[i] = string(t)
}
return result, nil
}
func getDNSRecordTypeInfo(_ js.Value, _ []js.Value) (any, error) {
// Returns detailed info about all DNS record types
info := pd.GetDNSRecordTypeInfo()
result := make([]any, len(info))
for i, r := range info {
result[i] = map[string]any{
"type": string(r.Type),
"name": r.Name,
"description": r.Description,
"rfc": r.RFC,
"common": r.Common,
}
}
return result, nil
}
func getCommonDNSRecordTypes(_ js.Value, _ []js.Value) (any, error) {
// Returns only commonly used DNS record types
types := pd.GetCommonDNSRecordTypes()
result := make([]string, len(types))
for i, t := range types {
result[i] = string(t)
}
return result, nil
}
func main() {
// Export core API
export("pxVersion", version)
@ -237,6 +713,34 @@ func main() {
export("pxRadius", radius)
export("pxExportJSON", exportJSON)
// Export analytics API
export("pxGetAnalytics", getAnalytics)
export("pxGetPeerStats", getPeerStats)
export("pxGetTopPeers", getTopPeers)
export("pxGetAxisDistributions", getAxisDistributions)
export("pxResetAnalytics", resetAnalytics)
export("pxComputeDistributionStats", computeDistributionStats)
// Export NAT routing / peer quality API
export("pxComputePeerQualityScore", computePeerQualityScore)
export("pxComputeTrustScore", computeTrustScore)
export("pxGetDefaultQualityWeights", getDefaultQualityWeights)
export("pxGetDefaultPeerFeatureRanges", getDefaultPeerFeatureRanges)
export("pxNormalizePeerFeatures", normalizePeerFeatures)
export("pxWeightedPeerFeatures", weightedPeerFeatures)
// Export DNS tools API
export("pxGetExternalToolLinks", getExternalToolLinks)
export("pxGetExternalToolLinksIP", getExternalToolLinksIP)
export("pxGetExternalToolLinksEmail", getExternalToolLinksEmail)
export("pxGetRDAPServers", getRDAPServers)
export("pxBuildRDAPDomainURL", buildRDAPDomainURL)
export("pxBuildRDAPIPURL", buildRDAPIPURL)
export("pxBuildRDAPASNURL", buildRDAPASNURL)
export("pxGetDNSRecordTypes", getDNSRecordTypes)
export("pxGetDNSRecordTypeInfo", getDNSRecordTypeInfo)
export("pxGetCommonDNSRecordTypes", getCommonDNSRecordTypes)
// Keep running
select {}
}
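// ---------------------------------------------------------------------------
// Usage sketch (illustrative): scoring a candidate peer with the NAT-routing
// and trust helpers exported above, from the JS/TS side. The import path is an
// assumption; field names follow the NATRoutingMetrics / TrustMetrics /
// QualityWeights declarations, and all fields are set because the wasm
// bindings read each of them directly.
import { init } from './poindexter.js';

async function scorePeer(): Promise<void> {
  const px = await init();

  // Network-quality score in [0, 1]; higher means a better routing candidate.
  const weights = await px.getDefaultQualityWeights();
  const quality = await px.computePeerQualityScore(
    {
      connectivityScore: 0.9,
      symmetryScore: 0.7,
      relayProbability: 0.1,
      directSuccessRate: 0.95,
      avgRttMs: 40,
      jitterMs: 8,
      packetLossRate: 0.005,
      bandwidthMbps: 120,
      natType: 'full_cone',
    },
    { ...weights, latency: weights.latency * 2 }, // emphasize latency over the defaults
  );

  // Reputation-style trust score from transaction history, vouches, and flags.
  const trust = await px.computeTrustScore({
    reputationScore: 0,
    successfulTransactions: 100,
    failedTransactions: 2,
    ageSeconds: 86400 * 30,
    vouchCount: 5,
    flagCount: 0,
    proofOfWork: 0,
  });

  // Normalized features in the standard order (latencyMs, hopCount, geoDistanceKm,
  // trustScore, bandwidthMbps, packetLossRate, connectivityPct, natScore) could
  // then be fed back into a KD-tree as coordinates.
  const normalized = await px.normalizePeerFeatures([40, 5, 1000, trust, 120, 0.005, 95, quality]);
  console.log({ quality, trust, normalized });
}

scorePeer().catch(console.error);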