diff --git a/dns_tools.go b/dns_tools.go
new file mode 100644
index 0000000..de57110
--- /dev/null
+++ b/dns_tools.go
@@ -0,0 +1,1006 @@
+package poindexter
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+)
+
+// ============================================================================
+// DNS Record Types
+// ============================================================================
+
+// DNSRecordType represents DNS record types
+type DNSRecordType string
+
+const (
+	// Standard record types
+	DNSRecordA     DNSRecordType = "A"
+	DNSRecordAAAA  DNSRecordType = "AAAA"
+	DNSRecordMX    DNSRecordType = "MX"
+	DNSRecordTXT   DNSRecordType = "TXT"
+	DNSRecordNS    DNSRecordType = "NS"
+	DNSRecordCNAME DNSRecordType = "CNAME"
+	DNSRecordSOA   DNSRecordType = "SOA"
+	DNSRecordPTR   DNSRecordType = "PTR"
+	DNSRecordSRV   DNSRecordType = "SRV"
+	DNSRecordCAA   DNSRecordType = "CAA"
+
+	// Additional record types (ClouDNS and others)
+	DNSRecordALIAS  DNSRecordType = "ALIAS"  // Virtual ANAME record (ClouDNS, Route53, etc.)
+	DNSRecordRP     DNSRecordType = "RP"     // Responsible Person
+	DNSRecordSSHFP  DNSRecordType = "SSHFP"  // SSH Fingerprint
+	DNSRecordTLSA   DNSRecordType = "TLSA"   // DANE TLS Authentication
+	DNSRecordDS     DNSRecordType = "DS"     // DNSSEC Delegation Signer
+	DNSRecordDNSKEY DNSRecordType = "DNSKEY" // DNSSEC Key
+	DNSRecordNAPTR  DNSRecordType = "NAPTR"  // Naming Authority Pointer
+	DNSRecordLOC    DNSRecordType = "LOC"    // Geographic Location
+	DNSRecordHINFO  DNSRecordType = "HINFO"  // Host Information
+	DNSRecordCERT   DNSRecordType = "CERT"   // Certificate record
+	DNSRecordSMIMEA DNSRecordType = "SMIMEA" // S/MIME Certificate Association
+	DNSRecordWR     DNSRecordType = "WR"     // Web Redirect (ClouDNS specific)
+	DNSRecordSPF    DNSRecordType = "SPF"    // Sender Policy Framework (legacy, use TXT)
+)
+
+// DNSRecord represents a generic DNS record
+type DNSRecord struct {
+	Type  DNSRecordType `json:"type"`
+	Name  string        `json:"name"`
+	Value string        `json:"value"`
+	TTL   int           `json:"ttl,omitempty"`
+}
+
+// MXRecord represents an MX record with priority
+type MXRecord struct {
+	Host     string `json:"host"`
+	Priority uint16 `json:"priority"`
+}
+
+// SRVRecord represents an SRV record
+type SRVRecord struct {
+	Target   string `json:"target"`
+	Port     uint16 `json:"port"`
+	Priority uint16 `json:"priority"`
+	Weight   uint16 `json:"weight"`
+}
+
+// SOARecord represents an SOA record
+type SOARecord struct {
+	PrimaryNS  string `json:"primaryNs"`
+	AdminEmail string `json:"adminEmail"`
+	Serial     uint32 `json:"serial"`
+	Refresh    uint32 `json:"refresh"`
+	Retry      uint32 `json:"retry"`
+	Expire     uint32 `json:"expire"`
+	MinTTL     uint32 `json:"minTtl"`
+}
+
+// CAARecord represents a CAA record
+type CAARecord struct {
+	Flag  uint8  `json:"flag"`
+	Tag   string `json:"tag"` // "issue", "issuewild", "iodef"
+	Value string `json:"value"`
+}
+
+// SSHFPRecord represents an SSHFP record
+type SSHFPRecord struct {
+	Algorithm   uint8  `json:"algorithm"` // 1=RSA, 2=DSA, 3=ECDSA, 4=Ed25519
+	FPType      uint8  `json:"fpType"`    // 1=SHA-1, 2=SHA-256
+	Fingerprint string `json:"fingerprint"`
+}
+
+// TLSARecord represents a TLSA (DANE) record
+type TLSARecord struct {
+	Usage        uint8  `json:"usage"`        // 0-3: CA constraint, Service cert, Trust anchor, Domain-issued
+	Selector     uint8  `json:"selector"`     // 0=Full cert, 1=SubjectPublicKeyInfo
+	MatchingType uint8  `json:"matchingType"` // 0=Exact, 1=SHA-256, 2=SHA-512
+	CertData     string `json:"certData"`
+}
+
+// DSRecord represents a DS (DNSSEC
Delegation Signer) record +type DSRecord struct { + KeyTag uint16 `json:"keyTag"` + Algorithm uint8 `json:"algorithm"` + DigestType uint8 `json:"digestType"` + Digest string `json:"digest"` +} + +// DNSKEYRecord represents a DNSKEY record +type DNSKEYRecord struct { + Flags uint16 `json:"flags"` + Protocol uint8 `json:"protocol"` + Algorithm uint8 `json:"algorithm"` + PublicKey string `json:"publicKey"` +} + +// NAPTRRecord represents a NAPTR record +type NAPTRRecord struct { + Order uint16 `json:"order"` + Preference uint16 `json:"preference"` + Flags string `json:"flags"` + Service string `json:"service"` + Regexp string `json:"regexp"` + Replacement string `json:"replacement"` +} + +// RPRecord represents an RP (Responsible Person) record +type RPRecord struct { + Mailbox string `json:"mailbox"` // Email as DNS name (user.domain.com) + TxtDom string `json:"txtDom"` // Domain with TXT record containing more info +} + +// LOCRecord represents a LOC (Location) record +type LOCRecord struct { + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + Altitude float64 `json:"altitude"` + Size float64 `json:"size"` + HPrecis float64 `json:"hPrecision"` + VPrecis float64 `json:"vPrecision"` +} + +// ALIASRecord represents an ALIAS/ANAME record (provider-specific) +type ALIASRecord struct { + Target string `json:"target"` +} + +// WebRedirectRecord represents a Web Redirect record (ClouDNS specific) +type WebRedirectRecord struct { + URL string `json:"url"` + RedirectType int `json:"redirectType"` // 301, 302, etc. + Frame bool `json:"frame"` // Frame redirect vs HTTP redirect +} + +// DNSRecordTypeInfo provides metadata about a DNS record type +type DNSRecordTypeInfo struct { + Type DNSRecordType `json:"type"` + Name string `json:"name"` + Description string `json:"description"` + RFC string `json:"rfc,omitempty"` + Common bool `json:"common"` // Commonly used record type +} + +// GetDNSRecordTypeInfo returns metadata for all supported DNS record types +func GetDNSRecordTypeInfo() []DNSRecordTypeInfo { + return []DNSRecordTypeInfo{ + // Common record types + {DNSRecordA, "A", "IPv4 address record - maps hostname to IPv4", "RFC 1035", true}, + {DNSRecordAAAA, "AAAA", "IPv6 address record - maps hostname to IPv6", "RFC 3596", true}, + {DNSRecordCNAME, "CNAME", "Canonical name - alias to another domain", "RFC 1035", true}, + {DNSRecordMX, "MX", "Mail exchanger - specifies mail servers", "RFC 1035", true}, + {DNSRecordTXT, "TXT", "Text record - stores arbitrary text (SPF, DKIM, etc.)", "RFC 1035", true}, + {DNSRecordNS, "NS", "Nameserver - delegates DNS zone to nameservers", "RFC 1035", true}, + {DNSRecordSOA, "SOA", "Start of Authority - zone administration data", "RFC 1035", true}, + {DNSRecordPTR, "PTR", "Pointer - reverse DNS lookup (IP to hostname)", "RFC 1035", true}, + {DNSRecordSRV, "SRV", "Service - locates services (port, priority, weight)", "RFC 2782", true}, + {DNSRecordCAA, "CAA", "Certification Authority Authorization", "RFC 6844", true}, + + // Additional/specialized record types + {DNSRecordALIAS, "ALIAS", "Virtual A record - CNAME-like for apex domain", "", true}, + {DNSRecordRP, "RP", "Responsible Person - contact info for domain", "RFC 1183", false}, + {DNSRecordSSHFP, "SSHFP", "SSH Fingerprint - SSH host key verification", "RFC 4255", false}, + {DNSRecordTLSA, "TLSA", "DANE TLS Authentication - certificate pinning", "RFC 6698", false}, + {DNSRecordDS, "DS", "Delegation Signer - DNSSEC chain of trust", "RFC 4034", false}, + {DNSRecordDNSKEY, "DNSKEY", "DNSSEC 
public key", "RFC 4034", false}, + {DNSRecordNAPTR, "NAPTR", "Naming Authority Pointer - ENUM, SIP routing", "RFC 2915", false}, + {DNSRecordLOC, "LOC", "Location - geographic coordinates", "RFC 1876", false}, + {DNSRecordHINFO, "HINFO", "Host Information - CPU and OS type", "RFC 1035", false}, + {DNSRecordCERT, "CERT", "Certificate - stores certificates", "RFC 4398", false}, + {DNSRecordSMIMEA, "SMIMEA", "S/MIME Certificate Association", "RFC 8162", false}, + {DNSRecordSPF, "SPF", "Sender Policy Framework (legacy, use TXT)", "RFC 4408", false}, + {DNSRecordWR, "WR", "Web Redirect - HTTP redirect (ClouDNS specific)", "", false}, + } +} + +// GetCommonDNSRecordTypes returns only commonly used record types +func GetCommonDNSRecordTypes() []DNSRecordType { + info := GetDNSRecordTypeInfo() + result := make([]DNSRecordType, 0) + for _, r := range info { + if r.Common { + result = append(result, r.Type) + } + } + return result +} + +// GetAllDNSRecordTypes returns all supported record types +func GetAllDNSRecordTypes() []DNSRecordType { + return []DNSRecordType{ + DNSRecordA, DNSRecordAAAA, DNSRecordCNAME, DNSRecordMX, DNSRecordTXT, + DNSRecordNS, DNSRecordSOA, DNSRecordPTR, DNSRecordSRV, DNSRecordCAA, + DNSRecordALIAS, DNSRecordRP, DNSRecordSSHFP, DNSRecordTLSA, DNSRecordDS, + DNSRecordDNSKEY, DNSRecordNAPTR, DNSRecordLOC, DNSRecordHINFO, DNSRecordCERT, + DNSRecordSMIMEA, DNSRecordSPF, DNSRecordWR, + } +} + +// DNSLookupResult contains the results of a DNS lookup +type DNSLookupResult struct { + Domain string `json:"domain"` + QueryType string `json:"queryType"` + Records []DNSRecord `json:"records"` + MXRecords []MXRecord `json:"mxRecords,omitempty"` + SRVRecords []SRVRecord `json:"srvRecords,omitempty"` + SOARecord *SOARecord `json:"soaRecord,omitempty"` + LookupTimeMs int64 `json:"lookupTimeMs"` + Error string `json:"error,omitempty"` + Timestamp time.Time `json:"timestamp"` +} + +// CompleteDNSLookup contains all DNS records for a domain +type CompleteDNSLookup struct { + Domain string `json:"domain"` + A []string `json:"a,omitempty"` + AAAA []string `json:"aaaa,omitempty"` + MX []MXRecord `json:"mx,omitempty"` + NS []string `json:"ns,omitempty"` + TXT []string `json:"txt,omitempty"` + CNAME string `json:"cname,omitempty"` + SOA *SOARecord `json:"soa,omitempty"` + LookupTimeMs int64 `json:"lookupTimeMs"` + Errors []string `json:"errors,omitempty"` + Timestamp time.Time `json:"timestamp"` +} + +// ============================================================================ +// DNS Lookup Functions +// ============================================================================ + +// DNSLookup performs a DNS lookup for the specified record type +func DNSLookup(domain string, recordType DNSRecordType) DNSLookupResult { + return DNSLookupWithTimeout(domain, recordType, 10*time.Second) +} + +// DNSLookupWithTimeout performs a DNS lookup with a custom timeout +func DNSLookupWithTimeout(domain string, recordType DNSRecordType, timeout time.Duration) DNSLookupResult { + start := time.Now() + result := DNSLookupResult{ + Domain: domain, + QueryType: string(recordType), + Timestamp: start, + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + resolver := net.Resolver{} + + switch recordType { + case DNSRecordA: + ips, err := resolver.LookupIP(ctx, "ip4", domain) + if err != nil { + result.Error = err.Error() + } else { + for _, ip := range ips { + result.Records = append(result.Records, DNSRecord{ + Type: DNSRecordA, + Name: domain, + Value: ip.String(), + }) 
+ } + } + + case DNSRecordAAAA: + ips, err := resolver.LookupIP(ctx, "ip6", domain) + if err != nil { + result.Error = err.Error() + } else { + for _, ip := range ips { + result.Records = append(result.Records, DNSRecord{ + Type: DNSRecordAAAA, + Name: domain, + Value: ip.String(), + }) + } + } + + case DNSRecordMX: + mxs, err := resolver.LookupMX(ctx, domain) + if err != nil { + result.Error = err.Error() + } else { + for _, mx := range mxs { + result.MXRecords = append(result.MXRecords, MXRecord{ + Host: strings.TrimSuffix(mx.Host, "."), + Priority: mx.Pref, + }) + result.Records = append(result.Records, DNSRecord{ + Type: DNSRecordMX, + Name: domain, + Value: fmt.Sprintf("%d %s", mx.Pref, mx.Host), + }) + } + // Sort by priority + sort.Slice(result.MXRecords, func(i, j int) bool { + return result.MXRecords[i].Priority < result.MXRecords[j].Priority + }) + } + + case DNSRecordTXT: + txts, err := resolver.LookupTXT(ctx, domain) + if err != nil { + result.Error = err.Error() + } else { + for _, txt := range txts { + result.Records = append(result.Records, DNSRecord{ + Type: DNSRecordTXT, + Name: domain, + Value: txt, + }) + } + } + + case DNSRecordNS: + nss, err := resolver.LookupNS(ctx, domain) + if err != nil { + result.Error = err.Error() + } else { + for _, ns := range nss { + result.Records = append(result.Records, DNSRecord{ + Type: DNSRecordNS, + Name: domain, + Value: strings.TrimSuffix(ns.Host, "."), + }) + } + } + + case DNSRecordCNAME: + cname, err := resolver.LookupCNAME(ctx, domain) + if err != nil { + result.Error = err.Error() + } else { + result.Records = append(result.Records, DNSRecord{ + Type: DNSRecordCNAME, + Name: domain, + Value: strings.TrimSuffix(cname, "."), + }) + } + + case DNSRecordSRV: + // SRV records require a service and protocol prefix, e.g., _http._tcp.example.com + _, srvs, err := resolver.LookupSRV(ctx, "", "", domain) + if err != nil { + result.Error = err.Error() + } else { + for _, srv := range srvs { + result.SRVRecords = append(result.SRVRecords, SRVRecord{ + Target: strings.TrimSuffix(srv.Target, "."), + Port: srv.Port, + Priority: srv.Priority, + Weight: srv.Weight, + }) + result.Records = append(result.Records, DNSRecord{ + Type: DNSRecordSRV, + Name: domain, + Value: fmt.Sprintf("%d %d %d %s", srv.Priority, srv.Weight, srv.Port, srv.Target), + }) + } + } + + case DNSRecordPTR: + names, err := resolver.LookupAddr(ctx, domain) + if err != nil { + result.Error = err.Error() + } else { + for _, name := range names { + result.Records = append(result.Records, DNSRecord{ + Type: DNSRecordPTR, + Name: domain, + Value: strings.TrimSuffix(name, "."), + }) + } + } + + default: + result.Error = fmt.Sprintf("unsupported record type: %s", recordType) + } + + result.LookupTimeMs = time.Since(start).Milliseconds() + return result +} + +// DNSLookupAll performs lookups for all common record types +func DNSLookupAll(domain string) CompleteDNSLookup { + return DNSLookupAllWithTimeout(domain, 10*time.Second) +} + +// DNSLookupAllWithTimeout performs lookups for all common record types with timeout +func DNSLookupAllWithTimeout(domain string, timeout time.Duration) CompleteDNSLookup { + start := time.Now() + result := CompleteDNSLookup{ + Domain: domain, + Timestamp: start, + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + resolver := net.Resolver{} + + // A records + if ips, err := resolver.LookupIP(ctx, "ip4", domain); err == nil { + for _, ip := range ips { + result.A = append(result.A, ip.String()) + } + } else if 
!isNoSuchHostError(err) { + result.Errors = append(result.Errors, fmt.Sprintf("A: %s", err.Error())) + } + + // AAAA records + if ips, err := resolver.LookupIP(ctx, "ip6", domain); err == nil { + for _, ip := range ips { + result.AAAA = append(result.AAAA, ip.String()) + } + } else if !isNoSuchHostError(err) { + result.Errors = append(result.Errors, fmt.Sprintf("AAAA: %s", err.Error())) + } + + // MX records + if mxs, err := resolver.LookupMX(ctx, domain); err == nil { + for _, mx := range mxs { + result.MX = append(result.MX, MXRecord{ + Host: strings.TrimSuffix(mx.Host, "."), + Priority: mx.Pref, + }) + } + sort.Slice(result.MX, func(i, j int) bool { + return result.MX[i].Priority < result.MX[j].Priority + }) + } else if !isNoSuchHostError(err) { + result.Errors = append(result.Errors, fmt.Sprintf("MX: %s", err.Error())) + } + + // NS records + if nss, err := resolver.LookupNS(ctx, domain); err == nil { + for _, ns := range nss { + result.NS = append(result.NS, strings.TrimSuffix(ns.Host, ".")) + } + } else if !isNoSuchHostError(err) { + result.Errors = append(result.Errors, fmt.Sprintf("NS: %s", err.Error())) + } + + // TXT records + if txts, err := resolver.LookupTXT(ctx, domain); err == nil { + result.TXT = txts + } else if !isNoSuchHostError(err) { + result.Errors = append(result.Errors, fmt.Sprintf("TXT: %s", err.Error())) + } + + // CNAME record + if cname, err := resolver.LookupCNAME(ctx, domain); err == nil { + result.CNAME = strings.TrimSuffix(cname, ".") + // If CNAME equals domain, it's not really a CNAME + if result.CNAME == domain { + result.CNAME = "" + } + } + + result.LookupTimeMs = time.Since(start).Milliseconds() + return result +} + +// ReverseDNSLookup performs a reverse DNS lookup for an IP address +func ReverseDNSLookup(ip string) DNSLookupResult { + return DNSLookupWithTimeout(ip, DNSRecordPTR, 10*time.Second) +} + +func isNoSuchHostError(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "no such host") || + strings.Contains(err.Error(), "NXDOMAIN") || + strings.Contains(err.Error(), "not found") +} + +// ============================================================================ +// RDAP (Registration Data Access Protocol) - New Style WHOIS +// ============================================================================ + +// RDAPResponse represents an RDAP response +type RDAPResponse struct { + // Common fields + Handle string `json:"handle,omitempty"` + LDHName string `json:"ldhName,omitempty"` // Domain name + UnicodeName string `json:"unicodeName,omitempty"` + Status []string `json:"status,omitempty"` + Events []RDAPEvent `json:"events,omitempty"` + Entities []RDAPEntity `json:"entities,omitempty"` + Nameservers []RDAPNs `json:"nameservers,omitempty"` + Links []RDAPLink `json:"links,omitempty"` + Remarks []RDAPRemark `json:"remarks,omitempty"` + Notices []RDAPNotice `json:"notices,omitempty"` + + // Network-specific (for IP lookups) + StartAddress string `json:"startAddress,omitempty"` + EndAddress string `json:"endAddress,omitempty"` + IPVersion string `json:"ipVersion,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Country string `json:"country,omitempty"` + ParentHandle string `json:"parentHandle,omitempty"` + + // Error fields + ErrorCode int `json:"errorCode,omitempty"` + Title string `json:"title,omitempty"` + Description []string `json:"description,omitempty"` + + // Metadata + RawJSON string `json:"rawJson,omitempty"` + LookupTimeMs int64 `json:"lookupTimeMs"` + 
Timestamp time.Time `json:"timestamp"` + Error string `json:"error,omitempty"` +} + +// RDAPEvent represents an RDAP event (registration, expiration, etc.) +type RDAPEvent struct { + EventAction string `json:"eventAction"` + EventDate string `json:"eventDate"` + EventActor string `json:"eventActor,omitempty"` +} + +// RDAPEntity represents an entity (registrar, registrant, etc.) +type RDAPEntity struct { + Handle string `json:"handle,omitempty"` + Roles []string `json:"roles,omitempty"` + VCardArray []any `json:"vcardArray,omitempty"` + Entities []RDAPEntity `json:"entities,omitempty"` + Events []RDAPEvent `json:"events,omitempty"` + Links []RDAPLink `json:"links,omitempty"` + Remarks []RDAPRemark `json:"remarks,omitempty"` +} + +// RDAPNs represents a nameserver in RDAP +type RDAPNs struct { + LDHName string `json:"ldhName"` + IPAddresses *RDAPIPs `json:"ipAddresses,omitempty"` +} + +// RDAPIPs represents IP addresses for a nameserver +type RDAPIPs struct { + V4 []string `json:"v4,omitempty"` + V6 []string `json:"v6,omitempty"` +} + +// RDAPLink represents a link in RDAP +type RDAPLink struct { + Value string `json:"value,omitempty"` + Rel string `json:"rel,omitempty"` + Href string `json:"href,omitempty"` + Type string `json:"type,omitempty"` +} + +// RDAPRemark represents a remark/notice +type RDAPRemark struct { + Title string `json:"title,omitempty"` + Description []string `json:"description,omitempty"` + Links []RDAPLink `json:"links,omitempty"` +} + +// RDAPNotice is an alias for RDAPRemark +type RDAPNotice = RDAPRemark + +// RDAPBootstrapRegistry holds the RDAP bootstrap data +type RDAPBootstrapRegistry struct { + Services [][]interface{} `json:"services"` + Version string `json:"version"` +} + +// RDAP server URLs for different TLDs and RIRs +var rdapServers = map[string]string{ + // Generic TLDs (ICANN) + "com": "https://rdap.verisign.com/com/v1/", + "net": "https://rdap.verisign.com/net/v1/", + "org": "https://rdap.publicinterestregistry.org/rdap/", + "info": "https://rdap.afilias.net/rdap/info/", + "biz": "https://rdap.afilias.net/rdap/biz/", + "io": "https://rdap.nic.io/", + "co": "https://rdap.nic.co/", + "me": "https://rdap.nic.me/", + "app": "https://rdap.nic.google/", + "dev": "https://rdap.nic.google/", + + // Country code TLDs + "uk": "https://rdap.nominet.uk/uk/", + "de": "https://rdap.denic.de/", + "nl": "https://rdap.sidn.nl/", + "au": "https://rdap.auda.org.au/", + "nz": "https://rdap.dns.net.nz/", + "br": "https://rdap.registro.br/", + "jp": "https://rdap.jprs.jp/", + + // RIRs for IP lookups + "arin": "https://rdap.arin.net/registry/", + "ripe": "https://rdap.db.ripe.net/", + "apnic": "https://rdap.apnic.net/", + "afrinic": "https://rdap.afrinic.net/rdap/", + "lacnic": "https://rdap.lacnic.net/rdap/", +} + +// RDAPLookupDomain performs an RDAP lookup for a domain +func RDAPLookupDomain(domain string) RDAPResponse { + return RDAPLookupDomainWithTimeout(domain, 15*time.Second) +} + +// RDAPLookupDomainWithTimeout performs an RDAP lookup with custom timeout +func RDAPLookupDomainWithTimeout(domain string, timeout time.Duration) RDAPResponse { + start := time.Now() + result := RDAPResponse{ + LDHName: domain, + Timestamp: start, + } + + // Extract TLD + parts := strings.Split(strings.ToLower(domain), ".") + if len(parts) < 2 { + result.Error = "invalid domain format" + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + tld := parts[len(parts)-1] + + // Find RDAP server + serverURL, ok := rdapServers[tld] + if !ok { + // Try to use IANA 
bootstrap + serverURL = fmt.Sprintf("https://rdap.org/domain/%s", domain) + } else { + serverURL = serverURL + "domain/" + domain + } + + client := &http.Client{Timeout: timeout} + resp, err := client.Get(serverURL) + if err != nil { + result.Error = fmt.Sprintf("RDAP request failed: %s", err.Error()) + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + result.Error = fmt.Sprintf("failed to read response: %s", err.Error()) + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + + result.RawJSON = string(body) + + if resp.StatusCode != http.StatusOK { + result.Error = fmt.Sprintf("RDAP server returned status %d", resp.StatusCode) + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + + if err := json.Unmarshal(body, &result); err != nil { + result.Error = fmt.Sprintf("failed to parse RDAP response: %s", err.Error()) + } + + result.LookupTimeMs = time.Since(start).Milliseconds() + return result +} + +// RDAPLookupIP performs an RDAP lookup for an IP address +func RDAPLookupIP(ip string) RDAPResponse { + return RDAPLookupIPWithTimeout(ip, 15*time.Second) +} + +// RDAPLookupIPWithTimeout performs an RDAP lookup for an IP with custom timeout +func RDAPLookupIPWithTimeout(ip string, timeout time.Duration) RDAPResponse { + start := time.Now() + result := RDAPResponse{ + StartAddress: ip, + Timestamp: start, + } + + parsedIP := net.ParseIP(ip) + if parsedIP == nil { + result.Error = "invalid IP address" + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + + // Use rdap.org as a universal redirector + serverURL := fmt.Sprintf("https://rdap.org/ip/%s", ip) + + client := &http.Client{Timeout: timeout} + resp, err := client.Get(serverURL) + if err != nil { + result.Error = fmt.Sprintf("RDAP request failed: %s", err.Error()) + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + result.Error = fmt.Sprintf("failed to read response: %s", err.Error()) + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + + result.RawJSON = string(body) + + if resp.StatusCode != http.StatusOK { + result.Error = fmt.Sprintf("RDAP server returned status %d", resp.StatusCode) + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + + if err := json.Unmarshal(body, &result); err != nil { + result.Error = fmt.Sprintf("failed to parse RDAP response: %s", err.Error()) + } + + result.LookupTimeMs = time.Since(start).Milliseconds() + return result +} + +// RDAPLookupASN performs an RDAP lookup for an ASN +func RDAPLookupASN(asn string) RDAPResponse { + return RDAPLookupASNWithTimeout(asn, 15*time.Second) +} + +// RDAPLookupASNWithTimeout performs an RDAP lookup for an ASN with timeout +func RDAPLookupASNWithTimeout(asn string, timeout time.Duration) RDAPResponse { + start := time.Now() + result := RDAPResponse{ + Handle: asn, + Timestamp: start, + } + + // Normalize ASN (remove "AS" prefix if present) + asnNum := strings.TrimPrefix(strings.ToUpper(asn), "AS") + + // Use rdap.org as a universal redirector + serverURL := fmt.Sprintf("https://rdap.org/autnum/%s", asnNum) + + client := &http.Client{Timeout: timeout} + resp, err := client.Get(serverURL) + if err != nil { + result.Error = fmt.Sprintf("RDAP request failed: %s", err.Error()) + result.LookupTimeMs = time.Since(start).Milliseconds() + return 
result + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + result.Error = fmt.Sprintf("failed to read response: %s", err.Error()) + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + + result.RawJSON = string(body) + + if resp.StatusCode != http.StatusOK { + result.Error = fmt.Sprintf("RDAP server returned status %d", resp.StatusCode) + result.LookupTimeMs = time.Since(start).Milliseconds() + return result + } + + if err := json.Unmarshal(body, &result); err != nil { + result.Error = fmt.Sprintf("failed to parse RDAP response: %s", err.Error()) + } + + result.LookupTimeMs = time.Since(start).Milliseconds() + return result +} + +// ============================================================================ +// External Tool Links +// ============================================================================ + +// ExternalToolLinks contains links to external DNS/network analysis tools +type ExternalToolLinks struct { + // Target being analyzed + Target string `json:"target"` + Type string `json:"type"` // "domain", "ip", "email" + + // MXToolbox links + MXToolboxDNS string `json:"mxtoolboxDns,omitempty"` + MXToolboxMX string `json:"mxtoolboxMx,omitempty"` + MXToolboxBlacklist string `json:"mxtoolboxBlacklist,omitempty"` + MXToolboxSMTP string `json:"mxtoolboxSmtp,omitempty"` + MXToolboxSPF string `json:"mxtoolboxSpf,omitempty"` + MXToolboxDMARC string `json:"mxtoolboxDmarc,omitempty"` + MXToolboxDKIM string `json:"mxtoolboxDkim,omitempty"` + MXToolboxHTTP string `json:"mxtoolboxHttp,omitempty"` + MXToolboxHTTPS string `json:"mxtoolboxHttps,omitempty"` + MXToolboxPing string `json:"mxtoolboxPing,omitempty"` + MXToolboxTrace string `json:"mxtoolboxTrace,omitempty"` + MXToolboxWhois string `json:"mxtoolboxWhois,omitempty"` + MXToolboxASN string `json:"mxtoolboxAsn,omitempty"` + + // DNSChecker links + DNSCheckerDNS string `json:"dnscheckerDns,omitempty"` + DNSCheckerPropagation string `json:"dnscheckerPropagation,omitempty"` + + // Other tools + WhoIs string `json:"whois,omitempty"` + ViewDNS string `json:"viewdns,omitempty"` + IntoDNS string `json:"intodns,omitempty"` + DNSViz string `json:"dnsviz,omitempty"` + SecurityTrails string `json:"securitytrails,omitempty"` + Shodan string `json:"shodan,omitempty"` + Censys string `json:"censys,omitempty"` + BuiltWith string `json:"builtwith,omitempty"` + SSLLabs string `json:"ssllabs,omitempty"` + HSTSPreload string `json:"hstsPreload,omitempty"` + Hardenize string `json:"hardenize,omitempty"` + + // IP-specific tools + IPInfo string `json:"ipinfo,omitempty"` + AbuseIPDB string `json:"abuseipdb,omitempty"` + VirusTotal string `json:"virustotal,omitempty"` + ThreatCrowd string `json:"threatcrowd,omitempty"` + + // Email-specific tools + MailTester string `json:"mailtester,omitempty"` + LearnDMARC string `json:"learndmarc,omitempty"` +} + +// GetExternalToolLinks generates links to external analysis tools for a domain +func GetExternalToolLinks(domain string) ExternalToolLinks { + encoded := url.QueryEscape(domain) + + return ExternalToolLinks{ + Target: domain, + Type: "domain", + + // MXToolbox + MXToolboxDNS: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=dns%%3a%s&run=toolpage", encoded), + MXToolboxMX: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=mx%%3a%s&run=toolpage", encoded), + MXToolboxBlacklist: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=blacklist%%3a%s&run=toolpage", encoded), + MXToolboxSMTP: 
fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=smtp%%3a%s&run=toolpage", encoded), + MXToolboxSPF: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=spf%%3a%s&run=toolpage", encoded), + MXToolboxDMARC: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=dmarc%%3a%s&run=toolpage", encoded), + MXToolboxDKIM: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=dkim%%3a%s&run=toolpage", encoded), + MXToolboxHTTP: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=http%%3a%s&run=toolpage", encoded), + MXToolboxHTTPS: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=https%%3a%s&run=toolpage", encoded), + MXToolboxPing: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=ping%%3a%s&run=toolpage", encoded), + MXToolboxTrace: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=trace%%3a%s&run=toolpage", encoded), + MXToolboxWhois: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=whois%%3a%s&run=toolpage", encoded), + + // DNSChecker + DNSCheckerDNS: fmt.Sprintf("https://dnschecker.org/#A/%s", encoded), + DNSCheckerPropagation: fmt.Sprintf("https://dnschecker.org/dns-propagation.php?domain=%s", encoded), + + // Other tools + WhoIs: fmt.Sprintf("https://who.is/whois/%s", encoded), + ViewDNS: fmt.Sprintf("https://viewdns.info/dnsrecord/?domain=%s", encoded), + IntoDNS: fmt.Sprintf("https://intodns.com/%s", encoded), + DNSViz: fmt.Sprintf("https://dnsviz.net/d/%s/analyze/", encoded), + SecurityTrails: fmt.Sprintf("https://securitytrails.com/domain/%s", encoded), + BuiltWith: fmt.Sprintf("https://builtwith.com/%s", encoded), + SSLLabs: fmt.Sprintf("https://www.ssllabs.com/ssltest/analyze.html?d=%s", encoded), + HSTSPreload: fmt.Sprintf("https://hstspreload.org/?domain=%s", encoded), + Hardenize: fmt.Sprintf("https://www.hardenize.com/report/%s", encoded), + VirusTotal: fmt.Sprintf("https://www.virustotal.com/gui/domain/%s", encoded), + } +} + +// GetExternalToolLinksIP generates links to external analysis tools for an IP +func GetExternalToolLinksIP(ip string) ExternalToolLinks { + encoded := url.QueryEscape(ip) + + return ExternalToolLinks{ + Target: ip, + Type: "ip", + + // MXToolbox + MXToolboxBlacklist: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=blacklist%%3a%s&run=toolpage", encoded), + MXToolboxPing: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=ping%%3a%s&run=toolpage", encoded), + MXToolboxTrace: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=trace%%3a%s&run=toolpage", encoded), + MXToolboxWhois: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=whois%%3a%s&run=toolpage", encoded), + MXToolboxASN: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=asn%%3a%s&run=toolpage", encoded), + + // IP-specific tools + IPInfo: fmt.Sprintf("https://ipinfo.io/%s", encoded), + AbuseIPDB: fmt.Sprintf("https://www.abuseipdb.com/check/%s", encoded), + VirusTotal: fmt.Sprintf("https://www.virustotal.com/gui/ip-address/%s", encoded), + Shodan: fmt.Sprintf("https://www.shodan.io/host/%s", encoded), + Censys: fmt.Sprintf("https://search.censys.io/hosts/%s", encoded), + ThreatCrowd: fmt.Sprintf("https://www.threatcrowd.org/ip.php?ip=%s", encoded), + } +} + +// GetExternalToolLinksEmail generates links for email-related checks +func GetExternalToolLinksEmail(emailOrDomain string) ExternalToolLinks { + // Extract domain from email if needed + domain := emailOrDomain + if strings.Contains(emailOrDomain, "@") { + parts := strings.Split(emailOrDomain, "@") + if len(parts) == 2 { + domain = parts[1] + } + } + + 
encoded := url.QueryEscape(domain) + emailEncoded := url.QueryEscape(emailOrDomain) + + return ExternalToolLinks{ + Target: emailOrDomain, + Type: "email", + + // MXToolbox email checks + MXToolboxMX: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=mx%%3a%s&run=toolpage", encoded), + MXToolboxSMTP: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=smtp%%3a%s&run=toolpage", encoded), + MXToolboxSPF: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=spf%%3a%s&run=toolpage", encoded), + MXToolboxDMARC: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=dmarc%%3a%s&run=toolpage", encoded), + MXToolboxDKIM: fmt.Sprintf("https://mxtoolbox.com/SuperTool.aspx?action=dkim%%3a%s&run=toolpage", encoded), + + // Email-specific tools + MailTester: fmt.Sprintf("https://www.mail-tester.com/test-%s", emailEncoded), + LearnDMARC: fmt.Sprintf("https://www.learndmarc.com/?domain=%s", encoded), + } +} + +// ============================================================================ +// Convenience Types for Parsed Results +// ============================================================================ + +// ParsedDomainInfo provides a simplified view of domain information +type ParsedDomainInfo struct { + Domain string `json:"domain"` + Registrar string `json:"registrar,omitempty"` + RegistrationDate string `json:"registrationDate,omitempty"` + ExpirationDate string `json:"expirationDate,omitempty"` + UpdatedDate string `json:"updatedDate,omitempty"` + Status []string `json:"status,omitempty"` + Nameservers []string `json:"nameservers,omitempty"` + DNSSEC bool `json:"dnssec"` +} + +// ParseRDAPResponse extracts key information from an RDAP response +func ParseRDAPResponse(resp RDAPResponse) ParsedDomainInfo { + info := ParsedDomainInfo{ + Domain: resp.LDHName, + Status: resp.Status, + } + + // Extract dates from events + for _, event := range resp.Events { + switch event.EventAction { + case "registration": + info.RegistrationDate = event.EventDate + case "expiration": + info.ExpirationDate = event.EventDate + case "last changed", "last update": + info.UpdatedDate = event.EventDate + } + } + + // Extract registrar from entities + for _, entity := range resp.Entities { + for _, role := range entity.Roles { + if role == "registrar" { + info.Registrar = entity.Handle + break + } + } + } + + // Extract nameservers + for _, ns := range resp.Nameservers { + info.Nameservers = append(info.Nameservers, ns.LDHName) + } + + // Check for DNSSEC + for _, status := range resp.Status { + if strings.Contains(strings.ToLower(status), "dnssec") || + strings.Contains(strings.ToLower(status), "signed") { + info.DNSSEC = true + break + } + } + + return info +} diff --git a/dns_tools_test.go b/dns_tools_test.go new file mode 100644 index 0000000..ddcbfca --- /dev/null +++ b/dns_tools_test.go @@ -0,0 +1,732 @@ +package poindexter + +import ( + "strings" + "testing" +) + +// ============================================================================ +// External Tool Links Tests +// ============================================================================ + +func TestGetExternalToolLinks(t *testing.T) { + links := GetExternalToolLinks("example.com") + + if links.Target != "example.com" { + t.Errorf("expected target=example.com, got %s", links.Target) + } + if links.Type != "domain" { + t.Errorf("expected type=domain, got %s", links.Type) + } + + // Check MXToolbox links + if !strings.Contains(links.MXToolboxDNS, "mxtoolbox.com") { + t.Error("MXToolboxDNS should contain mxtoolbox.com") + } + if 
!strings.Contains(links.MXToolboxDNS, "example.com") { + t.Error("MXToolboxDNS should contain the domain") + } + + if !strings.Contains(links.MXToolboxMX, "mxtoolbox.com") { + t.Error("MXToolboxMX should contain mxtoolbox.com") + } + + if !strings.Contains(links.MXToolboxSPF, "spf") { + t.Error("MXToolboxSPF should contain 'spf'") + } + + if !strings.Contains(links.MXToolboxDMARC, "dmarc") { + t.Error("MXToolboxDMARC should contain 'dmarc'") + } + + // Check DNSChecker links + if !strings.Contains(links.DNSCheckerDNS, "dnschecker.org") { + t.Error("DNSCheckerDNS should contain dnschecker.org") + } + + // Check other tools + if !strings.Contains(links.WhoIs, "who.is") { + t.Error("WhoIs should contain who.is") + } + + if !strings.Contains(links.SSLLabs, "ssllabs.com") { + t.Error("SSLLabs should contain ssllabs.com") + } + + if !strings.Contains(links.VirusTotal, "virustotal.com") { + t.Error("VirusTotal should contain virustotal.com") + } +} + +func TestGetExternalToolLinksIP(t *testing.T) { + links := GetExternalToolLinksIP("8.8.8.8") + + if links.Target != "8.8.8.8" { + t.Errorf("expected target=8.8.8.8, got %s", links.Target) + } + if links.Type != "ip" { + t.Errorf("expected type=ip, got %s", links.Type) + } + + // Check IP-specific links + if !strings.Contains(links.IPInfo, "ipinfo.io") { + t.Error("IPInfo should contain ipinfo.io") + } + if !strings.Contains(links.IPInfo, "8.8.8.8") { + t.Error("IPInfo should contain the IP address") + } + + if !strings.Contains(links.AbuseIPDB, "abuseipdb.com") { + t.Error("AbuseIPDB should contain abuseipdb.com") + } + + if !strings.Contains(links.Shodan, "shodan.io") { + t.Error("Shodan should contain shodan.io") + } + + if !strings.Contains(links.MXToolboxBlacklist, "blacklist") { + t.Error("MXToolboxBlacklist should contain 'blacklist'") + } +} + +func TestGetExternalToolLinksEmail(t *testing.T) { + // Test with email address + links := GetExternalToolLinksEmail("test@example.com") + + if links.Target != "test@example.com" { + t.Errorf("expected target=test@example.com, got %s", links.Target) + } + if links.Type != "email" { + t.Errorf("expected type=email, got %s", links.Type) + } + + // Email tools should use the domain + if !strings.Contains(links.MXToolboxMX, "example.com") { + t.Error("MXToolboxMX should contain the domain from email") + } + + if !strings.Contains(links.MXToolboxSPF, "spf") { + t.Error("MXToolboxSPF should contain 'spf'") + } + + if !strings.Contains(links.MXToolboxDMARC, "dmarc") { + t.Error("MXToolboxDMARC should contain 'dmarc'") + } + + // Test with just domain + links2 := GetExternalToolLinksEmail("example.org") + if links2.Target != "example.org" { + t.Errorf("expected target=example.org, got %s", links2.Target) + } +} + +func TestGetExternalToolLinksSpecialChars(t *testing.T) { + // Test URL encoding + links := GetExternalToolLinks("test-domain.example.com") + + if !strings.Contains(links.MXToolboxDNS, "test-domain.example.com") { + t.Error("Should handle hyphens in domain") + } +} + +// ============================================================================ +// DNS Lookup Tests (Unit tests for structure, not network) +// ============================================================================ + +func TestDNSRecordTypes(t *testing.T) { + types := []DNSRecordType{ + DNSRecordA, + DNSRecordAAAA, + DNSRecordMX, + DNSRecordTXT, + DNSRecordNS, + DNSRecordCNAME, + DNSRecordSOA, + DNSRecordPTR, + DNSRecordSRV, + DNSRecordCAA, + } + + expected := []string{"A", "AAAA", "MX", "TXT", "NS", "CNAME", "SOA", "PTR", "SRV", 
"CAA"} + + for i, typ := range types { + if string(typ) != expected[i] { + t.Errorf("expected type %s, got %s", expected[i], typ) + } + } +} + +func TestDNSRecordTypesExtended(t *testing.T) { + // Test all ClouDNS record types are defined + types := []DNSRecordType{ + DNSRecordALIAS, + DNSRecordRP, + DNSRecordSSHFP, + DNSRecordTLSA, + DNSRecordDS, + DNSRecordDNSKEY, + DNSRecordNAPTR, + DNSRecordLOC, + DNSRecordHINFO, + DNSRecordCERT, + DNSRecordSMIMEA, + DNSRecordWR, + DNSRecordSPF, + } + + expected := []string{"ALIAS", "RP", "SSHFP", "TLSA", "DS", "DNSKEY", "NAPTR", "LOC", "HINFO", "CERT", "SMIMEA", "WR", "SPF"} + + for i, typ := range types { + if string(typ) != expected[i] { + t.Errorf("expected type %s, got %s", expected[i], typ) + } + } +} + +func TestGetDNSRecordTypeInfo(t *testing.T) { + info := GetDNSRecordTypeInfo() + + if len(info) == 0 { + t.Error("GetDNSRecordTypeInfo should return non-empty list") + } + + // Check that common types exist + commonFound := 0 + for _, r := range info { + if r.Common { + commonFound++ + } + // Each entry should have type, name, and description + if r.Type == "" { + t.Error("Record type should not be empty") + } + if r.Name == "" { + t.Error("Record name should not be empty") + } + if r.Description == "" { + t.Error("Record description should not be empty") + } + } + + if commonFound < 10 { + t.Errorf("Expected at least 10 common record types, got %d", commonFound) + } + + // Check for specific types + typeMap := make(map[DNSRecordType]DNSRecordTypeInfo) + for _, r := range info { + typeMap[r.Type] = r + } + + if _, ok := typeMap[DNSRecordA]; !ok { + t.Error("A record type should be in info") + } + if _, ok := typeMap[DNSRecordALIAS]; !ok { + t.Error("ALIAS record type should be in info") + } + if _, ok := typeMap[DNSRecordTLSA]; !ok { + t.Error("TLSA record type should be in info") + } + if _, ok := typeMap[DNSRecordWR]; !ok { + t.Error("WR (Web Redirect) record type should be in info") + } +} + +func TestGetCommonDNSRecordTypes(t *testing.T) { + types := GetCommonDNSRecordTypes() + + if len(types) == 0 { + t.Error("GetCommonDNSRecordTypes should return non-empty list") + } + + // Check that standard types are present + typeSet := make(map[DNSRecordType]bool) + for _, typ := range types { + typeSet[typ] = true + } + + if !typeSet[DNSRecordA] { + t.Error("A record should be in common types") + } + if !typeSet[DNSRecordAAAA] { + t.Error("AAAA record should be in common types") + } + if !typeSet[DNSRecordMX] { + t.Error("MX record should be in common types") + } + if !typeSet[DNSRecordTXT] { + t.Error("TXT record should be in common types") + } + if !typeSet[DNSRecordALIAS] { + t.Error("ALIAS record should be in common types") + } +} + +func TestGetAllDNSRecordTypes(t *testing.T) { + types := GetAllDNSRecordTypes() + + if len(types) < 20 { + t.Errorf("GetAllDNSRecordTypes should return at least 20 types, got %d", len(types)) + } + + // Check for ClouDNS-specific types + typeSet := make(map[DNSRecordType]bool) + for _, typ := range types { + typeSet[typ] = true + } + + if !typeSet[DNSRecordWR] { + t.Error("WR (Web Redirect) should be in all types") + } + if !typeSet[DNSRecordNAPTR] { + t.Error("NAPTR should be in all types") + } + if !typeSet[DNSRecordDS] { + t.Error("DS should be in all types") + } +} + +func TestDNSLookupResultStructure(t *testing.T) { + result := DNSLookupResult{ + Domain: "example.com", + QueryType: "A", + Records: []DNSRecord{ + {Type: DNSRecordA, Name: "example.com", Value: "93.184.216.34"}, + }, + LookupTimeMs: 50, + } + + if 
result.Domain != "example.com" { + t.Error("Domain should be set") + } + if len(result.Records) != 1 { + t.Error("Should have 1 record") + } + if result.Records[0].Type != DNSRecordA { + t.Error("Record type should be A") + } +} + +func TestCompleteDNSLookupStructure(t *testing.T) { + result := CompleteDNSLookup{ + Domain: "example.com", + A: []string{"93.184.216.34"}, + AAAA: []string{"2606:2800:220:1:248:1893:25c8:1946"}, + MX: []MXRecord{ + {Host: "mail.example.com", Priority: 10}, + }, + NS: []string{"ns1.example.com", "ns2.example.com"}, + TXT: []string{"v=spf1 include:_spf.example.com ~all"}, + } + + if result.Domain != "example.com" { + t.Error("Domain should be set") + } + if len(result.A) != 1 { + t.Error("Should have 1 A record") + } + if len(result.AAAA) != 1 { + t.Error("Should have 1 AAAA record") + } + if len(result.MX) != 1 { + t.Error("Should have 1 MX record") + } + if result.MX[0].Priority != 10 { + t.Error("MX priority should be 10") + } + if len(result.NS) != 2 { + t.Error("Should have 2 NS records") + } +} + +// ============================================================================ +// RDAP Tests (Unit tests for structure, not network) +// ============================================================================ + +func TestRDAPResponseStructure(t *testing.T) { + resp := RDAPResponse{ + LDHName: "example.com", + Status: []string{"active", "client transfer prohibited"}, + Events: []RDAPEvent{ + {EventAction: "registration", EventDate: "2020-01-01T00:00:00Z"}, + {EventAction: "expiration", EventDate: "2025-01-01T00:00:00Z"}, + }, + Entities: []RDAPEntity{ + {Handle: "REGISTRAR-1", Roles: []string{"registrar"}}, + }, + Nameservers: []RDAPNs{ + {LDHName: "ns1.example.com"}, + {LDHName: "ns2.example.com"}, + }, + } + + if resp.LDHName != "example.com" { + t.Error("LDHName should be set") + } + if len(resp.Status) != 2 { + t.Error("Should have 2 status values") + } + if len(resp.Events) != 2 { + t.Error("Should have 2 events") + } + if resp.Events[0].EventAction != "registration" { + t.Error("First event should be registration") + } + if len(resp.Nameservers) != 2 { + t.Error("Should have 2 nameservers") + } +} + +func TestParseRDAPResponse(t *testing.T) { + resp := RDAPResponse{ + LDHName: "example.com", + Status: []string{"active", "dnssecSigned"}, + Events: []RDAPEvent{ + {EventAction: "registration", EventDate: "2020-01-01T00:00:00Z"}, + {EventAction: "expiration", EventDate: "2025-01-01T00:00:00Z"}, + {EventAction: "last changed", EventDate: "2024-06-15T00:00:00Z"}, + }, + Entities: []RDAPEntity{ + {Handle: "REGISTRAR-123", Roles: []string{"registrar"}}, + }, + Nameservers: []RDAPNs{ + {LDHName: "ns1.example.com"}, + {LDHName: "ns2.example.com"}, + }, + } + + info := ParseRDAPResponse(resp) + + if info.Domain != "example.com" { + t.Errorf("expected domain=example.com, got %s", info.Domain) + } + if info.RegistrationDate != "2020-01-01T00:00:00Z" { + t.Errorf("expected registration date, got %s", info.RegistrationDate) + } + if info.ExpirationDate != "2025-01-01T00:00:00Z" { + t.Errorf("expected expiration date, got %s", info.ExpirationDate) + } + if info.UpdatedDate != "2024-06-15T00:00:00Z" { + t.Errorf("expected updated date, got %s", info.UpdatedDate) + } + if info.Registrar != "REGISTRAR-123" { + t.Errorf("expected registrar, got %s", info.Registrar) + } + if len(info.Nameservers) != 2 { + t.Error("Should have 2 nameservers") + } + if !info.DNSSEC { + t.Error("DNSSEC should be true (detected from status)") + } +} + +func TestParseRDAPResponseEmpty(t 
*testing.T) { + resp := RDAPResponse{ + LDHName: "test.com", + } + + info := ParseRDAPResponse(resp) + + if info.Domain != "test.com" { + t.Error("Domain should be set even with minimal response") + } + if info.DNSSEC { + t.Error("DNSSEC should be false with no status") + } + if len(info.Nameservers) != 0 { + t.Error("Nameservers should be empty") + } +} + +// ============================================================================ +// RDAP Server Tests +// ============================================================================ + +func TestRDAPServers(t *testing.T) { + // Check that we have servers for common TLDs + commonTLDs := []string{"com", "net", "org", "io"} + for _, tld := range commonTLDs { + if _, ok := rdapServers[tld]; !ok { + t.Errorf("missing RDAP server for TLD: %s", tld) + } + } + + // Check RIRs + rirs := []string{"arin", "ripe", "apnic", "afrinic", "lacnic"} + for _, rir := range rirs { + if _, ok := rdapServers[rir]; !ok { + t.Errorf("missing RDAP server for RIR: %s", rir) + } + } +} + +// ============================================================================ +// MX Record Tests +// ============================================================================ + +func TestMXRecordStructure(t *testing.T) { + mx := MXRecord{ + Host: "mail.example.com", + Priority: 10, + } + + if mx.Host != "mail.example.com" { + t.Error("Host should be set") + } + if mx.Priority != 10 { + t.Error("Priority should be 10") + } +} + +// ============================================================================ +// SRV Record Tests +// ============================================================================ + +func TestSRVRecordStructure(t *testing.T) { + srv := SRVRecord{ + Target: "sipserver.example.com", + Port: 5060, + Priority: 10, + Weight: 100, + } + + if srv.Target != "sipserver.example.com" { + t.Error("Target should be set") + } + if srv.Port != 5060 { + t.Error("Port should be 5060") + } + if srv.Priority != 10 { + t.Error("Priority should be 10") + } + if srv.Weight != 100 { + t.Error("Weight should be 100") + } +} + +// ============================================================================ +// SOA Record Tests +// ============================================================================ + +func TestSOARecordStructure(t *testing.T) { + soa := SOARecord{ + PrimaryNS: "ns1.example.com", + AdminEmail: "admin.example.com", + Serial: 2024010101, + Refresh: 7200, + Retry: 3600, + Expire: 1209600, + MinTTL: 86400, + } + + if soa.PrimaryNS != "ns1.example.com" { + t.Error("PrimaryNS should be set") + } + if soa.Serial != 2024010101 { + t.Error("Serial should match") + } + if soa.Refresh != 7200 { + t.Error("Refresh should be 7200") + } +} + +// ============================================================================ +// Extended Record Type Structure Tests +// ============================================================================ + +func TestCAARecordStructure(t *testing.T) { + caa := CAARecord{ + Flag: 0, + Tag: "issue", + Value: "letsencrypt.org", + } + + if caa.Tag != "issue" { + t.Error("Tag should be 'issue'") + } + if caa.Value != "letsencrypt.org" { + t.Error("Value should be set") + } +} + +func TestSSHFPRecordStructure(t *testing.T) { + sshfp := SSHFPRecord{ + Algorithm: 4, // Ed25519 + FPType: 2, // SHA-256 + Fingerprint: "abc123def456", + } + + if sshfp.Algorithm != 4 { + t.Error("Algorithm should be 4 (Ed25519)") + } + if sshfp.FPType != 2 { + t.Error("FPType should be 2 (SHA-256)") + } +} + +func TestTLSARecordStructure(t *testing.T) { + 
tlsa := TLSARecord{ + Usage: 3, // Domain-issued certificate + Selector: 1, // SubjectPublicKeyInfo + MatchingType: 1, // SHA-256 + CertData: "abcd1234", + } + + if tlsa.Usage != 3 { + t.Error("Usage should be 3") + } + if tlsa.Selector != 1 { + t.Error("Selector should be 1") + } +} + +func TestDSRecordStructure(t *testing.T) { + ds := DSRecord{ + KeyTag: 12345, + Algorithm: 13, // ECDSAP256SHA256 + DigestType: 2, // SHA-256 + Digest: "deadbeef", + } + + if ds.KeyTag != 12345 { + t.Error("KeyTag should be 12345") + } + if ds.Algorithm != 13 { + t.Error("Algorithm should be 13") + } +} + +func TestNAPTRRecordStructure(t *testing.T) { + naptr := NAPTRRecord{ + Order: 100, + Preference: 10, + Flags: "U", + Service: "E2U+sip", + Regexp: "!^.*$!sip:info@example.com!", + Replacement: ".", + } + + if naptr.Order != 100 { + t.Error("Order should be 100") + } + if naptr.Service != "E2U+sip" { + t.Error("Service should be E2U+sip") + } +} + +func TestRPRecordStructure(t *testing.T) { + rp := RPRecord{ + Mailbox: "admin.example.com", + TxtDom: "info.example.com", + } + + if rp.Mailbox != "admin.example.com" { + t.Error("Mailbox should be set") + } +} + +func TestLOCRecordStructure(t *testing.T) { + loc := LOCRecord{ + Latitude: 51.5074, + Longitude: -0.1278, + Altitude: 11, + Size: 10, + HPrecis: 10, + VPrecis: 10, + } + + if loc.Latitude < 51.5 || loc.Latitude > 51.6 { + t.Error("Latitude should be near 51.5074") + } +} + +func TestALIASRecordStructure(t *testing.T) { + alias := ALIASRecord{ + Target: "target.example.com", + } + + if alias.Target != "target.example.com" { + t.Error("Target should be set") + } +} + +func TestWebRedirectRecordStructure(t *testing.T) { + wr := WebRedirectRecord{ + URL: "https://www.example.com", + RedirectType: 301, + Frame: false, + } + + if wr.URL != "https://www.example.com" { + t.Error("URL should be set") + } + if wr.RedirectType != 301 { + t.Error("RedirectType should be 301") + } +} + +// ============================================================================ +// Helper Function Tests +// ============================================================================ + +func TestIsNoSuchHostError(t *testing.T) { + tests := []struct { + errStr string + expected bool + }{ + {"no such host", true}, + {"NXDOMAIN", true}, + {"not found", true}, + {"connection refused", false}, + {"timeout", false}, + {"", false}, + } + + for _, tc := range tests { + var err error + if tc.errStr != "" { + err = &testError{msg: tc.errStr} + } + result := isNoSuchHostError(err) + if result != tc.expected { + t.Errorf("isNoSuchHostError(%q) = %v, want %v", tc.errStr, result, tc.expected) + } + } +} + +type testError struct { + msg string +} + +func (e *testError) Error() string { + return e.msg +} + +// ============================================================================ +// URL Building Tests +// ============================================================================ + +func TestBuildRDAPURLs(t *testing.T) { + // These test the URL structure, not actual lookups + + // Domain URL + domain := "example.com" + expectedDomainPrefix := "https://rdap.org/domain/" + if !strings.HasPrefix("https://rdap.org/domain/"+domain, expectedDomainPrefix) { + t.Error("Domain URL format is incorrect") + } + + // IP URL + ip := "8.8.8.8" + expectedIPPrefix := "https://rdap.org/ip/" + if !strings.HasPrefix("https://rdap.org/ip/"+ip, expectedIPPrefix) { + t.Error("IP URL format is incorrect") + } + + // ASN URL + asn := "15169" + expectedASNPrefix := "https://rdap.org/autnum/" + if 
!strings.HasPrefix("https://rdap.org/autnum/"+asn, expectedASNPrefix) { + t.Error("ASN URL format is incorrect") + } +} diff --git a/examples/wasm-browser-ts/src/main.ts b/examples/wasm-browser-ts/src/main.ts index 574a8bc..2425444 100644 --- a/examples/wasm-browser-ts/src/main.ts +++ b/examples/wasm-browser-ts/src/main.ts @@ -13,19 +13,159 @@ async function run() { console.log('Poindexter (WASM) version:', await px.version()); + // ========================================================================= + // Basic KD-Tree operations + // ========================================================================= const tree = await px.newTree(2); - await tree.insert({ id: 'a', coords: [0, 0], value: 'A' }); - await tree.insert({ id: 'b', coords: [1, 0], value: 'B' }); - await tree.insert({ id: 'c', coords: [0, 1], value: 'C' }); + await tree.insert({ id: 'peer-a', coords: [0, 0], value: 'Peer A' }); + await tree.insert({ id: 'peer-b', coords: [1, 0], value: 'Peer B' }); + await tree.insert({ id: 'peer-c', coords: [0, 1], value: 'Peer C' }); + await tree.insert({ id: 'peer-d', coords: [0.5, 0.5], value: 'Peer D' }); + console.log('\n=== Basic Queries ==='); const nn = await tree.nearest([0.9, 0.1]); console.log('Nearest [0.9,0.1]:', nn); - const kn = await tree.kNearest([0.9, 0.9], 2); - console.log('kNN k=2 [0.9,0.9]:', kn); + const kn = await tree.kNearest([0.5, 0.5], 3); + console.log('kNN k=3 [0.5,0.5]:', kn); const rad = await tree.radius([0, 0], 1.1); console.log('Radius r=1.1 [0,0]:', rad); + + // ========================================================================= + // Analytics Demo + // ========================================================================= + console.log('\n=== Tree Analytics ==='); + + // Perform more queries to generate analytics + for (let i = 0; i < 10; i++) { + await tree.nearest([Math.random(), Math.random()]); + } + await tree.kNearest([0.2, 0.8], 2); + await tree.kNearest([0.7, 0.3], 2); + + // Get tree-level analytics + const analytics = await tree.getAnalytics(); + console.log('Tree Analytics:', { + queryCount: analytics.queryCount, + insertCount: analytics.insertCount, + avgQueryTimeNs: analytics.avgQueryTimeNs, + minQueryTimeNs: analytics.minQueryTimeNs, + maxQueryTimeNs: analytics.maxQueryTimeNs, + }); + + // ========================================================================= + // Peer Selection Analytics + // ========================================================================= + console.log('\n=== Peer Selection Analytics ==='); + + // Get all peer stats + const peerStats = await tree.getPeerStats(); + console.log('All Peer Stats:', peerStats); + + // Get top 3 most frequently selected peers + const topPeers = await tree.getTopPeers(3); + console.log('Top 3 Peers:', topPeers); + + // ========================================================================= + // Axis Distribution Analysis + // ========================================================================= + console.log('\n=== Axis Distributions ==='); + + const axisDists = await tree.getAxisDistributions(['latency', 'hops']); + console.log('Axis Distributions:', axisDists); + + // ========================================================================= + // NAT Routing / Peer Quality Scoring + // ========================================================================= + console.log('\n=== NAT Routing & Peer Quality ==='); + + // Simulate peer network metrics + const peerMetrics = { + connectivityScore: 0.9, + symmetryScore: 0.8, + relayProbability: 0.1, + 
directSuccessRate: 0.95, + avgRttMs: 50, + jitterMs: 10, + packetLossRate: 0.01, + bandwidthMbps: 100, + natType: 'full_cone' as const, + }; + + const qualityScore = await px.computePeerQualityScore(peerMetrics); + console.log('Peer Quality Score (0-1):', qualityScore.toFixed(3)); + + // Get default quality weights + const defaultWeights = await px.getDefaultQualityWeights(); + console.log('Default Quality Weights:', defaultWeights); + + // ========================================================================= + // Trust Score Calculation + // ========================================================================= + console.log('\n=== Trust Score ==='); + + const trustMetrics = { + reputationScore: 0.8, + successfulTransactions: 150, + failedTransactions: 3, + ageSeconds: 86400 * 30, // 30 days + vouchCount: 5, + flagCount: 0, + proofOfWork: 0.5, + }; + + const trustScore = await px.computeTrustScore(trustMetrics); + console.log('Trust Score (0-1):', trustScore.toFixed(3)); + + // ========================================================================= + // Distribution Statistics + // ========================================================================= + console.log('\n=== Distribution Statistics ==='); + + // Simulate some distance measurements + const distances = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.8, 1.2]; + const distStats = await px.computeDistributionStats(distances); + console.log('Distance Distribution Stats:', { + count: distStats.count, + min: distStats.min.toFixed(3), + max: distStats.max.toFixed(3), + mean: distStats.mean.toFixed(3), + median: distStats.median.toFixed(3), + stdDev: distStats.stdDev.toFixed(3), + p90: distStats.p90.toFixed(3), + }); + + // ========================================================================= + // Feature Normalization for KD-Tree + // ========================================================================= + console.log('\n=== Feature Normalization ==='); + + // Raw peer features: [latency_ms, hops, geo_km, trust_inv, bw_inv, loss, conn_inv, nat_inv] + const rawFeatures = [100, 5, 500, 0.1, 50, 0.02, 5, 0.1]; + + // Get default feature ranges + const featureRanges = await px.getDefaultPeerFeatureRanges(); + console.log('Feature Labels:', featureRanges.labels); + + // Normalize features + const normalizedFeatures = await px.normalizePeerFeatures(rawFeatures); + console.log('Normalized Features:', normalizedFeatures.map((f: number) => f.toFixed(3))); + + // Apply custom weights + const customWeights = [1.5, 1.0, 0.5, 1.2, 0.8, 2.0, 1.0, 0.7]; + const weightedFeatures = await px.weightedPeerFeatures(normalizedFeatures, customWeights); + console.log('Weighted Features:', weightedFeatures.map((f: number) => f.toFixed(3))); + + // ========================================================================= + // Analytics Reset + // ========================================================================= + console.log('\n=== Analytics Reset ==='); + await tree.resetAnalytics(); + const resetAnalytics = await tree.getAnalytics(); + console.log('After Reset - Query Count:', resetAnalytics.queryCount); + + console.log('\n=== Demo Complete ==='); } run().catch((err) => { diff --git a/kdtree.go b/kdtree.go index e177613..657453f 100644 --- a/kdtree.go +++ b/kdtree.go @@ -4,6 +4,7 @@ import ( "errors" "math" "sort" + "time" ) var ( @@ -218,6 +219,10 @@ type KDTree[T any] struct { idIndex map[string]int backend KDBackend backendData any // opaque handle for backend-specific structures (e.g., gonum tree) + + // Analytics tracking 
(optional, enabled by default) + analytics *TreeAnalytics + peerAnalytics *PeerAnalytics } // NewKDTree builds a KDTree from the given points. @@ -259,12 +264,14 @@ func NewKDTree[T any](pts []KDPoint[T], opts ...KDOption) (*KDTree[T], error) { backend = BackendLinear // tag not enabled → fallback } t := &KDTree[T]{ - points: append([]KDPoint[T](nil), pts...), - dim: dim, - metric: cfg.metric, - idIndex: idIndex, - backend: backend, - backendData: backendData, + points: append([]KDPoint[T](nil), pts...), + dim: dim, + metric: cfg.metric, + idIndex: idIndex, + backend: backend, + backendData: backendData, + analytics: NewTreeAnalytics(), + peerAnalytics: NewPeerAnalytics(), } return t, nil } @@ -284,12 +291,14 @@ func NewKDTreeFromDim[T any](dim int, opts ...KDOption) (*KDTree[T], error) { backend = BackendLinear } return &KDTree[T]{ - points: nil, - dim: dim, - metric: cfg.metric, - idIndex: make(map[string]int), - backend: backend, - backendData: nil, + points: nil, + dim: dim, + metric: cfg.metric, + idIndex: make(map[string]int), + backend: backend, + backendData: nil, + analytics: NewTreeAnalytics(), + peerAnalytics: NewPeerAnalytics(), }, nil } @@ -305,12 +314,23 @@ func (t *KDTree[T]) Nearest(query []float64) (KDPoint[T], float64, bool) { if len(query) != t.dim || t.Len() == 0 { return KDPoint[T]{}, 0, false } + start := time.Now() + defer func() { + if t.analytics != nil { + t.analytics.RecordQuery(time.Since(start).Nanoseconds()) + } + }() + // Gonum backend (if available and built) if t.backend == BackendGonum && t.backendData != nil { if idx, dist, ok := gonumNearest[T](t.backendData, query); ok && idx >= 0 && idx < len(t.points) { - return t.points[idx], dist, true + p := t.points[idx] + if t.peerAnalytics != nil { + t.peerAnalytics.RecordSelection(p.ID, dist) + } + return p, dist, true } - // fall through to linear scan if backend didn’t return a result + // fall through to linear scan if backend didn't return a result } bestIdx := -1 bestDist := math.MaxFloat64 @@ -324,7 +344,11 @@ func (t *KDTree[T]) Nearest(query []float64) (KDPoint[T], float64, bool) { if bestIdx < 0 { return KDPoint[T]{}, 0, false } - return t.points[bestIdx], bestDist, true + p := t.points[bestIdx] + if t.peerAnalytics != nil { + t.peerAnalytics.RecordSelection(p.ID, bestDist) + } + return p, bestDist, true } // KNearest returns up to k nearest neighbors to the query in ascending distance order. 
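+// Analytics instrumentation: as with Nearest above (and Radius below), query
+// timing is recorded in TreeAnalytics via a deferred hook, and every returned
+// point's ID is passed to PeerAnalytics.RecordSelection. A rough usage sketch
+// (peer IDs and coordinates are illustrative only):
+//
+//	tree, _ := NewKDTree([]KDPoint[string]{{ID: "peer-a", Coords: []float64{0, 0}, Value: "A"}})
+//	tree.Nearest([]float64{0.1, 0.1})
+//	snap := tree.GetAnalyticsSnapshot() // snap.QueryCount == 1
+//	top := tree.GetTopPeers(1)          // "peer-a" selected once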
@@ -333,6 +357,13 @@ func (t *KDTree[T]) KNearest(query []float64, k int) ([]KDPoint[T], []float64) { if k <= 0 || len(query) != t.dim || t.Len() == 0 { return nil, nil } + start := time.Now() + defer func() { + if t.analytics != nil { + t.analytics.RecordQuery(time.Since(start).Nanoseconds()) + } + }() + // Gonum backend path if t.backend == BackendGonum && t.backendData != nil { idxs, dists := gonumKNearest[T](t.backendData, query, k) @@ -340,6 +371,9 @@ func (t *KDTree[T]) KNearest(query []float64, k int) ([]KDPoint[T], []float64) { neighbors := make([]KDPoint[T], len(idxs)) for i := range idxs { neighbors[i] = t.points[idxs[i]] + if t.peerAnalytics != nil { + t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i]) + } } return neighbors, dists } @@ -362,6 +396,9 @@ func (t *KDTree[T]) KNearest(query []float64, k int) ([]KDPoint[T], []float64) { for i := 0; i < k; i++ { neighbors[i] = t.points[tmp[i].idx] dists[i] = tmp[i].dist + if t.peerAnalytics != nil { + t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i]) + } } return neighbors, dists } @@ -371,6 +408,13 @@ func (t *KDTree[T]) Radius(query []float64, r float64) ([]KDPoint[T], []float64) if r < 0 || len(query) != t.dim || t.Len() == 0 { return nil, nil } + start := time.Now() + defer func() { + if t.analytics != nil { + t.analytics.RecordQuery(time.Since(start).Nanoseconds()) + } + }() + // Gonum backend path if t.backend == BackendGonum && t.backendData != nil { idxs, dists := gonumRadius[T](t.backendData, query, r) @@ -378,6 +422,9 @@ func (t *KDTree[T]) Radius(query []float64, r float64) ([]KDPoint[T], []float64) neighbors := make([]KDPoint[T], len(idxs)) for i := range idxs { neighbors[i] = t.points[idxs[i]] + if t.peerAnalytics != nil { + t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i]) + } } return neighbors, dists } @@ -402,6 +449,9 @@ func (t *KDTree[T]) Radius(query []float64, r float64) ([]KDPoint[T], []float64) for i := range sel { neighbors[i] = t.points[sel[i].idx] dists[i] = sel[i].dist + if t.peerAnalytics != nil { + t.peerAnalytics.RecordSelection(neighbors[i].ID, dists[i]) + } } return neighbors, dists } @@ -421,10 +471,17 @@ func (t *KDTree[T]) Insert(p KDPoint[T]) bool { if p.ID != "" { t.idIndex[p.ID] = len(t.points) - 1 } + // Record insert in analytics + if t.analytics != nil { + t.analytics.RecordInsert() + } // Rebuild backend if using Gonum if t.backend == BackendGonum && hasGonum() { if bd, err := buildGonumBackend(t.points, t.metric); err == nil { t.backendData = bd + if t.analytics != nil { + t.analytics.RecordRebuild() + } } else { // fallback to linear if rebuild fails t.backend = BackendLinear @@ -451,10 +508,17 @@ func (t *KDTree[T]) DeleteByID(id string) bool { } t.points = t.points[:last] delete(t.idIndex, id) + // Record delete in analytics + if t.analytics != nil { + t.analytics.RecordDelete() + } // Rebuild backend if using Gonum if t.backend == BackendGonum && hasGonum() { if bd, err := buildGonumBackend(t.points, t.metric); err == nil { t.backendData = bd + if t.analytics != nil { + t.analytics.RecordRebuild() + } } else { // fallback to linear if rebuild fails t.backend = BackendLinear @@ -463,3 +527,67 @@ func (t *KDTree[T]) DeleteByID(id string) bool { } return true } + +// Analytics returns the tree analytics tracker. +// Returns nil if analytics tracking is disabled. +func (t *KDTree[T]) Analytics() *TreeAnalytics { + return t.analytics +} + +// PeerAnalytics returns the peer analytics tracker. +// Returns nil if peer analytics tracking is disabled. 
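+// The tracker can also be used directly when the convenience wrappers defined
+// below (GetPeerStats, GetTopPeers) are not enough; a minimal sketch with a
+// hypothetical peer ID:
+//
+//	if pa := tree.PeerAnalytics(); pa != nil {
+//		stats := pa.GetPeerStats("peer-a")
+//		_ = stats.SelectionCount // how often this peer was returned from queries
+//	}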
+func (t *KDTree[T]) PeerAnalytics() *PeerAnalytics { + return t.peerAnalytics +} + +// GetAnalyticsSnapshot returns a point-in-time snapshot of tree analytics. +func (t *KDTree[T]) GetAnalyticsSnapshot() TreeAnalyticsSnapshot { + if t.analytics == nil { + return TreeAnalyticsSnapshot{} + } + return t.analytics.Snapshot() +} + +// GetPeerStats returns per-peer selection statistics. +func (t *KDTree[T]) GetPeerStats() []PeerStats { + if t.peerAnalytics == nil { + return nil + } + return t.peerAnalytics.GetAllPeerStats() +} + +// GetTopPeers returns the top N most frequently selected peers. +func (t *KDTree[T]) GetTopPeers(n int) []PeerStats { + if t.peerAnalytics == nil { + return nil + } + return t.peerAnalytics.GetTopPeers(n) +} + +// ComputeDistanceDistribution analyzes the distribution of current point coordinates. +func (t *KDTree[T]) ComputeDistanceDistribution(axisNames []string) []AxisDistribution { + return ComputeAxisDistributions(t.points, axisNames) +} + +// ResetAnalytics clears all analytics data. +func (t *KDTree[T]) ResetAnalytics() { + if t.analytics != nil { + t.analytics.Reset() + } + if t.peerAnalytics != nil { + t.peerAnalytics.Reset() + } +} + +// Points returns a copy of all points in the tree. +// This is useful for analytics and export operations. +func (t *KDTree[T]) Points() []KDPoint[T] { + result := make([]KDPoint[T], len(t.points)) + copy(result, t.points) + return result +} + +// Backend returns the active backend type. +func (t *KDTree[T]) Backend() KDBackend { + return t.backend +} diff --git a/kdtree_analytics.go b/kdtree_analytics.go new file mode 100644 index 0000000..91a9b12 --- /dev/null +++ b/kdtree_analytics.go @@ -0,0 +1,700 @@ +package poindexter + +import ( + "math" + "sort" + "sync" + "sync/atomic" + "time" +) + +// TreeAnalytics tracks operational statistics for a KDTree. +// All counters are safe for concurrent reads; use the Reset() method for atomic reset. +type TreeAnalytics struct { + QueryCount atomic.Int64 // Total nearest/kNearest/radius queries + InsertCount atomic.Int64 // Total successful inserts + DeleteCount atomic.Int64 // Total successful deletes + + // Timing statistics (nanoseconds) + TotalQueryTimeNs atomic.Int64 + LastQueryTimeNs atomic.Int64 + MinQueryTimeNs atomic.Int64 + MaxQueryTimeNs atomic.Int64 + LastQueryAt atomic.Int64 // Unix nanoseconds + CreatedAt time.Time + LastRebuiltAt atomic.Int64 // Unix nanoseconds (for gonum backend rebuilds) + BackendRebuildCnt atomic.Int64 // Number of backend rebuilds +} + +// NewTreeAnalytics creates a new analytics tracker. +func NewTreeAnalytics() *TreeAnalytics { + a := &TreeAnalytics{ + CreatedAt: time.Now(), + } + a.MinQueryTimeNs.Store(math.MaxInt64) + return a +} + +// RecordQuery records a query operation with timing. +func (a *TreeAnalytics) RecordQuery(durationNs int64) { + a.QueryCount.Add(1) + a.TotalQueryTimeNs.Add(durationNs) + a.LastQueryTimeNs.Store(durationNs) + a.LastQueryAt.Store(time.Now().UnixNano()) + + // Update min/max (best-effort, not strictly atomic) + for { + cur := a.MinQueryTimeNs.Load() + if durationNs >= cur || a.MinQueryTimeNs.CompareAndSwap(cur, durationNs) { + break + } + } + for { + cur := a.MaxQueryTimeNs.Load() + if durationNs <= cur || a.MaxQueryTimeNs.CompareAndSwap(cur, durationNs) { + break + } + } +} + +// RecordInsert records a successful insert. +func (a *TreeAnalytics) RecordInsert() { + a.InsertCount.Add(1) +} + +// RecordDelete records a successful delete. 
+func (a *TreeAnalytics) RecordDelete() { + a.DeleteCount.Add(1) +} + +// RecordRebuild records a backend rebuild. +func (a *TreeAnalytics) RecordRebuild() { + a.BackendRebuildCnt.Add(1) + a.LastRebuiltAt.Store(time.Now().UnixNano()) +} + +// Snapshot returns a point-in-time view of the analytics. +func (a *TreeAnalytics) Snapshot() TreeAnalyticsSnapshot { + avgNs := int64(0) + qc := a.QueryCount.Load() + if qc > 0 { + avgNs = a.TotalQueryTimeNs.Load() / qc + } + minNs := a.MinQueryTimeNs.Load() + if minNs == math.MaxInt64 { + minNs = 0 + } + return TreeAnalyticsSnapshot{ + QueryCount: qc, + InsertCount: a.InsertCount.Load(), + DeleteCount: a.DeleteCount.Load(), + AvgQueryTimeNs: avgNs, + MinQueryTimeNs: minNs, + MaxQueryTimeNs: a.MaxQueryTimeNs.Load(), + LastQueryTimeNs: a.LastQueryTimeNs.Load(), + LastQueryAt: time.Unix(0, a.LastQueryAt.Load()), + CreatedAt: a.CreatedAt, + BackendRebuildCnt: a.BackendRebuildCnt.Load(), + LastRebuiltAt: time.Unix(0, a.LastRebuiltAt.Load()), + } +} + +// Reset atomically resets all counters. +func (a *TreeAnalytics) Reset() { + a.QueryCount.Store(0) + a.InsertCount.Store(0) + a.DeleteCount.Store(0) + a.TotalQueryTimeNs.Store(0) + a.LastQueryTimeNs.Store(0) + a.MinQueryTimeNs.Store(math.MaxInt64) + a.MaxQueryTimeNs.Store(0) + a.LastQueryAt.Store(0) + a.BackendRebuildCnt.Store(0) + a.LastRebuiltAt.Store(0) +} + +// TreeAnalyticsSnapshot is an immutable snapshot for JSON serialization. +type TreeAnalyticsSnapshot struct { + QueryCount int64 `json:"queryCount"` + InsertCount int64 `json:"insertCount"` + DeleteCount int64 `json:"deleteCount"` + AvgQueryTimeNs int64 `json:"avgQueryTimeNs"` + MinQueryTimeNs int64 `json:"minQueryTimeNs"` + MaxQueryTimeNs int64 `json:"maxQueryTimeNs"` + LastQueryTimeNs int64 `json:"lastQueryTimeNs"` + LastQueryAt time.Time `json:"lastQueryAt"` + CreatedAt time.Time `json:"createdAt"` + BackendRebuildCnt int64 `json:"backendRebuildCount"` + LastRebuiltAt time.Time `json:"lastRebuiltAt"` +} + +// PeerAnalytics tracks per-peer selection statistics for NAT routing optimization. +type PeerAnalytics struct { + mu sync.RWMutex + + // Per-peer hit counters (peer ID -> selection count) + hitCounts map[string]*atomic.Int64 + // Per-peer cumulative distance sums for average calculation + distanceSums map[string]*atomic.Uint64 // stored as bits of float64 + // Last selection time per peer + lastSelected map[string]*atomic.Int64 // Unix nano +} + +// NewPeerAnalytics creates a new peer analytics tracker. +func NewPeerAnalytics() *PeerAnalytics { + return &PeerAnalytics{ + hitCounts: make(map[string]*atomic.Int64), + distanceSums: make(map[string]*atomic.Uint64), + lastSelected: make(map[string]*atomic.Int64), + } +} + +// RecordSelection records that a peer was selected/returned in a query result. 
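+// It is safe for concurrent use: hit counters are atomic, the cumulative
+// distance is stored as float64 bits and updated via a CAS loop, and the maps
+// are only write-locked when a previously unseen peer ID appears. GetPeerStats
+// reports AvgDistance as this running sum divided by the selection count.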
+func (p *PeerAnalytics) RecordSelection(peerID string, distance float64) { + if peerID == "" { + return + } + + p.mu.RLock() + hc, hok := p.hitCounts[peerID] + ds, dok := p.distanceSums[peerID] + ls, lok := p.lastSelected[peerID] + p.mu.RUnlock() + + if !hok || !dok || !lok { + p.mu.Lock() + if _, ok := p.hitCounts[peerID]; !ok { + p.hitCounts[peerID] = &atomic.Int64{} + } + if _, ok := p.distanceSums[peerID]; !ok { + p.distanceSums[peerID] = &atomic.Uint64{} + } + if _, ok := p.lastSelected[peerID]; !ok { + p.lastSelected[peerID] = &atomic.Int64{} + } + hc = p.hitCounts[peerID] + ds = p.distanceSums[peerID] + ls = p.lastSelected[peerID] + p.mu.Unlock() + } + + hc.Add(1) + // Atomic float add via CAS + for { + old := ds.Load() + oldF := math.Float64frombits(old) + newF := oldF + distance + if ds.CompareAndSwap(old, math.Float64bits(newF)) { + break + } + } + ls.Store(time.Now().UnixNano()) +} + +// GetPeerStats returns statistics for a specific peer. +func (p *PeerAnalytics) GetPeerStats(peerID string) PeerStats { + p.mu.RLock() + defer p.mu.RUnlock() + + hc, hok := p.hitCounts[peerID] + ds, dok := p.distanceSums[peerID] + ls, lok := p.lastSelected[peerID] + + stats := PeerStats{PeerID: peerID} + if hok { + stats.SelectionCount = hc.Load() + } + if dok && stats.SelectionCount > 0 { + stats.AvgDistance = math.Float64frombits(ds.Load()) / float64(stats.SelectionCount) + } + if lok { + stats.LastSelectedAt = time.Unix(0, ls.Load()) + } + return stats +} + +// GetAllPeerStats returns statistics for all tracked peers. +func (p *PeerAnalytics) GetAllPeerStats() []PeerStats { + p.mu.RLock() + defer p.mu.RUnlock() + + result := make([]PeerStats, 0, len(p.hitCounts)) + for id := range p.hitCounts { + stats := PeerStats{PeerID: id} + if hc := p.hitCounts[id]; hc != nil { + stats.SelectionCount = hc.Load() + } + if ds := p.distanceSums[id]; ds != nil && stats.SelectionCount > 0 { + stats.AvgDistance = math.Float64frombits(ds.Load()) / float64(stats.SelectionCount) + } + if ls := p.lastSelected[id]; ls != nil { + stats.LastSelectedAt = time.Unix(0, ls.Load()) + } + result = append(result, stats) + } + + // Sort by selection count descending + sort.Slice(result, func(i, j int) bool { + return result[i].SelectionCount > result[j].SelectionCount + }) + return result +} + +// GetTopPeers returns the top N most frequently selected peers. +func (p *PeerAnalytics) GetTopPeers(n int) []PeerStats { + all := p.GetAllPeerStats() + if n > len(all) { + n = len(all) + } + return all[:n] +} + +// Reset clears all peer analytics data. +func (p *PeerAnalytics) Reset() { + p.mu.Lock() + defer p.mu.Unlock() + p.hitCounts = make(map[string]*atomic.Int64) + p.distanceSums = make(map[string]*atomic.Uint64) + p.lastSelected = make(map[string]*atomic.Int64) +} + +// PeerStats holds statistics for a single peer. +type PeerStats struct { + PeerID string `json:"peerId"` + SelectionCount int64 `json:"selectionCount"` + AvgDistance float64 `json:"avgDistance"` + LastSelectedAt time.Time `json:"lastSelectedAt"` +} + +// DistributionStats provides statistical analysis of distances in query results. 
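+// Percentiles are computed by linear interpolation over the sorted sample,
+// and Variance/StdDev use the population form (divide by n). For example, for
+// the distances {1, 2, 3, 4, 5}:
+//
+//	s := ComputeDistributionStats([]float64{1, 2, 3, 4, 5})
+//	// s.Mean == 3, s.Median == 3, s.Variance == 2, s.StdDev ~= 1.414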
+type DistributionStats struct { + Count int `json:"count"` + Min float64 `json:"min"` + Max float64 `json:"max"` + Mean float64 `json:"mean"` + Median float64 `json:"median"` + StdDev float64 `json:"stdDev"` + P25 float64 `json:"p25"` // 25th percentile + P75 float64 `json:"p75"` // 75th percentile + P90 float64 `json:"p90"` // 90th percentile + P99 float64 `json:"p99"` // 99th percentile + Variance float64 `json:"variance"` + Skewness float64 `json:"skewness"` + SampleSize int `json:"sampleSize"` + ComputedAt time.Time `json:"computedAt"` +} + +// ComputeDistributionStats calculates distribution statistics from a slice of distances. +func ComputeDistributionStats(distances []float64) DistributionStats { + n := len(distances) + if n == 0 { + return DistributionStats{ComputedAt: time.Now()} + } + + // Sort for percentile calculations + sorted := make([]float64, n) + copy(sorted, distances) + sort.Float64s(sorted) + + // Basic stats + min, max := sorted[0], sorted[n-1] + sum := 0.0 + for _, d := range sorted { + sum += d + } + mean := sum / float64(n) + + // Variance and standard deviation + sumSqDiff := 0.0 + for _, d := range sorted { + diff := d - mean + sumSqDiff += diff * diff + } + variance := sumSqDiff / float64(n) + stdDev := math.Sqrt(variance) + + // Skewness + skewness := 0.0 + if stdDev > 0 { + sumCubeDiff := 0.0 + for _, d := range sorted { + diff := (d - mean) / stdDev + sumCubeDiff += diff * diff * diff + } + skewness = sumCubeDiff / float64(n) + } + + return DistributionStats{ + Count: n, + Min: min, + Max: max, + Mean: mean, + Median: percentile(sorted, 0.5), + StdDev: stdDev, + P25: percentile(sorted, 0.25), + P75: percentile(sorted, 0.75), + P90: percentile(sorted, 0.90), + P99: percentile(sorted, 0.99), + Variance: variance, + Skewness: skewness, + SampleSize: n, + ComputedAt: time.Now(), + } +} + +// percentile returns the p-th percentile from a sorted slice. +func percentile(sorted []float64, p float64) float64 { + if len(sorted) == 0 { + return 0 + } + if len(sorted) == 1 { + return sorted[0] + } + idx := p * float64(len(sorted)-1) + lower := int(idx) + upper := lower + 1 + if upper >= len(sorted) { + return sorted[len(sorted)-1] + } + frac := idx - float64(lower) + return sorted[lower]*(1-frac) + sorted[upper]*frac +} + +// AxisDistribution provides per-axis (feature) distribution analysis. +type AxisDistribution struct { + Axis int `json:"axis"` + Name string `json:"name,omitempty"` + Stats DistributionStats `json:"stats"` +} + +// ComputeAxisDistributions analyzes the distribution of values along each axis. +func ComputeAxisDistributions[T any](points []KDPoint[T], axisNames []string) []AxisDistribution { + if len(points) == 0 { + return nil + } + dim := len(points[0].Coords) + result := make([]AxisDistribution, dim) + + for axis := 0; axis < dim; axis++ { + values := make([]float64, len(points)) + for i, p := range points { + if axis < len(p.Coords) { + values[i] = p.Coords[axis] + } + } + name := "" + if axis < len(axisNames) { + name = axisNames[axis] + } + result[axis] = AxisDistribution{ + Axis: axis, + Name: name, + Stats: ComputeDistributionStats(values), + } + } + return result +} + +// NATRoutingMetrics provides metrics specifically for NAT traversal routing decisions. 
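+// These metrics feed PeerQualityScore below. A sketch with illustrative
+// values for a well-connected peer behind a full-cone NAT:
+//
+//	m := NATRoutingMetrics{
+//		ConnectivityScore: 0.9, DirectSuccessRate: 0.95,
+//		AvgRTTMs: 50, PacketLossRate: 0.01, BandwidthMbps: 100,
+//		NATType: string(NATTypeFullCone),
+//	}
+//	score := PeerQualityScore(m, nil) // nil weights use DefaultQualityWeights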
+type NATRoutingMetrics struct { + // Connectivity score (0-1): higher means better reachability + ConnectivityScore float64 `json:"connectivityScore"` + // Symmetry score (0-1): higher means more symmetric NAT (easier to traverse) + SymmetryScore float64 `json:"symmetryScore"` + // Relay requirement probability (0-1): likelihood peer needs relay + RelayProbability float64 `json:"relayProbability"` + // Direct connection success rate (historical) + DirectSuccessRate float64 `json:"directSuccessRate"` + // Average RTT in milliseconds + AvgRTTMs float64 `json:"avgRttMs"` + // Jitter (RTT variance) in milliseconds + JitterMs float64 `json:"jitterMs"` + // Packet loss rate (0-1) + PacketLossRate float64 `json:"packetLossRate"` + // Bandwidth estimate in Mbps + BandwidthMbps float64 `json:"bandwidthMbps"` + // NAT type classification + NATType string `json:"natType"` + // Last probe timestamp + LastProbeAt time.Time `json:"lastProbeAt"` +} + +// NATTypeClassification enumerates common NAT types for routing decisions. +type NATTypeClassification string + +const ( + NATTypeOpen NATTypeClassification = "open" // No NAT / Public IP + NATTypeFullCone NATTypeClassification = "full_cone" // Easy to traverse + NATTypeRestrictedCone NATTypeClassification = "restricted_cone" // Moderate difficulty + NATTypePortRestricted NATTypeClassification = "port_restricted" // Harder to traverse + NATTypeSymmetric NATTypeClassification = "symmetric" // Hardest to traverse + NATTypeSymmetricUDP NATTypeClassification = "symmetric_udp" // UDP-only symmetric + NATTypeUnknown NATTypeClassification = "unknown" // Not yet classified + NATTypeBehindCGNAT NATTypeClassification = "cgnat" // Carrier-grade NAT + NATTypeFirewalled NATTypeClassification = "firewalled" // Blocked by firewall + NATTypeRelayRequired NATTypeClassification = "relay_required" // Must use relay +) + +// PeerQualityScore computes a composite quality score for peer selection. +// Higher scores indicate better peers for routing. +// Weights can be customized; default weights emphasize latency and reliability. +func PeerQualityScore(metrics NATRoutingMetrics, weights *QualityWeights) float64 { + w := DefaultQualityWeights() + if weights != nil { + w = *weights + } + + // Normalize metrics to 0-1 scale (higher is better) + latencyScore := 1.0 - math.Min(metrics.AvgRTTMs/1000.0, 1.0) // <1000ms is acceptable + jitterScore := 1.0 - math.Min(metrics.JitterMs/100.0, 1.0) // <100ms jitter + lossScore := 1.0 - metrics.PacketLossRate // 0 loss is best + bandwidthScore := math.Min(metrics.BandwidthMbps/100.0, 1.0) // 100Mbps is excellent + connectivityScore := metrics.ConnectivityScore // Already 0-1 + symmetryScore := metrics.SymmetryScore // Already 0-1 + directScore := metrics.DirectSuccessRate // Already 0-1 + relayPenalty := 1.0 - metrics.RelayProbability // Prefer non-relay + + // NAT type bonus/penalty + natScore := natTypeScore(metrics.NATType) + + // Weighted combination + score := (w.Latency*latencyScore + + w.Jitter*jitterScore + + w.PacketLoss*lossScore + + w.Bandwidth*bandwidthScore + + w.Connectivity*connectivityScore + + w.Symmetry*symmetryScore + + w.DirectSuccess*directScore + + w.RelayPenalty*relayPenalty + + w.NATType*natScore) / w.Total() + + return math.Max(0, math.Min(1, score)) +} + +// QualityWeights configures the importance of each metric in peer selection. 
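+// The composite score is the weight-normalized sum of the individual 0-1
+// sub-scores, i.e. score = sum(w_i * s_i) / Total(). To emphasize one
+// dimension, start from the defaults and adjust (the value here is arbitrary):
+//
+//	w := DefaultQualityWeights()
+//	w.Latency = 10 // make latency dominate the ranking
+//	score := PeerQualityScore(metrics, &w) // metrics as in the sketch above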
+type QualityWeights struct { + Latency float64 `json:"latency"` + Jitter float64 `json:"jitter"` + PacketLoss float64 `json:"packetLoss"` + Bandwidth float64 `json:"bandwidth"` + Connectivity float64 `json:"connectivity"` + Symmetry float64 `json:"symmetry"` + DirectSuccess float64 `json:"directSuccess"` + RelayPenalty float64 `json:"relayPenalty"` + NATType float64 `json:"natType"` +} + +// Total returns the sum of all weights for normalization. +func (w QualityWeights) Total() float64 { + return w.Latency + w.Jitter + w.PacketLoss + w.Bandwidth + + w.Connectivity + w.Symmetry + w.DirectSuccess + w.RelayPenalty + w.NATType +} + +// DefaultQualityWeights returns sensible defaults for peer selection. +func DefaultQualityWeights() QualityWeights { + return QualityWeights{ + Latency: 3.0, // Most important + Jitter: 1.5, + PacketLoss: 2.0, + Bandwidth: 1.0, + Connectivity: 2.0, + Symmetry: 1.0, + DirectSuccess: 2.0, + RelayPenalty: 1.5, + NATType: 1.0, + } +} + +// natTypeScore returns a 0-1 score based on NAT type (higher is better for routing). +func natTypeScore(natType string) float64 { + switch NATTypeClassification(natType) { + case NATTypeOpen: + return 1.0 + case NATTypeFullCone: + return 0.9 + case NATTypeRestrictedCone: + return 0.7 + case NATTypePortRestricted: + return 0.5 + case NATTypeSymmetric: + return 0.3 + case NATTypeSymmetricUDP: + return 0.25 + case NATTypeBehindCGNAT: + return 0.2 + case NATTypeFirewalled: + return 0.1 + case NATTypeRelayRequired: + return 0.05 + default: + return 0.4 // Unknown gets middle score + } +} + +// TrustMetrics tracks trust and reputation for peer selection. +type TrustMetrics struct { + // ReputationScore (0-1): aggregated trust score + ReputationScore float64 `json:"reputationScore"` + // SuccessfulTransactions: count of successful exchanges + SuccessfulTransactions int64 `json:"successfulTransactions"` + // FailedTransactions: count of failed/aborted exchanges + FailedTransactions int64 `json:"failedTransactions"` + // AgeSeconds: how long this peer has been known + AgeSeconds int64 `json:"ageSeconds"` + // LastSuccessAt: last successful interaction + LastSuccessAt time.Time `json:"lastSuccessAt"` + // LastFailureAt: last failed interaction + LastFailureAt time.Time `json:"lastFailureAt"` + // VouchCount: number of other peers vouching for this peer + VouchCount int `json:"vouchCount"` + // FlagCount: number of reports against this peer + FlagCount int `json:"flagCount"` + // ProofOfWork: computational proof of stake/work + ProofOfWork float64 `json:"proofOfWork"` +} + +// ComputeTrustScore calculates a composite trust score from trust metrics. 
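+// The score combines the success rate (weighted by a volume-confidence term),
+// vouch/flag adjustments, a recency bonus that decays over roughly a week, and
+// a small proof-of-work bonus, clamped to [0, 1]. Peers with no transaction
+// history start at 0.5 plus an age bonus of up to 0.2, e.g.:
+//
+//	ComputeTrustScore(TrustMetrics{AgeSeconds: 86400 * 30}) // 0.7: 30 days old, no history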
+func ComputeTrustScore(t TrustMetrics) float64 { + total := t.SuccessfulTransactions + t.FailedTransactions + if total == 0 { + // New peer with no history: moderate trust with age bonus + ageBonus := math.Min(float64(t.AgeSeconds)/(86400*30), 0.2) // Up to 0.2 for 30 days + return 0.5 + ageBonus + } + + // Base score from success rate + successRate := float64(t.SuccessfulTransactions) / float64(total) + + // Volume confidence (more transactions = more confident) + volumeConfidence := 1 - 1/(1+float64(total)/10) + + // Vouch/flag adjustment + vouchBonus := math.Min(float64(t.VouchCount)*0.02, 0.15) + flagPenalty := math.Min(float64(t.FlagCount)*0.05, 0.3) + + // Recency bonus (recent success = better) + recencyBonus := 0.0 + if !t.LastSuccessAt.IsZero() { + hoursSince := time.Since(t.LastSuccessAt).Hours() + recencyBonus = 0.1 * math.Exp(-hoursSince/168) // Decays over ~1 week + } + + // Proof of work bonus + powBonus := math.Min(t.ProofOfWork*0.1, 0.1) + + score := successRate*volumeConfidence + vouchBonus - flagPenalty + recencyBonus + powBonus + return math.Max(0, math.Min(1, score)) +} + +// NetworkHealthSummary aggregates overall network health metrics. +type NetworkHealthSummary struct { + TotalPeers int `json:"totalPeers"` + ActivePeers int `json:"activePeers"` // Peers queried recently + HealthyPeers int `json:"healthyPeers"` // Peers with good metrics + DegradedPeers int `json:"degradedPeers"` // Peers with some issues + UnhealthyPeers int `json:"unhealthyPeers"` // Peers with poor metrics + AvgLatencyMs float64 `json:"avgLatencyMs"` + MedianLatencyMs float64 `json:"medianLatencyMs"` + AvgTrustScore float64 `json:"avgTrustScore"` + AvgQualityScore float64 `json:"avgQualityScore"` + DirectConnectRate float64 `json:"directConnectRate"` // % of peers directly reachable + RelayDependency float64 `json:"relayDependency"` // % of peers needing relay + ComputedAt time.Time `json:"computedAt"` +} + +// FeatureVector represents a normalized feature vector for a peer. +// This is the core structure for KD-Tree based peer selection. +type FeatureVector struct { + PeerID string `json:"peerId"` + Features []float64 `json:"features"` + Labels []string `json:"labels,omitempty"` // Optional feature names +} + +// StandardPeerFeatures defines the standard feature set for peer selection. +// These map to dimensions in the KD-Tree. +type StandardPeerFeatures struct { + LatencyMs float64 `json:"latencyMs"` // Lower is better + HopCount int `json:"hopCount"` // Lower is better + GeoDistanceKm float64 `json:"geoDistanceKm"` // Lower is better + TrustScore float64 `json:"trustScore"` // Higher is better (invert) + BandwidthMbps float64 `json:"bandwidthMbps"` // Higher is better (invert) + PacketLossRate float64 `json:"packetLossRate"` // Lower is better + ConnectivityPct float64 `json:"connectivityPct"` // Higher is better (invert) + NATScore float64 `json:"natScore"` // Higher is better (invert) +} + +// ToFeatureSlice converts structured features to a slice for KD-Tree operations. +// Inversion is handled so that lower distance = better peer. 
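+// Concretely, TrustScore and NATScore are subtracted from 1, while
+// BandwidthMbps and ConnectivityPct are subtracted from 100, e.g.:
+//
+//	f := StandardPeerFeatures{TrustScore: 0.9, BandwidthMbps: 50, ConnectivityPct: 95, NATScore: 0.8}
+//	v := f.ToFeatureSlice() // trust -> 0.1, bandwidth -> 50, connectivity -> 5, nat -> 0.2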
+func (f StandardPeerFeatures) ToFeatureSlice() []float64 { + return []float64{ + f.LatencyMs, + float64(f.HopCount), + f.GeoDistanceKm, + 1 - f.TrustScore, // Invert: higher trust = lower value + 100 - f.BandwidthMbps, // Invert: higher bandwidth = lower value (capped at 100) + f.PacketLossRate, + 100 - f.ConnectivityPct, // Invert: higher connectivity = lower value + 1 - f.NATScore, // Invert: higher NAT score = lower value + } +} + +// StandardFeatureLabels returns the labels for standard peer features. +func StandardFeatureLabels() []string { + return []string{ + "latency_ms", + "hop_count", + "geo_distance_km", + "trust_score_inv", + "bandwidth_inv", + "packet_loss_rate", + "connectivity_inv", + "nat_score_inv", + } +} + +// FeatureRanges defines min/max ranges for feature normalization. +type FeatureRanges struct { + Ranges []AxisStats `json:"ranges"` +} + +// DefaultPeerFeatureRanges returns sensible default ranges for peer features. +func DefaultPeerFeatureRanges() FeatureRanges { + return FeatureRanges{ + Ranges: []AxisStats{ + {Min: 0, Max: 1000}, // Latency: 0-1000ms + {Min: 0, Max: 20}, // Hops: 0-20 + {Min: 0, Max: 20000}, // Geo distance: 0-20000km (half Earth circumference) + {Min: 0, Max: 1}, // Trust score (inverted): 0-1 + {Min: 0, Max: 100}, // Bandwidth (inverted): 0-100Mbps + {Min: 0, Max: 1}, // Packet loss: 0-100% + {Min: 0, Max: 100}, // Connectivity (inverted): 0-100% + {Min: 0, Max: 1}, // NAT score (inverted): 0-1 + }, + } +} + +// NormalizePeerFeatures normalizes peer features to [0,1] using provided ranges. +func NormalizePeerFeatures(features []float64, ranges FeatureRanges) []float64 { + result := make([]float64, len(features)) + for i, v := range features { + if i < len(ranges.Ranges) { + result[i] = scale01(v, ranges.Ranges[i].Min, ranges.Ranges[i].Max) + } else { + result[i] = v + } + } + return result +} + +// WeightedPeerFeatures applies per-feature weights after normalization. 
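+// Missing weights default to 1.0, so a shorter weight slice leaves trailing
+// features unscaled, e.g.:
+//
+//	WeightedPeerFeatures([]float64{0.5, 0.5, 0.5}, []float64{2, 0.5}) // -> [1.0, 0.25, 0.5]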
+func WeightedPeerFeatures(normalized []float64, weights []float64) []float64 { + result := make([]float64, len(normalized)) + for i, v := range normalized { + w := 1.0 + if i < len(weights) { + w = weights[i] + } + result[i] = v * w + } + return result +} diff --git a/kdtree_analytics_test.go b/kdtree_analytics_test.go new file mode 100644 index 0000000..0033a09 --- /dev/null +++ b/kdtree_analytics_test.go @@ -0,0 +1,662 @@ +package poindexter + +import ( + "math" + "testing" + "time" +) + +// ============================================================================ +// TreeAnalytics Tests +// ============================================================================ + +func TestNewTreeAnalytics(t *testing.T) { + a := NewTreeAnalytics() + if a == nil { + t.Fatal("NewTreeAnalytics returned nil") + } + if a.QueryCount.Load() != 0 { + t.Errorf("expected QueryCount=0, got %d", a.QueryCount.Load()) + } + if a.InsertCount.Load() != 0 { + t.Errorf("expected InsertCount=0, got %d", a.InsertCount.Load()) + } + if a.CreatedAt.IsZero() { + t.Error("CreatedAt should not be zero") + } +} + +func TestTreeAnalyticsRecordQuery(t *testing.T) { + a := NewTreeAnalytics() + + a.RecordQuery(1000) // 1μs + a.RecordQuery(2000) // 2μs + a.RecordQuery(500) // 0.5μs + + if a.QueryCount.Load() != 3 { + t.Errorf("expected QueryCount=3, got %d", a.QueryCount.Load()) + } + if a.TotalQueryTimeNs.Load() != 3500 { + t.Errorf("expected TotalQueryTimeNs=3500, got %d", a.TotalQueryTimeNs.Load()) + } + if a.MinQueryTimeNs.Load() != 500 { + t.Errorf("expected MinQueryTimeNs=500, got %d", a.MinQueryTimeNs.Load()) + } + if a.MaxQueryTimeNs.Load() != 2000 { + t.Errorf("expected MaxQueryTimeNs=2000, got %d", a.MaxQueryTimeNs.Load()) + } + if a.LastQueryTimeNs.Load() != 500 { + t.Errorf("expected LastQueryTimeNs=500, got %d", a.LastQueryTimeNs.Load()) + } +} + +func TestTreeAnalyticsSnapshot(t *testing.T) { + a := NewTreeAnalytics() + + a.RecordQuery(1000) + a.RecordQuery(3000) + a.RecordInsert() + a.RecordInsert() + a.RecordDelete() + a.RecordRebuild() + + snap := a.Snapshot() + + if snap.QueryCount != 2 { + t.Errorf("expected QueryCount=2, got %d", snap.QueryCount) + } + if snap.InsertCount != 2 { + t.Errorf("expected InsertCount=2, got %d", snap.InsertCount) + } + if snap.DeleteCount != 1 { + t.Errorf("expected DeleteCount=1, got %d", snap.DeleteCount) + } + if snap.AvgQueryTimeNs != 2000 { + t.Errorf("expected AvgQueryTimeNs=2000, got %d", snap.AvgQueryTimeNs) + } + if snap.MinQueryTimeNs != 1000 { + t.Errorf("expected MinQueryTimeNs=1000, got %d", snap.MinQueryTimeNs) + } + if snap.MaxQueryTimeNs != 3000 { + t.Errorf("expected MaxQueryTimeNs=3000, got %d", snap.MaxQueryTimeNs) + } + if snap.BackendRebuildCnt != 1 { + t.Errorf("expected BackendRebuildCnt=1, got %d", snap.BackendRebuildCnt) + } +} + +func TestTreeAnalyticsReset(t *testing.T) { + a := NewTreeAnalytics() + + a.RecordQuery(1000) + a.RecordInsert() + a.RecordDelete() + + a.Reset() + + if a.QueryCount.Load() != 0 { + t.Errorf("expected QueryCount=0 after reset, got %d", a.QueryCount.Load()) + } + if a.InsertCount.Load() != 0 { + t.Errorf("expected InsertCount=0 after reset, got %d", a.InsertCount.Load()) + } + if a.DeleteCount.Load() != 0 { + t.Errorf("expected DeleteCount=0 after reset, got %d", a.DeleteCount.Load()) + } +} + +// ============================================================================ +// PeerAnalytics Tests +// ============================================================================ + +func TestNewPeerAnalytics(t *testing.T) { + p := 
NewPeerAnalytics() + if p == nil { + t.Fatal("NewPeerAnalytics returned nil") + } +} + +func TestPeerAnalyticsRecordSelection(t *testing.T) { + p := NewPeerAnalytics() + + p.RecordSelection("peer1", 0.5) + p.RecordSelection("peer1", 0.3) + p.RecordSelection("peer2", 1.0) + + stats := p.GetPeerStats("peer1") + if stats.SelectionCount != 2 { + t.Errorf("expected peer1 SelectionCount=2, got %d", stats.SelectionCount) + } + if math.Abs(stats.AvgDistance-0.4) > 0.001 { + t.Errorf("expected peer1 AvgDistance~0.4, got %f", stats.AvgDistance) + } + + stats2 := p.GetPeerStats("peer2") + if stats2.SelectionCount != 1 { + t.Errorf("expected peer2 SelectionCount=1, got %d", stats2.SelectionCount) + } +} + +func TestPeerAnalyticsGetAllPeerStats(t *testing.T) { + p := NewPeerAnalytics() + + p.RecordSelection("peer1", 0.5) + p.RecordSelection("peer1", 0.5) + p.RecordSelection("peer2", 1.0) + p.RecordSelection("peer3", 0.8) + p.RecordSelection("peer3", 0.8) + p.RecordSelection("peer3", 0.8) + + all := p.GetAllPeerStats() + if len(all) != 3 { + t.Errorf("expected 3 peers, got %d", len(all)) + } + + // Should be sorted by selection count descending + if all[0].PeerID != "peer3" || all[0].SelectionCount != 3 { + t.Errorf("expected first peer to be peer3 with count=3, got %s with count=%d", + all[0].PeerID, all[0].SelectionCount) + } +} + +func TestPeerAnalyticsGetTopPeers(t *testing.T) { + p := NewPeerAnalytics() + + for i := 0; i < 5; i++ { + p.RecordSelection("peer1", 0.5) + } + for i := 0; i < 3; i++ { + p.RecordSelection("peer2", 0.3) + } + p.RecordSelection("peer3", 0.1) + + top := p.GetTopPeers(2) + if len(top) != 2 { + t.Errorf("expected 2 top peers, got %d", len(top)) + } + if top[0].PeerID != "peer1" { + t.Errorf("expected top peer to be peer1, got %s", top[0].PeerID) + } + if top[1].PeerID != "peer2" { + t.Errorf("expected second peer to be peer2, got %s", top[1].PeerID) + } +} + +func TestPeerAnalyticsReset(t *testing.T) { + p := NewPeerAnalytics() + + p.RecordSelection("peer1", 0.5) + p.Reset() + + stats := p.GetAllPeerStats() + if len(stats) != 0 { + t.Errorf("expected 0 peers after reset, got %d", len(stats)) + } +} + +// ============================================================================ +// DistributionStats Tests +// ============================================================================ + +func TestComputeDistributionStatsEmpty(t *testing.T) { + stats := ComputeDistributionStats(nil) + if stats.Count != 0 { + t.Errorf("expected Count=0 for empty input, got %d", stats.Count) + } +} + +func TestComputeDistributionStatsSingle(t *testing.T) { + stats := ComputeDistributionStats([]float64{5.0}) + if stats.Count != 1 { + t.Errorf("expected Count=1, got %d", stats.Count) + } + if stats.Min != 5.0 || stats.Max != 5.0 { + t.Errorf("expected Min=Max=5.0, got Min=%f, Max=%f", stats.Min, stats.Max) + } + if stats.Mean != 5.0 { + t.Errorf("expected Mean=5.0, got %f", stats.Mean) + } + if stats.Median != 5.0 { + t.Errorf("expected Median=5.0, got %f", stats.Median) + } +} + +func TestComputeDistributionStatsMultiple(t *testing.T) { + // Values: 1, 2, 3, 4, 5 - mean=3, median=3 + stats := ComputeDistributionStats([]float64{1, 2, 3, 4, 5}) + + if stats.Count != 5 { + t.Errorf("expected Count=5, got %d", stats.Count) + } + if stats.Min != 1.0 { + t.Errorf("expected Min=1.0, got %f", stats.Min) + } + if stats.Max != 5.0 { + t.Errorf("expected Max=5.0, got %f", stats.Max) + } + if stats.Mean != 3.0 { + t.Errorf("expected Mean=3.0, got %f", stats.Mean) + } + if stats.Median != 3.0 { + 
t.Errorf("expected Median=3.0, got %f", stats.Median) + } + // Variance = 2.0 for this dataset + if math.Abs(stats.Variance-2.0) > 0.001 { + t.Errorf("expected Variance~2.0, got %f", stats.Variance) + } +} + +func TestComputeDistributionStatsPercentiles(t *testing.T) { + // 100 values from 0 to 99 + values := make([]float64, 100) + for i := 0; i < 100; i++ { + values[i] = float64(i) + } + stats := ComputeDistributionStats(values) + + // P25 should be around 24.75, P75 around 74.25 + if math.Abs(stats.P25-24.75) > 0.1 { + t.Errorf("expected P25~24.75, got %f", stats.P25) + } + if math.Abs(stats.P75-74.25) > 0.1 { + t.Errorf("expected P75~74.25, got %f", stats.P75) + } + if math.Abs(stats.P90-89.1) > 0.1 { + t.Errorf("expected P90~89.1, got %f", stats.P90) + } +} + +// ============================================================================ +// AxisDistribution Tests +// ============================================================================ + +func TestComputeAxisDistributions(t *testing.T) { + points := []KDPoint[string]{ + {ID: "a", Coords: []float64{1.0, 10.0}}, + {ID: "b", Coords: []float64{2.0, 20.0}}, + {ID: "c", Coords: []float64{3.0, 30.0}}, + } + + dists := ComputeAxisDistributions(points, []string{"x", "y"}) + + if len(dists) != 2 { + t.Errorf("expected 2 axis distributions, got %d", len(dists)) + } + + if dists[0].Axis != 0 || dists[0].Name != "x" { + t.Errorf("expected first axis=0, name=x, got axis=%d, name=%s", dists[0].Axis, dists[0].Name) + } + if dists[0].Stats.Mean != 2.0 { + t.Errorf("expected axis 0 mean=2.0, got %f", dists[0].Stats.Mean) + } + + if dists[1].Axis != 1 || dists[1].Name != "y" { + t.Errorf("expected second axis=1, name=y, got axis=%d, name=%s", dists[1].Axis, dists[1].Name) + } + if dists[1].Stats.Mean != 20.0 { + t.Errorf("expected axis 1 mean=20.0, got %f", dists[1].Stats.Mean) + } +} + +// ============================================================================ +// NAT Routing Tests +// ============================================================================ + +func TestPeerQualityScoreDefaults(t *testing.T) { + // Perfect peer + perfect := NATRoutingMetrics{ + ConnectivityScore: 1.0, + SymmetryScore: 1.0, + RelayProbability: 0.0, + DirectSuccessRate: 1.0, + AvgRTTMs: 10, + JitterMs: 5, + PacketLossRate: 0.0, + BandwidthMbps: 100, + NATType: string(NATTypeOpen), + } + score := PeerQualityScore(perfect, nil) + if score < 0.9 { + t.Errorf("expected perfect peer score > 0.9, got %f", score) + } + + // Poor peer + poor := NATRoutingMetrics{ + ConnectivityScore: 0.2, + SymmetryScore: 0.1, + RelayProbability: 0.9, + DirectSuccessRate: 0.1, + AvgRTTMs: 500, + JitterMs: 100, + PacketLossRate: 0.5, + BandwidthMbps: 1, + NATType: string(NATTypeSymmetric), + } + poorScore := PeerQualityScore(poor, nil) + if poorScore > 0.5 { + t.Errorf("expected poor peer score < 0.5, got %f", poorScore) + } + if poorScore >= score { + t.Error("poor peer should have lower score than perfect peer") + } +} + +func TestPeerQualityScoreCustomWeights(t *testing.T) { + metrics := NATRoutingMetrics{ + ConnectivityScore: 1.0, + SymmetryScore: 0.5, + RelayProbability: 0.0, + DirectSuccessRate: 1.0, + AvgRTTMs: 100, + JitterMs: 10, + PacketLossRate: 0.01, + BandwidthMbps: 50, + NATType: string(NATTypeFullCone), + } + + // Weight latency heavily + latencyWeights := QualityWeights{ + Latency: 10.0, + Jitter: 1.0, + PacketLoss: 1.0, + Bandwidth: 1.0, + Connectivity: 1.0, + Symmetry: 1.0, + DirectSuccess: 1.0, + RelayPenalty: 1.0, + NATType: 1.0, + } + scoreLatency := 
PeerQualityScore(metrics, &latencyWeights) + + // Weight bandwidth heavily + bandwidthWeights := QualityWeights{ + Latency: 1.0, + Jitter: 1.0, + PacketLoss: 1.0, + Bandwidth: 10.0, + Connectivity: 1.0, + Symmetry: 1.0, + DirectSuccess: 1.0, + RelayPenalty: 1.0, + NATType: 1.0, + } + scoreBandwidth := PeerQualityScore(metrics, &bandwidthWeights) + + // Scores should differ based on weights + if scoreLatency == scoreBandwidth { + t.Error("different weights should produce different scores") + } +} + +func TestDefaultQualityWeights(t *testing.T) { + w := DefaultQualityWeights() + if w.Latency <= 0 { + t.Error("Latency weight should be positive") + } + if w.Total() <= 0 { + t.Error("Total weights should be positive") + } +} + +func TestNatTypeScore(t *testing.T) { + tests := []struct { + natType string + minScore float64 + maxScore float64 + }{ + {string(NATTypeOpen), 0.9, 1.0}, + {string(NATTypeFullCone), 0.8, 1.0}, + {string(NATTypeSymmetric), 0.2, 0.4}, + {string(NATTypeRelayRequired), 0.0, 0.1}, + {"unknown", 0.3, 0.5}, + } + + for _, tc := range tests { + score := natTypeScore(tc.natType) + if score < tc.minScore || score > tc.maxScore { + t.Errorf("natType %s: expected score in [%f, %f], got %f", + tc.natType, tc.minScore, tc.maxScore, score) + } + } +} + +// ============================================================================ +// Trust Score Tests +// ============================================================================ + +func TestComputeTrustScoreNewPeer(t *testing.T) { + // New peer with no history + metrics := TrustMetrics{ + SuccessfulTransactions: 0, + FailedTransactions: 0, + AgeSeconds: 86400, // 1 day old + } + score := ComputeTrustScore(metrics) + // New peer should get moderate trust + if score < 0.4 || score > 0.7 { + t.Errorf("expected new peer score in [0.4, 0.7], got %f", score) + } +} + +func TestComputeTrustScoreGoodPeer(t *testing.T) { + metrics := TrustMetrics{ + SuccessfulTransactions: 100, + FailedTransactions: 2, + AgeSeconds: 86400 * 30, // 30 days + VouchCount: 5, + FlagCount: 0, + LastSuccessAt: time.Now(), + } + score := ComputeTrustScore(metrics) + if score < 0.8 { + t.Errorf("expected good peer score > 0.8, got %f", score) + } +} + +func TestComputeTrustScoreBadPeer(t *testing.T) { + metrics := TrustMetrics{ + SuccessfulTransactions: 5, + FailedTransactions: 20, + AgeSeconds: 86400, + VouchCount: 0, + FlagCount: 10, + } + score := ComputeTrustScore(metrics) + if score > 0.3 { + t.Errorf("expected bad peer score < 0.3, got %f", score) + } +} + +// ============================================================================ +// Feature Normalization Tests +// ============================================================================ + +func TestStandardPeerFeaturesToSlice(t *testing.T) { + features := StandardPeerFeatures{ + LatencyMs: 100, + HopCount: 5, + GeoDistanceKm: 1000, + TrustScore: 0.9, + BandwidthMbps: 50, + PacketLossRate: 0.01, + ConnectivityPct: 95, + NATScore: 0.8, + } + + slice := features.ToFeatureSlice() + if len(slice) != 8 { + t.Errorf("expected 8 features, got %d", len(slice)) + } + + // TrustScore should be inverted (0.9 -> 0.1) + if math.Abs(slice[3]-0.1) > 0.001 { + t.Errorf("expected inverted trust score ~0.1, got %f", slice[3]) + } +} + +func TestNormalizePeerFeatures(t *testing.T) { + features := []float64{100, 5, 1000, 0.5, 50, 0.01, 50, 0.5} + ranges := DefaultPeerFeatureRanges() + + normalized := NormalizePeerFeatures(features, ranges) + + for i, v := range normalized { + if v < 0 || v > 1 { + t.Errorf("normalized 
feature %d out of range [0,1]: %f", i, v) + } + } +} + +func TestWeightedPeerFeatures(t *testing.T) { + normalized := []float64{0.5, 0.5, 0.5, 0.5} + weights := []float64{1.0, 2.0, 0.5, 1.5} + + weighted := WeightedPeerFeatures(normalized, weights) + + expected := []float64{0.5, 1.0, 0.25, 0.75} + for i, v := range weighted { + if math.Abs(v-expected[i]) > 0.001 { + t.Errorf("weighted feature %d: expected %f, got %f", i, expected[i], v) + } + } +} + +func TestStandardFeatureLabels(t *testing.T) { + labels := StandardFeatureLabels() + if len(labels) != 8 { + t.Errorf("expected 8 feature labels, got %d", len(labels)) + } +} + +// ============================================================================ +// KDTree Analytics Integration Tests +// ============================================================================ + +func TestKDTreeAnalyticsIntegration(t *testing.T) { + points := []KDPoint[string]{ + {ID: "a", Coords: []float64{0, 0}, Value: "A"}, + {ID: "b", Coords: []float64{1, 1}, Value: "B"}, + {ID: "c", Coords: []float64{2, 2}, Value: "C"}, + } + tree, err := NewKDTree(points) + if err != nil { + t.Fatal(err) + } + + // Check initial analytics + if tree.Analytics() == nil { + t.Fatal("Analytics should not be nil") + } + if tree.PeerAnalytics() == nil { + t.Fatal("PeerAnalytics should not be nil") + } + + // Perform queries + tree.Nearest([]float64{0.1, 0.1}) + tree.Nearest([]float64{0.9, 0.9}) + tree.KNearest([]float64{0.5, 0.5}, 2) + + snap := tree.GetAnalyticsSnapshot() + if snap.QueryCount != 3 { + t.Errorf("expected QueryCount=3, got %d", snap.QueryCount) + } + if snap.InsertCount != 0 { + t.Errorf("expected InsertCount=0, got %d", snap.InsertCount) + } + + // Check peer stats + peerStats := tree.GetPeerStats() + if len(peerStats) == 0 { + t.Error("expected some peer stats after queries") + } + + // Peer 'a' should have been selected for query [0.1, 0.1] + var foundA bool + for _, ps := range peerStats { + if ps.PeerID == "a" && ps.SelectionCount > 0 { + foundA = true + break + } + } + if !foundA { + t.Error("expected peer 'a' to be recorded in analytics") + } + + // Test top peers + topPeers := tree.GetTopPeers(1) + if len(topPeers) != 1 { + t.Errorf("expected 1 top peer, got %d", len(topPeers)) + } + + // Test insert analytics + tree.Insert(KDPoint[string]{ID: "d", Coords: []float64{3, 3}, Value: "D"}) + snap = tree.GetAnalyticsSnapshot() + if snap.InsertCount != 1 { + t.Errorf("expected InsertCount=1, got %d", snap.InsertCount) + } + + // Test delete analytics + tree.DeleteByID("d") + snap = tree.GetAnalyticsSnapshot() + if snap.DeleteCount != 1 { + t.Errorf("expected DeleteCount=1, got %d", snap.DeleteCount) + } + + // Test reset + tree.ResetAnalytics() + snap = tree.GetAnalyticsSnapshot() + if snap.QueryCount != 0 || snap.InsertCount != 0 || snap.DeleteCount != 0 { + t.Error("expected all counts to be 0 after reset") + } +} + +func TestKDTreeDistanceDistribution(t *testing.T) { + points := []KDPoint[string]{ + {ID: "a", Coords: []float64{0, 10}, Value: "A"}, + {ID: "b", Coords: []float64{1, 20}, Value: "B"}, + {ID: "c", Coords: []float64{2, 30}, Value: "C"}, + } + tree, _ := NewKDTree(points) + + dists := tree.ComputeDistanceDistribution([]string{"x", "y"}) + if len(dists) != 2 { + t.Errorf("expected 2 axis distributions, got %d", len(dists)) + } + + if dists[0].Name != "x" || dists[0].Stats.Mean != 1.0 { + t.Errorf("unexpected axis 0 distribution: name=%s, mean=%f", + dists[0].Name, dists[0].Stats.Mean) + } + if dists[1].Name != "y" || dists[1].Stats.Mean != 20.0 { + 
t.Errorf("unexpected axis 1 distribution: name=%s, mean=%f", + dists[1].Name, dists[1].Stats.Mean) + } +} + +func TestKDTreePointsExport(t *testing.T) { + points := []KDPoint[string]{ + {ID: "a", Coords: []float64{0, 0}, Value: "A"}, + {ID: "b", Coords: []float64{1, 1}, Value: "B"}, + } + tree, _ := NewKDTree(points) + + exported := tree.Points() + if len(exported) != 2 { + t.Errorf("expected 2 points, got %d", len(exported)) + } + + // Verify it's a copy, not a reference + exported[0].ID = "modified" + original := tree.Points() + if original[0].ID == "modified" { + t.Error("Points() should return a copy, not a reference") + } +} + +func TestKDTreeBackend(t *testing.T) { + tree, _ := NewKDTreeFromDim[string](2) + backend := tree.Backend() + if backend != BackendLinear && backend != BackendGonum { + t.Errorf("unexpected backend: %s", backend) + } +} diff --git a/npm/poindexter-wasm/index.d.ts b/npm/poindexter-wasm/index.d.ts index c57c8c8..6c55e1c 100644 --- a/npm/poindexter-wasm/index.d.ts +++ b/npm/poindexter-wasm/index.d.ts @@ -15,7 +15,153 @@ export interface KNearestResult { dists: number[]; } +// ============================================================================ +// Analytics Types +// ============================================================================ + +/** Tree operation analytics snapshot */ +export interface TreeAnalytics { + queryCount: number; + insertCount: number; + deleteCount: number; + avgQueryTimeNs: number; + minQueryTimeNs: number; + maxQueryTimeNs: number; + lastQueryTimeNs: number; + lastQueryAt: number; // Unix milliseconds + createdAt: number; // Unix milliseconds + backendRebuildCount: number; + lastRebuiltAt: number; // Unix milliseconds +} + +/** Per-peer selection statistics */ +export interface PeerStats { + peerId: string; + selectionCount: number; + avgDistance: number; + lastSelectedAt: number; // Unix milliseconds +} + +/** Statistical distribution analysis */ +export interface DistributionStats { + count: number; + min: number; + max: number; + mean: number; + median: number; + stdDev: number; + p25: number; + p75: number; + p90: number; + p99: number; + variance: number; + skewness: number; + sampleSize?: number; + computedAt?: number; // Unix milliseconds +} + +/** Per-axis distribution in the KD-Tree */ +export interface AxisDistribution { + axis: number; + name: string; + stats: DistributionStats; +} + +// ============================================================================ +// NAT Routing Types +// ============================================================================ + +/** NAT type classification for routing decisions */ +export type NATTypeClassification = + | 'open' + | 'full_cone' + | 'restricted_cone' + | 'port_restricted' + | 'symmetric' + | 'symmetric_udp' + | 'cgnat' + | 'firewalled' + | 'relay_required' + | 'unknown'; + +/** Network metrics for NAT routing decisions */ +export interface NATRoutingMetrics { + connectivityScore: number; // 0-1: higher = better reachability + symmetryScore: number; // 0-1: higher = more symmetric NAT + relayProbability: number; // 0-1: likelihood peer needs relay + directSuccessRate: number; // 0-1: historical direct connection success + avgRttMs: number; // Average RTT in milliseconds + jitterMs: number; // RTT variance in milliseconds + packetLossRate: number; // 0-1: packet loss rate + bandwidthMbps: number; // Bandwidth estimate in Mbps + natType: NATTypeClassification; + lastProbeAt?: number; // Unix milliseconds +} + +/** Weights for peer quality scoring */ +export 
interface QualityWeights { + latency: number; + jitter: number; + packetLoss: number; + bandwidth: number; + connectivity: number; + symmetry: number; + directSuccess: number; + relayPenalty: number; + natType: number; +} + +/** Trust metrics for peer reputation */ +export interface TrustMetrics { + reputationScore: number; // 0-1: aggregated trust score + successfulTransactions: number; + failedTransactions: number; + ageSeconds: number; // How long this peer has been known + lastSuccessAt?: number; // Unix milliseconds + lastFailureAt?: number; // Unix milliseconds + vouchCount: number; // Peers vouching for this peer + flagCount: number; // Reports against this peer + proofOfWork: number; // Computational proof of stake/work +} + +/** Axis min/max range for normalization */ +export interface AxisRange { + min: number; + max: number; +} + +/** Feature ranges for peer feature normalization */ +export interface FeatureRanges { + ranges: AxisRange[]; + labels?: string[]; +} + +/** Standard peer features for KD-Tree based selection */ +export interface StandardPeerFeatures { + latencyMs: number; + hopCount: number; + geoDistanceKm: number; + trustScore: number; + bandwidthMbps: number; + packetLossRate: number; + connectivityPct: number; + natScore: number; +} + +/** Export data with all points */ +export interface TreeExport { + dim: number; + len: number; + backend: string; + points: PxPoint[]; +} + +// ============================================================================ +// Tree Interface +// ============================================================================ + export interface PxTree { + // Core operations len(): Promise; dim(): Promise; insert(point: PxPoint): Promise; @@ -24,18 +170,377 @@ export interface PxTree { kNearest(query: number[], k: number): Promise; radius(query: number[], r: number): Promise; exportJSON(): Promise; + + // Analytics operations + getAnalytics(): Promise; + getPeerStats(): Promise; + getTopPeers(n: number): Promise; + getAxisDistributions(axisNames?: string[]): Promise; + resetAnalytics(): Promise; } +// ============================================================================ +// Init Options +// ============================================================================ + export interface InitOptions { wasmURL?: string; wasmExecURL?: string; instantiateWasm?: (source: ArrayBuffer, importObject: WebAssembly.Imports) => Promise | WebAssembly.Instance; } +// ============================================================================ +// DNS Tools Types +// ============================================================================ + +/** DNS record types - standard and extended (ClouDNS compatible) */ +export type DNSRecordType = + // Standard record types + | 'A' + | 'AAAA' + | 'MX' + | 'TXT' + | 'NS' + | 'CNAME' + | 'SOA' + | 'PTR' + | 'SRV' + | 'CAA' + // Additional record types (ClouDNS and others) + | 'ALIAS' // Virtual A record - CNAME-like for apex domain + | 'RP' // Responsible Person + | 'SSHFP' // SSH Fingerprint + | 'TLSA' // DANE TLS Authentication + | 'DS' // DNSSEC Delegation Signer + | 'DNSKEY' // DNSSEC Key + | 'NAPTR' // Naming Authority Pointer + | 'LOC' // Geographic Location + | 'HINFO' // Host Information + | 'CERT' // Certificate record + | 'SMIMEA' // S/MIME Certificate Association + | 'WR' // Web Redirect (ClouDNS specific) + | 'SPF'; // Sender Policy Framework (legacy) + +/** DNS record type metadata */ +export interface DNSRecordTypeInfo { + type: DNSRecordType; + name: string; + description: string; + rfc?: 
string; + common: boolean; +} + +/** CAA record */ +export interface CAARecord { + flag: number; + tag: string; // "issue", "issuewild", "iodef" + value: string; +} + +/** SSHFP record */ +export interface SSHFPRecord { + algorithm: number; // 1=RSA, 2=DSA, 3=ECDSA, 4=Ed25519 + fpType: number; // 1=SHA-1, 2=SHA-256 + fingerprint: string; +} + +/** TLSA (DANE) record */ +export interface TLSARecord { + usage: number; // 0-3: CA constraint, Service cert, Trust anchor, Domain-issued + selector: number; // 0=Full cert, 1=SubjectPublicKeyInfo + matchingType: number; // 0=Exact, 1=SHA-256, 2=SHA-512 + certData: string; +} + +/** DS (DNSSEC Delegation Signer) record */ +export interface DSRecord { + keyTag: number; + algorithm: number; + digestType: number; + digest: string; +} + +/** DNSKEY record */ +export interface DNSKEYRecord { + flags: number; + protocol: number; + algorithm: number; + publicKey: string; +} + +/** NAPTR record */ +export interface NAPTRRecord { + order: number; + preference: number; + flags: string; + service: string; + regexp: string; + replacement: string; +} + +/** RP (Responsible Person) record */ +export interface RPRecord { + mailbox: string; // Email as DNS name (user.domain.com) + txtDom: string; // Domain with TXT record containing more info +} + +/** LOC (Location) record */ +export interface LOCRecord { + latitude: number; + longitude: number; + altitude: number; + size: number; + hPrecision: number; + vPrecision: number; +} + +/** ALIAS record (provider-specific) */ +export interface ALIASRecord { + target: string; +} + +/** Web Redirect record (ClouDNS specific) */ +export interface WebRedirectRecord { + url: string; + redirectType: number; // 301, 302, etc. + frame: boolean; // Frame redirect vs HTTP redirect +} + +/** External tool links for domain/IP/email analysis */ +export interface ExternalToolLinks { + target: string; + type: 'domain' | 'ip' | 'email'; + + // MXToolbox links + mxtoolboxDns?: string; + mxtoolboxMx?: string; + mxtoolboxBlacklist?: string; + mxtoolboxSmtp?: string; + mxtoolboxSpf?: string; + mxtoolboxDmarc?: string; + mxtoolboxDkim?: string; + mxtoolboxHttp?: string; + mxtoolboxHttps?: string; + mxtoolboxPing?: string; + mxtoolboxTrace?: string; + mxtoolboxWhois?: string; + mxtoolboxAsn?: string; + + // DNSChecker links + dnscheckerDns?: string; + dnscheckerPropagation?: string; + + // Other tools + whois?: string; + viewdns?: string; + intodns?: string; + dnsviz?: string; + securitytrails?: string; + shodan?: string; + censys?: string; + builtwith?: string; + ssllabs?: string; + hstsPreload?: string; + hardenize?: string; + + // IP-specific tools + ipinfo?: string; + abuseipdb?: string; + virustotal?: string; + threatcrowd?: string; + + // Email-specific tools + mailtester?: string; + learndmarc?: string; +} + +/** RDAP server registry */ +export interface RDAPServers { + tlds: Record; + rirs: Record; + universal: string; +} + +/** RDAP response event */ +export interface RDAPEvent { + eventAction: string; + eventDate: string; + eventActor?: string; +} + +/** RDAP entity (registrar, registrant, etc.) 
+export interface RDAPEntity {
+  handle?: string;
+  roles?: string[];
+  vcardArray?: any[];
+  entities?: RDAPEntity[];
+  events?: RDAPEvent[];
+}
+
+/** RDAP nameserver */
+export interface RDAPNameserver {
+  ldhName: string;
+  ipAddresses?: {
+    v4?: string[];
+    v6?: string[];
+  };
+}
+
+/** RDAP link */
+export interface RDAPLink {
+  value?: string;
+  rel?: string;
+  href?: string;
+  type?: string;
+}
+
+/** RDAP remark/notice */
+export interface RDAPRemark {
+  title?: string;
+  description?: string[];
+  links?: RDAPLink[];
+}
+
+/** RDAP response (for domain, IP, or ASN lookups) */
+export interface RDAPResponse {
+  // Common fields
+  handle?: string;
+  ldhName?: string;
+  unicodeName?: string;
+  status?: string[];
+  events?: RDAPEvent[];
+  entities?: RDAPEntity[];
+  nameservers?: RDAPNameserver[];
+  links?: RDAPLink[];
+  remarks?: RDAPRemark[];
+  notices?: RDAPRemark[];
+
+  // Network-specific (for IP lookups)
+  startAddress?: string;
+  endAddress?: string;
+  ipVersion?: string;
+  name?: string;
+  type?: string;
+  country?: string;
+  parentHandle?: string;
+
+  // Error fields
+  errorCode?: number;
+  title?: string;
+  description?: string[];
+
+  // Metadata
+  rawJson?: string;
+  lookupTimeMs: number;
+  timestamp: string;
+  error?: string;
+}
+
+/** Parsed domain info from RDAP */
+export interface ParsedDomainInfo {
+  domain: string;
+  registrar?: string;
+  registrationDate?: string;
+  expirationDate?: string;
+  updatedDate?: string;
+  status?: string[];
+  nameservers?: string[];
+  dnssec: boolean;
+}
+
+/** DNS lookup result */
+export interface DNSLookupResult {
+  domain: string;
+  queryType: string;
+  records: DNSRecord[];
+  mxRecords?: MXRecord[];
+  srvRecords?: SRVRecord[];
+  soaRecord?: SOARecord;
+  lookupTimeMs: number;
+  error?: string;
+  timestamp: string;
+}
+
+/** DNS record */
+export interface DNSRecord {
+  type: DNSRecordType;
+  name: string;
+  value: string;
+  ttl?: number;
+}
+
+/** MX record */
+export interface MXRecord {
+  host: string;
+  priority: number;
+}
+
+/** SRV record */
+export interface SRVRecord {
+  target: string;
+  port: number;
+  priority: number;
+  weight: number;
+}
+
+/** SOA record */
+export interface SOARecord {
+  primaryNs: string;
+  adminEmail: string;
+  serial: number;
+  refresh: number;
+  retry: number;
+  expire: number;
+  minTtl: number;
+}
+
+/** Complete DNS lookup result */
+export interface CompleteDNSLookup {
+  domain: string;
+  a?: string[];
+  aaaa?: string[];
+  mx?: MXRecord[];
+  ns?: string[];
+  txt?: string[];
+  cname?: string;
+  soa?: SOARecord;
+  lookupTimeMs: number;
+  errors?: string[];
+  timestamp: string;
+}
+
+// ============================================================================
+// Main API
+// ============================================================================
+
 export interface PxAPI {
+  // Core functions
   version(): Promise<string>;
   hello(name?: string): Promise<string>;
   newTree(dim: number): Promise<PxTree>;
+
+  // Statistics utilities
+  computeDistributionStats(distances: number[]): Promise;
+
+  // NAT routing / peer quality functions
+  computePeerQualityScore(metrics: NATRoutingMetrics, weights?: QualityWeights): Promise<number>;
+  computeTrustScore(metrics: TrustMetrics): Promise<number>;
+  getDefaultQualityWeights(): Promise<QualityWeights>;
+  getDefaultPeerFeatureRanges(): Promise<FeatureRanges>;
+  normalizePeerFeatures(features: number[], ranges?: FeatureRanges): Promise<number[]>;
+  weightedPeerFeatures(normalized: number[], weights: number[]): Promise<number[]>;
+
+  // DNS tools
+  getExternalToolLinks(domain: string): Promise<ExternalToolLinks>;
+  getExternalToolLinksIP(ip: string): Promise<ExternalToolLinks>;
+  getExternalToolLinksEmail(emailOrDomain: string): Promise<ExternalToolLinks>;
+  getRDAPServers(): Promise<RDAPServers>;
+  buildRDAPDomainURL(domain: string): Promise<string>;
+  buildRDAPIPURL(ip: string): Promise<string>;
+  buildRDAPASNURL(asn: string): Promise<string>;
+  getDNSRecordTypes(): Promise<DNSRecordType[]>;
+  getDNSRecordTypeInfo(): Promise<DNSRecordTypeInfo[]>;
+  getCommonDNSRecordTypes(): Promise<DNSRecordType[]>;
 }

 export function init(options?: InitOptions): Promise<PxAPI>;
diff --git a/npm/poindexter-wasm/loader.js b/npm/poindexter-wasm/loader.js
index a45c990..e8da38d 100644
--- a/npm/poindexter-wasm/loader.js
+++ b/npm/poindexter-wasm/loader.js
@@ -40,6 +40,7 @@ function call(name, ...args) {
 class PxTree {
   constructor(treeId) { this.treeId = treeId; }
+  // Core operations
   async len() { return call('pxTreeLen', this.treeId); }
   async dim() { return call('pxTreeDim', this.treeId); }
   async insert(point) { return call('pxInsert', this.treeId, point); }
@@ -48,6 +49,12 @@ class PxTree {
   async kNearest(query, k) { return call('pxKNearest', this.treeId, query, k); }
   async radius(query, r) { return call('pxRadius', this.treeId, query, r); }
   async exportJSON() { return call('pxExportJSON', this.treeId); }
+  // Analytics operations
+  async getAnalytics() { return call('pxGetAnalytics', this.treeId); }
+  async getPeerStats() { return call('pxGetPeerStats', this.treeId); }
+  async getTopPeers(n) { return call('pxGetTopPeers', this.treeId, n); }
+  async getAxisDistributions(axisNames) { return call('pxGetAxisDistributions', this.treeId, axisNames); }
+  async resetAnalytics() { return call('pxResetAnalytics', this.treeId); }
 }

 export async function init(options = {}) {
@@ -78,12 +85,33 @@ export async function init(options = {}) {
   go.run(result.instance);

   const api = {
+    // Core functions
     version: async () => call('pxVersion'),
     hello: async (name) => call('pxHello', name ?? ''),
     newTree: async (dim) => {
       const info = call('pxNewTree', dim);
       return new PxTree(info.treeId);
-    }
+    },
+    // Statistics utilities
+    computeDistributionStats: async (distances) => call('pxComputeDistributionStats', distances),
+    // NAT routing / peer quality functions
+    computePeerQualityScore: async (metrics, weights) => call('pxComputePeerQualityScore', metrics, weights),
+    computeTrustScore: async (metrics) => call('pxComputeTrustScore', metrics),
+    getDefaultQualityWeights: async () => call('pxGetDefaultQualityWeights'),
+    getDefaultPeerFeatureRanges: async () => call('pxGetDefaultPeerFeatureRanges'),
+    normalizePeerFeatures: async (features, ranges) => call('pxNormalizePeerFeatures', features, ranges),
+    weightedPeerFeatures: async (normalized, weights) => call('pxWeightedPeerFeatures', normalized, weights),
+    // DNS tools
+    getExternalToolLinks: async (domain) => call('pxGetExternalToolLinks', domain),
+    getExternalToolLinksIP: async (ip) => call('pxGetExternalToolLinksIP', ip),
+    getExternalToolLinksEmail: async (emailOrDomain) => call('pxGetExternalToolLinksEmail', emailOrDomain),
+    getRDAPServers: async () => call('pxGetRDAPServers'),
+    buildRDAPDomainURL: async (domain) => call('pxBuildRDAPDomainURL', domain),
+    buildRDAPIPURL: async (ip) => call('pxBuildRDAPIPURL', ip),
+    buildRDAPASNURL: async (asn) => call('pxBuildRDAPASNURL', asn),
+    getDNSRecordTypes: async () => call('pxGetDNSRecordTypes'),
+    getDNSRecordTypeInfo: async () => call('pxGetDNSRecordTypeInfo'),
+    getCommonDNSRecordTypes: async () => call('pxGetCommonDNSRecordTypes')
   };

   return api;
diff --git a/wasm/main.go b/wasm/main.go
index 4f046d8..373b827 100644
--- a/wasm/main.go
+++ b/wasm/main.go
@@ -215,14 +215,490 @@ func exportJSON(_ js.Value, args []js.Value) (any, error) {
     if !ok {
         return nil, fmt.Errorf("unknown treeId %d", id)
     }
-    // naive export: ask for all points by radius from origin with large r; or keep
-    // internal slice? KDTree doesn't expose iteration, so skip heavy export here.
-    // Return metrics only for now.
-    m := map[string]any{"dim": t.Dim(), "len": t.Len()}
+    // Export all points
+    points := t.Points()
+    jsPts := make([]any, len(points))
+    for i, p := range points {
+        jsPts[i] = map[string]any{"id": p.ID, "coords": p.Coords, "value": p.Value}
+    }
+    m := map[string]any{
+        "dim": t.Dim(),
+        "len": t.Len(),
+        "backend": string(t.Backend()),
+        "points": jsPts,
+    }
     b, _ := json.Marshal(m)
     return string(b), nil
 }
+func getAnalytics(_ js.Value, args []js.Value) (any, error) {
+    // getAnalytics(treeId) -> analytics snapshot
+    if len(args) < 1 {
+        return nil, errors.New("getAnalytics(treeId)")
+    }
+    id := args[0].Int()
+    t, ok := treeRegistry[id]
+    if !ok {
+        return nil, fmt.Errorf("unknown treeId %d", id)
+    }
+    snap := t.GetAnalyticsSnapshot()
+    return map[string]any{
+        "queryCount": snap.QueryCount,
+        "insertCount": snap.InsertCount,
+        "deleteCount": snap.DeleteCount,
+        "avgQueryTimeNs": snap.AvgQueryTimeNs,
+        "minQueryTimeNs": snap.MinQueryTimeNs,
+        "maxQueryTimeNs": snap.MaxQueryTimeNs,
+        "lastQueryTimeNs": snap.LastQueryTimeNs,
+        "lastQueryAt": snap.LastQueryAt.UnixMilli(),
+        "createdAt": snap.CreatedAt.UnixMilli(),
+        "backendRebuildCount": snap.BackendRebuildCnt,
+        "lastRebuiltAt": snap.LastRebuiltAt.UnixMilli(),
+    }, nil
+}
+
+func getPeerStats(_ js.Value, args []js.Value) (any, error) {
+    // getPeerStats(treeId) -> array of peer stats
+    if len(args) < 1 {
+        return nil, errors.New("getPeerStats(treeId)")
+    }
+    id := args[0].Int()
+    t, ok := treeRegistry[id]
+    if !ok {
+        return nil, fmt.Errorf("unknown treeId %d", id)
+    }
+    stats := t.GetPeerStats()
+    jsStats := make([]any, len(stats))
+    for i, s := range stats {
+        jsStats[i] = map[string]any{
+            "peerId": s.PeerID,
+            "selectionCount": s.SelectionCount,
+            "avgDistance": s.AvgDistance,
+            "lastSelectedAt": s.LastSelectedAt.UnixMilli(),
+        }
+    }
+    return jsStats, nil
+}
+
+func getTopPeers(_ js.Value, args []js.Value) (any, error) {
+    // getTopPeers(treeId, n) -> array of top n peer stats
+    if len(args) < 2 {
+        return nil, errors.New("getTopPeers(treeId, n)")
+    }
+    id := args[0].Int()
+    n := args[1].Int()
+    t, ok := treeRegistry[id]
+    if !ok {
+        return nil, fmt.Errorf("unknown treeId %d", id)
+    }
+    stats := t.GetTopPeers(n)
+    jsStats := make([]any, len(stats))
+    for i, s := range stats {
+        jsStats[i] = map[string]any{
+            "peerId": s.PeerID,
+            "selectionCount": s.SelectionCount,
+            "avgDistance": s.AvgDistance,
+            "lastSelectedAt": s.LastSelectedAt.UnixMilli(),
+        }
+    }
+    return jsStats, nil
+}
+
+func getAxisDistributions(_ js.Value, args []js.Value) (any, error) {
+    // getAxisDistributions(treeId, axisNames?: string[]) -> array of axis distribution stats
+    if len(args) < 1 {
+        return nil, errors.New("getAxisDistributions(treeId)")
+    }
+    id := args[0].Int()
+    t, ok := treeRegistry[id]
+    if !ok {
+        return nil, fmt.Errorf("unknown treeId %d", id)
+    }
+
+    var axisNames []string
+    if len(args) > 1 && !args[1].IsUndefined() && !args[1].IsNull() {
+        ln := args[1].Length()
+        axisNames = make([]string, ln)
+        for i := 0; i < ln; i++ {
+            axisNames[i] = args[1].Index(i).String()
+        }
+    }
+
+    dists := t.ComputeDistanceDistribution(axisNames)
+    jsDists := make([]any, len(dists))
+    for i, d := range dists {
+        jsDists[i] = map[string]any{
+            "axis": d.Axis,
+            "name": d.Name,
+            "stats": map[string]any{
+                "count": d.Stats.Count,
+                "min": d.Stats.Min,
+                "max": d.Stats.Max,
+                "mean": d.Stats.Mean,
+                "median": d.Stats.Median,
+                "stdDev": d.Stats.StdDev,
+                "p25": d.Stats.P25,
+                "p75": d.Stats.P75,
+                "p90": d.Stats.P90,
+                "p99": d.Stats.P99,
+                "variance": d.Stats.Variance,
+                "skewness": d.Stats.Skewness,
+            },
+        }
+    }
+    return jsDists, nil
+}
+
+func resetAnalytics(_ js.Value, args []js.Value) (any, error) {
+    // resetAnalytics(treeId) -> resets all analytics
+    if len(args) < 1 {
+        return nil, errors.New("resetAnalytics(treeId)")
+    }
+    id := args[0].Int()
+    t, ok := treeRegistry[id]
+    if !ok {
+        return nil, fmt.Errorf("unknown treeId %d", id)
+    }
+    t.ResetAnalytics()
+    return true, nil
+}
+
+func computeDistributionStats(_ js.Value, args []js.Value) (any, error) {
+    // computeDistributionStats(distances: number[]) -> distribution stats
+    if len(args) < 1 {
+        return nil, errors.New("computeDistributionStats(distances)")
+    }
+    distances, err := getFloatSlice(args[0])
+    if err != nil {
+        return nil, err
+    }
+    stats := pd.ComputeDistributionStats(distances)
+    return map[string]any{
+        "count": stats.Count,
+        "min": stats.Min,
+        "max": stats.Max,
+        "mean": stats.Mean,
+        "median": stats.Median,
+        "stdDev": stats.StdDev,
+        "p25": stats.P25,
+        "p75": stats.P75,
+        "p90": stats.P90,
+        "p99": stats.P99,
+        "variance": stats.Variance,
+        "skewness": stats.Skewness,
+        "sampleSize": stats.SampleSize,
+        "computedAt": stats.ComputedAt.UnixMilli(),
+    }, nil
+}
+
+func computePeerQualityScore(_ js.Value, args []js.Value) (any, error) {
+    // computePeerQualityScore(metrics: NATRoutingMetrics, weights?: QualityWeights) -> score
+    if len(args) < 1 {
+        return nil, errors.New("computePeerQualityScore(metrics)")
+    }
+    m := args[0]
+    metrics := pd.NATRoutingMetrics{
+        ConnectivityScore: m.Get("connectivityScore").Float(),
+        SymmetryScore: m.Get("symmetryScore").Float(),
+        RelayProbability: m.Get("relayProbability").Float(),
+        DirectSuccessRate: m.Get("directSuccessRate").Float(),
+        AvgRTTMs: m.Get("avgRttMs").Float(),
+        JitterMs: m.Get("jitterMs").Float(),
+        PacketLossRate: m.Get("packetLossRate").Float(),
+        BandwidthMbps: m.Get("bandwidthMbps").Float(),
+        NATType: m.Get("natType").String(),
+    }
+
+    var weights *pd.QualityWeights
+    if len(args) > 1 && !args[1].IsUndefined() && !args[1].IsNull() {
+        w := args[1]
+        weights = &pd.QualityWeights{
+            Latency: w.Get("latency").Float(),
+            Jitter: w.Get("jitter").Float(),
+            PacketLoss: w.Get("packetLoss").Float(),
+            Bandwidth: w.Get("bandwidth").Float(),
+            Connectivity: w.Get("connectivity").Float(),
+            Symmetry: w.Get("symmetry").Float(),
+            DirectSuccess: w.Get("directSuccess").Float(),
+            RelayPenalty: w.Get("relayPenalty").Float(),
+            NATType: w.Get("natType").Float(),
+        }
+    }
+
+    score := pd.PeerQualityScore(metrics, weights)
+    return score, nil
+}
+
+func computeTrustScore(_ js.Value, args []js.Value) (any, error) {
+    // computeTrustScore(metrics: TrustMetrics) -> score
+    if len(args) < 1 {
+        return nil, errors.New("computeTrustScore(metrics)")
+    }
+    m := args[0]
+    metrics := pd.TrustMetrics{
+        ReputationScore: m.Get("reputationScore").Float(),
+        SuccessfulTransactions: int64(m.Get("successfulTransactions").Int()),
+        FailedTransactions: int64(m.Get("failedTransactions").Int()),
+        AgeSeconds: int64(m.Get("ageSeconds").Int()),
+        VouchCount: m.Get("vouchCount").Int(),
+        FlagCount: m.Get("flagCount").Int(),
+        ProofOfWork: m.Get("proofOfWork").Float(),
+    }
+
+    score := pd.ComputeTrustScore(metrics)
+    return score, nil
+}
+
+func getDefaultQualityWeights(_ js.Value, _ []js.Value) (any, error) {
+    w := pd.DefaultQualityWeights()
+    return map[string]any{
+        "latency": w.Latency,
+        "jitter": w.Jitter,
+        "packetLoss": w.PacketLoss,
+        "bandwidth": w.Bandwidth,
+        "connectivity": w.Connectivity,
+        "symmetry": w.Symmetry,
+        "directSuccess": w.DirectSuccess,
+        "relayPenalty": w.RelayPenalty,
+        "natType": w.NATType,
+    }, nil
+}
+
+func getDefaultPeerFeatureRanges(_ js.Value, _ []js.Value) (any, error) {
+    ranges := pd.DefaultPeerFeatureRanges()
+    jsRanges := make([]any, len(ranges.Ranges))
+    for i, r := range ranges.Ranges {
+        jsRanges[i] = map[string]any{
+            "min": r.Min,
+            "max": r.Max,
+        }
+    }
+    return map[string]any{
+        "ranges": jsRanges,
+        "labels": pd.StandardFeatureLabels(),
+    }, nil
+}
+
+func normalizePeerFeatures(_ js.Value, args []js.Value) (any, error) {
+    // normalizePeerFeatures(features: number[], ranges?: FeatureRanges) -> number[]
+    if len(args) < 1 {
+        return nil, errors.New("normalizePeerFeatures(features)")
+    }
+    features, err := getFloatSlice(args[0])
+    if err != nil {
+        return nil, err
+    }
+
+    ranges := pd.DefaultPeerFeatureRanges()
+    if len(args) > 1 && !args[1].IsUndefined() && !args[1].IsNull() {
+        rangesArg := args[1].Get("ranges")
+        if !rangesArg.IsUndefined() && !rangesArg.IsNull() {
+            ln := rangesArg.Length()
+            ranges.Ranges = make([]pd.AxisStats, ln)
+            for i := 0; i < ln; i++ {
+                r := rangesArg.Index(i)
+                ranges.Ranges[i] = pd.AxisStats{
+                    Min: r.Get("min").Float(),
+                    Max: r.Get("max").Float(),
+                }
+            }
+        }
+    }
+
+    normalized := pd.NormalizePeerFeatures(features, ranges)
+    return normalized, nil
+}
+
+func weightedPeerFeatures(_ js.Value, args []js.Value) (any, error) {
+    // weightedPeerFeatures(normalized: number[], weights: number[]) -> number[]
+    if len(args) < 2 {
+        return nil, errors.New("weightedPeerFeatures(normalized, weights)")
+    }
+    normalized, err := getFloatSlice(args[0])
+    if err != nil {
+        return nil, err
+    }
+    weights, err := getFloatSlice(args[1])
+    if err != nil {
+        return nil, err
+    }
+
+    weighted := pd.WeightedPeerFeatures(normalized, weights)
+    return weighted, nil
+}
+
+// ============================================================================
+// DNS Tools Functions
+// ============================================================================
+
+func getExternalToolLinks(_ js.Value, args []js.Value) (any, error) {
+    // getExternalToolLinks(domain: string) -> ExternalToolLinks
+    if len(args) < 1 {
+        return nil, errors.New("getExternalToolLinks(domain)")
+    }
+    domain := args[0].String()
+    links := pd.GetExternalToolLinks(domain)
+    return externalToolLinksToJS(links), nil
+}
+
+func getExternalToolLinksIP(_ js.Value, args []js.Value) (any, error) {
+    // getExternalToolLinksIP(ip: string) -> ExternalToolLinks
+    if len(args) < 1 {
+        return nil, errors.New("getExternalToolLinksIP(ip)")
+    }
+    ip := args[0].String()
+    links := pd.GetExternalToolLinksIP(ip)
+    return externalToolLinksToJS(links), nil
+}
+
+func getExternalToolLinksEmail(_ js.Value, args []js.Value) (any, error) {
+    // getExternalToolLinksEmail(emailOrDomain: string) -> ExternalToolLinks
+    if len(args) < 1 {
+        return nil, errors.New("getExternalToolLinksEmail(emailOrDomain)")
+    }
+    emailOrDomain := args[0].String()
+    links := pd.GetExternalToolLinksEmail(emailOrDomain)
+    return externalToolLinksToJS(links), nil
+}
+
+func externalToolLinksToJS(links pd.ExternalToolLinks) map[string]any {
+    return map[string]any{
+        "target": links.Target,
+        "type": links.Type,
+        // MXToolbox
+        "mxtoolboxDns": links.MXToolboxDNS,
+        "mxtoolboxMx": links.MXToolboxMX,
+        "mxtoolboxBlacklist": links.MXToolboxBlacklist,
+        "mxtoolboxSmtp": links.MXToolboxSMTP,
+        "mxtoolboxSpf": links.MXToolboxSPF,
+        "mxtoolboxDmarc": links.MXToolboxDMARC,
+        "mxtoolboxDkim": links.MXToolboxDKIM,
+        "mxtoolboxHttp": links.MXToolboxHTTP,
+        "mxtoolboxHttps": links.MXToolboxHTTPS,
+        "mxtoolboxPing": links.MXToolboxPing,
+        "mxtoolboxTrace": links.MXToolboxTrace,
+        "mxtoolboxWhois": links.MXToolboxWhois,
+        "mxtoolboxAsn": links.MXToolboxASN,
+        // DNSChecker
+        "dnscheckerDns": links.DNSCheckerDNS,
+        "dnscheckerPropagation": links.DNSCheckerPropagation,
+        // Other tools
+        "whois": links.WhoIs,
+        "viewdns": links.ViewDNS,
+        "intodns": links.IntoDNS,
+        "dnsviz": links.DNSViz,
+        "securitytrails": links.SecurityTrails,
+        "shodan": links.Shodan,
+        "censys": links.Censys,
+        "builtwith": links.BuiltWith,
+        "ssllabs": links.SSLLabs,
+        "hstsPreload": links.HSTSPreload,
+        "hardenize": links.Hardenize,
+        // IP-specific
+        "ipinfo": links.IPInfo,
+        "abuseipdb": links.AbuseIPDB,
+        "virustotal": links.VirusTotal,
+        "threatcrowd": links.ThreatCrowd,
+        // Email-specific
+        "mailtester": links.MailTester,
+        "learndmarc": links.LearnDMARC,
+    }
+}
+
+func getRDAPServers(_ js.Value, _ []js.Value) (any, error) {
+    // Returns a list of known RDAP servers for reference
+    servers := map[string]any{
+        "tlds": map[string]string{
+            "com": "https://rdap.verisign.com/com/v1/",
+            "net": "https://rdap.verisign.com/net/v1/",
+            "org": "https://rdap.publicinterestregistry.org/rdap/",
+            "info": "https://rdap.afilias.net/rdap/info/",
+            "io": "https://rdap.nic.io/",
+            "co": "https://rdap.nic.co/",
+            "dev": "https://rdap.nic.google/",
+            "app": "https://rdap.nic.google/",
+        },
+        "rirs": map[string]string{
+            "arin": "https://rdap.arin.net/registry/",
+            "ripe": "https://rdap.db.ripe.net/",
+            "apnic": "https://rdap.apnic.net/",
+            "afrinic": "https://rdap.afrinic.net/rdap/",
+            "lacnic": "https://rdap.lacnic.net/rdap/",
+        },
+        "universal": "https://rdap.org/",
+    }
+    return servers, nil
+}
+
+func buildRDAPDomainURL(_ js.Value, args []js.Value) (any, error) {
+    // buildRDAPDomainURL(domain: string) -> string
+    if len(args) < 1 {
+        return nil, errors.New("buildRDAPDomainURL(domain)")
+    }
+    domain := args[0].String()
+    // Use universal RDAP redirector
+    return fmt.Sprintf("https://rdap.org/domain/%s", domain), nil
+}
+
+func buildRDAPIPURL(_ js.Value, args []js.Value) (any, error) {
+    // buildRDAPIPURL(ip: string) -> string
+    if len(args) < 1 {
+        return nil, errors.New("buildRDAPIPURL(ip)")
+    }
+    ip := args[0].String()
+    return fmt.Sprintf("https://rdap.org/ip/%s", ip), nil
+}
+
+func buildRDAPASNURL(_ js.Value, args []js.Value) (any, error) {
+    // buildRDAPASNURL(asn: string) -> string
+    if len(args) < 1 {
+        return nil, errors.New("buildRDAPASNURL(asn)")
+    }
+    asn := args[0].String()
+    // Normalize ASN
+    asnNum := asn
+    if len(asn) > 2 && (asn[:2] == "AS" || asn[:2] == "as") {
+        asnNum = asn[2:]
+    }
+    return fmt.Sprintf("https://rdap.org/autnum/%s", asnNum), nil
+}
+
+func getDNSRecordTypes(_ js.Value, _ []js.Value) (any, error) {
+    // Returns all available DNS record types
+    types := pd.GetAllDNSRecordTypes()
+    result := make([]string, len(types))
+    for i, t := range types {
+        result[i] = string(t)
+    }
+    return result, nil
+}
+
+func getDNSRecordTypeInfo(_ js.Value, _ []js.Value) (any, error) {
+    // Returns detailed info about all DNS record types
+    info := pd.GetDNSRecordTypeInfo()
+    result := make([]any, len(info))
+    for i, r := range info {
+        result[i] = map[string]any{
+            "type": string(r.Type),
+            "name": r.Name,
+            "description": r.Description,
+            "rfc": r.RFC,
+            "common": r.Common,
+        }
+    }
+    return result, nil
+}
+
+func getCommonDNSRecordTypes(_ js.Value, _ []js.Value) (any, error) {
+    // Returns only commonly used DNS record types
+    types := pd.GetCommonDNSRecordTypes()
+    result := make([]string, len(types))
+    for i, t := range types {
+        result[i] = string(t)
+    }
+    return result, nil
+}
+
 func main() {
     // Export core API
     export("pxVersion", version)
@@ -237,6 +713,34 @@ func main() {
     export("pxRadius", radius)
     export("pxExportJSON", exportJSON)
+
+    // Export analytics API
+    export("pxGetAnalytics", getAnalytics)
+    export("pxGetPeerStats", getPeerStats)
+    export("pxGetTopPeers", getTopPeers)
+    export("pxGetAxisDistributions", getAxisDistributions)
+    export("pxResetAnalytics", resetAnalytics)
+    export("pxComputeDistributionStats", computeDistributionStats)
+
+    // Export NAT routing / peer quality API
+    export("pxComputePeerQualityScore", computePeerQualityScore)
+    export("pxComputeTrustScore", computeTrustScore)
+    export("pxGetDefaultQualityWeights", getDefaultQualityWeights)
+    export("pxGetDefaultPeerFeatureRanges", getDefaultPeerFeatureRanges)
+    export("pxNormalizePeerFeatures", normalizePeerFeatures)
+    export("pxWeightedPeerFeatures", weightedPeerFeatures)
+
+    // Export DNS tools API
+    export("pxGetExternalToolLinks", getExternalToolLinks)
+    export("pxGetExternalToolLinksIP", getExternalToolLinksIP)
+    export("pxGetExternalToolLinksEmail", getExternalToolLinksEmail)
+    export("pxGetRDAPServers", getRDAPServers)
+    export("pxBuildRDAPDomainURL", buildRDAPDomainURL)
+    export("pxBuildRDAPIPURL", buildRDAPIPURL)
+    export("pxBuildRDAPASNURL", buildRDAPASNURL)
+    export("pxGetDNSRecordTypes", getDNSRecordTypes)
+    export("pxGetDNSRecordTypeInfo", getDNSRecordTypeInfo)
+    export("pxGetCommonDNSRecordTypes", getCommonDNSRecordTypes)
+
     // Keep running
     select {}
 }
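
Usage sketch for the DNS-tool exports added by this patch, written against the TypeScript surface declared above. The import path, the helper name inspectDomain, and the example domain are illustrative assumptions; only the API function names and return shapes come from the patch itself.

// dns-tools-example.ts (illustrative sketch; import path assumed)
import { init } from './npm/poindexter-wasm/loader.js';

async function inspectDomain(domain: string): Promise<void> {
  const px = await init();

  // Deep links to external analysis tools for the domain.
  const links = await px.getExternalToolLinks(domain);
  console.log('MXToolbox DNS:', links.mxtoolboxDns);
  console.log('DNSViz:', links.dnsviz);

  // The wasm side only builds the RDAP URL (via the rdap.org redirector);
  // fetching and parsing the RDAPResponse is left to the caller.
  const rdapURL = await px.buildRDAPDomainURL(domain);
  console.log('RDAP:', rdapURL);

  // Commonly used record types only (A, AAAA, MX, TXT, ...).
  const common = await px.getCommonDNSRecordTypes();
  console.log('Common record types:', common.join(', '));
}

inspectDomain('example.com').catch(console.error);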
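A similar sketch for the peer-quality helpers. The metric values and the 'full-cone' NAT label are made up for illustration; the field names mirror the keys the Go bridge reads from the metrics object, and the weights object is the one returned by getDefaultQualityWeights.

// peer-quality-example.ts (illustrative sketch)
import { init } from './npm/poindexter-wasm/loader.js';

async function scorePeer(): Promise<number> {
  const px = await init();

  // Start from the default weights; callers can tweak individual fields
  // before passing the object back in.
  const weights = await px.getDefaultQualityWeights();

  return px.computePeerQualityScore(
    {
      connectivityScore: 0.9,   // illustrative values, not real measurements
      symmetryScore: 0.8,
      relayProbability: 0.1,
      directSuccessRate: 0.95,
      avgRttMs: 42,
      jitterMs: 3,
      packetLossRate: 0.01,
      bandwidthMbps: 80,
      natType: 'full-cone',     // label assumed; the bridge passes it through as a string
    },
    weights,
  );
}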
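And one for the per-tree analytics. The point literal assumes the { id, coords, value } shape used by exportJSON above; the analytics and distribution field names match the keys the Go side marshals in getAnalytics and getAxisDistributions.

// tree-analytics-example.ts (illustrative sketch)
import { init } from './npm/poindexter-wasm/loader.js';

async function treeAnalyticsDemo(): Promise<void> {
  const px = await init();
  const tree = await px.newTree(4);

  await tree.insert({ id: 'peer-1', coords: [0.1, 0.2, 0.3, 0.4], value: null });
  await tree.kNearest([0, 0, 0, 0], 1);

  // Counters and query timing tracked on the Go side since tree creation.
  const analytics = await tree.getAnalytics();
  console.log('queries so far:', analytics.queryCount);

  // Per-axis distribution stats: count, min/max, mean, percentiles, skewness.
  const dists = await tree.getAxisDistributions();
  for (const d of dists) {
    console.log(d.name, d.stats.mean, d.stats.p90);
  }
}

treeAnalyticsDemo().catch(console.error);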